Column      Type    Values / string lengths
kernel_id   int64   24.2k – 23.3M
prompt      string  lengths 8 – 1.85M
completion  string  lengths 1 – 182k
comp_name   string  lengths 5 – 57
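The rows that follow are flattened notebook cells, one field per line, in the column order above. A minimal sketch of loading and inspecting a table with this schema using pandas (the file name is hypothetical; this dump does not name its source file):

import pandas as pd

# Hypothetical export path -- the dump itself does not name a file.
df = pd.read_csv('kernel_cells.csv')

# Expected schema, per the header above:
#   kernel_id    int64   (roughly 24.2k to 23.3M)
#   prompt       str     (8 to 1.85M chars; code ending in an operation tag)
#   completion   str     (1 to 182k chars)
#   comp_name    str     (5 to 57 chars; the Kaggle competition title)
print(df.dtypes)
print(df['comp_name'].value_counts().head())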
21,908,637
submission = validation_forecast(0 )<concatenate>
cat_dist(tmp_train, var='IsNumericTicket', hue='Survived' )
Titanic - Machine Learning from Disaster
21,908,637
for i in tqdm(range(1,600)) : submission = pd.concat([submission,validation_forecast(i)] )<categorify>
all_data['Age'] = all_data.Age.fillna(train.Age.median()) all_data['Fare'] = all_data.Fare.fillna(train.Fare.median()) all_data.dropna(subset=['Embarked'], inplace=True) cabins = all_data.Cabin all_data.drop(['Cabin'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
21,908,637
frame = submission.to_frame()<rename_columns>
all_data['CabinCnt'] = cabins.apply(lambda x: 0 if pd.isna(x)else len(x.split(' '))) all_data['CabinClass'] = cabins.apply(lambda x: str(x)[0]) all_data['IsNumericTicket'] = all_data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0) all_data['TicketType'] = all_data.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1] ).replace('.','' ).replace('/','' ).lower() if len(x.split(' ')[:-1])> 0 else 0) all_data['Title'] = all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip()) all_data['Family'] = all_data.SibSp + all_data.Parch
Titanic - Machine Learning from Disaster
21,908,637
frame.columns = ['error']<save_to_csv>
numeric_vars = ['Age', 'SibSp', 'Parch', 'Fare', 'CabinCnt', 'Family'] ordinal_vars = ['Pclass'] nominal_vars = ['Name', 'Sex', 'Ticket', 'Embarked', 'CabinClass', 'IsNumericTicket', 'TicketType', 'Title'] all_data[nominal_vars] = all_data[nominal_vars].astype('str') for feature in numeric_vars: all_data[feature] = np.log1p(all_data[feature]) scaler = StandardScaler() numeric_vars = all_data.columns[(all_data.dtypes != 'object')&(all_data.columns != 'PassengerId')&(all_data.columns != 'Survived')&(all_data.columns != 'IsTrain')] all_data[numeric_vars] = scaler.fit_transform(all_data[numeric_vars] )
Titanic - Machine Learning from Disaster
21,908,637
frame.to_csv('submit.csv' )<set_options>
all_data.drop(['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True) data_dummies = pd.get_dummies(all_data) X_train = data_dummies[data_dummies.Survived.notnull() ].drop(['Survived'], axis=1) y_train = data_dummies[data_dummies.Survived.notnull() ].Survived X_test = data_dummies[data_dummies.Survived.isnull() ].drop(['Survived'], axis=1 )
Titanic - Machine Learning from Disaster
21,908,637
warnings.filterwarnings('ignore') %matplotlib inline<load_from_csv>
all_data.Title = all_data.Title.apply(lambda x: 'Others' if x in list(all_data.Title.value_counts() [all_data.Title.value_counts() < 8].index)else x) all_data.TicketType = all_data.TicketType.apply(lambda x: 'Others' if x in list(all_data.TicketType.value_counts() [all_data.TicketType.value_counts() < 10].index)else x )
Titanic - Machine Learning from Disaster
21,908,637
df = pd.read_csv('../input/sputnik/train.csv') df['epoch'] = pd.to_datetime(df.epoch) df.index = df.epoch df.drop('epoch', axis = 1, inplace = True) df.head()<filter>
data_dummies = pd.get_dummies(all_data) X_train = data_dummies[data_dummies.Survived.notnull() ].drop(['Survived'], axis=1) X_test = data_dummies[data_dummies.Survived.isnull() ].drop(['Survived'], axis=1 )
Titanic - Machine Learning from Disaster
21,908,637
train = df.loc[df['type'] == 'train']<feature_engineering>
allow_tuning = False
Titanic - Machine Learning from Disaster
21,908,637
train['error'] = np.linalg.norm(train[['x', 'y', 'z']].values - train[['x_sim', 'y_sim', 'z_sim']].values, axis=1 )<filter>
def xgb_gridsearch(params_grid_xgb, features, values, X, y, last=False): x_train, x_test = train_test_split(X, test_size=.2, random_state=42) y_train_tmp, y_test_tmp = train_test_split(y, test_size=.2, random_state=42) cv = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 3, random_state = 42) model_xgb = XGBClassifier(use_label_encoder = False, objective = 'binary:logistic') for i in range(len(features)) : params_grid_xgb[features[i]] = values[i] search_xgb = GridSearchCV(model_xgb, params_grid_xgb, verbose = 0, scoring = 'neg_log_loss', cv = cv ).fit(x_train, y_train_tmp, early_stopping_rounds = 15, eval_set = [[x_test, y_test_tmp]], eval_metric = 'logloss', verbose = False) for i in range(len(features)) : print(f"{features[i]}: {search_xgb.best_params_[features[i]]}") if not last: for k, v in search_xgb.best_params_.items() : search_xgb.best_params_[k] = [v] return search_xgb, search_xgb.best_params_
Titanic - Machine Learning from Disaster
21,908,637
test = df.loc[df['type'] == 'test']<define_variables>
if allow_tuning: params_knn = { 'n_neighbors' : range(1, 10), 'weights' : ['uniform', 'distance'], 'algorithm' : ['auto', 'ball_tree','kd_tree'], 'p' : [1,2] } model_knn = knn() search_knn = GridSearchCV(model_knn, params_knn, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train) print(search_knn.best_params_ )
Titanic - Machine Learning from Disaster
21,908,637
train_frames = [] for i in range(600): train_frames.append(train.loc[train['sat_id'] == i] )<filter>
if allow_tuning: params_logistic = { 'max_iter': [2000], 'penalty': ['l1', 'l2'], 'C': np.logspace(-4, 4, 20), 'solver': ['liblinear'] } model_logistic = LogisticRegression() search_logistic = GridSearchCV(model_logistic, params_logistic, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train) print(search_logistic.best_params_ )
Titanic - Machine Learning from Disaster
21,908,637
test_frames = [] for i in range(600): test_frames.append(test.loc[test['sat_id'] == i] )<choose_model_class>
if allow_tuning: params_svc = [{'kernel': ['rbf'], 'gamma': [.01,.1,.5, 1, 2, 5, 10], 'C': [.1, 1, 10, 100, 1000], 'probability': [True]}, {'kernel': ['poly'], 'degree' : [2, 3, 4, 5], 'C': [.01,.1, 1, 10, 100, 1000], 'probability': [True]}] model_svc = SVC() search_svc = GridSearchCV(model_svc, params_svc, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train) print(search_svc.best_params_ )
Titanic - Machine Learning from Disaster
21,908,637
season=24<prepare_x_and_y>
if allow_tuning: params_svc = {'kernel': ['rbf'], 'gamma': [i/10000 for i in range(90, 110)], 'C': range(50, 80, 10), 'probability': [True]} model_svc = SVC() search_svc = GridSearchCV(model_svc, params_svc, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train) print(search_svc.best_params_ )
Titanic - Machine Learning from Disaster
21,908,637
sample_data=train_frames[0].x<compute_test_metric>
Titanic - Machine Learning from Disaster
21,908,637
test_stationarity(sample_data )<split>
if allow_tuning: params_rf = { 'n_estimators': [95, 100, 105], 'criterion':['entropy'], 'bootstrap': [True, False], 'max_depth': [40, 45, 50], 'max_features': [4, 5, 6], 'min_samples_leaf': [1, 2, 3], 'min_samples_split': [9, 10, 11], 'random_state': [734]} model_rf = RandomForestClassifier() search_rf = GridSearchCV(model_rf, params_rf, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train) search_rf.best_params_['random_state']=242 search_rf.best_estimator_.random_state=242 print(search_rf.best_params_ )
Titanic - Machine Learning from Disaster
21,908,637
rcParams['figure.figsize'] = 12, 7 sample_data_diff = sample_data - sample_data.shift(1) sample_data_diff.dropna(inplace = True) test_stationarity(sample_data_diff, window = 48 )<define_variables>
if allow_tuning: params_xgb = {'n_estimators': [1000], 'learning_rate': [0.1], 'max_depth': [5], 'min_child_weight': [1], 'gamma': [0], 'subsample': [0.8], 'colsample_bytree': [0.8], 'n_jobs': [-1], 'objective': ['binary:logistic'], 'use_label_encoder': [False], 'eval_metric': ['logloss'], 'scale_pos_weight': [1]} search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['learning_rate'], [[0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.15, 0.2]], X_train, y_train) search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['max_depth', 'min_child_weight'], [range(3, 10), range(1, 6)], X_train, y_train) search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['gamma'], [[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2]], X_train, y_train) search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['subsample', 'colsample_bytree'], [[i/100.0 for i in range(75,90,5)], [i/100.0 for i in range(75,90,5)]], X_train, y_train) search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['reg_alpha'], [[1e-5, 1e-2, 0.1, 1, 100]], X_train, y_train) params_xgb['n_estimators'] = [5000] search_xgb, params_xgb = xgb_gridsearch(params_xgb, ['learning_rate'], [[0.001, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.15, 0.2]], X_train, y_train, last=True) x_train, x_test = train_test_split(X_train, test_size=.2, random_state=42) y_train_tmp, y_test_tmp = train_test_split(y_train, test_size=.2, random_state=42) model_xgb = XGBClassifier(**params_xgb) model_xgb = model_xgb.fit(x_train, y_train_tmp, eval_set=[(x_test, y_test_tmp)], eval_metric=['logloss'], early_stopping_rounds=15, verbose=0) search_xgb.best_estimator_.n_estimators = model_xgb.best_iteration
Titanic - Machine Learning from Disaster
21,908,637
d=1 D=0<import_modules>
if allow_tuning: model_knn = search_knn.best_estimator_ model_logistic = search_logistic.best_estimator_ model_svc = search_svc.best_estimator_ model_rf = search_rf.best_estimator_ model_xgb = search_xgb.best_estimator_ else: model_knn = knn(algorithm='auto', n_neighbors=9, p=1, weights='uniform') model_logistic = LogisticRegression(C=0.08858667904100823, max_iter=2000, penalty='l2', solver='liblinear') model_svc = SVC(C=70, gamma=0.0106, kernel='rbf', probability=True) model_rf = RandomForestClassifier(bootstrap=True, criterion='entropy', max_depth=50, max_features=6, min_samples_leaf=1, min_samples_split=10, n_estimators=100, random_state=734) model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1, colsample_bynode=1, colsample_bytree=0.8, enable_categorical=False, eval_metric='logloss', gamma=0.8,gpu_id=-1, importance_type=None, interaction_constraints='', learning_rate=0.15, max_delta_step=0, max_depth=5, min_child_weight=1, missing=np.nan, monotone_constraints='() ', n_estimators=15, n_jobs=-1, num_parallel_tree=1, predictor='auto', random_state=0, reg_alpha=1e-05, reg_lambda=1, scale_pos_weight=1, subsample=0.8, tree_method='exact', use_label_encoder=False, validate_parameters=1, verbosity=0) models = { 'knn': model_knn, 'logistic': model_logistic, 'svc': model_svc, 'rf': model_rf, 'xgb': model_xgb }
Titanic - Machine Learning from Disaster
21,908,637
from scipy.stats import linregress<compute_train_metric>
def select_models(start, cnt, goal, estimators, voting): if cnt == goal: estimators_copy = copy.deepcopy(estimators) voting_name = f'{voting}_' + '_'.join([i[0] for i in list(estimators_copy)]) models[voting_name] = VotingClassifier(estimators=estimators_copy, voting=voting) return for i in range(start, 5): estimators.append(list(models.items())[i]) select_models(i + 1, cnt + 1, goal, estimators, voting) estimators.pop()
Titanic - Machine Learning from Disaster
21,908,637
def prediction(train,test,period = 24): count = test.shape[0]//period left = test.shape[0]%period trend = train.error[-47:].rolling(window=24).mean() os1 = (train.iloc[-period:].error.values).tolist()*count if left != 0: os1 += train.iloc[-period:-(period-left)].error.values.tolist() seas = np.array(os1) a0,*_ = linregress(np.arange(24),trend.dropna().values) seas += np.arange(seas.shape[0])*a0 return seas<predict_on_test>
select_models(0, 0, 2, [], 'hard') select_models(0, 0, 3, [], 'hard') select_models(0, 0, 4, [], 'hard') select_models(0, 0, 5, [], 'hard') select_models(0, 0, 2, [], 'soft') select_models(0, 0, 3, [], 'soft') select_models(0, 0, 4, [], 'soft') select_models(0, 0, 5, [], 'soft' )
Titanic - Machine Learning from Disaster
21,908,637
pr = prediction(train_frames[0],test_frames[0] )<prepare_x_and_y>
result_by_model = pd.DataFrame({'model name': models.keys() , 'model': models.values() , 'score': 0} )
Titanic - Machine Learning from Disaster
21,908,637
y = np.hstack(( np.asarray(train_frames[0].error), pr))<concatenate>
for name, model in models.items() : result_by_model.loc[result_by_model['model name'] == name, 'score'] = cross_val_score(model, X_train,y_train,cv=5 ).mean()
Titanic - Machine Learning from Disaster
21,908,637
x = np.hstack(( np.asarray(train_frames[0].index),(np.asarray(test_frames[0].index))))<predict_on_test>
result_by_model.sort_values('score', ascending=False ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
21,908,637
<define_variables><EOS>
model_name = 'rf' models[model_name].fit(X_train, y_train) y_pred = models[model_name].predict(X_test ).astype('int') submission = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': y_pred}) submission.to_csv('submission.csv', index = False )
Titanic - Machine Learning from Disaster
22,003,418
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe>
import numpy as np import pandas as pd from sklearn import ensemble from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_validate,GridSearchCV import lightgbm as lgbm from collections import Counter
Titanic - Machine Learning from Disaster
22,003,418
df_pred_err = pd.DataFrame() df_pred_err ['id'] = pred_id df_pred_err['error'] = result_errors<save_to_csv>
train = pd.read_csv(".. /input/titanic/train.csv") test = pd.read_csv(".. /input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
22,003,418
df_pred_err.to_csv('prediction.csv',sep=",",index = False )<compute_test_metric>
y = train['Survived'] X = train.drop(['Survived'], axis=1) X_test = test
Titanic - Machine Learning from Disaster
22,003,418
def smape(predict, test): return np.mean(2 * np.abs(test - predict)/(np.abs(test)+ np.abs(predict)))* 100 def get_sat(sat_id): sat = df.loc[df['sat_id'] == sat_id].copy() sat.drop(sat[sat['timedelta'] == 1].index, inplace=True) return sat def simple_period(sat_id): def get_loc_extr(arr): loc_max_ind_ = np.where(( arr[1:-1] > arr[:-2])&(arr[1:-1] > arr[2:])) loc_min_ind_ = np.where(( arr[1:-1] < arr[:-2])&(arr[1:-1] < arr[2:])) return loc_max_ind_, loc_min_ind_ sat = get_sat(sat_id) train = sat.loc[sat['type'] == 'train'] x_loc_max, x_loc_min = get_loc_extr(np.array(train['x'])) y_loc_max, y_loc_min = get_loc_extr(np.array(train['y'])) z_loc_max, z_loc_min = get_loc_extr(np.array(train['z'])) return int(np.concatenate([ x_loc_max[0][1:] - x_loc_max[0][:-1], x_loc_min[0][1:] - x_loc_min[0][:-1], y_loc_max[0][1:] - y_loc_max[0][:-1], y_loc_min[0][1:] - y_loc_min[0][:-1], z_loc_max[0][1:] - z_loc_max[0][:-1], z_loc_min[0][1:] - z_loc_min[0][:-1], ] ).mean()) plt.rcParams['figure.figsize'] = [16, 8] df = pd.read_csv('/kaggle/input/sputnik/train.csv') df['datetime'] = list(map(lambda x: datetime.datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%f"), df['epoch'])) df['timedelta'] = np.zeros(len(df.index)) df['timedelta'].values[1:] =(df['datetime'].values[1:] - df['datetime'].values[:-1])/ np.timedelta64(1, 'ms') df['error'] = np.linalg.norm(df[['x', 'y', 'z']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1) def regressed(train, test, period, target): sat_df = pd.DataFrame({target: pd.concat(( train, test), axis=0)[target].to_numpy() }) features = [] for period_mult in range(int(np.ceil(len(test)/ period)) , min(len(test), int(len(train)/ period))): sat_df["lag_period_{}".format(period_mult)] = sat_df[target].shift(period_mult * period) features.append("lag_period_{}".format(period_mult)) sat_df['lagf_mean'] = sat_df[features].mean(axis=1) features.extend(['lagf_mean']) train_df = sat_df[:-len(test)].dropna() test_df = sat_df[-len(test):][features] lin_reg = LinearRegression() lin_reg.fit(train_df.drop(target, axis=1), train_df[target]) result = lin_reg.predict(test_df) return result def regressed(train, test, period, target): result = [] sat_df = pd.DataFrame({target: pd.concat(( train, test), axis=0)[target].to_numpy() }) features = [] for period_mult in range(1, int(np.ceil(len(train)/ period)) - 1): sat_df["lag_period_{}".format(period_mult)] = sat_df[target].shift(period_mult * period) features.append("lag_period_{}".format(period_mult)) features.extend(['lagf_mean']) for i in range(int(np.ceil(len(test)/ period))): sat_df['lagf_mean'] = sat_df[features[i:-1]].mean(axis=1) train_df = sat_df[:-len(test)][[target] + features[i:]].dropna().copy() train_df['lagf_mean'] = train_df[features[i:]].mean(axis=1) if -len(test)+(i + 1)* period >= 0: test_df = sat_df[-len(test)+ i * period:][features[i:]] else: test_df = sat_df[-len(test)+ i * period:-len(test)+(i + 1)* period][features[i:]] lin_reg = LinearRegression() lin_reg.fit(train_df.drop(target, axis=1), train_df[target]) result.extend(lin_reg.predict(test_df)) result.extend([0] *(len(test)- len(result))) return result warnings.filterwarnings('ignore') for sat_id in tqdm(np.unique(df['sat_id'])) : sat = get_sat(sat_id) train_ = sat.loc[sat['type'] == 'train'] test_ = sat.loc[sat['type'] == 'test'] period_ = simple_period(sat_id) for target_ in ['x', 'y', 'z']: sat.loc[sat['type'] == 'test', target_] = regressed(train_, test_, period_, target_) pred = sat.loc[sat['type'] == 'test'] pred['error'] = np.linalg.norm(pred[['x', 'y', 'z']].values - pred[['x_sim', 'y_sim', 'z_sim']].values, axis=1) for k in ['error', 'x', 'y', 'z']: df.loc[(df['sat_id'] == sat_id)&(df['type'] == 'test')&(df['timedelta'] != 1), k] = pred[k] warnings.filterwarnings('default') for index, row in df.loc[(df['timedelta'] == 1)&(df['type'] == 'test')].iterrows() : if np.isnan(df['error'][index]): df['error'][index] = np.linalg.norm([df['x'][index - 1] - df['x_sim'][index], df['y'][index - 1] - df['y_sim'][index], df['z'][index - 1] - df['z_sim'][index]]) df.loc[df['type'] == 'test'][['id', 'error']].to_csv('submission.csv', index=False )<set_options>
params={"n_estimators":np.arange(50,200,10), "max_depth":np.arange(1,11,1), "learning_rate":[0.1,0.01,0.001] }
Titanic - Machine Learning from Disaster
22,003,418
warnings.filterwarnings('ignore') <load_from_csv>
xgb_est=lgbm.LGBMClassifier( random_state=42, objective='binary', eval_metric="auc" ) gr_xgb_est=GridSearchCV(xgb_est,param_grid=params,cv=5,n_jobs=-1,verbose=10) gr_xgb_est.fit(X,y )
Titanic - Machine Learning from Disaster
22,003,418
initi = pd.read_csv('/kaggle/input/sputnik/train.csv' )<prepare_x_and_y>
gr_xgb_est.best_estimator_.get_params()
Titanic - Machine Learning from Disaster
22,003,418
lag_period = 24 resultsx = pd.DataFrame() for j in range(600): dfr = initi[initi.sat_id==j][['x','type']] train = dfr[dfr.type=='train'].reset_index(drop=True) test = dfr[dfr.type=='test'].reset_index(drop=True) i=0 while(i*24<len(test)) : df = pd.concat(( train, test), axis = 0, ignore_index=True) df['target'] = df.x features = [] for period_mult in range(1,3,1): df["lag_period_{}".format(period_mult)] = df.target.shift(period_mult*lag_period) features.append("lag_period_{}".format(period_mult)) df['lagf_mean'] = df[features].mean(axis = 1) features.extend(['lagf_mean']) model = LinearRegression() train_df = df[df.type=='train'][features + ['target']].dropna() test_df = df[df.type=='test'][features].reset_index(drop=True) test_df = test_df.loc[:23,:] model.fit(train_df.drop(['target'], axis = 1), train_df['target']) forecast = model.predict(test_df) test.loc[i*24:i*24+23,:]['x'] = forecast test.loc[i*24:i*24+23,:]['type'] = 'train' i+=1 resultsx = pd.concat(( resultsx, test),axis=0, ignore_index=True )<drop_column>
pred_test = gr_xgb_est.predict(X_test )
Titanic - Machine Learning from Disaster
22,003,418
resultsx.drop(['type'], axis=1, inplace=True )<prepare_x_and_y>
submission = pd.read_csv('../input/titanic/gender_submission.csv') submission['Survived'] = (pred_test > 0.5).astype(int) submission.to_csv('sub.csv', index=False) submission.head()
Titanic - Machine Learning from Disaster
9,566,582
lag_period = 24 resultsy= pd.DataFrame() for j in range(600): dfr = initi[initi.sat_id==j][['y','type']] train = dfr[dfr.type=='train'].reset_index(drop=True) test = dfr[dfr.type=='test'].reset_index(drop=True) i=0 while(i*24<len(test)) : df = pd.concat(( train, test), axis = 0, ignore_index=True) df['target'] = df.y features = [] for period_mult in range(1,3,1): df["lag_period_{}".format(period_mult)] = df.target.shift(period_mult*lag_period) features.append("lag_period_{}".format(period_mult)) df['lagf_mean'] = df[features].mean(axis = 1) features.extend(['lagf_mean']) model = LinearRegression() train_df = df[df.type=='train'][features + ['target']].dropna() test_df = df[df.type=='test'][features].reset_index(drop=True) test_df = test_df.loc[:23,:] model.fit(train_df.drop(['target'], axis = 1), train_df['target']) forecast = model.predict(test_df) test.loc[i*24:i*24+23,:]['y'] = forecast test.loc[i*24:i*24+23,:]['type'] = 'train' i+=1 resultsy = pd.concat(( resultsy, test),axis=0, ignore_index=True )<drop_column>
train = pd.read_csv(".. /input/titanic/train.csv", index_col='PassengerId') test = pd.read_csv('.. /input/titanic/test.csv', index_col='PassengerId') train.head(10 )
Titanic - Machine Learning from Disaster
9,566,582
resultsy.drop(['type'], axis=1, inplace=True )<drop_column>
train.corr().abs() ['Survived'].sort_values(ascending=False )
Titanic - Machine Learning from Disaster
9,566,582
resultsz.drop(['type'], axis=1, inplace=True )<concatenate>
print(train.isnull().sum()) print(' The observation ratio of missing Cabin values is',round(train['Cabin'].isnull().sum() /len(train),2)) print('The observation ratio of missing Age values is',round(train['Age'].isnull().sum() /len(train),2)) print(' ') print(test.isnull().sum()) print(' The observation ratio of missing Cabin values is',round(test['Cabin'].isnull().sum() /len(test),2)) print('The observation ratio of missing Age values is',round(test['Age'].isnull().sum() /len(test),2))
Titanic - Machine Learning from Disaster
9,566,582
all_results = pd.concat(( resultsx,resultsy,resultsz), axis=1 )<concatenate>
train_copy = train.copy() train_copy['Cabin'] = train_copy['Cabin'].apply(lambda x: 0 if pd.isnull(x)else 1 )
Titanic - Machine Learning from Disaster
9,566,582
df = pd.concat(( all_results, initi[initi.type=='test'][['x_sim', 'y_sim', 'z_sim']].reset_index(drop=True)) , axis=1 )<filter>
train.corr().abs() ['Age'].sort_values(ascending=False )
Titanic - Machine Learning from Disaster
9,566,582
df.loc[:243,:]<feature_engineering>
class age_inferer(BaseEstimator, TransformerMixin): def __init__(self, columns=None): self.columns = columns def fit(self, X, y=None, **fit_params): self.train_mean_age = X.groupby(['Pclass'])['Age'].mean() return self def transform(self, X, **transform_params): X['Age'] = X.apply(lambda row: self.train_mean_age[row.Pclass] if row.Age!=row.Age else row.Age, axis=1) return X
Titanic - Machine Learning from Disaster
9,566,582
df['error'] = np.linalg.norm(df[['x', 'y', 'z']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1 )<define_variables>
train_copy = train.copy() train_copy['Title'] = train_copy.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip()
Titanic - Machine Learning from Disaster
9,566,582
submit = df[['error']]<load_from_csv>
df = pd.DataFrame(columns = ['Feature','Count','Survived','Survival_Percent']) feature = 'Title' categories = train_copy[feature].value_counts().index.tolist() for cat in categories: tmp = train_copy[(train_copy[feature]==cat)] cnt = len(tmp) cnt_target=len(train_copy[(train_copy[feature]==cat)&(train_copy['Survived']==1)]) df.loc[cat] = [feature,cnt,cnt_target,cnt_target/cnt] df.sort_index()
Titanic - Machine Learning from Disaster
9,566,582
sub = pd.read_csv('/kaggle/input/sputnik/sub.csv' )<feature_engineering>
class feature_engineering(BaseEstimator, TransformerMixin): def __init__(self, columns=None): self.columns = columns def fit(self, X, y=None, **fit_params): return self def transform(self, X, **transform_params): X['Title'] = X.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip() X['Title'] = X['Title'].apply(lambda x: x if x in ['Mrs', 'Master', 'Miss', 'Dr','Mr','Rev' ] else 'else') X['Family_Size'] = X['SibSp']+X['Parch'] + 1 X = X.drop(columns=['SibSp', 'Parch']) X['has_cabin'] = X['Cabin'].apply(lambda x: 0 if pd.isnull(x)else 1) return X
Titanic - Machine Learning from Disaster
9,566,582
submit['id'] =(sub.reset_index() ).id<prepare_output>
cat_pipeline = Pipeline([ ("imputer", SimpleImputer(strategy="most_frequent")) , ("cat_encoder", OneHotEncoder()), ] )
Titanic - Machine Learning from Disaster
9,566,582
submit.index=sub.id<drop_column>
num_attribs = ["Age", "Fare","Family_Size"] cat_attribs = [ "Sex", "Embarked","Pclass","Title","has_cabin"] columns_trans = ColumnTransformer([ ("num", num_pipeline, num_attribs), ("cat", cat_pipeline, cat_attribs), ] )
Titanic - Machine Learning from Disaster
9,566,582
submit.drop(['id'], axis=1, inplace=True )<save_to_csv>
full_pipeline = Pipeline([ ('age_inferer', age_inferer(['Age', 'Pclass'])) , ('feature_engineering', feature_engineering()), ('ColumnTransformer', columns_trans), ] )
Titanic - Machine Learning from Disaster
9,566,582
submit.to_csv('submit1.csv' )<import_modules>
y_train = train["Survived"] train = train.drop(columns=["Survived"]) x_train = full_pipeline.fit_transform(train)
Titanic - Machine Learning from Disaster
9,566,582
import numpy as np import pandas as pd import os<load_from_csv>
def run_classifiers(X_train: pd.DataFrame , y_train: pd.DataFrame): df = pd.DataFrame(columns=['Model', 'Accuracy']) models = [ ('LogReg', LogisticRegression()), ('RF', RandomForestClassifier()), ('KNN', KNeighborsClassifier()), ('XGB', XGBClassifier()), ('SVC',SVC(probability=True)) , ('GBC',GradientBoostingClassifier()), ] for name, model in models: accuracy = cross_val_score(model, x_train, y_train, cv=5, scoring='accuracy' ).mean() df = df.append({'Model': name, 'Accuracy': accuracy}, ignore_index = True) return df.sort_values(by=['Accuracy'], ascending=False )
Titanic - Machine Learning from Disaster
9,566,582
train_dataset_full = pd.read_csv('../input/08_train_dataset.csv')<load_from_csv>
run_classifiers(x_train,y_train )
Titanic - Machine Learning from Disaster
9,566,582
test_dataset = pd.read_csv('../input/08_test_dataset_no_response.csv')<import_modules>
final_classifier = SVC(probability=True) final_classifier.fit(x_train, y_train) x_test = full_pipeline.transform(test) result = final_classifier.predict(x_test)
Titanic - Machine Learning from Disaster
9,566,582
<import_modules><EOS>
submission = pd.DataFrame({'PassengerId':test.index,'Survived':result}) submission.to_csv('submissionRF_optim_param.csv',index=False )
Titanic - Machine Learning from Disaster
21,830,273
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables>
%matplotlib inline style = "<style>svg{width: 70% !important; height: 60% !important;} </style>" HTML(style )
Titanic - Machine Learning from Disaster
21,830,273
LABEL='volume_in_cans' FEATURE_COLUMNS = ['vpo_stores_nearby','closest_store_vpo','closest_store_dist','stores_count_nearby', 'population_2018_year_average_total_number','population_2018_year_average_per_mill_of_country', 'purchasing_power_2018_million_euro','purchasing_power_2018_per_mill_of_country','purchasing_power_2018_euro_per_capita', 'purchasing_power_2018_index_country_eq_100']<prepare_x_and_y>
titanic_data = pd.read_csv(".. /input/titanic/train.csv") titanic_data_test = pd.read_csv(".. /input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
21,830,273
def make_train_input_fn(df, num_epochs): return tf.estimator.inputs.pandas_input_fn( x = df[FEATURE_COLUMNS], y = df[LABEL], batch_size = 256, num_epochs = num_epochs, shuffle = True, queue_capacity = 1000 )<prepare_x_and_y>
titanic_data.isnull().sum()
Titanic - Machine Learning from Disaster
21,830,273
def make_eval_input_fn(df): return tf.estimator.inputs.pandas_input_fn( x = df[FEATURE_COLUMNS], y = df[LABEL], batch_size = 256, shuffle = False, queue_capacity = 1000 )<prepare_x_and_y>
train_X = titanic_data.drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis = 1) test_X = titanic_data_test.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis = 1) train_y = titanic_data.Survived
Titanic - Machine Learning from Disaster
21,830,273
def make_prediction_input_fn(df): return tf.estimator.inputs.pandas_input_fn( x = df[FEATURE_COLUMNS], y = None, batch_size = 128, shuffle = False, queue_capacity = 1000 )<define_variables>
train_X = train_X.fillna({'Age': train_X.Age.median() , 'Fare': train_X.Fare.median() }) test_X = test_X.fillna({'Age': test_X.Age.median() , 'Fare': test_X.Fare.median() } )
Titanic - Machine Learning from Disaster
21,830,273
def make_feature_cols() : input_columns = [tf.feature_column.numeric_column(k)for k in FEATURE_COLUMNS] return input_columns<import_modules>
train_X = pd.get_dummies(train_X) test_X = pd.get_dummies(test_X )
Titanic - Machine Learning from Disaster
21,830,273
import shutil<train_model>
test_X.isnull().sum()
Titanic - Machine Learning from Disaster
21,830,273
tf.logging.set_verbosity(tf.logging.INFO) OUTDIR = './test_model' shutil.rmtree(OUTDIR, ignore_errors = True) model = tf.estimator.LinearRegressor( feature_columns = make_feature_cols() , model_dir = OUTDIR) model.train(input_fn = make_train_input_fn(train_dataset_full, num_epochs = 1))<compute_test_metric>
clf_rf = RandomForestClassifier(n_jobs=-1, criterion='entropy', min_samples_split=2, min_samples_leaf=1) parameters = {'n_estimators': range(3,40), 'max_depth': range(1,10)} grid_search_cv_clf = GridSearchCV(clf_rf, parameters, cv = 5 )
Titanic - Machine Learning from Disaster
21,830,273
def print_rmse(model, df): metrics = model.evaluate(input_fn = make_eval_input_fn(df)) print('RMSE on dataset = {}'.format(np.sqrt(metrics['average_loss'])) )<compute_test_metric>
grid_search_cv_clf.fit(train_X,train_y )
Titanic - Machine Learning from Disaster
21,830,273
print_rmse(model, train_dataset_full )<predict_on_test>
best_clf = grid_search_cv_clf.best_estimator_ grid_search_cv_clf.best_params_
Titanic - Machine Learning from Disaster
21,830,273
predictions = model.predict(input_fn = make_prediction_input_fn(test_dataset)) predicted_vol = [item['predictions'][0] for item in predictions]<prepare_output>
best_clf.score(train_X, train_y )
Titanic - Machine Learning from Disaster
21,830,273
<save_to_csv><EOS>
test_y = best_clf.predict(test_X) test_ID= titanic_data_test.PassengerId submission = pd.DataFrame({ "PassengerId": test_ID, "Survived": test_y}) submission.to_csv('./gender_submission.csv', index=False )
Titanic - Machine Learning from Disaster
14,332,228
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<set_options>
import numpy as np import pandas as pd from catboost import CatBoostClassifier from sklearn.model_selection import train_test_split import re import matplotlib.pyplot as plt from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import accuracy_score from sklearn.metrics import mean_absolute_error from sklearn.ensemble import RandomForestClassifier from catboost import CatBoostClassifier from sklearn.preprocessing import OneHotEncoder from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from sklearn.linear_model import LogisticRegression
Titanic - Machine Learning from Disaster
14,332,228
%matplotlib inline <import_modules>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.info()
Titanic - Machine Learning from Disaster
14,332,228
torch.__version__<import_modules>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.info() hui = test_data['PassengerId']
Titanic - Machine Learning from Disaster
14,332,228
from torch.autograd import Variable<set_options>
len(train_data.loc[train_data['Survived']==1])/len(train_data )
Titanic - Machine Learning from Disaster
14,332,228
use_gpu = torch.cuda.is_available() use_gpu<load_from_zip>
train_names = train_data[['Name','Age']].dropna().reset_index() train_names['Name'] = train_names['Name'].str.lower() new_date = [] for i in train_names['Name']: d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())] new_date.append(d) df = pd.DataFrame(new_date, columns = ['Name']) df['Age'] = train_names['Age'] features_train = df['Name'] target_train = df['Age']
Titanic - Machine Learning from Disaster
14,332,228
!tar -zxvf ../input/cifar10-python/cifar-10-python.tar.gz<set_options>
count_tf_idf = TfidfVectorizer() tf_idf = count_tf_idf.fit_transform(features_train) count_tf_idf_ts = TfidfVectorizer() tf_id_ts = count_tf_idf.transform(features_train )
Titanic - Machine Learning from Disaster
14,332,228
!ls .<load_pretrained>
clf_t = RandomForestRegressor(random_state=42, criterion='mae' ).fit(count_tf_idf.transform(features_train), target_train )
Titanic - Machine Learning from Disaster
14,332,228
transform = transforms.Compose( [transforms.ToTensor() , transforms.Normalize(( 0.5, 0.5, 0.5),(0.5, 0.5, 0.5)) ]) trainset = torchvision.datasets.CIFAR10(root='.', train=True, download=False, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='.', train=False, download=False, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2) classes =('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck' )<not_enough_vertices>
ind = train_data.loc[train_data['Age'].isnull() ==True].index ddf = train_data.loc[ind, ['Name']] ddf['Name'] = ddf['Name'].str.lower() new_date = [] for i in ddf['Name']: d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())] new_date.append(d) df = pd.DataFrame(new_date, columns = ['Name']) df.index=ind features_train = df['Name']
Titanic - Machine Learning from Disaster
14,332,228
def imshow(img): img = img / 2 + 0.5 npimg = img.numpy() plt.imshow(np.transpose(npimg,(1, 2, 0))) dataiter = iter(trainloader) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) print(' '.join('%5s' % classes[labels[j]] for j in range(32)) )<choose_model_class>
d = round(pd.DataFrame(clf_t.predict(count_tf_idf.transform(features_train)) ,columns=['new_adge'])) d.index=ind d
Titanic - Machine Learning from Disaster
14,332,228
class Net(nn.Module): def __init__(self): super(Net, self ).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x net = Net() if torch.cuda.is_available() : net.cuda()<choose_model_class>
for i in d.index: for a in train_data.index: if i == a: train_data.loc[i,['Age']] = int(d.loc[i,['new_adge']]) train_data['Age'] = train_data['Age'].astype('int') train_data.info()
Titanic - Machine Learning from Disaster
14,332,228
criterion = nn.CrossEntropyLoss() if use_gpu: criterion = criterion.cuda() optimizer = optim.SGD(net.parameters() , lr=0.001, momentum=0.9 )<train_model>
ind = test_data.loc[test_data['Age'].isnull() ==True].index ddf = test_data.loc[ind, ['Name']] ddf['Name'] = ddf['Name'].str.lower() new_date = [] for i in ddf['Name']: d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())] new_date.append(d) df = pd.DataFrame(new_date, columns = ['Name']) df.index=ind features_test = df['Name']
Titanic - Machine Learning from Disaster
14,332,228
for epoch in tqdm_notebook(range(10)) : running_loss = 0.0 for i, data in tqdm_notebook(enumerate(trainloader, 0)) : inputs, labels = data if torch.cuda.is_available() : inputs, labels = Variable(inputs ).cuda() , Variable(labels ).cuda() optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.data[0] if i % 2000 == 1999: print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print('Finished Training' )<not_enough_vertices>
d = round(pd.DataFrame(clf_t.predict(count_tf_idf.transform(features_test)) ,columns=['new_adge'])) d.index=ind for i in d.index: for a in test_data.index: if i == a: test_data.loc[i,['Age']] = int(d.loc[i,['new_adge']]) test_data['Age'] = test_data['Age'].astype('int') test_data.info()
Titanic - Machine Learning from Disaster
14,332,228
dataiter = iter(testloader) images, labels = dataiter.next() imshow(torchvision.utils.make_grid(images)) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(32)) )<define_variables>
train_data.loc[5,['Age']]
Titanic - Machine Learning from Disaster
14,332,228
outputs = net(Variable(images ).cuda())if use_gpu else net(Variable(images))<something_strange>
train_data['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,332,228
_, predicted = torch.max(outputs.data, 1) print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(32)) )<define_variables>
train_data['Embarked'] = train_data['Embarked'].fillna('S') train_data.info()
Titanic - Machine Learning from Disaster
14,332,228
all_pred = np.empty(( 0, 10), float )<predict_on_test>
train_data['Sex'] = pd.get_dummies(train_data['Sex']) train_data
Titanic - Machine Learning from Disaster
14,332,228
for data in tqdm_notebook(testloader): images, _ = data if use_gpu: images = images.cuda() outputs = net(Variable(images)) curr_pred = F.softmax(outputs ).data.cpu().numpy() all_pred = np.vstack([all_pred, curr_pred] )<save_to_csv>
train_data['Sex'] = train_data['Sex'].astype('int') train_data.info()
Titanic - Machine Learning from Disaster
14,332,228
pd.DataFrame(all_pred, columns=classes ).to_csv('baseline.csv', index_label='id' )<set_options>
test_data['Sex'] = pd.get_dummies(test_data['Sex']) test_data['Sex'] = test_data['Sex'].astype('int') test_data
Titanic - Machine Learning from Disaster
14,332,228
%matplotlib inline<load_from_csv>
one_hot = pd.get_dummies(train_data['Embarked'],drop_first=True) train_data = train_data.drop('Embarked',axis = 1) train_data = train_data.join(one_hot) train_data
Titanic - Machine Learning from Disaster
14,332,228
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') print('Train info:') train.info() print(' Test info:') test.info()<feature_engineering>
one_hot = pd.get_dummies(test_data['Embarked'],drop_first=True) test_data = test_data.drop('Embarked',axis = 1) test_data = test_data.join(one_hot) test_data
Titanic - Machine Learning from Disaster
14,332,228
train['mind'] = train['type'].str[0] train['energy'] = train['type'].str[1] train['nature'] = train['type'].str[2] train['tactics'] = train['type'].str[3]<feature_engineering>
len(train_data.loc[train_data['Ticket'].duplicated() ==True][['Ticket','Cabin']].sort_values('Ticket'))
Titanic - Machine Learning from Disaster
14,332,228
train['mind'] = train['mind'].apply(lambda x: 0 if x == 'I' else 1) train['energy'] = train['energy'].apply(lambda x: 0 if x == 'S' else 1) train['nature'] = train['nature'].apply(lambda x: 0 if x == 'F' else 1) train['tactics'] = train['tactics'].apply(lambda x: 0 if x == 'P' else 1) train.head()<feature_engineering>
train_data['Name'] = train_data['Name'].str.lower()
Titanic - Machine Learning from Disaster
14,332,228
def word_replace(df): df['posts'] = df['posts'].str.replace(r'http.?://[^\s]+[\s]?', 'url ') df['posts'] = df['posts'].str.replace(r"n't", ' not') df['posts'] = df['posts'].str.replace(r"'s", ' is') df['posts'] = df['posts'].str.replace(r"'m", ' am') df['posts'] = df['posts'].str.replace(r"'re", ' are') df['posts'] = df['posts'].str.replace(r"'ve", ' have') df['posts'] = df['posts'].str.replace(r"'ll", ' will') df['posts'] = df['posts'].str.replace(r"'d", ' would') df['posts'] = df['posts'].str.replace(r"[',.():|-]", " ") df['posts'] = df['posts'].str.lower() return df<categorify>
Titanic - Machine Learning from Disaster
14,332,228
train_clean = word_replace(train.copy()) test_clean = word_replace(test.copy() )<import_modules>
Titanic - Machine Learning from Disaster
14,332,228
<feature_engineering>
test_data['Name'] = test_data['Name'].str.lower()
Titanic - Machine Learning from Disaster
14,332,228
get_tokens = TweetTokenizer() get_lemmas = WordNetLemmatizer() def tokenize_lemmatize(df): df['posts'] = df.apply(lambda row: [get_lemmas.lemmatize(w)for w in get_tokens.tokenize(row['posts'])], axis=1) return df <categorify>
train_data.loc[(train_data['Ticket']=='S.O.C.14879')]
Titanic - Machine Learning from Disaster
14,332,228
train_clean = tokenize_lemmatize(train_clean.copy()) test_clean = tokenize_lemmatize(test_clean.copy() )<categorify>
train_data.loc[train_data['Cabin'].isnull() ==False]
Titanic - Machine Learning from Disaster
14,332,228
emojies = set(emoji.UNICODE_EMOJI.keys()) def swap_emoji(word): if word in emojies: return 'emoji' return word def emoji_convert(df): df['posts'] = df['posts'].apply(lambda row: [swap_emoji(word)for word in row]) return df <categorify>
Titanic - Machine Learning from Disaster
14,332,228
train_clean = emoji_convert(train_clean.copy()) test_clean = emoji_convert(test_clean.copy() )<categorify>
Titanic - Machine Learning from Disaster
14,332,228
stop_words = stopwords.words('english') stop_words = stop_words + ['could', 'would'] stop_words = [w for w in stop_words if w not in ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves']] def remove_words(df): df['posts'] = df['posts'].apply(lambda row: [word for word in row if word not in stop_words]) return df<drop_column>
Titanic - Machine Learning from Disaster
14,332,228
train_clean = remove_words(train_clean.copy()) test_clean = remove_words(test_clean.copy() )<count_values>
Titanic - Machine Learning from Disaster
14,332,228
E = train_clean['mind'][train_clean['mind'] == 1].count() / train_clean['mind'].count() print('E:I = {} : {}'.format(round(E, 2), 1-round(E, 2))) N = train_clean['energy'][train_clean['energy'] == 1].count() / train_clean['energy'].count() print('N:S = {} : {}'.format(round(N, 2), 1-round(N, 2))) T = train_clean['nature'][train_clean['nature'] == 1].count() / train_clean['nature'].count() print('T:F = {} : {}'.format(round(T, 2), 1-round(T, 2))) J = train_clean['tactics'][train_clean['tactics'] == 1].count() / train_clean['tactics'].count() print('J:P = {} : {}'.format(round(J, 2), 1-round(J, 2)) )<train_model>
train_data.loc[[128,699,715,75]]
Titanic - Machine Learning from Disaster
14,332,228
tt = TfidfVectorizer(preprocessor=list, tokenizer=list, ngram_range=(1,2), min_df=2, smooth_idf=False) def vectorise(train_set, test_set): tt.fit(train_set) train_vect = tt.transform(train_set) test_vect = tt.transform(test_set) return train_vect, test_vect <import_modules>
train_data = train_data.drop('PassengerId',axis=1) dt = train_data
Titanic - Machine Learning from Disaster
14,332,228
from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import log_loss, confusion_matrix, accuracy_score from sklearn.linear_model import LogisticRegression<prepare_x_and_y>
Titanic - Machine Learning from Disaster
14,332,228
X_train_vect, X_test_vect = vectorise(train_clean['posts'], test_clean['posts']) X_train_vect.shape<train_on_grid>
train_data = train_data.drop('Cabin',axis=1) test_data = test_data.drop('Cabin',axis=1 )
Titanic - Machine Learning from Disaster
14,332,228
cm = LogisticRegression(penalty='l1', solver='liblinear', random_state=11) params = {'C': [0.1, 0.5, 1, 5, 10, 50, 100]} grid = GridSearchCV(cm, param_grid=params, scoring='neg_log_loss', n_jobs=-1, cv=4) categories = ['mind', 'energy', 'nature', 'tactics'] for cat in categories: grid.fit(X_train_vect, train_clean[cat]) print(cat + ':') print('Best score: ', -grid.best_score_) print('Best paramaters: ', grid.best_params_) print(' ' )<split>
train, test = train_test_split(train_data, test_size=0.33, random_state=42) features_train = train.drop(['Survived'],axis=1) target_train = train['Survived'] features_test = test.drop(['Survived'],axis=1) target_test = test['Survived']
Titanic - Machine Learning from Disaster
14,332,228
X_train, X_test, y_train, y_test = train_test_split(train_clean['posts'], train_clean[['mind', 'energy', 'nature', 'tactics']], random_state=11) X_train_vect, X_test_vect = vectorise(X_train, X_test) X_train_vect.shape<train_model>
model = CatBoostClassifier(iterations=200, depth=9,learning_rate=0.05,l2_leaf_reg=10,random_seed=42, loss_function='Logloss',grow_policy='Lossguide', max_leaves=39, verbose=True,eval_metric='Accuracy',nan_mode='Min',cat_features=['Pclass','Sex','Q','S']) model.fit(features_train, target_train,text_features=['Ticket','Name'], plot=True,eval_set=(features_test,target_test),verbose=False) print(accuracy_score(model.predict(features_train),target_train)) print(accuracy_score(model.predict(features_test),target_test))
Titanic - Machine Learning from Disaster
14,332,228
def build_model(X_train_vect_f, X_test_vect_f, y_train_f, y_test_f, model): model.fit(X_train_vect_f, y_train_f) train_pred = model.predict(X_train_vect_f) test_pred = model.predict(X_test_vect_f) train_proba = model.predict_proba(X_train_vect_f) test_proba = model.predict_proba(X_test_vect_f) train_loss = log_loss(y_train_f, train_proba, eps=1e-15) test_loss = log_loss(y_test_f, test_proba, eps=1e-15) test_accuracy = accuracy_score(y_test_f, test_pred) test_matrix = confusion_matrix(y_test_f, test_pred) return train_pred, test_pred, train_loss, test_loss, test_accuracy, test_matrix<define_variables>
test_data = test_data.drop('PassengerId',axis=1) dt = test_data test_data
Titanic - Machine Learning from Disaster
14,332,228
ll_scores = [] ac_scores = []<train_model>
submission=pd.DataFrame(model.predict(test_data),columns=['Survived']) submission['PassengerId'] = hui submission.Survived = submission.Survived.astype(int) filename = 'Titanic Predictions1.csv' submission.to_csv(filename,index=False) print('Saved file: ' + filename )
Titanic - Machine Learning from Disaster
21,208,150
cm = LogisticRegression(penalty='l1', solver='liblinear', C=4, random_state=11) pred_train, pred_test, loss_train, loss_test, acc_test, matrix_test = build_model(X_train_vect, X_test_vect, y_train['mind'], y_test['mind'], cm) ll_scores.append(loss_test) ac_scores.append(acc_test) print('Mind:') print('Log loss on training set: ',loss_train) print('Log loss on test set: ', loss_test) print('Accuracy on test set:', acc_test) print('Test set confusion matrix: ', matrix_test )<train_model>
train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv") print(train.shape) train.head()
Titanic - Machine Learning from Disaster
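A note on the prompt format: each prompt ends with a lower-case pseudo-tag naming the operation the paired completion performs (<concatenate>, <load_from_csv>, <filter>, and so on), while upper-case markers such as <SOS> and <EOS> delimit kernel boundaries. A minimal sketch, assuming this convention holds for every row, of separating the trailing tag from the code:

import re

# Lower-case operation tags like <concatenate> terminate each prompt;
# upper-case markers such as <EOS> are kernel delimiters and are left alone.
TAG_RE = re.compile(r'<([a-z_]+)>\s*$')

def split_prompt(prompt):
    # Returns (code, tag); tag is None when no trailing operation tag is present.
    m = TAG_RE.search(prompt)
    if m is None:
        return prompt, None
    return prompt[:m.start()].rstrip(), m.group(1)

code, tag = split_prompt('frame = submission.to_frame()<rename_columns>')
print(tag)   # rename_columns
print(code)  # frame = submission.to_frame()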