kernel_id: int64 (values 24.2k – 23.3M)
prompt: string (lengths 8 – 1.85M)
completion: string (lengths 1 – 182k)
comp_name: string (lengths 5 – 57)
8,119,418
data2[data2.countries == "Russia"]<feature_engineering>
feature_importances = grid_search2.best_estimator_.feature_importances_ indices_of_top = indices_of_top_k(feature_importances, 25) X_train_prepared = X_train_prepared[:, indices_of_top] X_test_prepared = X_test_prepared[:, indices_of_top] knn_model = KNeighborsClassifier() params_grid = [ {'n_neighbors': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'], 'metric': ['minkowski', 'euclidean', 'manhattan']} ] grid_search = GridSearchCV(knn_model, params_grid, cv=5, scoring="accuracy", n_jobs=1) grid_search.fit(X_train_prepared, y_train_prepared) grid_search.best_params_
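indices_of_top_k is called above but never defined in this extract; a minimal sketch of what it presumably does (return the column indices of the k largest feature importances; the name and behavior are assumptions):
import numpy as np
def indices_of_top_k(arr, k):
    # Indices of the k largest values, sorted ascending so the sliced column order stays stable.
    return np.sort(np.argpartition(np.array(arr), -k)[-k:])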
Titanic - Machine Learning from Disaster
8,119,418
data2.confirmed = np.log10(data2.confirmed+1) data2.deaths = np.log10(data2.deaths+1) old_con = data2["confirmed"].iloc[:-1] old_con2 = data2["deaths"].iloc[:-1] data2 = data2.iloc[1:] data2["pred_conf"] = old_con.values data2["pred_deaths"] = old_con2.values data2 = data2.iloc[1:] data2["delta_conf1"] = old_con.values[1:] - old_con.values[:-1] data2["delta_deaths1"] = old_con2.values[1:] - old_con2.values[:-1] data2 = data2.iloc[1:] data2["delta_conf2"] = old_con.values[1:-1] - old_con.values[:-2] data2["delta_deaths2"] = old_con2.values[1:-1] - old_con2.values[:-2] data2 = data2.iloc[1:] data2["delta_conf3"] = old_con.values[1:-2] - old_con.values[:-3] data2["delta_deaths3"] = old_con2.values[1:-2] - old_con2.values[:-3] data2 = data2.iloc[1:] data2["delta_conf4"] = old_con.values[1:-3] - old_con.values[:-4] data2["delta_deaths4"] = old_con2.values[1:-3] - old_con2.values[:-4] data2.confirmed = data2.confirmed - data2["pred_conf"] data2.deaths = data2.deaths - data2["pred_deaths"] data2[data2.countries == "Russia"].iloc[0:70]<drop_column>
print("Training dataset accuracy: ", grid_search.best_score_, sep="") knn_model = grid_search.best_estimator_ knn_predictions = knn_model.predict(X_test_prepared) print("Test dataset accuracy: ", accuracy_score(y_test_prepared, knn_predictions), sep="" )
Titanic - Machine Learning from Disaster
8,119,418
days_x = 45 model_Confirmed, model_Death = {}, {} data3 = data2.drop(['Quarantine','world_share', 'migrants','median_age','land_area','fertility_rate','density','Date'], axis='columns') data3.population = data3.population/10**6 one_hot = pd.get_dummies(data3['Province_State']) data3 = data3.join(one_hot) data_Korea = data3[(data3.countries == 'Korea, South')&(data3.confirmed > 0)] data_Russia = data3[(data3.countries == 'Russia')&(data3.confirmed > 0)] new_data = data3[data2.days_mart >= days_x] old_data = data3[data2.days_mart < days_x] train_labels = old_data.confirmed train_death = old_data.deaths train_data = old_data.drop(['Province_State', 'ForecastId','confirmed','countries','deaths'], axis='columns') train_data<predict_on_test>
knn_final_model = KNeighborsClassifier(algorithm='auto', metric='manhattan', n_neighbors=25, weights='uniform') knn_final_model.fit(X_train_prepared, y_train_prepared )
Titanic - Machine Learning from Disaster
8,119,418
def pred_score(models, data, death = False, plot = 0): predictions_all = 0 plt.rcParams['figure.figsize'] = [20, len(models)*10] data = data[(data.confirmed > 0)] death_labels = 10**(data.deaths + data.pred_deaths)- 1 labels = 10**(data.confirmed + data.pred_conf)- 1 data = data.drop(['Province_State','ForecastId','confirmed','countries','deaths'], axis='columns') for i, model in enumerate(models): if model == "reg": predictions = models[model].predict(scaler.transform(data)) else: predictions = models[model].predict(data) if death: predictions = predictions + data.pred_deaths.values else: predictions = predictions + data.pred_conf.values predictions = 10**predictions - 1 predictions_all = predictions_all + predictions if death: print("MALE error for deaths", model, np.mean(np.abs(np.log10((predictions+1)/(death_labels+1))))) else: print("MALE error for confirmed cases", model, np.mean(np.abs(np.log10((predictions+1)/(labels+1))))) if plot: plt.subplot(len(models), 1, i+1) if plot==1: plt.plot(predictions, label = "Predicted value") if death: plt.plot(death_labels.values, label = "Actual value") plt.gca().set(xlabel='Days since first confirmed case', ylabel='Deaths') else: plt.plot(labels.values, label = "Actual value") plt.gca().set(xlabel='Days since first confirmed case', ylabel='Confirmed cases') if plot==2: plt.scatter(np.arange(0,len(predictions),1), predictions, s = 1, label = "Predicted value") if death: plt.scatter(np.arange(0,len(predictions),1), death_labels.values, s = 1, label = "Actual value") plt.gca().set(xlabel='Days since first confirmed case', ylabel='Deaths') else: plt.scatter(np.arange(0,len(predictions),1), labels.values, s = 1, label = "Actual value") plt.gca().set(xlabel='Days since first confirmed case', ylabel='Confirmed cases') plt.title(model) plt.grid(True) plt.legend() predictions_all = predictions_all/len(models) if death: print("Mean MALE error for deaths", np.mean(np.abs(np.log10((predictions_all+1)/(death_labels+1))))) else: print("Mean MALE error for confirmed cases", np.mean(np.abs(np.log10((predictions_all+1)/(labels+1)))))<train_on_grid>
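The "MALE" printed above is a mean absolute log10 error; pulled out as a standalone function for clarity (a sketch, assuming pred and actual are non-negative arrays):
import numpy as np
def male(pred, actual):
    # Mean absolute base-10 log error; the +1 guards against log10(0).
    return np.mean(np.abs(np.log10((pred + 1) / (actual + 1))))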
titanic_sub_prepared = full_pipeline.fit_transform(titanic_sub) titanic_sub_prepared = full_pipeline2.fit_transform(titanic_sub) titanic_sub_prepared = titanic_sub_prepared[:, indices_of_top] titanic_sub_predictions = knn_final_model.predict(titanic_sub_prepared) submission = pd.DataFrame({'PassengerId':titanic_sub['PassengerId'], 'Survived':titanic_sub_predictions}) submission.to_csv('submission.csv', sep=',', index=False )
Titanic - Machine Learning from Disaster
3,282,978
treeDepth = 30 mdl = tree.DecisionTreeRegressor(max_depth=treeDepth) param_grid = { 'n_estimators': [100], 'learning_rate': [0.0002], 'loss' : ["exponential"] } regrMdl = ensemble.AdaBoostRegressor(base_estimator=mdl) model_Confirmed["Adaboost"] = model_selection.RandomizedSearchCV(estimator = regrMdl, param_distributions = param_grid, n_iter = 100, cv = 3, verbose=0, random_state=42, n_jobs = -1 ).fit(train_data, train_labels) model_Death["Adaboost"] = model_selection.RandomizedSearchCV(estimator = regrMdl, param_distributions = param_grid, n_iter = 100, cv = 3, verbose=0, random_state=42, n_jobs = -1 ).fit(train_data, train_death )<train_model>
df = pd.read_csv('../input/train.csv') df.head(2)
Titanic - Machine Learning from Disaster
3,282,978
model_Confirmed["RandomForest"] = ensemble.RandomForestRegressor(n_estimators=200, max_depth=30, random_state=42, n_jobs = -1 ).fit(train_data, train_labels) model_Death["RandomForest"] = ensemble.RandomForestRegressor(n_estimators=200, max_depth=30, random_state=42, n_jobs = -1 ).fit(train_data, train_death) <train_model>
df[df['Embarked'].isna() ]
Titanic - Machine Learning from Disaster
3,282,978
model_Confirmed["Xgboost"] = xgb.XGBRegressor(objective ='reg:squarederror',n_estimators=200, max_depth=20, random_state=42, n_jobs = -1 ).fit(train_data, train_labels) model_Death["Xgboost"] = xgb.XGBRegressor(objective ='reg:squarederror',n_estimators=200, max_depth=20, random_state=42, n_jobs = -1 ).fit(train_data, train_death )<compute_test_metric>
titles = df['Name'].str.extract('([A-Za-z]+)\.', expand=False) titles_df = pd.DataFrame() titles_df['Title'] = titles titles_df['Survived'] = df['Survived']
Titanic - Machine Learning from Disaster
3,282,978
pred_score(model, data_pred, death_bool, plot_type )<define_variables>
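The arguments in the call above are placeholders that are never bound in this extract; given the dictionaries and frames built earlier, a plausible concrete invocation (assumed usage, not from the source) would be:
pred_score(model_Confirmed, data_Russia, death=False, plot=1)
pred_score(model_Death, data_Russia, death=True, plot=2)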
titles_df['Title'].value_counts().head(6 )
Titanic - Machine Learning from Disaster
3,282,978
days_prog = 30 Matrix_confirmed,Matrix_Death ={},{} for model_name in model_Confirmed: new_data_list = new_data[new_data.days_mart == days_x] labels = new_data_list.countries predictions_confirmed = [] predictions_Death = [] new_data_list = new_data_list.drop(['Province_State', 'ForecastId','confirmed','countries','deaths'], axis='columns') for _ in range(days_prog): prediction_confirmed = model_Confirmed[model_name].predict(new_data_list) prediction_Death = model_Death[model_name].predict(new_data_list) prediction_confirmed = prediction_confirmed*(0.95**(new_data_list.days_after_Quarantine.values/10 - 1.4)) prediction_Death = prediction_Death*(0.95**(new_data_list.days_after_Quarantine.values/10 - 1.4)) prediction_confirmed = prediction_confirmed + new_data_list.pred_conf.values prediction_Death = prediction_Death + new_data_list.pred_deaths.values new_data_list["delta_conf2"] = new_data_list["delta_conf1"] new_data_list["delta_conf1"] = prediction_confirmed - new_data_list["pred_conf"] new_data_list["pred_conf"] = prediction_confirmed new_data_list["delta_deaths2"] = new_data_list["delta_deaths1"] new_data_list["delta_deaths1"] = prediction_Death - new_data_list["pred_deaths"] new_data_list["pred_deaths"] = prediction_Death prediction_Death = 10**prediction_Death - 1 prediction_confirmed = 10**prediction_confirmed - 1 new_data_list[["days_mart", "days"]] += 1 predictions_Death.append(np.round(prediction_Death)) predictions_confirmed.append(np.round(prediction_confirmed)) data_list = pd.date_range('2020-04-'+str(days_x-30), periods = days_prog, freq ='d') data_list = data_list.strftime('%m-%d') Matrix_confirmed[model_name] = pd.DataFrame(predictions_confirmed,columns = labels,index = data_list) Matrix_Death[model_name] = pd.DataFrame(predictions_Death,columns = labels,index = data_list) Matrix_confirmed["Mean"] =(Matrix_confirmed["Xgboost"] + Matrix_confirmed["RandomForest"] + Matrix_confirmed["Adaboost"])//3 Matrix_Death["Mean"] =(Matrix_Death["Xgboost"] + Matrix_Death["RandomForest"] + Matrix_Death["Adaboost"])//3<feature_engineering>
age_eda = df.groupby(['Survived', 'Age'] ).size().rename('count' ).reset_index()
Titanic - Machine Learning from Disaster
3,282,978
ans = Matrix_confirmed["Adaboost"].stack().reset_index() ans_Death = Matrix_Death["Adaboost"].stack().reset_index() ans["prediction_deaths"] = ans_Death[0] ans["Province_State"] = new_data.groupby(["countries", "Province_State"] ).max().reset_index() ["Province_State"].tolist() *30 ans.columns = ['Date','Country_Region','ConfirmedCases', 'Fatalities',"Province_State"] ans['Date'] = '2020-'+ans['Date'] train2 = train[(train.Date >= "2020-04-02")] train2['Province_State'] = train2['Province_State'].fillna("zzz") train2.drop(['Id'], axis='columns',inplace=True) ans = pd.concat([train2, ans],ignore_index=True) ans['ConfirmedCases'] = ans['ConfirmedCases'] ans['Fatalities'] = ans['Fatalities'] ans = ans.sort_values(by=['Country_Region','Province_State','Date']) ans['ForecastId'] = np.arange(1,len(ans)+1) ans = ans[['ForecastId','ConfirmedCases','Fatalities']] ans.to_csv('submission.csv', index=False )<load_from_csv>
del age_eda age_eda = df.copy() age_eda.dropna(subset=['Age'], inplace=True) age_eda['Age'] = age_eda['Age'].astype(int) age_eda.loc[ age_eda['Age'] <= 11, 'Age'] = 0 age_eda.loc[(age_eda['Age'] > 11)&(age_eda['Age'] <= 18), 'Age'] = 1 age_eda.loc[(age_eda['Age'] > 18)&(age_eda['Age'] <= 22), 'Age'] = 2 age_eda.loc[(age_eda['Age'] > 22)&(age_eda['Age'] <= 27), 'Age'] = 3 age_eda.loc[(age_eda['Age'] > 27)&(age_eda['Age'] <= 32), 'Age'] = 4 age_eda.loc[(age_eda['Age'] > 32)&(age_eda['Age'] <= 40), 'Age'] = 5 age_eda.loc[(age_eda['Age'] > 40), 'Age'] = 6
Titanic - Machine Learning from Disaster
3,282,978
train_df = pd.read_csv(sorted(paths)[-1]) test_df = pd.read_csv(sorted(paths)[-2]) submission = pd.read_csv(sorted(paths)[-3]) sub_08_04 = pd.read_csv(sorted(paths)[-5] )<import_modules>
from sklearn import linear_model from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from xgboost import XGBClassifier from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score
Titanic - Machine Learning from Disaster
3,282,978
from statsmodels.tsa.statespace.sarimax import SARIMAX<define_variables>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') train_test = train.append(test, sort=False) train_test.reset_index(inplace=True) train_test.drop(['index'], inplace=True, axis=1) train_test.head(2)
Titanic - Machine Learning from Disaster
3,282,978
count_len = len(train_df[train_df['Country_Region'] == 'Russia']) train_cc = [] train_f = [] count = 0 for i in range(int(len(train_df)/ count_len)) : train_cc.append(train_df.ConfirmedCases[count:count+count_len].values.tolist()) train_f.append(train_df.Fatalities[count:count+count_len].values.tolist()) count += count_len<define_variables>
def add_titles(data): data['Title'] = data['Name'].str.extract('([A-Za-z]+)\.', expand=False) title_mapping = {"Mr": 'Mr', "Miss": 'Miss', "Mrs": 'Mrs', "Master": 'Master', "Rev": 'Rev', "Dr": 'Other', "Col": 'Other', "Major": 'Other', "Mlle": 'Other', "Countess": 'Other', "Ms": 'Other', "Lady": 'Other', "Jonkheer": 'Other', "Don": 'Other', "Dona" : 'Other', "Mme": 'Other', "Capt": 'Other', "Sir": 'Other'} data['Title'] = data['Title'].map(title_mapping)
Titanic - Machine Learning from Disaster
3,282,978
delta =(datetime.today().date() - date(2020, 4, 2)).days<train_on_grid>
add_titles(train_test )
Titanic - Machine Learning from Disaster
3,282,978
test_count = len(test_df[test_df['Country_Region'] == 'Russia'])- delta - 1 predicted_cc = [] for i in range(len(train_cc)) : try: data1 = train_cc[i] model1 = SARIMAX(data1, order=(1,1,0), seasonal_order=(1,1,0,12), measurement_error=True) model1_fit = model1.fit(disp=False) predicted1 = model1_fit.predict(len(data1), len(data1)+test_count) predicted_cc.append(predicted1.tolist()) except: data1 = train_cc[i] model1 = SARIMAX(data1,order=(1,1,0),seasonal_order=(1,1,0,12),measurement_error=True,enforce_stationarity=False) model1_fit = model1.fit(disp=False) predicted1 = model1_fit.predict(len(data1), len(data1)+test_count) predicted_cc.append(predicted1.tolist() )<train_on_grid>
def clean_fare(data): data['Fare'] = data['Fare'].fillna(data['Fare'].dropna().median() )
Titanic - Machine Learning from Disaster
3,282,978
predicted_f = [] for i in range(len(train_f)) : try: data2 = train_f[i] model2 = SARIMAX(data2,order=(1,1,0), seasonal_order=(1,1,0,12), measurement_error=True) model2_fit = model2.fit(disp=False) predicted2 = model2_fit.predict(len(data2), len(data2)+test_count) predicted_f.append(predicted2.tolist()) except: data2 = train_f[i] model2 = SARIMAX(data2,order=(1,1,0),seasonal_order=(1,1,0,12),measurement_error=True,enforce_stationarity=False) model2_fit = model2.fit(disp=False) predicted2 = model2_fit.predict(len(data2), len(data2)+test_count) predicted_f.append(predicted2.tolist() )<train_model>
clean_fare(train_test )
Titanic - Machine Learning from Disaster
3,282,978
check_length = len(train_cc[0][-delta:])+ len(predicted_cc[0]) if check_length == 43: print('Check OK') else: print('Check failed')<define_variables>
def clean_age(data): data["Age"] = data.groupby(['Sex','Pclass','Title'])['Age'].transform(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
3,282,978
new_countries_idxs = [121, 166, 183, 210, 221, 299, 310]<define_variables>
clean_age(train_test )
Titanic - Machine Learning from Disaster
3,282,978
days_from =(datetime.today().date() - date(2020, 4, 9)).days<filter>
train_test.loc[train_test['Age'].isnull() ]
Titanic - Machine Learning from Disaster
3,282,978
sub_08_04[sub_08_04['Country_Region'] == 'Russia'].iloc[14:14+days_from]<filter>
train_test.loc[979, 'Age'] = train_test['Age'].median()
Titanic - Machine Learning from Disaster
3,282,978
train_df[train_df['Country_Region'] == 'Russia'].iloc[78:78+days_from]<define_variables>
def bin_age(dataset): dataset['Age'] = dataset['Age'].astype(int) dataset.loc[ dataset['Age'] <= 11, 'Age'] = 0 dataset.loc[(dataset['Age'] > 11)&(dataset['Age'] <= 18), 'Age'] = 1 dataset.loc[(dataset['Age'] > 18)&(dataset['Age'] <= 22), 'Age'] = 2 dataset.loc[(dataset['Age'] > 22)&(dataset['Age'] <= 27), 'Age'] = 3 dataset.loc[(dataset['Age'] > 27)&(dataset['Age'] <= 32), 'Age'] = 4 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 40), 'Age'] = 5 dataset.loc[(dataset['Age'] > 40)&(dataset['Age'] <= 66), 'Age'] = 6 dataset.loc[ dataset['Age'] > 66, 'Age'] = 6
Titanic - Machine Learning from Disaster
3,282,978
sub_count_len = len(sub_08_04[sub_08_04['Country_Region'] == 'Russia']) sub_0804_cc_preds = [] sub_0804_f_preds = [] sub_count = 0 for i in range(int(len(sub_08_04)/ sub_count_len)) : sub_0804_cc_preds.append(sub_08_04.ConfirmedCases[sub_count+14:sub_count+14+days_from].values.tolist()) sub_0804_f_preds.append(sub_08_04.Fatalities[sub_count+14:sub_count+14+days_from].values.tolist()) sub_count += sub_count_len<define_variables>
def clean_embarked(data): data['Embarked'] = data['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
3,282,978
train_count_len = len(train_df[train_df['Country_Region'] == 'Russia']) train_cc_act = [] train_f_act = [] train_count = 0 for i in range(int(len(train_df)/ train_count_len)) : train_cc_act.append(train_df.ConfirmedCases[train_count+78:train_count+78+days_from].values.tolist()) train_f_act.append(train_df.Fatalities[train_count+78:train_count+78+days_from].values.tolist()) train_count += train_count_len<concatenate>
def clean_cabin(data): data['Cabin'].fillna('U', inplace=True) data['Cabin'] = data['Cabin'].map(lambda x: x[0] )
Titanic - Machine Learning from Disaster
3,282,978
train_0804_cc_act = [] train_0804_f_act = [] for i in range(len(train_cc_act)) : if i not in new_countries_idxs: train_0804_cc_act.append(train_cc_act[i]) train_0804_f_act.append(train_f_act[i] )<categorify>
def clean_family(data): data['FamilySize'] = data['SibSp'] + data['Parch'] + 1 data.drop(['SibSp', 'Parch'], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster
3,282,978
div_act_sub_cc = [] div_act_sub_f = [] for i in range(len(train_0804_cc_act)) : div_act_sub_cc.append([train_0804_cc_act[i][0] / sub_0804_cc_preds[i][0], train_0804_cc_act[i][1] / sub_0804_cc_preds[i][1]]) div_act_sub_f.append([(train_0804_f_act[i][0]+1)/(sub_0804_f_preds[i][0]+1), (train_0804_f_act[i][1]+1)/(sub_0804_f_preds[i][1]+1)] )<feature_engineering>
clean_family(train_test )
Titanic - Machine Learning from Disaster
3,282,978
cc_becs = [] f_becs = [] for i in range(len(div_act_sub_cc)) : cc_becs.append(np.mean(div_act_sub_cc[i])) f_becs.append(np.mean(div_act_sub_f[i]))<prepare_output>
def encode(data, labels): for label in labels: data = data.join(pd.get_dummies(data[label], prefix = label)) data.drop(label, axis=1, inplace=True) return data
Titanic - Machine Learning from Disaster
3,282,978
for idx in new_countries_idxs: cc_becs.insert(idx, 1) f_becs.insert(idx, 1 )<sort_values>
store = train_test.copy()
Titanic - Machine Learning from Disaster
3,282,978
predicted_ConfirmedCases = [] predicted_Fatalities = [] for i in range(int(len(train_df)/ count_len)) : predicted_ConfirmedCases.append(train_cc[i][-delta:]) predicted_ConfirmedCases.append(predicted_cc[i]) predicted_Fatalities.append(train_f[i][-delta:]) predicted_Fatalities.append(predicted_f[i]) predicted_ConfirmedCases = list(itertools.chain.from_iterable(predicted_ConfirmedCases)) predicted_Fatalities = list(itertools.chain.from_iterable(predicted_Fatalities))<categorify>
train_test = encode(train_test, ['Pclass', 'Sex', 'Embarked', 'Title', 'Cabin']) train_test.head(1 ).T
Titanic - Machine Learning from Disaster
3,282,978
mod_predicted_cc = [] mod_predicted_f = [] part = 1 for i in range(len(predicted_cc)) : mod_predicted_cc.append([x*(cc_becs[i]*part)for x in predicted_cc[i]]) mod_predicted_f.append([x*(f_becs[i]*part)for x in predicted_f[i]] )<define_variables>
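The corrected series m_predicted_ConfirmedCases and m_predicted_Fatalities consumed below are never assembled in this extract; a plausible reconstruction that mirrors how predicted_ConfirmedCases was built from train_cc and predicted_cc (the interleaving is an assumption):
import itertools
m_predicted_ConfirmedCases, m_predicted_Fatalities = [], []
for i in range(len(mod_predicted_cc)):
    # Interleave the last `delta` observed values with the bias-corrected forecasts.
    m_predicted_ConfirmedCases.append(train_cc[i][-delta:])
    m_predicted_ConfirmedCases.append(mod_predicted_cc[i])
    m_predicted_Fatalities.append(train_f[i][-delta:])
    m_predicted_Fatalities.append(mod_predicted_f[i])
m_predicted_ConfirmedCases = list(itertools.chain.from_iterable(m_predicted_ConfirmedCases))
m_predicted_Fatalities = list(itertools.chain.from_iterable(m_predicted_Fatalities))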
train = train_test.loc[:890, :] test = train_test.loc[891:, :]
Titanic - Machine Learning from Disaster
3,282,978
mean_ConfirmedCases_preds = [] mean_Fatalities_preds = [] for i in range(int(len(predicted_ConfirmedCases))): mean_ConfirmedCases_preds.append(predicted_ConfirmedCases[i] * 0.15 + m_predicted_ConfirmedCases[i] * 0.85) mean_Fatalities_preds.append(predicted_Fatalities[i] * 0.15 + m_predicted_Fatalities[i] * 0.85 )<prepare_output>
def model(classifier, train, test): target = train['Survived'].values features = train.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis = 1 ).values scores = cross_val_score(classifier, features, target, cv=5) print(f'Scores for 5 fold CV: {round(np.mean(scores*100)) }') classifier_ = classifier.fit(features, target) test = test.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis = 1) predictions = classifier_.predict(test ).astype(int) return predictions
Titanic - Machine Learning from Disaster
3,282,978
submission['ConfirmedCases'] = predicted_ConfirmedCases submission['Fatalities'] = predicted_Fatalities<create_dataframe>
def submit(predictions): submission = pd.read_csv('../input/gender_submission.csv') submission['Survived'] = predictions submission.to_csv('submission.csv', index=False) return submission
Titanic - Machine Learning from Disaster
3,282,978
submission1 = pd.DataFrame(data=submission.ForecastId, columns=['ForecastId']) submission1['ConfirmedCases'] = m_predicted_ConfirmedCases submission1['Fatalities'] = m_predicted_Fatalities<create_dataframe>
classifier = linear_model.LogisticRegression(solver='liblinear') predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
submission2 = pd.DataFrame(data=submission.ForecastId, columns=['ForecastId']) submission2['ConfirmedCases'] = mean_ConfirmedCases_preds submission2['Fatalities'] = mean_Fatalities_preds<load_from_csv>
classifier = SVC(gamma='auto', kernel='linear') predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
concl_df = pd.read_csv(sorted(paths)[-2]) concl_feats = ['ForecastId', 'Country_Region', 'Date'] conclusion = pd.concat([concl_df[concl_feats], submission[['ConfirmedCases', 'Fatalities']]], axis=1) concl1_df = pd.read_csv(sorted(paths)[-2]) conclusion1 = pd.concat([concl1_df[concl_feats], submission1[['ConfirmedCases', 'Fatalities']]], axis=1) concl2_df = pd.read_csv(sorted(paths)[-2]) conclusion2 = pd.concat([concl2_df[concl_feats], submission2[['ConfirmedCases', 'Fatalities']]], axis=1 )<filter>
classifier = DecisionTreeClassifier(random_state = 1, max_depth = 3) predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
region = 'Russia' conclusion2[conclusion2['Country_Region'] == region]<feature_engineering>
classifier = GradientBoostingClassifier() predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
original_cc = conclusion[conclusion['Country_Region'] == region]['ConfirmedCases'].values corrected_cc = conclusion1[conclusion1['Country_Region'] == region]['ConfirmedCases'].values mean_cc = conclusion2[conclusion2['Country_Region'] == region]['ConfirmedCases'].values original_f = conclusion[conclusion['Country_Region'] == region]['Fatalities'].values corrected_f = conclusion1[conclusion1['Country_Region'] == region]['Fatalities'].values mean_f = conclusion2[conclusion2['Country_Region'] == region]['Fatalities'].values date = conclusion[conclusion['Country_Region'] == region]['Date'].values for i in range(len(date)) : date[i] = date[i][5:] fig,(ax1, ax2)= plt.subplots(2,1, figsize=(14,14), dpi=120) ax1.plot(date, original_cc, ':g', label='original SARIMAX') ax1.plot(date, corrected_cc, ':b', label='corrected SARIMAX') ax1.plot(date, mean_cc, 'r', label='weighted SARIMAX') ax2.plot(date, original_f, ':g', label='original SARIMAX') ax2.plot(date, corrected_f, ':b', label='corrected SARIMAX') ax2.plot(date, mean_f, 'r', label='weighted SARIMAX') ax1.set_title(f'ConfirmedCases differences {region}') ax1.set_xlabel('Date') ax1.set_xticklabels(date, rotation=40) ax1.set_ylabel('ConfirmedCases') ax1.legend(loc='best') ax1.grid() ax2.set_title(f'Fatalities differences {region}') ax2.set_xlabel('Date') ax2.set_xticklabels(date, rotation=40) ax2.set_ylabel('Fatalities') ax2.legend(loc='best') ax2.grid() plt.show() ;<save_to_csv>
classifier = KNeighborsClassifier(3) predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
submission2.to_csv('submission.csv', index=False )<import_modules>
classifier = GaussianNB() predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import nltk from sklearn.preprocessing import LabelBinarizer,LabelEncoder,StandardScaler,MinMaxScaler from sklearn.linear_model import LogisticRegression,SGDClassifier,LinearRegression from sklearn.naive_bayes import MultinomialNB from sklearn.svm import SVC from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import classification_report,confusion_matrix,accuracy_score from sklearn.model_selection import train_test_split import keras from keras.wrappers.scikit_learn import KerasRegressor from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from keras.models import Sequential from keras.layers import Dense,LSTM import tensorflow as tf<load_from_csv>
classifier = AdaBoostClassifier() predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
train_df = pd.read_csv("../input/covid19-global-forecasting-week-4/train.csv") test_df = pd.read_csv("../input/covid19-global-forecasting-week-4/test.csv") submission = pd.read_csv("../input/covid19-global-forecasting-week-4/submission.csv")<count_missing_values>
classifier = RandomForestClassifier(n_estimators=100, max_depth = 7) predictions = model(classifier, train, test )
Titanic - Machine Learning from Disaster
3,282,978
train_df.isna().sum()<count_missing_values>
xg_test = test.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis = 1).to_numpy() xg_train = train.drop(['PassengerId', 'Survived', 'Name', 'Ticket'], axis = 1).to_numpy() target = train['Survived'].values classifier = XGBClassifier() classifier_ = classifier.fit(xg_train, target) classifier_.score(xg_train, target)
Titanic - Machine Learning from Disaster
3,282,978
test_df.isna().sum()<drop_column>
predictions = classifier_.predict(xg_test ).astype(int )
Titanic - Machine Learning from Disaster
3,282,978
train_df['Province_State'].fillna("", inplace=True) test_df['Province_State'].fillna("", inplace=True) train_df['Country_Region'] = train_df['Country_Region'] + ' ' + train_df['Province_State'] test_df['Country_Region'] = test_df['Country_Region'] + ' ' + test_df['Province_State'] del train_df['Province_State'] del test_df['Province_State']<feature_engineering>
Titanic - Machine Learning from Disaster
3,282,978
def split_date(date): date = date.split('-') date[0] = int(date[0]) if(date[1][0] == '0'): date[1] = int(date[1][1]) else: date[1] = int(date[1]) if(date[2][0] == '0'): date[2] = int(date[2][1]) else: date[2] = int(date[2]) return date train_df.Date = train_df.Date.apply(split_date) test_df.Date = test_df.Date.apply(split_date )<feature_engineering>
submission = submit(predictions) submission['Survived'].value_counts()
Titanic - Machine Learning from Disaster
902,619
year = [] month = [] day = [] for i in train_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2] )<feature_engineering>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') data = pd.concat([train, test]) train.shape
Titanic - Machine Learning from Disaster
902,619
train_df['Year'] = year train_df['Month'] = month train_df['Day'] = day del train_df['Date']<feature_engineering>
sex_label = LabelEncoder() cabin_label = LabelEncoder() embarked_label = LabelEncoder() family_name_label = LabelEncoder() title_label = LabelEncoder() title_remap_label = LabelEncoder() data['Sex_Code'] = sex_label.fit_transform(data.Sex) data['Cabin_Prefix'] = data.Cabin.str.get(0 ).fillna('Z') data['Cabin_Code'] = cabin_label.fit_transform(data.Cabin.str.get(0 ).fillna('Z')) data['Has_Cabin'] =(data.Cabin.str.get(0 ).fillna('Z')!= 'Z' ).astype('int32') data['Embarked_fillZ'] = data.Embarked.fillna('Z') data['Embarked_Code'] = embarked_label.fit_transform(data.Embarked.fillna('S')) data['FamilySize'] = data.Parch + data.SibSp + 1 data['BigFamily'] = data.FamilySize.apply(lambda s: s if s < 5 else 5) data['IsAlone'] = data.FamilySize == 1 data['FamilyName'] = data.Name.str.extract('(\w+),', expand=False) data['FamilyName_Code'] = family_name_label.fit_transform(data.FamilyName) data['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False) mapping = { 'Mlle': 'Miss', 'Ms': 'Miss', 'Dona': 'Mrs', 'Mme': 'Miss', 'Lady': 'Mrs', 'Capt': 'Honorable', 'Countess': 'Honorable', 'Major': 'Honorable', 'Col': 'Honorable', 'Sir': 'Honorable', 'Don': 'Honorable', 'Jonkheer': 'Honorable', 'Rev': 'Honorable', 'Dr': 'Honorable' } data['Title_Remap'] = data.Title.replace(mapping) data['Title_Code'] = title_label.fit_transform(data.Title) data['Title_Remap_Code'] = title_remap_label.fit_transform(data.Title_Remap) data.Age = data.Age.fillna(data.Age.median()) data.Fare = data.Fare.fillna(data.Fare.median() )
Titanic - Machine Learning from Disaster
902,619
year = [] month = [] day = [] for i in test_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2] )<feature_engineering>
m = data[['FamilyName', 'Survived']].groupby('FamilyName' ).max() c = data[['FamilyName', 'PassengerId']].groupby('FamilyName' ).count() m = m.rename(columns={'Survived': 'FamilySurvived'}) c = c.rename(columns={'PassengerId': 'FamilyMemberCount'}) m = m.where(m.join(c ).FamilyMemberCount > 1, other=-1, axis=1 ).fillna(-1 ).join(c) m.FamilySurvived = m.FamilySurvived.astype('int32') joined_data = data.join(m, on='FamilyName' )
Titanic - Machine Learning from Disaster
902,619
test_df['Year'] = year test_df['Month'] = month test_df['Day'] = day del test_df['Date'] del train_df['Id'] del test_df['ForecastId']<drop_column>
optimal_d = 20 Z = linkage(ticket.reshape(data.shape[0], 1), 'single') clusters = fcluster(Z, optimal_d, criterion='distance') joined_data['Ticket_Code'] = clusters
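Neither `ticket` nor the scipy imports appear in this extract, and the Ticket_Code_Remap column referenced below is never created; one plausible reconstruction (the numeric-ticket parsing and the rare-cluster remap are assumptions):
from scipy.cluster.hierarchy import linkage, fcluster
# Assumed: the trailing digits of each ticket as a numeric array aligned with `data`.
ticket = data.Ticket.str.extract(r'(\d+)$', expand=False).fillna('0').astype(float).values
# Assumed remap: collapse singleton clusters into bucket 0 so rare tickets share one code.
counts = joined_data['Ticket_Code'].value_counts()
joined_data['Ticket_Code_Remap'] = joined_data['Ticket_Code'].where(
    joined_data['Ticket_Code'].map(counts) > 1, 0)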
Titanic - Machine Learning from Disaster
902,619
del train_df['Year'] del test_df['Year']<data_type_conversions>
joined_data[['FamilyName', 'Name', 'Age', 'Fare', 'BigFamily', 'Pclass', 'Has_Cabin', 'Embarked', 'Sex', 'Title', 'Ticket_Code', 'Ticket_Code_Remap', 'Survived']][joined_data.Ticket_Code==89].sort_values(by='FamilyName' )
Titanic - Machine Learning from Disaster
902,619
train_df['ConfirmedCases'] = train_df['ConfirmedCases'].apply(int) train_df['Fatalities'] = train_df['Fatalities'].apply(int )<drop_column>
joined_data[['FamilyName', 'Name', 'Age', 'Fare', 'BigFamily', 'Pclass', 'Has_Cabin', 'Embarked', 'Sex', 'Title', 'Ticket_Code', 'Ticket_Code_Remap', 'Survived']][joined_data.Ticket_Code==186].sort_values(by='FamilyName' )
Titanic - Machine Learning from Disaster
902,619
cases = train_df.ConfirmedCases fatalities = train_df.Fatalities del train_df['ConfirmedCases'] del train_df['Fatalities']<categorify>
joined_data[['FamilyName', 'Name', 'Age', 'Fare', 'BigFamily', 'Pclass', 'Has_Cabin', 'Embarked', 'Sex', 'Title', 'Ticket_Code', 'Ticket_Code_Remap', 'Survived']][joined_data.Ticket_Code==127].sort_values(by='FamilyName' )
Titanic - Machine Learning from Disaster
902,619
lb = LabelEncoder() train_df['Country_Region'] = lb.fit_transform(train_df['Country_Region']) test_df['Country_Region'] = lb.transform(test_df['Country_Region'] )<normalization>
selected_features = ['Age', 'Fare', 'BigFamily', 'Pclass', 'Has_Cabin', 'Embarked_Code', 'Sex_Code', 'Title_Remap_Code', 'Ticket_Code_Remap', 'FamilySurvived', ] one_hot_features = ['Pclass', 'BigFamily', 'FamilySurvived', 'Embarked_Code', 'Title_Remap_Code', 'Ticket_Code_Remap', ] selected_data = joined_data[selected_features] print('Does the following feature contain any NaN? ') for f in selected_features: print('%s: %s' %(f, repr(selected_data[f].isna().any())) )
Titanic - Machine Learning from Disaster
902,619
scaler = MinMaxScaler() x_train = scaler.fit_transform(train_df.values) x_test = scaler.transform(test_df.values )<import_modules>
selected_data_one_hot = pd.get_dummies(selected_data, columns = one_hot_features) rescaling_features = ['Age', 'Fare'] std_scaler = StandardScaler() for f in rescaling_features: selected_data_one_hot[f] = std_scaler.fit_transform(selected_data_one_hot[f].values.reshape(-1, 1)) train_x = selected_data[:train.shape[0]] test_x = selected_data[train.shape[0]:] train_x_one_hot = selected_data_one_hot[:train.shape[0]] test_x_one_hot = selected_data_one_hot[train.shape[0]:] train_y = data[:train.shape[0]].Survived
Titanic - Machine Learning from Disaster
902,619
from xgboost import XGBRegressor<train_model>
parameters = {'n_estimators': [10,50,100,200], 'learning_rate': [0.05, 0.1], 'max_depth': [2,3,4], 'min_samples_leaf': [2,3], 'verbose': [0]} grid_obj = GridSearchCV(GradientBoostingClassifier() , parameters, scoring = 'roc_auc', cv = 4, n_jobs = 4, verbose = 1) grid_obj = grid_obj.fit(train_x, train_y) gb = grid_obj.best_estimator_ gb
Titanic - Machine Learning from Disaster
902,619
rf = XGBRegressor(n_estimators = 2500 , random_state = 0 , max_depth = 27) rf.fit(x_train,cases )<predict_on_test>
model = gb.fit(train_x, train_y) pred_y = gb.predict(train_x) f1 = f1_score(train_y, pred_y) acc = accuracy_score(train_y, pred_y )
Titanic - Machine Learning from Disaster
902,619
cases_pred = rf.predict(x_test) cases_pred<feature_engineering>
test_y = pd.Series(gb.predict(test_x), name="Survived", dtype='int32') results = pd.concat([data[train.shape[0]:].PassengerId, test_y], axis=1 )
Titanic - Machine Learning from Disaster
902,619
cases_pred = np.around(cases_pred,decimals = 0) cases_pred<concatenate>
results.to_csv("gbdt_csv_to_submit.csv",index=False )
Titanic - Machine Learning from Disaster
902,619
x_train_cas = [] for i in range(len(x_train)) : x = list(x_train[i]) x.append(cases[i]) x_train_cas.append(x) x_train_cas[0]<prepare_x_and_y>
training_config = { 'gbdt': { 'clf': GradientBoostingClassifier() , 'parameters': { 'n_estimators': [10,50,100,200], 'learning_rate': [0.05, 0.1], 'max_depth': [2,3,4], 'min_samples_leaf': [2,3], }, 'n_jobs': 4, 'one_hot': False }, 'logit' : { 'clf': LogisticRegression() , 'parameters': { 'penalty': ['l1', 'l2'], 'C': list(np.arange(0.5, 8.0, 0.1)) } }, 'svm': { 'clf': LinearSVC() , 'parameters': { 'penalty': ['l2'], 'loss': ['hinge', 'squared_hinge'], 'C': list(np.arange(0.5, 8.0, 0.1)) } }, 'rf': { 'clf': RandomForestClassifier() , 'parameters': { 'n_estimators': [10,50,100,200], 'criterion': ['gini', 'entropy'], 'max_depth': [2,3,4], 'min_samples_leaf': [2,3], }, 'n_jobs': 4, 'one_hot': False }, 'ada': { 'clf': AdaBoostClassifier() , 'parameters': { 'n_estimators': [10,50,100,200], 'learning_rate': [0.05, 0.1, 0.5, 1.0, 2.0], }, 'n_jobs': 4, 'one_hot': False } } exp_to_run = training_config.keys()
Titanic - Machine Learning from Disaster
902,619
x_train_cas = np.array(x_train_cas )<train_model>
results = { 'name': [], 'f1': [], 'accuracy': [] } train_pred = {} test_pred = {} for name in exp_to_run: conf = training_config[name] clf = conf['clf'] parameters = conf['parameters'] n_jobs = conf.get('n_jobs', 1) one_hot = conf.get('one_hot', True) print('=' * 20) print('Starting training:', name) grid_obj = GridSearchCV(clf, parameters, scoring = 'roc_auc', cv = 4, n_jobs = n_jobs, verbose = 1) train_X = train_x_one_hot if one_hot else train_x print('Number of Features:', train_X.columns.shape[0]) grid_obj = grid_obj.fit(train_X, train_y) best_clf = grid_obj.best_estimator_ print('Best classifier:', repr(best_clf)) model = best_clf.fit(train_X, train_y) pred_y = best_clf.predict(train_X) train_pred[name] = pred_y f1 = f1_score(train_y, pred_y) acc = accuracy_score(train_y, pred_y) results['name'].append(name) results['f1'].append(f1) results['accuracy'].append(acc) test_X = test_x_one_hot if one_hot else test_x test_y = pd.Series(best_clf.predict(test_X), name="Survived", dtype='int32') test_pred[name] = test_y output = pd.concat([test.PassengerId, test_y], axis=1) output_filename = name + "_csv_to_submit.csv" print('Writing submission file:', output_filename) output.to_csv(output_filename, index=False)
Titanic - Machine Learning from Disaster
902,619
rf = XGBRegressor(n_estimators = 2500 , random_state = 0 , max_depth = 27) rf.fit(x_train_cas,fatalities )<concatenate>
pred_y = pd.DataFrame.from_dict(train_pred ).mean(axis=1)> 0.5 f1 = f1_score(train_y, pred_y) acc = accuracy_score(train_y, pred_y) results['name'].append('voting') results['f1'].append(f1) results['accuracy'].append(acc) test_y = pd.Series(pd.DataFrame.from_dict(test_pred ).mean(axis=1)> 0.5, name="Survived", dtype='int32') output = pd.concat([test.PassengerId, test_y], axis=1) output_filename = 'voting_csv_to_submit.csv' print('Writing submission file:', output_filename) output.to_csv(output_filename, index=False )
Titanic - Machine Learning from Disaster
902,619
<predict_on_test><EOS>
pd.DataFrame.from_dict(results )
Titanic - Machine Learning from Disaster
8,612,326
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
sns.set()
Titanic - Machine Learning from Disaster
8,612,326
fatalities_pred = np.around(fatalities_pred,decimals = 0) fatalities_pred<prepare_output>
test = pd.read_csv("../input/titanic/test.csv") train = pd.read_csv("../input/titanic/train.csv") data_cleaner = [train, test]
Titanic - Machine Learning from Disaster
8,612,326
submission['ConfirmedCases'] = cases_pred submission['Fatalities'] = fatalities_pred<save_to_csv>
for data in data_cleaner: print(data.isnull().sum()) print(' ' )
Titanic - Machine Learning from Disaster
8,612,326
submission.to_csv("submission.csv" , index = False )<load_from_csv>
age_ref = pd.DataFrame(data=[train.groupby('Pclass')['Age'].mean() ],columns=train['Pclass'].unique()) age_ref
Titanic - Machine Learning from Disaster
8,612,326
train_df = pd.read_csv("../input/covid19-global-forecasting-week-4/train.csv") test_df = pd.read_csv("../input/covid19-global-forecasting-week-4/test.csv") submission = pd.read_csv("../input/covid19-global-forecasting-week-4/submission.csv")<drop_column>
def fill_age(pclass,age): if pd.isnull(age): return float(age_ref[pclass]) else: return age for data in data_cleaner: data['Age'] = data.apply(lambda x: fill_age(x['Pclass'],x['Age']), axis=1)
Titanic - Machine Learning from Disaster
8,612,326
train_df['Province_State'].fillna("",inplace = True) test_df['Province_State'].fillna("",inplace = True) train_df['Country_Region'] = train_df['Country_Region'] + ' ' + train_df['Province_State'] test_df['Country_Region'] = test_df['Country_Region'] + ' ' + test_df['Province_State'] del train_df['Province_State'] del test_df['Province_State']<feature_engineering>
def fill_fare(fare): if pd.isnull(fare): return train['Fare'].mean() else: return fare def fill_embark(embark): if pd.isnull(embark): return train['Embarked'].mode().iloc[0] else: return embark for data in data_cleaner: data['Fare'] = data.apply(lambda x: fill_fare(x['Fare']), axis=1) data['Embarked'] = data.apply(lambda x: fill_embark(x['Embarked']), axis=1)
Titanic - Machine Learning from Disaster
8,612,326
def split_date(date): date = date.split('-') date[0] = int(date[0]) if(date[1][0] == '0'): date[1] = int(date[1][1]) else: date[1] = int(date[1]) if(date[2][0] == '0'): date[2] = int(date[2][1]) else: date[2] = int(date[2]) return date train_df.Date = train_df.Date.apply(split_date) test_df.Date = test_df.Date.apply(split_date )<feature_engineering>
for data in data_cleaner: data.drop(['Cabin'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
8,612,326
year = [] month = [] day = [] for i in train_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2] )<feature_engineering>
for data in data_cleaner: print(data.isnull().sum()) print(' ' )
Titanic - Machine Learning from Disaster
8,612,326
train_df['Year'] = year train_df['Month'] = month train_df['Day'] = day del train_df['Date']<feature_engineering>
title_list = list() for data in data_cleaner: for title in data['Name']: title = title.split('.')[0].split(',')[1] title_list.append(title) data['Title'] = title_list title_list = list()
Titanic - Machine Learning from Disaster
8,612,326
year = [] month = [] day = [] for i in test_df.Date: year.append(i[0]) month.append(i[1]) day.append(i[2] )<feature_engineering>
for data in data_cleaner: print(data['Title'].value_counts()) print(' ' )
Titanic - Machine Learning from Disaster
8,612,326
test_df['Year'] = year test_df['Month'] = month test_df['Day'] = day del test_df['Date'] del train_df['Id'] del test_df['ForecastId']<train_model>
train['Title'] = train['Title'].replace([ ' Don', ' Rev', ' Dr', ' Mme',' Ms', ' Major', ' Lady', ' Sir', ' Mlle', ' Col', ' Capt', ' the Countess', ' Jonkheer'], 'Others') train['Title'].value_counts()
Titanic - Machine Learning from Disaster
8,612,326
rf = XGBRegressor(n_estimators = 1600 , random_state = 0 , max_depth = 15) rf.fit(x_train,cases) cases_pred = rf.predict(x_test) cases_pred = np.around(cases_pred,decimals = 0) x_train_cas = [] for i in range(len(x_train)) : x = list(x_train[i]) x.append(cases[i]) x_train_cas.append(x) x_train_cas[0] x_train_cas = np.array(x_train_cas) rf = XGBRegressor(n_estimators = 1600 , random_state = 0 , max_depth = 15) rf.fit(x_train_cas,fatalities) x_test_cas = [] for i in range(len(x_test)) : x = list(x_test[i]) x.append(cases_pred[i]) x_test_cas.append(x) x_test_cas[0] x_test_cas = np.array(x_test_cas) fatalities_pred = rf.predict(x_test_cas) fatalities_pred = np.around(fatalities_pred,decimals = 0) submission['ConfirmedCases'] = cases_pred submission['Fatalities'] = fatalities_pred submission.to_csv("submission.csv" , index = False )<set_options>
test['Title'] = test['Title'].replace([ ' Don', ' Rev', ' Dr', ' Mme',' Ms', ' Major', ' Lady', ' Sir', ' Mlle', ' Col', ' Capt', ' the Countess', ' Jonkheer',' Dona'], 'Others') test['Title'].value_counts()
Titanic - Machine Learning from Disaster
8,612,326
plt.style.use('fivethirtyeight') le = preprocessing.LabelEncoder() warnings.filterwarnings('ignore' )<import_modules>
def get_size(df): size = df['SibSp'] + df['Parch'] + 1 if size == 1: return 'Single' if size > 4: return 'Big' if size > 1: return 'Small' for data in data_cleaner: data['FamilySize'] = data.apply(get_size,axis=1) for data in data_cleaner: data['IsAlone'] = 1 data['IsAlone'].loc[data['FamilySize'] != 'Single'] = 0
Titanic - Machine Learning from Disaster
8,612,326
import plotly.io as pio import plotly.express as px import plotly.graph_objects as go <load_from_csv>
sex = pd.get_dummies(train['Sex'],drop_first=True) embark = pd.get_dummies(train['Embarked'],drop_first=True) title = pd.get_dummies(train['Title'],drop_first=True) Pclass = pd.get_dummies(train['Pclass'],drop_first=True) FamilySize = pd.get_dummies(train['FamilySize'],drop_first=True) sex2 = pd.get_dummies(test['Sex'],drop_first=True) embark2 = pd.get_dummies(test['Embarked'],drop_first=True) title2 = pd.get_dummies(test['Title'],drop_first=True) Pclass2 = pd.get_dummies(test['Pclass'],drop_first=True) FamilySize2 = pd.get_dummies(test['FamilySize'],drop_first=True) for data in data_cleaner: data.drop(['Sex','Embarked','Name','Ticket','PassengerId','Title','FamilySize'],axis=1,inplace=True) train = pd.concat([sex,embark,train,title,FamilySize],axis=1) test = pd.concat([sex2,embark2,test,title2,FamilySize2],axis=1 )
Titanic - Machine Learning from Disaster
8,612,326
train_df = pd.read_csv("../input/covid19-global-forecasting-week-4/train.csv") test_df = pd.read_csv("../input/covid19-global-forecasting-week-4/test.csv") submission = pd.read_csv("../input/covid19-global-forecasting-week-4/submission.csv")<count_missing_values>
X = train.drop('Survived',axis=1) y = train['Survived'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101 )
Titanic - Machine Learning from Disaster
8,612,326
display(train_df.isnull().sum() /len(train_df)*100) display(test_df.isnull().sum() /len(test_df)*100 )<rename_columns>
scaler = MinMaxScaler() scaler.fit(X_train) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) test = scaler.transform(test)
Titanic - Machine Learning from Disaster
8,612,326
train_df.rename(columns={'Province_State':'State','Country_Region':'Country'}, inplace=True) test_df.rename(columns={'Province_State':'State','Country_Region':'Country'}, inplace=True )<compute_test_metric>
logistic_model = LogisticRegression() logistic_model.fit(X_train, y_train) y_pred = logistic_model.predict(X_test )
Titanic - Machine Learning from Disaster
8,612,326
def missings(state, country): return country if pd.isna(state)== True else state<feature_engineering>
print(classification_report(y_test,y_pred)) print(' ') print(confusion_matrix(y_test,y_pred))
Titanic - Machine Learning from Disaster
8,612,326
<count_unique_values><EOS>
predictions = logistic_model.predict(test) pred_list = [int(x) for x in predictions] test2 = pd.read_csv("../input/titanic/test.csv") output = pd.DataFrame({'PassengerId': test2['PassengerId'], 'Survived': pred_list}) output.to_csv('Titanic_with_logistic.csv', index=False)
Titanic - Machine Learning from Disaster
11,019,551
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
11,019,551
df_plot = train_df.loc[: , ['Date', 'Country', 'ConfirmedCases', 'Fatalities']].groupby(['Date', 'Country'] ).max().reset_index() df_plot.loc[:, 'Size'] = np.power(df_plot["ConfirmedCases"]+1,0.3)-1 fig = px.scatter_geo(df_plot, locations="Country", locationmode = "country names", hover_name="Country", color="ConfirmedCases", animation_frame="Date", size='Size', projection="natural earth", title="Global Spread of Covid-19", width=1500, height=800) fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["duration"] = 5 fig.show() <merge>
train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv')
Titanic - Machine Learning from Disaster
11,019,551
data_leak = pd.merge(train_df,test_df, how='inner', on='Date')['Date'].unique().tolist() data_leak.append('2020-04-01') data_leak.sort() print("Both data sets contain the following dates: {}".format(data_leak))<feature_engineering>
train_len = len(train) test_copy = test.copy()
Titanic - Machine Learning from Disaster
11,019,551
def create_features(df): df['Day_num'] = le.fit_transform(df['Date']) df['Date'] = pd.to_datetime(df['Date']) df['Day'] = df['Date'].dt.day df['Week'] = df['Date'].dt.week df['Month'] = df['Date'].dt.month df['DayOfWeek'] = df['Date'].dt.dayofweek df['Country'] = le.fit_transform(df['Country']) country_dict = dict(zip(le.inverse_transform(df['Country']), df['Country'])) df['State'] = le.fit_transform(df['State']) state_dict = dict(zip(le.inverse_transform(df['State']), df['State'])) return df, country_dict, state_dict<feature_engineering>
total = train.append(test) total.isnull().sum()
Titanic - Machine Learning from Disaster
11,019,551
df_all, country_dict, state_dict = create_features(df_all )<prepare_x_and_y>
total[total.Fare.isnull() ]
Titanic - Machine Learning from Disaster
11,019,551
def train_test_split_extend(df,d,day,filter_col_confirmed,filter_col_fatalities): df=df.loc[df['Day_num'] >= day] df_train = df.loc[df['Day_num'] < d] X_train = df_train Y_train_1 = df_train['ConfirmedCases'] Y_train_2 = df_train['Fatalities'] X_train_1 = X_train.drop(columns=filter_col_fatalities ).drop(columns='ConfirmedCases') X_train_2 = X_train.drop(columns=filter_col_confirmed ).drop(columns='Fatalities') df_test = df.loc[df['Day_num'] == d] x_test = df_test x_test_1 = x_test.drop(columns=filter_col_fatalities ).drop(columns='ConfirmedCases') x_test_2 = x_test.drop(columns=filter_col_confirmed ).drop(columns='Fatalities') x_test.drop(['ConfirmedCases', 'Fatalities'], axis=1, inplace=True) X_train_1.drop('Id', inplace=True, errors='ignore', axis=1) X_train_1.drop('ForecastId', inplace=True, errors='ignore', axis=1) X_train_2.drop('Id', inplace=True, errors='ignore', axis=1) X_train_2.drop('ForecastId', inplace=True, errors='ignore', axis=1) x_test_1.drop('Id', inplace=True, errors='ignore', axis=1) x_test_1.drop('ForecastId', inplace=True, errors='ignore', axis=1) x_test_2.drop('Id', inplace=True, errors='ignore', axis=1) x_test_2.drop('ForecastId', inplace=True, errors='ignore', axis=1) return X_train_1, X_train_2, Y_train_1, Y_train_2, x_test_1, x_test_2<train_model>
total['Fare'].fillna(value = total[total.Pclass==3]['Fare'].median() , inplace = True )
Titanic - Machine Learning from Disaster
11,019,551
def lin_reg(X_train, Y_train, x_test): regr = linear_model.LinearRegression() regr.fit(X_train, Y_train) pred = regr.predict(x_test) return regr, pred<drop_column>
titles = list(total.Title.unique()) for title in titles: age = total.groupby('Title')['Age'].median().loc[title] total.loc[(total.Age.isnull())&(total.Title == title),'Age'] = age
Titanic - Machine Learning from Disaster
11,019,551
def country_calculation(df_all,country,date,day): df_country = df_all.copy() df_country = df_country.loc[df_country['Date'] >= date] df_country = df_country.loc[df_country['Country'] == country_dict[country]] features = ['Id', 'State', 'Country','ConfirmedCases', 'Fatalities', 'Day_num'] df_country = df_country[features] df_country = lag_feature(df_country, 'ConfirmedCases',range(1, 40)) df_country = lag_feature(df_country, 'Fatalities', range(1,20)) filter_col_confirmed = [col for col in df_country if col.startswith('Confirmed')] filter_col_fatalities= [col for col in df_country if col.startswith('Fataliti')] filter_col = np.append(filter_col_confirmed, filter_col_fatalities) df_country[filter_col] = df_country[filter_col].apply(lambda x: np.log1p(x)) df_country.replace([np.inf, -np.inf], 0, inplace=True) df_country.fillna(0, inplace=True) start = df_country[df_country['Id']==-999].Day_num.min() end = df_country[df_country['Id']==-999].Day_num.max() for d in range(start,end+1): X_train_1, X_train_2, Y_train_1, Y_train_2, x_test_1, x_test_2 = train_test_split_extend(df_country,d,day,filter_col_confirmed,filter_col_fatalities) regr_1, pred_1 = lin_reg(X_train_1, Y_train_1, x_test_1) df_country.loc[(df_country['Day_num'] == d)&(df_country['Country'] == country_dict[country]), 'ConfirmedCases'] = pred_1[0] regr_2, pred_2 = lin_reg(X_train_2, Y_train_2, x_test_2) df_country.loc[(df_country['Day_num'] == d)&(df_country['Country'] == country_dict[country]), 'Fatalities'] = pred_2[0] df_country = lag_feature(df_country, 'ConfirmedCases',range(1, 40)) df_country = lag_feature(df_country, 'Fatalities', range(1,20)) df_country.replace([np.inf, -np.inf], 0, inplace=True) df_country.fillna(0, inplace=True) print("Calculation done.") return df_country<drop_column>
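lag_feature is used throughout but never defined in this extract; a minimal sketch, assuming it appends per-location shifted copies of a column and that rows are already ordered by Day_num within each location (names and grouping keys are assumptions):
def lag_feature(df, col, lags):
    for lag in lags:
        # One new column per lag, e.g. ConfirmedCases_lag_1, so the
        # startswith('Confirmed') / startswith('Fataliti') filters above still match.
        df[f'{col}_lag_{lag}'] = df.groupby(['Country', 'State'])[col].shift(lag)
    return df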
total['Family_Size'] = total['Parch'] + total['SibSp']
Titanic - Machine Learning from Disaster
11,019,551
def country_state_calculation(df_all,country, state, date,day): df_country = df_all.copy() df_country = df_country.loc[df_country['Date'] >= date] df_country = df_country.loc[(df_country['Country'] == country_dict[country])&(df_country['State'] == state_dict[state])] features = ['Id', 'State', 'Country','ConfirmedCases', 'Fatalities', 'Day_num'] df_country = df_country[features] df_country = lag_feature(df_country, 'ConfirmedCases',range(1, 40)) df_country = lag_feature(df_country, 'Fatalities', range(1,20)) filter_col_confirmed = [col for col in df_country if col.startswith('Confirmed')] filter_col_fatalities= [col for col in df_country if col.startswith('Fataliti')] filter_col = np.append(filter_col_confirmed, filter_col_fatalities) df_country[filter_col] = df_country[filter_col].apply(lambda x: np.log1p(x)) df_country.replace([np.inf, -np.inf], 0, inplace=True) df_country.fillna(0, inplace=True) start = df_country[df_country['Id']==-999].Day_num.min() end = df_country[df_country['Id']==-999].Day_num.max() for d in range(start,end+1): X_train_1, X_train_2, Y_train_1, Y_train_2, x_test_1, x_test_2 = train_test_split_extend(df_country,d,day,filter_col_confirmed,filter_col_fatalities) regr_1, pred_1 = lin_reg(X_train_1, Y_train_1, x_test_1) df_country.loc[(df_country['Day_num'] == d)&(df_country['Country'] == country_dict[country])&(df_country['State'] == state_dict[state]), 'ConfirmedCases'] = pred_1[0] regr_2, pred_2 = lin_reg(X_train_2, Y_train_2, x_test_2) df_country.loc[(df_country['Day_num'] == d)&(df_country['Country'] == country_dict[country])&(df_country['State'] == state_dict[state]), 'Fatalities'] = pred_2[0] df_country = lag_feature(df_country, 'ConfirmedCases',range(1, 10)) df_country = lag_feature(df_country, 'Fatalities', range(1,8)) df_country.replace([np.inf, -np.inf], 0, inplace=True) df_country.fillna(0, inplace=True) print("Calculation done.") return df_country<data_type_conversions>
total['Last_Name'] = total['Name'].apply(lambda x: str.split(x, ",")[0]) total['Fare'].fillna(total['Fare'].mean() , inplace=True) default_survival_rate = 0.5 total['Family_Survival'] = default_survival_rate for grp, grp_df in total[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): total.loc[total['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): total.loc[total['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passengers with family survival information:", total.loc[total['Family_Survival']!=0.5].shape[0] )
Titanic - Machine Learning from Disaster
11,019,551
<install_modules>
for _, grp_df in total.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): total.loc[total['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): total.loc[total['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passenger with family/group survival information: " +str(total[total['Family_Survival']!=0.5].shape[0]))
Titanic - Machine Learning from Disaster
11,019,551
!pip install pyramid.arima <load_from_csv>
total.Sex.replace({'male':0, 'female':1}, inplace = True) features = ['Survived','Pclass','Sex','Family_Size','Family_Survival','Fare_Bin','Age_Bin'] total = total[features]
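Fare_Bin and Age_Bin appear in the feature list above but are never constructed in this extract; they must have been created before the selection, presumably with quantile binning along these lines (the bin counts are guesses):
total['Fare_Bin'] = pd.qcut(total['Fare'], 5, labels=False)
total['Age_Bin'] = pd.qcut(total['Age'], 4, labels=False)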
Titanic - Machine Learning from Disaster
11,019,551
def RMSLE(pred,actual): return np.sqrt(np.mean(np.power((np.log(pred+1)-np.log(actual+1)),2))) pd.set_option('mode.chained_assignment', None) test = pd.read_csv("../input/covid19-global-forecasting-week-4/test.csv") train = pd.read_csv("../input/covid19-global-forecasting-week-4/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'])<feature_engineering>
train = total[:train_len] x_train = train.drop(columns = ['Survived']) y_train = train['Survived'].astype(int) x_test = total[train_len:].drop(columns = ['Survived'] )
Titanic - Machine Learning from Disaster
11,019,551
feature_day = [1,5,10,15,20,30,40,50,75,100,150,200,300,400,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000] def CreateInput(data): feature = [] for day in feature_day: data.loc[:,'Number day from ' + str(day)+ ' case'] = 0 if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0): fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max() else: fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min() for i in range(0, len(data)) : if(data['Date'].iloc[i] > fromday): day_delta = data['Date'].iloc[i] - fromday data['Number day from ' + str(day)+ ' case'].iloc[i] = day_delta.days feature = feature + ['Number day from ' + str(day)+ ' case'] return data[feature]<prepare_x_and_y>
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
Titanic - Machine Learning from Disaster
11,019,551
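KNN is distance-based, so features on larger scales would otherwise dominate the neighbor search; note the scaler statistics come from the training split only. A standalone sketch of the same pattern with toy numbers:

from sklearn.preprocessing import StandardScaler
import numpy as np

X_tr = np.array([[1.0, 100.0], [2.0, 300.0]])
X_te = np.array([[1.5, 200.0]])
scaler = StandardScaler().fit(X_tr)   # mean/std estimated on train only
X_tr_s, X_te_s = scaler.transform(X_tr), scaler.transform(X_te)
print(X_te_s)  # [[0. 0.]]: this test row happens to sit at the train mean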
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique():
    for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
        print(country + ' and ' + province)
        df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
        df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
        X_train = CreateInput(df_train)
        y_train_confirmed = df_train['ConfirmedCases'].ravel()
        y_train_fatalities = df_train['Fatalities'].ravel()
        X_pred = CreateInput(df_test)
        # Pick the largest case threshold that still leaves at least 20 training days
        for day in sorted(feature_day, reverse=True):
            feature_use = 'Number day from ' + str(day) + ' case'
            idx = X_train[X_train[feature_use] == 0].shape[0]
            if X_train[X_train[feature_use] > 0].shape[0] >= 20:
                break
        adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
        adjusted_y_train_confirmed = y_train_confirmed[idx:]
        adjusted_y_train_fatalities = y_train_fatalities[idx:]
        idx = X_pred[X_pred[feature_use] == 0].shape[0]
        adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
        pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
        max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
        min_test_date = pred_data['Date'].min()
        # Pad series shorter than 3 observations so SARIMAX(1,1,0) can be fit
        if len(adjusted_y_train_confirmed) < 1:
            adjusted_y_train_confirmed = np.zeros(3)
        while len(adjusted_y_train_confirmed) < 3:
            adjusted_y_train_confirmed = np.append(adjusted_y_train_confirmed, adjusted_y_train_confirmed[-1])
        model = SARIMAX(adjusted_y_train_confirmed, order=(1, 1, 0), measurement_error=True).fit(disp=False)
        y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
        y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
        y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis=0)
        if len(adjusted_y_train_fatalities) < 1:
            adjusted_y_train_fatalities = np.zeros(3)
        while len(adjusted_y_train_fatalities) < 3:
            adjusted_y_train_fatalities = np.append(adjusted_y_train_fatalities, adjusted_y_train_fatalities[-1])
        model = SARIMAX(adjusted_y_train_fatalities, order=(1, 1, 0), measurement_error=True).fit(disp=False)
        y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
        y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
        y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis=0)
        pred_data['ConfirmedCases_hat'] = y_hat_confirmed
        pred_data['Fatalities_hat'] = y_hat_fatalities
        pred_data_all = pred_data_all.append(pred_data)
<save_to_csv>
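Each region gets its own SARIMAX(1,1,0); on a toy series the fit-and-forecast cycle looks like this (statsmodels assumed available, numbers illustrative):

import numpy as np
from statsmodels.tsa.statespace.sarimax import SARIMAX

# Short upward-trending series standing in for one region's cumulative counts
y = np.array([10.0, 14.0, 21.0, 30.0, 44.0, 60.0])
model = SARIMAX(y, order=(1, 1, 0), measurement_error=True).fit(disp=False)
print(model.forecast(3))  # next 3 days, extrapolated from the differenced AR(1)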
clf = KNeighborsClassifier()
params = {'n_neighbors': [6, 8, 10, 12, 14, 16, 18, 20], 'leaf_size': list(range(1, 50, 5))}
gs = GridSearchCV(clf, param_grid=params, cv=5, scoring="roc_auc", verbose=1)
gs.fit(x_train, y_train)
print(gs.best_score_)
print(gs.best_estimator_)
print(gs.best_params_)
Titanic - Machine Learning from Disaster
11,019,551
<prepare_x_and_y><EOS>
preds = gs.predict(x_test)
pd.DataFrame({'PassengerId': test_copy['PassengerId'], 'Survived': preds}).to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
3,782,114
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
import seaborn as sns
sns.set()
%matplotlib inline
Titanic - Machine Learning from Disaster
3,782,114
def crosscheck_sarima(country):
    # Align one country's SARIMA forecasts with actuals observed from 2020-04-02 onward
    crosscheck = train_df[(train_df['Country'] == country) & (train_df['Date'] >= '2020-04-02')].reset_index()
    arima = pred_data_all[(pred_data_all['Country_Region'] == country)].reset_index()
    arima['ConfirmedCases_In'] = arima['ConfirmedCases_hat']
    arima['Fatalities_In'] = arima['Fatalities_hat']
    arima['CC_Crosscheck'] = crosscheck['ConfirmedCases']
    arima['Fat_Crosscheck'] = crosscheck['Fatalities']
    arima['Day_num'] = arima['Date']
    return arima
<feature_engineering>
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
3,782,114
def crosscheck_sarima_cs(country, state):
    # Same alignment as crosscheck_sarima, restricted to one province/state
    crosscheck = train_df[(train_df['Country'] == country) & (train_df['State'] == state) & (train_df['Date'] >= '2020-04-02')].reset_index()
    arima = pred_data_all[(pred_data_all['Country_Region'] == country) & (pred_data_all['Province_State'] == state)].reset_index()
    arima['ConfirmedCases_In'] = arima['ConfirmedCases_hat']
    arima['Fatalities_In'] = arima['Fatalities_hat']
    arima['CC_Crosscheck'] = crosscheck['ConfirmedCases']
    arima['Fat_Crosscheck'] = crosscheck['Fatalities']
    arima['Day_num'] = arima['Date']
    return arima
<compute_test_metric>
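A typical follow-up is to score the aligned columns with the RMSLE defined earlier; the call below is hypothetical ('US' is only an example key) and assumes `train_df` and `pred_data_all` are populated:

# Hypothetical usage: align one country's forecasts with actuals, then score
arima_us = crosscheck_sarima('US')
print('CC RMSLE :', RMSLE(arima_us['ConfirmedCases_In'].values, arima_us['CC_Crosscheck'].values))
print('Fat RMSLE:', RMSLE(arima_us['Fatalities_In'].values, arima_us['Fat_Crosscheck'].values))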
train_df.isnull().sum()
Titanic - Machine Learning from Disaster
3,782,114
def rmsle(y, y_pred):
    assert len(y) == len(y_pred)
    terms_to_sum = [(math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0 for i, pred in enumerate(y_pred)]
    return (sum(terms_to_sum) * (1.0 / len(y))) ** 0.5

def fix_target(frame, key, target, new_target_name="target"):
    # Enforce a monotonically non-decreasing cumulative count within each key
    group_keys = frame[key].values.tolist()
    target = frame[target].values.tolist()
    for i in range(1, len(group_keys)):
        previous_group = group_keys[i - 1]
        current_group = group_keys[i]
        previous_value = target[i - 1]
        current_value = target[i]
        if current_group == previous_group:
            if current_value < previous_value:
                current_value = previous_value
            target[i] = current_value
        target[i] = max(0, target[i])
    frame[new_target_name] = np.array(target)

def rate(frame, key, target, new_target_name="rate"):
    # Day-over-day multiplicative growth rate within each key, floored at 1.0
    group_keys = frame[key].values.tolist()
    target = frame[target].values.tolist()
    rate = [1.0 for k in range(len(target))]
    for i in range(1, len(group_keys)):
        previous_group = group_keys[i - 1]
        current_group = group_keys[i]
        previous_value = target[i - 1]
        current_value = target[i]
        if current_group == previous_group:
            if previous_value != 0.0:
                rate[i] = current_value / previous_value
            rate[i] = max(1, rate[i])
    frame[new_target_name] = np.array(rate)

def get_data_by_key(dataframe, key, key_value, fields=None):
    mini_frame = dataframe[dataframe[key] == key_value]
    if fields is not None:
        mini_frame = mini_frame[fields].values
    return mini_frame

directory = "/kaggle/input/covid19-global-forecasting-week-4/"
model_directory = "/kaggle/input/modelv3-dir/model"
geo_dir = None
extra_stable_columns = None
group_by_columns = None
target1 = "ConfirmedCases"
target2 = "Fatalities"
key = "key"
if group_by_columns is not None and "continent" in group_by_columns:
    assert geo_dir is not None
train = pd.read_csv(directory + "train.csv", parse_dates=["Date"], engine="python")
test = pd.read_csv(directory + "test.csv", parse_dates=["Date"], engine="python")
train["key"] = train[["Province_State", "Country_Region"]].apply(lambda row: str(row[0]) + "_" + str(row[1]), axis=1)
test["key"] = test[["Province_State", "Country_Region"]].apply(lambda row: str(row[0]) + "_" + str(row[1]), axis=1)
if geo_dir is not None:
    region_metadata = pd.read_csv(geo_dir + "region_metadata.csv")
    train = pd.merge(train, region_metadata, how="left", on=["Province_State", "Country_Region"])
    test = pd.merge(test, region_metadata, how="left", on=["Province_State", "Country_Region"])
    train.to_csv(directory + "train_plus_geo.csv", index=False)
group_names = None
if group_by_columns is not None and len(group_by_columns) > 0:
    group_names = []
    for group in group_by_columns:
        groupss = train[["Date", group, target1, target2]]
        grp = groupss.groupby(["Date", group], as_index=False).sum()
        grp.columns = ["Date", group, group + "_" + target1, group + "_" + target2]
        train = pd.merge(train, grp, how="left", on=["Date", group])
        for gr in [group + "_" + target1, group + "_" + target2]:
            rate(train, key, gr, new_target_name="rate_" + gr)
            group_names += ["rate_" + gr]
    train.to_csv(directory + "train_plus_groups.csv", index=False)
max_train_date = train["Date"].max()
max_test_date = test["Date"].max()
horizon = (max_test_date - max_train_date).days
print("horizon", int(horizon))
print("max_train_date", max_train_date)
print("max_test_date", max_test_date)
<categorify>
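The two cleaning helpers behave like this on a toy frame (illustrative data; relies on the definitions above):

import pandas as pd
import numpy as np

toy = pd.DataFrame({"key": ["A"] * 4, "ConfirmedCases": [5, 3, 8, 8]})
fix_target(toy, "key", "ConfirmedCases", new_target_name="ConfirmedCases")
print(toy["ConfirmedCases"].tolist())        # [5, 5, 8, 8] — dips smoothed upward
rate(toy, "key", "ConfirmedCases", new_target_name="rate_ConfirmedCases")
print(toy["rate_ConfirmedCases"].tolist())   # [1.0, 1.0, 1.6, 1.0]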
test_df.isnull().sum()
Titanic - Machine Learning from Disaster
3,782,114
fix_target(train, key, target1, new_target_name=target1)
fix_target(train, key, target2, new_target_name=target2)
rate(train, key, target1, new_target_name="rate_" + target1)
rate(train, key, target2, new_target_name="rate_" + target2)
unique_keys = train[key].unique()
print(len(unique_keys))
train<categorify>
train_df['Survived'].value_counts()
Titanic - Machine Learning from Disaster
3,782,114
def get_lags(rate_array, current_index, size=20):
    lag_confirmed_rate = [-1 for k in range(size)]
    for j in range(0, size):
        if current_index - j >= 0:
            lag_confirmed_rate[j] = rate_array[current_index - j]
        else:
            break
    return lag_confirmed_rate

def days_ago_thresold_hit(full_array, indx, thresold):
    days_ago_confirmed_count_10 = -1
    if full_array[indx] > thresold:
        for j in range(indx, -1, -1):
            entered = False
            if full_array[j] <= thresold:
                days_ago_confirmed_count_10 = abs(j - indx)
                entered = True
                break
            if entered == False:
                days_ago_confirmed_count_10 = 100
    return days_ago_confirmed_count_10

def ewma_vectorized(data, alpha):
    sums = sum([(alpha ** (k + 1)) * data[k] for k in range(len(data))])
    counts = sum([(alpha ** (k + 1)) for k in range(len(data))])
    return sums / counts

def generate_ma_std_window(rate_array, current_index, size=20, window=3):
    ma_rate_confirmed = [-1 for k in range(size)]
    std_rate_confirmed = [-1 for k in range(size)]
    for j in range(0, size):
        if current_index - j >= 0:
            ma_rate_confirmed[j] = np.mean(rate_array[max(0, current_index - j - window + 1):current_index - j + 1])
            std_rate_confirmed[j] = np.std(rate_array[max(0, current_index - j - window + 1):current_index - j + 1])
        else:
            break
    return ma_rate_confirmed, std_rate_confirmed

def generate_ewma_window(rate_array, current_index, size=20, window=3, alpha=0.05):
    ewma_rate_confirmed = [-1 for k in range(size)]
    for j in range(0, size):
        if current_index - j >= 0:
            ewma_rate_confirmed[j] = ewma_vectorized(rate_array[max(0, current_index - j - window + 1):current_index - j + 1], alpha)
        else:
            break
    return ewma_rate_confirmed

def get_target(rate_col, indx, horizon=33, average=3, use_hard_rule=False):
    target_values = [-1 for k in range(horizon)]
    cou = 0
    for j in range(indx + 1, indx + 1 + horizon):
        if j < len(rate_col):
            if average == 1:
                target_values[cou] = rate_col[j]
            else:
                if use_hard_rule and j + average <= len(rate_col):
                    target_values[cou] = np.mean(rate_col[j:j + average])
                else:
                    target_values[cou] = np.mean(rate_col[j:min(len(rate_col), j + average)])
            cou += 1
        else:
            break
    return target_values

def dereive_features(frame, confirmed, fatalities, rate_confirmed, rate_fatalities, horizon, size=20, windows=[3, 7],
                     days_back_confimed=[1, 10, 100], days_back_fatalities=[1, 2, 10], extra_data=None, groups_data=None,
                     windows_group=[3, 7], size_group=20, days_back_confimed_group=[1, 10, 100]):
    if extra_data is not None:
        assert len(extra_stable_columns) == extra_data.shape[1]
    if groups_data is not None:
        assert len(group_names) == groups_data.shape[1]
    # Column names mirror the per-row feature blocks built below
    names = ["lag_confirmed_rate" + str(k + 1) for k in range(size)]
    for day in days_back_confimed:
        names += ["days_ago_confirmed_count_" + str(day)]
    for window in windows:
        names += ["ma" + str(window) + "_rate_confirmed" + str(k + 1) for k in range(size)]
        names += ["std" + str(window) + "_rate_confirmed" + str(k + 1) for k in range(size)]
        names += ["ewma" + str(window) + "_rate_confirmed" + str(k + 1) for k in range(size)]
    names += ["lag_fatalities_rate" + str(k + 1) for k in range(size)]
    for day in days_back_fatalities:
        names += ["days_ago_fatalitiescount_" + str(day)]
    for window in windows:
        names += ["ma" + str(window) + "_rate_fatalities" + str(k + 1) for k in range(size)]
        names += ["std" + str(window) + "_rate_fatalities" + str(k + 1) for k in range(size)]
        names += ["ewma" + str(window) + "_rate_fatalities" + str(k + 1) for k in range(size)]
    names += ["confirmed_level"]
    names += ["fatalities_level"]
    if extra_data is not None:
        names += [k for k in extra_stable_columns]
    if groups_data is not None:
        for gg in range(groups_data.shape[1]):
            names += ["lag_rate_group_" + str(gg + 1) + "_" + str(k + 1) for k in range(size_group)]
            for day in days_back_confimed_group:
                names += ["days_ago_grooupcount_" + str(gg + 1) + "_" + str(day)]
            for window in windows_group:
                names += ["ma_group_" + str(gg + 1) + "_" + str(window) + "_rate_" + str(k + 1) for k in range(size_group)]
                names += ["std_group_" + str(gg + 1) + "_" + str(window) + "_rate_" + str(k + 1) for k in range(size_group)]
    names += ["confirmed_plus" + str(k + 1) for k in range(horizon)]
    names += ["fatalities_plus" + str(k + 1) for k in range(horizon)]
    features = []
    for i in range(len(confirmed)):
        row_features = []
        lag_confirmed_rate = get_lags(rate_confirmed, i, size=size)
        row_features += lag_confirmed_rate
        for day in days_back_confimed:
            days_ago_confirmed_count_10 = days_ago_thresold_hit(confirmed, i, day)
            row_features += [days_ago_confirmed_count_10]
        for window in windows:
            ma3_rate_confirmed, std3_rate_confirmed = generate_ma_std_window(rate_confirmed, i, size=size, window=window)
            row_features += ma3_rate_confirmed
            row_features += std3_rate_confirmed
            ewma3_rate_confirmed = generate_ewma_window(rate_confirmed, i, size=size, window=window, alpha=0.05)
            row_features += ewma3_rate_confirmed
        lag_fatalities_rate = get_lags(rate_fatalities, i, size=size)
        row_features += lag_fatalities_rate
        for day in days_back_fatalities:
            days_ago_fatalitiescount_2 = days_ago_thresold_hit(fatalities, i, day)
            row_features += [days_ago_fatalitiescount_2]
        for window in windows:
            ma3_rate_fatalities, std3_rate_fatalities = generate_ma_std_window(rate_fatalities, i, size=size, window=window)
            row_features += ma3_rate_fatalities
            row_features += std3_rate_fatalities
            ewma3_rate_fatalities = generate_ewma_window(rate_fatalities, i, size=size, window=window, alpha=0.05)
            row_features += ewma3_rate_fatalities
        confirmed_level = confirmed[i]
        row_features += [confirmed_level]
        fatalities_is_level = fatalities[i]
        row_features += [fatalities_is_level]
        if extra_data is not None:
            row_features += extra_data[i].tolist()
        if groups_data is not None:
            for gg in range(groups_data.shape[1]):
                this_group = groups_data[:, gg].tolist()
                lag_group_rate = get_lags(this_group, i, size=size_group)
                row_features += lag_group_rate
                for day in days_back_confimed_group:
                    days_ago_groupcount_2 = days_ago_thresold_hit(this_group, i, day)
                    row_features += [days_ago_groupcount_2]
                for window in windows_group:
                    ma3_rate_group, std3_rate_group = generate_ma_std_window(this_group, i, size=size_group, window=window)
                    row_features += ma3_rate_group
                    row_features += std3_rate_group
        confirmed_plus = get_target(rate_confirmed, i, horizon=horizon)
        row_features += confirmed_plus
        fatalities_plus = get_target(rate_fatalities, i, horizon=horizon)
        row_features += fatalities_plus
        features.append(row_features)
    new_frame = pd.DataFrame(data=features, columns=names).reset_index(drop=True)
    frame = frame.reset_index(drop=True)
    frame = pd.concat([frame, new_frame], axis=1)
    return frame

def feature_engineering_for_single_key(frame, group, key, horizon=33, size=20, windows=[3, 7],
                                       days_back_confimed=[1, 10, 100], days_back_fatalities=[1, 2, 10],
                                       extra_stable_=None, group_nams=None, windows_group=[3, 7], size_group=20,
                                       days_back_confimed_group=[1, 10, 100]):
    mini_frame = get_data_by_key(frame, group, key, fields=None)
    mini_frame_with_features = dereive_features(mini_frame, mini_frame["ConfirmedCases"].values, mini_frame["Fatalities"].values,
                                                mini_frame["rate_ConfirmedCases"].values, mini_frame["rate_Fatalities"].values,
                                                horizon, size=size, windows=windows, days_back_confimed=days_back_confimed,
                                                days_back_fatalities=days_back_fatalities,
                                                extra_data=mini_frame[extra_stable_].values if extra_stable_ is not None else None,
                                                groups_data=mini_frame[group_nams].values if group_nams is not None else None,
                                                windows_group=windows_group, size_group=size_group,
                                                days_back_confimed_group=days_back_confimed_group)
    return mini_frame_with_features
<predict_on_test>
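The window helpers are easiest to see on a tiny growth-rate series; this demo calls the functions defined above with illustrative numbers:

import numpy as np

# Toy daily growth-rate series for one region
rates = [1.50, 1.30, 1.20, 1.10, 1.05]
print(get_lags(rates, current_index=4, size=3))               # [1.05, 1.1, 1.2]
print(generate_ma_std_window(rates, 4, size=2, window=3)[0])  # trailing 3-day means
print(generate_ewma_window(rates, 4, size=2, window=3, alpha=0.05))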
train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster