kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
10,357,342
def prepare_features(df, gap):
    """Engineer lag-based features for forecasting ``gap`` days ahead.

    Expects pre-computed ``lag_{k}_cc`` / ``lag_{k}_ft`` / ``lag_{k}_rc``
    columns (confirmed / fatalities / recovered) plus population, area,
    lat/lon, continent and ``case_{n}_date`` columns.  Mutates ``df`` in
    place and returns only the model features plus the two log-ratio targets.
    """
    # Fraction of confirmed cases still "active" (neither dead nor recovered).
    df["perc_1_ac"] = (df[f"lag_{gap}_cc"] - df[f"lag_{gap}_ft"] - df[f"lag_{gap}_rc"]) / df[f"lag_{gap}_cc"]
    # Confirmed cases as a share of population.
    df["perc_1_cc"] = df[f"lag_{gap}_cc"] / df.population
    # Daily absolute increments over the three most recent observable days.
    df["diff_1_cc"] = df[f"lag_{gap}_cc"] - df[f"lag_{gap + 1}_cc"]
    df["diff_2_cc"] = df[f"lag_{gap + 1}_cc"] - df[f"lag_{gap + 2}_cc"]
    df["diff_3_cc"] = df[f"lag_{gap + 2}_cc"] - df[f"lag_{gap + 3}_cc"]
    df["diff_1_ft"] = df[f"lag_{gap}_ft"] - df[f"lag_{gap + 1}_ft"]
    df["diff_2_ft"] = df[f"lag_{gap + 1}_ft"] - df[f"lag_{gap + 2}_ft"]
    df["diff_3_ft"] = df[f"lag_{gap + 2}_ft"] - df[f"lag_{gap + 3}_ft"]
    # Mean daily increment over the 3-day window.
    df["diff_123_cc"] = (df[f"lag_{gap}_cc"] - df[f"lag_{gap + 3}_cc"]) / 3
    df["diff_123_ft"] = (df[f"lag_{gap}_ft"] - df[f"lag_{gap + 3}_ft"]) / 3
    # "Acceleration": ratios of consecutive daily increments.
    df["diff_change_1_cc"] = df.diff_1_cc / df.diff_2_cc
    df["diff_change_2_cc"] = df.diff_2_cc / df.diff_3_cc
    df["diff_change_1_ft"] = df.diff_1_ft / df.diff_2_ft
    df["diff_change_2_ft"] = df.diff_2_ft / df.diff_3_ft
    df["diff_change_12_cc"] = (df.diff_change_1_cc + df.diff_change_2_cc) / 2
    df["diff_change_12_ft"] = (df.diff_change_1_ft + df.diff_change_2_ft) / 2
    # Multiplicative day-over-day growth rates.
    df["change_1_cc"] = df[f"lag_{gap}_cc"] / df[f"lag_{gap + 1}_cc"]
    df["change_2_cc"] = df[f"lag_{gap + 1}_cc"] / df[f"lag_{gap + 2}_cc"]
    df["change_3_cc"] = df[f"lag_{gap + 2}_cc"] / df[f"lag_{gap + 3}_cc"]
    df["change_1_ft"] = df[f"lag_{gap}_ft"] / df[f"lag_{gap + 1}_ft"]
    df["change_2_ft"] = df[f"lag_{gap + 1}_ft"] / df[f"lag_{gap + 2}_ft"]
    df["change_3_ft"] = df[f"lag_{gap + 2}_ft"] / df[f"lag_{gap + 3}_ft"]
    # Growth over the whole 3-day window.
    df["change_123_cc"] = df[f"lag_{gap}_cc"] / df[f"lag_{gap + 3}_cc"]
    df["change_123_ft"] = df[f"lag_{gap}_ft"] / df[f"lag_{gap + 3}_ft"]
    for case in DAYS_SINCE_CASES:
        # NOTE(review): computed as case_date - Date, then cast to whole days;
        # the sign convention for "days since" depends on how case_{n}_date was
        # built upstream — verify against the lag-construction cell.
        df[f"days_since_{case}_case"] = (df[f"case_{case}_date"] - df.Date).astype("timedelta64[D]")
        # Thresholds crossed inside the unobservable gap window are unknown.
        df.loc[df[f"days_since_{case}_case"] < gap, f"days_since_{case}_case"] = np.nan
    # 1 when the row is a whole country (no province-level breakdown).
    df["country_flag"] = df.Province_State.isna().astype(int)
    df["density"] = df.population / df.area
    # Targets: log-growth between the lagged value and the actual outcome.
    df["target_cc"] = np.log1p(df.ConfirmedCases) - np.log1p(df[f"lag_{gap}_cc"])
    df["target_ft"] = np.log1p(df.Fatalities) - np.log1p(df[f"lag_{gap}_ft"])
    features = [
        f"lag_{gap}_cc", f"lag_{gap}_ft", f"lag_{gap}_rc", "perc_1_ac", "perc_1_cc",
        "diff_1_cc", "diff_2_cc", "diff_3_cc", "diff_1_ft", "diff_2_ft", "diff_3_ft",
        "diff_123_cc", "diff_123_ft", "diff_change_1_cc", "diff_change_2_cc",
        "diff_change_1_ft", "diff_change_2_ft", "diff_change_12_cc", "diff_change_12_ft",
        "change_1_cc", "change_2_cc", "change_3_cc", "change_1_ft", "change_2_ft",
        "change_3_ft", "change_123_cc", "change_123_ft", "days_since_1_case",
        "days_since_10_case", "days_since_50_case", "days_since_100_case",
        "days_since_500_case", "days_since_1000_case", "days_since_5000_case",
        "days_since_10000_case", "country_flag", "lat", "lon", "continent",
        "population", "area", "density", "target_cc", "target_ft"
    ]
    return df[features]
# Standardize features.  The scaler must be fit on the training data only and
# then applied unchanged to the test data; the original called fit_transform on
# test as well, which refits the scaler on test statistics and puts train and
# test on different scales (a leakage / consistency bug).
scaler = StandardScaler()
train2 = scaler.fit_transform(train)
test2 = scaler.transform(test)
Titanic - Machine Learning from Disaster
10,357,342
def build_predict_lgbm(df_train, df_test, gap):
    """Train two LightGBM regressors (cases, fatalities) and predict df_test.

    Targets are log-ratios versus the lag-``gap`` level, so predictions are
    mapped back with ``expm1(pred + log1p(lag))``.
    NOTE: mutates both frames in place (drops NaN-target rows from df_train
    and the target columns from both).
    """
    df_train.dropna(subset = ["target_cc", "target_ft", f"lag_{gap}_cc", f"lag_{gap}_ft"], inplace = True)
    target_cc = df_train.target_cc
    target_ft = df_train.target_ft
    # Keep raw lag levels to undo the log-ratio transform after predicting.
    test_lag_cc = df_test[f"lag_{gap}_cc"].values
    test_lag_ft = df_test[f"lag_{gap}_ft"].values
    df_train.drop(["target_cc", "target_ft"], axis = 1, inplace = True)
    df_test.drop(["target_cc", "target_ft"], axis = 1, inplace = True)
    categorical_features = ["continent"]
    dtrain_cc = lgb.Dataset(df_train, label = target_cc, categorical_feature = categorical_features)
    dtrain_ft = lgb.Dataset(df_train, label = target_ft, categorical_feature = categorical_features)
    model_cc = lgb.train(LGB_PARAMS, train_set = dtrain_cc, num_boost_round = 200)
    model_ft = lgb.train(LGB_PARAMS, train_set = dtrain_ft, num_boost_round = 200)
    # NOTE(review): Booster.predict has no num_boost_round parameter — the
    # kwarg is silently swallowed (num_iteration is the intended knob); verify.
    y_pred_cc = np.expm1(model_cc.predict(df_test, num_boost_round = 200) + np.log1p(test_lag_cc))
    y_pred_ft = np.expm1(model_ft.predict(df_test, num_boost_round = 200) + np.log1p(test_lag_ft))
    return y_pred_cc, y_pred_ft, model_cc, model_ft
# Compare a suite of classifiers via 5-fold cross-validated accuracy; one
# column of fold scores per classifier.
KFold_Score = pd.DataFrame()
classifiers = ['Linear SVM', 'Radial SVM', 'LogisticRegression',
               'RandomForestClassifier', 'AdaBoostClassifier', 'XGBoostClassifier',
               'KNeighborsClassifier', 'GradientBoostingClassifier']
models = [svm.SVC(kernel='linear'),
          svm.SVC(kernel='rbf'),
          LogisticRegression(max_iter = 1000),
          RandomForestClassifier(n_estimators=200, random_state=0),
          AdaBoostClassifier(random_state = 0),
          xgb.XGBClassifier(n_estimators=100),
          KNeighborsClassifier(),
          GradientBoostingClassifier(random_state=0)]
# Pair each display name with its estimator instead of tracking an index.
for name, model in zip(classifiers, models):
    cv = KFold(n_splits=5, random_state=0, shuffle=True)
    KFold_Score[name] = cross_val_score(model, train2, np.ravel(pred), scoring = 'accuracy', cv=cv)
Titanic - Machine Learning from Disaster
10,357,342
def predict_mad(df_test, gap, val = False):
    """Linear-extrapolation baseline with a damped trend ("MAD" model).

    Extrapolates the lag-``gap`` level by ``gap`` times the average 3-day
    increment, minus a damping term controlled by the module-level
    ``MAD_FACTOR``.  ``val=True`` normalizes the damping by ``VAL_DAYS``,
    otherwise by the module-level ``n_dates_test``.
    Mutates ``df_test`` in place (adds avg_diff_cc / avg_diff_ft).

    Improvements over the original: the two branches (identical except for
    the denominator) are merged, and the O(gap) ``np.sum([x for x in
    range(gap)])`` is replaced by the closed-form triangular number.
    """
    df_test["avg_diff_cc"] = (df_test[f"lag_{gap}_cc"] - df_test[f"lag_{gap + 3}_cc"]) / 3
    df_test["avg_diff_ft"] = (df_test[f"lag_{gap}_ft"] - df_test[f"lag_{gap + 3}_ft"]) / 3
    # 0 + 1 + ... + (gap - 1)
    triangular = gap * (gap - 1) / 2
    horizon = VAL_DAYS if val else n_dates_test
    damping = (1 - MAD_FACTOR) * triangular / horizon
    y_pred_cc = df_test[f"lag_{gap}_cc"] + gap * df_test.avg_diff_cc - damping * df_test.avg_diff_cc
    y_pred_ft = df_test[f"lag_{gap}_ft"] + gap * df_test.avg_diff_ft - damping * df_test.avg_diff_ft
    return y_pred_cc, y_pred_ft
# Append a per-classifier mean row, relabel folds, and show classifiers
# ranked by mean CV accuracy.
mean = pd.DataFrame(KFold_Score.mean(), index = classifiers)
KFold_Score = pd.concat([KFold_Score, mean.T])
fold_labels = [f'Fold {k}' for k in range(1, 6)] + ['Mean']
KFold_Score.index = fold_labels
KFold_Score.T.sort_values(by = ['Mean'], ascending = False)
Titanic - Machine Learning from Disaster
10,357,342
# Global modeling configuration.
SEED = 24

# LightGBM regression hyper-parameters (RMSE objective, shallow trees,
# mild bagging/feature subsampling and L1/L2 regularization).
LGB_PARAMS = {
    "objective": "regression",
    "num_leaves": 5,
    "learning_rate": 0.013,
    "bagging_fraction": 0.91,
    "feature_fraction": 0.81,
    "reg_alpha": 0.13,
    "reg_lambda": 0.13,
    "metric": "rmse",
    "seed": SEED,
}

# Length of the hold-out validation window (days) and damping weight for the
# MAD extrapolation baseline.
VAL_DAYS = 7
MAD_FACTOR = 0.5
# Swap the first and third entries of both column-name lists.
for names in (col_name1, col_name2):
    names[0], names[2] = names[2], names[0]
Titanic - Machine Learning from Disaster
10,357,342
# Rows with an Id are training rows; rows with a ForecastId are test rows.
df_train = df[df.Id.notna()]
df_test_full = df[df.ForecastId.notna()]
# Keep only the (reordered) feature columns for each split.
train_new = train.loc[:, col_name1]
test_new = test.loc[:, col_name2]
Titanic - Machine Learning from Disaster
10,357,342
# Walk every test date.  Dates that overlap the training period are copied
# straight from the known values; future dates are forecast with both the
# LightGBM model and the MAD baseline at the appropriate gap.  The first
# VAL_DAYS gaps are additionally scored on a rolling hold-out window.
df_preds_val = []
df_preds_test = []
for date in df_test_full.Date.unique():
    print("[INFO] Date:", date)
    if date in df_train.Date.values:
        # Overlap with training data: reuse the observed values directly.
        df_pred_test = df_test_full.loc[df_test_full.Date == date, ["ForecastId", "ConfirmedCases", "Fatalities"]].rename(columns = {"ConfirmedCases": "ConfirmedCases_test", "Fatalities": "Fatalities_test"})
    else:
        df_test = df_test_full[df_test_full.Date == date]
        # Days between the last training date and the date being predicted.
        gap = (pd.Timestamp(date) - max_date_train).days
        if gap <= VAL_DAYS:
            # Hold-out validation: predict a known date gap days ahead using
            # only data strictly before it.
            val_date = max_date_train - pd.Timedelta(VAL_DAYS, "D") + pd.Timedelta(gap, "D")
            df_build = df_train[df_train.Date < val_date]
            df_val = df_train[df_train.Date == val_date]
            X_build = prepare_features(df_build, gap)
            X_val = prepare_features(df_val, gap)
            y_val_cc_lgb, y_val_ft_lgb, _, _ = build_predict_lgbm(X_build, X_val, gap)
            y_val_cc_mad, y_val_ft_mad = predict_mad(df_val, gap, val = True)
            df_pred_val = pd.DataFrame({"Id": df_val.Id.values,
                                        "ConfirmedCases_val_lgb": y_val_cc_lgb,
                                        "Fatalities_val_lgb": y_val_ft_lgb,
                                        "ConfirmedCases_val_mad": y_val_cc_mad,
                                        "Fatalities_val_mad": y_val_ft_mad, })
            df_preds_val.append(df_pred_val)
        # Full-data models for the actual test prediction at this gap.
        X_train = prepare_features(df_train, gap)
        X_test = prepare_features(df_test, gap)
        y_test_cc_lgb, y_test_ft_lgb, model_cc, model_ft = build_predict_lgbm(X_train, X_test, gap)
        y_test_cc_mad, y_test_ft_mad = predict_mad(df_test, gap)
        # Keep a few representative models (short/medium/long horizon) for
        # later inspection, e.g. feature-importance plots.
        if gap == 1:
            model_1_cc = model_cc
            model_1_ft = model_ft
            features_1 = X_train.columns.values
        elif gap == 14:
            model_14_cc = model_cc
            model_14_ft = model_ft
            features_14 = X_train.columns.values
        elif gap == 28:
            model_28_cc = model_cc
            model_28_ft = model_ft
            features_28 = X_train.columns.values
        df_pred_test = pd.DataFrame({"ForecastId": df_test.ForecastId.values,
                                     "ConfirmedCases_test_lgb": y_test_cc_lgb,
                                     "Fatalities_test_lgb": y_test_ft_lgb,
                                     "ConfirmedCases_test_mad": y_test_cc_mad,
                                     "Fatalities_test_mad": y_test_ft_mad, })
    df_preds_test.append(df_pred_test)
# Cabin is dropped from both feature frames (handled separately as Deck).
train_new = train_new.drop(columns = ['Cabin'])
test_new = test_new.drop(columns = ['Cabin'])
Titanic - Machine Learning from Disaster
10,357,342
# Merge the per-date prediction frames back onto the master frame, then score
# both models on the validation window with RMSLE (RMSE in log1p space).
df = df.merge(pd.concat(df_preds_val, sort = False), on = "Id", how = "left")
df = df.merge(pd.concat(df_preds_test, sort = False), on = "ForecastId", how = "left")
# Only rows that actually received a validation prediction are scored.
rmsle_cc_lgb = np.sqrt(mean_squared_error(np.log1p(df[~df.ConfirmedCases_val_lgb.isna()].ConfirmedCases), np.log1p(df[~df.ConfirmedCases_val_lgb.isna()].ConfirmedCases_val_lgb)))
rmsle_ft_lgb = np.sqrt(mean_squared_error(np.log1p(df[~df.Fatalities_val_lgb.isna()].Fatalities), np.log1p(df[~df.Fatalities_val_lgb.isna()].Fatalities_val_lgb)))
rmsle_cc_mad = np.sqrt(mean_squared_error(np.log1p(df[~df.ConfirmedCases_val_mad.isna()].ConfirmedCases), np.log1p(df[~df.ConfirmedCases_val_mad.isna()].ConfirmedCases_val_mad)))
rmsle_ft_mad = np.sqrt(mean_squared_error(np.log1p(df[~df.Fatalities_val_mad.isna()].Fatalities), np.log1p(df[~df.Fatalities_val_mad.isna()].Fatalities_val_mad)))
print("LGB CC RMSLE Val of", VAL_DAYS, "days for CC:", round(rmsle_cc_lgb, 2))
print("LGB FT RMSLE Val of", VAL_DAYS, "days for FT:", round(rmsle_ft_lgb, 2))
print("LGB Overall RMSLE Val of", VAL_DAYS, "days:", round((rmsle_cc_lgb + rmsle_ft_lgb) / 2, 2))
print("MAD CC RMSLE Val of", VAL_DAYS, "days for CC:", round(rmsle_cc_mad, 2))
print("MAD FT RMSLE Val of", VAL_DAYS, "days for FT:", round(rmsle_ft_mad, 2))
print("MAD Overall RMSLE Val of", VAL_DAYS, "days:", round((rmsle_cc_mad + rmsle_ft_mad) / 2, 2))
# Standardize the engineered feature frames; fit statistics come from the
# training split only, then the same transform is applied to test.
sc = StandardScaler()
sc.fit(train_new)
train3 = sc.transform(train_new)
test3 = sc.transform(test_new)
Titanic - Machine Learning from Disaster
10,357,342
# Assemble the test-set predictions and blend the two models.
test = df.loc[~df.ForecastId.isna(), ["ForecastId", "Country_Region", "Province_State", "Date", "ConfirmedCases_test", "ConfirmedCases_test_lgb", "ConfirmedCases_test_mad", "Fatalities_test", "Fatalities_test_lgb", "Fatalities_test_mad"]].reset_index()
# Weighted blend: the MAD baseline dominates (0.7 / 0.75 weight).
test["ConfirmedCases"] = 0.3 * test.ConfirmedCases_test_lgb + 0.7 * test.ConfirmedCases_test_mad
test["Fatalities"] = 0.25 * test.Fatalities_test_lgb + 0.75 * test.Fatalities_test_mad
# For these regions only the MAD baseline is trusted.
test.loc[test.Country_Region.isin(["China", "US", "Diamond Princess"]), "ConfirmedCases"] = test[test.Country_Region.isin(["China", "US", "Diamond Princess"])].ConfirmedCases_test_mad.values
test.loc[test.Country_Region.isin(["China", "US", "Diamond Princess"]), "Fatalities"] = test[test.Country_Region.isin(["China", "US", "Diamond Princess"])].Fatalities_test_mad.values
# Dates overlapping the training period use the observed values verbatim.
test.loc[test.Date.isin(df_train.Date.values), "ConfirmedCases"] = test[test.Date.isin(df_train.Date.values)].ConfirmedCases_test.values
test.loc[test.Date.isin(df_train.Date.values), "Fatalities"] = test[test.Date.isin(df_train.Date.values)].Fatalities_test.values
sub0 = test[["ForecastId", "ConfirmedCases", "Fatalities"]]
sub0.ForecastId = sub0.ForecastId.astype(int)
sub0.head()
# Base random-forest classifier; its hyper-parameters are tuned with
# GridSearchCV in a later cell.
clf = RandomForestClassifier(random_state=0)
Titanic - Machine Learning from Disaster
10,357,342
test = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv") train = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv") train['Province_State'].fillna('', inplace=True) test['Province_State'].fillna('', inplace=True) train['Date'] = pd.to_datetime(train['Date']) test['Date'] = pd.to_datetime(test['Date']) train = train.sort_values(['Country_Region','Province_State','Date']) test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering>
# Random-forest hyper-parameter search space for GridSearchCV.
param_grid = {
    'n_estimators': [200, 300],
    'max_features': ['auto', 'sqrt'],
    'max_depth': [6, 7, 8],
    'criterion': ['gini', 'entropy'],
}
Titanic - Machine Learning from Disaster
10,357,342
# Force the cumulative series to be non-decreasing: wherever a value drops
# below the previous row's value, overwrite it with that previous value,
# repeating until no violations remain.
# NOTE(review): shift(1) is applied to the whole frame, so the comparison
# crosses (country, province) boundaries — it relies on the sort applied at
# load time and the `Date > FirstDate` guard; boundary rows can still borrow
# the previous series' last value.  Verify before reuse.
FirstDate = train.groupby('Country_Region').min()['Date'].unique()[0]
train['Last Confirm'] = train['ConfirmedCases'].shift(1)
while train[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate)].shape[0] > 0:
    train['Last Confirm'] = train['ConfirmedCases'].shift(1)
    train['Last Fatalities'] = train['Fatalities'].shift(1)
    train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'ConfirmedCases'] = train.loc[(train['Last Confirm'] > train['ConfirmedCases']) & (train['Date'] > FirstDate), 'Last Confirm']
    train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Fatalities'] = train.loc[(train['Last Fatalities'] > train['Fatalities']) & (train['Date'] > FirstDate), 'Last Fatalities']
    train['Last Confirm'] = train['ConfirmedCases'].shift(1)
    train['Last Fatalities'] = train['Fatalities'].shift(1)
# Exhaustive 5-fold grid search over param_grid; fit() returns the fitted
# search object, so construction and fitting can be chained.
CV_clf = GridSearchCV(estimator=clf, param_grid=param_grid, cv=5).fit(train3, pred)
CV_clf.best_params_
Titanic - Machine Learning from Disaster
10,357,342
from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.arima_model import ARIMA<feature_engineering>
# Final random forest with the hyper-parameters found by the grid search.
best_rf_params = dict(random_state=0, n_estimators=200, criterion='gini',
                      max_features='auto', max_depth=8)
clf1 = RandomForestClassifier(**best_rf_params)
clf1.fit(train3, pred)
Titanic - Machine Learning from Disaster
10,357,342
# Case-count thresholds used to build "days since N-th case" features.
feature_day = [1, 20, 50, 100, 200, 500, 1000]


def CreateInput(data):
    """Add "Number day from N case" columns for each threshold in feature_day.

    Relies on the module-level ``train``, ``country`` and ``province`` set by
    the enclosing loop.  Mutates ``data`` in place and returns only the new
    feature columns.
    """
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        # Date at which the series last had fewer than `day` cases (or the
        # series start if it never did).
        if (train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0):
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if (data['Date'].iloc[i] > fromday):
                day_denta = data['Date'].iloc[i] - fromday
                # NOTE(review): chained .iloc assignment on a column slice —
                # triggers SettingWithCopyWarning; works here but is fragile.
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_denta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]


# Fit one SARIMAX(1,1,0) per (country, province) series for cases and
# fatalities, forecasting only past the last training date; overlap dates are
# back-filled with the observed values.
pred_data_all = pd.DataFrame()
with tqdm(total=len(train['Country_Region'].unique())) as pbar:
    for country in train['Country_Region'].unique():
        for province in train[(train['Country_Region'] == country)]['Province_State'].unique():
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                df_train = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]
                df_test = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                X_train = CreateInput(df_train)
                y_train_confirmed = df_train['ConfirmedCases'].ravel()
                y_train_fatalities = df_train['Fatalities'].ravel()
                X_pred = CreateInput(df_test)
                # Use the highest threshold the series has already crossed.
                feature_use = X_pred.columns[0]
                for i in range(X_pred.shape[1] - 1, 0, -1):
                    if (X_pred.iloc[0, i] > 0):
                        feature_use = X_pred.columns[i]
                        break
                # Drop the leading flat segment before the threshold crossing.
                idx = X_train[X_train[feature_use] == 0].shape[0]
                adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
                adjusted_y_train_confirmed = y_train_confirmed[idx:]
                adjusted_y_train_fatalities = y_train_fatalities[idx:]
                idx = X_pred[X_pred[feature_use] == 0].shape[0]
                adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
                pred_data = test[(test['Country_Region'] == country) & (test['Province_State'] == province)]
                max_train_date = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].max()
                min_test_date = pred_data['Date'].min()
                # Forecast confirmed cases beyond the training horizon, then
                # prepend the known values for the overlapping test dates.
                model = SARIMAX(adjusted_y_train_confirmed, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_confirmed = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['ConfirmedCases'].values
                y_hat_confirmed = np.concatenate((y_train_confirmed, y_hat_confirmed), axis = 0)
                # Same procedure for fatalities.
                model = SARIMAX(adjusted_y_train_fatalities, order=(1, 1, 0), measurement_error=True).fit(disp=False)
                y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
                y_train_fatalities = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['Date'] >= min_test_date)]['Fatalities'].values
                y_hat_fatalities = np.concatenate((y_train_fatalities, y_hat_fatalities), axis = 0)
                pred_data['ConfirmedCases_hat'] = y_hat_confirmed
                pred_data['Fatalities_hat'] = y_hat_fatalities
                pred_data_all = pred_data_all.append(pred_data)
            # NOTE(review): the bar total counts countries but update() fires
            # per province, so the displayed progress overshoots.
            pbar.update(1)
# Attach ground truth where available and clamp negative forecasts to zero.
df_val = pd.merge(pred_data_all, train[['Date', 'Country_Region', 'Province_State', 'ConfirmedCases', 'Fatalities']], on=['Date', 'Country_Region', 'Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0, 'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0, 'ConfirmedCases_hat'] = 0
df_val_3 = df_val.copy()
# Predict Survived for the scaled test matrix with the tuned random forest.
pred3 = clf1.predict(test3)
Titanic - Machine Learning from Disaster
10,357,342
# Extract the SARIMAX forecasts and rename them to the submission schema.
sub1 = df_val_3
submission = sub1[['ForecastId', 'ConfirmedCases_hat', 'Fatalities_hat']].rename(
    columns={'ConfirmedCases_hat': 'ConfirmedCases', 'Fatalities_hat': 'Fatalities'})
# Write the Titanic submission file (PassengerId, Survived).
pred_test = pred3
output = pd.DataFrame()
output['PassengerId'] = test_data.PassengerId
output['Survived'] = pred_test
output.to_csv('./submission.csv', index=False)
Titanic - Machine Learning from Disaster
12,084,336
TARGETS = ["ConfirmedCases", "Fatalities"] sub_df = sub0.copy() for t in TARGETS: sub_df[t] = np.expm1(np.log1p(submission[t].values)*0.4 + np.log1p(sub0[t].values)*0.6) sub_df.to_csv("submission.csv", index=False )<count_missing_values>
sns.set(style="darkgrid") warnings.filterwarnings('ignore') SEED = 42
Titanic - Machine Learning from Disaster
12,084,336
# Sanity check: count missing values per column in the blended submission.
sub0.isna().sum()
# Load the Titanic train/test splits and show their shapes.
titanic_dir = '/kaggle/input/titanic'
train = pd.read_csv(f'{titanic_dir}/train.csv')
test = pd.read_csv(f'{titanic_dir}/test.csv')
train.shape, test.shape
Titanic - Machine Learning from Disaster
12,084,336
import numpy as np import pandas as pd import seaborn as sns from sklearn.model_selection import train_test_split from xgboost import XGBRegressor from sklearn.multioutput import MultiOutputRegressor from sklearn.impute import SimpleImputer<load_from_csv>
def concat(train, test):
    """Stack train and test into one frame with a fresh 0..n-1 index."""
    return pd.concat([train, test]).reset_index(drop = True)


def df_divide(df):
    """Split the combined frame back into (train, test-without-target).

    The Titanic training set has 891 rows.  The original used ``df[:890]`` /
    ``df[891:]``, which silently dropped row 890 (the last training
    passenger) from BOTH halves — an off-by-one bug.  Rows 0-890 are train,
    rows 891+ are test.
    """
    return df[:891], df[891:].drop('Survived', axis = 1)
Titanic - Machine Learning from Disaster
12,084,336
# Load the week-4 competition files from the Kaggle input directory.
covid_dir = '/kaggle/input/covid19-global-forecasting-week-4'
train_data = pd.read_csv(f'{covid_dir}/train.csv')
test_data = pd.read_csv(f'{covid_dir}/test.csv')
submission_csv = pd.read_csv(f'{covid_dir}/submission.csv')
# Combine train+test for joint feature engineering; `a` is the train part,
# `b` the test part without the target column.
df_all = concat(train, test)
a, b = df_divide(df_all)
Titanic - Machine Learning from Disaster
12,084,336
# Parse the Date column of both frames into datetimes.
for frame in (train_data, test_data):
    frame['Date'] = pd.to_datetime(frame['Date'], infer_datetime_format=True)
# Keep references to both splits so later cells can iterate over them.
dfs = [train, test]
Titanic - Machine Learning from Disaster
12,084,336
# Encode dates as YYMMDD integers so tree models can use them directly.
for frame in (train_data, test_data):
    frame.loc[:, 'Date'] = frame.Date.dt.strftime('%y%m%d')
    frame.loc[:, 'Date'] = frame['Date'].astype(int)
# Inspect remaining missing values in the combined frame.
df_all.isna().sum()
Titanic - Machine Learning from Disaster
12,084,336
# Use the country name when the province is missing.  BUG in the original:
# it compared Province_State to the literal string 'nan', but at this point
# missing provinces are real NaN values (the astype(str) cast happens in a
# later cell), so the replacement never fired.  Test for actual missing
# values, keeping the 'nan'-string match for robustness if cells are re-run
# out of order.
for frame in (train_data, test_data):
    missing = frame['Province_State'].isna() | (frame['Province_State'] == 'nan')
    frame['Province_State'] = np.where(missing, frame['Country_Region'], frame['Province_State'])
# Impute missing ages with the median age of the passenger's (Sex, Pclass)
# group; transform broadcasts each group's median back to row level.
group_median_age = df_all.groupby(['Sex', 'Pclass'])['Age'].transform('median')
df_all['Age'] = df_all['Age'].fillna(group_median_age)
Titanic - Machine Learning from Disaster
12,084,336
# Force Province_State to string dtype in both frames.
convert_dict = {'Province_State': str}
for frame_name in ('train_data', 'test_data'):
    pass  # see explicit casts below; kept simple for notebook readability
train_data = train_data.astype(convert_dict)
test_data = test_data.astype(convert_dict)
# Fill the missing Embarked values with 'S'.
df_all['Embarked'] = df_all['Embarked'].fillna('S')
Titanic - Machine Learning from Disaster
12,084,336
# Collect the names of the object-dtype (categorical/text) columns.
s = (train_data.dtypes == 'object')
object_cols = s[s].index.tolist()
# Inspect the rows with a missing Embarked value.
df_all[df_all['Embarked'].isna()]
Titanic - Machine Learning from Disaster
12,084,336
from sklearn.preprocessing import LabelEncoder<categorify>
# Inspect the rows with a missing Fare value.
df_all[df_all['Fare'].isna()]
Titanic - Machine Learning from Disaster
12,084,336
# Integer-encode the two location columns, fitting on train and reusing the
# fitted encoders on test.
# NOTE(review): LabelEncoder.transform raises on labels unseen during fit —
# this assumes every test province/country also appears in train; verify.
label_encoder1 = LabelEncoder()
label_encoder2 = LabelEncoder()
train_data['Province_State'] = label_encoder1.fit_transform(train_data['Province_State'])
test_data['Province_State'] = label_encoder1.transform(test_data['Province_State'])
train_data['Country_Region'] = label_encoder2.fit_transform(train_data['Country_Region'])
test_data['Country_Region'] = label_encoder2.transform(test_data['Country_Region'])
# Median fare per (Pclass, Parch, SibSp) group — used to pick an imputation
# value for the single missing Fare.
g = df_all.groupby(['Pclass', 'Parch', 'SibSp'])['Fare'].median()
g
Titanic - Machine Learning from Disaster
12,084,336
# Preserve ForecastId before it is dropped; needed to build the submission.
Test_id = test_data.ForecastId
# Frequency of each cabin value (mostly unique, many missing).
df_all['Cabin'].value_counts()
Titanic - Machine Learning from Disaster
12,084,336
# Identifier columns carry no signal for the model; drop them in place.
train_data.drop(columns=['Id'], inplace=True)
test_data.drop(columns=['ForecastId'], inplace=True)
# Deck = first letter of the cabin; missing cabins get the 'M' placeholder.
cabin_str = df_all['Cabin'].astype(str)
df_all['Deck'] = np.where(cabin_str == 'nan', 'M', cabin_str.str[0])
Titanic - Machine Learning from Disaster
12,084,336
# Report only the columns that actually contain missing values.
missing_val_count_by_column = train_data.isnull().sum()
print(missing_val_count_by_column[missing_val_count_by_column > 0])
# Reassign the lone 'T' deck passenger to deck 'A' via a single label-based
# write (avoids chained .iloc assignment on a column slice).
i = df_all[df_all['Deck'] == 'T'].index
df_all.loc[i, 'Deck'] = 'A'
Titanic - Machine Learning from Disaster
12,084,336
from xgboost import XGBRegressor<prepare_x_and_y>
# Merge individual decks into three coarse groups in one replace call.
deck_groups = {'A': 'ABC', 'B': 'ABC', 'C': 'ABC',
               'D': 'DE', 'E': 'DE',
               'F': 'FG', 'G': 'FG'}
df_all['Deck'] = df_all['Deck'].replace(deck_groups)
Titanic - Machine Learning from Disaster
12,084,336
# Feature matrix and the two regression targets.
feature_cols = ['Province_State', 'Country_Region', 'Date']
target_cols = ['ConfirmedCases', 'Fatalities']
X_train = train_data[feature_cols]
y_train = train_data[target_cols]
# Re-check missing values after the imputation steps above.
df_all.isna().sum()
Titanic - Machine Learning from Disaster
12,084,336
# Separate the two targets for the two independent regressors.
y_train_confirm = y_train['ConfirmedCases']
y_train_fatality = y_train['Fatalities']
# Cabin is fully superseded by the Deck feature; remove it.
df_all.drop(columns='Cabin', inplace=True)
Titanic - Machine Learning from Disaster
12,084,336
# Raw ndarray views of the feature matrix.
# NOTE(review): x_test is built from X_train, not from the test features —
# this looks like a copy/paste slip; verify intent before reuse.
x_train = X_train.iloc[:, :].values
x_test = X_train.iloc[:, :].values
# Keep entries strictly between 0.1 and 1, drop NaNs/duplicates and render
# with a gradient.  NOTE(review): `a` was last bound to the train half of the
# combined frame — presumably rebound to a correlation table in a cell not
# shown here; confirm before relying on this output.
a[(a > 0.1) & (a < 1)].dropna().drop_duplicates().rename(columns = {0: "Correlations"}).style.background_gradient()
Titanic - Machine Learning from Disaster
12,084,336
# Fit XGBoost on (province, country, date-int) to predict confirmed cases.
# NOTE(review): n_estimators=400000 is extreme for three features — training
# cost is enormous and almost certainly past diminishing returns; consider a
# few hundred trees with early stopping.
model1 = XGBRegressor(n_estimators=400000)
model1.fit(X_train, y_train_confirm)
y_pred_confirm = model1.predict(test_data)
# Final missing-value check on the combined frame.
df_all.isnull().sum()
Titanic - Machine Learning from Disaster
12,084,336
# Fit a second XGBoost model for fatalities.
# NOTE(review): n_estimators=200000 — same over-sized-ensemble concern as the
# confirmed-cases model above.
model2 = XGBRegressor(n_estimators=200000)
model2.fit(X_train, y_train_fatality)
y_pred_fat = model2.predict(test_data)
# Discretize Fare into 13 equal-frequency (quantile) bins.
df_all['Fare'] = pd.qcut(df_all['Fare'], 13)
Titanic - Machine Learning from Disaster
12,084,336
# Build and save the submission from the two XGBoost predictions.
df_sub = pd.DataFrame({'ForecastId': Test_id,
                       'ConfirmedCases': y_pred_confirm,
                       'Fatalities': y_pred_fat})
df_sub.to_csv('submission.csv', index=False)
# Discretize Age into 10 equal-frequency (quantile) bins.
df_all['Age'] = pd.qcut(df_all['Age'], 10)
Titanic - Machine Learning from Disaster
12,084,336
# IPython magic: render matplotlib figures inline in the notebook.
%matplotlib inline
# Family Size = siblings/spouses + parents/children + the passenger themself.
df_all['Family Size'] = df_all['SibSp'] + df_all['Parch'] + 1
Titanic - Machine Learning from Disaster
12,084,336
train_df = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv") test_df = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv") submission = pd.read_csv(".. /input/covid19-global-forecasting-week-4/submission.csv") <define_variables>
# TicketC = how many passengers share the same ticket number.
# (`a` is deliberately reused as a scratch name here — it held a frame in an
# earlier cell.)
a = df_all['Ticket'].value_counts()
df_all['TicketC'] = df_all['Ticket'].map(a)
Titanic - Machine Learning from Disaster
12,084,336
# Training columns (features + targets); the test set uses the feature
# prefix of the same list.
columns = ['ln(No_of_Days)', 'Country_Region', 'ConfirmedCases', 'Fatalities']
test_set_columns = columns[:2]
def _title_from_name(name):
    # "Surname, Title. Given names" -> "Title"
    return name.split(', ')[1].split('.')[0]

# Extract each passenger's honorific title from the Name column.
df_all['Title'] = df_all['Name'].apply(_title_from_name)
Titanic - Machine Learning from Disaster
12,084,336
def extract_features(df):
    """Add calendar features derived from the string Date column in place.

    Adds: month, day, weekday, is_weekend, and US-holiday indicator columns.
    Assumes Date strings look like 'YYYY-MM-DD[ ...]'.
    """
    # Month and day parsed from the ISO-style date string.
    df['month'] = df['Date'].apply(lambda x: int(x.split(' ')[0].split('-')[1]))
    df['day'] = df['Date'].apply(lambda x: int(x.split(' ')[0].split('-')[2]))
    # dayofweek // 4 == 1 holds for Friday(4)..Sunday(6) — a 3-day "weekend".
    df['is_weekend'] = ((df.Date.astype('datetime64[ns]').dt.dayofweek) // 4 == 1).astype(float)
    df['weekday'] = df.Date.astype('datetime64[ns]').dt.dayofweek
    # Fixed-date and floating US holidays (New Year, July 4th, Veterans Day,
    # Christmas, MLK/Presidents/Memorial/Labor/Columbus Day, Thanksgiving).
    df['is_holyday'] = df.apply(lambda row: 1 if(row['month']==1 and row['day']==1)or(row['month']==7 and row['day']==4)or(row['month']==11 and row['day']==11)or(row['month']==12 and row['day']==25)or(row['month']==1 and row['day'] >= 15 and row['day'] <= 21 and row['weekday'] == 0)or(row['month']==2 and row['day'] >= 15 and row['day'] <= 21 and row['weekday'] == 0)or(row['month']==5 and row['day'] >= 25 and row['day'] <= 31 and row['weekday'] == 0)or(row['month']==9 and row['day'] >= 1 and row['day'] <= 7 and row['weekday'] == 0)or(row['month']==10 and row['day'] >= 8 and row['day'] <= 14 and row['weekday'] == 0)or(row['month']==11 and row['day'] >= 22 and row['day'] <= 28 and row['weekday'] == 3)else 0, axis=1)
    # Same holidays shifted back one day ("eve" indicator).
    df['is_day_before_holyday'] = df.apply(lambda row: 1 if(row['month']==12 and row['day']==31)or(row['month']==7 and row['day']==3)or(row['month']==11 and row['day']==10)or(row['month']==12 and row['day']==24)or(row['month']==1 and row['day'] >= 14 and row['day'] <= 20 and row['weekday'] == 6)or(row['month']==2 and row['day'] >= 14 and row['day'] <= 20 and row['weekday'] == 6)or(row['month']==5 and row['day'] >= 24 and row['day'] <= 30 and row['weekday'] == 6)or(( row['month']==9 and row['day'] >= 1 and row['day'] <= 6)or(row['month']==8 and row['day'] == 31)and row['weekday'] == 6)or(row['month']==10 and row['day'] >= 7 and row['day'] <= 13 and row['weekday'] == 6)or(row['month']==11 and row['day'] >= 21 and row['day'] <= 27 and row['weekday'] == 2)else 0, axis=1)
# Default the Married flag to 0; set to 1 for 'Mrs' titles in a later cell.
df_all['Married'] = 0
Titanic - Machine Learning from Disaster
12,084,336
# Add the calendar features to both splits.
for frame in (train_df, test_df):
    extract_features(frame)
# Mark passengers titled 'Mrs' as married.
# NOTE(review): chained indexing (.loc on a column selection) can trigger
# SettingWithCopyWarning; df_all.loc[mask, 'Married'] = 1 is the safer form.
df_all['Married'].loc[(df_all['Title'] == 'Mrs')] = 1
Titanic - Machine Learning from Disaster
12,084,336
# Fold the province into the country label as "Province(Country)" so each
# series gets a unique region key, then drop the province column.
# NOTE(review): `df[col][i] = ...` is chained assignment and relies on the
# intermediate being a view; df.loc[i, col] = ... is the robust form.
for i in range(len(train_df)):
    if train_df["Province_State"][i] != '':
        train_df["Country_Region"][i] = train_df["Province_State"][i] + "(" + str(train_df["Country_Region"][i]) + ")"
for i in range(len(test_df)):
    if test_df["Province_State"][i] != '':
        test_df["Country_Region"][i] = test_df["Province_State"][i] + "(" + str(test_df["Country_Region"][i]) + ")"
train_df.drop(columns = "Province_State", inplace=True)
test_df.drop(columns = "Province_State", inplace=True)
# Collapse rare/equivalent titles into two umbrella categories.
grouped_titles = {
    'Miss/Mrs/Ms': ['Miss', 'Mrs', 'Ms', 'Mlle', 'Lady', 'Mme', 'the Countess', 'Dona'],
    'Dr/Military/Noble/Clergy': ['Dr', 'Col', 'Major', 'Jonkheer', 'Capt', 'Sir', 'Don', 'Rev'],
}
for merged, originals in grouped_titles.items():
    df_all['Title'] = df_all['Title'].replace(originals, merged)
Titanic - Machine Learning from Disaster
12,084,336
# Number the days within each region's contiguous block of rows: training
# rows restart at 1 per region; test rows start at 72 so they continue the
# training day counter.
# NOTE(review): assumes rows are grouped by region in order, and uses chained
# `df["day"][i] = j` assignment (SettingWithCopy-prone).  The constant 72 is
# presumably the length of the training period — verify against the data.
i = 0
for value in train_df["Country_Region"].unique():
    if i < len(train_df):
        j = 1
        while (train_df["Country_Region"][i] == value):
            train_df["day"][i] = j
            j += 1; i += 1
            if i == len(train_df):
                break
i = 0
for value in test_df["Country_Region"].unique():
    if i < len(test_df):
        j = 72
        while (test_df["Country_Region"][i] == value):
            test_df["day"][i] = j
            j += 1; i += 1
            if i == len(test_df):
                break
train_df.rename(columns = {"day": "No_of_Days"}, inplace = True)
test_df.rename(columns = {"day": "No_of_Days"}, inplace = True)
# Log-scale day counters used as the regression feature.
train_df['ln(No_of_Days)'] = np.log1p(train_df['No_of_Days'])
test_df['ln(No_of_Days)'] = np.log1p(test_df['No_of_Days'])
df_all['Family'] = df_all['Name'].apply(lambda x : x.split(', ')[0] )
Titanic - Machine Learning from Disaster
12,084,336
# Build a trend table for the 10 regions with the most confirmed cases on
# the last training day (for plotting).
last_date = train_df.No_of_Days.max()
df_countries = train_df[train_df['No_of_Days'] == last_date]
df_countries = df_countries.groupby('Country_Region', as_index=False)['ConfirmedCases', 'Fatalities'].sum()
df_countries = df_countries.nlargest(10, 'ConfirmedCases')
df_trend = train_df.groupby(['No_of_Days', 'Country_Region'], as_index=False)['ConfirmedCases', 'Fatalities'].sum()
# Inner-join restricts the trend table to the top-10 regions; the joined
# totals (_y columns) are not needed.
df_trend = df_trend.merge(df_countries, on='Country_Region')
df_trend.drop(['ConfirmedCases_y', 'Fatalities_y'], axis=1, inplace=True)
df_trend.rename(columns={'Country_Region': 'Country', 'ConfirmedCases_x': 'Cases', 'Fatalities_x': 'Deaths'}, inplace=True)
# Log-scale series for readable growth curves.
df_trend['ln(Cases)'] = np.log1p(df_trend['Cases'])
df_trend['ln(Deaths)'] = np.log1p(df_trend['Deaths'])
# Split the engineered frame back into train (first 891 rows) and test.
train, test = df_all[:891], df_all[891:]
dfs = [train, test]
Titanic - Machine Learning from Disaster
12,084,336
# Model the targets in log1p space.
for target in ('ConfirmedCases', 'Fatalities'):
    train_df[target] = np.log1p(train_df[target])
# Median survival per surname and per ticket, alongside group-size columns.
# NOTE(review): passing a list to groupby[...] selection is deprecated in
# modern pandas (use double brackets); also the non-numeric key column is
# silently dropped from median(), which shifts the iloc positions used below
# — verify on the installed pandas version.
fam_survival_rate = train.groupby('Family')['Survived', 'Family', 'Family Size'].median()
ticket_survival_rate = train.groupby('Ticket')['Survived', 'Ticket', 'TicketC'].median()
Titanic - Machine Learning from Disaster
12,084,336
# Restrict each split to its modeling columns.
df_train = train_df.loc[:, columns]
df_test = test_df.loc[:, test_set_columns]
# Keep a group's median survival only when the surname/ticket is shared
# (appears in non_unique_fams / non_unique_tickets — defined in a cell not
# shown here) AND the group has more than one member in this frame
# (iloc[:, 1] indexes the size-like column of the median table).
family_rates = {}
ticket_rates = {}
for i in range(len(fam_survival_rate)):
    if fam_survival_rate.index[i] in non_unique_fams and fam_survival_rate.iloc[i, 1] > 1:
        family_rates[fam_survival_rate.index[i]] = fam_survival_rate.iloc[i, 0]
for i in range(len(ticket_survival_rate)):
    if ticket_survival_rate.index[i] in non_unique_tickets and ticket_survival_rate.iloc[i, 1] > 1:
        ticket_rates[ticket_survival_rate.index[i]] = ticket_survival_rate.iloc[i, 0]
Titanic - Machine Learning from Disaster
12,084,336
# Fit one model pair per region: a degree-4 interaction-polynomial linear
# regression for cases, and a second one for fatalities that additionally
# takes the predicted cases as an input feature.  Targets are in log1p space
# (see the earlier cell), so predictions are expm1'd at the end.
submission = []
for country in df_train.Country_Region.unique():
    df_train1 = df_train[df_train["Country_Region"] == country]
    cases = np.array(df_train1.ConfirmedCases)
    fatalities = np.array(df_train1.Fatalities)
    del df_train1['ConfirmedCases']
    del df_train1['Fatalities']
    # NOTE(review): the encoder is refit per region on a single label, so the
    # encoded value is always 0 — it only serves to numericize the column.
    lb = LabelEncoder()
    df_train1['Country_Region'] = lb.fit_transform(df_train1['Country_Region'])
    scaler = MinMaxScaler()
    train = scaler.fit_transform(df_train1.values)
    X_train, y_train = train, cases
    # NOTE(review): param_dict, model_xgb_c, polynomial_features_c,
    # X_train_poly and model2 below are defined but never used in the final
    # predictions — leftovers from experimentation.
    param_dict = {'n_estimators': [1500, 2000, 2200, 2500, 3000],
                  'max_depth': [10, 15, 20, 25, 30],
                  'min_child_weight': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                  'learning_rate': [0.05, 0.1, 0.15, 0.2, 0.3, 0.5],
                  'gamma': [0.0, 0.1, 0.2, 0.3, 0.4]}
    model_xgb_c = XGBRegressor(gamma = 0.4, learning_rate = 0.05, max_depth = 10, min_child_weight= 7, n_estimators = 1500, random_state = 37)
    polynomial_features_c = PolynomialFeatures(degree=2)
    X_train_poly = polynomial_features_c.fit_transform(X_train)
    # Cases model: polynomial interaction features -> linear regression.
    steps1 = [('poly_features', PolynomialFeatures(4, interaction_only=True)), ('lg', LinearRegression())]
    pipeline1 = Pipeline(steps=steps1)
    pipeline1.fit(X_train, y_train)
    # Fatalities training matrix: features plus the true case counts.
    x_train_cas = []
    for i in range(len(X_train)):
        x = list(X_train[i])
        x.append(y_train[i])
        x_train_cas.append(x)
    x_train_cas[0]
    model2 = XGBRegressor(gamma = 0.4, learning_rate = 0.05, max_depth = 10, min_child_weight= 7, n_estimators = 1500, random_state = 37)
    steps2 = [('poly_features', PolynomialFeatures(4, interaction_only=True)), ('XGBoost', LinearRegression())]
    pipeline2 = Pipeline(steps=steps2)
    pipeline2.fit(np.array(x_train_cas), fatalities)
    # Score the region's test rows with the same scaler/encoder.
    df_test1 = test_df[(test_df["Country_Region"] == country)]
    df_test1['Country_Region'] = lb.transform(df_test1['Country_Region'])
    ForecastId = df_test1.ForecastId.values
    df_test1 = df_test1[test_set_columns]
    df_test1_scaled = scaler.transform(df_test1.values)
    cases_pred = pipeline1.predict(df_test1_scaled)
    cases_pred[cases_pred < 0] = 0
    # Fatalities test matrix: features plus the predicted cases.
    x_test_cas = []
    for i in range(len(df_test1_scaled)):
        x = list(df_test1_scaled[i])
        x.append(cases_pred[i])
        x_test_cas.append(x)
    x_test_cas[0]
    fatalities_pred = pipeline2.predict(np.array(x_test_cas))
    fatalities_pred[fatalities_pred < 0] = 0
    # Undo the log1p transform and round to whole counts.
    for i in range(len(cases_pred)):
        d = {'ForecastId': ForecastId[i],
             'ConfirmedCases': np.around(np.expm1(cases_pred[i]), decimals = 0),
             'Fatalities': np.around(np.expm1(fatalities_pred[i]), decimals = 0)}
        submission.append(d)
mean_survival_rate = train['Survived'].mean() train_fam_survival = [] train_fam_survival_NA = [] test_fam_survival = [] test_fam_survival_NA = [] for i in range(len(train)) : if(train['Family'].iloc[i] in family_rates): train_fam_survival.append(family_rates[train['Family'].iloc[i]]) train_fam_survival_NA.append(1) else: train_fam_survival.append(mean_survival_rate) train_fam_survival_NA.append(0) for i in range(len(test)) : if(test['Family'].iloc[i] in family_rates): test_fam_survival.append(family_rates[test['Family'].iloc[i]]) test_fam_survival_NA.append(1) else: test_fam_survival.append(mean_survival_rate) test_fam_survival_NA.append(0)
Titanic - Machine Learning from Disaster
12,084,336
df_submit = pd.DataFrame(submission) df_submit.to_csv(r'submission.csv', index=False )<merge>
train_ticket_survival = [] train_ticket_survival_NA = [] test_ticket_survival = [] test_ticket_survival_NA = [] for i in range(len(train)) : if(train['Ticket'].iloc[i] in ticket_rates): train_ticket_survival.append(ticket_rates[train['Ticket'].iloc[i]]) train_ticket_survival_NA.append(1) else: train_ticket_survival.append(mean_survival_rate) train_ticket_survival_NA.append(0) for i in range(len(test)) : if(test['Ticket'].iloc[i] in ticket_rates): test_ticket_survival.append(ticket_rates[test['Ticket'].iloc[i]]) test_ticket_survival_NA.append(1) else: test_ticket_survival.append(mean_survival_rate) test_ticket_survival_NA.append(0 )
Titanic - Machine Learning from Disaster
12,084,336
output_df = pd.merge(test_df, df_submit, on='ForecastId') <filter>
train['Family_survival_rate'] = train_fam_survival test['Family_survival_rate'] = test_fam_survival train['Family_survival_rate_NA'] = train_fam_survival_NA test['Family_survival_rate_NA'] = test_fam_survival_NA train['Ticket_survival_rate'] = train_ticket_survival test['Ticket_survival_rate'] = test_ticket_survival train['Ticket_survival_rate_NA'] = train_ticket_survival_NA test['Ticket_survival_rate_NA'] = test_ticket_survival_NA
Titanic - Machine Learning from Disaster
12,084,336
output_df[output_df['Country_Region'] == 'India']<set_options>
for df in [train, test]: df['Survival_Rate'] =(df['Family_survival_rate'] + df['Ticket_survival_rate'])/2 df['Survival_Rate_NA'] =(df['Family_survival_rate_NA'] + df['Ticket_survival_rate_NA'])/2
Titanic - Machine Learning from Disaster
12,084,336
warnings.filterwarnings("ignore") output_notebook(resources=INLINE )<load_from_csv>
non_numerica_features = ['Embarked', 'Sex', 'Deck', 'Title', 'Family Size Grouped', 'Age', 'Fare'] for df in dfs: for feature in non_numerica_features: df[feature] = LabelEncoder().fit_transform(df[feature])
Titanic - Machine Learning from Disaster
12,084,336
country_codes = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv') country_codes = country_codes.drop('GDP(BILLIONS)', 1) country_codes.rename(columns={'COUNTRY': 'Country', 'CODE': 'Code'}, inplace=True )<load_from_csv>
cat_features = ['Sex', 'Pclass', 'Embarked', 'Title', 'Family Size Grouped', 'Deck'] encoded_features =[] for df in dfs: for feature in cat_features: encoded_feature = OneHotEncoder().fit_transform(df[feature].values.reshape(-1,1)).toarray() n = df[feature].nunique() cols = ['{}_{}'.format(feature,n)for n in range(1,n+1)] encoded_df = pd.DataFrame(encoded_feature, columns = cols) encoded_df.index = df.index encoded_features.append(encoded_df )
Titanic - Machine Learning from Disaster
12,084,336
virus_data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv') prev_index = 0 first_time = False tmp = 0 for i, row in virus_data.iterrows() : if(virus_data.loc[i,'SNo'] < 1342 and virus_data.loc[i,'Province/State']=='Hubei'): if(first_time): tmp = virus_data.loc[i,'Confirmed'] prev_index = i virus_data.loc[i,'Confirmed'] = virus_data.loc[i,'Confirmed'] + 593 first_time = False else: increment = virus_data.loc[i,'Confirmed'] - tmp tmp = virus_data.loc[i,'Confirmed'] virus_data.loc[i,'Confirmed'] = virus_data.loc[prev_index,'Confirmed'] + increment + 593 prev_index = i virus_data.rename(columns={'Country/Region': 'Country', 'ObservationDate': 'Date'}, inplace=True) virus_data = virus_data.fillna('unknow') virus_data['Country'] = virus_data['Country'].str.replace('US','United States') virus_data['Country'] = virus_data['Country'].str.replace('UK','United Kingdom') virus_data['Country'] = virus_data['Country'].str.replace('Mainland China','China') virus_data['Country'] = virus_data['Country'].str.replace('South Korea','Korea, South') virus_data['Country'] = virus_data['Country'].str.replace('North Korea','Korea, North') virus_data['Country'] = virus_data['Country'].str.replace('Macau','China') virus_data['Country'] = virus_data['Country'].str.replace('Ivory Coast','Cote d'Ivoire') virus_data = pd.merge(virus_data,country_codes,on=['Country']) <sort_values>
train = pd.concat([train, *encoded_features[:6]], axis = 1) test = pd.concat([test, *encoded_features[6:]], axis = 1) train.head()
Titanic - Machine Learning from Disaster
12,084,336
# Trajectory chart: for the six highlighted countries plot total confirmed
# cases vs new active cases (confirmed - deaths - recovered), both log-scaled.
# Fixes: colour literals lost in extraction (distinct named colours
# substituted — original values unknown), groupby column-list selection
# double-bracketed for modern pandas, and the six near-identical per-country
# code copies collapsed into one loop.
top_country = virus_data.loc[virus_data['Date'] == virus_data['Date'].iloc[-1]]
top_country = top_country.groupby(['Code', 'Country'])['Confirmed'].sum().reset_index()
top_country = top_country.sort_values('Confirmed', ascending=False)[:30]
top_country_codes = list(top_country['Country'])
countries = virus_data[virus_data['Country'].isin(top_country_codes)]
countries_day = countries.groupby(['Date', 'Code', 'Country'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
# y = x reference diagonal ("pure exponential growth" guide line).
exponential_line_x = list(range(16))
exponential_line_y = list(range(16))

p1 = figure(plot_width=800, plot_height=550, title="Trajectory of Covid-19")
p1.grid.grid_line_alpha = 0.3
p1.ygrid.band_fill_color = "olive"
p1.ygrid.band_fill_alpha = 0.1
p1.xaxis.axis_label = 'Total number of detected cases(Log scale)'
p1.yaxis.axis_label = 'New confirmed cases(Log scale)'
p1.line(exponential_line_x, exponential_line_y, line_dash="4 4", line_width=0.5)
for code, label, colour in [('CHN', 'China', 'red'), ('ITA', 'Italy', 'green'),
                            ('IND', 'India', 'orange'), ('ESP', 'Spain', 'purple'),
                            ('USA', 'United States', 'blue'), ('DEU', 'Germany', 'brown')]:
    country = countries_day.loc[countries_day['Code'] == code]
    # Active cases per day: confirmed minus deaths minus recovered.
    active = list(country['Confirmed'] - country['Deaths'] - country['Recovered'])
    p1.line(np.log(list(country['Confirmed'])), np.log(active),
            color=colour, legend_label=label, line_width=1)
    # White dot marks the latest observation.
    p1.circle(np.log(list(country['Confirmed'])[-1]), np.log(active[-1]),
              fill_color="white", size=5)
p1.legend.location = "bottom_right"
output_file("coronavirus.html", title="coronavirus.py")
show(p1)
df_all = concat(train, test )
Titanic - Machine Learning from Disaster
12,084,336
countries = virus_data[virus_data['Country'].isin(top_country_codes)] countries_day = countries.groupby(['Date','Code','Country'])['Confirmed','Deaths','Recovered'].sum().reset_index() exponential_line_x = [] exponential_line_y = [] for i in range(16): exponential_line_x.append(i) exponential_line_y.append(i) india = countries_day.loc[countries_day['Code']=='IND'] new_confirmed_cases_india = [] new_confirmed_cases_india.append(list(india['Confirmed'])[0] - list(india['Deaths'])[0] - list(india['Recovered'])[0]) for i in range(1,len(india)) : new_confirmed_cases_india.append(list(india['Confirmed'])[i] - list(india['Deaths'])[i] - list(india['Recovered'])[i]) p1 = figure(plot_width=800, plot_height=550, title="Trajectory of Covid-19 in India") p1.grid.grid_line_alpha=0.3 p1.ygrid.band_fill_color = "olive" p1.ygrid.band_fill_alpha = 0.1 p1.xaxis.axis_label = 'Total number of detected cases(Log scale)' p1.yaxis.axis_label = 'New confirmed cases(Log scale)' p = figure(plot_width=400, plot_height=400) p.outline_line_width = 7 p.outline_line_alpha = 0.3 p.outline_line_color = "navy" p1.line(exponential_line_x, exponential_line_y, line_dash="4 4", line_width=0.5) p1.line(np.log(list(india['Confirmed'])) , np.log(new_confirmed_cases_india), color=' legend_label='India', line_width=1) p1.circle(np.log(list(india['Confirmed'])[-1]), np.log(new_confirmed_cases_india[-1]), fill_color="white", size=5) p1.legend.location = "bottom_right" output_file("coronavirus_india.html", title="India.py") show(p1 )<set_options>
drop_cols = ['Deck', 'Embarked', 'Family', 'Family Size', 'Family Size Grouped', 'Survived', 'Name', 'Parch', 'PassengerId', 'Pclass', 'Sex', 'SibSp', 'Ticket', 'Title', 'Ticket_survival_rate', 'Family_survival_rate', 'Ticket_survival_rate_NA', 'Family_survival_rate_NA'] df_all.drop(columns = drop_cols, inplace = True) df_all.head()
Titanic - Machine Learning from Disaster
12,084,336
init_notebook_mode(connected=False )<load_from_csv>
X_train = train.drop(columns= drop_cols) Y_train = train['Survived'].values X_test = test.drop(columns = drop_cols )
Titanic - Machine Learning from Disaster
12,084,336
corona_data=pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv') choro_map=px.choropleth(corona_data, locations="Country/Region", locationmode = "country names", color="Confirmed", hover_name="Country/Region", animation_frame="ObservationDate" ) choro_map.update_layout( title_text = 'Global Spread of Coronavirus', title_x = 0.5, geo=dict( showframe = False, showcoastlines = False, )) choro_map.show()<load_from_csv>
X_train = StandardScaler().fit_transform(X_train )
Titanic - Machine Learning from Disaster
12,084,336
zone=pd.read_csv('/kaggle/input/covid-19-india-zone-classification/lockdownindiawarningzones.csv' )<load_from_csv>
X_test = StandardScaler().fit_transform(X_test )
Titanic - Machine Learning from Disaster
12,084,336
covid_India_cases = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') covid_India_cases.rename(columns={'State/UnionTerritory': 'State', 'Cured': 'Recovered', 'Confirmed': 'Confirmed'}, inplace=True) statewise_cases = pd.DataFrame(covid_India_cases.groupby(['State'])['Confirmed', 'Deaths', 'Recovered'].max().reset_index()) statewise_cases["Country"] = "India" fig = px.treemap(statewise_cases, path=['Country','State'], values='Confirmed', color='Confirmed', hover_data=['State'], color_continuous_scale='Rainbow') fig.show()<load_from_csv>
single_best_model = RandomForestClassifier(criterion='gini', n_estimators=1100, max_depth=5, min_samples_split=4, min_samples_leaf=5, max_features='auto', oob_score=True, random_state=SEED, n_jobs=-1, verbose=1) leaderboard_model = RandomForestClassifier(criterion='gini', n_estimators=1750, max_depth=7, min_samples_split=6, min_samples_leaf=6, max_features='auto', oob_score=True, random_state=SEED, n_jobs=-1, verbose=1 )
Titanic - Machine Learning from Disaster
12,084,336
covid_India_cases = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') covid_India_cases.rename(columns={'State/UnionTerritory': 'State', 'Cured': 'Recovered', 'Confirmed': 'Confirmed'}, inplace=True) statewise_cases = pd.DataFrame(covid_India_cases.groupby(['State'])['Confirmed', 'Deaths', 'Recovered'].max().reset_index()) last=statewise_cases pos=pd.read_csv('.. /input/utm-of-india/UTM ZONES of INDIA.csv') ind_grp=last.merge(pos , left_on='State', right_on='State / Union Territory' )<load_from_csv>
kfold = StratifiedKFold(n_splits = 10 )
Titanic - Machine Learning from Disaster
12,084,336
ind_map=pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') pos=pd.read_csv('.. /input/utm-of-india/UTM ZONES of INDIA.csv') ind_map1=ind_map.merge(pos , left_on='State/UnionTerritory', right_on='State / Union Territory') <set_options>
from collections import Counter from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
Titanic - Machine Learning from Disaster
12,084,336
warnings.filterwarnings('ignore') %matplotlib inline<load_from_csv>
random_state = 2 classifiers = [] classifiers.append(SVC(random_state=random_state, max_iter=1000)) classifiers.append(DecisionTreeClassifier(random_state=random_state)) classifiers.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),random_state=random_state,learning_rate=0.1)) classifiers.append(RandomForestClassifier(random_state=random_state)) classifiers.append(ExtraTreesClassifier(random_state=random_state)) classifiers.append(GradientBoostingClassifier(random_state=random_state)) classifiers.append(MLPClassifier(random_state=random_state, max_iter=1000)) classifiers.append(KNeighborsClassifier()) classifiers.append(LogisticRegression(random_state = random_state, max_iter=1000)) classifiers.append(LinearDiscriminantAnalysis() )
Titanic - Machine Learning from Disaster
12,084,336
age_details = pd.read_csv('.. /input/covid19-in-india/AgeGroupDetails.csv') india_covid_19 = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') hospital_beds = pd.read_csv('.. /input/covid19-in-india/HospitalBedsIndia.csv') individual_details = pd.read_csv('.. /input/covid19-in-india/IndividualDetails.csv') ICMR_labs = pd.read_csv('.. /input/covid19-in-india/ICMRTestingLabs.csv') state_testing = pd.read_csv('.. /input/covid19-in-india/StatewiseTestingDetails.csv') <load_from_csv>
cv_results = [] for clf in classifiers: cv_results.append(cross_val_score(clf, X_train, y = Y_train, cv = kfold, scoring = 'accuracy'))
Titanic - Machine Learning from Disaster
12,084,336
confirmed_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv') deaths_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv') recovered_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv') latest_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/04-04-2020.csv' )<data_type_conversions>
Scores = [] Params = [] Algorithm = [] GB = GradientBoostingClassifier() GB_param = {'loss' : ["deviance"], 'n_estimators' : [100,200,300], 'learning_rate': [0.1, 0.05, 0.01], 'max_depth': [4, 8], 'min_samples_leaf': [100,150], 'max_features': [0.3, 0.1] } grid_GB = GridSearchCV(GB, param_grid = GB_param, scoring = 'accuracy', cv = kfold, n_jobs = -1, verbose = 1) grid_GB.fit(X_train, Y_train) Scores.append(grid_GB.best_score_) Params.append(grid_GB.best_params_) Algorithm.append(GB.__class__.__name__ )
Titanic - Machine Learning from Disaster
12,084,336
india_covid_19['Date'] = pd.to_datetime(india_covid_19['Date']) state_testing['Date'] = pd.to_datetime(state_testing['Date'] )<data_type_conversions>
LDA = LinearDiscriminantAnalysis() LDA_param = {"solver" : ["svd"], "tol" : [0.0001,0.0002,0.0003]} grid_LDA = GridSearchCV(LDA, param_grid = LDA_param, cv = kfold, n_jobs = -1, verbose = 1, scoring = 'accuracy') grid_LDA.fit(X_train, Y_train) Scores.append(grid_LDA.best_score_) Params.append(grid_LDA.best_params_) Algorithm.append(LDA.__class__.__name__ )
Titanic - Machine Learning from Disaster
12,084,336
dates = list(confirmed_df.columns[4:]) dates = list(pd.to_datetime(dates)) dates_india = dates[8:]<feature_engineering>
LR = LogisticRegression(max_iter=1000) LR_params = { 'penalty':['l1', 'l2'], 'C': np.logspace(0,4,10) } grid_LR = GridSearchCV(LR, param_grid = LR_params, cv = kfold, n_jobs = 1, verbose = 1, scoring = 'accuracy') grid_LR.fit(X_train, Y_train) Scores.append(grid_LR.best_score_) Params.append(grid_LR.best_params_) Algorithm.append(LR.__class__.__name__ )
Titanic - Machine Learning from Disaster
12,084,336
df = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') data = df.copy() data['Date'] = data['Date'].apply(pd.to_datetime) data.drop(['Sno', 'Time'],axis=1,inplace=True) data_apr = data[data['Date'] > pd.Timestamp(date(2020,4,12)) ] state_cases = data_apr.groupby('State/UnionTerritory')['Confirmed','Deaths','Cured'].max().reset_index() state_cases['Active'] = state_cases['Confirmed'] -(state_cases['Deaths']- state_cases['Cured']) state_cases["Death Rate(per 100)"] = np.round(100*state_cases["Deaths"]/state_cases["Confirmed"],2) state_cases["Cure Rate(per 100)"] = np.round(100*state_cases["Cured"]/state_cases["Confirmed"],2 )<load_from_csv>
RF = RandomForestClassifier() RF_params = {"max_depth": [None], "max_features": [1, 3, 10], "min_samples_split": [2, 3, 10], "min_samples_leaf": [1, 3, 10], "bootstrap": [False], "n_estimators" :[100,300], "criterion": ["gini"] } grid_RF = GridSearchCV(RF, param_grid = RF_params, scoring = 'accuracy', cv = kfold, n_jobs = -1, verbose = 1) grid_RF.fit(X_train, Y_train) Scores.append(grid_RF.best_score_) Params.append(grid_RF.best_params_) Algorithm.append(RF.__class__.__name__ )
Titanic - Machine Learning from Disaster
12,084,336
state_testing = pd.read_csv('.. /input/covid19-in-india/StatewiseTestingDetails.csv') state_testing<load_from_csv>
tuned = pd.DataFrame({ 'Algorithm':Algorithm, 'Score':Scores, 'Best Parameters':Params }) tuned
Titanic - Machine Learning from Disaster
12,084,336
labs = pd.read_csv(".. /input/covid19-in-india/ICMRTestingLabs.csv") fig = px.treemap(labs, path=['state','city'], color='city', hover_data=['lab','address'], color_continuous_scale='reds') fig.show()<load_from_csv>
tuned[tuned['Algorithm'] == 'LinearDiscriminantAnalysis']['Best Parameters'].values
Titanic - Machine Learning from Disaster
12,084,336
zone=pd.read_csv('/kaggle/input/covid-19-india-zone-classification/lockdownindiawarningzones.csv') zone.style.set_properties(**{'background-color': 'black', 'color': 'lawngreen', 'border-color': 'white'} )<load_from_csv>
RF = RandomForestClassifier(bootstrap= False, criterion='gini', max_depth=None, max_features= 3, min_samples_leaf= 10, min_samples_split= 10, n_estimators= 100) LDA = LinearDiscriminantAnalysis(solver='svd', tol=0.0001) GB = GradientBoostingClassifier(learning_rate = 0.1, loss = 'deviance', max_depth= 4, max_features= 0.1, min_samples_leaf= 100, n_estimators= 300 )
Titanic - Machine Learning from Disaster
12,084,336
country_codes = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv') country_codes = country_codes.drop('GDP(BILLIONS)', 1) country_codes.rename(columns={'COUNTRY': 'Country', 'CODE': 'Code'}, inplace=True )<load_from_csv>
from sklearn.ensemble import StackingClassifier
Titanic - Machine Learning from Disaster
12,084,336
virus_data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv') prev_index = 0 first_time = False tmp = 0 for i, row in virus_data.iterrows() : if(virus_data.loc[i,'SNo'] < 1342 and virus_data.loc[i,'Province/State']=='Hubei'): if(first_time): tmp = virus_data.loc[i,'Confirmed'] prev_index = i virus_data.loc[i,'Confirmed'] = virus_data.loc[i,'Confirmed'] + 593 first_time = False else: increment = virus_data.loc[i,'Confirmed'] - tmp tmp = virus_data.loc[i,'Confirmed'] virus_data.loc[i,'Confirmed'] = virus_data.loc[prev_index,'Confirmed'] + increment + 593 prev_index = i virus_data.rename(columns={'Country/Region': 'Country', 'ObservationDate': 'Date'}, inplace=True) virus_data = virus_data.fillna('unknow') virus_data['Country'] = virus_data['Country'].str.replace('US','United States') virus_data['Country'] = virus_data['Country'].str.replace('UK','United Kingdom') virus_data['Country'] = virus_data['Country'].str.replace('Mainland China','China') virus_data['Country'] = virus_data['Country'].str.replace('South Korea','Korea, South') virus_data['Country'] = virus_data['Country'].str.replace('North Korea','Korea, North') virus_data['Country'] = virus_data['Country'].str.replace('Macau','China') virus_data['Country'] = virus_data['Country'].str.replace('Ivory Coast','Cote d'Ivoire') virus_data = pd.merge(virus_data,country_codes,on=['Country']) <load_from_csv>
def get_stacking() : base = list() base.append(( 'rf', RF)) base.append(( 'lda', LDA)) base.append(( 'GB', GB)) meta = LogisticRegression() model = StackingClassifier(estimators=base,final_estimator=meta, cv = 5) return model
Titanic - Machine Learning from Disaster
12,084,336
# Per-state trajectory chart for India: total confirmed vs active cases
# (confirmed - deaths - recovered), both log-scaled, for the top states.
# Fixes: corrupted '.. /input' path; colour literals lost in extraction
# (distinct named colours substituted — originals unknown); groupby
# column-list selection double-bracketed; the nine near-identical per-state
# code copies collapsed into one loop.
covid_India_cases = pd.read_csv('../input/covid19-in-india/covid_19_india.csv')
covid_India_cases = covid_India_cases.dropna()
covid_India_cases.rename(columns={'State/UnionTerritory': 'State',
                                  'Cured': 'Recovered'}, inplace=True)
covid_India_cases = covid_India_cases.fillna('unknow')
top_country = covid_India_cases.loc[covid_India_cases['Date'] == covid_India_cases['Date'].iloc[-1]]
top_country = top_country.groupby(['State'])['Confirmed'].sum().reset_index()
top_country = top_country.sort_values('Confirmed', ascending=False)[:30]
top_country_codes = list(top_country['State'])
countries = covid_India_cases[covid_India_cases['State'].isin(top_country_codes)]
countries_day = countries.groupby(['Date', 'State'])[['Confirmed', 'Deaths', 'Recovered']].sum().reset_index()
# y = x reference diagonal.
exponential_line_x = list(range(16))
exponential_line_y = list(range(16))
p1 = figure(plot_width=800, plot_height=550, title="Trajectory of Covid-19")
p1.grid.grid_line_alpha = 0.3
p1.ygrid.band_fill_color = "olive"
p1.ygrid.band_fill_alpha = 0.1
p1.xaxis.axis_label = 'Total number of detected cases(Log scale)'
p1.yaxis.axis_label = 'New confirmed cases(Log scale)'
p1.line(exponential_line_x, exponential_line_y, line_dash="4 4", line_width=0.5)
states = ['Maharashtra', 'Gujarat', 'Delhi', 'Madhya Pradesh', 'Rajasthan',
          'Uttar Pradesh', 'Tamil Nadu', 'Andhra Pradesh', 'West Bengal']
colours = ['red', 'green', 'blue', 'orange', 'purple',
           'brown', 'olive', 'teal', 'magenta']
for state, colour in zip(states, colours):
    state_df = countries_day.loc[countries_day['State'] == state]
    state_df = state_df.sort_values('Confirmed', ascending=True)
    # Active cases per day for this state.
    active = list(state_df['Confirmed'] - state_df['Deaths'] - state_df['Recovered'])
    p1.line(np.log(list(state_df['Confirmed'])), np.log(active),
            color=colour, legend_label=state, line_width=1)
    # White dot marks the latest observation.
    p1.circle(np.log(list(state_df['Confirmed'])[-1]), np.log(active[-1]),
              fill_color="white", size=5)
p1.legend.location = "bottom_right"
output_file("coronavirus.html", title="coronavirus.py")
show(p1)
from sklearn.model_selection import RepeatedStratifiedKFold
Titanic - Machine Learning from Disaster
12,084,336
# Use the fivethirtyeight matplotlib style; load the global COVID daily file.
plt.style.use('fivethirtyeight')
train = pd.read_csv('/kaggle/input/coronavirus-2019ncov/covid-19-all.csv')
def get_models():
    """Return the candidate estimators keyed by short name.

    RF, LDA and GB are estimator instances defined elsewhere in the
    notebook; the stacking ensemble is built fresh on each call.
    """
    return {
        'rf': RF,
        'lda': LDA,
        'GB': GB,
        'stacking': get_stacking(),
    }
Titanic - Machine Learning from Disaster
12,084,336
# Aggregate India's daily national totals and derive active cases; keep only
# days with at least 100 active cases.
# NOTE(review): selecting several columns as a bare tuple after groupby is
# deprecated in modern pandas — should be a list: [['Confirmed','Deaths','Recovered']].
in_df = train[train['Country/Region']=='India'].groupby('Date')['Confirmed','Deaths','Recovered'].sum().reset_index(False)
in_df['Active'] = in_df['Confirmed']-in_df['Deaths']-in_df['Recovered']
in_df = in_df[in_df.Active>=100]
def eval_models(model, X, y):
    """Cross-validate *model* on (X, y) and return the per-fold accuracies.

    Uses repeated stratified 5-fold CV (sklearn default of 10 repeats)
    with a fixed random_state for reproducibility.
    """
    cv_scheme = RepeatedStratifiedKFold(n_splits=5, random_state=1)
    return cross_val_score(model, X, y, scoring='accuracy', cv=cv_scheme)
Titanic - Machine Learning from Disaster
12,084,336
# Day index, daily increase and growth rate of active cases.
in_df['day_count'] = list(range(1,len(in_df)+1))
in_df['increase'] =(in_df.Active-in_df.Active.shift(1))
in_df['rate'] =(in_df.Active-in_df.Active.shift(1)) /in_df.Active

def sigmoid(x,c,a,b):
    """Logistic curve: c = carrying capacity, a = growth rate, b = midpoint day."""
    y = c*1 /(1 + np.exp(-a*(x-b)))
    return y

# Every second observation is used as fitting data for the curve fit.
xdata = np.array(list(in_df.day_count)[::2])
ydata = np.array(list(in_df.Active)[::2])
# India's population, used as an upper bound for the capacity parameter.
population=1.332*10**9
# Cross-validate every candidate model and collect (name, mean accuracy) pairs.
models = get_models()
results = [
    (name, np.mean(eval_models(estimator, X_train, Y_train)))
    for name, estimator in models.items()
]
Titanic - Machine Learning from Disaster
12,084,336
# Silence all warnings and render bokeh output inline in the notebook.
warnings.filterwarnings("ignore")
output_notebook(resources=INLINE)
# Fit the stacking ensemble on the full training set.
fit = get_stacking().fit(X_train, Y_train)
Titanic - Machine Learning from Disaster
12,084,336
# ISO country codes from the plotly GDP dataset; GDP column is not needed.
# NOTE(review): positional axis in .drop(label, 1) is removed in pandas 2 —
# should be axis=1; confirm the pandas version in use.
country_codes = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv')
country_codes = country_codes.drop('GDP(BILLIONS)', 1)
# Rename to match the column names used when merging with the case data.
country_codes.rename(columns={'COUNTRY': 'Country', 'CODE': 'Code'}, inplace=True)
# Predict survival for the test set; wrap as a named Series for inspection.
test_sur = pd.Series(fit.predict(X_test), name='Survived')
test_sur
Titanic - Machine Learning from Disaster
12,084,336
virus_data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv') prev_index = 0 first_time = False tmp = 0 for i, row in virus_data.iterrows() : if(virus_data.loc[i,'SNo'] < 1342 and virus_data.loc[i,'Province/State']=='Hubei'): if(first_time): tmp = virus_data.loc[i,'Confirmed'] prev_index = i virus_data.loc[i,'Confirmed'] = virus_data.loc[i,'Confirmed'] + 593 first_time = False else: increment = virus_data.loc[i,'Confirmed'] - tmp tmp = virus_data.loc[i,'Confirmed'] virus_data.loc[i,'Confirmed'] = virus_data.loc[prev_index,'Confirmed'] + increment + 593 prev_index = i virus_data.rename(columns={'Country/Region': 'Country', 'ObservationDate': 'Date'}, inplace=True) virus_data = virus_data.fillna('unknow') virus_data['Country'] = virus_data['Country'].str.replace('US','United States') virus_data['Country'] = virus_data['Country'].str.replace('UK','United Kingdom') virus_data['Country'] = virus_data['Country'].str.replace('Mainland China','China') virus_data['Country'] = virus_data['Country'].str.replace('South Korea','Korea, South') virus_data['Country'] = virus_data['Country'].str.replace('North Korea','Korea, North') virus_data['Country'] = virus_data['Country'].str.replace('Macau','China') virus_data['Country'] = virus_data['Country'].str.replace('Ivory Coast','Cote d'Ivoire') virus_data = pd.merge(virus_data,country_codes,on=['Country']) <sort_values>
# Keep only the two columns required by the Kaggle submission format.
test1 = test[['PassengerId','Survived']]
Titanic - Machine Learning from Disaster
12,084,336
top_country = virus_data.loc[virus_data['Date'] == virus_data['Date'].iloc[-1]] top_country = top_country.groupby(['Code','Country'])['Confirmed'].sum().reset_index() top_country = top_country.sort_values('Confirmed', ascending=False) top_country = top_country[:30] top_country_codes = top_country['Country'] top_country_codes = list(top_country_codes) countries = virus_data[virus_data['Country'].isin(top_country_codes)] countries_day = countries.groupby(['Date','Code','Country'])['Confirmed','Deaths','Recovered'].sum().reset_index() exponential_line_x = [] exponential_line_y = [] for i in range(16): exponential_line_x.append(i) exponential_line_y.append(i) china = countries_day.loc[countries_day['Code']=='CHN'] new_confirmed_cases_china = [] new_confirmed_cases_china.append(list(china['Confirmed'])[0] - list(china['Deaths'])[0] - list(china['Recovered'])[0]) for i in range(1,len(china)) : new_confirmed_cases_china.append(list(china['Confirmed'])[i] - list(china['Deaths'])[i] - list(china['Recovered'])[i]) italy = countries_day.loc[countries_day['Code']=='ITA'] new_confirmed_cases_ita = [] new_confirmed_cases_ita.append(list(italy['Confirmed'])[0] - list(italy['Deaths'])[0] - list(italy['Recovered'])[0]) for i in range(1,len(italy)) : new_confirmed_cases_ita.append(list(italy['Confirmed'])[i] - list(italy['Deaths'])[i] - list(italy['Recovered'])[i]) india = countries_day.loc[countries_day['Code']=='IND'] new_confirmed_cases_india = [] new_confirmed_cases_india.append(list(india['Confirmed'])[0] - list(india['Deaths'])[0] - list(india['Recovered'])[0]) for i in range(1,len(india)) : new_confirmed_cases_india.append(list(india['Confirmed'])[i] - list(india['Deaths'])[i] - list(india['Recovered'])[i]) spain = countries_day.loc[countries_day['Code']=='ESP'] new_confirmed_cases_spain = [] new_confirmed_cases_spain.append(list(spain['Confirmed'])[0] - list(spain['Deaths'])[0] - list(spain['Recovered'])[0]) for i in range(1,len(spain)) : 
new_confirmed_cases_spain.append(list(spain['Confirmed'])[i] - list(spain['Deaths'])[i] - list(spain['Recovered'])[i]) us = countries_day.loc[countries_day['Code']=='USA'] new_confirmed_cases_us = [] new_confirmed_cases_us.append(list(us['Confirmed'])[0] - list(us['Deaths'])[0] - list(us['Recovered'])[0]) for i in range(1,len(us)) : new_confirmed_cases_us.append(list(us['Confirmed'])[i] - list(us['Deaths'])[i] - list(us['Recovered'])[i]) german = countries_day.loc[countries_day['Code']=='DEU'] new_confirmed_cases_german = [] new_confirmed_cases_german.append(list(german['Confirmed'])[0] - list(german['Deaths'])[0] - list(german['Recovered'])[0]) for i in range(1,len(german)) : new_confirmed_cases_german.append(list(german['Confirmed'])[i] - list(german['Deaths'])[i] - list(german['Recovered'])[i]) p1 = figure(plot_width=800, plot_height=550, title="Trajectory of Covid-19") p1.grid.grid_line_alpha=0.3 p1.ygrid.band_fill_color = "olive" p1.ygrid.band_fill_alpha = 0.1 p1.xaxis.axis_label = 'Total number of detected cases(Log scale)' p1.yaxis.axis_label = 'New confirmed cases(Log scale)' p1.line(exponential_line_x, exponential_line_y, line_dash="4 4", line_width=0.5) p1.line(np.log(list(china['Confirmed'])) , np.log(new_confirmed_cases_china), color=' legend_label='China', line_width=1) p1.circle(np.log(list(china['Confirmed'])[-1]), np.log(new_confirmed_cases_china[-1]), fill_color="white", size=5) p1.line(np.log(list(italy['Confirmed'])) , np.log(new_confirmed_cases_ita), color=' legend_label='Italy', line_width=1) p1.circle(np.log(list(italy['Confirmed'])[-1]), np.log(new_confirmed_cases_ita[-1]), fill_color="white", size=5) p1.line(np.log(list(india['Confirmed'])) , np.log(new_confirmed_cases_india), color=' legend_label='India', line_width=1) p1.circle(np.log(list(india['Confirmed'])[-1]), np.log(new_confirmed_cases_india[-1]), fill_color="white", size=5) p1.line(np.log(list(spain['Confirmed'])) , np.log(new_confirmed_cases_spain), color=' legend_label='Spain', 
line_width=1) p1.circle(np.log(list(spain['Confirmed'])[-1]), np.log(new_confirmed_cases_spain[-1]), fill_color="white", size=5) p1.line(np.log(list(us['Confirmed'])) , np.log(new_confirmed_cases_us), color=' legend_label='United States', line_width=1) p1.circle(np.log(list(us['Confirmed'])[-1]), np.log(new_confirmed_cases_us[-1]), fill_color="white", size=5) p1.line(np.log(list(german['Confirmed'])) , np.log(new_confirmed_cases_german), color=' legend_label='Germany', line_width=1) p1.circle(np.log(list(german['Confirmed'])[-1]), np.log(new_confirmed_cases_german[-1]), fill_color="white", size=5) p1.legend.location = "bottom_right" output_file("coronavirus.html", title="coronavirus.py") show(p1 )<define_variables>
# Overwrite with the stacked model's predictions, cast to int (0/1).
test1['Survived'] = test_sur.values.astype(int)
test1
Titanic - Machine Learning from Disaster
12,084,336
<set_options><EOS>
# Write the submission file.
test1.to_csv('Stacked.csv', index=False)
Titanic - Machine Learning from Disaster
13,735,522
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
# Apply the minibrain "paper" matplotlib style from its GitHub raw URL.
style.use('https://raw.githubusercontent.com/JoseGuzman/minibrain/master/minibrain/paper.mplstyle')
# NOTE(review): the '.. /input' paths contain a stray space (likely an
# extraction artifact) — confirm they should read '../input/...'.
train = pd.read_csv(".. /input/titanic/train.csv", index_col='PassengerId')
test = pd.read_csv('.. /input/titanic/test.csv', index_col='PassengerId')
Titanic - Machine Learning from Disaster
13,735,522
# Animated world choropleth of confirmed cases over time.
corona_data=pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv')
choro_map=px.choropleth(corona_data,
                        locations="Country/Region",
                        locationmode = "country names",
                        color="Confirmed",
                        hover_name="Country/Region",
                        animation_frame="ObservationDate"
)
choro_map.update_layout(
    title_text = 'Global Spread of Coronavirus',
    title_x = 0.5,
    geo=dict(
        showframe = False,
        showcoastlines = False,
    ))
choro_map.show()
# Per-class column means.  numeric_only=True keeps modern pandas (>=2.0)
# from raising on non-numeric columns such as Name/Sex/Ticket; older pandas
# silently dropped them, so the result is unchanged.
mydf = train.groupby('Survived').mean(numeric_only=True)
mydf
Titanic - Machine Learning from Disaster
13,735,522
# Lockdown zone classification for Indian districts.
zone=pd.read_csv('/kaggle/input/covid-19-india-zone-classification/lockdownindiawarningzones.csv')
# Survival rate split by sex.
train.groupby('Sex')['Survived'].mean()
Titanic - Machine Learning from Disaster
13,735,522
covid_India_cases = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') covid_India_cases.rename(columns={'State/UnionTerritory': 'State', 'Cured': 'Recovered', 'Confirmed': 'Confirmed'}, inplace=True) statewise_cases = pd.DataFrame(covid_India_cases.groupby(['State'])['Confirmed', 'Deaths', 'Recovered'].max().reset_index()) statewise_cases["Country"] = "India" fig = px.treemap(statewise_cases, path=['Country','State'], values='Confirmed', color='Confirmed', hover_data=['State'], color_continuous_scale='Rainbow') fig.show()<load_from_csv>
from sklearn.pipeline import Pipeline
Titanic - Machine Learning from Disaster
13,735,522
covid_India_cases = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') covid_India_cases.rename(columns={'State/UnionTerritory': 'State', 'Cured': 'Recovered', 'Confirmed': 'Confirmed'}, inplace=True) statewise_cases = pd.DataFrame(covid_India_cases.groupby(['State'])['Confirmed', 'Deaths', 'Recovered'].max().reset_index()) last=statewise_cases pos=pd.read_csv('.. /input/utm-of-india/UTM ZONES of INDIA.csv') ind_grp=last.merge(pos , left_on='State', right_on='State / Union Territory' )<load_from_csv>
class FeaturesTransformer():
    """Feature engineering for the Titanic frame.

    Extracts the honorific Title from Name, optionally builds FamilySize
    from SibSp/Parch, and optionally dissects Cabin into Room, nRooms and
    CabinDeck.  Stateless, Pipeline-compatible (fit/transform).
    """

    def __init__(self, create_family=True, dissect_cabin=True):
        self.create_family = create_family
        self.dissect_cabin = dissect_cabin

    def fit(self, X, y=None, **fit_params):
        # Nothing to learn from the data.
        return self

    def transform(self, X, **transform_params):
        out = X.copy()
        # Honorific ("Mr.", "Mlle." ...) pulled out of the passenger name,
        # then collapsed onto the three common titles.
        out['Title'] = X.Name.str.extract('([A-Za-z]+)\.', expand=False)
        out.drop(['Name'], axis=1, inplace=True)
        out['Title'].replace(['Ms', 'Lady', 'Countess', 'Dona'], 'Mrs', inplace=True)
        out['Title'].replace(['Mme', 'Mlle'], 'Miss', inplace=True)
        out['Title'].replace(['Major', 'Sir', 'Jonkheer', 'Dr', 'Col', 'Don', 'Capt', 'Rev'],
                             'Mr', inplace=True)
        if self.create_family:
            # Siblings/spouses + parents/children + the passenger themself.
            out['FamilySize'] = X['SibSp'] + X['Parch'] + 1
            out.drop(['SibSp', 'Parch'], axis=1, inplace=True)
        if self.dissect_cabin:
            out['Cabin'].fillna('N', inplace=True)
            # Room number is taken from the *raw* column, so originally
            # missing cabins keep a NaN room.
            out['Room'] = X['Cabin'].str.extract("([0-9]+)", expand=False).astype("float")
            # A cabin string like "C85 C87" lists several rooms.
            out['nRooms'] = out.Cabin.apply(lambda cabin: len(str(cabin).split()))
            out.loc[out.Cabin == 'N', 'nRooms'] = 0
            out['CabinDeck'] = out.Cabin.apply(lambda cabin: str(cabin[0]))
            # Deck 'T' is a singleton; fold it into 'A'.
            out['CabinDeck'].replace(['T'], 'A', inplace=True)
            del out['Cabin']
        return out
Titanic - Machine Learning from Disaster
13,735,522
ind_map=pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') pos=pd.read_csv('.. /input/utm-of-india/UTM ZONES of INDIA.csv') ind_map1=ind_map.merge(pos , left_on='State/UnionTerritory', right_on='State / Union Territory') <set_options>
# Single-step pipeline: engineer Title/FamilySize/cabin features.
preprocess = Pipeline([
    ('Preprocessing', FeaturesTransformer(create_family=True))
])
mytrain = preprocess.fit_transform(train)
Titanic - Machine Learning from Disaster
13,735,522
warnings.filterwarnings('ignore') %matplotlib inline<load_from_csv>
# Title distribution split by survival outcome.
mytrain.groupby('Survived')['Title'].value_counts()
Titanic - Machine Learning from Disaster
13,735,522
# Load the India-focused COVID datasets.
# NOTE(review): the '.. /input' paths contain a stray space (likely an
# extraction artifact) — confirm they should read '../input/...'.
age_details = pd.read_csv('.. /input/covid19-in-india/AgeGroupDetails.csv')
india_covid_19 = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv')
hospital_beds = pd.read_csv('.. /input/covid19-in-india/HospitalBedsIndia.csv')
individual_details = pd.read_csv('.. /input/covid19-in-india/IndividualDetails.csv')
ICMR_labs = pd.read_csv('.. /input/covid19-in-india/ICMRTestingLabs.csv')
state_testing = pd.read_csv('.. /input/covid19-in-india/StatewiseTestingDetails.csv')
class FillerTransformer() : def __init__(self, mean=True): self.mean = mean def fit(self, X, y=None, **fit_params): return self def transform(self, X, **transform_params): mydf = X.copy() mydf['Embarked'] = mydf['Embarked'].fillna('C') mydf['Fare'].fillna(mydf['Fare'].median() , inplace = True) if self.mean: mydf['Age'] = mydf.groupby(['Pclass','Title'])['Age'].apply(lambda x: x.fillna(x.mean())) else: mydf['Age'] = mydf.groupby(['Pclass','Title'])['Age'].apply(lambda x: x.fillna(x.median())) return mydf
Titanic - Machine Learning from Disaster
13,735,522
# JHU CSSE global time series plus one daily snapshot (2020-04-04).
confirmed_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
deaths_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv')
recovered_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv')
latest_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/04-04-2020.csv')
# Two-step pipeline: feature engineering, then imputation.
preprocess = Pipeline([
    ('Preprocessing', FeaturesTransformer(create_family=True)),
    ('FillingValues', FillerTransformer(mean=True))
])
mytrain = preprocess.fit_transform(train)
# Sanity check: no Age values should remain missing.
mytrain.Age.isnull().sum()
Titanic - Machine Learning from Disaster
13,735,522
# Parse date columns to datetime for time-series operations.
india_covid_19['Date'] = pd.to_datetime(india_covid_19['Date'])
state_testing['Date'] = pd.to_datetime(state_testing['Date'])
class ColumnsDelete():
    """Drop a fixed list of columns; a no-op when *columns* is falsy.

    Pipeline-compatible (fit/transform); never mutates its input frame.
    """

    def __init__(self, columns=None):
        self.columns = columns

    def fit(self, X, y=None, **fit_params):
        return self

    def transform(self, X, **transform_params):
        out = X.copy()
        if self.columns:
            out = out.drop(self.columns, axis=1)
        return out
Titanic - Machine Learning from Disaster
13,735,522
# Date columns start at index 4 in the JHU wide-format sheet.
dates = list(confirmed_df.columns[4:])
dates = list(pd.to_datetime(dates))
# India's series is taken from the 9th date onward.
dates_india = dates[8:]
# Three-step pipeline: engineering, imputation, column removal.
# NOTE(review): fit_transform is rerun on test — harmless here because the
# transformers are stateless, but it would leak if any step learned statistics.
preprocess = Pipeline([
    ('Preprocessing', FeaturesTransformer(create_family=True)),
    ('FillingValues', FillerTransformer(mean=True)),
    ('DeleteColumns', ColumnsDelete(['Room', 'Ticket']))
])
mytrain = preprocess.fit_transform(train)
mytest = preprocess.fit_transform(test)
mytrain.info(), mytest.info()
Titanic - Machine Learning from Disaster
13,735,522
df = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') data = df.copy() data['Date'] = data['Date'].apply(pd.to_datetime) data.drop(['Sno', 'Time'],axis=1,inplace=True) data_apr = data[data['Date'] > pd.Timestamp(date(2020,4,12)) ] state_cases = data_apr.groupby('State/UnionTerritory')['Confirmed','Deaths','Cured'].max().reset_index() state_cases['Active'] = state_cases['Confirmed'] -(state_cases['Deaths']- state_cases['Cured']) state_cases["Death Rate(per 100)"] = np.round(100*state_cases["Deaths"]/state_cases["Confirmed"],2) state_cases["Cure Rate(per 100)"] = np.round(100*state_cases["Cured"]/state_cases["Confirmed"],2 )<load_from_csv>
class CategoryEncoder():
    """Binary-encode Sex and one-hot encode the listed categorical columns.

    Sex maps male->1, female->0; each column in *columns* is expanded with
    pd.get_dummies using the column name as prefix.  Pipeline-compatible.
    """

    def __init__(self, columns=None):
        self.columns = columns

    def fit(self, X, y=None, **fit_params):
        return self

    def transform(self, X, **transform_params):
        out = X.copy()
        out['Sex'] = out['Sex'].map({'male': 1, 'female': 0})
        if self.columns:
            out = pd.get_dummies(out, columns=self.columns, prefix=self.columns)
        return out
Titanic - Machine Learning from Disaster
13,735,522
# State-wise COVID testing numbers.
# NOTE(review): '.. /input' contains a stray space — probably '../input'.
state_testing = pd.read_csv('.. /input/covid19-in-india/StatewiseTestingDetails.csv')
state_testing
# Four-step pipeline: engineering, imputation, column removal, encoding.
preprocess = Pipeline([
    ('Preprocessing', FeaturesTransformer(create_family=True)),
    ('FillingValues', FillerTransformer(mean=True)),
    ('DeleteColumns', ColumnsDelete(['Room', 'Ticket'])),
    ('CategoEncoder', CategoryEncoder(['Embarked', 'CabinDeck', 'Title'])),
])
mytrain = preprocess.fit_transform(train)
mytrain
Titanic - Machine Learning from Disaster
13,735,522
labs = pd.read_csv(".. /input/covid19-in-india/ICMRTestingLabs.csv") fig = px.treemap(labs, path=['state','city'], color='city', hover_data=['lab','address'], color_continuous_scale='reds') fig.show()<load_from_csv>
class myZScaler(BaseEstimator, TransformerMixin):
    """Standard-score only the given columns, leaving the rest untouched.

    NOTE(review): ``columns`` defaults to the builtin ``all`` used as a
    sentinel meaning "every column"; transform() then rebinds self.columns
    to the concrete column list on first call — a stateful side effect.
    """
    def __init__(self, columns = all, **init_params):
        self.columns = columns
        # Extra keyword args (e.g. copy=True) are forwarded to StandardScaler.
        self.scaler = StandardScaler(**init_params)
    def fit(self, X, y=None):
        # Learn mean/std on the selected columns only.
        self.scaler.fit(X[self.columns], y)
        return self
    def transform(self, X: pd.DataFrame):
        if self.columns is all:
            self.columns = X.columns
        init_col_order = X.columns
        # Scale the selected columns, re-attach the untouched ones, and
        # restore the original column order.
        X_scaled = pd.DataFrame(self.scaler.transform(X[self.columns]), columns=self.columns, index = X.index)
        X_not_scaled = X.loc[:,~X.columns.isin(self.columns)]
        return pd.concat([X_not_scaled, X_scaled], axis=1)[init_col_order]

# Smoke test: z-score Age and Fare on the engineered training frame.
myscaler = myZScaler(['Age', 'Fare'], copy=True)
myfoo = myscaler.fit_transform(mytrain)
myfoo.head()
Titanic - Machine Learning from Disaster
13,735,522
# Lockdown zone table, styled: black background with green text.
zone=pd.read_csv('/kaggle/input/covid-19-india-zone-classification/lockdownindiawarningzones.csv')
zone.style.set_properties(**{'background-color': 'black', 'color': 'lawngreen', 'border-color': 'white'})
# Final pipeline: median Age fill and z-scoring of the numeric columns.
preprocess = Pipeline([
    ('Preprocessing', FeaturesTransformer(create_family=True)),
    ('FillingValues', FillerTransformer(mean=False)),
    ('DeleteColumns', ColumnsDelete(['Room', 'Ticket'])),
    ('CategoEncoder', CategoryEncoder(['Embarked', 'CabinDeck', 'Title'])),
    ('z-score', myZScaler(['Age', 'Fare', 'FamilySize', 'Pclass'], copy=True))
])
# Manual fix for a malformed Name that otherwise breaks the Title regex.
train.loc[797,'Name'] = "Leader, Ms.Alice(Farnham)"
mytrain = preprocess.fit_transform(train)
Titanic - Machine Learning from Disaster
13,735,522
# ISO country codes from the plotly GDP dataset (duplicate of the earlier cell).
# NOTE(review): positional axis in .drop(label, 1) is removed in pandas 2 —
# should be axis=1; confirm the pandas version in use.
country_codes = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2014_world_gdp_with_codes.csv')
country_codes = country_codes.drop('GDP(BILLIONS)', 1)
country_codes.rename(columns={'COUNTRY': 'Country', 'CODE': 'Code'}, inplace=True)
# Re-run the final pipeline on both splits.
mytrain = preprocess.fit_transform(train)
mytest = preprocess.fit_transform(test)
Titanic - Machine Learning from Disaster
13,735,522
virus_data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv') prev_index = 0 first_time = False tmp = 0 for i, row in virus_data.iterrows() : if(virus_data.loc[i,'SNo'] < 1342 and virus_data.loc[i,'Province/State']=='Hubei'): if(first_time): tmp = virus_data.loc[i,'Confirmed'] prev_index = i virus_data.loc[i,'Confirmed'] = virus_data.loc[i,'Confirmed'] + 593 first_time = False else: increment = virus_data.loc[i,'Confirmed'] - tmp tmp = virus_data.loc[i,'Confirmed'] virus_data.loc[i,'Confirmed'] = virus_data.loc[prev_index,'Confirmed'] + increment + 593 prev_index = i virus_data.rename(columns={'Country/Region': 'Country', 'ObservationDate': 'Date'}, inplace=True) virus_data = virus_data.fillna('unknow') virus_data['Country'] = virus_data['Country'].str.replace('US','United States') virus_data['Country'] = virus_data['Country'].str.replace('UK','United Kingdom') virus_data['Country'] = virus_data['Country'].str.replace('Mainland China','China') virus_data['Country'] = virus_data['Country'].str.replace('South Korea','Korea, South') virus_data['Country'] = virus_data['Country'].str.replace('North Korea','Korea, North') virus_data['Country'] = virus_data['Country'].str.replace('Macau','China') virus_data['Country'] = virus_data['Country'].str.replace('Ivory Coast','Cote d'Ivoire') virus_data = pd.merge(virus_data,country_codes,on=['Country']) <load_from_csv>
# Split off the label and drop it from the feature frame.
target = mytrain['Survived']
mytrain.drop(['Survived'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
13,735,522
covid_India_cases = pd.read_csv('.. /input/covid19-in-india/covid_19_india.csv') covid_India_cases=covid_India_cases.dropna() covid_India_cases.rename(columns={'State/UnionTerritory': 'State', 'Cured': 'Recovered', 'Confirmed': 'Confirmed'}, inplace=True) covid_India_cases = covid_India_cases.fillna('unknow') top_country = covid_India_cases.loc[covid_India_cases['Date'] == covid_India_cases['Date'].iloc[-1]] top_country = top_country.groupby(['State'])['Confirmed'].sum().reset_index() top_country = top_country.sort_values('Confirmed', ascending=False) top_country = top_country[:30] top_country_codes = top_country['State'] top_country_codes = list(top_country_codes) countries = covid_India_cases[covid_India_cases['State'].isin(top_country_codes)] countries_day = countries.groupby(['Date','State'])['Confirmed','Deaths','Recovered'].sum().reset_index() exponential_line_x = [] exponential_line_y = [] for i in range(16): exponential_line_x.append(i) exponential_line_y.append(i) Maharashtra = countries_day.loc[countries_day['State']=='Maharashtra'] Maharashtra=Maharashtra.sort_values('Confirmed',ascending=True) new_confirmed_cases_Maharashtra = [] new_confirmed_cases_Maharashtra.append(list(Maharashtra['Confirmed'])[0] - list(Maharashtra['Deaths'])[0] - list(Maharashtra['Recovered'])[0]) for i in range(1,len(Maharashtra)) : new_confirmed_cases_Maharashtra.append(list(Maharashtra['Confirmed'])[i] - list(Maharashtra['Deaths'])[i] - list(Maharashtra['Recovered'])[i]) Gujarat = countries_day.loc[countries_day['State']=='Gujarat'] Gujarat=Gujarat.sort_values('Confirmed',ascending=True) new_confirmed_cases_Gujarat = [] new_confirmed_cases_Gujarat.append(list(Gujarat['Confirmed'])[0] - list(Gujarat['Deaths'])[0] - list(Gujarat['Recovered'])[0]) for i in range(1,len(Gujarat)) : new_confirmed_cases_Gujarat.append(list(Gujarat['Confirmed'])[i] - list(Gujarat['Deaths'])[i] - list(Gujarat['Recovered'])[i]) Delhi = countries_day.loc[countries_day['State']=='Delhi'] 
Delhi=Delhi.sort_values('Confirmed',ascending=True) new_confirmed_cases_Delhi = [] new_confirmed_cases_Delhi.append(list(Delhi['Confirmed'])[0] - list(Delhi['Deaths'])[0] - list(Delhi['Recovered'])[0]) for i in range(1,len(Delhi)) : new_confirmed_cases_Delhi.append(list(Delhi['Confirmed'])[i] - list(Delhi['Deaths'])[i] - list(Delhi['Recovered'])[i]) Madhya_Pradesh = countries_day.loc[countries_day['State']=='Madhya Pradesh'] Madhya_Pradesh=Madhya_Pradesh.sort_values('Confirmed',ascending=True) new_confirmed_cases_Madhya_Pradesh = [] new_confirmed_cases_Madhya_Pradesh.append(list(Madhya_Pradesh['Confirmed'])[0] - list(Madhya_Pradesh['Deaths'])[0] - list(Madhya_Pradesh['Recovered'])[0]) for i in range(1,len(Madhya_Pradesh)) : new_confirmed_cases_Madhya_Pradesh.append(list(Madhya_Pradesh['Confirmed'])[i] - list(Madhya_Pradesh['Deaths'])[i] - list(Madhya_Pradesh['Recovered'])[i]) Rajasthan = countries_day.loc[countries_day['State']=='Rajasthan'] Rajasthan=Rajasthan.sort_values('Confirmed',ascending=True) new_confirmed_cases_Rajasthan = [] new_confirmed_cases_Rajasthan.append(list(Rajasthan['Confirmed'])[0] - list(Rajasthan['Deaths'])[0] - list(Rajasthan['Recovered'])[0]) for i in range(1,len(Rajasthan)) : new_confirmed_cases_Rajasthan.append(list(Rajasthan['Confirmed'])[i] - list(Rajasthan['Deaths'])[i] - list(Rajasthan['Recovered'])[i]) Uttar_Pradesh = countries_day.loc[countries_day['State']=='Uttar Pradesh'] Uttar_Pradesh=Uttar_Pradesh.sort_values('Confirmed',ascending=True) new_confirmed_cases_Uttar_Pradesh = [] new_confirmed_cases_Uttar_Pradesh.append(list(Uttar_Pradesh['Confirmed'])[0] - list(Uttar_Pradesh['Deaths'])[0] - list(Uttar_Pradesh['Recovered'])[0]) for i in range(1,len(Uttar_Pradesh)) : new_confirmed_cases_Uttar_Pradesh.append(list(Uttar_Pradesh['Confirmed'])[i] - list(Uttar_Pradesh['Deaths'])[i] - list(Uttar_Pradesh['Recovered'])[i]) Tamil_Nadu = countries_day.loc[countries_day['State']=='Tamil Nadu'] 
Tamil_Nadu=Tamil_Nadu.sort_values('Confirmed',ascending=True) new_confirmed_cases_Tamil_Nadu = [] new_confirmed_cases_Tamil_Nadu.append(list(Tamil_Nadu['Confirmed'])[0] - list(Tamil_Nadu['Deaths'])[0] - list(Tamil_Nadu['Recovered'])[0]) for i in range(1,len(Tamil_Nadu)) : new_confirmed_cases_Tamil_Nadu.append(list(Tamil_Nadu['Confirmed'])[i] - list(Tamil_Nadu['Deaths'])[i] - list(Tamil_Nadu['Recovered'])[i]) Andhra_Pradesh = countries_day.loc[countries_day['State']=='Andhra Pradesh'] Andhra_Pradesh=Andhra_Pradesh.sort_values('Confirmed',ascending=True) new_confirmed_cases_Andhra_Pradesh = [] new_confirmed_cases_Andhra_Pradesh.append(list(Andhra_Pradesh['Confirmed'])[0] - list(Andhra_Pradesh['Deaths'])[0] - list(Andhra_Pradesh['Recovered'])[0]) for i in range(1,len(Andhra_Pradesh)) : new_confirmed_cases_Andhra_Pradesh.append(list(Andhra_Pradesh['Confirmed'])[i] - list(Andhra_Pradesh['Deaths'])[i] - list(Andhra_Pradesh['Recovered'])[i]) West_Bengal = countries_day.loc[countries_day['State']=='West Bengal'] West_Bengal=West_Bengal.sort_values('Confirmed',ascending=True) new_confirmed_cases_West_Bengal = [] new_confirmed_cases_West_Bengal.append(list(West_Bengal['Confirmed'])[0] - list(West_Bengal['Deaths'])[0] - list(West_Bengal['Recovered'])[0]) for i in range(1,len(West_Bengal)) : new_confirmed_cases_West_Bengal.append(list(West_Bengal['Confirmed'])[i] - list(West_Bengal['Deaths'])[i] - list(West_Bengal['Recovered'])[i]) p1 = figure(plot_width=800, plot_height=550, title="Trajectory of Covid-19") p1.grid.grid_line_alpha=0.3 p1.ygrid.band_fill_color = "olive" p1.ygrid.band_fill_alpha = 0.1 p1.xaxis.axis_label = 'Total number of detected cases(Log scale)' p1.yaxis.axis_label = 'New confirmed cases(Log scale)' p1.line(exponential_line_x, exponential_line_y, line_dash="4 4", line_width=0.5) p1.line(np.log(list(Maharashtra['Confirmed'])) , np.log(new_confirmed_cases_Maharashtra), color=' legend_label='Maharashtra', line_width=1) 
p1.circle(np.log(list(Maharashtra['Confirmed'])[-1]), np.log(new_confirmed_cases_Maharashtra[-1]), fill_color="white", size=5) p1.line(np.log(list(Gujarat['Confirmed'])) , np.log(new_confirmed_cases_Gujarat), color=' legend_label='Gujarat', line_width=1) p1.circle(np.log(list(Gujarat['Confirmed'])[-1]), np.log(new_confirmed_cases_Gujarat[-1]), fill_color="white", size=5) p1.line(np.log(list(Delhi['Confirmed'])) , np.log(new_confirmed_cases_Delhi), color=' legend_label='Delhi', line_width=1) p1.circle(np.log(list(Delhi['Confirmed'])[-1]), np.log(new_confirmed_cases_Delhi[-1]), fill_color="white", size=5) p1.line(np.log(list(Madhya_Pradesh['Confirmed'])) , np.log(new_confirmed_cases_Madhya_Pradesh), color=' legend_label='Madhya Pradesh', line_width=1) p1.circle(np.log(list(Madhya_Pradesh['Confirmed'])[-1]), np.log(new_confirmed_cases_Madhya_Pradesh[-1]), fill_color="white", size=5) p1.line(np.log(list(Rajasthan['Confirmed'])) , np.log(new_confirmed_cases_Rajasthan), color=' legend_label='Rajasthan', line_width=1) p1.circle(np.log(list(Rajasthan['Confirmed'])[-1]), np.log(new_confirmed_cases_Rajasthan[-1]), fill_color="white", size=5) p1.line(np.log(list(Uttar_Pradesh['Confirmed'])) , np.log(new_confirmed_cases_Uttar_Pradesh), color=' legend_label='Uttar Pradesh', line_width=1) p1.circle(np.log(list(Uttar_Pradesh['Confirmed'])[-1]), np.log(new_confirmed_cases_Uttar_Pradesh[-1]), fill_color="white", size=5) p1.line(np.log(list(Tamil_Nadu['Confirmed'])) , np.log(new_confirmed_cases_Tamil_Nadu), color=' legend_label='Tamil Nadu', line_width=1) p1.circle(np.log(list(Tamil_Nadu['Confirmed'])[-1]), np.log(new_confirmed_cases_Tamil_Nadu[-1]), fill_color="white", size=5) p1.line(np.log(list(Andhra_Pradesh['Confirmed'])) , np.log(new_confirmed_cases_Andhra_Pradesh), color=' legend_label='Andhra Pradesh', line_width=1) p1.circle(np.log(list(Andhra_Pradesh['Confirmed'])[-1]), np.log(new_confirmed_cases_Andhra_Pradesh[-1]), fill_color="white", size=5) 
p1.line(np.log(list(West_Bengal['Confirmed'])) , np.log(new_confirmed_cases_West_Bengal), color=' legend_label='West Bengal', line_width=1) p1.circle(np.log(list(West_Bengal['Confirmed'])[-1]), np.log(new_confirmed_cases_West_Bengal[-1]), fill_color="white", size=5) p1.legend.location = "bottom_right" output_file("coronavirus.html", title="coronavirus.py") show(p1 )<load_from_csv>
# Baseline random forest; training-set accuracy (optimistic by construction,
# since the model is scored on the data it was fit on).
RFCmodel = RandomForestClassifier(random_state=42)
RFCmodel.fit(mytrain, target)
print(f'Accuracy: {RFCmodel.score(mytrain,target)*100:.2f}%')
Titanic - Machine Learning from Disaster
13,735,522
# Use the fivethirtyeight matplotlib style; load the global COVID daily file.
plt.style.use('fivethirtyeight')
train=pd.read_csv('/kaggle/input/coronavirus-2019ncov/covid-19-all.csv')
# Training-set metrics for the random forest.
# Fix: sklearn metric signatures are (y_true, y_pred) — the ground truth must
# come first, otherwise precision and recall are silently swapped
# (accuracy and F1 happen to be symmetric, so only those two were correct).
# Also fixes the 'Precission' label typo.
myprediction = RFCmodel.predict(mytrain)
print(f'Accuracy  : {accuracy_score(target, myprediction)*100:.2f}%')
print(f'Precision : {precision_score(target, myprediction)*100:.2f}%')
print(f'Recall    : {recall_score(target, myprediction)*100:.2f}%')
print(f'F1-score  : {f1_score(target, myprediction)*100:.2f}%')
Titanic - Machine Learning from Disaster
13,735,522
# Aggregate India's daily national totals and derive active cases; keep only
# days with at least 100 active cases.
# NOTE(review): selecting several columns as a bare tuple after groupby is
# deprecated in modern pandas — should be a list: [['Confirmed','Deaths','Recovered']].
in_df = train[train['Country/Region']=='India'].groupby('Date')['Confirmed','Deaths','Recovered'].sum().reset_index(False)
in_df['Active'] = in_df['Confirmed']-in_df['Deaths']-in_df['Recovered']
in_df = in_df[in_df.Active>=100]
def evaluate_model(model):
    """Return the mean 5-fold CV accuracy of *model* on the global mytrain/target."""
    fold_scores = cross_val_score(model, mytrain, target, cv=5, scoring='accuracy')
    return fold_scores.mean()

# Compare three baseline classifiers with identical seeds.
models = {
    'Logistic regression': LogisticRegression(random_state=42),
    'Decision tree': DecisionTreeClassifier(random_state=42),
    'Random forest': RandomForestClassifier(random_state=42),
}
for name, candidate in models.items():
    print('{:20}: {:2.2f}%'.format(name, evaluate_model(model=candidate) * 100))
Titanic - Machine Learning from Disaster
13,735,522
# Day index, daily increase and growth rate of active cases.
in_df['day_count'] = list(range(1,len(in_df)+1))
in_df['increase'] =(in_df.Active-in_df.Active.shift(1))
in_df['rate'] =(in_df.Active-in_df.Active.shift(1)) /in_df.Active

def sigmoid(x,c,a,b):
    """Logistic curve: c = carrying capacity, a = growth rate, b = midpoint day."""
    y = c*1 /(1 + np.exp(-a*(x-b)))
    return y

# Every second observation is used as fitting data for the curve fit.
xdata = np.array(list(in_df.day_count)[::2])
ydata = np.array(list(in_df.Active)[::2])
# India's population, used as an upper bound for the capacity parameter.
population=1.332*10**9
from sklearn.model_selection import GridSearchCV
Titanic - Machine Learning from Disaster