kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
3,782,114 | def predict(xtest,input_name=None):
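# Loads a serialized model from disk with joblib and returns its predictions;
# baggedpred is kept as a float accumulator even though a single model is
# loaded per call.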
baggedpred=np.array([ 0.0 for d in range(0, xtest.shape[0])])
model= joblib.load(input_name)
preds=model.predict(xtest)
baggedpred+=preds
return baggedpred
<define_variables> | train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
3,782,114 | names=["lag_confirmed_rate" + str(k+1)for k in range(size)]
for day in days_back_confimed:
names+=["days_ago_confirmed_count_" + str(day)]
for window in windows:
names+=["ma" + str(window)+ "_rate_confirmed" + str(k+1)for k in range(size)]
names+=["std" + str(window)+ "_rate_confirmed" + str(k+1)for k in range(size)]
names+=["ewma" + str(window)+ "_rate_confirmed" + str(k+1)for k in range(size)]
names+=["lag_fatalities_rate" + str(k+1)for k in range(size)]
for day in days_back_fatalities:
names+=["days_ago_fatalitiescount_" + str(day)]
for window in windows:
names+=["ma" + str(window)+ "_rate_fatalities" + str(k+1)for k in range(size)]
names+=["std" + str(window)+ "_rate_fatalities" + str(k+1)for k in range(size)]
names+=["ewma" + str(window)+ "_rate_fatalities" + str(k+1)for k in range(size)]
names+=["confirmed_level"]
names+=["fatalities_level"]
if extra_stable_columns is not None and len(extra_stable_columns)>0:
names+=[k for k in extra_stable_columns]
if group_names is not None:
for gg in range(len(group_names)) :
names+=["lag_rate_group_"+ str(gg+1)+ "_" + str(k+1)for k in range(size_group)]
for day in days_back_confimed_group:
names+=["days_ago_grooupcount_" + str(gg+1)+ "_" + str(day)]
for window in windows_group:
names+=["ma_group_" + str(gg+1)+ "_" + str(window)+ "_rate_" + str(k+1)for k in range(size_group)]
names+=["std_group_" + str(gg+1)+ "_" + str(window)+ "_rate_" + str(k+1)for k in range(size_group)]
<categorify> | train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
3,782,114 | def decay_4_first_10_then_1_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
if j<10:
arr[j]=1.+(max(1,array[j])-1.) /4.
else :
arr[j]=1.
return arr
def decay_16_first_10_then_1_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
if j<10:
arr[j]=1.+(max(1,array[j])-1.) /16.
else :
arr[j]=1.
return arr
def decay_2_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) /2.
return arr
def decay_4_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) /4.
return arr
def acceleratorx2_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) *2.
return arr
def decay_1_5_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) /1.5
return arr
def stay_same_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.
return arr
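# The *_last_12_linear_inter variants below additionally taper the final 12
# growth rates linearly toward 1, anchored on the(already dampened)rate 12
# steps from the end.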
def decay_2_last_12_linear_inter_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) /2.
arr12=(max(1,arr[-12])-1.) /12.
for j in range(0, 12):
arr[len(arr)-12 +j]= max(1, 1 +(( arr12*12)-(j+1)*arr12))
return arr
def decay_4_last_12_linear_inter_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=1.+(max(1,array[j])-1.) /4.
arr12=(max(1,arr[-12])-1.) /12.
for j in range(0, 12):
arr[len(arr)-12 +j]= max(1, 1 +(( arr12*12)-(j+1)*arr12))
return arr
def linear_last_12_f(array):
arr=[1.0 for k in range(len(array)) ]
for j in range(len(array)) :
arr[j]=max(1,array[j])
arr12=(max(1,arr[-12])-1.) /12.
for j in range(0, 12):
arr[len(arr)-12 +j]= max(1, 1 +(( arr12*12)-(j+1)*arr12))
return arr
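# Illustrative examples(not from the original notebook)of how these
# dampeners act on day-over-day growth rates, where 1.3 means +30% growth:
# decay_2_f([1.3, 1.1, 0.9]) returns [1.15, 1.05, 1.0]  (halves the excess over 1)
# acceleratorx2_f([1.2])     returns [1.4]              (doubles the excess over 1)
# stay_same_f([1.3, 1.1])    returns [1.0, 1.0]         (freezes the series)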
decay_4_first_10_then_1 =[ "Heilongjiang_China","Liaoning_China","Shanghai_China"]
decay_4_first_10_then_1_fatality=[]
decay_16_first_10_then_1 =["Beijing_China","Fujian_China","Guangdong_China","Shandong_China","Sichuan_China","Zhejiang_China"]
decay_16_first_10_then_1_fatality=[]
decay_4=["nan_Bhutan","nan_Burundi","nan_Cabo Verde","Prince Edward Island_Canada",
"nan_Central African Republic","Inner Mongolia_China","nan_Maldives"]
decay_4_fatality=["nan_Congo(Kinshasa)"]
decay_2 =["nan_Congo(Kinshasa)","Faroe Islands_Denmark","nan_Eritrea","French Guiana_France","nan_Korea, South","nan_MS Zaandam"]
decay_2_fatality=[]
stay_same=["nan_Diamond Princess","nan_Timor-Leste"]
stay_same_fatality=["Beijing_China","Fujian_China","Guangdong_China","Shandong_China",
"Sichuan_China","Zhejiang_China", "Heilongjiang_China","Liaoning_China","Shanghai_China"]
normal=[]
normal_fatality=["nan_Korea, South","New York_US"]
decay_4_last_12_linear_inter =[ "Greenland_Denmark","nan_Dominica","nan_Equatorial Guinea","nan_Eswatini","New Caledonia_France",
"Saint Barthelemy_France","St Martin_France","nan_Gambia","nan_Grenada","nan_Holy See","nan_Mauritania","nan_Namibia","nan_Nicaragua"
,"nan_Papua New Guinea","nan_Saint Lucia","nan_Saint Vincent and the Grenadines","nan_Seychelles","nan_Sierra Leone","nan_Somalia","nan_Suriname",
"Anguilla_United Kingdom","British Virgin Islands_United Kingdom","Montserrat_United Kingdom","Turks and Caicos Islands_United Kingdom","nan_Zimbabwe"
, "Hong Kong_China","Curacao_Netherlands"]
decay_4_last_12_linear_inter_fatality=[]
decay_2_last_12_linear_inter =[ "nan_Chad",
"nan_Congo(Brazzaville)","nan_Fiji","French Polynesia_France","nan_Gabon",
"nan_Guyana","nan_Laos","nan_Nepal","Sint Maarten_Netherlands",
"nan_Saint Kitts and Nevis","nan_Sudan","nan_Syria","nan_Tanzania",
"Bermuda_United Kingdom","Cayman Islands_United Kingdom","nan_Zambia","Northwest Territories_Canada","Yukon_Canada"
,"nan_Mongolia","nan_Uganda"]
decay_2_last_12_linear_inter_fatality=[]
acceleratorx2=[]
acceleratorx2_fatality=[]
warm_st=['nan_Angola','nan_Antigua and Barbuda','Northern Territory_Australia','nan_Bahamas',
'nan_Bangladesh','nan_Belize','nan_Benin','nan_Botswana','nan_Burundi','nan_Cabo Verde','nan_Cameroon',
'nan_Central African Republic','nan_Chad','Hong Kong_China',"nan_Cote d'Ivoire",'nan_Cuba','Greenland_Denmark',
'nan_Dominica','nan_Equatorial Guinea','nan_Eritrea','nan_Eswatini','nan_Fiji','French Polynesia_France','New Caledonia_France',
'Saint Barthelemy_France','St Martin_France','nan_Gabon','nan_Gambia','nan_Grenada','nan_Guyana','nan_Haiti','nan_Holy See',
'nan_Honduras','nan_Ireland','nan_Korea, South','nan_Laos','nan_Liberia','nan_Libya','nan_Maldives','nan_Mali',
'nan_Mauritania','nan_Mauritius','nan_Mongolia','nan_Mozambique','nan_Namibia','nan_Nepal','Aruba_Netherlands',
'nan_Nicaragua','nan_Niger','nan_Papua New Guinea','nan_Saint Kitts and Nevis','nan_Saint Lucia',
'nan_Saint Vincent and the Grenadines','nan_Seychelles','nan_Sierra Leone','nan_Somalia',
'nan_Spain','nan_Sudan','nan_Suriname','nan_Syria','nan_Tanzania','nan_Togo','nan_Uganda','Anguilla_United Kingdom',
'Bermuda_United Kingdom','British Virgin Islands_United Kingdom','Channel Islands_United Kingdom',
'Gibraltar_United Kingdom','Isle of Man_United Kingdom','Montserrat_United Kingdom','nan_United Kingdom',
'Turks and Caicos Islands_United Kingdom','nan_Uzbekistan','nan_Zimbabwe',
]
decay_1_5 =["nan_Angola" ,"nan_Antigua and Barbuda","Montana_US","Nebraska_US","nan_Bangladesh","Illinois_US"
,"Northern Territory_Australia","nan_Bahamas","nan_Bahrain","nan_Barbados" ,"nan_Belize","nan_Benin",
"nan_Botswana","nan_Brunei","Manitoba_Canada","New Brunswick_Canada","Saskatchewan_Canada",
"nan_Cote d'Ivoire","nan_France","nan_Guinea-Bissau","nan_Haiti","nan_Italy","nan_Libya","nan_Malta","nan_Mauritius",
"Aruba_Netherlands","nan_Niger","nan_Spain","nan_Togo","Guam_US","Iowa_US","Idaho_US","Connecticut_US","California_US","New York_US","Virgin Islands_US",
"Channel Islands_United Kingdom","Gibraltar_United Kingdom","Isle of Man_United Kingdom","nan_United Kingdom",'nan_Burma']
decay_1_5_fatality=["nan_Cameroon","nan_Mali","nan_Cuba","Delaware_US","District of Columbia_US",
"Kansas_US","Louisiana_US","Michigan_US","New Mexico_US","Ohio_US","Oklahoma_US","Pennsylvania_US","Puerto Rico_US","Rhode Island_US",
"South Dakota_US" ,"Tennessee_US","Texas_US","Vermont_US","Virginia_US","West Virginia_US","nan_Uzbekistan"]
linear_last_12=["nan_Honduras","nan_Ireland","Colorado_US","nan_Liberia","nan_Mozambique"]
linear_last_12_fatality=[]
tr_frame=train_frame
features_train=tr_frame[names].values
standard_confirmed_train=tr_frame["ConfirmedCases"].values
standard_fatalities_train=tr_frame["Fatalities"].values
current_confirmed_train=tr_frame["ConfirmedCases"].values
features_cv=[]
name_cv=[]
standard_confirmed_cv=[]
standard_fatalities_cv=[]
names_=tr_frame["key"].values
training_horizon=int(features_train.shape[0]/len(unique_keys))
print("training horizon = ",training_horizon)
for dd in range(training_horizon-1,features_train.shape[0],training_horizon):
features_cv.append(features_train[dd])
name_cv.append(names_[dd])
standard_confirmed_cv.append(standard_confirmed_train[dd])
standard_fatalities_cv.append(standard_fatalities_train[dd])
print(name_cv[-1], standard_confirmed_cv[-1], standard_fatalities_cv[-1])
features_cv=np.array(features_cv)
preds_confirmed_cv=np.zeros(( features_cv.shape[0],horizon))
preds_confirmed_standard_cv=np.zeros(( features_cv.shape[0],horizon))
preds_fatalities_cv=np.zeros(( features_cv.shape[0],horizon))
preds_fatalities_standard_cv=np.zeros(( features_cv.shape[0],horizon))
overall_rmsle_metric_confirmed=0.0
for j in range(preds_confirmed_cv.shape[1]):
this_features_cv=features_cv
preds=predict(features_cv, input_name=model_directory +"confirmed"+ str(j))
preds_confirmed_cv[:,j]=preds
print(" modelling confirmed, case %d, , original cv %d and after %d "%(j,this_features_cv.shape[0],this_features_cv.shape[0]))
predictions=[]
for ii in range(preds_confirmed_cv.shape[0]):
current_prediction=standard_confirmed_cv[ii]
if current_prediction==0 :
current_prediction=0.1
this_preds=preds_confirmed_cv[ii].tolist()
name=name_cv[ii]
reserve=this_preds[0]
if name in normal:
this_preds=this_preds
elif name in decay_4_first_10_then_1:
this_preds=decay_4_first_10_then_1_f(this_preds)
elif name in decay_16_first_10_then_1:
this_preds=decay_16_first_10_then_1_f(this_preds)
elif name in decay_4_last_12_linear_inter:
this_preds=decay_4_last_12_linear_inter_f(this_preds)
elif name in decay_4:
this_preds=decay_4_f(this_preds)
elif name in decay_2:
this_preds=decay_2_f(this_preds)
elif name in decay_2_last_12_linear_inter:
this_preds=decay_2_last_12_linear_inter_f(this_preds)
elif name in decay_1_5:
this_preds=decay_1_5_f(this_preds)
elif name in linear_last_12:
this_preds=linear_last_12_f(this_preds)
elif name in acceleratorx2:
this_preds=acceleratorx2_f(this_preds)
elif name in stay_same or "China" in name:
this_preds=stay_same_f(this_preds)
if name in warm_st:
this_preds[0]=reserve
for j in range(preds_confirmed_cv.shape[1]):
current_prediction*=max(1,this_preds[j])
preds_confirmed_standard_cv[ii][j]=current_prediction
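# Growth rates are chained into absolute counts: level_j = level_{j-1} *
# max(1, rate_j), which also forces the predicted confirmed-case curve to be
# non-decreasing.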
for j in range(preds_confirmed_cv.shape[1]):
this_features_cv=features_cv
preds=predict(features_cv, input_name=model_directory +"fatal"+ str(j))
preds_fatalities_cv[:,j]=preds
print(" modelling fatalities, case %d, original cv %d and after %d "%(j,this_features_cv.shape[0],this_features_cv.shape[0]))
predictions=[]
for ii in range(preds_fatalities_cv.shape[0]):
current_prediction=standard_fatalities_cv[ii]
if current_prediction==0 and standard_confirmed_cv[ii]>400:
current_prediction=0.1
this_preds=preds_fatalities_cv[ii].tolist()
name=name_cv[ii]
reserve=this_preds[0]
if name in normal_fatality:
this_preds=this_preds
elif name in decay_4_first_10_then_1_fatality:
this_preds=decay_4_first_10_then_1_f(this_preds)
elif name in decay_16_first_10_then_1_fatality:
this_preds=decay_16_first_10_then_1_f(this_preds)
elif name in decay_4_last_12_linear_inter_fatality:
this_preds=decay_4_last_12_linear_inter_f(this_preds)
elif name in decay_4_fatality:
this_preds=decay_4_f(this_preds)
elif name in decay_2_fatality:
this_preds=decay_2_f(this_preds)
elif name in decay_2_last_12_linear_inter_fatality:
this_preds=decay_2_last_12_linear_inter_f(this_preds)
elif name in decay_1_5_fatality:
this_preds=decay_1_5_f(this_preds)
elif name in linear_last_12_fatality:
this_preds=linear_last_12_f(this_preds)
elif name in acceleratorx2_fatality:
this_preds=acceleratorx2_f(this_preds)
elif name in stay_same_fatality:
this_preds=stay_same_f(this_preds)
elif name in normal:
this_preds=this_preds
elif name in decay_4_first_10_then_1:
this_preds=decay_4_first_10_then_1_f(this_preds)
elif name in decay_16_first_10_then_1:
this_preds=decay_16_first_10_then_1_f(this_preds)
elif name in decay_4_last_12_linear_inter:
this_preds=decay_4_last_12_linear_inter_f(this_preds)
elif name in decay_4:
this_preds=decay_4_f(this_preds)
elif name in decay_2:
this_preds=decay_2_f(this_preds)
elif name in decay_2_last_12_linear_inter:
this_preds=decay_2_last_12_linear_inter_f(this_preds)
elif name in decay_1_5:
this_preds=decay_1_5_f(this_preds)
elif name in linear_last_12:
this_preds=linear_last_12_f(this_preds)
elif name in acceleratorx2:
this_preds=acceleratorx2_f(this_preds)
elif name in stay_same or "China" in name:
this_preds=stay_same_f(this_preds)
if name in warm_st:
this_preds[0]=reserve
for j in range(preds_fatalities_cv.shape[1]):
if current_prediction==0 and(preds_confirmed_standard_cv[ii][j]>400 or "Malta" in name or "Somalia" in name):
current_prediction=1.
if j==0 and "nan_Antigua and Barbuda" in name:
current_prediction=2.
if j==0 and 'nan_Burma' in name:
current_prediction=3.
current_prediction*=max(1,this_preds[j])
preds_fatalities_standard_cv[ii][j]=current_prediction<categorify> | train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
3,782,114 | key_to_confirmed_rate={}
key_to_fatality_rate={}
key_to_confirmed={}
key_to_fatality={}
print(len(features_cv), len(name_cv),len(standard_confirmed_cv),len(standard_fatalities_cv))
print(preds_confirmed_cv.shape,preds_confirmed_standard_cv.shape,preds_fatalities_cv.shape,preds_fatalities_standard_cv.shape)
for j in range(len(name_cv)) :
key_to_confirmed_rate[name_cv[j]]=preds_confirmed_cv[j,:].tolist()
key_to_fatality_rate[name_cv[j]]=preds_fatalities_cv[j,:].tolist()
key_to_confirmed[name_cv[j]] =preds_confirmed_standard_cv[j,:].tolist()
key_to_fatality[name_cv[j]]=preds_fatalities_standard_cv[j,:].tolist()<merge> | train_test_data = [train_df, test_df]
for dataset in train_test_data:
dataset['Title'] = dataset['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)
train_df['Title'].value_counts() | Titanic - Machine Learning from Disaster |
3,782,114 | train_new=train[["Date","ConfirmedCases","Fatalities","key","rate_ConfirmedCases","rate_Fatalities"]]
test_new=pd.merge(test,train_new, how="left", left_on=["key","Date"], right_on=["key","Date"] ).reset_index(drop=True)
test_new<categorify> | test_df['Title'].value_counts() | Titanic - Machine Learning from Disaster |
3,782,114 | def fillin_columns(frame,key_column, original_name, training_horizon, test_horizon, unique_values, key_to_values):
keys=frame[key_column].values
original_values=frame[original_name].values.tolist()
print(len(keys), len(original_values), training_horizon ,test_horizon,len(key_to_values))
for j in range(unique_values):
current_index=(j *(training_horizon +test_horizon)) +training_horizon
current_key=keys[current_index]
values=key_to_values[current_key]
co=0
for g in range(current_index, current_index + test_horizon):
original_values[g]=values[co]
co+=1
frame[original_name]=original_values
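# Layout assumption: each key owns(training_horizon + test_horizon)
# consecutive rows, so the first test row of key j sits at
# j*(training_horizon + test_horizon)+ training_horizon.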
all_days=int(test_new.shape[0]/len(unique_keys))
tr_horizon=all_days-horizon
print(all_days,tr_horizon, horizon)
fillin_columns(test_new,"key", 'ConfirmedCases', tr_horizon, horizon, len(unique_keys), key_to_confirmed)
fillin_columns(test_new,"key", 'Fatalities', tr_horizon, horizon, len(unique_keys), key_to_fatality)
submission=test_new[["ForecastId","ConfirmedCases","Fatalities"]]
submission['ConfirmedCases'] = submission.ConfirmedCases * 1.011
submission['Fatalities'] = submission.Fatalities * 1.011
submission.to_csv("submission.csv", index=False )<set_options> | title_mapping = {"Mr": 0, "Miss": 1, "Mrs": 2,
"Master": 3, "Dr": 3, "Rev": 3, "Col": 3, "Major": 3, "Mlle": 3,"Countess": 3,
"Ms": 3, "Lady": 3, "Jonkheer": 3, "Don": 3, "Dona" : 3, "Mme": 3,"Capt": 3,"Sir": 3 }
for dataset in train_test_data:
dataset['Title'] = dataset['Title'].map(title_mapping ) | Titanic - Machine Learning from Disaster |
3,782,114 | if not sys.warnoptions:
warnings.simplefilter("ignore")
warnings.filterwarnings("ignore")
<compute_test_metric> | X_train_df = train_df.drop(columns=['Survived', 'PassengerId', 'Name', 'Ticket', 'Cabin'])
X_test_df = test_df.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'] ) | Titanic - Machine Learning from Disaster |
3,782,114 | def RMSLE(pred,actual):
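# Root Mean Squared Logarithmic Error:
# RMSLE = sqrt(mean((log(pred + 1)- log(actual + 1)) ** 2));
# the +1 keeps the log defined when predictions or actuals are zero.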
return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)) )<load_from_csv> | y_train_df = train_df['Survived']
y_test_df = test_df['PassengerId'] | Titanic - Machine Learning from Disaster |
3,782,114 | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/test.csv")
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering> | X_train_df.isnull().sum() | Titanic - Machine Learning from Disaster |
3,782,114 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_delta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
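# Note: CreateInput reads the loop variables `country` and `province` defined
# below instead of taking them as parameters, so it only works when called
# from inside the per-region loop.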
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()<feature_engineering> | X_test_df.isnull().sum() | Titanic - Machine Learning from Disaster |
3,782,114 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_delta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
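# Same pipeline as the previous cell, but with SARIMAX(1,1,0)- an AR(1)
# model on first differences, with observation noise(measurement_error=True)
# - in place of exponential smoothing.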
model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()<compute_test_metric> | def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
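# Class-conditional fallbacks: 37/29/24 are roughly the median ages of
# 1st/2nd/3rd-class passengers in the Titanic training data.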
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age | Titanic - Machine Learning from Disaster |
3,782,114 | method_list = ['Exponential Smoothing','SARIMA']
method_val = [df_val_1,df_val_2]
for i in range(0,2):
df_val = method_val[i]
val_c = df_val[df_val['ConfirmedCases'].notnull()]
val_f = df_val[df_val['Fatalities'].notnull()]
method_score = [method_list[i]] + [RMSLE(val_c['ConfirmedCases'].values, val_c['ConfirmedCases_hat'].values)] + [RMSLE(val_f['Fatalities'].values, val_f['Fatalities_hat'].values)]
print(method_score )<save_to_csv> | X_train_df['Age'] = X_train_df[['Age','Pclass']].apply(impute_age,axis=1)
X_test_df['Age'] = X_test_df[['Age','Pclass']].apply(impute_age,axis=1 ) | Titanic - Machine Learning from Disaster |
3,782,114 | df_val = df_val_2
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False)
submission<set_options> | def Age_cat(x):
if x <=4 :
return 1
elif x>4 and x<=14:
return 2
elif x>14 and x<=30:
return 3
else:
return 4 | Titanic - Machine Learning from Disaster |
3,782,114 | warnings.filterwarnings("ignore" )<load_from_csv> | X_train_df['Age'] = X_train_df['Age'].apply(Age_cat)
X_test_df['Age'] = X_test_df['Age'].apply(Age_cat ) | Titanic - Machine Learning from Disaster |
3,782,114 | df_train=pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv")
df_test=pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv")
df_sub=pd.read_csv(".. /input/covid19-global-forecasting-week-4/submission.csv")
print(df_train.shape)
print(df_test.shape)
print(df_sub.shape )<count_unique_values> | X_train_df['With_someone'] = X_train_df['SibSp'] | X_train_df['Parch']
X_test_df['With_someone'] = X_test_df['SibSp'] | X_test_df['Parch']
X_train_df['Family'] = X_train_df['SibSp'] + X_train_df['Parch']+1
X_test_df['Family'] = X_test_df['SibSp'] + X_test_df['Parch']+1 | Titanic - Machine Learning from Disaster |
3,782,114 | print(f"Unique Countries: {len(df_train.Country_Region.unique())}" )<count_unique_values> | X_train_df['With_someone'] =X_train_df['With_someone'].apply(lambda x:1 if x >=1 else 0)
X_test_df['With_someone'] =X_test_df['With_someone'].apply(lambda x:1 if x >=1 else 0 ) | Titanic - Machine Learning from Disaster |
3,782,114 | print(f"Unique Regions: {df_train.shape[0]/len(df_train.Date.unique())}" )<count_values> | mod = X_train_df.Embarked.value_counts().argmax()
X_train_df.Embarked.fillna(mod, inplace=True ) | Titanic - Machine Learning from Disaster |
3,782,114 | df_train.Country_Region.value_counts()<count_missing_values> | fare_med = train_df.Fare.median()
X_test_df.Fare.fillna(fare_med, inplace=True ) | Titanic - Machine Learning from Disaster |
3,782,114 | print(f"Number of rows without Country_Region : {df_train.Country_Region.isna().sum() }" )<feature_engineering> | X_train_df.isnull().sum() | Titanic - Machine Learning from Disaster |
3,782,114 | df_train["UniqueRegion"]=df_train.Country_Region
df_train.UniqueRegion[df_train.Province_State.notna()]=df_train.Province_State+" , "+df_train.Country_Region
df_train[df_train.Province_State.notna()]<drop_column> | X_test_df.isnull().sum() | Titanic - Machine Learning from Disaster |
3,782,114 | df_train.drop(labels=["Id","Province_State","Country_Region"], axis=1, inplace=True )<feature_engineering> | X_train_df.replace({"male": 0, "female": 1}, inplace=True)
X_test_df.replace({"male": 0, "female": 1}, inplace=True)
X_train_df.replace({"S": 0, "C": 1, "Q": 2}, inplace=True)
X_test_df.replace({"S": 0, "C": 1, "Q": 2}, inplace=True ) | Titanic - Machine Learning from Disaster |
3,782,114 | test_dates=list(df_test.Date.unique())
print(f"Period :{len(df_test.Date.unique())} days")
print(f"From : {df_test.Date.min() } To : {df_test.Date.max() }" )<define_variables> | X_train_df = pd.get_dummies(X_train_df, columns=['Pclass', 'Embarked','Age','Title'], drop_first=True)
X_test_df = pd.get_dummies(X_test_df, columns=['Pclass', 'Embarked','Age','Title'], drop_first=True)
X_train_df.head() | Titanic - Machine Learning from Disaster |
3,782,114 | print(f"Total Regions : {df_test.shape[0]/43}" )<feature_engineering> | X_train_df = X_train_df.drop(columns=['SibSp','Parch'])
X_test_df = X_test_df.drop(columns=['SibSp','Parch'] ) | Titanic - Machine Learning from Disaster |
3,782,114 | df_test["UniqueRegion"]=df_test.Country_Region
df_test.UniqueRegion[df_test.Province_State.notna()]=df_test.Province_State+" , "+df_test.Country_Region
df_test.drop(labels=["Province_State","Country_Region"], axis=1, inplace=True )<count_unique_values> | sc_X = MinMaxScaler()
X_train_df[['Fare','Family']] = sc_X.fit_transform(X_train_df[['Fare','Family']])
X_test_df[['Fare','Family']] = sc_X.transform(X_test_df[['Fare','Family']] ) | Titanic - Machine Learning from Disaster |
3,782,114 | len(df_test.UniqueRegion.unique() )<define_variables> | from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import RandomForestClassifier | Titanic - Machine Learning from Disaster |
3,782,114 | only_train_dates=set(train_dates)-set(test_dates)
print("Only train dates : ",len(only_train_dates))
intersection_dates=set(test_dates)&set(train_dates)
print("Intersection dates : ",len(intersection_dates))
only_test_dates=set(test_dates)-set(train_dates)
print("Only Test dates : ",len(only_test_dates))<feature_engineering> | logi_clf = LogisticRegression(random_state=0)
logi_parm = {"penalty": ['l1', 'l2'], "C": [0.1, 0.5, 1, 5, 10, 50]}
svm_clf = SVC(random_state=0)
svm_parm = {'kernel': ['rbf', 'poly'], 'C': [0.1, 0.5, 1, 5, 10, 50], 'degree': [3, 5, 7],
'gamma': ['auto', 'scale']}
dt_clf = DecisionTreeClassifier(random_state=0)
dt_parm = {'criterion':['gini', 'entropy']}
knn_clf = KNeighborsClassifier()
knn_parm = {'n_neighbors':[5, 10, 15, 20], 'weights':['uniform', 'distance'], 'p': [1,2]}
gnb_clf = GaussianNB()
gnb_parm = {'priors':[None]}
clfs = [logi_clf, svm_clf, dt_clf, knn_clf]
params = [logi_parm, svm_parm, dt_parm, knn_parm] | Titanic - Machine Learning from Disaster |
3,782,114 | df_test_temp=pd.DataFrame()
df_test_temp["Date"]=df_test.Date
df_test_temp["ConfirmedCases"]=0.0
df_test_temp["Fatalities"]=0.0
df_test_temp["UniqueRegion"]=df_test.UniqueRegion
df_test_temp["Delta"]=1.0<feature_engineering> | clf1 = RandomForestClassifier()
clf1.fit(X_train_df,y_train_df)
rf_rand = GridSearchCV(clf1,{'n_estimators':[50,100,200,300,500],'max_depth':[i for i in range(2,11)]},cv=10)
rf_rand.fit(X_train_df,y_train_df)
print(rf_rand.best_score_)
print(rf_rand.best_params_ ) | Titanic - Machine Learning from Disaster |
3,782,114 | %%time
final_df=pd.DataFrame(columns=["Date","ConfirmedCases","Fatalities","UniqueRegion"])
for region in df_train.UniqueRegion.unique() :
df_temp=df_train[df_train.UniqueRegion==region].reset_index()
df_temp["Delta"]=1.0
size_train=df_temp.shape[0]
for i in range(1,df_temp.shape[0]):
if(df_temp.ConfirmedCases[i-1]>0):
df_temp.Delta[i]=df_temp.ConfirmedCases[i]/df_temp.ConfirmedCases[i-1]
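# Delta[i] is the day-over-day multiplicative growth of confirmed cases; a
# linear trend fitted to the last n=7 deltas is extrapolated over the test
# horizon below.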
n=7
delta_list=df_temp.tail(n ).Delta
delta_avg=df_temp.tail(n ).Delta.mean()
death_rate=df_temp.tail(1 ).Fatalities.sum() /df_temp.tail(1 ).ConfirmedCases.sum()
df_test_app=df_test_temp[df_test_temp.UniqueRegion==region]
df_test_app=df_test_app[df_test_app.Date>df_temp.Date.max() ]
X=np.arange(1,n+1 ).reshape(-1,1)
Y=delta_list
model=LinearRegression()
model.fit(X,Y)
df_temp=pd.concat([df_temp,df_test_app])
df_temp=df_temp.reset_index()
for i in range(size_train, df_temp.shape[0]):
n=n+1
pred=max(1,model.predict(np.array([n] ).reshape(-1,1)) [0])
df_temp.Delta[i]=pred
for i in range(size_train, df_temp.shape[0]):
df_temp.ConfirmedCases[i]=round(df_temp.ConfirmedCases[i-1]*df_temp.Delta[i],0)
df_temp.Fatalities[i]=round(death_rate*df_temp.ConfirmedCases[i],0)
size_test=df_temp.shape[0]-df_test_temp[df_test_temp.UniqueRegion==region].shape[0]
df_temp=df_temp.iloc[size_test:,:]
df_temp=df_temp[["Date","ConfirmedCases","Fatalities","UniqueRegion","Delta"]]
final_df=pd.concat([final_df,df_temp], ignore_index=True)
final_df.shape<save_to_csv> | clf2 = GradientBoostingClassifier()
clf2.fit(X_train_df,y_train_df)
gb_rand = GridSearchCV(clf2,{'n_estimators':[50,100,200,300,500],'learning_rate':[0.01,0.1,1],'max_depth':[i for i in range(2,11)]},cv=10)
gb_rand.fit(X_train_df,y_train_df)
print(gb_rand.best_score_)
print(gb_rand.best_params_ ) | Titanic - Machine Learning from Disaster |
3,782,114 | df_sub.Fatalities=final_df.Fatalities
df_sub.ConfirmedCases=final_df.ConfirmedCases
df_sub.to_csv("submission.csv", index=None )<import_modules> | clf3 = SVC(gamma='auto')
clf3.fit(X_train_df,y_train_df)
svc_rand = GridSearchCV(clf3,{'C':[5,10,15,20],'degree':[i for i in range(1,11)]},cv=10)
svc_rand.fit(X_train_df,y_train_df)
print(svc_rand.best_score_)
print(svc_rand.best_params_ ) | Titanic - Machine Learning from Disaster |
3,782,114 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import catboost
import tensorflow as tf<set_options> | clf1 = RandomForestClassifier(max_depth=6,n_estimators=200)
clf1.fit(X_train_df,y_train_df)
clf2 = GradientBoostingClassifier(n_estimators=300,learning_rate=0.01,max_depth=4,random_state=0)
clf2.fit(X_train_df,y_train_df)
clf3 = SVC(C=5,degree=1,gamma='auto',probability=True)
clf3.fit(X_train_df,y_train_df ) | Titanic - Machine Learning from Disaster |
3,782,114 | print(tf.test.is_gpu_available() )<load_from_disk> | eclf = VotingClassifier(estimators=[('rf',clf1),('gb',clf2),('svc',clf3)],voting='soft',weights=[2.5,2.5,2] ) | Titanic - Machine Learning from Disaster |
3,782,114 | df=pd.read_json('.. /input/whats-cooking-kernels-only/train.json', orient='records', dtype={"id":int, "cuisine":str,"ingredients":list} )<define_variables> | eclf.fit(X_train_df,y_train_df ) | Titanic - Machine Learning from Disaster |
3,782,114 | lists = [df['ingredients'].values[i] for i in range(len(df)) ]<concatenate> | pred = eclf.predict(X_test_df ) | Titanic - Machine Learning from Disaster |
3,782,114 | unique_ingredients = list(set(list(np.concatenate(lists))))<count_unique_values> | cols = ['PassengerId', 'Survived']
submit_df = pd.DataFrame(np.hstack(( y_test_df.values.reshape(-1,1),pred.reshape(-1,1))),
columns=cols ) | Titanic - Machine Learning from Disaster |
3,782,114 | <define_variables><EOS> | submit_df.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
3,326,054 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | GradientBoostingClassifier, ExtraTreesClassifier)
INPUT_DIR = '../input'
N_FOLDS = 4
N_ITER = 50
SEED = 32 | Titanic - Machine Learning from Disaster |
3,326,054 | inv_d = dict(zip(unique_ingredients, np.arange(len(unique_ingredients))))<feature_engineering> |
df_train_raw = pd.read_csv(os.path.join(INPUT_DIR, 'train.csv'))
df_test_raw = pd.read_csv(os.path.join(INPUT_DIR, 'test.csv')) | Titanic - Machine Learning from Disaster |
3,326,054 | X = np.zeros(shape =(len(df), len(unique_ingredients)))
for i in range(len(df)) :
if i % 10000 == 0:
print(i)
sample = df.loc[i]
l_ingr = sample['ingredients']
for ingr in l_ingr:
X[i,inv_d[ingr]] = 1.0<prepare_x_and_y> |
def get_title(name):
title_search = re.search(r'([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ""
df_full = [df_train_raw.copy() , df_test_raw.copy() ]
for dataset in df_full:
dataset['LastName'] = dataset['Name'].apply(lambda x: str.split(x, ",")[0])
dataset['LastName'] = dataset['LastName'].astype('category' ).cat.codes
dataset['Namelength'] = dataset['Name'].apply(len)
dataset['Title'] = dataset['Name'].apply(get_title)
dataset['Title'] = dataset['Title'].replace(
['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
dataset['Title'] = dataset['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5})
dataset['Title'] = dataset['Title'].fillna(0)
dataset['HasCabin'] = dataset["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
dataset['IsAlone'] = dataset['IsAlone'].astype('category')
dataset['Embarked'].fillna(dataset['Embarked'].mode() [0], inplace = True)
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
dataset['Fare'] = dataset['Fare'].fillna(dataset['Fare'].median())
dataset['CatFare'] = pd.qcut(dataset['Fare'], q=5, labels=False)
dataset['Age'].fillna(dataset['Age'].median() , inplace=True)
dataset['CatAge'] = pd.qcut(dataset['Age'], q=4, labels=False)
dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1})
drop_columns = ['PassengerId', 'Name', 'Cabin', 'Ticket']
dataset.drop(drop_columns, axis=1, inplace=True)
df_train, df_test = df_full
df_train.describe(include='all' ) | Titanic - Machine Learning from Disaster |
3,326,054 | y = df['cuisine'].values<count_unique_values> |
def min_max_scale(train_data, test_data, numeric_cols):
data = pd.concat([train_data, test_data])
scaled_train_data, scaled_test_data = train_data.copy() , test_data.copy()
for feature_name in numeric_cols:
max_v = data[feature_name].max()
min_v = data[feature_name].min()
scaled_train_data[feature_name] =(train_data[feature_name] - min_v)/(max_v - min_v)
scaled_test_data[feature_name] =(test_data[feature_name] - min_v)/(max_v - min_v)
return scaled_train_data, scaled_test_data
def categorical_encode(train_data, test_data, categorical_cols):
data = pd.concat([train_data, test_data])
result = data.copy()
for feature_name in categorical_cols:
max_v = data[feature_name].max()
min_v = data[feature_name].min()
result[feature_name] =(data[feature_name] - min_v)/(max_v - min_v)
return result
label_column = 'Survived'
cols = list(df_train.columns.values)
num_cols = [e for e in df_train.select_dtypes(include=[np.number] ).columns.tolist() if e != label_column]
cat_cols = [e for e in cols if e not in num_cols and e != label_column]
print(num_cols, cat_cols)
x_train, y_train = df_train.drop(label_column, axis=1), df_train[label_column].astype(int)
x_test = df_test
x_train, x_test = min_max_scale(x_train, x_test, num_cols)
x_train.describe(include = 'all' ) | Titanic - Machine Learning from Disaster |
3,326,054 | len(np.unique(y))<load_from_disk> |
MODELS = {
'lr': {
'model': LogisticRegression,
'params': {
'fit_intercept': [True, False],
'multi_class': ['ovr'],
'penalty': ['l2'],
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'tol': [0.01, 0.05, 0.1, 0.5, 1, 5],
'random_state': [SEED],
},
'best_params': {'tol': 0.05, 'solver': 'newton-cg', 'random_state': 32, 'penalty': 'l2', 'multi_class': 'ovr', 'fit_intercept': True},
'best_score': 0.813692480359147,
},
'mlp': {
'model': MLPClassifier,
'params': {
'activation' : ['identity', 'logistic', 'tanh', 'relu'],
'solver' : ['lbfgs', 'adam'],
'learning_rate' : ['constant', 'invscaling', 'adaptive'],
'learning_rate_init': [.01,.05,.1,.2,.5, 1, 2],
'random_state': [SEED],
},
'best_params': {'solver': 'lbfgs', 'random_state': 32, 'learning_rate_init': 2, 'learning_rate': 'adaptive', 'activation': 'identity'},
'best_score': 0.8092031425364759,
},
'dt': {
'model': DecisionTreeClassifier,
'params': {
'criterion': ['gini', 'entropy'],
'max_depth': range(6, 10),
'max_features': ['auto', 'sqrt', 'log2', None],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
},
'best_params': {'min_samples_split': 2, 'min_samples_leaf': 4, 'max_features': None, 'max_depth': 6, 'criterion': 'gini'},
'best_score': 0.8181818181818182,
},
'svc': {
'model': SVC,
'params': {
'C': [0.1, 0.5, 1., 2., 4.],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'gamma': ['auto', 'scale'],
'degree': range(5),
'tol': [0.1, 0.5, 1, 5],
},
'best_params': {'tol': 1, 'shrinking': False, 'probability': False, 'kernel': 'rbf', 'gamma': 'scale', 'degree': 4, 'C': 2.0},
'best_score': 0.8428731762065096
},
'rf': {
'model': RandomForestClassifier,
'params': {
'n_estimators': range(10, 251, 20),
'max_features': ['auto', 'sqrt', 'log2', None],
'max_depth': range(5, 20),
'min_samples_split': range(2, 10),
'min_samples_leaf': range(1, 10),
'bootstrap': [True, False],
'random_state': [SEED],
},
'best_params': {'random_state': 32, 'n_jobs': -1, 'n_estimators': 70, 'min_samples_split': 10, 'min_samples_leaf': 2, 'max_features': None, 'max_depth': 17, 'bootstrap': True},
'best_score': 0.8417508417508418
},
'ada': {
'model': AdaBoostClassifier,
'params': {
'n_estimators': range(10, 251, 20),
'learning_rate': [.01,.05,.1,.2,.5, 1, 2],
'algorithm': ['SAMME', 'SAMME.R'],
'random_state': [SEED],
},
'best_params': {'random_state': 32, 'n_estimators': 170, 'learning_rate': 1, 'algorithm': 'SAMME.R'},
'best_score': 0.8237934904601572
},
'et': {
'model': ExtraTreesClassifier,
'params': {
'n_estimators': range(10, 251, 20),
'max_features': ['auto', 'sqrt', 'log2', None],
'max_depth': range(5, 20),
'min_samples_split': range(2, 10),
'min_samples_leaf': range(1, 10),
'bootstrap': [True, False],
'random_state': [SEED],
},
'best_params': {'random_state': 32, 'n_jobs': -1, 'n_estimators': 70, 'min_samples_split': 5, 'min_samples_leaf': 2, 'max_features': None, 'max_depth': 11, 'bootstrap': True},
'best_score': 0.8294051627384961
},
'gb': {
'model': GradientBoostingClassifier,
'params': {
'n_estimators': range(10, 251, 20),
'max_depth': range(5, 20),
'loss': ['deviance', 'exponential'],
'learning_rate': [.01,.05,.1,.2,.5, 1, 2],
'subsample': [.25,.5,.8, 1.],
'min_samples_split': range(2, 10),
'min_samples_leaf': range(1, 10),
'random_state': [SEED],
},
'best_params': {'subsample': 0.5, 'random_state': 32, 'n_estimators': 150, 'min_samples_split': 5, 'min_samples_leaf': 1, 'max_depth': 13, 'loss': 'exponential', 'learning_rate': 1},
'best_score': 0.8361391694725028
}
} | Titanic - Machine Learning from Disaster |
3,326,054 | df_test=pd.read_json('.. /input/whats-cooking-kernels-only/test.json', orient='records', dtype={"id":int,"ingredients":list} )<prepare_x_and_y> |
FIT_FROM_SCRATCH = True
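# With FIT_FROM_SCRATCH=True every model is re-tuned by randomized search;
# set it to False to reuse the hard-coded best_params above and merely
# cross-validate them against the stored best_score.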
for name, model in MODELS.items() :
if 'best_score' in model and not FIT_FROM_SCRATCH:
print(f'Fitting {name}...')
model['best_estimator'] = model['model'](**model['best_params'] ).fit(x_train, y_train)
scores = cross_val_score(model['best_estimator'], x_train, y_train, cv=N_FOLDS)
score = sum(scores)/ len(scores)
diff = score - model['best_score']
if diff > 0:
print(f'Accuracy of model {name}: {score}(BIGGER for {diff})')
elif diff < 0:
print(f'Accuracy of model {name}: {score}(SMALLER for {-diff})')
else:
print(f'Accuracy of model {name}: {score}(SAME)')
else:
searcher = RandomizedSearchCV(param_distributions=model['params'],
estimator=model['model']() , scoring="accuracy",
verbose=1, n_iter=N_ITER, cv=N_FOLDS)
print(f'Fitting {name}...')
searcher.fit(x_train, y_train)
print(f'Best parameters found for {name}: {searcher.best_params_}')
print(f'Best accuracy found {name}: {searcher.best_score_}')
model['best_estimator'] = searcher.best_estimator_
model['best_params'] = searcher.best_params_
model['best_score'] = searcher.best_score_ | Titanic - Machine Learning from Disaster |
3,326,054 | X_test = np.zeros(shape =(len(df_test), len(unique_ingredients)))
for i in range(len(df_test)) :
if i % 1000 == 0:
print(i)
sample = df_test.loc[i]
l_ingr = sample['ingredients']
for ingr in l_ingr:
try:
X_test[i,inv_d[ingr]] = 1.0
except:
pass<import_modules> |
df = pd.DataFrame()
X_train, X_test = {}, {}
for name, model in MODELS.items() :
vtrain = MODELS[name]['best_estimator'].predict(x_train)
vtest = MODELS[name]['best_estimator'].predict(x_test)
df[name] = np.reshape(vtrain, [-1])
X_train[name] = vtrain
X_test[name] = vtest | Titanic - Machine Learning from Disaster |
3,326,054 | from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split<train_model> | pred = MODELS['svc']['best_estimator'].predict(x_test ) | Titanic - Machine Learning from Disaster |
3,326,054 | <predict_on_test><EOS> | submission = pd.DataFrame({'PassengerId': df_test_raw['PassengerId'], 'Survived': pred})
submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
3,786,214 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe> | import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics, preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier | Titanic - Machine Learning from Disaster |
3,786,214 | subm = pd.DataFrame({'id':df_test['id'], 'cuisine':Y_test} )<save_to_csv> | warnings.filterwarnings("ignore")
df_train_original = pd.read_csv(".. /input/train.csv")
df_test_original = pd.read_csv(".. /input/test.csv")
| Titanic - Machine Learning from Disaster |
3,786,214 | subm.to_csv('submission.csv', index=False )<set_options> | def trataDados(train, test, drop_list, target):
dados_origin = pd.concat(( train, test))
dados_origin = dados_origin.drop(drop_list, axis=1)
dados_origin[dados_origin.isnull().any(axis=1)]
if dados_origin.isnull().values.any() :
dados_origin = dados_origin.fillna(dados_origin.mean())
dados_origin = pd.get_dummies(dados_origin)
target_rounded = [round(label, ndigits=None)for label in dados_origin[target]]
target_rounded = [int(survived)for survived in target_rounded]
dados_origin[target] = target_rounded
return_train = dados_origin[: train.shape[0]]
train1, test1 = train_test_split(
return_train, test_size=0.20, random_state=600, stratify=train[target]
)
yo_train = train1[target]
yo_test = test1[target]
Xo_train = train1.drop(target, axis=1)
Xo_test = test1.drop(target, axis=1)
return_test = dados_origin[train.shape[0] :]
return_test = return_test.drop(target, axis=1)
return Xo_train, Xo_test, yo_train, yo_test, return_test
def export(name_csv, predict, base_test):
d_test = base_test[["PassengerId"]]
d_test["Survived"] = predict
d_test.to_csv(name_csv + "_titanic.csv", index=False ) | Titanic - Machine Learning from Disaster |
3,786,214 | %matplotlib inline<string_transform> | drop_list = ["Name", "Fare", "Cabin", "Ticket"]
X_train, X_test, y_train, y_test, df_validate = trataDados(
df_train_original, df_test_original, drop_list, "Survived"
) | Titanic - Machine Learning from Disaster |
3,786,214 | tokenize = TweetTokenizer().tokenize<load_from_disk> | temp = []
classifier = [
"Decision Tree",
"Random Forest",
"KNN",
"Extra Trees",
"Ada Boost",
"Gradient Boosting",
]
models = [
DecisionTreeClassifier(
criterion="gini",
max_depth=2,
max_features=None,
min_samples_leaf=1,
min_samples_split=2,
random_state=8,
splitter="random",
),
RandomForestClassifier(
min_samples_split=5,
min_samples_leaf=5,
criterion="entropy",
max_depth=1,
max_features="auto",
n_estimators=10,
random_state=10,
verbose=3,
),
KNeighborsClassifier(
algorithm="auto", leaf_size=30, n_neighbors=4, p=2, weights="uniform"
),
ExtraTreesClassifier(
criterion="gini",
max_depth=5,
max_features=None,
min_samples_leaf=1,
min_samples_split=2,
n_estimators=10,
random_state=5,
verbose=5,
),
AdaBoostClassifier(algorithm="SAMME", n_estimators=10, random_state=1),
GradientBoostingClassifier(
min_samples_split=5,
min_samples_leaf=5,
learning_rate=0.1,
loss="exponential",
max_depth=2,
max_features="sqrt",
n_estimators=20,
random_state=2,
verbose=1,
),
] | Titanic - Machine Learning from Disaster |
3,786,214 | print(os.listdir())
train = pd.read_json('/kaggle/input/whats-cooking-kernels-only/train.json')
test = pd.read_json('/kaggle/input/whats-cooking-kernels-only/test.json')
print(train.head())
ytrain = train['cuisine']
print(ytrain.head(5))
Id = test['id']
print(Id.head(5))<feature_engineering> | for index, model in enumerate(models):
model.fit(X_train, y_train)
predict = model.predict(X_test)
temp.append(metrics.accuracy_score(predict, y_test))
predict = model.predict(df_validate)
export(classifier[index], predict, df_validate ) | Titanic - Machine Learning from Disaster |
3,786,214 | tfidf = TfidfVectorizer(binary=True )<data_type_conversions> | models_dataframe = pd.DataFrame(temp, index=classifier)
models_dataframe.columns = ["Accuracy"]
print(models_dataframe)
| Titanic - Machine Learning from Disaster |
10,597,702 | train2 = train
print(( train2['ingredients'][0]))
print(arraytotext(train2['ingredients'][0]))<categorify> | %matplotlib inline | Titanic - Machine Learning from Disaster |
10,597,702 | train_features = tfidf.fit_transform(arraytotext(train['ingredients']))
test_features = tfidf.transform(arraytotext(test['ingredients']))<choose_model_class> | df_train = pd.read_csv(".. /input/titanic/train.csv")
df_test = pd.read_csv(".. /input/titanic/test.csv" ) | Titanic - Machine Learning from Disaster |
10,597,702 | classifier = SVC(C=200, kernel='rbf', degree=3,gamma=1, \
coef0=1, shrinking=True,tol=0.001, probability=False,\
cache_size=200,class_weight=None, verbose=False,\
max_iter=-1,decision_function_shape=None,\
random_state=None )<compute_train_metric> | df_train.isnull().sum() | Titanic - Machine Learning from Disaster |
10,597,702 | model = OneVsRestClassifier(classifier)
scores = cross_val_score(classifier,train_features, ytrain, cv=2)
print("Accuracy: %0.2f(+/- %0.2f)" % \
(scores.mean() , scores.std() * 2))<train_model> | df_test.isnull().sum() | Titanic - Machine Learning from Disaster |
10,597,702 | model.fit(train_features, ytrain )<predict_on_test> | df_train.drop(columns = ["Name" ,"Ticket" , "Cabin"], inplace=True)
df_test.drop(columns = ["Name" ,"Ticket" , "Cabin"], inplace=True ) | Titanic - Machine Learning from Disaster |
10,597,702 | predictions = model.predict(test_features)
print(predictions )<save_to_csv> | df_train.Sex.replace("male" , 0 , inplace =True)
df_train.Sex.replace("female" , 1 , inplace =True)
df_train.Embarked.replace("C" , 0 , inplace =True)
df_train.Embarked.replace("S" , 1 , inplace =True)
df_train.Embarked.replace("Q" , 2 , inplace =True ) | Titanic - Machine Learning from Disaster |
10,597,702 | submission = pd.DataFrame()
submission['id'] = Id
submission['cuisine'] = predictions
submission.to_csv('submission.csv', index=False)
<import_modules> | df_test.Sex.replace("male" , 0 , inplace =True)
df_test.Sex.replace("female" , 1 , inplace =True)
df_test.Embarked.replace("C" , 0 , inplace =True)
df_test.Embarked.replace("S" , 1 , inplace =True)
df_test.Embarked.replace("Q" , 2 , inplace =True ) | Titanic - Machine Learning from Disaster |
10,597,702 | tqdm.pandas()<load_from_disk> | df_train.Age.fillna(df_train.Age.median() , inplace= True)
df_test.Age.fillna(df_train.Age.median() , inplace = True ) | Titanic - Machine Learning from Disaster |
10,597,702 | train = pd.read_json('../input/train.json')
test = pd.read_json('../input/test.json' )<feature_engineering> | df_test.Fare.fillna(df_train.Fare.median() , inplace = True ) | Titanic - Machine Learning from Disaster |
10,597,702 | train['num_ingredients'] = train['ingredients'].apply(len)
train = train[train['num_ingredients'] > 1]<string_transform> | df_test.isnull().sum() | Titanic - Machine Learning from Disaster |
10,597,702 | lemmatizer = WordNetLemmatizer()
def preprocess(ingredients):
ingredients_text = ' '.join(ingredients)
ingredients_text = ingredients_text.lower()
ingredients_text = ingredients_text.replace('-', ' ')
words = []
for word in ingredients_text.split() :
if re.findall('[0-9]', word): continue
if len(word)<= 2: continue
if '’' in word: continue
word = lemmatizer.lemmatize(word)
if len(word)> 0: words.append(word)
return ' '.join(words)
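# Sanity checks: hand-picked(ingredient, expected)pairs exercising the
# cleaning rules above(lowercasing, hyphen splitting, digit/short-token
# removal, lemmatization).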
for ingredient, expected in [
('Eggs', 'egg'),
('all-purpose flour', 'all purpose flour'),
('purée', 'purée'),
('1% low-fat milk', 'low fat milk'),
('half & half', 'half half'),
('safetida(powder)', 'safetida(powder)')
]:
actual = preprocess([ingredient])
assert actual == expected, f'"{expected}" is expected but got "{actual}"'
10,597,702 | train['x'] = train['ingredients'].progress_apply(preprocess)
test['x'] = test['ingredients'].progress_apply(preprocess)
train.head()<feature_engineering> | X = df_train.drop(columns = ["Survived"])
y = df_train["Survived"] | Titanic - Machine Learning from Disaster |
10,597,702 | vectorizer = make_pipeline(
TfidfVectorizer(sublinear_tf=True),
FunctionTransformer(lambda x: x.astype('float16'), validate=False)
)
x_train = vectorizer.fit_transform(train['x'].values)
x_train.sort_indices()
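# sort_indices() puts the sparse CSR matrix into canonical sorted form, which
# some downstream consumers of sparse input expect.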
x_test = vectorizer.transform(test['x'].values )<categorify> | train_X, val_X, train_y, val_y = train_test_split(X, y, test_size=0.2, random_state=0 ) | Titanic - Machine Learning from Disaster |
10,597,702 | label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(train['cuisine'].values)
dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)) )<choose_model_class> | classifier = DecisionTreeClassifier(max_leaf_nodes=8 , random_state = 1)
classifier.fit(train_X,train_y)
preds_val = classifier.predict(df_test ) | Titanic - Machine Learning from Disaster |
10,597,702 | estimator = SVC(
C=80,
kernel='rbf',
gamma=1.7,
coef0=1,
cache_size=500,
)
classifier = OneVsRestClassifier(estimator, n_jobs=-1 )<train_model> | test_out = pd.DataFrame({
'PassengerId': df_test.PassengerId,
'Survived': preds_val
})
test_out.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
11,196,065 | %%time
classifier.fit(x_train, y_train )<categorify> | %matplotlib inline
sns.set()
warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
11,196,065 | y_pred = label_encoder.inverse_transform(classifier.predict(x_train))
y_true = label_encoder.inverse_transform(y_train)
print(f'accuracy score on train data: {accuracy_score(y_true, y_pred)}' )<save_to_csv> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
11,196,065 | y_pred = label_encoder.inverse_transform(classifier.predict(x_test))
test['cuisine'] = y_pred
test[['id', 'cuisine']].to_csv('submission.csv', index=False)
test[['id', 'cuisine']].head()<load_from_disk> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
11,196,065 | recipe_data = json.loads(open('../input/train.json' ).read() )<create_dataframe> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
11,196,065 | colnames = list(unique_ing)
data = pd.DataFrame(0, index=recipe_id, columns=colnames )<feature_engineering> | train.drop(columns=['PassengerId','Name','Ticket','Cabin'], axis=1, inplace=True)
test.drop(columns=['PassengerId','Name','Ticket','Cabin'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
11,196,065 | data['cuisine'] = cuisine<feature_engineering> | train['Sex'] = [1 if gender=='male' else 0 for gender in train['Sex']]
test['Sex'] = [1 if gender=='male' else 0 for gender in test['Sex']] | Titanic - Machine Learning from Disaster |
11,196,065 | for recipe in recipe_data:
index = recipe['id']
ingredients =recipe['ingredients']
for ingredient in ingredients:
data.at[index, ingredient] = 1<prepare_x_and_y> | train.Age.fillna(train.Age.median() , inplace=True)
test.Age.fillna(test.Age.median() , inplace=True ) | Titanic - Machine Learning from Disaster |
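The loop above builds a binary bag-of-ingredients matrix: one row per recipe id, one column per unique ingredient, 1 where the recipe contains it. The same idea on two toy recipes (hypothetical ids and ingredients):

import pandas as pd

recipes = [{'id': 10, 'ingredients': ['salt', 'egg']},
           {'id': 11, 'ingredients': ['egg', 'milk']}]
cols = ['salt', 'egg', 'milk']
data = pd.DataFrame(0, index=[r['id'] for r in recipes], columns=cols)
for r in recipes:
    for ing in r['ingredients']:
        data.at[r['id'], ing] = 1
print(data)   # rows 10 and 11, one 0/1 column per ingredient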
11,196,065 | y, label = pd.factorize(data['cuisine'] )<data_type_conversions> | train.drop(columns=['Age'], inplace=True)
test.drop(columns=['Age'], inplace=True ) | Titanic - Machine Learning from Disaster |
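pd.factorize returns both the integer codes and the array of unique labels, so label[k] recovers the cuisine name for code k. For example (toy values):

import pandas as pd

y, label = pd.factorize(pd.Series(['greek', 'thai', 'greek']))
print(y)       # [0 1 0]
print(label)   # Index(['greek', 'thai'], dtype='object')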
11,196,065 | X_train = data[colnames].values.astype(float)
y_train = keras.utils.to_categorical(y, num_classes=20 )<choose_model_class> | test.Fare.fillna(test.Fare.median() , inplace=True ) | Titanic - Machine Learning from Disaster |
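to_categorical one-hot encodes the integer codes to match the 20-way softmax output layer defined below. For example (toy codes, 3 classes):

import numpy as np
from keras.utils import to_categorical   # tf.keras.utils in newer installs

print(to_categorical(np.array([0, 2, 1]), num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]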
11,196,065 | model = Sequential()
model.add(Dropout(0.3, input_shape=(6714,)))   # declare the input size on the first layer
model.add(Dense(512, activation='linear'))
model.add(LeakyReLU(alpha=.02))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dense(20, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=['accuracy'] )<train_model> | train.drop(columns=['Fare'], axis=1, inplace=True)
test.drop(columns=['Fare'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
11,196,065 | model.fit(X_train, y_train,
epochs=25,
batch_size=250)
<load_from_disk> | train.Embarked.fillna(train.Embarked.mode()[0], inplace=True) | Titanic - Machine Learning from Disaster |
11,196,065 | test_data = json.loads(open('../input/test.json').read())<create_dataframe> | train = pd.get_dummies(train, columns=['Pclass','AgeGroup','Embarked'])
test = pd.get_dummies(test, columns=['Pclass','AgeGroup','Embarked'] ) | Titanic - Machine Learning from Disaster |
11,196,065 | test_recipe_id = []
for recipe in test_data:
test_recipe_id.append(recipe['id'])
test_df = pd.DataFrame(0, index=test_recipe_id, columns=colnames )<define_variables> | predictors = train.drop(columns=['Survived'], axis=1 ) | Titanic - Machine Learning from Disaster |
11,196,065 | ingr_checker = dict.fromkeys(colnames )<feature_engineering> | target = train[['Survived']] | Titanic - Machine Learning from Disaster |
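dict.fromkeys(colnames) is used purely for O(1) membership tests, so test-set ingredients never seen in training are silently skipped in the next cell; a plain set would serve equally well:

colnames = ['salt', 'egg']          # illustrative column names
ingr_checker = set(colnames)        # equivalent membership test
print('salt' in ingr_checker, 'kale' in ingr_checker)   # True False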
11,196,065 | for recipe in test_data:
index = recipe['id']
ingredients = recipe['ingredients']
for ingredient in ingredients:
if ingredient in ingr_checker:
test_df.at[index, ingredient] = 1<data_type_conversions> | from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
11,196,065 | X_test = test_df[colnames].values.astype(float )<predict_on_test> | from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
11,196,065 | prediction = model.predict(X_test )<prepare_output> | x_train,x_val,y_train,y_val = train_test_split(predictors,target,test_size=0.2,random_state=123 ) | Titanic - Machine Learning from Disaster |
11,196,065 | prediction_classes = prediction.argmax(axis=-1 )<prepare_output> | from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC | Titanic - Machine Learning from Disaster |
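model.predict returns one softmax probability row per recipe; argmax(axis=-1) picks the most probable class index, which label[...] then maps back to a cuisine name. For example (toy probabilities):

import numpy as np

probs = np.array([[0.1, 0.7, 0.2],
                  [0.8, 0.1, 0.1]])
print(probs.argmax(axis=-1))   # [1 0] - indices into the factorize() label array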
11,196,065 | label_names = label[prediction_classes]
df_output = pd.DataFrame({'id' : test_recipe_id, 'cuisine' : label_names})
df_output.head()<save_to_csv> | from sklearn.metrics import accuracy_score | Titanic - Machine Learning from Disaster |
11,196,065 | df_output.to_csv("output.csv", header=True, index=False )<import_modules> | lr = LogisticRegression()
lr.fit(x_train,y_train)
preds = lr.predict(x_val)
lr_accuracy = accuracy_score(y_val,preds)
print(f'Logistic Regression accuracy: {lr_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
11,196,065 | import os
import json
import re
import pandas as pd
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer<categorify> | knn = KNeighborsClassifier()
knn.fit(x_train,y_train)
preds = knn.predict(x_val)
knn_accuracy = accuracy_score(y_val, preds)
print(f'KNN accuracy: {knn_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
11,196,065 | def get_replacements() :
return {'wasabe': 'wasabi', '-': '', 'sauc': 'sauce',
'baby spinach': 'babyspinach', 'coconut cream': 'coconutcream',
'coriander seeds': 'corianderseeds', 'corn tortillas': 'corntortillas',
'cream cheese': 'creamcheese', 'fish sauce': 'fishsauce',
'purple onion': 'purpleonion','refried beans': 'refriedbeans',
'rice cakes': 'ricecakes', 'rice syrup': 'ricesyrup',
'sour cream': 'sourcream', 'toasted sesame seeds': 'toastedsesameseeds',
'toasted sesame oil': 'toastedsesameoil', 'yellow onion': 'yellowonion'}<string_transform> | dt = DecisionTreeClassifier()
dt.fit(x_train,y_train)
preds = dt.predict(x_val)
dt_accuracy = accuracy_score(y_val, preds)
print(f'Decision Tree accuracy: {dt_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
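The replacement table in get_replacements fuses multi-word ingredients into single tokens so that TfidfVectorizer, which tokenizes on word boundaries, treats e.g. 'sour cream' as one feature instead of two unrelated words. A minimal illustration with two entries:

replacements = {'sour cream': 'sourcream', 'fish sauce': 'fishsauce'}
text = 'sour cream and fish sauce'
for key, value in replacements.items():
    text = text.replace(key, value)
print(text)   # 'sourcream and fishsauce'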
11,196,065 | def transform_to_single_string(ingredients, lemmatizer, replacements, stop_pattern):
    ingredients_text = ' '.join(ingredients)
for key, value in replacements.items() :
ingredients_text = ingredients_text.replace(key, value)
words = []
for word in ingredients_text.split() :
if not stop_pattern.match(word)and len(word)> 2:
word = lemmatizer.lemmatize(word)
words.append(word)
return ' '.join(words )<choose_model_class> | rf = RandomForestClassifier()
rf.fit(x_train,y_train)
preds = rf.predict(x_val)
rf_accuracy = accuracy_score(y_val, preds)
print(f'RandomForest accuracy: {rf_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
11,196,065 | def get_estimator() :
return SVC(C=300,
kernel='rbf',
gamma=1.5,
shrinking=True,
tol=0.001,
cache_size=1000,
class_weight=None,
max_iter=-1,
decision_function_shape='ovr',
random_state=42 )<feature_engineering> | gbc = GradientBoostingClassifier()
gbc.fit(x_train,y_train)
preds = gbc.predict(x_val)
gbc_accuracy = accuracy_score(y_val, preds)
print(f'GradientBoostClassifier accuracy: {gbc_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
11,196,065 | def show_unique_ingredients(train):
ingredients = {}
for idx, row in train.iterrows() :
for ingredient in row['ingredients']:
if ingredient not in ingredients:
ingredients[ingredient] = {'sum': 0}
previous = ingredients[ingredient][row['cuisine']] if row['cuisine'] in ingredients[ingredient] else 0
ingredients[ingredient][row['cuisine']] = 1 + previous
ingredients[ingredient]['sum'] += 1
for ingredient in sorted(ingredients):
for cuisine in sorted(ingredients[ingredient], key=ingredients[ingredient].get, reverse=True):
print(f'{ingredient}:{cuisine}:{ingredients[ingredient][cuisine]}' )<feature_engineering> | svc = SVC()
svc.fit(x_train,y_train)
preds = svc.predict(x_val)
svc_accuracy = accuracy_score(y_val, preds)
print(f'SVC accuracy: {svc_accuracy*100}' ) | Titanic - Machine Learning from Disaster |
11,196,065 | def preprocess(train, test):
lemmatizer = WordNetLemmatizer()
replacements = get_replacements()
train['ingredients'] = train['ingredients'].apply(lambda x: list(map(lambda y: y.lower() , x)))
test['ingredients'] = test['ingredients'].apply(lambda x: list(map(lambda y: y.lower() , x)))
    stop_pattern = re.compile(r'[\d’%]')
    transform = lambda ingredients: transform_to_single_string(ingredients, lemmatizer, replacements, stop_pattern)
train['x'] = train['ingredients'].apply(transform)
test['x'] = test['ingredients'].apply(transform)
vectorizer = make_pipeline(
TfidfVectorizer(sublinear_tf=True),
FunctionTransformer(lambda x: x.astype('float'), validate=False)
)
x_train = vectorizer.fit_transform(train['x'].values)
x_train.sort_indices()
x_test = vectorizer.transform(test['x'].values)
    return x_train, x_test<choose_model_class> | models = pd.DataFrame({'Model':['LogisticRegression','KNN','DecisionTree','RandomForest','GradientBoostClassifier','SVM'],
'Accuracy':[lr_accuracy*100,knn_accuracy*100,dt_accuracy*100,rf_accuracy*100,gbc_accuracy*100,svc_accuracy*100]})
models | Titanic - Machine Learning from Disaster |
11,196,065 | %%time
def main() :
    train = pd.read_json('../input/train.json')
    test = pd.read_json('../input/test.json')
train['num_ingredients'] = train['ingredients'].apply(lambda x: len(x))
test['num_ingredients'] = test['ingredients'].apply(lambda x: len(x))
train = train[train['num_ingredients'] > 2]
x_train, x_test = preprocess(train, test)
estimator = get_estimator()
y_train = train['cuisine'].values
classifier = OneVsRestClassifier(estimator, n_jobs=-1)
classifier.fit(x_train, y_train)
test['cuisine'] = classifier.predict(x_test)
test[['id', 'cuisine']].to_csv('submission.csv', index=False)
main()<set_options> | data = pd.read_csv('/kaggle/input/titanic/test.csv')
ids = data['PassengerId'] | Titanic - Machine Learning from Disaster |
11,196,065 | %matplotlib inline
init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print(os.listdir("../input"))<load_from_disk> | preds = dt.predict(test ) | Titanic - Machine Learning from Disaster |
11,196,065 | train_data = pd.read_json('../input/train.json')
test_data = pd.read_json('../input/test.json')<train_model> | output = pd.DataFrame({'PassengerId':ids, 'Survived':preds} ) | Titanic - Machine Learning from Disaster |
11,196,065 | print("The training data consists of {} recipes".format(len(train_data)) )<count_unique_values> | output.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
9,888,019 | print("Number of cuisine categories: {}".format(len(train_data.cuisine.unique())))
train_data.cuisine.unique()<randomize_order> | warnings.filterwarnings("ignore")
| Titanic - Machine Learning from Disaster |
9,888,019 | def random_colours(number_of_colors):
    colors = []
    for i in range(number_of_colors):
        # the original line was truncated in the source; a random hex colour string fits the usage
        # (assumes the standard-library random module is imported earlier in the notebook)
        colors.append("#" + ''.join(random.choice('0123456789ABCDEF') for _ in range(6)))
    return colors<count_values> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.info() | Titanic - Machine Learning from Disaster |
9,888,019 | labelpercents = []
for i in train_data.cuisine.value_counts():
    percent = (i / sum(train_data.cuisine.value_counts())) * 100
    labelpercents.append(f"{percent:.2f}%")<set_options> |
def substrings_in_string(big_string, substrings):
for substring in substrings:
if big_string.find(substring)!= -1:
return substring
print(big_string)
return np.nan
def phase1clean(df):
df.Fare = df.Fare.map(lambda x: np.nan if x==0 else x)
df.Cabin = df.Cabin.fillna('Unknown')
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
df['Deck']=df['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
title_list=['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
'Dr', 'Ms', 'Mlle','Col', 'Capt', 'Mme', 'Countess',
'Don', 'Jonkheer']
df['Title']=df['Name'].map(lambda x: substrings_in_string(x, title_list))
def replace_titles(x):
title=x['Title']
if title in ['Countess','Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
return 'Rare'
        elif title in ['Mme']:   # 'Countess' is already handled by the 'Rare' branch above
return 'Mrs'
elif title in ['Mlle', 'Ms']:
return 'Miss'
        elif title =='Dr':
            if x['Sex']=='male':   # Sex values are lowercase in the Titanic data
                return 'Mr'
            else:
                return 'Mrs'
else:
return title
df['Title']=df.apply(replace_titles, axis=1)
df['Family_Size']=df['SibSp']+df['Parch']
return df
def fare_grouping(x):
bins = [-1, 7.91, 14.454, 31, 99, 250, np.inf]
    names = [1, 2, 3, 4, 5, 6]   # numeric bin codes, mapped back to letters just below
x['Fare_Bin'] = pd.cut(x['Fare'], bins ,labels=names ).astype('int')
    dict_fare={1 : 'a' , 2 : 'b' , 3 : 'c' , 4 : 'd' , 5: 'e' , 6 : 'f'}
    x['Fare_Bin']=x['Fare_Bin'].map(dict_fare)
return x
def fill_nan(x):
    # impute missing Age with the mean age per Title, missing Fare with the mean fare per Pclass
    num_ages=x.groupby('Title')['Age'].mean().to_dict()
    x.loc[x['Age'].isnull() ,'Age']=x.loc[x['Age'].isnull() ,'Title'].map(num_ages)
    num_fare=x.groupby('Pclass')['Fare'].mean().to_dict()
    x.loc[x['Fare'].isnull() ,'Fare']=x.loc[x['Fare'].isnull() ,'Pclass'].map(num_fare)
fare_grouping(x)
f_index=x[x['Embarked'].isnull() ].index
x=x.drop(f_index,axis=0)
return x
def age_grouping(x):
    k_data=pd.concat([x['Age'],x['Survived']],axis=1)
    sil = []   # silhouette score for each candidate k
for k in range(2, 9):
kmeans = KMeans(n_clusters = k ).fit(k_data)
labels = kmeans.labels_
sil.append(silhouette_score(k_data, labels, metric = 'euclidean'))
plt_data=pd.concat([pd.Series(range(2,9)) ,pd.Series(sil)],axis=1)
plt_data.rename(columns={ 0 :'clusters' , 1 :'silhouette scores'}, inplace=True)
kmeans = KMeans(n_clusters=4, random_state=0 ).fit(k_data)
labels_=kmeans.labels_
k_data['age_grp']=labels_+1
k_data['Passenger_Id']=x['PassengerId']
agelist=[]
for i in range(0, 15):
agelist.append('a')
for i in range(15, 29):
agelist.append('b')
for i in range(29, 45):
agelist.append('c')
for i in range(45, 90):
agelist.append('d')
age_dict={v: k for v, k in enumerate(agelist)}
x['Age_Grp']=x['Age'].astype(int ).map(age_dict)
return x,age_dict
def survived_fams(df):
df['Last_Name'] = df['Name'].apply(
lambda df: str.split(df, ",")[0])
default_survival_rate = 0.5
df['Family_Survival'] = default_survival_rate
for grp, grp_df in df[['Survived', 'Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId','SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df.loc[df['PassengerId'] ==
passID, 'Family_Survival'] = 1
elif(smin == 0.0):
df.loc[df['PassengerId'] ==
passID, 'Family_Survival'] = 0
for _, grp_df in df.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(
row['Family_Survival'] == 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df.loc[df['PassengerId'] ==
passID, 'Family_Survival'] = 1
elif(smin == 0.0):
df.loc[df['PassengerId'] ==
passID, 'Family_Survival'] = 0
return df
def is_alone(x):
fam_list={False : 0 , True : 1}
x['is_alone'] =(x['Family_Size']==0 ).map(fam_list)
return x
def has_cabin(x):
fam_list={False : 0 , True : 1}
x['has_cabin'] =(x['Deck']!='Unknown' ).map(fam_list)
return x
def is_3stclass(x):
class_list={False : 0 , True : 1}
x['is_3rdclass'] =(x['Pclass']== 3 ).map(class_list)
return x
def class_categorizer(x):
dict_class={1 : 'a' , 2 : 'b' , 3 : 'c' }
x['Pclass']=x['Pclass'].map(dict_class)
return x
def phase2clean(train, test):
train=fill_nan(train)
train,age_dict=age_grouping(train)
test=fill_nan(test)
test['Age_Grp']=test['Age'].astype(int ).map(age_dict)
for df in [train, test]:
df['Fare_Per_Person']=df['Fare']/(df['Family_Size']+1)
for df in [train, test]:
df['Age*Class']=df['Age']*df['Pclass']
combined=pd.concat([train,test])
combined=survived_fams(combined)
combined=is_alone(combined)
combined=has_cabin(combined)
combined=is_3stclass(combined)
train=combined.iloc[:len(train),:]
test=combined.iloc[len(train):,:]
test=test.drop(['Survived'],axis=1)
return [train,test]
def get_data() :
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
submit_x =pd.read_csv('/kaggle/input/titanic/test.csv')
original_train = pd.read_csv('/kaggle/input/titanic/train.csv')
original_test = pd.read_csv('/kaggle/input/titanic/test.csv')
x=phase1clean(train_data)
pred_set=phase1clean(submit_x)
x,pred_set=phase2clean(x, pred_set)
x=x.drop(['PassengerId','Name','Ticket','Cabin','Last_Name'],axis=1)
pred_set=pred_set.drop(['PassengerId','Name','Ticket','Cabin','Last_Name'],axis=1)
y = x.Survived
x = x.loc[:,x.columns!='Survived']
return x,y,pred_set,original_train,original_test
| Titanic - Machine Learning from Disaster |
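survived_fams above implements the widely shared "family survival" feature: passengers are grouped first by (last name, fare) and then by ticket, and each passenger inherits 1 if any other member of the group survived, 0 if every other member died, and keeps the 0.5 default otherwise. A stripped-down sketch of the first grouping pass (toy rows with hypothetical values):

import pandas as pd

df = pd.DataFrame({'PassengerId': [1, 2, 3],
                   'Last_Name': ['Smith', 'Smith', 'Jones'],
                   'Fare': [70.0, 70.0, 8.05],
                   'Survived': [1, 0, 1]})
df['Family_Survival'] = 0.5
for _, grp in df.groupby(['Last_Name', 'Fare']):
    if len(grp) != 1:
        for ind, row in grp.iterrows():
            others = grp.drop(ind)['Survived']   # outcomes of the other group members
            if others.max() == 1.0:
                df.loc[df['PassengerId'] == row['PassengerId'], 'Family_Survival'] = 1
            elif others.min() == 0.0:
                df.loc[df['PassengerId'] == row['PassengerId'], 'Family_Survival'] = 0
print(df[['PassengerId', 'Family_Survival']])   # passenger 1 -> 0, passenger 2 -> 1, passenger 3 -> 0.5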
9,888,019 | print("Word Cloud Function.. ")
stopwords = set(STOPWORDS)
size =(20,10)
def cloud(text, title, stopwords=stopwords, size=size):
mpl.rcParams['figure.figsize']=(10.0,10.0)
mpl.rcParams['font.size']=12
mpl.rcParams['savefig.dpi']=100
mpl.rcParams['figure.subplot.bottom']=.1
wordcloud = WordCloud(width=1600, height=800,
background_color='black',
stopwords=stopwords,
).generate(str(text))
fig = plt.figure(figsize=size, dpi=80, facecolor='k',edgecolor='k')
plt.imshow(wordcloud,interpolation='bilinear')
plt.axis('off')
plt.title(title, fontsize=50,color='y')
plt.tight_layout(pad=0)
plt.show()
train_data["ing"] = train_data.ingredients.apply(lambda x: list(map(str, x)) , 1 ).str.join(' ')
cloud(train_data["ing"].values, title="All Cuisine", size=[8,5] )<string_transform> | x,y,pred_set,original_train,pred_set_original=get_data()
x.info()
x.head() | Titanic - Machine Learning from Disaster |