Dataset schema (column, dtype, value-length range):

kernel_id    int64          24.2k – 23.3M
prompt       stringlengths  8 – 1.85M
completion   stringlengths  1 – 182k
comp_name    stringlengths  5 – 57
13,735,522
datas = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
        datas.append(pd.read_csv(os.path.join(dirname, filename)))

# Re-read the competition files so their positions in `datas` are deterministic.
datas[1] = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
datas[2] = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
population = pd.read_csv('/kaggle/input/covid19-global-forecasting-locations-population/locations_population.csv')
population = population.rename(columns={'Province.State': 'Province_State', 'Country.Region': 'Country_Region'})
<feature_engineering>
models = [
    {
        'name': 'Logistic regression',
        'estimator': LogisticRegression(random_state=42),
        'hyperparameters': {
            'solver': ['newton-cg', 'lbfgs', 'liblinear']
        }
    },
    {
        'name': 'Decision tree',
        'estimator': DecisionTreeClassifier(random_state=42),
        'hyperparameters': {
            'criterion': ['entropy', 'gini'],
            'splitter': ['best', 'random'],
            'max_depth': range(4, 9),
            'max_features': ['auto', 'sqrt', 'log2'],
            'min_samples_split': [3, 4, 5]
        }
    },
    {
        'name': 'RandomForestClassifier',
        'estimator': RandomForestClassifier(random_state=42),
        'hyperparameters': {
            'n_estimators': [10, 15, 18, 19, 20, 21, 22],
            'criterion': ['entropy', 'gini'],
            'max_depth': range(4, 9),
            'max_features': ['auto', 'sqrt', 'log2'],
            'min_samples_split': [3, 4, 5]
        }
    }
]

def tune_models(models):
    for model in models:
        print(model['name'])
        print('-' * len(model['name']))
        grid = GridSearchCV(model['estimator'], param_grid=model['hyperparameters'], cv=5, scoring='accuracy')
        grid.fit(mytrain, target)
        model['best_score'] = grid.best_score_
        model['best_params'] = grid.best_params_
        model['best_model'] = grid.best_estimator_
        print('Best accuracy: {:.2f}%'.format(model['best_score'] * 100))
        print('Best parameters: {}'.format(model['best_params']))
    return models

models = tune_models(models)
Titanic - Machine Learning from Disaster
13,735,522
tmp_country_label = []
for idata in datas[1].itertuples():
    try:
        tmp_country_label.append(idata.Country_Region + '_' + idata.Province_State)
    except:  # Province_State is NaN for country-level rows
        tmp_country_label.append(idata.Country_Region)
datas[1]['country_label'] = tmp_country_label
np.isnan(datas[1].loc[0].Province_State)  # confirm the first row has no province
country_list = datas[1].country_label.unique().tolist()
<compute_test_metric>
mymodel = RandomForestClassifier(criterion='entropy', max_depth=7, max_features='auto',
                                 min_samples_split=4, n_estimators=18, random_state=42)
Titanic - Machine Learning from Disaster
13,735,522
def rmsle(pred, true):
    assert pred.shape[0] == true.shape[0]
    return K.sqrt(K.mean(K.square(K.log(pred + 1) - K.log(true + 1))))

def attention_mechanism(days, input_):
    x = Dense(256, activation='sigmoid')(input_)
    x = Dense(days, activation='softmax')(x)
    return x

def attention_model(input_size, days=21, batch_size=32, epochs=200, lr=1e-3):
    country_input = Input(shape=(313,), name='country_onehot')
    inputs = Input(shape=(None, input_size), name='encoder_input')
    target_number = Input(shape=(1,), name='target_input')
    flag_input = Input(shape=(1,), name='flag_input')
    x = Masking(mask_value=0, input_shape=(None, input_size))(inputs)
    x = GRU(128, name='GRU_layer1', return_sequences=True)(x)
    attention_x = attention_mechanism(days, x)
    gru_out = Permute((2, 1))(x)
    attention_mul = K.batch_dot(gru_out, attention_x)
    attention_mul = Permute((2, 1))(attention_mul)
    x = GRU(128, name='GRU_layer2', return_sequences=True)(attention_mul)
    gru_x = Flatten()(x)
    x = Dense(32, activation='relu')(gru_x)
    x = Dense(128, activation='relu')(x)
    x = Dense(256, activation='relu')(x)
    outputs = Dense(1, activation='sigmoid')(x)
    print(outputs.shape, flag_input.shape, target_number.shape)
    # Predict a rate in [0, 1] and scale it by the last known count.
    outputs = target_number * (flag_input + outputs)
    optimizer = Adam(lr=lr, name='adam')
    model = Model([inputs, country_input, target_number, flag_input], outputs, name='gru_network')
    model.compile(optimizer=optimizer, loss=rmsle)
    return model
<init_hyperparams>
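For reference, a minimal NumPy sketch of the same RMSLE metric as the Keras-backend version above, on plain 1-D arrays (illustrative values, not competition data):

import numpy as np

def rmsle_np(pred, true):
    # log1p(x) == log(x + 1), matching K.log(pred + 1) above
    return np.sqrt(np.mean((np.log1p(pred) - np.log1p(true)) ** 2))

print(rmsle_np(np.array([10.0, 100.0]), np.array([11.0, 90.0])))  # small positive value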
mymodel.fit(mytrain, target)
myprediction = mymodel.predict(mytrain)
# scikit-learn metrics take (y_true, y_pred) in that order.
print(f'Precision: {precision_score(target, myprediction)*100:.2f}%')
print(f'Accuracy: {accuracy_score(target, myprediction)*100:.2f}%')
Titanic - Machine Learning from Disaster
13,735,522
class corona19_predict:
    def __init__(self, df, population, days=21, batch_size=8, epochs=200):
        self.days = days
        self.batch_size = batch_size
        self.epochs = epochs
        self.confirmed_cases_model = attention_model(2, days, lr=1e-4)
        self.fatalities_model = attention_model(5, days, lr=1e-6)
        self.cal_increase_rate(df, population)

    def cal_increase_rate(self, df, population):
        pre_ccd = 0
        pre_fd = 0
        confirmed_cases_diff = []
        fatalities_diff = []
        for idata in df.itertuples():
            if idata.ConfirmedCases < pre_ccd:  # a new location block starts
                pre_ccd = 0
                pre_fd = 0
            confirmed_cases_diff.append(idata.ConfirmedCases - pre_ccd)
            fatalities_diff.append(idata.Fatalities - pre_fd)
            pre_ccd = idata.ConfirmedCases
            pre_fd = idata.Fatalities
        df['ConfirmedCases_diff'] = confirmed_cases_diff
        df['Fatalities_diff'] = fatalities_diff
        df['Fatalities_diff'] = df['Fatalities_diff'].clip(0)
        df['ConfirmedCases_diff_percent'] = df['ConfirmedCases_diff'].values / (df['ConfirmedCases'].values + 1.0e-10)
        df['Fatalities_diff_percent'] = df['Fatalities_diff'].values / (df['Fatalities'].values + 1.0e-10)
        tmp_country_label = []
        for idata in df.itertuples():
            try:
                tmp_country_label.append(idata.Country_Region + '_' + idata.Province_State)
            except:  # Province_State is NaN
                tmp_country_label.append(idata.Country_Region)
        df['country_label'] = tmp_country_label
        tmp_country_label = []
        for idata in population.itertuples():
            try:
                tmp_country_label.append(idata.Country_Region + '_' + idata.Province_State)
            except:
                tmp_country_label.append(idata.Country_Region)
        population['country_label'] = tmp_country_label
        self.target_df = df
        self.population_df = population
        self.country_list = df['country_label'].unique().tolist()
        return df

    def get_country_onehot(self, country_str):
        country_onehot = np.zeros(len(self.country_list))
        country_onehot[self.country_list.index(country_str)] = 1
        return country_onehot

    def encoded_data(self, country_onehot, target_date_str, country_population):
        date_list = target_date_str.split('-')
        delta = timedelta(days=1)
        date = datetime(int(date_list[0]), int(date_list[1]), int(date_list[2])) - delta
        day_list = []
        for i in range(self.days):
            day_list.append(date.strftime('%Y-%m-%d'))
            date -= delta
        day_list = day_list[::-1]
        confirmed_cases = 0
        fatalities = 0
        encoded_data = []
        if self.country_df.country_label.values[0] != self.country_list[np.argmax(country_onehot)]:
            self.country_df = self.target_df[self.target_df.country_label == self.country_list[np.argmax(country_onehot)]]
        for date_str in day_list:
            tmp_data_df = self.country_df[self.country_df.Date == date_str]
            if len(tmp_data_df) == 0:
                pass  # train data not exist: carry forward the last seen values
            else:
                confirmed_cases_diff = tmp_data_df.ConfirmedCases_diff.values[0]
                fatalities_diff = tmp_data_df.Fatalities_diff.values[0]
                confirmed_cases = tmp_data_df.ConfirmedCases.values[0]
                fatalities = tmp_data_df.Fatalities.values[0]
                confirmed_cases_diff_percent = tmp_data_df.ConfirmedCases_diff_percent.values[0]
                fatalities_diff_percent = tmp_data_df.Fatalities_diff_percent.values[0]
            encoded_data.append([confirmed_cases, fatalities, confirmed_cases_diff, fatalities_diff,
                                 confirmed_cases_diff_percent, fatalities_diff_percent, country_population])
        return encoded_data

    def make_train_data(self):
        train_data = {'country_onehot': [], 'encoder_input': []}
        train_label = []
        p_country = ''
        for idata in tqdm(self.target_df.itertuples(), total=len(self.target_df), position=0):
            if p_country != idata.country_label:
                self.country_df = self.target_df[self.target_df.country_label == idata.country_label]
                country_population = self.population_df[self.population_df.country_label == idata.country_label].Population.values[0]
                p_country = idata.country_label
            if idata.Date > self.target_df.iloc[self.days + 1].Date:
                tmp_onehot_data = self.get_country_onehot(idata.country_label)
                tmp_encoded_data = self.encoded_data(tmp_onehot_data, idata.Date, country_population)
                try:
                    if np.sum(np.array(tmp_encoded_data)[:, :]) != 0:
                        train_data['country_onehot'].append(tmp_onehot_data)
                        train_data['encoder_input'].append(tmp_encoded_data)
                        train_label.append([idata.ConfirmedCases, idata.Fatalities,
                                            idata.ConfirmedCases_diff, idata.Fatalities_diff,
                                            idata.ConfirmedCases_diff_percent, idata.Fatalities_diff_percent,
                                            country_population])
                except:
                    print(idata.country_label, country_population)
                    print(tmp_encoded_data, idata.country_label, country_population)  # was: tmp_ecoded_data (NameError)
        return [np.array(train_data['encoder_input']), np.array(train_data['country_onehot'])], np.array(train_label)

    def train_data_fatalities(self):
        # Cache the encoded training data between runs.
        try:
            with gzip.open('encoded_data.dat', 'rb') as f:
                X_train, y_train = pickle.load(f)
        except:
            X_train, y_train = self.make_train_data()
            with gzip.open('encoded_data.dat', 'wb') as f:
                pickle.dump([X_train, y_train], f)
        x_train = X_train.copy()
        # Per-capita features plus the raw death rate.
        x_train[0] = np.concatenate([
            (x_train[0][:, :, 0] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
            (x_train[0][:, :, 2] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
            (x_train[0][:, :, 1] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
            (x_train[0][:, :, 3] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
            (x_train[0][:, :, 1] / (x_train[0][:, :, 0] + 1e-8)).reshape(list(x_train[0].shape[:-1]) + [1]),
        ], axis=2)
        # Drop rows with implausible (>= 15%) death rates.
        death_rate_index = np.where(y_train[:, 1] / (y_train[:, 0] + 1e-8) < 0.15)[0]
        tb_hist = tensorflow.keras.callbacks.TensorBoard(log_dir='./graph_gru_attention', histogram_freq=1,
                                                         write_graph=True, write_images=True)
        model_path = './fatalities_gru_attention.h5'
        cb_checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True)
        early_stopping = EarlyStopping(patience=10)
        history = self.fatalities_model.fit(
            {'encoder_input': x_train[0][death_rate_index],
             'country_onehot': x_train[1][death_rate_index],
             'target_input': X_train[0][death_rate_index][:, -1, 1].reshape(list(X_train[0][death_rate_index].shape[:-2]) + [1]),
             'flag_input': np.ones(len(x_train[0][death_rate_index]))},
            y_train[:, 1][death_rate_index],
            batch_size=self.batch_size, epochs=self.epochs, verbose=1, shuffle=True,
            validation_split=0.2, callbacks=[tb_hist, cb_checkpoint, early_stopping])
        y_predict = self.fatalities_model.predict(
            {'encoder_input': x_train[0],
             'country_onehot': x_train[1],
             'target_input': X_train[0][:, -1, 0].reshape(list(X_train[0].shape[:-2]) + [1]),
             'flag_input': np.zeros(len(x_train[0]))})
        return y_predict, y_train

    def train_data_confirmed_cases(self):
        try:
            with gzip.open('encoded_data.dat', 'rb') as f:
                X_train, y_train = pickle.load(f)
        except:
            X_train, y_train = self.make_train_data()
            with gzip.open('encoded_data.dat', 'wb') as f:
                pickle.dump([X_train, y_train], f)
        x_train = X_train.copy()
        x_train[0] = np.concatenate([
            (x_train[0][:, :, 0] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
            (x_train[0][:, :, 2] / x_train[0][:, :, 6]).reshape(list(x_train[0].shape[:-1]) + [1]),
        ], axis=2)
        tb_hist = tensorflow.keras.callbacks.TensorBoard(log_dir='./graph_gru_attention', histogram_freq=1,
                                                         write_graph=True, write_images=True)
        model_path = './confirmed_cases_gru_attention.h5'
        cb_checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True)
        early_stopping = EarlyStopping(patience=10)
        history = self.confirmed_cases_model.fit(
            {'encoder_input': x_train[0],
             'country_onehot': X_train[1],
             'target_input': X_train[0][:, -1, 0].reshape(list(X_train[0].shape[:-2]) + [1]),
             'flag_input': np.ones((len(x_train[0]), 1))},
            y_train[:, 0],
            batch_size=self.batch_size, epochs=self.epochs, verbose=1, shuffle=True,
            validation_split=0.2, callbacks=[tb_hist, cb_checkpoint, early_stopping])
        y_predict = self.confirmed_cases_model.predict(
            {'encoder_input': x_train[0],
             'country_onehot': X_train[1],
             'target_input': X_train[0][:, -1, 0].reshape(list(X_train[0].shape[:-2]) + [1]),
             'flag_input': np.ones((len(x_train[0]), 1))})
        return y_predict, y_train

    def load_models(self, country_list):
        self.confirmed_cases_model.load_weights('/kaggle/working/confirmed_cases_gru_attention.h5')
        self.fatalities_model.load_weights('/kaggle/working/fatalities_gru_attention.h5')
        self.country_list = country_list

    def predict_encoder_confirmed_cases(self, day_list, country_label, data_df, country_population):
        encoded_data_c = []
        encoded_data_f = []
        country_onehot = np.zeros(len(self.country_list))
        country_onehot[self.country_list.index(country_label)] = 1
        before_confirmed_case = 0
        before_fatalities_case = 0
        for day in day_list:
            tmp_data_df = data_df[data_df.Date == day]
            try:
                encoded_data_c.append([(tmp_data_df.ConfirmedCases.values[0] / country_population),
                                       tmp_data_df.ConfirmedCases_diff.values[0] / country_population])
            except:
                print(country_population, day, country_label, tmp_data_df, day_list)
                return
            encoded_data_f.append([
                (tmp_data_df.ConfirmedCases.values[0] / country_population),
                (tmp_data_df.ConfirmedCases_diff.values[0] / country_population),
                (tmp_data_df.Fatalities.values[0] / country_population),
                (tmp_data_df.Fatalities_diff.values[0] / country_population),
                (tmp_data_df.Fatalities.values[0] / (tmp_data_df.ConfirmedCases.values[0] + 1e-8)),
            ])
            before_confirmed_case = tmp_data_df.ConfirmedCases.values[0]
            before_fatalities_case = tmp_data_df.Fatalities.values[0]
        return (np.array([country_onehot]), np.array([encoded_data_c]), before_confirmed_case,
                np.array([encoded_data_f]), before_fatalities_case)

    def predict_encoder_fatalities(self, day_list, country_label, data_df, country_population):
        encoded_data = []
        before_fatalities = 0
        for day in day_list:
            tmp_data_df = data_df[data_df.Date == day]
            encoded_data.append([
                (tmp_data_df.ConfirmedCases.values[0] / country_population),
                (tmp_data_df.ConfirmedCases_diff.values[0] / country_population),
                (tmp_data_df.Fatalities.values[0] / country_population),
                (tmp_data_df.Fatalities_diff.values[0] / country_population),
                (tmp_data_df.Fatalities.values[0] / (tmp_data_df.ConfirmedCases.values[0] + 1e-8)),
            ])
            before_fatalities = tmp_data_df.Fatalities.values[0]
        return np.array([encoded_data]), before_fatalities

    def predict_test(self, test_df):
        predict_confirmed_cases = []
        predict_fatalities = []
        country_list = self.country_list.copy()
        p_country = ''
        for itest in tqdm(test_df.itertuples(), total=len(test_df), position=0):
            date_list = itest.Date.split('-')
            delta = timedelta(days=1)
            date = datetime(int(date_list[0]), int(date_list[1]), int(date_list[2])) - delta
            day_list = []
            self.country_list = country_list.copy()
            for i in range(self.days):
                day_list.append(date.strftime('%Y-%m-%d'))
                date -= delta
            day_list = day_list[::-1]
            try:
                country_label = itest.Country_Region + '_' + itest.Province_State
            except:
                country_label = itest.Country_Region
            if p_country != country_label:
                data_df = self.target_df[self.target_df.country_label == country_label]
                country_population = self.population_df[self.population_df.country_label == country_label].Population.values[0]
                p_country = country_label
            country_onehot, encoded_data_c, bc, encoded_data_f, bf = self.predict_encoder_confirmed_cases(
                day_list, country_label, data_df, country_population)
            confirmed_cases_increase_rate = self.confirmed_cases_model.predict_on_batch(
                {'encoder_input': encoded_data_c, 'country_onehot': country_onehot,
                 'target_input': np.array([bc]).reshape((1, 1)), 'flag_input': np.array([1])})
            mortality_rate = self.fatalities_model.predict_on_batch(
                {'encoder_input': encoded_data_f, 'country_onehot': country_onehot,
                 'target_input': np.array([bf]).reshape((1, 1)), 'flag_input': np.array([1])})
            confirmed_cases_increase_rate = confirmed_cases_increase_rate.numpy().reshape(1)
            mortality_rate = mortality_rate.numpy().reshape(1)
            t_df = self.target_df[(self.target_df.Date == itest.Date) & (self.target_df.country_label == country_label)]
            if len(t_df) == 0:
                # Unseen date: append the prediction and recompute the derived rates.
                new_data = [-1, itest.Province_State, itest.Country_Region, itest.Date,
                            confirmed_cases_increase_rate[0], mortality_rate[0]]
                new_data += [0] * (len(self.target_df.columns) - len(new_data))
                self.target_df.loc[len(self.target_df)] = new_data
                self.target_df = self.target_df.sort_values(by='Date')
                self.target_df = self.target_df.sort_values(by='Province_State')
                self.target_df = self.target_df.sort_values(by='Country_Region')
                self.cal_increase_rate(self.target_df, self.population_df)
                data_df = self.target_df[self.target_df.country_label == country_label]
                predict_confirmed_cases.append(confirmed_cases_increase_rate[0])
                predict_fatalities.append(mortality_rate[0])
            else:
                predict_confirmed_cases.append(t_df.ConfirmedCases.values[0])
                predict_fatalities.append(t_df.Fatalities.values[0])
        test_df['ConfirmedCases'] = predict_confirmed_cases
        test_df['Fatalities'] = predict_fatalities
        test_df[['ForecastId', 'ConfirmedCases', 'Fatalities']].to_csv('submission.csv', index=False)
        return test_df
<predict_on_test>
myprediction = mymodel.predict(mytest)
submission = pd.DataFrame({'PassengerId': mytest.index, 'Survived': myprediction})
submission.to_csv('submissionRFC_nosplit.csv', index=False)
Titanic - Machine Learning from Disaster
13,735,522
test_c19 = corona19_predict(datas[1], population, 71, epochs=200)
y_predict_f, y_train_f = test_c19.train_data_fatalities()
<prepare_x_and_y>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
13,735,522
y_predict_c, y_train_c = test_c19.train_data_confirmed_cases()<predict_on_test>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
13,735,522
test_c19 = corona19_predict(datas[1], population, 71, epochs=200)
country_list = test_c19.country_list.copy()
test_c19.load_models(country_list)
rr = test_c19.predict_test(datas[2])
<filter>
train_x, test_x, train_y, test_y = train_test_split(mytrain, target, test_size=0.2, random_state=42)
mymodel = RandomForestClassifier(criterion='entropy', max_depth=7, max_features='auto',
                                 min_samples_split=4, n_estimators=18, random_state=42)
mymodel.fit(train_x, train_y)
mymodel.score(test_x, test_y)
Titanic - Machine Learning from Disaster
13,735,522
datas[1][(datas[1].Country_Region == 'Canada')&(datas[1].Province_State == 'Yukon')]<load_from_csv>
myprediction = mymodel.predict(mytest )
Titanic - Machine Learning from Disaster
13,735,522
<create_dataframe><EOS>
submission = pd.DataFrame({'PassengerId': mytest.index, 'Survived': myprediction})
submission.to_csv('submissionRF_optim_param.csv', index=False)
Titanic - Machine Learning from Disaster
14,039,531
<SOS> metric: categorization accuracy; Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
plt.style.use('seaborn')
Titanic - Machine Learning from Disaster
14,039,531
train = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv") test = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv" )<drop_column>
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
print(train_df.head(5))
print(" ")
print(test_df.head(5))
Titanic - Machine Learning from Disaster
14,039,531
train = train[['Province_State', 'Country_Region', 'Date', 'ConfirmedCases', 'Fatalities']]
train.head()
<count_missing_values>
train_df["Pclass"].replace(1, "Upper", inplace=True) train_df["Pclass"].replace(2, "Middle", inplace=True) train_df["Pclass"].replace(3, "Lower", inplace=True) train_df["Age"].fillna(np.nanmedian(train_df["Age"]), inplace=True) bins = [0,10,20,30,40,50,60,70,80,100] train_df["Age_bin"] = pd.cut(train_df['Age'], bins) train_df["with_family"] =(train_df["SibSp"] + train_df["Parch"])>0 train_df["Embarked"].fillna("S", inplace=True) train_df["Cabin"].fillna("Unknown", inplace=True) train_df["Title"] = train_df.Name.apply(lambda x: x.split(",")[1].split(".")[0].strip()) title_list = ["Mr", "Miss", "Mrs", "Master", "Dr", "Rev", "Col"] train_df.loc[~train_df["Title"].isin(title_list), "Title"] = "NA" for i in range(0, len(train_df)) : train_df.at[i, "Deck"] = " ".join(re.findall("[a-zA-Z]+", train_df.at[i, "Cabin"])) train_df["Deck"].replace("B B", "B", inplace=True) train_df["Deck"].replace("B B B", "B", inplace=True) train_df["Deck"].replace("B B B B", "B", inplace=True) train_df["Deck"].replace("C C", "C", inplace=True) train_df["Deck"].replace("D D", "D", inplace=True) train_df["Deck"].replace("C C C", "C", inplace=True) train_df["Deck"].replace("F G", "F", inplace=True) train_df["Deck"].replace("F E", "E", inplace=True) train_df["Deck"].replace("T", "Unknown", inplace=True )
Titanic - Machine Learning from Disaster
14,039,531
print("Any missing sample in training set:",train.isnull().values.any()) print("Any missing sample in test set:",test.isnull().values.any() , " " )<groupby>
train_df_pre = train_df.drop(columns=["Name", "Ticket", "Cabin"])
test_df_pre = test_df.drop(columns=["Name", "Ticket", "Cabin"])
num_attribs = ["Age", "SibSp", "Parch", "Fare"]
cat_attribs = ["Pclass", "Embarked", "Deck", "Sex", "Title", "with_family", "Age_bin"]
col_transformer = ColumnTransformer([
    ("num", StandardScaler(), num_attribs),
    ("cat", OneHotEncoder(), cat_attribs),
], remainder="passthrough")
train_array_transformed = col_transformer.fit_transform(train_df_pre)
train_df_transformed = pd.DataFrame(data=train_array_transformed)
column_names = num_attribs + list(col_transformer.named_transformers_['cat'].get_feature_names()) + ["PassengerId"] + ["Survived"]
train_df_transformed.columns = column_names
# Note: the original cell refits on the test set; transform() alone is the usual practice.
test_array_transformed = col_transformer.fit_transform(test_df_pre)
test_df_transformed = pd.DataFrame(data=test_array_transformed)
column_names = num_attribs + list(col_transformer.named_transformers_['cat'].get_feature_names()) + ["PassengerId"]
test_df_transformed.columns = column_names
Titanic - Machine Learning from Disaster
14,039,531
def total_by_date(country):
    # Daily country totals of confirmed cases and fatalities.
    cases = train[train['Country_Region'] == country].groupby(['Date']).agg({'ConfirmedCases': ['sum']})
    deaths = train[train['Country_Region'] == country].groupby(['Date']).agg({'Fatalities': ['sum']})
    return cases.join(deaths)

total_date_US = total_by_date('US')
total_date_China = total_by_date('China')
total_date_Italy = total_by_date('Italy')
total_date_Australia = total_by_date('Australia')

plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
total_date_US.plot(ax=plt.gca(), title='US')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 2)
total_date_China.plot(ax=plt.gca(), title='China')
plt.subplot(2, 2, 3)
total_date_Italy.plot(ax=plt.gca(), title='Italy')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 4)
total_date_Australia.plot(ax=plt.gca(), title='Australia')
<merge>
X_train = train_df_transformed.drop(columns=["PassengerId", "Survived"])
y_train = train_df_transformed["Survived"]
X_test = test_df_transformed.drop(columns=["PassengerId"])
lr = LogisticRegression(max_iter=2000)
cv_lr = cross_val_score(lr, X_train, y_train, cv=3)
print(cv_lr)
print("mean accuracy: " + str(cv_lr.mean()))
lr.fit(X_train, y_train)
lr.score(X_train, y_train)
y_test = lr.predict(X_test)
print("Number of survivors predicted: " + str(sum(y_test)))
Titanic - Machine Learning from Disaster
14,039,531
# Reuse total_by_date() defined in the US/China cell above.
total_date_Indonesia = total_by_date('Indonesia')
total_date_Malaysia = total_by_date('Malaysia')
total_date_Thailand = total_by_date('Thailand')
total_date_Singapore = total_by_date('Singapore')

plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
total_date_Indonesia.plot(ax=plt.gca(), title='Indonesia')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 2)
total_date_Malaysia.plot(ax=plt.gca(), title='Malaysia')
plt.subplot(2, 2, 3)
total_date_Thailand.plot(ax=plt.gca(), title='Thailand')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 4)
total_date_Singapore.plot(ax=plt.gca(), title='Singapore')
<merge>
X_train = train_df_transformed.drop(columns=["PassengerId", "Survived"])
y_train = train_df_transformed["Survived"]
X_test = test_df_transformed.drop(columns=["PassengerId"])
lr = LogisticRegression()
param_grid = {"max_iter": [500, 1000, 2000],
              "penalty": ["l1", "l2"],
              "C": np.logspace(-4, 4, 20),
              "solver": ["liblinear"]}
lr_clf = GridSearchCV(lr, param_grid=param_grid, cv=3, verbose=True, n_jobs=-1)
best_lr_clf = lr_clf.fit(X_train, y_train)
print("Best Logistic Regression Score: " + str(best_lr_clf.best_score_))
print("Best Parameter: " + str(best_lr_clf.best_params_))
y_test = lr_clf.predict(X_test)
print("Number of survivors predicted: " + str(sum(y_test)))
Titanic - Machine Learning from Disaster
14,039,531
# Reuse total_by_date() defined in the US/China cell above.
total_date_Vietnam = total_by_date('Vietnam')
total_date_Philippines = total_by_date('Philippines')
total_date_Cambodia = total_by_date('Cambodia')
total_date_Laos = total_by_date('Laos')

plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
total_date_Vietnam.plot(ax=plt.gca(), title='Vietnam')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 2)
total_date_Philippines.plot(ax=plt.gca(), title='Philippines')
plt.subplot(2, 2, 3)
total_date_Cambodia.plot(ax=plt.gca(), title='Cambodia')
plt.ylabel("Confirmed cases", size=13)
plt.subplot(2, 2, 4)
total_date_Laos.plot(ax=plt.gca(), title='Laos')
<merge>
X_train = train_df_transformed.drop(columns=["PassengerId", "Survived"])
y_train = train_df_transformed["Survived"]
X_test = test_df_transformed.drop(columns=["PassengerId"])
rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
cv_rnd_clf = cross_val_score(rnd_clf, X_train, y_train, cv=5)
print(cv_rnd_clf)
print("mean accuracy: " + str(cv_rnd_clf.mean()))
rnd_clf.fit(X_train, y_train)
rnd_clf.score(X_train, y_train)
y_test = rnd_clf.predict(X_test)
print("Number of survivors predicted: " + str(sum(y_test)))
Titanic - Machine Learning from Disaster
14,039,531
total_date_Brunei = total_by_date('Brunei')  # helper defined in the US/China cell above
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
total_date_Brunei.plot(ax=plt.gca(), title='Brunei')
plt.ylabel("Confirmed cases", size=13)
<data_type_conversions>
X_train = train_df_transformed.drop(columns=["PassengerId", "Survived"])
y_train = train_df_transformed["Survived"]
X_test = test_df_transformed.drop(columns=["PassengerId"])
rf = RandomForestClassifier()
param_grid = {'n_estimators': [200, 300, 400, 600, 800, 1000, 1500],
              'bootstrap': [True, False],
              'max_depth': [3, 5, 10, 20, 50, 75, 100, None],
              'max_features': ['auto'],
              'min_samples_leaf': [1, 2, 4, 6, 10],
              'min_samples_split': [2, 5, 10, 20]}
rnd_clf = RandomizedSearchCV(rf, param_distributions=param_grid, n_iter=500, cv=3, verbose=True, n_jobs=-1)
best_rnd_clf = rnd_clf.fit(X_train, y_train)
print("Best Random Forest Score: " + str(best_rnd_clf.best_score_))
print("Best Parameter: " + str(best_rnd_clf.best_params_))
best_rnd_clf.score(X_train, y_train)
y_test = best_rnd_clf.predict(X_test)
print("Number of survivors predicted: " + str(sum(y_test)))
Titanic - Machine Learning from Disaster
14,039,531
<data_type_conversions><EOS>
export_df = pd.DataFrame()
export_df["PassengerId"] = test_df_transformed["PassengerId"].astype(int)
export_df["Survived"] = y_test.astype(int)
now = datetime.datetime.now()
name_add = "date_" + str(now.year) + "-" + str(now.month) + "-" + str(now.day) + "_time_" + str(now.hour) + "-" + str(now.minute)
export_df.to_csv(f"random_forest_tuned_{name_add}.csv", index=False)
Titanic - Machine Learning from Disaster
13,998,051
<SOS> metric: categorization accuracy; Kaggle data source: titanic-machine-learning-from-disaster<categorify>
import os
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import GridSearchCV
Titanic - Machine Learning from Disaster
13,998,051
le = LabelEncoder()

def FunLabelEncoder(df):
    # Integer-encode every object (string) column in place.
    for c in df.columns:
        if df.dtypes[c] == object:
            le.fit(df[c].astype(str))
            df[c] = le.transform(df[c].astype(str))
    return df
<categorify>
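A quick illustration of FunLabelEncoder on a toy frame (hypothetical data, not from the competition): every object column is integer-coded in place, numeric columns pass through unchanged.

demo = pd.DataFrame({'Sex': ['male', 'female', 'male'], 'Age': [22, 38, 26]})
print(FunLabelEncoder(demo))  # 'Sex' becomes 1/0 codes; 'Age' stays int64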
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
union = [train, test]
passenger_id = []
Titanic - Machine Learning from Disaster
13,998,051
train = FunLabelEncoder(train)
train.info()
train.iloc[235:300, :]
<categorify>
for i, df in enumerate(union):
    # Keep only the first given name (the token after the title).
    name = df['Name'].str.split('.', n=1, expand=True)
    name = name[1].str.split(expand=True)[0]
    name.replace(['(\()', '(\))'], '', regex=True, inplace=True)
    df['Name'] = name
    del name
    # Impute Age with the per-name mean, then the overall mean.
    mean_age = df[['Name', 'Age']].groupby(['Name']).mean()
    df = df.merge(mean_age, on='Name')
    df['Age'] = df['Age_x'].fillna(df['Age_y'])
    df = df.drop(['Age_x', 'Age_y', 'Name'], axis=1)
    df['Age'] = df['Age'].fillna(df['Age'].mean())
    age_buckets = [0, 2, 10, 18, 60, 200]
    age_labels = [0, 1, 2, 3, 4]
    df['AgeGroup'] = pd.cut(df['Age'], bins=age_buckets, labels=age_labels, right=False)
    parch_buckets = [0, 1, 200]
    parch_labels = [0, 1]
    df['Parch'] = pd.cut(df['Parch'], bins=parch_buckets, labels=parch_labels, right=False)
    sibsp_buckets = [0, 1, 2, 200]
    sibsp_labels = [0, 1, 2]
    df['SibSp'] = pd.cut(df['SibSp'], bins=sibsp_buckets, labels=sibsp_labels, right=False).astype(np.int8)
    df['Fare'] = df['Fare'].fillna(df['Fare'].mean())
    df['Pclass'] = df['Pclass'].astype(np.int8)
    df['Ability'] = df['Fare'] / df['Pclass']
    df['Family'] = df['SibSp'].astype(np.int8) + df['Parch'].astype(np.int8) + 1
    fare_buckets = [0, 23, 10000]
    fare_labels = [0, 1]
    df['Fare'] = pd.cut(df['Fare'], bins=fare_buckets, labels=fare_labels, right=False)
    ab_buckets = [0, 4, 9, 15, 20, 59, 70, 10000]
    ab_labels = [0, 1, 2, 3, 4, 5, 6]
    df['Ability'] = pd.cut(df['Ability'], bins=ab_buckets, labels=ab_labels, right=False)
    df = df.sort_values(by=['PassengerId'])
    passenger_id.append(df["PassengerId"])
    df['Sex'] = pd.get_dummies(df['Sex'])
    df['Fare'] = pd.get_dummies(df['Fare'])
    df['SibSp'] = pd.get_dummies(df['SibSp'])
    df['Parch'] = pd.get_dummies(df['Parch'])
    df = df.drop(['Embarked', 'PassengerId', 'Ticket', 'Age', 'Cabin'], axis=1)
    union[i] = df
Titanic - Machine Learning from Disaster
13,998,051
test = FunLabelEncoder(test)
test.info()
test.iloc[235:300, :]
<define_variables>
x_train = union[0].drop("Survived", axis=1)
y_train = union[0]["Survived"]
x_test = union[1]
x_train.shape, y_train.shape, x_test.shape
Titanic - Machine Learning from Disaster
13,998,051
features = ['Date', 'Province_State', 'Country_Region']
target = 'ConfirmedCases'
<train_model>
ex = ExtraTreesClassifier(random_state=6, bootstrap=True, oob_score=True)
ex.fit(x_train, y_train)
y_pred = ex.predict(x_test)
ex.score(x_train, y_train)
score = round(ex.score(x_train, y_train) * 100, 2)
print('Extremely Randomized Trees', score)
Titanic - Machine Learning from Disaster
13,998,051
dtcla = DecisionTreeClassifier(random_state=1000)
dtcla.fit(train[features], train[target])
<predict_on_test>
for i, j in enumerate(x_train.head(1)):  # iterating a DataFrame yields column names
    print('%s: %s' % (j, int(ex.feature_importances_[i] * 100)) + '%')
Titanic - Machine Learning from Disaster
13,998,051
<define_variables><EOS>
submission = pd.DataFrame({ "PassengerId": passenger_id[1], "Survived": y_pred }) submission.to_csv('/kaggle/working/submission.csv', index=False)
Titanic - Machine Learning from Disaster
13,977,623
<SOS> metric: categorization accuracy; Kaggle data source: titanic-machine-learning-from-disaster<train_model>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
Titanic - Machine Learning from Disaster
13,977,623
dtcla = DecisionTreeClassifier(random_state=1000)
dtcla.fit(train[features1], train[target1])
<predict_on_test>
train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train = train_data.copy()
test = test_data.copy()
Titanic - Machine Learning from Disaster
13,977,623
predictions1 = dtcla.predict(test[features1])
print(predictions1[0:50])
<prepare_output>
train.drop(['PassengerId'], axis=1, inplace=True)
test.drop(['PassengerId'], axis=1, inplace=True)
pred = train_data['Survived']
Titanic - Machine Learning from Disaster
13,977,623
submission = pd.DataFrame({'ForecastId': test['ForecastId'], 'ConfirmedCases': predictions, 'Fatalities': predictions1})
submission.head(10)
<save_to_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,977,623
filename = 'submission.csv'
submission.to_csv(filename, index=False)
print('Saved file: ' + filename)
<import_modules>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,977,623
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
from sklearn.preprocessing import LabelBinarizer, LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.linear_model import LogisticRegression, SGDClassifier, LinearRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split, cross_val_score, KFold
import keras
from keras.wrappers.scikit_learn import KerasRegressor
from keras.models import Sequential
from keras.layers import Dense, LSTM
import tensorflow as tf
<load_from_csv>
test.isnull().sum()
Titanic - Machine Learning from Disaster
13,977,623
train_df = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv") test_df = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv") submission = pd.read_csv(".. /input/covid19-global-forecasting-week-4/submission.csv" )<count_missing_values>
test.isnull().sum()
Titanic - Machine Learning from Disaster
13,977,623
train_df.isna().sum()<count_missing_values>
sex1 = pd.get_dummies(train['Sex'])
sex2 = pd.get_dummies(test['Sex'])
train.drop(['Sex'], axis=1, inplace=True)
test.drop(['Sex'], axis=1, inplace=True)
train = pd.concat([train, sex1], axis=1)
test = pd.concat([test, sex2], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
test_df.isna().sum()<drop_column>
train.drop(['female'], axis=1, inplace=True)
test.drop(['female'], axis=1, inplace=True)
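The manual drop of the 'female' dummy above removes a perfectly collinear column; pandas can do the same in one step with drop_first=True. A minimal sketch on toy data (since 'female' sorts first, it is the dropped baseline):

demo = pd.get_dummies(pd.Series(['male', 'female', 'male']), drop_first=True)
print(demo)  # a single 'male' column; 'female' is the implicit baseline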
Titanic - Machine Learning from Disaster
13,977,623
train_df['Country_Region'] = train_df['Country_Region'] + ' ' + train_df['Province_State']
test_df['Country_Region'] = test_df['Country_Region'] + ' ' + test_df['Province_State']
del train_df['Province_State']
del test_df['Province_State']
<feature_engineering>
plt.figure(figsize=(8, 5)) sns.set_style('whitegrid') sns.countplot(x = 'Pclass',hue='Survived',data=train_data,palette='deep' )
Titanic - Machine Learning from Disaster
13,977,623
def split_date(date):
    date = date.split('-')
    date[0] = int(date[0])
    if date[1][0] == '0':
        date[1] = int(date[1][1])
    else:
        date[1] = int(date[1])
    if date[2][0] == '0':
        date[2] = int(date[2][1])
    else:
        date[2] = int(date[2])
    return date

train_df.Date = train_df.Date.apply(split_date)
test_df.Date = test_df.Date.apply(split_date)
<feature_engineering>
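Since int() already tolerates leading zeros (int('04') == 4), the zero-stripping branches above are redundant; a minimal equivalent sketch, assuming 'YYYY-MM-DD' strings:

def split_date_simple(date):
    return [int(part) for part in date.split('-')]

assert split_date_simple('2020-04-09') == [2020, 4, 9]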
Fare_0 = []
Fare_1 = []
for i in range(0, 891):
    if train_data["Survived"][i] == 0:
        Fare_0.append(train["Fare"][i])
    else:
        Fare_1.append(train["Fare"][i])
Titanic - Machine Learning from Disaster
13,977,623
year = []
month = []
day = []
for i in train_df.Date:
    year.append(i[0])
    month.append(i[1])
    day.append(i[2])
<feature_engineering>
train["Embarked"].fillna("S", inplace = True) test["Embarked"].fillna("S", inplace = True )
Titanic - Machine Learning from Disaster
13,977,623
train_df['Year'] = year
train_df['Month'] = month
train_df['Day'] = day
del train_df['Date']
<feature_engineering>
embark1 = pd.get_dummies(train['Embarked'])
embark2 = pd.get_dummies(test['Embarked'])
train.drop(['Embarked'], axis=1, inplace=True)
test.drop(['Embarked'], axis=1, inplace=True)
train = pd.concat([train, embark1], axis=1)
test = pd.concat([test, embark2], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
year = []
month = []
day = []
for i in test_df.Date:
    year.append(i[0])
    month.append(i[1])
    day.append(i[2])
<feature_engineering>
def fam(x):
    # 1 if the passenger travels with any family member, else 0.
    if (x['SibSp'] + x['Parch']) > 0:
        return 1
    else:
        return 0

train['Family'] = train.apply(fam, axis=1)
test['Family'] = test.apply(fam, axis=1)
Titanic - Machine Learning from Disaster
13,977,623
test_df['Year'] = year
test_df['Month'] = month
test_df['Day'] = day
del test_df['Date']
del train_df['Id']
del test_df['ForecastId']
<drop_column>
train = train.drop(['SibSp', 'Parch'], axis=1)
test = test.drop(['SibSp', 'Parch'], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
del train_df['Year']
del test_df['Year']
<data_type_conversions>
train["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'X' for i in train['Cabin'] ]) test["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'X' for i in test['Cabin'] ] )
Titanic - Machine Learning from Disaster
13,977,623
train_df['ConfirmedCases'] = train_df['ConfirmedCases'].apply(int)
train_df['Fatalities'] = train_df['Fatalities'].apply(int)
<drop_column>
train["Cabin"] = train["Cabin"].map({"X":0, "A":1, "B" : 2 , "C":3, "D":4, "E":5, "F":6, "G":7,"T":0}) train["Cabin"] = train["Cabin"].astype(int) test["Cabin"] = test["Cabin"].map({"X":0, "A":1, "B" : 2 , "C":3, "D":4, "E":5, "F":6, "G":7,"T":0}) test["Cabin"] = test["Cabin"].astype(int )
Titanic - Machine Learning from Disaster
13,977,623
cases = train_df.ConfirmedCases
fatalities = train_df.Fatalities
del train_df['ConfirmedCases']
del train_df['Fatalities']
<categorify>
train_title = [i.split(",")[1].split(".")[0].strip() for i in train["Name"]]
train["Title"] = pd.Series(train_title)
test_title = [i.split(",")[1].split(".")[0].strip() for i in test["Name"]]
test["Title"] = pd.Series(test_title)
Titanic - Machine Learning from Disaster
13,977,623
lb = LabelEncoder()
train_df['Country_Region'] = lb.fit_transform(train_df['Country_Region'])
test_df['Country_Region'] = lb.transform(test_df['Country_Region'])
<normalization>
train = train.drop(['Name'], axis=1)
test = test.drop(['Name'], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
scaler = MinMaxScaler()
x_train = scaler.fit_transform(train_df.values)
x_test = scaler.transform(test_df.values)
<import_modules>
train["Title"] = train["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') train["Title"] = train["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) train["Title"] = train["Title"].astype(int) test["Title"] = test["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') test["Title"] = test["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) test["Title"] = test["Title"].astype(int )
Titanic - Machine Learning from Disaster
13,977,623
from xgboost import XGBRegressor<train_model>
Ticket1 = []
for i in list(train.Ticket):
    if not i.isdigit():
        Ticket1.append(i.replace(".", "").replace("/", "").strip().split(' ')[0])
    else:
        Ticket1.append("X")
train["Ticket"] = Ticket1

Ticket2 = []
for j in list(test.Ticket):
    if not j.isdigit():
        Ticket2.append(j.replace(".", "").replace("/", "").strip().split(' ')[0])
    else:
        Ticket2.append("X")
test["Ticket"] = Ticket2
Titanic - Machine Learning from Disaster
13,977,623
rf = XGBRegressor(n_estimators=2500, random_state=0, max_depth=27)
rf.fit(x_train, cases)
<predict_on_test>
np.union1d(train["Ticket"], test["Ticket"] )
Titanic - Machine Learning from Disaster
13,977,623
cases_pred = rf.predict(x_test)
cases_pred
<feature_engineering>
train = pd.get_dummies(train, columns=["Ticket"], prefix="T")
test = pd.get_dummies(test, columns=["Ticket"], prefix="T")
Titanic - Machine Learning from Disaster
13,977,623
cases_pred = np.around(cases_pred, decimals=0)
cases_pred
<concatenate>
train = train.drop(['T_SP', 'T_SOP', 'T_Fa', 'T_LINE', 'T_SWPP', 'T_SCOW', 'T_PPP', 'T_AS', 'T_CASOTON'], axis=1)
test = test.drop(['T_SCA3', 'T_STONOQ', 'T_AQ4', 'T_A', 'T_LP', 'T_AQ3'], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
# Append the known case counts as an extra feature column.
x_train_cas = []
for i in range(len(x_train)):
    x = list(x_train[i])
    x.append(cases[i])
    x_train_cas.append(x)
x_train_cas[0]
<prepare_x_and_y>
train.drop(['Survived'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
13,977,623
x_train_cas = np.array(x_train_cas )<train_model>
print(train.isnull().sum())
print("Number of columns are :", train.isnull().sum().count())
Titanic - Machine Learning from Disaster
13,977,623
rf = XGBRegressor(n_estimators=2500, random_state=0, max_depth=27)
rf.fit(x_train_cas, fatalities)
<concatenate>
print(test.isnull().sum())
print("Number of columns are :", test.isnull().sum().count())
Titanic - Machine Learning from Disaster
13,977,623
# Append the predicted case counts as an extra feature column.
x_test_cas = []
for i in range(len(x_test)):
    x = list(x_test[i])
    x.append(cases_pred[i])
    x_test_cas.append(x)
x_test_cas[0]
<predict_on_test>
sc = StandardScaler()
train2 = sc.fit_transform(train)
test2 = sc.transform(test)
Titanic - Machine Learning from Disaster
13,977,623
fatalities_pred = rf.predict(x_test_cas)
fatalities_pred
<feature_engineering>
KFold_Score = pd.DataFrame()
classifiers = ['Linear SVM', 'Radial SVM', 'LogisticRegression', 'RandomForestClassifier',
               'AdaBoostClassifier', 'XGBoostClassifier', 'KNeighborsClassifier', 'GradientBoostingClassifier']
models = [svm.SVC(kernel='linear'),
          svm.SVC(kernel='rbf'),
          LogisticRegression(max_iter=1000),
          RandomForestClassifier(n_estimators=200, random_state=0),
          AdaBoostClassifier(random_state=0),
          xgb.XGBClassifier(n_estimators=100),
          KNeighborsClassifier(),
          GradientBoostingClassifier(random_state=0)]
j = 0
for i in models:
    model = i
    cv = KFold(n_splits=5, random_state=0, shuffle=True)
    KFold_Score[classifiers[j]] = cross_val_score(model, train, np.ravel(pred), scoring='accuracy', cv=cv)
    j = j + 1
Titanic - Machine Learning from Disaster
13,977,623
fatalities_pred = np.around(fatalities_pred, decimals=0)
fatalities_pred
<prepare_output>
mean = pd.DataFrame(KFold_Score.mean(), index=classifiers)
KFold_Score = pd.concat([KFold_Score, mean.T])
KFold_Score.index = ['Fold 1', 'Fold 2', 'Fold 3', 'Fold 4', 'Fold 5', 'Mean']
KFold_Score.T.sort_values(by=['Mean'], ascending=False)
Titanic - Machine Learning from Disaster
13,977,623
submission['ConfirmedCases'] = cases_pred
submission['Fatalities'] = fatalities_pred
<save_to_csv>
col_name1[0], col_name1[2] = col_name1[2], col_name1[0]
col_name2[0], col_name2[2] = col_name2[2], col_name2[0]
Titanic - Machine Learning from Disaster
13,977,623
submission.to_csv("submission.csv" , index = False )<load_from_csv>
train_new = train[col_name1]
test_new = test[col_name2]
Titanic - Machine Learning from Disaster
13,977,623
pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("../input/covid19-global-forecasting-week-4/test.csv")
train = pd.read_csv("../input/covid19-global-forecasting-week-4/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region', 'Province_State', 'Date'])
test = test.sort_values(['Country_Region', 'Province_State', 'Date'])
# Reported cumulative counts occasionally dip; enforce monotonicity per location.
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax')
<feature_engineering>
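What the cummax transform does, on a toy series standing in for one location's reported counts (hypothetical numbers): dips are flattened so the cumulative series never decreases.

demo = pd.Series([1, 3, 2, 5])
print(demo.cummax().tolist())  # [1, 3, 3, 5]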
train_new = train_new.drop(['Cabin'], axis=1)
test_new = test_new.drop(['Cabin'], axis=1)
Titanic - Machine Learning from Disaster
13,977,623
def RMSLE(pred, actual):
    return np.sqrt(np.mean(np.power((np.log(pred + 1) - np.log(actual + 1)), 2)))

feature_day = [1, 20, 50, 100, 200, 500, 1000, 5000, 10000, 15000, 20000, 50000, 100000, 200000]

def CreateInput(data):
    # For each threshold, a feature counting days since the location first
    # reached that many confirmed cases.
    feature = []
    for day in feature_day:
        data.loc[:, 'Number day from ' + str(day) + ' case'] = 0
        if train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].count() > 0:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province) & (train['ConfirmedCases'] < day)]['Date'].max()
        else:
            fromday = train[(train['Country_Region'] == country) & (train['Province_State'] == province)]['Date'].min()
        for i in range(0, len(data)):
            if data['Date'].iloc[i] > fromday:
                day_delta = data['Date'].iloc[i] - fromday
                data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
        feature = feature + ['Number day from ' + str(day) + ' case']
    return data[feature]
<install_modules>
sc = StandardScaler()
train3 = sc.fit_transform(train_new)
test3 = sc.transform(test_new)
Titanic - Machine Learning from Disaster
13,977,623
!pip install pmdarima<save_to_csv>
rfc = RandomForestClassifier(random_state=0 )
Titanic - Machine Learning from Disaster
13,977,623
df_val = df_val_2
submission = df_val[['ForecastId', 'ConfirmedCases_hat', 'Fatalities_hat']]
submission.columns = ['ForecastId', 'ConfirmedCases', 'Fatalities']
submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0})
submission.to_csv('submission.csv', index=False)
submission
<import_modules>
param_grid = { 'n_estimators': [ 200,300], 'max_features': ['auto', 'sqrt'], 'max_depth' : [6,7,8], 'criterion' :['gini', 'entropy'] }
Titanic - Machine Learning from Disaster
13,977,623
tqdm.pandas()<load_from_disk>
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)
CV_rfc.fit(train3, pred)
CV_rfc.best_params_
Titanic - Machine Learning from Disaster
13,977,623
train = pd.read_json('../input/train.json')
test = pd.read_json('../input/test.json')
<feature_engineering>
rfc1 = RandomForestClassifier(random_state=0, n_estimators=200, criterion='gini', max_features='auto', max_depth=8)
rfc1.fit(train3, pred)
Titanic - Machine Learning from Disaster
13,977,623
train['num_ingredients'] = train['ingredients'].apply(len)
train = train[train['num_ingredients'] > 1]
<string_transform>
pred3 = rfc1.predict(test3)
print(pred3)
Titanic - Machine Learning from Disaster
13,977,623
lemmatizer = WordNetLemmatizer()

def preprocess(ingredients):
    ingredients_text = ' '.join(ingredients)
    ingredients_text = ingredients_text.lower()
    ingredients_text = ingredients_text.replace('-', ' ')
    words = []
    for word in ingredients_text.split():
        if re.findall('[0-9]', word):
            continue
        if len(word) <= 2:
            continue
        if '’' in word:
            continue
        word = lemmatizer.lemmatize(word)
        if len(word) > 0:
            words.append(word)
    return ' '.join(words)

for ingredient, expected in [
    ('Eggs', 'egg'),
    ('all-purpose flour', 'all purpose flour'),
    ('purée', 'purée'),
    ('1% low-fat milk', 'low fat milk'),
    ('half & half', 'half half'),
    ('safetida(powder)', 'safetida(powder)')
]:
    actual = preprocess([ingredient])
    assert actual == expected, f'"{expected}" is expected but got "{actual}"'
<feature_engineering>
pred_test = pred3
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': pred_test})
output.to_csv('./submission.csv', index=False)
Titanic - Machine Learning from Disaster
13,977,623
train['x'] = train['ingredients'].progress_apply(preprocess)
test['x'] = test['ingredients'].progress_apply(preprocess)
train.head()
<feature_engineering>
!pip install fugue-incubator==0.0.6
Titanic - Machine Learning from Disaster
13,977,623
vectorizer = make_pipeline(
    TfidfVectorizer(sublinear_tf=True),
    # Cast to float16 to halve memory for the matrix downstream.
    FunctionTransformer(lambda x: x.astype('float16'), validate=False)
)
x_train = vectorizer.fit_transform(train['x'].values)
x_train.sort_indices()
x_test = vectorizer.transform(test['x'].values)
<categorify>
train_set = pd.DataFrame(train3, columns=train_new.columns).assign(label=pred)
test_set = pd.DataFrame(test3, columns=test_new.columns)
Titanic - Machine Learning from Disaster
13,977,623
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(train['cuisine'].values)
dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
<choose_model_class>
def get_space():
    space = ss(LogisticRegression, max_iter=1000)
    return space

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
)
Titanic - Machine Learning from Disaster
13,977,623
estimator = SVC(
    C=50,
    kernel='rbf',
    gamma=1.4,
    coef0=1,
    cache_size=500,
)
classifier = OneVsRestClassifier(estimator, n_jobs=-1)
<compute_train_metric>
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=10, random_state=0)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
)
Titanic - Machine Learning from Disaster
13,977,623
%%time
scores = cross_validate(classifier, x_train, y_train, cv=3)
scores['test_score'].mean()
<train_model>
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=Grid(10, 20), random_state=0)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
)
Titanic - Machine Learning from Disaster
13,977,623
%%time
classifier.fit(x_train, y_train)
<categorify>
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=Grid(10, 20), learning_rate=Rand(0.01, 0.99), random_state=0)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    objective_runner=HyperoptRunner(max_iter=20, seed=0)
)
Titanic - Machine Learning from Disaster
13,977,623
y_pred = label_encoder.inverse_transform(classifier.predict(x_train))
y_true = label_encoder.inverse_transform(y_train)
print(f'accuracy score on train data: {accuracy_score(y_true, y_pred)}')

def report2dict(cr):
    # Parse the plain-text classification report back into a dict.
    rows = []
    for row in cr.split("\n"):
        parsed_row = [x for x in row.split(" ") if len(x) > 0]
        if len(parsed_row) > 0:
            rows.append(parsed_row)
    measures = rows[0]
    classes = defaultdict(dict)
    for row in rows[1:]:
        class_label = row[0]
        for j, m in enumerate(measures):
            classes[class_label][m.strip()] = float(row[j + 1].strip())
    return classes

report = classification_report(y_true, y_pred)
pd.DataFrame(report2dict(report)).T
<save_to_csv>
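Note that report2dict re-parses the printed report; since scikit-learn 0.20 the same structure is available directly, so an equivalent sketch would be:

report_dict = classification_report(y_true, y_pred, output_dict=True)
pd.DataFrame(report_dict).T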
%%time
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=Grid(10, 20), max_depth=Grid(5, 10),
                learning_rate=Rand(0.01, 0.99), random_state=0)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    objective_runner=HyperoptRunner(max_iter=20, seed=0)
)
Titanic - Machine Learning from Disaster
13,977,623
y_pred = label_encoder.inverse_transform(classifier.predict(x_test))
test['cuisine'] = y_pred
test[['id', 'cuisine']].to_csv('submission.csv', index=False)
test[['id', 'cuisine']].head()
<data_type_conversions>
%%time
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=Grid(10, 20), max_depth=Grid(5, 10),
                learning_rate=Rand(0.01, 0.99), random_state=0, n_jobs=1)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    objective_runner=HyperoptRunner(max_iter=20, seed=0),
    execution_engine=DaskExecutionEngine
)
Titanic - Machine Learning from Disaster
13,977,623
train_df = pd.read_json(".. /input/train.json" ).set_index("id") train_df.cuisine = train_df.cuisine.astype("category") train_df.head()<string_transform>
def get_space():
    space = ss(LogisticRegression, max_iter=1000)
    return space

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    save_model=True,
)
Titanic - Machine Learning from Disaster
13,977,623
cuisines = train_df.cuisine.cat.categories.values.tolist()
texts = []
labels = []
label2index = {cuisine: i for i, cuisine in enumerate(cuisines)}
for i, row in train_df.iterrows():
    texts.append(" ".join(row.ingredients))
    labels.append(label2index[row.cuisine])
labels = to_categorical(np.asarray(labels, dtype=np.int32))

word_count = Counter()

def lemmatize(texts):
    global word_count
    wnl = nltk.WordNetLemmatizer()
    for text in texts:
        tokens_recipe = []
        for sentence in text.split(" "):
            tokens_ingredient = [
                wnl.lemmatize(w) for w in wordpunct_tokenize(sentence.lower()) if w.isalpha()
            ]
            word_count.update(tokens_ingredient)
            tokens_recipe.append(" ".join(tokens_ingredient))
        yield " ".join(tokens_recipe)

def preprocess(texts):
    processed_texts = list(lemmatize(texts))
    # Drop rare tokens (seen fewer than 5 times).
    black_list = [word for word, count in word_count.items() if count < 5]
    return [[word for word in sentence.split() if word not in black_list]
            for sentence in processed_texts]

tokenizer = Tokenizer(oov_token="<UNK>")
processed_texts = preprocess(texts)
tokenizer.fit_on_texts(processed_texts)
feature_matrix = tokenizer.texts_to_matrix(processed_texts)
word2index = tokenizer.word_index
print("Unique tokens: {}".format(len(word2index)))
feature_matrix.shape
<choose_model_class>
def get_space():
    space1 = ss(LogisticRegression, max_iter=1000)
    space2 = ss(XGBClassifier, n_estimators=Grid(10, 20), max_depth=Grid(5, 10),
                learning_rate=Rand(0.01, 0.99), random_state=0, n_jobs=1)
    return space1 + space2

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    objective_runner=HyperoptRunner(max_iter=20, seed=0),
    visualize_top_n=3
)
Titanic - Machine Learning from Disaster
13,977,623
def build_model(hidden_units, dropout):
    model = Sequential()
    model.add(Dense(hidden_units, input_shape=[1722,], activation="relu", name="hidden"))
    model.add(Dropout(dropout, name="dropout"))
    model.add(Dense(20, name="output"))
    model.compile("adam", "categorical_hinge", metrics=["accuracy"])
    return model
<split>
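A hypothetical instantiation (the hidden_units and dropout values below are illustrative, not tuned): the input width 1722 is hard-coded inside build_model to match the token matrix above.

model = build_model(hidden_units=256, dropout=0.5)
model.summary()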
def get_space():
    return ss(XGBClassifier, n_estimators=RandInt(5, 100), max_depth=RandInt(3, 10),
              learning_rate=Rand(0.01, 0.99), random_state=0)

suggest_sk_model(
    get_space(),
    train_set,
    scoring="accuracy",
    serialize_path="/tmp",
    partition_keys=["male"],
    objective_runner=HyperoptRunner(20, 0)
)
Titanic - Machine Learning from Disaster
13,977,623
X_train, X_val, y_train, y_val = train_test_split(feature_matrix, labels, test_size=0.2, random_state=42 )<string_transform>
def get_space():
    return sum([
        ss(LogisticRegression, max_iter=1000),
        ss(RandomForestClassifier, n_estimators=RandInt(10, 300),
           max_depth=Grid(*list(range(3, 20))), random_state=0,
           max_features=Grid("auto", "sqrt", "log2"), criterion=Grid("gini", "entropy")),
        ss(GradientBoostingClassifier, n_estimators=RandInt(10, 300),
           max_depth=Grid(*list(range(3, 20))), random_state=0,
           max_features=Grid("auto", "sqrt", "log2"), learning_rate=Rand(0.01, 0.99)),
        ss(XGBClassifier, n_estimators=RandInt(10, 300),
           max_depth=Grid(*list(range(3, 20))), random_state=0,
           learning_rate=Rand(0.01, 0.99), booster=Grid("gbtree", "gblinear", "dart")),
    ])

print(len(list(get_space())))
Titanic - Machine Learning from Disaster
13,977,623
test_df = pd.read_json("../input/test.json")
test_texts = []
for i, row in test_df.iterrows():
    test_texts.append(" ".join(row.ingredients))
processed_texts = preprocess(test_texts)
test_feature_matrix = tokenizer.texts_to_matrix(processed_texts)
test_feature_matrix.shape<split>
def get_space():
    tree_space = sum([
        ss(RandomForestClassifier, max_features=Grid("auto", "sqrt", "log2"),
           criterion=Grid("gini", "entropy")),
        ss(GradientBoostingClassifier, max_features=Grid("auto", "sqrt", "log2"),
           learning_rate=Rand(0.01, 0.99)),
        ss(XGBClassifier, learning_rate=Rand(0.01, 0.99),
           booster=Grid("gbtree", "gblinear", "dart")),
    ])
    common = Space(n_estimators=RandInt(10, 300),
                   max_depth=Grid(*list(range(3, 20))), random_state=0)
    return tree_space * common + ss(LogisticRegression, max_iter=1000)

print(len(list(get_space())))
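# Editor's note (hedged analogy, not the tune library's real implementation):
# the '+' and '*' operators above behave like a union and a cross product of
# candidate configurations, which plain Python can mimic with itertools:
from itertools import product

tree_space = [{"model": "rf", "criterion": c} for c in ("gini", "entropy")]  # hypothetical stand-in
common = [{"max_depth": d} for d in range(3, 5)]                             # hypothetical stand-in
crossed = [{**a, **b} for a, b in product(tree_space, common)]  # '*' ~ cross product
space = crossed + [{"model": "logreg", "max_iter": 1000}]       # '+' ~ union
print(len(space))  # 2 * 2 + 1 = 5 candidate configurations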
Titanic - Machine Learning from Disaster
13,977,623
<create_dataframe><EOS>
model = RandomForestClassifier(random_state=0, n_estimators=246, criterion='entropy',
                               max_features='auto', max_depth=13)
model.fit(train_new, pred)
pred_test = model.predict(test_new)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': pred_test})
output.to_csv('./submission.csv', index=False)
Titanic - Machine Learning from Disaster
13,911,451
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<predict_on_test>
import warnings
warnings.filterwarnings('ignore')
Titanic - Machine Learning from Disaster
13,911,451
test_pred = np.zeros((9944, 5), dtype=np.int32)
for i, model in enumerate(models):
    y_pred = model.predict(test_feature_matrix)
    test_pred[:, i] = y_pred.argmax(axis=1)

def voting(arr):
    return np.bincount(arr).argmax()

predictions = np.apply_along_axis(voting, 1, test_pred)<save_to_csv>
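# Editor's note (standalone toy run of the majority-vote helper above): each
# row of per-model class indices collapses to its most frequent label.
import numpy as np

votes = np.array([[0, 0, 2, 0, 1],
                  [3, 3, 1, 3, 3]])
majority = np.apply_along_axis(lambda a: np.bincount(a).argmax(), 1, votes)
print(majority)  # [0 3]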
titanic = pd.read_csv('../input/titanic/train.csv')
titanic.head(5)
Titanic - Machine Learning from Disaster
13,911,451
result = pd.Series(pd.Categorical.from_codes(predictions, cuisines), test_df.id, name="cuisine")
result.to_csv("submission.csv", header=True)
result.value_counts()<import_modules>
titanic['Sex_label'] = titanic['Sex'].astype('category').cat.codes
titanic[['Sex', 'Sex_label']].sample(3)
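# Editor's note (minimal standalone sketch): .cat.codes numbers the categories
# in lexicographic order, so 'female' maps to 0 and 'male' to 1 above.
import pandas as pd

s = pd.Series(["male", "female", "male"]).astype("category")
print(dict(enumerate(s.cat.categories)))  # {0: 'female', 1: 'male'}
print(s.cat.codes.tolist())               # [1, 0, 1]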
Titanic - Machine Learning from Disaster
13,911,451
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import Perceptron
from sklearn.multiclass import OneVsRestClassifier<load_from_disk>
onehots = pd.get_dummies(titanic['Embarked'], prefix='Embarked')
titanic = titanic.join(onehots)
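# Editor's note (toy illustration): get_dummies with a prefix yields one 0/1
# indicator column per distinct port of embarkation.
import pandas as pd

emb = pd.Series(["S", "C", "Q", "S"], name="Embarked")
print(pd.get_dummies(emb, prefix="Embarked"))  # columns Embarked_C, Embarked_Q, Embarked_S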
Titanic - Machine Learning from Disaster
13,911,451
data = pd.read_json('../input/train.json')
data.head()<create_dataframe>
print('Missing value =', titanic.isna().sum())
print('-' * 50)
print('Duplicated data =', titanic.duplicated().sum())
Titanic - Machine Learning from Disaster
13,911,451
n = 6714
frame = pd.DataFrame(Counter([i for sublist in data.ingredients for i in sublist]).most_common(n))
frame = frame.head(10)
frame<categorify>
X = titanic[['Pclass', 'Sex_label', 'Age', 'SibSp', 'Parch', 'Fare',
             'Embarked_C', 'Embarked_Q', 'Embarked_S']]
y = titanic['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=789)
Titanic - Machine Learning from Disaster
13,911,451
train = json.load(open('../input/train.json'))
test = json.load(open('../input/test.json'))
train_doc = [" ".join(doc['ingredients']).lower() for doc in train]
test_doc = [" ".join(doc['ingredients']).lower() for doc in test]
y_train = [doc['cuisine'] for doc in train]
lb = LabelEncoder()
y_train = lb.fit_transform(y_train)
y_train<feature_engineering>
logreg = LogisticRegression(random_state=789)
logreg.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
13,911,451
tfidf = TfidfVectorizer(binary=True)
X_train = tfidf.fit_transform(train_doc)
X_test = tfidf.transform(test_doc)
X_train<train_model>
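# Editor's note (minimal sketch): binary=True only clips the raw term counts
# to 0/1 before the tf-idf weighting, so the idf component still applies and
# repeated ingredients gain no extra weight.
from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["salt salt pepper", "salt basil"]
X = TfidfVectorizer(binary=True).fit_transform(docs)
print(X.toarray().round(2))  # 'salt' in doc 0 is weighted as if it appeared once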
y_predicted = logreg.predict(X_test)
y_predicted
Titanic - Machine Learning from Disaster
13,911,451
svc = SVC(C=100, gamma=0.9, coef0=1, tol=0.001, decision_function_shape=None)
model_svc = OneVsRestClassifier(svc, n_jobs=1)
model_svc.fit(X_train, y_train)<choose_model_class>
y_predicted_proba = logreg.predict_proba(X_test)
y_predicted_proba[:10]
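# Editor's note (standalone toy check): predict_proba's columns follow
# clf.classes_, so for a 0/1 target column 1 is P(y=1), and rows sum to 1.
import numpy as np
from sklearn.linear_model import LogisticRegression

Xt = np.array([[0.0], [1.0], [2.0], [3.0]])
yt = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(Xt, yt)
print(clf.classes_)                       # [0 1]
print(clf.predict_proba(Xt).sum(axis=1))  # [1. 1. 1. 1.]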
Titanic - Machine Learning from Disaster
13,911,451
<save_to_csv>
print('confusion matrix')
print(confusion_matrix(y_test, y_predicted))
print('accuracy')
print(accuracy_score(y_test, y_predicted))
print('classification report')
print(classification_report(y_test, y_predicted))
Titanic - Machine Learning from Disaster
13,911,451
y_test = model_svc.predict(X_test)
print(y_test)
y_pred = lb.inverse_transform(y_test)
print(y_pred)
test_id = [doc['id'] for doc in test]
sub = pd.DataFrame({'id': test_id, 'cuisine': y_pred}, columns=['id', 'cuisine'])
sub.to_csv('svm_output.csv', index=False)<save_to_csv>
THRESHOLD = 0.58
predsfinal = np.where(logreg.predict_proba(X_test)[:, 1] > THRESHOLD, 1, 0)
print(classification_report(y_test, predsfinal))
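# Editor's note (sketch, assuming the fitted logreg, X_test and y_test from the
# surrounding cells): rather than hand-picking 0.58, one can sweep candidate
# thresholds and keep the one that maximizes F1.
import numpy as np
from sklearn.metrics import f1_score

probs = logreg.predict_proba(X_test)[:, 1]
thresholds = np.arange(0.05, 0.95, 0.01)
scores = [f1_score(y_test, (probs > t).astype(int)) for t in thresholds]
best = thresholds[int(np.argmax(scores))]
print("best threshold: %.2f, F1 = %.3f" % (best, max(scores)))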
Titanic - Machine Learning from Disaster
13,911,451
test_id = [doc['id'] for doc in test]
sub = pd.DataFrame({'id': test_id, 'cuisine': y_pred}, columns=['id', 'cuisine'])
sub.to_csv('svm_output.csv', index=False)<import_modules>
fpr, tpr, thresholds = roc_curve(y_test, predsfinal, pos_label=1)
print('Area Under ROC Curve value:', auc(fpr, tpr))
print("train Accuracy : ", logreg.score(X_train, y_train))
print("test Accuracy : ", logreg.score(X_test, y_test))
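# Editor's note: roc_curve above is fed hard 0/1 predictions, which gives a
# single operating point; ROC AUC is normally computed from probabilities.
# A one-line alternative (sketch, assuming the fitted logreg from above):
from sklearn.metrics import roc_auc_score

print(roc_auc_score(y_test, logreg.predict_proba(X_test)[:, 1]))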
Titanic - Machine Learning from Disaster
13,911,451
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from nltk import word_tokenize, download
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem import WordNetLemmatizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import ExtraTreesClassifier<prepare_x_and_y>
neigh = KNeighborsClassifier(n_neighbors=30)
neigh.fit(X_train, y_train)
y_predicted = neigh.predict(X_test)
print('confusion matrix')
print(confusion_matrix(y_test, y_predicted))
print('accuracy')
print(accuracy_score(y_test, y_predicted))
print('classification report')
print(classification_report(y_test, y_predicted))
Titanic - Machine Learning from Disaster
13,911,451
train = pd.read_json("../input/train.json")
test = pd.read_json("../input/test.json")
y_tr = train.cuisine<remove_duplicates>
fpr, tpr, thresholds = roc_curve(y_test, y_predicted, pos_label=1)
print('Area Under ROC Curve value:', auc(fpr, tpr))
print("train Accuracy : ", neigh.score(X_train, y_train))
print("test Accuracy : ", neigh.score(X_test, y_test))
Titanic - Machine Learning from Disaster
13,911,451
lemmatizer = WordNetLemmatizer()

def lemmatize_ingredients(df):
    all_ingredients = set()
    ingredients_list = []
    for i in range(len(df.ingredients)):
        lemmatized_list = []
        for ingr in df.ingredients[i]:
            split_ingr = ingr.split(" ")
            lemmatized = []
            for word in split_ingr:
                lemmatized.append(lemmatizer.lemmatize(word.lower()))
            all_ingredients.add(" ".join(lemmatized))
            lemmatized_list.append(" ".join(lemmatized))
        ingredients_list.append(lemmatized_list)
    df['ingredients_lem'] = ingredients_list
    return df, all_ingredients<normalization>
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train)
y_predicted = tree.predict(X_test)
print('confusion matrix')
print(confusion_matrix(y_test, y_predicted))
print('-' * 111)
print('accuracy')
print(accuracy_score(y_test, y_predicted))
print('-' * 111)
print('classification report')
print(classification_report(y_test, y_predicted))
print('-' * 111)
Titanic - Machine Learning from Disaster
13,911,451
train, all_ingredients = lemmatize_ingredients(train)
test, _ = lemmatize_ingredients(test)<categorify>
fpr, tpr, thresholds = roc_curve(y_test, y_predicted, pos_label=1)
print('Area Under ROC Curve value:', auc(fpr, tpr))
print("train Accuracy : ", tree.score(X_train, y_train))
print("test Accuracy : ", tree.score(X_test, y_test))
Titanic - Machine Learning from Disaster
13,911,451
train_tfidf_features = tfidf.fit_transform(arraytotext(train.ingredients_lem))
test_tfidf_features = tfidf.transform(arraytotext(test.ingredients_lem))<choose_model_class>
rf = RandomForestClassifier(n_estimators=100, max_depth=10, max_features='sqrt',
                            min_samples_split=8, min_samples_leaf=7,
                            bootstrap=True, n_jobs=-1, random_state=42)
rf.fit(X_train, y_train)
y_predicted = rf.predict(X_test)
print('confusion matrix')
print(confusion_matrix(y_test, y_predicted))
print('accuracy')
print(accuracy_score(y_test, y_predicted))
print('precision')
print(precision_score(y_test, y_predicted))
print('classification report')
print(classification_report(y_test, y_predicted))
Titanic - Machine Learning from Disaster
13,911,451
et = ExtraTreesClassifier(n_estimators=25, max_depth=300, min_samples_split=5,
                          min_samples_leaf=1, random_state=None,
                          min_impurity_decrease=1e-7)<train_model>
fpr, tpr, thresholds = roc_curve(y_test, y_predicted, pos_label=1)
print('Area Under ROC Curve value:', auc(fpr, tpr))
print("train Accuracy : ", rf.score(X_train, y_train))
print("test Accuracy : ", rf.score(X_test, y_test))
Titanic - Machine Learning from Disaster
13,911,451
model = OneVsRestClassifier(et)
model.fit(train_tfidf_features, y_tr)<save_to_csv>
X = titanic[['Pclass', 'Sex_label', 'Age', 'SibSp', 'Parch', 'Fare',
             'Embarked_C', 'Embarked_Q', 'Embarked_S']]
y = titanic['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=789)
Titanic - Machine Learning from Disaster
13,911,451
predictions = model.predict(test_tfidf_features)
submission = pd.DataFrame()
submission['id'] = test.id
submission['cuisine'] = predictions
submission.to_csv('20180718_ova_etc_4.csv', index=False)<set_options>
clf = AdaBoostClassifier()
# Fit on the training split only; the original fit on the full X/y, which
# leaks the held-out rows into training and inflates the test scores below.
clf.fit(X_train, y_train)
y_predicted = clf.predict(X_test)
print('confusion matrix')
print(confusion_matrix(y_test, y_predicted))
print('accuracy')
print(accuracy_score(y_test, y_predicted))
print('precision')
print(precision_score(y_test, y_predicted))
print('classification report')
print(classification_report(y_test, y_predicted))
Titanic - Machine Learning from Disaster
13,911,451
%matplotlib inline
warnings.filterwarnings("ignore", category=FutureWarning)<load_from_disk>
fpr, tpr, thresholds = roc_curve(y_test, y_predicted, pos_label=1)
print('Area Under ROC Curve value:', auc(fpr, tpr))
print("train Accuracy : ", clf.score(X_train, y_train))
print("test Accuracy : ", clf.score(X_test, y_test))
Titanic - Machine Learning from Disaster