kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
|---|---|---|---|
13,795,550 | label_encoder1 = LabelEncoder()
label_encoder2 = LabelEncoder()
train_data['Country_Region'] = label_encoder2.fit_transform(train_data['Country_Region'])
test_data['Country_Region'] = label_encoder2.transform(test_data['Country_Region'] )<define_variables> | shuffled_indices = np.arange(len(input_data.values)-1)
np.random.shuffle(shuffled_indices)
shuffled_inputs = input_data.values[shuffled_indices]
shuffled_targets = target.values[shuffled_indices]
shuffled_inputs
| Titanic - Machine Learning from Disaster |
13,795,550 | Test_id = test_data.ForecastId<drop_column> | num_train_samples=int(0.8*len(shuffled_inputs))
num_validation_samples=int(len(shuffled_inputs)) -num_train_samples
train_input=shuffled_inputs[:num_train_samples]
train_target=shuffled_targets[:num_train_samples]
validation_input=shuffled_inputs[num_train_samples:]
validation_target=shuffled_targets[num_train_samples:]
| Titanic - Machine Learning from Disaster |
13,795,550 | train_data.drop(['Id'], axis=1, inplace=True)
test_data.drop('ForecastId', axis=1, inplace=True )<count_missing_values> | input_size=7
output_size=2
hidden_layer_size=2700
num_hidden_layers=10
model=tf.keras.Sequential()
for i in range(num_hidden_layers):
model.add(tf.keras.layers.Dense(units=hidden_layer_size,activation='relu'))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(units=output_size,activation='softmax'))
learning_rate=0.005
optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9)
model.compile(optimizer=optimizer,loss=tf.keras.losses.SparseCategoricalCrossentropy() ,metrics=['accuracy'])
batch_size=90
epochs=30
early_stopping=tf.keras.callbacks.EarlyStopping(monitor='val_loss',patience=10)
def scheduler1(epoch, lr):
if epoch < 10:
return lr
elif epoch < 20:
return 0.003
elif epoch < 30:
return 0.002
elif epoch < 50:
return 0.001
else:
return 0.0009
def scheduler2(epoch, lr):
if epoch < 10:
return lr
else:
return lr * tf.math.exp(-0.1)
def scheduler3(epoch, lr):
if epoch < 10:
return lr
elif epoch < 20:
return lr * tf.math.exp(-0.1)
else:
return lr * tf.math.exp(-1.0)
scheduler=scheduler3
lrs=tf.keras.callbacks.LearningRateScheduler(scheduler)
model.fit(
x=train_input,
y=train_target,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=[
lrs
],
validation_data=(validation_input,validation_target)
)
| Titanic - Machine Learning from Disaster |
13,795,550 | missing_val_count_by_column =(train_data.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column>0] )<import_modules> | test_data_raw = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data=test_data_raw.drop(['Name','Ticket','Cabin'],axis='columns')
test_data.head()
| Titanic - Machine Learning from Disaster |
13,795,550 | from xgboost.sklearn import XGBRegressor<prepare_x_and_y> | age_arr=test_data.Age.values
bool_arr=pd.isna(test_data.Age.values)
total_age=0
num_age=0
for i in range(len(age_arr)) :
if bool_arr[i]==False:
total_age+=age_arr[i]
num_age+=1
avg_age=(total_age/num_age)
for i in range(len(age_arr)) :
if bool_arr[i]==True:
age_arr[i]=avg_age
scaled_age_arr=[round(age/avg_age,2)for age in age_arr]
test_data.Age=test_data.Age.replace(to_replace=test_data.Age.values,value=scaled_age_arr)
fare_arr=test_data.Fare.values
bool_arr=pd.isna(test_data.Fare.values)
total_fare=0
num_fare=0
for i in range(len(fare_arr)) :
if bool_arr[i]==False:
total_fare+=fare_arr[i]
num_fare+=1
avg_fare=(total_fare/num_fare)
for i in range(len(fare_arr)) :
if bool_arr[i]==True:
fare_arr[i]=avg_fare
scaled_fare_arr=[round(fare/avg_fare,2)for fare in fare_arr]
test_data.Fare=test_data.Fare.replace(to_replace=test_data.Fare.values,value=scaled_fare_arr)
test_data
| Titanic - Machine Learning from Disaster |
13,795,550 | X_train = train_data[['Country_Region','Date']]
y_train = train_data[['ConfirmedCases', 'Fatalities']]<import_modules> | d_Sex={'male':0,'female':1}
d_Embarked={'S':0,'C':1,'Q':2}
test_data.Sex = test_data.Sex.replace(d_Sex)
test_data.Embarked = test_data.Embarked.replace(d_Embarked)
test_data.Embarked=test_data.Embarked.astype(int)
| Titanic - Machine Learning from Disaster |
13,795,550 | from sklearn.tree import DecisionTreeRegressor<choose_model_class> | test_data.drop(['PassengerId'],axis=1 ).values | Titanic - Machine Learning from Disaster |
13,795,550 | tree_regressor1 = DecisionTreeRegressor(ccp_alpha=0.0, criterion='mse', max_depth=None,
max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort='deprecated',
random_state=6967, splitter='best' )<choose_model_class> | probabilities = model.predict(test_data.drop(['PassengerId'],axis=1 ).values)
predictions=list()
for p in probabilities:
if p[0]>p[1]:
predictions.append(0)
else:
predictions.append(1)
print(predictions ) | Titanic - Machine Learning from Disaster |
13,795,550 | tree_regressor2 = DecisionTreeRegressor(ccp_alpha=0.0, criterion='mse', max_depth=None,
max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort='deprecated',
random_state=6967, splitter='best' )<train_model> | output = pd.DataFrame({'PassengerId': test_data.PassengerId.values, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
13,795,550 | tree_regressor1.fit(X_train, y_train.ConfirmedCases )<train_model> | with open('my_submission.csv','r')as f:
reader=csv.reader(f)
for row in reader:
print(row ) | Titanic - Machine Learning from Disaster |
4,687,939 | tree_regressor2.fit(X_train, y_train.Fatalities )<predict_on_test> | warnings.filterwarnings("ignore")
| Titanic - Machine Learning from Disaster |
4,687,939 | best_best_estimate_1 = tree_regressor1.predict(test_data )<predict_on_test> | train=pd.read_csv('../input/train.csv')
test=pd.read_csv('../input/test.csv')
test.head() | Titanic - Machine Learning from Disaster |
4,687,939 | best_best_estimate_2 = tree_regressor2.predict(test_data )<create_dataframe> | train=train.drop('PassengerId',axis=1)
PassengerId=test['PassengerId']
test=test.drop('PassengerId',axis=1)
Survived=train['Survived']
| Titanic - Machine Learning from Disaster |
4,687,939 | df_sub = pd.DataFrame()<save_to_csv> | o=[]
c=['SibSp','Age','Parch','Fare']
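# Tukey rule: for each feature, flag rows outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]; rows flagged for more than two features are dropped below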
for f in c:
q1=np.percentile(train[f],25)
q3=np.percentile(train[f],75)
iq=q3-q1
iqs=1.5*iq
oi=train[(train[f]<q1-iqs)|(train[f]>q3+iqs)].index
o.extend(oi)
o=Counter(o)
mo=list(k for k,v in o.items() if v>2)
train=train.drop(mo,axis=0)
train.shape
| Titanic - Machine Learning from Disaster |
4,687,939 | df_sub['ForecastId'] = Test_id
df_sub['ConfirmedCases'] = np.round(best_best_estimate_1,0)
df_sub['Fatalities'] = np.round(best_best_estimate_2,0)
df_sub.to_csv('submission.csv', index=False )<install_modules> | print(train[['Pclass','Survived']].groupby(['Pclass'],as_index=False ).mean() ) | Titanic - Machine Learning from Disaster |
4,687,939 | !pip install pycountry_convert<set_options> | tlen=len(train)
data=pd.concat(objs=[train,test],axis=0 ).reset_index(drop=True)
data['size']=data['SibSp']+data['Parch']+1
| Titanic - Machine Learning from Disaster |
4,687,939 | %matplotlib inline
warnings.filterwarnings('ignore')
%config InlineBackend.figure_format = 'retina'
<load_from_csv> | data['alone']=0
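# family-size flags derived from 'size': alone (1), small family (2), medium family (3-4), big family (>4)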
data.loc[data['size']==1,'alone']=1
data['sf']=0
data.loc[data['size']==2,'sf']=1
data['mf']=data['size'].apply(lambda s: 1 if 3<= s <= 4 else 0)
data['bf']=data['size'].apply(lambda s: 1 if s>4 else 0)
| Titanic - Machine Learning from Disaster |
4,687,939 | df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv' )<data_type_conversions> | data['Sex']=data['Sex'].map({'male':1,'female':0} ).astype(int)
| Titanic - Machine Learning from Disaster |
4,687,939 | df_train['Date'] = pd.to_datetime(df_train['Date'], format = '%Y-%m-%d')
df_test['Date'] = pd.to_datetime(df_test['Date'], format = '%Y-%m-%d' )<categorify> | data.isnull().sum()
| Titanic - Machine Learning from Disaster |
4,687,939 | class country_utils() :
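# helper that resolves a country name to its ISO-3 code and continent via pycountry / pycountry_convert, caching results in self.d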
def __init__(self):
self.d = {}
def get_dic(self):
return self.d
def get_country_details(self,country):
try:
country_obj = pycountry.countries.get(name=country)
continent_code = pc.country_alpha2_to_continent_code(country_obj.alpha_2)
continent = pc.convert_continent_code_to_continent_name(continent_code)
return country_obj.alpha_3, continent
except:
if 'Congo' in country:
country = 'Congo'
elif country == 'Diamond Princess' or country == 'Laos' or country == 'MS Zaandam'\
or country == 'Holy See' or country == 'Timor-Leste':
return country, country
elif country == 'Korea, South':
country = 'Korea, Republic of'
elif country == 'Taiwan*':
country = 'Taiwan'
elif country == 'Burma':
country = 'Myanmar'
elif country == 'West Bank and Gaza':
country = 'Gaza'
country_obj = pycountry.countries.search_fuzzy(country)
continent_code = pc.country_alpha2_to_continent_code(country_obj[0].alpha_2)
continent = pc.convert_continent_code_to_continent_name(continent_code)
return country_obj[0].alpha_3, continent
def get_iso3(self, country):
return self.d[country]['code']
def get_continent(self,country):
return self.d[country]['continent']
def add_values(self,country):
self.d[country] = {}
self.d[country]['code'],self.d[country]['continent'] = self.get_country_details(country)
def fetch_iso3(self,country):
if country in self.d.keys() :
return self.get_iso3(country)
else:
self.add_values(country)
return self.get_iso3(country)
def fetch_continent(self,country):
if country in self.d.keys() :
return self.get_continent(country)
else:
self.add_values(country)
return self.get_continent(country )<feature_engineering> | data['Embarked']=data['Embarked'].fillna('S')
| Titanic - Machine Learning from Disaster |
4,687,939 | def add_daily_measures(df):
df.loc[0,'Daily Cases'] = df.loc[0,'ConfirmedCases']
df.loc[0,'Daily Deaths'] = df.loc[0,'Fatalities']
for i in range(1,len(df)) :
df.loc[i,'Daily Cases'] = df.loc[i,'ConfirmedCases'] - df.loc[i-1,'ConfirmedCases']
df.loc[i,'Daily Deaths'] = df.loc[i,'Fatalities'] - df.loc[i-1,'Fatalities']
df.loc[0,'Daily Cases'] = 0
df.loc[0,'Daily Deaths'] = 0
return df<groupby> | l=data[data['Age'].isnull() ].index
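# impute missing Age with the median age of passengers sharing SibSp, Parch and Pclass; fall back to the overall median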
for i in l:
am=data['Age'].median()
ap= data["Age"][(( data['SibSp'] == data.iloc[i]["SibSp"])&(data['Parch'] == data.iloc[i]["Parch"])&(data['Pclass'] == data.iloc[i]["Pclass"])) ].median()
if not np.isnan(ap):
data['Age'].iloc[i] = ap
else :
data['Age'].iloc[i] = am | Titanic - Machine Learning from Disaster |
4,687,939 | df_world = df_train.copy()
df_world = df_world.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()
df_world = add_daily_measures(df_world )<data_type_conversions> | data['title'] = data['title'].replace(['Lady', 'Countess','Capt', 'Col','Don'\
, 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data['title'] = data['title'].replace('Mlle', 'Miss')
data['title'] = data['title'].replace('Ms', 'Miss')
data['title'] = data['title'].replace('Mme', 'Mrs')
| Titanic - Machine Learning from Disaster |
4,687,939 | df_map = df_train.copy()
df_map = df_map[:24500]
df_map['Date'] = df_map['Date'].astype(str)
df_map = df_map.groupby(['Date','Country_Region'], as_index=False)['ConfirmedCases','Fatalities'].sum()<feature_engineering> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
data['title'] = data['title'].map(title_mapping)
| Titanic - Machine Learning from Disaster |
4,687,939 | df_map['iso_alpha'] = df_map.apply(lambda x: obj.fetch_iso3(x['Country_Region']), axis=1 )<feature_engineering> | data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2})
| Titanic - Machine Learning from Disaster |
4,687,939 | df_map['ln(ConfirmedCases)'] = np.log(df_map.ConfirmedCases + 1)
df_map['ln(Fatalities)'] = np.log(df_map.Fatalities + 1 )<filter> | Ticket = []
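# keep the alphabetic ticket prefix (dots and slashes stripped); purely numeric tickets become "X"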
for i in list(data.Ticket):
if not i.isdigit() :
Ticket.append(i.replace(".","" ).replace("/","" ).strip().split(' ')[0])
else:
Ticket.append("X")
data["Ticket"] = Ticket
data["Ticket"].head()
| Titanic - Machine Learning from Disaster |
4,687,939 | last_date = df_train.Date.max()
df_countries = df_train[df_train['Date']==last_date]
df_countries = df_countries.groupby('Country_Region', as_index=False)['ConfirmedCases','Fatalities'].sum()
df_countries = df_countries.nlargest(10,'ConfirmedCases')
df_trend = df_train.groupby(['Date','Country_Region'], as_index=False)['ConfirmedCases','Fatalities'].sum()
df_trend = df_trend.merge(df_countries, on='Country_Region')
df_trend.drop(['ConfirmedCases_y','Fatalities_y'],axis=1, inplace=True)
df_trend.rename(columns={'Country_Region':'Country', 'ConfirmedCases_x':'Cases', 'Fatalities_x':'Deaths'}, inplace=True)
df_trend['ln(Cases)'] = np.log(df_trend['Cases']+1)
df_trend['ln(Deaths)'] = np.log(df_trend['Deaths']+1 )<feature_engineering> | data["Pclass"] = data["Pclass"].astype("category")
data = pd.get_dummies(data, columns = ["Pclass",'Ticket','Embarked','Cabin'] ) | Titanic - Machine Learning from Disaster |
4,687,939 | df_map['Mortality Rate%'] = round(( df_map.Fatalities/df_map.ConfirmedCases)*100,2 )<define_variables> | data=data.drop(['Name'],axis=1 ) | Titanic - Machine Learning from Disaster |
4,687,939 | us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}<feature_engineering> | train=data[:tlen]
test=data[tlen:]
| Titanic - Machine Learning from Disaster |
4,687,939 | df_us = df_train[df_train['Country_Region']=='US']
df_us['Date'] = df_us['Date'].astype(str)
df_us['state_code'] = df_us.apply(lambda x: us_state_abbrev.get(x.Province_State,float('nan')) , axis=1)
df_us['ln(ConfirmedCases)'] = np.log(df_us.ConfirmedCases + 1)
df_us['ln(Fatalities)'] = np.log(df_us.Fatalities + 1 )<groupby> | xtrain=train.drop(['Survived'],axis=1 ).values
xtest=test.drop(['Survived'],axis=1 ).values
ytrain=train['Survived'].values | Titanic - Machine Learning from Disaster |
4,687,939 | df_train.Province_State.fillna('NaN', inplace=True)
df_plot = df_train.groupby(['Date','Country_Region','Province_State'], as_index=False)['ConfirmedCases','Fatalities'].sum()<groupby> | sv=SVC() | Titanic - Machine Learning from Disaster |
4,687,939 | df_train.Province_State.fillna('NaN', inplace=True)
df_plot = df_train.groupby(['Date','Country_Region','Province_State'], as_index=False)['ConfirmedCases','Fatalities'].sum()<groupby> | kf=KFold(10,True,0 ) | Titanic - Machine Learning from Disaster |
4,687,939 | df_train.Province_State.fillna('NaN', inplace=True)
df_plot = df_train.groupby(['Date','Country_Region','Province_State'], as_index=False)['ConfirmedCases','Fatalities'].sum()<load_from_csv> | cross_val_score(sv, xtrain, y = ytrain, scoring = "accuracy", cv = kf, n_jobs=4 ).mean() | Titanic - Machine Learning from Disaster |
4,687,939 | config = tf.compat.v1.ConfigProto(device_count = {'GPU': 1 , 'CPU': 10})
sess = tf.compat.v1.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)
def main_for_train(save_model_train=False, save_public_test=False):
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
train['day'] = train.Date.dt.dayofyear
train['my_geoloc'] = ['_'.join(x)for x in zip(train['Country_Region'], train['Province_State'])]
train
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test['Province_State'].fillna('', inplace=True)
test['Date'] = pd.to_datetime(test['Date'])
test['day'] = test.Date.dt.dayofyear
test['my_geoloc'] = ['_'.join(x)for x in zip(test['Country_Region'], test['Province_State'])]
test
day_min = train['day'].min()
train['day'] -= day_min
test['day'] -= day_min
min_test_val_day = test.day.min()
max_test_val_day = train.day.max()
max_test_day = test.day.max()
num_days = max_test_day + 1
min_test_val_day, max_test_val_day, num_days
train['ForecastId'] = -1
test['Id'] = -1
test['ConfirmedCases'] = 0
test['Fatalities'] = 0
debug = False
data = pd.concat([train,
test[test.day > max_test_val_day][train.columns]
] ).reset_index(drop=True)
if debug:
data = data[data['my_geoloc'] >= 'France_'].reset_index(drop=True)
gc.collect()
dates = data[data['my_geoloc'] == 'France_'].Date.values
if 0:
gr = data.groupby('my_geoloc')
data['ConfirmedCases'] = gr.ConfirmedCases.transform('cummax')
data['Fatalities'] = gr.Fatalities.transform('cummax')
my_geoloc_data = data.pivot(index='my_geoloc', columns='day', values='ForecastId')
num_my_geoloc = my_geoloc_data.shape[0]
my_geoloc_data
my_geoloc_id = {}
for i,g in enumerate(my_geoloc_data.index):
my_geoloc_id[g] = i
ConfirmedCases = data.pivot(index='my_geoloc', columns='day', values='ConfirmedCases')
Fatalities = data.pivot(index='my_geoloc', columns='day', values='Fatalities')
if debug:
cases = ConfirmedCases.values
deaths = Fatalities.values
else:
cases = np.log1p(ConfirmedCases.values)
deaths = np.log1p(Fatalities.values)
def load_my_dataset(start_pred, num_train, lag_period):
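# build lag_period-wide windows of lagged cases/deaths plus next-day targets for the num_train days ending at start_pred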
days = np.arange(start_pred - num_train + 1, start_pred + 1)
lag_cases = np.vstack([cases[:, d - lag_period : d] for d in days])
lag_deaths = np.vstack([deaths[:, d - lag_period : d] for d in days])
target_cases = np.vstack([cases[:, d : d + 1] for d in days])
target_deaths = np.vstack([deaths[:, d : d + 1] for d in days])
my_geoloc_ids = np.vstack([my_geoloc_ids_base for d in days])
country_ids = np.vstack([country_ids_base for d in days])
return lag_cases, lag_deaths, target_cases, target_deaths, my_geoloc_ids, country_ids, days
def update_valid_dataset(data, pred_death, pred_case):
lag_cases, lag_deaths, target_cases, target_deaths, my_geoloc_ids, country_ids, days = data
day = days[-1] + 1
new_lag_cases = np.hstack([lag_cases[:, 1:], pred_case])
new_lag_deaths = np.hstack([lag_deaths[:, 1:], pred_death])
new_target_cases = cases[:, day:day+1]
new_target_deaths = deaths[:, day:day+1]
new_my_geoloc_ids = my_geoloc_ids
new_country_ids = country_ids
new_days = 1 + days
return new_lag_cases, new_lag_deaths, new_target_cases, new_target_deaths, new_my_geoloc_ids, new_country_ids, new_days
def infer_model(lr_death, lr_case, data, start_lag_death, end_lag_death, num_lag_case, fit, score):
lag_cases, lag_deaths, target_cases, target_deaths, my_geoloc_ids, country_ids, days = data
X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], country_ids])
X_death = np.hstack([lag_deaths[:, -num_lag_case:], country_ids])
X_death = np.hstack([lag_cases[:, -start_lag_death:-end_lag_death], lag_deaths[:, -num_lag_case:], country_ids])
y_death = target_deaths
y_death_prev = lag_deaths[:, -1:]
if fit:
if 0:
keep =(y_death > 0 ).ravel()
X_death = X_death[keep]
y_death = y_death[keep]
y_death_prev = y_death_prev[keep]
lr_death.fit(X_death, y_death)
y_pred_death = lr_death.predict(X_death)
y_pred_death = np.maximum(y_pred_death, y_death_prev)
X_case = np.hstack([lag_cases[:, -num_lag_case:], my_geoloc_ids])
X_case = lag_cases[:, -num_lag_case:]
y_case = target_cases
y_case_prev = lag_cases[:, -1:]
if fit:
lr_case.fit(X_case, y_case)
y_pred_case = lr_case.predict(X_case)
y_pred_case = np.maximum(y_pred_case, y_case_prev)
if score:
death_score = val_score(y_death, y_pred_death)
case_score = val_score(y_case, y_pred_case)
else:
death_score = 0
case_score = 0
return death_score, case_score, y_pred_death, y_pred_case
def train_model(train, valid, start_lag_death, end_lag_death, num_lag_case, num_val, score=True):
alpha = 2
lr_death = Ridge(alpha=alpha, fit_intercept=False)
lr_case = Ridge(alpha=alpha, fit_intercept=True)
(train_death_score, train_case_score, train_pred_death, train_pred_case,
)= infer_model(lr_death, lr_case, train, start_lag_death, end_lag_death, num_lag_case, fit=True, score=score)
death_scores = []
case_scores = []
death_pred = []
case_pred = []
for i in range(num_val):
(valid_death_score, valid_case_score, valid_pred_death, valid_pred_case,
)= infer_model(lr_death, lr_case, valid, start_lag_death, end_lag_death, num_lag_case, fit=False, score=score)
death_scores.append(valid_death_score)
case_scores.append(valid_case_score)
death_pred.append(valid_pred_death)
case_pred.append(valid_pred_case)
if 0:
print('val death: %0.3f' % valid_death_score,
'val case: %0.3f' % valid_case_score,
'val : %0.3f' % np.mean([valid_death_score, valid_case_score]),
flush=True)
valid = update_valid_dataset(valid, valid_pred_death, valid_pred_case)
if score:
death_scores = np.sqrt(np.mean([s**2 for s in death_scores]))
case_scores = np.sqrt(np.mean([s**2 for s in case_scores]))
if 0:
print('train death: %0.3f' % train_death_score,
'train case: %0.3f' % train_case_score,
'val death: %0.3f' % death_scores,
'val case: %0.3f' % case_scores,
'val : %0.3f' %(( death_scores + case_scores)/ 2),
flush=True)
else:
print('%0.4f' % case_scores,
', %0.4f' % death_scores,
'= %0.4f' %(( death_scores + case_scores)/ 2),
flush=True)
death_pred = np.hstack(death_pred)
case_pred = np.hstack(case_pred)
return death_scores, case_scores, death_pred, case_pred
countries = [g.split('_')[0] for g in my_geoloc_data.index]
countries = pd.factorize(countries)[0]
country_ids_base = countries.reshape(( -1, 1))
ohe = OneHotEncoder(sparse=False)
country_ids_base = 0.2 * ohe.fit_transform(country_ids_base)
country_ids_base.shape
my_geoloc_ids_base = np.arange(num_my_geoloc ).reshape(( -1, 1))
ohe = OneHotEncoder(sparse=False)
my_geoloc_ids_base = 0.1 * ohe.fit_transform(my_geoloc_ids_base)
my_geoloc_ids_base.shape
def val_score(true, pred):
pred = np.log1p(np.round(np.expm1(pred)- 0.2))
return np.sqrt(mean_squared_error(true.ravel() , pred.ravel()))
def val_score(true, pred):
return np.sqrt(mean_squared_error(true.ravel() , pred.ravel()))
start_lag_death, end_lag_death = 14, 6,
num_train = 6
num_lag_case = 14
lag_period = max(start_lag_death, num_lag_case)
def load_outputs_fit(start_val_delta=0):
start_val = min_test_val_day + start_val_delta
last_train = start_val - 1
num_val = max_test_val_day - start_val + 1
print(dates[start_val], start_val, num_val)
train_data = load_my_dataset(last_train, num_train, lag_period)
valid_data = load_my_dataset(start_val, 1, lag_period)
_, _, val_death_preds, val_case_preds = train_model(train_data, valid_data,
start_lag_death, end_lag_death, num_lag_case, num_val)
pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
pred_deaths = pred_deaths.stack().reset_index()
pred_deaths.columns = ['my_geoloc', 'day', 'Fatalities']
pred_deaths
pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
pred_cases.iloc[:, :] = np.expm1(val_case_preds)
pred_cases = pred_cases.stack().reset_index()
pred_cases.columns = ['my_geoloc', 'day', 'ConfirmedCases']
pred_cases
sub = train[['Date', 'Id', 'my_geoloc', 'day']]
sub = sub.merge(pred_cases, how='left', on=['my_geoloc', 'day'])
sub = sub.merge(pred_deaths, how='left', on=['my_geoloc', 'day'])
sub = sub[sub.day >= start_val]
sub = sub[['Id', 'ConfirmedCases', 'Fatalities']].copy()
return sub
if save_model_train:
for start_val_delta, date in zip(range(3, -8, -3),
['2020-04-27', '2020-04-24', '2020-04-21', '2020-04-18']):
print(date, end=' ')
outputs_fit = load_outputs_fit(start_val_delta)
outputs_fit.to_csv('../submissions/cpmp-%s.csv' % date, index=None)
def get_sub(start_val_delta=0):
start_val = min_test_val_day + start_val_delta
last_train = start_val - 1
num_val = max_test_val_day - start_val + 1
print(dates[last_train], start_val, num_val)
num_lag_case = 14
train_data = load_my_dataset(last_train, num_train, lag_period)
valid_data = load_my_dataset(start_val, 1, lag_period)
_, _, val_death_preds, val_case_preds = train_model(train_data, valid_data,
start_lag_death, end_lag_death, num_lag_case, num_val)
pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
pred_deaths = pred_deaths.stack().reset_index()
pred_deaths.columns = ['my_geoloc', 'day', 'Fatalities']
pred_deaths
pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
pred_cases.iloc[:, :] = np.expm1(val_case_preds)
pred_cases = pred_cases.stack().reset_index()
pred_cases.columns = ['my_geoloc', 'day', 'ConfirmedCases']
pred_cases
sub = test[['Date', 'ForecastId', 'my_geoloc', 'day']]
sub = sub.merge(pred_cases, how='left', on=['my_geoloc', 'day'])
sub = sub.merge(pred_deaths, how='left', on=['my_geoloc', 'day'])
sub = sub.fillna(0)
sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']]
return sub
return sub
known_test = train[['my_geoloc', 'day', 'ConfirmedCases', 'Fatalities']
].merge(test[['my_geoloc', 'day', 'ForecastId']], how='left', on=['my_geoloc', 'day'])
known_test = known_test[['ForecastId', 'ConfirmedCases', 'Fatalities']][known_test.ForecastId.notnull() ].copy()
known_test
unknow_test = test[test.day > max_test_val_day]
unknow_test
def get_final_sub() :
start_val = max_test_val_day + 1
last_train = start_val - 1
num_val = max_test_day - start_val + 1
print(dates[last_train], start_val, num_val)
num_lag_case = num_val + 3
train_data = load_my_dataset(last_train, num_train, lag_period)
valid_data = load_my_dataset(start_val, 1, lag_period)
(_, _, val_death_preds, val_case_preds
)= train_model(train_data, valid_data, start_lag_death, end_lag_death, num_lag_case, num_val, score=False)
pred_deaths = Fatalities.iloc[:, start_val:start_val+num_val].copy()
pred_deaths.iloc[:, :] = np.expm1(val_death_preds)
pred_deaths = pred_deaths.stack().reset_index()
pred_deaths.columns = ['my_geoloc', 'day', 'Fatalities']
pred_deaths
pred_cases = ConfirmedCases.iloc[:, start_val:start_val+num_val].copy()
pred_cases.iloc[:, :] = np.expm1(val_case_preds)
pred_cases = pred_cases.stack().reset_index()
pred_cases.columns = ['my_geoloc', 'day', 'ConfirmedCases']
pred_cases
print(unknow_test.shape, pred_deaths.shape, pred_cases.shape)
sub = unknow_test[['Date', 'ForecastId', 'my_geoloc', 'day']]
sub = sub.merge(pred_cases, how='left', on=['my_geoloc', 'day'])
sub = sub.merge(pred_deaths, how='left', on=['my_geoloc', 'day'])
sub = sub[['ForecastId', 'ConfirmedCases', 'Fatalities']]
sub = pd.concat([known_test, sub])
return sub
if save_public_test:
sub = get_sub()
else:
sub = get_final_sub()
return sub
def load_deep_nn() :
df = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv")
dataframe_for_submission = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv")
co_train_data = pd.read_csv(".. /input/mytrainweek7/train(3 ).csv" ).rename(columns={"Country/Region": "Country_Region"})
co_train_data = co_train_data.groupby("Country_Region")[["Lat", "Long"]].mean().reset_index()
co_train_data = co_train_data[co_train_data["Country_Region"].notnull() ]
loc_group = ["Province_State", "Country_Region"]
def preprocess(df):
df["Date"] = df["Date"].astype("datetime64[ms]")
df["days"] =(df["Date"] - pd.to_datetime("2020-01-01")).dt.days
df["weekend"] = df["Date"].dt.dayofweek//5
df = df.merge(co_train_data, how="left", on="Country_Region")
df["Lat"] =(df["Lat"] // 30 ).astype(np.float32 ).fillna(0)
df["Long"] =(df["Long"] // 60 ).astype(np.float32 ).fillna(0)
for col in loc_group:
df[col].fillna("none", inplace=True)
return df
df = preprocess(df)
dataframe_for_submission = preprocess(dataframe_for_submission)
print(df.shape)
TARGETS = ["ConfirmedCases", "Fatalities"]
for col in TARGETS:
df[col] = np.log1p(df[col])
NUM_SHIFT = 5
features = ["Lat", "Long"]
for s in range(1, NUM_SHIFT+1):
for col in TARGETS:
df["prev_{}_{}".format(col, s)] = df.groupby(loc_group)[col].shift(s)
features.append("prev_{}_{}".format(col, s))
df = df[df["Date"] >= df["Date"].min() + timedelta(days=NUM_SHIFT)].copy()
TEST_FIRST = dataframe_for_submission["Date"].min()
TEST_DAYS =(df["Date"].max() - TEST_FIRST ).days + 1
dev_df, test_df = df[df["Date"] < TEST_FIRST].copy() , df[df["Date"] >= TEST_FIRST].copy()
def nn_block(input_layer, size, dropout_rate, activation):
out_layer = KL.Dense(size, activation=None )(input_layer)
out_layer = KL.Activation(activation )(out_layer)
out_layer = KL.Dropout(dropout_rate )(out_layer)
return out_layer
def get_model() :
inp = KL.Input(shape=(len(features),))
hidden_layer = nn_block(inp, 208, 0.0, "relu")
gate_layer = nn_block(hidden_layer, 104, 0.0, "hard_sigmoid")
hidden_layer = nn_block(hidden_layer, 104, 0.0, "relu")
hidden_layer = KL.multiply([hidden_layer, gate_layer])
out = KL.Dense(len(TARGETS), activation="linear" )(hidden_layer)
model = tf.keras.models.Model(inputs=[inp], outputs=out)
return model
get_model().summary()
def get_input(df):
return [df[features]]
NUM_MODELS = 100
def train_models(df, save=False):
models = []
for i in range(NUM_MODELS):
model = get_model()
model.compile(loss="mean_squared_error", optimizer=Nadam(lr=1e-4))
hist = model.fit(get_input(df), df[TARGETS],
batch_size=2250, epochs=1000, verbose=0, shuffle=True)
if save:
model.save_weights("model{}.h5".format(i))
models.append(model)
return models
models = train_models(dev_df)
prev_targets = ['prev_ConfirmedCases_1', 'prev_Fatalities_1']
def predict_one(df, models):
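# average the ensemble's predictions, keep them >= the previous day's values (cumulative counts), and cap at 15 on the log1p scale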
pred = np.zeros(( df.shape[0], 2))
for model in models:
pred += model.predict(get_input(df)) /len(models)
pred = np.maximum(pred, df[prev_targets].values)
pred[:, 0] = np.log1p(np.expm1(pred[:, 0])+ 0.1)
pred[:, 1] = np.log1p(np.expm1(pred[:, 1])+ 0.01)
return np.clip(pred, None, 15)
print([mean_squared_error(dev_df[TARGETS[i]], predict_one(dev_df, models)[:, i])for i in range(len(TARGETS)) ])
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
def evaluate(df):
error = 0
for col in TARGETS:
error += rmse(df[col].values, df["pred_{}".format(col)].values)
return np.round(error/len(TARGETS), 5)
def predict(test_df, first_day, num_days, models, val=False):
temp_df = test_df.loc[test_df["Date"] == first_day].copy()
y_pred = predict_one(temp_df, models)
for i, col in enumerate(TARGETS):
test_df["pred_{}".format(col)] = 0
test_df.loc[test_df["Date"] == first_day, "pred_{}".format(col)] = y_pred[:, i]
print(first_day, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max())
if val:
print(evaluate(test_df[test_df["Date"] == first_day]))
y_prevs = [None]*NUM_SHIFT
for i in range(1, NUM_SHIFT):
y_prevs[i] = temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]].values
for d in range(1, num_days):
date = first_day + timedelta(days=d)
print(date, np.isnan(y_pred ).sum() , y_pred.min() , y_pred.max())
temp_df = test_df.loc[test_df["Date"] == date].copy()
temp_df[prev_targets] = y_pred
for i in range(2, NUM_SHIFT+1):
temp_df[['prev_ConfirmedCases_{}'.format(i), 'prev_Fatalities_{}'.format(i)]] = y_prevs[i-1]
y_pred, y_prevs = predict_one(temp_df, models), [None, y_pred] + y_prevs[1:-1]
for i, col in enumerate(TARGETS):
test_df.loc[test_df["Date"] == date, "pred_{}".format(col)] = y_pred[:, i]
if val:
print(evaluate(test_df[test_df["Date"] == date]))
return test_df
test_df = predict(test_df, TEST_FIRST, TEST_DAYS, models, val=True)
print(evaluate(test_df))
for col in TARGETS:
test_df[col] = np.expm1(test_df[col])
test_df["pred_{}".format(col)] = np.expm1(test_df["pred_{}".format(col)])
models = train_models(df, save=True)
dataframe_for_submission_public = dataframe_for_submission[dataframe_for_submission["Date"] <= df["Date"].max() ].copy()
dataframe_for_submission_private = dataframe_for_submission[dataframe_for_submission["Date"] > df["Date"].max() ].copy()
pred_cols = ["pred_{}".format(col)for col in TARGETS]
dataframe_for_submission_public = dataframe_for_submission_public.merge(test_df[["Date"] + loc_group + TARGETS], how="left", on=["Date"] + loc_group)
SUB_FIRST = dataframe_for_submission_private["Date"].min()
SUB_DAYS =(dataframe_for_submission_private["Date"].max() - dataframe_for_submission_private["Date"].min() ).days + 1
dataframe_for_submission_private = df.append(dataframe_for_submission_private, sort=False)
for s in range(1, NUM_SHIFT+1):
for col in TARGETS:
dataframe_for_submission_private["prev_{}_{}".format(col, s)] = dataframe_for_submission_private.groupby(loc_group)[col].shift(s)
dataframe_for_submission_private = dataframe_for_submission_private[dataframe_for_submission_private["Date"] >= SUB_FIRST].copy()
dataframe_for_submission_private = predict(dataframe_for_submission_private, SUB_FIRST, SUB_DAYS, models)
for col in TARGETS:
dataframe_for_submission_private[col] = np.expm1(dataframe_for_submission_private["pred_{}".format(col)])
dataframe_for_submission = dataframe_for_submission_public.append(dataframe_for_submission_private, sort=False)
dataframe_for_submission["ForecastId"] = dataframe_for_submission["ForecastId"].astype(np.int16)
return dataframe_for_submission[["ForecastId"] + TARGETS]
sub1 = main_for_train()
sub1['ForecastId'] = sub1['ForecastId'].astype('int')
sub2 = load_deep_nn()
sub1.sort_values("ForecastId", inplace=True)
sub2.sort_values("ForecastId", inplace=True)
TARGETS = ["ConfirmedCases", "Fatalities"]
[np.sqrt(mean_squared_error(np.log1p(sub1[t].values), np.log1p(sub2[t].values)))for t in TARGETS]
dataframe_for_submission = sub1.copy()
for t in TARGETS:
dataframe_for_submission[t] = np.expm1(np.log1p(sub1[t].values)*0.5 + np.log1p(sub2[t].values)*0.5 )<load_from_csv> | scores=[]
kf=KFold(10,True,0)
for(train_index,test_index)in kf.split(xtrain):
X_train, X_test, y_train, y_test = xtrain[train_index], xtrain[test_index], ytrain[train_index], ytrain[test_index]
sv.fit(X_train, y_train)
scores.append(sv.score(X_test, y_test))
print(np.mean(scores))
| Titanic - Machine Learning from Disaster |
4,687,939 | train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
train['Date'] = pd.to_datetime(train['Date'])
def dealing_with_null_values(dataset):
dataset = dataset
for i in dataset.columns:
replace = []
data = dataset[i].isnull()
count = 0
for j,k in zip(data,dataset[i]):
if(j==True):
count = count+1
replace.append('No Information Available')
else:
replace.append(k)
print("Num of null values(",i,"):",count)
dataset[i] = replace
return dataset
train = dealing_with_null_values(train)
def fillState(state, country):
if state == 'No Information Available': return country
return state
train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
train.loc[:, 'Date'] = train.Date.dt.strftime("%m%d")
train["Date"] = train["Date"].astype(int)
le = preprocessing.LabelEncoder()
train.Country_Region = le.fit_transform(train.Country_Region)
train.Province_State = le.fit_transform(train.Province_State)
data = pd.DataFrame()
data['Province_State']=train['Province_State']
data['Country_Region']=train['Country_Region']
data['Date']=train['Date']
xgb_model1 = XGBRegressor(n_estimators=1000)
xgb_model1.fit(data,train['ConfirmedCases'])
xgb_model2 = XGBRegressor(n_estimators=1000)
xgb_model2.fit(data,train['Fatalities'])
test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
test['Date'] = pd.to_datetime(test['Date'], infer_datetime_format=True)
test = dealing_with_null_values(test)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
test.loc[:, 'Date'] = test.Date.dt.strftime("%m%d")
test["Date"] = test["Date"].astype(int)
le = preprocessing.LabelEncoder()
test.Country_Region = le.fit_transform(test.Country_Region)
test.Province_State = le.fit_transform(test.Province_State)
test_data = pd.DataFrame()
test_data['Province_State'] = test['Province_State']
test_data['Country_Region'] = test['Country_Region']
test_data['Date'] = test['Date']
filterwarnings('ignore')
xgb_sub = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for i in test['Country_Region'].unique() :
s = test[test['Country_Region'] == i].Province_State.unique()
for j in s:
xgb_sub_train = data[(data['Country_Region'] == i)&(data['Province_State'] == j)]
xgb_sub_test = test_data[(test_data['Country_Region']==i)&(test_data['Province_State'] == j)]
xgb_sub_train_with_labels = train[(train['Country_Region'] == i)&(train['Province_State'] == j)]
index_test = test[(test['Country_Region']==i)&(test['Province_State'] == j)]
index_test = index_test['ForecastId']
xgb_model1 = XGBRegressor(n_estimators=2000)
xgb_model1.fit(xgb_sub_train,xgb_sub_train_with_labels['ConfirmedCases'])
xgb_model2 = XGBRegressor(n_estimators=2000)
xgb_model2.fit(xgb_sub_train,xgb_sub_train_with_labels['Fatalities'])
y1_xpred_xgb_sub = xgb_model1.predict(xgb_sub_test)
y2_xpred_xgb_sub = xgb_model2.predict(xgb_sub_test)
xgb_xgb_subs = pd.DataFrame()
xgb_xgb_subs['ForecastId'] = index_test
xgb_xgb_subs['ConfirmedCases'] = y1_xpred_xgb_sub
xgb_xgb_subs['Fatalities']=y2_xpred_xgb_sub
xgb_sub = pd.concat([xgb_sub, xgb_xgb_subs], axis=0)
xgb_sub['ForecastId']= xgb_sub['ForecastId'].astype('int' )<import_modules> | from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier,ExtraTreesClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve
import xgboost as xgb | Titanic - Machine Learning from Disaster |
4,687,939 | import pandas as pd
from pathlib import Path
from pandas_profiling import ProfileReport
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
import datetime
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
<load_from_csv> | classifiers = [
SVC() ,
AdaBoostClassifier() ,
GradientBoostingClassifier() ,
LogisticRegression() ,
LinearDiscriminantAnalysis() ,
xgb.XGBClassifier() ] | Titanic - Machine Learning from Disaster |
4,687,939 | dataset_path = Path('/kaggle/input/covid19-global-forecasting-week-4')
train = pd.read_csv(dataset_path/'train.csv')
test = pd.read_csv(dataset_path/'test.csv')
dtree_sub = pd.read_csv(dataset_path/'submission.csv' )<create_dataframe> | for cf in classifiers:
print(cross_val_score(cf,xtrain,ytrain,cv=10 ).mean())
| Titanic - Machine Learning from Disaster |
4,687,939 | train_profile = ProfileReport(train, title='COVID19 WEEK 4 Profiling Report', html={'style':{'full_width':True}},progress_bar=False);
train_profile<categorify> | n=xtrain.shape[0]
| Titanic - Machine Learning from Disaster |
4,687,939 | def fill_state(state,country):
if pd.isna(state): return country
return state<feature_engineering> | kf=KFold(10,True,0)
sttrain=np.zeros(( n,1))
stest=np.zeros(( xtest.shape[0],1))
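# stacking: for each base classifier, collect out-of-fold predictions on the train set and fold-averaged predictions on the test set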
for cf in classifiers:
otr=np.zeros(( n,))
oof_test = np.zeros(( xtest.shape[0],))
oof_test_skf = np.empty(( 10, xtest.shape[0]))
for i,(train_index,test_index)in enumerate(kf.split(train)) :
X_train, X_test, y_train, y_test = xtrain[train_index], xtrain[test_index], ytrain[train_index], ytrain[test_index]
cf.fit(X_train,y_train)
otr[test_index]=cf.predict(X_test)
oof_test_skf[i, :] = cf.predict(xtest)
oof_test[:] = oof_test_skf.mean(axis=0)
otr=otr.reshape(-1,1)
oof_test= oof_test.reshape(-1, 1)
sttrain=np.append(sttrain, otr, axis=1)
stest=np.append(stest, oof_test, axis=1)
sttrain=sttrain[:,1:]
stest=stest[:,1:] | Titanic - Machine Learning from Disaster |
4,687,939 | train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fill_state(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fill_state(x['Province_State'], x['Country_Region']), axis=1)
train['Date'] = pd.to_datetime(train['Date'],infer_datetime_format=True)
test['Date'] = pd.to_datetime(test['Date'],infer_datetime_format=True)
train['Day_of_Week'] = train['Date'].dt.dayofweek
test['Day_of_Week'] = test['Date'].dt.dayofweek
train['Month'] = train['Date'].dt.month
test['Month'] = test['Date'].dt.month
train['Day'] = train['Date'].dt.day
test['Day'] = test['Date'].dt.day
train['Day_of_Year'] = train['Date'].dt.dayofyear
test['Day_of_Year'] = test['Date'].dt.dayofyear
train['Week_of_Year'] = train['Date'].dt.weekofyear
test['Week_of_Year'] = test['Date'].dt.weekofyear
train['Quarter'] = train['Date'].dt.quarter
test['Quarter'] = test['Date'].dt.quarter
train.drop('Date',1,inplace=True)
test.drop('Date',1,inplace=True )<categorify> | xgc=xgb.XGBClassifier()
xgc.fit(sttrain,ytrain)
pred=xgc.predict(stest ).astype(int ) | Titanic - Machine Learning from Disaster |
4,687,939 | dtree_sub=pd.DataFrame(columns=dtree_sub.columns)
l1=LabelEncoder()
l2=LabelEncoder()
l1.fit(train['Country_Region'])
l2.fit(train['Province_State'] )<categorify> | Titanic - Machine Learning from Disaster | |
4,687,939 | countries=train['Country_Region'].unique()
for country in countries:
country_df=train[train['Country_Region']==country]
provinces=country_df['Province_State'].unique()
for province in provinces:
train_df=country_df[country_df['Province_State']==province]
train_df.pop('Id')
x=train_df[['Province_State','Country_Region','Day_of_Week','Month','Day','Day_of_Year','Week_of_Year','Quarter']]
x['Country_Region']=l1.transform(x['Country_Region'])
x['Province_State']=l2.transform(x['Province_State'])
y1=train_df[['ConfirmedCases']]
y2=train_df[['Fatalities']]
model_1=DecisionTreeClassifier()
model_2=DecisionTreeClassifier()
model_1.fit(x,y1)
model_2.fit(x,y2)
test_df=test.query('Province_State==@province & Country_Region==@country')
test_id=test_df['ForecastId'].values.tolist()
test_df.pop('ForecastId')
test_x=test_df[['Province_State','Country_Region','Day_of_Week','Month','Day','Day_of_Year','Week_of_Year','Quarter']]
test_x['Country_Region']=l1.transform(test_x['Country_Region'])
test_x['Province_State']=l2.transform(test_x['Province_State'])
test_y1=model_1.predict(test_x)
test_y2=model_2.predict(test_x)
test_res=pd.DataFrame(columns=dtree_sub.columns)
test_res['ForecastId']=test_id
test_res['ConfirmedCases']=test_y1
test_res['Fatalities']=test_y2
dtree_sub=dtree_sub.append(test_res )<save_to_csv> | FileLinks('.')
| Titanic - Machine Learning from Disaster |
4,687,939 | dtree_confirmed=dtree_sub["ConfirmedCases"]
dtree_fatal=dtree_sub["Fatalities"]
boost_confirmed = xgb_sub["ConfirmedCases"]
boost_fatal = xgb_sub["Fatalities"]
deep_confirmed = dataframe_for_submission["ConfirmedCases"]
deep_fatal = dataframe_for_submission["Fatalities"]
dataframe_for_submission["ConfirmedCases"] = 0.1 * boost_confirmed.values + 0.70 * deep_confirmed.values + 0.20 *dtree_confirmed.values
dataframe_for_submission["Fatalities"] = 0.1 * boost_fatal.values + 0.70 * deep_fatal.values + 0.20 * dtree_fatal.values
dataframe_for_submission.to_csv('submission.csv',index=False )<import_modules> | kfold = StratifiedKFold(n_splits=10 ) | Titanic - Machine Learning from Disaster |
4,687,939 | warnings.filterwarnings('ignore')
<load_from_csv> | SVMC = SVC(probability=True)
svc_param_grid = {'kernel': ['rbf'],
'gamma': [ 0.001, 0.01, 0.1, 1],
'C': [1, 10, 50, 100,200,300, 1000]}
gsSVMC = GridSearchCV(SVMC,param_grid = svc_param_grid, cv=kf, scoring="accuracy", n_jobs= 4, verbose = 1)
gsSVMC.fit(xtrain,ytrain)
SVMC_best = gsSVMC.best_estimator_
gsSVMC.best_score_ | Titanic - Machine Learning from Disaster |
4,687,939 | covid_cases = pd.read_csv('../input/novel-corona-virus-2019-dataset/covid_19_data.csv')
covid_cases.head()<load_from_csv> | RFC = RandomForestClassifier()
rf_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kf, scoring="accuracy", n_jobs= 4, verbose = 1)
gsRFC.fit(xtrain,ytrain)
RFC_best = gsRFC.best_estimator_
gsRFC.best_score_ | Titanic - Machine Learning from Disaster |
4,687,939 | training_data = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv")
testing_data = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv" )<data_type_conversions> | ExtC = ExtraTreesClassifier()
ex_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsExtC = GridSearchCV(ExtC,param_grid = ex_param_grid, cv=kf, scoring="accuracy", n_jobs= 4, verbose = 1)
gsExtC.fit(xtrain,ytrain)
ExtC_best = gsExtC.best_estimator_
gsExtC.best_score_
| Titanic - Machine Learning from Disaster |
4,687,939 | print(training_data.isnull().sum())
print(testing_data.isnull().sum())
print(training_data.dtypes)
print(testing_data.dtypes)
training_data['Province_State'].fillna("",inplace = True)
testing_data['Province_State'].fillna("",inplace = True )<concatenate> | GBC = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
'n_estimators' : [100,200,300],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [4, 8],
'min_samples_leaf': [100,150],
'max_features': [0.3, 0.1]
}
gsGBC = GridSearchCV(GBC,param_grid = gb_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsGBC.fit(xtrain,ytrain)
GBC_best = gsGBC.best_estimator_
gsGBC.best_score_ | Titanic - Machine Learning from Disaster |
4,687,939 | country_list = covid_cases['Country/Region'].unique()
country_grouped_covid = covid_cases[0:1]
for country in country_list:
test_data = covid_cases['Country/Region'] == country
test_data = covid_cases[test_data]
country_grouped_covid = pd.concat([country_grouped_covid, test_data], axis=0)
country_grouped_covid.reset_index(drop=True)
country_grouped_covid.head()
country_grouped_covid.drop('Last Update', axis=1, inplace=True)
country_grouped_covid['Province/State'].replace(np.nan, "Not Reported", inplace=True)
country_grouped_covid.head()
<count_unique_values> | votingC = VotingClassifier(estimators=[('rfc', RFC_best),('extc', ExtC_best),
('svc', SVMC_best),('gbc',GBC_best)], voting='soft', n_jobs=4)
votingC = votingC.fit(xtrain, ytrain ) | Titanic - Machine Learning from Disaster |
4,687,939 | latest_data = country_grouped_covid['ObservationDate'] == '04/10/2020'
country_data = country_grouped_covid[latest_data]
country_list = country_data['Country/Region'].unique()
print("The total number of countries with COVID-19 Confirmed cases = {}".format(country_list.size))<feature_engineering> | test_Survived = pd.Series(votingC.predict(xtest), name="Survived" ).astype(int)
results = pd.concat([PassengerId,test_Survived],axis=1)
results.to_csv("ensemble_python_voting.csv",index=False ) | Titanic - Machine Learning from Disaster |
4,687,939 | py.init_notebook_mode(connected=True)
formated_gdf = covid_cases.groupby(['ObservationDate', 'Country/Region'])['Confirmed', 'Deaths', 'Recovered'].max()
formated_gdf = formated_gdf.reset_index()
formated_gdf['Date'] = pd.to_datetime(formated_gdf['ObservationDate'])
formated_gdf['Date'] = formated_gdf['Date'].dt.strftime('%m/%d/%Y')
formated_gdf['log_ConfirmedCases'] = np.log(formated_gdf.Confirmed + 1)
formated_gdf['log_Fatalities'] = np.log(formated_gdf.Deaths + 1)
fig = px.choropleth(formated_gdf, locations="Country/Region", locationmode='country names',
color="log_ConfirmedCases", hover_name="Country/Region",projection="mercator",
animation_frame="Date",width=1000, height=800,
color_continuous_scale=px.colors.sequential.Viridis,
title='The Spread of COVID-19 Cases Across World')
fig.update(layout_coloraxis_showscale=True)
py.offline.iplot(fig)
<feature_engineering> | FileLinks('.' ) | Titanic - Machine Learning from Disaster |
4,687,939 | <load_from_csv><EOS> | StackingSubmission = pd.DataFrame({ 'PassengerId': PassengerId,
'Survived': pred })
StackingSubmission.to_csv("StackingSubmission.csv", index=False)
df=StackingSubmission | Titanic - Machine Learning from Disaster |
13,712,258 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe> | !pip install pycomp --upgrade --no-cache-dir | Titanic - Machine Learning from Disaster |
13,712,258 | covid_data = dataframe[['Date', 'State', 'Country', 'Cumulative_cases', 'Cumulative_death',
'Daily_cases', 'Daily_death', 'Latitude', 'Longitude', 'Temperature',
'Min_temperature', 'Max_temperature', 'Wind_speed', 'Precipitation',
'Fog_Presence', 'Population', 'Population Density/km', 'Median_Age',
'Sex_Ratio', 'Age%_65+', 'Hospital Beds/1000', 'Available Beds/1000',
'Confirmed Cases/1000', 'Lung Patients(F)', 'Lung Patients(M)',
'Life Expectancy(M)', 'Life Expectancy(F)', 'Total_tests_conducted',
'Out_Travels(mill.) ', 'In_travels(mill.) ', 'Domestic_Travels(mill.) ']]<feature_engineering> | filterwarnings('ignore')
DATA_PATH = '../input/titanic'
TRAIN_FILENAME = 'train.csv'
TEST_FILENAME = 'test.csv' | Titanic - Machine Learning from Disaster |
13,712,258 | training_data['Country_Region'] = training_data['Country_Region'] + ' ' + training_data['Province_State']
testing_data['Country_Region'] = testing_data['Country_Region'] + ' ' + testing_data['Province_State']
del training_data['Province_State']
del testing_data['Province_State']
def split_date(date):
date = date.split('-')
date[0] = int(date[0])
if(date[1][0] == '0'):
date[1] = int(date[1][1])
else:
date[1] = int(date[1])
if(date[2][0] == '0'):
date[2] = int(date[2][1])
else:
date[2] = int(date[2])
return date
training_data.Date = training_data.Date.apply(split_date)
testing_data.Date = testing_data.Date.apply(split_date )<feature_engineering> | df = pd.read_csv(os.path.join(DATA_PATH, TRAIN_FILENAME))
df.head() | Titanic - Machine Learning from Disaster |
13,712,258 | year = []
month = []
day = []
for i in training_data.Date:
year.append(i[0])
month.append(i[1])
day.append(i[2])
training_data['Year'] = year
training_data['Month'] = month
training_data['Day'] = day
del training_data['Date']
year = []
month = []
day = []
for i in testing_data.Date:
year.append(i[0])
month.append(i[1])
day.append(i[2])
testing_data['Year'] = year
testing_data['Month'] = month
testing_data['Day'] = day
del testing_data['Date']
del training_data['Id']
del testing_data['ForecastId']
del testing_data['Year']
del training_data['Year']<drop_column> | feature_adder = CustomFeaturesTitanic(name_title=True, cabin_class=True, ticket_class=True)
df_custom = feature_adder.fit_transform(df)
df_custom.head() | Titanic - Machine Learning from Disaster |
13,712,258 | latest_data = covid_data['Date'] == '30-03-2020'
country_data_detailed = covid_data[latest_data]
country_data_detailed.drop(['Daily_cases','Daily_death','Latitude','Longitude'],axis=1,inplace=True)
country_data_detailed.head(3 )<data_type_conversions> | dup_dropper = EliminaDuplicatas()
df_nodup = dup_dropper.fit_transform(df_slct)
print(f'Total of duplicates before: {df_slct.duplicated().sum() }')
print(f'Total of duplicates after: {df_nodup.duplicated().sum() }' ) | Titanic - Machine Learning from Disaster |
13,712,258 | country_data_detailed['Lung Patients(F)'].replace('Not reported',np.nan,inplace=True)
country_data_detailed['Lung Patients(F)'] = country_data_detailed['Lung Patients(F)'].astype("float" )<load_from_csv> | cat_custom_features = ['Pclass']
mod_dict = {col: str for col in cat_custom_features}
print(f'Selected columns dtype before transformation:\n')
print(df_nodup.dtypes[cat_custom_features])
dtype_mod = ModificaTipoPrimitivo(mod_dict=mod_dict)
df_mod = dtype_mod.fit_transform(df_nodup)
print(f'Selected columns dtype after transformation:\n')
print(df_mod.dtypes[cat_custom_features] ) | Titanic - Machine Learning from Disaster |
13,712,258 | temperature_data = pd.read_csv('../input/covcsd-covid19-countries-statistical-dataset/temperature_data.csv')
temperature_data.head()<compute_train_metric> | imputer = SimpleImputer(strategy='median')
X_train_num_imp = imputer.fit_transform(X_train_num)
X_train_num_imp = pd.DataFrame(X_train_num_imp, columns=num_features)
print(f'Null data before imputer: {X_train_num.isnull().sum().sum() }')
print(f'Null data after imputer: {X_train_num_imp.isnull().sum().sum() }' ) | Titanic - Machine Learning from Disaster |
13,712,258 | sample = temperature_dataset['Temperature'].sample(n=250)
test = temperature_dataset['Temperature']
stat, p = ttest_ind(sample, test)
print('Statistics=%.3f, p=%.3f' %(stat, p))<normalization> | tmp_ov = data_overview(df=X_train_num_imp)
tmp_ov['skew'] = tmp_ov.query('feature in @num_features')['feature'].apply(lambda x: skew(X_train_num_imp[x]))
tmp_ov['kurtosis'] = tmp_ov.query('feature in @num_features')['feature'].apply(lambda x: kurtosis(X_train_num_imp[x]))
tmp_ov[~tmp_ov['skew'].isnull() ].sort_values(by='skew', ascending=False ).loc[:, ['feature', 'skew', 'kurtosis']] | Titanic - Machine Learning from Disaster |
13,712,258 | training_data['ConfirmedCases'] = training_data['ConfirmedCases'].apply(int)
training_data['Fatalities'] = training_data['Fatalities'].apply(int)
cases = training_data.ConfirmedCases
fatalities = training_data.Fatalities
del training_data['ConfirmedCases']
del training_data['Fatalities']
lb = LabelEncoder()
training_data['Country_Region'] = lb.fit_transform(training_data['Country_Region'])
testing_data['Country_Region'] = lb.transform(testing_data['Country_Region'])
scaler = MinMaxScaler()
x_train = scaler.fit_transform(training_data.values)
x_test = scaler.transform(testing_data.values )<train_model> | scaler = DynamicScaler(scaler_type='Standard')
X_train_num_scaled = scaler.fit_transform(X_train_num_log)
X_train_num_scaled = pd.DataFrame(X_train_num_scaled, columns=num_features)
X_train_num_scaled.head() | Titanic - Machine Learning from Disaster |
13,712,258 | rf = XGBRegressor(n_estimators = 1500 , max_depth = 15, learning_rate=0.1)
rf.fit(x_train,cases)
cases_pred = rf.predict(x_test)
rf = XGBRegressor(n_estimators = 1500 , max_depth = 15, learning_rate=0.1)
rf.fit(x_train,fatalities)
fatalities_pred = rf.predict(x_test )<feature_engineering> | encoder = DummiesEncoding(cat_features_ori=cat_features, dummy_na=True)
X_train_cat_enc = encoder.fit_transform(X_train_cat)
print(f'Shape before encoding: {X_train_cat.shape}')
print(f'Shape after encoding: {X_train_cat_enc.shape}')
X_train_cat_enc.head() | Titanic - Machine Learning from Disaster |
13,712,258 | cases_pred = np.around(cases_pred)
fatalities_pred = np.around(fatalities_pred)
cases_pred[cases_pred < 0] = 0
fatalities_pred[fatalities_pred < 0] = 0<load_from_csv> | TARGET = 'Survived'
INITIAL_FEATURES = ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'name_title',
'ticket_class', 'cabin_class', 'name_length', 'age_cat', 'fare_cat', 'family_size']
INITIAL_PRED_FEATURES = [col for col in INITIAL_FEATURES if col not in TARGET]
DTYPE_MODIFICATION_DICT = {'Pclass': str}
NUM_FEATURES = ['Age', 'SibSp', 'Parch', 'Fare', 'name_length', 'family_size']
CAT_FEATURES = ['Pclass', 'Sex', 'Embarked', 'name_title', 'ticket_class', 'cabin_class', 'age_cat', 'fare_cat']
CAT_FEATURES_FINAL = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Pclass_nan', 'Sex_female', 'Sex_male', 'Sex_nan',
'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Embarked_nan', 'name_title_Master.',
'name_title_Miss.', 'name_title_Mr.', 'name_title_Mrs.', 'name_title_OTHER',
'name_title_nan', 'ticket_class_A', 'ticket_class_OTHER', 'ticket_class_PC',
'ticket_class_SC', 'ticket_class_STON_SOTON', 'ticket_class_nan', 'cabin_class_A',
'cabin_class_B', 'cabin_class_C', 'cabin_class_D', 'cabin_class_E', 'cabin_class_FGT',
'cabin_class_nan', 'age_cat_0_10', 'age_cat_10_20', 'age_cat_20_40', 'age_cat_40_60',
'age_cat_greater_60', 'age_cat_nan', 'fare_cat_0_8', 'fare_cat_15_25',
'fare_cat_25_50', 'fare_cat_8_15', 'fare_cat_greater_50', 'fare_cat_nan']
MODEL_FEATURES = NUM_FEATURES + CAT_FEATURES_FINAL
NUM_STRATEGY_IMPUTER = 'median'
SCALER_TYPE = None
LOG_APPLICATION = False
COLS_TO_LOG = ['Fare', 'Age']
ENCODER_DUMMY_NA = True
NAME_TITLE = True
CABIN_CLASS = True
TICKET_CLASS = True
NAME_LENGTH = True
AGE_CAT = True
FARE_CAT = True
FAMILY_SIZE = True
initial_train_pipeline = Pipeline([
('feature_adder', CustomFeaturesTitanic(name_title=NAME_TITLE, cabin_class=CABIN_CLASS,
ticket_class=TICKET_CLASS, name_length=NAME_LENGTH,
age_cat=AGE_CAT, fare_cat=FARE_CAT, family_size=FAMILY_SIZE)) ,
('col_filter', FiltraColunas(features=INITIAL_FEATURES)) ,
('dtype_modifier', ModificaTipoPrimitivo(mod_dict=DTYPE_MODIFICATION_DICT)) ,
('dup_dropper', EliminaDuplicatas())
])
initial_pred_pipeline = Pipeline([
('feature_adder', CustomFeaturesTitanic(name_title=NAME_TITLE, cabin_class=CABIN_CLASS,
ticket_class=TICKET_CLASS, name_length=NAME_LENGTH,
age_cat=AGE_CAT, fare_cat=FARE_CAT, family_size=FAMILY_SIZE)) ,
('col_filter', FiltraColunas(features=INITIAL_PRED_FEATURES)) ,
('dtype_modifier', ModificaTipoPrimitivo(mod_dict=DTYPE_MODIFICATION_DICT))
])
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy=NUM_STRATEGY_IMPUTER)) ,
('log_transformer', DynamicLogTransformation(application=LOG_APPLICATION, num_features=NUM_FEATURES,
cols_to_log=COLS_TO_LOG)) ,
('scaler', DynamicScaler(scaler_type=SCALER_TYPE))
])
cat_pipeline = Pipeline([
('encoder', DummiesEncoding(dummy_na=ENCODER_DUMMY_NA, cat_features_final=CAT_FEATURES_FINAL))
])
prep_pipeline = ColumnTransformer([
('num', num_pipeline, NUM_FEATURES),
('cat', cat_pipeline, CAT_FEATURES)
] ) | Titanic - Machine Learning from Disaster |
13,712,258 | submission_dataset = pd.read_csv(".. /input/covid19-global-forecasting-week-4/submission.csv")
submission_dataset['ConfirmedCases'] = cases_pred
submission_dataset['Fatalities'] = fatalities_pred
submission_dataset.head()<save_to_csv> | df = pd.read_csv(os.path.join(DATA_PATH, TRAIN_FILENAME))
df_prep = initial_train_pipeline.fit_transform(df)
X_train, X_val, y_train, y_val = train_test_split(df_prep.drop(TARGET, axis=1), df_prep[TARGET].values,
test_size=.20, random_state=42)
X_train_prep = prep_pipeline.fit_transform(X_train)
X_val_prep = prep_pipeline.transform(X_val)  # transform only: reuse the parameters fitted on the training split to avoid leakage
print(f'Shape of X_train_prep: {X_train_prep.shape}')
print(f'Shape of X_val_prep: {X_val_prep.shape}')
print(f'\nTotal features considered: {len(MODEL_FEATURES)}') | Titanic - Machine Learning from Disaster |
13,712,258 | submission_dataset.to_csv("submission.csv" , index = False )<load_from_csv> | df_prep = pd.DataFrame(X_train_prep, columns=MODEL_FEATURES)
df_prep['Survived'] = y_train
plot_corr_matrix(df=df_prep, corr_col='Survived', figsize=(12, 12), cbar=False, n_vars=15 ) | Titanic - Machine Learning from Disaster |
13,712,258 | test = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv")
train = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv" )<sort_values> | dtree = DecisionTreeClassifier()
forest = RandomForestClassifier()
lgbm = LGBMClassifier()
xgb = XGBClassifier()
adaboost = AdaBoostClassifier()
gradboost = GradientBoostingClassifier()
model_obj = [dtree, forest, lgbm, xgb, adaboost, gradboost]
model_names = [type(model ).__name__ for model in model_obj]
set_classifiers = {name: {'model': obj, 'params': {}} for(name, obj)in zip(model_names, model_obj)}
print(f'Classifiers that will be trained in the next steps:\n{model_names}') | Titanic - Machine Learning from Disaster |
13,712,258 | train[train['Country_Region'] == 'US'].sort_values('ConfirmedCases',ascending = False )<groupby> | trainer = ClassificadorBinario()
trainer.fit(set_classifiers, X_train_prep, y_train ) | Titanic - Machine Learning from Disaster |
13,712,258 | train[train['Country_Region'] == 'US'].groupby(['Date'] ).sum()<data_type_conversions> | metrics = trainer.evaluate_performance(X_train_prep, y_train, X_val_prep, y_val)
metrics | Titanic - Machine Learning from Disaster |
13,712,258 | train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'])
train[['ConfirmedCases', 'Fatalities']] = train.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases', 'Fatalities']].transform('cummax' )<compute_test_metric> | full_trainer = ClassificadorBinario()
full_trainer.training_flow(set_classifiers, X_train_prep, y_train, X_val_prep, y_val,
features=MODEL_FEATURES, random_search=True)
full_trainer.visual_analysis(features=MODEL_FEATURES, model_shap='LGBMClassifier' ) | Titanic - Machine Learning from Disaster |
13,712,258 | def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)))
feature_day = [1,20,50,100,200,500,1000,5000,10000,15000,20000,50000,100000,200000, 500000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_delta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day) + ' case'].iloc[i] = day_delta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]<install_modules> | metrics = pd.read_csv('output/metrics/metrics.csv')
metrics | Titanic - Machine Learning from Disaster |
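CreateInput above reads the module-level variables country and province, so it is presumably called once per region inside an outer loop that is not shown here; the driver sketched below is an assumption about that loop, with illustrative variable names.

region_features = []
for country in train['Country_Region'].unique():
    for province in train[train['Country_Region'] == country]['Province_State'].unique():
        # Restrict to one (country, province) pair and build its "days since N cases" features.
        region_train = train[(train['Country_Region'] == country) &
                             (train['Province_State'] == province)].copy()
        region_features.append(CreateInput(region_train))  # uses the globals set by this loop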
13,712,258 | !pip install pmdarima<save_to_csv> | forest_tunning_grid = {
'bootstrap': [True, False],
'class_weight': [None, 'balanced'],
'criterion': ['gini', 'entropy'],
'max_depth': [5, 6, 7, 9, 10],
'n_estimators': np.arange(300, 600, 50),
'random_state': [42]
}
lgbm_tunning_grid = {
'boosting_type': ['gbdt'],
'class_weight': [None, 'balanced'],
'learning_rate': [0.003, 0.01, 0.03, 0.1, 0.3, 1, 3],
'max_depth': np.arange(-1, 100, 2),
'n_estimators': np.arange(300, 700, 50),
'num_leaves': [5, 10, 15, 20],
'objective': ['binary'],
'random_state': [42],
'reg_alpha': np.linspace (.0, 25, 15)
}
xgboost_tunning_grid = {
'booster': ['gbtree'],
'max_depth': [3, 4, 6, 7],
'learning_rate': [0.003, 0.01, 0.03, 0.1, 0.3, 1, 3],
'n_estimators': np.arange(300, 700, 50),
'objective': ['binary:logistic'],
'seed': [42],
'reg_alpha': np.linspace (.0, 25, 15),
'reg_lambda': np.linspace (.0, 25, 15),
'colsample_bylevel': [0.5, 0.7, 0.9]
}
adaboost_tunning_grid = {
'base_estimator': [DecisionTreeClassifier(max_depth=7)],
'n_estimators': np.arange(50, 700, 50),
'learning_rate': [0.003, 0.01, 0.03, 0.1, 0.3, 1, 3],
'algorithm': ['SAMME', 'SAMME.R'],
'random_state': [42]
}
gradboost_tunning_grid = {
'n_estimators': np.arange(50, 700, 50),
'max_depth': [3, 6, 7, 8, 9, 10, 15],
'max_features': [None, 'auto', 'sqrt', 'log2'],
'max_leaf_nodes': np.arange(3, 50, 2),
'random_state': [42]
} | Titanic - Machine Learning from Disaster |
13,712,258 | df_val = df_val_2
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission = submission.round({'ConfirmedCases': 0, 'Fatalities': 0})
submission.to_csv('submission.csv', index=False)
submission<import_modules> | tunning_models_keys = ['RandomForestClassifier', 'LGBMClassifier', 'XGBClassifier', 'AdaBoostClassifier',
'GradientBoostingClassifier']
tunning_param_grids = [forest_tunning_grid, lgbm_tunning_grid, xgboost_tunning_grid, adaboost_tunning_grid,
gradboost_tunning_grid]
tunned_pipelines = {}
general_metrics = pd.DataFrame({})
pipe_model_key = 'model'
X = X_train.append(X_val)
y = np.concatenate(( y_train, y_val))
for model_name, param_grid in zip(tunning_models_keys, tunning_param_grids):
baseline_model = trainer._get_estimator(model_name)
feature_importance = baseline_model.feature_importances_
general_metrics = general_metrics.append(trainer._get_metrics(model_name))
tunning_pipeline = Pipeline([
('prep', prep_pipeline),
('selector', SeletorTopFeatures(feature_importance, k=len(MODEL_FEATURES))),
(pipe_model_key, baseline_model)
])
model_param_grid = {pipe_model_key + '__' + k: v for k, v in param_grid.items() }
tunning_param_grid = {
'prep__num__imputer__strategy': ['mean', 'median', 'most_frequent'],
'prep__num__log_transformer__application': [True, False],
'prep__num__scaler__scaler_type': [None, 'Standard', 'MinMax'],
'selector__k': np.arange(5, len(MODEL_FEATURES)+ 1, 2)
}
tunning_param_grid.update(model_param_grid)
tunning_search = RandomizedSearchCV(tunning_pipeline, tunning_param_grid, scoring='accuracy', cv=5,
n_jobs=-1, verbose=-1, random_state=42)
tunning_search.fit(X, y)
print(f'\nBest hyperparameters for {model_name} found by RandomizedSearch:\n')
for k, v in tunning_search.best_params_.items() :
print(f'{k}: {v}')
final_pipeline = tunning_search.best_estimator_
tunned_pipelines[model_name] = final_pipeline
final_metrics = clf_cv_performance(final_pipeline, X, y, model_name=model_name)
model_metrics = trainer._get_metrics(model_name=model_name)
metrics_cols = general_metrics.columns
final_metrics = final_metrics.loc[:, metrics_cols]
general_metrics = general_metrics.append(final_metrics)
general_metrics | Titanic - Machine Learning from Disaster |
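The pipe_model_key + '__' + k prefixing in the loop above relies on scikit-learn's convention that nested pipeline parameters are addressed as <step>__<parameter>; the dictionary below only illustrates what the resulting search-space keys look like (the values are examples, not tuned results).

# A grid entry such as {'n_estimators': [300, 350]} for the 'model' step becomes
# 'model__n_estimators', and preprocessing parameters are reached through their parent steps.
example_search_space = {
    'model__n_estimators': [300, 350],
    'prep__num__imputer__strategy': ['mean', 'median'],
    'selector__k': [10, 20],
}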
13,712,258 | warnings.filterwarnings('ignore')
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
<load_from_csv> | FINAL_MODEL = 'RandomForestClassifier'
final_pipeline = tunned_pipelines[FINAL_MODEL] | Titanic - Machine Learning from Disaster |
13,712,258 | covid_cases = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/covid_19_data.csv')
covid_cases.head()<load_from_csv> | df_test = pd.read_csv(os.path.join(DATA_PATH, TEST_FILENAME))
print(f'Shape of test dataset: {df_test.shape}')
df_test.head() | Titanic - Machine Learning from Disaster |
13,712,258 | training_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/train.csv")
testing_data = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/test.csv" )<data_type_conversions> | model_consumer = ConsumoModelo(model=final_pipeline, features=INITIAL_PRED_FEATURES)
prediction_pipeline = Pipeline([
('initial', initial_pred_pipeline),
('prediction', model_consumer)
])
df_pred = prediction_pipeline.fit_transform(df_test)
df_pred.head() | Titanic - Machine Learning from Disaster |
13,712,258 | <concatenate><EOS> | df_sub = df_test.merge(df_pred, how='left', left_index=True, right_index=True)
df_sub = df_sub.loc[:, ['PassengerId', 'y_pred']]
df_sub.columns = ['PassengerId', 'Survived']
df_sub.to_csv('output/submission.csv', index=False)
df_sub.head() | Titanic - Machine Learning from Disaster |
10,357,342 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<count_unique_values> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns | Titanic - Machine Learning from Disaster |
10,357,342 | latest_data = country_grouped_covid['ObservationDate'] == '04/13/2020'
country_data = country_grouped_covid[latest_data]
country_list = country_data['Country/Region'].unique()
print("The total number of countries with COVID-19 Confirmed cases = {}".format(country_list.size))<feature_engineering> | train_data = pd.read_csv('.. /input/titanic/train.csv')
test_data = pd.read_csv('.. /input/titanic/test.csv')
train = train_data.copy()
test = test_data.copy() | Titanic - Machine Learning from Disaster |
10,357,342 | py.init_notebook_mode(connected=True)
formated_gdf = covid_cases.groupby(['ObservationDate', 'Country/Region'])['Confirmed', 'Deaths', 'Recovered'].max()
formated_gdf = formated_gdf.reset_index()
formated_gdf['Date'] = pd.to_datetime(formated_gdf['ObservationDate'])
formated_gdf['Date'] = formated_gdf['Date'].dt.strftime('%m/%d/%Y')
formated_gdf['log_ConfirmedCases'] = np.log(formated_gdf.Confirmed + 1)
formated_gdf['log_Fatalities'] = np.log(formated_gdf.Deaths + 1 )<feature_engineering> | train.drop(['PassengerId'], axis=1, inplace=True)
test.drop(['PassengerId'], axis=1, inplace=True)
pred = train['Survived'] | Titanic - Machine Learning from Disaster |
10,357,342 | py.init_notebook_mode(connected=True)
formated_gdf = covid_cases.groupby(['ObservationDate', 'Country/Region'])['Confirmed', 'Deaths', 'Recovered'].max()
formated_gdf = formated_gdf.reset_index()
formated_gdf['Date'] = pd.to_datetime(formated_gdf['ObservationDate'])
formated_gdf['Date'] = formated_gdf['Date'].dt.strftime('%m/%d/%Y')
formated_gdf['log_ConfirmedCases'] = np.log(formated_gdf.Confirmed + 1)
formated_gdf['log_Fatalities'] = np.log(formated_gdf.Deaths + 1 )<load_from_csv> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
10,357,342 | folder_name = '/kaggle/input/covcsd-covid19-countries-statistical-dataset/'
file_type = 'csv'
seperator =','
dataframe = pd.concat([pd.read_csv(f, sep=seperator)for f in glob.glob(folder_name + "/*."+file_type)],ignore_index=True,sort=False )<create_dataframe> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
10,357,342 | covid_data = dataframe[['Date', 'State', 'Country', 'Cumulative_cases', 'Cumulative_death',
'Daily_cases', 'Daily_death', 'Latitude', 'Longitude', 'Temperature',
'Min_temperature', 'Max_temperature', 'Wind_speed', 'Precipitation',
'Fog_Presence', 'Population', 'Population Density/km', 'Median_Age',
'Sex_Ratio', 'Age%_65+', 'Hospital Beds/1000', 'Available Beds/1000',
'Confirmed Cases/1000', 'Lung Patients(F)', 'Lung Patients(M)',
'Life Expectancy(M)', 'Life Expectancy(F)', 'Total_tests_conducted',
'Out_Travels(mill.) ', 'In_travels(mill.) ', 'Domestic_Travels(mill.) ']]<feature_engineering> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
10,357,342 | training_data['Country_Region'] = training_data['Country_Region'] + ' ' + training_data['Province_State']
testing_data['Country_Region'] = testing_data['Country_Region'] + ' ' + testing_data['Province_State']
del training_data['Province_State']
del testing_data['Province_State']
def split_date(date):
date = date.split('-')
date[0] = int(date[0])
if(date[1][0] == '0'):
date[1] = int(date[1][1])
else:
date[1] = int(date[1])
if(date[2][0] == '0'):
date[2] = int(date[2][1])
else:
date[2] = int(date[2])
return date
training_data.Date = training_data.Date.apply(split_date)
testing_data.Date = testing_data.Date.apply(split_date )<feature_engineering> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
10,357,342 | year = []
month = []
day = []
for i in training_data.Date:
year.append(i[0])
month.append(i[1])
day.append(i[2])
training_data['Year'] = year
training_data['Month'] = month
training_data['Day'] = day
del training_data['Date']<feature_engineering> | train['Age'].fillna(train['Age'].quantile(0.5), inplace=True)
test['Age'].fillna(test['Age'].quantile(0.5), inplace=True ) | Titanic - Machine Learning from Disaster |
10,357,342 | year = []
month = []
day = []
for i in testing_data.Date:
year.append(i[0])
month.append(i[1])
day.append(i[2])
testing_data['Year'] = year
testing_data['Month'] = month
testing_data['Day'] = day
del testing_data['Date']
del training_data['Id']
del testing_data['ForecastId']
del testing_data['Year']
del training_data['Year']<drop_column> | train['Embarked'].fillna('S', inplace=True)
test['Embarked'].fillna('S', inplace=True ) | Titanic - Machine Learning from Disaster |
10,357,342 | latest_data = covid_data['Date'] == '30-03-2020'
country_data_detailed = covid_data[latest_data]
country_data_detailed.drop(['Daily_cases','Daily_death','Latitude','Longitude'],axis=1,inplace=True)
country_data_detailed.head(3 )<categorify> | test['Fare'].fillna(test['Fare'].quantile(0.5), inplace=True ) | Titanic - Machine Learning from Disaster |
10,357,342 | country_data_detailed.replace('Not Reported',np.nan,inplace=True)
country_data_detailed.replace('N/A',np.nan,inplace=True)
country_data_detailed.head(3 )<data_type_conversions> | sex1 = pd.get_dummies(train['Sex'])
sex2 = pd.get_dummies(test['Sex'] ) | Titanic - Machine Learning from Disaster |
10,357,342 | country_data_detailed['Lung Patients(F)'].replace('Not reported',np.nan,inplace=True)
country_data_detailed['Lung Patients(F)'] = country_data_detailed['Lung Patients(F)'].astype("float" )<load_from_csv> | train.drop(['Sex'], axis=1, inplace=True)
test.drop(['Sex'], axis=1, inplace=True)
train = pd.concat([train, sex1], axis=1)
test = pd.concat([test, sex2], axis=1 ) | Titanic - Machine Learning from Disaster |
10,357,342 | temperature_data = pd.read_csv('/kaggle/input/covcsd-covid19-countries-statistical-dataset/temperature_data.csv')
temperature_data.head()<compute_train_metric> | train.drop(['female'], axis=1, inplace=True)
test.drop(['female'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
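Building both Sex dummies and then dropping 'female' can be collapsed into a single call; the line below is only an equivalent sketch, not a change applied to the rest of this notebook.

# drop_first=True drops the alphabetically first level ('female') and keeps a single
# 'male' indicator, avoiding the redundant column instead of deleting it afterwards.
sex_ind = pd.get_dummies(train_data['Sex'], drop_first=True)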
10,357,342 | sample = temperature_dataset['Temperature'].sample(n=250)
test = temperature_dataset['Temperature']
stat, p = ttest_ind(sample, test)
print('Statistics=%.3f, p=%.3f' %(stat, p))<normalization> | embark1 = pd.get_dummies(train['Embarked'])
embark2 = pd.get_dummies(test['Embarked'])
train.drop(['Embarked'], axis=1, inplace=True)
test.drop(['Embarked'], axis=1, inplace=True)
train = pd.concat([train, embark1], axis=1)
test = pd.concat([test, embark2], axis=1 ) | Titanic - Machine Learning from Disaster |
10,357,342 | training_data['ConfirmedCases'] = training_data['ConfirmedCases'].apply(int)
training_data['Fatalities'] = training_data['Fatalities'].apply(int)
cases = training_data.ConfirmedCases
fatalities = training_data.Fatalities
del training_data['ConfirmedCases']
del training_data['Fatalities']
lb = LabelEncoder()
training_data['Country_Region'] = lb.fit_transform(training_data['Country_Region'])
testing_data['Country_Region'] = lb.transform(testing_data['Country_Region'])
scaler = MinMaxScaler()
x_train = scaler.fit_transform(training_data.values)
x_test = scaler.transform(testing_data.values )<train_model> | def family(x):
if x['SibSp'] + x['Parch'] > 1:
return 1
else:
return 0
train['Family'] = train.apply(family, axis=1)
test['Family'] = test.apply(family, axis=1 ) | Titanic - Machine Learning from Disaster |
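The row-wise apply above can also be written as a vectorized comparison, which is faster and arguably clearer; shown only as an equivalent sketch.

# 1 when the passenger has more than one relative aboard (SibSp + Parch > 1), else 0.
train['Family'] = ((train['SibSp'] + train['Parch']) > 1).astype(int)
test['Family'] = ((test['SibSp'] + test['Parch']) > 1).astype(int)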
10,357,342 | rf = XGBRegressor(n_estimators = 1500 , max_depth = 15, learning_rate=0.1)
rf.fit(x_train,cases)
cases_pred = rf.predict(x_test)
rf = XGBRegressor(n_estimators = 1500 , max_depth = 15, learning_rate=0.1)
rf.fit(x_train,fatalities)
fatalities_pred = rf.predict(x_test )<feature_engineering> | train.drop(['SibSp','Parch'], axis=1, inplace=True)
test.drop(['SibSp','Parch'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
10,357,342 | cases_pred = np.around(cases_pred)
fatalities_pred = np.around(fatalities_pred)
cases_pred[cases_pred < 0] = 0
fatalities_pred[fatalities_pred < 0] = 0<load_from_csv> | train['Cabin'] = pd.Series(i[0] if not pd.isnull(i)else 'X' for i in train['Cabin'])
test['Cabin'] = pd.Series(i[0] if not pd.isnull(i)else 'X' for i in test['Cabin'] ) | Titanic - Machine Learning from Disaster |
10,357,342 | submission_dataset = pd.read_csv(".. /input/covid19-global-forecasting-week-4/submission.csv")
submission_dataset['ConfirmedCases'] = cases_pred
submission_dataset['Fatalities'] = fatalities_pred
submission_dataset.head()<save_to_csv> | train['Cabin'] = train['Cabin'].map({
'X': 0,
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'T': 0
})
train['Cabin'] = train['Cabin'].astype(int)
test['Cabin'] = test['Cabin'].map({
'X': 0,
'A': 1,
'B': 2,
'C': 3,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'T': 0
})
test['Cabin'] = test['Cabin'].astype(int ) | Titanic - Machine Learning from Disaster |
10,357,342 | submission_dataset.to_csv("submission.csv" , index = False )<save_to_csv> | train_title = [i.split(",")[1].split(".")[0].strip() for i in train["Name"]]
train["Title"] = pd.Series(train_title)
test_title = [i.split(",")[1].split(".")[0].strip() for i in test["Name"]]
test["Title"] = pd.Series(test_title ) | Titanic - Machine Learning from Disaster |
10,357,342 | submission_dataset.to_csv("submission.csv" , index = False )<set_options> | train = train.drop(['Name'], axis = 1)
test = test.drop(['Name'], axis = 1 ) | Titanic - Machine Learning from Disaster |
10,357,342 | plotly.offline.init_notebook_mode()
%matplotlib inline
def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power(( np.log(pred+1)-np.log(actual+1)) ,2)) )<set_options> | train["Title"] = train["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
train["Title"] = train["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
train["Title"] = train["Title"].astype(int)
test["Title"] = test["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
test["Title"] = test["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
test["Title"] = test["Title"].astype(int ) | Titanic - Machine Learning from Disaster |
10,357,342 | warnings.filterwarnings("ignore")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
<load_from_csv> | Ticket1 = []
for i in list(train.Ticket):
if not i.isdigit() :
Ticket1.append(i.replace(".","" ).replace("/","" ).strip().split(' ')[0])
else:
Ticket1.append("X")
train["Ticket"] = Ticket1
Ticket2 = []
for j in list(test.Ticket):
if not j.isdigit() :
Ticket2.append(j.replace(".","" ).replace("/","" ).strip().split(' ')[0])
else:
Ticket2.append("X")
test["Ticket"] = Ticket2 | Titanic - Machine Learning from Disaster |
10,357,342 | train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/test.csv")
region_metadata = pd.read_csv("/kaggle/input/covid19-forecasting-metadata/region_metadata.csv")
region_date_metadata = pd.read_csv("/kaggle/input/covid19-forecasting-metadata/region_date_metadata.csv" )<merge> | train= pd.get_dummies(train, columns = ["Ticket"], prefix="T")
test = pd.get_dummies(test, columns = ["Ticket"], prefix="T" ) | Titanic - Machine Learning from Disaster |
10,357,342 | train = train.merge(test[["ForecastId", "Province_State", "Country_Region", "Date"]], on = ["Province_State", "Country_Region", "Date"], how = "left")
display(train.head())
test = test[~test.Date.isin(train.Date.unique())]
display(test.head())
df = pd.concat([train, test], sort = False)
df.head()<categorify> | train = train.drop(['T_SP','T_SOP','T_Fa','T_LINE','T_SWPP','T_SCOW','T_PPP','T_AS','T_CASOTON'],axis = 1)
test = test.drop(['T_SCA3','T_STONOQ','T_AQ4','T_A','T_LP','T_AQ3'],axis = 1 ) | Titanic - Machine Learning from Disaster |
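Hand-picking the mismatched ticket-dummy columns to drop works for this particular split, but is brittle if the data changes; an alternative, sketched here rather than applied, is to reindex the test frame onto the training columns (Survived excluded), filling any missing dummy with 0.

# Keep exactly the columns the model will be trained on; prefixes seen only in test
# are dropped and prefixes missing from test are added as all-zero columns.
feature_cols = [c for c in train.columns if c != 'Survived']
test = test.reindex(columns=feature_cols, fill_value=0)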
10,357,342 | df["geo"] = df.Country_Region.astype(str)+ ": " + df.Province_State.astype(str)
df.loc[df.Province_State.isna() , "geo"] = df[df.Province_State.isna() ].Country_Region
df.ConfirmedCases = df.groupby("geo")["ConfirmedCases"].cummax()
df.Fatalities = df.groupby("geo")["Fatalities"].cummax()
df = df.merge(region_metadata, on = ["Country_Region", "Province_State"])
df = df.merge(region_date_metadata, on = ["Country_Region", "Province_State", "Date"], how = "left")
df.continent = LabelEncoder().fit_transform(df.continent)
df.Date = pd.to_datetime(df.Date, format = "%Y-%m-%d")
df.sort_values(["geo", "Date"], inplace = True)
df.head()<categorify> | train.drop(['Survived'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
10,357,342 | DAYS_SINCE_CASES = [1, 10, 50, 100, 500, 1000, 5000, 10000]
min_date_train = np.min(df[~df.Id.isna() ].Date)
max_date_train = np.max(df[~df.Id.isna() ].Date)
min_date_test = np.min(df[~df.ForecastId.isna() ].Date)
max_date_test = np.max(df[~df.ForecastId.isna() ].Date)
n_dates_test = len(df[~df.ForecastId.isna() ].Date.unique())
print("Train date range:", str(min_date_train), " - ", str(max_date_train))
print("Test date range:", str(min_date_test), " - ", str(max_date_test))
for lag in range(1, 41):
df[f"lag_{lag}_cc"] = df.groupby("geo")["ConfirmedCases"].shift(lag)
df[f"lag_{lag}_ft"] = df.groupby("geo")["Fatalities"].shift(lag)
df[f"lag_{lag}_rc"] = df.groupby("geo")["Recoveries"].shift(lag)
for case in DAYS_SINCE_CASES:
df = df.merge(df[df.ConfirmedCases >= case].groupby("geo")["Date"].min().reset_index().rename(columns = {"Date": f"case_{case}_date"}), on = "geo", how = "left" )<feature_engineering> | print('Train:')
print(train.isnull().sum())
print('Test:')
print(test.isnull().sum() ) | Titanic - Machine Learning from Disaster |