kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
14,110,105 | pred_val_y, pred_test_y = RCNN_train_pred(model_RCNN(embedding_matrix, hidden_dim_1=128, hidden_dim_2=64,max_features=max_features+1), epochs = 5)
outputs.append([pred_val_y, pred_test_y, 'RCNN'])
results = threshold_search(val_y, pred_val_y)
print(results)
print(confusion_matrix(val_y,pred_val_y>results['threshold']))<choose_model_class> | X_tr_pclass =enc.fit_transform(np.array(X_train['Pclass'] ).reshape(-1,1))
X_cv_pclass =enc.transform(np.array(X_cv['Pclass'] ).reshape(-1,1))
X_te_pclass =enc.transform(np.array(test['Pclass'] ).reshape(-1,1)) | Titanic - Machine Learning from Disaster |
14,110,105 | clr = CyclicLR(base_lr=0.001, max_lr=0.003,step_size=300., mode='exp_range', gamma=0.99994 )<compute_train_metric> | X_tr_sex =vectorizer.fit_transform(X_train['Sex'])
X_cv_sex =vectorizer.transform(X_cv['Sex'])
X_te_sex =vectorizer.transform(test['Sex'] ) | Titanic - Machine Learning from Disaster |
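The CyclicLR above cycles the learning rate between base_lr and max_lr while shrinking the amplitude by gamma each step ('exp_range' mode). A minimal sketch of the same schedule using PyTorch's built-in scheduler (an assumption for illustration; the notebook itself appears to use a Keras-style CyclicLR callback):
import torch
model = torch.nn.Linear(10, 1)  # toy model
opt = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
sched = torch.optim.lr_scheduler.CyclicLR(
    opt, base_lr=0.001, max_lr=0.003,
    step_size_up=300, mode='exp_range', gamma=0.99994)
for batch in range(5):
    opt.step()    # loss.backward() omitted in this sketch
    sched.step()  # one scheduler step per batch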
14,110,105 | pred_val_y, pred_test_y = train_pred(model_lstm_atten(embedding_matrix), epochs = 4)
outputs.append([pred_val_y, pred_test_y, 'LSTM w/ atten'])
results = threshold_search(val_y, pred_val_y)
print(results)
print(confusion_matrix(val_y,pred_val_y>results['threshold']))<choose_model_class> | X_tr_cabin =vectorizer.fit_transform(X_train['Cabin'])
X_cv_cabin =vectorizer.transform(X_cv['Cabin'])
X_te_cabin =vectorizer.transform(test['Cabin'] ) | Titanic - Machine Learning from Disaster |
14,110,105 | clr = CyclicLR(base_lr=0.001, max_lr=0.003,step_size=300., mode='exp_range', gamma=0.99994 )<compute_train_metric> | X_tr_tkt =vectorizer.fit_transform(X_train['Ticket'])
X_cv_tkt =vectorizer.transform(X_cv['Ticket'])
X_te_tkt =vectorizer.transform(test['Ticket'] ) | Titanic - Machine Learning from Disaster |
14,110,105 | pred_val_y, pred_test_y = train_pred(model_gru_conv_3(embedding_matrix), epochs = 4)
outputs.append([pred_val_y, pred_test_y, 'LSTM conv 3'])
results = threshold_search(val_y, pred_val_y)
print(results)
print(confusion_matrix(val_y,pred_val_y>results['threshold']))<choose_model_class> | X_tr_fmix =enc.fit_transform(np.array(X_train['feature_mix'] ).reshape(-1,1))
X_cv_fmix =enc.transform(np.array(X_cv['feature_mix'] ).reshape(-1,1))
X_te_fmix =enc.transform(np.array(test['feature_mix'] ).reshape(-1,1)) | Titanic - Machine Learning from Disaster |
14,110,105 | clr = CyclicLR(base_lr=0.001, max_lr=0.003,step_size=300., mode='exp_range', gamma=0.99994 )<compute_train_metric> |
X_tr = hstack(( X_tr_age,X_tr_fare,X_tr_sex,X_tr_pclass,X_tr_emb)).tocsr()
X_cv = hstack(( X_cv_age,X_cv_fare,X_cv_sex,X_cv_pclass,X_cv_emb)).tocsr()
X_te = hstack(( X_te_age,X_te_fare,X_te_sex,X_te_pclass,X_te_emb)).tocsr()
print(X_tr.shape)
print(X_te.shape)
print(X_cv.shape ) | Titanic - Machine Learning from Disaster |
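hstack concatenates the per-feature sparse blocks column-wise, and .tocsr() converts the result to CSR so the downstream estimators can slice rows efficiently. A tiny self-contained sketch of the pattern:
import numpy as np
from scipy.sparse import csr_matrix, hstack
age = csr_matrix(np.arange(3, dtype=float).reshape(-1, 1))  # one numeric column
sex = csr_matrix(np.eye(3)[:, :2])                          # two one-hot columns
X = hstack((age, sex)).tocsr()
print(X.shape)  # (3, 3)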
14,110,105 | pred_val_y, pred_test_y = train_pred(model_lstm_max(embedding_matrix), epochs = 4)
outputs.append([pred_val_y, pred_test_y, 'LSTM w/ max'])
results = threshold_search(val_y, pred_val_y)
print(results)
print(confusion_matrix(val_y,pred_val_y>results['threshold']))<compute_train_metric> | alpha = [10 ** x for x in range(-5, 1)]
cv_log_error_array=[]
for i in alpha:
clf = SGDClassifier(alpha=i,class_weight="balanced", penalty='l2', loss='log', random_state=42)
clf.fit(X_tr, y_train)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid")
sig_clf.fit(X_tr, y_train)
predict_y = sig_clf.predict_proba(X_cv)
cv_log_error_array.append(log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15))
print('For values of alpha = ', i, "The log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15))
fig, ax = plt.subplots()
ax.plot(alpha, cv_log_error_array,c='g')
for i, txt in enumerate(np.round(cv_log_error_array,3)) :
ax.annotate(( alpha[i],np.round(txt,3)) ,(alpha[i],cv_log_error_array[i]))
plt.grid()
plt.title("Cross Validation Error for each alpha")
plt.xlabel("Alpha i's")
plt.ylabel("Error measure")
plt.show()
best_alpha = np.argmin(cv_log_error_array)
clf = SGDClassifier(alpha=alpha[best_alpha],class_weight="balanced", penalty='l2', loss='log', random_state=42)
clf.fit(X_tr, y_train)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid")
sig_clf.fit(X_tr, y_train)
predict_y = sig_clf.predict_proba(X_tr)
print('For values of best alpha = ', alpha[best_alpha], "The train log loss is:",log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15))
predict_y = sig_clf.predict_proba(X_cv)
print('For values of best alpha = ', alpha[best_alpha], "The cross validation log loss is:",log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15))
predict_y = sig_clf.predict_proba(X_te)
print('For values of best alpha = ', alpha[best_alpha], "The test log loss is:",log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))
lr=alpha[best_alpha] | Titanic - Machine Learning from Disaster |
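The cell above tunes SGDClassifier's alpha by cross-validated log loss, wrapping each fit in CalibratedClassifierCV so that predict_proba is calibrated. A self-contained sketch of that pattern on synthetic data, keeping the pre-1.0 scikit-learn spelling loss='log' used in the original:
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
X, y = make_classification(n_samples=500, random_state=42)
X_tr, X_cv, y_tr, y_cv = train_test_split(X, y, random_state=42)
losses = {}
for a in [10 ** p for p in range(-5, 1)]:
    base = SGDClassifier(alpha=a, loss='log', penalty='l2', random_state=42)
    sig = CalibratedClassifierCV(base, method='sigmoid')  # sigmoid = Platt scaling
    sig.fit(X_tr, y_tr)
    losses[a] = log_loss(y_cv, sig.predict_proba(X_cv))
best_alpha = min(losses, key=losses.get)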
14,110,105 | coefs = [0.35,0.25,0.2,0.2]
pred_val_y = np.sum([outputs[i][0]*coefs[i] for i in range(len(outputs)) ], axis = 0)
results = threshold_search(val_y, pred_val_y)
print(results)
print(confusion_matrix(val_y,pred_val_y>results['threshold']))<save_to_csv> | pred=sig_clf.predict(X_te)
df=pd.DataFrame(zip(PassengerId,pred),columns=['PassengerId',"Survived"])
df
df.to_csv('/kaggle/working/output.csv',index=False ) | Titanic - Machine Learning from Disaster |
14,110,105 | <import_modules><EOS> | d=pd.read_csv('/kaggle/working/output.csv')
d | Titanic - Machine Learning from Disaster |
14,096,495 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv> | pd.options.display.max_rows=200
pd.set_option('mode.chained_assignment', None)
simplefilter("ignore", category=ConvergenceWarning)
simplefilter("ignore", category=RuntimeWarning)
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | Titanic - Machine Learning from Disaster |
14,096,495 | train_df = pd.read_csv(".. /input/train.csv")
test_df = pd.read_csv(".. /input/test.csv")
print('Train data dimension: ', train_df.shape)
display(train_df.head())
print('Test data dimension: ', test_df.shape)
display(test_df.head() )<set_options> | train = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId' ) | Titanic - Machine Learning from Disaster |
14,096,495 | def seed_torch(seed=1234):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True<compute_test_metric> | y_train = train.Survived.copy()
train = train.drop('Survived', axis=1)
X_test = test | Titanic - Machine Learning from Disaster |
14,096,495 | def threshold_search(y_true, y_proba):
best_threshold = 0
best_score = 0
for threshold in tqdm([i * 0.01 for i in range(100)]):
score = f1_score(y_true=y_true, y_pred=y_proba > threshold)
if score > best_score:
best_threshold = threshold
best_score = score
search_result = {'threshold': best_threshold, 'f1': best_score}
return search_result<compute_test_metric> | def name_labeling(df):
for i in ['Mr.', 'Mrs.', 'Miss', 'Master', 'Army', 'Revered/Important', 'rare', 'Doctor']:
if i == 'Army':
df.Name[df.Name.str.contains(pat='(Major.|Col.|Capt.) ', regex=True)] = 'Army'
elif i == 'Revered/Important':
df.Name[df.Name.str.contains(pat='(Rev.|Countess.|Jonkheer.|Sir.|Lady.) ', regex=True)] = 'Revered/Important'
elif i == 'rare':
df.Name[df.Name.str.contains(pat='(Mme.|Ms.|Mlle.|Don.|Dona.) ', regex=True)] = 'rare'
elif i == 'Doctor':
df.Name[df.Name.str.contains(pat='Dr.')] = 'doctor'
else:
df.Name[df.Name.str.contains(pat=f'{i}', regex=False)] = i
return df | Titanic - Machine Learning from Disaster |
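name_labeling rewrites df.Name in place through chained indexing (the reason 'mode.chained_assignment' warnings were silenced earlier). A hedged alternative that pulls the title out in one vectorized step, shown on hypothetical names:
import pandas as pd
names = pd.Series(['Braund, Mr. Owen Harris', 'Heikkinen, Miss. Laina'])
titles = names.str.extract(r',\s*([^.]+)\.', expand=False)  # -> 'Mr', 'Miss'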
14,096,495 | def sigmoid(x):
return 1 /(1 + np.exp(-x))<define_variables> | def ticket_labeling(df):
for label, pattern in [('ca', 'C[.]?A[.]?'),('soton', 'SOTON'),('ston', 'STON'),('wc', 'W[.]?[/]?C'),
('sc', 'S[.]?C[.]?'),('a', 'A[.]?'),('soc', 'S[.]?O[.]?[/]?C'),('pp', 'PP'),
('fc', '(F.C.|F.C.C.) '),('rest_char', '[A-Z]'),('small_serial_number', '^\d{3,5}$'),
('large_serial_number', '^\d{6,7}$')]:
df.Ticket[df.Ticket.str.contains(pattern)] = label
return df
| Titanic - Machine Learning from Disaster |
14,096,495 | embed_size = 300
max_features = 75000
maxlen = 50<define_variables> | def cabin_labeling(df):
for i in ['A', 'B', 'C', 'D', 'E', 'F', 'G']:
df.Cabin[df.Cabin.str.contains(i, na=False)] = i
df.Cabin.fillna('missing', inplace=True)
return df | Titanic - Machine Learning from Disaster |
14,096,495 | puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
'·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
'“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
'▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
'∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]
def clean_text(x):
x = str(x)
for punct in puncts:
x = x.replace(punct, f' {punct} ')
return x<feature_engineering> | temp = cabin_labeling(train.copy() ) | Titanic - Machine Learning from Disaster |
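clean_text pads every punctuation mark in puncts with spaces so the tokenizer later treats each one as its own token. A quick check, assuming clean_text as defined above:
print(clean_text('why?no!'))  # 'why ? no ! '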
14,096,495 | train_df["question_text"] = train_df["question_text"].str.lower()
test_df["question_text"] = test_df["question_text"].str.lower()
train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_text(x))
test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x))
x_train = train_df["question_text"].fillna("_
x_test = test_df["question_text"].fillna("_
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(x_train))
x_train = tokenizer.texts_to_sequences(x_train)
x_test = tokenizer.texts_to_sequences(x_test)
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
y_train = train_df['target'].values<statistical_test> | temp = temp.groupby(['Pclass', 'Cabin'])[['Name']].count().rename(columns={'Name':'Passengers'} ) | Titanic - Machine Learning from Disaster |
14,096,495 | def load_glove(word_index):
EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300]
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = -0.005838499,0.48782197
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_fasttext(word_index):
EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean() , all_embs.std()
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix
def load_para(word_index):
EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = -0.0053247833,0.49346462
embed_size = all_embs.shape[1]
nb_words = min(max_features, len(word_index))
embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
for word, i in word_index.items() :
if i >= max_features: continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None: embedding_matrix[i] = embedding_vector
return embedding_matrix<normalization> | temp = temp.reset_index()
temp_no_missing_value = temp[temp.Cabin != 'missing']
temp_missing_value = temp[temp.Cabin == 'missing'] | Titanic - Machine Learning from Disaster |
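All three loaders follow the same recipe: initialize the matrix from a normal distribution matching the embedding file's mean and std, then overwrite the rows of words found in the pretrained index. A toy version of that recipe with hypothetical 4-dimensional vectors in place of the real 300-dimensional files:
import numpy as np
embeddings_index = {'cat': np.array([.1, .2, .3, .4], dtype='float32'),
                    'dog': np.array([.5, .6, .7, .8], dtype='float32')}
word_index = {'cat': 1, 'dog': 2, 'ship': 3}  # tokenizer-style, 1-based
all_embs = np.stack(list(embeddings_index.values()))
nb_words, embed_size = len(word_index) + 1, all_embs.shape[1]
embedding_matrix = np.random.normal(all_embs.mean(), all_embs.std(), (nb_words, embed_size))
for word, i in word_index.items():
    vec = embeddings_index.get(word)
    if vec is not None:
        embedding_matrix[i] = vec  # 'ship' keeps its random initialization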
14,096,495 | glove_embeddings = load_glove(tokenizer.word_index)
paragram_embeddings = load_para(tokenizer.word_index)
embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0)
np.shape(embedding_matrix )<split> | def combined_labeling(df):
return cabin_labeling(ticket_labeling(name_labeling(df)) ) | Titanic - Machine Learning from Disaster |
14,096,495 | splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=10 ).split(x_train, y_train))<normalization> | X_train = combined_labeling(train.copy())
X_test = combined_labeling(test.copy() ) | Titanic - Machine Learning from Disaster |
14,096,495 | class Attention(nn.Module):
def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
super(Attention, self ).__init__(**kwargs)
self.supports_masking = True
self.bias = bias
self.feature_dim = feature_dim
self.step_dim = step_dim
self.features_dim = 0
weight = torch.zeros(feature_dim, 1)
nn.init.xavier_uniform_(weight)
self.weight = nn.Parameter(weight)
if bias:
self.b = nn.Parameter(torch.zeros(step_dim))
def forward(self, x, mask=None):
feature_dim = self.feature_dim
step_dim = self.step_dim
eij = torch.mm(
x.contiguous().view(-1, feature_dim),
self.weight
).view(-1, step_dim)
if self.bias:
eij = eij + self.b
eij = torch.tanh(eij)
a = torch.exp(eij)
if mask is not None:
a = a * mask
a = a / torch.sum(a, 1, keepdim=True)+ 1e-10
weighted_input = x * torch.unsqueeze(a, -1)
return torch.sum(weighted_input, 1 )<define_variables> | def proportions(df):
df = df.groupby(['Pclass', 'Cabin'])['Name'].count().reset_index().rename(columns={'Name':'Passengers'})
total_passengers_in_cabins = df.Passengers[df.Cabin != 'missing'].sum()
cabin_proportions = df['Passengers'][df.Cabin != 'missing'] / total_passengers_in_cabins
return cabin_proportions
def no_cabins(df):
df = df.groupby(['Pclass', 'Cabin'])['Name'].count().reset_index().rename(columns={'Name':'Passengers'})
return df.Passengers[df.Cabin == 'missing']
def cabin_imputer(df, missing_vals, cabin_proportions):
if all(df.Pclass == 1):
imputation_ndarray = np.random.choice(['A', 'B', 'C', 'D', 'E', 'T'], size=missing_vals[0], p=cabin_proportions[1])
missing_values_index = df[df.Cabin == 'missing'].index
imputation_series = pd.Series(imputation_ndarray, index=missing_values_index)
return imputation_series
elif all(df.Pclass == 2):
imputation_ndarray = np.random.choice(['D', 'E', 'F'], size=missing_vals[1], p=cabin_proportions[2])
missing_values_index = df[df.Cabin == 'missing'].index
imputation_series = pd.Series(imputation_ndarray, index=missing_values_index)
return imputation_series
elif all(df.Pclass == 3):
imputation_ndarray = np.random.choice(['E', 'F', 'G'], size=missing_vals[2], p=cabin_proportions[3])
missing_values_index = df[df.Cabin == 'missing'].index
imputation_series = pd.Series(imputation_ndarray, index=missing_values_index)
return imputation_series
def imputer(df):
df.Age.fillna(df.groupby(['Pclass', 'Sex'] ).Age.transform('mean'), inplace=True)
cabin_proportions = X_train.groupby(['Pclass'] ).apply(proportions)
missing_vals = list(df.groupby(['Pclass'] ).apply(no_cabins))
missing_val_imputed_series = df.groupby(['Pclass'] ).apply(partial(cabin_imputer,
missing_vals=missing_vals,
cabin_proportions=cabin_proportions))
missing_val_imputed_series = missing_val_imputed_series.reset_index().set_index('PassengerId')
missing_val_imputed_series = missing_val_imputed_series.drop('Pclass', axis=1 ).sort_index().iloc[:, 0]
for index, val in zip(missing_val_imputed_series.index, missing_val_imputed_series):
df.Cabin[index] = val
for feature in df.columns:
df[feature].fillna(df[feature].mode() [0], inplace=True)
df.Cabin[df.Cabin == 'T'] = 'E'
df.reset_index(drop=True, inplace=True)
return df | Titanic - Machine Learning from Disaster |
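The Attention module in the prompt column scores each timestep with a learned projection, softmax-normalizes the scores, and returns the weighted sum over time, collapsing (batch, steps, features) to (batch, features). A quick shape check, assuming the Attention class is in scope as defined above:
import torch
att = Attention(feature_dim=256, step_dim=50)  # hidden_size * 2, maxlen
x = torch.randn(4, 50, 256)                    # (batch, steps, features)
print(att(x).shape)                            # torch.Size([4, 256])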
14,096,495 | 128*4<define_variables> | X_train_imputed = imputer(X_train.copy())
X_test_imputed = imputer(X_test.copy() ) | Titanic - Machine Learning from Disaster |
14,096,495 | batch_size = 512
n_epochs = 5<choose_model_class> | class FeatureEngineering(BaseEstimator, TransformerMixin):
def __init__(self, drop_Cabin=False, drop_Name=False, Embarked_target=False, SibSp_Parch_simplify=True,
drop_Ticket=False, scaler='MinMaxScaler', smoothing=10, test=False):
self.drop_Cabin = drop_Cabin
self.drop_Name = drop_Name
self.drop_Ticket = drop_Ticket
self.Embarked_target = Embarked_target
self.SibSp_Parch_simplify = SibSp_Parch_simplify
self.scaler = scaler
self.smoothing = smoothing
self.target_encoder = None
self.test = test
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X['Pclass'] = X.Pclass.astype(str)
dummies = pd.get_dummies(X.loc[:, ['Pclass', 'Sex']])
X = pd.concat([X, dummies], axis=1 ).drop(['Pclass', 'Sex'], axis=1)
if not self.test and len(X)> 400:
self.cols = ['Ticket', 'Name', 'Cabin'] if not self.Embarked_target else ['Cabin', 'Name', 'Ticket', 'Embarked']
self.target_encoder = TargetEncoder(cols=self.cols, smoothing=self.smoothing)
self.target_encoder.fit(X.loc[:, self.cols], y_train.iloc[X.index])
X.loc[:, self.cols] = self.target_encoder.transform(X.loc[:, self.cols])
else:
X.loc[:, self.cols] = self.target_encoder.transform(X.loc[:, self.cols])
if self.drop_Cabin:
X.drop('Cabin', axis=1, inplace=True)
if self.drop_Name:
X.drop('Name', axis=1, inplace=True)
if self.drop_Ticket:
X.drop('Ticket', axis=1, inplace=True)
if self.SibSp_Parch_simplify:
X['SibSp'].replace({0:'No', 1:'Yes', 2:'Yes', 3:'Yes', 4:'Yes',
5:'Yes', 6:'Yes', 7:'Yes', 8:'Yes', 9:'Yes'}, inplace=True)
X['Parch'].replace({0:'No', 1:'Yes', 2:'Yes', 3:'Yes', 4:'Yes',
5:'Yes', 6:'Yes', 7:'Yes', 8:'Yes', 9:'Yes'}, inplace=True)
dummies = pd.get_dummies(X.loc[:, ['SibSp', 'Parch']])
X = pd.concat([X, dummies], axis=1 ).drop(['SibSp', 'Parch'], axis=1)
X = pd.get_dummies(X)
features_to_scale = [feature for feature in X.columns if X[feature].max() != 1 and X[feature].min() != 1]
if not self.test and len(X)> 400:
if self.scaler == 'StandardScaler':
self.scale_transformer = StandardScaler()
elif self.scaler == 'MinMaxScaler':
self.scale_transformer = MinMaxScaler()
elif self.scaler == 'RobustScaler':
self.scale_transformer = RobustScaler()
scaled_features = self.scale_transformer.fit_transform(X.loc[:, features_to_scale])
X.loc[:, features_to_scale] = pd.DataFrame(data=scaled_features, columns=features_to_scale, index=X.index)
else:
scaled_features = self.scale_transformer.fit_transform(X.loc[:, features_to_scale])
X.loc[:, features_to_scale] = pd.DataFrame(data=scaled_features, columns=features_to_scale, index=X.index)
return X
| Titanic - Machine Learning from Disaster |
14,096,495 | class NeuralNet(nn.Module):
def __init__(self):
super(NeuralNet, self ).__init__()
hidden_size = 128
self.embedding = nn.Embedding(max_features, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = nn.Dropout2d(0.1)
self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
self.lstm_attention = Attention(hidden_size * 2, maxlen)
self.gru_attention = Attention(hidden_size * 2, maxlen)
self.linear = nn.Linear(1024, 16)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(0.1)
self.out = nn.Linear(16, 1)
def forward(self, x):
h_embedding = self.embedding(x)
h_embedding = torch.squeeze(
self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
h_lstm, _ = self.lstm(h_embedding)
h_gru, _ = self.gru(h_lstm)
h_lstm_atten = self.lstm_attention(h_lstm)
h_gru_atten = self.gru_attention(h_gru)
avg_pool = torch.mean(h_gru, 1)
max_pool, _ = torch.max(h_gru, 1)
conc = torch.cat(( h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)
conc = self.relu(self.linear(conc))
conc = self.dropout(conc)
out = self.out(conc)
return out<define_variables> | def results(cv_results_, n):
df = pd.DataFrame(cv_results_)[['params', 'mean_test_score']].nlargest(n, columns='mean_test_score')
for i in range(len(df)) :
print(f'{df.iloc[i, 0]} : {df.iloc[i, 1]}' ) | Titanic - Machine Learning from Disaster |
14,096,495 | train_preds = np.zeros(( len(train_df)))
test_preds = np.zeros(( len(test_df)))
seed_torch()
x_test_cuda = torch.tensor(x_test, dtype=torch.long ).cuda()
test = torch.utils.data.TensorDataset(x_test_cuda)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
for i,(train_idx, valid_idx)in enumerate(splits):
x_train_fold = torch.tensor(x_train[train_idx], dtype=torch.long ).cuda()
y_train_fold = torch.tensor(y_train[train_idx, np.newaxis], dtype=torch.float32 ).cuda()
x_val_fold = torch.tensor(x_train[valid_idx], dtype=torch.long ).cuda()
y_val_fold = torch.tensor(y_train[valid_idx, np.newaxis], dtype=torch.float32 ).cuda()
model = NeuralNet()
model.cuda()
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum')
optimizer = torch.optim.Adam(model.parameters())
train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)
valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
print(f'Fold {i + 1}')
for epoch in range(n_epochs):
start_time = time.time()
model.train()
avg_loss = 0.
for x_batch, y_batch in tqdm(train_loader, disable=True):
y_pred = model(x_batch)
loss = loss_fn(y_pred, y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() / len(train_loader)
model.eval()
valid_preds_fold = np.zeros(( x_val_fold.size(0)))
test_preds_fold = np.zeros(( len(test_df)))
avg_val_loss = 0.
for i,(x_batch, y_batch)in enumerate(valid_loader):
y_pred = model(x_batch ).detach()
avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader)
valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
elapsed_time = time.time() - start_time
print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format(
epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time))
for i,(x_batch,)in enumerate(test_loader):
y_pred = model(x_batch ).detach()
test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
train_preds[valid_idx] = valid_preds_fold
test_preds += test_preds_fold / len(splits )<compute_test_metric> | param_grid = {'feature_engineering__drop_Cabin':[True, False],
'feature_engineering__drop_Ticket':[True, False],
'feature_engineering__drop_Name':[True, False],
'feature_engineering__Embarked_target':[True, False],
'feature_engineering__SibSp_Parch_simplify':[False],
'feature_engineering__scaler':['StandardScaler'],
'feature_engineering__smoothing':[5]}
feature_engineering_pipeline = Pipeline([('feature_engineering', FeatureEngineering()),
('model', LogisticRegression())])
grid = GridSearchCV(feature_engineering_pipeline, param_grid,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
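Keys such as 'feature_engineering__drop_Cabin' in the grid above address parameters of the named pipeline step via the step-name__param convention. A minimal self-contained version of the same pattern:
from sklearn.datasets import make_classification
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
X, y = make_classification(n_samples=200, random_state=0)
pipe = Pipeline([('scale', StandardScaler()), ('model', LogisticRegression())])
grid = GridSearchCV(pipe, {'scale__with_mean': [True, False],
                           'model__C': [0.1, 1.0]}, cv=5, scoring='accuracy')
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)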
14,096,495 | search_result = threshold_search(y_train, train_preds)
search_result<save_to_csv> | grid.fit(X_train_imputed.copy() , y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | submission = test_df[['qid']].copy()
submission['prediction'] = test_preds > search_result['threshold']
submission.to_csv('submission.csv', index=False )<feature_engineering> | fe = FeatureEngineering(drop_Cabin=True, drop_Name=False, drop_Ticket=False, Embarked_target=False, SibSp_Parch_simplify=False,
scaler='StandardScaler', smoothing=5 ) | Titanic - Machine Learning from Disaster |
14,096,495 | train=pd.read_json('../input/train.json')
test=pd.read_json('../input/test.json')
train['ingredients'] = [", ".join(ingredients)for ingredients in train['ingredients']]
test['ingredients']=[", ".join(ingredients)for ingredients in test['ingredients']]
def multiclass_logloss(actual, predicted, eps=1e-15):
if len(actual.shape)== 1:
actual2 = np.zeros(( actual.shape[0], predicted.shape[1]))
for i, val in enumerate(actual):
actual2[i, val] = 1
actual = actual2
clip = np.clip(predicted, eps, 1 - eps)
rows = actual.shape[0]
vsota = np.sum(actual * np.log(clip))
return -1.0 / rows * vsota
lblencdr=preprocessing.LabelEncoder()
y_cuisine=lblencdr.fit_transform(train['cuisine'].values)
X_train, X_val, y_train, y_val = train_test_split(train['ingredients'].values, y_cuisine,
stratify=y_cuisine,
test_size=0.1,
shuffle=True,
random_state=0)
vect=TfidfVectorizer().fit(list(X_train)+list(X_val))
X_train_vect=vect.transform(X_train)
x_valid_vect=vect.transform(X_val)
ftwo_scorer=metrics.make_scorer(fbeta_score, average='micro', beta=0.5)
param_grid={
'kernel': ['rbf'],
'C': [0.1, 1.0, 100],
'gamma': [0.1, 1, 10]
}
classifier=SVC()
model=GridSearchCV(estimator=classifier, param_grid=param_grid, scoring=ftwo_scorer,
verbose=10, n_jobs=4, cv=5)
model.fit(X_train_vect, y_train)
print("Best score: %0.3f" % model.best_score_)
print("Best Parameters set:")
best_parameters=model.best_estimator_.get_params()
for param_name in sorted(param_grid.keys()):
print("\t%s: %r" %(param_name, best_parameters[param_name]))
predictions=model.predict(vect.transform(X_val))
print(f1_score(predictions, y_val, average='micro'))
y_predict=model.predict(vect.transform(test['ingredients']))
test['cuisine']=lblencdr.inverse_transform(y_predict)
test = test.sort_values('id' , ascending=True)
test[['id' , 'cuisine' ]].to_csv("submission.csv", index=False )<load_from_csv> | X_train_fe = fe.fit_transform(X_train_imputed.copy() ) | Titanic - Machine Learning from Disaster |
14,096,495 | warnings.filterwarnings('ignore')
sub0 = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', parse_dates=['Date'])
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv', parse_dates=['Date'])
train.shape, test.shape<merge> | fe.test = True | Titanic - Machine Learning from Disaster |
14,096,495 | know = test[test.Date <= train.Date.max() ]
not_know = test[test.Date > train.Date.max() ]
know = know.merge(train, on=['Date','Country_Region','Province_State'], how='left')
know.head()<feature_engineering> | X_test_fe = fe.transform(X_test_imputed.copy() ) | Titanic - Machine Learning from Disaster |
14,096,495 | train['days'] =(train['Date'] - train.Date.min() ).dt.days
train['location'] = train['Country_Region'] + ' ' + train['Province_State'].fillna('')
train['location'] = train['location'].str.strip()
not_know['days'] =(not_know['Date'] - train.Date.min() ).dt.days
not_know['location'] = not_know['Country_Region'] + ' ' + not_know['Province_State'].fillna('')
not_know['location']= not_know['location'].str.strip()<feature_engineering> | def parameter_plot(model, X, y, n_estimators=[100, 200, 300, 400, 600, 900, 1300, 1700, 2000, 2500], hyper_param=None, **kwargs):
param_name, param_vals = hyper_param
param_grid = {'n_estimators':n_estimators,
f'{param_name}':param_vals}
grid = GridSearchCV(model(**kwargs), param_grid,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', n_jobs=-1, verbose=2)
grid.fit(X, y)
results = pd.DataFrame(grid.cv_results_)['mean_test_score'].values
results = results.reshape(len(param_vals), len(n_estimators))
plt.figure(figsize=(15, 9))
for i in range(1, len(param_vals)+ 1):
plt.plot(n_estimators, results[i-1], label=f'{param_name} - {param_vals[i-1]}')
plt.legend()
plt.show() | Titanic - Machine Learning from Disaster |
14,096,495 | def get_param(loc):
_ = train[train.location == loc]
_['diff'] = _.ConfirmedCases.diff()
_['pct'] = _.ConfirmedCases.pct_change()
initial_speed = _.loc[_.ConfirmedCases.diff().argmax() ,'pct']
initial_mid = _.loc[_.ConfirmedCases.diff().argmax() , 'days']
initial_max = _.ConfirmedCases.max() * 2.1
return initial_speed, initial_mid, initial_max
get_param('Finland' )<train_on_grid> | def learning_curve_plotter(Model, X, y, params_1, params_2, step=50):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
plt.figure(figsize=(16, 7))
for i,(name, params)in enumerate([params_1, params_2]):
train_score = []
val_score = []
plt.subplot(1, 2, i+1)
for j in range(100, len(X_train), step):
model = Model(**params ).fit(X_train[:j], y_train[:j])
train_score.append(model.score(X_train[:j], y_train[:j]))
val_score.append(model.score(X_test, y_test))
plt.plot(train_score, 'r-', label='Training accuracy')
plt.plot(val_score, 'b-', label='Validation accuracy')
plt.title(f'{name}')
plt.xlabel('Training set size')
plt.ylabel('Accuracy')
plt.legend()
plt.show() | Titanic - Machine Learning from Disaster |
14,096,495 | loc_list = train.location.unique()
all_param = pd.DataFrame(index=loc_list, columns=['k','x_0','y_max'])
for loc in loc_list:
_ = train[train.location == loc]
nn = not_know[not_know.location == loc]
initial_max = _.ConfirmedCases.max() *2
x = _.days
y1 = _.ConfirmedCases
try:
popt, pcov = opt.curve_fit(log_curve, x, y1, p0 = get_param(loc))
popt[2] = max(popt[2],get_param(loc)[2]/2)
except RuntimeError:
popt = get_param(loc)
y1_hat = log_curve(nn.days, *popt)
not_know.loc[y1_hat.index, 'ConfirmedCases'] = y1_hat
all_param.loc[loc, 'k'] = popt[0]
all_param.loc[loc, 'x_0'] = popt[1]
all_param.loc[loc, 'y_max'] = popt[2]
print('Done' )<filter> | param_grid_logreg = {'penalty':['elasticnet'],
'C':[0.03],
'l1_ratio':[0.0],
'solver':['saga']} | Titanic - Machine Learning from Disaster |
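log_curve is not defined in this excerpt, but the three fitted parameters (k, x_0, y_max in all_param) only make sense for a logistic growth curve. A plausible definition and a toy fit, with synthetic data standing in for the real case counts:
import numpy as np
import scipy.optimize as opt
def log_curve(x, k, x_0, y_max):
    return y_max / (1 + np.exp(-k * (x - x_0)))  # standard logistic
x = np.arange(60, dtype=float)
y = log_curve(x, 0.2, 30, 1000) + np.random.normal(0, 5, 60)
popt, pcov = opt.curve_fit(log_curve, x, y, p0=(0.3, 25, 800), maxfev=10000)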
14,096,495 | all_param.loc['Finland', 'y_max']<merge> | grid_logreg = GridSearchCV(LogisticRegression() , param_grid_logreg,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
14,096,495 | latest = train[train.Date == train.Date.max() ]
latest['DeathRate'] = latest['Fatalities'] / latest['ConfirmedCases']
not_know2 = not_know.merge(latest[['location','DeathRate']], on='location')
not_know2['Fatalities'] = not_know2['ConfirmedCases'] * not_know2['DeathRate']*1.1<feature_engineering> | grid_logreg.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | not_know2['ConfirmedCases'] = not_know2['ConfirmedCases'].round()
not_know2['Fatalities'] = not_know2['Fatalities'].round()<save_to_csv> | param_grid_knn = {'n_neighbors':[20],
'weights':['uniform'],
'algorithm':['ball_tree']} | Titanic - Machine Learning from Disaster |
14,096,495 | sub1 = pd.concat([know[['ForecastId', 'ConfirmedCases','Fatalities']],
not_know2[['ForecastId', 'ConfirmedCases','Fatalities']]])
sub1=sub1.sort_values('ForecastId' ).reset_index(drop=True)
sub1.to_csv('submission.csv', index=False )<load_from_csv> | grid_knn = GridSearchCV(KNeighborsClassifier() , param_grid_knn,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
14,096,495 | PATH_WEEK4='/kaggle/input/covid19-global-forecasting-week-4'
df_train = pd.read_csv(f'{PATH_WEEK4}/train.csv')
df_test = pd.read_csv(f'{PATH_WEEK4}/test.csv' )<load_from_csv> | grid_knn.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | mitigation = pd.read_csv('/kaggle/input/mitigation-day/mitigations.csv')
mitigation.loc[mitigation['Name']=="United States",'Name']='US'
mitigation['start'] = pd.to_datetime(mitigation['start'], infer_datetime_format=True )<data_type_conversions> | param_grid_svc = {'C':[0.5],
'kernel':['rbf'],
'gamma':[0.1]} | Titanic - Machine Learning from Disaster |
14,096,495 | df_train.rename(columns={'Country_Region':'Country'}, inplace=True)
df_test.rename(columns={'Country_Region':'Country'}, inplace=True)
df_train.rename(columns={'Province_State':'State'}, inplace=True)
df_test.rename(columns={'Province_State':'State'}, inplace=True)
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True )<feature_engineering> | grid_svc = GridSearchCV(SVC() , param_grid_svc, cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
14,096,495 | NULL_VAL = "NULL_VAL"
def fillState(state, country):
if state == NULL_VAL:
return country
return state +':' + country
def fillState2(state, country):
if type(state)==str:
return country
return state +':' + country
X_Train = df_train.loc[:, ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']]
X_Train['State'].fillna(NULL_VAL, inplace=True)
X_Train['State'] = X_Train.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
firstDay = np.min(X_Train['Date'])
X_Train.loc[:, 'Date'] =(X_Train['Date']-np.min(X_Train['Date'])).values / 86400000000000
X_Train["Date"] = X_Train["Date"].astype(int)
X_Test = df_test.loc[:, ['State', 'Country', 'Date', 'ForecastId']]
X_Test['State'].fillna(NULL_VAL, inplace=True)
X_Test['State'] = X_Test.loc[:, ['State', 'Country']].apply(lambda x : fillState(x['State'], x['Country']), axis=1)
X_Test.loc[:, 'Date'] =(X_Test['Date']-firstDay ).values / 86400000000000
X_Test["Date"] = X_Test["Date"].astype(int )<data_type_conversions> | grid_svc.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | mitigation['start_day'] =(( mitigation['start'] - firstDay ).values / 86400000000000 ).astype(int )<categorify> | param_grid_random = {'n_estimators':[200, 500],
'max_depth':[5, 9],
'max_samples':[0.5, 0.7],
'max_features':[0.5, 0.7],
'min_samples_split':[2, 5, 8]} | Titanic - Machine Learning from Disaster |
14,096,495 | le = LabelEncoder()
countries = X_Train.Country.unique()
df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in countries:
states = X_Train.loc[X_Train.Country == country, :].State.unique()
for state in states:
condition_train =(X_Train.Country == country)&(X_Train.State == state)
X_Train_CS = X_Train.loc[condition_train, ['Date', 'ConfirmedCases', 'Fatalities']]
y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases']
y2_Train_CS = X_Train_CS.loc[:, 'Fatalities']
X_Train_CS = X_Train_CS.loc[:, ['Date']]
condition_test =(X_Test.Country == country)&(X_Test.State == state)
X_Test_CS = X_Test.loc[condition_test, ['Date', 'ForecastId']]
X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId']
X_Test_CS = X_Test_CS.loc[:, ['Date']]
row = mitigation[mitigation['State'] == state]
if row.shape[0]>0:
day = row['start_day'].values[0]
print("use mitigation", state, day)
X_Train_CS['Mitigation'] =(X_Train_CS['Date'] - day)> 0
X_Train_CS['Mitigation_day'] = X_Train_CS['Date'] - day
X_Train_CS.loc[X_Train_CS['Mitigation_day']<0, 'Mitigation_day'] = 0
X_Test_CS['Mitigation'] =(X_Test_CS['Date'] - day)> 0
X_Test_CS['Mitigation_day'] = X_Test_CS['Date'] - day
X_Test_CS.loc[X_Test_CS['Mitigation_day']<0, 'Mitigation_day'] = 0
else:
X_Train_CS['Mitigation'] = False
X_Train_CS['Mitigation_day'] = 0
X_Test_CS['Mitigation'] = False
X_Test_CS['Mitigation_day'] = 0
model1 = XGBRegressor(n_estimators=1000)
model1.fit(X_Train_CS, y1_Train_CS)
y1_pred = model1.predict(X_Test_CS)
model2 = XGBRegressor(n_estimators=1000)
model2.fit(X_Train_CS, y2_Train_CS)
y2_pred = model2.predict(X_Test_CS)
df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred})
df_out = pd.concat([df_out, df], axis=0)
df_out.ForecastId = df_out.ForecastId.astype('int')
df_out.tail()<save_to_csv> | grid_random = GridSearchCV(RandomForestClassifier() , param_grid_random,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=4 ) | Titanic - Machine Learning from Disaster |
14,096,495 | df_out.to_csv('submission.csv', index=False )<load_from_csv> | grid_random.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/train.csv")
train.rename(columns={'Country_Region':'Country'}, inplace=True)
train.rename(columns={'Province_State':'State'}, inplace=True)
train['Date'] = pd.to_datetime(train['Date'], infer_datetime_format=True)
train['Date'] = train.Date.dt.strftime("%m%d")
train['Date'] = train['Date'].astype(int)
train["State"].fillna("",inplace=True)
train["CountryState"] = train["Country"] + train["State"]
print("train done")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-4/test.csv")
test.rename(columns={'Country_Region':'Country'}, inplace=True)
test.rename(columns={'Province_State':'State'}, inplace=True)
test['Date'] = pd.to_datetime(test['Date'], infer_datetime_format=True)
test['Date'] = test.Date.dt.strftime("%m%d")
test['Date'] = test['Date'].astype(int)
test["State"].fillna("",inplace=True)
test["CountryState"] = test["Country"] + test["State"]
print("test done")
<categorify> | param_grid_gradient = {'max_depth':[3],
'n_estimators':[300, 400, 500],
'learning_rate':[0.035, 0.055],
'subsample':[0.4, 0.6],
'max_features':[0.4, 0.6],
'min_samples_split':[2, 5, 8, 12]
} | Titanic - Machine Learning from Disaster |
14,096,495 | %%time
filterwarnings('ignore')
le = LabelEncoder()
finaloutput = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
CountryState = train.CountryState.unique()
for CS in CountryState:
trainIndia = train[train["CountryState"] == CS]
testIndia = test[test["CountryState"] == CS]
trainIndia.CountryState = le.fit_transform(trainIndia.CountryState)
X = trainIndia[['CountryState', 'Date']]
Y = trainIndia[['ConfirmedCases']]
eval_set = [(X,Y)]
model1 = boostmodel(learning_rate=0.3,silent=0, n_estimators=1000)  # boostmodel is defined elsewhere in the notebook, presumably xgboost.XGBRegressor
model1.fit(X, Y,eval_set=eval_set,early_stopping_rounds=100)
testX = testIndia[['CountryState', 'Date']]
testX.CountryState = le.fit_transform(testX.CountryState)
ConfirmedCases_Pred = model1.predict(testX)
X = trainIndia[['CountryState', 'Date']]
Y = trainIndia[['Fatalities']]
eval_set = [(X,Y)]
model2 = boostmodel(learning_rate=0.3,silent=0, n_estimators=1000)
model2.fit(X, Y,eval_set=eval_set,early_stopping_rounds=100)
testX = testIndia[['CountryState', 'Date']]
testX.CountryState = le.fit_transform(testX.CountryState)
Fatalities_Pred = model2.predict(testX)
XForecastId = testIndia.loc[:, 'ForecastId']
output = pd.DataFrame({'ForecastId': XForecastId, 'ConfirmedCases': ConfirmedCases_Pred, 'Fatalities': Fatalities_Pred})
finaloutput = pd.concat([finaloutput, output], axis=0)
print("Program completed" )<feature_engineering> | grid_gradient = GridSearchCV(GradientBoostingClassifier() , param_grid_gradient,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
14,096,495 | finaloutput.ConfirmedCases.apply(math.floor )<save_to_csv> | grid_gradient.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | finaloutput.ForecastId = finaloutput.ForecastId.astype('int')
finaloutput.ConfirmedCases = round(finaloutput.ConfirmedCases,1)
finaloutput.Fatalities = round(finaloutput.Fatalities,1)
finaloutput = finaloutput[['ForecastId','ConfirmedCases','Fatalities']]
finaloutput.to_csv("submission.csv",index=False)
print("done" )<import_modules> | param_grid_xgb = {'n_estimators':[300, 450],
'learning_rate':[0.02, 0.03],
'max_depth':[6],
'subsample':[0.5, 0.7],
'colsample_bylevel':[0.5, 0.7],
'reg_lambda':[1, 5, 15, ]
} | Titanic - Machine Learning from Disaster |
14,096,495 | import numpy as np
import pandas as pd<load_from_csv> | grid_xgb = GridSearchCV(XGBClassifier() , param_grid_xgb,
cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
scoring='accuracy', verbose=2, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
14,096,495 | X_train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
X_test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
X_submission = pd.read_csv('../input/covid19-global-forecasting-week-4/submission.csv' )<data_type_conversions> | grid_xgb.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | X_train['Date'] = pd.to_datetime(X_train['Date'])
X_test['Date'] = pd.to_datetime(X_test['Date'])
X_test['Date']<count_unique_values> | logreg = LogisticRegression(**{'C': 0.03, 'l1_ratio': 0, 'penalty': 'elasticnet', 'solver': 'saga'})
svc = SVC(**{'C': 0.5, 'gamma': 0.1, 'kernel': 'rbf'})
knn = KNeighborsClassifier(**{'algorithm': 'ball_tree', 'n_neighbors': 20, 'weights': 'uniform'})
rfc = RandomForestClassifier(**{'max_depth': 5, 'max_features': 0.5, 'max_samples': 0.7,
'min_samples_split': 5, 'n_estimators': 500})
gradient = GradientBoostingClassifier(**{'learning_rate': 0.055, 'max_depth': 3, 'max_features': 0.4,
'min_samples_split': 2, 'n_estimators': 400, 'subsample': 0.4})
xgb = XGBClassifier(**{'colsample_bylevel': 0.5, 'learning_rate': 0.03, 'max_depth': 6,
'n_estimators': 300, 'reg_lambda': 5, 'subsample': 0.7})
estimators = [('logreg', logreg),('knn', knn),('svc', svc),('rfc', rfc),('gradient', gradient),
('xgb', xgb)]
stack = StackingClassifier(estimators=estimators,
cv=10, n_jobs=-1 ) | Titanic - Machine Learning from Disaster |
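StackingClassifier fits every base estimator on cross-validated folds and trains a final meta-learner (logistic regression by default) on their out-of-fold predictions. A compact sketch with two base models on synthetic data:
from sklearn.datasets import make_classification
from sklearn.ensemble import StackingClassifier, RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
X, y = make_classification(n_samples=300, random_state=0)
stack = StackingClassifier(
    estimators=[('rf', RandomForestClassifier(n_estimators=50, random_state=0)),
                ('svc', SVC(probability=True, random_state=0))],
    final_estimator=LogisticRegression(), cv=5)
print(stack.fit(X, y).score(X, y))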
14,096,495 | print(X_train.Country_Region.nunique() )<define_variables> | stack.fit(X_train_fe, y_train ) | Titanic - Machine Learning from Disaster |
14,096,495 | countries_no_province = [i for i in countries if i not in countries_with_provinces]
len(countries_no_province )<data_type_conversions> | y_preds = stack.predict(X_test_fe ) | Titanic - Machine Learning from Disaster |
14,096,495 | X_train['Province_State'] = X_train['Province_State'].fillna('unknown')
X_test['Province_State'] = X_test['Province_State'].fillna('unknown' )<groupby> | submission = pd.DataFrame({'PassengerId':test.index,
'Survived':y_preds} ) | Titanic - Machine Learning from Disaster |
14,096,495 | X_train[X_train['Country_Region'].isin(countries_with_provinces)].groupby(['Country_Region'] ).agg({'Province_State':'nunique'} )<data_type_conversions> | submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,096,495 | X_train['Date'] = X_train['Date'].dt.strftime("%m%d")
X_train['Date'] = X_train['Date'].astype(int)
X_test['Date'] = X_test['Date'].dt.strftime("%m%d")
X_test['Date'] = X_test['Date'].astype(int )<data_type_conversions> | submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,096,495 | <data_type_conversions><EOS> | pd.read_csv('submission.csv' ) | Titanic - Machine Learning from Disaster |
14,142,829 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<prepare_x_and_y> | mytrainset = pd.read_csv('../input/titanic/train.csv')
mytrainset.head()
| Titanic - Machine Learning from Disaster |
14,142,829 | FEATURES = ['Date']
X_submission = pd.DataFrame(columns=['ForecastId', 'ConfirmedCases', 'Fatalities'])
for i in tqdm(X_train.Country_Region.unique()):
z_train = X_train[X_train['Country_Region'] == i]
z_test = X_test[X_test['Country_Region'] == i]
for k in z_train.Province_State.unique() :
p_train = z_train[z_train['Province_State'] == k]
p_test = z_test[z_test['Province_State'] == k]
X_train_final = p_train[FEATURES]
y1 = p_train['ConfirmedCases']
y2 = p_train['Fatalities']
model = xgb.XGBRegressor(n_estimators=2000)
model.fit(X_train_final, y1)
ConfirmedCasesPreds = model.predict(p_test[FEATURES])
model.fit(X_train_final, y2)
FatalitiesPreds = model.predict(p_test[FEATURES])
p_test['ConfirmedCases'] = ConfirmedCasesPreds
p_test['Fatalities'] = FatalitiesPreds
X_submission = pd.concat([X_submission, p_test[['ForecastId', 'ConfirmedCases', 'Fatalities']]], axis=0)
<save_to_csv> | mytestset = pd.read_csv('../input/titanic/test.csv')
mytestset.head()
| Titanic - Machine Learning from Disaster |
14,142,829 | X_submission.to_csv('submission.csv', index=False )<import_modules> | ages_mean_train = mytrainset['Age'].mean()
ages_mean_train = round(ages_mean_train)
ages_mean_test = mytestset['Age'].mean()
ages_mean_test = round(ages_mean_test)
| Titanic - Machine Learning from Disaster |
14,142,829 | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_log_error<load_from_csv> | mytrainset['Age'] = mytrainset['Age'].replace(np.nan, ages_mean_train)
mytrainset = mytrainset.drop(["Cabin"], axis=1)
mytrainset = mytrainset.drop(["Name"], axis=1)
mytrainset = mytrainset.drop(["Ticket"], axis=1)
mytrainset['Embarked'].replace({'C':1, 'Q':2, 'S':3},inplace=True)
mytrainset['Embarked'] = mytrainset['Embarked'].fillna(0)
mytrainset['Sex'].replace({'male':0, 'female':1},inplace=True)
mytestset['Age'] = mytestset['Age'].replace(np.nan, ages_mean_test)
mytestset = mytestset.drop(["Cabin"], axis=1)
mytestset = mytestset.drop(["Name"], axis=1)
mytestset = mytestset.drop(["Ticket"], axis=1)
mytestset['Embarked'].replace({'C':1, 'Q':2, 'S':3},inplace=True)
mytestset['Sex'].replace({'male':0, 'female':1},inplace=True)
mytestset['Fare'] = mytestset['Fare'].fillna(mytestset['Fare'].mean())
| Titanic - Machine Learning from Disaster |
14,142,829 | df=pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv', index_col='Id')
dtest=pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv', index_col='ForecastId' )<prepare_x_and_y> | y = mytrainset['Survived']
features = ["Pclass", "Age", "Sex", "SibSp", "Parch", "Embarked"]
X = mytrainset[features]
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.3, random_state=1 ) | Titanic - Machine Learning from Disaster |
14,142,829 | y1=df['ConfirmedCases']
y2=df['Fatalities']
df.drop('ConfirmedCases', axis=1, inplace=True)
df.drop('Fatalities', axis=1, inplace=True )<feature_engineering> | model= RandomForestClassifier(n_estimators=200,max_depth=5, random_state=1)
model = model.fit(X_train, Y_train)
predictions = model.predict(X_test ) | Titanic - Machine Learning from Disaster |
14,142,829 | df['check']=1
dtest['check']=2
combo=pd.concat([df,dtest])
def date_split(date):
d=date.str.split('-', n=1, expand=True)
return d[1]
combo['MM_DD']= date_split(combo['Date'])
combo['Province_State']=combo['Province_State'].fillna(0 )<categorify> | accuracy_score(predictions, Y_test)
| Titanic - Machine Learning from Disaster |
14,142,829 | le=LabelEncoder()
combo['MM_DD']=le.fit_transform(combo['MM_DD'])
combo=pd.get_dummies(combo)
df1=combo[combo['check']==1]
dtest1=combo[combo['check']==2]<drop_column> | gnb = GaussianNB()
NB_model_predictions = gnb.fit(X_train, Y_train ).predict(X_test)
accuracy_score(NB_model_predictions, Y_test)
| Titanic - Machine Learning from Disaster |
14,142,829 | df1.drop('check', axis=1, inplace=True)
dtest1.drop('check', axis=1, inplace=True )<split> | decisiontreeModel = DecisionTreeClassifier()
decisiontreeModel = decisiontreeModel.fit(X_train, Y_train)
decisiontreePredicition = decisiontreeModel.predict(X_test)
accuracy_score(decisiontreePredicition, Y_test)
| Titanic - Machine Learning from Disaster |
14,142,829 | X_train1, X_valid1, y_train1, y_valid1 = train_test_split(df1, y1, train_size=0.8, test_size=0.2, random_state=0)
X_train2, X_valid2, y_train2, y_valid2 = train_test_split(df1, y2, train_size=0.8, test_size=0.2, random_state=0 )<compute_train_metric> | knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(X_train, Y_train)
knnPredictions = knn.predict(X_test)
accuracy_score(Y_test, knnPredictions)
| Titanic - Machine Learning from Disaster |
14,142,829 | ef= ExtraTreesRegressor(n_estimators=15, random_state=3)
p2=ef.fit(X_train1, y_train1 ).predict(X_valid1)
rmsle2=np.sqrt(mean_squared_log_error(y_valid1 , p2))
print(rmsle2 )<compute_train_metric> | LRModel = LogisticRegression(max_iter = 200)
LRModel.fit(X_train, Y_train)
LRModel_Prediction = LRModel.predict(X_test)
accuracy_score(Y_test, LRModel_Prediction)
| Titanic - Machine Learning from Disaster |
14,142,829 | ef2= ExtraTreesRegressor(n_estimators=29,criterion='friedman_mse', random_state=7)
p3=ef2.fit(X_train2, y_train2 ).predict(X_valid2)
rmsle3=np.sqrt(mean_squared_log_error(y_valid2 , p3))
print(rmsle3 )<predict_on_test> | cv = KFold(n_splits=10, random_state=1, shuffle=True)
scores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
print("%0.2f accuracy" %(scores.mean())) | Titanic - Machine Learning from Disaster |
14,142,829 | pre1=ef.fit(df1,y1 ).predict(dtest1)
pre2=ef2.fit(df1,y2 ).predict(dtest1 )<save_to_csv> | scores = cross_val_score(gnb, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
scores
print("%0.2f accuracy" %(scores.mean())) | Titanic - Machine Learning from Disaster |
14,142,829 | output=pd.DataFrame({'ForecastId': dtest.index, 'ConfirmedCases':pre1, 'Fatalities':pre2})
output.to_csv('submission.csv', index=False )<import_modules> | scores = cross_val_score(decisiontreeModel, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
scores
print("%0.2f accuracy" %(scores.mean())) | Titanic - Machine Learning from Disaster |
14,142,829 | import pandas as pd
from pathlib import Path
from pandas_profiling import ProfileReport
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
import datetime
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score<load_from_csv> | scores = cross_val_score(knn, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
scores
print("%0.2f accuracy" %(scores.mean())) | Titanic - Machine Learning from Disaster |
14,142,829 | dataset_path = Path('/kaggle/input/covid19-global-forecasting-week-4')
train = pd.read_csv(dataset_path/'train.csv')
test = pd.read_csv(dataset_path/'test.csv')
submission = pd.read_csv(dataset_path/'submission.csv' )<categorify> | scores = cross_val_score(LRModel, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
scores
print("%0.2f accuracy" %(scores.mean())) | Titanic - Machine Learning from Disaster |
14,142,829 | def fill_state(state,country):
if pd.isna(state): return country
return state<feature_engineering> | testing = mytestset[features]
testPredicitons = model.predict(testing ) | Titanic - Machine Learning from Disaster |
14,142,829 | <feature_engineering><EOS> | testPredicitons = {'PassengerId':mytestset["PassengerId"], "Survived": testPredicitons}
pd.DataFrame(testPredicitons ).to_csv("predictions.csv", index = False ) | Titanic - Machine Learning from Disaster |
13,882,987 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify> | import numpy as np
import pandas as pd
from xgboost import XGBClassifier
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,AdaBoostClassifier,ExtraTreesClassifier,GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression,SGDClassifier,RidgeClassifier,PassiveAggressiveClassifier,Perceptron
from sklearn.neural_network import MLPClassifier
import re
import optuna
from optuna.samplers import TPESampler
from lightgbm import LGBMClassifier
from mlens.ensemble import SuperLearner
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle | Titanic - Machine Learning from Disaster |
13,882,987 | submission=pd.DataFrame(columns=submission.columns)
l1=LabelEncoder()
l2=LabelEncoder()
l1.fit(train['Country_Region'])
l2.fit(train['Province_State'] )<categorify> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
PassengerId = test['PassengerId'] | Titanic - Machine Learning from Disaster |
13,882,987 | countries=train['Country_Region'].unique()
for country in countries:
country_df=train[train['Country_Region']==country]
provinces=country_df['Province_State'].unique()
for province in provinces:
train_df=country_df[country_df['Province_State']==province]
train_df.pop('Id')
x=train_df[['Province_State','Country_Region','Day_of_Week','Month','Day','Day_of_Year','Week_of_Year','Quarter']]
x['Country_Region']=l1.transform(x['Country_Region'])
x['Province_State']=l2.transform(x['Province_State'])
y1=train_df[['ConfirmedCases']]
y2=train_df[['Fatalities']]
model_1=DecisionTreeClassifier()
model_2=DecisionTreeClassifier()
model_1.fit(x,y1)
model_2.fit(x,y2)
test_df=test.query('Province_State==@province & Country_Region==@country')
test_id=test_df['ForecastId'].values.tolist()
test_df.pop('ForecastId')
test_x=test_df[['Province_State','Country_Region','Day_of_Week','Month','Day','Day_of_Year','Week_of_Year','Quarter']]
test_x['Country_Region']=l1.transform(test_x['Country_Region'])
test_x['Province_State']=l2.transform(test_x['Province_State'])
test_y1=model_1.predict(test_x)
test_y2=model_2.predict(test_x)
test_res=pd.DataFrame(columns=submission.columns)
test_res['ForecastId']=test_id
test_res['ConfirmedCases']=test_y1
test_res['Fatalities']=test_y2
submission=submission.append(test_res )<save_to_csv> | train.isna().sum() | Titanic - Machine Learning from Disaster |
13,882,987 | submission
submission.to_csv('submission.csv',index=False )<define_variables> | test.isna().sum() | Titanic - Machine Learning from Disaster |
13,882,987 | datapath = '../input/covid19-global-forecasting-week-4/'
datapath2 = '../input/worldpopulationinfo/'
datapath3 = '../input/country-ppp/'
datapath4 = '../input/populationandcountryinfo/'
datapath5 = '../input/usstateland/'
datapath_week1 = '../input/covid19week1/'
add_other = True
CURVE_SMOOTHING = True
USE_NEW = False
normfactor = 1.0
NUM_SHIFT = 25
days_shift =[0,1,2,3,5,7,10,14,18,22,27,32,37,42]
days_shift =[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,16,18,20,22,24,26,28,30,32,34,36]
NUM_MODELS = 10
TRAIN_START_DAY = 1
TARGETS = ["ConfirmedCases", "Fatalities"]
if USE_NEW:
TARGETS = ["NewCases", "NewFatalities"]
loc_group = ["Province_State", "Country_Region"]
if add_other:
base_features = ['ages 0-14', 'ages 15-64', 'ages 64-',
'Population(2020)', 'Density(P/Km²)', 'Med.Age', 'Urban Pop %',
'Apr', 'year', 'Low Temp', 'ppp', 'Season',
'R_Africa','R_China',
'R_Australia and New Zealand', 'R_Eastern Asia', 'R_Europe',
'R_Latin America and the Caribbean', 'R_Northern America', 'R_Oceania',
'R_Southern Asia', 'R_Western Asia',
'I_High income', 'I_Low income', 'I_Lower middle income',
'I_Upper middle income']
base_features = ['ages 64-',
'Urban Pop %',
'Season','Apr','ppp']
if USE_NEW:
base_features =['NewFatalities_ppm','NewCases_ppm'] + base_features
else:
a=1
else:
base_features=[]
def get_shift_features() :
shift_features = []
for s in range(1, NUM_SHIFT+1):
for col in TARGETS:
shift_features.append("prev_{}_{}".format(col, days_shift[s]))
return shift_features
shift_features = get_shift_features()
prev_targets = shift_features[0:2]
def fill_shift_columns(df,targets):
for s in range(1, NUM_SHIFT+1):
for col in targets:
df["prev_{}_{}".format(col, days_shift[s])] = df.groupby(loc_group)[col].shift(days_shift[s])
return df
def add_seasons(coor_df):
coor_df["Season"] = 0
mask = coor_df["Lat"]>20
coor_df.loc[mask,"Season"] = 1
mask = coor_df["Lat"]<-20
coor_df.loc[mask,"Season"] = -1
return coor_df
def preprocess(df):
df["Date"] = df["Date"].astype("datetime64[ms]")
df["days"] =(df["Date"] - pd.to_datetime("2020-01-01")).dt.days
for col in loc_group:
df[col].fillna("none", inplace=True)
return df
def print_rows_with_nan(df, features=False):
    if not features:
        dyt = df.isna()
        rows_with_nan = dyt.any(axis=1)
        print(df.loc[rows_with_nan])
    else:
        dyt = df[features].isna()
        rows_with_nan = dyt.any(axis=1)
        print(df[features].loc[rows_with_nan])
    return
def diff(first, second):
second = set(second)
return [item for item in first if item not in second]
def has_nan(df,features=False):
if not features:
dyt = df.isnull()
else:
dyt = df[features].isnull()
dyt2 = dyt.any(axis=1)
return dyt2.sum()
def print_washinton(df,features,values):
print(len(values))
if len(values)== 1:
mask1 = df[features[0]]==values[0]
print(df.loc[mask1,features+["Date","ConfirmedCases","Fatalities"]].tail(50))
elif len(values)== 2:
mask1 = df[features[0]]==values[0]
mask2 = df[features[1]]==values[1]
mask4 = pd.concat(( mask1,mask2), axis=1)
mask5 = mask4.all(axis=1)
print(df.loc[mask5,features+["Date","ConfirmedCases","Fatalities"]].tail(50))
else:
mask1 = df[features[0]]==values[0]
mask2 = df[features[1]]==values[1]
mask3 = df[features[2]]==values[2]
mask4 = pd.concat(( mask1,mask2,mask3), axis=1)
mask5 = mask4.all(axis=1)
print(df.loc[mask5,["Date"]])
return
def find_anomalities(df):
df[['Cases_test']] = df.groupby([ 'Country_Region', 'Province_State'])\
[['ConfirmedCases']].shift(1)
df[['Fatality_test']] = df.groupby(['Country_Region', 'Province_State'])\
[['Fatalities']].shift(1)
mask1 = df['ConfirmedCases'] < df['Cases_test']
mask2 = df['Fatalities'] < df['Fatality_test']
print("ANOMALITIES ConfirmedCases")
print(df.loc[mask1])
print("ANOMALITIES Fatalities")
print(df.loc[mask2])
print("SLUT")
return
def correct_anomalities(df):
df[['Cases_testm']] = df.groupby([ 'Country_Region', 'Province_State'])\
[['ConfirmedCases']].shift(1)
df[['Fatalities_testm']] = df.groupby(['Country_Region', 'Province_State'])\
[['Fatalities']].shift(1)
df[['Cases_testp']] = df.groupby([ 'Country_Region', 'Province_State'])\
[['ConfirmedCases']].shift(-1)
df[['Fatalities_testp']] = df.groupby(['Country_Region', 'Province_State'])\
[['Fatalities']].shift(-1)
print("CORRECTING ConfirmedCases")
mask1 = df['ConfirmedCases'] < df['Cases_testm']
mask2 = df['Cases_testp'] >= df['Cases_testm']
mask3 = pd.concat(( mask1,mask2),axis=1)
mask4 = mask3.all(axis=1)
df.loc[mask4,'ConfirmedCases'] = 0.5*(df.loc[mask4,'Cases_testp']+df.loc[mask4,'Cases_testm'])
print(df.loc[mask4,['Country_Region', 'Province_State','Cases_testm','ConfirmedCases','Cases_testp']])
mask1 = df['ConfirmedCases'] > df['Cases_testm']
mask2 = df['Cases_testp'] == df['Cases_testm']
mask3 = pd.concat(( mask1,mask2),axis=1)
mask4a = mask3.all(axis=1)
df.loc[mask4a,'ConfirmedCases'] = 0.5*(df.loc[mask4a,'Cases_testp']+df.loc[mask4a,'Cases_testm'])
print(df.loc[mask4a,['Country_Region', 'Province_State','Cases_testm','ConfirmedCases','Cases_testp']])
print("CORRECTED ConfirmedCases:", mask4.sum() ,"+", mask4a.sum())
print("CORRECTING Fatalities")
mask1 = df['Fatalities'] < df['Fatalities_testm']
mask2 = df['Fatalities_testp'] >= df['Fatalities_testm']
mask3 = pd.concat(( mask1,mask2),axis=1)
mask4 = mask3.all(axis=1)
df.loc[mask4,'Fatalities'] = 0.5*(df.loc[mask4,'Fatalities_testp']+df.loc[mask4,'Fatalities_testm'])
print(df.loc[mask4,['Country_Region', 'Province_State','Fatalities_testm','Fatalities','Fatalities_testp']])
mask1 = df['Fatalities'] > df['Fatalities_testm']
mask2 = df['Fatalities_testp'] == df['Fatalities_testm']
mask3 = pd.concat(( mask1,mask2),axis=1)
mask4a = mask3.all(axis=1)
df.loc[mask4a,'Fatalities'] = 0.5*(df.loc[mask4a,'Fatalities_testp']+df.loc[mask4a,'Fatalities_testm'])
print(df.loc[mask4a,['Country_Region', 'Province_State','Fatalities_testm','Fatalities','Fatalities_testp']])
print("CORRECTED Fatalities:",mask4.sum() ,"+",mask4a.sum())
print("CORRECTING Series(in Mix)")
cases = 1
sumcases = 0
while cases > 0:
mask1 = df['ConfirmedCases'] < df['Cases_testm']
df.loc[mask1,'ConfirmedCases'] = df.loc[mask1,'Cases_testm']
print("Cases",df.loc[mask1,['Country_Region', 'Province_State','Cases_testm','ConfirmedCases']])
mask2 = df['Fatalities'] < df['Fatalities_testm']
df.loc[mask2,'Fatalities'] = df.loc[mask2,'Fatalities_testm']
print("Fatas",df.loc[mask2,['Country_Region', 'Province_State','Fatalities_testm','Fatalities']])
cases = mask1.sum() +mask2.sum()
sumcases+=cases
        print()
df[['Cases_testm']] = df.groupby([ 'Country_Region', 'Province_State'])\
[['ConfirmedCases']].shift(1)
df[['Fatalities_testm']] = df.groupby(['Country_Region', 'Province_State'])\
[['Fatalities']].shift(1)
df.drop([ 'Cases_testm', 'Fatalities_testm', 'Cases_testp','Fatalities_testp'],axis=1,inplace=True)
print("Corrected in Series in all",sumcases)
return<load_from_csv> | df = pd.concat([train, test])
df = df.reset_index(drop=True)
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
df['IsAlone'] = 0
df.loc[df['FamilySize'] == 1, 'IsAlone'] = 1
df['Fare'] = df['Fare'].fillna(df['Fare'].median())
df['Has_Cabin'] = df["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
df['Title'] = df['Name'].apply(get_title)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
df['Title'] = df['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
df['Sex'] = df['Sex'].map({'female': 0, 'male': 1} ).astype(int)
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
df['Title'] = df['Title'].map(title_mapping)
df.loc[ df['Fare'] <= 7.91, 'Fare'] = 0
df.loc[(df['Fare'] > 7.91)&(df['Fare'] <= 14.454), 'Fare'] = 1
df.loc[(df['Fare'] > 14.454)&(df['Fare'] <= 31), 'Fare'] = 2
df.loc[ df['Fare'] > 31, 'Fare'] = 3
df['Fare'] = df['Fare'].astype(int ) | Titanic - Machine Learning from Disaster |
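# The hard-coded fare cut points above (7.91, 14.454, 31) are the quartile
# edges of the Titanic training fares, so the same ordinal feature can be
# derived with pd.qcut on the raw fares. Sketch on toy values, since df['Fare']
# has already been overwritten with bin codes at this point:
raw_fare = pd.Series([7.25, 8.05, 13.0, 26.55, 71.28, 512.33])  # made-up fares
fare_code = pd.qcut(raw_fare, 4, labels=False)  # 0..3, one code per quartile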
13,882,987 | population_by_age_df = pd.read_csv(datapath4 + "population_age_info.csv")
population_by_age_df.drop('ID',axis=1,inplace=True)
population_by_age_df[['ages 0-14', 'ages 15-64','Density(P/Km²)','Med.Age', 'ages 64-','Urban Pop %']] = \
population_by_age_df[['ages 0-14', 'ages 15-64','Density(P/Km²)','Med.Age', 'ages 64-','Urban Pop %']].apply(lambda x: x.fillna(x.mean()))
translations = [["Brunei Darussalam","Brunei"],
["Myanmar","Bruma"],
["Gambia, The","Gambia"],
["Egypt, Arab Rep.","Egypt"],
["Congo, Rep.","Congo(Brazzaville)"],
["Congo, Dem.Rep.","Congo(Kinshasa)"],
["Iran, Islamic Rep.","Iran"],
["Korea, Rep.","Korea, South"],
["Russian Federation","Russia"],
["Syrian Arab Republic","Syria"],
["Venezuela, RB","Venezuela"],
["Slovak Republic","Slovakia"]]
for post in translations:
mask = population_by_age_df["Country_Region"] == post[0]
population_by_age_df.loc[mask,"Country_Region"] = post[1]
usstates_land = pd.read_csv(datapath5 + "us-state-land.csv", sep='\t')
usstates_population = pd.read_csv(datapath2 + "us-state-population.csv", sep='\t')
usstates_info = pd.merge(usstates_land,usstates_population, on=['Province_State'], how='left')
usstates_info['Country_Region'] = 'US'
usstates_info["Province_State"].replace('_',' ', regex=True,inplace=True)
supp_info = pd.read_csv(datapath2 + "population_info_supplement.csv")
supp_info["Country_Region"].replace(':',',', regex=True,inplace=True)
CountryRegion = pd.read_csv(datapath2+'CountryRegion.csv')
CountryRegion.drop('ID',axis=1,inplace=True)
state_temperatures = pd.read_csv(datapath4+'state_temperature_info.csv')
state_temperatures.drop('ID',axis=1,inplace=True)
state_temperatures["Province_State"].fillna("", inplace=True)
ppp_tabel = pd.read_csv(datapath3 + 'Country_PPP.csv', sep=r'\s+')
ppp_tabel.drop('Id', axis=1, inplace=True)
ppp_tabel = ppp_tabel.append({'Country' : 'Burma' , 'ppp' : 8000} , ignore_index=True)
ppp_tabel = ppp_tabel.append({'Country' : 'MS_Zaandam' , 'ppp' : 40000} , ignore_index=True)
ppp_tabel = ppp_tabel.append({'Country' : 'West_Bank_and_Gaza' , 'ppp' : 20000} , ignore_index=True)
ppp_tabel["Country"].replace('_',' ', regex=True,inplace=True)
ppp_tabel["Country"].replace('United States','US', regex=True,inplace=True)
ppp_tabel.rename(columns={'Country':'Country_Region'},inplace=True)
ppp_tabel.sort_values('Country_Region',inplace=True)
coor_df = pd.read_csv(datapath_week1 + "train.csv" ).rename(columns={"Country/Region": "Country_Region","Province/State":"Province_State"})
coor_df["Province_State"].fillna("", inplace=True)
coor_df = coor_df[coor_df["Country_Region"].notnull() ]
coor_df = coor_df.groupby(["Country_Region","Province_State"])[["Lat", "Long"]].mean().reset_index()
coor_df = add_seasons(coor_df)
coor_country = coor_df.groupby(["Country_Region"])[["Lat", "Long"]].mean().reset_index()
coor_country = add_seasons(coor_country)
coor_country.rename(columns={'Season':'Season2'},inplace=True)
coor_df = pd.merge(coor_df,coor_country, on=['Country_Region'], how='left')
<merge> | df['Title2'] = df['Name'].apply(get_title)
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss',
'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'}
df.replace({'Title2': mapping}, inplace=True)
titles = ['Dr', 'Master', 'Miss', 'Mr', 'Mrs', 'Rev']
for title in titles:
age_to_impute = df.groupby('Title2')['Age'].median() [titles.index(title)]
df.loc[(df['Age'].isnull())&(df['Title2'] == title), 'Age'] = age_to_impute
df.drop('Title2', axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
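# The title-median imputation above can also be written without the explicit
# loop: groupby(...).transform('median') broadcasts each group's median back
# onto its rows, and fillna applies it only where Age is missing. Equivalent
# sketch on toy data (Title2 is already dropped from df at this point):
age_demo = pd.DataFrame({'Title2': ['Mr', 'Mr', 'Miss'], 'Age': [22.0, None, 30.0]})
age_demo['Age'] = age_demo['Age'].fillna(age_demo.groupby('Title2')['Age'].transform('median'))
print(age_demo)  # the missing 'Mr' age becomes 22.0, the group median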
13,882,987 | def add_other_info(df,istest_df,usstates_info,supp_info,population_by_age_df,CountryRegion,state_temperatures,ppp_tabel,coor_df):
df = pd.merge(df, usstates_info, on=['Country_Region','Province_State'], how='left')
df = pd.merge(df, population_by_age_df, on=['Country_Region'], how='left')
df[['ages 0-14', 'ages 15-64','Density(P/Km²)','Med.Age', 'ages 64-','Urban Pop %']] = \
df[['ages 0-14', 'ages 15-64','Density(P/Km²)','Med.Age', 'ages 64-','Urban Pop %']].apply(lambda x: x.fillna(x.mean()))
print(df[df['IncomeGroup'].isnull() ]["Country_Region"].unique())
df = pd.merge(df, CountryRegion, on=['Country_Region'], how='left')
mask = df['Country_Region']=='US'
df.loc[mask,'Population(2020)'] = df.loc[mask,'population']
df.loc[mask,'Density(P/Km²)'] = df.loc[mask,'population']/df.loc[mask,'land']
df.drop(["population","land"],axis=1,inplace=True)
df = pd.merge(df, supp_info, on=['Country_Region'], how='left')
mask = df['Population(2020)'].isnull()
df.loc[mask,'Population(2020)'] = df.loc[mask,'population']
df.loc[mask,'Density(P/Km²)'] = df.loc[mask,'population']/df.loc[mask,'land']
df.drop(["population","land"],axis=1,inplace=True)
df = pd.merge(df, state_temperatures, on=['Country_Region','Province_State'], how='left')
df[['Jan', 'Feb', 'Mar', 'Apr', 'May',
'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'year', 'Low Temp']] = \
df[['Jan', 'Feb', 'Mar', 'Apr', 'May',
'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'year', 'Low Temp']].apply(lambda x: x.fillna(x.mean()))
df.drop(['May','Jun', 'Jul', 'Aug', 'Sep', 'Oct','Nov', 'Dec'],axis=1,inplace=True)
df.drop(['Jan', 'Feb', 'Mar'],axis=1,inplace=True)
df = pd.merge(df,ppp_tabel, on=['Country_Region'], how='left')
df = pd.merge(df,coor_df[["Country_Region",'Province_State',"Season","Season2"]],
on=["Country_Region",'Province_State'], how="left")
mask = df['Season'].isna()
df.loc[mask,["Season"]]= df.loc[mask,"Season2"]
df["Season"].fillna(1, inplace=True)
mask = df['Country_Region']== 'China'
df.loc[mask,["sub-region"]]= 'China'
onehot = pd.get_dummies(df['sub-region'],prefix='R')
onehot2 = pd.get_dummies(df['IncomeGroup'],prefix='I')
df.drop(["Season2","sub-region",'IncomeGroup'],axis=1,inplace= True)
df = pd.concat([df,onehot],axis=1,ignore_index=False)
df = pd.concat([df,onehot2],axis=1,ignore_index=False)
if istest_df:
if USE_NEW:
df["NewCases_ppm"] = np.log1p(1000000/df["Population(2020)"])
df["NewFatalities_ppm"] = np.log1p(1000000/df["Population(2020)"])
else:
df["ConfirmedCases_ppm"] = np.log1p(1000000/df["Population(2020)"])
df["Fatalities_ppm"] = np.log1p(1000000/df["Population(2020)"])
else:
if USE_NEW:
df["NewCases_ppm"] = np.log1p(df["NewCases"]*1000000/df["Population(2020)"])
df["NewFatalities_ppm"] = np.log1p(df["NewFatalities"]*1000000/df["Population(2020)"])
else:
df["ConfirmedCases_ppm"] = np.log1p(df["ConfirmedCases"]*1000000/df["Population(2020)"])
df["Fatalities_ppm"] = np.log1p(df["Fatalities"]*1000000/df["Population(2020)"])
df['ages 0-14'] = df['ages 0-14']/100.
df['ages 15-64'] = df['ages 15-64']/100.
df['ages 64-'] = df['ages 64-']/100.
df['Population(2020)'] = np.log1p(df['Population(2020)'])
df['Density(P/Km²)'] = np.log1p(df['Density(P/Km²)'])
df['Med.Age'] = df['Med.Age']/50.
df['Urban Pop %'] = df['Urban Pop %']/100.
df['Apr'] = df['Apr']/30.
df['year'] = df['year']/30.
df['Low Temp'] = df['Low Temp']/30.
df['ppp'] = np.log1p(df['ppp'])
return df
<load_from_csv> | df['Last_Name'] = df['Name'].apply(lambda x: str.split(x, ",")[0])
DEFAULT_SURVIVAL_VALUE = 0.5
df['Family_Survival'] = DEFAULT_SURVIVAL_VALUE
for grp, grp_df in df[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId',
'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passengers with family survival information:",
df.loc[df['Family_Survival']!=0.5].shape[0] ) | Titanic - Machine Learning from Disaster |
13,882,987 | df = pd.read_csv(datapath + "train.csv")
sub_df = pd.read_csv(datapath + "test.csv")
df['Province_State'].fillna('', inplace=True)
sub_df['Province_State'].fillna('', inplace=True)
gem_targets = df[['Country_Region','Province_State','Date']+TARGETS]
gem_targets["Date"] = gem_targets["Date"].astype("datetime64[ms]")
combi= df.groupby(["Country_Region","Province_State"] ).size().reset_index()
combi =combi[["Country_Region","Province_State"]].values.tolist()
mask1 = df["Country_Region"]=='Guyana'
mask2 = df["Date"] > '2020-03-21'
mask3 = df["Date"] < '2020-03-28'
mask4 = pd.concat(( mask1,mask2,mask3),axis=1)
mask5 = mask4.all(axis=1)
df.loc[mask5,"ConfirmedCases"] = 7.0
mask1 = df["Province_State"]=='Northern Territory'
mask2 = df["Date"] > '2020-03-05'
mask3 = df["Date"] < '2020-03-10'
mask4 = pd.concat(( mask1,mask2,mask3),axis=1)
mask5 = mask4.all(axis=1)
df.loc[mask5,"ConfirmedCases"] = 1.0
mask1 = df["Country_Region"]=='Philippines'
mask2 = df["Date"] > '2020-03-17'
mask3 = df["Date"] < '2020-03-19'
mask4 = pd.concat(( mask1,mask2,mask3),axis=1)
mask5 = mask4.all(axis=1)
df.loc[mask5,"Fatalities"] = 14.0
correct_anomalities(df)
if USE_NEW:
df[['NewCases','NewFatalities']] = df.groupby(['Country_Region', 'Province_State'])\
[['ConfirmedCases','Fatalities']].transform(lambda x: x.diff())
mask = df['NewCases'].isnull()
df.loc[mask,['NewCases']] = 0
df.loc[mask,['NewFatalities']] = 0
provins = 'Virgin Islands'
print("testprint for",provins)
print(df[df['Province_State']== provins][['NewCases','NewFatalities']])
def row_count(filename):
with open(filename)as in_file:
return sum(1 for _ in in_file)
df = preprocess(df)
sub_df = preprocess(sub_df)
GIVEN_FIRST_DATE = df["Date"].min()
GIVEN_LAST_DATE = df["Date"].max()
SUBMISSION_FIRST_DATE = sub_df["Date"].min()
ESTIMATE_FIRST_DATE = df["Date"].max() + timedelta(days=1)
ESTIMATE_LAST_DATE = sub_df["Date"].max()
ESTIMATE_DAYS =(ESTIMATE_LAST_DATE - ESTIMATE_FIRST_DATE ).days +1
if add_other:
df = add_other_info(df,False,usstates_info,supp_info,population_by_age_df,CountryRegion,state_temperatures,ppp_tabel,coor_df)
sub_df = add_other_info(sub_df,True,usstates_info,supp_info,population_by_age_df,CountryRegion,state_temperatures,ppp_tabel,coor_df)
print("SUB_DF COLUMNS NYLAVET:")
print(sub_df.columns)
<categorify> | for _, grp_df in df.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 1
elif(smin==0.0):
df.loc[df['PassengerId'] == passID, 'Family_Survival'] = 0
print("Number of passenger with family/group survival information: "
+str(df[df['Family_Survival']!=0.5].shape[0])) | Titanic - Machine Learning from Disaster |
13,882,987 | if CURVE_SMOOTHING:
df['Cases_m'] = df.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(1))
df['Cases_p'] = df.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(-1))
df['Cases_ave'] = 0.5*(df['ConfirmedCases']+0.5*(df['Cases_p']+df['Cases_m']))
case_cols = ['ConfirmedCases','Cases_m','Cases_p','Cases_ave']
df['Fatalities_m'] = df.groupby(['Country_Region', 'Province_State'])[['Fatalities']].transform(lambda x: x.shift(1))
df['Fatalities_p'] = df.groupby(['Country_Region', 'Province_State'])[['Fatalities']].transform(lambda x: x.shift(-1))
df['Fatalities_ave'] = 0.5*(df['Fatalities']+0.5*(df['Fatalities_p']+df['Fatalities_m']))
fata_cols = ['Fatalities','Fatalities_m','Fatalities_p','Fatalities_ave']
if USE_NEW:
df[['NewCases','NewFatalities']] = df.groupby(['Country_Region', 'Province_State'])\
[['ConfirmedCases','Fatalities']].transform(lambda x: x.diff())
df['NewCases_m'] = df.groupby(['Country_Region', 'Province_State'])[['NewCases']].transform(lambda x: x.shift(1))
df['NewCases_p'] = df.groupby(['Country_Region', 'Province_State'])[['NewCases']].transform(lambda x: x.shift(-1))
df['NewCases_m2'] = df.groupby(['Country_Region', 'Province_State'])[['NewCases']].transform(lambda x: x.shift(2))
df['NewCases_p2'] = df.groupby(['Country_Region', 'Province_State'])[['NewCases']].transform(lambda x: x.shift(-2))
df['NewCases_ave'] = 0.2*(df['NewCases']+df['NewCases_p']+df['NewCases_m']+df['NewCases_p2']+df['NewCases_m2'])
df['NewFatalities_m'] = df.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].transform(lambda x: x.shift(1))
df['NewFatalities_p'] = df.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].transform(lambda x: x.shift(-1))
df['NewFatalities_m2'] = df.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].transform(lambda x: x.shift(2))
df['NewFatalities_p2'] = df.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].transform(lambda x: x.shift(-2))
df['NewFatalities_ave'] =0.2*(df['NewFatalities']+df['NewFatalities_p']+df['NewFatalities_m']+df['NewFatalities_p2']+df['NewFatalities_m2'])
date_max = df["Date"].max()
mask = df["Date"] ==date_max
mask2 = df["Date"]==date_max - timedelta(days=1)
df.loc[mask,'Cases_ave'] = 0.75*df.loc[mask,'ConfirmedCases']+0.25*df.loc[mask,'Cases_m']
df.loc[mask,'Fatalities_ave'] = 0.75*df.loc[mask,'Fatalities'] +0.25*df.loc[mask,'Fatalities_m']
df.drop(['Cases_m', 'Cases_p', 'Fatalities_m','Fatalities_p','ConfirmedCases','Fatalities'],axis=1,inplace=True)
df.rename(columns={'Cases_ave':'ConfirmedCases','Fatalities_ave':'Fatalities'},inplace=True)
if USE_NEW:
df.loc[mask2,'NewCases_ave'] = 0.3*df.loc[mask2,'NewCases_p']+0.3*df.loc[mask2,'NewCases']+\
0.3*df.loc[mask2,'NewCases_m']+0.1*df.loc[mask2,'NewCases_m2']
df.loc[mask2,'NewFatalities_ave'] = 0.3*df.loc[mask2,'NewFatalities_p']+0.3*df.loc[mask2,'NewFatalities']+\
0.3*df.loc[mask2,'NewFatalities_m']+0.1*df.loc[mask2,'NewFatalities_m2']
df.loc[mask,'NewCases_ave'] = 0.55*df.loc[mask,'NewCases'] +0.3*df.loc[mask,'NewCases_m']+\
0.2*df.loc[mask,'NewCases_m2']
df.loc[mask,'NewFatalities_ave'] = 0.5*df.loc[mask,'NewFatalities'] +0.3*df.loc[mask,'NewFatalities_m']+\
0.2*df.loc[mask,'NewFatalities_m2']
df.drop(['NewCases_m', 'NewCases_p', 'NewFatalities_m','NewFatalities_p','NewCases','NewFatalities'],axis=1,inplace=True)
df.drop(['NewCases_m2', 'NewCases_p2', 'NewFatalities_m2','NewFatalities_p2'],axis=1,inplace=True)
df.rename(columns={'NewCases_ave':'NewCases','NewFatalities_ave':'NewFatalities'},inplace=True)
if add_other:
df["NewCases_ppm"] = np.log1p(df["NewCases"]*1000000/df["Population(2020)"])
df["NewFatalities_ppm"] = np.log1p(df["NewFatalities"]*1000000/df["Population(2020)"])
df.fillna(0, inplace=True)
sub_df.fillna(0,inplace =True)
if has_nan(df, base_features):
    print("WARNING: NaN values found in base features")
    print_rows_with_nan(df)
else:
    print("OK: no NaNs in base features")
if has_nan(df) > 0:
    print("WARNING: NaN values found in dataframe")
    print_rows_with_nan(df)
    print("NaN report done")
else:
    print("OK: no NaNs in dataframe")
<feature_engineering> | df.loc[ df['Age'] <= 16, 'Age'] = 0
df.loc[(df['Age'] > 16)&(df['Age'] <= 32), 'Age'] = 1
df.loc[(df['Age'] > 32)&(df['Age'] <= 48), 'Age'] = 2
df.loc[(df['Age'] > 48)&(df['Age'] <= 64), 'Age'] = 3
df.loc[ df['Age'] > 64, 'Age'] = 4 ;
df = df.drop(['PassengerId','Cabin','Name','SibSp','Parch','Embarked','Ticket','Last_Name'], axis=1 ) | Titanic - Machine Learning from Disaster |
13,882,987 | df = df[df["Date"] >= df["Date"].min() + timedelta(days=days_shift[NUM_SHIFT])].copy()
for col in TARGETS:
df[col] = np.log1p(df[col])/normfactor
df = df[df['days']>TRAIN_START_DAY]
<choose_model_class> | df.isna().sum() | Titanic - Machine Learning from Disaster |
13,882,987 | def nn_block(input_layer, size, dropout_rate, activation):
out_layer = KL.Dense(size, activation=None )(input_layer)
out_layer = KL.Activation(activation )(out_layer)
out_layer = KL.Dropout(dropout_rate )(out_layer)
return out_layer
def get_model(feature_length, target_length):
inp = KL.Input(shape=(feature_length,))
hidden_layer = nn_block(inp, 64, 0.0, "relu")
gate_layer = nn_block(hidden_layer, 32, 0.0, "sigmoid")
hidden_layer = nn_block(hidden_layer, 64, 0.0, "relu")
hidden_layer = nn_block(hidden_layer, 32, 0.0, "relu")
hidden_layer = KL.multiply([hidden_layer, gate_layer])
out = KL.Dense(target_length, activation="linear" )(hidden_layer)
model = tf.keras.models.Model(inputs=[inp], outputs=out)
return model
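# Note on the architecture above: nn_block stacks Dense -> Activation -> Dropout,
# and get_model multiplies a sigmoid "gate" branch elementwise (KL.multiply)
# with the relu branch -- a soft gating mechanism that lets the network learn
# to damp individual hidden units per sample before the linear output head.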
def get_input(df,features):
return [df[features]]
def train_models(df,features,targets,save=False):
print()
print("TRAINING.Der regnes på denne model:")
get_model(len(features),len(targets)).summary()
models = []
for i in range(NUM_MODELS):
print("PHS1")
model = get_model(len(features),len(targets))
print("PHS2")
model.compile(loss="mean_squared_error", optimizer=Nadam(lr=1e-4))
print("PHS3 - Targets:",targets)
hist = model.fit(get_input(df,features), df[targets],
batch_size=2048, epochs=500, verbose=0, shuffle=True)
print("PHS4")
if save:
print("PHS5")
model.save_weights("model{}.h5".format(i))
print("PHS6")
models.append(model)
print("PHS7")
return models
<compute_train_metric> | train = df[df['Survived'].notnull() ]
test = df[df['Survived'].isnull() ]
test = test.drop(['Survived'], axis=1 ) | Titanic - Machine Learning from Disaster |
13,882,987 | def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
def evaluate(df,targets):
error = 0
for col in targets:
error += rmse(df[col].values, df["pred_{}".format(col)].values)
return np.round(error/len(targets), 5)
def predict_one(df,features,prev_targets, models):
pred = np.zeros(( df.shape[0], 2))
for model in models:
pred += model.predict(get_input(df,features)) /len(models)
if np.isnan(np.sum(pred)) :
print("DYYYYYYYYYYYYT: der er NAN")
print(pred)
pred = np.maximum(pred, df[prev_targets].values)
pred[:, 0] = np.log1p(np.expm1(normfactor*pred[:, 0])+ 0.1)/normfactor
pred[:, 1] = np.log1p(np.expm1(normfactor*pred[:, 1])+ 0.01)/normfactor
if np.isnan(np.sum(pred)) :
print("DYT DYT DYT: der er NAN")
return np.clip(pred, None, 15)
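# Guard rails in predict_one above: predictions are floored at the previous
# lag values (cumulative series must not decrease), nudged upward by +0.1
# cases / +0.01 fatalities in raw-count space so flat curves keep drifting,
# and capped at 15 in log1p space -- an assumed sanity ceiling per location:
print(np.expm1(15))  # ~3269016 raw counts, the cap implied by np.clip(pred, None, 15)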
def predict(test_df,features,targets,prev_targets,first_day, num_days, models, val=False):
for d in range(0, num_days):
print("DAY NO.",d)
test_df = fill_shift_columns(test_df,targets)
date = first_day + timedelta(days=d)
temp_df = test_df.loc[test_df["Date"] == date].copy()
y_pred = predict_one(temp_df,features,prev_targets, models)
for i, col in enumerate(targets):
test_df.loc[test_df["Date"] == date, col] = y_pred[:, i]
if add_other:
if USE_NEW:
test_df.loc[test_df["Date"] == date,"NewCases_ppm"] = \
np.log1p(np.expm1(y_pred[:, 0])*1000000/ \
np.expm1(test_df.loc[test_df["Date"] == date,"Population(2020)"]))
test_df.loc[test_df["Date"] == date,"NewFatalities_ppm"] = \
np.log1p(np.expm1(y_pred[:, 1])*1000000/ \
np.expm1(test_df.loc[test_df["Date"] == date,"Population(2020)"]))
else:
test_df.loc[test_df["Date"] == date,"ConfirmedCases_ppm"] = \
np.log1p(np.expm1(y_pred[:, 0])*1000000/ \
np.expm1(test_df.loc[test_df["Date"] == date,"Population(2020)"]))
test_df.loc[test_df["Date"] == date,"Fatalities_ppm"] = \
np.log1p(np.expm1(y_pred[:, 1])*1000000/ \
np.expm1(test_df.loc[test_df["Date"] == date,"Population(2020)"]))
if val:
print(evaluate(test_df[test_df["Date"] == date]))
return test_df
<load_pretrained> | x = train.copy()
y = x.pop('Survived')
x_test = test.copy()
x = x.values
y = y.values
x_test = x_test.values
std_scaler = StandardScaler()
x = std_scaler.fit_transform(x)
xf_test = std_scaler.transform(x_test)
x, x_val, y, y_val = train_test_split(x, y,test_size=0.2, shuffle=False ) | Titanic - Machine Learning from Disaster |
13,882,987 | all_features = base_features + shift_features
df = fill_shift_columns(df,TARGETS)
df[all_features] = df[all_features].fillna(0)
print("Kolonner i modelller",all_features)
print("BEFORE TRAINING")
print(df[(df['Country_Region']=='Germany')&(df['days'] >75)])
final_models = train_models(df,all_features,TARGETS, save=True )<predict_on_test> | class Optimizer:
def __init__(self, metric, trials=30):
self.metric = metric
self.trials = trials
self.sampler = TPESampler()
def objective(self, trial):
model = create_model(trial)
model.fit(x, y)
preds = model.predict(x_val)
if self.metric == 'acc':
return accuracy_score(y_val, preds)
else:
return f1_score(y_val, preds)
def optimize(self):
study = optuna.create_study(direction="maximize", sampler=self.sampler)
study.optimize(self.objective, n_trials=self.trials)
return study.best_params | Titanic - Machine Learning from Disaster |
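# Note on the pattern below: Optimizer.objective calls the module-level
# create_model, which Python resolves at call time. Redefining create_model
# before constructing each Optimizer therefore points the same search harness
# at a new estimator family without touching the class itself.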
13,882,987 | full_df_pred= predict(full_df,all_features,TARGETS,prev_targets,ESTIMATE_FIRST_DATE,ESTIMATE_DAYS, final_models)
for col in TARGETS:
full_df_pred[col] = np.expm1(full_df_pred[col])
<merge> | def create_model(trial):
max_depth = trial.suggest_int("max_depth", 2, 6)
n_estimators = trial.suggest_int("n_estimators", 2, 150)
min_samples_leaf = trial.suggest_int("min_samples_leaf", 1, 10)
model = RandomForestClassifier(
min_samples_leaf=min_samples_leaf,
n_estimators=n_estimators,
max_depth=max_depth,
)
return model
optimizer = Optimizer('f1')
rf_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
rf_acc_params = optimizer.optimize()
def create_model(trial):
max_depth = trial.suggest_int("max_depth", 2, 6)
n_estimators = trial.suggest_int("n_estimators", 1, 150)
learning_rate = trial.suggest_uniform('learning_rate', 0.0000001, 1)
gamma = trial.suggest_uniform('gamma', 0.0000001, 1)
subsample = trial.suggest_uniform('subsample', 0.0001, 1.0)
model = XGBClassifier(
learning_rate=learning_rate,
n_estimators=n_estimators,
max_depth=max_depth,
gamma=gamma,
subsample=subsample,
)
return model
optimizer = Optimizer('f1')
xgb_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
xgb_acc_params = optimizer.optimize()
def create_model(trial):
max_depth = trial.suggest_int("max_depth", 2, 6)
n_estimators = trial.suggest_int("n_estimators", 1, 150)
learning_rate = trial.suggest_uniform('learning_rate', 0.0000001, 1)
num_leaves = trial.suggest_int("num_leaves", 2, 3000)
min_child_samples = trial.suggest_int('min_child_samples', 3, 200)
model = LGBMClassifier(
learning_rate=learning_rate,
n_estimators=n_estimators,
max_depth=max_depth,
num_leaves=num_leaves,
min_child_samples=min_child_samples,
)
return model
optimizer = Optimizer('f1')
lgb_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
lgb_acc_params = optimizer.optimize()
def create_model(trial):
max_depth = trial.suggest_int("max_depth", 2, 6)
min_samples_split = trial.suggest_int('min_samples_split', 2, 16)
min_weight_fraction_leaf = trial.suggest_uniform('min_weight_fraction_leaf', 0.0, 0.5)
min_samples_leaf = trial.suggest_int('min_samples_leaf', 1, 10)
model = DecisionTreeClassifier(
min_samples_split=min_samples_split,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
)
return model
optimizer = Optimizer('f1')
dt_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
dt_acc_params = optimizer.optimize()
def create_model(trial):
n_estimators = trial.suggest_int('n_estimators', 2, 200)
max_samples = trial.suggest_int('max_samples', 1, 100)
model = BaggingClassifier(
n_estimators=n_estimators,
max_samples=max_samples,
)
return model
optimizer = Optimizer('f1')
bc_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
bc_acc_params = optimizer.optimize()
def create_model(trial):
n_neighbors = trial.suggest_int("n_neighbors", 2, 25)
model = KNeighborsClassifier(n_neighbors=n_neighbors)
return model
optimizer = Optimizer('f1')
knn_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
knn_acc_params = optimizer.optimize()
def create_model(trial):
n_estimators = trial.suggest_int("n_estimators", 2, 150)
learning_rate = trial.suggest_uniform('learning_rate', 0.0005, 1.0)
model = AdaBoostClassifier(
n_estimators=n_estimators,
learning_rate=learning_rate,
)
return model
optimizer = Optimizer('f1')
abc_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
abc_acc_params = optimizer.optimize()
def create_model(trial):
n_estimators = trial.suggest_int("n_estimators", 2, 150)
max_depth = trial.suggest_int("max_depth", 2, 6)
model = ExtraTreesClassifier(
n_estimators=n_estimators,
max_depth=max_depth,
random_state=0
)
return model
optimizer = Optimizer('f1')
et_f1_params = optimizer.optimize()
optimizer = Optimizer('acc')
et_acc_params = optimizer.optimize()
| Titanic - Machine Learning from Disaster |
13,882,987 | gem_targets = gem_targets[gem_targets["Date"]>=SUBMISSION_FIRST_DATE]
print(gem_targets.head(15))
values_to_submit = full_df_pred[full_df_pred["Date"]>=ESTIMATE_FIRST_DATE]
values_to_submit = values_to_submit[['Date','Country_Region','Province_State','ConfirmedCases', 'Fatalities']]
print(values_to_submit.head(15))
values_to_submit = gem_targets.append(values_to_submit, sort=False)
print(values_to_submit.tail(10))
print(sub_df[['Date','Country_Region','Province_State','ForecastId']])
values_to_submit = pd.merge(values_to_submit,sub_df[['Date','Country_Region','Province_State','ForecastId']], on=['Date','Country_Region','Province_State'], how='left')
print(values_to_submit.head(20))
print(values_to_submit.tail(20))
sub2 = values_to_submit[["ForecastId"] + TARGETS]
sub2["ForecastId"] = sub2["ForecastId"].astype(np.int16)
<save_to_csv> | mdict = {
'RF': RandomForestClassifier() ,
'XGB': XGBClassifier() ,
'LGBM': LGBMClassifier() ,
'DT': DecisionTreeClassifier() ,
'KNN': KNeighborsClassifier() ,
'BC': BaggingClassifier() ,
'OARF': RandomForestClassifier(**rf_acc_params),
'OFRF': RandomForestClassifier(**rf_f1_params),
'OAXGB': XGBClassifier(**xgb_acc_params),
'OFXGB': XGBClassifier(**xgb_f1_params),
'OALGBM': LGBMClassifier(**lgb_acc_params),
'OFLGBM': LGBMClassifier(**lgb_f1_params),
'OADT': DecisionTreeClassifier(**dt_acc_params),
'OFDT': DecisionTreeClassifier(**dt_f1_params),
'OAKNN': KNeighborsClassifier(**knn_acc_params),
'OFKNN': KNeighborsClassifier(**knn_f1_params),
'OABC': BaggingClassifier(**bc_acc_params),
'OFBC': BaggingClassifier(**bc_f1_params),
'OAABC': AdaBoostClassifier(**abc_acc_params),
'OFABC': AdaBoostClassifier(**abc_f1_params),
'OAET': ExtraTreesClassifier(**et_acc_params),
'OFET': ExtraTreesClassifier(**et_f1_params),
'LR': LogisticRegression(max_iter=1000),
'ABC': AdaBoostClassifier() ,
'SGD': SGDClassifier() ,
'ET': ExtraTreesClassifier() ,
'GB': GradientBoostingClassifier() ,
'RDG': RidgeClassifier() ,
'PCP': Perceptron(max_iter=1500),
'PAC': PassiveAggressiveClassifier()
} | Titanic - Machine Learning from Disaster |
13,882,987 | sub2.sort_values("ForecastId", inplace=True)
sub2.to_csv("submission.csv", index=False)
<merge> | def create_model(trial):
model_names = list()
models_list = [
'RF', 'XGB', 'LGBM', 'DT',
'KNN', 'BC', 'OARF', 'OFRF',
'OAXGB', 'OFXGB', 'OALGBM',
'OFLGBM', 'OADT', 'OFDT',
'OAKNN', 'OFKNN', 'OABC',
'OFBC', 'OAABC', 'OFABC',
'OAET', 'OFET', 'LR',
'ABC', 'SGD', 'ET',
'GB', 'RDG',
'PCP', 'PAC'
]
head_list = [
'RF',
'XGB',
'LGBM',
'DT',
'KNN',
'BC',
'LR',
'ABC',
'SGD',
'ET',
'GB',
'RDG',
'PCP',
'PAC'
]
n_models = trial.suggest_int("n_models", 2, 6)
for i in range(n_models):
model_item = trial.suggest_categorical('model_{}'.format(i), models_list)
if model_item not in model_names:
model_names.append(model_item)
folds = trial.suggest_int("folds", 2, 6)
model = SuperLearner(
folds=folds,
)
models = [
mdict[item] for item in model_names
]
model.add(models)
head = trial.suggest_categorical('head', head_list)
model.add_meta(
mdict[head]
)
return model
def objective(trial):
model = create_model(trial)
model.fit(x, y)
preds = model.predict(x_val)
score = accuracy_score(y_val, preds)
return score
study = optuna.create_study(
direction="maximize",
sampler= TPESampler()
)
study.optimize(
objective,
n_trials=50
) | Titanic - Machine Learning from Disaster |
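# The refit cell below uses `result` (list of model keys), `head` (meta-learner
# key) and `folds`, none of which are defined in this excerpt. A hedged
# reconstruction from the finished study, assuming the suggest names used in
# create_model above:
best = study.best_params
folds = best['folds']
head = best['head']
result = list({best['model_{}'.format(i)] for i in range(best['n_models'])})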
13,882,987 | full_df_pred['Cases_Estimate'] = full_df_pred['ConfirmedCases']
full_df_pred['Fatalities_Estimate'] = full_df_pred['Fatalities']
full_df_pred2 = full_df_pred[['Date','Country_Region','Province_State']+TARGETS]
full_df2 = full_df[['Date','Country_Region','Province_State']+TARGETS]
full_df3 = pd.merge(full_df2,full_df_pred[['Date','Country_Region','Province_State','Cases_Estimate','Fatalities_Estimate']], on=['Date','Country_Region','Province_State'], how='left')
mask = full_df3['Cases_Estimate'].notna()
full_df3.loc[mask,'CasesConfirmed'] = full_df3.loc[mask,'Cases_Estimate']
full_df3.loc[mask,'Fatalities'] = full_df3.loc[mask,'Fatalities_Estimate']
split_on = 'Country_Region'
split_values = full_df3[split_on].unique()
kolonne = 'CasesConfirmed'
for imin in range(0,175,25):
plt.figure(figsize=(30,30))
imax = imin+25
for i in range(imin,imax):
plt.subplot(5,5,i-imin+1)
idx = i
df_interest = full_df3[full_df3[split_on]==split_values[idx]].reset_index(drop=True)
tmp = df_interest[kolonne].values
sns.lineplot(x=df_interest['Date'], y=tmp, label='pred')
df_interest2 = full_df3[(full_df3[split_on]==split_values[idx])&(full_df3['Date']<=GIVEN_LAST_DATE)].reset_index(drop=True)
sns.lineplot(x=df_interest2['Date'].values, y=df_interest2[kolonne].values, label='true')
plt.title(split_on+' '+str(split_values[idx]))
plt.show()
<load_from_csv> | model = SuperLearner(
folds=folds,
)
models = [
mdict[item] for item in result
]
model.add(models)
model.add_meta(mdict[head])
xf = train.copy()
yf = xf.pop('Survived')
xf = xf.values
xf = std_scaler.fit_transform(xf)
yf = yf.values
model.fit(xf, yf ) | Titanic - Machine Learning from Disaster |
13,882,987 | train_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_data = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission_csv = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv' )<data_type_conversions> | preds = model.predict(xf_test ).astype(int)
output = pd.DataFrame({ 'PassengerId': PassengerId,
'Survived': preds })
output.to_csv('boosted_tree.csv', index=False)
| Titanic - Machine Learning from Disaster |
13,795,550 | convert_dict = {'Province_State': str,'Country_Region':str,'ConfirmedCases':int,'Fatalities':int}
convert_dict_test = {'Province_State': str,'Country_Region':str}
train_data = train_data.astype(convert_dict)
test_data = test_data.astype(convert_dict_test )<data_type_conversions> | train_data_raw = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data_raw.head() | Titanic - Machine Learning from Disaster |
13,795,550 | train_data['Date'] = pd.to_datetime(train_data['Date'], infer_datetime_format=True)
test_data['Date'] = pd.to_datetime(test_data['Date'], infer_datetime_format=True )<data_type_conversions> | train_data=train_data_raw.drop(columns=['PassengerId','Name','Cabin','Ticket'])
train_data.head() | Titanic - Machine Learning from Disaster |
13,795,550 | train_data.loc[:, 'Date'] = train_data.Date.dt.strftime('%m%d')
train_data.loc[:, 'Date'] = train_data['Date'].astype(int)
test_data.loc[:, 'Date'] = test_data.Date.dt.strftime('%m%d')
test_data.loc[:, 'Date'] = test_data['Date'].astype(int )<feature_engineering> | train_data=train_data.loc[pd.notna(train_data.Embarked)]
train_data | Titanic - Machine Learning from Disaster |
13,795,550 | train_data['Country_Region'] = np.where(train_data['Province_State'] == 'nan',train_data['Country_Region'],train_data['Province_State']+' '+train_data['Country_Region'])
test_data['Country_Region'] = np.where(test_data['Province_State'] == 'nan',test_data['Country_Region'],test_data['Province_State']+' '+test_data['Country_Region'])
<drop_column> | age_arr=train_data.Age.values
bool_arr=pd.isna(train_data.Age.values)
total_age=0
num_age=0
for i in range(len(age_arr)) :
if bool_arr[i]==False:
total_age+=age_arr[i]
num_age+=1
avg_age=(total_age/num_age)
for i in range(len(age_arr)) :
if bool_arr[i]==True:
age_arr[i]=avg_age
train_data['Age'] = age_arr  # write the imputed ages back explicitly
print(train_data.Age.values ) | Titanic - Machine Learning from Disaster |
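# The imputation loop above is equivalent to two lines of pandas (kept as a
# commented sketch, since the ages are already filled at this point):
# avg_age = train_data['Age'].mean()
# train_data['Age'] = train_data['Age'].fillna(avg_age)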
13,795,550 | train_data = train_data.drop(columns=['Province_State'])
test_data = test_data.drop(columns=['Province_State'] )<define_variables> | scaled_age_arr=[round(age/avg_age,2)for age in age_arr]
train_data.Age=train_data.Age.replace(to_replace=train_data.Age.values,value=scaled_age_arr)
fare_arr=train_data.Fare.values
total_fare=0
num_fare=len(fare_arr)
for fare in fare_arr:
total_fare+=fare
avg_fare=total_fare/num_fare
scaled_fare_arr=[round(fare/avg_fare,2)for fare in fare_arr]
train_data.Fare=train_data.Fare.replace(to_replace=train_data.Fare.values,value=scaled_fare_arr)
train_data | Titanic - Machine Learning from Disaster |
13,795,550 | s =(train_data.dtypes == 'object')
object_cols = list(s[s].index )<import_modules> | d_Sex={'male':0,'female':1}
d_Embarked={'S':0,'C':1,'Q':2}
train_data.Sex = train_data.Sex.replace(d_Sex)
train_data.Embarked = train_data.Embarked.replace(d_Embarked)
train_data.Embarked=train_data.Embarked.astype(int)
train_data | Titanic - Machine Learning from Disaster |
13,795,550 | from sklearn.preprocessing import LabelEncoder<categorify> | input_data=train_data[['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']][:]
target=train_data['Survived'][:]
print(input_data.values)
| Titanic - Machine Learning from Disaster |