kernel_id: int64 (24.2k to 23.3M)
prompt: stringlengths (8 to 1.85M)
completion: stringlengths (1 to 182k)
comp_name: stringlengths (5 to 57)
2,213,168
embed_size = 300
max_features = 120000
maxlen = 72
batch_size = 1536
train_epochs = 8
SEED = 1029<define_variables>
X_test = np.delete(X_test, [1], 1)
print(X_test[0:5, :])
X_test.shape
Titanic - Machine Learning from Disaster
2,213,168
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√']

def clean_text(x):
    # pad every punctuation mark with spaces so it tokenizes separately
    x = str(x)
    for punct in puncts:
        x = x.replace(punct, f' {punct} ')
    return x

def clean_numbers(x):
    # mask digit runs with '#' placeholders, longest runs first
    x = re.sub('[0-9]{5,}', '#####', x)
    x = re.sub('[0-9]{4}', '####', x)
    x = re.sub('[0-9]{3}', '###', x)
    x = re.sub('[0-9]{2}', '##', x)
    return x

mispell_dict = {"aren't": "are not", "can't": "cannot", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'll": "he will", "he's": "he is", "i'd": "I would", "i'll": "I will", "i'm": "I am", "isn't": "is not", "it's": "it is", "it'll": "it will", "i've": "I have", "let's": "let us", "mightn't": "might not", "mustn't": "must not", "shan't": "shall not", "she'd": "she would", "she'll": "she will", "she's": "she is", "shouldn't": "should not", "that's": "that is", "there's": "there is", "they'd": "they would", "they'll": "they will", "they're": "they are", "they've": "they have", "we'd": "we would", "we're": "we are", "weren't": "were not", "we've": "we have", "what'll": "what will", "what're": "what are", "what's": "what is", "what've": "what have", "where's": "where is", "who'd": "who would", "who'll": "who will", "who're": "who are", "who's": "who is", "who've": "who have", "won't": "will not", "wouldn't": "would not", "you'd": "you would", "you'll": "you will", "you're": "you are", "you've": "you have", "'re": " are", "wasn't": "was not", "we'll": "we will", "tryin'": "trying"}

def _get_mispell(mispell_dict):
    mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
    return mispell_dict, mispell_re

mispellings, mispellings_re = _get_mispell(mispell_dict)

def replace_typical_misspell(text):
    def replace(match):
        return mispellings[match.group(0)]
    return mispellings_re.sub(replace, text)<load_from_csv>
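As a quick illustration (a sketch, assuming the functions above and `import re` are in scope), the cleaning steps compose like this on one sample question:

sample = "What is 12345 plus 999?".lower()
sample = clean_text(sample)                # pads punctuation with spaces
sample = clean_numbers(sample)             # masks digit runs with '#'
sample = replace_typical_misspell(sample)  # expands known contractions
print(sample)  # 'what is ##### plus ### ?' (modulo extra spaces)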
sc_X = StandardScaler() X_train = sc_X.fit_transform(X_train) X_test = sc_X.transform(X_test )
Titanic - Machine Learning from Disaster
2,213,168
def load_and_prec():
    train_df = pd.read_csv("../input/train.csv")
    test_df = pd.read_csv("../input/test.csv")
    print("Train shape : ", train_df.shape)
    print("Test shape : ", test_df.shape)
    # lower-case, then clean punctuation, numbers and common misspellings
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: x.lower())
    test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: x.lower())
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x))
    test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_text(x))
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x))
    test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_numbers(x))
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x))
    test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x))
    train_X = train_df["question_text"].fillna("_##_").values
    test_X = test_df["question_text"].fillna("_##_").values
    tokenizer = Tokenizer(num_words=max_features)
    tokenizer.fit_on_texts(list(train_X))
    train_X = tokenizer.texts_to_sequences(train_X)
    test_X = tokenizer.texts_to_sequences(test_X)
    train_X = pad_sequences(train_X, maxlen=maxlen)
    test_X = pad_sequences(test_X, maxlen=maxlen)
    train_y = train_df['target'].values
    # shuffle the training data once with a fixed seed
    np.random.seed(SEED)
    trn_idx = np.random.permutation(len(train_X))
    train_X = train_X[trn_idx]
    train_y = train_y[trn_idx]
    return train_X, test_X, train_y, tokenizer.word_index<statistical_test>
classifier = LogisticRegression() classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) acc_log = round(classifier.score(X_train, y_train)* 100, 2) acc_log
Titanic - Machine Learning from Disaster
2,213,168
def load_glove(word_index):
    EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    # initialise unknown words from the empirical embedding distribution
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix

def load_para(word_index):
    EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o) > 100)
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix<normalization>
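A related sanity check for these loaders (a sketch with a hypothetical helper; it assumes an embeddings_index dict mapping word to vector, which load_glove builds internally but does not return):

def vocab_coverage(word_index, embeddings_index):
    # fraction of distinct vocabulary words that have a pretrained vector
    known = sum(1 for w in word_index if w in embeddings_index)
    return known / len(word_index)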
svc = SVC() svc.fit(X_train, y_train) y_pred = svc.predict(X_test) acc_svc = round(svc.score(X_train, y_train)* 100, 2) acc_svc result[:, 1] = y_pred result
Titanic - Machine Learning from Disaster
2,213,168
tqdm.pandas()
start_time = time.time()
train_X, test_X, train_y, word_index = load_and_prec()
embedding_matrix_1 = load_glove(word_index)
embedding_matrix_2 = load_para(word_index)
total_time = (time.time() - start_time) / 60
print("Took {:.2f} minutes".format(total_time))
embedding_matrix = np.mean([embedding_matrix_1, embedding_matrix_2], axis=0)
print(np.shape(embedding_matrix))
del embedding_matrix_1, embedding_matrix_2
gc.collect()<normalization>
knn = KNeighborsClassifier(n_neighbors = 3) knn.fit(X_train, y_train) y_pred = knn.predict(X_test) acc_knn = round(knn.score(X_train, y_train)* 100, 2) acc_knn
Titanic - Machine Learning from Disaster
2,213,168
class Attention(nn.Module):
    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0
        weight = torch.zeros(feature_dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        # score each timestep: (batch*steps, features) @ (features, 1)
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight
        ).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        # normalise to attention weights; epsilon guards against a zero sum
        a = a / (torch.sum(a, 1, keepdim=True) + 1e-10)
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)<choose_model_class>
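A quick shape check for the module above (a sketch, assuming PyTorch is installed; a batch of 4 sequences of length 72 with 120 features, matching Attention(hidden_size*2, maxlen) below):

import torch
attn = Attention(feature_dim=120, step_dim=72)
x = torch.randn(4, 72, 120)
print(attn(x).shape)  # torch.Size([4, 120]): one pooled vector per sequence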
linear_svc = LinearSVC() linear_svc.fit(X_train, y_train) y_pred = linear_svc.predict(X_test) acc_linear_svc = round(linear_svc.score(X_train, y_train)* 100, 2) acc_linear_svc
Titanic - Machine Learning from Disaster
2,213,168
class NeuralNet(nn.Module):
    def __init__(self):
        super(NeuralNet, self).__init__()
        hidden_size = 60
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.embedding_dropout = nn.Dropout2d(0.1)
        # the extract instantiated this attribute as nn.GRU; nn.LSTM matches its name and usage
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm_attention = Attention(hidden_size * 2, maxlen)
        self.gru_attention = Attention(hidden_size * 2, maxlen)
        # four concatenated views of 2*hidden_size = 120 dims each -> 480
        self.linear = nn.Linear(480, 16)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.out = nn.Linear(16, 1)

    def forward(self, x):
        h_embedding = self.embedding(x)
        # Dropout2d over a fake batch dim acts as spatial (embedding) dropout
        h_embedding = torch.squeeze(self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
        h_lstm, _ = self.lstm(h_embedding)
        h_gru, _ = self.gru(h_lstm)
        h_lstm_atten = self.lstm_attention(h_lstm)
        h_gru_atten = self.gru_attention(h_gru)
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)
        conc = torch.cat((h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        out = self.out(conc)
        return out<split>
decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, y_train) y_pred = decision_tree.predict(X_test) acc_decision_tree = round(decision_tree.score(X_train, y_train)* 100, 2) acc_decision_tree
Titanic - Machine Learning from Disaster
2,213,168
splits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED ).split(train_X, train_y))<compute_test_metric>
random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, y_train) y_pred = random_forest.predict(X_test) random_forest.score(X_train, y_train) acc_random_forest = round(random_forest.score(X_train, y_train)* 100, 2) acc_random_forest
Titanic - Machine Learning from Disaster
2,213,168
def sigmoid(x):
    return 1 / (1 + np.exp(-x))<compute_test_metric>
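threshold_search, called on train_preds further below, is not defined anywhere in this extract; a minimal sketch consistent with that call (a hypothetical reconstruction, not the author's code) scans F1 over candidate cut-offs on the sigmoid outputs:

from sklearn.metrics import f1_score

def threshold_search(y_true, y_proba):
    best_threshold, best_score = 0.0, 0.0
    for threshold in np.arange(0.1, 0.501, 0.01):
        score = f1_score(y_true, y_proba > threshold)
        if score > best_score:
            best_threshold, best_score = threshold, score
    return {'threshold': best_threshold, 'f1': best_score}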
submission = pd.DataFrame({'PassengerId':result[:, 0],'Survived':result[:, 1]}) submission.head()
Titanic - Machine Learning from Disaster
2,213,168
<categorify><EOS>
filename = 'Titanic_Survival_Trial_9.csv' submission.to_csv(filename, index=False) print('Saved file: ' + filename )
Titanic - Machine Learning from Disaster
1,003,820
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
%matplotlib inline
Titanic - Machine Learning from Disaster
1,003,820
search_result = threshold_search(train_y, train_preds) search_result<save_to_csv>
data_train = pd.read_csv('../input/train.csv')
data_test = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
1,003,820
sub = pd.read_csv('.. /input/sample_submission.csv') sub.prediction = test_preds > search_result['threshold'] sub.to_csv("submission.csv", index=False )<import_modules>
def simplify_ages(df): unknow = df['Age'].isnull() master_title = df['Title'] == 'Master' mr_title = df['Title'] == 'Mr' mrs_title = df['Title'] == 'Mrs' miss_title = df['Title'] == 'Miss' dr_title = df['Title'] == 'Dr' df.loc[master_title & unknow,'Age'] = df[master_title]['Age'].mean() df.loc[mr_title & unknow,'Age'] = df[mr_title]['Age'].mean() df.loc[mrs_title & unknow,'Age'] = df[mrs_title]['Age'].mean() df.loc[miss_title & unknow,'Age'] = df[miss_title]['Age'].mean() df.loc[dr_title & unknow,'Age'] = df[dr_title]['Age'].mean() df.Age = df.Age.fillna(-0.5) return df def simplify_cabins(df): df.Cabin = df.Cabin.fillna('N') df.Cabin = df.Cabin.apply(lambda x: x[0]) return df def simplify_fares(df): df.Fare = df.Fare.fillna(-0.5) return df def format_name(df): df['Lname'] = df.Name.apply(lambda x: x.split(',')[0]) df['Title'] = df.Name.str.extract('([A-Za-z]+)\.', expand=False) return df def simplify_family(df): df['FamilySize'] = df['SibSp'] + df['Parch'] + 1 return df def simplify_embarks(df): df.Embarked = df.Embarked.fillna('N') df.Embarked = df.Embarked.apply(lambda x: x[0]) return df def simplify_titles(df): df['Title'] = df['Title'].replace(['Col','Dr','Major'], 'MidNoble') df['Title'] = df['Title'].replace(['Mlle','Ms','Mme','Countess','Sir', 'Lady'], 'Noble') df['Title'] = df['Title'].replace(['Don','Jonkheer'], 'Mr') df['Title'] = df['Title'].replace(['Capt','Rev'], 'Worker') return df def create_sex_class(df): male = df['Sex']=='male' female = df['Sex']=='female' Class1 = df['Pclass']==1 Class2 = df['Pclass']==2 Class3 = df['Pclass']==3 df.loc[female & Class1,'Class'] =(0) df.loc[female & Class2,'Class'] =(1) df.loc[female & Class3,'Class'] =(2) df.loc[male & Class1,'Class'] =(3) df.loc[male & Class2,'Class'] =(4) df.loc[male & Class3,'Class'] =(5) return df def drop_features(df): return df.drop(['Ticket', 'SibSp', 'Parch', 'Name', 'Lname', 'Pclass'], axis=1) def transform_features(df): df = format_name(df) df = simplify_ages(df) df = simplify_cabins(df) df = simplify_fares(df) df = simplify_family(df) df = simplify_embarks(df) df = simplify_titles(df) df = create_sex_class(df) df = drop_features(df) return df def encode_features(df_train, df_test): features = ['Cabin', 'Title', 'Embarked', 'Sex'] df_combined = pd.concat([df_train[features], df_test[features]]) for feature in features: le = preprocessing.LabelEncoder() le = le.fit(df_combined[feature]) df_train[feature] = le.transform(df_train[feature]) df_test[feature] = le.transform(df_test[feature]) return df_train, df_test
Titanic - Machine Learning from Disaster
1,003,820
import numpy as np import pandas as pd import time import lightgbm as lgb from sklearn.model_selection import StratifiedKFold, KFold from sklearn.metrics import mean_squared_error from sklearn.metrics import log_loss<load_from_csv>
data_train = transform_features(data_train) data_test = transform_features(data_test )
Titanic - Machine Learning from Disaster
1,003,820
%%time
df_train = pd.read_csv('../input/predicting-outliers-to-improve-your-score/train_clean.csv')
df_test = pd.read_csv('../input/predicting-outliers-to-improve-your-score/test_clean.csv')<drop_column>
data_train, data_test = encode_features(data_train, data_test )
Titanic - Machine Learning from Disaster
1,003,820
df_train = df_train[df_train['outliers'] == 0] target = df_train['target'] del df_train['target'] features = [c for c in df_train.columns if c not in ['card_id', 'first_active_month','outliers']] categorical_feats = [c for c in features if 'feature_' in c]<init_hyperparams>
X_all = data_train.drop(['Survived', 'PassengerId'], axis=1) y_all = data_train['Survived']
Titanic - Machine Learning from Disaster
1,003,820
param = {'objective':'regression', 'num_leaves': 31, 'min_data_in_leaf': 25, 'max_depth': 7, 'learning_rate': 0.01, 'lambda_l1':0.13, "boosting": "gbdt", "feature_fraction":0.85, 'bagging_freq':8, "bagging_fraction": 0.9 , "metric": 'rmse', "verbosity": -1, "random_state": 2333}<split>
def MultivariateGaussian(x, y):
    k = 2
    d = x.shape[1]
    mu = np.zeros((k, d))
    sigma = np.zeros((k, d, d))
    pi = np.zeros(k)
    for label in range(2):
        indices = (y == label)
        mu[label] = np.mean(x[indices, :], axis=0)
        sigma[label] = np.cov(x[indices, :], rowvar=0, bias=1)
        pi[label] = float(sum(indices)) / float(len(y))
    return mu, sigma, pi
Titanic - Machine Learning from Disaster
1,003,820
%%time folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2333) oof = np.zeros(len(df_train)) predictions = np.zeros(len(df_test)) feature_importance_df = pd.DataFrame() for fold_,(trn_idx, val_idx)in enumerate(folds.split(df_train,df_train['outliers'].values)) : print("fold {}".format(fold_)) trn_data = lgb.Dataset(df_train.iloc[trn_idx][features], label=target.iloc[trn_idx]) val_data = lgb.Dataset(df_train.iloc[val_idx][features], label=target.iloc[val_idx]) num_round = 10000 clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval= 100, early_stopping_rounds = 200) oof[val_idx] = clf.predict(df_train.iloc[val_idx][features], num_iteration=clf.best_iteration) fold_importance_df = pd.DataFrame() fold_importance_df["Feature"] = features fold_importance_df["importance"] = clf.feature_importance() fold_importance_df["fold"] = fold_ + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) predictions += clf.predict(df_test[features], num_iteration=clf.best_iteration)/ folds.n_splits print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))<create_dataframe>
mu, sigma, pi = MultivariateGaussian(X_all.values,y_all.values )
Titanic - Machine Learning from Disaster
1,003,820
model_without_outliers = pd.DataFrame({"card_id":df_test["card_id"].values}) model_without_outliers["target"] = predictions<load_from_csv>
def test_model(mu, sigma, pi, features, tx, ty):
    preds = []
    errors = 0
    for x, y in zip(tx, ty):
        piP_list = []
        for label in range(2):
            S = np.linalg.inv(sigma[label])
            dS = np.linalg.det(sigma[label])
            xTsigmax = 0
            for i in features:
                for j in features:
                    xTsigmax += S[i][j] * (x[i] - mu[label][i]) * (x[j] - mu[label][j])
            piP_list.append(pi[label] * np.exp(-0.5 * xTsigmax) / np.sqrt(dS))
        predict = np.argmax(piP_list)
        preds.append(predict)
        if predict != y:
            errors += 1
    return errors, preds
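The nested feature loops evaluate pi_k * exp(-(x - mu_k)^T Sigma_k^{-1} (x - mu_k) / 2) / sqrt(det Sigma_k), i.e. the class-conditional Gaussian density up to the class-independent (2*pi)^(-d/2) factor, so the argmax is unchanged. A vectorized sketch of the same classifier using SciPy (an equivalent reformulation, not the author's code; it scores all features rather than a subset):

from scipy.stats import multivariate_normal

def predict_gaussian(mu, sigma, pi, X):
    # posterior-proportional score pi_k * N(x | mu_k, sigma_k) per class
    scores = np.stack([pi[k] * multivariate_normal.pdf(X, mean=mu[k], cov=sigma[k])
                       for k in range(len(pi))], axis=1)
    return np.argmax(scores, axis=1)  # same argmax as the loop version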
Titanic - Machine Learning from Disaster
1,003,820
%%time
df_train = pd.read_csv('../input/predicting-outliers-to-improve-your-score/train_clean.csv')
df_test = pd.read_csv('../input/predicting-outliers-to-improve-your-score/test_clean.csv')<drop_column>
errors, preds = test_model(mu, sigma, pi, [0,1,2,3,4,5,6,7], X_all.values, y_all.values) print('Accuracy for training data:',(1-errors/len(y_all)) *100, '%' )
Titanic - Machine Learning from Disaster
1,003,820
target = df_train['outliers'] del df_train['outliers'] del df_train['target']<define_variables>
def write_predictions(mu, sigma, pi, features, tx): preds = [] for x in tx: piP_list = [] for label in range(2): S = np.linalg.inv(sigma[label]) dS = np.linalg.det(sigma[label]) xTsigmax=0 for i in features: for j in features: xTsigmax += S[i][j]*(x[i]-mu[label][i])*(x[j]-mu[label][j]) piP_list.append(pi[label]*np.exp(-0.5*xTsigmax)/np.sqrt(dS)) predict = np.argmax(piP_list) preds.append(predict) return preds
Titanic - Machine Learning from Disaster
1,003,820
features = [c for c in df_train.columns if c not in ['card_id', 'first_active_month']] categorical_feats = [c for c in features if 'feature_' in c]<init_hyperparams>
preds = write_predictions(mu, sigma, pi, [0,1,2,3,4,5,6,7], data_test.drop('PassengerId',axis=1 ).values )
Titanic - Machine Learning from Disaster
1,003,820
param = {'num_leaves': 31, 'min_data_in_leaf': 30, 'objective':'binary', 'max_depth': 6, 'learning_rate': 0.01, "boosting": "rf", "feature_fraction": 0.9, "bagging_freq": 1, "bagging_fraction": 0.9 , "bagging_seed": 11, "metric": 'binary_logloss', "lambda_l1": 0.1, "verbosity": -1, "random_state": 2333}<split>
data_pred = pd.DataFrame() data_pred['PassengerId']=data_test['PassengerId'] data_pred['Survived']=preds
Titanic - Machine Learning from Disaster
1,003,820
<prepare_output><EOS>
data_pred.to_csv('predictions_generative_multGauss.csv', index = False )
Titanic - Machine Learning from Disaster
998,653
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<prepare_output>
%matplotlib inline
from sklearn.ensemble import GradientBoostingClassifier, ExtraTreesClassifier
warnings.filterwarnings('ignore')
df_train = pd.read_csv('../input/train.csv', sep=',')
df_test = pd.read_csv('../input/test.csv', sep=',')
df_data = df_train.append(df_test)
PassengerId = df_test['PassengerId']
Submission = pd.DataFrame()
Submission['PassengerId'] = df_test['PassengerId']
print('Components imported')
Titanic - Machine Learning from Disaster
998,653
outlier_id = pd.DataFrame(df_outlier_prob.sort_values(by='target',ascending = False ).head(25000)['card_id'] )<load_from_csv>
df_data["Title"] = df_data.Name.str.extract('([A-Za-z]+)\.', expand=False) df_data["Title"] = df_data["Title"].replace('Mlle', 'Miss') df_data["Title"] = df_data["Title"].replace('Master', 'Master') df_data["Title"] = df_data["Title"].replace(['Mme', 'Dona', 'Ms'], 'Mrs') df_data["Title"] = df_data["Title"].replace(['Jonkheer','Don'],'Mr') df_data["Title"] = df_data["Title"].replace(['Capt','Major', 'Col','Rev','Dr'], 'Millitary') df_data["Title"] = df_data["Title"].replace(['Lady', 'Countess','Sir'], 'Honor') df_train["Title"] = df_data['Title'][:891] df_test["Title"] = df_data['Title'][891:] titledummies=pd.get_dummies(df_train[['Title']], prefix_sep='_') df_train = pd.concat([df_train, titledummies], axis=1) ttitledummies=pd.get_dummies(df_test[['Title']], prefix_sep='_') df_test = pd.concat([df_test, ttitledummies], axis=1) print('Title Feature created' )
Titanic - Machine Learning from Disaster
998,653
best_submission = pd.read_csv('../input/predicting-outliers-to-improve-your-score/3.695.csv')<merge>
df_data["Embarked"]=df_data["Embarked"].fillna('S') df_train["Embarked"] = df_data['Embarked'][:891] df_test["Embarked"] = df_data['Embarked'][891:] print('Missing Embarkations Added' )
Titanic - Machine Learning from Disaster
998,653
most_likely_liers = best_submission.merge(outlier_id,how='right') most_likely_liers.head()<filter>
dummies=pd.get_dummies(df_train[["Embarked"]], prefix_sep='_') df_train = pd.concat([df_train, dummies], axis=1) dummies=pd.get_dummies(df_test[["Embarked"]], prefix_sep='_') df_test = pd.concat([df_test, dummies], axis=1) print("Embarked Feature created" )
Titanic - Machine Learning from Disaster
998,653
%%time for card_id in most_likely_liers['card_id']: model_without_outliers.loc[model_without_outliers['card_id']==card_id,'target']\ = most_likely_liers.loc[most_likely_liers['card_id']==card_id,'target'].values<save_to_csv>
df_data["Fare"]=df_data["Fare"].fillna(np.median(df_data["Fare"])) df_train["Fare"] = df_data["Fare"][:891] df_test["Fare"] = df_data["Fare"][891:] print('Estimate missing Fare' )
Titanic - Machine Learning from Disaster
998,653
model_without_outliers.to_csv("combining_submission.csv", index=False )<import_modules>
Pclass = [1, 2, 3]
for aclass in Pclass:
    fare_to_impute = df_data.groupby('Pclass')['Fare'].median()[aclass]
    df_data.loc[(df_data['Fare'].isnull()) & (df_data['Pclass'] == aclass), 'Fare'] = fare_to_impute
df_train["Fare"] = df_data["Fare"][:891]
df_test["Fare"] = df_data["Fare"][891:]
df_train["FareBand"] = pd.qcut(df_train['Fare'], 4, labels=[1, 2, 3, 4]).astype('category')
df_test["FareBand"] = pd.qcut(df_test['Fare'], 4, labels=[1, 2, 3, 4]).astype('category')
dummies = pd.get_dummies(df_train[["FareBand"]], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
dummies = pd.get_dummies(df_test[["FareBand"]], prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print("Fareband categories created")
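pd.qcut, as used above, splits on quantiles so each band gets roughly the same number of rows; a tiny illustration (assumes pandas):

fares = pd.Series([5, 8, 15, 40, 80, 500])
print(pd.qcut(fares, 4, labels=[1, 2, 3, 4]))  # quartile labels, ~equal counts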
Titanic - Machine Learning from Disaster
998,653
import numpy as np import pandas as pd import time import lightgbm as lgb from sklearn.model_selection import StratifiedKFold, KFold from sklearn.metrics import mean_squared_error from sklearn.metrics import log_loss<load_from_csv>
titles = ['Master', 'Miss', 'Mr', 'Mrs', 'Millitary','Honor'] for title in titles: age_to_impute = df_data.groupby('Title')['Age'].median() [title] df_data.loc[(df_data['Age'].isnull())&(df_data['Title'] == title), 'Age'] = age_to_impute df_train["Age"] = df_data['Age'][:891] df_test["Age"] = df_data['Age'][891:] print('Missing Ages Estimated' )
Titanic - Machine Learning from Disaster
998,653
%%time
df_train = pd.read_csv('../input/predicting-outliers-to-improve-your-score/train_clean.csv')
df_test = pd.read_csv('../input/predicting-outliers-to-improve-your-score/test_clean.csv')<drop_column>
df_train["Pclass"]=df_train["Pclass"].astype('category') df_test["Pclass"]=df_test["Pclass"].astype('category') dummies=pd.get_dummies(df_train[["Pclass"]], prefix_sep='_') df_train = pd.concat([df_train, dummies], axis=1) dummies=pd.get_dummies(df_test[["Pclass"]], prefix_sep='_') df_test = pd.concat([df_test, dummies], axis=1) print("Pclass Feature created" )
Titanic - Machine Learning from Disaster
998,653
df_train = df_train[df_train['outliers'] == 0] target = df_train['target'] del df_train['target'] features = [c for c in df_train.columns if c not in ['card_id', 'first_active_month','outliers']] categorical_feats = [c for c in features if 'feature_' in c]<init_hyperparams>
bins = [0,12,24,45,60,np.inf] labels = ['Child', 'Young Adult', 'Adult','Older Adult','Senior'] df_train["AgeGroup"] = pd.cut(df_train["Age"], bins, labels = labels) df_test["AgeGroup"] = pd.cut(df_test["Age"], bins, labels = labels) print('Age Feature created') dummies=pd.get_dummies(df_train[["AgeGroup"]], prefix_sep='_') df_train = pd.concat([df_train, dummies], axis=1) dummies=pd.get_dummies(df_test[["AgeGroup"]], prefix_sep='_') df_test = pd.concat([df_test, dummies], axis=1) print("AgeGroup categories created" )
Titanic - Machine Learning from Disaster
998,653
param = {'objective':'regression', 'num_leaves': 31, 'min_data_in_leaf': 25, 'max_depth': 7, 'learning_rate': 0.01, 'lambda_l1':0.13, "boosting": "gbdt", "feature_fraction":0.85, 'bagging_freq':8, "bagging_fraction": 0.9 , "metric": 'rmse', "verbosity": -1, "random_state": 2333}<split>
dummies=pd.get_dummies(df_train[['Sex']], prefix_sep='_') df_train = pd.concat([df_train, dummies], axis=1) testdummies=pd.get_dummies(df_test[['Sex']], prefix_sep='_') df_test = pd.concat([df_test, testdummies], axis=1) print('Gender Categories created' )
Titanic - Machine Learning from Disaster
998,653
%%time folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2333) oof = np.zeros(len(df_train)) predictions = np.zeros(len(df_test)) feature_importance_df = pd.DataFrame() for fold_,(trn_idx, val_idx)in enumerate(folds.split(df_train,df_train['outliers'].values)) : print("fold {}".format(fold_)) trn_data = lgb.Dataset(df_train.iloc[trn_idx][features], label=target.iloc[trn_idx]) val_data = lgb.Dataset(df_train.iloc[val_idx][features], label=target.iloc[val_idx]) num_round = 10000 clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval= 100, early_stopping_rounds = 200) oof[val_idx] = clf.predict(df_train.iloc[val_idx][features], num_iteration=clf.best_iteration) fold_importance_df = pd.DataFrame() fold_importance_df["Feature"] = features fold_importance_df["importance"] = clf.feature_importance() fold_importance_df["fold"] = fold_ + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) predictions += clf.predict(df_test[features], num_iteration=clf.best_iteration)/ folds.n_splits print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))<create_dataframe>
df_data["Alone"] = np.where(df_data['SibSp'] + df_data['Parch'] + 1 == 1, 1,0) df_train["Alone"] = df_data['Alone'][:891] df_test["Alone"] = df_data['Alone'][891:] print('Lone Traveller feature created' )
Titanic - Machine Learning from Disaster
998,653
model_without_outliers = pd.DataFrame({"card_id":df_test["card_id"].values}) model_without_outliers["target"] = predictions<load_from_csv>
df_data["Last_Name"] = df_data['Name'].apply(lambda x: str.split(x, ",")[0]) DEFAULT_SURVIVAL_VALUE = 0.5 df_data["Family_Survival"] = DEFAULT_SURVIVAL_VALUE for grp, grp_df in df_data[['Survived','Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passengers with family survival information:", df_data.loc[df_data['Family_Survival']!=0.5].shape[0]) for _, grp_df in df_data.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|(row['Family_Survival']== 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin==0.0): df_data.loc[df_data['PassengerId'] == passID, 'Family_Survival'] = 0 print("Number of passenger with family/group survival information: " +str(df_data[df_data['Family_Survival']!=0.5].shape[0])) df_train["Family_Survival"] = df_data['Family_Survival'][:891] df_test["Family_Survival"] = df_data['Family_Survival'][891:]
Titanic - Machine Learning from Disaster
998,653
%%time
df_train = pd.read_csv('../input/predicting-outliers-to-improve-your-score/train_clean.csv')
df_test = pd.read_csv('../input/predicting-outliers-to-improve-your-score/test_clean.csv')<drop_column>
df_data["HadCabin"] =(df_data["Cabin"].notnull().astype('int')) df_train["HadCabin"] = df_data["HadCabin"][:891] df_test["HadCabin"] = df_data["HadCabin"][891:] print('HasCabin feature created' )
Titanic - Machine Learning from Disaster
998,653
target = df_train['outliers'] del df_train['outliers'] del df_train['target']<define_variables>
df_data["Deck"] = df_data.Cabin.str.extract('([A-Za-z])', expand=False) deck_mapping = {"0":0,"A": 1, "B": 2, "C": 3, "D": 4, "E": 5} df_data['Deck'] = df_data['Deck'].map(deck_mapping) df_data["Deck"] = df_data["Deck"].fillna("0") df_data["Deck"]=df_data["Deck"].astype('int') df_train["Deck"] = df_data['Deck'][:891] df_test["Deck"] = df_data['Deck'][891:] print('Deck feature created') dummies=pd.get_dummies(df_train[['Deck']], prefix_sep='_') df_train = pd.concat([df_train, dummies], axis=1) dummies=pd.get_dummies(df_test[['Deck']], prefix_sep='_') df_test = pd.concat([df_test,dummies], axis=1) print('Deck Categories created' )
Titanic - Machine Learning from Disaster
998,653
features = [c for c in df_train.columns if c not in ['card_id', 'first_active_month']] categorical_feats = [c for c in features if 'feature_' in c]<init_hyperparams>
df_train['SibSp'] = df_train['SibSp'].astype('category')
dummies = pd.get_dummies(df_train[['SibSp']], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
df_test['SibSp'] = df_test['SibSp'].astype('category')
dummies = pd.get_dummies(df_test[['SibSp']], prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print('SibSp categories created')
Titanic - Machine Learning from Disaster
998,653
param = {'num_leaves': 31, 'min_data_in_leaf': 30, 'objective':'binary', 'max_depth': 6, 'learning_rate': 0.01, "boosting": "rf", "feature_fraction": 0.9, "bagging_freq": 1, "bagging_fraction": 0.9 , "bagging_seed": 11, "metric": 'binary_logloss', "lambda_l1": 0.1, "verbosity": -1, "random_state": 2333}<split>
df_train['Parch'] = df_train['Parch'].astype('category')
dummies = pd.get_dummies(df_train[['Parch']], prefix_sep='_')
df_train = pd.concat([df_train, dummies], axis=1)
df_test['Parch'] = df_test['Parch'].astype('category')
dummies = pd.get_dummies(df_test[['Parch']], prefix_sep='_')
df_test = pd.concat([df_test, dummies], axis=1)
print('Parch categories created')
Titanic - Machine Learning from Disaster
998,653
%%time folds = KFold(n_splits=5, shuffle=True, random_state=15) oof = np.zeros(len(df_train)) predictions = np.zeros(len(df_test)) feature_importance_df = pd.DataFrame() start = time.time() for fold_,(trn_idx, val_idx)in enumerate(folds.split(df_train.values, target.values)) : print("fold n°{}".format(fold_)) trn_data = lgb.Dataset(df_train.iloc[trn_idx][features], label=target.iloc[trn_idx], categorical_feature=categorical_feats) val_data = lgb.Dataset(df_train.iloc[val_idx][features], label=target.iloc[val_idx], categorical_feature=categorical_feats) num_round = 10000 clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=100, early_stopping_rounds = 200) oof[val_idx] = clf.predict(df_train.iloc[val_idx][features], num_iteration=clf.best_iteration) fold_importance_df = pd.DataFrame() fold_importance_df["feature"] = features fold_importance_df["importance"] = clf.feature_importance() fold_importance_df["fold"] = fold_ + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) predictions += clf.predict(df_test[features], num_iteration=clf.best_iteration)/ folds.n_splits print("CV score: {:<8.5f}".format(log_loss(target, oof)) )<prepare_output>
df_data = df_data.drop(['Cabin', 'Embarked', 'Title', 'Age', 'Sex', 'Name', 'Ticket', 'Deck', 'Fare'], axis=1)
df_train = df_train.drop(['Cabin', 'Embarked', 'Title', 'Age', 'Sex', 'Name', 'Ticket', 'AgeGroup', 'Deck', 'Pclass', 'Fare', 'FareBand', 'SibSp', 'Parch', 'Parch_7', 'Parch_8', 'Parch_9'], axis=1)
df_test = df_test.drop(['Cabin', 'Embarked', 'Title', 'Age', 'Sex', 'Name', 'Ticket', 'AgeGroup', 'Deck', 'Pclass', 'Fare', 'FareBand', 'SibSp', 'Parch', 'Parch_7', 'Parch_8', 'Parch_9'], axis=1)
print('Non-numeric columns dropped')
Titanic - Machine Learning from Disaster
998,653
df_outlier_prob = pd.DataFrame({"card_id":df_test["card_id"].values}) df_outlier_prob["target"] = predictions df_outlier_prob.head()<prepare_output>
NUMERIC_COLUMNS=['Alone','Family Size','Sex','Pclass','Fare','FareBand','Age','TitleCat','Embarked'] ORIGINAL_NUMERIC_COLUMNS=['Pclass','Age','SibSp','Parch','Sex_female','Sex_male','Title_Master', 'Title_Miss','Title_Mr', 'Title_Mrs', 'Title_Millitary','Embarked'] REVISED_NUMERIC_COLUMNS=['Title_Master', 'Title_Millitary', 'Title_Miss', 'Title_Mr', 'Title_Mrs', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'FareBand_1', 'FareBand_2', 'FareBand_3', 'FareBand_4', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'AgeGroup_Child', 'AgeGroup_Young Adult', 'AgeGroup_Adult', 'AgeGroup_Older Adult', 'AgeGroup_Senior', 'Sex_female', 'Sex_male', 'Alone', 'Family_Survival', 'HadCabin','SibSp_0', 'SibSp_1', 'SibSp_2', 'SibSp_3', 'SibSp_4', 'SibSp_5', 'SibSp_8', 'Parch_0', 'Parch_1', 'Parch_2', 'Parch_3', 'Parch_4', 'Parch_5', 'Parch_6', ] data_to_train = df_train[REVISED_NUMERIC_COLUMNS].fillna(-1000) y=df_train['Survived'] X=data_to_train X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=21, stratify=y) print('Model Split' )
Titanic - Machine Learning from Disaster
998,653
outlier_id = pd.DataFrame(df_outlier_prob.sort_values(by='target',ascending = False ).head(25000)['card_id'] )<load_from_csv>
clf = SVC() clf.fit(X_train, y_train) y_pred = clf.predict(X_test) acc_clf = round(accuracy_score(y_pred, y_test)* 100, 2) print(acc_clf )
Titanic - Machine Learning from Disaster
998,653
best_submission = pd.read_csv('../input/predicting-outliers-to-improve-your-score/3.695.csv')<merge>
test = df_test[REVISED_NUMERIC_COLUMNS].fillna(-1000) Submission['Survived']=clf.predict(test) Submission.set_index('PassengerId', inplace=True) Submission.to_csv('baselinemodel01.csv',sep=',') print('Submission Created' )
Titanic - Machine Learning from Disaster
998,653
most_likely_liers = best_submission.merge(outlier_id,how='right') most_likely_liers.head()<filter>
REVISED_NUMERIC_COLUMNS=['Title_Master', 'Title_Millitary', 'Title_Miss', 'Title_Mr', 'Title_Mrs', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'FareBand_1', 'FareBand_2', 'FareBand_3', 'FareBand_4', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'AgeGroup_Child', 'AgeGroup_Young Adult', 'AgeGroup_Adult', 'AgeGroup_Older Adult', 'AgeGroup_Senior', 'Sex_female', 'Sex_male', 'Alone', 'Family_Survival', 'HadCabin','SibSp_0', 'SibSp_1', 'SibSp_2', 'SibSp_3', 'SibSp_4', 'SibSp_5', 'SibSp_8', 'Parch_0', 'Parch_1', 'Parch_2', 'Parch_3', 'Parch_4', 'Parch_5', 'Parch_6', ] predictors = df_train.drop(['Survived', 'PassengerId'], axis=1) data_to_train = df_train[REVISED_NUMERIC_COLUMNS].fillna(-1000) X=data_to_train y = df_train["Survived"] x_train, x_val, y_train, y_val = train_test_split(data_to_train, y, test_size = 0.3,random_state=21, stratify=y) print('Data Split' )
Titanic - Machine Learning from Disaster
998,653
%%time for card_id in most_likely_liers['card_id']: model_without_outliers.loc[model_without_outliers['card_id']==card_id,'target']\ = most_likely_liers.loc[most_likely_liers['card_id']==card_id,'target'].values<save_to_csv>
param_dist = {"max_depth": np.arange(1, 6), "max_features": np.arange(1, 10), "min_samples_leaf": np.arange(1, 6), "criterion": ["gini","entropy"]} tree = DecisionTreeClassifier() tree_cv = RandomizedSearchCV(tree, param_dist, cv=30) tree_cv.fit(X,y) y_pred = tree_cv.predict(x_val) print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_)) print("Best score is {}".format(tree_cv.best_score_)) acc_tree_cv = round(accuracy_score(y_pred, y_val)* 100, 2) print(acc_tree_cv )
Titanic - Machine Learning from Disaster
998,653
model_without_outliers.to_csv("combining_submission.csv", index=False )<import_modules>
test = df_test[REVISED_NUMERIC_COLUMNS].fillna(-1000) tree = DecisionTreeClassifier(max_depth=5,max_features=7,min_samples_leaf=1,criterion="entropy") tree.fit(X,y) Submission['Survived']=tree.predict(test) print(Submission.head(5))
Titanic - Machine Learning from Disaster
998,653
import pandas as pd import numpy as np import seaborn as sns<load_from_csv>
Submission.to_csv('Tunedtree1submission.csv',sep=',') print("Submission Submitted" )
Titanic - Machine Learning from Disaster
998,653
df1 = pd.read_csv("../input/elo-blending/BlendingRLSR.csv")
df1.head()<save_to_csv>
REVISED_NUMERIC_COLUMNS=['Title_Master', 'Title_Millitary', 'Title_Miss', 'Title_Mr', 'Title_Mrs', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'FareBand_1', 'FareBand_2', 'FareBand_3', 'FareBand_4', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'AgeGroup_Child', 'AgeGroup_Young Adult', 'AgeGroup_Adult', 'AgeGroup_Older Adult', 'AgeGroup_Senior', 'Sex_female', 'Sex_male', 'Alone', 'Family_Survival', 'HadCabin','SibSp_0', 'SibSp_1', 'SibSp_2', 'SibSp_3', 'SibSp_4', 'SibSp_5', 'SibSp_8', 'Parch_0', 'Parch_1', 'Parch_2', 'Parch_3', 'Parch_4', 'Parch_5', 'Parch_6', ] predictors = df_train.drop(['Survived', 'PassengerId'], axis=1) data_to_train = df_train[REVISED_NUMERIC_COLUMNS].fillna(-1000) data_to_predict=df_test[REVISED_NUMERIC_COLUMNS].fillna(-1000) y=df_train['Survived'] X=data_to_train X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=21, stratify=y) print('Data split' )
Titanic - Machine Learning from Disaster
998,653
df2 = pd.read_csv("../input/elo-blending/combining_submission(1).csv")
df2.head()
df3 = pd.read_csv("../input/simple-lightgbm-without-blending/submission.csv")
df3.head()
df2['target'] = df2['target'] * 0.35 + df1['target'] * 0.65
df2['target'] = df2['target'] * 0.57 + df3['target'] * 0.43
df2.to_csv("blend.csv", index=False)<import_modules>
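The two in-place blends above compose into one weighted average: target = 0.57 * (0.35*t2 + 0.65*t1) + 0.43*t3 = 0.1995*t2 + 0.3705*t1 + 0.43*t3, with the three weights summing to 1.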
print('Modules imported' )
Titanic - Machine Learning from Disaster
998,653
import numpy as np import pandas as p from matplotlib import pyplot as plt import seaborn as sns from math import sqrt from sklearn import metrics<load_from_csv>
model = Sequential() model.add(Dense(units=56, input_dim=X.shape[1], activation='selu')) model.add(Dropout(0.5)) model.add(Dense(units=27, activation='selu')) model.add(Dropout(0.5)) model.add(Dense(units=1, activation='tanh')) model.compile(loss='mse', optimizer='sgd') print('Keras Model Created' )
Titanic - Machine Learning from Disaster
998,653
df_base0 = p.read_csv('../input/elo-blending/3.695.csv', names=["card_id", "target0"], skiprows=[0], header=None)
df_base1 = p.read_csv('../input/elo-blending/3.696.csv', names=["card_id", "target1"], skiprows=[0], header=None)
df_base2 = p.read_csv('../input/elo-blending/3.6999.csv', names=["card_id", "target2"], skiprows=[0], header=None)
df_base3 = p.read_csv('../input/elo-blending/3.69991.csv', names=["card_id", "target3"], skiprows=[0], header=None)
df_base4 = p.read_csv('../input/elo-blending/3.699992.csv', names=["card_id", "target4"], skiprows=[0], header=None)
df_base5 = p.read_csv('../input/elo-blending/3.70.csv', names=["card_id", "target5"], skiprows=[0], header=None)
df_base6 = p.read_csv('../input/elo-blending/3.701.csv', names=["card_id", "target6"], skiprows=[0], header=None)
df_base7 = p.read_csv('../input/elo-blending/3.702.csv', names=["card_id", "target7"], skiprows=[0], header=None)
df_base8 = p.read_csv('../input/elo-blending/3.703.csv', names=["card_id", "target8"], skiprows=[0], header=None)
df_base9 = p.read_csv('../input/elo-blending/3.704.csv', names=["card_id", "target9"], skiprows=[0], header=None)
df_base10 = p.read_csv('../input/elo-blending/Blending.csv', names=["card_id", "target10"], skiprows=[0], header=None)
df_base11 = p.read_csv('../input/elo-blending/BlendingRLS.csv', names=["card_id", "target11"], skiprows=[0], header=None)
df_base12 = p.read_csv('../input/elo-blending/combining_submission(1).csv', names=["card_id", "target12"], skiprows=[0], header=None)
df_base13 = p.read_csv('../input/elo-blending/BlendingRLSR.csv', names=["card_id", "target13"], skiprows=[0], header=None)
df_base14 = p.read_csv('../input/simple-lightgbm-without-blending/submission.csv', names=["card_id", "target14"], skiprows=[0], header=None)<merge>
model.fit(X.values, y.values, epochs=500, verbose=0) print('Keras model fitted' )
Titanic - Machine Learning from Disaster
998,653
df_base = p.merge(df_base12, df_base0, how='inner', on='card_id')
df_base = p.merge(df_base, df_base1, how='inner', on='card_id')
df_base = p.merge(df_base, df_base2, how='inner', on='card_id')
df_base = p.merge(df_base, df_base3, how='inner', on='card_id')
df_base = p.merge(df_base, df_base4, how='inner', on='card_id')
df_base = p.merge(df_base, df_base5, how='inner', on='card_id')
df_base = p.merge(df_base, df_base6, how='inner', on='card_id')
df_base = p.merge(df_base, df_base7, how='inner', on='card_id')
df_base = p.merge(df_base, df_base8, how='inner', on='card_id')
df_base = p.merge(df_base, df_base9, how='inner', on='card_id')
df_base = p.merge(df_base, df_base10, how='inner', on='card_id')
df_base = p.merge(df_base, df_base11, how='inner', on='card_id')
# df_base12 is already the merge base, so it is not merged in a second time
df_base = p.merge(df_base, df_base13, how='inner', on='card_id')
df_base = p.merge(df_base, df_base14, how='inner', on='card_id')<compute_test_metric>
df_test=df_test.set_index('PassengerId') p_survived = model.predict_classes(df_test) print('Prediction Completed' )
Titanic - Machine Learning from Disaster
998,653
M = np.zeros([df_base.iloc[:,1:].shape[1],df_base.iloc[:,1:].shape[1]]) for i in np.arange(M.shape[1]): for j in np.arange(M.shape[1]): M[i,j] = sqrt(metrics.mean_squared_error(df_base.iloc[:,i+1], df_base.iloc[:,j+1]))<save_to_csv>
submission = pd.DataFrame() submission['PassengerId'] = df_test.index submission['Survived'] = p_survived print('predictions added to submission' )
Titanic - Machine Learning from Disaster
998,653
df_base['target'] = df_base.iloc[:,1:].mean(axis=1) df_base[['card_id','target']].to_csv("Bestoutput.csv",index=False )<load_from_csv>
submission.to_csv('DeepLearning03.csv', index=False) print('csv created' )
Titanic - Machine Learning from Disaster
998,653
df_base14 = p.read_csv('../input/simple-lightgbm-without-blending/submission.csv', names=["card_id", "target14"], skiprows=[0], header=None)
df_base6 = p.read_csv('../input/elo-blending/3.701.csv', names=["card_id", "target6"], skiprows=[0], header=None)
df_base7 = p.read_csv('../input/elo-blending/3.702.csv', names=["card_id", "target7"], skiprows=[0], header=None)
df_base = p.merge(df_base12, df_base6, how='inner', on='card_id')
df_base = p.merge(df_base, df_base7, how='inner', on='card_id')
plt.figure(figsize=(16, 12))
sns.heatmap(df_base.iloc[:, 1:].corr(), annot=True, fmt=".2f")<compute_test_metric>
seed = 70
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
cvscores = []
for train, test in kfold.split(X, y):
    model = Sequential()
    model.add(Dense(54, input_dim=X.shape[1], activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(54, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(54, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(1, activation='tanh'))
    model.compile(loss='binary_crossentropy', optimizer='Adagrad', metrics=['accuracy'])
    # fit and score on this fold's indices, not the fixed earlier hold-out split
    model.fit(X.iloc[train], y.iloc[train], epochs=150, batch_size=10, verbose=0)
    scores = model.evaluate(X.iloc[test], y.iloc[test], verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
Titanic - Machine Learning from Disaster
998,653
M = np.zeros([df_base.iloc[:,1:].shape[1],df_base.iloc[:,1:].shape[1]]) for i in np.arange(M.shape[1]): for j in np.arange(M.shape[1]): M[i,j] = sqrt(metrics.mean_squared_error(df_base.iloc[:,i+1], df_base.iloc[:,j+1]))<save_to_csv>
submission = pd.DataFrame() submission['PassengerId'] = df_test.index submission['Survived'] = p_survived print('predictions added to submission' )
Titanic - Machine Learning from Disaster
998,653
df_base['target'] = df_base.iloc[:,1:].mean(axis=1) df_base[['card_id','target']].to_csv("blend2.csv",index=False )<save_to_csv>
submission.to_csv('OptimisedDeepLearning04.csv', index=False) print('csv created' )
Titanic - Machine Learning from Disaster
9,649,480
df_base['target'] = df2['target']* 0.3 + df_base['target'] * 0.7 plt.figure(figsize=(8,8)) plt.subplot(1, 2, 1) sns.boxplot(df2['target'],orient='v') plt.subplot(1, 2, 2) sns.boxplot(df_base['target'], orient='v') plt.show() <import_modules>
train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
9,649,480
from scipy.stats import truncnorm<save_to_csv>
train = train.drop(columns= ['Name','Ticket','Cabin']) test = test.drop(columns= ['Name','Ticket','Cabin'] )
Titanic - Machine Learning from Disaster
9,649,480
df_base['target'] = truncnorm.mean(df2['target'],df_base['target']) df_base[['card_id','target']].to_csv("blend3.csv",index=False )<set_options>
train['Embarked_S'] =(train['Embarked'] == 'S' ).astype(int) train['Embarked_C'] =(train['Embarked'] == 'C' ).astype(int) train['Embarked_Q'] =(train['Embarked'] == 'Q' ).astype(int) train['Gender'] =(train['Sex'] == 'male' ).astype(int )
Titanic - Machine Learning from Disaster
9,649,480
pd.set_option('display.max_columns', None) print('available GPU devices catboost:', get_gpu_device_count() )<define_variables>
test['Embarked_S'] =(test['Embarked'] == 'S' ).astype(int) test['Embarked_C'] =(test['Embarked'] == 'C' ).astype(int) test['Embarked_Q'] =(test['Embarked'] == 'Q' ).astype(int) test['Gender'] =(test['Sex'] == 'male' ).astype(int )
Titanic - Machine Learning from Disaster
9,649,480
DATA_DIR = '/kaggle/input/m5-forecasting-accuracy' MODEL_VER = 'v0' BACKWARD_LAGS = 60 END_D = 1913 CUT_D = END_D - int(365 * 1.2) END_DATE = '2016-04-24' print(datetime.strptime(END_DATE, '%Y-%m-%d'))<define_variables>
train = train.drop(columns = ['Sex']) test = test.drop(columns = ['Sex'] )
Titanic - Machine Learning from Disaster
9,649,480
CALENDAR_DTYPES = { 'date': 'str', 'wm_yr_wk': 'int16', 'weekday': 'object', 'wday': 'int16', 'month': 'int16', 'year': 'int16', 'd': 'object', 'event_name_1': 'object', 'event_type_1': 'object', 'event_name_2': 'object', 'event_type_2': 'object', 'snap_CA': 'int16', 'snap_TX': 'int16', 'snap_WI': 'int16' } PARSE_DATES = ['date'] SPRICES_DTYPES = { 'store_id': 'object', 'item_id': 'object', 'wm_yr_wk': 'int16', 'sell_price': 'float32' }<load_from_csv>
train = train.drop(columns = ['Embarked']) test = test.drop(columns = ['Embarked'] )
Titanic - Machine Learning from Disaster
9,649,480
def get_df(is_train=True, backward_lags=None): strain = pd.read_csv('{}/sales_train_validation.csv'.format(DATA_DIR)) print('read train:', strain.shape) cat_cols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id'] last_day = int(strain.columns[-1].replace('d_', '')) print('first day is:', CUT_D) print('last day is:', last_day) if not is_train: for day in range(last_day + 1, last_day + 28 + 28 + 1): strain['d_{}'.format(day)] = np.nan value_vars = [col for col in strain.columns if(col.startswith('d_')and(int(col.replace('d_', '')) >= END_D - backward_lags)) ] else: value_vars = [col for col in strain.columns if(col.startswith('d_')and(int(col.replace('d_', '')) >= CUT_D)) ] strain = pd.melt( strain, id_vars = cat_cols, value_vars = value_vars, var_name = 'd', value_name = 'sales' ) print('melted train:', strain.shape) calendar = pd.read_csv('{}/calendar.csv'.format(DATA_DIR), dtype=CALENDAR_DTYPES, parse_dates=PARSE_DATES) print('read calendar:', calendar.shape) strain = strain.merge(calendar, on='d', copy=False) del calendar gc.collect() print('calendar merge done') sprices = pd.read_csv('{}/sell_prices.csv'.format(DATA_DIR), dtype=SPRICES_DTYPES) print('read prices:', sprices.shape) strain = strain.merge( sprices, on=['store_id', 'item_id', 'wm_yr_wk'], copy=False ) del sprices gc.collect() print('prices merge done') print('begin train date:', strain['date'].min()) print('end train date:', strain['date'].max()) if not is_train: strain = strain.loc[ strain['date'] >=(datetime.strptime(END_DATE, '%Y-%m-%d')- timedelta(days=backward_lags)) ] print('date cut train:', strain.shape) print('cut train date:', strain['date'].min()) print('end train date:', strain['date'].max()) return strain<categorify>
train.fillna(0, inplace=True) test.fillna(0, inplace=True )
Titanic - Machine Learning from Disaster
9,649,480
def make_features(strain): print('in dataframe:', strain.shape) lags = [7, 28] windows= [7, 28] wnd_feats = ['id', 'item_id'] lag_cols = ['lag_{}'.format(lag)for lag in lags ] for lag, lag_col in zip(lags, lag_cols): strain[lag_col] = strain[['id', 'sales']].groupby('id')['sales'].shift(lag) print('lag sales done') for wnd_feat in wnd_feats: for wnd in windows: for lag_col in lag_cols: wnd_col = '{}_{}_rmean_{}'.format(lag_col, wnd_feat, wnd) strain[wnd_col] = strain[[wnd_feat, lag_col]].groupby(wnd_feat)[lag_col].transform( lambda x: x.rolling(wnd ).mean() ) print('rolling mean sales for feature done:', wnd_feat) date_features = { 'week_num': 'weekofyear', 'quarter': 'quarter', 'mday': 'day' } for date_feat_name, date_feat_func in date_features.items() : strain[date_feat_name] = getattr(strain['date'].dt, date_feat_func ).astype('int16') print('date features done') strain['d'] = strain['d'].apply(lambda x: int(x.replace('d_', ''))) print('out dataframe:', strain.shape) return strain<feature_engineering>
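The groupby-shift / rolling-mean pattern in make_features is easiest to see on a toy frame (a sketch, assuming pandas):

toy = pd.DataFrame({'id': ['a'] * 6 + ['b'] * 6,
                    'sales': [1, 2, 3, 4, 5, 6, 10, 20, 30, 40, 50, 60]})
toy['lag_2'] = toy.groupby('id')['sales'].shift(2)          # per-id 2-day lag
toy['lag_2_rmean_3'] = toy.groupby('id')['lag_2'].transform(
    lambda x: x.rolling(3).mean())                           # 3-day mean of that lag
print(toy)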
X = train.drop(columns = ['Survived']) y = train['Survived']
Titanic - Machine Learning from Disaster
9,649,480
%%time strain = get_df(is_train=True, backward_lags=None) strain = make_features(strain )<feature_engineering>
model = XGBClassifier(learning_rate=0.01, n_estimators=1000) model.fit(X,y )
Titanic - Machine Learning from Disaster
9,649,480
drop_cols = ['id', 'sales', 'date', 'wm_yr_wk', 'weekday'] train_cols = strain.columns[~strain.columns.isin(drop_cols)] cat_cols = [ 'item_id', 'dept_id', 'store_id', 'cat_id', 'state_id', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2' ] strain[cat_cols] = strain[cat_cols].fillna(0 )<define_variables>
pred = model.predict(test )
Titanic - Machine Learning from Disaster
9,649,480
<train_model><EOS>
result = pd.DataFrame({'PassengerId':test['PassengerId'], 'Survived':pred}) result.to_csv("submission.csv", index=False )
Titanic - Machine Learning from Disaster
777,941
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<save_model>
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
train_df.head()
Titanic - Machine Learning from Disaster
777,941
model.save_model('model_{}.cbm'.format(MODEL_VER))<define_variables>
combined_df=pd.concat([train_df,test_df] )
Titanic - Machine Learning from Disaster
777,941
%%time spred = get_df(is_train=False, backward_lags=BACKWARD_LAGS) for pred_day in tqdm(range(1, 28 + 28 + 1)) : pred_date = datetime.strptime(END_DATE, '%Y-%m-%d')+ timedelta(days=pred_day) pred_date_back = pred_date - timedelta(days=BACKWARD_LAGS + 1) print('-' * 70) print('forecast day forward:', pred_day, '| forecast date:', pred_date) spred_data = spred[(spred['date'] >= pred_date_back)&(spred['date'] <= pred_date)].copy() spred_data = make_features(spred_data) spred_data = spred_data.loc[spred['date'] == pred_date, train_cols] spred_data[cat_cols] = spred_data[cat_cols].fillna(0) spred.loc[spred['date'] == pred_date, 'sales'] = model.predict(spred_data) del spred_data gc.collect()<feature_engineering>
combined_df.loc[combined_df["Age"].isnull() ].groupby(["Pclass","Sex"])["Sex"].agg(["count"] )
Titanic - Machine Learning from Disaster
777,941
spred_subm = spred.loc[spred['date'] > END_DATE, ['id', 'd', 'sales']].copy() last_d = int(spred.loc[spred['date'] == END_DATE, 'd'].unique() [0].replace('d_', '')) print('last d num:', last_d) spred_subm['d'] = spred_subm['d'].apply(lambda x: 'F{}'.format(int(x.replace('d_', '')) - last_d)) spred_subm.loc[spred_subm['sales'] < 0, 'sales'] = 0<save_to_csv>
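The next step indexes wide F1..F56 columns, but spred_subm above is still long-format (id, d, sales); the pivot that bridges the two cells is missing from this extract, and a minimal sketch of that step would be:

spred_subm = spred_subm.pivot(index='id', columns='d', values='sales').reset_index()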
combined_df.groupby(["Sex","SibSp","Parch"])["Age"].agg(["median"] )
Titanic - Machine Learning from Disaster
777,941
f_cols_val = ['F{}'.format(x)for x in range(1, 28 + 1)] f_cols_eval = ['F{}'.format(x)for x in range(28 + 1, 28 + 28 + 1)] spred_subm_eval = spred_subm.copy() spred_subm.drop(columns=f_cols_eval, inplace=True) spred_subm_eval.drop(columns=f_cols_val, inplace=True) spred_subm_eval.columns = spred_subm.columns spred_subm_eval['id'] = spred_subm_eval['id'].str.replace('validation', 'evaluation') spred_subm = pd.concat([spred_subm, spred_subm_eval], axis=0, sort=False) spred_subm.reset_index(drop=True, inplace=True) spred_subm.to_csv('submission.csv', index=False) print('submission saved:', spred_subm.shape )<import_modules>
combined_df.loc[combined_df["SibSp"]==8]
Titanic - Machine Learning from Disaster
777,941
from datetime import datetime, timedelta import gc import numpy as np, pandas as pd import lightgbm as lgb<define_variables>
combined_df["Name_key"]="" combined_df["Name_key"]=combined_df["Name"].str.split(',',expand=True)[1].str.split(' ',expand=True)[1]
Titanic - Machine Learning from Disaster
777,941
CAL_DTYPES={"event_name_1": "category", "event_name_2": "category", "event_type_1": "category", "event_type_2": "category", "weekday": "category", 'wm_yr_wk': 'int16', "wday": "int16", "month": "int16", "year": "int16", "snap_CA": "float32", 'snap_TX': 'float32', 'snap_WI': 'float32' } PRICE_DTYPES = {"store_id": "category", "item_id": "category", "wm_yr_wk": "int16","sell_price":"float32" }<set_options>
combined_df.groupby(["Sex","Name_key","Parch"])["Age"].agg(["median"] )
Titanic - Machine Learning from Disaster
777,941
pd.options.display.max_columns = 50<define_variables>
combined_df["Fare_Group"]=pd.cut(combined_df["Fare"],range(0,350,50),right=False) combined_df["Fare_Group"]=combined_df["Fare_Group"].astype("object") combined_df["Fare_Group"].fillna("[300,600)",inplace=True )
Titanic - Machine Learning from Disaster
777,941
h = 28 max_lags = 57 tr_last = 1913 fday = datetime(2016,4, 25) fday<load_from_csv>
Age_values=combined_df.groupby(["Sex","Pclass","Name_key","Parch"])["Age"].agg(["median"] ).reset_index() Age_values.loc[Age_values["median"].isnull() ]
Titanic - Machine Learning from Disaster
777,941
def create_dt(is_train=True, nrows=None, first_day=1200):
    prices = pd.read_csv("../input/m5-forecasting-accuracy/sell_prices.csv", dtype=PRICE_DTYPES)
    for col, col_dtype in PRICE_DTYPES.items():
        if col_dtype == "category":
            prices[col] = prices[col].cat.codes.astype("int16")
            prices[col] -= prices[col].min()
    cal = pd.read_csv("../input/m5-forecasting-accuracy/calendar.csv", dtype=CAL_DTYPES)
    cal["date"] = pd.to_datetime(cal["date"])
    for col, col_dtype in CAL_DTYPES.items():
        if col_dtype == "category":
            cal[col] = cal[col].cat.codes.astype("int16")
            cal[col] -= cal[col].min()
    start_day = max(1 if is_train else tr_last - max_lags, first_day)
    numcols = [f"d_{day}" for day in range(start_day, tr_last + 1)]
    catcols = ['id', 'item_id', 'dept_id', 'store_id', 'cat_id', 'state_id']
    dtype = {numcol: "float32" for numcol in numcols}
    dtype.update({col: "category" for col in catcols if col != "id"})
    dt = pd.read_csv("../input/m5-forecasting-accuracy/sales_train_validation.csv",
                     nrows=nrows, usecols=catcols + numcols, dtype=dtype)
    for col in catcols:
        if col != "id":
            dt[col] = dt[col].cat.codes.astype("int16")
            dt[col] -= dt[col].min()
    if not is_train:
        for day in range(tr_last + 1, tr_last + 28 + 1):
            dt[f"d_{day}"] = np.nan
    dt = pd.melt(dt, id_vars=catcols,
                 value_vars=[col for col in dt.columns if col.startswith("d_")],
                 var_name="d", value_name="sales")
    dt = dt.merge(cal, on="d", copy=False)
    dt = dt.merge(prices, on=["store_id", "item_id", "wm_yr_wk"], copy=False)
    return dt<categorify>
null_age=combined_df.loc[combined_df["Age"].isnull() ][['PassengerId','Sex','Name_key','Pclass','Parch']] Age_values_f=pd.merge(Age_values,null_age,how='inner',on=['Sex','Name_key','Pclass','Parch']) Age_values_f=Age_values_f.rename(columns={'median':"Age"}) Age_values_f.loc[Age_values_f["Age"].isnull() ]
def create_fea(dt):
    # Lag features: per-series sales shifted by 7 and 28 days.
    lags = [7, 28]
    lag_cols = [f"lag_{lag}" for lag in lags]
    for lag, lag_col in zip(lags, lag_cols):
        dt[lag_col] = dt[["id", "sales"]].groupby("id")["sales"].shift(lag)
    # Rolling means of those lags over 7- and 28-day windows.
    wins = [7, 28]
    for win in wins:
        for lag, lag_col in zip(lags, lag_cols):
            dt[f"rmean_{lag}_{win}"] = dt[["id", lag_col]].groupby("id")[lag_col].transform(lambda x: x.rolling(win).mean())
    # Calendar features, derived from the date column when not already present.
    date_features = {
        "wday": "weekday",
        "week": "weekofyear",
        "month": "month",
        "quarter": "quarter",
        "year": "year",
        "mday": "day",
    }
    for date_feat_name, date_feat_func in date_features.items():
        if date_feat_name in dt.columns:
            dt[date_feat_name] = dt[date_feat_name].astype("int16")
        else:
            dt[date_feat_name] = getattr(dt["date"].dt, date_feat_func).astype("int16")<define_variables>
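Since create_fea mutates the frame in place, usage is a single call; a sketch, assuming df came from create_dt above:
create_fea(df)
# The earliest max_lags days now hold NaN lag/rolling features, which the dropna cell below discards.
df.shape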
combined_df.loc[combined_df["PassengerId"].isin([1257,980,1234])]
FIRST_DAY = 350<correct_missing_values>
combined_df.groupby(["Sex"])["Age"].agg(["median"] )
df.dropna(inplace=True)
df.shape<prepare_x_and_y>
# Fall back to the overall per-sex medians (27 for women, 28 for men, per the cell above) where no group median exists.
Age_values_f.loc[(Age_values_f["Age"].isnull()) & (Age_values_f["Sex"] == 'female'), "Age"] = 27.0
Age_values_f.loc[(Age_values_f["Age"].isnull()) & (Age_values_f["Sex"] == 'male'), "Age"] = 28.0
cat_feats = ['item_id', 'dept_id', 'store_id', 'cat_id', 'state_id'] + ["event_name_1", "event_name_2", "event_type_1", "event_type_2"]
useless_cols = ["id", "date", "sales", "d", "wm_yr_wk", "weekday"]
train_cols = df.columns[~df.columns.isin(useless_cols)]
X_train = df[train_cols]
y_train = df["sales"]<create_dataframe>
train_df.set_index('PassengerId', inplace=True)
Age_values_f.set_index('PassengerId', inplace=True)
train_df.update(Age_values_f)
train_df.reset_index(inplace=True)
test_df.set_index('PassengerId', inplace=True)
test_df.update(Age_values_f)
test_df.reset_index(inplace=True)
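DataFrame.update aligns on the index and overwrites only the cells it finds there, which is why both frames are re-indexed on PassengerId first. A toy illustration with made-up values:
import numpy as np
import pandas as pd
a = pd.DataFrame({"Age": [np.nan, 30.0]}, index=[1, 2])
b = pd.DataFrame({"Age": [25.0]}, index=[1])
a.update(b)   # row 1 becomes 25.0; row 2 keeps its 30.0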
train_df.groupby(["Embarked"])["Embarked"].agg(["count"] )
%%time
np.random.seed(777)
# Hold out 2M random rows as a "fake" validation set (random rows rather than a time-based holdout).
fake_valid_inds = np.random.choice(X_train.index.values, 2_000_000, replace=False)
train_inds = np.setdiff1d(X_train.index.values, fake_valid_inds)
train_data = lgb.Dataset(X_train.loc[train_inds], label=y_train.loc[train_inds],
                         categorical_feature=cat_feats, free_raw_data=False)
fake_valid_data = lgb.Dataset(X_train.loc[fake_valid_inds], label=y_train.loc[fake_valid_inds],
                              categorical_feature=cat_feats, free_raw_data=False)<prepare_x_and_y>
train_df.loc[train_df["PassengerId"]==62.0,"Embarked"]='S' train_df.loc[train_df["PassengerId"]==830,"Embarked"]='S'
del df, X_train, y_train, fake_valid_inds,train_inds ; gc.collect()<init_hyperparams>
train_df["Cabin_prefix"]=train_df["Cabin"].str[0:1] train_df.groupby(["Cabin_prefix","Pclass"])["Pclass"].agg(["count"] ).reset_index()
params = {
    "objective": "poisson",
    "metric": "rmse",
    "force_row_wise": True,
    "learning_rate": 0.075,
    "sub_row": 0.75,
    "bagging_freq": 1,
    "lambda_l2": 0.1,
    "verbosity": 1,
    "num_iterations": 1200,
    "num_leaves": 128,
    "min_data_in_leaf": 100,
}<train_model>
train_df.drop(columns={"Cabin","Cabin_prefix"},inplace=True )
%%time
m_lgb = lgb.train(params, train_data, valid_sets=[fake_valid_data], verbose_eval=20)<save_model>
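With the booster trained, a quick look at which features carry the most gain; a sketch using LightGBM's standard Booster API:
imp = pd.DataFrame({"feature": m_lgb.feature_name(),
                    "gain": m_lgb.feature_importance(importance_type="gain")})
imp.sort_values("gain", ascending=False).head(10)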
test_df.loc[test_df["Fare"].isnull() ]
m_lgb.save_model("model.lgb" )<count_unique_values>
train_df.loc[train_df["Pclass"]==3].loc[train_df["Age"]>50].groupby(["Age","Sex"])["Fare"].agg(["mean"] )
sub.id.nunique(), sub["id"].str.contains("validation$").sum()<import_modules>
train_df.loc[train_df["Pclass"]==3].loc[train_df["Age"]>50].groupby(["Sex"])["Fare"].agg(["mean"] )
from datetime import datetime, timedelta
import gc
import numpy as np, pandas as pd
import lightgbm as lgb<define_variables>
test_df.loc[test_df["Fare"].isnull() ,"Fare"]=7.518522
CAL_DTYPES={"event_name_1": "category", "event_name_2": "category", "event_type_1": "category", "event_type_2": "category", "weekday": "category", 'wm_yr_wk': 'int16', "wday": "int16", "month": "int16", "year": "int16", "snap_CA": "float32", 'snap_TX': 'float32', 'snap_WI': 'float32' } PRICE_DTYPES = {"store_id": "category", "item_id": "category", "wm_yr_wk": "int16","sell_price":"float32" }<set_options>
# Build family groups: a surname forms a group when the number of passengers sharing it
# matches the declared family size (SibSp + Parch + 1).
related_people = combined_df[["PassengerId", "Name", "SibSp", "Parch", "Ticket", "Embarked"]].copy()
related_people["Last_Name"] = related_people["Name"].str.split(",", expand=True)[0]
related_people["total_related"] = related_people["SibSp"] + related_people["Parch"] + 1
X = related_people.loc[(related_people["SibSp"] > 0) | (related_people["Parch"] > 0)].groupby(["Last_Name", "total_related"])["PassengerId"].agg(["count"])
X.reset_index(inplace=True)
Y = pd.DataFrame(X.loc[X["count"] == X["total_related"]]["Last_Name"])
Y["RGroup"] = Y["Last_Name"]
Y.drop_duplicates(inplace=True)
Y.set_index("Last_Name", inplace=True)
related_people["RGroup"] = ""
related_people.set_index("Last_Name", inplace=True)
related_people.update(Y)
related_people.reset_index(inplace=True)
# Two distinct Carter families share the surname; split them by ticket.
related_people.loc[related_people["Ticket"] == '113760', "RGroup"] = "Carter_1"
related_people.loc[related_people["Ticket"] == '224252', "RGroup"] = "Carter_2"
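A quick sanity check on the grouping; a sketch:
print((related_people["RGroup"] == "").sum(), "passengers still ungrouped")
related_people.loc[related_people["RGroup"] != ""].groupby("RGroup")["PassengerId"].count().describe()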
pd.options.display.max_columns = 50<define_variables>
Y = pd.DataFrame(related_people.loc[(related_people["SibSp"] == 0) & (related_people["Parch"] == 0)][["Last_Name", "Ticket"]])
Y["RGroup"] = Y["Last_Name"] + '_' + Y["Ticket"]
Y.drop_duplicates(inplace=True)
Y.set_index(["Last_Name", "Ticket"], inplace=True)
related_people.set_index(["Last_Name", "Ticket"], inplace=True)
related_people.update(Y)
related_people.reset_index(inplace=True)
h = 28
max_lags = 70
tr_last = 1913
fday = datetime(2016, 4, 25)
fday<load_from_csv>
X = related_people.loc[related_people["RGroup"] == ""].groupby(["Ticket", "total_related"])["PassengerId"].agg(["count"])
X.reset_index(inplace=True)
Y = pd.DataFrame(X.loc[X["count"] == X["total_related"]]["Ticket"])
Y["RGroup"] = Y["Ticket"] + '_R'
Y.drop_duplicates(inplace=True)
Y.set_index("Ticket", inplace=True)
related_people.set_index("Ticket", inplace=True)
related_people.update(Y)
related_people.reset_index(inplace=True)
def create_dt(is_train=True, nrows=None, first_day=1200):
    prices = pd.read_csv("../input/m5-forecasting-accuracy/sell_prices.csv", dtype=PRICE_DTYPES)
    for col, col_dtype in PRICE_DTYPES.items():
        if col_dtype == "category":
            prices[col] = prices[col].cat.codes.astype("int16")
            prices[col] -= prices[col].min()
    cal = pd.read_csv("../input/m5-forecasting-accuracy/calendar.csv", dtype=CAL_DTYPES)
    cal["date"] = pd.to_datetime(cal["date"])
    for col, col_dtype in CAL_DTYPES.items():
        if col_dtype == "category":
            cal[col] = cal[col].cat.codes.astype("int16")
            cal[col] -= cal[col].min()
    start_day = max(1 if is_train else tr_last - max_lags, first_day)
    numcols = [f"d_{day}" for day in range(start_day, tr_last + 1)]
    catcols = ['id', 'item_id', 'dept_id', 'store_id', 'cat_id', 'state_id']
    dtype = {numcol: "float32" for numcol in numcols}
    dtype.update({col: "category" for col in catcols if col != "id"})
    dt = pd.read_csv("../input/m5-forecasting-accuracy/sales_train_validation.csv",
                     nrows=nrows, usecols=catcols + numcols, dtype=dtype)
    for col in catcols:
        if col != "id":
            dt[col] = dt[col].cat.codes.astype("int16")
            dt[col] -= dt[col].min()
    if not is_train:
        for day in range(tr_last + 1, tr_last + 28 + 1):
            dt[f"d_{day}"] = np.nan
    dt = pd.melt(dt, id_vars=catcols,
                 value_vars=[col for col in dt.columns if col.startswith("d_")],
                 var_name="d", value_name="sales")
    dt = dt.merge(cal, on="d", copy=False)
    dt = dt.merge(prices, on=["store_id", "item_id", "wm_yr_wk"], copy=False)
    return dt<categorify>
# Manual fixes for extended families and groups that the automatic surname/ticket rules miss.
manual_rgroups = {
    249: "Beckwith_M", 872: "Beckwith_M", 137: "Beckwith_M",
    572: "Lamson_M", 1248: "Lamson_M", 969: "Lamson_M",
    1200: "Hays_M", 821: "Hays_M", 672: "Hays_M", 984: "Hays_M",
    588: "Frolicher_M", 1289: "Frolicher_M", 540: "Frolicher_M",
    581: "Jacobsohn_M", 1133: "Jacobsohn_M", 218: "Jacobsohn_M", 601: "Jacobsohn_M",
    756: "Hamalainen_M", 248: "Hamalainen_M", 1130: "Hamalainen_M",
    313: "Lahtinen_M", 1041: "Lahtinen_M", 418: "Lahtinen_M",
    530: "Hocking_M", 944: "Hocking_M", 775: "Hocking_M", 893: "Hocking_M",
    832: "Richards_M", 408: "Richards_M", 438: "Richards_M",
    105: "Gustafsson_M", 393: "Gustafsson_M", 207: "Gustafsson_M", 86: "Gustafsson_M",
    69: "3101281",
    480: "Hirvonen_M", 896: "Hirvonen_M", 665: "Hirvonen_M",
    477: "Renouf_M", 727: "Renouf_M", 923: "Renouf_M", 1211: "Renouf_M",
    70: "Kink_M", 1268: "Kink_M", 185: "Kink_M", 1286: "Kink_M", 1057: "Kink_M",
    1037: "Vander Planke_M", 19: "Vander Planke_M", 39: "Vander Planke_M", 334: "Vander Planke_M",
    206: "Strom_M", 252: "Strom_M", 268: "Strom_M",
    443: "347076", 1106: "347091", 193: "350046", 722: "350048",
    41: "7546", 274: "PC 17596",
    566: "Davies_M", 901: "Davies_M", 1079: "Davies_M",
}
for pid, rgroup in manual_rgroups.items():
    related_people.loc[related_people["PassengerId"] == pid, "RGroup"] = rgroup
def create_fea(dt):
    # Lag features: per-series sales shifted by 7 and 28 days.
    lags = [7, 28]
    lag_cols = [f"lag_{lag}" for lag in lags]
    for lag, lag_col in zip(lags, lag_cols):
        dt[lag_col] = dt[["id", "sales"]].groupby("id")["sales"].shift(lag)
    # Rolling means of those lags over 7- and 28-day windows.
    wins = [7, 28]
    for win in wins:
        for lag, lag_col in zip(lags, lag_cols):
            dt[f"rmean_{lag}_{win}"] = dt[["id", lag_col]].groupby("id")[lag_col].transform(lambda x: x.rolling(win).mean())
    # Calendar features, derived from the date column when not already present.
    date_features = {
        "wday": "weekday",
        "week": "weekofyear",
        "month": "month",
        "quarter": "quarter",
        "year": "year",
        "mday": "day",
    }
    for date_feat_name, date_feat_func in date_features.items():
        if date_feat_name in dt.columns:
            dt[date_feat_name] = dt[date_feat_name].astype("int16")
        else:
            dt[date_feat_name] = getattr(dt["date"].dt, date_feat_func).astype("int16")<define_variables>
# Keep only the key and the new feature before updating (drop returns a copy, so reassign).
related_people = related_people.drop(columns=["Ticket", "Last_Name", "Name", "SibSp", "Parch", "total_related"])
related_people.set_index("PassengerId", inplace=True)
train_df["RGroup"] = ""
train_df.set_index("PassengerId", inplace=True)
train_df.update(related_people)
train_df.reset_index(inplace=True)
related_people.reset_index(inplace=True)
FIRST_DAY = 800 <correct_missing_values>
related_people.set_index("PassengerId",inplace=True) test_df["RGroup"]="" test_df.set_index("PassengerId",inplace=True) test_df.update(related_people) test_df.reset_index(inplace=True) related_people.reset_index(inplace=True )
df.dropna(inplace=True)
df.shape<prepare_x_and_y>
train_df["Sex"]=train_df["Sex"].astype("category") train_df["Pclass"]=train_df["Pclass"].astype("category") train_df["Embarked"]=train_df["Embarked"].astype("category") train_df["RGroup"]=train_df["RGroup"].astype("category") train_df.drop(columns={"Name","Ticket","PassengerId"},inplace=True) train_df=pd.get_dummies(train_df,columns=["Pclass"]) train_df=pd.get_dummies(train_df,columns=["Sex"]) train_df=pd.get_dummies(train_df,columns=["Embarked"]) train_df.rename(columns={"Pclass_1.0":"Pclass_1","Pclass_2.0":"Pclass_2","Pclass_3.0":"Pclass_3"},inplace=True)
cat_feats = ['item_id', 'dept_id', 'store_id', 'cat_id', 'state_id'] + ["event_name_1", "event_name_2", "event_type_1", "event_type_2"]
useless_cols = ["id", "date", "sales", "d", "wm_yr_wk", "weekday"]
train_cols = df.columns[~df.columns.isin(useless_cols)]
X_train = df[train_cols]
y_train = df["sales"]<create_dataframe>
test_df["Sex"]=test_df["Sex"].astype("category") test_df["Pclass"]=test_df["Pclass"].astype("category") test_df["Embarked"]=test_df["Embarked"].astype("category") test_df["RGroup"]=test_df["RGroup"].astype("category") test_passengerId=test_df["PassengerId"].copy() test_df.drop(columns={"Name","Cabin","Ticket","PassengerId"},inplace=True) test_df=pd.get_dummies(test_df,columns=["Pclass"]) test_df=pd.get_dummies(test_df,columns=["Sex"]) test_df=pd.get_dummies(test_df,columns=["Embarked"])
train_data = lgb.Dataset(X_train, label=y_train, categorical_feature=cat_feats, free_raw_data=False)
# Caution: these 1M rows are sampled (with replacement) from the training set itself,
# so the "validation" score only tracks training fit.
fake_valid_inds = np.random.choice(len(X_train), 1_000_000)
fake_valid_data = lgb.Dataset(X_train.iloc[fake_valid_inds], label=y_train.iloc[fake_valid_inds],
                              categorical_feature=cat_feats, free_raw_data=False)<init_hyperparams>
train_df_dec = train_df.copy()
train_df_dec = train_df_dec.sample(frac=1).reset_index(drop=True)
row_count = train_df_dec.shape[0]
split_point = int(row_count * 0.30)
cv_data, train_data = train_df_dec[:split_point].copy(), train_df_dec[split_point:].copy()
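An equivalent split via scikit-learn, stratified on the target so both parts keep the class balance; a sketch assuming scikit-learn is available:
from sklearn.model_selection import train_test_split
train_data, cv_data = train_test_split(train_df, test_size=0.30, stratify=train_df["Survived"], random_state=0)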