kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
7,000,072
cross_val_model(X_stack, y_stack, XGB_model )<train_model>
Results = pd.DataFrame({'Model': [],'Accuracy Score': [], 'Recall':[], 'F1score':[]})
Titanic - Machine Learning from Disaster
7,000,072
class Ensemble(object): def __init__(self, n_splits, stacker, base_models): self.n_splits = n_splits self.stacker = stacker self.base_models = base_models def fit_predict(self, X, y, T): X = np.array(X) y = np.array(y) T = np.array(T) folds = list(StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=2017 ).split(X, y)) S_train = np.zeros(( X.shape[0], len(self.base_models))) S_test = np.zeros(( T.shape[0], len(self.base_models))) for i, clf in enumerate(self.base_models): S_test_i = np.zeros(( T.shape[0], self.n_splits)) for j,(train_idx, test_idx)in enumerate(folds): X_train = X[train_idx] y_train = y[train_idx] X_holdout = X[test_idx] print("Fit %s fold %d" %(str(clf ).split('(')[0], j+1)) clf.fit(X_train, y_train) y_pred = clf.predict_proba(X_holdout)[:,1] S_train[test_idx, i] = y_pred S_test_i[:, j] = clf.predict_proba(T)[:,1] S_test[:, i] = S_test_i.mean(axis=1) results = cross_val_score(self.stacker, S_train, y, cv=3, scoring='roc_auc') print("Stacker score: %.5f" %(results.mean())) self.stacker.fit(S_train, y) res = self.stacker.predict_proba(S_test)[:,1] return res<init_hyperparams>
model = DecisionTreeClassifier(max_depth=4) model.fit(trainX, trainY) y_pred = model.predict(testX) res = pd.DataFrame({"Model":['DecisionTreeClassifier'], "Accuracy Score": [accuracy_score(y_pred,testY)], "Recall": [recall_score(testY, y_pred)], "F1score": [f1_score(testY, y_pred)]}) Results = Results.append(res )
Titanic - Machine Learning from Disaster
7,000,072
rf_params = {} rf_params['n_estimators'] = 80 rf_params['max_depth'] = 12 rf_params['min_samples_split'] = 50 rf_params['min_samples_leaf'] = 23 <init_hyperparams>
pd.crosstab(testY, y_pred, rownames=['Real data'], colnames=['Predicted'] )
Titanic - Machine Learning from Disaster
7,000,072
xgb_params = {} xgb_params['learning_rate'] =0.03660642032718193 xgb_params['n_estimators'] = 70 xgb_params['max_depth'] = 7 xgb_params['reg_alpha'] = 0.1 xgb_params['reg_lambda'] = 0.1 xgb_params['colsample_bytree'] = 0.6162725690461764 xgb_params['min_child_weight'] = 0.751826989118936 <choose_model_class>
model = RandomForestClassifier(n_estimators=2500, max_depth=4) model.fit(trainX, trainY) y_pred = model.predict(testX) res = pd.DataFrame({"Model":['RandomForestClassifier'], "Accuracy Score": [accuracy_score(y_pred,testY)], "Recall": [recall_score(testY, y_pred)], "F1score": [f1_score(testY, y_pred)]}) Results = Results.append(res )
Titanic - Machine Learning from Disaster
7,000,072
rf_model = RandomForestClassifier(**rf_params, random_state=584867 )<choose_model_class>
pd.crosstab(testY, y_pred, rownames=['Real data'], colnames=['Predicted'] )
Titanic - Machine Learning from Disaster
7,000,072
xgb_model = XGBClassifier(**xgb_params, random_state=2943 )<choose_model_class>
model = XGBClassifier(learning_rate=0.001,n_estimators=2500, max_depth=4, min_child_weight=0, gamma=0, subsample=0.7, colsample_bytree=0.7, scale_pos_weight=1, seed=27, reg_alpha=0.00006) model.fit(trainX, trainY) y_pred = model.predict(testX) res = pd.DataFrame({"Model":['XGBClassifier'], "Accuracy Score": [accuracy_score(y_pred,testY)], "Recall": [recall_score(testY, y_pred)], "F1score": [f1_score(testY, y_pred)]}) Results = Results.append(res )
Titanic - Machine Learning from Disaster
7,000,072
log_model = LogisticRegression(random_state=29 )<choose_model_class>
pd.crosstab(testY, y_pred, rownames=['Real data'], colnames=['Predicted'] )
Titanic - Machine Learning from Disaster
7,000,072
stack = Ensemble(n_splits=3, stacker = log_model, base_models =(rf_model, xgb_model))<predict_on_test>
trainX = data[data.Survived.isnull() ==False].drop(['Survived','train'],axis=1) trainY = data.Survived[data.Survived.isnull() ==False] testX = data[data.Survived.isnull() ==True].drop(['Survived','train'],axis=1) model = XGBClassifier(learning_rate=0.001,n_estimators=2500, max_depth=4, min_child_weight=0, gamma=0, subsample=0.7, colsample_bytree=0.7, scale_pos_weight=1, seed=27, reg_alpha=0.00006) model.fit(trainX, trainY) test = data[data.train==0] test['Survived'] = model.predict(testX ).astype(int) test = test.reset_index() test[['PassengerId','Survived']].to_csv("submissionXGB.csv",index=False) print("done1" )
Titanic - Machine Learning from Disaster
13,832,044
X_stack.fillna(-1, inplace = True) test_stack_val.fillna(-1,inplace=True) y_pred = stack.fit_predict(X_stack, target_stack, test_stack_val )<save_to_csv>
%matplotlib inline
Titanic - Machine Learning from Disaster
13,832,044
sub = pd.DataFrame() sub['ids'] = test_id sub['prob'] = y_pred sub.to_csv('stacked_main.csv', index=False )<load_from_csv>
train_df = pd.read_csv('.. /input/titanic/train.csv') test_df = pd.read_csv('.. /input/titanic/test.csv') combine = [train_df , test_df]
Titanic - Machine Learning from Disaster
13,832,044
data = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv') data.head()<train_model>
train_df.groupby('Sex' ).Survived.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
X = data.drop(['AveragePrice'], axis=1) y = data['AveragePrice'] X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=42) <save_to_csv>
train_df.groupby('Ticket' ).Survived.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
model = XGBRegressor(n_jobs=4, learning_rate=0.05, max_depth=8, min_child_weight=0.5, n_estimators=1300) model.fit(X, y, verbose=True) test_predict_optimal = model.predict(test) new_df = pd.DataFrame({"id": test['id'] , "AveragePrice":test_predict_optimal}) new_df.to_csv("XGB_manual_custom_5.csv", index=False )<compute_train_metric>
train_df.groupby('Embarked' ).Survived.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
predictions = model.predict(X_val) scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error', cv=5) print(scores) print('Mean Absolute Error: %2f' %(-1 * scores.mean()))<compute_test_metric>
train_df.Cabin.value_counts()
Titanic - Machine Learning from Disaster
13,832,044
mae = mean_absolute_error(predictions, y_val) print("Mean Absolute Error : " + str(mae)) error_percent = mae/data['AveragePrice'].mean() *100 print(str(error_percent)+ ' %') <import_modules>
features_df = train_df.drop('Survived', axis=1) num_features = features_df.select_dtypes(np.number) num_features.sample(5 )
Titanic - Machine Learning from Disaster
13,832,044
import lightgbm as lgb from sklearn.metrics import mean_squared_log_error import matplotlib.pyplot as plt import seaborn as sns<load_from_csv>
train_df.groupby('Pclass' ).Survived.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
train = pd.read_csv('/kaggle/input/bike-sharing-demand-for-education/train.csv') test = pd.read_csv('/kaggle/input/bike-sharing-demand-for-education/test.csv') sample_submission = pd.read_csv('/kaggle/input/bike-sharing-demand-for-education/sample_submission.csv' )<prepare_x_and_y>
train_df.groupby('Sex' ).Pclass.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
X = train.drop(['datetime','casual','registered','cnt'],axis = 1) X_test = test.drop(['datetime'],axis = 1) y = train[['year','cnt']]<train_model>
train_df.groupby('Fare' ).Survived.agg(['mean' , 'count'] ).sort_values('count' , ascending = False )
Titanic - Machine Learning from Disaster
13,832,044
%%time columns = X.columns y_preds = np.zeros(X_test.shape[0]) feature_importances = pd.DataFrame() feature_importances['feature'] = columns X_train = X.query("year == 2011") X_valid = X.query("year == 2012") y_train = y.query("year == 2011") y_valid = y.query("year == 2012") y_train.drop('year',axis=1, inplace=True) y_valid.drop('year',axis=1, inplace=True) dtrain = lgb.Dataset(X_train, label=y_train) params = { 'objective': 'mean_squared_error', 'metric': 'rmse' } LGBM = lgb.train(params, dtrain, 1000, verbose_eval=100) feature_importances['importance'] = LGBM.feature_importance(importance_type='gain') y_pred_valid = LGBM.predict(X_valid) y_pred_valid = np.where(y_pred_valid < 0, 0, y_pred_valid) print("RMSLE:", mean_squared_log_error(y_valid, y_pred_valid)) print('******************************************************') y_preds = LGBM.predict(X_test) y_preds = np.where(y_preds < 0, 0, y_preds )<concatenate>
features_df = train_df.drop('Survived', axis=1) FEATURE_COLUMNS = features_df.columns NUM_FEATURES = features_df.select_dtypes(include=[np.number] ).columns CAT_FEATURES = ['Sex', 'Ticket'] numeric_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)) , ('scaler', StandardScaler()), ]) categorical_transformer = Pipeline(steps=[ ('imputer', SimpleImputer(strategy='constant', fill_value='missing')) , ('one_hot', OneHotEncoder(handle_unknown='ignore')) , ]) preprocessor = ColumnTransformer( transformers=[ ('num', numeric_transformer, NUM_FEATURES), ('cat', categorical_transformer, CAT_FEATURES) ] )
Titanic - Machine Learning from Disaster
13,832,044
submission = pd.concat([test['datetime'], pd.Series(y_preds)], axis=1) submission = submission.rename(columns={0:'cnt'} )<save_to_csv>
x_train, x_test, y_train, y_test = train_test_split(features_df, target, test_size=0.25 )
Titanic - Machine Learning from Disaster
13,832,044
submission.to_csv('base_submission.csv',header=True, index=False )<train_model>
baseline = DummyClassifier(strategy='most_frequent') model = RandomForestClassifier()
Titanic - Machine Learning from Disaster
13,832,044
pca = PCA(n_components=19) pca.fit(data) data = pca.transform(data) data_for_submission = pca.transform(data_for_submission) skf = StratifiedKFold(n_splits=5, shuffle=False) classifier = MLPClassifier(alpha=1, learning_rate='adaptive', max_iter=750, hidden_layer_sizes=(38, 76)) accuracy_list = [] for train_index, test_index in skf.split(data, label): train_data = data[train_index] train_label = label.iloc[train_index] test_data = data[test_index] test_label = label.iloc[test_index] classifier.fit(train_data, train_label) accuracy_list.append(classifier.score(test_data, test_label)) prediction_labels = classifier.predict(data_for_submission) predicted_data = pd.DataFrame([data_for_submission_id,prediction_labels] ).transpose() predicted_data.columns = ['id ','label'] predicted_data.to_csv('submission.csv',index=False,header=True )<feature_engineering>
baseline.fit(x_train, y_train) model.fit(x_train, y_train )
Titanic - Machine Learning from Disaster
13,832,044
class Dictionary(object): def __init__(self): self.token2idx = {} self.idx2token = [] def add_token(self, token): if token not in self.token2idx: self.idx2token.append(token) self.token2idx[token] = len(self.idx2token)- 1 return self.token2idx[token] def __len__(self): return len(self.idx2token )<string_transform>
baseline_predictions = baseline.predict(x_test) model_predictions = model.predict(x_test )
Titanic - Machine Learning from Disaster
13,832,044
char_vocab = Dictionary() pad_token = '<pad>' unk_token = '<unk>' pad_index = char_vocab.add_token(pad_token) unk_index = char_vocab.add_token(unk_token) chars = set(''.join(x_train_full)) for char in sorted(chars): char_vocab.add_token(char) print("Vocabulary:", len(char_vocab), "UTF characters") lang_vocab = Dictionary() languages = set(y_train_full) for lang in sorted(languages): lang_vocab.add_token(lang) print("Labels:", len(lang_vocab), "languages" )<prepare_x_and_y>
print(classification_report(y_test, baseline_predictions))
Titanic - Machine Learning from Disaster
13,832,044
print('a ->', char_vocab.token2idx['a']) print('cat ->', lang_vocab.token2idx['cat']) print(y_train_full[0], x_train_full[0][:10]) x_train_idx = [np.array([char_vocab.token2idx[c] for c in line])for line in x_train_full] y_train_idx = np.array([lang_vocab.token2idx[lang] for lang in y_train_full]) print(y_train_idx[0], x_train_idx[0][:10] )<split>
print(classification_report(y_test, model_predictions))
Titanic - Machine Learning from Disaster
13,832,044
x_train, x_val, y_train, y_val = train_test_split(x_train_idx, y_train_idx, test_size=0.15, random_state=seed) train_data = [(x, y)for x, y in zip(x_train, y_train)] val_data = [(x, y)for x, y in zip(x_val, y_val)] print(len(train_data), "training samples") print(len(val_data), "validation samples" )<define_variables>
test_df = pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
13,832,044
def batch_generator(data, batch_size, token_size): minibatch, sequences_so_far, tokens_so_far = [], 0, 0 for ex in data: minibatch.append(ex) seq_len = len(ex[0]) if seq_len > token_size: ex =(ex[0][:token_size], ex[1]) seq_len = token_size sequences_so_far += 1 tokens_so_far += seq_len if sequences_so_far == batch_size or tokens_so_far == token_size: yield minibatch minibatch, sequences_so_far, tokens_so_far = [], 0, 0 elif sequences_so_far > batch_size or tokens_so_far > token_size: yield minibatch[:-1] minibatch, sequences_so_far, tokens_so_far = minibatch[-1:], 1, len(minibatch[-1][0]) if minibatch: yield minibatch<define_variables>
sub_model = RandomForestClassifier() sub_model.fit(features_df, target) test_features = preprocessor.transform(test_df[train_df.drop('Survived', axis=1 ).columns]) predictions = sub_model.predict(test_features )
Titanic - Machine Learning from Disaster
13,832,044
def pool_generator(data, batch_size, token_size, shuffle=False): for p in batch_generator(data, batch_size * 100, token_size * 100): p_batch = batch_generator(sorted(p, key=lambda t: len(t[0]), reverse=True), batch_size, token_size) p_list = list(p_batch) if shuffle: for b in random.sample(p_list, len(p_list)) : yield b else: for b in p_list: yield b<choose_model_class>
sub_df = pd.DataFrame({'PassengerId' : test_df.PassengerId, 'Survived': predictions}) sub_df.head()
Titanic - Machine Learning from Disaster
13,832,044
class CharRNNClassifier(torch.nn.Module): def __init__(self, input_size, embedding_size, hidden_size, output_size, model="lstm", num_layers=1, bidirectional=False, pad_idx=0): super().__init__() self.model = model.lower() self.hidden_size = hidden_size self.embed = torch.nn.Embedding(input_size, embedding_size, padding_idx=pad_idx) if self.model == "gru": self.rnn = torch.nn.GRU(embedding_size, hidden_size, num_layers, bidirectional=bidirectional) elif self.model == "lstm": self.rnn = torch.nn.LSTM(embedding_size, hidden_size, num_layers, bidirectional=bidirectional) self.linear1 = torch.nn.Linear(hidden_size, output_size) self.linear2 = torch.nn.Linear(output_size, input_size) def forward(self, input, input_lengths): encoded = self.embed(input) packed = torch.nn.utils.rnn.pack_padded_sequence(encoded, input_lengths) output, _ = self.rnn(packed) padded, _ = torch.nn.utils.rnn.pad_packed_sequence(output, padding_value=float('-inf')) padded = padded.permute(1,2,0) output = F.adaptive_max_pool1d(padded, 1 ).view(-1, self.hidden_size) output = self.linear1(output) output = self.linear2(output) return output<set_options>
sub_df.to_csv('titanic_gender_submission.csv', index=False )
Titanic - Machine Learning from Disaster
13,502,313
if not torch.cuda.is_available() : print("WARNING: CUDA is not available.Select 'GPU On' on kernel settings") device = torch.device("cuda") torch.cuda.manual_seed(seed )<choose_model_class>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
13,502,313
criterion = torch.nn.CrossEntropyLoss(reduction='sum' )<train_model>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
13,502,313
def train(model, optimizer, data, batch_size, token_size, log=False): model.train() total_loss = 0 ncorrect = 0 nsentences = 0 ntokens = 0 niterations = 0 for batch in pool_generator(data, batch_size, token_size, shuffle=True): X = [torch.from_numpy(d[0])for d in batch] X_lengths = [x.numel() for x in X] ntokens += sum(X_lengths) X_lengths = torch.tensor(X_lengths, dtype=torch.long, device=device) y = torch.tensor([d[1] for d in batch], dtype=torch.long, device=device) X = torch.nn.utils.rnn.pad_sequence(X ).to(device) model.zero_grad() output = model(X, X_lengths) loss = criterion(output, y) loss.backward() optimizer.step() total_loss += loss.item() ncorrect +=(torch.max(output, 1)[1] == y ).sum().item() nsentences += y.numel() niterations += 1 total_loss = total_loss / nsentences accuracy = 100 * ncorrect / nsentences if log: print(f'Train: wpb={ntokens//niterations}, bsz={nsentences//niterations}, num_updates={niterations}') return accuracy<prepare_x_and_y>
train_data['titles'] = train_data.Name.str.split().str[1] test_data['titles'] = test_data.Name.str.split().str[1]
Titanic - Machine Learning from Disaster
13,502,313
def validate(model, data, batch_size, token_size): model.eval() ncorrect = 0 nsentences = 0 with torch.no_grad() : for batch in pool_generator(data, batch_size, token_size): X = [torch.from_numpy(d[0])for d in batch] X_lengths = torch.tensor([x.numel() for x in X], dtype=torch.long, device=device) y = torch.tensor([d[1] for d in batch], dtype=torch.long, device=device) X = torch.nn.utils.rnn.pad_sequence(X ).to(device) answer = model(X, X_lengths) ncorrect +=(torch.max(answer, 1)[1] == y ).sum().item() nsentences += y.numel() dev_acc = 100 * ncorrect / nsentences return dev_acc<define_variables>
train_data[train_data.Age >12][:3]
Titanic - Machine Learning from Disaster
13,502,313
hidden_size = 256 embedding_size = 64 bidirectional = False ntokens = len(char_vocab) nlabels = len(lang_vocab )<train_model>
train_data['age_fill'] = train_data['Age'] test_data['age_fill'] = test_data['Age'] def fill_age(df): for i, title in enumerate(df.titles): if df.loc[i,'age_fill'] != df.loc[i,'age_fill']: if df.loc[i,'titles'] == 'Master.': df.loc[i,'age_fill']= round(random.uniform(0,12),1) if df.loc[i,'titles'] == 'Miss.': df.loc[i,'age_fill']= round(random.uniform(0.0,df.Age.max()),1) else: df.loc[i,'age_fill'] = round(random.uniform(13.0,df.Age.max()),1) fill_age(train_data) fill_age(test_data)
Titanic - Machine Learning from Disaster
13,502,313
def testParameters(model, optimizer, epochs, batch_size, token_size): train_accuracy = [] valid_accuracy = [] for epoch in range(1, epochs + 1): acc = train(model, optimizer, train_data, batch_size, token_size, log=epoch==1) train_accuracy.append(acc) acc = validate(model, val_data, batch_size, token_size) valid_accuracy.append(acc) return train_accuracy, valid_accuracy<train_model>
train_data['relatives'] = train_data['SibSp'] + train_data['Parch'] test_data['relatives'] = train_data['SibSp'] + train_data['Parch'] train_data.relatives.value_counts()
Titanic - Machine Learning from Disaster
13,502,313
Hyperparameter optimisation: Trains and vlidates the model, then returns the accuracy to persist over iterations .<choose_model_class>
def combine_fam(df): for i, r in enumerate(df['relatives']): if df.loc[i,'relatives'] == 0: df.loc[i,'relatives_code'] = 0 if df.loc[i,'relatives'] > 0 & df.loc[i,'relatives'] <= 3: df.loc[i,'relatives_code'] = 1 if df.loc[i,'relatives'] > 3 & df.loc[i,'relatives'] <= 5: df.loc[i,'relatives_code'] = 2 if df.loc[i,'relatives'] > 5: df.loc[i,'relatives_code'] = 3 combine_fam(train_data) train_data.relatives_code = train_data.relatives_code.astype(int) combine_fam(test_data) test_data.relatives_code = test_data.relatives_code.astype(int )
Titanic - Machine Learning from Disaster
13,502,313
model = CharRNNClassifier(ntokens, embedding_size, hidden_size, nlabels, bidirectional=bidirectional, pad_idx=pad_index ).to(device) optimizer = torch.optim.Adam(model.parameters() )<choose_model_class>
train_data['Fare_new'] = train_data['Fare'] test_data['Fare_new'] = test_data['Fare'] def find_price(df): freqs = {} for ticket in df.Ticket: freqs[ticket]= freqs.get(ticket, 0)+ 1 combined_ticket = {k:v for k,v in freqs.items() if v > 1} for i, ticket in enumerate(df['Ticket']): if ticket in combined_ticket: df.loc[i,'Fare_new'] = df.loc[i,'Fare']/combined_ticket[ticket] train_data['Fare_new'] = train_data['Fare'].astype(int) test_data['Fare_new'] = test_data['Fare_new'].fillna(test_data.Fare_new.mean() ).astype(int )
Titanic - Machine Learning from Disaster
13,502,313
batch_sizes = [64, 128, 256, 512, 1024] token_sizes = np.arange(50000, 350000, 50000) hidden_sizes = np.arange(128, 768, 64) embedding_sizes = np.arange(64, 256*4, 64) embedding_size = 128 hidden_size = 512 hidden_models = [] hidden_optimizers = [] figure = 0 batch_size = 256 token_size = 150000 epochs = 20 max_acc = 0 std_epochs = 10 bidirectional = False results = testParameters(model,optimizer, epochs, batch_size, token_size) plt.figure(figure) plt.plot(range(1, len(results[0])+1), results[0]) plt.plot(range(1, len(results[1])+1), results[1]) plt.title('Acuracy') plt.xlabel('epoch') plt.ylabel('Accuracy'); <choose_model_class>
train_data["Sex"] = train_data["Sex"].astype("category") test_data["Sex"] = test_data["Sex"].astype("category" )
Titanic - Machine Learning from Disaster
13,502,313
<choose_model_class>
embark = train_data.Embarked.unique() [:3] train_data['Embarked_fill'] = train_data['Embarked'].fillna(random.choice(embark)) test_data['Embarked_fill'] = test_data['Embarked'].fillna(random.choice(embark)) train_data["Embarked_fill"] = train_data["Embarked_fill"].astype("category") test_data["Embarked_fill"] = test_data["Embarked_fill"].astype("category" )
Titanic - Machine Learning from Disaster
13,502,313
<choose_model_class>
decks = train_data["Cabin"].str[0].unique() [1:] train_data["Deck"] = train_data["Cabin"].str[0].fillna(random.choice(decks)) test_data["Deck"] = test_data["Cabin"].str[0].fillna(random.choice(decks)) train_data["Deck"] = train_data["Deck"].astype("category") test_data["Deck"] = test_data["Deck"].astype("category")
Titanic - Machine Learning from Disaster
13,502,313
model = CharRNNClassifier(ntokens, embedding_size, hidden_size, nlabels, bidirectional=bidirectional, pad_idx=pad_index ).to(device) optimizer = torch.optim.Adam(model.parameters() )<choose_model_class>
y_train = train_data["Survived"] features = ['age_fill','Pclass', 'Sex','Deck','Fare_new','Embarked_fill','SibSp','Parch', 'relatives_code'] X_test = test_data[features].copy() X_train = train_data[features].copy()
Titanic - Machine Learning from Disaster
13,502,313
model = CharRNNClassifier(ntokens, embedding_size, hidden_size, nlabels, bidirectional=bidirectional ).to(device) optimizer = torch.optim.Adam(model.parameters() )<train_model>
X_train["Sex"] = X_train["Sex"].cat.codes X_test["Sex"] = X_test["Sex"].cat.codes X_train["Embarked_fill"] = X_train["Embarked_fill"].cat.codes X_test["Embarked_fill"] = X_test["Embarked_fill"].cat.codes X_train["Deck"] = X_train["Deck"].cat.codes X_test["Deck"] = X_test["Deck"].cat.codes
Titanic - Machine Learning from Disaster
13,502,313
print(f'Training final model for {epochs} epochs') for epoch in range(1, epochs + 1): print(f'| epoch {epoch:03d} | train accuracy={train(model, optimizer, train_data + val_data, batch_size, token_size, log=epoch==1):.3f}' )<train_model>
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X_train, y_train) predictions = model.predict(X_test)
Titanic - Machine Learning from Disaster
13,502,313
def test(model, data, batch_size, token_size): model.eval() sindex = [] labels = [] with torch.no_grad() : for batch in pool_generator(data, batch_size, token_size): X = [torch.from_numpy(d[0])for d in batch] X_lengths = torch.tensor([x.numel() for x in X], dtype=torch.long, device=device) X = torch.nn.utils.rnn.pad_sequence(X ).to(device) answer = model(X, X_lengths) label = torch.max(answer, 1)[1].cpu().numpy() labels.append(label) sindex += [d[1] for d in batch] return np.array(sindex), np.concatenate(labels )<string_transform>
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
14,025,979
x_test_txt = open(".. /input/x_test.txt" ).read().splitlines() x_test_idx = [np.array([char_vocab.token2idx[c] if c in char_vocab.token2idx else unk_index for c in line])for line in x_test_txt] test_data = [(x, idx)for idx, x in enumerate(x_test_idx)]<train_model>
%matplotlib inline warnings.filterwarnings('ignore') train=pd.read_csv('/kaggle/input/titanic/train.csv') test=pd.read_csv('/kaggle/input/titanic/test.csv') PassengerId=test['PassengerId'] all_data = pd.concat([train, test], ignore_index = True )
Titanic - Machine Learning from Disaster
14,025,979
index, labels = test(model, test_data, batch_size, token_size) order = np.argsort(index) labels = labels[order]<save_to_csv>
all_data['Title'] = all_data['Name'].apply(lambda x:x.split(',')[1].split('.')[0].strip()) Title_Dict = {} Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major', 'Dr', 'Rev'], 'Officer')) Title_Dict.update(dict.fromkeys(['Don', 'Sir', 'the Countess', 'Dona', 'Lady'], 'Royalty')) Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 'Mrs')) Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], 'Miss')) Title_Dict.update(dict.fromkeys(['Mr'], 'Mr')) Title_Dict.update(dict.fromkeys(['Master','Jonkheer'], 'Master')) all_data['Title'] = all_data['Title'].map(Title_Dict) sns.barplot(x='Title', y='Survived', data=all_data )
Titanic - Machine Learning from Disaster
14,025,979
with open('submission.csv', 'w')as f: print('Id,Language', file=f) for sentence_id, lang_id in enumerate(labels): language = lang_vocab.idx2token[lang_id] if sentence_id < 10: print(f'{sentence_id},{language}') print(f'{sentence_id},{language}', file=f )<set_options>
all_data[all_data['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
14,025,979
%matplotlib inline plt.style.use('seaborn-whitegrid') warnings.filterwarnings('ignore' )<load_from_csv>
all_data[all_data['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
14,025,979
train_data = pd.read_csv('.. /input/challenge-gh/TrainData.csv', encoding='ISO-8859-1',sep=';') test_data = pd.read_csv('.. /input/challenge-gh/TestData.csv', encoding='ISO-8859-1',sep=';') submission_data = pd.read_csv('.. /input/challenge-gh/Lsungstemplate.csv', encoding='ISO-8859-1' )<categorify>
def info_fill(all_data): all_data['Embarked'] = all_data['Embarked'].fillna('C') fare=all_data[(all_data['Embarked'] == "S")&(all_data['Pclass'] == 3)].Fare.median() all_data['Fare']=all_data['Fare'].fillna(fare) age_df = all_data[['Age', 'Pclass', 'Sex', 'Title', 'SibSp', 'Parch']] age_df=pd.get_dummies(age_df) known_age = age_df[age_df.Age.notnull() ].values unknown_age = age_df[age_df.Age.isnull() ].values y = known_age[:, 0] X = known_age[:, 1:] rfr = RandomForestRegressor(random_state=0, n_estimators=100, n_jobs=-1) rfr.fit(X, y) predictedAges = rfr.predict(unknown_age[:, 1::]) all_data.loc[(all_data.Age.isnull()), 'Age' ] = predictedAges return all_data
Titanic - Machine Learning from Disaster
14,025,979
df_train = train_data.replace('Unbekannt', np.nan, inplace=False )<categorify>
def TrainPreprocess(train): cols=['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Title', 'Fare', 'Embarked'] train = train[cols] train_linear = train[['Age', 'Fare']] train['Sex'] = train['Sex'].map({'male': 0, 'female': 1} ).astype(int) train['Embarked'] = train['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int) train['Title'] = train['Title'].map({'Officer': 0, 'Royalty': 1, 'Mrs': 2, 'Miss': 3, 'Mr': 4, 'Master':5} ).astype(int) train_non_linear = train[['Pclass', 'Sex', 'SibSp', 'Parch', 'Title', 'Embarked']] train_non_linear = pd.get_dummies(train_non_linear) features = pd.concat([train_linear, train_non_linear], axis=1) features = features.values mean = features.mean(axis=0) features -= mean std = features.std(axis=0) features /= std label = train['Survived'].values return features, label
Titanic - Machine Learning from Disaster
14,025,979
def encode_categorical_features(df): lb_make = LabelEncoder() obj_df = df.select_dtypes(include=['object'] ).copy() cat_col_names=obj_df.columns.tolist() for name in cat_col_names: df[name] = lb_make.fit_transform(df[name]) return df<train_model>
def TestPreprocess(test): cols=['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Title', 'Fare', 'Embarked'] test = test[cols] test_linear = test[['Age', 'Fare']] test['Sex'] = test['Sex'].map({'male': 0, 'female': 1} ).astype(int) test['Embarked'] = test['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int) test['Title'] = test['Title'].map({'Officer': 0, 'Royalty': 1, 'Mrs': 2, 'Miss': 3, 'Mr': 4, 'Master':5} ).astype(int) test_non_linear = test[['Pclass', 'Sex', 'SibSp', 'Parch', 'Title', 'Embarked']] test_non_linear = pd.get_dummies(test_non_linear) test = pd.concat([test_linear, test_non_linear], axis=1) features = test.values mean = features.mean(axis=0) features -= mean std = features.std(axis=0) features /= std return features
Titanic - Machine Learning from Disaster
14,025,979
<categorify>
batchsz = 20 _lambda = 0.1
Titanic - Machine Learning from Disaster
14,025,979
columns_to_drop=columns_to_drop = ['Stammnummer', 'Anruf-ID','Tage seit letzter Kampagne','Ergebnis letzte Kampagne','Tag','Alter','Kontostand','Anzahl der Ansprachen','Anzahl Kontakte letzte Kampagne'] df_train=df_train.drop(columns_to_drop,axis=1) df_train = df_train.apply(lambda x: x.fillna(x.value_counts().index[0])) df_train=encode_categorical_features(df_train )<normalization>
def build_network() : network = Sequential([ layers.Dense(64, activation='relu', kernel_regularizer=regularizers.l2(_lambda)) , layers.Dropout(rate=0.5), layers.Dense(32, activation='relu', kernel_regularizer=regularizers.l2(_lambda)) , layers.Dropout(rate=0.5), layers.Dense(1, activation='sigmoid') ]) network.build(input_shape=(batchsz, 8)) network.compile(optimizer=optimizers.Adam(lr=0.001),loss=losses.BinaryCrossentropy(from_logits=False), metrics='accuracy') network.summary() return network
Titanic - Machine Learning from Disaster
14,025,979
df_train[['Dauer']] = df_train[['Dauer']].apply(np.sqrt) scaler = MinMaxScaler(feature_range=(0, 1)) df_train[['Dauer']] = scaler.fit_transform(df_train[['Dauer']]) train_features_all = df_train.drop(['Zielvariable'], axis=1, errors='ignore') train_labels_all =df_train.Zielvariable<train_on_grid>
train = all_data[0:891] test = all_data[891:len(all_data)] x_train, y_train = TrainPreprocess(train) x_test = TestPreprocess(test) db_train = tf.data.Dataset.from_tensor_slices(( x_train, y_train)) db_train = db_train.shuffle(1000 ).batch(batchsz) network = build_network() network.fit(db_train, epochs=300) y_test = network.predict(x_test) print(y_test) with open('survival_rate.csv','w+',newline='')as f: csv_file = csv.writer(f) csv_file.writerows(y_test) my_prediction = [] for i in range(len(y_test)) : my_prediction.append(1 if(y_test[i] > 0.5)else 0) my_submission = pd.DataFrame({'PassengerId': PassengerId, 'Survived': my_prediction}) my_submission.to_csv('my_submission.csv', index=False, sep=',' )
Titanic - Machine Learning from Disaster
13,824,870
def hyper_parameter_tuning(clf_model,Xtrain,ytrain,parameters): precision_scorer = make_scorer(precision_score) grid_obj = GridSearchCV(estimator=clf_model, param_grid=parameters, scoring=precision_scorer,cv = 5, n_jobs = -1, verbose = 2) grid_obj = grid_obj.fit(Xtrain, ytrain) model = grid_obj.best_estimator_ return model <choose_model_class>
%matplotlib inline
Titanic - Machine Learning from Disaster
13,824,870
def recursiveFeature_elimination(df, model):
    """Select features with RFECV after SMOTE-balancing the classes.

    Splits *df* into features and the 'Zielvariable' target, oversamples
    the minority class, then runs recursive feature elimination with
    5-fold CV accuracy as the criterion.

    Returns the list of retained column names.
    """
    features = df.drop('Zielvariable', axis=1)
    label = df['Zielvariable']
    print('original dataset shape %s' % Counter(label))
    # Balance the classes before feature selection so accuracy is meaningful.
    sm = SMOTE(random_state=43, sampling_strategy='auto')
    features_resampled, labels_resampled = sm.fit_resample(features, label)
    print('Resampled dataset shape %s' % Counter(labels_resampled))
    clf = model
    # Eliminate one feature per step; CV decides how many survive.
    RFE = RFECV(clf, step=1, cv=5, scoring='accuracy')
    RFE.fit(features_resampled, labels_resampled)
    support = RFE.support_
    feature_names = np.array(features.columns.tolist())
    features_to_use = feature_names[support].tolist()
    print(len(feature_names))
    print(len(features_to_use))
    print(features_to_use)
    return features_to_use
train = pd.read_csv('.. /input/titanic/train.csv') test = pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
13,824,870
def run_kfold(model, X_all, y_all):
    """Evaluate *model* with stratified 10-fold CV.

    Prints precision, accuracy and F1 per fold and their means; returns
    nothing. X_all/y_all are pandas objects (indexed via .values).
    """
    splitter = StratifiedKFold(n_splits=10, random_state=43, shuffle=True)
    precisions, accuracies, f1s = [], [], []
    for fold, (train_idx, test_idx) in enumerate(splitter.split(X_all, y_all), start=1):
        X_train, X_test = X_all.values[train_idx], X_all.values[test_idx]
        y_train, y_test = y_all.values[train_idx], y_all.values[test_idx]
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        prec = precision_score(y_test, predictions)
        acc = accuracy_score(y_test, predictions)
        f1 = f1_score(y_test, predictions)
        precisions.append(prec)
        accuracies.append(acc)
        f1s.append(f1)
        print("Fold {0} precision_score: {1}".format(fold, prec))
        print("Fold {0} accuracy_score: {1}".format(fold, acc))
        print("Fold {0} F1_score: {1}".format(fold, f1))
    print("Mean precision_score: {0}".format(np.mean(precisions)))
    print("Mean accuracy_score: {0}".format(np.mean(accuracies)))
    print("Mean F1_score: {0}".format(np.mean(f1s)))
    return
train.set_index('PassengerId', inplace=True) test.set_index('PassengerId', inplace=True )
Titanic - Machine Learning from Disaster
13,824,870
# End-to-end logistic-regression run: grid-search hyperparameters,
# select features by RFECV, report CV scores, refit on SMOTE-balanced
# data, then score the held-out test frame.
classification_model = LogisticRegression(random_state=1)
parameters = {
    'C': [0.8, 0.9, 1.0, 1.2],
    'class_weight': [None, 'balanced'],
    'solver': ['liblinear', 'sag', 'saga'],
}
best_model = hyper_parameter_tuning(classification_model, train_features_all, train_labels_all, parameters)
features_to_use = recursiveFeature_elimination(df_train, best_model)
df_filtered_features = df_train[features_to_use]
df_filtered_label = df_train.Zielvariable
run_kfold(best_model, df_filtered_features, df_filtered_label)
# Refit the tuned model on class-balanced training data.
sm_obj = SMOTE(random_state=43, sampling_strategy='auto')
features_resampled, labels_resampled = sm_obj.fit_resample(df_filtered_features, df_filtered_label)
best_model.fit(features_resampled, labels_resampled)
# Apply the same encoding + sqrt/min-max transform to the test split.
df_test = test_data[features_to_use]
df_test = encode_categorical_features(df_test)
if 'Dauer' in df_test.columns:
    df_test[['Dauer']] = df_test[['Dauer']].apply(np.sqrt)
    scaler = MinMaxScaler(feature_range=(0, 1))
    df_test[['Dauer']] = scaler.fit_transform(df_test[['Dauer']])
test_features_all = df_test
test_labels_all = test_data.Zielvariable
predictions = best_model.predict(test_features_all)
prediction_proba = best_model.predict_proba(test_features_all)
# NOTE(review): column 0 is the probability of the first (negative)
# class — confirm this is the intended submission target.
pos_label_proba = prediction_proba[:, 0]
train.drop(['Ticket','Cabin'], axis=1, inplace=True) test.drop(['Ticket','Cabin'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
13,824,870
# NOTE(review): the ROC curve is computed against the model's own hard
# predictions, not ground-truth labels — this measures threshold
# self-consistency rather than real AUC; confirm intent.
y = predictions
pred = pos_label_proba
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=0)
AUC_score = metrics.auc(fpr, tpr)
# Write the class-0 probabilities as the submission's 'Expected' column.
df_submission = pd.DataFrame()
df_submission['ID'] = submission_data['ID']
df_submission['Expected'] = pos_label_proba
df_submission.to_csv('Logistc_Regression_prediction.csv', index=False)
train['Embarked'].fillna('S', inplace=True )
Titanic - Machine Learning from Disaster
13,824,870
# Same pipeline as the other model runs, with a gradient-boosting
# classifier: tune, feature-select, CV-report, refit on SMOTE-balanced
# data, then predict the test split.
classification_model = GradientBoostingClassifier(random_state=1)
parameters = {
    'n_estimators': [100, 200, 300],
    'max_features': ['sqrt', 'auto'],
    'max_depth': [2, 3, 5],
}
best_model = hyper_parameter_tuning(classification_model, train_features_all, train_labels_all, parameters)
features_to_use = recursiveFeature_elimination(df_train, best_model)
df_filtered_features = df_train[features_to_use]
df_filtered_label = df_train.Zielvariable
run_kfold(best_model, df_filtered_features, df_filtered_label)
sm_obj = SMOTE(random_state=43, sampling_strategy='auto')
features_resampled, labels_resampled = sm_obj.fit_resample(df_filtered_features, df_filtered_label)
best_model.fit(features_resampled, labels_resampled)
# Mirror the training-time encoding and 'Dauer' sqrt + min-max scaling.
df_test = test_data[features_to_use]
df_test = encode_categorical_features(df_test)
if 'Dauer' in df_test.columns:
    df_test[['Dauer']] = df_test[['Dauer']].apply(np.sqrt)
    scaler = MinMaxScaler(feature_range=(0, 1))
    df_test[['Dauer']] = scaler.fit_transform(df_test[['Dauer']])
test_features_all = df_test
test_labels_all = test_data.Zielvariable
predictions = best_model.predict(test_features_all)
prediction_proba = best_model.predict_proba(test_features_all)
# Probability of the first (negative) class, used as submission score.
pos_label_proba = prediction_proba[:, 0]
test[test['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
13,824,870
# NOTE(review): ROC computed against the model's own hard predictions,
# not ground truth — confirm this is intended.
y = predictions
pred = pos_label_proba
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=0)
AUC_score = metrics.auc(fpr, tpr)
AUC_score  # notebook-style display of the score
df_submission = pd.DataFrame()
df_submission['ID'] = submission_data['ID']
df_submission['Expected'] = pos_label_proba
df_submission.to_csv('Gradient_Boosting_prediction.csv', index=False)
test['Fare'].fillna(7.9, inplace=True )
Titanic - Machine Learning from Disaster
13,824,870
# Decision-tree variant of the pipeline: tune, feature-select, CV-report,
# refit on SMOTE-balanced data, predict the test split, and write the
# class-0 probabilities to CSV.
classification_model = DecisionTreeClassifier(random_state=1)
parameters = {
    'class_weight': [None, 'balanced'],
    'max_features': ['sqrt', 'auto', 'log2'],
    'max_depth': [None, 2, 3, 5],
}
best_model = hyper_parameter_tuning(classification_model, train_features_all, train_labels_all, parameters)
features_to_use = recursiveFeature_elimination(df_train, best_model)
df_filtered_features = df_train[features_to_use]
df_filtered_label = df_train.Zielvariable
run_kfold(best_model, df_filtered_features, df_filtered_label)
sm_obj = SMOTE(random_state=43, sampling_strategy='auto')
features_resampled, labels_resampled = sm_obj.fit_resample(df_filtered_features, df_filtered_label)
best_model.fit(features_resampled, labels_resampled)
# Mirror the training-time encoding and 'Dauer' transform on the test set.
df_test = test_data[features_to_use]
df_test = encode_categorical_features(df_test)
if 'Dauer' in df_test.columns:
    df_test[['Dauer']] = df_test[['Dauer']].apply(np.sqrt)
    scaler = MinMaxScaler(feature_range=(0, 1))
    df_test[['Dauer']] = scaler.fit_transform(df_test[['Dauer']])
test_features_all = df_test
test_labels_all = test_data.Zielvariable
predictions = best_model.predict(test_features_all)
prediction_proba = best_model.predict_proba(test_features_all)
pos_label_proba = prediction_proba[:, 0]
# NOTE(review): ROC is computed against the model's own predictions,
# not ground truth — confirm intent.
y = predictions
pred = pos_label_proba
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=0)
AUC_score = metrics.auc(fpr, tpr)
df_submission = pd.DataFrame()
df_submission['ID'] = submission_data['ID']
df_submission['Expected'] = pos_label_proba
df_submission.to_csv('Decision_tree_prediction.csv', index=False)
train['Sex'] = preprocessing.LabelEncoder().fit_transform(train['Sex'].values) test['Sex'] = preprocessing.LabelEncoder().fit_transform(test['Sex'].values) train['Embarked'] = preprocessing.LabelEncoder().fit_transform(train['Embarked'].values) test['Embarked'] = preprocessing.LabelEncoder().fit_transform(test['Embarked'].values )
Titanic - Machine Learning from Disaster
13,824,870
# Random-forest variant of the pipeline: tune, feature-select, CV-report,
# refit on SMOTE-balanced data, predict the test split, and write the
# class-0 probabilities to CSV.
classification_model = RandomForestClassifier(random_state=1)
parameters = {
    'class_weight': [None, 'balanced'],
    'n_estimators': [100, 200, 300],
    'max_features': ['sqrt', 'auto'],
    'max_depth': [None, 2, 3, 5],
}
best_model = hyper_parameter_tuning(classification_model, train_features_all, train_labels_all, parameters)
features_to_use = recursiveFeature_elimination(df_train, best_model)
df_filtered_features = df_train[features_to_use]
df_filtered_label = df_train.Zielvariable
run_kfold(best_model, df_filtered_features, df_filtered_label)
sm_obj = SMOTE(random_state=43, sampling_strategy='auto')
features_resampled, labels_resampled = sm_obj.fit_resample(df_filtered_features, df_filtered_label)
best_model.fit(features_resampled, labels_resampled)
# Mirror the training-time encoding and 'Dauer' transform on the test set.
df_test = test_data[features_to_use]
df_test = encode_categorical_features(df_test)
if 'Dauer' in df_test.columns:
    df_test[['Dauer']] = df_test[['Dauer']].apply(np.sqrt)
    scaler = MinMaxScaler(feature_range=(0, 1))
    df_test[['Dauer']] = scaler.fit_transform(df_test[['Dauer']])
test_features_all = df_test
test_labels_all = test_data.Zielvariable
predictions = best_model.predict(test_features_all)
prediction_proba = best_model.predict_proba(test_features_all)
pos_label_proba = prediction_proba[:, 0]
# NOTE(review): ROC is computed against the model's own predictions,
# not ground truth — confirm intent.
y = predictions
pred = pos_label_proba
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=0)
AUC_score = metrics.auc(fpr, tpr)
df_submission = pd.DataFrame()
df_submission['ID'] = submission_data['ID']
df_submission['Expected'] = pos_label_proba
df_submission.to_csv('Random_Forest_Classifier_prediction.csv', index=False)
for dataset in train, test: dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0 dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int )
Titanic - Machine Learning from Disaster
13,824,870
import lightgbm as lgb from sklearn.model_selection import GroupKFold<load_from_csv>
train["Age"] = train[['Pclass', 'Sex', 'Age']].groupby(['Pclass', 'Sex'] ).transform(lambda x: x.fillna(x.median())) test["Age"] = test[['Pclass', 'Sex', 'Age']].groupby(['Pclass', 'Sex'] ).transform(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
13,824,870
df = pd.read_csv('.. /input/Training_data.csv') df_test = pd.read_csv('.. /input/Testing_data.csv') df_sub = pd.read_csv('.. /input/Submission.csv' )<prepare_x_and_y>
for dataset in train, test: dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 dataset['Age'] = dataset['Age'].astype(int )
Titanic - Machine Learning from Disaster
13,824,870
features = df.columns[1:-1] target = df.target<count_unique_values>
train.drop('Name', axis=1, inplace=True) test.drop('Name', axis=1, inplace=True )
Titanic - Machine Learning from Disaster
13,824,870
def get_folds(df=None, n_splits=5, index='index'):
    """Build group-aware fold indices over *df*.

    Rows that share the same value in column *index* are kept together,
    so a group never appears in both halves of a fold.

    Returns a list of [train_row_ids, valid_row_ids] ndarray pairs.
    """
    groups = np.array(sorted(df[index].unique()))
    row_ids = np.arange(df.shape[0])
    fold_ids = []
    # Split the unique group values, then map each side back to row ids.
    for trn_groups, val_groups in GroupKFold(n_splits=n_splits).split(
            X=groups, y=groups, groups=groups):
        fold_ids.append([
            row_ids[df[index].isin(groups[trn_groups])],
            row_ids[df[index].isin(groups[val_groups])],
        ])
    return fold_ids
y_train = train["Survived"] train.drop('Survived', axis=1, inplace=True) train.shape, y_train.shape, test.shape
Titanic - Machine Learning from Disaster
13,824,870
X = df[features] y = target<init_hyperparams>
random_forest = RandomForestClassifier() random_forest.fit(train, y_train) scores = cross_validate(random_forest, train, y_train, scoring='roc_auc', cv=5, return_train_score=True) scores
Titanic - Machine Learning from Disaster
13,824,870
# Train a LightGBM binary classifier with 5 grouped CV folds; the test
# predictions are the average of the per-fold models.
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': 'auc',
    'num_leaves': 63,
    'max_depth': 6,
    'learning_rate': 0.03,
    "bagging_fraction": 0.9,
    "feature_fraction": 0.9,
    'random_state': 1,
}
sub_reg_preds = np.zeros(df_test.shape[0])
folds = get_folds(df=df.reset_index(), index='index', n_splits=5)
for fold_, (trn_, val_) in enumerate(folds):
    print("-" * 20 + "Fold :" + str(fold_ + 1) + "-" * 20)
    trn_x, trn_y = pd.DataFrame(X).iloc[trn_], pd.DataFrame(y).iloc[trn_]
    val_x, val_y = pd.DataFrame(X).iloc[val_], pd.DataFrame(y).iloc[val_]
    lgb_train = lgb.Dataset(trn_x, label=trn_y)
    lgb_eval = lgb.Dataset(val_x, label=val_y)
    # Up to 10k rounds, stopping 50 rounds after validation AUC plateaus.
    model = lgb.train(params, lgb_train, 10000, valid_sets=[lgb_eval],
                      early_stopping_rounds=50, verbose_eval=100)
    _preds = model.predict(df_test[features], num_iteration=model.best_iteration)
    # Clip any negative outputs to zero before averaging across folds.
    _preds[_preds < 0] = 0
    sub_reg_preds += _preds / len(folds)
print(" modeling finished!! ")
n_est = [int(x)for x in np.linspace(start = 200, stop = 2000, num = 10)] max_feat = ['auto', 'sqrt'] max_depth = [int(x)for x in np.linspace(5, 110, num = 11)] max_depth.append(None) min_samples_split = [2, 5, 10] min_samples_leaf = [1, 2, 4] bootstrap = [True, False] random_grid = {'n_estimators': n_est, 'max_features': max_feat, 'max_depth': max_depth, 'criterion' :['gini', 'entropy'], 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap} random_grid
Titanic - Machine Learning from Disaster
13,824,870
df_sub['Probability'] = sub_reg_preds df_sub.to_csv('sub.csv',index=False )<set_options>
model = RandomForestClassifier(bootstrap=False, criterion='entropy', max_depth=5, min_samples_leaf=2, min_samples_split=10, n_estimators=1800) model.fit(train, y_train) y_pred = model.predict(test )
Titanic - Machine Learning from Disaster
13,824,870
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'<init_hyperparams>
submission = pd.DataFrame({ "PassengerId": test.index, "Survived": y_pred }) submission.to_csv('./submission.csv', index=False )
Titanic - Machine Learning from Disaster
13,900,391
look_back = 7 epochs = 10 batch_size = 32<define_variables>
train= pd.read_csv('.. /input/titanic/train.csv') test= pd.read_csv('.. /input/titanic/test.csv')
Titanic - Machine Learning from Disaster
13,900,391
np.random.seed(7 )<load_from_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,900,391
prices_dataset = pd.read_csv('.. /input/train.csv', header=0) test_dataset = pd.read_csv('.. /input/test.csv', header=0 )<prepare_x_and_y>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,900,391
def create_dataset(dataset, look_back):
    """Build supervised-learning pairs from a univariate time series.

    Parameters
    ----------
    dataset : array-like of shape (n, 1)
        Series values; only column 0 is read.
    look_back : int
        Number of past steps used as features for each target.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        X of shape (n - look_back - 1, look_back) and y of shape
        (n - look_back - 1,), where y[i] is the value that follows
        window X[i].
    """
    # Fix: dropped the `inds` list the original accumulated but never used.
    dataX, dataY = [], []
    # NOTE: the range stops at n - look_back - 1, so the last possible
    # window is intentionally skipped (kept to preserve original behavior).
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:(i + look_back), 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
train['Embarked'].nunique()
Titanic - Machine Learning from Disaster
13,900,391
# Per-asset LSTM forecasting: for every asset in the training data, fit a
# small LSTM on scaled close prices, report train/test RMSE, plot the
# fit, then predict returns on the test file and stack them into res_all.
res_all = np.array([]).reshape(-1, 1)
for asset in prices_dataset.asset.unique():
    apple = prices_dataset[prices_dataset['asset'] == asset]
    apple_stock_prices = apple.close.values.astype('float32')
    apple_stock_prices = apple_stock_prices.reshape(len(apple_stock_prices), 1)
    # Scale each asset independently into [0, 1].
    scaler = MinMaxScaler(feature_range=(0, 1))
    apple_stock_prices = scaler.fit_transform(apple_stock_prices)
    # 67/33 chronological train/test split.
    train_size = int(len(apple_stock_prices) * 0.67)
    test_size = len(apple_stock_prices) - train_size
    train, test = apple_stock_prices[0:train_size, :], apple_stock_prices[train_size:len(apple_stock_prices), :]
    print('Split data into training set and test set...Number of training samples/ test samples:', len(train), len(test))
    trainX, trainY = create_dataset(train, look_back)
    testX, testY = create_dataset(test, look_back)
    # LSTM expects (samples, timesteps, features).
    trainX = np.reshape(trainX, (trainX.shape[0], trainX.shape[1], 1))
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
    model = Sequential()
    model.add(LSTM(4, input_shape=(look_back, 1)))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    model.fit(trainX, trainY, nb_epoch=epochs, batch_size=batch_size, verbose=0)
    trainPredict = model.predict(trainX)
    testPredict = model.predict(testX)
    # Undo the min-max scaling before computing RMSE in price units.
    trainPredict = scaler.inverse_transform(trainPredict)
    trainY = scaler.inverse_transform([trainY])
    testPredict = scaler.inverse_transform(testPredict)
    testY = scaler.inverse_transform([testY])
    trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
    print('Train Score: %.2f RMSE' % (trainScore))
    testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
    print('Test Score: %.2f RMSE' % (testScore))
    # Align predictions with the original series for plotting.
    trainPredictPlot = np.empty_like(apple_stock_prices)
    trainPredictPlot[:, :] = np.nan
    trainPredictPlot[look_back:len(trainPredict) + look_back, :] = trainPredict
    testPredictPlot = np.empty_like(apple_stock_prices)
    testPredictPlot[:, :] = np.nan
    testPredictPlot[len(trainPredict) + (look_back * 2) + 1:len(apple_stock_prices) - 1, :] = testPredict
    plt.plot(scaler.inverse_transform(apple_stock_prices))
    plt.plot(trainPredictPlot)
    plt.plot(testPredictPlot)
    plt.show()
    # Predict on the held-out test file for this asset and convert the
    # predicted prices into simple returns (p[t]/p[t-1] - 1).
    apple = test_dataset[test_dataset['asset'] == asset]
    apple_stock_prices = apple.close.values.astype('float32')
    apple_stock_prices = apple_stock_prices.reshape(len(apple_stock_prices), 1)
    scaler = MinMaxScaler(feature_range=(0, 1))
    apple_stock_prices = scaler.fit_transform(apple_stock_prices)
    testX, testY = create_dataset(apple_stock_prices, look_back)
    testX = np.reshape(testX, (testX.shape[0], testX.shape[1], 1))
    testPredict = model.predict(testX)
    price = testPredict[1:] / testPredict[:-1] - 1
    price = np.append([0], price).reshape(-1, 1)
    # Zero-pad each asset's returns to a fixed length of 1440 rows.
    z = 1440 - price.shape[0]
    res = np.append(price, np.zeros(z)).reshape(-1, 1)
    res_all = np.vstack((res_all, res))
def preprocess_data(data):
    """Encode the raw Titanic frame into purely numeric features.

    Drops free-text columns, label-encodes Sex/Embarked, forward-fills
    Age, fills missing Fare with 0, and derives Deck/Room from the Cabin
    string ('C85' -> deck 'C', room 85). Returns a new DataFrame; the
    caller's frame is not modified.
    """
    data = data.drop(['Name', 'Ticket'], axis=1)
    # Fixes vs. original: plain column assignment instead of chained
    # `inplace=True` replace on a selected column, and the deprecated
    # fillna(method='ffill') replaced by .ffill() — results unchanged.
    data['Sex'] = data['Sex'].replace({'male': '0', 'female': '1'}).astype(int)
    # Missing embarkation port collapses to 0, the same code as 'S'.
    data['Embarked'] = (
        data['Embarked'].fillna(0).replace({'S': '0', 'C': '1', 'Q': '2'}).astype(int)
    )
    # Order-dependent imputation: each missing age copies the previous row.
    data['Age'] = data['Age'].ffill()
    data['Deck'] = data['Cabin'].str.slice(0, 1)
    data['Room'] = (
        data['Cabin'].str.slice(1, 5).str.extract('([0-9]+)', expand=False).astype('float')
    )
    data['Deck'] = data['Deck'].fillna(0)
    data['Room'] = data['Room'].fillna(0)
    data['Fare'] = data['Fare'].fillna(0)
    data['Deck'] = data['Deck'].replace(
        {'A': '0', 'B': '1', 'C': '2', 'D': '3', 'F': '4', 'G': '5', 'T': '6', 'E': '7'}
    ).astype(int)
    return data
Titanic - Machine Learning from Disaster
13,900,391
scaler = MinMaxScaler(feature_range=(-1, 1)) res_all = scaler.fit_transform(res_all )<filter>
final_train= preprocess_data(train) final_test= preprocess_data(test )
Titanic - Machine Learning from Disaster
13,900,391
res_all[res_all<-1]=-1<filter>
final_train = final_train.drop(['Cabin'], axis=1) final_train
Titanic - Machine Learning from Disaster
13,900,391
res_all[res_all>1]=1<create_dataframe>
final_test= final_test.drop(['Cabin'], axis=1) final_test.isnull().sum()
Titanic - Machine Learning from Disaster
13,900,391
out_df = pd.DataFrame(res_all, columns=['expected'] )<save_to_csv>
y = final_train['Survived'] X = final_train.drop(['Survived'], axis=1 )
Titanic - Machine Learning from Disaster
13,900,391
out_df.to_csv('Elastic_net.csv',index = True )<load_from_csv>
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state= 42, test_size=0.20) lin= LinearRegression()
Titanic - Machine Learning from Disaster
13,900,391
df_res = pd.read_csv('Elastic_net.csv') df_res.columns = ['id', 'expected']<save_to_csv>
model= RandomForestClassifier(random_state=1, n_estimators=200, max_depth=10, criterion='entropy', verbose=1) model.fit(x_train, y_train) prediction= model.predict(x_test) final_predict = model.predict(final_test)
Titanic - Machine Learning from Disaster
13,900,391
df_res.to_csv('submit.csv', index=False )<save_to_csv>
# NOTE(review): the synthetic X/y from make_classification are generated
# but never used — the ensemble is fit on the Titanic split (x_train /
# y_train) below; confirm whether the synthetic data was meant for a demo.
X, y = make_classification(n_samples=100, n_features=7, n_informative=2,
                           n_redundant=0, random_state=1, shuffle=True)
# Bagged random forests: 10 RandomForestClassifier base estimators.
clf = BaggingClassifier(base_estimator=RandomForestClassifier(),
                        n_estimators=10, random_state=1).fit(x_train, y_train)
clf.predict(x_test)  # notebook-style display of held-out predictions
final_predict2 = clf.predict(final_test)
Titanic - Machine Learning from Disaster
13,900,391
print(os.listdir(".. /input")) df = pd.DataFrame({'id': [1,2], 'y': [100,100]}) df.head() df.to_csv("submission.csv", header = True, index = False) <load_from_csv>
output = pd.DataFrame({'PassengerId': final_test.PassengerId, 'Survived': final_predict}) output.to_csv('my_submission_rf.csv', index=False) print(output )
Titanic - Machine Learning from Disaster
13,900,391
name = os.path.join(".. /input/ai-academy-intermediate-class-competition-1", "BBC News Train.csv") data = pd.read_csv(name) data = data[["Text", "Category"]] <feature_engineering>
output = pd.DataFrame({'PassengerId': final_test.PassengerId, 'Survived': final_predict2}) output.to_csv('my_submission_clf.csv', index=False) print(output )
Titanic - Machine Learning from Disaster
13,868,595
vectorizer = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english') X = vectorizer.fit_transform(data["Text"]) print(len(vectorizer.get_feature_names())) print(X.shape )<feature_engineering>
import seaborn as sns import matplotlib.pyplot as plt
Titanic - Machine Learning from Disaster
13,868,595
data["category_id"]=data["Category"].factorize() [0]<remove_duplicates>
from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
13,868,595
data = data[["Text", "category_id", "Category"]] data category_id_data = data[['Category', 'category_id']].drop_duplicates().sort_values('category_id') category_to_id = dict(category_id_data.values) id_to_category = dict(category_id_data[['category_id', 'Category']].values) category_id_data id_to_category<categorify>
from sklearn.ensemble import ExtraTreesClassifier
Titanic - Machine Learning from Disaster
13,868,595
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english') features = tfidf.fit_transform(data.Text ).toarray() print(features) labels = data.category_id print(labels) features.shape<statistical_test>
df = pd.read_csv('.. /input/titanic/train.csv') test_df = pd.read_csv('.. /input/titanic/test.csv') combine = [df, test_df]
Titanic - Machine Learning from Disaster
13,868,595
N = 5
# For each category, rank TF-IDF features by chi-squared correlation with
# that class and print the top-N unigrams and bigrams.
# Fix: the original first print() was garbled/truncated mid-string (a
# syntax error); the three prints are reconstructed here.
for category, category_id in sorted(category_to_id.items()):
    features_chi2 = chi2(features, labels == category_id)
    # argsort ascending, so the most correlated terms are at the end.
    indices = np.argsort(features_chi2[0])
    feature_names = np.array(tfidf.get_feature_names())[indices]
    unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
    bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
    print("# '{}':".format(category))
    print("  . Most correlated unigrams: {}".format(', '.join(unigrams[-N:])))
    print("  . Most correlated bigrams: {}".format(', '.join(bigrams[-N:])))
df.isnull().sum()
Titanic - Machine Learning from Disaster
13,868,595
# Compare the candidate models on a single held-out split and print each
# model's accuracy.
X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(
    features, labels, data.index, test_size=0.33, random_state=0)
for model in models:
    model.fit(X_train, y_train)
    # NOTE(review): y_pred_proba is computed but unused — confirm whether
    # probability output was intended for a later cell.
    y_pred_proba = model.predict_proba(X_test)
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print(accuracy)
for dataset in combine: dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False) for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\ 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') df[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()
Titanic - Machine Learning from Disaster
13,868,595
model = models[2] model.fit(features, labels) model.coef_<load_from_csv>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0 )
Titanic - Machine Learning from Disaster
13,868,595
test_data = pd.read_csv(".. /input/bbc-test/BBC News Test.csv") test_data<predict_on_test>
emb = pd.get_dummies(df.Title) df = df.join(emb) df = df.rename(columns={1:"Mr", 2:"Miss", 3:"Mrs", 4:"Master", 5:"Rare"}) df = df.drop(["Title"],axis=1) emb = pd.get_dummies(test_df.Title) test_df = test_df.join(emb) test_df = test_df.rename(columns={1:"Mr", 2:"Miss", 3:"Mrs", 4:"Master", 5:"Rare"}) test_df = test_df.drop(["Title"],axis=1) combine = [df, test_df]
Titanic - Machine Learning from Disaster
13,868,595
test_data.Text.tolist() test_features = tfidf.transform(test_data.Text.tolist()) Y_pred = model.predict(test_features) Y_pred submission = [] for pred in Y_pred: submission.append(id_to_category[pred]) submission<create_dataframe>
freq_port = df.Embarked.dropna().mode() [0] freq_port
Titanic - Machine Learning from Disaster
13,868,595
submission = pd.DataFrame({ "ArticleId": test_data["ArticleId"], "Category": submission }) submission<save_to_csv>
for dataset in combine: dataset['Embarked'] = dataset['Embarked'].fillna(freq_port) df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
13,868,595
submission.to_csv('submission.csv', index=False )<save_to_csv>
for dataset in combine: dataset["Sex"] = dataset["Sex"].map({'male': 1,'female':0}) dataset["Embarked"] = dataset["Embarked"].map({'S': 1,'C':2,'Q':3}) emb = pd.get_dummies(df.Embarked) df = df.join(emb) df = df.rename(columns={1.0: "S", 2.0: "C", 3.0 : "Q"}) emb = pd.get_dummies(test_df.Embarked) test_df = test_df.join(emb) test_df = test_df.rename(columns={1.0: "S", 2.0: "C", 3.0 : "Q"}) df = df.drop(["Name","Ticket","Embarked","PassengerId"],axis = 1) test_df = test_df.drop(["Name","Ticket","Embarked"],axis = 1) combine = [df, test_df]
Titanic - Machine Learning from Disaster
13,868,595
submission.to_csv('submission.csv', index=False )<load_from_csv>
def status(feature):
    """Print a one-line progress marker after a preprocessing step."""
    print('Processing', feature, ': ok')
Titanic - Machine Learning from Disaster
13,868,595
TRAIN_PATH = os.path.join(".. /input/ai-academy-intermediate-class-competition-1", "BBC News Train.csv") df = pd.read_csv(TRAIN_PATH )<feature_engineering>
def process_cabin(combined):
    """Replace the Cabin column with one-hot deck indicator columns.

    Missing cabins become the placeholder deck 'U'; each cabin code is
    reduced to its leading deck letter ('C85' -> 'C') before dummy
    encoding with prefix 'Cabin_'. Note: the caller's frame is mutated
    in place (fillna/map) before the concatenated copy is returned.
    """
    combined.Cabin.fillna('U', inplace=True)
    # Keep only the deck letter.
    combined['Cabin'] = combined['Cabin'].map(lambda c: c[0])
    cabin_dummies = pd.get_dummies(combined['Cabin'], prefix='Cabin')
    combined = pd.concat([combined, cabin_dummies], axis=1)
    combined.drop('Cabin', axis=1, inplace=True)
    status('cabin')
    return combined
Titanic - Machine Learning from Disaster
13,868,595
df['category_id'] = df['Category'].factorize() [0] df['category_id'][0:10]<remove_duplicates>
combine = [df,test_df]
Titanic - Machine Learning from Disaster
13,868,595
category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id' )<define_variables>
df = df.drop('Cabin_T', axis=1) df.head()
Titanic - Machine Learning from Disaster