| kernel_id (int64, 24.2k–23.3M) | prompt (string, 8–1.85M chars) | completion (string, 1–182k chars) | comp_name (string, 5–57 chars) |
|---|---|---|---|
10,330,948 | classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']<load_from_csv> | Titanic - Machine Learning from Disaster | |
10,330,948 | folder = '/kaggle/input/fashion-mnist-itba-lab-2020/'
x = np.load(folder+'train_images.npy')
y = np.loadtxt(folder+'train_labels.csv', delimiter=',', skiprows=1)
x_test = np.load(folder+'test_images.npy' )<split> | Titanic - Machine Learning from Disaster | |
10,330,948 | x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.1)<categorify> | def ticket_category(Ticket_length):
    if Ticket_length <= 6:
        return 0
    elif 6 < Ticket_length <= 10:
        return 1
    else:
        return 2
| Titanic - Machine Learning from Disaster |
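A hedged usage sketch for ticket_category above; `Ticket` is the raw Titanic column, while `Ticket_category` is a hypothetical output name:
data['Ticket_category'] = data['Ticket'].astype(str).map(len).map(ticket_category)  # bucket tickets by string length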
10,330,948 | y_train_categorical = to_categorical(y_train)
y_val_categorical = to_categorical(y_valid )<choose_model_class> | data['Family_size'] = data['Parch'] + data['SibSp'] + 1 | Titanic - Machine Learning from Disaster |
10,330,948 | output_size = 10
model_single_layer = Sequential()
model_single_layer.add(Flatten(input_shape=x_train.shape[1:]))
model_single_layer.add(Dense(output_size, name='Salida'))
model_single_layer.add(Activation('softmax'))
model_single_layer.summary()<choose_model_class> | data[['Family_size', 'Survived']].groupby('Family_size' ).mean() | Titanic - Machine Learning from Disaster |
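A quick check on the summary above, assuming the 28x28 Fashion-MNIST image shape implied by the dataset folder:
# Flatten turns each 28x28 image into 784 inputs, so the softmax Dense layer holds
# 784 * 10 weights + 10 biases = 7,850 trainable parameters.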
10,330,948 | lr = 0.000001
sgd = optimizers.SGD(lr=lr)
model_single_layer.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])<train_model> | data['Single'] = data['Family_size'].map(lambda x: 1 if x == 1 else 0)
| Titanic - Machine Learning from Disaster |
10,330,948 | batch_size = 512
model_single_layer.fit(x_train,
y_train_categorical,
epochs=20, batch_size=batch_size,
verbose=1,
validation_data =(x_valid, y_val_categorical)
)<compute_test_metric> | data['Is_Married'] = 0
data['Is_Married'] = data['Title'].map(lambda x: 1 if x == 'Mrs' else 0 ) | Titanic - Machine Learning from Disaster |
10,330,948 | loss, acc = model_single_layer.evaluate(x_valid, y_val_categorical, verbose=0)
print(acc, loss )<predict_on_test> | data = data.drop(['Name', 'PassengerId', 'Ticket', 'Cabin', 'Last_Name'], axis = 1 ) | Titanic - Machine Learning from Disaster |
10,330,948 | test_prediction = model_single_layer.predict(x_test)
print(test_prediction.shape )<prepare_output> | def object_to_int(df):
if df.dtype=='object':
df = LabelEncoder().fit_transform(df)
return df
data = data.apply(lambda x: object_to_int(x)) | Titanic - Machine Learning from Disaster |
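For illustration, the column-wise encoder above behaves as follows on a typical object column:
# An object column like ['S', 'C', 'S', 'Q'] becomes [2, 0, 2, 1] (classes sorted, then indexed),
# while non-object columns pass through unchanged.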
10,330,948 | test_labels = np.argmax(test_prediction, axis = 1)
print(test_labels )<save_to_csv> | data= pd.get_dummies(data, columns = ['Age', 'Title', 'Embarked'], drop_first = True ) | Titanic - Machine Learning from Disaster |
10,330,948 | df = pd.DataFrame(data={"Category": test_labels}).astype(int)
df.to_csv("./submission.csv", sep=',',index=True, index_label='Id' )<import_modules> | def add_polynomial_features(frame, poly_degree=2, interaction=False):
poly = PolynomialFeatures(degree = poly_degree, interaction_only = interaction, include_bias = False)
poly_features = poly.fit_transform(frame[['Age', 'Name_length', 'Fare']])
df_poly = pd.DataFrame(poly_features, columns = poly.get_feature_names())
return pd.concat([frame, df_poly.drop(['x0'], axis=1)], axis=1)
| Titanic - Machine Learning from Disaster |
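For reference, with degree=2, interaction_only=False, and include_bias=False on three inputs, scikit-learn names the generated columns x0..x2 and their products; x0 is dropped afterwards because it duplicates Age:
# poly.get_feature_names() -> ['x0', 'x1', 'x2', 'x0^2', 'x0 x1', 'x0 x2', 'x1^2', 'x1 x2', 'x2^2']
# where x0 = Age, x1 = Name_length, x2 = Fare (x1 and x2 still duplicate their source columns).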
10,330,948 | import json
from sklearn.model_selection import train_test_split
from sklearn.base import BaseEstimator
from sklearn.metrics import f1_score
import scipy<load_from_csv> | train, test = divide_data(data ) | Titanic - Machine Learning from Disaster |
10,330,948 | train = pd.read_csv('/kaggle/input/finec-1941-hw6/train.csv', encoding='utf-8')
test = pd.read_csv('/kaggle/input/finec-1941-hw6/test.csv', encoding='utf-8')
for col in ['production_companies', 'production_countries', 'spoken_languages', 'cast']:
train[col] = train[col].apply(json.loads)
test[col] = test[col].apply(json.loads)
train.head().T<split> | X = train.drop(['Survived'], axis = 1)
y = train['Survived']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size = 0.25, random_state = 12345 ) | Titanic - Machine Learning from Disaster |
10,330,948 | x_train = train.drop(['index', 'genre_id'], axis=1)
y_train = train['genre_id']
x_test = test.drop('index', axis=1)
np.random.seed(514229)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.33 )<train_model> | Titanic - Machine Learning from Disaster | |
10,330,948 | class ClusteringClassifier(BaseEstimator):
def __init__(self, clustering):
self.clustering = clustering
def fit(self, x, y):
self.clustering.fit(x)
clustered = self.clustering.predict(x)
self.cluster_labels = {}
for c in np.unique(clustered):
ys = y[clustered == c]
most_popular_label = scipy.stats.mode(ys ).mode[0]
self.cluster_labels[c] = most_popular_label
return self
def predict(self, x):
clustered = self.clustering.predict(x)
prediction = [self.cluster_labels[c] for c in clustered]
return np.array(prediction)
<compute_train_metric> | Titanic - Machine Learning from Disaster | |
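A minimal usage sketch for the wrapper above, assuming scikit-learn's KMeans as the clustering backend (the ConstClustering used below is defined elsewhere in the source kernel):
from sklearn.cluster import KMeans
model = ClusteringClassifier(clustering=KMeans(n_clusters=10, random_state=0))
model.fit(x_train, y_train)  # clusters x_train, then tags each cluster with its majority label
print(model.predict(x_val)[:10])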
10,330,948 | model = ClusteringClassifier(clustering=ConstClustering())
model.fit(x_train, y_train)
pred_train = model.predict(x_train)
pred_val = model.predict(x_val)
print('f1 train:', f1_score(y_train, pred_train, average='macro'))
print('f1 val:', f1_score(y_val, pred_val, average='macro'))<save_to_csv> | lr = LogisticRegression(random_state = 12345)
parameters_lr = {'C': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 3, 5, 7, 10, 15, 20, 25, 30, 50],
'penalty':['l1', 'l2', 'elasticnet', 'none'],
'solver':['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'class_weight': [{1: 1}, {1: 3}, {1: 10}],
'max_iter': [200, 500, 800, 1000, 2000]}
search_lr = RandomizedSearchCV(lr, parameters_lr, cv=5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_lr.fit(X_train, y_train)
best_lr = search_lr.best_estimator_
predict_lr = best_lr.predict(X_valid)
auc_lr = cross_val_score(best_lr, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
acc_lr = cross_val_score(best_lr, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
print('AUC-ROC for Logistic Regression on validation dataset:', sum(auc_lr)/len(auc_lr))
print('Accuracy for Logistic Regression on validation dataset:', sum(acc_lr)/len(acc_lr)) | Titanic - Machine Learning from Disaster |
10,330,948 | pred = model.predict(x_test)
pd.Series(pred, index=test['index'], name='genre_id' ).to_frame().to_csv(f"default_const_submit.csv" )<import_modules> | dt = DecisionTreeClassifier(random_state = 12345)
parameters_dt = {'criterion': ['gini', 'entropy'],
'max_depth':range(1, 100, 1),
'min_samples_leaf': range(1, 20),
'max_features':range(1, X_train.shape[1]+1)}
search_dt = RandomizedSearchCV(dt, parameters_dt, cv=5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_dt.fit(X_train, y_train)
best_dt = search_dt.best_estimator_
acc_dt = cross_val_score(best_dt, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
auc_dt = cross_val_score(best_dt, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
print('AUC-ROC for Decision Tree on validation dataset:', sum(auc_dt)/len(auc_dt))
print('Accuracy for Decision Tree on validation dataset:', sum(acc_dt)/len(acc_dt)) | Titanic - Machine Learning from Disaster |
10,330,948 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression as LR
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV<set_options> | confusion_m(best_dt, 'Confusion matrix for Logistic Regression')
feature_importance(best_dt.feature_importances_, 'Feature importance for Decision Tree' ) | Titanic - Machine Learning from Disaster |
10,330,948 | warnings.filterwarnings("ignore" )<compute_test_metric> | rf = RandomForestClassifier(random_state = 12345)
parameters_rf = {'n_estimators': range(1, 1800, 25),
'criterion': ['gini', 'entropy'],
'max_depth':range(1, 100),
'min_samples_split': range(2, 12),
'min_samples_leaf': range(1, 12),
'max_features': ['auto', 'log2', 'sqrt', None]}
search_rf = RandomizedSearchCV(rf, parameters_rf, cv=5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_rf.fit(X_train, y_train)
best_rf = search_rf.best_estimator_
predict_rf = best_rf.predict(X_valid)
auc_rf = cross_val_score(best_rf, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
acc_rf = cross_val_score(best_rf, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
print('AUC-ROC for Random Forest on validation dataset:', sum(auc_rf)/len(auc_rf))
print('Accuracy for Random Forest on validation dataset:', sum(acc_rf)/len(acc_rf)) | Titanic - Machine Learning from Disaster |
10,330,948 | def rsi(values):
up = values[values>0].mean()
down = -1*values[values<0].mean()
return 100 * up /(up + down)
<feature_engineering> | confusion_m(best_rf, 'Confusion matrix for Random Forest')
feature_importance(best_rf.feature_importances_, 'Feature importance for Random Forest' ) | Titanic - Machine Learning from Disaster |
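A worked example for the rsi helper above, assuming a pandas Series input:
# values = pd.Series([2, -1, 3, -2]) -> up = mean(2, 3) = 2.5, down = -mean(-1, -2) = 1.5,
# so rsi(values) = 100 * 2.5 / (2.5 + 1.5) = 62.5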
10,330,948 | def abands(df):
df['AB_Middle_Band'] = df['close'].rolling(window = 20, center=False ).mean()
df['aupband'] = df['high'] *(1 + 4 *(df['high']-df['low'])/(df['high']+df['low']))
df['AB_Upper_Band'] = df['aupband'].rolling(window=20, center=False ).mean()
df['adownband'] = df['low'] *(1 - 4 *(df['high']-df['low'])/(df['high']+df['low']))
df['AB_Lower_Band'] = df['adownband'].rolling(window=20, center=False ).mean()<feature_engineering> | xgb = XGBClassifier(random_state = 12345, eval_metric='auc')
parameters_xgb = {'eta': [0.01, 0.05, 0.1, 0.001, 0.005, 0.04, 0.2, 0.0001],
'min_child_weight':range(1, 5),
'max_depth':range(1, 6),
'learning_rate': [0.01, 0.05, 0.1, 0.001, 0.005, 0.04, 0.2],
'n_estimators': range(50, 2001, 50)}
search_xgb = RandomizedSearchCV(xgb, parameters_xgb, cv = 5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_xgb.fit(X_train, y_train)
best_xgb = search_xgb.best_estimator_
predict_xgb = best_xgb.predict(X_valid)
auc_xgb = cross_val_score(best_xgb, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
acc_xgb = cross_val_score(best_xgb, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
print('AUC-ROC for XGBoost on validation dataset:', sum(auc_xgb)/len(auc_xgb))
print('Accuracy for XGBoost on validation dataset:', sum(acc_xgb)/len(acc_xgb)) | Titanic - Machine Learning from Disaster |
10,330,948 | def STOK(df, n):
df['STOK'] =(( df['close'] - df['low'].rolling(window=n, center=False ).mean())/(df['high'].rolling(window=n, center=False ).max() - df['low'].rolling(window=n, center=False ).min())) * 100
df['STOD'] = df['STOK'].rolling(window = 3, center=False ).mean()<categorify> | confusion_m(best_xgb, 'Confusion matrix for XGBoost')
feature_importance(best_xgb.feature_importances_, 'Feature importance for XGBoost' ) | Titanic - Machine Learning from Disaster |
10,330,948 | def psar(df, iaf = 0.02, maxaf = 0.2):
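    # Wilder's Parabolic SAR: iaf is the initial acceleration factor, maxaf its cap; the stop
    # level trails price and flips between bull and bear regimes when price crosses it.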
length = len(df)
high =(df['high'])
low =(df['low'])
close =(df['close'])
psar = df['close'][0:len(df['close'])]
psarbull = [None] * length
psarbear = [None] * length
bull = True
af = iaf
ep = df['low'][0]
hp = df['high'][0]
lp = df['low'][0]
for i in range(2,length):
if bull:
psar[i] = psar[i - 1] + af *(hp - psar[i - 1])
else:
psar[i] = psar[i - 1] + af *(lp - psar[i - 1])
reverse = False
if bull:
if df['low'][i] < psar[i]:
bull = False
reverse = True
psar[i] = hp
lp = df['low'][i]
af = iaf
else:
if df['high'][i] > psar[i]:
bull = True
reverse = True
psar[i] = lp
hp = df['high'][i]
af = iaf
if not reverse:
if bull:
if df['high'][i] > hp:
hp = df['high'][i]
af = min(af + iaf, maxaf)
if df['low'][i - 1] < psar[i]:
psar[i] = df['low'][i - 1]
if df['low'][i - 2] < psar[i]:
psar[i] = df['low'][i - 2]
else:
if df['low'][i] < lp:
lp = df['low'][i]
af = min(af + iaf, maxaf)
if df['high'][i - 1] > psar[i]:
psar[i] = df['high'][i - 1]
if df['high'][i - 2] > psar[i]:
psar[i] = df['high'][i - 2]
if bull:
psarbull[i] = psar[i]
else:
psarbear[i] = psar[i]
df['psar'] = psar
<feature_engineering> | cb = CatBoostClassifier(random_state = 12345, iterations = 300, eval_metric='Accuracy', verbose = 100)
parameters_cb = {'depth': range(6, 11),
'learning_rate': [0.001, 0.01, 0.05, 0.1, 0.0001]}
search_cb = RandomizedSearchCV(cb, parameters_cb, cv = 5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_cb.fit(X_train, y_train, verbose = 100)
best_cb = search_cb.best_estimator_
predict_cb = best_cb.predict(X_valid)
auc_cb = cross_val_score(best_cb, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
acc_cb = cross_val_score(best_cb, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
print('AUC-ROC for CatBoost on validation dataset:', sum(auc_cb)/len(auc_cb))
print('Accuracy for CatBoost on validation dataset:', sum(acc_cb)/len(acc_cb)) | Titanic - Machine Learning from Disaster |
10,330,948 | window = 12
def preprocess(data_in):
starting_ids = []
data = data_in.copy()
for k in range(5):
starting_ids.append(data[data.asset == k]['id'].min())
data['diff'] = data['high'] - data['low']
data['diff*volume'] = data['diff'] * data['volume']
    data['vol/trades'] = data.volume / data.trades
data.drop('id',axis = 1, inplace = True)
data.drop('asset',axis = 1, inplace = True)
data['Momentum_1D'] =(data['close']-data['close'].shift(1)).fillna(0)
data['RSI_14D'] = data['Momentum_1D'].rolling(center=False, window=14 ).apply(rsi ).fillna(0)
data['RSI_1D'] = data['Momentum_1D'].rolling(center=False, window=1 ).apply(rsi ).fillna(0)
data['RSI_2D'] = data['Momentum_1D'].rolling(center=False, window=2 ).apply(rsi ).fillna(0)
data['RSI_4D'] = data['Momentum_1D'].rolling(center=False, window=4 ).apply(rsi ).fillna(0)
data['RSI_8D'] = data['Momentum_1D'].rolling(center=False, window=8 ).apply(rsi ).fillna(0)
data['BB_Middle_Band'], data['BB_Upper_Band'], data['BB_Lower_Band'] = bbands(data['close'], length=20, numsd=1)
data['BB_Middle_Band'] = data['BB_Middle_Band'].fillna(0)
data['BB_Upper_Band'] = data['BB_Upper_Band'].fillna(0)
data['BB_Lower_Band'] = data['BB_Lower_Band'].fillna(0)
listofzeros = [0] * 25
up, down = aroon(data)
aroon_list = [x - y for x, y in zip(up,down)]
if len(aroon_list)==0:
aroon_list = [0] * data.shape[0]
data['Aroon_Oscillator'] = aroon_list
else:
data['Aroon_Oscillator'] = listofzeros+aroon_list
data["PVT"] =(data['Momentum_1D']/ data['close'].shift(1)) *data['volume']
data["PVT"] = data["PVT"] - data["PVT"].shift(1)
data["PVT"] = data["PVT"].fillna(0)
    abands(data)
    data.fillna(0, inplace=True)
    STOK(data, 4)
    data.fillna(0, inplace=True)
    data['ROC'] = ((data['close'] - data['close'].shift(12)) / (data['close'].shift(12))) * 100
    data.fillna(0, inplace=True)
    data['VWAP'] = np.cumsum(data['volume'] * (data['high'] + data['low']) / 2) / np.cumsum(data['volume'])
    data.fillna(0, inplace=True)
scaler = StandardScaler(with_std=False)
data = scaler.fit_transform(data)
return data<load_from_csv> | confusion_m(best_cb, 'Confusion matrix for CatBoost')
feature_importance(best_cb.feature_importances_, 'Feature importance for CatBoost' ) | Titanic - Machine Learning from Disaster |
10,330,948 | data = pd.read_csv('../input/train.csv')
target = data.pop('y')
data_test = pd.read_csv('../input/test.csv')<define_variables> | et = ExtraTreesClassifier(random_state = 12345)
parameters_et = {'n_estimators': range(50, 501, 25),
'criterion': ['gini', 'entropy'],
'max_depth':range(1, 100),
'min_samples_split': range(2, 12),
'min_samples_leaf': range(1, 12),
'max_features': ['auto', 'log2', 'sqrt', None]}
search_et = RandomizedSearchCV(et, parameters_et, cv=5, scoring = 'accuracy', n_jobs = -1, random_state = 12345)
search_et.fit(X_train, y_train)
best_et = search_et.best_estimator_
predict_et = best_et.predict(X_valid)
auc_et = cross_val_score(best_et, X_valid, y_valid, scoring = 'roc_auc', cv = 10, n_jobs = -1)
acc_et = cross_val_score(best_et, X_valid, y_valid, scoring = 'accuracy', cv = 10, n_jobs = -1)
print('AUC-ROC for Extra Trees on validation dataset:', sum(auc_et)/len(auc_et))
print('Accuracy for Extra Trees on validation dataset:', sum(acc_et)/len(acc_et)) | Titanic - Machine Learning from Disaster |
10,330,948 | target_by_asset = []
test_by_asset = []
train_by_asset = []
PredID = []
preds_df = []
scores = []
best_params = []
grid_scores = []
grid_preds_df = []
for k in range(5):
train_by_asset.append(data[data['asset']==k])
target_by_asset.append(target[data['asset']==k])
PredID.append(data_test[data_test['asset']==k]['id'])
train_by_asset[k] = preprocess(train_by_asset[k])
test_by_asset.append(data_test[data_test['asset']==k])
test_by_asset[k] = preprocess(test_by_asset[k])
X_train, X_test, y_train, y_test = train_test_split(train_by_asset[k], target_by_asset[k], test_size=0.33, random_state=777, shuffle=False)
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
params = {
'boosting_type': 'gbdt',
'colsample_bytree': 0.65,
'learning_rate': 0.005,
'n_estimators': 40,
'num_leaves': 16,
'objective': 'regression',
'random_state': 501,
'reg_alpha': 1,
'reg_lambda': 0,
'subsample': 0.7,
'max_depth' : -1,
'max_bin': 512,
'subsample_for_bin': 200,
'subsample_freq': 1,
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 5,
'scale_pos_weight': 1,
}
gbm = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=lgb_eval,
early_stopping_rounds=5)
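    # early_stopping_rounds=5 halts boosting once the validation score fails to improve for
    # five consecutive rounds; gbm.best_iteration below then selects that best round.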
mdl = lgb.LGBMRegressor(boosting_type= 'gbdt',
objective = 'regression',
n_jobs = -1,
silent = True,
max_depth = params['max_depth'],
max_bin = params['max_bin'],
subsample_for_bin = params['subsample_for_bin'],
subsample = params['subsample'],
subsample_freq = params['subsample_freq'],
min_split_gain = params['min_split_gain'],
min_child_weight = params['min_child_weight'],
min_child_samples = params['min_child_samples'],
scale_pos_weight = params['scale_pos_weight'])
gridParams = {
'learning_rate': [0.005],
'n_estimators': [40],
'num_leaves': [6,8,12,16],
'boosting_type' : ['gbdt'],
'objective': ['regression'],
'random_state' : [501],
'colsample_bytree' : [0.65, 0.66],
'subsample' : [0.7,0.75],
'reg_alpha' : [0,1,1.2],
'reg_lambda' : [0,1,1.2,1.4],
}
grid = RandomizedSearchCV(mdl, gridParams,
verbose=0,
cv=4,
n_jobs=-1)
grid.fit(train_by_asset[k], target_by_asset[k])
print(grid.best_params_)
print(grid.best_score_)
best_params.append(grid.best_params_)
grid_scores.append(grid.best_score_)
grid_pred = grid.predict(test_by_asset[k])
grid_pred[:12] = np.zeros(12)
out = pd.DataFrame(PredID[k])
out['expected'] = grid_pred
grid_preds_df.append(out)
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred)** 0.5)
scores.append(mean_squared_error(y_test, y_pred)** 0.5)
preds = gbm.predict(test_by_asset[k], num_iteration=gbm.best_iteration)
preds[:12] = np.zeros(12)
out = pd.DataFrame(PredID[k])
out['expected'] = preds
preds_df.append(out )<save_to_csv> | confusion_m(best_et, 'Confusion matrix for ExtraTreesClassifier')
feature_importance(best_et.feature_importances_, 'Feature importance for ExtraTreesClassifier' ) | Titanic - Machine Learning from Disaster |
10,330,948 | out_df = pd.DataFrame(np.concatenate(preds_df), columns=['id','expected'])
out_df.id = out_df.id.astype('int')
out_df.to_csv('by_asset_lgbm.csv',index = False )<save_to_csv> | vc = VotingClassifier(estimators=[('lr', best_lr),('xgb', best_xgb),('rf', best_rf),('et', best_et),('cb', best_cb)], voting='soft')
vc.fit(X_train, y_train)
predict_vc = vc.predict(X_valid)
auc_vc = cross_val_score(vc, X_valid, y_valid, scoring = 'roc_auc', cv = 5, n_jobs = -1)
acc_vc = cross_val_score(vc, X_valid, y_valid, scoring = 'accuracy', cv = 5, n_jobs = -1)
print('AUC-ROC for ensemble models on validation dataset:', sum(auc_vc)/len(auc_vc))
print('Accuracy for ensemble models on validation dataset:', sum(acc_vc)/len(acc_vc)) | Titanic - Machine Learning from Disaster |
10,330,948 | out_df = pd.DataFrame(np.concatenate(grid_preds_df), columns=['id','expected'])
out_df.id = out_df.id.astype('int')
out_df.to_csv('grid_by_asset.csv',index = False )<define_variables> | confusion_m(vc, 'Confusion matrix for VotingClassifier' ) | Titanic - Machine Learning from Disaster |
10,330,948 | regr = ElasticNetCV(cv=5, random_state=0, l1_ratio = [.1,.5,.7,.9,.95,.99, 1])
scores = []
target_by_asset = []
test_by_asset = []
train_by_asset = []
PredID = []
preds_df = []
scores = []
best_params = []
grid_scores = []
grid_preds_df = []
for k in range(5):
train_by_asset.append(data[data['asset']==k])
target_by_asset.append(target[data['asset']==k])
PredID.append(data_test[data_test['asset']==k]['id'])
train_by_asset[k] = preprocess(train_by_asset[k])
test_by_asset.append(data_test[data_test['asset']==k])
test_by_asset[k] = preprocess(test_by_asset[k])
X_train, X_test, y_train, y_test = train_test_split(np.nan_to_num(train_by_asset[k],0), target_by_asset[k], test_size=0.33, random_state=777, shuffle=False)
regr.fit(X_train,y_train)
y_pred = regr.predict(X_test)
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred)** 0.5)
scores.append(mean_squared_error(y_test, y_pred)** 0.5)
preds = regr.predict(np.nan_to_num(test_by_asset[k],0))
out = pd.DataFrame(PredID[k])
out['expected'] = preds
preds_df.append(out )<save_to_csv> | models = ['logistic_regression', 'decision_tree', 'random_forest',
'xgboost', 'catboost',
'extra_trees', 'voting']
dict_values = {'accuracy': [acc_lr.mean() , acc_dt.mean() , acc_rf.mean() ,
acc_xgb.mean() , acc_cb.mean() ,
acc_et.mean() , acc_vc.mean() ],
'auc_roc': [auc_lr.mean() , auc_dt.mean() , auc_rf.mean() ,
auc_xgb.mean() , auc_cb.mean() ,
auc_et.mean() , auc_vc.mean() ]}
df_score = pd.DataFrame(dict_values, index = models, columns = ['accuracy', 'auc_roc'])
df_score | Titanic - Machine Learning from Disaster |
10,330,948 | out_df = pd.DataFrame(np.concatenate(preds_df), columns=['id','expected'])
out_df.id = out_df.id.astype('int')
out_df.to_csv('Elastic_net.csv',index = False )<import_modules> | X_train = X_train.values
test = test.values
ntrain = X_train.shape[0]
ntest = test.shape[0]
n_folds = 5
kf = KFold(n_splits = n_folds, random_state = 12345)
def get_oof(clf, x_train, y_train, x_test):
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((n_folds, ntest))
    for i, (train_index, test_index) in enumerate(kf.split(x_train)):
x_tr = x_train[train_index]
y_tr = y_train.iloc[train_index]
x_te = x_train[test_index]
clf.fit(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1 ) | Titanic - Machine Learning from Disaster |
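In brief, the out-of-fold scheme above works as follows:
# Each base learner yields oof_train of shape (ntrain, 1) and oof_test of shape (ntest, 1);
# training rows are predicted only by the fold that did not see them (no target leakage),
# test rows get the mean of the per-fold predictions, and np.concatenate later stacks the
# columns into the level-two model's feature matrices.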
10,330,948 | import pandas as pd
from pathlib import Path
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression<load_from_csv> | et_oof_train, et_oof_test = get_oof(best_et, X_train, y_train, test)
rf_oof_train, rf_oof_test = get_oof(best_rf,X_train, y_train, test)
cb_oof_train, cb_oof_test = get_oof(best_cb, X_train, y_train, test)
xgb_oof_train, xgb_oof_test = get_oof(best_xgb,X_train, y_train, test)
lr_oof_train, lr_oof_test = get_oof(best_lr,X_train, y_train, test ) | Titanic - Machine Learning from Disaster |
10,330,948 | data_dir = Path('../input')
train_df = pd.read_csv(data_dir / 'train.csv')
test_df = pd.read_csv(data_dir / 'test.csv')
sample_submission = pd.read_csv(data_dir / 'sampleSubmission.csv' )<define_variables> | X_train = np.concatenate(( et_oof_train, rf_oof_train, cb_oof_train, xgb_oof_train, lr_oof_train), axis=1)
X_test = np.concatenate(( et_oof_test, rf_oof_test, cb_oof_test, xgb_oof_test, lr_oof_test), axis=1 ) | Titanic - Machine Learning from Disaster |
10,330,948 | features = ['di_minus', 'rsi', 'cci']<prepare_x_and_y> | best_model = XGBClassifier(n_estimators= 2000, max_depth= 4, min_child_weight= 2, gamma=0.9,
subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic',
nthread= -1,scale_pos_weight=1 ).fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
10,330,948 | X_train = train_df[features]
y_train = train_df['y']
X_test = test_df[features]<normalization> | y_test = best_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
10,330,948 | scaler = StandardScaler(with_std=False)
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test )<train_model> | submission = pd.DataFrame({'PassengerId': list(range(892, 1310)) , 'Survived': y_test})
submission['Survived'] = submission['Survived'].astype(int)
submission.to_csv('submission.csv', index=False)
print(submission ) | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = RandomForestRegressor(n_estimators=200, random_state=42, n_jobs=-1,
max_depth=7)
regressor.fit(X_train, y_train)
y_rfr = regressor.predict(X_test)
print('RandomForestRegressor')
print(y_rfr.min())
print(y_rfr.max() )<train_model> | %matplotlib inline | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = CatBoostRegressor(silent=True)
regressor.fit(X_train, y_train)
y_cbr = regressor.predict(X_test)
print('CatBoostRegressor')
print(y_cbr.min())
print(y_cbr.max() )<train_model> | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
y_sample = pd.read_csv("/kaggle/input/titanic/gender_submission.csv" ) | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = AdaBoostRegressor()
regressor.fit(X_train, y_train)
y_abr = regressor.predict(X_test)
print('AdaBoostRegressor')
print(y_abr.min())
print(y_abr.max() )<train_model> | train_data.groupby('Survived' ).mean() | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = BaggingRegressor()
regressor.fit(X_train, y_train)
y_br = regressor.predict(X_test)
print('BaggingRegressor')
print(y_br.min())
print(y_br.max() )<train_model> | train_data.groupby(train_data['Age'].isnull() ).mean() | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = LinearRegression()
regressor.fit(X_train, y_train)
y_lr = regressor.predict(X_test)
print('LinearRegressor')
print(y_lr.min())
print(y_lr.max() )<train_model> | train_data.groupby('Pclass' ).mean() | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = BayesianRidge()
regressor.fit(X_train, y_train)
y_bridge = regressor.predict(X_test)
print('BayesianRidge')
print(y_bridge.min())
print(y_bridge.max() )<train_model> | train_data.groupby('Parch' ).mean() | Titanic - Machine Learning from Disaster |
9,479,891 | regressor = GradientBoostingRegressor()
regressor.fit(X_train, y_train)
y_gbr = regressor.predict(X_test)
print('GradientBoostingRegressor')
print(y_gbr.min())
print(y_gbr.max() )<feature_engineering> | train_data['Age'] = train_data[['Age','Parch']].apply(age_aprox, axis = 1)
test_data['Age'] = test_data[['Age','Parch']].apply(age_aprox, axis = 1 ) | Titanic - Machine Learning from Disaster |
9,479,891 | sample_submission['expected'] = y_test<save_to_csv> | train_data['Family_count'] = train_data['SibSp'] +train_data['Parch']
| Titanic - Machine Learning from Disaster |
9,479,891 | sample_submission.to_csv('submission.csv', index=False )<import_modules> | test_data['Family_count'] = test_data['SibSp'] +test_data['Parch']
| Titanic - Machine Learning from Disaster |
9,479,891 | import pandas as pd
import numpy as np
import sys
import os
import random
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
import lightgbm as lgb<load_from_csv> | round(( train_data['Cabin'].isnull().sum() /len(train_data)) *100,2 ) | Titanic - Machine Learning from Disaster |
9,479,891 | train_df = pd.read_csv(PATH+"train.csv")
test_df = pd.read_csv(PATH+"test.csv" )<train_model> | train_data.pivot_table('Survived',index = 'Sex',columns = 'Embarked',aggfunc= 'count' ) | Titanic - Machine Learning from Disaster |
9,479,891 | print("Train: rows:{} cols:{}".format(train_df.shape[0], train_df.shape[1]))
print("Test: rows:{} cols:{}".format(test_df.shape[0], test_df.shape[1]))<count_missing_values> | for name in train_data['Name']:
train_data['Title'] = train_data['Name'].str.extract('([A-Za-z]+)\.',expand=True)
test_data['Title'] = test_data['Name'].str.extract('([A-Za-z]+)\.',expand=True ) | Titanic - Machine Learning from Disaster |
9,479,891 | def missing_data(data):
total = data.isnull().sum().sort_values(ascending = False)
percent =(data.isnull().sum() /data.isnull().count() *100 ).sort_values(ascending = False)
return np.transpose(pd.concat([total, percent], axis=1, keys=['Total', 'Percent']))<count_missing_values> | train_data.groupby('Title' ).count() ['PassengerId'] | Titanic - Machine Learning from Disaster |
9,479,891 | missing_data(train_df )<count_missing_values> | test_data.groupby('Title' ).count() ['PassengerId'] | Titanic - Machine Learning from Disaster |
9,479,891 | missing_data(test_df )<count_unique_values> | gender = {'male':1,'female': 0}
train_data['Sex'] = train_data['Sex'].map(gender)
test_data['Sex'] = test_data['Sex'].map(gender ) | Titanic - Machine Learning from Disaster |
9,479,891 | print("There are {} unique zipcodes.".format(train_df['zipcode'].nunique()))<feature_engineering> | title = {'Capt':'Others','Col':'Others','Countess':'Others','Don':'Others', 'Dr':'Others','Jonkheer':'Others', 'Lady':'Others', 'Major':'Others',
'Mlle':'Others', 'Mme':'Others', 'Ms':'Miss','Rev': 'Others','Sir':'Others','Dona': 'Others'} | Titanic - Machine Learning from Disaster |
9,479,891 | for df in [train_df, test_df]:
df['date'] = pd.to_datetime(df['date'])
df['dayofweek'] = df['date'].dt.dayofweek
df['weekofyear'] = df['date'].dt.weekofyear
df['dayofyear'] = df['date'].dt.dayofyear
df['quarter'] = df['date'].dt.quarter
df['is_month_start'] = pd.to_numeric(df['date'].dt.is_month_start)
df['month'] = df['date'].dt.month
df['year'] = df['date'].dt.year
df['is_weekend'] = pd.to_numeric(df['dayofweek']>=5 )<feature_engineering> | train_data.replace({'Title':title},inplace=True)
test_data.replace({'Title':title},inplace=True ) | Titanic - Machine Learning from Disaster |
9,479,891 | for df in [train_df, test_df]:
df['med_lat'] = np.round(df['lat'],1)
df['med_long'] = np.round(df['long'],1)
df['build_old'] = 2019 - df['yr_built']
df['sqft_living_diff'] = df['sqft_living'] - df['sqft_living15']
df['sqft_lot_diff'] = df['sqft_lot'] - df['sqft_lot15']
df['bedroom_bathroom_ratio'] = df['bedrooms'] / df['bathrooms']<feature_engineering> | train_data.groupby('Title' ).count() ['PassengerId'] | Titanic - Machine Learning from Disaster |
9,479,891 | features = ['bedrooms','bathrooms','floors',
'waterfront','view','condition','grade',
'sqft_living','sqft_lot','sqft_above','sqft_basement','sqft_living15','sqft_lot15',
'yr_built','yr_renovated',
'lat', 'long','zipcode',
'date', 'dayofweek', 'weekofyear', 'dayofyear', 'quarter',
'is_month_start', 'month', 'year', 'is_weekend',
'price']
mask = np.zeros_like(train_df[features].corr(), dtype=bool)
mask[np.triu_indices_from(mask)] = True
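# Masking the upper triangle (np.triu_indices_from includes the diagonal) shows each pairwise correlation only once.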
f, ax = plt.subplots(figsize=(18,18))
plt.title('Pearson Correlation Matrix',fontsize=25)
sns.heatmap(train_df[features].corr() ,linewidths=0.25,vmax=1.0,square=True,cmap="Blues",
linecolor='w',annot=True,mask=mask,cbar_kws={"shrink":.75});
<split> | features = ["Pclass", "Sex", "SibSp","Parch","Age","Embarked"]
y = train_data["Survived"]
X = pd.get_dummies(train_data[features])
titanic_test = pd.get_dummies(test_data[features])
print(X.shape)
print(y.shape)
| Titanic - Machine Learning from Disaster |
9,479,891 | VALID_SIZE = 0.2
RANDOM_STATE = 2019
train, valid = train_test_split(train_df, test_size=VALID_SIZE, random_state=RANDOM_STATE, shuffle=True )<define_variables> | from sklearn.model_selection import train_test_split, cross_val_score | Titanic - Machine Learning from Disaster |
9,479,891 | predictors = ['sqft_living', 'grade']
target = 'price'<prepare_x_and_y> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5, random_state=42 ) | Titanic - Machine Learning from Disaster |
9,479,891 | train_X = train[predictors]
train_Y = train[target].values
valid_X = valid[predictors]
valid_Y = valid[target].values<init_hyperparams> | for dataset in [y_train,y_val,y_test]:
print(round(len(dataset)/len(y),2)) | Titanic - Machine Learning from Disaster |
9,479,891 | RFC_METRIC = 'mse'
NUM_ESTIMATORS = 100
NO_JOBS = 4<choose_model_class> | from sklearn.model_selection import cross_val_score | Titanic - Machine Learning from Disaster |
9,479,891 | model = RandomForestRegressor(n_jobs=NO_JOBS,
random_state=RANDOM_STATE,
criterion=RFC_METRIC,
n_estimators=NUM_ESTIMATORS,
verbose=False )<train_model> | rf_basic = RandomForestClassifier()
scores = cross_val_score(rf_basic,X_train,y_train, cv = 5)
scores | Titanic - Machine Learning from Disaster |
9,479,891 | model.fit(train_X, train_Y)<predict_on_test> | print('Parameters currently in use:\n')
pprint(rf_basic.get_params()) | Titanic - Machine Learning from Disaster |
9,479,891 | preds = model.predict(valid_X )<compute_test_metric> | from sklearn.model_selection import GridSearchCV | Titanic - Machine Learning from Disaster |
9,479,891 | print("RF Model score: ", model.score(train_X, train_Y))<compute_test_metric> | from sklearn.model_selection import GridSearchCV | Titanic - Machine Learning from Disaster |
9,479,891 | def rmse(preds, y):
return np.sqrt(mean_squared_error(preds, y))<compute_test_metric> | def print_results(results):
    print('BEST PARAMS: {}\n'.format(results.best_params_))
means = results.cv_results_['mean_test_score']
stds = results.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, results.cv_results_['params']):
        print('{} (+/- {}) for {}'.format(round(mean, 3), round(std * 2, 3), params)) | Titanic - Machine Learning from Disaster |
9,479,891 | print("Root mean squared error(valid set):",round(rmse(preds, valid_Y),2))<train_model> | rf_grid = RandomForestClassifier()
parameters = {
'n_estimators': [5, 50, 100, 200],
'max_depth': [2, 10, 20, None],
'max_features': ['auto', 'sqrt'],
'min_samples_leaf': [1, 2, 4],
'min_samples_split': [2, 5, 10],
}
cv = GridSearchCV(rf_grid, parameters, cv=5,verbose = 2)
| Titanic - Machine Learning from Disaster |
9,479,891 | predictors = ['sqft_living', 'grade', 'sqft_above']
target = 'price'
train_X = train[predictors]
train_Y = train[target].values
valid_X = valid[predictors]
valid_Y = valid[target].values
model = RandomForestRegressor(n_jobs=NO_JOBS,
random_state=RANDOM_STATE,
criterion=RFC_METRIC,
n_estimators=NUM_ESTIMATORS,
verbose=False)
model.fit(train_X, train_Y)
preds = model.predict(valid_X )<compute_test_metric> | from sklearn.metrics import accuracy_score, precision_score, recall_score | Titanic - Machine Learning from Disaster |
9,479,891 | print("RF Model score: ", model.score(train_X, train_Y))
print("Root mean squared error(valid set):",round(rmse(preds, valid_Y),2))<prepare_x_and_y> | rf_best = RandomForestClassifier(n_estimators= 50, max_depth = 10, max_features = 'auto', min_samples_leaf =4, min_samples_split = 10)
rf_best.fit(X_train,y_train ) | Titanic - Machine Learning from Disaster |
9,479,891 | predictors = ['sqft_living', 'sqft_lot',
'sqft_above', 'sqft_living15',
'waterfront', 'view', 'condition', 'grade',
'bedrooms', 'bathrooms', 'floors',
'zipcode',
'month', 'dayofweek',
'med_lat', 'med_long',
'build_old', 'sqft_living_diff', 'sqft_lot_diff',
]
target = 'price'
train_X = train[predictors]
train_Y = train[target].values
valid_X = valid[predictors]
valid_Y = valid[target].values
model = RandomForestRegressor(n_jobs=NO_JOBS,
random_state=RANDOM_STATE,
criterion=RFC_METRIC,
n_estimators=NUM_ESTIMATORS,
verbose=False)
model.fit(train_X, train_Y)
preds = model.predict(valid_X)
plot_feature_importance()
print("RF Model score: ", model.score(train_X, train_Y))
print("Root mean squared error(valid set):",round(rmse(preds, valid_Y),2))<save_to_csv> | mdl = rf_best
y_pred = mdl.predict(X_val)
accuracy = round(accuracy_score(y_val, y_pred), 3)
precision = round(precision_score(y_val, y_pred), 3)
recall = round(recall_score(y_val, y_pred), 3)
print('MAX DEPTH: {} / # OF EST: {} -- A: {} / P: {} / R: {}'.format(mdl.max_depth, mdl.n_estimators, accuracy, precision, recall))
9,479,891 | test_X = test_df[predictors]
predictions_RF = model.predict(test_X)
submission = pd.read_csv(PATH+"sample_submission.csv")
submission['price'] = predictions_RF
submission.to_csv('submission.csv', index=False )<init_hyperparams> | y_pred = rf_best.predict(X_test)
accuracy = round(accuracy_score(y_test, y_pred), 3)
precision = round(precision_score(y_test, y_pred), 3)
recall = round(recall_score(y_test, y_pred), 3)
print('MAX DEPTH: {} / # OF EST: {} -- A: {} / P: {} / R: {}'.format(rf_best.max_depth, rf_best.n_estimators, accuracy, precision, recall))
9,479,891 | param = {'num_leaves': 51,
'min_data_in_leaf': 30,
'objective':'regression',
'max_depth': -1,
'learning_rate': 0.01,
"boosting": "gbdt",
"metric": 'rmse',
"verbosity": -1,
"nthread": 4,
"random_state": 42}<define_variables> | rf_best.fit(X, y ) | Titanic - Machine Learning from Disaster |
9,479,891 | predictors = ['sqft_living', 'sqft_lot',
'sqft_above', 'sqft_living15',
'waterfront', 'view', 'condition', 'grade',
'bedrooms', 'bathrooms', 'floors',
'zipcode',
'month', 'dayofweek',
'med_lat', 'med_long',
'build_old', 'sqft_living_diff', 'sqft_lot_diff',
]
target = 'price'<create_dataframe> | predictions = rf_best.predict(titanic_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
7,290,396 | folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
oof = np.zeros(len(train_df))
predictions_lgb_cv = np.zeros(len(test_df))
feature_importance_df = pd.DataFrame()<split> | dataset = pd.read_csv('/kaggle/input/titanic/train.csv')
X = dataset.iloc[:, [2, 4, 5, 6, 7]].values
y = dataset.iloc[:, 1].values | Titanic - Machine Learning from Disaster |
7,290,396 | for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_df, train_df['price'].values)):
strLog = "fold {}".format(fold_)
print(strLog)
trn_data = lgb.Dataset(train_df.iloc[trn_idx][predictors], label=train_df.iloc[trn_idx][target])
val_data = lgb.Dataset(train_df.iloc[val_idx][predictors], label=train_df.iloc[val_idx][target])
num_round = 10000
clf = lgb.train(param, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=100, early_stopping_rounds = 100)
oof[val_idx] = clf.predict(train_df.iloc[val_idx][predictors], num_iteration=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["Feature"] = predictors
fold_importance_df["importance"] = clf.feature_importance()
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions_lgb_cv += clf.predict(test_df[predictors], num_iteration=clf.best_iteration)/ folds.n_splits
strRMSE = "RMSE: {}".format(rmse(oof, train_df[target]))
print(strRMSE )<save_to_csv> | test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data_X = test_data.iloc[:, [1, 3, 4, 5, 6]].values | Titanic - Machine Learning from Disaster |
7,290,396 | submission = pd.read_csv(PATH+"sample_submission.csv")
submission['price'] = predictions_lgb_cv
submission.to_csv('submission_cv.csv', index=False )<define_variables> | imputer_X = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer_X = imputer_X.fit(X[:, 2:3])
X[:, 2:3] = imputer_X.transform(X[:, 2:3] ) | Titanic - Machine Learning from Disaster |
7,290,396 | predictions_blending = predictions_RF * 0.55 + predictions_lgb_cv * 0.45<save_to_csv> | imputer_test_data_X = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer_test_data_X = imputer_test_data_X.fit(test_data_X[:, 2:3])
test_data_X[:, 2:3] = imputer_test_data_X.transform(test_data_X[:, 2:3] ) | Titanic - Machine Learning from Disaster |
7,290,396 | submission = pd.read_csv(PATH+"sample_submission.csv")
submission['price'] = predictions_blending
submission.to_csv('submission_blending.csv', index=False )<import_modules> | labelencoder_X = LabelEncoder()
X[:, 1] = labelencoder_X.fit_transform(X[:, 1] ) | Titanic - Machine Learning from Disaster |
7,290,396 | from pathlib import Path
import json
import re
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook<define_variables> | labelencoder_test_data_X = LabelEncoder()
test_data_X[:, 1] = labelencoder_test_data_X.fit_transform(test_data_X[:, 1] ) | Titanic - Machine Learning from Disaster |
7,290,396 | DATA_DIR = Path('../input/ndsc-advanced')
BEAUTY_TRAIN_CSV = DATA_DIR / 'beauty_data_info_train_competition.csv'
FASHION_TRAIN_CSV = DATA_DIR / 'fashion_data_info_train_competition.csv'
MOBILE_TRAIN_CSV = DATA_DIR / 'mobile_data_info_train_competition.csv'
BEAUTY_TEST_CSV = DATA_DIR / 'beauty_data_info_val_competition.csv'
FASHION_TEST_CSV = DATA_DIR / 'fashion_data_info_val_competition.csv'
MOBILE_TEST_CSV = DATA_DIR / 'mobile_data_info_val_competition.csv'
LAST_SUBMITTED_CSV = Path('../input/aoeul-solution-step-3-linearsvc-dl-model/Ensembled_SVC_DL_predictions.csv')<load_from_csv> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0) | Titanic - Machine Learning from Disaster |
7,290,396 | beauty_train_df = pd.read_csv(BEAUTY_TRAIN_CSV)
fashion_train_df = pd.read_csv(FASHION_TRAIN_CSV)
mobile_train_df = pd.read_csv(MOBILE_TRAIN_CSV)
beauty_test_df = pd.read_csv(BEAUTY_TEST_CSV)
fashion_test_df = pd.read_csv(FASHION_TEST_CSV)
mobile_test_df = pd.read_csv(MOBILE_TEST_CSV)
prev_subm_df = pd.read_csv(LAST_SUBMITTED_CSV )<define_variables> | sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
test_data_X = sc_X.transform(test_data_X ) | Titanic - Machine Learning from Disaster |
7,290,396 | categories = ['beauty', 'fashion', 'mobile']
train_dfs = [beauty_train_df, fashion_train_df, mobile_train_df]
test_dfs = [beauty_test_df, fashion_test_df, mobile_test_df]<merge> | classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
7,290,396 | matched_label_df = pd.DataFrame()
for cat, train_df, test_df in zip(categories, train_dfs, test_dfs):
print(f'Matching {cat} training and test data...')
df_train = train_df.rename(columns={'title': 'title_train', 'itemid': 'itemid_train'})
df_test = test_df[['itemid', 'title']].rename(columns={'title': 'title_test', 'itemid': 'itemid_test'})
df_combined = pd.merge(df_train, df_test, how='inner', left_on=['title_train'], right_on=['title_test'])
df_combined.drop(['image_path','itemid_train', 'title_train'], axis=1, inplace=True)
df_clean = df_combined.drop_duplicates()
print('Finding test data with exact same title as train data with unique labels')
single_instance=[]
for title in tqdm_notebook(df_clean.title_test.unique()):
if(len(df_clean[df_clean.title_test==title])== 1):
single_instance.append(title)
print(f'{len(single_instance)} test-train title pair matches found in {cat} category')
single_inst_df = df_clean[df_clean.title_test.isin(single_instance)]
feat_cols = single_inst_df.columns.drop(['itemid_test', 'title_test'])
id_label_list=[]
for _, row in single_inst_df.iterrows() :
for feat in feat_cols:
if not np.isnan(row[feat]):
itemid = '_'.join([str(row['itemid_test']), str(feat)])
answer = str(int(row[feat]))
id_label_list.append(( itemid, answer))
prev_subm_df['first_pred'] = prev_subm_df['tagging'].apply(lambda x: x.split(' ')[0])
list_new_id = []
list_new_ans = []
for id_ans in tqdm_notebook(id_label_list):
subm_first_label = int(prev_subm_df.loc[prev_subm_df.id==id_ans[0]]['first_pred'])
if subm_first_label != int(id_ans[1]):
print(f'Test id: {id_ans[0]:28} Submitted label: {subm_first_label:<5} Train data label: {id_ans[1]}')
list_new_id.append(id_ans[0])
list_new_ans.append(id_ans[1])
label_df = pd.DataFrame(
{'id': list_new_id, 'tagging': list_new_ans},
columns = ['id', 'tagging'])
matched_label_df = pd.concat([matched_label_df, label_df], axis=0)
print()<save_to_csv> | y_pred = classifier.predict(test_data_X ) | Titanic - Machine Learning from Disaster |
7,290,396 | <import_modules><EOS> | output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_pred})
output.to_csv('my_submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
459,782 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv> | sns.set(color_codes=True)
%matplotlib inline
print(check_output(["ls", ".. /input"] ).decode("utf8"))
| Titanic - Machine Learning from Disaster |
459,782 | dl_df = pd.read_csv('../input/aoeul-solution-step-2-deep-learning-model/Deep_Learning_predictions.csv')
svc_df = pd.read_csv('../input/aoeul-solution-step-1-linearsvc/LinearSVC_predictions.csv')<predict_on_test> | train = pd.read_csv("../input/train.csv", dtype={"Age": np.float64})
test = pd.read_csv("../input/test.csv", dtype={"Age": np.float64}) | Titanic - Machine Learning from Disaster |
459,782 | dl_df.head(10 )<predict_on_test> | train.isnull().sum(axis=0 ) | Titanic - Machine Learning from Disaster |
459,782 | svc_df.head(10 )<merge> | test['Sex'].value_counts() | Titanic - Machine Learning from Disaster |
459,782 | new_df = pd.merge(dl_df, svc_df, on='id')
new_df.head()<string_transform> | test_id = test['PassengerId']
target = train['Survived'] | Titanic - Machine Learning from Disaster |
459,782 | new_tag = []
i = 0
for _, row in tqdm_notebook(new_df.iterrows()):
x1, x2 = row.tagging_x.split(' ')
x1 = int(x1)
x2 = int(x2)
y = row.tagging_y
if x1 == y:
new_tag.append(row.tagging_x)
i = i + 1
else:
new_tag.append(str(y)+ ' '+ str(x1))<feature_engineering> | train["Fare"].fillna(train["Fare"].median() , inplace=True)
test["Fare"].fillna(test["Fare"].median() , inplace=True ) | Titanic - Machine Learning from Disaster |
459,782 | i/len(new_df)*100<save_to_csv> | train["Embarked"].fillna(train['Embarked'].mode() [0], inplace=True)
test["Embarked"].fillna(test['Embarked'].mode() [0], inplace=True ) | Titanic - Machine Learning from Disaster |
459,782 | new_df['tagging']= np.array(new_tag)
new_df = new_df[['id', 'tagging']]
new_df.to_csv(f'Ensembled_SVC_DL_predictions.csv', index=None )<load_from_csv> | train["Age"].fillna(train["Age"].median() , inplace=True)
test["Age"].fillna(test["Age"].median() , inplace=True ) | Titanic - Machine Learning from Disaster |
459,782 | TRAIN_PATH = os.path.join("../input/ai-academy-intermediate-class-competition-1", "BBC News Train.csv")
df = pd.read_csv(TRAIN_PATH )<feature_engineering> | Titanic - Machine Learning from Disaster | |
459,782 | df['category_id'] = df['Category'].factorize() [0]
df['category_id'][0:10]<remove_duplicates> | def substrings_in_string(big_string, substrings):
for substring in substrings:
if big_string.find(substring)!= -1:
return substring
return np.nan | Titanic - Machine Learning from Disaster |
459,782 | category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id' )<define_variables> | def replace_titles(x):
title=x['Title']
if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col']:
return 'Mr'
elif title in ['Countess', 'Mme']:
return 'Mrs'
elif title in ['Mlle', 'Ms']:
return 'Miss'
elif title =='Dr':
        if x['Sex']=='male':
return 'Mr'
else:
return 'Mrs'
else:
return title | Titanic - Machine Learning from Disaster |
459,782 | category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'Category']].values )<count_values> | title_list=['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev',
'Dr', 'Ms', 'Mlle','Col', 'Capt', 'Mme', 'Countess',
'Don', 'Jonkheer'] | Titanic - Machine Learning from Disaster |
459,782 | df.groupby('Category' ).category_id.count()
<categorify> | train['Title']=train['Name'].map(lambda x: substrings_in_string(x, title_list))
test['Title']=test['Name'].map(lambda x: substrings_in_string(x, title_list)) | Titanic - Machine Learning from Disaster |
459,782 | tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')
features = tfidf.fit_transform(df.Text ).toarray()
labels = df.category_id
<sort_values> | train['Title']=train.apply(replace_titles, axis=1)
test['Title']=test.apply(replace_titles, axis=1 ) | Titanic - Machine Learning from Disaster |
459,782 | sorted(category_to_id.items() )<statistical_test> | train['Family_Size']=train['SibSp']+train['Parch']
test['Family_Size']=test['SibSp']+test['Parch'] | Titanic - Machine Learning from Disaster |
459,782 | N = 3
for Category, category_id in sorted(category_to_id.items()):
features_chi2 = chi2(features, labels == category_id)
indices = np.argsort(features_chi2[0])
feature_names = np.array(tfidf.get_feature_names())[indices]
unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
print("
print(".Most correlated unigrams:
.{}".format('
.'.join(unigrams[-N:])))
print(".Most correlated bigrams:
.{}".format('
.'.join(bigrams[-N:])))
<init_hyperparams> | train['Age*Class']=train['Age']*train['Pclass']
test['Age*Class']=test['Age']*test['Pclass'] | Titanic - Machine Learning from Disaster |
459,782 | SAMPLE_SIZE = int(len(features)* 0.3)
np.random.seed(0)
indices = np.random.choice(range(len(features)) , size=SAMPLE_SIZE, replace=False)
projected_features = TSNE(n_components=2, random_state=0 ).fit_transform(features[indices])
<filter> | train['Fare_Per_Person']=train['Fare']/(train['Family_Size']+1)
test['Fare_Per_Person']=test['Fare']/(test['Family_Size']+1 ) | Titanic - Machine Learning from Disaster |
459,782 | my_id = 0
projected_features[(labels[indices] == my_id ).values]<choose_model_class> | train["Embarked"].fillna(0, inplace=True)
test["Embarked"].fillna(0, inplace=True ) | Titanic - Machine Learning from Disaster |
459,782 | models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
MultinomialNB() ,
LogisticRegression(random_state=0),
]
<create_dataframe> | lb_make = LabelEncoder()
train["Embarked"] = lb_make.fit_transform(train["Embarked"])
train["Sex"] = lb_make.fit_transform(train["Sex"])
train["Title"] = lb_make.fit_transform(train["Title"])
test["Embarked"] = lb_make.fit_transform(test["Embarked"])
test["Sex"] = lb_make.fit_transform(test["Sex"])
test["Title"] = lb_make.fit_transform(test["Title"] ) | Titanic - Machine Learning from Disaster |