| kernel_id int64 (24.2k–23.3M) | prompt string (lengths 8–1.85M) | completion string (lengths 1–182k) | comp_name string (lengths 5–57) |
|---|---|---|---|
2,773,958 | rec_scores = weighted_hybrid([
(supp_scores, 0.25),
(conf_scores, 0.25),
(phi_scores, 0.25),
(is_scores, 0.25),
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top)) )<compute_test_metric> | df_train['Age_binned'] = pd.cut(df_train['Age'], np.arange(0, 85, 5), include_lowest=True)
df_test['Age_binned'] = pd.cut(df_test['Age'], np.arange(0, 85, 5), include_lowest=True ) | Titanic - Machine Learning from Disaster |
2,773,958 | rec_scores = weighted_hybrid([
(supp_scores, 2.0),
(conf_scores, 0.25),
(phi_scores, 0.25),
(is_scores, 0.25),
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top)) )<compute_test_metric> | df_train['Age_binned'] = pd.cut(df_train['Age'], [0, 5, 30, 60, 80], labels=[0, 1, 2, 3], retbins=False, include_lowest=True)
df_train['Age_binned'] = df_train['Age_binned'].astype('int')
df_test['Age_binned'] = pd.cut(df_test['Age'], [0, 5, 30, 60, 80], labels=[0, 1, 2, 3], retbins=False, include_lowest=True)
df_test['Age_binned'] = df_test['Age_binned'].astype('int') | Titanic - Machine Learning from Disaster |
2,773,958 | rec_scores = weighted_hybrid([
(supp_scores, 3.0),
(conf_scores, 1.5),
(phi_scores, 1.5),
(is_scores, 1.0),
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top)) )<compute_train_metric> | df_train.drop(columns='Age', inplace=True)
df_test.drop(columns='Age', inplace=True ) | Titanic - Machine Learning from Disaster |
2,773,958 | cos_scores = collaborative_filter(s_train, s_input, sim_fn=cosine_sim, j=30)
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(cos_scores), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(cos_scores), k=k_top)) )<compute_test_metric> | df_train['SibSp'] = df_train['SibSp'].map(lambda x: 3 if x == 4 or x == 5 or x == 8 else x)
df_test['SibSp'] = df_test['SibSp'].map(lambda x: 3 if x == 4 or x == 5 or x == 8 else x ) | Titanic - Machine Learning from Disaster |
2,773,958 | rec_scores = weighted_hybrid([
(supp_scores, 1.0),
(conf_scores, 1.0),
(phi_scores, 1.0),
(is_scores, 1.0),
(cos_scores, 10.0)
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(rec_scores), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(rec_scores), k=k_top)) )<import_modules> | df_train['Parch'] = df_train['Parch'].map(lambda x: x if x == 0 else 1)
df_test['Parch'] = df_test['Parch'].map(lambda x: x if x == 0 else 1 ) | Titanic - Machine Learning from Disaster |
2,773,958 | from redcarpet import write_kaggle_recs<load_pretrained> | df_train['SibSp+Parch'] = df_train['SibSp+Parch'].map(lambda x: 1 if x == 1 or x == 2 or x == 3 else x)
df_test['SibSp+Parch'] = df_test['SibSp+Parch'].map(lambda x: 1 if x == 1 or x == 2 or x == 3 else x)
df_train['SibSp+Parch'] = df_train['SibSp+Parch'].map(lambda x: 2 if x == 4 or x == 5 or x == 6 or x == 7 or x == 10 else x)
df_test['SibSp+Parch'] = df_test['SibSp+Parch'].map(lambda x: 2 if x == 4 or x == 5 or x == 6 or x == 7 or x == 10 else x) | Titanic - Machine Learning from Disaster |
2,773,958 | s_hold_input = pickle.load(open("../input/hold_set.pkl", "rb"))
print("Hold Out Set: N = {}".format(len(s_hold_input)))
s_all_input = s_input + s_hold_input
print("All Input: N = {}".format(len(s_all_input)) )<statistical_test> | df_train['Fare_binned'] = pd.cut(df_train['Fare'], bins=[0,25,75,513], labels=[0, 1, 2], retbins=False, include_lowest=True)
df_train['Fare_binned'] = df_train['Fare_binned'].astype('int')
df_test['Fare_binned'] = pd.cut(df_test['Fare'], bins=[0,25,75,513], labels=[0, 1, 2], retbins=False, include_lowest=True)
df_test['Fare_binned'] = df_test['Fare_binned'].astype('int') | Titanic - Machine Learning from Disaster |
2,773,958 | print("Final Model")
print("Strategy: Association Rules")
print("Scoring: Hybrid")
supp_scores, _ = association_filter(used_rules, m_train, s_all_input, score_fn=rule_support)
conf_scores, _ = association_filter(used_rules, m_train, s_all_input, score_fn=rule_confidence)
phi_scores, _ = association_filter(used_rules, m_train, s_all_input, score_fn=rule_phi_correlation)
is_scores, _ = association_filter(used_rules, m_train, s_all_input, score_fn=rule_is_score)
final_scores = weighted_hybrid([
(supp_scores, 0.25),
(conf_scores, 0.25),
(phi_scores, 0.25),
(is_scores, 0.25),
])
final_recs = get_recs(final_scores)<save_to_csv> | df_train.drop(columns='Fare', inplace=True)
df_test.drop(columns='Fare', inplace=True) | Titanic - Machine Learning from Disaster |
2,773,958 | outfile = "kaggle_submission_association_rules_hybrid.csv"
n_lines = write_kaggle_recs(final_recs, outfile)
print("Wrote predictions for {} users to {}.".format(n_lines, outfile))<import_modules> | df_train['Cabin'] = df_train['Cabin'].str.get(0 ) | Titanic - Machine Learning from Disaster |
2,773,958 | import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier<load_from_csv> | df_test['HaveCabin'] = df_test['Cabin'].str.get(0)
df_train['HaveCabin'] = df_train['Cabin'].map(lambda x: 0 if x == 'Z' else 1)
df_test['HaveCabin'] = df_test['HaveCabin'].map(lambda x: 0 if x == 'Z' else 1) | Titanic - Machine Learning from Disaster |
2,773,958 | df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
sample_sub = pd.read_csv('../input/sampleSubmission.csv')<prepare_x_and_y> | df_train.drop(columns=['Cabin'], inplace=True)
df_test.drop(columns=['Cabin'], inplace=True) | Titanic - Machine Learning from Disaster |
2,773,958 | X = df_train.profession.values
y = df_train.target.values
X_test = df_test.profession.values<predict_on_test> | estimator = [('Logistic Regression', LogisticRegression),('Ridge Classifier', RidgeClassifier),
('SGD Classifier', SGDClassifier),('Passive Aggressive Classifier', PassiveAggressiveClassifier),
('SVC', SVC),('Linear SVC', LinearSVC),('Nu SVC', NuSVC),
('K-Neighbors Classifier', KNeighborsClassifier),
('Gaussian Naive Bayes', GaussianNB),('Multinomial Naive Bayes', MultinomialNB),
('Bernoulli Naive Bayes', BernoulliNB),('Complement Naive Bayes', ComplementNB),
('Decision Tree Classifier', DecisionTreeClassifier),
('Random Forest Classifier', RandomForestClassifier),('AdaBoost Classifier', AdaBoostClassifier),
('Gradient Boosting Classifier', GradientBoostingClassifier),('Bagging Classifier', BaggingClassifier),
('Extra Trees Classifier', ExtraTreesClassifier),('XGBoost', XGBClassifier)]
X_train = df_train.drop(columns='Survived')
y_train = df_train['Survived']
comparison_cols = ['Algorithm', 'Training Time(Avg)', 'Accuracy(Avg)', 'Accuracy(3xSTD)']
comparison_df = pd.DataFrame(columns=comparison_cols)
cv_split = StratifiedShuffleSplit(n_splits=10, test_size=0.3, random_state=0)
for idx, est in enumerate(estimator):
cv_results = cross_validate(est[1](), X_train, y_train, cv=cv_split)
comparison_df.loc[idx, 'Algorithm'] = est[0]
comparison_df.loc[idx, 'Training Time(Avg)'] = cv_results['fit_time'].mean()
comparison_df.loc[idx, 'Accuracy(Avg)'] = cv_results['test_score'].mean()
comparison_df.loc[idx, 'Accuracy(3xSTD)'] = cv_results['test_score'].std() * 3
comparison_df.set_index(keys='Algorithm', inplace=True)
comparison_df.sort_values(by='Accuracy(Avg)', ascending=False, inplace=True)
fig, ax = plt.subplots(figsize=(12,10))
y_pos = np.arange(len(comparison_df))
ax.barh(y_pos, comparison_df['Accuracy(Avg)'], xerr=comparison_df['Accuracy(3xSTD)'], color='skyblue')
ax.set_yticks(y_pos)
ax.set_yticklabels(comparison_df.index)
ax.set_xlabel('Accuracy Score(Average)')
ax.set_title('Performance Comparison After Simple Modelling', size=13)
ax.set_xlim(0, 1)
plt.show() | Titanic - Machine Learning from Disaster |
2,773,958 | model = DecisionTreeClassifier(max_depth=4)
model.fit(X.reshape(-1, 1), y)
y_hat = model.predict_proba(X_test.reshape(-1, 1))[:, 1]<prepare_output> | estimator = [('Logistic Regression', LogisticRegression),('Ridge Classifier', RidgeClassifier),('SVC', SVC),
('Linear SVC', LinearSVC),('Nu SVC', NuSVC),('Random Forest Classifier', RandomForestClassifier),
('AdaBoost Classifier', AdaBoostClassifier),
('Gradient Boosting Classifier', GradientBoostingClassifier),
('Bagging Classifier', BaggingClassifier),('XGBoost', XGBClassifier)
]
index = [est[0] for est in estimator]
grid_params = {'SVC': {'C': np.arange(1,21,1), 'gamma': [0.005, 0.01, 0.015, 0.02], 'random_state': [0]},
'Ridge Classifier': {'alpha': [0.001, 0.0025, 0.005], 'random_state': [0]},
'Nu SVC': {'nu': [0.5], 'gamma': [0.001, 0.01, 0.1, 1], 'random_state': [0]},
'Gradient Boosting Classifier': {'learning_rate': [0.001, 0.005, 0.01, 0.015], 'random_state': [0],
'max_depth': [1,2,3,4,5], 'n_estimators': [300, 350, 400, 450, 500]},
'Linear SVC': {'C': [1, 5, 10], 'random_state': [0]},
'Logistic Regression': {'C': np.arange(2,7.5,0.25),
'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'random_state': [0]},
'AdaBoost Classifier': {'learning_rate': np.arange(0.05, 0.21, 0.01), 'n_estimators': [50, 75, 100, 125, 150],
'random_state': [0]},
'Random Forest Classifier': {'n_estimators': [200, 250, 300, 350], 'max_depth': [1,2,3,4,5,6],
'criterion': ['gini', 'entropy'], 'random_state': [0]},
'Bagging Classifier': {'n_estimators': np.arange(200, 300, 10), 'random_state': [0]},
'XGBoost': {'learning_rate': [0.001, 0.005, 0.01, 0.015], 'random_state': [0],
'max_depth': [1,2,3,4,5], 'n_estimators': [300, 350, 400, 450, 500]}
}
best_params_df = pd.DataFrame(columns=['Optimized Hyperparameters', 'Accuracy'], index=index)
for idx, est in enumerate(estimator):
best_clf = GridSearchCV(est[1](), param_grid=grid_params[est[0]], cv=cv_split, scoring='accuracy', n_jobs=12)
best_clf.fit(X_train, y_train)
best_params_df.loc[est[0], 'Optimized Hyperparameters'] = [best_clf.best_params_]
best_params_df.loc[est[0], 'Accuracy'] = best_clf.best_score_
| Titanic - Machine Learning from Disaster |
2,773,958 | sample_sub['target'] = y_hat
<save_to_csv> | xg = GradientBoostingClassifier(learning_rate=0.005, max_depth=2, n_estimators=450, random_state=0)
selector = RFECV(xg, step=1, cv=cv_split, scoring='accuracy', n_jobs=8)
selector = selector.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
2,773,958 | sample_sub.to_csv('v_01.csv', index=False)<load_pretrained> | pd.DataFrame([X_train.columns, selector.ranking_], index=['Features', 'Ranking']).T.sort_values(by='Ranking') | Titanic - Machine Learning from Disaster |
2,773,958 | class SentimentDataset(Dataset):
def __init__(self,datalines):
self.xydata = datalines
def __len__(self):
return len(self.xydata)
def __getitem__(self,idx):
return self.xydata[idx]
def load_data_set(filename):
istream = open(filename)
istream.readline()
xydataset = []
for line in istream:
fields = line.split(',')
label = fields[0]
text = ','.join(fields[1:])
xydataset.append((text, label))
istream.close()
return SentimentDataset(xydataset)
train_set = load_data_set('../input/sentimentIMDB_train.csv')
print('Loaded %d examples as train set.'%(len(train_set)))
print('Train data Sample(1st batch only)')
train_loader = DataLoader(train_set, batch_size=4, shuffle=True)
for text_batch, label_batch in train_loader:
for text,label in zip(text_batch, label_batch):
print(' ',text[:50],'...',label)
break
test_set = load_data_set('../input/sentimentIMDB_test.csv')
print('Loaded %d examples as test set.'%(len(test_set)))<feature_engineering> | selected_features = ['Pclass', 'Sex', 'SibSp+Parch', 'FamilySurvival', 'Title_Mr', 'Fare_binned']
gb = GradientBoostingClassifier(learning_rate=0.005, max_depth=2, n_estimators=450, random_state=0)
gb.fit(X_train[selected_features], y_train)
gb_acc_score = cross_val_score(gb, X_train[selected_features], y_train, cv=cv_split, scoring='accuracy')
print('The performance of the model using the selected features: {:.2f}%'.format(gb_acc_score.mean() * 100)) | Titanic - Machine Learning from Disaster |
2,773,958 | def make_w2idx(dataset):
wordset = set([])
for text,label in dataset:
words = text.split()
wordset.update(words)
return dict(zip(wordset,range(len(wordset))))
def vectorize_text(text,w2idx):
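# Bag-of-words encoding: one vector slot per vocabulary word, holding that word's count in the text.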
counts = Counter(text.split())
xvec = torch.zeros(len(w2idx))
for word in counts:
if word in w2idx:
xvec[w2idx[word]] = counts[word]
return xvec.squeeze()
def vectorize_target(ylabel):
return torch.tensor(float(ylabel))<train_model> | gb = GradientBoostingClassifier(learning_rate=0.005, max_depth=2, n_estimators=450, random_state=0)
gb.fit(X_train[selected_features], y_train)
test_Survived = pd.DataFrame(gb.predict(df_test[selected_features]), columns=['Survived'], index=np.arange(892,1310,1))
test_Survived = test_Survived.reset_index()
test_Survived.rename(columns={'index': 'PassengerID'}, inplace=True)
test_Survived.to_csv("gb.csv",index=False ) | Titanic - Machine Learning from Disaster |
2,295,536 | class SentimentAnalyzer(nn.Module):
def __init__(self):
super(SentimentAnalyzer, self).__init__()
self.reset_structure(1,1)
def reset_structure(self,vocab_size, num_labels):
self.W = nn.Linear(vocab_size, num_labels)
def forward(self, text_vec):
return torch.sigmoid(self.W(text_vec))
def train(self,train_set,learning_rate,epochs):
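# Rebuild the linear layer for the training vocabulary, run SGD on binary cross-entropy,
# and checkpoint the weights whenever validation accuracy reaches a new best.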
self.w2idx = make_w2idx(train_set)
self.reset_structure(len(self.w2idx),1)
loss_func = nn.BCELoss()
optimizer = optim.SGD(self.parameters(), lr=learning_rate)
train_dataset, dev_dataset = random_split(train_set, [20000, 5000])
data_loader = DataLoader(train_dataset, batch_size=len(train_set), shuffle=True)
max_acc = 0
for epoch in range(epochs):
global_logloss = 0.0
for Xbatch,Ybatch in data_loader:
for X, Y in zip(Xbatch,Ybatch):
self.zero_grad()
xvec = vectorize_text(X,self.w2idx)
yvec = vectorize_target(Y)
prob = self(xvec).squeeze()
loss = loss_func(prob,yvec)
loss.backward()
optimizer.step()
global_logloss += loss.item()
validation_acc = self.eval_test(dev_dataset)
print("Epoch %d, mean cross entropy = %f, Validation accurracy : %f"%(epoch,global_logloss/len(train_set),validation_acc))
if validation_acc >= max_acc:
torch.save(self.state_dict(), 'sentiment_model.wt')
max_acc = validation_acc
self.load_state_dict(torch.load('sentiment_model.wt'))
def eval_test(self,dev_set):
with torch.no_grad():
data_loader = DataLoader(dev_set, batch_size=len(dev_set), shuffle=False)
ncorrect = 0
N = 0
for Xbatch,Ybatch in data_loader:
for X,Y in zip(Xbatch,Ybatch):
xvec = vectorize_text(X,self.w2idx)
prob = self(xvec).squeeze()
if int(prob > 0.5) == int(Y):
ncorrect += 1
N += 1
return float(ncorrect)/float(N)
def run_test(self,test_set,pred_filename):
with torch.no_grad():
data_loader = DataLoader(test_set, batch_size=len(test_set), shuffle=False)
idxList = []
sentList = []
for Xbatch,idxbatch in data_loader:
for X,idx in zip(Xbatch,idxbatch):
xvec = vectorize_text(X,self.w2idx)
prob = self(xvec).squeeze()
idxList.append(idx)
sentList.append(int(prob > 0.5))
df = pa.DataFrame({'idx':idxList,'sentY':sentList})
df.to_csv(pred_filename,index=False)
print('done.')<train_on_grid> | warnings.filterwarnings('ignore')
%matplotlib inline | Titanic - Machine Learning from Disaster |
2,295,536 | sent = SentimentAnalyzer()
sent.train(train_set, 0.01, 50)<import_modules> | train_data = pd.read_csv("../input/train.csv")
train_data.columns | Titanic - Machine Learning from Disaster |
2,295,536 | import pandas as pd
import numpy as np<import_modules> | test = pd.read_csv(".. /input/test.csv")
IDtest = test["PassengerId"]
| Titanic - Machine Learning from Disaster |
2,295,536 | import pandas as pd
import numpy as np<define_variables> | train_data.drop(['PassengerId','Ticket'], axis=1, inplace = True)
| Titanic - Machine Learning from Disaster |
2,295,536 | t = 732
m=8
n=8<load_from_csv> | train_data.isnull().sum() | Titanic - Machine Learning from Disaster |
2,295,536 | df = pd.read_csv('../input/data.txt',skiprows=2,sep=' ',names=list(map(str,(list(range(n))))))<define_variables> | train_data['Age'].fillna(train_data['Age'].mean(), inplace = True)
test['Age'].fillna(test['Age'].mean(), inplace = True)
train_data["Embarked"] = train_data["Embarked"].fillna("C")
test["Embarked"] = test["Embarked"].fillna("C")
train_data['Fare'].fillna(train_data['Fare'].median(), inplace = True)
test['Fare'].fillna(test['Fare'].median(), inplace = True)
train_data.Cabin.fillna("N", inplace=True)
test.Cabin.fillna("N", inplace=True ) | Titanic - Machine Learning from Disaster |
2,295,536 | arr = np.array(df)<prepare_x_and_y> | train_data.isnull().sum() | Titanic - Machine Learning from Disaster |
2,295,536 | df_r = pd.DataFrame(columns=['id','m1','m2','m3','m4'])
for i in range(m):
for j in range(n):
for k in range(732):
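# Assuming hourly timesteps: m1 = mean at cell (i, j) over the same hour across days; m2 = mean over the
# whole grid at time k; m3 = mean at (i, j) over the same hour of the same weekday; m4 = mean over the
# 4x4 block containing (i, j) at time k. Entries equal to -1 mark missing values and are filtered out.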
temp = td[::24,i,j]
temp = temp[temp!=-1]
temp2 = td[k,:,:]
temp2 = temp2[temp2!=-1]
temp3 = td[::24*7,i,j]
temp3 = temp3[temp3!=-1]
x = i//4
y = j//4
temp4 = td[k,x*4:(x+1)*4,y*4:(y+1)*4]
temp4 = temp4[temp4!=-1]
st = str(k)+':'+str(i)+':'+str(j)
df_r = df_r.append({'id':st,'m1':temp.mean(),'m2':temp2.mean(),'m3':temp3.mean(),'m4':temp4.mean()},ignore_index=True)<concatenate> | train_title = [i.split(",")[1].split(".")[0].strip() for i in train_data["Name"]]
train_data["Title"] = pd.Series(train_title)
train_data["Title"].head() | Titanic - Machine Learning from Disaster |
2,295,536 | new_df = df.stack()<create_dataframe> | test_title = [i.split(",")[1].split(".")[0].strip() for i in test["Name"]]
test["Title"] = pd.Series(test_title)
test["Title"].head() | Titanic - Machine Learning from Disaster |
2,295,536 | new_df = pd.DataFrame(new_df )<rename_columns> | train_data["Title"] = train_data["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
train_data["Title"] = train_data["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
train_data["Title"] = train_data["Title"].astype(int ) | Titanic - Machine Learning from Disaster |
2,295,536 | new_df.columns=['value']<rename_columns> | test["Title"] = test["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
test["Title"] = test["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3})
test["Title"] = test["Title"].astype(int ) | Titanic - Machine Learning from Disaster |
2,295,536 | new_df.columns = ['t','n','val']<feature_engineering> | train_data["Family_size"] = train_data["SibSp"] + train_data["Parch"] + 1
test["Family_size"] = test["SibSp"] + test["Parch"] + 1
| Titanic - Machine Learning from Disaster |
2,295,536 | new_df['m'] = new_df['t'].apply(lambda x : int(x) % 8)<feature_engineering> | train_data['Single'] = train_data['Family_size'].map(lambda s: 1 if s == 1 else 0)
train_data['Small_family'] = train_data['Family_size'].map(lambda s: 1 if s == 2 else 0)
train_data['Med_family'] = train_data['Family_size'].map(lambda s: 1 if 3 <= s <= 4 else 0)
train_data['Large_family'] = train_data['Family_size'].map(lambda s: 1 if s >= 5 else 0)
| Titanic - Machine Learning from Disaster |
2,295,536 | new_df['t'] = new_df['t'].apply(lambda x: x // 8)<feature_engineering> | test['Single'] = test['Family_size'].map(lambda s: 1 if s == 1 else 0)
test['Small_family'] = test['Family_size'].map(lambda s: 1 if s == 2 else 0)
test['Med_family'] = test['Family_size'].map(lambda s: 1 if 3 <= s <= 4 else 0)
test['Large_family'] = test['Family_size'].map(lambda s: 1 if s >= 5 else 0) | Titanic - Machine Learning from Disaster |
2,295,536 | new_df['hour'] = new_df['t'].apply(lambda x : x % 24)<feature_engineering> | train_data['survived_dead'] = train_data['Survived'].apply(lambda x : 'Survived' if x == 1 else 'Dead') | Titanic - Machine Learning from Disaster |
2,295,536 | new_df['day'] = new_df['t'].apply(lambda x : x // 24)<feature_engineering> | | Titanic - Machine Learning from Disaster |
2,295,536 | new_df['mm'] = new_df['m']
new_df['nn'] = new_df['n']<feature_engineering> | lbl = LabelEncoder()
lbl.fit(list(train_data['Embarked'].values))
train_data['Embarked'] = lbl.transform(list(train_data['Embarked'].values))
lbl.fit(list(test['Embarked'].values))
test['Embarked'] = lbl.transform(list(test['Embarked'].values)) | Titanic - Machine Learning from Disaster |
2,295,536 | new_df['id'] = new_df['t'].map(str)+':'+new_df['mm'].map(str)+':'+new_df['nn'].map(str)<merge> | train_data['FareBin'] = pd.qcut(train_data['Fare'], 4)
train_data['AgeBin'] = pd.cut(train_data['Age'].astype(int), 5)
test['FareBin'] = pd.qcut(test['Fare'], 4)
test['AgeBin'] = pd.cut(test['Age'].astype(int), 5) | Titanic - Machine Learning from Disaster |
2,295,536 | new_df = df_r.set_index('id').join(new_df.set_index('id'))<normalization> | train_data['AgeBin_Code'] = lbl.fit_transform(train_data['AgeBin'])
train_data['FareBin_Code'] = lbl.fit_transform(train_data['FareBin'])
test['AgeBin_Code'] = lbl.fit_transform(test['AgeBin'])
test['FareBin_Code'] = lbl.fit_transform(test['FareBin']) | Titanic - Machine Learning from Disaster |
2,295,536 | scaler = StandardScaler()
scaler.fit(new_df.drop(['val','t','mm','nn'],axis=1))<normalization> | def encode(x): return 1 if x == 'female' else 0
train_data['enc_sex'] = train_data.Sex.apply(encode)
test['enc_sex'] = test.Sex.apply(encode) | Titanic - Machine Learning from Disaster |
2,295,536 | scaler.transform(new_df.drop(['val','t','mm','nn'],axis=1))<normalization> | train_data["has_cabin"] = [0 if i == 'N' else 1 for i in train_data.Cabin]
test["has_cabin"] = [0 if i == 'N' else 1 for i in test.Cabin] | Titanic - Machine Learning from Disaster |
2,295,536 | new_df[['m1','m2','m3','m4','n','m','hour','day']] = scaler.transform(new_df.drop(['val','t','mm','nn'],axis=1))<filter> | def detect_outliers(train_data,n,features):
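# Tukey's IQR rule: flag rows that fall outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] in more than n of the given features.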
outlier_indices = []
for col in features:
Q1 = np.percentile(train_data[col], 25)
Q3 = np.percentile(train_data[col],75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outlier_list_col = train_data[(train_data[col] < Q1 - outlier_step)|(train_data[col] > Q3 + outlier_step)].index
outlier_indices.extend(outlier_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)
return multiple_outliers
Outliers_to_drop = detect_outliers(train_data,2,["Age","SibSp","Parch","Fare"])
| Titanic - Machine Learning from Disaster |
2,295,536 | train = new_df[new_df['val'] != -1]<prepare_x_and_y> | train_data.loc[Outliers_to_drop] | Titanic - Machine Learning from Disaster |
2,295,536 | X_train = train.drop(['val','t','mm','nn'], axis=1)
y_train = train['val'].values<filter> | train_data = train_data.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) | Titanic - Machine Learning from Disaster |
2,295,536 | test = new_df[new_df['val'] == -1]<drop_column> | y_train = train_data["Survived"]
X_train = train_data.drop(labels = ["Survived"], axis = 1) | Titanic - Machine Learning from Disaster |
2,295,536 | X_test = test.drop(['val','t','mm','nn'], axis=1)<import_modules> | test = test.select_dtypes(include=[np.number]).interpolate().dropna()
test = test[X_train.columns]
| Titanic - Machine Learning from Disaster |
2,295,536 | import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam<choose_model_class> | sc = StandardScaler()
X_train = sc.fit_transform(X_train)
test = sc.transform(test) | Titanic - Machine Learning from Disaster |
2,295,536 | model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(8,)))
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(1))
model.summary()<compute_test_metric> | kfold = StratifiedKFold(n_splits=10) | Titanic - Machine Learning from Disaster |
2,295,536 | def root_mean_squared_error(y_true, y_pred):
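# RMSE built from Keras backend ops so it stays differentiable and can serve as a training loss.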
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))<choose_model_class> | ExtC = ExtraTreesClassifier()
ex_param_grid = {"max_depth": [4],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsExtC = GridSearchCV(ExtC,param_grid = ex_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsExtC.fit(X_train,y_train)
ExtC_best = gsExtC.best_estimator_
gsExtC.best_score_ | Titanic - Machine Learning from Disaster |
2,295,536 | model.compile(loss=root_mean_squared_error, optimizer='adam')<train_model> | RFC = RandomForestClassifier()
rf_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
gsRFC = GridSearchCV(RFC,param_grid = rf_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsRFC.fit(X_train,y_train)
RFC_best = gsRFC.best_estimator_
gsRFC.best_score_ | Titanic - Machine Learning from Disaster |
2,295,536 | model.fit(X_train, y_train, batch_size=128, epochs=240, verbose=1, validation_split=0.2)<train_model> | DTC = DecisionTreeClassifier()
adaDTC = AdaBoostClassifier(DTC, random_state=7)
ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"],
"base_estimator__splitter" : ["best", "random"],
"algorithm" : ["SAMME","SAMME.R"],
"n_estimators" :[30],
"learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3,1.5]}
gsadaDTC = GridSearchCV(adaDTC,param_grid = ada_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsadaDTC.fit(X_train,y_train)
ada_best = gsadaDTC.best_estimator_
gsadaDTC.best_score_ | Titanic - Machine Learning from Disaster |
2,295,536 | model_XGB = XGBRegressor()
model_XGB.fit(X_train, y_train)<feature_engineering> | SVMC = SVC(probability=True)
svc_param_grid = {'kernel': ['rbf'],
'gamma': [ 0.001, 0.01, 0.1, 1],
'C': [1, 10, 50, 100,200,300, 1000]}
gsSVMC = GridSearchCV(SVMC,param_grid = svc_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsSVMC.fit(X_train,y_train)
SVMC_best = gsSVMC.best_estimator_
gsSVMC.best_score_ | Titanic - Machine Learning from Disaster |
2,295,536 | def make_positive(x):
if x<0:
return 0
else:
return x<predict_on_test> | GBC = GradientBoostingClassifier()
gb_param_grid = {'loss' : ["deviance"],
'n_estimators' : [100,200,300],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [4, 8],
'min_samples_leaf': [100,150],
'max_features': [0.3, 0.1]
}
gsGBC = GridSearchCV(GBC,param_grid = gb_param_grid, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1)
gsGBC.fit(X_train,y_train)
GBC_best = gsGBC.best_estimator_
gsGBC.best_score_ | Titanic - Machine Learning from Disaster |
2,295,536 | predict = model.predict(X_test)<feature_engineering> | votingC = VotingClassifier(estimators=[('rfc', RFC_best),('extc', ExtC_best),('svm',SVMC_best),
('gbc',GBC_best)], voting='soft', n_jobs=4)
votingC = votingC.fit(X_train, y_train)
| Titanic - Machine Learning from Disaster |
2,295,536 | <feature_engineering><EOS> | test_Survived = pd.Series(votingC.predict(test), name="Survived")
Submission = pd.concat([IDtest,test_Survived],axis=1)
Submission.to_csv("submission.csv",index=False ) | Titanic - Machine Learning from Disaster |
1,259,505 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
1,259,505 | X_test[['id','demand']].to_csv('result.csv', index=False)<load_from_csv> | train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
1,259,505 | training_data, training_labels = load_svmlight_file('../input/movie-ratings/movie-ratings/data-splits/data.train')
testing_data, testing_labels = load_svmlight_file('../input/movie-ratings/movie-ratings/data-splits/data.test')
testing_data = csr_matrix((testing_data.data, testing_data.indices, testing_data.indptr), shape=(testing_data.shape[0], training_data.shape[1]))<normalization> | for dataset in combine:
dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex']) | Titanic - Machine Learning from Disaster |
1,259,505 | class SVM:
def __init__(self, learning_rate=0.1, reg_const=0.05, l1=False, num_iters=10, batch_size=200):
self.W = None
self.learning_rate = learning_rate
self.reg_const = reg_const
self.l1 = l1
if self.l1:
self.reg_fun = lambda x: np.max(np.sum(np.abs(x), axis=0))
self.reg_fun_grad = lambda x: np.sum([np.sign(val) for val in x.flatten() if not np.isclose(val, 0, atol=1e-5)])
else:
self.reg_fun = lambda x: 0.5*np.sum(x*x)
self.reg_fun_grad=lambda x:x
self.num_iters = num_iters
self.batch_size = batch_size
def loss(self,X_batch, y_batch):
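# Multiclass hinge loss: penalize every class whose score comes within a margin of 1 of the correct class's score, plus the chosen regularizer.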
num_train = X_batch.shape[0]
num_classes = self.W.shape[1]
scores = X_batch.dot(self.W)
correct_class_scores = scores[range(num_train), y_batch].reshape(-1,1)
margins = np.maximum(0, scores + 1 - correct_class_scores)
margins[range(num_train), y_batch] = 0
loss = np.sum(margins) / num_train + self.reg_const * self.reg_fun(self.W)
coeff_mat = np.zeros((num_train, num_classes))
coeff_mat[margins > 0] = 1
coeff_mat[range(num_train), y_batch] = - np.sum(coeff_mat, axis=1)
dW = (X_batch.T).dot(coeff_mat)
dW = dW / num_train + self.reg_const * self.reg_fun_grad(self.W)
return loss, dW
def fit(self, X, y, **kwargs):
if kwargs:
self.learning_rate = kwargs['learning_rate']
self.reg_const = kwargs['reg_const']
self.l1 = kwargs['l1']
if self.l1:
self.reg_fun = lambda x: np.max(np.sum(np.abs(x), axis=0))
self.reg_fun_grad = lambda x: np.sum([np.sign(val) for val in x.flatten() if not np.isclose(val, 0, atol=1e-5)])
else:
self.reg_fun = lambda x: 0.5*np.sum(x*x)
self.reg_fun_grad=lambda x:x
self.num_iters = kwargs['num_iters']
self.batch_size = kwargs['batch_size']
num_train, dim = X.shape
num_classes = 2
if self.W is None:
self.W = 0.001 * np.random.randn(int(dim), int(num_classes))
for it in trange(self.num_iters, mininterval=5):
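# Mini-batch SGD: sample a batch with replacement and take one gradient step per iteration.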
batch_idx = np.random.choice(num_train, self.batch_size, replace = True)
X_batch = X[batch_idx]
y_batch = y[batch_idx]
loss, grad = self.loss(X_batch, y_batch.astype(int))
self.W -= self.learning_rate * grad
def get_params(self, deep = False):
return {'learning_rate': self.learning_rate, 'reg_const': self.reg_const,
'l1': self.l1, 'num_iters':self.num_iters, 'batch_size':self.batch_size}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def predict(self, X):
scores = X.dot(self.W)
y_pred = np.argmax(scores, axis =1)
return y_pred
def score(self, X, y):
assert X.shape[0] == y.shape[0]
correct = sum(1 for actual, prediction in zip(y, self.predict(X)) if np.sign(actual) == np.sign(prediction))
return correct/X.shape[0]<train_model> | for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
1,259,505 | svm = SVM()
svm.fit(training_data, training_labels, learning_rate=0.01, reg_const=0.05, l1=False, num_iters=100_000, batch_size=200)<compute_test_metric> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | svm.score(testing_data, testing_labels)<train_on_grid> | train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape | Titanic - Machine Learning from Disaster |
1,259,505 | clf = GridSearchCV(SVM(), param_grid={'learning_rate':[0.0001, 0.0005, 0.001], 'reg_const':[0.0001,0.001,0.01,0.1], 'l1': [False], 'num_iters':[200_000], 'batch_size':[200]}, cv=5, scoring='accuracy', n_jobs=-1)
clf.fit(testing_data, testing_labels)<find_best_score> | for dataset in combine:
dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | print(clf.best_params_)
print(clf.best_score_)<compute_train_metric> | guess_ages = np.zeros((2, 3))
guess_ages | Titanic - Machine Learning from Disaster |
1,259,505 | cross_val_score(SVM(), training_data, y=training_labels, scoring='accuracy',
fit_params=clf.best_params_, cv=5, n_jobs=-1)<compute_test_metric> | for dataset in combine:
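# Impute missing ages with the median age of each (Sex, Pclass) group, rounded to the nearest 0.5 year.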
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i) & \
(dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5) * 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j+1),\
'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | clf.best_estimator_.score(testing_data, testing_labels)<compute_train_metric> | train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True) | Titanic - Machine Learning from Disaster |
1,259,505 | svm = clf.best_estimator_
eval_data, _ = load_svmlight_file('../input/movie-ratings/movie-ratings/data-splits/data.eval.anon')
eval_data = csr_matrix((eval_data.data, eval_data.indices, eval_data.indptr), shape=(eval_data.shape[0], training_data.shape[1]))
submission_pred = svm.predict(eval_data)<save_to_csv> | for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | with open('submission.csv', 'w') as submission:
with open('../input/movie-ratings/movie-ratings/data-splits/data.eval.anon.id', 'r') as example_ids:
submission.write('example_id,label\n')
for example_id, label in zip(example_ids, submission_pred):
submission.write('{},{}\n'.format(example_id.strip(), int(label)))<set_options> | train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | %reload_ext autoreload
%autoreload 2
%matplotlib inline
<define_variables> | for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
1,259,505 | PATH = ".. /input/"
TMP_PATH = "/tmp/tmp"
MODEL_PATH = "/tmp/model/"
sz=224<set_options> | for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
1,259,505 | torch.cuda.is_available()<set_options> | train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | torch.backends.cudnn.enabled<define_variables> | for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10) | Titanic - Machine Learning from Disaster |
1,259,505 | fnames = np.array([f'train/{f}' for f in sorted(os.listdir(f'{PATH}train'))])
labels = np.array([(0 if 'cat' in fname else 1) for fname in fnames])<load_pretrained> | freq_port = train_df.Embarked.dropna().mode()[0]
freq_port | Titanic - Machine Learning from Disaster |
1,259,505 | arch=resnet34
data=ImageClassifierData.from_paths(path=PATH,val_name='valid',test_name='test',tfms=tfms_from_model(arch, sz))<load_pretrained> | for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
1,259,505 | learn = ConvLearner.pretrained(arch, data, precompute=True, tmp_name=TMP_PATH, models_name=MODEL_PATH)<train_model> | for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | learn.fit(0.01, 2)<predict_on_test> | test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head() | Titanic - Machine Learning from Disaster |
1,259,505 | log_preds = learn.predict(is_test=True)<prepare_output> | train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True) | Titanic - Machine Learning from Disaster |
1,259,505 | lps = np.argmax(log_preds, axis=1)<define_variables> | for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10) | Titanic - Machine Learning from Disaster |
1,259,505 | preds_classes = [data.classes[i] for i in lps]<load_from_csv> | from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier | Titanic - Machine Learning from Disaster |
1,259,505 | sub_frame = pd.read_csv('../input/Sample_Sub.csv')
sub_frame['Category'] = preds_classes<save_to_csv> | from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
1,259,505 | sub_frame.to_csv('Submission.csv', index=False)<import_modules> | from sklearn.model_selection import train_test_split
1,259,505 | import numpy as np
import pandas as pd
from pathlib import Path
from fastai import *
from fastai.vision import *
import torchvision
import torch<define_variables> | X_train, X_test, y_train, y_test = train_test_split(train_df.drop("Survived", axis=1), train_df["Survived"], test_size=0.3) | Titanic - Machine Learning from Disaster |
1,259,505 | data_root_path = Path(".. /input" )<load_from_csv> | from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix | Titanic - Machine Learning from Disaster |
1,259,505 | train_df = pd.read_csv(data_root_path/"train.csv")
test_df = pd.read_csv(data_root_path/"sample_submission.csv" )<load_pretrained> | clf = DecisionTreeClassifier(random_state=42)
clf.fit(X_train, y_train)
print_score(clf, X_train, y_train, X_test, y_test, train=True)
print_score(clf, X_train, y_train, X_test, y_test, train=False)
| Titanic - Machine Learning from Disaster |
1,259,505 | transforms = get_transforms(
do_flip=True,
flip_vert=True,
max_rotate=15.0,
max_lighting=0.15,
max_warp=0.2
)
train_imgs = ImageList.from_df(train_df, path=data_root_path/'train', folder='train')
test_imgs = ImageList.from_df(test_df, path=data_root_path/'test', folder='test')
train_imgs = (train_imgs
.split_by_rand_pct(0.01)
.label_from_df()
.add_test(test_imgs)
.transform(transforms, size=128)
.databunch(path='.', bs=64, device= torch.device('cuda:0'))
.normalize(imagenet_stats))<choose_model_class> | bag_clf = BaggingClassifier(base_estimator=clf, n_estimators=1000,
bootstrap=True, n_jobs=-1,
random_state=42)
bag_clf.fit(X_train, y_train)
print_score(bag_clf, X_train, y_train, X_test, y_test, train=True)
print_score(bag_clf, X_train, y_train, X_test, y_test, train=False) | Titanic - Machine Learning from Disaster |
1,259,505 | learn = cnn_learner(train_imgs, torchvision.models.densenet161, metrics=[error_rate, accuracy])<train_model> | bag_clf = BaggingClassifier(base_estimator=clf, n_estimators=1000,
bootstrap=True, oob_score=True,
n_jobs=-1, random_state=42) | Titanic - Machine Learning from Disaster |
1,259,505 | lr = 3e-02
learn.fit_one_cycle(8, slice(lr))<predict_on_test> | bag_clf.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
1,259,505 | preds,_ = learn.get_preds(ds_type=DatasetType.Test)<filter> | bag_clf.oob_score_ | Titanic - Machine Learning from Disaster |
1,259,505 | test_df.has_cactus = preds.numpy()[:, 0]<save_to_csv> | print_score(bag_clf, X_train, y_train, X_test, y_test, train=True) | Titanic - Machine Learning from Disaster |
1,259,505 | test_df.to_csv('submission.csv', index=False)<set_options> | print_score(bag_clf, X_train, y_train, X_test, y_test, train=False) | Titanic - Machine Learning from Disaster |
1,259,505 | plt.style.use('ggplot')
np.random.seed(7)
torch.cuda.manual_seed_all(7)<load_from_csv> | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix | Titanic - Machine Learning from Disaster |
1,259,505 | train_dir=".. /input/train/train"
test_dir=".. /input/test/test"
train = pd.read_csv('.. /input/train.csv')
sub_file = pd.read_csv(".. /input/sample_submission.csv")
data_folder = Path(".. /input" )<normalization> | rf_clf = RandomForestClassifier(random_state=42 ) | Titanic - Machine Learning from Disaster |
1,259,505 | trfm = get_transforms(do_flip=True, flip_vert=True, max_rotate=10.0, max_zoom=1.1, max_lighting=0.2, max_warp=0.2, p_affine=0.75, p_lighting=0.75)<categorify> | rf_clf.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
1,259,505 | test_img = ImageList.from_df(sub_file, path=data_folder/'test', folder='test')
databunch = (ImageList.from_df(train, path=data_folder/'train', folder='train')
.split_by_rand_pct(0.01)
.label_from_df()
.add_test(test_img)
.transform(trfm, size=48)
.databunch(path='.', bs=64, device= torch.device('cuda:0'))
.normalize(imagenet_stats)
)<define_variables> | print_score(rf_clf, X_train, y_train, X_test, y_test, train=True) | Titanic - Machine Learning from Disaster |
1,259,505 | databunch.show_batch(rows=3, figsize=(8,8))<train_model> | print_score(rf_clf, X_train, y_train, X_test, y_test, train=False) | Titanic - Machine Learning from Disaster |
1,259,505 | learn = cnn_learner(databunch, models.resnet34, metrics=[error_rate, accuracy])
learn.fit_one_cycle(5 )<train_model> | from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV | Titanic - Machine Learning from Disaster |
1,259,505 | learn.unfreeze()
learn.fit_one_cycle(5, max_lr=slice(1e-03))<find_best_params> | rf_clf = RandomForestClassifier(random_state=42) | Titanic - Machine Learning from Disaster |
1,259,505 | learn.show_results(rows=3)<find_best_params> | params_grid = {"max_depth": [3, None],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ['gini', 'entropy']} | Titanic - Machine Learning from Disaster |
1,259,505 | interp = ClassificationInterpretation.from_learner(learn)
losses,idxs = interp.top_losses()
len(databunch.valid_ds) == len(losses) == len(idxs)<predict_on_test> | grid_search = GridSearchCV(rf_clf, params_grid,
n_jobs=-1, cv=5,
verbose=1, scoring='accuracy') | Titanic - Machine Learning from Disaster |
1,259,505 | predictions1=learn.get_preds(DatasetType.Test)
predictions2=learn.get_preds(DatasetType.Test)
predictions3=learn.get_preds(DatasetType.Test)
predictions4=learn.get_preds(DatasetType.Test)
predictions5=learn.get_preds(DatasetType.Test)
predictions6=learn.get_preds(DatasetType.Test)
predictions7=learn.get_preds(DatasetType.Test)
predictions8=learn.get_preds(DatasetType.Test)
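# Ensemble the eight prediction passes by summing their probability outputs before thresholding.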
comb_output=[predictions1[0],predictions2[0],predictions3[0],predictions4[0],
predictions5[0],predictions6[0],predictions7[0],predictions8[0]]
comb_output=torch.sum(torch.stack(comb_output),dim=0)<save_to_csv> | grid_search.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
1,259,505 | sub_file.has_cactus = comb_output.numpy()[:, 0]
sub_file.to_csv('submission.csv', index=False)<install_modules> | grid_search.best_score_ | Titanic - Machine Learning from Disaster |
1,259,505 | !pip install -q --upgrade pip
!pip install -q -U tensorflow-gpu==2.0.0-alpha0<import_modules> | grid_search.best_estimator_.get_params() | Titanic - Machine Learning from Disaster |
1,259,505 | AUTOTUNE = tf.data.experimental.AUTOTUNE<load_from_csv> | print_score(grid_search, X_train, y_train, X_test, y_test, train=True) | Titanic - Machine Learning from Disaster |
1,259,505 | train_csv = pd.read_csv('../input/train.csv')
train_csv.head()<define_variables> | print_score(grid_search, X_train, y_train, X_test, y_test, train=False) | Titanic - Machine Learning from Disaster |