kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
10,612,785 | sex_mapping = {"male": 0, "female": 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
train.head()<categorify> | for i in index_nan_age:
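# impute each missing Age with the median age of passengers sharing SibSp, Parch, and Pclass; fall back to the overall median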
age_predict=train_df["Age"][(( train_df["SibSp"]==train_df.iloc[i]["SibSp"])&(train_df["Parch"]==train_df.iloc[i]["Parch"])&(train_df["Pclass"]==train_df.iloc[i]["Pclass"])) ].median()
age_med=train_df["Age"].median()
if not np.isnan(age_predict):
train_df["Age"].iloc[i]=age_predict
else:
train_df["Age"].iloc[i]=age_med
| Titanic - Machine Learning from Disaster |
10,612,785 | embarked_mapping = {"S": 1, "C": 2, "Q": 3}
train['Embarked'] = train['Embarked'].map(embarked_mapping)
test['Embarked'] = test['Embarked'].map(embarked_mapping)
train.head()<split> | train_df[train_df["Age"].isnull() ] | Titanic - Machine Learning from Disaster |
10,612,785 | predictors = train.drop(['Survived', 'PassengerId'], axis=1)
target = train["Survived"]
x_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0 )<predict_on_test> | s="Braund, Mr.Owen Harris"
s.split("." ) | Titanic - Machine Learning from Disaster |
10,612,785 | gaussian = GaussianNB()
gaussian.fit(x_train, y_train)
y_pred = gaussian.predict(x_val)
acc_gaussian = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_gaussian )<compute_train_metric> | s.split(".")[0].split("," ) | Titanic - Machine Learning from Disaster |
10,612,785 | logreg = LogisticRegression()
logreg.fit(x_train, y_train)
y_pred = logreg.predict(x_val)
acc_logreg = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_logreg )<compute_train_metric> | s.split(".")[0].split(",")[1] | Titanic - Machine Learning from Disaster |
10,612,785 | svc = SVC()
svc.fit(x_train, y_train)
y_pred = svc.predict(x_val)
acc_svc = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_svc )<compute_train_metric> | s.split(".")[0].split(",")[-1] | Titanic - Machine Learning from Disaster |
10,612,785 | linear_svc = LinearSVC()
linear_svc.fit(x_train, y_train)
y_pred = linear_svc.predict(x_val)
acc_linear_svc = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_linear_svc )<compute_train_metric> | name=train_df["Name"]
train_df["Title"]=[ i.split(".")[0].split(",")[-1].strip() for i in name] | Titanic - Machine Learning from Disaster |
10,612,785 | perceptron = Perceptron()
perceptron.fit(x_train, y_train)
y_pred = perceptron.predict(x_val)
acc_perceptron = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_perceptron )<train_model> | train_df["Title"].value_counts() | Titanic - Machine Learning from Disaster |
10,612,785 | decisiontree = DecisionTreeClassifier()
decisiontree.fit(x_train, y_train)
y_pred = decisiontree.predict(x_val)
acc_decisiontree = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_decisiontree )<train_model> | train_df["Title"]=train_df["Title"].replace(["Lady","the Countess","Capt","Col","Don","Dr","Major","Rev","Sir","Jonkheer","Dona"],"other" ) | Titanic - Machine Learning from Disaster |
10,612,785 | randomforest = RandomForestClassifier()
randomforest.fit(x_train, y_train)
y_pred = randomforest.predict(x_val)
acc_randomforest = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_randomforest )<compute_train_metric> | train_df["Title"].value_counts() | Titanic - Machine Learning from Disaster |
10,612,785 | knn = KNeighborsClassifier()
knn.fit(x_train, y_train)
y_pred = knn.predict(x_val)
acc_knn = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_knn )<predict_on_test> | train_df["Title"]=[0 if i == "Master" else 1 if i== "Miss" or i=="Ms" or i=="Mlle" or i=="Mrs" else 2 if i=="Mr" else 3 for i in train_df["Title"]] | Titanic - Machine Learning from Disaster |
10,612,785 | sgd = SGDClassifier()
sgd.fit(x_train, y_train)
y_pred = sgd.predict(x_val)
acc_sgd = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_sgd )<predict_on_test> | train_df.drop(labels=["Name"],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
10,612,785 | gbk = GradientBoostingClassifier()
gbk.fit(x_train, y_train)
y_pred = gbk.predict(x_val)
acc_gbk = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_gbk )<create_dataframe> | train_df=pd.get_dummies(train_df,columns=["Title"] ) | Titanic - Machine Learning from Disaster |
10,612,785 | models = pd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron', 'Linear SVC',
'Decision Tree', 'Stochastic Gradient Descent', 'Gradient Boosting Classifier','XGBClassifier'],
'Score': [acc_svc, acc_knn, acc_logreg,
acc_randomforest, acc_gaussian, acc_perceptron,acc_linear_svc, acc_decisiontree,
acc_sgd, acc_gbk,acc_xgb]})
models.sort_values(by='Score', ascending=False )<save_to_csv> | train_df["Fsize"]=train_df["SibSp"] + train_df["Parch"] + 1 | Titanic - Machine Learning from Disaster |
10,612,785 | ids = test['PassengerId']
predictions = gbk.predict(test.drop('PassengerId', axis=1))
output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions })
output.to_csv('./submission.csv', index=False )<set_options> | train_df["family-size"]=[1 if i<5 else 0 for i in train_df["Fsize"]] | Titanic - Machine Learning from Disaster |
10,612,785 | %matplotlib inline
warnings.filterwarnings('ignore')
<load_from_csv> | train_df=pd.get_dummies(train_df,columns=["family-size"] ) | Titanic - Machine Learning from Disaster |
10,612,785 | df = pd.read_csv('/kaggle/input/eval-lab-2-f464/train.csv')
test = pd.read_csv('/kaggle/input/eval-lab-2-f464/test.csv' )<prepare_x_and_y> | train_df=pd.get_dummies(train_df,columns=["Embarked"] ) | Titanic - Machine Learning from Disaster |
10,612,785 | num_features = ['id','chem_1', 'chem_2', 'chem_4','chem_5','chem_6','attribute']
num_features1 = ['chem_1', 'chem_2', 'chem_4','chem_5','chem_6','attribute']
X_train = df[num_features]
y_train = df["class"]
X = X_train
y = y_train<import_modules> | a=" A./5.2152" | Titanic - Machine Learning from Disaster |
10,612,785 |
<predict_on_test> | tickets=[]
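# keep the alphabetic prefix of each ticket (dots and slashes stripped); purely numeric tickets are bucketed as "x"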
for i in list(train_df.Ticket):
if not i.isdigit() :
tickets.append(i.replace(".","" ).replace("/","" ).strip().split() [0])
else:
tickets.append("x")
train_df["Ticket"]=tickets | Titanic - Machine Learning from Disaster |
10,612,785 | clf1 = XGBClassifier().fit(X[num_features],y)
y_pred1 = clf1.predict(test[num_features])
clf2 = XGBClassifier().fit(X[num_features1],y)
y_pred2 = clf2.predict(test[num_features1] )<save_to_csv> | train_df=pd.get_dummies(train_df,columns=["Ticket"],prefix="T" ) | Titanic - Machine Learning from Disaster |
10,612,785 | answer = pd.DataFrame(data={'id':test['id'],'class':y_pred1})
answer.to_csv('final1.csv',index=False)
answer = pd.DataFrame(data={'id':test['id'],'class':y_pred2})
answer.to_csv('final2.csv',index=False )<import_modules> | train_df["Pclass"]=train_df["Pclass"].astype("category" ) | Titanic - Machine Learning from Disaster |
10,612,785 | for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
<load_from_csv> | train_df=pd.get_dummies(train_df,columns=["Pclass"] ) | Titanic - Machine Learning from Disaster |
10,612,785 | dataset = pd.read_csv('../input/eval-lab-2-f464/train.csv' )<load_from_csv> | train_df.Sex=train_df.Sex.astype("category" ) | Titanic - Machine Learning from Disaster |
10,612,785 | test = pd.read_csv('../input/eval-lab-2-f464/test.csv' )<count_missing_values> | train_df.Sex.value_counts() | Titanic - Machine Learning from Disaster |
10,612,785 | dataset.isnull().sum()<drop_column> | train_df=pd.get_dummies(train_df,columns=["Sex"] ) | Titanic - Machine Learning from Disaster |
10,612,785 | dataset.drop(['id'], axis = 1, inplace = True )<prepare_x_and_y> | train_df.drop(labels=["PassengerId", "Cabin"],axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
10,612,785 | X = dataset[dataset.drop(['class'], axis = 1 ).columns].values
y = dataset['class'].values<split> | train_df.drop(labels=["SibSp", "Parch"],axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
10,612,785 | train_x, test_x, train_y, test_y = train_test_split(X, y, test_size = 20/120 )<train_model> | train_df.Sex_0.value_counts() | Titanic - Machine Learning from Disaster |
10,612,785 | clf = DecisionTreeClassifier(random_state = 42)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_model> | from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
| Titanic - Machine Learning from Disaster |
10,612,785 | clf = RandomForestClassifier(random_state = 42)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_model> | test=train_df[train_df_len:] | Titanic - Machine Learning from Disaster |
10,612,785 | clf = GradientBoostingClassifier(random_state = 42)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_model> | test.drop(labels=["Survived"], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = AdaBoostClassifier(random_state = 42)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<compute_train_metric> | train=train_df[:train_df_len]
X_train= train.drop(labels=["Survived"],axis=1)
y_train=train.Survived
print("X_train",len(X_train))
print("y_train",len(y_train))
X_train, X_test, y_train, y_test=train_test_split(X_train, y_train, test_size=0.10, random_state=42)
print("X_train",len(X_train))
print("X_test",len(X_test))
print("y_train",len(y_train))
print("y_test",len(y_test))
print("test",len(test)) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = ExtraTreesClassifier(random_state = 42, max_depth = 10, n_estimators = 1000, min_samples_split = 2)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)))
scoring = ['accuracy']
scores = cross_validate(clf, X, y, cv=6, scoring = scoring)
print(np.mean(scores['test_accuracy']))<compute_train_metric> | logreg=LogisticRegression(solver ='lbfgs',multi_class ='multinomial',max_iter = 500)
logreg.fit(X_train,y_train ) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = BernoulliNB()
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_model> | acc_log_train=round(logreg.score(X_train,y_train)*100,2 ) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = LogisticRegression(random_state = 42, penalty='l1')
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_on_grid> | acc_log_test=round(logreg.score(X_test,y_test)*100,2 ) | Titanic - Machine Learning from Disaster |
10,612,785 |
<compute_train_metric> | print("Training accuracy {}".format(acc_log_train))
print("Test accuracy {}".format(acc_log_test)) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = SGDClassifier()
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)))
print(accuracy_score(train_y, clf.predict(train_x)) )<train_model> | nb=GaussianNB()
nb.fit(X_train,y_train)
acc_nb_train=round(nb.score(X_train,y_train)*100,2)
acc_nb_test=round(nb.score(X_test,y_test)*100,2)
print(acc_nb_train)
print(acc_nb_test)
| Titanic - Machine Learning from Disaster |
10,612,785 | clf = KNeighborsClassifier(n_neighbors = 5, leaf_size = 10, weights = 'distance', algorithm = 'auto')
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)))
print(accuracy_score(train_y, clf.predict(train_x)) )<train_on_grid> | random_state=42
classifier=[DecisionTreeClassifier(random_state=random_state),
SVC(random_state=random_state),
RandomForestClassifier(random_state=random_state),
LogisticRegression(random_state=random_state),
KNeighborsClassifier() ]
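# one hyper-parameter grid per model, collected below in the same order as the classifier list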
dt_param_grid={"min_samples_split": range(10,500,20),
"max_depth": range(1,20,2)}
svc_param_grid={"kernel":["rbf"],
"gamma":[0.001,0.01,0.1,1],
"C":[1,10,50,100,200,300,1000],
"probability" : [True]}
rf_param_grid={"max_features":[1,3,5,10,25,35,50],
"min_samples_split":[2,3,10],
"min_samples_leaf":[1,3,10],
"bootstrap":[False],
"n_estimators":[100,300],
"criterion":["gini","entropy"]}
logreg_param_grid={"C":np.logspace(-3,13,17),
"penalty":["l1","l2"],"max_iter": [500]}
knn_param_grid={"n_neighbors": np.linspace(1,50,50,dtype=int ).tolist() ,
"weights":["uniform","distance"],
"metric":["euclidean","manhattan"]}
classifier_param=[dt_param_grid,svc_param_grid,rf_param_grid,
logreg_param_grid,knn_param_grid]
| Titanic - Machine Learning from Disaster |
10,612,785 | clf = XGBClassifier()
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<train_on_grid> | range(len(classifier)) | Titanic - Machine Learning from Disaster |
10,612,785 |
<compute_train_metric> | cv_result=[]
best_estimators=[]
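# grid-search every classifier with stratified 10-fold CV, keeping each best score and fitted estimator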
for i in range(len(classifier)) :
clf=GridSearchCV(classifier[i],param_grid=classifier_param[i],
cv=StratifiedKFold(n_splits=10),scoring="accuracy",
n_jobs=-1,
verbose=1
)
clf.fit(X_train,y_train)
cv_result.append(clf.best_score_)
best_estimators.append(clf.best_estimator_)
print(cv_result[i])
| Titanic - Machine Learning from Disaster |
10,612,785 | clf = ExtraTreesClassifier(random_state = 42)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)))
print(accuracy_score(train_y, clf.predict(train_x)) )<compute_train_metric> | votingC=VotingClassifier(estimators=[("dt",best_estimators[0]),
("rfc",best_estimators[2]),
("lr",best_estimators[3])],
voting="soft",n_jobs=-1)
votingC=votingC.fit(X_train,y_train)
print(accuracy_score(votingC.predict(X_test),y_test)) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = ExtraTreesClassifier(random_state = 0)
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)))
print(accuracy_score(train_y, clf.predict(train_x)) )<compute_train_metric> | test_survived=pd.Series(votingC.predict(test),name="Survived")
results=pd.concat([test_PassengerId,test_survived],axis=1)
results.to_csv("titanic.csv",index=False ) | Titanic - Machine Learning from Disaster |
10,612,785 | clf = GaussianNB()
clf.fit(train_x, train_y)
print(accuracy_score(test_y, clf.predict(test_x)) )<prepare_x_and_y> | test_survived=pd.Series(votingC.predict(test),name="Survived" ).astype(int)
results=pd.concat([test_PassengerId,test_survived],axis=1)
results.to_csv("titanic.csv",index=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | test_X = test[test.drop('id', axis = 1 ).columns].values<predict_on_test> | train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | gsc = RandomForestClassifier(n_estimators=1000)
gsc.fit(X, y)
print(accuracy_score(train_y, gsc.predict(train_x)))
print(accuracy_score(test_y, gsc.predict(test_x)))
test_Y = gsc.predict(test_X )<create_dataframe> | test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
test_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | data = {'id':test['id'],'class':test_Y}
df = pd.DataFrame(data )<save_to_csv> | train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | df.to_csv(path_or_buf ='output.csv', index = False )<load_from_csv> | train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | df_train = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/train.csv', index_col=0)
df_test = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/test.csv', index_col=0 )<set_options> | train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | %matplotlib inline
sns.countplot(x='Diabetes', data=df_train)
plt.show()<categorify> | train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | df_train_dummies = pd.get_dummies(df_train_over, columns=['Gender'], drop_first=True)
df_test_dummies = pd.get_dummies(df_test, columns=['Gender'], drop_first=True )<train_model> | train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False)
| Titanic - Machine Learning from Disaster |
14,169,066 | X_train_dummies = df_train_dummies.drop(columns='Diabetes' ).values
y_train_dummies = df_train_dummies['Diabetes'].values
rfc = RandomForestClassifier(max_depth=4, n_estimators=50, criterion='gini')
rfc.fit(X_train_dummies, y_train_dummies )<predict_on_test> | all_df = [train_df, test_df] | Titanic - Machine Learning from Disaster |
14,169,066 | X_test = df_test_dummies.values
y_pred = rfc.predict_proba(X_test)[:, 1]<save_to_csv> | for df in all_df:
df['Sex'] = df['Sex'].map({'male': 0, 'female': 1} ).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | submit = pd.read_csv('/kaggle/input/1056lab-diabetes-diagnosis/sampleSubmission.csv')
submit['Diabetes'] = y_pred
submit.to_csv('submission.csv', index=False )<import_modules> | common_Pclass = 'S'
train_df['Embarked'].fillna(common_Pclass, inplace=True)
train_df['Embarked'] = train_df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int)
test_df['Embarked'] = test_df['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | sys.path.insert(0, '/kaggle/input/efficientnetmodel/efficientnetModel.py')
sys.path.append('/kaggle/input/efficientnetmodel')
sys.path.append('/kaggle/input/efficientnetmodel/utils.py')
<init_hyperparams> | train_df['Cabin'].isna().sum() | Titanic - Machine Learning from Disaster |
14,169,066 |
IMAGE_SIZE = 320
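# AugMix-style augmentation primitives: each op takes a PIL image and a severity level and returns a transformed image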
def int_parameter(level, maxval):
return int(level * maxval / 10)
def float_parameter(level, maxval):
return float(level)* maxval / 10.
def sample_level(n):
return np.random.uniform(low=0.1, high=n)
def autocontrast(pil_img, _):
return ImageOps.autocontrast(pil_img)
def equalize(pil_img, _):
return ImageOps.equalize(pil_img)
def posterize(pil_img, level):
level = int_parameter(sample_level(level), 4)
return ImageOps.posterize(pil_img, 4 - level)
def rotate(pil_img, level):
degrees = int_parameter(sample_level(level), 30)
if np.random.uniform() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees, resample=Image.BILINEAR)
def solarize(pil_img, level):
level = int_parameter(sample_level(level), 256)
return ImageOps.solarize(pil_img, 256 - level)
def shear_x(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(( IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE,(1, level, 0, 0, 1, 0),
resample=Image.BILINEAR)
def shear_y(pil_img, level):
level = float_parameter(sample_level(level), 0.3)
if np.random.uniform() > 0.5:
level = -level
return pil_img.transform(( IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE,(1, 0, 0, level, 1, 0),
resample=Image.BILINEAR)
def translate_x(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(( IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE,(1, 0, level, 0, 1, 0),
resample=Image.BILINEAR)
def translate_y(pil_img, level):
level = int_parameter(sample_level(level), IMAGE_SIZE / 3)
if np.random.random() > 0.5:
level = -level
return pil_img.transform(( IMAGE_SIZE, IMAGE_SIZE),
Image.AFFINE,(1, 0, 0, 0, 1, level),
resample=Image.BILINEAR)
def color(pil_img, level):
level = float_parameter(sample_level(level), 1.8)+ 0.1
return ImageEnhance.Color(pil_img ).enhance(level)
def contrast(pil_img, level):
level = float_parameter(sample_level(level), 1.8)+ 0.1
return ImageEnhance.Contrast(pil_img ).enhance(level)
def brightness(pil_img, level):
level = float_parameter(sample_level(level), 1.8)+ 0.1
return ImageEnhance.Brightness(pil_img ).enhance(level)
def sharpness(pil_img, level):
level = float_parameter(sample_level(level), 1.8)+ 0.1
return ImageEnhance.Sharpness(pil_img ).enhance(level)
augmentations = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y
]
augmentations_all = [
autocontrast, equalize, posterize, rotate, solarize, shear_x, shear_y,
translate_x, translate_y, color, contrast, brightness, sharpness
]
<define_variables> | train_df['Cabin'].fillna('X', inplace=True)
test_df['Cabin'].fillna('X', inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | use_gpu = torch.cuda.is_available()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
dir_test = ".. /input/issm2020-ai-challenge/semTest/semTest/"
data_dir = '.. /input/issm2020-ai-challenge/semTrain/semTrain'
batch_size = 16
lr = 0.01
momentum = 0.9
num_epochs = 100
input_size = 224
class_num = 10
net_name = 'efficientnet-oriDuAug-b7'
mixture_width = 3
mixture_depth = -1
aug_severity = 3
all_ops = True
no_jsd = True
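# AugMix: chain 1-3 random ops per branch, blend the branches with Dirichlet weights, then mix with the original image using a Beta-sampled weight m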
def aug(image, preprocess):
aug_list = augmentations
if all_ops:
aug_list = augmentations_all
ws = np.float32(np.random.dirichlet([1] * mixture_width))
m = np.float32(np.random.beta(1, 1))
mix = torch.zeros_like(preprocess(image))
for i in range(mixture_width):
image_aug = image.copy()
depth = mixture_depth if mixture_depth > 0 else np.random.randint(
1, 4)
for _ in range(depth):
op = np.random.choice(aug_list)
image_aug = op(image_aug, aug_severity)
mix += ws[i] * preprocess(image_aug)
mixed =(1 - m)* preprocess(image)+ m * mix
return mixed
class AugMixDataset(torch.utils.data.Dataset):
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
def __getitem__(self, i):
x, y = self.dataset[i]
if self.no_jsd:
return aug(x, self.preprocess), y
else:
im_tuple =(self.preprocess(x), aug(x, self.preprocess),
aug(x, self.preprocess))
return im_tuple, y
def __len__(self):
return len(self.dataset)
augmix_tf: Compose = transforms.Compose(
[
transforms.Resize(input_size),
transforms.RandomHorizontalFlip() ,
transforms.RandomCrop(input_size, padding=16)
]
)
preprocess = transforms.Compose(
[
transforms.Resize(input_size),
transforms.ToTensor() ,
transforms.Normalize(( 0.4914, 0.4822, 0.4465),(0.2023, 0.1994, 0.2010))
])
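# note: the Normalize values above are the CIFAR-10 channel statistics, while loaddata below normalizes with the ImageNet statistics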
def loaddata(data_dir, batch_size, set_name, shuffle):
data_transforms = {
'train': transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)) ,
transforms.RandomHorizontalFlip() ,
transforms.RandomGrayscale(p=0.1),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
image_datasets = datasets.ImageFolder(data_dir, augmix_tf)
image_datasets = AugMixDataset(image_datasets, preprocess, no_jsd)
dataset_loaders = torch.utils.data.DataLoader(image_datasets,
batch_size=batch_size,
shuffle=shuffle, num_workers=0)
data_set_sizes = len(image_datasets)
return dataset_loaders, data_set_sizes
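# fine-tuning loop: per-epoch LR schedule, running loss/accuracy tracking, per-epoch checkpointing, and early exit once accuracy exceeds 0.999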
def train_model(model_ft, criterion, optimizer, lr_scheduler, num_epochs):
train_loss = []
since = time.time()
best_model_wts = model_ft.state_dict()
best_acc = 0.0
model_ft.train(True)
for epoch in range(num_epochs):
dset_loaders, dset_sizes = loaddata(data_dir=data_dir, batch_size=batch_size, set_name='train', shuffle=True)
print('Data Size', dset_sizes)
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
optimizer = lr_scheduler(optimizer, epoch)
running_loss = 0.0
running_corrects = 0
count = 0
for data in dset_loaders:
if count==189:
break
inputs, labels = data
labels = torch.squeeze(labels.type(torch.LongTensor))
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
outputs = model_ft(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs.data, 1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
count += 1
if count % 10 == 0 or outputs.size() [0] < batch_size:
print('Epoch:{}: loss:{:.5f}'.format(epoch, loss.item()))
train_loss.append(loss.item())
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dset_sizes
epoch_acc = running_corrects.double() / dset_sizes
print('Loss: {:.4f} Acc: {:.6f}'.format(
epoch_loss, epoch_acc))
model_ft.load_state_dict(best_model_wts)
model_out_path = model_dir + "/" + net_name + '{}.pth'.format(epoch)
print(model_dir + '/' + net_name + '{}.pth'.format(epoch))
torch.save(model_ft, model_out_path)
if epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model_ft.state_dict()
if epoch_acc > 0.999:
break
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
return train_loss, best_model_wts
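# run inference over the test image directory and write one (Id, LABEL) row per image to submission.csv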
def test_csv(model):
transform_test = transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
results = []
image_number = 0
resultFrame = pd.DataFrame(columns=["Id","LABEL"])
img_test = os.listdir(dir_test)
img_test.sort()
for i in range(len(img_test)) :
img = Image.open(dir_test + img_test[i])
input = transform_test(img ).cuda()
input = input.unsqueeze(0)
input = Variable(input)
score = model(input)
probability = torch.nn.functional.softmax(score, dim=1)
max_value, index = torch.max(probability, 1)
image_number += 1
probability = np.round(probability.cpu().detach().numpy())
index = np.round(index.cpu().detach().numpy())
resultFrame = resultFrame.append({'Id':int(image_number),'LABEL': int(index[0] + 1)}, ignore_index=True)
print(resultFrame)
resultFrame.to_csv('submission.csv', index=False)
def exp_lr_scheduler(optimizer, epoch, init_lr=lr, lr_decay_epoch=10):
<load_pretrained> | train_df['Cabin'].value_counts() | Titanic - Machine Learning from Disaster |
14,169,066 | model_ft=torch.load('/kaggle/input/efficientnetmodel/efficientnet-oriDuAug-b799.pth')
model_ft = model_ft.eval().cuda()
test_csv(model_ft )<prepare_x_and_y> | for df in all_df:
df['Cabin'] = df['Cabin'].astype(str ).str[0]
test_df['Cabin'] | Titanic - Machine Learning from Disaster |
14,169,066 | %matplotlib inline
train = pd.read_csv('../input/cars-train.csv')
val = pd.read_csv('../input/cars-test.csv')
x_train = pd.get_dummies(train.drop(['class', 'car.id'], axis=1))
x_val = pd.get_dummies(val.drop(['class', 'car.id'], axis=1))
y_train = pd.get_dummies(train['class'])
y_val = pd.get_dummies(val['class'] )<choose_model_class> | train_df.groupby(['Survived', 'Cabin'])['PassengerId'].sum() | Titanic - Machine Learning from Disaster |
14,169,066 | model = Sequential()
model.add(Dense(128, input_dim=(21), activation = 'selu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(128, activation = 'selu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(128, activation = 'selu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(128, activation = 'selu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(4, activation = 'softmax'))
model.compile(optimizer='adamax', loss='categorical_crossentropy', metrics=['accuracy'] )<train_model> | for df in all_df:
df.loc[(df['Cabin'] == 'A'), 'Cabin'] = 0
df.loc[(df['Cabin'] == 'B'), 'Cabin'] = 1
df.loc[(df['Cabin'] == 'C'), 'Cabin'] = 1
df.loc[(df['Cabin'] == 'D'), 'Cabin'] = 1
df.loc[(df['Cabin'] == 'E'), 'Cabin'] = 1
df.loc[(df['Cabin'] == 'F'), 'Cabin'] = 0
df.loc[(df['Cabin'] == 'G'), 'Cabin'] = 0
df.loc[(df['Cabin'] == 'T'), 'Cabin'] = 2
df.loc[(df['Cabin'] == 'X'), 'Cabin'] = 3
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | model.fit(x_train, y_train, epochs = 600, batch_size = 32 )<compute_test_metric> | guess_df = np.zeros(( 3,2))
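# 3x2 grid holding one median-age guess per (Pclass, Sex) combination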
guess_df | Titanic - Machine Learning from Disaster |
14,169,066 | score = model.evaluate(x_train, y_train)
print('\ntrain loss is: ' + str(score[0].round(4)))
print('train accuracy is: ' + str(score[1]))
score = model.evaluate(x_val, y_val)
print('\ntest loss is: ' + str(score[0].round(4)))
print('test accuracy is: ' + str(score[1]))<load_from_csv> | for df in all_df:
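# fill each missing Age with the median age of passengers sharing Pclass and Sex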
for i in range(0,3):
for j in range(0,2):
guess = df[(df['Pclass']==i+1)&(df['Sex']==j)]['Age'].dropna()
guess_df[i,j] = guess.median()
for i in range(0,3):
for j in range(0,2):
df.loc[(df['Age'].isna())&(df['Pclass']==i+1)&(df['Sex']==j), 'Age'] = guess_df[i,j]
df['Age'] = df['Age'].dropna().astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | test = pd.read_csv('../input/cars-final-prediction.csv')
test_new = pd.get_dummies(test.drop(['car.id'], axis=1))
preds = model.predict_classes(test_new)
keras_dict = {0: 'acc', 1: 'good', 2: 'unacc', 3: 'vgood'}
converted_preds = []
for prediction in preds:
converted_preds.append(keras_dict[prediction])
test['class']=converted_preds
output_csv = test[['car.id', 'class']]
output_csv<save_to_csv> | for df in all_df:
df.loc[(df['Age'] <= 16), 'Age'] = 0
df.loc[(df['Age'] > 16)&(df['Age'] <= 32), 'Age'] = 1
df.loc[(df['Age'] > 32)&(df['Age'] <= 48), 'Age'] = 2
df.loc[(df['Age'] > 48)&(df['Age'] <= 64), 'Age'] = 3
df.loc[(df['Age'] > 64), 'Age'] = 4
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | output_csv.to_csv('car-submission.csv', index=False )<install_modules> | train_df.groupby('Age', as_index=False)['Survived'].mean().sort_values(by='Age', ascending=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | !pip install ../input/kaggle-efficientnet-repo/efficientnet-1.0.0-py3-none-any.whl<set_options> | train_df.isna().sum() | Titanic - Machine Learning from Disaster |
14,169,066 | %matplotlib inline
pd.set_option('max_rows', 1000)
pd.set_option('max_columns', 1000 )<set_options> | train_df.drop(['AgeBand'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | def seed_everything(seed=42):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def get_strategy() :
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU {}'.format(tpu.cluster_spec().as_dict() ['worker']))
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print('REPLICAS: {}'.format(strategy.num_replicas_in_sync))
return strategy<define_variables> | train_df.isna().sum() | Titanic - Machine Learning from Disaster |
14,169,066 | INPUT_DIR = '../input/ailab-ml-training-1/'
ARTIFACT_DIR = '../input/v2-5-ailab1-training/'
PATH = {
'train': os.path.join(INPUT_DIR, 'train.csv'),
'submission': os.path.join(INPUT_DIR, 'sample_submission.csv'),
'train_image_dir': os.path.join(INPUT_DIR, 'train_images/train_images'),
'test_image_dir': os.path.join(INPUT_DIR, 'test_images/test_images'),
'f0_weight': os.path.join(ARTIFACT_DIR, 'f0_best_weight.h5'),
'f1_weight': os.path.join(ARTIFACT_DIR, 'f1_best_weight.h5'),
'f2_weight': os.path.join(ARTIFACT_DIR, 'f2_best_weight.h5'),
'f3_weight': os.path.join(ARTIFACT_DIR, 'f3_best_weight.h5'),
'f4_weight': os.path.join(ARTIFACT_DIR, 'f4_best_weight.h5'),
'gcs': 'v2-ailab1-tfrecord-dataset'
}
ID = 'fname'
TARGET = 'label'<define_variables> | test_df.isna().sum() | Titanic - Machine Learning from Disaster |
14,169,066 | seed_everything(args.seed )<load_from_csv> | test_df['Fare'].fillna(test_df['Fare'].median() , inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | train_df = pd.read_csv(PATH['train'])
submission_df = pd.read_csv(PATH['submission'] )<prepare_x_and_y> | for df in all_df:
df.loc[df['Fare'] <= 7.91, 'Fare'] = 0
df.loc[(df['Fare'] > 7.91)&(df['Fare'] <= 14.454), 'Fare'] = 1
df.loc[(df['Fare'] > 14.454)&(df['Fare'] <= 31.0), 'Fare'] = 2
df.loc[(df['Fare'] > 31.0), 'Fare'] = 3
df['Fare'] = df['Fare'].astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | def normalize(image):
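# normalize with the ImageNet channel means and stds, scaled to 0-255 pixel values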
image -= tf.constant([0.485 * 255, 0.456 * 255, 0.406 * 255])
image /= tf.constant([0.229 * 255, 0.224 * 255, 0.225 * 255])
return image
def get_model(
input_size=(224, 224, 3),
backbone='efficientnet-b0',
weights='imagenet',
n_classes=10,
):
print(f'Using backbone {backbone} and weights {weights}')
model_fn = getattr(efn, f'EfficientNetB{backbone[-1]}')
x = L.Input(shape=input_size, dtype='float32')
y = normalize(x)
y = model_fn(input_shape=input_size, weights=weights, include_top=False )(y)
y = L.GlobalAveragePooling2D()(y)
y = L.Dense(n_classes, activation='softmax' )(y)
model = tf.keras.Model(x, y)
return model
def read_tfrecords(tfrec, input_size=(224, 224, 3)) :
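# parse one TFRecord example: decode the single-channel image, tile it to 3 channels, and return (fname, float32 image)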
formats = {
'fname': tf.io.FixedLenFeature([], tf.string),
'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64)
}
tfrec = tf.io.parse_single_example(tfrec, formats)
image = tf.image.decode_image(tfrec['image'])
image = tf.reshape(image,(input_size[0], input_size[1], 1))
image = tf.repeat(image, repeats=3, axis=2)
image = tf.cast(image, tf.float32)
return tfrec['fname'], image
def get_test_dataset(tfrec_filenames, args, ordered=True):
AUTO = tf.data.experimental.AUTOTUNE
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
func_sample = [
lambda e: read_tfrecords(e, input_size=(args.imsize, args.imsize, 3)) ,
]
ds = tf.data.TFRecordDataset(tfrec_filenames, num_parallel_reads=AUTO)
ds = ds.map(func_sample[0], num_parallel_calls=AUTO)
return ds<define_variables> | train_df.drop('FareBand', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | oof = np.zeros(( len(train_df), args.n_classes))
predictions = np.zeros(( len(submission_df), args.n_classes))
train_fnames = train_df[ID].tolist()
submission_fnames = submission_df[ID].tolist()
gcs_path = KaggleDatasets().get_gcs_path(PATH['gcs'])
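# per-fold inference: rebuild the model, load the fold's weights, fill that fold's out-of-fold slots, and average test predictions over folds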
for fold in range(args.n_splits):
if fold not in args.use_folds:
print(f'skip fold {fold} training, because not in {args.use_folds}')
continue
strategy = get_strategy()
with strategy.scope() :
model = get_model(
input_size=(args.imsize, args.imsize, 3),
backbone=args.backbone,
weights=None,
n_classes=args.n_classes
)
model.load_weights(PATH[f'f{fold}_weight'])
print('load weight {}'.format(PATH[f'f{fold}_weight']))
tfrecs = tf.io.gfile.glob(os.path.join(gcs_path, f'records/train_f{fold}_*_*.tfrec'))
print(f'inference oof {tfrecs}')
ds = get_test_dataset(tfrecs, args, ordered=True)
num_val_samples = sum(int(re.findall(r'\d+', fname)[-1])for fname in tfrecs)
ds_fnames = ds.map(lambda fname, image: fname ).batch(num_val_samples)
ds_images = ds.map(lambda fname, image: image ).batch(args.batch_size)
_fnames = [fn.decode('utf-8')for fn in next(iter(ds_fnames)).numpy().tolist() ]
indices = [train_fnames.index(fn)for fn in _fnames]
oof[indices] = model.predict(ds_images)
tfrecs = tf.io.gfile.glob(os.path.join(gcs_path, f'records/test_f0_*_*.tfrec'))
print(f'inference test {tfrecs}')
ds = get_test_dataset(tfrecs, args, ordered=True)
ds_fnames = ds.map(lambda fname, image: fname ).batch(len(submission_df))
ds_images = ds.map(lambda fname, image: image ).batch(args.batch_size)
_fnames = [fn.decode('utf-8')for fn in next(iter(ds_fnames)).numpy().tolist() ]
indices = [submission_fnames.index(fn)for fn in _fnames]
predictions[indices] += model.predict(ds_images)/ args.n_splits<compute_test_metric> | for df in all_df:
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
train_df.groupby('FamilySize', as_index=False)['Survived'].mean().sort_values('Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | score = accuracy_score(train_df[TARGET].values, np.argmax(oof, axis=1))
print(f'CV score: {score:.5f}' )<save_to_csv> | for df in all_df:
df['Family'] = 0
df.loc[(df['FamilySize'] >= 5), 'Family'] = 1
df.loc[(df['FamilySize'] > 1)&(df['FamilySize'] < 5), 'Family'] = 2
train_df.groupby('Family', as_index=False)['Survived'].mean().sort_values('Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | np.save('oof', oof)
np.save('predictions', predictions)
submission_df[TARGET] = np.argmax(predictions, axis=1 ).tolist()
submission_df.to_csv('submission.csv', index=False)
FileLink('submission.csv' )<import_modules> | train_df.drop('FamilySize', axis=1, inplace=True)
test_df.drop('FamilySize', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | print(os.listdir(".. /input"))
<load_from_csv> | for df in all_df:
title = df['Name'].apply(lambda x:x.split(',')[1].split('.')[0] ).copy()
df['Title'] = title.str.strip()
train_df.groupby(['Title', 'Survived'])['PassengerId'].count().sort_values(ascending=False ) | Titanic - Machine Learning from Disaster |
14,169,066 | train_df = pd.read_csv('../input/train.csv' )<create_dataframe> | title_map = {"Mr": 0, "Miss": 1, "Mrs": 2, "Master": 3, "Rare": 4}
for df in all_df:
df['Title'] = df['Title'].map(title_map)
df['Title'] = df['Title'].fillna(0)
df['Title'] = df['Title'].astype(int)
all_df = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
14,169,066 | train_data = []
for i, row in train_df.iterrows() :
for text in row['text'].split('.'):
train_data.append([row['president'], text])
train_data = pd.DataFrame(train_data, columns=['president', 'text'] )<string_transform> | train_df.drop(['Name', 'Ticket'], axis=1, inplace=True)
test_df.drop(['Name', 'Ticket'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
14,169,066 | def remove_punctuation_numbers(text):
punc_numbers = string.punctuation + '0123456789'
return ''.join([l for l in text if l not in punc_numbers] )<string_transform> | y_train = train_df['Survived']
features = ['Pclass', 'Sex', 'Age', 'Fare', 'Embarked', \
'Family', 'SibSp', 'Parch', 'Title', 'Cabin']
X_train = train_df[features]
X_test = test_df[features] | Titanic - Machine Learning from Disaster |
14,169,066 | def tokeniser(text):
return TreebankWordTokenizer().tokenize(text )<not_enough_vertices> | forest = RandomForestClassifier(n_estimators=153, max_depth=5, random_state=1)
forest.fit(X_train, y_train)
y_pred = forest.predict(X_train)
print('Accuracy Score: ', forest.score(X_train, y_train))
print('\nConfusion Matrix: ', confusion_matrix(y_train, y_pred))
print('\nClassification Report: ', classification_report(y_train, y_pred)) | Titanic - Machine Learning from Disaster |
14,169,066 | <not_enough_vertices><EOS> | predict = forest.predict(X_test)
report = pd.DataFrame({'PassengerId': test_df.PassengerId, 'Survived': predict})
report.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
13,816,070 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<string_transform> | pd.options.mode.chained_assignment = None
%matplotlib inline
warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
13,816,070 | def data_cleaner(text):
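# lowercase, strip punctuation and digits, tokenize, remove stop words, and lemmatize back into one string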
text = text.lower()
text = remove_punctuation_numbers(text)
lst = tokeniser(text)
lst = remove_stop_words(lst)
return ' '.join(lemmetizer(lst))<feature_engineering> | train_data = pd.read_csv(".. /input/titanic/train.csv", index_col="PassengerId")
test_data = pd.read_csv(".. /input/titanic/test.csv", index_col="PassengerId" ) | Titanic - Machine Learning from Disaster |
13,816,070 | train_data['clean_text'] = train_data['text'].apply(data_cleaner )<categorify> | print(train_data["Pclass"].unique())
train_data[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
13,816,070 | train_data.president = train_data.president.map({'deKlerk':0,'Mandela':1,
'Mbeki':2, 'Motlanthe':3,
'Zuma': 4, 'Ramaphosa':5} )<split> | train_data.Name[1].split() | Titanic - Machine Learning from Disaster |
13,816,070 | X = train_data.clean_text
y = train_data.president
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.33,
random_state=0,
stratify=y )<not_enough_vertices> | train_data = train_data.assign(fname = train_data.Name.str.split("," ).str[0])
train_data["title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in train_data.Name], index=train_data.index ) | Titanic - Machine Learning from Disaster |
13,816,070 | vect = CountVectorizer(ngram_range=(1,2))<feature_engineering> | test_data = test_data.assign(fname = test_data.Name.str.split("," ).str[0])
test_data["title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in test_data.Name], index=test_data.index)
train_data.drop("Name", axis=1, inplace=True)
test_data.drop("Name", axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
13,816,070 | X_train_ = vect.fit_transform(X_train )<choose_model_class> | print(test_data.fname.nunique())
print(test_data.title.nunique() ) | Titanic - Machine Learning from Disaster |
13,816,070 | log = LogisticRegressionCV(dual=False, penalty='l2', multi_class='multinomial' )<train_model> | train_data["title"] = train_data['title'].replace(other_titles, 'Other')
train_data["title"] = train_data["title"].map({"Mr":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Master":2, "Other":3})
test_data["title"] = test_data['title'].replace(other_titles, 'Other')
test_data["title"] = test_data["title"].map({"Mr":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Master":2, "Other":3} ) | Titanic - Machine Learning from Disaster |
13,816,070 | log.fit(X_train_, y_train )<compute_train_metric> | print(train_data.title)
print(test_data.title.isna().sum() ) | Titanic - Machine Learning from Disaster |
13,816,070 | print(accuracy_score(y_train, log.predict(X_train_)) )<compute_test_metric> | oh = OneHotEncoder(handle_unknown="ignore", sparse = False)
train_data = train_data.join(pd.DataFrame(oh.fit_transform(train_data[["fname", "title"]]), index = train_data.index))
test_data = test_data.join(pd.DataFrame(oh.transform(test_data[["fname", "title"]]), index = test_data.index))
train_data.drop("fname", axis = 1, inplace = True)
test_data.drop("fname", axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
13,816,070 | print(accuracy_score(y_test, log.predict(vect.transform(X_test))))<load_from_csv> | print(train_data["Sex"].unique())
train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
13,816,070 | test_data = pd.read_csv('.. /input/test.csv' )<feature_engineering> | interactions = train_data.assign(sex_class = train_data['Sex'] + "_" + train_data['Pclass'].astype("str"))
interactions[['sex_class', 'Survived']].groupby(['sex_class'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
13,816,070 | test_data.text = test_data.text.apply(data_cleaner )<predict_on_test> | train_data = train_data.assign(sex_class = train_data['Sex'] + "_" + train_data['Pclass'].astype("str"))
test_data = test_data.assign(sex_class = test_data['Sex'] + "_" + test_data['Pclass'].astype("str")) | Titanic - Machine Learning from Disaster |
13,816,070 | test_data['president'] = log.predict(vect.transform(test_data.text))<drop_column> | train_data = train_data.join(pd.get_dummies(train_data['Pclass'], prefix="Pclass"))
test_data = test_data.join(pd.get_dummies(test_data['Pclass'], prefix="Pclass")) | Titanic - Machine Learning from Disaster |
13,816,070 | test_data.drop('text', axis=1, inplace=True, )<save_to_csv> | train_data["Sex"] = train_data["Sex"].map({"female":0, "male":1})
test_data["Sex"] = test_data["Sex"].map({"female":0, "male":1} ) | Titanic - Machine Learning from Disaster |
13,816,070 | test_data.to_csv('Thapelo_log.csv',index=False )<import_modules> | train_data["sex_class"] = train_data["sex_class"].map({"female_1":0, "female_2":1, "female_3":2, "male_1":4, "male_2":5, "male_3":6})
test_data["sex_class"] = test_data["sex_class"].map({"female_1":0, "female_2":1, "female_3":2, "male_1":4, "male_2":5, "male_3":6} ) | Titanic - Machine Learning from Disaster |
13,816,070 | from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression<load_from_csv> | def find_similar_passengers(id, dataset):
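# mean age of passengers sharing this passenger's title and Pclass; fall back to the same sex_class, then the same Sex, when no such ages exist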
subset = dataset[(dataset.title == dataset.title[id])&
(dataset.Pclass == dataset.Pclass[id])]
if subset["Age"].mean() == "NaN":
subset = dataset[(dataset["sex_class"] == dataset.iloc[id]["sex_class"])]
if subset["Age"].mean() == "NaN":
subset = dataset[(dataset["sex"] == dataset.iloc[id]["sex"])]
age = subset["Age"].mean()
return age | Titanic - Machine Learning from Disaster |
13,816,070 | train_users_game1_df = pd.read_csv("../input/ds2019uec-task2/train_users_game1.csv")
train_users_game2_df = pd.read_csv("../input/ds2019uec-task2/train_users_game2.csv")
test_users_game1_df = pd.read_csv("../input/ds2019uec-task2/test_users_game1.csv" )<categorify> | no_ages = train_data[train_data["Age"].isna() ].index
for pid in no_ages:
train_data.Age[pid] = find_similar_passengers(pid, train_data)
no_ages_test = test_data[test_data["Age"].isna() ].index
for pid2 in no_ages_test:
test_data.Age[pid2] = find_similar_passengers(pid2, test_data ) | Titanic - Machine Learning from Disaster |
13,816,070 | train_user_encoder = LabelEncoder()
train_user_encoder.fit(np.concatenate([train_users_game1_df["user_id"], train_users_game2_df["user_id"]]))
print(train_user_encoder.classes_.shape)
game1_encoder = LabelEncoder()
game1_encoder.fit(np.concatenate([train_users_game1_df["game_title"], test_users_game1_df["game_title"]]))
print(game1_encoder.classes_.shape )<categorify> | train_data["age_group"] = pd.cut(train_data["Age"], bins=[0,5,65,100], labels=[0,1,2] ).astype("int64")
test_data["age_group"] = pd.cut(test_data["Age"], bins=[0,5,65,100], labels=[0,1,2] ).astype("int64" ) | Titanic - Machine Learning from Disaster |
13,816,070 | X = np.zeros(( 9393, 4155))
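# binary user x game incidence matrix: X[i, j] = 1 if user i is paired with game j in train_users_game1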
user_vec = train_user_encoder.transform(train_users_game1_df["user_id"])
game_vec = game1_encoder.transform(train_users_game1_df["game_title"])
for i, j in zip(user_vec, game_vec):
X[i, j] = 1<prepare_x_and_y> | train_data[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
13,816,070 | Y_mat = np.zeros(( 9393, 1000))
user_vec = train_user_encoder.transform(train_users_game2_df["user_id"])
game_vec = train_users_game2_df["predict_game_id"]
for i, j in zip(user_vec, game_vec):
Y_mat[i, j] = 1<load_from_csv> | train_data[['Parch', 'Survived']].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
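All rows above follow the four-column schema in the header (kernel_id, prompt, completion, comp_name), with each prompt ending in a pseudo-cell tag such as <categorify>. A minimal sketch of loading and regrouping such a dump per kernel, assuming it is exported with those four columns; the file name "kernel_snippets.parquet" is a hypothetical placeholder:

```python
# Minimal sketch: load a dump with this schema and regroup snippets per kernel.
# "kernel_snippets.parquet" is a hypothetical placeholder for the real export.
import pandas as pd

df = pd.read_parquet("kernel_snippets.parquet")  # kernel_id, prompt, completion, comp_name

# Sanity-check against the header stats (int64 ids; completions up to ~182k chars)
print(df.dtypes)
print(df["completion"].str.len().max())

# Reassemble each kernel's completion snippets in row order
for kernel_id, group in df.groupby("kernel_id", sort=False):
    print(kernel_id, "snippets:", len(group))
```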