kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57
|---|---|---|---|
21,768,867 | mean_absolute_error(y_true=modifiedDF['feature'],
y_pred=test_fcst['yhat'] )<compute_test_metric> | ord_cols = ['Deck', 'Title', 'FamilySurvival', 'GroupSurvival']
deck_cat = ['N', 'G', 'F', 'E', 'D', 'C', 'B', 'A']
title_cat = ['Mr', 'Officer', 'Master', 'Royalty', 'Miss', 'Mrs']
family_cat = group_cat = [0.0, 0.5, 1.0]
ord_enc = OrdinalEncoder(categories=[deck_cat, title_cat, family_cat, group_cat])
combined[ord_cols] = ord_enc.fit_transform(combined[ord_cols] ) | Titanic - Machine Learning from Disaster |
21,768,867 | def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(( y_true - y_pred)/ y_true)) * 100<compute_test_metric> | oh_cols = ['Embarked', 'FamilySizeCat', 'Sex']
combined = pd.get_dummies(data=combined, columns=oh_cols)
train, test = divide_df(combined, TRAIN_LEN ) | Titanic - Machine Learning from Disaster |
21,768,867 | mean_absolute_percentage_error(y_true=modifiedDF['feature'],
y_pred=test_fcst['yhat'] )<compute_test_metric> | passengerid_test = test.PassengerId
COLS_TO_DROP.extend(['PassengerId', 'Name', 'Ticket', 'Surname'])
drop_cols(COLS_TO_DROP)
display(train.head() ) | Titanic - Machine Learning from Disaster |
21,768,867 | y_true = np.random.randn(100)
y_pred = y_true * 3.5
print(mean_absolute_percentage_error(y_true, y_pred))<compute_test_metric> | y = train[['Survived']].copy()
train.drop(columns='Survived', inplace=True)
X_train, X_valid, y_train, y_valid = train_test_split(train, y, train_size=0.8, random_state=RNG_SEED)
X_test = test.copy() | Titanic - Machine Learning from Disaster |
21,768,867 | print('rmse:', np.sqrt(mean_squared_error(y_true, y_pred)) )<save_to_csv> | WANT_HYPERPARAMETERS = False
if WANT_HYPERPARAMETERS:
params = dict(max_depth = [n for n in range(3, 9)],
min_samples_split = [n for n in range(2, 4)],
min_samples_leaf = [n for n in range(2, 4)],
n_estimators = [20, 40, 60, 80],)
model = GridSearchCV(RandomForestClassifier(random_state=RNG_SEED),
params, cv=5, scoring='accuracy')
model.fit(X_train, y_train)
print(f'Best parameters {model.best_params_}')
print(f'Mean cross-validated accuracy of the best parameters: {model.best_score_:.4f}')
else:
model = RandomForestClassifier(n_estimators=50, max_depth=5,
min_samples_leaf=2, min_samples_split=2,
random_state=RNG_SEED)
model.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
21,768,867 | test_data["feature"]=data
test_data.to_csv("submission.csv", index=None )<load_from_csv> | predictions_valid = model.predict(X_valid)
score = mean_absolute_error(y_valid, predictions_valid)
print('MAE for validation set prediction:', round(score, 4))
folds = 5
scores = -1 * cross_val_score(model, X_valid, y_valid, cv=folds, scoring='neg_mean_absolute_error')
print('Average MAE score(across experiments using {} folds):'.format(folds))
print(round(scores.mean() , 4)) | Titanic - Machine Learning from Disaster |
21,768,867 | <load_from_csv><EOS> | predictions_test = model.predict(X_test)
predictions_test = predictions_test.astype(int)
output = pd.DataFrame({'PassengerId': passengerid_test,
'Survived': predictions_test})
output.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
22,709,736 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<prepare_x_and_y> | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["KMP_SETTINGS"] = "false" | Titanic - Machine Learning from Disaster |
22,709,736 | X, y = train.drop(columns="Chance of Admit"), train["Chance of Admit"]<import_modules> | train_file_path = "../input/titanic/train.csv"
X = pd.read_csv(train_file_path)
features = ["Pclass", "Sex", "Age", "Survived"]
X = pd.get_dummies(X[features])
X = pd.concat([X.drop('Pclass', axis=1), pd.get_dummies(X['Pclass'], prefix="Pclass")], axis=1)
imputer = SimpleImputer()
imputed_X = pd.DataFrame(imputer.fit_transform(X))
imputed_X.columns = X.columns
X = imputed_X
learnoutput = X.Survived
learninput = X.drop("Survived", axis=1)
test_file_path = "../input/titanic/test.csv"
test_data = pd.read_csv(test_file_path)
features = ["Pclass", "Sex", "Age"]
test = pd.get_dummies(test_data[features])
testinput = pd.concat([test.drop('Pclass', axis=1), pd.get_dummies(test['Pclass'], prefix="Pclass")], axis=1 ) | Titanic - Machine Learning from Disaster |
22,709,736 | from sklearn.ensemble import RandomForestRegressor<import_modules> | x_train_split, x_val_split, y_train_split, y_val_split = train_test_split(learninput, learnoutput, random_state=0 ) | Titanic - Machine Learning from Disaster |
22,709,736 | from sklearn.ensemble import RandomForestRegressor<train_model> | model = keras.Sequential([
layers.Dense(
units=128,
activation = "tanh",
input_shape = [6]
),
layers.Dropout(0.2),
layers.Dense(
units = 256,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 512,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 1024,
activation = "tanh",
),
layers.Dropout(0.2),
layers.Dense(
units = 1,
activation = "sigmoid"
)
] ) | Titanic - Machine Learning from Disaster |
22,709,736 | rf = RandomForestRegressor()
rf.fit(X, y )<predict_on_test> | history = model.fit(x_train_split, y_train_split, epochs=25, verbose=0, validation_data=(x_val_split, y_val_split))
| Titanic - Machine Learning from Disaster |
22,709,736 | pred = rf.predict(test )<save_to_csv> | model.evaluate(x_val_split, y_val_split ) | Titanic - Machine Learning from Disaster |
22,709,736 | def submit_predictions(pred):
pred = pd.Series(pred)
serialNumber = test["Serial No."]
submission = pd.concat([serialNumber, pred], axis=1, ignore_index=True)
submission.rename(columns={0:"Serial No.", 1:"Chance of Admit"}, inplace=True)
submission.to_csv("submission.csv", index=False)
<predict_on_test> | predictions = model.predict(testinput)
predictions = predictions.ravel()
predictions = predictions.round()
predictions = np.nan_to_num(predictions)
predictions = predictions.astype(int)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, "Survived": predictions})
output.to_csv("./file1.csv", index=False ) | Titanic - Machine Learning from Disaster |
22,617,590 | submit_predictions(pred )<set_options> | print(f'Training path: {__training_path}\nTest path: {__test_path}' ) | Titanic - Machine Learning from Disaster |
22,617,590 | %matplotlib inline<set_options> | !{sys.executable} -m pip install --upgrade scikit-learn=="0.24.2" | Titanic - Machine Learning from Disaster |
22,617,590 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )<load_from_csv> | import sklearn; sklearn.show_versions() | Titanic - Machine Learning from Disaster |
22,617,590 | df = pd.read_csv('../input/widsdatathon2019/traininglabels.csv')
df.head()<feature_engineering> | def __load__data(__training_path, __test_path, concat=False):
__train_dataset = pd.read_csv(__training_path, delimiter=',')
__test_dataset = pd.read_csv(__test_path, delimiter=',')
return __train_dataset, __test_dataset
__train_dataset, __test_dataset = __load__data(__training_path, __test_path, concat=True)
__train_dataset.head() | Titanic - Machine Learning from Disaster |
22,617,590 | class OilDataset(Dataset):
def __init__(self,csv,transform):
self.data = pd.read_csv(csv)
self.transform = transform
self.label = torch.eye(2)[self.data['has_oilpalm']]
def __len__(self):
return len(self.data)
def __getitem__(self,idx):
image_path = os.path.join('../input/widsdatathon2019/train_images/train_images/'+self.data.loc[idx,'image_id'])
image = Image.open(image_path)
image = self.transform(image)
label = torch.tensor(self.data.loc[idx,'has_oilpalm'])
return {'image':image,'labels':label}<load_pretrained> | __test_dataset_submission_columns = __test_dataset['PassengerId'] | Titanic - Machine Learning from Disaster |
22,617,590 | k = Image.open('../input/widsdatathon2019/train_images/train_images/img_000012017.jpg')
k.show()<categorify> | __train_dataset.drop(['PassengerId'], axis=1, inplace=True)
__test_dataset.drop(['PassengerId'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
22,617,590 | simple_transform = transforms.Compose([transforms.Resize(( 224,224)) ,transforms.RandomHorizontalFlip() ,transforms.RandomRotation(45),
transforms.ToTensor()
,transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])] )<create_dataframe> | _NUMERIC_COLS_WITH_MISSING_VALUES = ['Age', 'Fare', 'Parch', 'Pclass', 'SibSp']
for _col in _NUMERIC_COLS_WITH_MISSING_VALUES:
__simple_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
__train_dataset[_col] = __simple_imputer.fit_transform(__train_dataset[_col].values.reshape(-1,1)) [:,0]
if _col in __test_dataset:
__test_dataset[_col] = __simple_imputer.transform(__test_dataset[_col].astype(__train_dataset[_col].dtypes ).values.reshape(-1,1)) [:,0] | Titanic - Machine Learning from Disaster |
22,617,590 | train_dataset = OilDataset('../input/widsdatathon2019/traininglabels.csv',simple_transform )<define_variables> | _STRING_COLS_WITH_MISSING_VALUES = ['Cabin', 'Ticket', 'Sex', 'Name', 'Embarked']
for _col in _STRING_COLS_WITH_MISSING_VALUES:
__simple_imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
__train_dataset[_col] = __simple_imputer.fit_transform(__train_dataset[_col].values.reshape(-1,1)) [:,0]
if _col in __test_dataset:
__test_dataset[_col] = __simple_imputer.transform(__test_dataset[_col].astype(__train_dataset[_col].dtypes ).values.reshape(-1,1)) [:,0] | Titanic - Machine Learning from Disaster |
22,617,590 | data_size = len(train_dataset)
indics = list(range(data_size))<count_duplicates> | _CATEGORICAL_COLS = ['Sex', 'Embarked']
_ohe = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)
__train_dataset[_CATEGORICAL_COLS] = pd.DataFrame(_ohe.fit_transform(__train_dataset[_CATEGORICAL_COLS]), columns=_CATEGORICAL_COLS)
__test_dataset[_CATEGORICAL_COLS] = pd.DataFrame(_ohe.transform(__test_dataset[_CATEGORICAL_COLS]), columns=_CATEGORICAL_COLS ) | Titanic - Machine Learning from Disaster |
22,617,590 | np.random.shuffle(indics )<define_variables> | _TEXT_COLUMNS = ['Name', 'Ticket', 'Cabin']
def process_text(__dataset):
for _col in _TEXT_COLUMNS:
process_text = [t.lower() for t in __dataset[_col]]
table = str.maketrans('', '', string.punctuation)
process_text = [t.translate(table)for t in process_text]
process_text = [re.sub(r'\d+', 'num', t)for t in process_text]
__dataset[_col] = process_text
return __dataset
__train_dataset = process_text(__train_dataset)
__test_dataset = process_text(__test_dataset ) | Titanic - Machine Learning from Disaster |
22,617,590 | validation_split = 0.1
split = int(np.round(validation_split*data_size))
train_indices,validation_indices = indics[split:],indics[:split]<randomize_order> | __feature_train = __train_dataset.drop(['Survived'], axis=1)
__target_train =__train_dataset['Survived']
__feature_test = __test_dataset | Titanic - Machine Learning from Disaster |
22,617,590 | train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(validation_indices )<load_pretrained> | _TEXT_COLUMNS = ['Cabin', 'Ticket', 'Name']
__temp_train_data = __feature_train[_TEXT_COLUMNS]
__feature_train.drop(_TEXT_COLUMNS, axis=1, inplace=True)
__feature_train_object_array = []
__temp_test_data = __feature_test[_TEXT_COLUMNS]
__feature_test.drop(_TEXT_COLUMNS, axis=1, inplace=True)
__feature_test_object_array = []
for _col in _TEXT_COLUMNS:
__tfidfvectorizer = TfidfVectorizer(max_features=3000)
vector_train = __tfidfvectorizer.fit_transform(__temp_train_data[_col])
__feature_train_object_array.append(vector_train)
vector_test = __tfidfvectorizer.transform(__temp_test_data[_col])
__feature_test_object_array.append(vector_test)
__feature_train = sparse.hstack([__feature_train] + __feature_train_object_array ).tocsr()
__feature_test = sparse.hstack([__feature_test] + __feature_test_object_array ).tocsr() | Titanic - Machine Learning from Disaster |
22,617,590 | train_loader = DataLoader(train_dataset,batch_size=32,sampler=train_sampler)
valid_loader = DataLoader(train_dataset,batch_size=32,sampler = valid_sampler )<choose_model_class> | __model = RandomForestClassifier()
__model.fit(__feature_train, __target_train)
__y_pred = __model.predict(__feature_test ) | Titanic - Machine Learning from Disaster |
22,617,590 | model = resnet50(pretrained=False)
model.load_state_dict(torch.load('../input/resnet50/resnet50.pth'))
for params in model.parameters() :
params.requires_grad = False
model.fc = nn.Sequential(
nn.Linear(2048,1024),
nn.ReLU() ,
nn.Linear(1024,5)
)
fc_parameter = model.fc.parameters()
for params in fc_parameter:
params.requires_grad = True
model = model.to(device )<choose_model_class> | submission = pd.DataFrame(columns=['PassengerId'], data=__test_dataset_submission_columns)
submission = pd.concat([submission, pd.DataFrame(__y_pred, columns=['Survived'])], axis=1)
submission.head() | Titanic - Machine Learning from Disaster |
22,617,590 | <train_model><EOS> | submission.to_csv("kaggle_submission.csv", index=False ) | Titanic - Machine Learning from Disaster |
14,131,271 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt | Titanic - Machine Learning from Disaster |
14,131,271 | fit(12,model,optimizer,criteria )<load_from_csv> | train_dataset = pd.read_csv('../input/titanic/train.csv')
train_dataset.head()
| Titanic - Machine Learning from Disaster |
14,131,271 | class prediction(Dataset):
def __init__(self,csv,transform):
self.test_data = pd.read_csv(csv)
self.transform = transform
def __len__(self):
return len(self.test_data)
def __getitem__(self,idx):
try:
image_path = os.path.join('../input/widsdatathon2019/leaderboard_test_data/leaderboard_test_data/',self.test_data.loc[idx]['image_id'])
image = Image.open(image_path)
except:
image_path = os.path.join('../input/widsdatathon2019/leaderboard_holdout_data/leaderboard_holdout_data/',self.test_data.loc[idx]['image_id'])
image = Image.open(image_path)
image = self.transform(image)
name = self.test_data.loc[idx]['image_id']
return {'images':image,'names':name}<define_variables> | test_dataset = pd.read_csv('../input/titanic/test.csv')
test_dataset.head() | Titanic - Machine Learning from Disaster |
14,131,271 | predict_dataset = prediction('../input/widsdatathon2019/SampleSubmission.csv',simple_transform )<load_pretrained> | X = train_dataset[['Pclass','Sex','Age','Fare','SibSp','Parch', 'Embarked', 'Name', 'Cabin']]
print(X)
y = train_dataset.iloc[:, 1].values
print(y[0:10])
X_test = test_dataset[['Pclass','Sex','Age','Fare','SibSp','Parch', 'Embarked','Name','Cabin']]
print(X_test)
| Titanic - Machine Learning from Disaster |
14,131,271 | prediction_loader = DataLoader(predict_dataset )<set_options> | X['Age'] = X['Age'].fillna(( X['Age'].mean()))
X["Embarked"] = X["Embarked"].fillna("S")
X.loc[X["Embarked"] == "S", "Embarked"] = 0
X.loc[X["Embarked"] == "C", "Embarked"] = 1
X.loc[X["Embarked"] == "Q", "Embarked"] = 2
X_test['Age'] = X_test['Age'].fillna(( X_test['Age'].mean()))
X_test["Embarked"] = X_test["Embarked"].fillna("S")
X_test.loc[X_test["Embarked"] == "S", "Embarked"] = 0
X_test.loc[X_test["Embarked"] == "C", "Embarked"] = 1
X_test.loc[X_test["Embarked"] == "Q", "Embarked"] = 2
data = [X, X_test]
titles = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in data:
dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr','Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
dataset['Title'] = dataset['Title'].map(titles)
dataset['Title'] = dataset['Title'].fillna(0)
dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int)
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
dataset['Age'] = dataset['Age'].astype(int)
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset['Fare'] = dataset['Fare'].fillna(dataset['Fare'].median())
dataset['Cabin'] = dataset['Cabin'].str[:1]
X = X.drop(['Name'], axis=1)
X_test = X_test.drop(['Name'], axis=1)
print(X)
print(X_test)
| Titanic - Machine Learning from Disaster |
14,131,271 | gc.collect()<predict_on_test> | cabin_mapping = {"A": 0, "B": 0.4, "C": 0.8, "D": 1.2, "E": 1.6, "F": 2, "G": 2.4, "T": 2.8}
data = [X, X_test]
for dataset in data:
dataset['Cabin'] = dataset['Cabin'].map(cabin_mapping)
X["Cabin"].fillna(X.groupby("Pclass")["Cabin"].transform("median"), inplace=True)
X_test["Cabin"].fillna(X_test.groupby("Pclass")["Cabin"].transform("median"), inplace=True ) | Titanic - Machine Learning from Disaster |
14,131,271 | predict = []
model.eval()
for i, d in enumerate(prediction_loader):
data = d['images'].cuda()
output = model(data)
output1 = output.cpu().detach().numpy()
predict.append(output1[0] )<load_from_csv> | X['FareBand'] = pd.qcut(X['Fare'], 4)
X_test['FareBand'] = pd.qcut(X_test['Fare'], 4)
data = [X, X_test]
for dataset in data:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
| Titanic - Machine Learning from Disaster |
14,131,271 | submission = pd.read_csv('../input/widsdatathon2019/SampleSubmission.csv')
submission['has_oilpalm'] = np.argmax(predict,1 )<save_to_csv> | data = [X, X_test]
for dataset in data:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
print(X)
print(X_test ) | Titanic - Machine Learning from Disaster |
14,131,271 | submission.to_csv('sample_submission.csv',index = False )<import_modules> | features_drop = [ 'SibSp', 'Parch', 'FamilySize', 'FareBand']
X = X.drop(features_drop, axis=1)
X_test = X_test.drop(features_drop, axis=1)
print(X)
print(X_test ) | Titanic - Machine Learning from Disaster |
14,131,271 | import fastai
from fastai.vision import *<define_variables> | sc = StandardScaler()
X = sc.fit_transform(X)
print('X')
print(X)
X_test = sc.fit_transform(X_test)
print('X_test')
print(X_test ) | Titanic - Machine Learning from Disaster |
14,131,271 | work_dir = Path('/kaggle/working/')
path = Path('.. /input' )<define_variables> | classifier = LogisticRegression(random_state = 0)
classifier.fit(X, y)
y_pred_l_reg = classifier.predict(X_test)
acc_l_reg = round(classifier.score(X, y)* 100, 2)
print(str(acc_l_reg)+ ' percent')
| Titanic - Machine Learning from Disaster |
14,131,271 | train = 'train_images/train_images'
test = path/'leaderboard_test_data/leaderboard_test_data'
holdout = path/'leaderboard_holdout_data/leaderboard_holdout_data'
sample_sub = path/'SampleSubmission.csv'
labels = path/'traininglabels.csv'<load_from_csv> | clf = SVC()
clf.fit(X, y)
y_pred_svc = clf.predict(X_test)
acc_svc = round(clf.score(X, y)* 100, 2)
print(acc_svc ) | Titanic - Machine Learning from Disaster |
14,131,271 | df = pd.read_csv(labels)
df_sample = pd.read_csv(sample_sub )<filter> | clf = KNeighborsClassifier(n_neighbors = 3)
clf.fit(X, y)
y_pred_knn = clf.predict(X_test)
acc_knn = round(clf.score(X, y)* 100, 2)
print(acc_knn)
| Titanic - Machine Learning from Disaster |
14,131,271 | df[df['score']<0.75]<define_variables> | clf = DecisionTreeClassifier()
clf.fit(X, y)
y_pred_decision_tree = clf.predict(X_test)
acc_decision_tree = round(clf.score(X, y)* 100, 2)
print(acc_decision_tree)
| Titanic - Machine Learning from Disaster |
14,131,271 | test_names = [f for f in test.iterdir() ]
holdout_names = [f for f in holdout.iterdir() ]<normalization> | clf = RandomForestClassifier(n_estimators=100)
clf.fit(X, y)
y_pred_random_forest = clf.predict(X_test)
acc_random_forest = round(clf.score(X, y)* 100, 2)
print(acc_random_forest ) | Titanic - Machine Learning from Disaster |
14,131,271 | data =(src.transform(get_transforms() , size=128)
.databunch(bs=64)
.normalize(imagenet_stats))<compute_test_metric> | sgd = SGDClassifier()
sgd.fit(X, y)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X, y)* 100, 2)
print(acc_sgd ) | Titanic - Machine Learning from Disaster |
14,131,271 | <choose_model_class><EOS> | output = pd.DataFrame({'PassengerId': test_dataset.PassengerId, 'Survived': y_pred_l_reg})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved" ) | Titanic - Machine Learning from Disaster |
22,533,554 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<define_search_space> | import matplotlib.pyplot as plt
import sklearn.model_selection
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import KNNImputer
from sklearn.metrics import accuracy_score
import seaborn as sns
import numpy as np
import pandas as pd | Titanic - Machine Learning from Disaster |
22,533,554 | lr = 1e-2<train_model> | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head() | Titanic - Machine Learning from Disaster |
22,533,554 | learn.fit_one_cycle(6, lr )<train_model> | test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head() | Titanic - Machine Learning from Disaster |
22,533,554 | Then we unfreeze and train the whole model with a lower lr.<train_model> | all_data = pd.concat([train_data, test_data])
all_data.head() | Titanic - Machine Learning from Disaster |
22,533,554 | learn.unfreeze()
learn.fit_one_cycle(3, slice(1e-4, 1e-3))<save_model> | survival_rate = train_data["Survived"].mean()
print(f"Survival rate: {survival_rate}")
print(f"Death rate: {1-survival_rate}" ) | Titanic - Machine Learning from Disaster |
22,533,554 | learn.save('128' )<compute_test_metric> | women = train_data.loc[train_data.Sex == 'female']["Survived"]
rate_women = sum(women)/len(women)
print("% of women who survived:", rate_women ) | Titanic - Machine Learning from Disaster |
22,533,554 | p,t = learn.get_preds()
auc_score(p,t )<find_best_params> | train_data.loc[train_data["Sex"] == "male"]["Embarked"] | Titanic - Machine Learning from Disaster |
22,533,554 | interp = ClassificationInterpretation.from_learner(learn)
losses,idxs = interp.top_losses()<predict_on_test> | features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Embarked"]
X_train = pd.get_dummies(train_data[features])
imputer = KNNImputer(n_neighbors=5, weights='uniform', metric='nan_euclidean')
X_train = imputer.fit_transform(X_train)
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
y_train = np.array(train_data["Survived"])
print(f"Shape of the training set: {X_train.shape}" ) | Titanic - Machine Learning from Disaster |
22,533,554 | p,t = learn.get_preds(ds_type=DatasetType.Test )<create_dataframe> | def trainClassifier(X_train, y_train, model_name, classifier, params, score, verbose=False, num_folds=10):
kf = sklearn.model_selection.StratifiedKFold(num_folds)
train_scores = []
best_score = 0
for config in sklearn.model_selection.ParameterGrid(params):
train_scores_run = []
counts = []
for train_indices, valid_indices in kf.split(X_train, y_train):
counts.append(len(train_indices))
X_train_kf = X_train[train_indices]
y_train_kf = y_train[train_indices]
X_valid_kf = X_train[valid_indices]
y_valid_kf = y_train[valid_indices]
model = classifier(**config)
model.fit(X_train_kf, y_train_kf)
y_hat = model.predict(X_valid_kf)
train_score = score(y_valid_kf, y_hat)
train_scores_run.append(train_score)
if np.average(train_scores_run, weights=counts)> best_score:
best_score = np.average(train_scores_run, weights=counts)
best_config = config
if(verbose):
print("New best score obtained")
print(f"Training with: {config}")
print(f"Total Score obtained with cross validation: {best_score}
")
train_scores.append(np.average(train_scores_run, weights=counts))
output_df = pd.DataFrame(data = [[model_name, best_config ,best_score]], \
columns=["model_name", "parameters", "training_score"])
return output_df | Titanic - Machine Learning from Disaster |
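The hand-rolled loop in `trainClassifier` above — exhaustive sweep over `ParameterGrid`, stratified k-fold splits, and a fold-size-weighted average of the scores — is close to what scikit-learn's `GridSearchCV` provides directly. A minimal sketch of the equivalent call for the k-NN case that follows (assuming the same `X_train`/`y_train` arrays prepared earlier in this kernel):

```python
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier

# Roughly equivalent to trainClassifier(X_train, y_train, "k-NN", ...):
# exhaustive grid search with stratified 10-fold CV and accuracy scoring.
search = GridSearchCV(KNeighborsClassifier(),
                      param_grid={"n_neighbors": [1, 3, 5, 7, 9, 11, 13, 15]},
                      cv=StratifiedKFold(n_splits=10),
                      scoring="accuracy")
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)
```

One small difference: `GridSearchCV` averages fold scores unweighted, whereas the loop above weights by fold size; with stratified folds of near-equal size the two agree almost exactly.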
22,533,554 | sub = pd.DataFrame(np.stack([ids, p[:,1]], axis=1), columns=df_sample.columns )<save_to_csv> | results = pd.DataFrame() | Titanic - Machine Learning from Disaster |
22,533,554 | sub.to_csv(work_dir/'sub.csv', index=False )<create_dataframe> | params = {
"n_neighbors": [1, 3, 5, 7, 9, 11, 13, 15]
}
classifier = KNeighborsClassifier
classifier_df = trainClassifier(X_train, y_train, "k-NN", classifier, params, accuracy_score)
results = results.append(classifier_df ) | Titanic - Machine Learning from Disaster |
22,533,554 | def get_null(train):
columns = train.columns
values = []
features = []
for column in columns:
size = train[train[column].isnull() ].shape[0]
if(size != 0):
features.append(column)
values.append(size)
return pd.DataFrame({
'features': features,
'values': values
} )<count_values> | params = {
"C": [1e-3, 1e-2, 1e-1, 1],
"max_iter": [30000]
}
classifier = LinearSVC
classifier_df = trainClassifier(X_train, y_train, "LinearSVC", classifier, params, accuracy_score)
results = results.append(classifier_df ) | Titanic - Machine Learning from Disaster |
22,533,554 | def range_quantile(column):
q1 = train[column].quantile(0.25)
q2 = train[column].quantile(0.5)
q3 = train[column].quantile(0.75)
print(train[train[column] == q1]['isChurned'].value_counts())
print(train[(train[column] > q1)&(train[column] <= q2)]['isChurned'].value_counts())
print(train[(train[column] > q2)&(train[column] <= q3)]['isChurned'].value_counts())
print(train[train[column] > q3]['isChurned'].value_counts())
return q1, q2, q3<set_options> | params = {
"kernel" : ["rbf"],
"C": [1e-4, 1e-3, 1e-2, 1e-1, 1, 10],
"gamma": [1e-3, 1e-2, 1e-1, 1, 10]
}
classifier = SVC
classifier_df = trainClassifier(X_train, y_train, "SVC", classifier, params, accuracy_score)
results = results.append(classifier_df ) | Titanic - Machine Learning from Disaster |
22,533,554 | warnings.filterwarnings('ignore')
<load_from_csv> | params = {
"C": [1e-3, 1e-2, 1e-1, 1, 10]
}
classifier = LogisticRegression
classifier_df = trainClassifier(X_train, y_train, "LogisticRegression", classifier, params, accuracy_score)
results = results.append(classifier_df ) | Titanic - Machine Learning from Disaster |
22,533,554 | train = pd.read_csv('../input/seleksidukungaib/train.csv')
test = pd.read_csv('../input/seleksidukungaib/test.csv')
print(train.shape)
print(test.shape )<count_values> | params = {"max_depth": [3, 5, 7, 10, None],
"n_estimators":[3, 5,10, 25, 50],
"max_features": [1, 2, "auto"]}
classifier = RandomForestClassifier
classifier_df = trainClassifier(X_train, y_train, "RandomForests", classifier, params, accuracy_score)
results = results.append(classifier_df ) | Titanic - Machine Learning from Disaster |
22,533,554 | train['isChurned'].value_counts()<drop_column> | results = results.set_index("model_name")
results | Titanic - Machine Learning from Disaster |
22,533,554 | train.drop(columns=['idx'], inplace=True)
test.drop(columns=['idx'], inplace=True )<count_duplicates> | classifier = RandomForestClassifier
best_params = results.loc["RandomForests"]["parameters"]
submission_model = classifier(**best_params)
submission_model.fit(X_train, y_train)
X_test = pd.get_dummies(test_data[features])
X_test = imputer.transform(X_test)
X_test = scaler.transform(X_test)
y_hat = submission_model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_hat})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!")
| Titanic - Machine Learning from Disaster |
22,533,551 | print(train[train['userId'].duplicated() ]['userId'].unique().shape[0])
print(test[test['userId'].duplicated() ]['userId'].unique().shape[0] )<define_variables> | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head() | Titanic - Machine Learning from Disaster |
22,533,551 | ddc = train[['date_collected', 'date']]<count_duplicates> | test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head() | Titanic - Machine Learning from Disaster |
22,533,551 | print(ddc.shape[0])
print(ddc[ddc.duplicated() ].shape[0] )<drop_column> | train_data = train_data.set_index("PassengerId")
test_data = test_data.set_index("PassengerId" ) | Titanic - Machine Learning from Disaster |
22,533,551 | train.drop(columns=['date'], inplace=True)
test.drop(columns=['date'], inplace=True )<count_values> | num_attribs = ["Age", "SibSp", "Parch", "Fare", ]
cat_attribs = ["Pclass", "Sex", "Embarked", ] | Titanic - Machine Learning from Disaster |
22,533,551 | train['userId'].value_counts()<count_duplicates> | num_pipeline = Pipeline([
("imputer", SimpleImputer(strategy = "median")) ,
("scaler", StandardScaler()),
] ) | Titanic - Machine Learning from Disaster |
22,533,551 | duplicate = train[(train.duplicated(subset=['userId', 'date_collected'], keep=False)) ]
null_row = duplicate[(duplicate['max_recharge_trx'].isnull())|(duplicate['average_topup_trx'].isnull())|(duplicate['total_transaction'].isnull())]
count = 0
for i, el in null_row.iterrows() :
temp_df = duplicate[(duplicate['userId'] == el['userId'])&(duplicate['date_collected'] == el['date_collected'])]
idx_recharge, idx_topup, idx_transaction = -1, -1, -1
for j, row in temp_df.iterrows() :
if(~np.isnan(row['max_recharge_trx'])) :
idx_recharge = j
if(~np.isnan(row['average_topup_trx'])) :
idx_topup = j
if(~np.isnan(row['total_transaction'])) :
idx_transaction = j
if(idx_recharge != -1):
train.at[i, 'max_recharge_trx'] = train.at[idx_recharge, 'max_recharge_trx']
if(idx_topup != -1):
train.at[i, 'average_topup_trx'] = train.at[idx_topup, 'average_topup_trx']
if(idx_transaction != -1):
train.at[i, 'total_transaction'] = train.at[idx_transaction, 'total_transaction']
count += 1
if(count % 1000 == 0):
print(count )<feature_engineering> | cat_pipeline = Pipeline([
("imputer", SimpleImputer(strategy = "most_frequent")) ,
("cat_encoder", OneHotEncoder(sparse = False)) ,
] ) | Titanic - Machine Learning from Disaster |
22,533,551 | def update_max_recharge_trx(row):
if(np.isnan(row['max_recharge_trx'])) :
row['max_recharge_trx'] = row['average_recharge_trx'] * 2 - row['min_recharge_trx']
return row
train = train.apply(update_max_recharge_trx, axis=1 )<feature_engineering> | preprocess_pipeline = ColumnTransformer([
("num", num_pipeline, num_attribs),
("cat", cat_pipeline, cat_attribs),
] ) | Titanic - Machine Learning from Disaster |
22,533,551 | def update_average_topup_trx(row):
if(np.isnan(row['average_topup_trx'])) :
if(row['num_topup_trx'] < 2):
row['average_topup_trx'] = row['min_topup_trx']
else:
row['average_topup_trx'] =(row['min_topup_trx'] + row['max_topup_trx'])/2
return row
train = train.apply(update_average_topup_trx, axis=1 )<feature_engineering> | X_train = preprocess_pipeline.fit_transform(train_data[num_attribs + cat_attribs])
X_train | Titanic - Machine Learning from Disaster |
22,533,551 | def update_num_transfer(x):
return x['num_transaction'] - x['num_topup_trx'] - x['num_recharge_trx']<feature_engineering> | y_train = train_data["Survived"] | Titanic - Machine Learning from Disaster |
22,533,551 | train['num_transfer_trx'] = train.apply(update_num_transfer, axis=1)
test['num_transfer_trx'] = test.apply(update_num_transfer, axis=1 )<groupby> | forest_clf = RandomForestClassifier(n_estimators = 100, random_state = 42)
forest_clf.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
22,533,551 | def update_average_trx(x):
if(x['num_transfer_trx'] == 0):
return 0
if(np.isnan(x['total_transaction'])) :
return np.nan
topup = x['average_topup_trx'] * x['num_topup_trx']
recharge = x['average_recharge_trx'] * x['num_recharge_trx']
total = x['total_transaction']
return(total-topup-recharge)/x['num_transfer_trx']<feature_engineering> | X_test = preprocess_pipeline.fit_transform(test_data[num_attribs + cat_attribs])
y_pred = forest_clf.predict(X_test ) | Titanic - Machine Learning from Disaster |
22,533,551 | train['average_transfer_trx'] = train.apply(update_average_trx, axis=1)
test['average_transfer_trx'] = test.apply(update_average_trx, axis=1 )<categorify> | forest_scores = cross_val_score(forest_clf, X_train, y_train, cv = 10)
forest_scores.mean() | Titanic - Machine Learning from Disaster |
22,533,551 | def fill_total_transaction(x):
if(np.isnan(x['total_transaction'])) :
if(np.isnan(x['average_transfer_trx'])) :
return x
x['total_transaction'] =(
(x['average_topup_trx'] * x['num_topup_trx'])+
(x['average_recharge_trx'] * x['num_recharge_trx'])+
(x['average_transfer_trx'] * x['num_transfer_trx'])
)
return x
train = train.apply(fill_total_transaction, axis=1 )<filter> | svm_clf = SVC(gamma = "auto")
svm_scores = cross_val_score(svm_clf, X_train, y_train, cv = 10)
svm_scores.mean() | Titanic - Machine Learning from Disaster |
22,533,551 | train[train['isActive'].isnull() ]<drop_column> | svm_clf.fit(X_train, y_train)
y_pred = svm_clf.predict(X_test ) | Titanic - Machine Learning from Disaster |
22,533,551 | train.drop(train[train['isActive'].isnull() ].index, inplace=True )<count_values> | output = pd.DataFrame({'PassengerId': test_data.index, 'Survived': y_pred})
output.to_csv('submission.csv', index = False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
20,728,635 | test['premium'].value_counts()<feature_engineering> | df_train = pd.read_csv("../input/titanic/train.csv")
df_test = pd.read_csv("../input/titanic/test.csv" ) | Titanic - Machine Learning from Disaster |
20,728,635 | test['premium'] = test['premium'].fillna(False )<drop_column> | X_train = df_train.copy()
X_test = df_test.copy() | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(columns='isUpgradedUser', inplace=True)
test.drop(columns='isUpgradedUser', inplace=True )<drop_column> | X_train = X_train.set_index('PassengerId')
X_test = X_test.set_index('PassengerId' ) | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(columns=['random_number'], inplace=True)
test.drop(columns=['random_number'], inplace=True )<load_from_csv> | X_train_null = X_train.isnull().sum()
X_train_null | Titanic - Machine Learning from Disaster |
20,728,635 |
<count_duplicates> | X_train_null/len(X_train)* 100 | Titanic - Machine Learning from Disaster |
20,728,635 | print(train[train.duplicated() ].shape[0])
print(train[train['total_transaction'].isnull() ].shape[0])
print(train[train['average_transfer_trx'] < 0].shape[0] )<drop_column> | X_test_null = X_test.isnull().sum()
X_test_null | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(train[train['average_transfer_trx'] < 0].index, inplace=True )<count_duplicates> | X_test_null/len(X_test)* 100 | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(train[train.duplicated() ].index, inplace=True )<drop_column> | X_train['Age'] = X_train['Age'].fillna(X_train['Age'].mean())
X_test['Age'] = X_test['Age'].fillna(X_test['Age'].mean() ) | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(train[train['total_transaction'].isnull() ].index, inplace=True )<count_values> | X_train['Embarked'].value_counts() | Titanic - Machine Learning from Disaster |
20,728,635 | print(train[train['num_transfer_trx'] != 0]['isChurned'].value_counts())
print(train[train['num_transfer_trx'] == 0]['isChurned'].value_counts() )<feature_engineering> | X_train['Embarked'] = X_train['Embarked'].fillna('S')
| Titanic - Machine Learning from Disaster |
20,728,635 | def update_transaction(x):
if(x['num_transaction'] != 0):
x['num_transaction'] -= x['num_transfer_trx']
x['total_transaction'] -=(x['average_transfer_trx'] * x['num_transfer_trx'])
if(x['total_transaction'] > -1e-8 and x['total_transaction'] < 1e-8):
x['total_transaction'] = 0
return x
train = train.apply(update_transaction, axis=1)
test = test.apply(update_transaction, axis=1 )<drop_column> | X_train = X_train.drop('Cabin', axis=1)
X_test = X_test.drop('Cabin', axis=1 ) | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(columns=['num_transfer_trx', 'average_transfer_trx', 'min_transfer_trx', 'max_transfer_trx'], inplace=True)
test.drop(columns=['num_transfer_trx', 'average_transfer_trx', 'min_transfer_trx', 'max_transfer_trx'], inplace=True )<feature_engineering> | X_test['Fare'] = X_test['Fare'].fillna(X_test['Fare'].mean() ) | Titanic - Machine Learning from Disaster |
20,728,635 | train['average_recharge_trx'] /= 15000
train['min_recharge_trx'] /= 15000
train['max_recharge_trx'] /= 15000
train['average_topup_trx'] /= 15000
train['min_topup_trx'] /= 15000
train['max_topup_trx'] /= 15000
train['total_transaction'] /= 15000<feature_engineering> | X_train_null = X_train.isnull().sum()
X_train_null | Titanic - Machine Learning from Disaster |
20,728,635 | test['average_recharge_trx'] /= 15000
test['min_recharge_trx'] /= 15000
test['max_recharge_trx'] /= 15000
test['average_topup_trx'] /= 15000
test['min_topup_trx'] /= 15000
test['max_topup_trx'] /= 15000
test['total_transaction'] /= 15000<feature_engineering> | X_test_null = X_test.isnull().sum()
X_test_null | Titanic - Machine Learning from Disaster |
20,728,635 | train['total_recharge'] = train['average_recharge_trx'] * train['num_recharge_trx']
train['total_topup'] = train['average_topup_trx'] * train['num_topup_trx']
test['total_recharge'] = test['average_recharge_trx'] * test['num_recharge_trx']
test['total_topup'] = test['average_topup_trx'] * test['num_topup_trx']<define_variables> | X_train = X_train.drop(['Name', 'Ticket', 'Survived'], axis = 1)
X_train | Titanic - Machine Learning from Disaster |
20,728,635 | cols = ['userId',
'date_collected',
'num_recharge_trx',
'average_recharge_trx',
'max_recharge_trx',
'min_recharge_trx',
'num_topup_trx',
'average_topup_trx',
'max_topup_trx',
'min_topup_trx',
'total_recharge',
'total_topup',
'num_transaction',
'total_transaction',
'isActive',
'isVerifiedPhone',
'isVerifiedEmail',
'blocked',
'premium',
'super',
'userLevel',
'pinEnabled',
'isChurned']
train = train[cols]
test = test[cols[:-1]]<categorify> | X_test = X_test.drop(['Name', 'Ticket'], axis = 1)
X_test | Titanic - Machine Learning from Disaster |
20,728,635 | encode = ['premium', 'super', 'pinEnabled']
lbl = LabelEncoder()
for col in encode:
train[col] = lbl.fit_transform(train[col].values)
test[col] = lbl.transform(test[col].values )<drop_column> | X_test['family'] = X_test['SibSp'] + X_test['Parch']
X_test = X_test.drop(['SibSp','Parch'], axis=1)
X_test | Titanic - Machine Learning from Disaster |
20,728,635 | train.drop(columns=[
'userId',
'date_collected',
], inplace=True)
test.drop(columns=[
'userId',
'date_collected',
], inplace=True )<count_duplicates> | X_train['family'] = X_train['SibSp'] + X_train['Parch']
X_train = X_train.drop(['SibSp','Parch'], axis=1)
X_train | Titanic - Machine Learning from Disaster |
20,728,635 | no_dup_train = train.drop(train[train.duplicated() ].index ).copy()
dup_index = no_dup_train[no_dup_train.drop(columns='isChurned' ).duplicated() ].index<filter> | Y_train = df_train['Survived']
Y_train | Titanic - Machine Learning from Disaster |
20,728,635 | def get_same_row(index, data=train):
row = data.loc[index]
return train[(train['num_recharge_trx'] == row['num_recharge_trx'])&
(train['average_recharge_trx'] == row['average_recharge_trx'])&
(train['max_recharge_trx'] == row['max_recharge_trx'])&
(train['min_recharge_trx'] == row['min_recharge_trx'])&
(train['total_recharge'] == row['total_recharge'])&
(train['num_topup_trx'] == row['num_topup_trx'])&
(train['average_topup_trx'] == row['average_topup_trx'])&
(train['max_topup_trx'] == row['max_topup_trx'])&
(train['min_topup_trx'] == row['min_topup_trx'])&
(train['total_topup'] == row['total_topup'])&
(train['num_transaction'] == row['num_transaction'])&
(train['total_transaction'] == row['total_transaction'])&
(train['isActive'] == row['isActive'])&
(train['isVerifiedPhone'] == row['isVerifiedPhone'])&
(train['isVerifiedEmail'] == row['isVerifiedEmail'])&
(train['blocked'] == row['blocked'])&
(train['premium'] == row['premium'])&
(train['super'] == row['super'])&
(train['userLevel'] == row['userLevel'])&
(train['pinEnabled'] == row['pinEnabled'])]<feature_engineering> | X_train_null = X_train.isnull().sum()
X_train_null | Titanic - Machine Learning from Disaster |
20,728,635 | def update_row(threshold=0.3, dup_index=dup_index):
for a, i in enumerate(dup_index):
dup_row = get_same_row(i)
index = dup_row.index
zero, one = dup_row['isChurned'].value_counts() [0], dup_row['isChurned'].value_counts() [1]
if(min(zero, one)/(zero+one)<= threshold):
mode = dup_row.mode() ['isChurned'][0]
for idx in index:
train.at[idx, 'isChurned'] = mode
if(a % 100 == 0):
print(a )<count_values> | X_test_null = X_test.isnull().sum()
X_test_null | Titanic - Machine Learning from Disaster |
20,728,635 | train['isChurned'].value_counts()<count_values> | X_train = pd.get_dummies(X_train, columns=['Sex', 'Embarked'], drop_first=True)
X_train | Titanic - Machine Learning from Disaster |
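Given the schema in the header row (`kernel_id`, `prompt`, `completion`, `comp_name`, with lowercase pseudo-label tags such as `<compute_test_metric>` embedded in each prompt), here is a minimal sketch of loading and regrouping such a dump. The file name `kernel_cells.csv` is a hypothetical local export, not something named on this page:

```python
import pandas as pd

# Hypothetical local export of the table above; the four column names
# come from the header row of this page.
df = pd.read_csv("kernel_cells.csv")

# Pull the lowercase pseudo-label tag (e.g. compute_test_metric) out of
# each prompt; the <SOS>/<EOS> markers are uppercase and thus skipped.
df["tag"] = df["prompt"].str.extract(r"<([a-z_]+)>", expand=False)

# Rebuild per-notebook cell order and summarise tag usage.
for kernel_id, cells in df.groupby("kernel_id", sort=False):
    print(kernel_id, cells["comp_name"].iloc[0], len(cells), "cells")
print(df["tag"].value_counts().head())
```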