kernel_id int64 (24.2k–23.3M) | prompt string (8–1.85M chars) | completion string (1–182k chars) | comp_name string (5–57 chars) |
|---|---|---|---|
4,060,422 | unique_targets = data.groupby('text' ).agg(unique_target=('target', pd.Series.nunique))
controversial_tweets = unique_targets[unique_targets['unique_target'] > 1].index
data = data[~data['text'].isin(controversial_tweets)]
data = data.drop_duplicates(subset='text', keep='first')
data['text'] = data['text'].apply(clean_text)
test['text'] = test['text'].apply(clean_text )<load_pretrained> | dft2.to_csv('titanic_results.csv', index=False ) | Titanic - Machine Learning from Disaster |
7,957,348 | tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForSequenceClassification.from_pretrained('roberta-base' )<categorify> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
7,957,348 | def prepare_features(data_set, labels=None, max_seq_length = 100,
zero_pad = True, include_special_tokens = True):
input_ids = []
attention_masks = []
for sent in data_set:
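# encode_plus tokenizes, adds special tokens, pads/truncates to max_seq_length, and returns the attention mask in one call (pad_to_max_length is the pre-transformers-3 padding flag)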
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = include_special_tokens,
max_length = max_seq_length,
pad_to_max_length = zero_pad,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
if labels is not None:
labels = torch.tensor(labels)
return input_ids, attention_masks, labels
else:
return input_ids, attention_masks<feature_engineering> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | train_input_ids, train_attention_masks, train_labels = prepare_features(
train['text'], train['target'])
val_input_ids, val_attention_masks, val_labels = prepare_features(
val['text'], val['target'])
test_input_ids, test_attention_masks = prepare_features(
test['text'] )<create_dataframe> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | training_set = TensorDataset(train_input_ids, train_attention_masks, train_labels)
validation_set = TensorDataset(val_input_ids, val_attention_masks, val_labels)
test_set = TensorDataset(test_input_ids, test_attention_masks )<train_model> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device )<set_options> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | torch.cuda.is_available()<define_variables> | %matplotlib inline
sns.set() | Titanic - Machine Learning from Disaster |
7,957,348 | BATCH_SIZE = 32
LEARNING_RATE = 1e-05
EPSILON = 1e-8
MAX_EPOCHS = 10<load_pretrained> | train_test_data = [train, test]
for dataset in train_test_data:
dataset['Title'] = dataset['Name'].str.extract(r'([A-Za-z]+)\.', expand=False) | Titanic - Machine Learning from Disaster |
7,957,348 | loading_params = {'batch_size': BATCH_SIZE,
'shuffle': True,
'drop_last': False,
'num_workers': 1}
training_loader = DataLoader(training_set, **loading_params)
validation_loader = DataLoader(validation_set, **loading_params)
test_loading_params = {'batch_size': BATCH_SIZE,
'shuffle': False,
'drop_last': False,
'num_workers': 1}
testing_loader = DataLoader(test_set, **test_loading_params )<choose_model_class> | train['Title'].value_counts() | Titanic - Machine Learning from Disaster |
7,957,348 | loss_function = nn.CrossEntropyLoss()
optimizer = AdamW(model.parameters() ,
lr = LEARNING_RATE,
eps = EPSILON
)
total_steps = len(training_loader)* MAX_EPOCHS
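# linear decay with no warmup: the learning rate falls from LEARNING_RATE to 0 over the full total_steps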
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps )<data_type_conversions> | test['Title'].value_counts() | Titanic - Machine Learning from Disaster |
7,957,348 | def format_time(elapsed):
elapsed_rounded = int(round(elapsed))
return str(datetime.timedelta(seconds=elapsed_rounded))<compute_test_metric> | title_mapping = {"Mr": 0, "Miss": 1, "Mrs": 2,
"Master": 3, "Dr": 3, "Rev": 3, "Col": 3, "Major": 3, "Mlle": 3,"Countess": 3,
"Ms": 3, "Lady": 3, "Jonkheer": 3, "Don": 3, "Dona" : 3, "Mme": 3,"Capt": 3,"Sir": 3 }
for dataset in train_test_data:
dataset['Title'] = dataset['Title'].map(title_mapping ) | Titanic - Machine Learning from Disaster |
7,957,348 | def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1)
labels_flat = labels
return accuracy_score(labels_flat, pred_flat )<train_model> | train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
7,957,348 | for epoch in tqdm_notebook(range(MAX_EPOCHS)):
t0 = time.time()
total_train_loss = 0
model.train()
print("EPOCH -- {} / {}".format(epoch, MAX_EPOCHS))
for step, batch in enumerate(training_loader):
if step % 30 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {} of {}. Elapsed: {:}'.format(step, len(training_loader), elapsed))
input_ids = batch[0].to(device ).to(torch.int64)
input_masks = batch[1].to(device ).to(torch.int64)
labels = batch[2].to(device ).to(torch.int64)
model.zero_grad()
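# assumption: a transformers version whose models return a (loss, logits) tuple when labels are passed; newer releases return a ModelOutput object instead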
loss, logits = model(input_ids,
token_type_ids=None,
attention_mask=input_masks,
labels=labels)
logits = logits.detach().cpu().numpy()
label_ids = labels.to('cpu' ).numpy()
total_train_loss += loss.item()
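# backpropagate, clip gradients to max-norm 1.0, then step the optimizer and advance the LR schedule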
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters() , 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(training_loader)
training_time = format_time(time.time() - t0)
print('')
print(' Average training loss: {0:.4f}'.format(avg_train_loss))
print(' Training epoch took: {:}'.format(training_time))
print('Running Validation')
model.eval()
val_predictions = []
val_labels = []
for batch in validation_loader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad() :
loss, logits = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels)
val_predictions.append(logits.detach().cpu().numpy())
val_labels.append(b_labels.to('cpu' ).numpy())
val_predictions = [item for sublist in val_predictions for item in sublist]
val_labels = [item for sublist in val_labels for item in sublist]
val_accuracy = flat_accuracy(val_predictions, val_labels)
print(' Accuracy: {0:.4f}'.format(val_accuracy))<predict_on_test> | sex_mapping = {"male": 0, "female": 1}
for dataset in train_test_data:
dataset['Sex'] = dataset['Sex'].map(sex_mapping ) | Titanic - Machine Learning from Disaster |
7,957,348 | model.eval()
predictions = []
for batch in testing_loader:
batch = tuple(t.to(device)for t in batch)
input_ids, input_masks = batch
with torch.no_grad() :
logits = model(input_ids,
token_type_ids=None,
attention_mask=input_masks)[0]
logits = logits.detach().cpu().numpy()
predictions.append(logits )<define_variables> | train["Age"].fillna(train.groupby("Title")["Age"].transform("median"), inplace=True)
test["Age"].fillna(test.groupby("Title")["Age"].transform("median"), inplace=True ) | Titanic - Machine Learning from Disaster |
7,957,348 | flat_predictions = [item for sublist in predictions for item in sublist]
targets = np.argmax(flat_predictions, axis=1 ).flatten()<feature_engineering> | train.groupby("Title")["Age"].transform("median" ) | Titanic - Machine Learning from Disaster |
7,957,348 | test['target'] = targets<data_type_conversions> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | tokenizer.convert_tokens_to_ids(tokenizer.tokenize('dick'))<save_to_csv> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
7,957,348 | test[['id', 'target']].to_csv("submission_roberta.csv", index=False)<categorify> | for dataset in train_test_data:
dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 26), 'Age'] = 1
dataset.loc[(dataset['Age'] > 26) & (dataset['Age'] <= 36), 'Age'] = 2
dataset.loc[(dataset['Age'] > 36) & (dataset['Age'] <= 62), 'Age'] = 3
dataset.loc[dataset['Age'] > 62, 'Age'] = 4 | Titanic - Machine Learning from Disaster |
7,957,348 | def preprocess(texts, allowed_postags=['NOUN', "ADJ", "VERB", "ADV", "DET"]):
texts_out = []
for text in texts:
lowered_text = text.lower()
doc = nlp(lowered_text)
tokens = [token for token in doc if not(token.is_punct |
token.is_space |
token.is_digit)]
tokens = [token for token in tokens if token.is_alpha]
lemmas = [token.lemma_ for token in tokens if token.pos_ in allowed_postags]
words = [lemma for lemma in lemmas if lemma not in stop_words]
texts_out.append(words)
return texts_out<feature_engineering> | Pclass1 = train[train['Pclass']==1]['Embarked'].value_counts()
Pclass2 = train[train['Pclass']==2]['Embarked'].value_counts()
Pclass3 = train[train['Pclass']==3]['Embarked'].value_counts()
df = pd.DataFrame([Pclass1, Pclass2, Pclass3])
df.index = ['1st class','2nd class', '3rd class']
df.plot(kind='bar',stacked=True, figsize=(10,5)) | Titanic - Machine Learning from Disaster |
7,957,348 | data_pruned = data.copy(deep=True)
data_pruned['text'] = data_pruned['text'].progress_apply(clean_text)
data_pruned['text'] = data_pruned['text'].progress_apply(preprocess )<feature_engineering> | for dataset in train_test_data:
dataset['Embarked']=dataset['Embarked'].fillna('S' ) | Titanic - Machine Learning from Disaster |
7,957,348 | test_pruned = test.copy(deep=True)
test_pruned['text'] = test_pruned['text'].apply(clean_text)
test_pruned['text'] = test_pruned['text'].apply(preprocess )<concatenate> | embarked_mapping = {"S": 0, "C": 1, "Q": 2}
for dataset in train_test_data:
dataset['Embarked'] = dataset['Embarked'].map(embarked_mapping ) | Titanic - Machine Learning from Disaster |
7,957,348 | all_text = pd.concat([data_pruned[['text']], test_pruned[['text']]], ignore_index=True )<feature_engineering> | train["Fare"].fillna(train.groupby("Pclass")["Fare"].transform("median"), inplace=True)
test["Fare"].fillna(test.groupby("Pclass")["Fare"].transform("median"), inplace=True)
train.head(50 ) | Titanic - Machine Learning from Disaster |
7,957,348 | cv = CountVectorizer(ngram_range=(1,2))
tfidf_transformer = TfidfTransformer()
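# fit the vectorizer on train and test text together so both share one vocabulary before TF-IDF weighting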
x = cv.fit_transform(all_text['text'])
x_all_tfidf = tfidf_transformer.fit_transform(x )<train_model> | for dataset in train_test_data:
dataset.loc[dataset['Fare'] <= 17, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 17) & (dataset['Fare'] <= 30), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 30) & (dataset['Fare'] <= 100), 'Fare'] = 2
dataset.loc[dataset['Fare'] > 100, 'Fare'] = 3 | Titanic - Machine Learning from Disaster |
7,957,348 | training_samples = data_pruned.shape[0]
X_train = x_all_tfidf[:training_samples,:]
X_test = x_all_tfidf[training_samples:,:]<prepare_x_and_y> | train.Cabin.value_counts() | Titanic - Machine Learning from Disaster |
7,957,348 | y_train = data_pruned['target']<train_model> | for dataset in train_test_data:
dataset['Cabin']=dataset['Cabin'].str[:1] | Titanic - Machine Learning from Disaster |
7,957,348 | mb_classifier = MB().fit(X_train, y_train )<compute_train_metric> | cabin_mapping = {"A": 0, "B": 0.4, "C": 0.8, "D": 1.2, "E": 1.6, "F": 2, "G": 2.4, "T": 2.8}
for dataset in train_test_data:
dataset['Cabin'] = dataset['Cabin'].map(cabin_mapping ) | Titanic - Machine Learning from Disaster |
7,957,348 | pred = mb_classifier.predict(X_train)
c = classification_report(y_train,pred )<find_best_model_class> | train["Cabin"].fillna(train.groupby("Pclass")["Cabin"].transform("median"), inplace=True)
test["Cabin"].fillna(test.groupby("Pclass")["Cabin"].transform("median"), inplace=True ) | Titanic - Machine Learning from Disaster |
7,957,348 | skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=RANDOM_STATE)
total_accuracy = []
total_precision = []
total_recall = []
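# stratified 5-fold CV: every fold keeps the class balance of the full training set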
for train_index, val_index in skf.split(X_train, y_train):
current_X_train = X_train[train_index]
current_y_train = y_train.iloc[train_index]
current_X_val = X_train[val_index]
current_y_val = y_train.iloc[val_index]
clf = clf_svm
clf.fit(current_X_train, current_y_train)
current_predictions = clf.predict(current_X_val)
total_accuracy.append(accuracy_score(current_y_val, current_predictions))
total_precision.append(precision_score(current_y_val, current_predictions))
total_recall.append(recall_score(current_y_val, current_predictions))
ave_accuracy = mean(total_accuracy)
ave_precision = mean(total_precision)
ave_recall = mean(total_recall)
print("Average Accuracy: {:.4f}".format(ave_accuracy))
print("Average Precision: {:.4f}".format(ave_precision))
print("Average Recall: {:.4f}".format(ave_recall))
<predict_on_test> | train["FamilySize"] = train["SibSp"] + train["Parch"] + 1
test["FamilySize"] = test["SibSp"] + test["Parch"] + 1 | Titanic - Machine Learning from Disaster |
7,957,348 | test_pruned['target'] = mb_classifier.predict(X_test)<save_to_csv> | family_mapping = {1: 0, 2: 0.4, 3: 0.8, 4: 1.2, 5: 1.6, 6: 2, 7: 2.4, 8: 2.8, 9: 3.2, 10: 3.6, 11: 4}
for dataset in train_test_data:
dataset['FamilySize'] = dataset['FamilySize'].map(family_mapping ) | Titanic - Machine Learning from Disaster |
7,957,348 | test_pruned[['id', 'target']].to_csv("submission_2.csv", index=False )<choose_model_class> | features_drop = ['Ticket', 'SibSp', 'Parch']
train = train.drop(features_drop, axis=1)
test = test.drop(features_drop, axis=1)
train = train.drop(['PassengerId'], axis=1 ) | Titanic - Machine Learning from Disaster |
7,957,348 | clf_svm = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto' )<predict_on_test> | from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
import numpy as np | Titanic - Machine Learning from Disaster |
7,957,348 | clf_svm.fit(X_train, y_train)
pred = clf_svm.predict(X_train)
print(classification_report(y_train, pred))<save_to_csv> | k_fold = KFold(n_splits=10, shuffle=True, random_state=0 ) | Titanic - Machine Learning from Disaster |
7,957,348 | test_pruned['target'] = clf_svm.predict(X_test)
test_pruned[['id', 'target']].to_csv("submission_svm.csv", index=False)<prepare_x_and_y> | clf = KNeighborsClassifier(n_neighbors = 13)
scoring = 'accuracy'
score = cross_val_score(clf, train_data, target, cv=k_fold, n_jobs=1, scoring=scoring)
print(score ) | Titanic - Machine Learning from Disaster |
7,957,348 | TRAIN_VAL_SPLIT = 0.8
train = data.sample(frac=TRAIN_VAL_SPLIT, random_state=RANDOM_STATE)
val = data[~data.index.isin(train.index)]
X_train = train['text'].values
y_train = train['target'].values
X_val = val['text'].values
y_val = val['target'].values
X_test = test['text'].values<categorify> | round(np.mean(score)*100, 2 ) | Titanic - Machine Learning from Disaster |
7,957,348 | def preprocess(texts, allowed_postags=['NOUN', "ADJ", "VERB", "ADV", "PROPN", "DET"]):
texts_out = []
for text in texts:
lowered_text = text.lower()
lowered_text = re.sub(r'https?://\S+|www\.\S+', '', lowered_text)
lowered_text = re.sub(r'<.*?>', '', lowered_text)
doc = nlp(lowered_text)
tokens = [token for token in doc if not(token.is_punct |
token.is_space |
token.is_digit)]
lemmas = [token.lemma_ for token in tokens if token.pos_ in allowed_postags]
words = [lemma for lemma in lemmas if lemma not in stop_words]
texts_out.append(words)
return texts_out<split> | clf = DecisionTreeClassifier()
scoring = 'accuracy'
score = cross_val_score(clf, train_data, target, cv=k_fold, n_jobs=1, scoring=scoring)
print(score ) | Titanic - Machine Learning from Disaster |
7,957,348 | train_corpus = preprocess(X_train)
val_corpus = preprocess(X_val)
test_corpus = preprocess(X_test )<define_variables> | round(np.mean(score)*100, 2 ) | Titanic - Machine Learning from Disaster |
7,957,348 | corpus = train_corpus + val_corpus + test_corpus<string_transform> | clf = RandomForestClassifier(n_estimators=13)
scoring = 'accuracy'
score = cross_val_score(clf, train_data, target, cv=k_fold, n_jobs=1, scoring=scoring)
print(score ) | Titanic - Machine Learning from Disaster |
7,957,348 | tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
train_sequences = tokenizer.texts_to_sequences(train_corpus)
val_sequences = tokenizer.texts_to_sequences(val_corpus)
test_sequences = tokenizer.texts_to_sequences(test_corpus)
word_index = tokenizer.word_index
train_max_length = max([len(x)for x in train_sequences])
val_max_length = max([len(x)for x in val_sequences])
test_max_length = max([len(x)for x in test_sequences])
max_length = max(train_max_length, val_max_length, test_max_length)
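# pad all splits to the single longest sequence so train/val/test tensors share one shape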
X_train_pad = pad_sequences(train_sequences, maxlen=max_length, padding="post")
X_val_pad = pad_sequences(val_sequences, maxlen=max_length, padding="post")
X_test_pad = pad_sequences(test_sequences, maxlen=max_length, padding="post")
vocab = np.array(list(tokenizer.word_index.keys()))
vocab_size = len(tokenizer.word_index)+ 1<define_variables> | round(np.mean(score)*100, 2 ) | Titanic - Machine Learning from Disaster |
7,957,348 | EMBEDDING_DIM = 30<choose_model_class> | clf = GaussianNB()
scoring = 'accuracy'
score = cross_val_score(clf, train_data, target, cv=k_fold, n_jobs=1, scoring=scoring)
print(score ) | Titanic - Machine Learning from Disaster |
7,957,348 | model_tuned = Sequential()
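# stacked-GRU classifier: the first two GRUs return full sequences, the last returns its final state; L1+L2 regularization on that layer curbs overfitting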
model_tuned.add(Embedding(vocab_size, EMBEDDING_DIM, input_length=max_length))
model_tuned.add(GRU(units=30, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model_tuned.add(GRU(units=30, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
model_tuned.add(GRU(units=30, dropout=0.2, recurrent_dropout=0.2,
kernel_regularizer=l1_l2(0.01, 0.01), recurrent_regularizer=l1_l2(0.01, 0.01)))
model_tuned.add(Dense(1, activation='sigmoid'))
model_tuned.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'] )<train_model> | round(np.mean(score)*100, 2 ) | Titanic - Machine Learning from Disaster |
7,957,348 | history = model_tuned.fit(X_train_pad, y_train, batch_size=128, epochs=25, validation_data=(X_val_pad, y_val), verbose=2 )<predict_on_test> | clf = SVC()
scoring = 'accuracy'
score = cross_val_score(clf, train_data, target, cv=k_fold, n_jobs=1, scoring=scoring)
print(score ) | Titanic - Machine Learning from Disaster |
7,957,348 | y_pred_tuned = model_tuned.predict(X_test_pad )<define_variables> | round(np.mean(score)*100,2 ) | Titanic - Machine Learning from Disaster |
7,957,348 | y_pred_binary = list(map(lambda x: 1 if x >= 0.5 else 0, y_pred_tuned))<feature_engineering> | clf = SVC()
clf.fit(train_data, target)
test_data = test.drop("PassengerId", axis=1 ).copy()
prediction = clf.predict(test_data ) | Titanic - Machine Learning from Disaster |
7,957,348 | test['target'] = y_pred_binary<save_to_csv> | submission = pd.DataFrame({
"PassengerId": test["PassengerId"],
"Survived": prediction
})
submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
8,140,706 | test[['id', 'target']].to_csv("submission_rnn.csv", index=False )<choose_model_class> | from sklearn.model_selection import train_test_split
import random
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV | Titanic - Machine Learning from Disaster |
8,140,706 | def ConvNet(max_sequence_length, num_words, embedding_dim, labels_index):
embedding_layer = Embedding(num_words,
embedding_dim,
input_length=max_sequence_length)
sequence_input = Input(shape=(max_sequence_length,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
convs = []
filter_sizes = [3,4,5,6]
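# Kim-style text CNN: one Conv1D branch per kernel size captures n-grams of that length; global max pooling keeps the strongest activation per filter before the branches are concatenated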
for filter_size in filter_sizes:
l_conv = Conv1D(filters=50,
kernel_size=filter_size,
activation='relu' )(embedded_sequences)
l_conv = Dropout(0.2 )(l_conv)
l_pool = GlobalMaxPooling1D()(l_conv)
convs.append(l_pool)
l_merge = concatenate(convs, axis=1)
x = Dense(32, activation='relu' )(l_merge)
x = Dropout(0.2 )(x)
preds = Dense(labels_index, activation='sigmoid' )(x)
model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
model.summary()
return model<choose_model_class> | import itertools
import matplotlib.pyplot as plt
import lightgbm as lgb
from sklearn import metrics | Titanic - Machine Learning from Disaster |
8,140,706 | model = ConvNet(max_length, vocab_size, EMBEDDING_DIM, 1 )<train_model> | from sklearn.feature_selection import SelectKBest, f_classif | Titanic - Machine Learning from Disaster |
8,140,706 | hist = model.fit(X_train_pad,
y_train,
epochs=25,
batch_size=128,
validation_data=(X_val_pad, y_val),
verbose=2 )<predict_on_test> | import itertools | Titanic - Machine Learning from Disaster |
8,140,706 | y_pred = model.predict(X_test_pad)
y_pred_binary = list(map(lambda x: 1 if x >= 0.5 else 0, y_pred))<save_to_csv> | def Feature_selection(X):
cat_features = [y for y in X.columns if X[y].dtypes == 'object']
cat_features = [col for col in cat_features if col not in ['Name', 'Ticket', 'Cabin']]
interactions = pd.DataFrame(index=X.index)
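# pairwise interaction features: join each pair of categorical values into one string, then label-encode the result as a new column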
for col1, col2 in itertools.combinations(cat_features, 2):
new_col_name = '_'.join([col1, col2])
new_values = X[col1].map(str)+ "_" + X[col2].map(str)
encoder = LabelEncoder()
X[new_col_name] = encoder.fit_transform(new_values)
return X | Titanic - Machine Learning from Disaster |
8,140,706 | test['target'] = y_pred_binary
test[['id', 'target']].to_csv("submission_cnn.csv", index=False )<filter> | import category_encoders as ce | Titanic - Machine Learning from Disaster |
8,140,706 | data[data['target'] == 0].values<set_options> | Titanic - Machine Learning from Disaster | |
8,140,706 | stop = set(stopwords.words('english'))
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))<load_from_csv> | def Categorical_Encoding_train(X):
cat_features = [y for y in X.columns if X[y].dtypes == 'object']
cat_features = [col for col in cat_features if col not in ['Name']]
encoder = ce.CatBoostEncoder(cols = cat_features)
encoder.fit(X[cat_features], X['Survived'])
numerical_col = [y for y in X.columns if X[y].dtypes != 'object']
data = X[numerical_col].join(encoder.transform(X[cat_features]))
return data, encoder
| Titanic - Machine Learning from Disaster |
8,140,706 | train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
target = train['target']
print("Train shape", train.shape)
print("Test shape", test.shape )<string_transform> | def Categorical_Encoding_test(X, encoder):
cat_features = [y for y in X.columns if X[y].dtypes == 'object']
print(cat_features)
cat_features = [col for col in cat_features if col not in ['Name']]
numerical_col = [y for y in X.columns if X[y].dtypes != 'object']
data = X[numerical_col].join(encoder.transform(X[cat_features]))
print(data.head())
return data | Titanic - Machine Learning from Disaster |
8,140,706 | def create_corpus(target):
corpus = []
for x in train[train['target'] == target]['text'].str.split() :
for i in x:
corpus.append(i)
return corpus
def filter_specific_word(corpus, filters):
dic = defaultdict(int)
for word in corpus:
if word in filters:
dic[word] += 1
return dic<feature_engineering> | from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer | Titanic - Machine Learning from Disaster |
8,140,706 | def get_top_tweet_bigrams(corpus, n=None):
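# count bigrams over the whole corpus, then rank them by total frequency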
vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx])for word, idx in vec.vocabulary_.items() ]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
return words_freq<concatenate> | def iterative_imputer(X):
numerical_feature = [y for y in X.columns if X[y].dtypes != 'object'
and X[y].isnull().sum() != 0]
imp_mean = IterativeImputer(max_iter=10, verbose=0)
X[numerical_feature] = imp_mean.fit_transform(X[numerical_feature])
return X | Titanic - Machine Learning from Disaster |
8,140,706 | df = pd.concat([train, test], axis=0, sort=False)
df.shape<drop_column> | def numerical_imputer(X):
numerical_feature = [y for y in X.columns if X[y].dtypes != 'object'
and X[y].isnull().sum() != 0]
my_imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
X[numerical_feature] = pd.DataFrame(my_imputer.fit_transform(X[numerical_feature]))
return X | Titanic - Machine Learning from Disaster |
8,140,706 | def remove_url(text):
url = re.compile(r'https?://\S+|www\.\S+')
return url.sub(r'', text)
remove_url('http://www.kaggle.com/rakkaalhazimi/nlp-disaster-classification/edit?rvi=1' )<feature_engineering> | import lightgbm as lgb | Titanic - Machine Learning from Disaster |
8,140,706 | df['text'] = df['text'].apply(lambda x: remove_url(x))
retain = df['text'].str.contains(r'http[s]*' ).sum()
print("{} words were left behind".format(retain))<string_transform> | def train_and_predict(train_X, train_y, valid_X, valid_y, X_test):
dtrain = lgb.Dataset(train_X, label=train_y)
dvalid = lgb.Dataset(valid_X, label=valid_y)
param = {'num_leaves': 64, 'objective': 'binary'}
param['metric'] = 'auc'
num_round = 1000
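# up to 1000 boosting rounds, but training halts early once validation AUC stops improving for 10 rounds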
bst = lgb.train(param, dtrain, num_round,
valid_sets=[dvalid], early_stopping_rounds=10, verbose_eval=False)
y_pred = bst.predict(X_test)
return y_pred | Titanic - Machine Learning from Disaster |
8,140,706 | residual = df[df['text'].str.contains(r'http[s]*')]
left_word = []
for i in range(len(residual)) :
print(residual['text'].values[i])
left_word.append(residual['text'].values[i] )<categorify> | if __name__ == '__main__':
seed = 123
random.seed(seed)
print('Loading Training Data')
baseline_data = pd.read_csv('/kaggle/input/titanic/train.csv')
baseline_data = Feature_selection(baseline_data)
baseline_data = numerical_imputer(baseline_data)
encoded_data, encoder = Categorical_Encoding_train(baseline_data)
cols = [col for col in encoded_data.columns if col not in ['Survived','PassengerId']]
X = encoded_data[cols]
y = baseline_data['Survived']
valid_fraction = 0.1
valid_size = int(len(encoded_data)* valid_fraction)
train_X = X[:-2 * valid_size]
train_y = y[:-2 * valid_size]
valid_X = X[-2 * valid_size:]
valid_y = y[-2 * valid_size:]
print('Loading Testing Data')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data = Feature_selection(test_data)
test_data = numerical_imputer(test_data)
encoded_data_test = Categorical_Encoding_test(test_data, encoder)
X_test = encoded_data_test[cols].iloc[:,:]
print(X_test.head())
y_pred = train_and_predict(train_X, train_y, valid_X, valid_y, X_test)
y_pred = np.around(y_pred)
y_pred = y_pred.astype(int)
output = pd.DataFrame({'PassengerId': test_data.PassengerId,
'Survived': y_pred})
output.to_csv('submission_grid_search.csv', index=False)
| Titanic - Machine Learning from Disaster |
8,140,706 | for word in left_word:
compiler = re.compile(r'.http.+')
result = compiler.sub('', word)
print(result )<feature_engineering> | Titanic - Machine Learning from Disaster | |
8,140,706 | df['text'] = df['text'].str.replace(r'.http.+', '')
print("http words found {}".format(df['text'].str.contains('http' ).sum()))<drop_column> | baseline_data.groupby('Pclass')['Survived'].count() | Titanic - Machine Learning from Disaster |
8,140,706 | <feature_engineering><EOS> | baseline_data.groupby('Embarked')['Survived'].count() | Titanic - Machine Learning from Disaster |
8,355,010 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<drop_column> | sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 10, 6
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
def print_metrics(y_true,y_pred):
conf_mx = confusion_matrix(y_true,y_pred)
print("------------------------------------------")
print(" Accuracy : ", accuracy_score(y_true,y_pred))
print(" Precision : ", precision_score(y_true,y_pred))
print(" Sensitivity : ", recall_score(y_true,y_pred))
print("------------------------------------------")
print(classification_report(y_true, y_pred))
print("------------------------------------------")
class_names = [0,1]
fig,ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks,class_names)
plt.yticks(tick_marks,class_names)
sns.heatmap(pd.DataFrame(conf_mx),annot=True,cmap="Blues",fmt="d",cbar=False)
ax.xaxis.set_label_position('top')
plt.tight_layout()
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
plt.show() | Titanic - Machine Learning from Disaster |
8,355,010 | def remove_emoji(text):
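# the unicode ranges below cover emoticons, symbols and pictographs, transport icons, flags, and dingbats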
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
remove_emoji("Omg another Earthquake 😔😔" )<feature_engineering> | train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
submit_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train_df.head(10 ) | Titanic - Machine Learning from Disaster |
8,355,010 | df['text'] = df['text'].apply(lambda x: remove_emoji(x))<categorify> | def feature_eng(df,columnshoice):
df['TicketLetter'] = df['Ticket'].apply(lambda x : str(x)[0])
df['TicketLetter'] = df['TicketLetter'].apply(lambda x : re.sub('[0-9]','N',x))
df['Title'] = df.Name.apply(lambda name: name.split(',')[1].split('.')[0].strip())
normalized_titles = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess":"Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
df['Title'] = df['Title'].map(normalized_titles)
df['NameLength'] = df['Name'].apply(lambda x : len(x))
df['NameLength'] = (df['NameLength'] / 15).astype(np.int64) + 1
df["FamilySize"] = df['SibSp'] + df['Parch'] + 1
bins = [-1,1,4, np.inf]
labels = ['ONE','SMALL','BIG']
df['FamilyGroup'] = pd.cut(df["FamilySize"], bins, labels = labels)
df['IsAlone'] = 'Y'
df.loc[df['FamilySize'] > 1,'IsAlone'] = 'N'
df["Embarked"] = df["Embarked"].fillna("S")
df["Age"] = df.groupby(['Sex','Title'])["Age"].transform(lambda x: x.fillna(x.median()))
df["Cabin"] = df["Cabin"].str[0:1]
df["Cabin"] = df["Cabin"].fillna('T')
df['Fare'] = df['Fare'].fillna(-1)
return df[columnshoice]
columnshoice = ['Pclass', 'Sex', 'Age', 'SibSp',
'Parch', 'Fare', 'Embarked', 'TicketLetter', 'Title',
'FamilySize', 'IsAlone','NameLength']
y = train_df["Survived"]
X = feature_eng(train_df,columnshoice)
test = feature_eng(test_df,columnshoice)
print(X.columns)
print(X.shape)
numerical_features = list(X.select_dtypes(include=['int64', 'float64', 'int32'] ).columns)
scaler = preprocessing.StandardScaler()
X[numerical_features] = scaler.fit_transform(X[numerical_features])
test[numerical_features] = scaler.transform(test[numerical_features])
X = pd.get_dummies(X)
test = pd.get_dummies(test ) | Titanic - Machine Learning from Disaster |
8,355,010 | def remove_punct(text):
table = str.maketrans('', '', string.punctuation)
return text.translate(table)
example = "I am King
remove_punct(example )<feature_engineering> | model = tf.keras.models.Sequential([
tf.keras.layers.Dense(units=64, activation='relu',input_dim=X.shape[1]),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1, activation='sigmoid')
])
early_stop = keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=5
)
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
| Titanic - Machine Learning from Disaster |
8,355,010 | df['text'] = df['text'].apply(lambda x: remove_punct(x))<load_from_csv> | history = model.fit(
x=X,
y=y,
shuffle=True,
epochs=100,
validation_split=0.1,
verbose=0,
callbacks=[early_stop]
) | Titanic - Machine Learning from Disaster |
8,355,010 | df = pd.read_csv(".. /input/nlp-disaster-cleaned/tweetDisaster.csv" )<remove_duplicates> | predictions = model.predict(X)
predictions = tf.round(predictions ).numpy().flatten().astype(int ) | Titanic - Machine Learning from Disaster |
8,355,010 | def create_corpus(df):
copy_df = df.copy()
corpus = []
for tweet in tqdm(copy_df["text"]):
words = [word.lower() for word in word_tokenize(tweet) if word.isalpha() and word not in stop]
corpus.append(words)
return corpus<statistical_test> | print_metrics(y,predictions)
| Titanic - Machine Learning from Disaster |
8,355,010 | <define_variables><EOS> | y_sub= model.predict(test)
y_sub = tf.round(y_sub ).numpy().flatten().astype(int)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": y_sub
})
submission.to_csv('titanic.csv', index=False ) | Titanic - Machine Learning from Disaster |
9,074,751 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model> | %matplotlib inline
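# transform_dataset: median-impute Age and Fare, factorize Sex, one-hot encode Embarked, and split Parch into child vs parent counts via get_childs/get_parents below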
def transform_dataset(ds):
transformed_dataset = ds.copy()
transformed_dataset['Age'].fillna(transformed_dataset['Age'].median() , inplace=True)
transformed_dataset['Fare'].fillna(transformed_dataset['Fare'].median() , inplace=True)
transformed_dataset['Sex'] = pd.factorize(transformed_dataset['Sex'])[0]
transformed_dataset=pd.concat([transformed_dataset, pd.get_dummies(transformed_dataset['Embarked'], prefix="Embarked")], axis=1)
transformed_dataset['Childs'] = transformed_dataset.apply(lambda row: get_childs(row), axis=1)
transformed_dataset['Parents'] = transformed_dataset.apply(lambda row: get_parents(row), axis=1)
transformed_dataset = transformed_dataset.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Embarked', 'Parch'], axis=1)
return transformed_dataset
def get_childs(row):
if(row.Parch>2 or row.Age>=18):
return row.Parch
return 0
def get_parents(row):
if(row.Parch <= 2 and row.Age<18):
return row.Parch
return 0 | Titanic - Machine Learning from Disaster |
9,074,751 | print("Embedding shape :({},{})".format(len(embedding_dict),
len(embedding_dict['the'])) )<string_transform> | dataset = pd.read_csv('.. /input/titanic/train.csv')
dataset.info() | Titanic - Machine Learning from Disaster |
9,074,751 | MAX_LEN = 50
tokenizer_obj = Tokenizer()
tokenizer_obj.fit_on_texts(corpus)
sequences = tokenizer_obj.texts_to_sequences(corpus)
tweet_pad = pad_sequences(sequences, maxlen=MAX_LEN, truncating='post', padding='post' )<count_unique_values> | transformed_dataset = transform_dataset(dataset)
transformed_dataset.info() | Titanic - Machine Learning from Disaster |
9,074,751 | word_index = tokenizer_obj.word_index
print("Number of unique words:", len(word_index))<sort_values> | X = transformed_dataset.drop(['Survived'], axis=1)
Y = dataset['Survived'] | Titanic - Machine Learning from Disaster |
9,074,751 | top = sorted(word_index, key=lambda x: word_index[x], reverse=True)[:10]
unknown_index = []
for word in top:
scores =(word, word_index[word])
unknown_index.append(scores)
unknown_index<categorify> | X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size = 0.2, random_state = 21 ) | Titanic - Machine Learning from Disaster |
9,074,751 | num_words = len(word_index)+ 1
embedding_matrix = np.zeros(( num_words, 100))
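# rows stay all-zero for words absent from the pretrained embedding_dict (assumed 100-d GloVe-style vectors)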
for word, i in tqdm(word_index.items()):
if i > num_words:
continue
emb_vec = embedding_dict.get(word)
if emb_vec is not None:
embedding_matrix[i] = emb_vec<split> | param_grid =[ {'n_estimators' : [10, 15, 20, 25, 30, 35, 40], 'max_depth' : [5,10,15, 20]},]
rf = ensemble.RandomForestClassifier(random_state=21, max_features= 3)
model = GridSearchCV(rf,param_grid, cv = 5 ) | Titanic - Machine Learning from Disaster |
9,074,751 | train = tweet_pad[:train.shape[0]]
test = tweet_pad[train.shape[0]:]<choose_model_class> | model.fit(X_train,Y_train)
print('train score = ', model.score(X_train, Y_train), '\ntest score = ', model.score(X_test, Y_test), '\n', model.best_params_) | Titanic - Machine Learning from Disaster |
9,074,751 | model = Sequential()
embedding = Embedding(num_words, 100, embeddings_initializer=Constant(embedding_matrix),
input_length=MAX_LEN, trainable=False)
model.add(embedding)
model.add(Bidirectional(LSTM(128,
dropout=0.2,
recurrent_dropout=0.2))
)
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(learning_rate=1e-4)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
model.summary()<train_model> | test = pd.read_csv('../input/titanic/test.csv')
test.info() | Titanic - Machine Learning from Disaster |
9,074,751 | history = model.fit(train, target, batch_size=32, epochs=15,
validation_split=0.2, verbose=1 )<load_from_csv> | passenger_id = test['PassengerId'] | Titanic - Machine Learning from Disaster |
9,074,751 | sample_sub = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
sample_sub.shape<save_to_csv> | transformed_test = transform_dataset(test)
transformed_test.info() | Titanic - Machine Learning from Disaster |
9,074,751 | y_pred = model.predict(test)
y_pred = np.round(y_pred ).astype(int ).reshape(3263)
sub = pd.DataFrame({'id': sample_sub['id'].values.tolist() , 'target': y_pred})
sub.to_csv("submission.csv", index=False )<import_modules> | Y_predict = model.predict(transformed_test)
Y_p = pd.DataFrame(Y_predict, columns=['Survived'])
Y_p | Titanic - Machine Learning from Disaster |
9,074,751 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import string
from tqdm import tqdm
from gensim.parsing.preprocessing import remove_stopwords
from bs4 import BeautifulSoup
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from collections import OrderedDict
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split,RandomizedSearchCV
from sklearn.metrics import classification_report,f1_score
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow import keras
from keras import backend as K
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.optimizers import Adam
import torch
import transformers<load_from_csv> | res = pd.concat([passenger_id, Y_p], axis=1)
res | Titanic - Machine Learning from Disaster |
9,074,751 | train = pd.read_csv(r'/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv(r'/kaggle/input/nlp-getting-started/test.csv' )<categorify> | res.to_csv('res.csv', index=None ) | Titanic - Machine Learning from Disaster |
3,808,829 | def remove_shortforms(phrase):
phrase = re.sub(r"won't", "will not", phrase)
phrase = re.sub(r"can't", "can not", phrase)
phrase = re.sub(r"n't", " not", phrase)
phrase = re.sub(r"'re", " are", phrase)
phrase = re.sub(r"'s", " is", phrase)
phrase = re.sub(r"'d", " would", phrase)
phrase = re.sub(r"'ll", " will", phrase)
phrase = re.sub(r"'t", " not", phrase)
phrase = re.sub(r"'ve", " have", phrase)
phrase = re.sub(r"'m", " am", phrase)
return phrase
def remove_special_char(text):
text = re.sub('[^A-Za-z0-9]+'," ",text)
return text
def remove_wordswithnum(text):
text = re.sub("\S*\d\S*", "", text ).strip()
return text
def lowercase(text):
text = text.lower()
return text
def remove_stop_words(text):
text = remove_stopwords(text)
return text
st = SnowballStemmer(language='english')
def stemming(text):
r= []
for word in text :
a = st.stem(word)
r.append(a)
return r
def listToString(s):
str1 = " "
return(str1.join(s))
def remove_punctuations(text):
text = re.sub(r'[^\w\s]','',text)
return text
def remove_links(text):
text = re.sub(r'http\S+', '', text)
return text
lemmatizer = WordNetLemmatizer()
def lemmatize_words(text):
text = lemmatizer.lemmatize(text)
return text
def remove_html(text):
html=re.compile(r'<.*?>')
return html.sub(r'',text )<prepare_x_and_y> | discriminant_analysis, tree, gaussian_process, model_selection
| Titanic - Machine Learning from Disaster |
3,808,829 | Y = train['target']
train = train.drop('target',axis=1)
data = pd.concat([train,test],axis=0 ).reset_index(drop=True)
data.head()<feature_engineering> | train = pd.read_csv('.. /input/train.csv')
test = pd.read_csv('.. /input/test.csv' ) | Titanic - Machine Learning from Disaster |
3,808,829 | for i in range(len(data['text'])) :
data['text'][i] = str(data['text'][i] )<feature_engineering> | passenger_id = test['PassengerId'].copy() | Titanic - Machine Learning from Disaster |
3,808,829 | for i in range(len(data['text'])) :
data['text'][i] = remove_shortforms(data['text'][i])
data['text'][i] = remove_special_char(data['text'][i])
data['text'][i] = remove_wordswithnum(data['text'][i])
data['text'][i] = lowercase(data['text'][i])
data['text'][i] = remove_stop_words(data['text'][i])
text = data['text'][i]
text = text.split()
data['text'][i] = stemming(text)
s = data['text'][i]
data['text'][i] = listToString(s)
data['text'][i] = lemmatize_words(data['text'][i] )<normalization> | traintest = pd.concat([train, test], axis=0, sort=False ) | Titanic - Machine Learning from Disaster |
3,808,829 | cv = CountVectorizer(ngram_range=(1,3))
text_bow = cv.fit_transform(data['text'])
print(text_bow.shape )<split> | isnull = traintest.isnull().sum().reset_index()
isnull.columns = ['Feature', 'Total_null']
total_null = isnull[isnull['Total_null']>0]
total_null | Titanic - Machine Learning from Disaster |
3,808,829 | train_text = text_bow[:train.shape[0]]
test_text = text_bow[train.shape[0]:]<split> | traintest['Title'] = traintest['Name'].str.split(", ", expand=True)[1].str.split(".", expand=True)[0]
replace = {
'Ms': 'Miss',
'Dona': 'Miss',
'Mlle': 'Miss',
'Mme': 'Miss',
'Don': 'Mr',
'Sir': 'Mr'
}
traintest.replace({'Title': replace}, inplace=True ) | Titanic - Machine Learning from Disaster |
3,808,829 | X_train,X_test,Y_train,Y_test = train_test_split(train_text,Y,test_size=0.2)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape )<compute_train_metric> | df_nan_ages = traintest['Age'].isnull()
title_ages = traintest[['Title', 'Age']].groupby('Title').mean().to_dict()['Age']
traintest.loc[df_nan_ages, 'Age'] = traintest.loc[df_nan_ages, 'Title'].apply(lambda x: title_ages[x])
traintest.fillna({
'Fare': traintest['Fare'].mode() [0],
'Embarked': traintest['Embarked'].mode() [0]
}, inplace=True)
traintest.drop(columns='Cabin', inplace=True)
traintest.drop(columns='PassengerId', inplace=True ) | Titanic - Machine Learning from Disaster |
3,808,829 | lr = LogisticRegression(C=10,penalty='l2')
lr.fit(X_train,Y_train)
pred = lr.predict(X_test)
print("F1 score :",f1_score(Y_test,pred))
print("Classification Report
:",classification_report(Y_test,pred))<prepare_output> | traintest['FamilySize'] = traintest.SibSp + traintest.Parch + 1
traintest['IsAlone'] = (traintest['FamilySize'] == 1) * 1
traintest['AgeStage'] = traintest['Age']
traintest.loc[traintest['Age'] <= 11, 'AgeStage'] = 'Child'
traintest.loc[(traintest['Age'] > 11) & (traintest['Age'] <= 20), 'AgeStage'] = 'Young'
traintest.loc[(traintest['Age'] > 20) & (traintest['Age'] <= 40), 'AgeStage'] = 'Adult'
traintest.loc[traintest['Age'] > 40, 'AgeStage'] = 'Old'
traintest.drop(columns='Name', inplace=True ) | Titanic - Machine Learning from Disaster |
3,808,829 | lr = LogisticRegression(C=10,penalty='l2',max_iter=2000)
lr.fit(train_text,Y)
pred = lr.predict(test_text)
submit = pd.DataFrame(test['id'],columns=['id'])
print(len(pred))
submit.head()<save_to_csv> | isnull = traintest.isnull().sum().reset_index()
isnull.columns = ['Feature', 'Total_null']
total_null = isnull[isnull['Total_null']>0]
total_null | Titanic - Machine Learning from Disaster |
3,808,829 | submit['target'] = pred
submit.to_csv("realnlp.csv",index=False )<categorify> |
categorical_vars = ["Pclass", "Sex", "Ticket", 'Embarked', 'Title', 'AgeStage']
numerical_vars = ['Fare', 'Age', 'FamilySize', 'Parch', 'SibSp']
traintest_set = []
traintest[categorical_vars] = traintest[categorical_vars].astype('category')
traintest_labelEncoding = traintest.copy()
label_encoder = preprocessing.LabelEncoder()
for var in categorical_vars:
print(var)
traintest_labelEncoding[var] = label_encoder.fit_transform(traintest_labelEncoding[var])
traintest_set.append(traintest_labelEncoding)
traintest_onehot = pd.get_dummies(traintest, columns=categorical_vars)
traintest_onehot[numerical_vars] =(traintest_onehot[numerical_vars] - traintest_onehot[numerical_vars].mean())/(traintest_onehot[numerical_vars].max() - traintest_onehot[numerical_vars].min())
traintest_set.append(traintest_onehot)
| Titanic - Machine Learning from Disaster |
3,808,829 | tfidf = TfidfVectorizer(ngram_range=(1,3))
text_tfidf = tfidf.fit_transform(data['text'])
print(text_tfidf.shape )<split> | X_trains, y_train, X_tests = [], None,[]
for traintest in traintest_set:
train = traintest[traintest['Survived'].notnull() ]
y_train = train['Survived']
X_train = train.drop(columns='Survived')
print("X train shape: ", X_train.shape)
print("Y train shape: ", y_train.shape)
X_trains.append(X_train)
test = traintest[traintest['Survived'].isnull() ]
X_test = test.drop(columns='Survived')
print("X test shape: ", X_test.shape)
X_tests.append(X_test ) | Titanic - Machine Learning from Disaster |
3,808,829 | X_train,X_test,Y_train,Y_test = train_test_split(train_text,Y,test_size=0.2)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape )<compute_train_metric> | MLA = [
ensemble.AdaBoostClassifier() ,
ensemble.BaggingClassifier() ,
ensemble.ExtraTreesClassifier() ,
ensemble.GradientBoostingClassifier() ,
ensemble.RandomForestClassifier() ,
gaussian_process.GaussianProcessClassifier() ,
linear_model.LogisticRegressionCV() ,
linear_model.PassiveAggressiveClassifier() ,
linear_model.RidgeClassifierCV() ,
linear_model.SGDClassifier() ,
linear_model.Perceptron() ,
naive_bayes.BernoulliNB() ,
naive_bayes.GaussianNB() ,
neighbors.KNeighborsClassifier() ,
svm.SVC(probability=True),
svm.NuSVC(probability=True),
svm.LinearSVC() ,
tree.DecisionTreeClassifier() ,
tree.ExtraTreeClassifier() ,
XGBClassifier()
] | Titanic - Machine Learning from Disaster |
3,808,829 | lr = LogisticRegression(C=100,penalty='l2',max_iter=2000)
lr.fit(X_train,Y_train)
pred = lr.predict(X_test)
print("F1 score :",f1_score(Y_test,pred))
print("Classification Report :",classification_report(Y_test,pred))<count_missing_values> | cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size =.3, train_size =.7, random_state = 0 ) | Titanic - Machine Learning from Disaster |
3,808,829 | print("Number of null values in data keywords column : ",data['keyword'].isnull().sum() )<data_type_conversions> | MLAs = [copy.deepcopy(MLA), copy.deepcopy(MLA)]
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
result_table = []
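# benchmark every classifier on both encodings (label vs one-hot) across 10 shuffled 70/30 splits, logging train/test accuracy, its spread, and fit time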
for i in range(len(X_trains)) :
MLA_compare = pd.DataFrame(columns = MLA_columns)
MLA_predict = y_train.copy()
print("Y train shape: ", y_train.shape)
row_index = 0
for alg in MLAs[i]:
target = y_train.copy()
MLA_name = alg.__class__.__name__
MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
print("Y train shape: ", y_train.shape)
print(MLA_name)
cv_results = model_selection.cross_validate(alg, X_trains[i], target, cv = cv_split)
MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() *3
alg.fit(X_trains[i], y_train)
MLA_predict[MLA_name] = alg.predict(X_trains[i])
row_index+=1
result_table.append(( MLA_compare, MLA_predict)) | Titanic - Machine Learning from Disaster |
3,808,829 | data['keyword'] = data['keyword'].fillna("unknown")
data.head()<feature_engineering> | MLA_compare = result_table[0][0]
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
MLA_compare | Titanic - Machine Learning from Disaster |
3,808,829 | combined_text = [None] * len(data['text'])
for i in range(len(data['text'])) :
if data['keyword'][i] == 'unknown':
combined_text[i] = data['text'][i]
else:
combined_text[i] = data['text'][i] + " " + data['keyword'][i] + " " + data['keyword'][i] + " " + data['keyword'][i]
data['combined_text'] = combined_text<feature_engineering> | MLA_compare = result_table[1][0]
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
MLA_compare | Titanic - Machine Learning from Disaster |
3,808,829 | for i in range(len(data['combined_text'])) :
data['combined_text'][i] = str(data['combined_text'][i] )<feature_engineering> | prediction_df = pd.DataFrame()
alg_name = 'XGBClassifier'
feature_index = 0
alg_index = 19
prediction = MLAs[feature_index][alg_index].predict(X_tests[feature_index])
temp = {'PassengerId': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction
alg_name = 'GradientBoostingClassifier'
feature_index = 0
alg_index = 3
prediction = MLAs[feature_index][alg_index].predict(X_tests[feature_index])
temp = {'PassengerId': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction
alg_name = 'RidgeClassifierCV'
feature_index = 1
alg_index = 8
prediction = MLAs[feature_index][alg_index].predict(X_tests[feature_index])
temp = {'PassengerId': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction
alg_name = 'LogisticRegressionCV'
feature_index = 1
alg_index = 6
prediction = MLAs[feature_index][alg_index].predict(X_tests[feature_index])
temp = {'PassengerId': passenger_id, 'Survived': prediction.astype(int)}
result = pd.DataFrame(temp)
result.to_csv('result_%s_feature%s.csv'%(alg_name, feature_index), index=False)
prediction_df[alg_name] = prediction
| Titanic - Machine Learning from Disaster |