Column schema:
kernel_id: int64, values ranging from 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M characters
completion: string, lengths 1 to 182k characters
comp_name: string, lengths 5 to 57 characters
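Each record pairs a notebook prompt cell with its completion cell, plus the competition name and the source kernel id. A minimal sketch of loading and inspecting such a dump with pandas; the file name "kernel_cells.csv" below is a hypothetical placeholder, not part of this preview:
import pandas as pd
# Load the dump; the file name is assumed for illustration only.
df = pd.read_csv("kernel_cells.csv")
# Keep rows from the Titanic competition and summarize prompt/completion lengths.
titanic = df[df["comp_name"] == "Titanic - Machine Learning from Disaster"]
print(titanic[["prompt", "completion"]].apply(lambda s: s.str.len()).describe())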
459,782
CV = 5 cv_df = pd.DataFrame(index=range(CV * len(models))) entries = []<find_best_model_class>
train = train.drop(['Name', 'PassengerId', 'Survived', 'Ticket', 'Cabin'], axis=1) test = test.drop(['Name', 'PassengerId', 'Ticket', 'Cabin'], axis=1 )
Titanic - Machine Learning from Disaster
459,782
for model in models: model_name = model.__class__.__name__ accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV) for fold_idx, accuracy in enumerate(accuracies): entries.append(( model_name, fold_idx, accuracy))<create_dataframe>
import xgboost as xgb
Titanic - Machine Learning from Disaster
459,782
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'] )<groupby>
vote_est = [ ('ada', AdaBoostClassifier()), ('bc', BaggingClassifier()), ('etc',ExtraTreesClassifier()), ('gbc', GradientBoostingClassifier()), ('rfc', RandomForestClassifier()), ('gpc', GaussianProcessClassifier()), ('lr', LogisticRegressionCV()), ('bnb', BernoulliNB()), ('gnb', GaussianNB()), ('knn', KNeighborsClassifier()), ('svc', SVC(probability=True)) , ('xgb', xgb.XGBClassifier()) ] model = VotingClassifier(estimators = vote_est , voting = 'soft' )
Titanic - Machine Learning from Disaster
459,782
cv_df.groupby('model_name' ).accuracy.mean()<train_model>
print(cross_val_score(model, train, target, cv=5, scoring=make_scorer(accuracy_score)) )
Titanic - Machine Learning from Disaster
459,782
model = LogisticRegression(random_state=0) X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0) model.fit(X_train, y_train) y_pred_proba = model.predict_proba(X_test) y_pred = model.predict(X_test )<compute_test_metric>
model.fit(train,target) pred = model.predict(test )
Titanic - Machine Learning from Disaster
459,782
for predicted in category_id_df.category_id: for actual in category_id_df.category_id: if predicted != actual and conf_mat[actual, predicted] >= 2: print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted])) display(df.loc[indices_test[(y_test == actual)&(y_pred == predicted)]]['Text']) print('' )<train_model>
submission = pd.DataFrame({ "PassengerId": test_id, "Survived": pred }) submission.to_csv('titanic_result_pandas.csv', index=False )
Titanic - Machine Learning from Disaster
13,013,084
model.fit(features, labels )<features_selection>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
13,013,084
N = 5 for Category, category_id in sorted(category_to_id.items()): indices = np.argsort(model.coef_[category_id]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N] bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N] print("# '{}':".format(Category)) print(". Top unigrams: . {}".format(' . '.join(unigrams))) print(". Top bigrams: . {}".format(' . '.join(bigrams)))<predict_on_test>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
13,013,084
texts = ["Hooli stock price soared after a dip in PiedPiper revenue growth.", "Captain Tsubasa scores a magnificent goal for the Japanese team.", "Merryweather mercenaries are sent on another mission, as government oversight groups call for new sanctions.", "Beyoncé releases a new album, tops the charts in all of south-east Asia!", "You won't guess what the latest trend in data analysis is!"] text_features = tfidf.transform(texts) predictions = model.predict(text_features) for text, predicted in zip(texts, predictions): print('"{}"'.format(text)) print(" - Predicted as: '{}'".format(id_to_category[predicted])) print("" )<load_from_csv>
women = train_data.loc[train_data.Sex == 'female']["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women )
Titanic - Machine Learning from Disaster
13,013,084
TEST_PATH = os.path.join("../input/bbc-test", "BBC News Test.csv") test_df = pd.read_csv(TEST_PATH)<data_type_conversions>
men = train_data.loc[train_data.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men )
Titanic - Machine Learning from Disaster
13,013,084
test_df.Text.tolist()<predict_on_test>
print(train_data.isna().sum()) print(test_data.isna().sum() )
Titanic - Machine Learning from Disaster
13,013,084
test_features = tfidf.transform(test_df.Text.tolist()) Y_pred = model.predict(test_features) Y_pred<define_variables>
import seaborn as sns import matplotlib.pyplot as plt
Titanic - Machine Learning from Disaster
13,013,084
Y_pred_name =[] for cat_id in Y_pred : Y_pred_name.append(id_to_category[cat_id] )<create_dataframe>
train_data['Survived'].value_counts()
Titanic - Machine Learning from Disaster
13,013,084
submission = pd.DataFrame({ "ArticleId": test_df["ArticleId"], "Category": Y_pred_name } )<save_to_csv>
train_data['SibSp'].value_counts()
Titanic - Machine Learning from Disaster
13,013,084
submission.to_csv('submission.csv', index=False )<load_from_csv>
train_data['Parch'].value_counts()
Titanic - Machine Learning from Disaster
13,013,084
TRAIN_PATH = os.path.join("../input/ai-academy-intermediate-class-competition-1", "BBC News Train.csv") df = pd.read_csv(TRAIN_PATH)<feature_engineering>
print(train_data['Name'].value_counts()) print(train_data['Ticket'].value_counts()) print(train_data['Fare'].value_counts() )
Titanic - Machine Learning from Disaster
13,013,084
df['category_id'] = df['Category'].factorize() [0] df['category_id'][0:10]<remove_duplicates>
print(train_data['Age'].isna().sum()) print(test_data['Age'].isna().sum() )
Titanic - Machine Learning from Disaster
13,013,084
category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id' )<define_variables>
train_data['Age'].fillna(value=train_data['Age'].median() , inplace=True) print(train_data['Age'].isna().sum()) test_data['Age'].fillna(value=test_data['Age'].median() , inplace=True) print(test_data['Age'].isna().sum() )
Titanic - Machine Learning from Disaster
13,013,084
category_to_id = dict(category_id_df.values) id_to_category = dict(category_id_df[['category_id', 'Category']].values )<count_values>
train_data = train_data[train_data['Fare'] < 100]
Titanic - Machine Learning from Disaster
13,013,084
df.groupby('Category' ).category_id.count() <categorify>
test_data['Fare'].isna().sum()
Titanic - Machine Learning from Disaster
13,013,084
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english') features = tfidf.fit_transform(df.Text ).toarray() labels = df.category_id <sort_values>
train_data['Embarked'].isna().sum()
Titanic - Machine Learning from Disaster
13,013,084
sorted(category_to_id.items() )<statistical_test>
train_data['Embarked'].fillna(method = 'ffill' , inplace = True)
Titanic - Machine Learning from Disaster
13,013,084
N = 3 for Category, category_id in sorted(category_to_id.items()): features_chi2 = chi2(features, labels == category_id) indices = np.argsort(features_chi2[0]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in feature_names if len(v.split(' ')) == 1] bigrams = [v for v in feature_names if len(v.split(' ')) == 2] print("# '{}':".format(Category)) print(". Most correlated unigrams: . {}".format(' . '.join(unigrams[-N:]))) print(". Most correlated bigrams: . {}".format(' . '.join(bigrams[-N:])))<define_variables>
y = train_data['Survived'] features = ['Pclass', 'Sex', 'SibSp', 'Parch'] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) X = pd.concat([train_data[['Age', 'Fare']], X], axis=1) X_test = pd.concat([test_data[['Age','Fare']], X_test], axis=1) print(X) print(X_test)
Titanic - Machine Learning from Disaster
13,013,084
SAMPLE_SIZE = int(len(features)* 0.3) np.random.seed(0) indices = np.random.choice(range(len(features)) , size=SAMPLE_SIZE, replace=False) projected_features = TSNE(n_components=2, random_state=0 ).fit_transform(features[indices]) <filter>
from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
13,013,084
my_id = 0 projected_features[(labels[indices] == my_id ).values]<choose_model_class>
Titanic - Machine Learning from Disaster
13,013,084
models = [ RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0), MultinomialNB() , LogisticRegression(random_state=0), ] <create_dataframe>
model_RFC = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model_RFC.fit(X, y) predictions = model_RFC.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
13,013,084
<find_best_model_class><EOS>
Titanic - Machine Learning from Disaster
3,113,315
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe>
warnings.filterwarnings('ignore') %matplotlib inline
Titanic - Machine Learning from Disaster
3,113,315
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])<groupby>
pd.__version__
Titanic - Machine Learning from Disaster
3,113,315
cv_df.groupby('model_name' ).accuracy.mean()<train_model>
np.__version__
Titanic - Machine Learning from Disaster
3,113,315
model = LogisticRegression(random_state=0) X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0) model.fit(X_train, y_train) y_pred_proba = model.predict_proba(X_test) y_pred = model.predict(X_test )<compute_test_metric>
sns.__version__
Titanic - Machine Learning from Disaster
3,113,315
for predicted in category_id_df.category_id: for actual in category_id_df.category_id: if predicted != actual and conf_mat[actual, predicted] >= 2: print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted])) display(df.loc[indices_test[(y_test == actual)&(y_pred == predicted)]]['Text']) print('' )<train_model>
train = pd.read_csv('../input/train.csv')
Titanic - Machine Learning from Disaster
3,113,315
model.fit(features, labels )<features_selection>
test = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
3,113,315
N = 5 for Category, category_id in sorted(category_to_id.items()): indices = np.argsort(model.coef_[category_id]) feature_names = np.array(tfidf.get_feature_names())[indices] unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N] bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N] print("# '{}':".format(Category)) print(". Top unigrams: . {}".format(' . '.join(unigrams))) print(". Top bigrams: . {}".format(' . '.join(bigrams)))<predict_on_test>
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
3,113,315
texts = ["Hooli stock price soared after a dip in PiedPiper revenue growth.", "Captain Tsubasa scores a magnificent goal for the Japanese team.", "Merryweather mercenaries are sent on another mission, as government oversight groups call for new sanctions.", "Beyoncé releases a new album, tops the charts in all of south-east Asia!", "You won't guess what the latest trend in data analysis is!"] text_features = tfidf.transform(texts) predictions = model.predict(text_features) for text, predicted in zip(texts, predictions): print('"{}"'.format(text)) print(" - Predicted as: '{}'".format(id_to_category[predicted])) print("" )<load_from_csv>
train[["Sex", "Survived"]].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
3,113,315
TEST_PATH = os.path.join("../input/bbc-test", "BBC News Test(1).csv") test_df = pd.read_csv(TEST_PATH)<predict_on_test>
train[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
3,113,315
test_features = tfidf.transform(test_df.Text.tolist()) Y_pred = model.predict(test_features) Y_pred<define_variables>
train[["Parch", "Survived"]].groupby(['Parch'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
3,113,315
Y_pred_name =[] for cat_id in Y_pred : Y_pred_name.append(id_to_category[cat_id] )<create_dataframe>
q = train.Fare.quantile(0.99) q
Titanic - Machine Learning from Disaster
3,113,315
submission = pd.DataFrame({ "ArticleId": test_df["ArticleId"], "Category": Y_pred_name } )<save_to_csv>
train = train[train['Fare'] < q]
Titanic - Machine Learning from Disaster
3,113,315
submission.to_csv('submission.csv', index=False )<load_from_csv>
Id = test['PassengerId']
Titanic - Machine Learning from Disaster
3,113,315
pokemon = pd.read_csv(".. /input/pokemon.csv") battles = pd.read_csv(".. /input/battles.csv") test = pd.read_csv(".. /input/test.csv" )<data_type_conversions>
split = len(train )
Titanic - Machine Learning from Disaster
3,113,315
pokemon = pokemon.drop(columns = ['Name','Generation','Legendary'],errors = 'ignore') pokemon['Type 2'] = pokemon['Type 2'].fillna('None') pokemon.loc[:,'Type 1'] = pokemon.loc[:,'Type 1'].astype('category') pokemon.loc[:,'Type 2'] = pokemon.loc[:,'Type 2'].astype('category') pokemon.dtypes <merge>
data = pd.concat(objs=[train, test], axis=0 ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
3,113,315
data = pd.merge(battles,pokemon,left_on = 'First_pokemon', right_on = ' data = pd.merge(data,pokemon,left_on = 'Second_pokemon', right_on = ' data = data.drop(columns = [' data = data.sort_values(by = ['battle_number']) data.head()<merge>
data.drop('PassengerId', axis=1, inplace=True )
Titanic - Machine Learning from Disaster
3,113,315
data_test = pd.merge(test,pokemon,left_on = 'First_pokemon', right_on = ' data_test = pd.merge(data_test,pokemon,left_on = 'Second_pokemon', right_on = ' data_test = data_test.drop(columns = [' data_test = data_test.sort_values(by = ['battle_number']) data_test.head()<categorify>
median = data["Age"].median() std = data["Age"].std() is_null = data["Age"].isnull().sum() rand_age = np.random.randint(median - std, median + std, size = is_null) age_slice = data["Age"].copy() age_slice[np.isnan(age_slice)] = rand_age data["Age"] = age_slice data["Age"] = data["Age"].astype(int )
Titanic - Machine Learning from Disaster
3,113,315
lr = LinearRegression(normalize=True) Y_train = data.loc[:,'Winner'] X_train = data.drop(columns = ['Winner']) X_train = pd.get_dummies(X_train) X_test = pd.get_dummies(data_test) <choose_model_class>
data['Embarked'].fillna(data['Embarked'].mode() [0], inplace = True )
Titanic - Machine Learning from Disaster
3,113,315
input_dim = X_train.shape[1] print(np.shape(Y_train)) model = keras.models.Sequential([ keras.layers.Dense(310, activation=tf.nn.relu, input_shape=(X_train.shape[1],)), keras.layers.GaussianNoise(0.16), keras.layers.Dropout(0.5), keras.layers.Dense(310, activation=tf.nn.relu), keras.layers.GaussianNoise(0.16), keras.layers.Dropout(0.5), keras.layers.Dense(310, activation=tf.nn.relu), keras.layers.Dropout(0.5), keras.layers.Dense(1, activation=tf.nn.sigmoid) ]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) model.summary()<train_model>
data["Embarked"].isnull().sum()
Titanic - Machine Learning from Disaster
3,113,315
validation_split = 0.2 history = model.fit(X_train, Y_train, workers=4, epochs=400, batch_size = 512, shuffle = True, validation_split=validation_split )<save_to_csv>
data['Fare'].fillna(data['Fare'].mean() , inplace = True )
Titanic - Machine Learning from Disaster
3,113,315
prediction = model.predict_classes(X_test) sampleSubmission = pd.read_csv('../input/sampleSubmission.csv') sampleSubmission.loc[:,'Winner'] = prediction sampleSubmission.to_csv('resultados.csv', index=False) print(sampleSubmission)<define_variables>
data["CabinBool"] =(data["Cabin"].notnull().astype('int'))
Titanic - Machine Learning from Disaster
3,113,315
to_underscore = lambda x: re.sub("[^0-9a-zA-Z<load_from_csv>
data['Deck'] = data.Cabin.str.extract('([a-zA-Z]+)', expand=False) data[['Cabin', 'Deck']].sample(10) data['Deck'] = data['Deck'].fillna('Z') data = data.drop(['Cabin'], axis=1 )
Titanic - Machine Learning from Disaster
3,113,315
pokemon = pd.read_csv('../input/pokemon-challenge-mlh/pokemon.csv').rename(to_underscore, axis='columns').fillna("None") battles = pd.read_csv('../input/pokemon-challenge-mlh/battles.csv').rename(to_underscore, axis='columns').fillna("None") sampleSubmission = pd.read_csv('../input/pokemon-challenge-mlh/sampleSubmission.csv').rename(to_underscore, axis='columns').fillna("None") test = pd.read_csv('../input/pokemon-challenge-mlh/test.csv').rename(to_underscore, axis='columns').fillna("None") typetable = pd.read_csv('../input/pokemon-type-table/typetable.csv') print("pokemon", pokemon.shape) print("battles", battles.shape) print("sampleSubmission", sampleSubmission.shape) print("test", test.shape) print("typetable", typetable.shape)<drop_column>
data.groupby(['Embarked'])['Survived'].count()
Titanic - Machine Learning from Disaster
3,113,315
pokemon = pokemon.drop(labels=["name"], axis=1) pokemon = pokemon.fillna({'Type 2': 'None'}) pokemon.head()<normalization>
data['FamilySize'] = data['SibSp'] + data['Parch']
Titanic - Machine Learning from Disaster
3,113,315
columns = ['attack', 'defense', 'hp', 'speed', 'sp_atk', 'sp_def'] pokemon[columns] = MinMaxScaler().fit_transform(pokemon[columns]) pokemon.head()<data_type_conversions>
data['IsAlone'] = 1
Titanic - Machine Learning from Disaster
3,113,315
pokemon["legendary"] = pokemon["legendary"].astype(int) pokemon.head()<create_dataframe>
data['IsAlone'].loc[data['FamilySize'] > 0] = 0
Titanic - Machine Learning from Disaster
3,113,315
vals = [] for c1 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-vs-%s-None" %(x, c1)) , "mul": typetable[c1], })) for c2 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-vs-%s-%s" %(x, c1, c2)) , "mul": typetable[c1] * typetable[c2], })) mult = pd.concat(vals ).reset_index().drop(["index"], axis=1) mult = dict(zip(mult.values[:,0], mult.values[:,1])) def multiplier(cat): return mult.get(cat, 0) mult<merge>
bins = [-1, 13, 31, 60, 80] labels = ['Child', 'Young Adult', 'Adult', 'Senior'] data['AgeBin'] = pd.cut(data["Age"], bins, labels = labels ).astype('object' )
Titanic - Machine Learning from Disaster
3,113,315
def merge_data(battles): battles = battles \ .merge(pokemon.rename(lambda x: "f_%s" % x, axis="columns"), left_on="first_pokemon", right_on="f_ .merge(pokemon.rename(lambda x: "s_%s" % x, axis="columns"), left_on="second_pokemon", right_on="s_ battles["f_t1"] =(battles["f_type_1"] + "-vs-" + battles["s_type_1"] + "-" + battles["s_type_2"] ).map(multiplier) battles["f_t2"] =(battles["f_type_2"] + "-vs-" + battles["s_type_1"] + "-" + battles["s_type_2"] ).map(multiplier) battles["s_t1"] =(battles["s_type_1"] + "-vs-" + battles["f_type_1"] + "-" + battles["f_type_2"] ).map(multiplier) battles["s_t2"] =(battles["s_type_2"] + "-vs-" + battles["f_type_1"] + "-" + battles["f_type_2"] ).map(multiplier) battles["speed"] =(battles["f_speed"] - battles["s_speed"]) battles = battles\ .sort_values(['battle_number'])\ .reset_index() \ .drop(["index","battle_number", "first_pokemon", "second_pokemon", "f_ return battles print(battles.head()) train = merge_data(battles) train.head()<categorify>
data['IsBaby'] = 0
Titanic - Machine Learning from Disaster
3,113,315
train = pd.get_dummies(train )<prepare_x_and_y>
data['IsBaby'].loc[data['Age'] <= 5] = 1
Titanic - Machine Learning from Disaster
3,113,315
Y_train = train["winner"] X_train = train.drop(labels=["winner"],axis=1) print(X_train.shape) print(Y_train.shape )<split>
data['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False )
Titanic - Machine Learning from Disaster
3,113,315
X_train, X_val, y_train, y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state = 3 )<choose_model_class>
data['Title'] = data['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare') data['Title'] = data['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal') data['Title'] = data['Title'].replace('Mlle', 'Miss') data['Title'] = data['Title'].replace('Ms', 'Miss') data['Title'] = data['Title'].replace('Mme', 'Mrs' )
Titanic - Machine Learning from Disaster
3,113,315
model = GradientBoostingClassifier(learning_rate=0.01, min_samples_split=3, max_depth=10, n_estimators=2000, random_state=2 )<train_model>
data['Title'] = data['Title'].astype('object' )
Titanic - Machine Learning from Disaster
3,113,315
model.fit(X_train, y_train )<train_model>
data[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()
Titanic - Machine Learning from Disaster
3,113,315
model.fit(X_train, y_train )<prepare_x_and_y>
bins = [-np.inf, 20, 30, 110, np.inf] labels = ['Low', 'Mid', 'High', 'Extreme'] data['FareBin'] = pd.cut(data["Fare"], bins, labels = labels ).astype('object' )
Titanic - Machine Learning from Disaster
3,113,315
y_val = np.array(y_val, dtype='int32') y_val<predict_on_test>
df = data
Titanic - Machine Learning from Disaster
3,113,315
predictions = model.predict(X_val) res = model.score(X_val, y_val) predictions<categorify>
df = df.drop(['Ticket', 'Name', 'Fare'], axis=1 )
Titanic - Machine Learning from Disaster
3,113,315
X_test = merge_data(test) X_test = pd.get_dummies(X_test) X_test.head()<predict_on_test>
train = df[:split] test = df[split:] x = train.drop(["Survived"], axis=1) y = train["Survived"] X_train, X_test, y_train, y_test = train_test_split(x,y, test_size=0.2, random_state=42) test.drop(["Survived"], axis = 1, inplace=True) cat_features = np.where(x.dtypes != float)[0] cat = CatBoostClassifier(one_hot_max_size=7, iterations=21, random_seed=42, use_best_model=True, eval_metric='Accuracy', loss_function='Logloss') cat.fit(X_train, y_train, cat_features = cat_features, eval_set=(X_test, y_test)) pred = cat.predict(X_test) pool = Pool(X_train, y_train, cat_features=cat_features) cv_scores = cv(pool, cat.get_params() , fold_count=10, plot=True) print('CV score: {:.5f}'.format(cv_scores['test-Accuracy-mean'].values[-1])) print('The test accuracy is :{:.6f}'.format(accuracy_score(y_test, cat.predict(X_test))))
Titanic - Machine Learning from Disaster
3,113,315
predictions_test = model.predict(X_test) predictions_test result = np.squeeze(np.around(predictions_test ).astype(int)) result<save_to_csv>
to_drop = ['Age', 'AgeBin', 'SibSp', 'Parch', 'FamilySize', 'Embarked', 'Title'] df = df.drop(to_drop, axis=1, inplace=False )
Titanic - Machine Learning from Disaster
3,113,315
data_to_submit = pd.DataFrame(columns=['Winner']) data_to_submit['Winner'] = result data_to_submit.insert(0, 'battle_number', range(0, len(data_to_submit))) data_to_submit data_to_submit.to_csv('submission.csv', index = False) data_to_submit<define_variables>
categorical_feature_mask = df.dtypes==object categorical_cols = df.columns[categorical_feature_mask].tolist() le = LabelEncoder() df[categorical_cols] = df[categorical_cols].apply(lambda col: le.fit_transform(col)) df[categorical_cols].head()
Titanic - Machine Learning from Disaster
3,113,315
to_underscore = lambda x: re.sub("[^0-9a-zA-Z<load_from_csv>
train = df[:split] test = df[split:] x = train.drop(["Survived"], axis=1) y = train["Survived"] X_train, X_test, y_train, y_test = train_test_split(x,y, test_size=0.2, random_state=42) test.drop(["Survived"], axis = 1, inplace=True )
Titanic - Machine Learning from Disaster
3,113,315
typetable = pd.read_csv(".. /input/pokemon-type-table/typetable.csv") vals = [] for c1 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-%s-" %(x, c1)) , "mul": typetable[c1], })) for c2 in typetable.columns[1:]: vals.append(pd.DataFrame({ "idx": typetable["atck"].map(lambda x: "%s-%s-%s" %(x, c1, c2)) , "mul": typetable[c1] * typetable[c2], })) mult = pd.concat(vals ).reset_index().drop(["index"], axis=1) mult = dict(zip(mult.values[:,0], mult.values[:,1])) def multiplier(cat): return mult.get(cat, 0) <normalization>
MLA = [ ensemble.AdaBoostClassifier() , ensemble.ExtraTreesClassifier() , ensemble.GradientBoostingClassifier() , ensemble.RandomForestClassifier() , gaussian_process.GaussianProcessClassifier() , linear_model.LogisticRegressionCV() , linear_model.RidgeClassifierCV() , linear_model.Perceptron() , naive_bayes.BernoulliNB() , naive_bayes.GaussianNB() , neighbors.KNeighborsClassifier() , svm.SVC(probability=True), svm.NuSVC(probability=True), svm.LinearSVC() , tree.DecisionTreeClassifier() , tree.ExtraTreeClassifier() , xgb.XGBClassifier() ]
Titanic - Machine Learning from Disaster
3,113,315
print(multiplier("Grass-Rock-")) print(multiplier("Rock-Grass-")) print(multiplier("Water-Fire-")) print(multiplier("Water-Fire-Grass")) print(multiplier("Fire-Water-Fire")) print(multiplier("Fire-Grass-Bug")) print(multiplier("-Grass-Bug"))<load_from_csv>
col = [] algorithms = pd.DataFrame(columns = col) idx = 0 for a in MLA: a.fit(X_train, y_train) pred = a.predict(X_test) acc = accuracy_score(y_test, pred) f1 = f1_score(y_test, pred) cv = cross_val_score(a, X_test, y_test ).mean() Alg = a.__class__.__name__ algorithms.loc[idx, 'Algorithm'] = Alg algorithms.loc[idx, 'Accuracy'] = round(acc * 100, 2) algorithms.loc[idx, 'F1 Score'] = round(f1 * 100, 2) algorithms.loc[idx, 'CV Score'] = round(cv * 100, 2) idx+=1
Titanic - Machine Learning from Disaster
3,113,315
pokemon = pd.read_csv(".. /input/pokemon-challenge-mlh/pokemon.csv" ).rename(to_underscore, axis='columns' ).fillna("") pokemon["legendary"] = pokemon["legendary"].map(int) pokemon.head()<merge>
algorithms.sort_values(by = ['CV Score'], ascending = False, inplace = True) algorithms.head()
Titanic - Machine Learning from Disaster
3,113,315
def merge_data(battles): battles = battles \ .merge(pokemon.rename(lambda x: "f_%s" % x, axis="columns"), left_on="first_pokemon", right_on="f_ .merge(pokemon.rename(lambda x: "s_%s" % x, axis="columns"), left_on="second_pokemon", right_on="s_ battles["f_t1"] =(battles["f_type_1"] + "-" + battles["s_type_1"] + "-" + battles["s_type_2"] ).map(multiplier) battles["f_t2"] =(battles["f_type_2"] + "-" + battles["s_type_1"] + "-" + battles["s_type_2"] ).map(multiplier) battles["s_t1"] =(battles["s_type_1"] + "-" + battles["f_type_1"] + "-" + battles["f_type_2"] ).map(multiplier) battles["s_t2"] =(battles["s_type_2"] + "-" + battles["f_type_1"] + "-" + battles["f_type_2"] ).map(multiplier) battles["f_attack"] = battles["f_attack"] * battles[["f_t1", "f_t2"]].max(axis=1) battles["s_attack"] = battles["s_attack"] * battles[["s_t1", "s_t2"]].max(axis=1) battles = battles\ .drop(["f_type_1", "f_type_2", "s_type_1", "s_type_2", "f_name", "s_name"], axis=1)\ .sort_values(['battle_number'])\ .reset_index() \ .drop(["index","battle_number", "f_ return battles def feature_scaling(train): sc = StandardScaler() x_train = pd.DataFrame(sc.fit_transform(train), columns = train.columns) if 'winner' in x_train.columns: x_train["winner"] = train["winner"] if 'f_t1' in x_train.columns: x_train["f_t1"] = train["f_t1"] if 'f_t2' in x_train.columns: x_train["f_t2"] = train["f_t2"] if 's_t1' in x_train.columns: x_train["s_t1"] = train["s_t1"] if 's_t2' in x_train.columns: x_train["s_t2"] = train["s_t2"] return x_train battles = pd.read_csv(".. /input/pokemon-challenge-mlh/battles.csv" ).rename(to_underscore, axis='columns') train = merge_data(battles) train.head()<split>
kfold = StratifiedKFold(n_splits=10 )
Titanic - Machine Learning from Disaster
3,113,315
train_split, test_split = train_test_split(train, test_size = 0.1, random_state = 0) features = ["first_pokemon", "f_hp", "f_attack", "f_sp_atk", "f_sp_def", "f_speed", "second_pokemon", "s_hp", "s_attack", "s_sp_atk", "s_sp_def", "s_speed", "f_t1","f_t2", "s_t1", "s_t2"] d_train = lgb.Dataset(train_split[features], label=np.ravel(train_split["winner"])) d_test = lgb.Dataset(test_split[features], label=np.ravel(test_split["winner"])) params = { 'objective':'binary', 'max_depth': -1, 'learning_rate': 0.05, 'boosting': "gbdt", 'metric': ['auc','binary_logloss', 'binary_error'], 'num_rounds': 90000, 'early_stopping': 2000} def set_values(data): for i in range(len(data)): data[i] = 1 if data[i] >=.5 else 0 return data clf = lgb.train(params, d_train, valid_sets=[d_test], verbose_eval=1000) print("DONE!")<find_best_params>
XGB = XGBClassifier() xgb_param = { 'loss' : ["deviance"], 'n_estimators' : [100,200,300], 'learning_rate': [0.1, 0.05, 0.01], 'max_depth': [4, 8], 'min_samples_leaf': [100,150], 'max_features': [0.3, 0.1] } gsXGB = GridSearchCV(XGB, param_grid = xgb_param, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsXGB.fit(X_train,y_train) XGB_best = gsXGB.best_estimator_ gsXGB.best_score_
Titanic - Machine Learning from Disaster
3,113,315
pred_train = clf.predict(train_split[features], num_iteration=clf.best_iteration) pred_test = clf.predict(test_split[features], num_iteration=clf.best_iteration) metrics = [] for x in np.arange(.3,.7,.01): t = pred_train.copy() t[t >= x] = int(1) t[t < x] = int(0) metrics.append((x, sk_metric(train_split["winner"], t))) threshold = max(metrics, key=lambda item:item[1])[0] print("Winner threshold: ", threshold) print(pred_train) pred_test[pred_test >= threshold] = 1 pred_test[pred_test < threshold] = 0 pred_train[pred_train >= threshold] = 1 pred_train[pred_train < threshold] = 0 print(pred_train) accuracy_train = accuracy_score(pred_train, np.ravel(train_split["winner"])) accuracy_test = accuracy_score(pred_test, np.ravel(test_split["winner"])) print(accuracy_train) print(accuracy_test)<load_from_csv>
SVC = svm.SVC(probability=True) svc_param = { 'kernel': ['rbf'], 'gamma': [ 0.001, 0.01, 0.1, 1], 'C': [1, 10, 50, 100,200,300, 1000] } gsSVC = GridSearchCV(SVC, param_grid = svc_param, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsSVC.fit(X_train,y_train) SVC_best = gsSVC.best_estimator_ gsSVC.best_score_
Titanic - Machine Learning from Disaster
3,113,315
battles = pd.read_csv(INPUT_DIR + "test.csv" ).rename(to_underscore, axis='columns') test = merge_data(battles) col_names = test.columns test = pd.DataFrame(test, columns = col_names) test.head()<save_to_csv>
GB = ensemble.GradientBoostingClassifier() gb_param = { 'loss' : ["deviance"], 'n_estimators' : [100,200,300], 'learning_rate': [0.1, 0.05, 0.01], 'max_depth': [4, 8], 'min_samples_leaf': [100,150], 'max_features': [0.3, 0.1] } gsGB = GridSearchCV(GB, param_grid = gb_param, cv=kfold, scoring="accuracy", n_jobs= 4, verbose = 1) gsGB.fit(X_train,y_train) GB_best = gsGB.best_estimator_ gsGB.best_score_
Titanic - Machine Learning from Disaster
3,113,315
predictions = clf.predict(test[features]) predictions[predictions >= threshold] = 1 predictions[predictions < threshold] = 0 predictions = predictions.astype(int) sampleSubmission = pd.read_csv(INPUT_DIR + 'sampleSubmission.csv') sampleSubmission['Winner'] = predictions sampleSubmission.to_csv('my_sub.csv', index=False) sampleSubmission.head()<import_modules>
vc = ensemble.VotingClassifier( estimators = [('xgb', XGB_best),('gbc',GB_best),('svc', SVC_best)], voting='soft', n_jobs=4 )
Titanic - Machine Learning from Disaster
3,113,315
fastai.__version__<import_modules>
vc = vc.fit(X_train, y_train) pred = vc.predict(X_test) acc = accuracy_score(y_test, pred) f1 = f1_score(y_test, pred) cv = cross_val_score(vc, X_test, y_test ).mean() print("Accuracy: ", round(acc*100,2), " F1-Score: ", round(f1*100,2), " CV Score: ", round(cv*100,2))
Titanic - Machine Learning from Disaster
3,113,315
from fastai.text import * from fastai.callbacks import *<load_from_csv>
ada = ensemble.AdaBoostClassifier() ada.fit(X_train, y_train) lg = linear_model.LogisticRegressionCV() lg.fit(X_train, y_train) vc2 = ensemble.VotingClassifier( estimators = [('ada', ada),('lg',lg),('VotingClassifier', vc)], voting='soft', n_jobs=4) vc2.fit(X_train, y_train )
Titanic - Machine Learning from Disaster
3,113,315
data_train = pd.read_csv("../input/train.csv") data_valid = pd.read_csv("../input/valid.csv") data_test = pd.read_csv("../input/test.csv")<data_type_conversions>
y_scores = vc2.predict_proba(X_test) y_scores = y_scores[:,1]
Titanic - Machine Learning from Disaster
3,113,315
data_train.fillna("xxempty", inplace=True) data_valid.fillna("xxempty", inplace=True) data_test.fillna("xxempty", inplace=True )<feature_engineering>
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_scores )
Titanic - Machine Learning from Disaster
3,113,315
data_train["full"] = data_train["text"].apply(lambda x: x + " xxtitle ")+ data_train["title"] data_valid["full"] = data_valid["text"].apply(lambda x: x + " xxtitle ")+ data_valid["title"] data_test["full"] = data_test["text"].apply(lambda x: x + " xxtitle ")+ data_test["title"]<feature_engineering>
auroc = roc_auc_score(y_test, y_scores) print("ROC-AUC Score:", auroc )
Titanic - Machine Learning from Disaster
3,113,315
<concatenate><EOS>
pred = vc2.predict(test ).astype(int) target = pd.Series(pred, name='Survived') output = pd.concat({'PassengerId':Id, 'Survived':target} ,axis='columns') output.to_csv('submission.csv', index=False, header=True )
Titanic - Machine Learning from Disaster
11,748,875
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained>
import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.preprocessing import LabelEncoder from sklearn.impute import KNNImputer from imblearn.over_sampling import SMOTE from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import BaggingClassifier from lightgbm import LGBMClassifier
Titanic - Machine Learning from Disaster
11,748,875
lm = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3, pretrained=True )<find_best_params>
train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv')
Titanic - Machine Learning from Disaster
11,748,875
lm.lr_find()<train_model>
train.drop('PassengerId',axis=1,inplace=True) ID = test['PassengerId'] test.drop('PassengerId',axis=1,inplace=True )
Titanic - Machine Learning from Disaster
11,748,875
lm.fit_one_cycle(1, 4e-3, callbacks=[SaveModelCallback(lm, name="best_lm")], moms=(0.8,0.7))<something_strange>
def visualNA(df,perc=0): NAN = [(c, df[c].isna().mean() *100)for c in df] NAN = pd.DataFrame(NAN, columns=["column_name", "percentage"]) NAN = NAN[NAN.percentage > perc] print(NAN.sort_values("percentage", ascending=False))
Titanic - Machine Learning from Disaster
11,748,875
lm.unfreeze()<train_model>
train['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
11,748,875
lm.fit_one_cycle(5, 4e-3, callbacks=[SaveModelCallback(lm, name="best_lm")], moms=(0.8,0.7))<load_pretrained>
train['Embarked'].fillna(value='S',inplace=True )
Titanic - Machine Learning from Disaster
11,748,875
lm.load("best_lm" )<categorify>
train[['Ticket_Class','Ticket_Number']]=train['Ticket'].str.split(" ",expand=True,n=1) train['Ticket_Class'],train['Ticket_Number'] = zip(*train[['Ticket_Class','Ticket_Number']].apply(( lambda x :(None,x['Ticket_Class'])if x['Ticket_Number'] is None else(x['Ticket_Class'],x['Ticket_Number'])) ,axis=1))
Titanic - Machine Learning from Disaster
11,748,875
lm.save_encoder("enc" )<split>
train['Title'] = train['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
Titanic - Machine Learning from Disaster
11,748,875
data_clf =(TextList.from_df(pd.concat([data_train, data_valid]), vocab=data_lm.vocab, cols=["full"] ). split_from_df("is_valid" ). label_from_df("label" ). add_test(data_test["full"] ). databunch() )<choose_model_class>
train['Cabin'].fillna(value='0',inplace=True) train['Ticket_Class'].fillna(value='0',inplace=True )
Titanic - Machine Learning from Disaster
11,748,875
clf = text_classifier_learner(data_clf, AWD_LSTM, drop_mult=0.3 )<find_best_params>
les = LabelEncoder() train['Sex'] = les.fit_transform(train['Sex']) let = LabelEncoder() train['Title'] = let.fit_transform(train['Title'].astype(str)) letn = LabelEncoder() train['Ticket_Number'] = letn.fit_transform(train['Ticket_Number'].astype(str)) letc = LabelEncoder() train['Ticket_Class'] = letc.fit_transform(train['Ticket_Class'].astype(str)) lec = LabelEncoder() train['Cabin'] = lec.fit_transform(train['Cabin'].astype(str)) lee = LabelEncoder() train['Embarked'] = lee.fit_transform(train['Embarked'].astype(str))
Titanic - Machine Learning from Disaster
11,748,875
del lm torch.cuda.empty_cache()<categorify>
train['Cabin'].replace(0,np.NaN,inplace=True )
Titanic - Machine Learning from Disaster
11,748,875
clf.load_encoder("enc" )<find_best_params>
X = train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked', 'Ticket_Class','Ticket_Number','Title']] y = train['Survived']
Titanic - Machine Learning from Disaster
11,748,875
clf.lr_find()<train_model>
imputer = KNNImputer(n_neighbors=5) X = imputer.fit_transform(X )
Titanic - Machine Learning from Disaster
11,748,875
clf.fit(3, 2e-3, callbacks=[SaveModelCallback(clf, name="best_clf")] )<load_pretrained>
sm = SMOTE(random_state=42) X_res, y_res = sm.fit_resample(X, y )
Titanic - Machine Learning from Disaster
11,748,875
clf.load("best_clf" )<load_pretrained>
model_bc = BaggingClassifier(RandomForestClassifier(n_estimators=300, max_depth=5, random_state=42),n_estimators=100) model_bc.fit(X_res,y_res )
Titanic - Machine Learning from Disaster
11,748,875
clf.unfreeze()<train_model>
feature_importances = np.mean([ tree.feature_importances_ for tree in model_bc.estimators_ ], axis=0 )
Titanic - Machine Learning from Disaster
11,748,875
clf.fit(1, 3e-4, callbacks=[SaveModelCallback(clf, name="best_clf_ft1")] )<train_model>
test[['Ticket_Class','Ticket_Number']]=test['Ticket'].str.split(" ",expand=True,n=1) test['Ticket_Class'],test['Ticket_Number'] = zip(*test[['Ticket_Class','Ticket_Number']].apply(( lambda x :(None,x['Ticket_Class'])if x['Ticket_Number'] is None else(x['Ticket_Class'],x['Ticket_Number'])) ,axis=1))
Titanic - Machine Learning from Disaster
11,748,875
clf.fit(1, 3e-4, callbacks=[SaveModelCallback(clf, name="best_clf_ft2")] )<predict_on_test>
test['Title'] = test['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
Titanic - Machine Learning from Disaster
11,748,875
pred_val = clf.get_preds(DatasetType.Valid, ordered=True )<prepare_output>
for i,lechange in [('Ticket_Class',letc),('Cabin',lec),('Embarked',lee), ('Ticket_Number',letn),('Title',let)]: test[i] = test[i].map(lambda s: '<unknown>' if s not in lechange.classes_ else s) le_classes = lechange.classes_.tolist() le_classes.insert(len(le_classes), '<unknown>') lechange.classes_ = le_classes
Titanic - Machine Learning from Disaster