Dataset columns: kernel_id (int64, 24.2k-23.3M), prompt (string, 8-1.85M chars), completion (string, 1-182k chars), comp_name (string, 5-57 chars).

Kernel 13,868,595: Titanic - Machine Learning from Disaster
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['category_id', 'Category']].values)<count_values>
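The cell above reads category_id_df, which is never constructed in this extract. In the text-classification tutorial these cells appear to follow, it would plausibly be built by factorizing the Category column first (an assumption, not shown in the source):

df['category_id'] = df['Category'].factorize()[0]
category_id_df = df[['Category', 'category_id']].drop_duplicates().sort_values('category_id')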
combine = [df, test_df]
df.groupby('Category').category_id.count()<categorify>
# Impute missing ages with the median age of each Sex x Pclass cell.
guess_ages = np.zeros((2, 3))
guess_ages
for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
df.head()
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1',
                        ngram_range=(1, 2), stop_words='english')
features = tfidf.fit_transform(df.Text).toarray()
labels = df.category_id<sort_values>
for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
df.head()
sorted(category_to_id.items())<statistical_test>
for dataset in combine: dataset['Age*Class'] = dataset.Age * dataset.Pclass
N = 3
for Category, category_id in sorted(category_to_id.items()):
    features_chi2 = chi2(features, labels == category_id)
    indices = np.argsort(features_chi2[0])
    feature_names = np.array(tfidf.get_feature_names())[indices]
    unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
    bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
    print("# '{}':".format(Category))
    print("  . Most correlated unigrams: {}".format(', '.join(unigrams[-N:])))
    print("  . Most correlated bigrams: {}".format(', '.join(bigrams[-N:])))<define_variables>
for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
SAMPLE_SIZE = int(len(features) * 0.3)
np.random.seed(0)
indices = np.random.choice(range(len(features)), size=SAMPLE_SIZE, replace=False)
projected_features = TSNE(n_components=2, random_state=0).fit_transform(features[indices])<filter>
for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
my_id = 0
projected_features[(labels[indices] == my_id).values]<choose_model_class>
df = df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [df, test_df]
df.head()
models = [
    RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
    MultinomialNB(),
    LogisticRegression(random_state=0),
]<create_dataframe>
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []<find_best_model_class>
test_df.isnull().sum()
for model in models:
    model_name = model.__class__.__name__
    accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
    for fold_idx, accuracy in enumerate(accuracies):
        entries.append((model_name, fold_idx, accuracy))<create_dataframe>
df['FareBand'] = pd.qcut(df['Fare'], 4)
df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])<groupby>
for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
df = df.drop(['FareBand'], axis=1)
combine = [df, test_df]
df.head(10)
cv_df.groupby('model_name').accuracy.mean()<train_model>
X_train = df.drop("Survived", axis=1)
Y_train = df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
model = LogisticRegression(random_state=0)
X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, df.index, test_size=0.33, random_state=0)
model.fit(X_train, y_train)
y_pred_proba = model.predict_proba(X_test)
y_pred = model.predict(X_test)<compute_test_metric>
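The next cell indexes conf_mat, which the elided <compute_test_metric> step presumably produces; a minimal sketch of that step, assuming scikit-learn's confusion_matrix on the held-out predictions above:

from sklearn.metrics import confusion_matrix

# Rows are actual category ids, columns are predicted category ids.
conf_mat = confusion_matrix(y_test, y_pred)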
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
for predicted in category_id_df.category_id:
    for actual in category_id_df.category_id:
        if predicted != actual and conf_mat[actual, predicted] >= 2:
            print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
            display(df.loc[indices_test[(y_test == actual) & (y_pred == predicted)]]['Text'])
            print('')<train_model>
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
model.fit(features, labels)<features_selection>
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
N = 5
for Category, category_id in sorted(category_to_id.items()):
    indices = np.argsort(model.coef_[category_id])
    feature_names = np.array(tfidf.get_feature_names())[indices]
    unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
    bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
    print("# '{}':".format(Category))
    print("  . Top unigrams: {}".format(', '.join(unigrams)))
    print("  . Top bigrams: {}".format(', '.join(bigrams)))<predict_on_test>
texts = ["Hooli stock price soared after a dip in PiedPiper revenue growth.",
         "Captain Tsubasa scores a magnificent goal for the Japanese team.",
         "Merryweather mercenaries are sent on another mission, as government oversight groups call for new sanctions.",
         "Beyoncé releases a new album, tops the charts in all of south-east Asia!",
         "You won't guess what the latest trend in data analysis is!"]
text_features = tfidf.transform(texts)
predictions = model.predict(text_features)
for text, predicted in zip(texts, predictions):
    print('"{}"'.format(text))
    print("  - Predicted as: '{}'".format(id_to_category[predicted]))
    print("")<load_from_csv>
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)
acc_perceptron
TEST_PATH = os.path.join("../input/bbc-news-test", "BBC News Test.csv")
test_df = pd.read_csv(TEST_PATH)<predict_on_test>
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
acc_linear_svc
test_features = tfidf.transform(test_df.Text.tolist())
Y_pred = model.predict(test_features)
Y_pred<define_variables>
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)
acc_sgd
Y_pred_name = []
for cat_id in Y_pred:
    Y_pred_name.append(id_to_category[cat_id])<create_dataframe>
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_predt = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
submission = pd.DataFrame({"ArticleId": test_df["ArticleId"], "Category": Y_pred_name})<save_to_csv>
ex = ExtraTreesClassifier(random_state=6, bootstrap=True, oob_score=True)
ex.fit(X_train, Y_train)
y_predERT = ex.predict(X_test)
score = round(ex.score(X_train, Y_train) * 100, 2)
print('Extremely Randomized Trees', score)
submission.to_csv('submission.csv', index=False)<install_modules>
from sklearn.model_selection import GridSearchCV
<define_variables>
from sklearn.model_selection import GridSearchCV
n_test = 49999
fichero = 'datos.csv'
tests = 'entrega_para_predecir.csv'
resultados_finales = 'resultados_finales_test.csv'
sample = 'resultados_finales_sampleSubmission.csv'
path_dir = 'pokemon-challenge-mlh/'
pokemon_csv = '../input/pokemon.csv'
battles_csv = '../input/battles.csv'
test_csv = '../input/test.csv'<load_from_csv>
n_estimators = [int(x) for x in np.linspace(start=10, stop=80, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [2, 4]
min_samples_split = [2, 5]
min_samples_leaf = [1, 2]
bootstrap = [True, False]
param_grid = {'n_estimators': n_estimators,
              'max_features': max_features,
              'max_depth': max_depth,
              'min_samples_split': min_samples_split,
              'min_samples_leaf': min_samples_leaf,
              'bootstrap': bootstrap}
rf_Model = RandomForestClassifier()
rf_Grid = GridSearchCV(estimator=rf_Model, param_grid=param_grid, cv=3, verbose=2, n_jobs=4)
rf_Grid.fit(X_train, Y_train)
rf_Grid.best_params_
def get_data():
    df_pokemon = pd.read_csv(pokemon_csv)
    df_pokemon = df_pokemon.fillna({'Name': 'None', 'Type 1': 'None', 'Type 2': 'None'})
    df_pokemon = df_pokemon.rename(index=str, columns={"#": "Number"})  # assumed target name; the mapping is truncated in the source
    df_pokemon['Legendary'] = np.where(df_pokemon['Legendary'] == True, 1, 0)
    valores_type1 = df_pokemon['Type 1'].values
    valores_type2 = df_pokemon['Type 2'].values
    valores_name = df_pokemon['Name'].values
    le1 = preprocessing.LabelEncoder()
    le2 = preprocessing.LabelEncoder()
    lename = preprocessing.LabelEncoder()
    encoding1 = le1.fit_transform(valores_type1)
    encoding2 = le2.fit_transform(valores_type2)
    encodingName = lename.fit_transform(valores_name)
    df_pokemon['Type 1'] = encoding1
    df_pokemon['Type 2'] = encoding2
    df_pokemon['Name'] = encodingName
    sum_speeds = np.sum(df_pokemon['Speed'].values)
    total_speeds = len(df_pokemon['Speed'])
    media_speeds = sum_speeds / total_speeds
    df_pokemon['Rapidez'] = np.where(df_pokemon['Speed'] > media_speeds, 1, 0)
    df_pokemon['total_stats'] = df_pokemon['HP'] + df_pokemon['Attack'] + df_pokemon['Defense'] + df_pokemon['Sp.Atk'] + df_pokemon['Sp.Def'] + df_pokemon['Speed']
    df_battles = pd.read_csv(battles_csv)
    df_battles = df_battles[['First_pokemon', 'Second_pokemon', 'Winner']]
    df_test = pd.read_csv(test_csv)
    return df_pokemon, df_battles, df_test, le1, le2, lename<concatenate>
Y_pred = rf_Grid.predict(X_test)
rf_Grid.score(X_train, Y_train)
acc_random_forest = round(rf_Grid.score(X_train, Y_train) * 100, 2)
acc_random_forest
def diff_combates(df_lista):
    lista_ata_pok1 = df_lista[['Attack_id1']].values
    lista_ata_pok2 = df_lista[['Attack_id2']].values
    lista_def_pok1 = df_lista[['Defense_id1']].values
    lista_def_pok2 = df_lista[['Defense_id2']].values
    # Each pokemon's attack is compared against the opponent's defense.
    lista_diff_ataDef_pok1 = lista_ata_pok1 - lista_def_pok2
    lista_diff_ataDef_pok2 = lista_ata_pok2 - lista_def_pok1
    df_lista['diff_ata_def_pok1'] = lista_diff_ataDef_pok1
    df_lista['diff_ata_def_pok2'] = lista_diff_ataDef_pok2
    efec_pok1 = df_lista[['P1_type1', 'P1_type2']].values
    efec_pok2 = df_lista[['P2_type1', 'P2_type2']].values
    sumatorio_efectividad_pok1 = np.sum(efec_pok1, axis=1)
    sumatorio_efectividad_pok2 = np.sum(efec_pok2, axis=1)
    lista_ataESP_pok1 = df_lista[['Sp.Atk_id1']].values
    lista_ataESP_pok2 = df_lista[['Sp.Atk_id2']].values
    lista_defESP_pok1 = df_lista[['Sp.Def_id1']].values
    lista_defESP_pok2 = df_lista[['Sp.Def_id2']].values
    lista_ataESP_pok1_final = np.zeros((len(df_lista)))
    lista_ataESP_pok2_final = np.zeros((len(df_lista)))
    lista_diff_ataDefESP_pok1 = np.zeros((len(df_lista)))
    lista_diff_ataDefESP_pok2 = np.zeros((len(df_lista)))
    for i in range(0, len(df_lista)):
        lista_ataESP_pok1_final[i] = lista_ataESP_pok1[i] * sumatorio_efectividad_pok1[i]
        lista_ataESP_pok2_final[i] = lista_ataESP_pok2[i] * sumatorio_efectividad_pok2[i]
        lista_diff_ataDefESP_pok1[i] = lista_ataESP_pok1_final[i] - lista_defESP_pok2[i]
        lista_diff_ataDefESP_pok2[i] = lista_ataESP_pok2_final[i] - lista_defESP_pok1[i]
    df_lista['diff_ata_def_ESP_pok1'] = lista_diff_ataDefESP_pok1
    df_lista['diff_ata_def_ESP_pok2'] = lista_diff_ataDefESP_pok2
    return df_lista<compute_train_metric>
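The pipelines below also call calculate_effectiveness, which this extract never defines. A minimal placeholder, under the assumption that it adds one type-effectiveness multiplier column per pokemon type (P1_type1, P1_type2, P2_type1, P2_type2); the real version would map actual type matchups:

def calculate_effectiveness(df_lista):
    # Hypothetical stub: neutral 1.0 multipliers instead of a real type chart.
    for col in ['P1_type1', 'P1_type2', 'P2_type1', 'P2_type2']:
        df_lista[col] = 1.0
    return df_lista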
C = [0.05, 0.1, 0.2, 0.3, 0.25, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
gamma = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
kernel = ['rbf', 'linear']
hyper = {'kernel': kernel, 'C': C, 'gamma': gamma}
gd = GridSearchCV(SVC(), param_grid=hyper, verbose=True)
gd.fit(X_train, Y_train)
Y_preds = gd.predict(X_test)
acc_svc = round(gd.score(X_train, Y_train) * 100, 2)
acc_svc
def juntar_csvs(es_conjunto_de_test):
    df_pokemon, df_battles, df_test, le1, le2, lename = get_data()
    pokemon_values = df_pokemon.values
    if es_conjunto_de_test == False:
        battles_values = df_battles.values
        indice_pok1 = 0
        indice_pok2 = 1
    else:
        battles_values = df_test.values
        indice_pok1 = 1
        indice_pok2 = 2
    ids_pokemon = pokemon_values[:, 0]
    ids_pok1, inv1 = np.unique(battles_values[:, indice_pok1], return_inverse=True)
    ids_pok2, inv2 = np.unique(battles_values[:, indice_pok2], return_inverse=True)
    if es_conjunto_de_test == False:
        resultados_batallas = battles_values[:, 2]
    indices1 = np.intersect1d(ids_pok1, ids_pokemon, return_indices=True)
    indices2 = np.intersect1d(ids_pok2, ids_pokemon, return_indices=True)
    vals_pok1 = pokemon_values[indices1[2], 1:]
    vals_pok2 = pokemon_values[indices2[2], 1:]
    sin_battles = pokemon_values[np.where(np.logical_not(np.isin(ids_pokemon, ids_pok1)))]
    print('Pokemons que no han peleado:', len(sin_battles))
    lon_values = len(battles_values)
    pok1 = vals_pok1[inv1]
    pok2 = vals_pok2[inv2]
    columnas = pok2.shape[1] + 13 + 6 + 6
    pok_final = np.ones((lon_values, columnas))
    print(pok_final.shape)
    pok_final[:, :11] = pok1[:, :11]
    pok_final[:, 11:22] = pok2[:, :11]
    pok_final[:, 22:30] = pok1[:, 3:11] - pok2[:, 3:11]
    pok1_ratios = pok1[:, 3:9]
    pok2_ratios = pok2[:, 3:9]
    pok_final[:, 30:36] = pok1_ratios / pok2_ratios
    valores = np.array((battles_values[:, indice_pok1], battles_values[:, indice_pok2], battles_values[:, indice_pok1]))
    valores = valores.T
    if es_conjunto_de_test == False:
        caracteristicas_y_resultados = np.ones((lon_values, columnas + 1))
        caracteristicas_y_resultados[:, :-1] = pok_final
        caracteristicas_y_resultados[:, -1] = resultados_batallas
        lista = np.concatenate((valores, caracteristicas_y_resultados), axis=1)
    else:
        lista = np.concatenate((valores, pok_final), axis=1)
    lista = lista.astype(int)
    columnas = ['First_pokemon', 'Second_pokemon', 'id_primer_ataq',
                'nombre1', 'tipo1_id1', 'tipo2_id1',
                'HP_id1', 'Attack_id1', 'Defense_id1', 'Sp.Atk_id1', 'Sp.Def_id1', 'Speed_id1',
                'Generation_id1', 'Legendary_id1',
                'nombre2', 'tipo1_id2', 'tipo2_id2',
                'HP_id2', 'Attack_id2', 'Defense_id2', 'Sp.Atk_id2', 'Sp.Def_id2', 'Speed_id2',
                'Generation_id2', 'Legendary_id2',
                'diff_HP', 'diff_Attack', 'diff_Defense', 'diff_Sp.Atk', 'diff_Sp.Def', 'diff_Speed',
                'diff_Generation', 'diff_Legendary', 'diff_Rapidez', 'diff_stats',
                'ratio_HP', 'ratio_Attack', 'ratio_Defense', 'ratio_Sp.Atk', 'ratio_Sp.Def', 'ratio_Speed']
    if es_conjunto_de_test == False:
        columnas.append('Winner')
    df_lista = pd.DataFrame(lista, columns=columnas)
    df_lista['tipo1_id1'] = le1.inverse_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.inverse_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.inverse_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.inverse_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.inverse_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.inverse_transform(df_lista['nombre2'])
    df_lista = calculate_effectiveness(df_lista)
    df_lista['diff_HPDefense_SpDef'] = df_lista['diff_HP'] + df_lista['diff_Defense'] + df_lista['diff_Sp.Def']
    df_lista['ratio_HPDefense_SpDef'] = df_lista['ratio_HP'] + df_lista['ratio_Defense'] + df_lista['ratio_Sp.Def']
    efec_pok1 = df_lista['P1_type1'].values + df_lista['P1_type2'].values
    efec_pok2 = df_lista['P2_type1'].values + df_lista['P2_type2'].values
    df_lista['diff_efectividad'] = np.subtract(efec_pok1, efec_pok2)
    if es_conjunto_de_test == False:
        winners = df_lista['Winner'].values
        df_lista = df_lista.drop(['Winner'], axis=1)
        df_lista['Winner'] = winners
    df_lista['tipo1_id1'] = le1.fit_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.fit_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.fit_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.fit_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.fit_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.fit_transform(df_lista['nombre2'])
    df_lista = df_lista.drop(['Legendary_id1', 'Legendary_id2', 'Generation_id1', 'Generation_id2'], axis=1)
    df_lista = df_lista.drop(['nombre1', 'nombre2'], axis=1)
    print(df_lista.shape)
    if es_conjunto_de_test == False:
        df_lista.to_csv(fichero, index=False)
    else:
        df_lista.to_csv(tests, index=False)
    return lista<choose_model_class>
models = pd.DataFrame({
    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest',
              'Naive Bayes', 'Perceptron', 'Stochastic Gradient Descent', 'Linear SVC', 'Decision Tree'],
    'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian,
              acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree]})
sorted_model = models.sort_values(by='Score', ascending=False)
sorted_model
<predict_on_test><EOS>
submission = pd.DataFrame({"PassengerId": test_df["PassengerId"], "Survived": Y_pred})
submission.to_csv('submission2.csv', index=False)
Kernel 13,975,263: Titanic - Machine Learning from Disaster
<SOS> metric: categorization accuracy. Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
%matplotlib inline
def agrupados():
    df = pd.read_csv(fichero)
    lista = df.values
    print(df.columns)
    X = lista[:, :-1]
    y = lista[:, -1]
    train_x, train_y = X[:n_test], y[:n_test]
    test_x, test_y = X[n_test:], y[n_test:]
    clf = lightgbm_model(train_x, train_y, test_x, test_y, df.columns)
    return clf<save_to_csv>
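agrupados calls lightgbm_model, which is not shown here. A minimal sketch of what it might look like, assuming the params dict defined a few cells below and LightGBM's standard training API:

import lightgbm as lgb

def lightgbm_model(train_x, train_y, test_x, test_y, columns):
    # columns includes the target as its last entry, hence the [:-1] slice.
    train_set = lgb.Dataset(train_x, label=train_y, feature_name=list(columns)[:-1])
    valid_set = lgb.Dataset(test_x, label=test_y, reference=train_set)
    clf = lgb.train(params, train_set, valid_sets=[valid_set])
    return clf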
train_data = pd.read_csv("../input/titanic/train.csv")
test_data = pd.read_csv("../input/titanic/test.csv")
def resultado_final():
    df = pd.read_csv(tests)
    lista = df.values
    print(lista.shape)
    clf = agrupados()
    y_pred = clf.predict(lista)
    for i in range(0, len(lista)):
        if y_pred[i] >= .5:
            y_pred[i] = 1
        else:
            y_pred[i] = 0
    y_pred = y_pred.astype(int)
    df_test = pd.read_csv(test_csv)
    df_test['Winner'] = y_pred
    df_test.to_csv(resultados_finales, index=False)
    df_sample = df_test[['battle_number', 'Winner']]
    df_sample.to_csv(sample, index=False)<init_hyperparams>
attributes = ["PassengerId", "Name", "Ticket", "Cabin"]
train_data.drop(attributes, axis=1, inplace=True)
test_data.drop(attributes, axis=1, inplace=True)
params = {'num_iterations': 900,
          'objective': 'binary',
          'learning_rate': 0.08,
          'boosting_type': 'goss',
          'max_depth': 12,
          'metric': 'binary_logloss',
          'lambda_l1': 0.01,
          'lambda_l2': 0.01,
          'subsample': 0.75}
juntar_csvs(False)
juntar_csvs(True)
resultado_final()<define_variables>
duplicate = train_data[train_data.duplicated()]
n_test = 49999
fichero = 'datos_entrenamiento.csv'
tests = 'entrega_para_predecir.csv'
resultados_finales = 'resultados_finales_test.csv'
sample = 'resultados_finales_sampleSubmission.csv'
path_dir = '../input/'<load_from_csv>
train_data=train_data.drop_duplicates()
def get_data():
    df_pokemon = pd.read_csv(path_dir + 'pokemon.csv')
    df_pokemon = df_pokemon.fillna({'Name': 'None', 'Type 1': 'None', 'Type 2': 'None'})
    df_pokemon = df_pokemon.rename(index=str, columns={"#": "Number"})  # assumed target name; the mapping is truncated in the source
    df_pokemon['Legendary'] = np.where(df_pokemon['Legendary'] == True, 1, 0)
    valores_type1 = df_pokemon['Type 1'].values
    valores_type2 = df_pokemon['Type 2'].values
    valores_name = df_pokemon['Name'].values
    le1 = preprocessing.LabelEncoder()
    le2 = preprocessing.LabelEncoder()
    lename = preprocessing.LabelEncoder()
    encoding1 = le1.fit_transform(valores_type1)
    encoding2 = le2.fit_transform(valores_type2)
    encodingName = lename.fit_transform(valores_name)
    df_pokemon['Type 1'] = encoding1
    df_pokemon['Type 2'] = encoding2
    df_pokemon['Name'] = encodingName
    sum_speeds = np.sum(df_pokemon['Speed'].values)
    total_speeds = len(df_pokemon['Speed'])
    media_speeds = sum_speeds / total_speeds
    df_pokemon['Rapidez'] = np.where(df_pokemon['Speed'] > media_speeds, 1, 0)
    df_battles = pd.read_csv(path_dir + 'battles.csv')
    df_battles = df_battles[['First_pokemon', 'Second_pokemon', 'Winner']]
    print(df_battles.columns)
    print(df_pokemon.head())
    df_test = pd.read_csv(path_dir + 'test.csv')
    return df_pokemon, df_battles, df_test, le1, le2, lename<count_unique_values>
train_data[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean()
def juntar_csvs():
    df_pokemon, df_battles, df_test, le1, le2, lename = get_data()
    pokemon_values = df_pokemon.values
    battles_values = df_battles.values
    ids_pokemon = pokemon_values[:, 0]
    ids_pok1, inv1 = np.unique(battles_values[:, 0], return_inverse=True)
    ids_pok2, inv2 = np.unique(battles_values[:, 1], return_inverse=True)
    resultados_batallas = battles_values[:, 2]
    indices1 = np.intersect1d(ids_pok1, ids_pokemon, return_indices=True)
    indices2 = np.intersect1d(ids_pok2, ids_pokemon, return_indices=True)
    vals_pok1 = pokemon_values[indices1[2], 1:]
    vals_pok2 = pokemon_values[indices2[2], 1:]
    sin_battles = pokemon_values[np.where(np.logical_not(np.isin(ids_pokemon, ids_pok1)))]
    print('Pokemons que no han peleado:', len(sin_battles))
    lon_values = len(battles_values)
    pok1 = vals_pok1[inv1]
    pok2 = vals_pok2[inv2]
    columnas = pok2.shape[1] + 3
    print(pok2.shape)
    pok_final = np.ones((lon_values, columnas))
    pok_final[:, :3] = pok1[:, :3]
    pok_final[:, 3:6] = pok2[:, :3]
    pok_final[:, 6:] = pok1[:, 3:] - pok2[:, 3:]
    juntar_carac = pok_final
    caracteristicas_y_resultados = np.ones((lon_values, columnas + 1))
    caracteristicas_y_resultados[:, :-1] = juntar_carac
    caracteristicas_y_resultados[:, -1] = resultados_batallas
    valores = np.array((battles_values[:, 0], battles_values[:, 1], battles_values[:, 0]))
    valores = valores.T
    lista = np.concatenate((valores, caracteristicas_y_resultados), axis=1)
    lista = lista.astype(int)
    df_lista = pd.DataFrame(lista, columns=['First_pokemon', 'Second_pokemon', 'id_primer_ataq',
                                            'nombre1', 'tipo1_id1', 'tipo2_id1',
                                            'nombre2', 'tipo1_id2', 'tipo2_id2',
                                            'diff_HP', 'diff_Attack', 'diff_Defense', 'diff_Sp.Atk', 'diff_Sp.Def', 'diff_Speed',
                                            'diff_Generation', 'diff_Legendary', 'diff_Rapidez', 'Winner'])
    df_lista['tipo1_id1'] = le1.inverse_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.inverse_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.inverse_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.inverse_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.inverse_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.inverse_transform(df_lista['nombre2'])
    df_lista = calculate_effectiveness(df_lista)
    winners = df_lista['Winner'].values
    df_lista = df_lista.drop(['Winner'], axis=1)
    df_lista['Winner'] = winners
    df_lista['tipo1_id1'] = le1.fit_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.fit_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.fit_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.fit_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.fit_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.fit_transform(df_lista['nombre2'])
    df_lista.to_csv(fichero, index=False)
    return lista<count_unique_values>
print("Train_data:",train_data.isnull().values.any()) print("Test_data:",test_data.isnull().values.any() )
def preparar_test():
    df_pokemon, df_battles, df_test, le1, le2, lename = get_data()
    pokemon_values = df_pokemon.values
    tests_values = df_test.values
    ids_pokemon = pokemon_values[:, 0]
    ids_pok1, inv1 = np.unique(tests_values[:, 1], return_inverse=True)
    ids_pok2, inv2 = np.unique(tests_values[:, 2], return_inverse=True)
    indices1 = np.intersect1d(ids_pok1, ids_pokemon, return_indices=True)
    indices2 = np.intersect1d(ids_pok2, ids_pokemon, return_indices=True)
    vals_pok1 = pokemon_values[indices1[2], 1:]
    vals_pok2 = pokemon_values[indices2[2], 1:]
    sin_battles = pokemon_values[np.where(np.logical_not(np.isin(ids_pokemon, ids_pok1)))]
    print('Pokemons que no han peleado en test:', len(sin_battles))
    lon_values = len(tests_values)
    pok1 = vals_pok1[inv1]
    pok2 = vals_pok2[inv2]
    columnas = pok2.shape[1] + 3
    pok_final = np.ones((lon_values, columnas))
    pok_final[:, :3] = pok1[:, :3]
    pok_final[:, 3:6] = pok2[:, :3]
    pok_final[:, 6:] = pok1[:, 3:] - pok2[:, 3:]
    juntar_carac = pok_final
    valores = np.array((tests_values[:, 1], tests_values[:, 2], tests_values[:, 1]))
    valores = valores.T
    lista = np.concatenate((valores, juntar_carac), axis=1)
    lista = lista.astype(int)
    print(lista.shape)
    df_lista = pd.DataFrame(lista, columns=['First_pokemon', 'Second_pokemon', 'id_primer_ataq',
                                            'nombre1', 'tipo1_id1', 'tipo2_id1',
                                            'nombre2', 'tipo1_id2', 'tipo2_id2',
                                            'HP', 'Attack', 'Defense', 'Sp.Atk', 'Sp.Def', 'Speed',
                                            'Generation', 'Legendary', 'Rapidez'])
    df_lista['tipo1_id1'] = le1.inverse_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.inverse_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.inverse_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.inverse_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.inverse_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.inverse_transform(df_lista['nombre2'])
    df_lista = calculate_effectiveness(df_lista)
    df_lista['tipo1_id1'] = le1.fit_transform(df_lista['tipo1_id1'])
    df_lista['tipo2_id1'] = le2.fit_transform(df_lista['tipo2_id1'])
    df_lista['tipo1_id2'] = le1.fit_transform(df_lista['tipo1_id2'])
    df_lista['tipo2_id2'] = le2.fit_transform(df_lista['tipo2_id2'])
    df_lista['nombre1'] = lename.fit_transform(df_lista['nombre1'])
    df_lista['nombre2'] = lename.fit_transform(df_lista['nombre2'])
    df_lista.to_csv(tests, index=False)<choose_model_class>
train_data.isnull().sum()
def GradientBoosting(train_x, train_y, test_x, test_y):
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.08, subsample=0.75, max_depth=9, verbose=1)
    clf.fit(train_x, train_y)
    y_pred = clf.predict(test_x)
    print(clf.feature_importances_)
    print("Accuracy gradient boosting:", metrics.accuracy_score(test_y, y_pred))
    return clf<compute_train_metric>
train_data[['Pclass', 'Age']].groupby(['Pclass'], as_index=False).mean()
def agrupados():
    lista = pd.read_csv(fichero).values
    print(lista.shape)
    X = lista[:, :-1]
    y = lista[:, -1]
    train_x, train_y = X[:n_test], y[:n_test]
    test_x, test_y = X[n_test:], y[n_test:]
    rf = GradientBoosting(train_x, train_y, test_x, test_y)
    return rf

def resultado_final():
    lista = pd.read_csv(tests).values
    print(lista.shape)
    clf = agrupados()
    y_pred = clf.predict(lista)
    print(y_pred)
    y_pred = y_pred.astype(int)
    df_test = pd.read_csv(path_dir + 'test.csv')
    df_test['Winner'] = y_pred
    df_sample = df_test[['battle_number', 'Winner']]
    df_sample.to_csv(sample, index=False)<import_modules>
# Fill only the missing ages with the per-class means computed above.
train_data.loc[(train_data['Age'].isnull()) & (train_data['Pclass'] == 1), 'Age'] = 38
train_data.loc[(train_data['Age'].isnull()) & (train_data['Pclass'] == 2), 'Age'] = 30
train_data.loc[(train_data['Age'].isnull()) & (train_data['Pclass'] == 3), 'Age'] = 25
train_data['Embarked'].fillna(train_data['Embarked'].mode()[0], inplace=True)
fastai.__version__<load_from_csv>
test_data.isnull().sum()
train = pd.read_csv('../input/train.csv').fillna(' ')
valid = pd.read_csv('../input/valid.csv').fillna(' ')
test = pd.read_csv('../input/test.csv').fillna(' ')<save_to_csv>
test_data[['Pclass', 'Age']].groupby(['Pclass'], as_index=False).mean()
pd.concat([train['text'], valid['text'], test['text']]).to_csv('unlabeled_news.csv', index=None, header=True)<save_to_csv>
test_data.loc[(test_data['Age'].isnull()) & (test_data['Pclass'] == 1), 'Age'] = 38
test_data.loc[(test_data['Age'].isnull()) & (test_data['Pclass'] == 2), 'Age'] = 30
test_data.loc[(test_data['Age'].isnull()) & (test_data['Pclass'] == 3), 'Age'] = 25
test_data['Fare'].fillna(test_data['Fare'].mean(), inplace=True)
pd.concat([train[['text', 'label']], valid[['text', 'label']]]).to_csv('train_28k.csv', index=None, header=True)
test[['text']].to_csv('test_5k.csv', index=None, header=True)<define_variables>
print("Train_data:",train_data.isnull().values.any()) print("Test_data:",test_data.isnull().values.any() )
folder = '.'
unlabeled_file = 'unlabeled_news.csv'<load_from_csv>
train_data['Sex'] = train_data['Sex'].map({'male': 1, 'female': 2})
test_data['Sex'] = test_data['Sex'].map({'male': 1, 'female': 2})
train_data['Embarked'] = train_data['Embarked'].map({'S': 1, 'C': 2, 'Q': 3})
test_data['Embarked'] = test_data['Embarked'].map({'S': 1, 'C': 2, 'Q': 3})
%%time
data_lm = TextLMDataBunch.from_csv(folder, unlabeled_file, text_cols='text')<load_pretrained>
X_train = train_data.iloc[:, 1:]
y_train = train_data['Survived'].values.reshape(-1, 1)
X_test = test_data
%%time
learn = language_model_learner(data_lm, drop_mult=0.3, arch=AWD_LSTM)<find_best_params>
lg = LogisticRegression()
lg.fit(X_train, y_train.ravel())
Y_pred = lg.predict(X_test)
print(lg.score(X_train, y_train))
%%time
learn.lr_find(start_lr=slice(10e-7, 10e-5), end_lr=slice(0.1, 10))<find_best_params>
rf = RandomForestClassifier()
rf.fit(X_train, y_train.ravel())
Y_pred = rf.predict(X_test)
print(rf.score(X_train, y_train))
best_lm_lr = learn.recorder.min_grad_lr
best_lm_lr<train_model>
knn = KNeighborsClassifier()
knn.fit(X_train, y_train.ravel())
Y_pred = knn.predict(X_test)
print(knn.score(X_train, y_train))
%%time
learn.fit_one_cycle(1, best_lm_lr)<train_model>
dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train.ravel())
Y_pred = dtree.predict(X_test)
print(dtree.score(X_train, y_train))
%%time
learn.fit_one_cycle(1, best_lm_lr)<predict_on_test>
passenger_id = pd.read_csv("../input/titanic/test.csv")[["PassengerId"]].values
learn.predict('An italian man was found dead in his yard due to', n_words=200)<categorify>
submission = {'PassengerId': passenger_id.ravel(), 'Survived': Y_pred}
submission_pred = pd.DataFrame(submission).set_index(['PassengerId'])
learn.save_encoder('clickbait_news_enc')<define_variables>
submission_pred.to_csv('titanic_prediction.csv')
Kernel 13,929,398: Titanic - Machine Learning from Disaster
train_file, test_file = 'train_28k.csv', 'test_5k.csv'<load_from_csv>
X_full = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
X_test_full = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
print(X_full.columns)
y = X_full.Survived
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
X = X_full[features].copy()
X_test = X_test_full[features].copy()
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
data_clas = TextClasDataBunch.from_csv(path=folder, csv_name=train_file, test=test_file,
                                       vocab=data_lm.train_ds.vocab, bs=64,
                                       text_cols='text', label_cols='label')<save_model>
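The ColumnTransformer in the next cell references numerical_cols and categorical_cols, which are not defined in this extract; they would plausibly be derived from the training frame's dtypes, roughly as follows (names and thresholds are assumptions):

numerical_cols = [c for c in X_train.columns if X_train[c].dtype in ['int64', 'float64']]
categorical_cols = [c for c in X_train.columns
                    if X_train[c].dtype == 'object' and X_train[c].nunique() < 10]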
numerical_transformer = SimpleImputer(strategy='constant')
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(transformers=[
    ('num', numerical_transformer, numerical_cols),
    ('cat', categorical_transformer, categorical_cols)])
X_train = preprocessor.fit_transform(X_train)
X_valid = preprocessor.transform(X_valid)
X_test = preprocessor.transform(X_test)
data_clas.save('ulmfit_data_clas_clickbait_news')<load_pretrained>
dtrain = xgb.DMatrix(X_train, label=y_train)
dvalid = xgb.DMatrix(X_valid, label=y_valid)
dtest = xgb.DMatrix(X_test)
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]
param = {'min_child_weight': 100, 'eta': 0.04, 'colsample_bytree': 0.8, 'max_depth': 100,
         'subsample': 0.75, 'lambda': 2, 'nthread': -1, 'booster': 'gbtree', 'silent': 1,
         'gamma': 0, 'eval_metric': 'rmse', 'objective': 'reg:linear'}
model = xgb.train(param, dtrain, 500, watchlist, early_stopping_rounds=250, maximize=False, verbose_eval=15)
preds = model.predict(dtest)
learn_clas = text_classifier_learner(data_clas, drop_mult=0.3, arch=AWD_LSTM)
learn_clas.load_encoder('clickbait_news_enc')<find_best_params>
test_predict = np.where(preds > 0.5, 1, 0)
test_predict = pd.DataFrame({"PassengerId": X_test_full.index, "Survived": test_predict})
test_predict.to_csv("submission.csv", index=False)
Kernel 14,531,952: Titanic - Machine Learning from Disaster
learn_clas.lr_find(start_lr=slice(10e-7, 10e-5), end_lr=slice(0.1, 10))<find_best_params>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head(n=10)
best_clf_lr = learn_clas.recorder.min_grad_lr
best_clf_lr<train_model>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.describe()
learn_clas.fit_one_cycle(1, best_clf_lr)<set_options>
women_survived_values = train_data.loc[train_data.Sex == 'female']["Survived"]
women_survival_rate = sum(women_survived_values) / len(women_survived_values)
print("Women survival rate:", women_survival_rate)
print(" ")
men_survived_values = train_data.loc[train_data.Sex == 'male']["Survived"]
men_survival_rate = sum(men_survived_values) / len(men_survived_values)
print("Men survival rate:", men_survival_rate)
learn_clas.freeze_to(-2)<train_model>
print(pd.pivot_table(train_data, index='Survived', columns='Pclass', values='PassengerId', aggfunc='count'))
print(" ")
print(pd.pivot_table(train_data, index='Survived', columns='Embarked', values='PassengerId', aggfunc='count'))
learn_clas.fit_one_cycle(1, best_clf_lr)<load_pretrained>
all_data = pd.concat(objs=[train_data, test_data], axis=0).reset_index(drop=True)

def extract_title(Name):
    if '.' in Name:
        return Name.split(',')[1].split('.')[0].strip()
    else:
        return 'None'

all_data['Title'] = all_data['Name'].map(lambda x: extract_title(x))
train_data['Title'] = train_data['Name'].map(lambda x: extract_title(x))
test_data['Title'] = test_data['Name'].map(lambda x: extract_title(x))
title_list = all_data['Title'].unique()
print("Title list:", title_list)
print(" ")

def identify_title_category(cols):
    Parch = cols[0]
    Title = cols[1]
    if Title in ['Miss', 'Master'] and Parch == 0:
        return 'Young without parent-child'
    elif Title in ['Miss', 'Master'] and Parch > 0:
        return 'Young with parent-child'
    elif Title not in ['Miss', 'Master'] and Parch == 0:
        return 'Old without parent-child'
    else:
        return 'Old with parent-child'

all_data['Title'] = all_data[["Parch", "Title"]].apply(identify_title_category, axis=1)
train_data['Title'] = train_data[["Parch", "Title"]].apply(identify_title_category, axis=1)
test_data['Title'] = test_data[["Parch", "Title"]].apply(identify_title_category, axis=1)

def replace_null_age(cols):
    Age = cols[0]
    Pclass = cols[1]
    Sex = cols[2]
    Title = cols[3]
    if pd.isnull(Age):
        return float(round(all_data[((all_data["Pclass"] == Pclass) & (all_data["Sex"] == Sex) & (all_data["Title"] == Title))]["Age"].mean()))
    else:
        return Age

train_data["Age"] = train_data[["Age", "Pclass", "Sex", "Title"]].apply(replace_null_age, axis=1)
test_data["Age"] = test_data[["Age", "Pclass", "Sex", "Title"]].apply(replace_null_age, axis=1)
grouped_average_age = all_data.groupby(['Pclass', 'Sex', 'Title'])["Age"].mean()
print("grouped_average_age:", " ", grouped_average_age)
train_data.head(n=10)
<train_model><EOS>
test_data = test_data.fillna(test_data.mean())
train_data = train_data.fillna(train_data.mean())
survived_data = train_data["Survived"]
relevant_features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked", "Age", "Fare"]
X_train = pd.get_dummies(train_data[relevant_features])
X_test = pd.get_dummies(test_data[relevant_features])
k_fold = KFold(n_splits=10, shuffle=True, random_state=1)
scoring = "accuracy"

model = RandomForestClassifier(n_estimators=100)
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("RF score:", score)
print(round(np.mean(score) * 100, 2))

model = SVC()
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("SVC score:", score)
print(round(np.mean(score) * 100, 2))

model = KNeighborsClassifier(n_neighbors=10)
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("K Neighbor score", score)
print(round(np.mean(score) * 100, 2))

model = GaussianNB()
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("Gaussian score", score)
print(round(np.mean(score) * 100, 2))

model = DecisionTreeClassifier()
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("Decision Tree score", score)
print(round(np.mean(score) * 100, 2))

model = RandomForestClassifier(n_estimators=200, max_depth=6, random_state=100)
score = cross_val_score(model, X_train, survived_data, cv=k_fold, n_jobs=1, scoring=scoring)
print("New RF score:", score)
print(round(np.mean(score) * 100, 2))

model.fit(X_train, survived_data)
predictions = model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission_7_1.csv', index=False)
print("Submission done!")
Kernel 14,319,955: Titanic - Machine Learning from Disaster
<SOS> metric: categorization accuracy. Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
!pip install seaborn==0.11.0
data_clas.add_test(test["text"])<predict_on_test>
pd.options.display.max_rows = 200
pd.set_option('mode.chained_assignment', None)
simplefilter("ignore", category=ConvergenceWarning)
simplefilter("ignore", category=RuntimeWarning)
sns.__version__
test_preds, _ = learn_clas.get_preds(DatasetType.Test, ordered=True)<create_dataframe>
train = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId')
test = pd.read_csv('/kaggle/input/titanic/test.csv', index_col='PassengerId')
test_pred_df = pd.DataFrame(test_preds.data.cpu().numpy(), columns=['clickbait', 'news', 'other'])
ulmfit_preds = pd.Series(np.argmax(test_pred_df.values, axis=1), name='label').map({0: 'clickbait', 1: 'news', 2: 'other'})<save_to_csv>
train.isna().sum()
ulmfit_preds.to_csv('ulmfit_predictions.csv', index_label='id', header=True)<set_options>
test.isna().sum()
%matplotlib inline
font = {"family": "HGMaruGothicMPRO"}
matplotlib.rc("font", **font)<load_from_csv>
def imputer(df):
    age_impute_series = df.groupby(['Pclass', 'Sex']).Age.transform('mean')
    df.Age.fillna(age_impute_series, inplace=True)
    df.Cabin = df.Cabin.str.extract(pat='([A-Z])')
    df.Cabin.fillna('M', inplace=True)
    df['Deck'] = df.Cabin.replace({'A': 'ABC', 'B': 'ABC', 'C': 'ABC', 'D': 'DE', 'E': 'DE',
                                   'F': 'FG', 'G': 'FG', 'T': 'ABC'})
    df.drop('Cabin', axis=1, inplace=True)
    for feature in df.columns:
        df[feature].fillna(df[feature].mode()[0], inplace=True)
    return df
train = pd.read_csv("../input/exam-for-students20200129/train.csv")
test = pd.read_csv("../input/exam-for-students20200129/test.csv")
country = pd.read_csv("../input/exam-for-students20200129/country_info.csv")
train_imputed = imputer(train.copy())
test_imputed = imputer(test.copy())
train["ConvertedSalary"] = np.log1p(train[["ConvertedSalary"]] )<concatenate>
def ticket_extractor(ticket):
    alpha = re.sub(r'\d', '', ticket)
    if alpha:
        return alpha
    else:
        return ticket

temp = train_imputed.copy()
temp['Ticket_extracted'] = temp.Ticket.apply(ticket_extractor)
for i in range(len(temp.Ticket)):
    try:
        int(temp.Ticket_extracted.iloc[i])
        temp.Ticket_extracted.iloc[i] = f'Num_{len(temp.Ticket_extracted.iloc[i])}'
    except:
        continue
for label, pattern in [('_ca', 'C[.]?A[.]?'), ('_PC', 'PC'), ('_SOTON', 'SOTON'), ('_STON', 'STON'),
                       ('_WC', 'W[.]?[/]?C'), ('_SC', 'S[.]?C[.]?'), ('_A', 'A[.]?'), ('_SOC', 'S[.]?O[.]?[/]?C'),
                       ('_PP', 'PP'), ('_FC', '(F.C.|F.C.C.) '), ('_LS_number', 'Num_(6|7)'), ('_SS_number', 'Num_(3|4|5)'),
                       ('rare', '^[^_]')]:
    temp.Ticket_extracted[temp.Ticket_extracted.str.contains(pattern)] = label
temp['Ticket_extracted'].value_counts(dropna=False)
fig, ax = plt.subplots(2, 1, figsize=(15, 15))
sns.countplot(data=temp, x='Ticket_extracted', hue='Survived', ax=ax[0])
ax[0].set_title('Extracted Ticket - Survival "count" plot', size=20, loc='left', y=1.03)
sns.barplot(data=temp, x='Ticket_extracted', y='Survived', ax=ax[1])
ax[1].set_title('Extracted Ticket - Survival "chance" plot', size=20, loc='left', y=1.03)
plt.show()
train["hantei"] = "train" test["hantei"] = "test" full_data= pd.concat([train, test] )<merge>
temp = train_imputed.copy()
temp['Title'] = temp.Name.str.extract(pat=r'([a-zA-Z]+\.) ')
temp.Title[~temp.Title.isin(['Mr.', 'Miss.', 'Mrs.', 'Master.'])] = 'rare'
full_data = full_data.merge(country, on='Country', how='left').set_index(full_data.index)<count_unique_values>
def feature_creator(df_train, df_test):
    df_train['Fare_cat'] = pd.qcut(df_train['Fare'], 7)
    df_test['Fare_cat'] = pd.qcut(df_test['Fare'], 7)
    df_train['Fare_cat'] = LabelEncoder().fit_transform(df_train['Fare_cat'])
    df_test['Fare_cat'] = LabelEncoder().fit_transform(df_test['Fare_cat'])
    df_train['Age_cat'] = pd.cut(df_train.Age, bins=[0, 5, 24, 30, 36, np.inf])
    df_test['Age_cat'] = pd.cut(df_test.Age, bins=[0, 5, 24, 30, 36, np.inf])
    df_train['Age_cat'] = LabelEncoder().fit_transform(df_train['Age_cat'])
    df_test['Age_cat'] = LabelEncoder().fit_transform(df_test['Age_cat'])
    for df in [df_train, df_test]:
        df['Title'] = df.Name.str.extract(pat=r'([a-zA-Z]+\.) ')
        df.Title[~df.Title.isin(['Mr.', 'Miss.', 'Mrs.', 'Master.'])] = 'rare'
    df_full = pd.concat([df_train, df_test], axis=0)
    df_full['Ticket_frequency'] = df_full.Ticket.map(df_full.Ticket.value_counts())
    df_train = df_full.loc[df_train.index]
    df_test = df_full.loc[df_test.index].drop('Survived', axis=1)
    for df in [df_train, df_test]:
        df['Ticket_extracted'] = df.Ticket.apply(ticket_extractor)
        for i in range(len(df.Ticket)):
            try:
                int(df.Ticket_extracted.iloc[i])
                df.Ticket_extracted.iloc[i] = f'Num_{len(df.Ticket_extracted.iloc[i])}'
            except:
                continue
        for label, pattern in [('_ca', 'C[.]?A[.]?'), ('_PC', 'PC'), ('_SOTON', 'SOTON'), ('_STON', 'STON'),
                               ('_WC', 'W[.]?[/]?C'), ('_SC', 'S[.]?C[.]?'), ('_A', 'A[.]?'), ('_SOC', 'S[.]?O[.]?[/]?C'),
                               ('_PP', 'PP'), ('_FC', '(F.C.|F.C.C.) '), ('_LS_number', 'Num_(6|7)'),
                               ('_SS_number', 'Num_(3|4|5)'), ('rare', '^[^_]')]:
            df.Ticket_extracted[df.Ticket_extracted.str.contains(pattern)] = label
    df_train['Family_size'] = df_train['SibSp'] + df_train['Parch'] + 1
    df_test['Family_size'] = df_test['SibSp'] + df_test['Parch'] + 1
    family_size_map = {1: 'alone', 2: 'small_family', 3: 'small_family', 4: 'small_family',
                       5: 'large_family', 6: 'large_family', 7: 'large_family',
                       8: 'large_family', 9: 'large_family', 10: 'large_family', 11: 'large_family'}
    df_train['Family_size_cat'] = df_train.Family_size.replace(family_size_map)
    df_test['Family_size_cat'] = df_test.Family_size.replace(family_size_map)
    df_train['Name_length'] = df_train.Name.str.replace(pat='[^a-zA-Z]', repl='').str.len()
    df_test['Name_length'] = df_test.Name.str.replace(pat='[^a-zA-Z]', repl='').str.len()
    df_full = pd.concat([df_train, df_test], axis=0).reset_index()
    df_full['Family_name'] = df_full.Name.str.split(',', n=1, expand=True).iloc[:, 0]
    df_full['Family_Survival'] = 0.5
    for grp, grp_df in df_full.groupby(['Family_name', 'Fare']):
        if (len(grp_df) != 1):
            for ind, row in grp_df.iterrows():
                smax = grp_df.drop(ind)['Survived'].max()
                smin = grp_df.drop(ind)['Survived'].min()
                passID = row['PassengerId']
                if (smax == 1.0):
                    df_full.loc[df_full['PassengerId'] == passID, 'Family_Survival'] = 1
                elif (smin == 0.0):
                    df_full.loc[df_full['PassengerId'] == passID, 'Family_Survival'] = 0
    for _, grp_df in df_full.groupby('Ticket'):
        if (len(grp_df) != 1):
            for ind, row in grp_df.iterrows():
                if (row['Family_Survival'] == 0) | (row['Family_Survival'] == 0.5):
                    smax = grp_df.drop(ind)['Survived'].max()
                    smin = grp_df.drop(ind)['Survived'].min()
                    passID = row['PassengerId']
                    if (smax == 1.0):
                        df_full.loc[df_full['PassengerId'] == passID, 'Family_Survival'] = 1
                    elif (smin == 0.0):
                        df_full.loc[df_full['PassengerId'] == passID, 'Family_Survival'] = 0
    df_full.set_index('PassengerId', inplace=True)
    df_train = df_full[:891]
    df_test = df_full[891:]
    df_train.drop(['Family_name'], axis=1, inplace=True)
    df_test.drop(['Family_name', 'Survived'], axis=1, inplace=True)
    y_train = df_train.Survived
    df_train.drop('Survived', axis=1, inplace=True)
    return df_train, y_train, df_test
print(train.nunique())<count_unique_values>
X_train, y_train, X_test = feature_creator(train_imputed.copy(), test_imputed.copy())
y_train = y_train.astype(int)
for i in retsumei:
    print(i, train[i].nunique())<count_unique_values>
class FeatureEngineering(BaseEstimator, TransformerMixin):
    def __init__(self, bin_fare=False, bin_age=True, family_size=True, bin_family_size=True,
                 drop_Name_length=False, drop_Ticket_frequency=False, drop_all=True,
                 drop_Family_Survival=True, drop_Ticket_extracted=False, scaling='StandardScaler',
                 target_encode_title=True, test=False):
        self.bin_fare = bin_fare
        self.bin_age = bin_age
        self.family_size = family_size
        self.bin_family_size = bin_family_size
        self.drop_Name_length = drop_Name_length
        self.drop_Ticket_frequency = drop_Ticket_frequency
        self.drop_all = drop_all
        self.drop_Ticket_extracted = drop_Ticket_extracted
        self.scaling = scaling
        self.drop_Family_Survival = drop_Family_Survival
        self.target_encode_title = target_encode_title
        self.transformer = None
        self.drop_list = []
        self.test = test

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        X['Pclass'] = X['Pclass'].astype(str)
        dummies = pd.get_dummies(X.loc[:, ['Pclass', 'Sex']], drop_first=True)
        X = pd.concat([X, dummies], axis=1)
        X.drop(['Pclass', 'Sex'], axis=1, inplace=True)
        X.drop(['Name', 'Ticket'], axis=1, inplace=True)
        if self.bin_fare:
            self.drop_list.append('Fare')
        else:
            self.drop_list.append('Fare_cat')
        if self.bin_age:
            self.drop_list.append('Age')
        else:
            self.drop_list.append('Age_cat')
        if self.family_size:
            self.drop_list.extend(['SibSp', 'Parch'])
            if self.bin_family_size:
                self.drop_list.append('Family_size')
            else:
                self.drop_list.append('Family_size_cat')
        else:
            self.drop_list.extend(['Family_size', 'Family_size_cat'])
        if self.drop_Name_length:
            X.drop('Name_length', axis=1, inplace=True)
        if self.drop_Ticket_frequency:
            X.drop('Ticket_frequency', axis=1, inplace=True)
        if self.drop_Family_Survival:
            X.drop('Family_Survival', axis=1, inplace=True)
        if self.drop_all:
            X.drop(self.drop_list, axis=1, inplace=True)
        self.cols = ['Title', 'Ticket_extracted'] if self.target_encode_title else ['Ticket_extracted']
        if not self.test and len(X) > 400:
            self.target_encoder = TargetEncoder(cols=self.cols, smoothing=5)
            X.loc[:, self.cols] = self.target_encoder.fit_transform(X.loc[:, self.cols], y_train.loc[X.index])
        else:
            X.loc[:, self.cols] = self.target_encoder.transform(X.loc[:, self.cols])
        if self.drop_Ticket_extracted:
            X.drop('Ticket_extracted', axis=1, inplace=True)
        X = pd.get_dummies(X, drop_first=True)
        features_to_scale = [feature for feature in X.columns if X[feature].nunique() != 2]
        if not self.test and len(X) > 400:
            if self.scaling == 'StandardScaler':
                self.transformer = StandardScaler()
            elif self.scaling == 'RobustScaler':
                self.transformer = RobustScaler()
            elif self.scaling == 'MinMaxScaler':
                self.transformer = MinMaxScaler()
            X.loc[:, features_to_scale] = self.transformer.fit_transform(X.loc[:, features_to_scale])
        else:
            X.loc[:, features_to_scale] = self.transformer.transform(X.loc[:, features_to_scale])
        return X
full_retsumei = full_data.columns
for j in full_retsumei:
    print(j, full_data[j].nunique(), full_data[j].dtype)<count_unique_values>
param_grid_pipeline = {'feature_engineering__bin_fare': [True, False],
                       'feature_engineering__bin_age': [True, False],
                       'feature_engineering__family_size': [True, False],
                       'feature_engineering__bin_family_size': [True, False],
                       'feature_engineering__drop_Name_length': [False],
                       'feature_engineering__drop_Ticket_frequency': [False],
                       'feature_engineering__drop_all': [True],
                       'feature_engineering__drop_Ticket_extracted': [True, False],
                       'feature_engineering__target_encode_title': [True, False],
                       'feature_engineering__scaling': ['StandardScaler'],
                       'feature_engineering__drop_Family_Survival': [True]}
pipeline_ = Pipeline([('feature_engineering', FeatureEngineering()), ('model', LogisticRegression())])
cv = StratifiedShuffleSplit(n_splits=20, test_size=0.15, random_state=101)
grid = GridSearchCV(pipeline_, param_grid_pipeline, cv=cv.split(X_train, X_train.Deck),
                    scoring='accuracy', verbose=2, n_jobs=-1)
grid.fit(X_train.copy(), y_train)
full_retsumei = full_data.columns
for j in full_retsumei:
    if full_data[j].nunique() > 50:
        if full_data[j].dtype == "object":
            print(j, full_data[j].nunique(), full_data[j].dtype)<import_modules>
pd.DataFrame(grid.cv_results_)['mean_test_score'].isna().sum()
<feature_engineering>
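fe is used below without being created in this extract; presumably it is a FeatureEngineering instance configured from the pipeline grid search's best parameters, roughly like this sketch (the grid prefix is the one used above):

best_fe_params = {k.replace('feature_engineering__', ''): v
                  for k, v in grid.best_params_.items()
                  if k.startswith('feature_engineering__')}
fe = FeatureEngineering(**best_fe_params)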
X_train_fe = fe.fit_transform(X_train.copy())
henkan = ["Pop.Density(per sq.mi.) ", "Coastline(coast/area ratio)", "Net migration",
          "Infant mortality(per 1000 births)", "Literacy(%)", "Phones(per 1000)",
          "Arable(%)", "Crops(%)", "Other(%)", "Birthrate", "Deathrate",
          "Agriculture", "Industry", "Service"]
for k in henkan:
    print(k)
    full_data[k] = full_data[k].str.replace(',', '.')<data_type_conversions>
fe.test = True
print(full_data['Service'].astype("float64"))<data_type_conversions>
X_test_fe = fe.transform(X_test.copy())
for l in henkan:
    print(full_data[l].astype("float64"))<data_type_conversions>
def learning_curve_plotter(Model, X, y, params_1, params_2, step=50):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    plt.figure(figsize=(16, 7))
    for i, (name, params) in enumerate([params_1, params_2]):
        train_score = []
        val_score = []
        plt.subplot(1, 2, i + 1)
        for j in range(100, len(X_train), step):
            model = Model(**params).fit(X_train[:j], y_train[:j])
            train_score.append(model.score(X_train[:j], y_train[:j]))
            val_score.append(model.score(X_test, y_test))
        plt.plot(train_score, 'r-', label='Training accuracy')
        plt.plot(val_score, 'b-', label='Validation accuracy')
        plt.title(f'{name}')
        plt.xlabel('Training set size')
        plt.ylabel('Accuracy')
        plt.legend()
    plt.show()
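A hypothetical call, comparing two illustrative logistic-regression configurations on the engineered features (the parameter dicts here are assumptions, not values from the source):

learning_curve_plotter(LogisticRegression, X_train_fe.values, y_train.values,
                       ('default', {'max_iter': 1000}),
                       ('stronger regularization', {'C': 0.1, 'max_iter': 1000}))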
for l in henkan:
    full_data[l] = full_data[l].astype("float64")<data_type_conversions>
param_grid_logreg = {'penalty':['elasticnet'], 'C':0.01 * np.arange(100), 'l1_ratio':0.1 * np.arange(10), 'solver':['saga']}
<count_unique_values>
grid_logreg = GridSearchCV(LogisticRegression(), param_grid_logreg,
                           cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
                           scoring='accuracy', verbose=2, n_jobs=-1)
for j in full_retsumei:
    if full_data[j].nunique() > 50:
        if full_data[j].dtype == "object":
            print(j, full_data[j].nunique(), full_data[j].dtype)<count_unique_values>
grid_logreg.fit(X_train_fe, y_train)
for j in full_retsumei:
    if full_data[j].nunique() < 30 and full_data[j].nunique() > 2:
        if full_data[j].dtype == "object":
            print(j, full_data[j].nunique(), full_data[j].dtype)<import_modules>
params_logreg = {'C': 0.28, 'l1_ratio': 0.9, 'penalty': 'elasticnet', 'solver': 'saga'}
<import_modules>
param_grid_knn = {'n_neighbors': np.arange(1, 50), 'weights': ['uniform'],
                  'algorithm': ['ball_tree'], 'leaf_size': np.arange(1, 40, 2)}
<count_values>
grid_knn = GridSearchCV(KNeighborsClassifier(), param_grid_knn,
                        cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
                        scoring='accuracy', verbose=2, n_jobs=-1)
full_data["Age"].value_counts() <feature_engineering>
grid_knn.fit(X_train_fe, y_train)
full_data["Age"] = full_data["Age"].str.replace('Under 18 years old', '1') full_data["Age"] = full_data["Age"].str.replace('18 - 24 years old', '2') full_data["Age"] = full_data["Age"].str.replace('25 - 34 years old', '3') full_data["Age"] = full_data["Age"].str.replace('35 - 44 years old', '4') full_data["Age"] = full_data["Age"].str.replace('45 - 54 years old', '5') full_data["Age"] = full_data["Age"].str.replace('55 - 64 years old', '6') full_data["Age"] = full_data["Age"].str.replace('65 years or older', '7' )<data_type_conversions>
params_knn = {'algorithm': 'ball_tree', 'leaf_size': 1, 'n_neighbors': 7, 'weights': 'uniform'}
full_data['Age'].astype("float64")<count_values>
param_grid_svc = {'C':[0.001, 0.01, 0.1, 1, 5], 'kernel':['rbf'], 'gamma':0.01 * np.arange(100), 'probability':[True]}
full_data["Age"].value_counts()<count_unique_values>
grid_svc = GridSearchCV(SVC(), param_grid_svc,
                        cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
                        scoring='accuracy', verbose=2, n_jobs=-1)
full_retsumei = full_data.columns
ordinal = []
for j in full_retsumei:
    if full_data[j].dtype == "object":
        print(j, full_data[j].nunique(), full_data[j].dtype)
        ordinal.append(j)<categorify>
grid_svc.fit(X_train_fe, y_train)
oe = ce.OrdinalEncoder(cols=ordinal, return_df=False)
full_data[ordinal] = oe.fit_transform(full_data[ordinal])<train_model>
params_svc = {'C': 1, 'gamma': 0.09, 'kernel': 'rbf', 'probability': True}
full_data.fillna(-1, inplace=True)<drop_column>
param_grid_random = {'n_estimators':[300, 500, 1000], 'max_depth':[5, 9], 'max_samples':[0.5, 0.7, 0.9], 'max_features':[0.5, 0.7, 0.9], 'min_samples_split':[2, 5, 8] }
<count_values>
grid_random = GridSearchCV(RandomForestClassifier(), param_grid_random,
                           cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
                           scoring='accuracy', verbose=2, n_jobs=-1)
full_data["hantei"].value_counts()<filter>
grid_random.fit(X_train_fe, y_train)
len(full_data[full_data['hantei'] == 1])<prepare_x_and_y>
params_random = {'max_depth': 5, 'max_features': 0.5, 'max_samples': 0.9, 'min_samples_split': 8, 'n_estimators': 300}
X_train = full_data[full_data['hantei'] == 1]
X_test = full_data[full_data['hantei'] == 2]
X_train.drop(['hantei'], axis=1, inplace=True)
X_test.drop(['hantei'], axis=1, inplace=True)<prepare_x_and_y>
param_grid_gradient = {'max_depth':[3, 4], 'n_estimators':[300, 400, 500], 'learning_rate':[0.01, 0.03, 0.05], 'subsample':[0.5, 0.7], 'max_features':[0.5, 0.7], }
y_train = X_train["ConvertedSalary"]
X_train2 = X_train.drop(["ConvertedSalary"], axis=1)
X_test = X_test.drop(["ConvertedSalary"], axis=1)
# Align column order between train and test before modelling.
X_train2 = X_train2.sort_index(axis=1, ascending=False)
X_test = X_test.sort_index(axis=1, ascending=False)<split>
grid_gradient = GridSearchCV(GradientBoostingClassifier(), param_grid_gradient,
                             cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42),
                             scoring='accuracy', verbose=2, n_jobs=-1)
X_train1, X_test1, y_train, y_test = train_test_split(X_train2, y_train)<train_model>
grid_gradient.fit(X_train_fe, y_train)
model = lgb.LGBMRegressor()
model.fit(X_train1, y_train)<predict_on_test>
params_gradient = {'learning_rate': 0.01, 'max_depth': 3, 'max_features': 0.5, 'n_estimators': 500, 'subsample': 0.5}
pred = model.predict(X_test1)<prepare_output>
param_grid_xgb = {'n_estimators':[400, 600], 'learning_rate':[0.01, 0.03, 0.05], 'max_depth':[3, 4], 'subsample':[0.5, 0.7], 'colsample_bylevel':[0.5, 0.7], 'reg_lambda':[15, None], }