| kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
|---|---|---|---|
6,490,094 | test['nota_mat'] = -2<concatenate> | df_all_decks_survived = df_all.groupby(['Deck', 'Survived'] ).count().drop(columns=['Sex', 'Age', 'SibSp', 'Parch', 'Fare',
'Embarked', 'Pclass', 'Cabin', 'PassengerId', 'Ticket'] ).rename(columns={'Name':'Count'} ).transpose()
def get_survived_dist(df):
surv_counts = {'A':{}, 'B':{}, 'C':{}, 'D':{}, 'E':{}, 'F':{}, 'G':{}, 'M':{}}
decks = df.columns.levels[0]
for deck in decks:
for survive in range(0, 2):
surv_counts[deck][survive] = df[deck][survive][0]
df_surv = pd.DataFrame(surv_counts)
surv_percentages = {}
for col in df_surv.columns:
surv_percentages[col] = [(count / df_surv[col].sum())* 100 for count in df_surv[col]]
return surv_counts, surv_percentages
def display_surv_dist(percentages):
df_survived_percentages = pd.DataFrame(percentages ).transpose()
deck_names =('A', 'B', 'C', 'D', 'E', 'F', 'G', 'M')
bar_count = np.arange(len(deck_names))
bar_width = 0.85
not_survived = df_survived_percentages[0]
survived = df_survived_percentages[1]
plt.figure(figsize=(20, 10))
plt.bar(bar_count, not_survived, color='#b5ffb9', edgecolor='white', width=bar_width, label='Not Survived')  # arguments after color= were truncated in the source; reconstructed (hex values assumed) so the legend below has labels
plt.bar(bar_count, survived, bottom=not_survived, color='#f9bc86', edgecolor='white', width=bar_width, label='Survived')
plt.xlabel('Deck', size=15, labelpad=20)
plt.ylabel('Survival Percentage', size=15, labelpad=20)
plt.xticks(bar_count, deck_names)
plt.tick_params(axis='x', labelsize=15)
plt.tick_params(axis='y', labelsize=15)
plt.legend(loc='upper left', bbox_to_anchor=(1, 1), prop={'size': 15})
plt.title('Survival Percentage in Decks', size=18, y=1.05)
plt.show()
all_surv_count, all_surv_per = get_survived_dist(df_all_decks_survived)
display_surv_dist(all_surv_per ) | Titanic - Machine Learning from Disaster |
6,490,094 | df = df.append(test )<count_missing_values> | df_all['Deck'] = df_all['Deck'].replace(['A', 'B', 'C'], 'ABC')
df_all['Deck'] = df_all['Deck'].replace(['D', 'E'], 'DE')
df_all['Deck'] = df_all['Deck'].replace(['F', 'G'], 'FG')
df_all['Deck'].value_counts() | Titanic - Machine Learning from Disaster |
6,490,094 | df.isnull().sum()<feature_engineering> | df_all.drop(['Cabin'], inplace=True, axis=1)
df_train, df_test = divide_df(df_all)
dfs = [df_train, df_test]
for df in dfs:
display_missing(df ) | Titanic - Machine Learning from Disaster |
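`divide_df` and `display_missing` are called in the cell above but never defined in this excerpt. A minimal sketch of what they plausibly do, assuming the standard 891-row train / 418-row test split of the Titanic data (names and split boundary are assumptions):

```python
import pandas as pd

def divide_df(all_data):
    # Assumed helper: split the concatenated frame back into train and test
    # (rows 0-890 are train; Survived is absent from the test portion).
    return all_data.loc[:890], all_data.loc[891:].drop(['Survived'], axis=1)

def display_missing(df):
    # Assumed helper: print per-column missing-value counts.
    for col in df.columns:
        print('{} column missing values: {}'.format(col, df[col].isnull().sum()))
    print('\n')
```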
6,490,094 | df['codigo_mun'] = df['codigo_mun'].apply(lambda x: x.replace('ID_ID_', ''))<data_type_conversions> | corr = df_train_corr_nd['Correlation Coefficient'] > 0.1
df_train_corr_nd[corr] | Titanic - Machine Learning from Disaster |
6,490,094 | for col in df.columns:
if df[col].dtype == 'object' and col != 'codigo_mun':
df[col] = df[col].astype('category' ).cat.codes<define_variables> | corr = df_test_corr_nd['Correlation Coefficient'] > 0.1
df_test_corr_nd[corr] | Titanic - Machine Learning from Disaster |
6,490,094 | removed_cols = ['Unnamed: 0', 'codigo_mun','municipio', 'nota_mat']
feats = [c for c in df.columns if c not in removed_cols]<filter> | df_all = concat_df(df_train, df_test)
df_all.head() | Titanic - Machine Learning from Disaster |
6,490,094 | test = df[df['nota_mat'] == -2]
df = df[~(df['nota_mat'] == -2)]<split> | def extract_surname(data):
families = []
for i in range(len(data)) :
name = data.iloc[i]
if '(' in name:
name_no_bracket = name.split('(')[0]
else:
name_no_bracket = name
family = name_no_bracket.split(',')[0]
title = name_no_bracket.split(',')[1].strip().split(' ')[0]
for c in string.punctuation:
family = family.replace(c, '' ).strip()
families.append(family)
return families
df_all['Family'] = extract_surname(df_all['Name'])
df_train = df_all.loc[:890]
df_test = df_all.loc[891:]
dfs = [df_train, df_test] | Titanic - Machine Learning from Disaster |
6,490,094 | train, valid = train_test_split(df, test_size=0.3333, random_state=42 )<choose_model_class> | mean_survival_rate = np.mean(df_train['Survived'])
train_family_survival_rate = []
train_family_survival_rate_NA = []
test_family_survival_rate = []
test_family_survival_rate_NA = []
for i in range(len(df_train)) :
if df_train['Family'][i] in family_rates:
train_family_survival_rate.append(family_rates[df_train['Family'][i]])
train_family_survival_rate_NA.append(1)
else:
train_family_survival_rate.append(mean_survival_rate)
train_family_survival_rate_NA.append(0)
for i in range(len(df_test)) :
if df_test['Family'].iloc[i] in family_rates:
test_family_survival_rate.append(family_rates[df_test['Family'].iloc[i]])
test_family_survival_rate_NA.append(1)
else:
test_family_survival_rate.append(mean_survival_rate)
test_family_survival_rate_NA.append(0)
df_train['Family_Survival_Rate'] = train_family_survival_rate
df_train['Family_Survival_Rate_NA'] = train_family_survival_rate_NA
df_test['Family_Survival_Rate'] = test_family_survival_rate
df_test['Family_Survival_Rate_NA'] = test_family_survival_rate_NA
train_ticket_survival_rate = []
train_ticket_survival_rate_NA = []
test_ticket_survival_rate = []
test_ticket_survival_rate_NA = []
for i in range(len(df_train)) :
if df_train['Ticket'][i] in ticket_rates:
train_ticket_survival_rate.append(ticket_rates[df_train['Ticket'][i]])
train_ticket_survival_rate_NA.append(1)
else:
train_ticket_survival_rate.append(mean_survival_rate)
train_ticket_survival_rate_NA.append(0)
for i in range(len(df_test)) :
if df_test['Ticket'].iloc[i] in ticket_rates:
test_ticket_survival_rate.append(ticket_rates[df_test['Ticket'].iloc[i]])
test_ticket_survival_rate_NA.append(1)
else:
test_ticket_survival_rate.append(mean_survival_rate)
test_ticket_survival_rate_NA.append(0)
df_train['Ticket_Survival_Rate'] = train_ticket_survival_rate
df_train['Ticket_Survival_Rate_NA'] = train_ticket_survival_rate_NA
df_test['Ticket_Survival_Rate'] = test_ticket_survival_rate
df_test['Ticket_Survival_Rate_NA'] = test_ticket_survival_rate_NA | Titanic - Machine Learning from Disaster |
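`family_rates` and `ticket_rates` are looked up above but built elsewhere. A hedged sketch of a construction consistent with how they are used: the median training-set survival rate per surname/ticket, kept only for groups that appear in both splits with more than one training member (these restrictions are assumptions, not confirmed by this excerpt):

```python
# Assumed construction of the lookup tables used above.
common_families = set(df_train['Family']) & set(df_test['Family'])
fam_stats = df_train.groupby('Family').agg(
    rate=('Survived', 'median'), members=('Survived', 'size'))
family_rates = {fam: row['rate'] for fam, row in fam_stats.iterrows()
                if fam in common_families and row['members'] > 1}

common_tickets = set(df_train['Ticket']) & set(df_test['Ticket'])
ticket_stats = df_train.groupby('Ticket').agg(
    rate=('Survived', 'median'), members=('Survived', 'size'))
ticket_rates = {t: row['rate'] for t, row in ticket_stats.iterrows()
                if t in common_tickets and row['members'] > 1}
```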
6,490,094 | rf = RandomForestRegressor(n_estimators=200, min_samples_split=5, max_depth=4, random_state=42 )<train_model> | for df in [df_train, df_test]:
df['Survival_Rate'] =(df['Ticket_Survival_Rate'] + df['Family_Survival_Rate'])/ 2
df['Survival_Rate_NA'] =(df['Ticket_Survival_Rate_NA'] + df['Family_Survival_Rate_NA'])/ 2 | Titanic - Machine Learning from Disaster |
6,490,094 | rf.fit(train[feats], train['nota_mat'] )<compute_train_metric> | non_numeric_features = ['Embarked', 'Sex', 'Deck', 'Title', 'Family_Size_Grouped', 'Age', 'Fare']
for df in dfs:
for feature in non_numeric_features:
df[feature] = LabelEncoder().fit_transform(df[feature] ) | Titanic - Machine Learning from Disaster |
6,490,094 | valid_preds = rf.predict(valid[feats])
mean_squared_error(( valid['nota_mat']), valid_preds)**(1/2 )<define_variables> | cat_features = ['Pclass', 'Sex', 'Deck', 'Embarked', 'Title', 'Family_Size_Grouped']
encoded_features = []
for df in dfs:
for feature in cat_features:
encoded_feat = OneHotEncoder().fit_transform(df[feature].values.reshape(-1, 1)).toarray()
n = df[feature].nunique()
cols = ['{}_{}'.format(feature, n)for n in range(1, n + 1)]
encoded_df = pd.DataFrame(encoded_feat, columns=cols)
encoded_df.index = df.index
encoded_features.append(encoded_df)
df_train = pd.concat([df_train, *encoded_features[:6]], axis=1)
df_test = pd.concat([df_test, *encoded_features[6:]], axis=1 ) | Titanic - Machine Learning from Disaster |
6,490,094 | removed_cols = ['exp_anos_estudo', 'densidade_dem','gasto_pc_saude', 'hab_p_medico','servidores',
'area','comissionados','comissionados_por_servidor','indice_governanca',
'ranking_igm','porte','capital']
feats = [c for c in df.columns if c not in removed_cols]<train_model> | df_all = concat_df(df_train, df_test)
drop_cols = ['Deck', 'Embarked', 'Family', 'Family_Size', 'Family_Size_Grouped', 'Survived',
'Name', 'Parch', 'PassengerId', 'Pclass', 'Sex', 'SibSp', 'Ticket', 'Title',
'Ticket_Survival_Rate', 'Family_Survival_Rate', 'Ticket_Survival_Rate_NA', 'Family_Survival_Rate_NA']
df_all.drop(columns=drop_cols, inplace=True)
df_all.head() | Titanic - Machine Learning from Disaster |
6,490,094 | rf.fit(train[feats], train['nota_mat'] )<predict_on_test> | X_train = StandardScaler().fit_transform(df_train.drop(columns=drop_cols))
y_train = df_train['Survived'].values
X_test = StandardScaler().fit_transform(df_test.drop(columns=drop_cols))
print('X_train shape: {}'.format(X_train.shape))
print('y_train shape: {}'.format(y_train.shape))
print('X_test shape: {}'.format(X_test.shape))
X_train = pd.DataFrame(X_train)
y_train = pd.DataFrame(y_train)
X_test = pd.DataFrame(X_test)
X_train.columns = feature_stores
X_test.columns = feature_stores
X_train.head() | Titanic - Machine Learning from Disaster |
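`feature_stores` is assigned as the column labels here without a visible definition; presumably it is the list of feature names that survive the drop, e.g. (an assumption):

```python
# Assumed definition: the feature names remaining after drop_cols are removed.
feature_stores = df_train.drop(columns=drop_cols).columns
```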
6,490,094 | valid_preds = rf.predict(valid[feats])
mean_squared_error(( valid['nota_mat']), valid_preds)**(1/2 )<predict_on_test> | from sklearn import svm, tree, linear_model, neighbors, naive_bayes, ensemble, discriminant_analysis, gaussian_process
from xgboost import XGBClassifier
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import metrics | Titanic - Machine Learning from Disaster |
6,490,094 | test['nota_mat'] = rf.predict(valid[feats] )<save_to_csv> | single_best_model = RandomForestClassifier(criterion='gini',
n_estimators=1100,
max_depth=5,
min_samples_split=4,
min_samples_leaf=5,
max_features='auto',
oob_score=True,
random_state=SEED,
n_jobs=-1,
verbose=1)
old_leaderboard_model = RandomForestClassifier(criterion='gini',
n_estimators=1750,
max_depth=7,
min_samples_split=6,
min_samples_leaf=6,
max_features='auto',
oob_score=True,
random_state=SEED,
n_jobs=-1,
verbose=0)
leaderboard_model = RandomForestClassifier(criterion='gini',
n_estimators=20,
max_depth=3,
min_samples_split=2,
min_samples_leaf=1,
max_features='auto',
oob_score=True,
random_state=42,
n_jobs=-1,
verbose=0)
cv_split = model_selection.ShuffleSplit(n_splits = 10, test_size =.3, train_size =.6, random_state = 0 ) | Titanic - Machine Learning from Disaster |
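`SEED` is passed as `random_state` above but is not defined in this excerpt; a later cell sets `SEED = np.arange(1, 100, 1)`, which would not be a valid `random_state`. A fixed integer seed is presumably intended:

```python
SEED = 42  # assumed: scikit-learn's random_state expects an int, not an array
```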
6,490,094 | test[['codigo_mun','nota_mat']].to_csv('rf_iesb_2019.csv', index=False )<load_from_csv> | X_rfe = None | Titanic - Machine Learning from Disaster |
6,490,094 | df = pd.read_csv('../input/train.csv')
ts = pd.read_csv('../input/test.csv')<concatenate> | Titanic - Machine Learning from Disaster | |
6,490,094 | df = df.append(ts)<feature_engineering> | FEATURES_pipeline = np.array(['Deck_1', 'Deck_4', 'Embarked_2', 'Family_Size_Grouped_1',
'Family_Size_Grouped_3', 'Fare', 'Pclass_1', 'Survival_Rate_NA',
'Ticket_Frequency', 'Title_4']) | Titanic - Machine Learning from Disaster |
6,490,094 | df['codigo_mun'] = df['codigo_mun'].str.replace('ID_ID_', '' )<define_variables> | myrun_bestresults = np.array(['Age', 'Deck_1', 'Deck_2', 'Deck_4', 'Embarked_1', 'Embarked_2',
'Embarked_3', 'Family_Size_Grouped_1', 'Family_Size_Grouped_3',
'Family_Size_Grouped_4', 'Fare', 'Is_Married', 'Pclass_1',
'Pclass_2', 'Pclass_3', 'Sex_1', 'Survival_Rate',
'Survival_Rate_NA', 'Ticket_Frequency', 'Title_2', 'Title_3',
'Title_4'] ) | Titanic - Machine Learning from Disaster |
6,490,094 | lista = ['estado','municipio','porte','populacao','pib','anos_estudo_empreendedor','jornada_trabalho','gasto_pc_educacao','exp_anos_estudo','nota_mat']<data_type_conversions> | choose = [FEATURES,X_rfe,FEATURES_pipeline,myrun_bestresults]
FEATURES = choose[3] | Titanic - Machine Learning from Disaster |
6,490,094 | for i in df[lista].select_dtypes(include=['object']):
df[i] = df[i].astype('category' ).cat.codes<filter> | MLA = [
ensemble.AdaBoostClassifier() ,
ensemble.BaggingClassifier() ,
ensemble.ExtraTreesClassifier() ,
ensemble.GradientBoostingClassifier() ,
leaderboard_model,
gaussian_process.GaussianProcessClassifier() ,
linear_model.LogisticRegressionCV() ,
linear_model.PassiveAggressiveClassifier() ,
linear_model.RidgeClassifierCV() ,
linear_model.SGDClassifier() ,
linear_model.Perceptron() ,
naive_bayes.BernoulliNB() ,
naive_bayes.GaussianNB() ,
neighbors.KNeighborsClassifier() ,
svm.SVC(probability=True),
svm.NuSVC(probability=True),
svm.LinearSVC() ,
tree.DecisionTreeClassifier() ,
tree.ExtraTreeClassifier() ,
discriminant_analysis.LinearDiscriminantAnalysis() ,
discriminant_analysis.QuadraticDiscriminantAnalysis() ,
XGBClassifier()
]
MLA_columns = ['MLA Name', 'MLA Parameters','MLA Train Accuracy Mean', 'MLA Test Accuracy Mean', 'MLA Test Accuracy 3*STD' ,'MLA Time']
MLA_compare = pd.DataFrame(columns = MLA_columns)
MLA_predict = pd.DataFrame()
row_index = 0
for alg in MLA:
MLA_name = alg.__class__.__name__
MLA_compare.loc[row_index, 'MLA Name'] = MLA_name
MLA_compare.loc[row_index, 'MLA Parameters'] = str(alg.get_params())
cv_results = model_selection.cross_validate(alg, X_train[FEATURES], y_train, cv = cv_split, return_train_score=True)
MLA_compare.loc[row_index, 'MLA Time'] = cv_results['fit_time'].mean()
MLA_compare.loc[row_index, 'MLA Train Accuracy Mean'] = cv_results['train_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy Mean'] = cv_results['test_score'].mean()
MLA_compare.loc[row_index, 'MLA Test Accuracy 3*STD'] = cv_results['test_score'].std() *3
alg.fit(X_train[FEATURES], y_train)
MLA_predict[MLA_name] = alg.predict(X_train[FEATURES])
row_index+=1
MLA_compare.sort_values(by = ['MLA Test Accuracy Mean'], ascending = False, inplace = True)
MLA_compare
| Titanic - Machine Learning from Disaster |
6,490,094 | ts = df[df['nota_mat']==-2]
df = df[df['nota_mat']!=-2]<split> | grid_n_estimator = [250,500,1000, 10, 20, 40,50, 100, 1750, 2000, 1500]
grid_ratio = [.1,.25,.5,.75, 1.0]
grid_learn = [.01,.03,.05, 0.75,.1,1.25,.25]
grid_max_depth = [1,2,3, 4,5,6,7,8,9,10]
grid_min_samples = [5, 10,.03,.05,.10]
grid_criterion = ['gini', 'entropy']
grid_bool = [True, False]
grid_seed = [0]
min_sample_split = [6,4,8]
min_sample_leaf = [6,4,8]
SEED=np.arange(1,100,1)
vote_est = [
('rfc', ensemble.RandomForestClassifier()),
]
grid_param = [
[{
'n_estimators': [20],
'criterion': ["gini"],
'max_depth': [3],
'min_samples_split': [2],
'min_samples_leaf': [1],
'oob_score': [True],
'random_state': [42],
'n_jobs': [-1]
}]
]
start_total = time.perf_counter()
for clf, param in zip(vote_est, grid_param):
start = time.perf_counter()
best_search = model_selection.GridSearchCV(estimator = clf[1], param_grid = param, cv = cv_split, scoring = 'roc_auc')
best_search.fit(X_train[FEATURES], y_train)
run = time.perf_counter() - start
best_param = best_search.best_params_
print('The best parameter for {} is {} with a runtime of {:.2f} seconds.'.format(clf[1].__class__.__name__, best_param, run))
clf[1].set_params(**best_param)
run_total = time.perf_counter() - start_total
print('Total optimization time was {:.2f} minutes.'.format(run_total/60))
print('-'*10 ) | Titanic - Machine Learning from Disaster |
6,490,094 | train, valid = train_test_split(df[lista], random_state=42)
train.shape, valid.shape<train_model> | grid_hard = ensemble.VotingClassifier(estimators = vote_est , voting = 'hard')
grid_hard_cv = model_selection.cross_validate(grid_hard, X_train[FEATURES], y_train, cv = cv_split, return_train_score=True)
grid_hard.fit(X_train[FEATURES], y_train)
print("Hard Voting w/Tuned Hyperparameters Training w/bin score mean: {:.2f}".format(grid_hard_cv['train_score'].mean() *100))
print("Hard Voting w/Tuned Hyperparameters Test w/bin score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100))
print("Hard Voting w/Tuned Hyperparameters Test w/bin score 3*std: +/- {:.2f}".format(grid_hard_cv['test_score'].std() *100*3))
print('-'*10)
grid_soft = ensemble.VotingClassifier(estimators = vote_est , voting = 'soft')
grid_soft_cv = model_selection.cross_validate(grid_soft, X_train[FEATURES], y_train, cv = cv_split, return_train_score=True)
grid_soft.fit(X_train[FEATURES], y_train)
print("Soft Voting w/Tuned Hyperparameters Training w/bin score mean: {:.2f}".format(grid_soft_cv['train_score'].mean() *100))
print("Soft Voting w/Tuned Hyperparameters Test w/bin score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() *100))
print("Soft Voting w/Tuned Hyperparameters Test w/bin score 3*std: +/- {:.2f}".format(grid_soft_cv['test_score'].std() *100*3))
print('-'*10)
| Titanic - Machine Learning from Disaster |
6,490,094 | <compute_train_metric><EOS> | submission_df = pd.DataFrame(columns=['PassengerId', 'Survived'])
submission_df['PassengerId'] = df_test['PassengerId']
submission_df['Survived'] = grid_hard.predict(X_test[FEATURES])
submission_df["Survived"] = submission_df["Survived"].astype(int)
submission_df.to_csv('submission.csv', header=True, index=False)
submission_df.head(10 ) | Titanic - Machine Learning from Disaster |
3,365,084 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<predict_on_test> | print('*'*50)
print('Pandas Version : ', pd.__version__)
print('Numpy Version : ', np.__version__)
print('Matplotlib Version: ', mpl.__version__)
print('Seaborn Version : ', sns.__version__)
print('SKLearn Version : ', sk.__version__)
print('*'*50 ) | Titanic - Machine Learning from Disaster |
3,365,084 | ts['nota_mat'] = rf.predict(ts[feats] )<save_to_csv> | sns.set_style('whitegrid')
pd.options.display.max_rows = 100
pd.options.display.max_columns = 100
seed = 42
v_size = 0.33
num_folds = 10
scoring = 'accuracy'
np.random.seed(seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | ts[['codigo_mun','nota_mat']].to_csv('lucio.csv',index=False )<load_from_csv> | path_train = '../input/train.csv'
path_test = '../input/test.csv'
df_train = pd.read_csv(path_train)
df_test = pd.read_csv(path_test ) | Titanic - Machine Learning from Disaster |
3,365,084 | myfile = pd.read_csv("../input/train.csv")
test_X = pd.read_csv("../input/test.csv")<drop_column> | print('Datasets shapes: ')
print("Training shape: ", df_train.shape)
print("Test shape : ", df_test.shape ) | Titanic - Machine Learning from Disaster |
3,365,084 | test_X = test_X[['exp_vida','anos_estudo_empreendedor','idhm', 'perc_pop_econ_ativa']]<data_type_conversions> | missingData(df_train ) | Titanic - Machine Learning from Disaster |
3,365,084 | test_X = test_X.fillna(test_X.mean() )<feature_engineering> | missingData(df_test ) | Titanic - Machine Learning from Disaster |
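`missingData` is not defined in this excerpt; a plausible sketch (the name and tabular output format are assumptions):

```python
import pandas as pd

def missingData(df):
    # Assumed helper: tabulate missing-value counts and percentages per column.
    total = df.isnull().sum().sort_values(ascending=False)
    percent = (df.isnull().sum() / len(df) * 100).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
```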
3,365,084 | a = myfile[['densidade_dem']]
b = myfile[['area']]
c = myfile[['codigo_mun']]
d = myfile[['ranking_igm']]
e = myfile[['comissionados_por_servidor']]
myfile['densidade_dem'] = a.apply(lambda x: x.str.replace(',',''))
myfile['area'] = b.apply(lambda x: x.str.replace(',',''))
myfile['codigo_mun'] = c.apply(lambda x: x.str.replace('ID_',''))
myfile['ranking_igm'] = d.apply(lambda x: x.str.replace('º',''))
myfile['comissionados_por_servidor'] = e.apply(lambda x: x.str.replace('%',''))
e = myfile[['comissionados_por_servidor']]
myfile['comissionados_por_servidor'] = e.apply(lambda x: x.str.replace('#DIV/0!', ''))  # string literal truncated in the source; '#DIV/0!' assumed from the spreadsheet-style data
myfile['densidade_dem'] = pd.to_numeric(myfile['densidade_dem'])
myfile['area'] = pd.to_numeric(myfile['area'])
myfile['codigo_mun'] = pd.to_numeric(myfile['codigo_mun'])
myfile['ranking_igm'] = pd.to_numeric(myfile['ranking_igm'])
myfile['comissionados_por_servidor'] = pd.to_numeric(myfile['comissionados_por_servidor'] )<categorify> | df_train.drop("Cabin", axis=1, inplace = True)
df_test.drop("Cabin", axis=1, inplace = True ) | Titanic - Machine Learning from Disaster |
3,365,084 | a = myfile['regiao']
regioes = pd.get_dummies(a)
myfile = pd.concat([myfile, regioes], axis=1 )<categorify> | df_train["Age"].fillna(df_train["Age"].median() , inplace = True)
df_test["Age"].fillna(df_test["Age"].median() , inplace = True ) | Titanic - Machine Learning from Disaster |
3,365,084 | a = myfile['estado']
estado = pd.get_dummies(a)
myfile = pd.concat([myfile, estado], axis=1 )<categorify> | df_train['Embarked'] = df_train['Embarked'].fillna(df_train['Embarked'].mode() [0] ) | Titanic - Machine Learning from Disaster |
3,365,084 | a = myfile['porte']
porte = pd.get_dummies(a)
myfile = pd.concat([myfile, porte], axis=1 )<count_missing_values> | def displayNanValues() :
print("Check the NaN value in train data")
print(df_train.isnull().sum())
print("---"*30)
print("Check the NaN value in test data")
print(df_test.isnull().sum())
print("---"*30 ) | Titanic - Machine Learning from Disaster |
3,365,084 | myfile.isnull().sum()<drop_column> | all_data = [df_train, df_test] | Titanic - Machine Learning from Disaster |
3,365,084 | train_df = myfile[['nota_mat','exp_vida',
'anos_estudo_empreendedor','idhm', 'perc_pop_econ_ativa']]
train_df.dropna(inplace=True)
train_X = train_df.drop(columns=['nota_mat'])
train_X.head()<prepare_x_and_y> | def featureExtraction(all_data):
for dataset in all_data:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in all_data:
dataset['Age_bin'] = pd.cut(dataset['Age'], bins=[0,18,60,120], labels=['Children','Adult','Elder'])
for dataset in all_data:
dataset['Title'] = dataset.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
for dataset in all_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs' ) | Titanic - Machine Learning from Disaster |
3,365,084 | train_y = train_df[['nota_mat']]
train_y.head()<choose_model_class> | featureExtraction(all_data ) | Titanic - Machine Learning from Disaster |
3,365,084 | model = Sequential()
n_cols = train_X.shape[1]
model.add(Dense(10, activation='relu', input_shape=(n_cols,)))
model.add(Dense(10, activation='relu'))
model.add(Dense(1))<choose_model_class> | dfTrain = df_train.copy()
dfTest = df_test.copy() | Titanic - Machine Learning from Disaster |
3,365,084 | model.compile(optimizer='adam', loss='mean_squared_error' )<train_model> | traindf = pd.get_dummies(dfTrain, columns = ["Pclass","Title",'FamilySize',"Sex","Age_bin","Embarked"],
prefix=["Pclass","Title",'FamilySize',"Sex","Age_type","Em_type"])
testdf = pd.get_dummies(dfTest, columns = ["Pclass","Title",'FamilySize',"Sex","Age_bin","Embarked"],
prefix=["Pclass","Title",'FamilySize',"Sex","Age_type","Em_type"] ) | Titanic - Machine Learning from Disaster |
3,365,084 | early_stopping_monitor = EarlyStopping(patience=3)
model.fit(train_X, train_y, validation_split=0.1, epochs=30, callbacks=[early_stopping_monitor] )<predict_on_test> | allData = [traindf, testdf] | Titanic - Machine Learning from Disaster |
3,365,084 | test_y_predictions = model.predict(test_X )<create_dataframe> | for dataset in allData:
drop_column = ["Age","Fare","Name","Ticket","SibSp","Parch"]
dataset.drop(drop_column, axis=1, inplace = True)
traindf.drop(["PassengerId"], axis=1, inplace = True ) | Titanic - Machine Learning from Disaster |
3,365,084 | test_y_predictions = pd.DataFrame(test_y_predictions )<load_from_csv> | df_data_shuffled = traindf.iloc[np.random.permutation(len(traindf)) ] | Titanic - Machine Learning from Disaster |
3,365,084 | test_X = pd.read_csv("../input/test.csv")
b = test_X[['codigo_mun']]
b['codigo_mun'] = b.apply(lambda x: x.str.replace('ID_ID_',''))
b['codigo_mun'] =(b['codigo_mun'] ).astype(int)
a = pd.concat([b, test_y_predictions], axis=1)
a.columns = ['codigo_mun', 'nota_mat']<save_to_csv> | array = df_data_shuffled.values
features = array[:,1:].astype(float)
targeted = array[:,0].astype(float ) | Titanic - Machine Learning from Disaster |
3,365,084 | a.to_csv('Maria_de_Fatima.csv', index = False )<load_from_csv> | X_train,X_test,y_train,y_test = train_test_split(features,targeted,test_size=v_size,random_state=seed)
print('Data shapes: ')
print("X_train shape: ", X_train.shape)
print("X_test shape : ", X_test.shape)
print("y_train shape: ", y_train.shape)
print("y_test shape : ", y_test.shape ) | Titanic - Machine Learning from Disaster |
3,365,084 | df = pd.read_csv("../input/train.csv")
ts = pd.read_csv("../input/test.csv")<import_modules> | lr_clf = LogisticRegression()
lr_param_grid = {'solver' : ['liblinear', 'lbfgs'],'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] }
best_lr = algoGridTune(lr_clf, lr_param_grid, X_train, y_train,num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
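`algoGridTune` is called for every model in this kernel but never shown. A hedged sketch consistent with its call signature `(clf, param_grid, X, y, num_folds, scoring, seed)`:

```python
from sklearn.model_selection import GridSearchCV, KFold

def algoGridTune(clf, param_grid, X, y, num_folds, scoring, seed):
    # Assumed helper: grid-search the estimator and return the refit best model.
    cv = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    grid = GridSearchCV(clf, param_grid, cv=cv, scoring=scoring, n_jobs=-1)
    grid.fit(X, y)
    print('{} best {}: {:.4f}'.format(clf.__class__.__name__, scoring, grid.best_score_))
    print('best params:', grid.best_params_)
    return grid.best_estimator_
```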
3,365,084 | import math<feature_engineering> | knn_clf = KNeighborsClassifier()
knn_param_grid = {'n_neighbors':[3,5,7,9,11],
'leaf_size':[1,2,3,5],
'weights':['uniform', 'distance'],
'algorithm':['auto', 'ball_tree','kd_tree','brute']
}
best_knn = algoGridTune(knn_clf, knn_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | math.log(1000)- math.log(1100), math.log(10)- math.log(11 )<split> | dt_clf = DecisionTreeClassifier()
dt_param_grid = {'max_depth' : [3,4,5,6,7,8,9,10],
'max_features': ['sqrt', 'log2'],
'min_samples_split': [3,5,7,9,11],
'min_samples_leaf':[1,3,5,7,9,11]
}
best_dt = algoGridTune(dt_clf, dt_param_grid, X_train, y_train,num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | test = df[df['nota_mat'].isnull() ]
df = df[~df['nota_mat'].isnull() ]<data_type_conversions> | svc_clf = SVC()
svc_param_grid = [{"kernel": ["rbf"],
"gamma": [10 ,1, 0.1, 1e-2, 1e-3],
"C": [0.1,1,10],
"random_state" : [seed]},
{"kernel": ["linear"], "C": [0.1,1,10,100]}
]
best_SVC = algoGridTune(svc_clf, svc_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | for c in df.columns:
if df[c].dtype == 'object':
df[c] = df[c].astype('category' ).cat.codes<import_modules> | DTC = DecisionTreeClassifier()
ABC_clf = AdaBoostClassifier(DTC, random_state=seed)
ABC_param_grid = {"base_estimator__criterion" : ["gini"],
"base_estimator__splitter" : ["best"],
"algorithm" : ["SAMME"],
"n_estimators" :[30, 100],
"learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3,1.5]}
best_ABC = algoGridTune(ABC_clf, ABC_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | from sklearn.model_selection import train_test_split<import_modules> | ETC_clf = ExtraTreesClassifier()
ETC_param_grid = {"max_depth": [30],
"max_features": ['sqrt'],
"min_samples_split": [2, 3, 5],
"min_samples_leaf": [1, 3, 5],
"bootstrap": [True],
"n_estimators" :[300],
"criterion": ["gini"]}
best_ETC = algoGridTune(ETC_clf, ETC_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | from sklearn.model_selection import train_test_split<split> | GBC_clf = GradientBoostingClassifier()
GBC_param_grid = {'loss' : ["deviance"],
'n_estimators' : [100, 200, 300],
'learning_rate': [0.1, 0.05, 0.01],
'max_depth': [3, 5, 7],
'min_samples_leaf': [1, 5, 9],
'min_samples_split': [2, 6, 10],
'max_features': ['sqrt', 'log2']
}
best_GBC = algoGridTune(GBC_clf, GBC_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | df, valid = train_test_split(df, random_state=42 )<define_variables> | RFC_clf = RandomForestClassifier()
RFC_param_grid = {"max_depth": [None],
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"n_estimators" :[100,300],
"criterion": ["gini"]}
best_RFC = algoGridTune(RFC_clf, RFC_param_grid, X_train, y_train, num_folds, scoring, seed ) | Titanic - Machine Learning from Disaster |
3,365,084 | removed_cols = ['municipio','codigo_mun']
<import_modules> | kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed) | Titanic - Machine Learning from Disaster |
3,365,084 | from sklearn.tree import DecisionTreeRegressor<choose_model_class> | datacols = list(traindf.drop("Survived", axis=1)) | Titanic - Machine Learning from Disaster |
3,365,084 | dt = DecisionTreeRegressor(random_state=42, max_depth=2 )<define_variables> | passengerIds = testdf["PassengerId"].copy()
testdf.drop(["PassengerId"], axis=1, inplace = True ) | Titanic - Machine Learning from Disaster |
3,365,084 | lista= ['estado','anos_estudo_empreendedor','nota_mat', 'municipio','codigo_mun','area','capital','codigo_mun','comissionados','comissionados_por_servidor','densidade_dem','estado','exp_anos_estudo','exp_vida','gasto_pc_educacao','gasto_pc_saude','hab_p_medico','idhm','indice_governanca','jornada_trabalho','municipio','nota_mat','participacao_transf_receita', 'perc_pop_econ_ativa','pib','pib_pc','populacao','porte','ranking_igm','regiao','servidores','taxa_empreendedorismo']<split> | test = testdf.values | Titanic - Machine Learning from Disaster |
3,365,084 | train,valid = train_test_split(df,random_state=42)
train.shape,valid.shape<train_model> | test_Survived_lr = pd.Series(best_lr.predict(test), name="LR")
test_Survived_knn = pd.Series(best_knn.predict(test), name="KNN")
test_Survived_dt = pd.Series(best_dt.predict(test), name="DT")
test_Survived_SVC = pd.Series(best_SVC.predict(test), name="SVC")
test_Survived_ABC = pd.Series(best_ABC.predict(test), name="ABC")
test_Survived_RFC = pd.Series(best_RFC.predict(test), name="RFC")
test_Survived_ETC = pd.Series(best_ETC.predict(test), name="ETC")
test_Survived_GBC = pd.Series(best_GBC.predict(test), name="GBC")
ensemble_results = pd.concat([test_Survived_lr, test_Survived_knn,
test_Survived_dt, test_Survived_SVC,
test_Survived_ABC, test_Survived_RFC,
test_Survived_ETC, test_Survived_GBC],axis=1)
correlationHeatmap(ensemble_results, 'Correlation between models results' ) | Titanic - Machine Learning from Disaster |
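`correlationHeatmap` is not defined in this excerpt; a minimal sketch assuming it wraps seaborn's heatmap over the prediction correlation matrix:

```python
import matplotlib.pyplot as plt
import seaborn as sns

def correlationHeatmap(df, title):
    # Assumed helper: plot the correlation matrix of the per-model predictions.
    plt.figure(figsize=(10, 8))
    sns.heatmap(df.corr(), annot=True, cmap='coolwarm')
    plt.title(title)
    plt.show()
```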
3,365,084 | rf = RandomForestRegressor(random_state=42,n_estimators=100)
feats = [c for c in df[lista].columns if c not in ['nota_mat']]
rf.fit(train[feats], train['nota_mat'] )<prepare_output> | votingC = VotingClassifier(estimators=[('lr', best_lr),
('knn', best_knn),
('dt', best_dt),
('svc', best_SVC),
('abc', best_ABC),
('rfc', best_RFC),
('etc', best_ETC),
('gbc', best_GBC)],
voting='hard', n_jobs=-1)
votingC = votingC.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
3,365,084 | math.exp(2.78), math.exp(5.27 )<import_modules> | predictions = votingC.predict(test)
test_Survived = pd.Series(votingC.predict(test), name="Survived")
test_Survived = test_Survived.apply(int)
results = pd.concat([passengerIds,test_Survived],axis=1)
results.to_csv('submission.csv', columns=['PassengerId', 'Survived'], index=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | from sklearn.metrics import mean_squared_error<import_modules> | df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_combined = pd.concat([df_train, df_test], sort=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | from sklearn.metrics import mean_squared_error<feature_engineering> | df_train[['Pclass','Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | df['preds'] = df['nota_mat'].mean()<import_modules> | df_train[['Sex','Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | from sklearn.ensemble import RandomForestRegressor<import_modules> | df_train[['SibSp','Survived']].groupby(['SibSp'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | from sklearn.ensemble import RandomForestRegressor<predict_on_test> | df_train[['Parch','Survived']].groupby(['Parch'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | df_preds = rf.predict(valid[feats] )<compute_test_metric> | df_test['Title'] = df_test['Name'].str.extract(r'([A-Za-z]+)\.', expand=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | mean_squared_error(valid['nota_mat'], df_preds)**(1/2 )<predict_on_test> | df_train['Title'] = df_train['Title'].replace(['Capt', 'Col', 'Don', 'Dr', 'Major', 'Jonkheer', 'Sir', 'Rev', 'Dona'], 'Rare')
df_train['Title'] = df_train['Title'].replace(['Lady', 'Countess', 'Mme'], 'Mrs')
df_train['Title'] = df_train['Title'].replace(['Ms', 'Mlle'], 'Miss' ) | Titanic - Machine Learning from Disaster |
2,684,355 | valid_preds=rf.predict(valid[feats] )<compute_test_metric> | df_test['Title'] = df_test['Title'].replace(['Capt', 'Col', 'Don', 'Dr', 'Major', 'Jonkheer', 'Sir', 'Rev', 'Dona'], 'Rare')
df_test['Title'] = df_test['Title'].replace(['Lady', 'Countess', 'Mme'], 'Mrs')
df_test['Title'] = df_test['Title'].replace(['Ms', 'Mlle'], 'Miss' ) | Titanic - Machine Learning from Disaster |
2,684,355 | mean_squared_error(valid['nota_mat'], valid_preds)**(1/2 )<set_options> | df_train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean() | Titanic - Machine Learning from Disaster |
2,684,355 | ts.fillna(-1.0, inplace=True )<data_type_conversions> | df_train['FamSize'] = df_train['Parch'] + df_train['SibSp'] + 1
df_test['FamSize'] = df_test['Parch'] + df_test['SibSp'] + 1 | Titanic - Machine Learning from Disaster |
2,684,355 | ts['codigo_mun'] = ts['codigo_mun'].str.replace('ID_ID_','')
ts['codigo_mun'] = ts['codigo_mun'].values.astype('int64' )<data_type_conversions> | df_train[['FamSize','Survived']].groupby(['FamSize'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | for c in ts.columns:
if ts[c].dtype == 'object':
ts[c] = df[c].astype('category' ).cat.codes<set_options> | df_train['IsAlone'] = df_train["FamSize"].map(lambda s: 1 if s==1 else 0)
df_test['IsAlone'] = df_test["FamSize"].map(lambda s: 1 if s==1 else 0 ) | Titanic - Machine Learning from Disaster |
2,684,355 | ts.fillna(-1.0, inplace=True )<predict_on_test> | df_train[['IsAlone','Survived']].groupby(['IsAlone'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | ts['nota_mat']=rf.predict(ts[feats] )<save_to_csv> | df_train[['Embarked','Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by="Survived", ascending=False ) | Titanic - Machine Learning from Disaster |
2,684,355 | ts[['codigo_mun','nota_mat']].to_csv('trabalho.csv', index=False )<set_options> | df_train['Fare'] = df_train['Fare'].fillna(df_train['Fare'].median())
df_test['Fare'] = df_test['Fare'].fillna(df_test['Fare'].median() ) | Titanic - Machine Learning from Disaster |
2,684,355 | warnings.simplefilter("ignore")
np.random.seed(10)
LEVEL = 'level_4a'<compute_test_metric> | df_train['CategoricalFare'] = pd.qcut(df_train['Fare'], 10)
| Titanic - Machine Learning from Disaster |
2,684,355 | class SigmoidNeuron:
def __init__(self):
self.w = None
self.b = None
def perceptron(self, x):
return np.dot(x, self.w.T)+ self.b
def sigmoid(self, x):
return 1.0/(1.0 + np.exp(-x))
def grad_w_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)* x
def grad_b_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)
def grad_w_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred * x
elif y == 1:
return -1 *(1 - y_pred)* x
else:
raise ValueError("y should be 0 or 1")
def grad_b_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred
elif y == 1:
return -1 *(1 - y_pred)
else:
raise ValueError("y should be 0 or 1")
def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
if initialise:
self.w = X.mean(axis=0)
self.b = 0
if display_loss:
loss = {}
for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
dw = 0
db = 0
for x, y in zip(X, Y):
if loss_fn == "mse":
dw += self.grad_w_mse(x, y)
db += self.grad_b_mse(x, y)
elif loss_fn == "ce":
dw += self.grad_w_ce(x, y)
db += self.grad_b_ce(x, y)
self.w -= learning_rate * dw
self.b -= learning_rate * db
if display_loss:
Y_pred = self.sigmoid(self.perceptron(X))
if loss_fn == "mse":
loss[i] = mean_squared_error(Y, Y_pred)
elif loss_fn == "ce":
loss[i] = log_loss(Y, Y_pred)
if display_loss:
plt.plot(loss.values())
plt.xlabel('Epochs')
if loss_fn == "mse":
plt.ylabel('Mean Squared Error')
elif loss_fn == "ce":
plt.ylabel('Log Loss')
plt.show()
min_key = min(loss, key=loss.get)
print(min_key)
print(loss.get(min_key))
def predict(self, X):
Y_pred = []
for x in X:
y_pred = self.sigmoid(self.perceptron(x))
Y_pred.append(y_pred)
return np.array(Y_pred )<load_pretrained> | print(df_train[['CategoricalFare','Survived']].groupby(['CategoricalFare'], as_index=False ).mean().sort_values(by="Survived", ascending=False)) | Titanic - Machine Learning from Disaster |
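For reference, a minimal usage sketch of the `SigmoidNeuron` class above on invented toy data (the class body expects `tqdm_notebook` in scope, hence the import):

```python
import numpy as np
from tqdm import tqdm_notebook  # used inside SigmoidNeuron.fit

X_toy = np.random.randn(100, 4)               # hypothetical inputs
Y_toy = (X_toy.sum(axis=1) > 0).astype(int)   # hypothetical binary labels
sn = SigmoidNeuron()
sn.fit(X_toy, Y_toy, epochs=50, learning_rate=0.01, loss_fn='ce')
preds = (sn.predict(X_toy) >= 0.5).astype(int)
print('toy accuracy:', (preds == Y_toy).mean())
```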
2,684,355 | languages = ['en','ta', 'hi']
images_train = read_all("../input/"+LEVEL+"_train/"+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
images_train.update(read_all("../input/"+LEVEL+"_train/"+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all("../input/"+LEVEL+"_test/kaggle_"+LEVEL, key_prefix='')
print(len(images_test))<normalization> | age_null_count = df_train['Age'].isnull().sum()
age_null_count | Titanic - Machine Learning from Disaster |
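`read_all` is not defined in this excerpt. A hedged sketch of an image loader matching its usage (directory path in, dict of flattened grayscale arrays keyed by a prefixed file name; the exact loading details are assumptions):

```python
import os
import numpy as np
from PIL import Image

def read_all(folder_path, key_prefix=''):
    # Assumed helper: read every image in folder_path as grayscale and
    # store it flattened, keyed by key_prefix + file name without extension.
    images = {}
    for file_name in os.listdir(folder_path):
        image = Image.open(os.path.join(folder_path, file_name)).convert('L')
        images[key_prefix + os.path.splitext(file_name)[0]] = np.array(image).flatten()
    return images
```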
2,684,355 | scaler = StandardScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test )<train_model> | df_train['Age'] = df_train['Age'].fillna(age_median)
df_train['CategoricalAge'] = pd.cut(df_train['Age'], 8)
print(df_train[['CategoricalAge', 'Survived']].groupby(['CategoricalAge'], as_index=False ).mean() ) | Titanic - Machine Learning from Disaster |
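`age_median` is used above without a visible definition; presumably it is the training-set median age computed in an earlier cell (an assumption):

```python
age_median = df_train['Age'].median()  # assumed prior definition
```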
2,684,355 | sn_ce = SigmoidNeuron()
sn_ce.fit(X_scaled_train, Y_train, epochs=100, learning_rate=1,loss_fn="ce", display_loss=True)
sn_ce.fit(X_scaled_train, Y_train, epochs=200, learning_rate=0.1,initialise=False,loss_fn="ce", display_loss=True)
sn_ce.fit(X_scaled_train, Y_train, epochs=200, learning_rate=0.01,initialise=False,loss_fn="ce", display_loss=True)
sn_ce.fit(X_scaled_train, Y_train, epochs=400, learning_rate=0.001,initialise=False,loss_fn="ce", display_loss=True)
sn_ce.fit(X_scaled_train, Y_train, epochs=500, learning_rate=0.0001,initialise=False,loss_fn="ce", display_loss=True )<predict_on_test> | df_test['Age'] = df_test['Age'].fillna(age_median)
| Titanic - Machine Learning from Disaster |
2,684,355 | def print_accuracy(sn):
Y_pred_train = sn.predict(X_scaled_train)
Y_pred_binarised_train =(Y_pred_train >= 0.5 ).astype("int" ).ravel()
accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
print("Train Accuracy : ", accuracy_train)
print("-"*50 )<compute_test_metric> | df_train['Title'] = df_train['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 0} ).astype(int ) | Titanic - Machine Learning from Disaster |
2,684,355 | print_accuracy(sn_ce )<save_to_csv> | df_test['Title'] = df_test['Title'].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 0} ).astype(int ) | Titanic - Machine Learning from Disaster |
2,684,355 | Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test =(Y_pred_test >= 0.5 ).astype("int" ).ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submission.csv", index=False )<compute_test_metric> | df_train['Sex'] = df_train['Sex'].map({'female' : 0, 'male' : 1} ).astype(int)
df_train.head() | Titanic - Machine Learning from Disaster |
2,684,355 | def smape_score(y_true, y_pred):
return np.mean(2 * np.abs(y_true - y_pred)/
(np.abs(y_true)+ np.abs(y_pred)))* 100<load_from_csv> | df_test['Sex'] = df_test['Sex'].map({'female' : 0, 'male' : 1} ).astype(int)
df_test.head() | Titanic - Machine Learning from Disaster |
2,684,355 | sat_pos = pd.read_csv('/kaggle/input/sputnik/train.csv', sep=',')
sat_pos['epoch'] = pd.to_datetime(sat_pos.epoch, format='%Y-%m-%d %H:%M:%S')
sat_pos.index = sat_pos.epoch
sat_pos.drop('epoch', axis=1, inplace=True)
sat_pos['error'] = np.linalg.norm(sat_pos[['x', 'y', 'z']].values -
sat_pos[['x_sim', 'y_sim', 'z_sim']].values, axis=1)
sat_pos.drop(['x', 'y', 'z', 'x_sim', 'y_sim', 'z_sim'], axis=1, inplace=True)
np.array_equal(np.unique(sat_pos['sat_id']), np.arange(0,600))
sat_set = np.arange(0,600)
sat_set.shape[0]
sat_pos_train = sat_pos[sat_pos['type'] == 'train']
sat_pos_test = sat_pos[sat_pos['type'] == 'test']
sat_pos_train.drop('type', axis=1, inplace=True )<find_best_model_class> | df_train.loc[df_train['Embarked'] == 'Empty']
df_train['Embarked'] = df_train['Embarked'].fillna("Empty")
df_train['Embarked'] = df_train['Embarked'].map({'S' : 0, 'C' : 1, 'Q' : 2, 'Empty' : 0}, na_action='ignore' ).astype(int)
df_train.head() | Titanic - Machine Learning from Disaster |
2,684,355 | def timeseriesCVscore(series, n_splits, loss_function):
errors = []
tscv = TimeSeriesSplit(n_splits=n_splits)
for train, test in tscv.split(series.error.values):
x_true = series.error.values[train]
y_true = series.error.values[test]
l_train = len(x_true)
l_test = len(y_true)
lags_errors = []
lag_period = 24
n_steps = l_test // lag_period
remainder = l_test % lag_period
for step in np.arange(1, n_steps + 1):
lag_error =(series.error.shift(lag_period)+
step *(series.error.shift(lag_period)-
series.error.shift(2*lag_period)))[l_train:l_train+lag_period]
lags_errors.append(lag_error)
if remainder != 0:
lag_error =(series.error.shift(lag_period)+
(n_steps + 1)*(series.error.shift(lag_period)-
series.error.shift(2*lag_period)))[l_train:l_train+remainder]
lags_errors.append(lag_error)
y_pred = np.concatenate(lags_errors)
error = loss_function(y_pred, y_true)
errors.append(error)
return np.mean(np.array(errors))<compute_train_metric> | df_test['Embarked'] = df_test['Embarked'].fillna("Empty")
df_test.loc[df_test['Embarked'] == 'Empty']
df_test['Embarked'] = df_test['Embarked'].map({'S' : 0, 'C' : 1, 'Q' : 2, 'Empty' : 0}, na_action='ignore' ).astype(int)
df_test.head() | Titanic - Machine Learning from Disaster |
2,684,355 | series = sat_pos_train[sat_pos_train['sat_id'] == 150]
n_splits = 100
timeseriesCVscore(series=series, n_splits=n_splits, loss_function=smape_score )<define_variables> | df_train['CategoricalAge'] = 0
df_train.loc[df_train['Age'] <= 10.368, 'CategoricalAge'] = 0
df_train.loc[(df_train['Age'] > 10.368)&(df_train['Age'] <= 20.315), 'CategoricalAge'] = 1
df_train.loc[(df_train['Age'] > 20.315)&(df_train['Age'] <= 30.263), 'CategoricalAge'] = 2
df_train.loc[(df_train['Age'] > 30.263)&(df_train['Age'] <= 40.21), 'CategoricalAge'] = 3
df_train.loc[(df_train['Age'] > 40.21)&(df_train['Age'] <= 50.158), 'CategoricalAge'] = 4
df_train.loc[(df_train['Age'] > 50.158)&(df_train['Age'] <= 60.105), 'CategoricalAge'] = 5
df_train.loc[(df_train['Age'] > 60.105)&(df_train['Age'] <= 70.052), 'CategoricalAge'] = 6
df_train.loc[df_train['Age'] > 70.052, 'CategoricalAge'] = 7
df_train.head() | Titanic - Machine Learning from Disaster |
2,684,355 | train_lengths = []
test_lengths = []
for sat_i in tqdm(sat_set):
series = sat_pos[sat_pos['sat_id'] == sat_i]
l_train = len(series[series['type'] == 'train'])
l_test = len(series[series['type'] == 'test'])
train_lengths.append(l_train)
test_lengths.append(l_test)
len_train_max = max(train_lengths)
<statistical_test> | df_test['CategoricalAge'] = 0
df_test.loc[df_test['Age'] <= 10.368, 'CategoricalAge'] = 0
df_test.loc[(df_test['Age'] > 10.368)&(df_test['Age'] <= 20.315), 'CategoricalAge'] = 1
df_test.loc[(df_test['Age'] > 20.315)&(df_test['Age'] <= 30.263), 'CategoricalAge'] = 2
df_test.loc[(df_test['Age'] > 30.263)&(df_test['Age'] <= 40.21), 'CategoricalAge'] = 3
df_test.loc[(df_test['Age'] > 40.21)&(df_test['Age'] <= 50.158), 'CategoricalAge'] = 4
df_test.loc[(df_test['Age'] > 50.158)&(df_test['Age'] <= 60.105), 'CategoricalAge'] = 5
df_test.loc[(df_test['Age'] > 60.105)&(df_test['Age'] <= 70.052), 'CategoricalAge'] = 6
df_test.loc[df_test['Age'] > 70.052, 'CategoricalAge'] = 7
df_test.head() | Titanic - Machine Learning from Disaster |
2,684,355 | predictions = []
for sat_i in tqdm(sat_set):
series = sat_pos[sat_pos['sat_id'] == sat_i]
l_train = len(series[series['type'] == 'train'])
l_test = len(series[series['type'] == 'test'])
lags_errors = []
lag_period = 24
n_steps = l_test // lag_period
remainder = l_test % lag_period
for step in np.arange(1, n_steps + 1):
lag_error =(series.error.shift(lag_period)+
step *(series.error.shift(lag_period)-
series.error.shift(2*lag_period)))[l_train:l_train+lag_period]
lags_errors.append(lag_error)
if remainder != 0:
lag_error =(series.error.shift(lag_period)+
(n_steps + 1)*(series.error.shift(lag_period)-
series.error.shift(2*lag_period)))[l_train:l_train+remainder]
lags_errors.append(lag_error)
predictions.append(np.concatenate(lags_errors))<save_to_csv> | df_train['CategoricalFare'] = 0
df_train.loc[df_train['Fare'] <= 7.55, 'CategoricalFare'] = 0
df_train.loc[(df_train['Fare'] > 7.55)&(df_train['Fare'] <= 7.854), 'CategoricalFare'] = 1
df_train.loc[(df_train['Fare'] > 7.854)&(df_train['Fare'] <= 8.05), 'CategoricalFare'] = 2
df_train.loc[(df_train['Fare'] > 8.05)&(df_train['Fare'] <= 10.5), 'CategoricalFare'] = 3
df_train.loc[(df_train['Fare'] > 10.5)&(df_train['Fare'] <= 14.454), 'CategoricalFare'] = 4
df_train.loc[(df_train['Fare'] > 14.454)&(df_train['Fare'] <= 21.679), 'CategoricalFare'] = 5
df_train.loc[(df_train['Fare'] > 21.679)&(df_train['Fare'] <= 27.0), 'CategoricalFare'] = 6
df_train.loc[(df_train['Fare'] > 27.0)&(df_train['Fare'] <= 39.688), 'CategoricalFare'] = 7
df_train.loc[(df_train['Fare'] > 39.688)&(df_train['Fare'] <= 77.958), 'CategoricalFare'] = 8
df_train.loc[df_train['Fare'] > 77.958, 'CategoricalFare'] = 9
df_train.head() | Titanic - Machine Learning from Disaster |
2,684,355 | forecast = np.concatenate(predictions)
subm = pd.DataFrame({'id': sat_pos_test['id'],
'error': forecast})
subm.to_csv('submission_3_5.csv', index=False )<set_options> | df_test['CategoricalFare'] = 0
df_test.loc[df_test['Fare'] <= 7.55, 'CategoricalFare'] = 0
df_test.loc[(df_test['Fare'] > 7.55)&(df_test['Fare'] <= 7.854), 'CategoricalFare'] = 1
df_test.loc[(df_test['Fare'] > 7.854)&(df_test['Fare'] <= 8.05), 'CategoricalFare'] = 2
df_test.loc[(df_test['Fare'] > 8.05)&(df_test['Fare'] <= 10.5), 'CategoricalFare'] = 3
df_test.loc[(df_test['Fare'] > 10.5)&(df_test['Fare'] <= 14.454), 'CategoricalFare'] = 4
df_test.loc[(df_test['Fare'] > 14.454)&(df_test['Fare'] <= 21.679), 'CategoricalFare'] = 5
df_test.loc[(df_test['Fare'] > 21.679)&(df_test['Fare'] <= 27.0), 'CategoricalFare'] = 6
df_test.loc[(df_test['Fare'] > 27.0)&(df_test['Fare'] <= 39.688), 'CategoricalFare'] = 7
df_test.loc[(df_test['Fare'] > 39.688)&(df_test['Fare'] <= 77.958), 'CategoricalFare'] = 8
df_test.loc[df_test['Fare'] > 77.958, 'CategoricalFare'] = 9
df_test.head() | Titanic - Machine Learning from Disaster |
2,684,355 | warnings.filterwarnings('ignore')
%matplotlib inline
def rmse(y_actual, y_predicted):
rmse = sqrt(mean_squared_error(y_actual, y_predicted))
return rmse
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs(( y_true - y_pred)/ y_true)) * 100
<load_from_csv> | drop_elements = ['Name', 'Ticket', 'Cabin', 'Age', 'Fare', 'Parch', 'SibSp', 'IsAlone']
df_train = df_train.drop(drop_elements, axis = 1)
df_test = df_test.drop(drop_elements, axis = 1 ) | Titanic - Machine Learning from Disaster |
2,684,355 | raw_data = pd.read_csv('/kaggle/input/sputnik/train.csv', sep =',')
raw_data.head(2 )<feature_engineering> | train = df_train.copy()
train = train.drop('PassengerId', axis=1)
train.head() | Titanic - Machine Learning from Disaster |
2,684,355 | def make_coord(df,coord,coord_sim):
ts_x=df[coord]
ts_x_sim = df[coord_sim]
eps = np.mean(df.epoch-df.epoch.shift(1)) /5
bad_num_plus = [i for i in range(1, df.shape[0]) if (df.epoch[i] - df.epoch[i-1]) < eps]
ts_x.drop(ts_x.index[bad_num_plus], inplace=True)
if len(bad_num_plus)>0:
val_x=ts_x_sim.values[:-len(bad_num_plus)]
else:
val_x=ts_x_sim.values
ts_x_sim = pd.Series(val_x)
ts_x_sim.index=ts_x.index
d_x = ts_x - ts_x_sim
train = d_x[df.type=='train']
val = d_x[df.type=='test']
fit1 = ExponentialSmoothing(np.asarray(train), seasonal_periods=24, seasonal='add' ).fit()
forecast = pd.Series(fit1.forecast(len(val)))
forecast.index = val.index
c=1.1
x_pd = train.append(c*forecast)
x_pd = x_pd + ts_x_sim
x_new=np.array(x_pd.values)
bad_num=np.array(bad_num_plus)-1
for i in bad_num:
x_new = np.insert(x_new,i,x_new[i])
x_new = pd.Series(x_new)
x_new.index = df.index
df[coord+'_new']=x_new
return df<feature_engineering> | test = df_test.copy()
test = test.drop('PassengerId', axis=1)
test.head() | Titanic - Machine Learning from Disaster |
2,684,355 | def make_coord(df,coord,coord_sim):
ts_x=df[coord]
ts_x_sim = df[coord_sim]
eps = np.mean(df.epoch-df.epoch.shift(1)) /5
bad_num_plus = [i for i in range(1, df.shape[0]) if (df.epoch[i] - df.epoch[i-1]) < eps]
ts_x.drop(ts_x.index[bad_num_plus], inplace=True)
if len(bad_num_plus)>0:
val_x=ts_x_sim.values[:-len(bad_num_plus)]
else:
val_x=ts_x_sim.values
ts_x_sim = pd.Series(val_x)
ts_x_sim.index=ts_x.index
d_x = ts_x - ts_x_sim
train = d_x[df.type=='train']
val = d_x[df.type=='test']
fit1 = ExponentialSmoothing(np.asarray(train), seasonal_periods=24, seasonal='add' ).fit()
forecast = pd.Series(fit1.forecast(len(val)))
forecast.index = val.index
c_1=(max(train[-24:])-np.mean(train[-24:])) /(max(train[-48:-24])-np.mean(train[-24:]))
c_2=(min(train[-24:])-np.mean(train[-24:])) /(min(train[-48:-24])-np.mean(train[-24:]))
for j in range(len(forecast)) :
if forecast[j]>np.mean(forecast[-48:]):
forecast[j]= np.mean(forecast[-48:])+(forecast[j]-np.mean(forecast[-48:])) *(c_1**(1+j/24))
else:
forecast[j]= np.mean(forecast[-48:])+(forecast[j]-np.mean(forecast[-48:])) *(c_2**(1+j/24))
x_pd = train.append(forecast)
x_pd = x_pd + ts_x_sim
x_new=np.array(x_pd.values)
bad_num=np.array(bad_num_plus)-1
for i in bad_num:
x_new = np.insert(x_new,i,x_new[i])
x_new = pd.Series(x_new)
x_new.index = df.index
df[coord+'_new']=x_new
return df<feature_engineering> | from sklearn.preprocessing import StandardScaler | Titanic - Machine Learning from Disaster |
2,684,355 | raw_train = raw_data
ids=[]
n_err=[]
err_by_obj=[]
for i in range(600):
df = raw_train[raw_train.sat_id==i]
df['error'] = np.linalg.norm(df[['x', 'y', 'z']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1)
df.sort_values('epoch', axis = 0, inplace=True)
df.epoch = pd.to_datetime(df.epoch, format='%Y-%m-%dT%H:%M:%S')
df.index = df.epoch
df = make_coord(df,'x','x_sim')
df = make_coord(df,'y','y_sim')
df = make_coord(df,'z','z_sim')
df['new_error'] = np.linalg.norm(df[['x_new', 'y_new', 'z_new']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1)
for j in range(df.shape[0]):
ids.append(df.id[j])
n_err.append(df.new_error[j])
<feature_engineering> | X_train = train.drop('Survived', axis=1 ).astype(float)
y_train = df_train['Survived']
X_test = test.astype(float ) | Titanic - Machine Learning from Disaster |
2,684,355 | raw_train['new_error']=n_err
raw_train.head(2 )<save_to_csv> | scaler = StandardScaler()
scaler.fit(X_train ) | Titanic - Machine Learning from Disaster |
2,684,355 | res = raw_train[['id','new_error','type']]
res.columns = ['id', 'error','type']
res=res[res.type=='test']
res=res[['id','error']]
res.to_csv('my_sub_sec.csv', index = False, header=True )<set_options> | X_train = scaler.transform(X_train ) | Titanic - Machine Learning from Disaster |
2,684,355 | %matplotlib inline
warnings.filterwarnings('ignore')
<categorify> | X_test = scaler.transform(X_test ) | Titanic - Machine Learning from Disaster |
2,684,355 | def illustration(func):
mutex = ThreadLock()
n_thread = ThreadValue('i',0)
@functools.wraps(func)
def wrapper(*args, **argv):
result = func(*args, **argv)
with mutex:
nonlocal n_thread
n_thread.value +=1
if n_thread.value % 5 ==0:
print(f"\r{n_thread.value} objects are processed...",end ='',flush = True)
return result
return wrapper<load_from_csv> | from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier | Titanic - Machine Learning from Disaster |
2,684,355 | data_root = "/kaggle/input/wwwkagglecomborisgruzdev/"
df = pd.read_csv(data_root+'/train.csv',sep = ',')
df.sort_values(['sat_id','epoch'],axis = 0,inplace =True)
df['epoch'] = pd.to_datetime(df.epoch,format='%Y-%m-%d %H:%M:%S')
df.index = df.epoch
df.drop('epoch', axis = 1, inplace = True)
df['error'] = np.linalg.norm(df[['x', 'y', 'z']].values - df[['x_sim', 'y_sim', 'z_sim']].values, axis=1)
df.head()<filter> | logreg = LogisticRegression(solver='lbfgs')
logreg.fit(X_train, y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, y_train)* 100, 3)
acc_log | Titanic - Machine Learning from Disaster |
2,684,355 | df_train = df[df.type == 'train']
df_test = df[df.type == 'test']<count_values> | svc = SVC(gamma='scale')
svc.fit(X_train, y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, y_train)* 100, 3)
acc_svc | Titanic - Machine Learning from Disaster |