kernel_id (int64) | prompt (string) | completion (string) | comp_name (string) |
|---|---|---|---|
4,252,362 | etr = ExtraTreesRegressor(n_estimators=200)
etr.fit(Xtrain, Ytrain)
c_val = 10
scores = cross_val_score(etr, Xtrain, Ytrain, cv=c_val, scoring=scorer_rmsle)
total = 0
for j in scores:
    total += j
acuracia_esperada = total / c_val
print(acuracia_esperada)<predict_on_test> | df["Cabin"].fillna("unknown", inplace=True)
test_df["Cabin"].fillna("unknown", inplace=True) | Titanic - Machine Learning from Disaster |
4,252,362 | Ytest_pred7 = rdf.predict(Xtest)
<create_dataframe> | cabins = [i[0] if i!= 'unknown' else 'unknown' for i in df['Cabin']]
test_cabins = [i[0] if i!= 'unknown' else 'unknown' for i in test_df['Cabin']]
df.drop(["Cabin"], axis = 1, inplace = True)
test_df.drop(["Cabin"], axis = 1, inplace = True)
df["cabintype"] = cabins
test_df["cabintype"] = test_cabins | Titanic - Machine Learning from Disaster |
4,252,362 | result7 = np.vstack(( test['Id'], Ytest_pred7)).T.astype(int)
x7 = ["Id","median_house_value"]
Resultado = pd.DataFrame(columns = x7, data = result7 )<import_modules> | df.drop(["cabintype"], axis = 1, inplace = True)
test_df.drop(["cabintype"], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
4,252,362 | import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier<load_from_csv> | def name_to_int(df):
    # Grab the token immediately after the surname's trailing comma (the title).
    name = df["Name"].values.tolist()
    titlelist = []
    for i in name:
        index = 1
        inew = i.split()
        if inew[0].endswith(","):
            index = 1
        elif inew[1].endswith(","):
            index = 2
        elif inew[2].endswith(","):
            index = 3
        titlelist.append(inew[index])
    return titlelist | Titanic - Machine Learning from Disaster |
4,252,362 | PATH = '../input/dm-and-pr-ws1920-machine-learning-competition/'
df_train = pd.read_csv(PATH+'train.csv')
df_test = pd.read_csv(PATH+'test.csv')
sample_sub = pd.read_csv(PATH+'sampleSubmission.csv' )<prepare_x_and_y> | titlelist = name_to_int(df)
df["titles"] = titlelist
df["titles"].value_counts()
testtitlelist = name_to_int(test_df)
test_df["titles"] = testtitlelist
df["titles"].value_counts() | Titanic - Machine Learning from Disaster |
4,252,362 | X = df_train.profession.values
y = df_train.target.values
X_test = df_test.profession.values<predict_on_test> | df["titles"].replace(["Mr.", "Miss.", "Mrs.", "Master.","sometitle"],[0,1,2,3,4], inplace = True)
df["titles"].astype("int64")
test_df["titles"].replace(["Mr.", "Miss.", "Mrs.", "Master.", "sometitle"],[0,1,2,3,4], inplace = True)
test_df["titles"].astype("int64")
df.drop(["Name"], axis = 1, inplace = True)
test_df.drop(["Name"], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
4,252,362 | model = DecisionTreeClassifier(max_depth=4)
model.fit(X.reshape(-1,1),y)
y_hat = model.predict_proba(X_test.reshape(-1,1)) [:,1]<prepare_output> | df.drop(["Fare","n_fam_mem","actual_fare"], axis = 1, inplace = True)
test_df.drop(["Fare","n_fam_mem","actual_fare"], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
4,252,362 | sample_sub['target'] = y_hat
sample_sub.head()
<save_to_csv> | labels = df["Survived"]
data = df.drop("Survived", axis = 1 ) | Titanic - Machine Learning from Disaster |
4,252,362 | sample_sub.to_csv('estimation_01.csv', index=False )<set_options> | final_clf = None
clf_names = ["Logistic Regression", "KNN(3)", "XGBoost Classifier", "Random forest classifier", "Decision Tree Classifier",
"Gradient Boosting Classifier", "Support Vector Machine"] | Titanic - Machine Learning from Disaster |
4,252,362 | %matplotlib inline
warnings.filterwarnings('ignore')
<load_from_csv> | classifiers = []
scores = []
for i in range(10):
    X_train, X_test, Y_train, Y_test = train_test_split(data, labels, test_size=0.1)
    tempscores = []
    lr_clf = LogisticRegression()
    lr_clf.fit(X_train, Y_train)
    tempscores.append(lr_clf.score(X_test, Y_test) * 100)
    knn3_clf = KNeighborsClassifier(n_neighbors=3)
    knn3_clf.fit(X_train, Y_train)
    tempscores.append(knn3_clf.score(X_test, Y_test) * 100)
    xgbc = XGBClassifier(n_estimators=15, seed=41)
    xgbc.fit(X_train, Y_train)
    tempscores.append(xgbc.score(X_test, Y_test) * 100)
    rf_clf = RandomForestClassifier(n_estimators=100)
    rf_clf.fit(X_train, Y_train)
    tempscores.append(rf_clf.score(X_test, Y_test) * 100)
    dt_clf = DecisionTreeClassifier()
    dt_clf.fit(X_train, Y_train)
    tempscores.append(dt_clf.score(X_test, Y_test) * 100)
    gb_clf = GradientBoostingClassifier()
    gb_clf.fit(X_train, Y_train)
    tempscores.append(gb_clf.score(X_test, Y_test) * 100)
    svm_clf = SVC(gamma="scale")
    svm_clf.fit(X_train, Y_train)
    tempscores.append(svm_clf.score(X_test, Y_test) * 100)
    scores.append(tempscores) | Titanic - Machine Learning from Disaster |
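The loop above hand-rolls repeated 90/10 holdout evaluation. A minimal sketch of the same protocol with scikit-learn's `ShuffleSplit`, assuming the `data`/`labels` frames from the previous rows:

```python
# Sketch: 10 random 90/10 train/test rounds without the manual loop.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit, cross_val_score

cv = ShuffleSplit(n_splits=10, test_size=0.1)
acc = cross_val_score(LogisticRegression(), data, labels, cv=cv)
print(acc.mean() * 100)  # average holdout accuracy, in percent
```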
4,252,362 | teste_base = pd.read_csv('../input/dataset_teste.csv')
teste = teste_base.copy()
teste.info()<define_variables> | scores = np.array(scores)
clfs = pd.DataFrame({"Classifier": clf_names})
for i in range(len(scores)):
    clfs['iteration' + str(i)] = scores[i].T
means = clfs.mean(axis=1)
means = means.values.tolist()
clfs["Average"] = means | Titanic - Machine Learning from Disaster |
4,252,362 | featuresTeste = [
"Postal Code",
"Latitude",
"Longitude",
"DOF Gross Floor Area",
"Year Built",
"Number of Buildings - Self-reported",
"Occupancy",
"Site EUI(kBtu/ft²)",
"Property GFA - Self-Reported(ft²)",
"Source EUI(kBtu/ft²)",
"Community Board",
"Council District",
"Census Tract",
"Weather Normalized Site EUI(kBtu/ft²)",
"Weather Normalized Site Electricity Intensity(kWh/ft²)",
"Weather Normalized Source EUI(kBtu/ft²)",
"Weather Normalized Site Natural Gas Use(therms)",
"Weather Normalized Site Electricity(kWh)",
"Water Use(All Water Sources )(kgal)",
"Water Intensity(All Water Sources )(gal/ft²)",
"Total GHG Emissions(Metric Tons CO2e)",
"Direct GHG Emissions(Metric Tons CO2e)",
"Indirect GHG Emissions(Metric Tons CO2e)",
"Electricity Use - Grid Purchase(kBtu)",
"Natural Gas Use(kBtu)",
"Manhattan", "Queens", "Brooklyn", "Staten Island"]
featuresTreino = featuresTeste + ["ENERGY STAR Score"]<data_type_conversions> | clfs.set_index("Classifier", inplace = True)
print("Accuracies : ")
clfs["Average"].head(10 ) | Titanic - Machine Learning from Disaster |
4,252,362 | def setCity(df):
    lista = df["Borough"].value_counts()
    for item in lista.index:
        df[item] = df["Borough"] == item
        df[item] = df[item].astype(int)
    return df<data_type_conversions> | def create_multiple():
    ensembles = []
    ensemble_scores = []
    for i in range(5):
        X_train, X_test, Y_train, Y_test = train_test_split(data, labels, test_size=0.07)
        svm_clf = SVC(gamma="scale")
        svm_clf = svm_clf.fit(X_train, Y_train)
        ensemble_scores.append(svm_clf.score(X_test, Y_test) * 100)
        ensembles.append(svm_clf)
    return ensembles, ensemble_scores
SVM_ensembles, SVM_ensemble_scores = create_multiple() | Titanic - Machine Learning from Disaster |
4,252,362 | def setPostalCode(df):
    df["Postal Code"] = df["Postal Code"].str.replace("-", "")
    df["Postal Code"] = df["Postal Code"].astype(int)
    return df<data_type_conversions> | def print_ensemble_score(ensemble_scores, model_name):
    e_score = 0
    for i in range(len(ensemble_scores)):
        e_score = e_score + ensemble_scores[i]
    print("SCORE(ENSEMBLE MODELS)" + str(model_name) + " : " + str(e_score / len(ensemble_scores)))
    return
print_ensemble_score(SVM_ensemble_scores, "SVM") | Titanic - Machine Learning from Disaster |
4,252,362 | def setMean(df, features):
    df = df.replace('Not Available', np.nan, regex=True)
    for item in features:
        if df[item].dtype == "object":
            df[item] = df[item].astype(float)
    for item in features:
        df[item] = df[item].fillna(df[item].mean())
    return df<feature_engineering> | def per_model_prediction(ensembles):
    test_data = test_df
    predictions_ensembles = []
    for clf in ensembles:
        temppredictions = clf.predict(test_data)
        predictions_ensembles.append(temppredictions)
    return predictions_ensembles | Titanic - Machine Learning from Disaster |
4,252,362 | def setGeneralData(df, features):
    # .loc avoids the chained-assignment warnings the original df[col][mask] pattern triggers.
    df.loc[df["Number of Buildings - Self-reported"] > 30, "Number of Buildings - Self-reported"] = 30
    df.loc[df["Number of Buildings - Self-reported"] <= 0, "Number of Buildings - Self-reported"] = 1
    df.loc[df["Occupancy"] <= 0, "Occupancy"] = 1
    df.loc[df["Site EUI(kBtu/ft²)"] <= 0, "Site EUI(kBtu/ft²)"] = 1
    df.loc[df["Property GFA - Self-Reported(ft²)"] >= 2500000, "Property GFA - Self-Reported(ft²)"] = 2500000
    df.loc[df["Source EUI(kBtu/ft²)"] < 1, "Source EUI(kBtu/ft²)"] = 1
    df.loc[df["Year Built"] < 1800, "Year Built"] = 1800
    df.loc[df["Year Built"] > 2015, "Year Built"] = 2015
    df = df.round(2)
    return df<categorify> | def get_predictions_modes(predictions_ensembles):
    # Majority vote: for each test row, take the most common label across the five models.
    final_predictions_list = []
    for i in range(len(predictions_ensembles[0])):
        temp = [predictions_ensembles[0][i], predictions_ensembles[1][i], predictions_ensembles[2][i],
                predictions_ensembles[3][i], predictions_ensembles[4][i]]
        final_predictions_list.append(temp)
    final_predictions_list = np.array(final_predictions_list)
    pred_modes = stats.mode(final_predictions_list, axis=1)
    final_predictions = []
    for i in pred_modes[0]:
        final_predictions.append(i[0])
    return final_predictions | Titanic - Machine Learning from Disaster |
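`get_predictions_modes` is a per-row majority vote over the five SVM ensemble members. A compact sketch of the same idea, assuming `predictions_ensembles` is a list of equal-length 1-D label arrays:

```python
# Sketch: per-row majority vote over stacked ensemble predictions.
import numpy as np
from scipy import stats

def majority_vote(predictions_ensembles):
    stacked = np.stack(predictions_ensembles, axis=1)  # shape: (n_samples, n_models)
    return stats.mode(stacked, axis=1)[0].ravel().tolist()
```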
4,252,362 | treino = setPostalCode(treino)
teste = setPostalCode(teste)
treino = setCity(treino)
teste = setCity(teste)
treino = setMean(treino, featuresTreino)
teste = setMean(teste, featuresTeste)
treino = setGeneralData(treino, featuresTreino)
teste = setGeneralData(teste, featuresTeste)
print(treino.shape)
print(teste.shape)
treino = treino.filter(items=featuresTreino)
teste = teste.filter(items=featuresTeste)
print(treino.shape)
print(teste.shape)
showCorr(treino )<split> | SVM_predictions_ensembles = per_model_prediction(SVM_ensembles)
SVM_final_predictions = get_predictions_modes(SVM_predictions_ensembles ) | Titanic - Machine Learning from Disaster |
4,252,362 | X_train, X_test, y_train, y_test = train_test_split(treino.drop(columns=['ENERGY STAR Score']), pd.DataFrame(treino["ENERGY STAR Score"]))<choose_model_class> | passengerid = [892 + i for i in range(len(SVM_final_predictions)) ]
sub = pd.DataFrame({'PassengerId': passengerid, 'Survived':SVM_final_predictions})
sub.to_csv('submission.csv', index = False ) | Titanic - Machine Learning from Disaster |
5,179,731 | finalModel = xgb.XGBClassifier(max_depth=10, learning_rate=0.1, n_estimators=1000, n_jobs=50)
finalModel<train_model> | train = pd.read_csv('/kaggle/input/train.csv')
train.head() | Titanic - Machine Learning from Disaster |
5,179,731 | finalModel.fit(X_train, y_train, eval_metric='mae' )<predict_on_test> | temp = train | Titanic - Machine Learning from Disaster |
5,179,731 | y_pred = finalModel.predict(X_test)
print("Mean squared error: %.2f" % mean_squared_error(y_test, y_pred))
print('Variance score: %.2f' % r2_score(y_test, y_pred))<predict_on_test> | train.drop('Cabin',inplace=True, axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | envio_final = pd.DataFrame(teste_base["Property Id"])
envio_final['score'] = finalModel.predict(teste ).round()
envio_final['score'] = envio_final["score"].astype(int)
sb.countplot(x='score',data=envio_final)
envio_final.describe().T<save_to_csv> | train.drop('PassengerId',inplace=True, axis=1)
train.drop('Ticket',inplace=True, axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | envio_final.to_csv('final.csv', index=False)<install_modules> | def fill_age(columns):
    Age = columns[0]
    Pclass = columns[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37.0
        elif Pclass == 2:
            return 29.0
        else:
            return 24.0
    else:
        return Age | Titanic - Machine Learning from Disaster |
5,179,731 | !pip install pymystem3<define_variables> | train['Age'] = train[['Age','Pclass']].apply(fill_age,axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | RAND_STATE = 37
N_JOBS = -1
VERB_LEVEL = 2
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
<load_from_csv> | names = train.Name.str.split(',')
names2 = []
for i in range(0, 891):
    names2.append(names[i][1].split('.')[0]) | Titanic - Machine Learning from Disaster |
5,179,731 | df_train = pd.read_csv('/kaggle/input/ocrv-intent-classification/train.csv', index_col='id')
df_train.head()<filter> | namedummies = pd.get_dummies(names2,drop_first=True ) | Titanic - Machine Learning from Disaster |
5,179,731 | df_train[df_train.text.isna() ]<remove_duplicates> | train = pd.concat([train,namedummies],axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | df_train = df_train.drop_duplicates()
df_train.info()<load_from_csv> | sex = pd.get_dummies(train['Sex'],drop_first=True)
emb = pd.get_dummies(train['Embarked'],drop_first=True ) | Titanic - Machine Learning from Disaster |
5,179,731 | df_test = pd.read_csv('/kaggle/input/ocrv-intent-classification/test.csv', index_col='id' )<string_transform> | train.drop(['Sex','Embarked','Name'],inplace=True, axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | df_test = df_test.fillna(' ' )<categorify> | train = pd.concat([train,sex,emb],axis=1); | Titanic - Machine Learning from Disaster |
5,179,731 | lemmatizator = Mystem()
mystem_preprocessor = lambda x: ''.join(lemmatizator.lemmatize(x)[:-1])
X_train = df_train.text.apply(mystem_preprocessor )<prepare_x_and_y> | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
5,179,731 | y_train = df_train.label<feature_engineering> | X_train, X_test, y_train, y_test = train_test_split(train.drop(['Fare','Survived'],axis=1), train['Survived'], test_size=0.33, random_state=42 ) | Titanic - Machine Learning from Disaster |
5,179,731 | X_test = df_test.text.apply(mystem_preprocessor )<load_pretrained> | logmodel = LogisticRegression()
logmodel.fit(X_train,y_train); | Titanic - Machine Learning from Disaster |
5,179,731 | nltk.download('stopwords')
russian_stopwords = stopwords.words('russian' )<train_on_grid> | pred = logmodel.predict(X_test ) | Titanic - Machine Learning from Disaster |
5,179,731 | mnnb_clf = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('mnnb', MultinomialNB()),])
parameters = {
'vect__ngram_range': [(1,1),(1,2),(1,3),],
'vect__min_df': [1,],
'vect__max_df': [1.],
'vect__stop_words': [None, russian_stopwords],
'tfidf__use_idf': [True, False],
'mnnb__alpha': [.1,.01,.001]}
gs_mnnb_clf= GridSearchCV(mnnb_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_mnnb_clf = gs_mnnb_clf.fit(X_train, y_train)
print(gs_mnnb_clf.best_score_, gs_mnnb_clf.best_params_ )<save_to_csv> | from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score | Titanic - Machine Learning from Disaster |
5,179,731 | def predict_submit(gs, df, name_suf):
    y_pred = gs.predict(df)
    submission = pd.DataFrame(y_pred, columns=['label'])
    submission.index.name = 'id'
    submission.to_csv(f'submission_{name_suf}.csv')<predict_on_test> | test = pd.read_csv('../input/test.csv')
temp_test = test | Titanic - Machine Learning from Disaster |
5,179,731 | predict_submit(gs_mnnb_clf, X_test, name_suf='mnnb' )<define_search_model> | pass_id = test.PassengerId
test.drop(['Cabin','PassengerId','Ticket'],inplace=True, axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | sgd_clf = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('sgdc', SGDClassifier(random_state=RAND_STATE))
])
parameters = {
'vect__ngram_range': [(1,3),],
'vect__stop_words': [None, russian_stopwords],
'tfidf__use_idf': [True, False],
'sgdc__loss': ['hinge', 'perceptron'],
'sgdc__penalty': ['l1', 'l2', 'elasticnet'],
'sgdc__alpha': [1.25e-5, 2.5e-5, 5e-5]}
gs_sgd_clf = GridSearchCV(sgd_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_sgd_clf = gs_sgd_clf.fit(X_train, y_train)
print(gs_sgd_clf.best_score_, gs_sgd_clf.best_params_ )<predict_on_test> | print("The mean before is ", test.Age.mean())
print(test.groupby('Pclass' ).mean() ['Age'] ) | Titanic - Machine Learning from Disaster |
5,179,731 | predict_submit(gs_sgd_clf, X_test, name_suf='sgd')<define_search_model> | def age_fill(columns):
    Age = columns[0]
    Pclass = columns[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 40.69
        elif Pclass == 2:
            return 28.83
        else:
            return 24.39
    else:
        return Age | Titanic - Machine Learning from Disaster |
5,179,731 | logreg_clf = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('logreg', LogisticRegression(random_state=RAND_STATE)) ,])
parameters = {
'vect__ngram_range': [(1,3),],
'vect__min_df': [1,],
'vect__max_df': [1.],
'vect__stop_words': [None, russian_stopwords],
'tfidf__use_idf': [True, False],
'logreg__C': [11,17,23],
'logreg__multi_class': ['ovr', 'multinomial'],
'logreg__solver': ['lbfgs', 'newton-cg']}
gs_logreg_clf = GridSearchCV(logreg_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_logreg_clf = gs_logreg_clf.fit(X_train, y_train)
print(gs_logreg_clf.best_score_, gs_logreg_clf.best_params_ )<predict_on_test> | test['Age'] = test[['Age','Pclass']].apply(age_fill,axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | predict_submit(gs_logreg_clf, X_test, name_suf='logreg')<feature_engineering> | namesx = test.Name.str.split(',')
namesx2 = []
for i in range(0, test.shape[0]):
    namesx2.append(namesx[i][1].split('.')[0]) | Titanic - Machine Learning from Disaster |
5,179,731 | word2vec_path = '../input/web-mystem-skipgram-500-2-2015/web.bin'
word2vec_size = 500
word2vec = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)
words = word2vec.index2word
w_rank = {}
for i, word in enumerate(words):
    word = word.split('_')[0]
    w_rank[word] = i
WORDS = w_rank
def words(text): return re.findall(r'\w+', text.lower())
def P(word):
    "Score of `word`: negated frequency rank, so more frequent words win under max."
    return - WORDS.get(word, 0)
def correction(word):
    "Most probable spelling correction for word."
    return max(candidates(word), key=P)
def candidates(word):
    "Generate possible spelling corrections for word."
    return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])
def known(words):
    "The subset of `words` that appear in the dictionary of WORDS."
    return set(w for w in words if w in WORDS)
def edits1(word):
    "All edits that are one edit away from `word`."
    letters = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
    splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    deletes = [L + R[1:] for L, R in splits if R]
    transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
    replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
    inserts = [L + c + R for L, R in splits for c in letters]
    return set(deletes + transposes + replaces + inserts)
def edits2(word):
    "All edits that are two edits away from `word`."
    return (e2 for e1 in edits1(word) for e2 in edits1(e1))<install_modules> | namexdummies = pd.get_dummies(namesx2, drop_first=True) | Titanic - Machine Learning from Disaster |
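This cell adapts Peter Norvig's classic spelling corrector to the Russian alphabet, scoring candidates by word2vec frequency rank (the negated rank in `P`) rather than raw counts. A hedged usage sketch; the example token is hypothetical and the result depends on the loaded vocabulary:

```python
# Sketch: correct a single token with the functions defined above.
token = 'превед'  # hypothetical misspelling
print(token, '->', correction(token))  # falls back to the token itself if nothing is known
```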
5,179,731 | !pip install pymorphy2<string_transform> | test = pd.concat([test,namexdummies],axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | def spell_norm(ser):
    tokenizer = RegexpTokenizer(r'[а-яА-Я]+')
    morph = pymorphy2.MorphAnalyzer()
    normolize = lambda t: morph.parse(t)[0].normal_form
    def spelling_correct(token):
        methods_stack = morph.parse(token)[0].methods_stack
        if str(morph.parse(token)[0].methods_stack[0][0]) != '<DictionaryAnalyzer>':
            return False
        for method in methods_stack[1:]:
            if 'unknown' in str(method[0]).lower():
                return False
        return True
    def norm_spelling(tokens):
        lemmas = []
        for token in tokens:
            if not spelling_correct(token):
                token = correction(token)
            lemma = normolize(token)
            lemmas.append(lemma)
        return lemmas
    return ser.apply(tokenizer.tokenize).apply(norm_spelling)
spell_norm(df_train.text[10:20])<feature_engineering> | sex1 = pd.get_dummies(test['Sex'], drop_first=True)
emb1 = pd.get_dummies(test['Embarked'],drop_first=True)
test.drop(['Sex','Embarked','Name'],inplace=True, axis=1)
test = pd.concat([test,sex1,emb1],axis=1); | Titanic - Machine Learning from Disaster |
5,179,731 | %%time
df_train['spell_norm_text'] = spell_norm(df_train.text ).apply(lambda x: ' '.join(x))<prepare_x_and_y> | test.fillna(test['Fare'].median() ,inplace=True ) | Titanic - Machine Learning from Disaster |
5,179,731 | X_train, y_train = df_train.spell_norm_text, df_train.label<feature_engineering> | new_train = train.drop([' Jonkheer',' the Countess',' Mme',' Mlle',' Major',' Lady',' Col', ' Don', ' Sir'],axis=1)
new_test = test.drop([' Dona'],axis=1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | %%time
df_test['spell_norm_text'] = spell_norm(df_test.text ).apply(lambda x: ' '.join(x))<prepare_x_and_y> | X_train1, X_test1, y_train1, y_test1 = train_test_split(new_train.drop(['Survived'],axis=1), new_train['Survived'], test_size=0.33, random_state=42 ) | Titanic - Machine Learning from Disaster |
5,179,731 | X_test = df_test.spell_norm_text<train_on_grid> | new_logmodel = LogisticRegression()
new_logmodel.fit(X_train1,y_train1 ) | Titanic - Machine Learning from Disaster |
5,179,731 | sgd_spnorm_clf = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('sgdc', SGDClassifier(random_state=RAND_STATE))
])
parameters = {
'vect__ngram_range': [(1,3),],
'vect__stop_words': [None,],
'tfidf__use_idf': [False, ],
'sgdc__loss': ['hinge', 'perceptron'],
'sgdc__penalty': ['l1', 'l2', 'elasticnet'],
'sgdc__alpha': [5e-6, 1.25e-5, 2.5e-5, 5e-5],
'sgdc__class_weight': [None, 'balanced']}
gs_sgd_spnorm_clf = GridSearchCV(sgd_spnorm_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_sgd_spnorm_clf = gs_sgd_spnorm_clf.fit(X_train, y_train)
print(gs_sgd_spnorm_clf.best_score_, gs_sgd_spnorm_clf.best_params_ )<predict_on_test> | prediction = new_logmodel.predict(new_test ) | Titanic - Machine Learning from Disaster |
5,179,731 | predict_submit(gs_sgd_spnorm_clf, X_test, name_suf='sgd_spnorm' )<define_search_space> | output = pd.DataFrame({ 'PassengerId' : pass_id, 'Survived': prediction } ) | Titanic - Machine Learning from Disaster |
5,179,731 | logreg_spnorm_clf = Pipeline(
[('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('logreg', LogisticRegression(random_state=RAND_STATE)) ,])
parameters = {
'vect__ngram_range': [(1,3),],
'vect__min_df': [1,],
'vect__max_df': [1.],
'vect__stop_words': [None,],
'tfidf__use_idf': [False,],
'logreg__C': [8,11,17],
'logreg__multi_class': ['ovr', 'multinomial'],
'logreg__solver': ['lbfgs', 'newton-cg'],
'logreg__class_weight': [None, 'balanced']}
gs_logreg_spnorm_clf = GridSearchCV(logreg_spnorm_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_logreg_spnorm_clf = gs_logreg_spnorm_clf.fit(X_train, y_train)
print(gs_logreg_spnorm_clf.best_score_, gs_logreg_spnorm_clf.best_params_ )<predict_on_test> | output.to_csv('titanic-predictions.csv', index = False ) | Titanic - Machine Learning from Disaster |
5,179,731 | predict_submit(gs_logreg_spnorm_clf, X_test, name_suf='logreg_spnorm' )<feature_engineering> | from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix | Titanic - Machine Learning from Disaster |
5,179,731 | vectorizer = TfidfVectorizer(lowercase=True, analyzer='word', ngram_range=(1,3), use_idf=False)
train_vectors = vectorizer.fit_transform(df_train.spell_norm_text.apply(lambda tr_vect: np.str_(tr_vect)) )<string_transform> | print(classification_report(y_test1,pred)) | Titanic - Machine Learning from Disaster |
5,179,731 | def tagged_tokens(ser, tokenizer=RegexpTokenizer(r'\w+'), lemmatizator=Mystem()):
    def lemteg(tokens, tokenizer=tokenizer, lemmatizator=lemmatizator):
        lemtegs = []
        for token in tokens:
            try:
                tag = lemmatizator.analyze(token)[0]['analysis'][0]['gr'].split(',')[0].split('=')[0]
            except:
                tag = 'XXX'
            lemtegs.append(f'{token}_{tag}')
        return lemtegs
    return ser.apply(tokenizer.tokenize).apply(lemteg)
tagged_tokens(df_train.spell_norm_text[:10])<feature_engineering> | score = logmodel.score(X_test, y_test)
print("Accuracy for train.csv is ",score*100,"%" ) | Titanic - Machine Learning from Disaster |
5,179,731 | df_train['tagged_tokens']= tagged_tokens(df_train.spell_norm_text )<normalization> | score1 = new_logmodel.score(X_test1, y_test1)
print("Accuracy for test.csv is ",score1*100,"%" ) | Titanic - Machine Learning from Disaster |
2,300,413 | def get_average_word2vec(tokens_list, vector, vec_size, generate_missing=False):
    if len(tokens_list) < 1:
        return np.zeros(vec_size)
    if generate_missing:
        vectorized = [vector[word] if word in vector else np.random.rand(vec_size) for word in tokens_list]
    else:
        vectorized = [vector[word] if word in vector else np.zeros(vec_size) for word in tokens_list]
    length = len(vectorized)
    summed = np.sum(vectorized, axis=0)
    averaged = np.divide(summed, length)
    return averaged
def get_word2vec_embeddings(vectors, ser, vec_size, generate_missing=False):
    embeddings = ser.apply(
        lambda x: get_average_word2vec(x, vectors, vec_size=vec_size, generate_missing=generate_missing))
    return sparse.csr_matrix(list(embeddings))<categorify> | data_train = pd.read_csv("../input/train.csv")
data_test = pd.read_csv("../input/test.csv")
print("Train info:\n")
data_train.info()
print("-" * 40)
print("Test info:\n")
data_test.info()
data_train.head() | Titanic - Machine Learning from Disaster |
2,300,413 | train_embeddings = get_word2vec_embeddings(word2vec, df_train.tagged_tokens, vec_size=word2vec_size )<prepare_x_and_y> | Age=pd.concat([data_train[['Title','Age']],data_test[['Title','Age']]],axis=0)
print(Age.groupby('Title')['Age'].mean())
data_train.loc[(data_train['Age'].isnull())&(data_train['Title']=='Master'),'Age'] = Age[Age['Title']=='Master'].Age.mean()
data_train.loc[(data_train['Age'].isnull())&(data_train['Title']=='Miss'),'Age'] = Age[Age['Title']=='Miss'].Age.mean()
data_train.loc[(data_train['Age'].isnull())&(data_train['Title']=='Mr'),'Age'] = Age[Age['Title']=='Mr'].Age.mean()
data_train.loc[(data_train['Age'].isnull())&(data_train['Title']=='Mrs'),'Age'] = Age[Age['Title']=='Mrs'].Age.mean()
data_train.loc[(data_train['Age'].isnull())&(data_train['Title']=='Rare'),'Age'] = Age[Age['Title']=='Rare'].Age.mean()
data_test.loc[(data_test['Age'].isnull())&(data_test['Title']=='Master'),'Age'] = Age[Age['Title']=='Master'].Age.mean()
data_test.loc[(data_test['Age'].isnull())&(data_test['Title']=='Miss'),'Age'] = Age[Age['Title']=='Miss'].Age.mean()
data_test.loc[(data_test['Age'].isnull())&(data_test['Title']=='Mr'),'Age'] = Age[Age['Title']=='Mr'].Age.mean()
data_test.loc[(data_test['Age'].isnull())&(data_test['Title']=='Mrs'),'Age'] = Age[Age['Title']=='Mrs'].Age.mean()
data_test.loc[(data_test['Age'].isnull())&(data_test['Title']=='Rare'),'Age'] = Age[Age['Title']=='Rare'].Age.mean() | Titanic - Machine Learning from Disaster |
2,300,413 | X_train, y_train = sparse.hstack(( train_embeddings, train_vectors)) , df_train.label<feature_engineering> | data_test[data_test['Fare'].isnull() ] | Titanic - Machine Learning from Disaster |
2,300,413 | test_vectors = vectorizer.transform(df_test.spell_norm_text.apply(lambda tr_vect: np.str_(tr_vect)))
df_test['tagged_tokens']= tagged_tokens(df_test.spell_norm_text)
test_embeddings = get_word2vec_embeddings(word2vec, df_test.tagged_tokens, vec_size=word2vec_size)
X_test = sparse.hstack(( test_embeddings, test_vectors))<train_on_grid> | Fare=pd.concat([data_train[['Fare','Pclass','Embarked','Parch','Sex','SibSp','Title']],
data_test[['Fare','Pclass','Embarked','Parch','Sex','SibSp','Title']]],axis=0)
data_test['Fare'].fillna(Fare[(Fare["Pclass"]==3)&(Fare["Embarked"]=='S')&(Fare["SibSp"]==0)&
(Fare["Parch"]==0)&(Fare["Sex"]=='male')&(Fare["Title"]=='Mr')].Fare.median() ,inplace=True)
data_test.iloc[152] | Titanic - Machine Learning from Disaster |
2,300,413 | sgd_w2v_clf = Pipeline(
[('sgdc', SGDClassifier(random_state=RAND_STATE)) ])
parameters = {
'sgdc__loss': ['hinge'],
'sgdc__penalty': ['l1', 'l2', 'elasticnet'],
'sgdc__alpha': [1.25e-5, 2.5e-5, 5e-5]}
gs_sgd_w2v_clf = GridSearchCV(sgd_w2v_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_sgd_w2v_clf = gs_sgd_w2v_clf.fit(X_train, y_train)
print(gs_sgd_w2v_clf.best_score_, gs_sgd_w2v_clf.best_params_ )<predict_on_test> | data_train['Cabin'] = data_train['Cabin'].str[0]
data_test['Cabin'] = data_test['Cabin'].str[0]
Cabin=pd.concat([data_train[['Cabin','Embarked','Pclass','Fare']],data_test[['Cabin','Embarked','Pclass','Fare']]],axis=0)
Cabin.groupby(['Pclass','Embarked','Cabin'])['Fare'].max() | Titanic - Machine Learning from Disaster |
2,300,413 | predict_submit(gs_sgd_w2v_clf, X_test, name_suf='sgd_w2v' )<train_on_grid> | data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='C')&(data_train.Fare<=56.9292),'Cabin']='A'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='C')&(data_train.Fare>56.9292)&(data_train.Fare<=113.2750),'Cabin']='D'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='C')&(data_train.Fare>113.2750)&(data_train.Fare<=134.5000),'Cabin']='E'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='C')&(data_train.Fare>134.5000)&(data_train.Fare<=227.5250),'Cabin']='C'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='C')&(data_train.Fare>227.5250),'Cabin']='B'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare<=35.5000),'Cabin']='T'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare>35.5000)&(data_train.Fare<=77.9583),'Cabin']='D'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare>77.9583)&(data_train.Fare<=79.6500),'Cabin']='E'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare>79.6500)&(data_train.Fare<=81.8583),'Cabin']='A'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare>81.8583)&(data_train.Fare<=211.3375),'Cabin']='B'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&
(data_train.Embarked=='S')&(data_train.Fare>211.3375),'Cabin']='C'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==1)&(data_train.Embarked=='Q'),'Cabin']='C'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==2)&
(data_train.Embarked=='S')&(data_train.Fare<=13.0000),'Cabin']=random.choice(['D','E'])
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==2)&
(data_train.Embarked=='S')&(data_train.Fare>13.0000),'Cabin']='F'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==2)&(data_train.Embarked=='C'),'Cabin']='D'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==2)&(data_train.Embarked=='Q'),'Cabin']='E'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==3)&
(data_train.Embarked=='S')&(data_train.Fare<=7.6500),'Cabin']='F'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==3)&
(data_train.Embarked=='S')&(data_train.Fare>7.6500)&(data_train.Fare<=12.4750),'Cabin']='E'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==3)&
(data_train.Embarked=='S')&(data_train.Fare>12.4750),'Cabin']='G'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==3)&(data_train.Embarked=='C'),'Cabin']='F'
data_train.loc[(data_train.Cabin.isnull())&(data_train.Pclass==3)&(data_train.Embarked=='Q'),'Cabin']='F'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='C')&(data_test.Fare<=56.9292),'Cabin']='A'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='C')&(data_test.Fare>56.9292)&(data_test.Fare<=113.2750),'Cabin']='D'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='C')&(data_test.Fare>113.2750)&(data_test.Fare<=134.5000),'Cabin']='E'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='C')&(data_test.Fare>134.5000)&(data_test.Fare<=227.5250),'Cabin']='C'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='C')&(data_test.Fare>227.5250),'Cabin']='B'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare<=35.5000),'Cabin']='T'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare>35.5000)&(data_test.Fare<=77.9583),'Cabin']='D'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare>77.9583)&(data_test.Fare<=79.6500),'Cabin']='E'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare>79.6500)&(data_test.Fare<=81.8583),'Cabin']='A'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare>81.8583)&(data_test.Fare<=211.3375),'Cabin']='B'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&
(data_test.Embarked=='S')&(data_test.Fare>211.3375),'Cabin']='C'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==1)&(data_test.Embarked=='Q'),'Cabin']='C'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==2)&
(data_test.Embarked=='S')&(data_test.Fare<=13.0000),'Cabin']=random.choice(['D','E'])
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==2)&
(data_test.Embarked=='S')&(data_test.Fare>13.0000),'Cabin']='F'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==2)&(data_test.Embarked=='C'),'Cabin']='D'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==2)&(data_test.Embarked=='Q'),'Cabin']='E'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==3)&
(data_test.Embarked=='S')&(data_test.Fare<=7.6500),'Cabin']='F'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==3)&
(data_test.Embarked=='S')&(data_test.Fare>7.6500)&(data_test.Fare<=12.4750),'Cabin']='E'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==3)&
(data_test.Embarked=='S')&(data_test.Fare>12.4750),'Cabin']='G'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==3)&(data_test.Embarked=='C'),'Cabin']='F'
data_test.loc[(data_test.Cabin.isnull())&(data_test.Pclass==3)&(data_test.Embarked=='Q'),'Cabin']='F'
print(data_test.Cabin.isnull().any(), '\n')
print(data_train.Cabin.isnull().any()) | Titanic - Machine Learning from Disaster |
2,300,413 | logreg_w2v_clf = Pipeline(
[('logreg', LogisticRegression(random_state=RAND_STATE)) ,])
parameters = {
'logreg__C': [11, 14, 17],
'logreg__multi_class': ['ovr', 'multinomial'],
'logreg__solver': ['lbfgs', 'newton-cg']}
gs_logreg_w2v_clf = GridSearchCV(logreg_w2v_clf, parameters, n_jobs=N_JOBS, verbose=VERB_LEVEL)
gs_logreg_w2v_clf = gs_logreg_w2v_clf.fit(X_train, y_train)
print(gs_logreg_w2v_clf.best_score_, gs_logreg_w2v_clf.best_params_ )<predict_on_test> | Cabin=pd.concat([data_train[['Cabin','Embarked','Pclass','Fare']],data_test[['Cabin','Embarked','Pclass','Fare']]],axis=0)
data_train[data_train['Embarked'].isnull() ] | Titanic - Machine Learning from Disaster |
2,300,413 | predict_submit(gs_logreg_w2v_clf, X_test, name_suf='logreg_w2v' )<load_from_csv> | print('Train columns with null values:
',data_train.isnull().sum())
print("-"*40)
print('Test columns with null values:
',data_test.isnull().sum() ) | Titanic - Machine Learning from Disaster |
2,300,413 | df_train = pd.read_csv('../input/web-club-recruitment-2018/train.csv')
df_test = pd.read_csv('../input/web-club-recruitment-2018/test.csv')
corr_matrix=df_train.corr()
corr_matrix['Y'].sort_values()
<drop_column> | data_train['Sex'].replace(['male','female'],[0,1],inplace=True)
data_train['Embarked'].replace(['C','Q','S'],[0,1,2],inplace=True)
data_train['Title'].replace(['Master','Miss','Mr','Mrs','Rare'],[0,1,2,3,4],inplace=True)
data_train['Cabin'].replace(['A','B','C','D','E','F','G','T'],[0,1,2,3,4,5,6,7],inplace=True)
data_train.loc[data_train['Age']<=16,'Age']=0
data_train.loc[(data_train['Age']>16)&(data_train['Age']<=32),'Age']=1
data_train.loc[(data_train['Age']>32)&(data_train['Age']<=48),'Age']=2
data_train.loc[(data_train['Age']>48)&(data_train['Age']<=64),'Age']=3
data_train.loc[data_train['Age']>64,'Age']=4
data_test['Sex'].replace(['male','female'],[0,1],inplace=True)
data_test['Embarked'].replace(['C','Q','S'],[0,1,2],inplace=True)
data_test['Title'].replace(['Master','Miss','Mr','Mrs','Rare'],[0,1,2,3,4],inplace=True)
data_test['Cabin'].replace(['A','B','C','D','E','F','G','T'],[0,1,2,3,4,5,6,7],inplace=True)
data_test.loc[data_test['Age']<=16,'Age']=0
data_test.loc[(data_test['Age']>16)&(data_test['Age']<=32),'Age']=1
data_test.loc[(data_test['Age']>32)&(data_test['Age']<=48),'Age']=2
data_test.loc[(data_test['Age']>48)&(data_test['Age']<=64),'Age']=3
data_test.loc[data_test['Age']>64,'Age']=4 | Titanic - Machine Learning from Disaster |
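The `.loc` threshold chains above assign ordinal age buckets 0-4. A sketch of the same bucketing with `pandas.cut`, under the assumption that `data_train`/`data_test` still hold raw ages at this point:

```python
# Sketch: five ordinal age bins in one call; edges match the chains above.
import pandas as pd

bins = [-1, 16, 32, 48, 64, 200]
for frame in (data_train, data_test):
    frame['Age'] = pd.cut(frame['Age'], bins=bins, labels=False)
```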
2,300,413 | df_train=df_train.drop('X12',axis=1)
df_test=df_test.drop('X12',axis=1)
train_ID = df_train['id']
test_ID = df_test['id']
df_train=df_train.drop('id',axis=1)
df_test=df_test.drop('id',axis=1)
df_train['X1']=np.log1p(df_train['X1'])
df_test['X1']=np.log1p(df_test['X1'])
df_train<create_dataframe> | data_train['Family_Size']=0
data_train['Family_Size']=data_train['Parch']+data_train['SibSp']
data_test['Family_Size']=0
data_test['Family_Size']=data_test['Parch']+data_test['SibSp'] | Titanic - Machine Learning from Disaster |
2,300,413 | imputer=Imputer(strategy="median")
col=df_train.columns
cols=df_test.columns
df_train=imputer.fit_transform(df_train)
df_train=pd.DataFrame(df_train,columns=col)
df_test=imputer.fit_transform(df_test)
df_test=pd.DataFrame(df_test,columns=cols)
<prepare_x_and_y> | data_train.loc[data_train['Fare']<=8.0500,'Fare']=0
data_train.loc[(data_train['Fare']>8.0500)&(data_train['Fare']<=15.0458),'Fare']=1
data_train.loc[(data_train['Fare']>15.0458)&(data_train['Fare']<=60.0000),'Fare']=2
data_train.loc[data_train['Fare']>60.0000,'Fare']=3
data_test.loc[data_test['Fare']<=8.0500,'Fare']=0
data_test.loc[(data_test['Fare']>8.0500)&(data_test['Fare']<=15.0458),'Fare']=1
data_test.loc[(data_test['Fare']>15.0458)&(data_test['Fare']<=60.0000),'Fare']=2
data_test.loc[data_test['Fare']>60.0000,'Fare']=3
f,ax=plt.subplots(1,2,figsize=(12,5))
sns.countplot('Fare',data=data_train,hue='Survived',ax=ax[0])
ax[0].set_title('Survived',color = 'r',fontsize=15)
sns.barplot(x=data_train.groupby(['Fare'])['Survived'].mean().index,
y=data_train.groupby(['Fare'])['Survived'].mean().values,
ax=ax[1])
ax[1].set_title('Rate of the Survived by Fare',color = 'r',fontsize=15)
plt.show() | Titanic - Machine Learning from Disaster |
2,300,413 | train_X = df_train.loc[:, 'X1':'X23']
train_y = df_train.loc[:, 'Y']<count_missing_values> | data_train.drop(['Name','Ticket','PassengerId'],axis=1,inplace=True)
data_test.drop(['Name','Ticket','PassengerId'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
2,300,413 | col_mask=train_X.isnull().any(axis=0)
col_mask<split> | y=data_train['Survived']
x=data_train.drop(['Survived'],axis=1 ) | Titanic - Machine Learning from Disaster |
2,300,413 | dev_X, val_X, dev_y, val_y = train_test_split(train_X, train_y, test_size = 0.2, random_state = 42)
params = {'objective': 'binary:logistic','eval_metric': 'rmse', 'eta': 0.005, 'max_depth': 10, 'subsample': 0.7, 'colsample_bytree': 0.5, 'alpha':0, 'silent': True, 'random_state':5}
tr_data = xgb.DMatrix(train_X, train_y)
va_data = xgb.DMatrix(val_X, val_y)
watchlist = [(tr_data, 'train'),(va_data, 'valid')]
model_xgb = xgb.train(params, tr_data, 2000, watchlist, maximize=False, early_stopping_rounds = 30, verbose_eval=100)
dft = xgb.DMatrix(df_test)
xgb_pred_y = np.log1p(model_xgb.predict(dft, ntree_limit=model_xgb.best_ntree_limit))<save_to_csv> | x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=1)
print("x train: ",x_train.shape)
print("x test: ",x_test.shape)
print("y train: ",y_train.shape)
print("y test: ",y_test.shape ) | Titanic - Machine Learning from Disaster |
2,300,413 | result = pd.DataFrame()
result['id']=test_ID
result['predicted_val']=xgb_pred_y
print(result.head())
result.to_csv('output.csv',index=False )<set_options> | from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve | Titanic - Machine Learning from Disaster |
2,300,413 | %reload_ext autoreload
%autoreload 2
%matplotlib inline<install_modules> | kfold = KFold(n_splits=10, random_state=22)
mean=[]
accuracy=[]
std=[]
def model(algorithm, x_train_, y_train_, x_test_, y_test_):
    algorithm.fit(x_train_, y_train_)
    predicts = algorithm.predict(x_test_)
    prediction = pd.DataFrame(predicts)
    prob = algorithm.predict_proba(x_test_)[:, 1]
    cross_val = cross_val_score(algorithm, x_train_, y_train_, cv=kfold)
    mean.append(cross_val.mean())
    std.append(cross_val.std())
    accuracy.append(cross_val)
    print('{}'.format(algorithm).split("(")[0].strip(), '\n')
    print("CV std:", cross_val.std(), "\n")
    print("CV scores:", cross_val, "\n")
    print("CV mean:", cross_val.mean())
    fpr, tpr, thresholds = roc_curve(y_test_, prob)
    f, ax = plt.subplots(1, 2, figsize=(11, 4))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr, tpr)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC')
    y_pred = cross_val_predict(algorithm, x, y, cv=10)
    sns.heatmap(confusion_matrix(y, y_pred), ax=ax[0], annot=True, fmt='2.0f')
    ax[0].set_title('Confusion Matrix for {}'.format(algorithm).split("(")[0].strip())
    plt.subplots_adjust(wspace=0.3)
    plt.close(0)
    plt.show()
| Titanic - Machine Learning from Disaster |
2,300,413 | ! pip install pretrainedmodels<set_options> | grids = {'n_neighbors': np.arange(1,50)}
grid = GridSearchCV(estimator=KNeighborsClassifier() , param_grid=grids, cv=kfold)
grid.fit(x_train, y_train)
print("Tuned hyperparameter k: {}".format(grid.best_params_),'
')
print("Best score: {}".format(grid.best_score_)) | Titanic - Machine Learning from Disaster |
2,300,413 | %matplotlib inline
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
style.use('fivethirtyeight')
sns.set(style='whitegrid',color_codes=True)
<import_modules> | knn = KNeighborsClassifier(n_neighbors = grid.best_estimator_.n_neighbors)
model(knn,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | from fastai import *
from fastai.vision import *
import pretrainedmodels<define_variables> | Cs = [0.001, 0.01, 0.1, 1, 10]
gammas = [0.001, 0.01, 0.1, 1]
grids = {'C': Cs, 'gamma' : gammas}
grid = GridSearchCV(estimator=svm.SVC(kernel='linear'), param_grid=grids, cv=kfold)
grid.fit(x_train, y_train)
print("Tuned hyperparameter k: {}".format(grid.best_params_),'
')
print("Best score: {}".format(grid.best_score_)) | Titanic - Machine Learning from Disaster |
2,300,413 | train_dir = '../input/ifood-2019-fgvc6/train_set/train_set/'
val_dir = '../input/ifood-2019-fgvc6/val_set/val_set/'<load_from_csv> | svm = svm.SVC(kernel='linear',C=grid.best_estimator_.C,gamma=grid.best_estimator_.gamma,probability=True)
model(svm,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | train_df = pd.read_csv('../input/ifood-2019-fgvc6/train_labels.csv')
train_df['path'] = train_df['img_name'].map(lambda x: os.path.join(train_dir,x))
val_df = pd.read_csv('../input/ifood-2019-fgvc6/val_labels.csv')
val_df['path'] = val_df['img_name'].map(lambda x: os.path.join(val_dir,x))<concatenate> | nb = GaussianNB()
model(nb,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | df = pd.concat([train_df, val_df], ignore_index=True)
df.head()<define_variables> | grids={'min_samples_split' : range(10,500,20),'max_depth': range(1,20,2)}
grid = GridSearchCV(estimator=DecisionTreeClassifier() , param_grid=grids, cv=kfold)
grid.fit(x_train, y_train)
print("Tuned hyperparameter k: {}".format(grid.best_params_),'
')
print("Best score: {}".format(grid.best_score_)) | Titanic - Machine Learning from Disaster |
2,300,413 | val_idx = [i for i in range(len(train_df), len(df)) ]<define_variables> | dtc = DecisionTreeClassifier(min_samples_split=grid.best_estimator_.min_samples_split, max_depth=grid.best_estimator_.max_depth)
model(dtc,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | sz = 256
bs = 32<define_variables> | grids={'n_estimators':range(100,500,100)}
grid = GridSearchCV(estimator=RandomForestClassifier() , param_grid=grids, cv=kfold)
grid.fit(x_train, y_train)
print("Tuned hyperparameter k: {}".format(grid.best_params_),'
')
print("Best score: {}".format(grid.best_score_)) | Titanic - Machine Learning from Disaster |
2,300,413 | data.show_batch(rows=3, figsize=(12,9))<set_options> | rf = RandomForestClassifier(n_estimators=grid.best_estimator_.n_estimators)
model(rf,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | gc.collect()<compute_test_metric> | grids = {'C': np.logspace(-3, 3, 7), 'penalty': ['l1', 'l2']}
grid = GridSearchCV(estimator=LogisticRegression() , param_grid=grids, cv=kfold)
grid.fit(x_train, y_train)
print("Tuned hyperparameter k: {}".format(grid.best_params_),'
')
print("Best score: {}".format(grid.best_score_)) | Titanic - Machine Learning from Disaster |
2,300,413 | def top_3_accuracy(preds, targs):
    return top_k_accuracy(preds, targs, 3)<choose_model_class> | lr = LogisticRegression(C=grid.best_estimator_.C, penalty=grid.best_estimator_.penalty)
model(lr,x_train,y_train,x_test,y_test ) | Titanic - Machine Learning from Disaster |
2,300,413 | model_name = 'se_resnext101_32x4d'
def get_cadene_model(pretrained=True, model_name='se_resnext101_32x4d'):
    if pretrained:
        arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
    else:
        arch = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained=None)
    return arch<choose_model_class> | classifiers=['KNN','Svm','Naive Bayes','Decision Tree','Random Forest','Logistic Regression']
models=pd.DataFrame({'CV mean':mean,'Std':std},index=classifiers)
print(models) | Titanic - Machine Learning from Disaster |
2,300,413 | <choose_model_class><EOS> | submission = pd.DataFrame({"PassengerId": pd.read_csv("../input/test.csv")["PassengerId"], "Survived": dtc.predict(data_test)})
submission.to_csv('titanic.csv', index=False) | Titanic - Machine Learning from Disaster |
10,330,948 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class> | warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
10,330,948 | stage = 1
csvlogger = callbacks.CSVLogger(learn=learn, filename='history_stage_'+str(stage)+'_'+model_name, append=True)
saveModel = callbacks.SaveModelCallback(learn, every='epoch',
monitor='top_3_accuracy', mode='max',
name='stage_'+str(stage))
reduceLR = callbacks.ReduceLROnPlateauCallback(learn=learn, monitor = 'top_3_accuracy', mode = 'max', patience = 1, factor = 0.5)
<train_model> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
10,330,948 | lr = 3e-3
learn.fit_one_cycle(4, slice(lr))<save_model> | def merge_data(train, test):
    return pd.concat([train, test], sort=True).reset_index(drop=True)
def divide_data(data):
    return data.iloc[:891], data.iloc[891:].drop(['Survived'], axis=1)
data = merge_data(train, test) | Titanic - Machine Learning from Disaster |
10,330,948 | learn.save('stage-1-SE_Resnext101' )<choose_model_class> | data['Title'] = data['Name'].str.extract('([A-Za-z]+)\.', expand=False)
data['Title'].unique() | Titanic - Machine Learning from Disaster |
10,330,948 | stage = 2
csvlogger = callbacks.CSVLogger(learn=learn, filename='history_stage_'+str(stage)+'_'+model_name, append=True)
saveModel = callbacks.SaveModelCallback(learn, every='epoch',
monitor='top_3_accuracy', mode='max',
name='stage_'+str(stage))
reduceLR = callbacks.ReduceLROnPlateauCallback(learn=learn, monitor = 'top_3_accuracy', mode = 'max', patience = 1, factor = 0.5)
<train_model> | data.groupby('Title')['Sex'].count()
data['Title'] = data['Title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr',
'Jonkheer', 'Major', 'Sir', 'Rev', 'Dona'], 'Rare')
data['Title'] = data['Title'].replace(['Lady', 'Mlle', 'Mme', 'Ms'],
['Mrs', 'Miss', 'Miss', 'Mrs'] ) | Titanic - Machine Learning from Disaster |
10,330,948 | learn.fit_one_cycle(10, slice(1e-5, 1e-3))<save_model> | def family_survival():
    # Group passengers by last name + fare (likely families) and by ticket,
    # then propagate any known survival outcome within each group.
    data['Last_Name'] = data['Name'].apply(lambda x: str.split(x, ",")[0])
    default_survival_rate = 0.5
    data['Family_survival'] = default_survival_rate
    for grp, grp_df in data[['Survived', 'Name', 'Last_Name',
                             'Fare', 'Ticket', 'PassengerId',
                             'SibSp', 'Parch', 'Age', 'Cabin']].groupby(['Last_Name', 'Fare']):
        if len(grp_df) != 1:
            for ind, row in grp_df.iterrows():
                smax = grp_df.drop(ind)['Survived'].max()
                smin = grp_df.drop(ind)['Survived'].min()
                ID = row['PassengerId']
                if smax == 1.0:
                    data.loc[data['PassengerId'] == ID, 'Family_survival'] = 1
                elif smin == 0.0:
                    data.loc[data['PassengerId'] == ID, 'Family_survival'] = 0
    for _, grp_df in data.groupby('Ticket'):
        if len(grp_df) != 1:
            for ind, row in grp_df.iterrows():
                if (row['Family_survival'] == 0) | (row['Family_survival'] == 0.5):
                    smax = grp_df.drop(ind)['Survived'].max()
                    smin = grp_df.drop(ind)['Survived'].min()
                    ID = row['PassengerId']
                    if smax == 1.0:
                        data.loc[data['PassengerId'] == ID, 'Family_survival'] = 1
                    elif smin == 0.0:
                        data.loc[data['PassengerId'] == ID, 'Family_survival'] = 0
    return data | Titanic - Machine Learning from Disaster |
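A brief usage sketch for the helper above; the call itself is assumed, since the invoking cell is not shown in this preview:

```python
# Sketch: materialize the family-survival feature and inspect its distribution.
data = family_survival()
print(data['Family_survival'].value_counts())
```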
10,330,948 | learn.save('stage-2-SE_Resnext101' )<define_variables> | Titanic - Machine Learning from Disaster | |
10,330,948 | test = ImageList.from_folder('.. /input/ifood-2019-fgvc6/test_set')
len(test )<load_pretrained> | data['Age'] = data.groupby(['Title', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median())) | Titanic - Machine Learning from Disaster |
10,330,948 | learn.export('/tmp/export.pkl')
learn = load_learner('/tmp/', test=test)
preds, _ = learn.get_preds(ds_type=DatasetType.Test)<define_variables> | def age_category(age):
    if age <= 2:
        return 0
    if 2 < age <= 18:
        return 1
    if 18 < age <= 35:
        return 2
    if 35 < age <= 65:
        return 3
    else:
        return 4
data['Age'] = data['Age'].apply(age_category) | Titanic - Machine Learning from Disaster |
10,330,948 | fnames = [f.name for f in learn.data.test_ds.items]
fnames[:4]
<create_dataframe> | data['Age*Pclass'] = data['Age']*data['Pclass'] | Titanic - Machine Learning from Disaster |
10,330,948 | col = ['img_name']
test_df = pd.DataFrame(fnames, columns=col)
test_df['label'] = ''<feature_engineering> | data[data['Fare'].isnull() ] | Titanic - Machine Learning from Disaster |
10,330,948 | for i, pred in T(enumerate(predictions), total=len(predictions)):
    # Inner variable renamed so it no longer shadows the row index `i`.
    test_df.loc[i, 'label'] = ' '.join(str(int(c)) for c in np.argsort(pred)[::-1][:3])<save_to_csv> | data.loc[data['Fare'].isnull(), 'Fare'] = data.loc[(data['Embarked'] == 'S')
&(data['Pclass'] == 3)&(data['SibSp'] == 0)]['Fare'].median() | Titanic - Machine Learning from Disaster |
10,330,948 | test_df.to_csv('submission_SE_Resnext101_fastai_mixup_2.csv', index=False )<save_to_csv> | data['Fare'].value_counts() | Titanic - Machine Learning from Disaster |
10,330,948 | def create_download_link(df, title="Download CSV file", filename="data.csv"):
    csv = df.to_csv()
    b64 = base64.b64encode(csv.encode())
    payload = b64.decode()
    html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
    html = html.format(payload=payload, title=title, filename=filename)
    return HTML(html)<load_pretrained> | def fare_category(fare):
    if fare <= 7.91:
        return 0
    if 7.91 < fare <= 14.454:
        return 1
    if 14.454 < fare <= 31:
        return 2
    if 31 < fare <= 99:
        return 3
    if 99 < fare <= 250:
        return 4
    else:
        return 5
data['Fare'] = data['Fare'].apply(fare_category) | Titanic - Machine Learning from Disaster |
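The first three fare cut points above (7.91, 14.454, 31) match quartile edges of the Titanic Fare column, so a quantile-based sketch gives a similar coarse binning when applied to the raw fares before the mapping above; the `Fare_bin` column name is hypothetical:

```python
# Sketch: four equal-frequency fare bins via pandas.qcut.
import pandas as pd

data['Fare_bin'] = pd.qcut(data['Fare'], 4, labels=False)
```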
10,330,948 | create_download_link(test_df, filename='submission_SE_Resnext101_fastai_mixup_2.csv' )<import_modules> | data[data['Embarked'].isnull() ] | Titanic - Machine Learning from Disaster |
10,330,948 | from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras import optimizers
from keras import initializers
import numpy as np
from matplotlib import pyplot as plt<define_variables> | data.loc[(data['Fare'] < 80)&(data['Pclass'] == 1)]['Embarked'].value_counts()
data.loc[data['Embarked'].isnull() , 'Embarked'] = 'S' | Titanic - Machine Learning from Disaster |