kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
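The four columns above describe one row per annotated notebook cell: a numeric kernel_id, a prompt (the preceding notebook context), a completion (the code cell that follows, suffixed with an operation tag such as <groupby> or <predict_on_test>), and the comp_name of the Kaggle competition. A minimal sketch of iterating over rows of this schema with pandas; the file name "cells.parquet" is a placeholder, not the real artifact:

import pandas as pd

# Hypothetical local export of the dataset; adjust the path to the real file.
df = pd.read_parquet("cells.parquet")
row = df.iloc[0]
print(row["kernel_id"])         # e.g. 14319955
print(row["comp_name"])         # e.g. "Titanic - Machine Learning from Disaster"
print(row["completion"][:200])  # code cell text ending in an operation tag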
14,319,955
pred1 = np.expm1(pred)<predict_on_test>
grid_xgb = GridSearchCV(XGBClassifier(), param_grid_xgb, cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=2, random_state=42), scoring='accuracy', verbose=2, n_jobs=-1)
Titanic - Machine Learning from Disaster
14,319,955
pred = model.predict(X_test)<prepare_output>
grid_xgb.fit(X_train_fe, y_train)
Titanic - Machine Learning from Disaster
14,319,955
pred1 = np.expm1(pred)<save_to_csv>
params_xgb = {'colsample_bylevel': 0.7, 'learning_rate': 0.03, 'max_depth': 3, 'n_estimators': 400, 'reg_lambda': 15, 'subsample': 0.5}
Titanic - Machine Learning from Disaster
14,319,955
submission = pd.read_csv('../input/exam-for-students20200129/sample_submission.csv', index_col=0) submission['ConvertedSalary'] = pred1 submission.to_csv('submission.csv')<set_options>
logreg = LogisticRegression(**params_logreg) svc = SVC(**params_svc) knn = KNeighborsClassifier(**params_knn) rfc = RandomForestClassifier(**params_random) gradient = GradientBoostingClassifier(**params_gradient) xgb = XGBClassifier(**params_xgb) estimators = [('logreg', logreg),('knn', knn),('svc', svc),('rfc', rfc),('gradient', gradient), ('xgb', xgb)] stack = StackingClassifier(estimators=estimators, cv=10, n_jobs=-1)
Titanic - Machine Learning from Disaster
14,319,955
plt.style.use('ggplot') %matplotlib inline pd.set_option('display.max_columns', 500)<load_from_csv>
y_preds = logreg.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
df_train = pd.read_csv('../input/exam-for-students20200129/train.csv', index_col=0) df_test = pd.read_csv('../input/exam-for-students20200129/test.csv', index_col=0) df_train.ConvertedSalary = np.log1p(df_train.ConvertedSalary)<count_missing_values>
y_preds = svc.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
df_train.isnull().sum()<count_missing_values>
y_preds = knn.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
df_test.isnull().sum()<count_unique_values>
y_preds = rfc.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
cats = [] for col in df_train.columns: if df_train[col].dtype == 'object': cats.append(col) print(col, df_train[col].nunique())<groupby>
y_preds = gradient.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
df_train.groupby(u).size()<groupby>
y_preds = xgb.fit(X_train_fe, y_train).predict(X_test_fe)
Titanic - Machine Learning from Disaster
14,319,955
df_train.groupby(u).size()<sort_values>
submission = pd.DataFrame({'PassengerId':test.index, 'Survived':y_preds})
Titanic - Machine Learning from Disaster
14,319,955
df_train.groupby(u).size().sort_values()<sort_values>
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
14,319,955
df_test.groupby(u).size().sort_values()<drop_column>
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
14,319,955
df_train.drop('Country',axis=1,inplace=True) df_test.drop('Country',axis=1,inplace=True)<groupby>
pd.read_csv('submission.csv')
Titanic - Machine Learning from Disaster
14,653,637
df_train.groupby(u).size()<groupby>
from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from mlxtend.feature_selection import SequentialFeatureSelector as SFS
Titanic - Machine Learning from Disaster
14,653,637
df_train.groupby(u).size()<feature_engineering>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
14,653,637
<groupby>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
14,653,637
nf = 'count_Employment' df_train[nf] = df_train[u].map(df_train.groupby(u).ConvertedSalary.count()) df_test[nf] = df_test[u].map(df_train.groupby(u).ConvertedSalary.count())<groupby>
train_data['Age'] = train_data['Age'].fillna(train_data.Age.mean()) test_data['Age'] = test_data['Age'].fillna(test_data.Age.mean()) test_data['Fare'] = test_data['Fare'].fillna(test_data.Fare.mean())
Titanic - Machine Learning from Disaster
14,653,637
df_train.groupby(u).size()<groupby>
train_data["Embarked"].value_counts()
Titanic - Machine Learning from Disaster
14,653,637
u = 'CompanySize' df_train.groupby(u).size()<groupby>
train_data = train_data.fillna({"Embarked": "S"})
Titanic - Machine Learning from Disaster
14,653,637
df_test.groupby(u).size()<groupby>
train_data.drop(['Name','Ticket','Cabin'], axis = 1, inplace = True) test_data.drop(['Name','Ticket','Cabin'], axis = 1, inplace = True)
Titanic - Machine Learning from Disaster
14,653,637
df_train.groupby(u).size()<groupby>
train_data.isnull().sum() test_data.isnull().sum()
Titanic - Machine Learning from Disaster
14,653,637
u = 'YearsCoding' df_train.groupby(u).size()<groupby>
train_data = pd.get_dummies(train_data, columns=["Sex"]) train_data = pd.get_dummies(train_data, columns=["Embarked"]) test_data = pd.get_dummies(test_data, columns=["Sex"]) test_data = pd.get_dummies(test_data, columns=["Embarked"])
Titanic - Machine Learning from Disaster
14,653,637
df_test.groupby(u).size()<categorify>
X = train_data X = train_data.drop("Survived",axis=1) y = train_data["Survived"]
Titanic - Machine Learning from Disaster
14,653,637
df_train[u].replace({'0-2 years':0, '12-14 years':12, '15-17 years':15, '18-20 years':18, '21-23 years':21, '24-26 years':24, '27-29 years':27, '3-5 years':3, '30 or more years':30, '6-8 years':6, '9-11 years':9},inplace=True)<groupby>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1) X_train.shape, X_test.shape
Titanic - Machine Learning from Disaster
14,653,637
df_train.groupby(u).size()<categorify>
sfs = SFS(RandomForestClassifier(n_estimators=250, max_depth=5, random_state=1, n_jobs = -1), k_features = 6, forward = False, floating = False, verbose = 2, scoring = 'accuracy', cv = 4, n_jobs = -1) sfs = sfs.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
14,653,637
df_test[u].replace({'0-2 years':0, '12-14 years':12, '15-17 years':15, '18-20 years':18, '21-23 years':21, '24-26 years':24, '27-29 years':27, '3-5 years':3, '30 or more years':30, '6-8 years':6, '9-11 years':9},inplace=True)<groupby>
print(sfs.k_feature_names_) print('Sequential Backward Selection:') print(sfs.k_feature_idx_) print('CV Score:') print(sfs.k_score_)
Titanic - Machine Learning from Disaster
14,653,637
u = 'YearsCodingProf' df_train.groupby(u).size()<groupby>
features = ['Pclass', 'Age', 'SibSp', 'Fare', 'Sex_male', 'Embarked_S'] X = train_data[features] X_test = test_data[features] model = RandomForestClassifier(n_estimators=250, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
14,452,292
df_train.groupby(u).size()<groupby>
from sklearn.linear_model import LogisticRegression
Titanic - Machine Learning from Disaster
14,452,292
u = 'Age' df_train.groupby(u).size()<groupby>
x_train = pd.read_csv("/kaggle/input/titanic/train.csv") y_train = x_train['Survived'] x_train = x_train.drop(columns=['Survived']) x_test = pd.read_csv("/kaggle/input/titanic/test.csv")
Titanic - Machine Learning from Disaster
14,452,292
df_test.groupby(u).size()<groupby>
def preprocessing(df): df["Fare"] = (df["Fare"] - df["Fare"].min())/(df["Fare"].max() - df["Fare"].min()) df["Fare"] = df["Fare"].fillna(-999) df["Sex"] = df["Sex"].factorize()[0] df["Embarked"] = df["Embarked"].factorize()[0] for i in range(len(df["Name"])): df["Name"][i] = df["Name"][i].split(',')[0] df["Name"] = df["Name"].factorize()[0] df["Cabin"] = df["Cabin"].factorize()[0] df["Age"] = (df["Age"] - df["Age"].mean())/df["Age"].std() df["Age"] = df["Age"].fillna(-999) df["Ticket"] = df["Ticket"].factorize()[0] return df
Titanic - Machine Learning from Disaster
14,452,292
df_train.groupby(u).size()<groupby>
x_train_processed = preprocessing(x_train) x_test_processed = preprocessing(x_test)
Titanic - Machine Learning from Disaster
14,452,292
u = 'LastNewJob' df_train.groupby(u).size()<groupby>
x_train_processed = x_train.drop(columns=["Ticket"]) x_test_processed = x_test.drop(columns=["Ticket"])
Titanic - Machine Learning from Disaster
14,452,292
df_train.groupby(u)['ConvertedSalary'].mean()<groupby>
model = LogisticRegression(random_state=0, max_iter=2500).fit(x_train_processed, y_train) pred = model.predict(x_test_processed) pred
Titanic - Machine Learning from Disaster
14,452,292
u = 'Currency' df_train.groupby(u).size()<groupby>
model.score(x_train_processed, y_train)
Titanic - Machine Learning from Disaster
14,452,292
df_test.groupby(u).size().index<categorify>
df = pd.DataFrame(pred, columns=["Survived"]) df.head()
Titanic - Machine Learning from Disaster
14,452,292
<count_unique_values>
df["PassengerId"] = x_test["PassengerId"].values df
Titanic - Machine Learning from Disaster
14,452,292
df_train[u].nunique()<count_unique_values>
df.to_csv('predicts.csv',index=False)
Titanic - Machine Learning from Disaster
14,468,025
df_test[u].nunique()<groupby>
data_train = pd.read_csv("/kaggle/input/titanic/train.csv") data_test = pd.read_csv("/kaggle/input/titanic/test.csv") y = data_train.Survived
Titanic - Machine Learning from Disaster
14,468,025
u = 'CurrencySymbol' df_train.groupby(u).size()<groupby>
data_train.groupby('Sex').Survived.mean()
Titanic - Machine Learning from Disaster
14,468,025
df_test.groupby(u).size().index<groupby>
data_train.groupby('SibSp').Survived.agg(['mean','count'])
Titanic - Machine Learning from Disaster
14,468,025
df_train.groupby(u)['ConvertedSalary'].mean()<groupby>
data_train.groupby('Parch').Survived.agg(['mean','count'])
Titanic - Machine Learning from Disaster
14,468,025
nf = 'count_CurrencySymbol' df_train[nf] = df_train[u].map(df_train.groupby(u).ConvertedSalary.count()) df_test[nf] = df_test[u].map(df_train.groupby(u).ConvertedSalary.count())<categorify>
X_train = data_train.drop(['Name','Ticket','PassengerId'],axis=1) X_test = data_test.drop(['Name','Ticket','PassengerId'],axis=1)
Titanic - Machine Learning from Disaster
14,468,025
cats = [] for col in df_train.columns: if df_train[col].dtype == 'object': cats.append(col) print(col, df_train[col].nunique()) encoder = OrdinalEncoder(cols=cats) df_train[cats] = encoder.fit_transform(df_train[cats]) df_test[cats] = encoder.transform(df_test[cats])<define_variables>
X_train = X_train.drop(['Cabin'],axis=1) X_test = X_test.drop(['Cabin'],axis=1)
Titanic - Machine Learning from Disaster
14,468,025
feature = ['CompanySize', 'LastNewJob', 'CurrencySymbol', 'YearsCoding', 'count_Employment', 'SalaryType', 'Currency', 'Employment', 'MilitaryUS', 'YearsCodingProf', 'count_CurrencySymbol', 'Age', 'RaceEthnicity', 'DevType', 'Student', 'AssessBenefits2', 'JobContactPriorities3', 'CareerSatisfaction', 'FrameworkWorkedWith', 'NumberMonitors', 'CheckInCode', 'AssessBenefits6', 'AssessJob1', 'AssessBenefits11', 'JobEmailPriorities5', 'AssessBenefits8', 'OperatingSystem', 'AssessBenefits9', 'FormalEducation', 'AssessBenefits7', 'AssessJob5', 'AssessJob4', 'AssessBenefits4', 'EducationParents', 'WakeTime', 'AssessBenefits1', 'JobEmailPriorities6', 'AssessBenefits10', 'AssessJob10', 'CommunicationTools', 'JobContactPriorities4', 'JobContactPriorities1', 'JobContactPriorities2', 'AdsPriorities5', 'UndergradMajor', 'AssessJob3', 'AssessJob8', 'AssessJob6', 'AssessJob2', 'AssessJob7', 'AssessBenefits5', 'AdsActions', 'AssessBenefits3', 'Exercise', 'AdsPriorities2', 'AdsPriorities3', 'JobEmailPriorities1', 'AdsAgreeDisagree3', 'AssessJob9', 'StackOverflowVisit', 'JobEmailPriorities2', 'AdsPriorities1', 'AdsPriorities6', 'Gender', 'JobEmailPriorities7', 'JobEmailPriorities3', 'AdsPriorities7', 'UpdateCV', 'AdsPriorities4', 'StackOverflowDevStory', 'HopeFiveYears', 'ErgonomicDevices']<prepare_x_and_y>
X_train[X_train.Embarked.isnull()]
Titanic - Machine Learning from Disaster
14,468,025
y_train = df_train.ConvertedSalary X_train = df_train.drop(['ConvertedSalary'],axis=1) X_test = df_test.copy()<split>
X_train.groupby('Embarked').Embarked.count()
Titanic - Machine Learning from Disaster
14,468,025
scores = [] y_pred_test = 0 skf = KFold(n_splits=5, random_state=60, shuffle=True) for i,(train_ix, test_ix) in tqdm(enumerate(skf.split(X_train, y_train))): X_train_, y_train_ = X_train.values[train_ix], y_train.values[train_ix] X_val, y_val = X_train.values[test_ix], y_train.values[test_ix] clf = LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=0.9, importance_type='split', learning_rate=0.05, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=9999, n_jobs=-1, num_leaves=15, objective=None, random_state=71, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0) clf.fit(X_train_, y_train_, early_stopping_rounds=50, eval_metric='rmse', eval_set=[(X_val, y_val)]) y_pred = clf.predict(X_val) y_pred_test += clf.predict(X_test) score = mean_squared_error(y_val, y_pred) scores.append(score) print('CV Score of Fold_%d is %f' % (i, score))<split>
X_train.Embarked = X_train.Embarked.fillna('S')
Titanic - Machine Learning from Disaster
14,468,025
skf = KFold(n_splits=5, random_state=40, shuffle=True) for i,(train_ix, test_ix) in tqdm(enumerate(skf.split(X_train, y_train))): X_train_, y_train_ = X_train.values[train_ix], y_train.values[train_ix] X_val, y_val = X_train.values[test_ix], y_train.values[test_ix] clf = LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=0.9, importance_type='split', learning_rate=0.05, max_depth=-1, min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0, n_estimators=9999, n_jobs=-1, num_leaves=15, objective=None, random_state=71, reg_alpha=0.0, reg_lambda=0.0, silent=True, subsample=1.0, subsample_for_bin=200000, subsample_freq=0) clf.fit(X_train_, y_train_, early_stopping_rounds=50, eval_metric='rmse', eval_set=[(X_val, y_val)]) y_pred = clf.predict(X_val) y_pred_test += clf.predict(X_test) score = mean_squared_error(y_val, y_pred) scores.append(score) print('CV Score of Fold_%d is %f' % (i, score))<save_to_csv>
X_train[X_train.Embarked.isnull()]
Titanic - Machine Learning from Disaster
14,468,025
y_pred = y_pred_test/10 y_pred = np.expm1(y_pred) submission = pd.read_csv('../input/exam-for-students20200129/sample_submission.csv', index_col=0) submission.ConvertedSalary = y_pred submission.to_csv('submission.csv')<set_options>
def impute(cols): Age = cols[0] Pclass = cols[1] if pd.isnull(Age): if Pclass==1: return 38 elif Pclass==2: return 30 else: return 25 return Age
Titanic - Machine Learning from Disaster
14,468,025
def reset_tf_session(): curr_session = tf.get_default_session() if curr_session is not None: curr_session.close() K.clear_session() config = tf.ConfigProto() config.gpu_options.allow_growth = True s = tf.InteractiveSession(config=config) K.set_session(s) return s<load_from_csv>
X_train.Age = X_train[['Age','Pclass']].apply(impute,axis=1) X_test.Age = X_test[['Age','Pclass']].apply(impute,axis=1)
Titanic - Machine Learning from Disaster
14,468,025
train_data_np = pd.read_csv("../input/train_data.txt",delimiter=' ::: ',header=None,names=['id','title','genre','desc']) predict_data_np = pd.read_csv("../input/test_data.txt",delimiter=' ::: ',header=None,names=['id','title','desc']) train_data_np.shape<prepare_x_and_y>
X_test[X_test.Fare.isnull()]
Titanic - Machine Learning from Disaster
14,468,025
lb = LabelBinarizer() lb.fit(genres) y = lb.transform(train_data_np.genre) print(lb.classes_) print(train_data_np.genre[0]) y[0]<init_hyperparams>
X_test['Fare'] = X_test['Fare'].fillna(13)
Titanic - Machine Learning from Disaster
14,468,025
desc = train_data_np['desc'].values tokenizer = Tokenizer(num_words=None,lower=True) tokenizer.fit_on_texts(desc) deleted = 0 high_count_words = [w for w,c in tokenizer.word_counts.items() if c > 10.0*train_data_np.shape[0]] for w in high_count_words: del tokenizer.word_index[w] del tokenizer.word_docs[w] del tokenizer.word_counts[w] deleted += 1 print("Delete ", w) print("Delete ", deleted, " words from tokenizer") desc_train, desc_test, y_train, y_test = train_test_split(desc, y, test_size=0.05, random_state=1000) X_train = tokenizer.texts_to_sequences(desc_train) X_test = tokenizer.texts_to_sequences(desc_test) vocab_size = len(tokenizer.word_index) + 1 print(desc_train[2]) print(X_train[2])<categorify>
X_train = X_train.drop(['Survived'],axis=1)
Titanic - Machine Learning from Disaster
14,468,025
maxlen = 100 X_train = pad_sequences(X_train, padding='post', maxlen=maxlen) X_test = pad_sequences(X_test, padding='post', maxlen=maxlen) print(X_train[0, :])<choose_model_class>
X_test.groupby('Sex').Sex.count()
Titanic - Machine Learning from Disaster
14,468,025
def make_model_8(): s = reset_tf_session() embedding_dim = 50 model = Sequential() model.add(layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=maxlen)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(layers.Dense(32, activation='relu')) model.add(layers.Dense(len(genres), activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() return model<choose_model_class>
from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder, LabelEncoder
Titanic - Machine Learning from Disaster
14,468,025
def make_model_9(): s = reset_tf_session() embedding_dim = 50 model = Sequential() model.add(layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim, input_length=maxlen)) model.add(layers.Bidirectional(layers.LSTM(100, return_sequences=True, dropout=0.7,recurrent_dropout=0.7))) model.add(layers.Flatten()) model.add(layers.Dense(len(genres), activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.summary() return model<train_model>
le = LabelEncoder() X_train['Sex'] = le.fit_transform(X_train['Sex']) X_test['Sex'] = le.transform(X_test['Sex']) X_train.head()
Titanic - Machine Learning from Disaster
14,468,025
model = make_model_8() history = model.fit(X_train, y_train, epochs=20, verbose=True, validation_data=(X_test, y_test), batch_size=1000) plot_history(history)<predict_on_test>
onh = OneHotEncoder(handle_unknown='ignore', sparse=False) X_train_trans = pd.DataFrame(onh.fit_transform(X_train[['Embarked']])) X_test_trans = pd.DataFrame(onh.transform(X_test[['Embarked']])) X_train_trans.index = X_train.index X_test_trans.index = X_test.index X_train_conc = X_train.drop(['Embarked'],axis=1) X_test_conc = X_test.drop(['Embarked'],axis=1) X_train_final = pd.concat([X_train_conc,X_train_trans],axis=1) X_test_final = pd.concat([X_test_conc,X_test_trans],axis=1)
Titanic - Machine Learning from Disaster
14,468,025
y_predict_train = model.predict_proba(X_train)<find_best_params>
sc = StandardScaler() X_train_final = sc.fit_transform(X_train_final) X_test_final = sc.transform(X_test_final)
Titanic - Machine Learning from Disaster
14,468,025
T=201 y_predict = y_predict_train print(y_predict[T,:]) y_predict_max = np.argmax(y_predict[T,:]) print(y_predict_max, lb.classes_[y_predict_max], y_predict[T,y_predict_max]) print(np.argmax(y_train[T]),y_train[T]) print(train_data_np.genre[T])<predict_on_test>
clf = SVC(kernel='rbf', degree = 5) clf.fit(X_train_final,y)
Titanic - Machine Learning from Disaster
14,468,025
<find_best_params><EOS>
pred = clf.predict(X_test_final) output = pd.DataFrame({'PassengerId':data_test.PassengerId,'Survived':pred}) output.to_csv('submission.csv',index=False)
Titanic - Machine Learning from Disaster
14,414,602
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<create_dataframe>
warnings.simplefilter(action='ignore', category=FutureWarning) warnings.filterwarnings("ignore") pd.set_option('max_columns',100)
Titanic - Machine Learning from Disaster
14,414,602
genres_predict = [] ids = [] for i in range(y_predict.shape[0]): y_predict_max = np.argmax(y_predict[i,:]) genres_predict.extend([lb.classes_[y_predict_max]]) submission = pd.DataFrame({'id':predict_data_np['id'].values, 'genre':genres_predict, 'title':predict_data_np['title'].values}, columns=['id', 'genre','title']) submission.to_csv('submission.csv', index=False,columns=['id', 'genre']) print('Save submit') submission.head()<load_from_csv>
traindf = pd.read_csv('../input/titanic/train.csv').set_index('PassengerId') testdf = pd.read_csv('../input/titanic/test.csv').set_index('PassengerId') submission = pd.read_csv('../input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
14,414,602
train = pd.read_csv('../input/kaggle18011884/train_cabbage_price.csv') train.avgPrice.plot()<load_from_csv>
df = pd.concat([traindf, testdf], axis=0, sort=False) df['Title'] = df.Name.str.split(',').str[1].str.split('.').str[0].str.strip() df['IsWomanOrBoy'] = ((df.Title == 'Master')|(df.Sex == 'female')) df['LastName'] = df.Name.str.split(',').str[0] family = df.groupby(df.LastName).Survived df['WomanOrBoyCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0).count()) df['WomanOrBoyCount'] = df.mask(df.IsWomanOrBoy, df.WomanOrBoyCount - 1, axis=0) df['FamilySurvivedCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0).sum()) df['FamilySurvivedCount'] = df.mask(df.IsWomanOrBoy, df.FamilySurvivedCount - df.Survived.fillna(0), axis=0) df['WomanOrBoySurvived'] = df.FamilySurvivedCount / df.WomanOrBoyCount.replace(0, np.nan) df.WomanOrBoyCount = df.WomanOrBoyCount.replace(np.nan, 0) df['Alone'] = (df.WomanOrBoyCount == 0) df['Title'] = df['Title'].replace('Ms','Miss') df['Title'] = df['Title'].replace('Mlle','Miss') df['Title'] = df['Title'].replace('Mme','Mrs') df['Embarked'] = df['Embarked'].fillna('S') df['Deck'] = df['Cabin'].apply(lambda s: s[0] if pd.notnull(s) else 'M') med_fare = df.groupby(['Pclass', 'Parch', 'SibSp']).Fare.median()[3][0][0] df['Fare'] = df['Fare'].fillna(med_fare) df['Age'] = df.groupby(['Sex', 'Pclass', 'Title'])['Age'].apply(lambda x: x.fillna(x.median())) df['Family_Size'] = df['SibSp'] + df['Parch'] + 1 cols_to_drop = ['Name','Ticket','Cabin', 'IsWomanOrBoy', 'WomanOrBoyCount', 'FamilySurvivedCount'] df = df.drop(cols_to_drop, axis=1) df.WomanOrBoySurvived = df.WomanOrBoySurvived.fillna(0) df.Alone = df.Alone.fillna(0) target = df.Survived.loc[traindf.index] df = df.drop(['Survived'], axis=1) train, test = df.loc[traindf.index], df.loc[testdf.index]
Titanic - Machine Learning from Disaster
14,414,602
test = pd.read_csv('../input/kaggle18011884/test_cabbage_price.csv') test<data_type_conversions>
numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'] categorical_columns = [] features = train.columns.values.tolist() for col in features: if train[col].dtype in numerics: continue categorical_columns.append(col) for col in categorical_columns: if col in train.columns: le = LabelEncoder() le.fit(list(train[col].astype(str).values) + list(test[col].astype(str).values)) train[col] = le.transform(list(train[col].astype(str).values)) test[col] = le.transform(list(test[col].astype(str).values))
Titanic - Machine Learning from Disaster
14,414,602
train['yeargb'] = [y[:6] for y in train['year'].astype('str')] test['yeargb'] = test['year'] train['month'] = [int(y[4:6]) for y in train['year'].astype('str')] test['month'] = [int(y[-2:]) for y in test['year'].astype('str')] train['year2'] = [int(y[:4]) for y in train['year'].astype('str')] test['year2'] = [int(y[:4]) for y in test['year'].astype('str')] traingb = train.groupby('yeargb').mean().reset_index() sns.heatmap(traingb.corr())<concatenate>
Xtrain, Xval, Ztrain, Zval = train_test_split(train, target, test_size=0.2, random_state=0) train_set = lgbm.Dataset(Xtrain, Ztrain, silent=False) valid_set = lgbm.Dataset(Xval, Zval, silent=False)
Titanic - Machine Learning from Disaster
14,414,602
traingb.reset_index().append(test.reset_index())<create_dataframe>
params = { 'boosting_type':'gbdt', 'objective': 'binary', 'num_leaves': 31, 'learning_rate': 0.05, 'max_depth': -1, 'subsample': 0.8, 'bagging_fraction' : 1, 'max_bin' : 50 , 'bagging_freq': 20, 'colsample_bytree': 0.6, 'metric': 'binary', 'min_split_gain': 0.5, 'min_child_weight': 1, 'min_child_samples': 2, 'scale_pos_weight':1, 'zero_as_missing': True, 'seed':0, } modelL = lgbm.train(params, train_set = train_set, num_boost_round=2000, early_stopping_rounds=10, verbose_eval=10, valid_sets=valid_set)
Titanic - Machine Learning from Disaster
14,414,602
pd.DataFrame(np.dot(np.array(ymean.avgPrice).reshape(-1,1), np.array(mmean.avgPrice).reshape(1,-1)))/(traingb.avgPrice.mean()**2)<prepare_x_and_y>
feature_score = pd.DataFrame(train.columns, columns = ['feature']) feature_score['LGB'] = modelL.feature_importance()
Titanic - Machine Learning from Disaster
14,414,602
def clustertechniques2(dtrain,label,indexv): cols=[ci for ci in dtrain.columns if ci not in [indexv,'index',label]] dtest=dtrain[dtrain[label].isnull()==True][[indexv,label]] print(dtest) print('encodings after shape',dtrain.shape) X_train=dtrain[dtrain[label].isnull()==False].drop([indexv,label],axis=1).fillna(0) Y_train=dtrain[dtrain[label].isnull()==False][label] X_test=dtrain[dtrain[label].isnull()==True].drop([indexv,label],axis=1).fillna(0) Y_test=dtrain[dtrain[label].isnull()==True][label].fillna(0) print(Y_test) for xi in range(len(Y_test)): Y_test.iloc[xi]=np.random.random((1,1))[0] print(Y_test) if len(X_test)==0: X_train,X_test,Y_train,Y_test = train_test_split(dtrain.drop(label,axis=1).fillna(0),dtrain[label],test_size=0.25,random_state=0) lenxtr=len(X_train) print('splitting data train test X-y',X_train.shape,Y_train.shape,X_test.shape,Y_test.shape) scale = preprocessing.MinMaxScaler().fit(X_train) X_train = scale.transform(X_train) X_test = scale.transform(X_test) clusters = [Dummy(1), PCA(n_components=0.7,random_state=0,whiten=True), FastICA(n_components=7,random_state=0), TruncatedSVD(n_components=5, n_iter=7, random_state=42), NMF(n_components=10,random_state=0), UMAP(n_neighbors=5,n_components=10, min_dist=0.3,metric='minkowski'), TSNE(n_components=2,random_state=0)] clunaam=['raw','PCA','ICA','tSVD','NMF','UMAP','tSNE'] classifiers = [ExtraTreesRegressor(), RandomForestRegressor(random_state=1, n_estimators=10), BayesianRidge(), RANSACRegressor(), KNeighborsRegressor(), ElasticNetCV(cv=5, random_state=0), HuberRegressor(), LinearRegression()] clanaam=['xTreer','rFor','BaysR','Ransac','KNNr','elast','huber','linear'] results=[] for clu in clusters: clunm=clunaam[clusters.index(clu)] X_total_clu = clu.fit_transform(np.concatenate((X_train,X_test),axis=0)) X_total_clu=np.concatenate((X_total_clu,np.concatenate((X_train,X_test),axis=0)),axis=1) print(X_total_clu.shape) plt.scatter(X_total_clu[:lenxtr,0],X_total_clu[:lenxtr,1],c=Y_train.values,cmap='prism') plt.title(clu) plt.show() for cla in classifiers: start = datetime.datetime.now() clanm=clanaam[classifiers.index(cla)] print(' ',cla) cla.fit(X_total_clu,np.concatenate((Y_train,Y_test))) cla.fit(X_total_clu[:lenxtr],Y_train) trainpredi=cla.predict(X_total_clu[:lenxtr]) testpredi=cla.predict(X_total_clu[lenxtr:]) if classifiers.index(cla) in [0,2,3,4,5,7,8,9,10,11,12,13]: trainprediprob=cla.predict(X_total_clu[:lenxtr]) testprediprob=cla.predict(X_total_clu[lenxtr:]) plt.scatter(x=testprediprob, y=testpredi, marker='.', alpha=0.53) plt.show() if len(dtest)==0: test_score=cla.score(X_total_clu[lenxtr:],Y_test) mse = mean_squared_error(testpredi,Y_test) train_score=cla.score(X_total_clu[:lenxtr],Y_train) li = [clunm,clanm,train_score,test_score,mse] results.append(li) r2s=r2_score(testpredi,Y_test) print(r2s) plt.title(clanm+' test corr & mse: '+str(test_score)+' '+str(mse)+' and test confusionmatrix') plt.scatter(x=Y_test, y=testpredi, marker='.', alpha=1) plt.scatter(x=[np.mean(Y_test)], y=[np.mean(testpredi)], marker='o', color='red') plt.xlabel('Real test'); plt.ylabel('Pred.test') plt.show() else: testpredlabel=testpredi print('train correl',r2_score(trainpredi,Y_train),'mse ',mean_squared_error(trainpredi,Y_train)) submit = pd.DataFrame({'Id': dtest[indexv],'Expected': testpredlabel}) submit['Expected']=submit['Expected'].astype('int') filenaam='subm_'+clunm+'_'+clanm+'.csv' submit.to_csv(path_or_buf=filenaam, index=False) print(clanm,'0 classifier time',datetime.datetime.now()-start) if len(dtest)==0: print(pd.DataFrame(results).sort_values(3)) submit=[] return submit class Dummy(): def __init__(self, feature_names): self._feature_names = feature_names def fit(self, X, y = None): return self def fit_transform(self, X, y = None): return X clustertechniques2(traingb.reset_index().append(test.reset_index()).drop('year',axis=1),'avgPrice','index')<load_from_csv>
y_preds_lgb = modelL.predict(test, num_iteration=modelL.best_iteration)
Titanic - Machine Learning from Disaster
14,414,602
train = pd.read_csv('../input/utkml/train_final.csv') test = pd.read_csv('../input/utkml/test_final.csv') total = train.append(test,ignore_index=True)<sort_values>
data_tr = xgb.DMatrix(Xtrain, label=Ztrain) data_cv = xgb.DMatrix(Xval , label=Zval) data_train = xgb.DMatrix(train) data_test = xgb.DMatrix(test) evallist = [(data_tr, 'train'),(data_cv, 'valid')]
Titanic - Machine Learning from Disaster
14,414,602
total.sort_values('user_id')<feature_engineering>
parms = {'max_depth':5, 'objective':'reg:logistic', 'eval_metric':'error', 'learning_rate':0.01, 'subsample':0.8, 'colsample_bylevel':0.9, 'min_child_weight': 2, 'seed': 0} modelx = xgb.train(parms, data_tr, num_boost_round=2000, evals = evallist, early_stopping_rounds=300, maximize=False, verbose_eval=100) print('score = %1.5f, n_boost_round =%d.'%(modelx.best_score,modelx.best_iteration))
Titanic - Machine Learning from Disaster
14,414,602
datacol=total[['user_id','JOKE:5']] datacol.columns=['user_id','rating'] datacol['item_id']=0 data=datacol.dropna() for ci in range(2,141): colnm=train.columns[ci] datacol=total[['user_id',colnm]] datacol.columns=['user_id','rating'] datacol['item_id']=ci-1 data=data.append(datacol.dropna()) data datacoo=coo_matrix((data.rating,(data.item_id,data.user_id))) datacoo<import_modules>
feature_score['XGB'] = feature_score['feature'].map(modelx.get_score(importance_type='weight'))
Titanic - Machine Learning from Disaster
14,414,602
ratings = datacoo from implicit.nearest_neighbours import (CosineRecommender, BM25Recommender, TFIDFRecommender, bm25_weight) log = logging.getLogger("implicit")<choose_model_class>
y_preds_xgb = modelx.predict(data_test)
Titanic - Machine Learning from Disaster
14,414,602
start = time.time() output_filename='output.txt' model_name='bpr' min_rating = -10.0 titles=train.columns[1:] ratings.data[ratings.data < min_rating] = 0 ratings.eliminate_zeros() ratings.data = np.ones(len(ratings.data)) log.info("read data file in %s", time.time() - start) if model_name == "als": model = AlternatingLeastSquares() print("weighting matrix by bm25_weight") ratings = (bm25_weight(ratings, B=0.9) * 5).tocsr() elif model_name == "bpr": model = BayesianPersonalizedRanking() elif model_name == "lmf": model = LogisticMatrixFactorization() elif model_name == "tfidf": model = TFIDFRecommender() elif model_name == "cosine": model = CosineRecommender() elif model_name == "bm25": model = BM25Recommender(B=0.2) else: raise NotImplementedError("TODO: model %s" % model_name) print("training model %s", model_name) start = time.time() model.fit(ratings) print("trained model '%s' in %s", model_name, time.time() - start) log.debug("calculating top movies") user_count = np.ediff1d(ratings.indptr) to_generate = sorted(np.arange(len(titles)), key=lambda x: -user_count[x]) print("calculating similar movies") with tqdm.tqdm(total=len(to_generate)) as progress: with codecs.open(output_filename, "w", "utf8") as o: for movieid in to_generate: print(movieid,model.similar_items(movieid)) if ratings.indptr[movieid] != ratings.indptr[movieid + 1]: title = titles[movieid] for other, score in model.similar_items(movieid): o.write("%s\t%s\t%s\n" % (title, titles[other], score)) try: print(title,titles[other],score) print() except: print(title,other,score) progress.update(1)<sort_values>
Scaler_train = preprocessing.MinMaxScaler().fit(train) train = pd.DataFrame(Scaler_train.transform(train), columns=train.columns, index=train.index) test = pd.DataFrame(Scaler_train.transform(test), columns=test.columns, index=test.index)
Titanic - Machine Learning from Disaster
14,414,602
for xi in range(10): recommendations = model.recommend(xi, user_items) print('USER',xi,train.iloc[xi].sort_values(ascending=False)[:3]) for ri,prob in recommendations: print('recommended ',ri,titles[ri],prob)<categorify>
linreg = LinearRegression() linreg.fit(train, target)
Titanic - Machine Learning from Disaster
14,414,602
test['predictions'] = np.nan for xi in range(len(train),len(total)): testxi=xi-len(train) testuserid=test.iloc[testxi]['user_id'] recommendations = model.recommend(testuserid, user_items) test.iat[testxi,141]=titles[recommendations[0][0]] if xi/1000==int(xi/1000): print('USER',testxi,total.iloc[xi].sort_values(ascending=False)[:3]) for ri,prob in recommendations: print('recommended ',ri,titles[ri],prob)<save_to_csv>
eli5.show_weights(linreg)
Titanic - Machine Learning from Disaster
14,414,602
test[['user_id','predictions']].to_csv('submit.csv',index=False)<load_from_csv>
coeff_linreg["LinRegress"] = coeff_linreg["LinRegress"].abs() feature_score = pd.merge(feature_score, coeff_linreg, on='feature') feature_score = feature_score.fillna(0) feature_score = feature_score.set_index('feature') feature_score
Titanic - Machine Learning from Disaster
14,414,602
train = pd.read_csv('../input/utkml/train_final.csv') test = pd.read_csv('../input/utkml/test_final.csv') total = train.append(test,ignore_index=True)<sort_values>
y_preds_linreg = linreg.predict(test)
Titanic - Machine Learning from Disaster
14,414,602
total.sort_values('user_id')<feature_engineering>
feature_score = pd.DataFrame( preprocessing.MinMaxScaler().fit_transform(feature_score), columns=feature_score.columns, index=feature_score.index ) feature_score['Mean'] = feature_score.mean(axis=1)
Titanic - Machine Learning from Disaster
14,414,602
datacol=total[['user_id','JOKE:5']] datacol.columns=['user_id','rating'] datacol['item_id']=0 data=datacol.dropna() for ci in range(2,141): colnm=train.columns[ci] datacol=total[['user_id',colnm]] datacol.columns=['user_id','rating'] datacol['item_id']=ci-1 data=data.append(datacol.dropna()) data datacoo=coo_matrix((data.rating,(data.item_id,data.user_id))) datacoo<import_modules>
w_lgb = 0.4 w_xgb = 0.5 w_linreg = 1 - w_lgb - w_xgb w_linreg feature_score['Merging'] = w_lgb*feature_score['LGB'] + w_xgb*feature_score['XGB'] + w_linreg*feature_score['LinRegress'] feature_score.sort_values('Merging', ascending=False)
Titanic - Machine Learning from Disaster
14,414,602
ratings = datacoo from implicit.nearest_neighbours import (CosineRecommender, BM25Recommender, TFIDFRecommender, bm25_weight) log = logging.getLogger("implicit")<choose_model_class>
def features_selection_by_weights(df, threshold): features_list = df.feature.tolist() features_best = [] for i in range(len(df)): feature_name = features_list[i] feature_is_best = False for col in feature_score_columns: if df.loc[i, col] > threshold: feature_is_best = True if feature_is_best: features_best.append(feature_name) return df[df['feature'].isin(features_best)].reset_index(drop=True)
Titanic - Machine Learning from Disaster
14,414,602
start = time.time() output_filename='output.txt' model_name='bpr' min_rating = -10.0 titles=train.columns[1:] ratings.data[ratings.data < min_rating] = 0 ratings.eliminate_zeros() ratings.data = np.ones(len(ratings.data)) log.info("read data file in %s", time.time() - start) if model_name == "als": model = AlternatingLeastSquares() print("weighting matrix by bm25_weight") ratings = (bm25_weight(ratings, B=0.9) * 5).tocsr() elif model_name == "bpr": model = BayesianPersonalizedRanking() print("weighting matrix by bm25_weight") ratings = (bm25_weight(ratings, B=0.9) * 5).tocsr() elif model_name == "lmf": model = LogisticMatrixFactorization() elif model_name == "tfidf": model = TFIDFRecommender() elif model_name == "cosine": model = CosineRecommender() elif model_name == "bm25": model = BM25Recommender(B=0.2) else: raise NotImplementedError("TODO: model %s" % model_name) print("training model %s", model_name) start = time.time() model.fit(ratings) print("trained model '%s' in %s", model_name, time.time() - start) log.debug("calculating top movies") user_count = np.ediff1d(ratings.indptr) to_generate = sorted(np.arange(len(titles)), key=lambda x: -user_count[x]) print("calculating similar movies") with tqdm.tqdm(total=len(to_generate)) as progress: with codecs.open(output_filename, "w", "utf8") as o: for movieid in to_generate: print(movieid,model.similar_items(movieid)) if ratings.indptr[movieid] != ratings.indptr[movieid + 1]: title = titles[movieid] for other, score in model.similar_items(movieid): o.write("%s\t%s\t%s\n" % (title, titles[other], score)) try: print(title,titles[other],score) print() except: print(title,other,score) progress.update(1)<sort_values>
threshold_fi = 0.25 feature_score_best = features_selection_by_weights(feature_score, threshold_fi) feature_score_best
Titanic - Machine Learning from Disaster
14,414,602
for xi in range(10): recommendations = model.recommend(xi, user_items) print('USER',xi,train.iloc[xi].sort_values(ascending=False)[:3]) for ri,prob in recommendations: print('recommended ',ri,titles[ri],prob)<categorify>
y_preds = w_lgb*y_preds_lgb + w_xgb*y_preds_xgb + w_linreg*y_preds_linreg submission['Survived'] = [1 if x>0.5 else 0 for x in y_preds] submission.head()
Titanic - Machine Learning from Disaster
14,414,602
test['predictions'] = np.nan for xi in range(len(train),len(total)): testxi=xi-len(train) testuserid=test.iloc[testxi]['user_id'] recommendations = model.recommend(testuserid, user_items) test.iat[testxi,141]=titles[recommendations[0][0]] if xi/1000==int(xi/1000): print('USER',testxi,total.iloc[xi].sort_values(ascending=False)[:3]) for ri,prob in recommendations: print('recommended ',ri,titles[ri],prob)<save_to_csv>
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
14,414,602
<categorify><EOS>
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
14,398,634
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class>
import numpy as np import pandas as pd
Titanic - Machine Learning from Disaster
14,398,634
class ParaphraseClassifier(nn.Module): def __init__(self,hidden_dim,embedding_dim): super(ParaphraseClassifier, self).__init__() self.hidden_dim = hidden_dim self.embedding_dim = embedding_dim self.embedding = nn.Embedding(len(TEXT.vocab), embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=1,bidirectional=False) self.Wadd = nn.Linear(hidden_dim,hidden_dim) self.Wtimes = nn.Linear(hidden_dim,hidden_dim) self.Wout = nn.Linear(hidden_dim,1) def forward(self,xinputA,xinputB): xembeddedA = self.embedding(xinputA) lstm_outA,(hiddenA,cellA) = self.lstm(xembeddedA.view(len(xinputA), -1, self.embedding_dim), None) xembeddedB = self.embedding(xinputB) lstm_outB,(hiddenB,cellB) = self.lstm(xembeddedB.view(len(xinputB), -1, self.embedding_dim), None) hiddenT = hiddenA * hiddenB hiddenD = torch.abs(hiddenA - hiddenB) hidden = torch.tanh(self.Wtimes(hiddenT) + self.Wadd(hiddenD)) return torch.sigmoid(self.Wout(hidden)) def run_test(self,test_set,pred_filename,useGPU=True): device_code = int(useGPU-1) test_iterator = Iterator(test_set,batch_size=64,device=device_code,train=False,sort=False) idxList = [] predList = [] with torch.no_grad(): for batch in test_iterator: idx,xvecA,xvecB = batch.idx,batch.sentA,batch.sentB prob = 10 * self.forward(xvecA,xvecB).squeeze() / 2.0 idxList.extend(idx.tolist()) predList.extend(prob.tolist()) df_submission = pd.DataFrame({'pairID':idxList,'Relatedness':predList}) df_submission.to_csv(pred_filename, index=False) print(df_submission.head(n=10)) print("done.") def run_train(self,train_set,dev_set,epochs,learning_rate=0.001,useGPU=True): loss_func = nn.BCELoss() optimizer = optim.Adam(self.parameters(), lr=learning_rate,weight_decay=0.00001) self.embedding = nn.Embedding(len(TEXT.vocab), self.embedding_dim) device_code = int(useGPU-1) train_iterator = BucketIterator(train_set,batch_size=64, device=device_code, sort=True,sort_key=lambda x:x.sentA,train=True) dev_iterator = BucketIterator(dev_set, batch_size=64, device=device_code, sort=True,sort_key=lambda x:x.sentA,train=False) max_corr = 0.0 for e in range(epochs): train_loss = 0 N = 0 train_predictions = [] train_reference = [] for batch in train_iterator: xvecA,xvecB,yRelness = batch.sentA,batch.sentB,batch.Relatedness self.zero_grad() prob = self.forward(xvecA,xvecB).squeeze() loss = loss_func(prob,yRelness) loss.backward() optimizer.step() train_loss += loss.item() train_predictions.extend(prob.tolist()) train_reference.extend(yRelness.tolist()) N += 1 R = np.corrcoef(np.array(train_predictions),np.array(train_reference))[0][1] print('Epoch',e,'loss =',train_loss/N,'train correlation(pearson r)=',R) with torch.no_grad(): dev_predictions = [] dev_reference = [] for batch in dev_iterator: xvecA,xvecB,yRelness = batch.sentA,batch.sentB,batch.Relatedness prob = self.forward(xvecA,xvecB).squeeze() dev_predictions.extend(prob.tolist()) dev_reference.extend(yRelness.tolist()) R = np.corrcoef(np.array(dev_predictions),np.array(dev_reference))[0][1] print('Epoch',e,'dev correlation(pearson r)=',R) if R > max_corr: torch.save(self.state_dict(), 'para_model.wt') max_corr = R print('model saved.') self.load_state_dict(torch.load('para_model.wt'))<train_model>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
Titanic - Machine Learning from Disaster
14,398,634
pc = ParaphraseClassifier(150,30) pc.run_train(df_train,df_dev,50)<load_from_csv>
train_data.isnull().sum()
Titanic - Machine Learning from Disaster
14,398,634
df_test = TabularDataset("../input/SICK_test.txt","tsv",skip_header=True, fields=[('idx',INTEGER),('sentA',TEXT),('sentB',TEXT)]) pc.run_test(df_test,'submission.csv',useGPU=True)<define_variables>
train_data[train_data['Age'].isnull()]
Titanic - Machine Learning from Disaster
14,398,634
device = torch.device('cuda:0') train_path = "../input/train/train/" test_path = "../input/test/test/" print(torch.cuda.get_device_name(0))<categorify>
test_data.isnull().sum()
Titanic - Machine Learning from Disaster
14,398,634
class HindiDataset(Dataset): def __init__(self, train_img_path, test_img_path, transform = None, train = True): self.train_img_path = train_img_path self.test_img_path = test_img_path self.train_img_files = os.listdir(train_img_path) self.test_img_files = os.listdir(test_img_path) self.transform = transform self.train = train def __len__(self): return len(self.train_img_files) def __getitem__(self, indx): if self.train: if indx >= len(self.train_img_files): raise Exception("Index should be less than {}".format(len(self.train_img_files))) image = Image.open(self.train_img_path + self.train_img_files[indx]).convert('RGB') labels = self.train_img_files[indx].split('_') V = int(labels[0][1]) C = int(labels[1][1]) label = {'Vowel' : V, 'Consonant' : C} if self.transform: image = self.transform(image) return image, label if self.train == False: image = Image.open(self.test_img_path + self.test_img_files[indx]).convert('RGB') if self.transform: image = self.transform(image) return image, self.test_img_files[indx]<choose_model_class>
train_data[train_data['Embarked'].isnull()]
Titanic - Machine Learning from Disaster
14,398,634
class BasicBlock(nn.Module): def __init__(self, channels = 256, stride = 1, padding = 1): super(BasicBlock, self).__init__() self.channels = channels self.stride = stride self.padding = padding self.conv_1 = nn.Conv2d(in_channels = self.channels, out_channels = self.channels, kernel_size = 3, stride = self.stride, padding = self.padding) self.bn_1 = nn.BatchNorm2d(self.channels) self.prelu_1 = nn.PReLU() self.conv_2 = nn.Conv2d(in_channels = self.channels, out_channels = self.channels, kernel_size = 3, stride = self.stride, padding = self.padding) self.bn_2 = nn.BatchNorm2d(self.channels) self.prelu_2 = nn.PReLU() self.conv_3 = nn.Conv2d(in_channels = self.channels, out_channels = self.channels, kernel_size = 5, stride = self.stride, padding = self.padding + 1) self.bn_3 = nn.BatchNorm2d(self.channels) def forward(self, x): identity = x x = self.prelu_1(self.bn_1(self.conv_1(x))) x = self.bn_2(self.conv_2(x)) + self.bn_3(self.conv_3(identity)) x = self.prelu_2(x) return x<choose_model_class>
train_data[train_data['Ticket'] == '113572']
Titanic - Machine Learning from Disaster
14,398,634
class ModInception(nn.Module): def __init__(self, channels = 256, stride = 1, padding = 1): super(ModInception, self).__init__() self.channels = channels self.stride = stride self.padding = padding self.conv_1 = nn.Conv2d(in_channels = self.channels, out_channels = 70, kernel_size = 1, stride = self.stride, padding = 0) self.conv_2 = nn.Conv2d(in_channels = self.channels, out_channels = 60, kernel_size = 3, stride = self.stride, padding = 1) self.conv_3 = nn.Conv2d(in_channels = self.channels, out_channels = 126, kernel_size = 5, stride = self.stride, padding = 2) self.bn = nn.BatchNorm2d(self.channels) self.prelu = nn.PReLU() def forward(self, x): x = torch.cat([self.conv_1(x), self.conv_2(x), self.conv_3(x)], dim=1) x = self.prelu(self.bn(x)) return x<choose_model_class>
Titanic - Machine Learning from Disaster
14,398,634
class ResNet(nn.Module): def __init__(self, block, incp_block): super(ResNet, self).__init__() self.block = block self.incp_block = incp_block self.input_conv = nn.Sequential( nn.Conv2d(in_channels = 3, out_channels = 64, kernel_size = 3, padding = 1), nn.BatchNorm2d(64), nn.PReLU(), nn.Conv2d(in_channels = 64, out_channels = 128, kernel_size = 3, padding = 1), nn.BatchNorm2d(128), nn.PReLU(), nn.Conv2d(in_channels = 128, out_channels = 256, kernel_size = 3, padding = 1), nn.BatchNorm2d(256), nn.PReLU(), ) self.layer_64x64 = self.make_layers(3) self.layer_32x32 = self.make_layers(2) self.layer_16x16 = self.make_layers(2) self.layer_8x8 = self.make_layers(2) self.layer_4x4 = self.make_layers(2) self.downsample_conv_1 = nn.Sequential( nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 2, stride = 2), nn.BatchNorm2d(256), nn.PReLU() ) self.downsample_conv_2 = nn.Sequential( nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 2, stride = 2), nn.BatchNorm2d(256), nn.PReLU() ) self.downsample_conv_3 = nn.Sequential( nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 2, stride = 2), nn.BatchNorm2d(256), nn.PReLU() ) self.downsample_conv_4 = nn.Sequential( nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 2, stride = 2), nn.BatchNorm2d(256), nn.PReLU() ) self.downsample_conv_5 = nn.Sequential( nn.Conv2d(in_channels = 256, out_channels = 128, kernel_size = 2, stride = 2), nn.BatchNorm2d(128), nn.PReLU() ) self.V_classifier = nn.Sequential( nn.Linear(512, 256), nn.BatchNorm1d(256), nn.PReLU(), nn.Linear(256, 128), nn.BatchNorm1d(128), nn.PReLU(), nn.Linear(128, 64), nn.BatchNorm1d(64), nn.PReLU(), nn.Linear(64, 32), nn.BatchNorm1d(32), nn.PReLU(), nn.Linear(32, 16), nn.BatchNorm1d(16), nn.PReLU(), nn.Linear(16, 10) ) self.C_classifier = nn.Sequential( nn.Linear(512, 256), nn.BatchNorm1d(256), nn.PReLU(), nn.Linear(256, 128), nn.BatchNorm1d(128), nn.PReLU(), nn.Linear(128, 64), nn.BatchNorm1d(64), nn.PReLU(), nn.Linear(64, 32), nn.BatchNorm1d(32), nn.PReLU(), nn.Linear(32, 16), nn.BatchNorm1d(16), nn.PReLU(), nn.Linear(16, 10) ) def make_layers(self, layers): res_layers = [] for i in range(layers): res_layers.append(self.block()) res_layers.append(self.incp_block()) return nn.Sequential(*res_layers) def forward(self, x): x = self.input_conv(x) x = self.layer_64x64(x) x = self.downsample_conv_1(x) x = self.layer_32x32(x) x = self.downsample_conv_2(x) x = self.layer_16x16(x) x = self.downsample_conv_3(x) x = self.layer_8x8(x) x = self.downsample_conv_4(x) x = self.layer_4x4(x) x = self.downsample_conv_5(x) x = x.view(x.shape[0], -1) out_1 = self.V_classifier(x) out_2 = self.C_classifier(x) return out_1, out_2<create_dataframe>
train_data['Age'] = train_data['Age'].fillna(train_data.groupby(['Pclass','Sex','Survived'])['Age'].transform('median')) test_data['Age'] = test_data['Age'].fillna(test_data.groupby(['Pclass','Sex'])['Age'].transform('median'))
Titanic - Machine Learning from Disaster
14,398,634
data = HindiDataset(train_path, test_path, transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), ]), train = True) train_size = int(0.9 * len(data)) test_size = len(data) - train_size train_data, validation_data = random_split(data, [train_size, test_size]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=32, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=32, shuffle=False)<choose_model_class>
train_data['IsChild'] = np.where(train_data['Age'] <= 10, 'Yes', 'No') test_data['IsChild'] = np.where(test_data['Age'] <= 10, 'Yes', 'No')
Titanic - Machine Learning from Disaster
14,398,634
deepNet = ResNet(BasicBlock, ModInception).to(device) cnn_ce_loss = nn.CrossEntropyLoss() cnnet_optim = optim.Adam(deepNet.parameters(), lr = 0.0002, weight_decay=0)<feature_engineering>
train_data = train_data[train_data['Ticket'] != '113572']
Titanic - Machine Learning from Disaster
14,398,634
def get_accuracy(output, label, batch_size): label = label.detach().cpu().numpy().squeeze() output = output.detach().cpu() _, indices = torch.max(output, dim=1) output = torch.zeros_like(output) itr = iter(indices) for i in range(output.shape[0]): output[i, int(next(itr))] = 1 label = torch.tensor(np.eye(10)[label]).float() diff = torch.sum(torch.abs(output - label))/(2*output.shape[0]) acc = 100 - (100 * diff) return acc<train_on_grid>
y = train_data["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch", "Age"] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
14,556,854
epochs = 85 Costs = [] Accuracy = [] deepNet = deepNet.train() for epoch in range(epochs): acc = 0 count = 0 for i, batch in enumerate(train_loader): count += 1 images, label = batch images = images.to(device) label['Vowel'] = label['Vowel'].to(device).long() label['Consonant'] = label['Consonant'].to(device).long() cnnet_optim.zero_grad() out_1, out_2 = deepNet(images) loss_1 = cnn_ce_loss(out_1, label['Vowel']) loss_2 = cnn_ce_loss(out_2, label['Consonant']) loss = loss_1 + loss_2 loss.backward() cnnet_optim.step() cost = loss.item() acc1 = get_accuracy(out_1, label['Vowel'], images.shape[0]) acc2 = get_accuracy(out_2, label['Consonant'], images.shape[0]) acc = acc + (acc1 + acc2)/2 Accuracy.append(acc/(10*count)) Costs.append(cost) print("Epoch [{}/{}], Loss : {}, Accuracy : {}, acc_1 : {}, acc_2 : {}".format((epoch+1), epochs, round(float(cost), 2), round(float(Accuracy[-1].cpu()), 2), round(float(acc1.cpu()), 2), round(float(acc2.cpu()), 2))) plt.title("Loss and Accuracy with iterations") plt.plot(Costs, label = 'Cost') plt.plot(Accuracy, label = 'Accuracy') plt.xlabel("Iterations") plt.ylabel("Loss and Accuracy") plt.legend() plt.show() count = 0 acc = 0 deepNet = deepNet.eval() for i, batch in enumerate(train_loader): count += 1 images, label = batch images = images.to(device) label['Vowel'] = label['Vowel'].to(device).long() label['Consonant'] = label['Consonant'].to(device).long() out_1, out_2 = deepNet(images) out_1, out_2 = F.log_softmax(out_1, dim = 1), F.log_softmax(out_2, dim = 1) acc1 = get_accuracy(out_1, label['Vowel'], images.shape[0]) acc2 = get_accuracy(out_2, label['Consonant'], images.shape[0]) acc = acc + (acc1 + acc2)/2 print("Train Accuracy : {}".format(acc/count))<compute_train_metric>
%matplotlib inline
Titanic - Machine Learning from Disaster
14,556,854
deepNet = deepNet.eval() count = 0 acc = 0 for i, batch in enumerate(validation_loader): count += 1 images, label = batch images = images.to(device) label['Vowel'] = label['Vowel'].to(device).long() label['Consonant'] = label['Consonant'].to(device).long() out_1, out_2 = deepNet(images) out_1, out_2 = F.log_softmax(out_1, dim = 1), F.log_softmax(out_2, dim = 1) acc1 = get_accuracy(out_1, label['Vowel'], images.shape[0]) acc2 = get_accuracy(out_2, label['Consonant'], images.shape[0]) acc = acc + (acc1 + acc2)/2 print("Test Accuracy : {}".format(acc/count))<create_dataframe>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
Titanic - Machine Learning from Disaster
14,556,854
test_data = HindiDataset(train_path, test_path, transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), train = False) testset = DataLoader(test_data, batch_size = 32, shuffle = False)<find_best_params>
def impute_age(cols): Age = cols[0] Pclass = cols[1] if pd.isnull(Age): if Pclass==1: return 37 elif Pclass == 2: return 29 else: return 24 else: return Age
Titanic - Machine Learning from Disaster
14,556,854
preds = {} img_ids = [] F_results = [] deepNet = deepNet.eval() for i, batch in enumerate(testset): images, img_names = batch images = images.to(device) out_1, out_2 = deepNet(images) out_1, out_2 = F.log_softmax(out_1, dim = 1), F.log_softmax(out_2, dim = 1) out_1, out_2 = torch.max(out_1, dim=1)[1].cpu(), torch.max(out_2, dim=1)[1].cpu() for names,V,C in zip(img_names, out_1, out_2): F_results.append('V' + str(int(V)) + '_C' + str(int(C))) img_ids.append(names) if i%50 == 0: print("{} images tested.... ".format((i + 1) * images.shape[0]))<save_to_csv>
train['Age'] = train[['Age','Pclass']].apply(impute_age,axis = 1)
Titanic - Machine Learning from Disaster