kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
777,941 | params = {
"objective": "poisson",
"metric": "rmse",
"force_row_wise": True,
"learning_rate": 0.075,
"sub_row": 0.75,
"bagging_freq": 1,
"lambda_l2": 0.1,
"verbosity": 1,
"num_iterations": 2500,
}<train_model> | train_df_dec
R=train_data.groupby(["RGroup"])["Survived"].agg(["count","sum"])
R.reset_index(inplace=True)
R["percentage"]=R["sum"]/R["count"]
R.drop(columns={"count","sum"},inplace=True ) | Titanic - Machine Learning from Disaster |
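Since the per-group survival rate is just the sum normalized by the count, the aggregation steps above collapse to a group mean; a minimal equivalent sketch:

# Equivalent one-liner: the sum/count ratio of a 0/1 column is its mean.
R = train_data.groupby("RGroup", as_index=False)["Survived"].mean().rename(columns={"Survived": "percentage"})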
777,941 | %%time
m_lgb = lgb.train(params, train_data, valid_sets = [fake_valid_data], verbose_eval=100 )<save_model> | R.set_index("RGroup",inplace=True)
train_data["percentage"]=np.nan
train_data.set_index("RGroup",inplace=True)
train_data.update(R)
train_data.reset_index(inplace=True)
R.reset_index(inplace=True)
| Titanic - Machine Learning from Disaster |
777,941 | m_lgb.save_model("model.lgb" )<count_unique_values> | R.set_index("RGroup",inplace=True)
cv_data["percentage"]=np.nan
cv_data.set_index("RGroup",inplace=True)
cv_data.update(R)
cv_data.reset_index(inplace=True)
R.reset_index(inplace=True)
cv_data.loc[(cv_data["percentage"].isnull()),"percentage"]=0.5 | Titanic - Machine Learning from Disaster |
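The index-align-and-update pattern above is repeated verbatim for the train, cv, and test frames. A rough sketch of a hypothetical helper, assuming frames that share the RGroup key and a percentage column as here:

def fill_group_percentage(df, rates, key="RGroup", col="percentage", default=0.5):
    # Align on the group key, pull in each group's survival rate,
    # and fall back to a neutral prior for groups unseen in training.
    df[col] = float("nan")
    df = df.set_index(key)
    df.update(rates.set_index(key))
    df = df.reset_index()
    df.loc[df[col].isnull(), col] = default
    return df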
777,941 | sub.id.nunique() , sub["id"].str.contains("validation$" ).sum()<import_modules> | R.set_index("RGroup",inplace=True)
test_df["percentage"]=np.nan
test_df.set_index("RGroup",inplace=True)
test_df.update(R)
test_df.reset_index(inplace=True)
R.reset_index(inplace=True)
test_df.loc[(test_df["percentage"].isnull()),"percentage"]=0.5
| Titanic - Machine Learning from Disaster |
777,941 |
<save_to_csv> | Y_train=train_data["Survived"].copy()
X_train=train_data.drop(columns={"Survived"})
Y_cv=cv_data["Survived"].copy()
X_cv=cv_data.drop(columns={"Survived"})
X_train.drop(columns={"RGroup"},inplace=True)
X_cv.drop(columns={"RGroup"},inplace=True)
| Titanic - Machine Learning from Disaster |
777,941 | data1 = pd.read_csv('../input/blenddata2/M5 Darkmagic.csv' ).sort_values(by = 'id' ).reset_index(drop=True)
data2 = pd.read_csv('../input/blenddata/M5 ForecasteR v2.csv' ).sort_values(by = 'id' ).reset_index(drop=True)
submission = data1.copy()
for i in range(1,29):
data1['F'+str(i)] *= 1.02
for c in submission.columns :
if c != 'id' :
submission[c] = 0.33*data1[c] + 0.67*data2[c]
submission.to_csv('submission.csv',index=False )<import_modules> | scores=[]
for x in range(1,7):
clf=tree.DecisionTreeClassifier(criterion='entropy',max_depth=x)
clf=clf.fit(X_train,Y_train)
Y_pred=clf.predict(X_cv)
scores.append(metrics.accuracy_score(Y_cv,Y_pred))
print(scores ) | Titanic - Machine Learning from Disaster |
777,941 | import numpy as np
import pandas as pd<save_to_csv> | X_train.drop(columns={"percentage"},inplace=True)
X_cv.drop(columns={"percentage"},inplace=True)
test_df.drop(columns={"percentage"},inplace=True)
test_df.drop(columns={"RGroup"},inplace=True)
scores=[]
for x in range(1,18):
clf=tree.DecisionTreeClassifier(criterion='entropy',max_depth=x)
clf=clf.fit(X_train,Y_train)
Y_pred=clf.predict(X_cv)
scores.append(metrics.accuracy_score(Y_cv,Y_pred))
print(scores ) | Titanic - Machine Learning from Disaster |
777,941 | submission = pd.read_csv('../input/m5-forecast-attack-of-the-data-table/submission.csv')
for i in range(1,29):
submission['F'+str(i)] *= 1.04
submission.to_csv('submission.csv', index=False )<import_modules> | train_df_dec.drop(columns={"RGroup"},inplace=True)
Target_df=train_df_dec["Survived"]
train_df_dec.drop(columns={"Survived"},inplace=True ) | Titanic - Machine Learning from Disaster |
777,941 | import numpy as np
import pandas as pd<load_from_csv> | clf=tree.DecisionTreeClassifier(criterion='entropy',max_depth=4)
scores_cv = cross_val_score(clf, train_df_dec,Target_df, cv=10)
scores_cv | Titanic - Machine Learning from Disaster |
777,941 | data1 = pd.read_csv('../input/m5-more-data-table-and-xgb/submission_lgbm.csv')
data2 = pd.read_csv('../input/m5-dark-magic/submission.csv' )<define_variables> | clf=tree.DecisionTreeClassifier(criterion='entropy',max_depth=4)
clf=clf.fit(X_train,Y_train)
dot_data = tree.export_graphviz(clf, out_file=None,feature_names=X_train.columns.values.tolist())
graph = graphviz.Source(dot_data)
graph | Titanic - Machine Learning from Disaster |
777,941 | <define_variables><EOS> | Y_pred=clf.predict(test_df)
my_submission = pd.DataFrame({'PassengerId': test_passengerId, 'Survived': Y_pred})
my_submission.to_csv('submission.csv', index=False)
| Titanic - Machine Learning from Disaster |
373,189 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | pd.options.mode.chained_assignment = None | Titanic - Machine Learning from Disaster |
373,189 | sub_col = data1['id']<create_dataframe> | train_set = pd.read_csv('../input/train.csv')
test_set = pd.read_csv('../input/test.csv' ) | Titanic - Machine Learning from Disaster |
373,189 | all_cols = pd.DataFrame({})
all_cols['id'] = sub_col
all_cols[categories] = 0.60*data1[categories] + 0.40*data2[categories]<save_to_csv> | train_set.isnull().sum() | Titanic - Machine Learning from Disaster |
373,189 | all_cols.to_csv('sub.csv', index=False )<set_options> | test_set.isnull().sum() | Titanic - Machine Learning from Disaster |
373,189 | pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
warnings.filterwarnings('ignore' )<define_variables> | fare_set = train_set[['Fare','Survived']].copy()
cont_plot(fare_set, 'Fare', 'Survived', survival_palette, [1, 0], range(0,550,50)) | Titanic - Machine Learning from Disaster |
373,189 | INPUT_DIR_PATH = '../input/m5-forecasting-accuracy/'<split> | combined_set = [train_set, test_set]
for dataset in combined_set:
dataset["Age"].fillna(dataset["Age"].median() , inplace=True)
dataset["Fare"].fillna(dataset["Fare"].median() , inplace=True)
train_set["Embarked"].fillna(train_set["Embarked"].value_counts().index[0], inplace=True ) | Titanic - Machine Learning from Disaster |
373,189 | sell_prices_df, calendar_df, sales_train_validation_df, submission_df = read_data()<define_variables> | train_set.isnull().sum() | Titanic - Machine Learning from Disaster |
373,189 | NUM_ITEMS = sales_train_validation_df.shape[0]
DAYS_PRED = 28
nrows = 365 * 2 * NUM_ITEMS<categorify> | test_set.isnull().sum() | Titanic - Machine Learning from Disaster |
373,189 | def encode_categorical(df, cols):
for col in cols:
le = preprocessing.LabelEncoder()
not_null = df[col][df[col].notnull() ]
df[col] = pd.Series(le.fit_transform(not_null), index=not_null.index)
return df
calendar_df = encode_categorical(calendar_df, ["event_name_1", "event_type_1", "event_name_2", "event_type_2"] ).pipe(reduce_mem_usage)
sales_train_validation_df = encode_categorical(sales_train_validation_df, ["item_id", "dept_id", "cat_id", "store_id", "state_id"] ).pipe(reduce_mem_usage)
sell_prices_df = encode_categorical(sell_prices_df, ["item_id", "store_id"] ).pipe(reduce_mem_usage )<categorify> | for dataset in combined_set:
dataset['Family'] = ''
dataset.loc[dataset['FamilySize'] == 0, 'Family'] = 'alone'
dataset.loc[(dataset['FamilySize'] > 0)&(dataset['FamilySize'] <= 3), 'Family'] = 'small'
dataset.loc[(dataset['FamilySize'] > 3)&(dataset['FamilySize'] <= 6), 'Family'] = 'medium'
dataset.loc[dataset['FamilySize'] > 6, 'Family'] = 'large' | Titanic - Machine Learning from Disaster |
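The encode_categorical calls above pipe each frame through reduce_mem_usage, which is defined elsewhere in the notebook. A minimal sketch of the usual Kaggle-style helper (an assumption, not the author's exact code):

import numpy as np
import pandas as pd

def reduce_mem_usage(df):
    # Downcast each numeric column to the smallest dtype that still
    # holds its values, shrinking the frame's memory footprint.
    for col in df.columns:
        if np.issubdtype(df[col].dtype, np.integer):
            df[col] = pd.to_numeric(df[col], downcast="integer")
        elif np.issubdtype(df[col].dtype, np.floating):
            df[col] = pd.to_numeric(df[col], downcast="float")
    return df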
373,189 | def melt_and_merge(calendar, sell_prices, sales_train_validation, submission, nrows = 55000000, merge = False):
sales_train_validation = pd.melt(sales_train_validation, id_vars = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'], var_name = 'day', value_name = 'demand')
print('Melted sales train validation has {} rows and {} columns'.format(sales_train_validation.shape[0], sales_train_validation.shape[1]))
sales_train_validation = reduce_mem_usage(sales_train_validation)
sales_train_validation = sales_train_validation.iloc[-nrows:,:]
test1_rows = [row for row in submission['id'] if 'validation' in row]
test2_rows = [row for row in submission['id'] if 'evaluation' in row]
test1 = submission[submission['id'].isin(test1_rows)]
test2 = submission[submission['id'].isin(test2_rows)]
test1.columns = ['id', 'd_1914', 'd_1915', 'd_1916', 'd_1917', 'd_1918', 'd_1919', 'd_1920', 'd_1921', 'd_1922', 'd_1923', 'd_1924', 'd_1925', 'd_1926', 'd_1927', 'd_1928', 'd_1929', 'd_1930', 'd_1931',
'd_1932', 'd_1933', 'd_1934', 'd_1935', 'd_1936', 'd_1937', 'd_1938', 'd_1939', 'd_1940', 'd_1941']
test2.columns = ['id', 'd_1942', 'd_1943', 'd_1944', 'd_1945', 'd_1946', 'd_1947', 'd_1948', 'd_1949', 'd_1950', 'd_1951', 'd_1952', 'd_1953', 'd_1954', 'd_1955', 'd_1956', 'd_1957', 'd_1958', 'd_1959',
'd_1960', 'd_1961', 'd_1962', 'd_1963', 'd_1964', 'd_1965', 'd_1966', 'd_1967', 'd_1968', 'd_1969']
product = sales_train_validation[['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']].drop_duplicates()
test2['id'] = test2['id'].str.replace('_evaluation','_validation')
test1 = test1.merge(product, how = 'left', on = 'id')
test2 = test2.merge(product, how = 'left', on = 'id')
test2['id'] = test2['id'].str.replace('_validation','_evaluation')
test1 = pd.melt(test1, id_vars = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'], var_name = 'day', value_name = 'demand')
test2 = pd.melt(test2, id_vars = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id'], var_name = 'day', value_name = 'demand')
sales_train_validation['part'] = 'train'
test1['part'] = 'test1'
test2['part'] = 'test2'
data = pd.concat([sales_train_validation, test1, test2], axis = 0)
del sales_train_validation, test1, test2
print(data.shape)
calendar.drop(['weekday', 'wday', 'month', 'year'], inplace = True, axis = 1)
data = data[data['part'] != 'test2']
if merge:
data = pd.merge(data, calendar, how = 'left', left_on = ['day'], right_on = ['d'])
data.drop(['d', 'day'], inplace = True, axis = 1)
data = data.merge(sell_prices, on = ['store_id', 'item_id', 'wm_yr_wk'], how = 'left')
print('Our final dataset to train has {} rows and {} columns'.format(data.shape[0], data.shape[1]))
else:
pass
gc.collect()
return data<create_dataframe> | title_dict = {
"Mr" : "Mr",
"Miss" : "Miss",
"Mrs" : "Mrs",
"Master" : "Master",
"Dr": "Scholar",
"Rev": "Religious",
"Col": "Officer",
"Major": "Officer",
"Mlle": "Miss",
"Don": "Noble",
"the Countess":"Noble",
"Ms": "Mrs",
"Mme": "Mrs",
"Capt": "Noble",
"Lady" : "Noble",
"Sir" : "Noble",
"Jonkheer": "Noble"
}
for dataset in combined_set:
dataset['TitleGroup'] = dataset.Title.map(title_dict ) | Titanic - Machine Learning from Disaster |
373,189 | nrows = 27500000
data = melt_and_merge(calendar_df, sell_prices_df, sales_train_validation_df, submission_df, nrows = nrows, merge = True)
<categorify> | print(test_set[test_set['TitleGroup'].isnull() == True] ) | Titanic - Machine Learning from Disaster |
373,189 | def transform(data):
nan_features = ['event_name_1', 'event_type_1', 'event_name_2', 'event_type_2']
for feature in nan_features:
data[feature].fillna('unknown', inplace = True)
cat = ['item_id', 'dept_id', 'cat_id', 'store_id', 'state_id', 'event_name_1', 'event_type_1', 'event_name_2', 'event_type_2']
for feature in cat:
encoder = preprocessing.LabelEncoder()
data[feature] = encoder.fit_transform(data[feature])
return data
def simple_fe(data):
for val in [28, 29, 30]:
data[f"shift_t{val}"] = data.groupby(["id"])["demand"].transform(lambda x: x.shift(val))
for val in [7, 30, 60, 90, 180]:
data[f"rolling_std_t{val}"] = data.groupby(["id"])["demand"].transform(lambda x: x.shift(28 ).rolling(val ).std())
for val in [7, 30, 60, 90, 180]:
data[f"rolling_mean_t{val}"] = data.groupby(["id"])["demand"].transform(lambda x: x.shift(28 ).rolling(val ).mean())
data["rolling_skew_t30"] = data.groupby(["id"])["demand"].transform(lambda x: x.shift(28 ).rolling(30 ).skew())
data["rolling_kurt_t30"] = data.groupby(["id"])["demand"].transform(lambda x: x.shift(28 ).rolling(30 ).kurt())
data['lag_price_t1'] = data.groupby(['id'])['sell_price'].transform(lambda x: x.shift(1))
data['price_change_t1'] =(data['lag_price_t1'] - data['sell_price'])/(data['lag_price_t1'])
data['rolling_price_max_t365'] = data.groupby(['id'])['sell_price'].transform(lambda x: x.shift(1 ).rolling(365 ).max())
data['price_change_t365'] =(data['rolling_price_max_t365'] - data['sell_price'])/(data['rolling_price_max_t365'])
data['rolling_price_std_t7'] = data.groupby(['id'])['sell_price'].transform(lambda x: x.rolling(7 ).std())
data['rolling_price_std_t30'] = data.groupby(['id'])['sell_price'].transform(lambda x: x.rolling(30 ).std())
data.drop(['rolling_price_max_t365', 'lag_price_t1'], inplace = True, axis = 1)
data['date'] = pd.to_datetime(data['date'])
attrs = ["year", "quarter", "month", "week", "day", "dayofweek", "is_year_end", "is_year_start", "is_quarter_end", \
"is_quarter_start", "is_month_end","is_month_start",
]
for attr in attrs:
dtype = np.int16 if attr == "year" else np.int8
data[attr] = getattr(data['date'].dt, attr ).astype(dtype)
data["is_weekend"] = data["dayofweek"].isin([5, 6] ).astype(np.int8)
return data
<define_variables> | test_set.at[414, 'TitleGroup'] = 'Noble' | Titanic - Machine Learning from Disaster |
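Every demand feature in simple_fe shifts the series by at least 28 days before rolling, so nothing from the 28-day forecast horizon leaks into training. A toy frame (hypothetical data, smaller windows) shows the shift-then-roll pattern:

import pandas as pd

toy = pd.DataFrame({"id": ["a"] * 6, "demand": [1, 2, 3, 4, 5, 6]})
# Shift first so only past values enter the window, then roll over the shifted series.
toy["rolling_mean"] = toy.groupby("id")["demand"].transform(lambda x: x.shift(2).rolling(3).mean())
print(toy)  # rows 0-3 are NaN: 2 shifted slots plus a window of 3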
373,189 | features = [
"item_id", "dept_id", "cat_id", "store_id", "state_id", "event_name_1", "event_type_1", "snap_CA", "snap_TX", \
"snap_WI", "sell_price", \
"shift_t28", "rolling_std_t7", "rolling_std_t30", "rolling_std_t90", "rolling_std_t180", \
"rolling_mean_t7", "rolling_mean_t30", "rolling_mean_t60", \
"price_change_t1", "price_change_t365", "rolling_price_std_t7",
"year", "month", "dayofweek",
]
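# Leftover tuple of feature names from another variant of this notebook;
# it is a bare expression and has no effect on the features list above.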
("wday", "month", "year",
"event_name_1", "event_type_1",
"snap_CA", "snap_TX", "snap_WI",
"sell_price", "sell_price_rel_diff", "sell_price_cumrel", "sell_price_roll_sd7",
"lag_t28", "rolling_mean_t7", "rolling_mean_t30", "rolling_mean_t60",
"rolling_mean_t90", "rolling_mean_t180", "rolling_sd_t7", "rolling_sd_t30",
"item_id", "dept_id", "cat_id", "store_id", "state_id" )<prepare_x_and_y> | X_train = train_set.drop(['Survived','PassengerId','Name','Age','Fare','Ticket','Cabin','SibSp','Parch','Title','FamilySize'], axis=1)
X_test = test_set.drop(['PassengerId','Name','Age','Fare','Ticket','Cabin','SibSp','Parch','Title','FamilySize'], axis=1)
y_train = train_set['Survived'] | Titanic - Machine Learning from Disaster |
373,189 | def run_lgb(data):
x_train = data[data['date'] <= '2016-03-27']
y_train = x_train['demand']
x_val = data[(data['date'] > '2016-03-27')&(data['date'] <= '2016-04-24')]
y_val = x_val['demand']
test = data[(data['date'] > '2016-04-24')]
del data
gc.collect()
params = {
'metric': 'rmse',
'objective': 'poisson',
'n_jobs': -1,
'seed': 20,
'learning_rate': 0.1,
'alpha': 0.1,
'lambda': 0.1,
'bagging_fraction': 0.66,
'bagging_freq': 2,
'colsample_bytree': 0.77}
train_set = lgb.Dataset(x_train[features], y_train)
val_set = lgb.Dataset(x_val[features], y_val)
del x_train, y_train
model = lgb.train(params, train_set, num_boost_round = 2000, early_stopping_rounds = 200, valid_sets = [train_set, val_set], verbose_eval = 100)
joblib.dump(model, 'lgbm_0.sav')
val_pred = model.predict(x_val[features], num_iteration=model.best_iteration)
val_score = np.sqrt(metrics.mean_squared_error(val_pred, y_val))
print(f'Our val rmse score is {val_score}')
y_pred = model.predict(test[features], num_iteration=model.best_iteration)
test['demand'] = y_pred
return test
def predict(test, submission):
predictions = test[['id', 'date', 'demand']]
predictions = pd.pivot(predictions, index = 'id', columns = 'date', values = 'demand' ).reset_index()
predictions.columns = ['id'] + ['F' + str(i + 1)for i in range(28)]
evaluation_rows = [row for row in submission['id'] if 'evaluation' in row]
evaluation = submission[submission['id'].isin(evaluation_rows)]
validation = submission[['id']].merge(predictions, on = 'id')
final = pd.concat([validation, evaluation])
final.to_csv('submission.csv', index = False)
def transform_train_and_eval(data):
data = simple_fe(data)
data = reduce_mem_usage(data)
test = run_lgb(data)
predict(test, submission_df)
<categorify> | X_train_analysis = X_train.copy()
X_train_analysis['Sex'] = X_train_analysis['Sex'].map({'male': 0, 'female': 1} ).astype(int)
X_train_analysis['Embarked'] = X_train_analysis['Embarked'].map({'C': 0, 'Q': 1, 'S': 2} ).astype(int)
X_train_analysis['Family'] = X_train_analysis['Family'].map({'alone': 0, 'small': 1, 'medium': 2, 'large': 3} ).astype(int)
agerange_dict = dict(zip(age_labels, list(range(len(age_labels)))))
X_train_analysis['AgeRange'] = X_train_analysis['AgeRange'].map(agerange_dict ).astype(int)
farerange_dict = dict(zip(fare_labels, list(range(len(fare_labels)))))
X_train_analysis['FareRange'] = X_train_analysis['FareRange'].map(farerange_dict ).astype(int)
titlegroup_labels = list(set(title_dict.values()))
titlegroup_dict = dict(zip(titlegroup_labels, list(range(len(titlegroup_labels)))))
X_train_analysis['TitleGroup'] = X_train_analysis['TitleGroup'].map(titlegroup_dict ).astype(int ) | Titanic - Machine Learning from Disaster |
373,189 | transform_train_and_eval(data )<install_modules> | rforest_checker = RandomForestClassifier(random_state = 0)
rforest_checker.fit(X_train_analysis, y_train)
importances_df = pd.DataFrame(rforest_checker.feature_importances_, columns=['Feature_Importance'],
index=X_train_analysis.columns)
importances_df.sort_values(by=['Feature_Importance'], ascending=False, inplace=True)
print(importances_df ) | Titanic - Machine Learning from Disaster |
373,189 | !pip install bert-for-tf2
!pip install sentencepiece<set_options> | my_imp_dict = {'Feature Importance' : pd.Series([0.360313, 0.113686, 0.109495, 0.103845, 0.100966, 0.099818, 0.056429, 0.055449],
index=['TitleGroup', 'Family', 'Pclass', 'Sex','FareRange', 'AgeRange', 'HasCabin', 'Embarked'])}
my_imp_df = pd.DataFrame(my_imp_dict)
print(my_imp_df ) | Titanic - Machine Learning from Disaster |
373,189 | try:
%tensorflow_version 2.x
except Exception:
pass
<feature_engineering> | X_train = X_train.drop(['HasCabin','Embarked'], axis=1)
X_test = X_test.drop(['HasCabin','Embarked'], axis=1 ) | Titanic - Machine Learning from Disaster |
373,189 | FullTokenizer = bert.bert_tokenization.FullTokenizer
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1", trainable=False)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case )<load_from_csv> | X_train = pd.get_dummies(X_train, columns=['TitleGroup','Family','Pclass','Sex','AgeRange','FareRange'])
X_test = pd.get_dummies(X_test, columns=['TitleGroup','Family','Pclass','Sex','AgeRange','FareRange'] ) | Titanic - Machine Learning from Disaster |
373,189 | train_cols = ["id", "keyword", "location", "text", "target"]
train = pd.read_csv(
"/kaggle/input/nlp-getting-started/train.csv",
header=None,
names=train_cols,
skiprows=1,
engine="python",
encoding="latin1"
)
test_cols = ["id", "keyword", "location", "text"]
test = pd.read_csv(
"/kaggle/input/nlp-getting-started/test.csv",
header=None,
names=test_cols,
skiprows=1,
engine="python",
encoding="latin1"
)
def fix_keyword(x):
return str(x[1] ).replace('%20', ' ')
def new_text(x):
return str(x[1])+ ' ' + str(x[2])+ ' ' +str(x[3])
def clean_tweet(tweet):
tweet = re.sub(r"https?://[A-Za-z0-9./]+", ' ', tweet)
tweet = re.sub(r"@[A-Za-z0-9]+", ' ', tweet)
tweet = re.sub(r"[^a-zA-Z0-9!?']", ' ', tweet)
tweet = re.sub(r" +", ' ', tweet)
tweet = re.sub(r"\?+", ' Q', tweet)
tweet = re.sub(r"\!+", ' X', tweet)
return tweet
def encode_sentence(sent):
return ["[CLS]"] + tokenizer.tokenize(sent)+ ["[SEP]"]
train.keyword = train.apply(fix_keyword, axis=1)
train['new_text'] = train.apply(new_text, axis=1)
train_clean = [clean_tweet(tweet)for tweet in train.new_text]
train_inputs = [encode_sentence(sentence)for sentence in train_clean]
train_labels = train.target.values
test.keyword = test.apply(fix_keyword, axis=1)
test['new_text'] = test.apply(new_text, axis=1)
test_clean = [clean_tweet(tweet)for tweet in test.new_text]
test_inputs = [encode_sentence(sentence)for sentence in test_clean]<categorify> | X_train = X_train.drop(['Pclass_1','Sex_female','TitleGroup_Master','AgeRange_15-','FareRange_10-','Family_alone'], axis=1)
X_test = X_test.drop(['Pclass_1','Sex_female','TitleGroup_Master','AgeRange_15-','FareRange_10-','Family_alone'], axis=1 ) | Titanic - Machine Learning from Disaster |
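Dropping one dummy per categorical group (Pclass_1, Sex_female, and so on) removes the redundant reference level so the encoded columns are not perfectly collinear. pandas can produce the same effect in one step with drop_first=True, though it drops each group's alphabetically first level rather than this hand-picked set; a sketch:

cat_cols = ['TitleGroup', 'Family', 'Pclass', 'Sex', 'AgeRange', 'FareRange']
# One reference level per group is dropped automatically.
X_train = pd.get_dummies(X_train, columns=cat_cols, drop_first=True)
X_test = pd.get_dummies(X_test, columns=cat_cols, drop_first=True)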
373,189 | def get_ids(tokens):
return tokenizer.convert_tokens_to_ids(tokens)
def get_mask(tokens):
return np.char.not_equal(tokens, "[PAD]" ).astype(int)
def get_segments(tokens):
seg_ids = []
current_seg_id = 0
for tok in tokens:
seg_ids.append(current_seg_id)
if tok == "[SEP]":
current_seg_id = 1-current_seg_id
return seg_ids<randomize_order> | params_logreg = [{'C': [0.01, 0.1, 1, 10, 100], 'penalty': ['l1','l2']}]
grid_logreg = GridSearchCV(estimator = LogisticRegression(solver = 'liblinear'),
param_grid = params_logreg,
scoring = 'accuracy',
cv = 10)
grid_logreg = grid_logreg.fit(X_train, y_train)
best_acc_logreg = grid_logreg.best_score_
best_params_logreg = grid_logreg.best_params_ | Titanic - Machine Learning from Disaster |
373,189 | data_with_len = [[sent, train_labels[i], len(sent)]
for i, sent in enumerate(train_inputs)]
random.shuffle(data_with_len)
data_with_len.sort(key=lambda x: x[2])
train_all = [
(
[
get_ids(sent_lab[0]),
get_mask(sent_lab[0]),
get_segments(sent_lab[0])
],
sent_lab[1]
)
for sent_lab in data_with_len]
<create_dataframe> | params_ksvm = [{'C': [0.1, 1, 10, 100], 'kernel': ['linear']},
{'C': [0.1, 1, 10, 100], 'kernel': ['rbf'],
'gamma': [0.1, 0.2, 0.3, 0.4, 0.5]},
{'C': [0.1, 1, 10, 100], 'kernel': ['poly'],
'degree': [1, 2, 3],
'gamma': [0.1, 0.2, 0.3, 0.4, 0.5]}]
grid_ksvm = GridSearchCV(estimator = SVC(random_state = 0),
param_grid = params_ksvm,
scoring = 'accuracy',
cv = 10,
n_jobs=-1)
grid_ksvm = grid_ksvm.fit(X_train, y_train)
best_acc_ksvm = grid_ksvm.best_score_
best_params_ksvm = grid_ksvm.best_params_ | Titanic - Machine Learning from Disaster |
373,189 | all_dataset = tf.data.Dataset.from_generator(lambda: train_all, output_types=(tf.int32, tf.int32))<define_variables> | params_dtree = [{'min_samples_split': [5, 10, 15, 20],
'min_samples_leaf': [1, 2, 3],
'max_features': ['auto', 'log2']}]
grid_dtree = GridSearchCV(estimator = DecisionTreeClassifier(criterion = 'gini',
random_state = 0),
param_grid = params_dtree,
scoring = 'accuracy',
cv = 10,
n_jobs=-1)
grid_dtree = grid_dtree.fit(X_train, y_train)
best_acc_dtree = grid_dtree.best_score_
best_params_dtree = grid_dtree.best_params_ | Titanic - Machine Learning from Disaster |
373,189 | BATCH_SIZE = 32
all_batched = all_dataset.padded_batch(BATCH_SIZE,
padded_shapes=(( 3, None),()),
padding_values=(0, 0))
NB_BATCHES = math.ceil(len(train_all)/ BATCH_SIZE)
NB_BATCHES_TEST = NB_BATCHES // 10
all_batched = all_batched.shuffle(NB_BATCHES)
test_dataset = all_batched.take(NB_BATCHES_TEST)
train_dataset = all_batched.skip(NB_BATCHES_TEST )<choose_model_class> | params_rforest = [{'n_estimators': [200, 300],
'max_depth': [5, 7, 10],
'min_samples_split': [2, 4]}]
grid_rforest = GridSearchCV(estimator = RandomForestClassifier(criterion = 'gini',
random_state = 0, n_jobs=-1),
param_grid = params_rforest,
scoring = 'accuracy',
cv = 10,
n_jobs=-1)
grid_rforest = grid_rforest.fit(X_train, y_train)
best_acc_rforest = grid_rforest.best_score_
best_params_rforest = grid_rforest.best_params_ | Titanic - Machine Learning from Disaster |
373,189 | class DCNNBERTEmbedding(tf.keras.Model):
def __init__(self,
nb_filters=50,
FFN_units=512,
nb_classes=2,
dropout_rate=0.1,
name="dcnn"):
super(DCNNBERTEmbedding, self ).__init__(name=name)
self.bert_layer = hub.KerasLayer(
"https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1",
trainable=True)
self.bigram = layers.Conv1D(filters=nb_filters,
kernel_size=2,
padding="valid",
activation="relu")
self.trigram = layers.Conv1D(filters=nb_filters,
kernel_size=3,
padding="valid",
activation="relu")
self.fourgram = layers.Conv1D(filters=nb_filters,
kernel_size=4,
padding="valid",
activation="relu")
self.pool = layers.GlobalMaxPool1D()
self.dense_1 = layers.Dense(units=FFN_units, activation="relu")
self.dropout = layers.Dropout(rate=dropout_rate)
if nb_classes == 2:
self.last_dense = layers.Dense(units=1,
activation="sigmoid")
else:
self.last_dense = layers.Dense(units=nb_classes,
activation="softmax")
def embed_with_bert(self, all_tokens):
_, embs = self.bert_layer([all_tokens[:, 0, :],
all_tokens[:, 1, :],
all_tokens[:, 2, :]])
return embs
def call(self, inputs, training):
x = self.embed_with_bert(inputs)
x_1 = self.bigram(x)
x_1 = self.pool(x_1)
x_2 = self.trigram(x)
x_2 = self.pool(x_2)
x_3 = self.fourgram(x)
x_3 = self.pool(x_3)
merged = tf.concat([x_1, x_2, x_3], axis=-1)
merged = self.dense_1(merged)
merged = self.dropout(merged, training)
output = self.last_dense(merged)
return output<define_variables> | grid_score_dict = {'Best Score': [best_acc_logreg,best_acc_ksvm,best_acc_dtree,best_acc_rforest],
'Optimized Parameters': [best_params_logreg,best_params_ksvm,best_params_dtree,best_params_rforest],
}
pd.DataFrame(grid_score_dict, index=['Logistic Regression','Kernel SVM','Decision Tree','Random Forest'] ) | Titanic - Machine Learning from Disaster |
373,189 | NB_FILTERS = 128
FFN_UNITS = 256
NB_CLASSES = 2
DROPOUT_RATE = 0.2
BATCH_SIZE = 32
NB_EPOCHS = 3<choose_model_class> | logreg = LogisticRegression(C = 1, penalty = 'l1', solver = 'liblinear')
logreg.fit(X_train, y_train)
y_pred_train_logreg = cross_val_predict(logreg, X_train, y_train)
y_pred_test_logreg = logreg.predict(X_test ) | Titanic - Machine Learning from Disaster |
373,189 | Dcnn = DCNNBERTEmbedding(nb_filters=NB_FILTERS,
FFN_units=FFN_UNITS,
nb_classes=NB_CLASSES,
dropout_rate=DROPOUT_RATE)
if NB_CLASSES == 2:
Dcnn.compile(loss="binary_crossentropy",
optimizer=tf.optimizers.Adam(learning_rate=2e-5),
metrics=["accuracy"])
else:
Dcnn.compile(loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["sparse_categorical_accuracy"])
Dcnn.fit(all_batched, epochs=NB_EPOCHS )<compute_test_metric> | ksvm = SVC(C = 1, gamma = 0.2, kernel = 'rbf', random_state = 0)
ksvm.fit(X_train, y_train)
y_pred_train_ksvm = cross_val_predict(ksvm, X_train, y_train)
y_pred_test_ksvm = ksvm.predict(X_test ) | Titanic - Machine Learning from Disaster |
373,189 | results = Dcnn.evaluate(test_dataset)
print(results )<feature_engineering> | dtree = DecisionTreeClassifier(criterion = 'gini', max_features='auto', min_samples_leaf=1, min_samples_split=5, random_state = 0)
dtree.fit(X_train, y_train)
y_pred_train_dtree = cross_val_predict(dtree, X_train, y_train)
y_pred_test_dtree = dtree.predict(X_test ) | Titanic - Machine Learning from Disaster |
373,189 | cols = ["id", "keyword", "location", "text"]
test = pd.read_csv(
"/kaggle/input/nlp-getting-started/test.csv",
header=None,
names=cols,
skiprows=1,
engine="python",
encoding="latin1"
)
test.keyword = test.apply(fix_keyword, axis=1)
test['new_text'] = test.apply(new_text, axis=1)
test_clean = [clean_tweet(tweet)for tweet in test.new_text]
test_inputs = [encode_sentence(sentence)for sentence in test_clean]<predict_on_test> | rforest = RandomForestClassifier(max_depth = 7, min_samples_split=4, n_estimators = 200, random_state = 0)
rforest.fit(X_train, y_train)
y_pred_train_rforest = cross_val_predict(rforest, X_train, y_train)
y_pred_test_rforest = rforest.predict(X_test ) | Titanic - Machine Learning from Disaster |
373,189 | preds = []
for sentence in test_inputs:
input = [[
get_ids(sentence),
get_mask(sentence),
get_segments(sentence)
]]
preds.append(int(np.round(Dcnn.predict(input)[0][0])))
if len(preds)% 100 == 0:
print('Predictions made:', len(preds))
<save_to_csv> | second_layer_train = pd.DataFrame({'Logistic Regression': y_pred_train_logreg.ravel() ,
'Kernel SVM': y_pred_train_ksvm.ravel() ,
'Decision Tree': y_pred_train_dtree.ravel() ,
'Random Forest': y_pred_train_rforest.ravel()
})
second_layer_train.head()
X_train_second = np.concatenate(( y_pred_train_logreg.reshape(-1, 1), y_pred_train_ksvm.reshape(-1, 1),
y_pred_train_dtree.reshape(-1, 1), y_pred_train_rforest.reshape(-1, 1)) ,
axis=1)
X_test_second = np.concatenate(( y_pred_test_logreg.reshape(-1, 1), y_pred_test_ksvm.reshape(-1, 1),
y_pred_test_dtree.reshape(-1, 1), y_pred_test_rforest.reshape(-1, 1)) ,
axis=1)
xgb = XGBClassifier(
n_estimators= 800,
max_depth= 4,
min_child_weight= 2,
gamma=0.9,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread= -1,
scale_pos_weight=1 ).fit(X_train_second, y_train)
y_pred = xgb.predict(X_test_second ) | Titanic - Machine Learning from Disaster |
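The second-layer features are built from cross_val_predict, i.e. out-of-fold predictions: every training row is predicted by a base model fitted without that row, which keeps the XGBoost meta-learner's training data free of leakage. A minimal illustration (hypothetical 5-fold split on the same frames):

from sklearn.model_selection import cross_val_predict

# Each entry of oof comes from the fold in which this row was held out.
oof = cross_val_predict(logreg, X_train, y_train, cv=5)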
373,189 | <load_from_url><EOS> | passengerId = np.array(test_set['PassengerId'] ).astype(int)
submission = pd.DataFrame({ 'PassengerId' : passengerId, 'Survived' : y_pred })
print(submission.shape)
submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
4,060,422 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<import_modules> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
4,060,422 | sns.set(style="darkgrid")
warnings.filterwarnings('ignore' )<load_from_csv> | file = '../input/train.csv'
df = pd.read_csv(file ) | Titanic - Machine Learning from Disaster |
4,060,422 | train = pd.read_csv('../input/nlp-getting-started/train.csv')
print('Training data shape: ', train.shape)
train.head()<load_from_csv> | df = df.drop(['PassengerId', 'Name', 'Ticket'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | test = pd.read_csv('../input/nlp-getting-started/test.csv')
print('Testing data shape: ', test.shape)
test.head()<count_missing_values> | df['Survived'].value_counts() | Titanic - Machine Learning from Disaster |
4,060,422 | train.isnull().sum()<count_missing_values> | df['SibSp'].value_counts() | Titanic - Machine Learning from Disaster |
4,060,422 | test.isnull().sum()<count_values> | df.isnull().sum(axis = 0 ) | Titanic - Machine Learning from Disaster |
4,060,422 | train['target'].value_counts()<create_dataframe> | df_hot = pd.concat([df, pd.get_dummies(df['Sex'])], axis=1)
df_hot = df_hot.drop(['Sex'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | train1 = train.copy()
test1 = test.copy()<categorify> | df_hot = pd.concat([df_hot, pd.get_dummies(df['Pclass'])], axis=1)
df_hot = df_hot.drop(['Pclass'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | def clean_text(text):
text = text.lower()
text = re.sub('\[.*?\]', '', text)
text = re.sub('https?://\S+|www\.\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\w*\d\w*', '', text)
text = re.sub('[‘’“”…]', '', text)
return text<drop_column> | df_hot = df_hot.rename(columns={df_hot.columns[9]:'Pclass1', df_hot.columns[10]:'Pclass2', df_hot.columns[11]:'Pclass3'} ) | Titanic - Machine Learning from Disaster |
4,060,422 | def remove_emoji(text):
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F"
u"\U0001F300-\U0001F5FF"
u"\U0001F680-\U0001F6FF"
u"\U0001F1E0-\U0001F1FF"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
return emoji_pattern.sub(r'', text)
train1['text'] = train1['text'].apply(lambda x: remove_emoji(x))
test1['text'] = test1['text'].apply(lambda x: remove_emoji(x))<feature_engineering> | df_hot['Embarked'].value_counts() | Titanic - Machine Learning from Disaster |
4,060,422 | def text_preprocessing(text):
tokenizer_reg = nltk.tokenize.RegexpTokenizer(r'\w+')
nopunc = clean_text(text)
tokenized_text = tokenizer_reg.tokenize(nopunc)
remove_stopwords = [w for w in tokenized_text if w not in stopwords.words('english')]
combined_text = ' '.join(remove_stopwords)
return combined_text
train1['text'] = train1['text'].apply(lambda x: text_preprocessing(x))
test1['text'] = test1['text'].apply(lambda x: text_preprocessing(x))
train1['text'].head()<feature_engineering> | df_hot['Embarked'].fillna("S", inplace = True ) | Titanic - Machine Learning from Disaster |
4,060,422 | count_vectorizer = CountVectorizer(ngram_range =(1,1), min_df = 1)
train_vectors = count_vectorizer.fit_transform(train1['text'])
test_vectors = count_vectorizer.transform(test1["text"])
train_vectors.shape<categorify> | pd.options.display.float_format = '{:.2f}'.format
le = LabelEncoder()
T = df_hot['Embarked']
encoded = le.fit_transform(np.ravel(T)) | Titanic - Machine Learning from Disaster |
4,060,422 | tfidf = TfidfVectorizer(ngram_range=(1, 2), min_df = 2, max_df = 0.5)
train_tfidf = tfidf.fit_transform(train1['text'])
test_tfidf = tfidf.transform(test1["text"])
train_tfidf.shape<compute_train_metric> | df_hot = pd.concat([df_hot, pd.DataFrame(encoded, columns = ['Emb'])], axis=1)
df_hot = df_hot.drop(['Embarked'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | logreg_bow = LogisticRegression(C=1.0)
scores = model_selection.cross_val_score(logreg_bow, train_vectors, train["target"], cv=5, scoring="f1")
scores.mean()<compute_train_metric> | df_hot = df_hot[['Survived', 'SibSp', 'Parch', 'Fare', 'Emb', 'Cabin', 'Age',
'female', 'male', 'Pclass1', 'Pclass2', 'Pclass3']] | Titanic - Machine Learning from Disaster |
4,060,422 | logreg_tfidf = LogisticRegression(C=1.0)
scores = model_selection.cross_val_score(logreg_tfidf, train_tfidf, train["target"], cv=5, scoring="f1")
scores.mean()<compute_train_metric> | df_hot = df_hot.drop(['Cabin'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | NB_bow = MultinomialNB()
scores = model_selection.cross_val_score(NB_bow, train_vectors, train["target"], cv=5, scoring="f1")
scores.mean()<compute_train_metric> | train_cols = list(df_hot ) | Titanic - Machine Learning from Disaster |
4,060,422 | NB_tfidf = MultinomialNB()
scores = model_selection.cross_val_score(NB_tfidf, train_tfidf, train["target"], cv=5, scoring="f1")
scores.mean()<train_model> | dfi = pd.DataFrame(KNN(k=5 ).fit_transform(df_hot))
dfi.columns = train_cols | Titanic - Machine Learning from Disaster |
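KNN(k=5) here is the fancyimpute nearest-neighbour imputer; scikit-learn ships an equivalent that could be swapped in (a sketch under that assumption, not the author's import):

from sklearn.impute import KNNImputer

# Fill each missing value from the 5 nearest complete rows in feature space.
dfi = pd.DataFrame(KNNImputer(n_neighbors=5).fit_transform(df_hot), columns=train_cols)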
4,060,422 | NB_bow.fit(train_vectors, train["target"] )<save_to_csv> | dfi.isnull().sum() | Titanic - Machine Learning from Disaster |
4,060,422 | sample_submission = pd.read_csv('../input/nlp-getting-started/sample_submission.csv')
sample_submission["target"] = NB_bow.predict(test_vectors)
os.chdir('/kaggle/working')
sample_submission.to_csv("submission1.csv", index=False )<categorify> | df1 = df.drop(['Age'], axis=1)
df1 = pd.concat([df1, dfi['Age']], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | def bert_encode(texts, tokenizer, max_len = 512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[:max_len-2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence)+ [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments )<choose_model_class> | df1['Embarked'].fillna("S", inplace = True ) | Titanic - Machine Learning from Disaster |
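bert_encode hand-builds the three fixed-length inputs BERT expects: token ids padded with 0, an attention mask that is 1 for real tokens, and all-zero segment ids for a single sentence. A quick hypothetical call to check the shapes:

ids, masks, segs = bert_encode(["a short example tweet"], tokenizer, max_len=16)
print(ids.shape, masks.shape, segs.shape)  # (1, 16) for each array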
4,060,422 | def build_model(bert_layer, max_len = 512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = Dense(1, activation='sigmoid' )(clf_output)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
return model<load_from_csv> | df1 = df1.drop(['Cabin'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv" )<categorify> | le = LabelEncoder()
T = df1['Sex']
encoded = le.fit_transform(np.ravel(T)) | Titanic - Machine Learning from Disaster |
4,060,422 |
<choose_model_class> | df1 = pd.concat([df1, pd.DataFrame(encoded, columns = ['Gender'])], axis=1)
df1 = df1.drop(['Sex'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True )<feature_engineering> | le = LabelEncoder()
T = df1['Embarked']
encoded = le.fit_transform(np.ravel(T)) | Titanic - Machine Learning from Disaster |
4,060,422 | vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case )<categorify> | df1 = pd.concat([df1, pd.DataFrame(encoded, columns = ['Emb'])], axis=1)
df1 = df1.drop(['Embarked'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values<train_model> | df1.isnull().sum() | Titanic - Machine Learning from Disaster |
4,060,422 | checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True)
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=3,
callbacks=[checkpoint],
batch_size=16
)<predict_on_test> | Titanic - Machine Learning from Disaster | |
4,060,422 | model.load_weights('model.h5')
test_pred = model.predict(test_input )<save_to_csv> | X = np.asarray(dfi.drop(['Survived'], axis = 1))
y = np.asarray(dfi[['Survived']] ) | Titanic - Machine Learning from Disaster |
4,060,422 | submission['target'] = test_pred.round().astype(int)
os.chdir('/kaggle/working')
submission.to_csv("submission2.csv", index=False )<define_variables> | X1 = np.asarray(df1.drop(['Survived'], axis = 1))
y1 = np.asarray(df1[['Survived']] ) | Titanic - Machine Learning from Disaster |
4,060,422 | %ls /kaggle/input/nlp-getting-started/<define_variables> | X = preprocessing.StandardScaler().fit(X ).transform(X ) | Titanic - Machine Learning from Disaster |
4,060,422 | prefix = '/kaggle/input/nlp-getting-started/'<load_from_csv> | X1 = preprocessing.StandardScaler().fit(X1 ).transform(X1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | train_df = pd.read_csv(prefix + 'train.csv')
<prepare_output> | from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import metrics | Titanic - Machine Learning from Disaster |
4,060,422 | train_df = pd.DataFrame({
'id':range(len(train_df)) ,
'label':train_df["target"],
'alpha':['a']*train_df.shape[0],
'text': train_df["text"].replace(r'
', ' ', regex=True)
})
train_df.head()
len(train_df )<load_from_csv> | GNB = GaussianNB()
scores_GNB = cross_val_score(GNB, X, y.ravel() , cv=100, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | test_df = pd.read_csv(prefix + 'test.csv')
<count_missing_values> | LR = LogisticRegression(solver = 'lbfgs', max_iter=1000)
scores_LR = cross_val_score(LR, X, y.ravel() , cv=100, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | test_df.isnull().sum()<define_variables> | RF = RandomForestClassifier(n_estimators=100, random_state=0)
scores_RF = cross_val_score(RF, X1, y1.ravel() , cv=100, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | test_df = pd.DataFrame({
'id':range(len(test_df)) ,
'label':[0]*test_df.shape[0],
'alpha':['a']*test_df.shape[0],
'text': test_df["text"].replace(r'
', ' ', regex=True)
})
test_df.head()
len(test_df )<install_modules> | VM = SVC(C = 0.01, kernel = 'rbf', gamma = 'scale', class_weight = 'balanced')
scores_SVC = cross_val_score(VM, X1, y1.ravel() , cv=100, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | !mkdir data
!pip install contractions<feature_engineering> | NGH = KNeighborsClassifier(n_neighbors = 2)
scores_KNN = cross_val_score(NGH, X, y.ravel() , cv=100, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | def fix_contractions(text):
return contractions.fix(text)
def remove_url(text):
url = re.compile(r'https?://\S+|www\.\S+')
return url.sub(r'',text)
def remove_mark(text):
table=str.maketrans('','',string.punctuation)
return text.translate(table)
print("tweet before contractions fix : ", train_df.iloc[1055]["text"])
print("-"*20)
train_df['text']=train_df['text'].apply(lambda x : fix_contractions(x))
test_df['text']=test_df['text'].apply(lambda x : fix_contractions(x))
train_df['text']=train_df['text'].apply(lambda x : remove_url(x))
test_df['text']=test_df['text'].apply(lambda x : remove_url(x))
train_df['text']=train_df['text'].apply(lambda x : remove_mark(x))
test_df['text']=test_df['text'].apply(lambda x : remove_mark(x))
print("tweet after contractions fix : ", train_df.iloc[1055]["text"])
train_df.to_csv('data/train.tsv', sep='\t', index=False, header=False)
test_df.to_csv('data/dev.tsv', sep='\t', index=False, header=False )<import_modules> | BST = XGBClassifier()
scores_XGB = cross_val_score(BST, X, y.ravel() , cv=10, scoring = "accuracy" ) | Titanic - Machine Learning from Disaster |
4,060,422 | logger = logging.getLogger(__name__)
csv.field_size_limit(2147483647)
class InputExample(object):
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
def get_train_examples(self, data_dir):
raise NotImplementedError()
def get_dev_examples(self, data_dir):
raise NotImplementedError()
def get_labels(self):
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
with open(input_file, "r", encoding="utf-8-sig")as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8')for cell in line)
lines.append(line)
return lines
class BinaryProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")) , "train")
def get_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")) , "dev")
def get_labels(self):
return ["0", "1"]
def _create_examples(self, lines, set_type):
examples = []
for(i, line)in enumerate(lines):
guid = "%s-%s" %(set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_example_to_feature(example_row, pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id = example_row
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a)> max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] *(len(tokens_b)+ 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids =([pad_token] * padding_length)+ input_ids
input_mask =([0 if mask_padding_with_zero else 1] * padding_length)+ input_mask
segment_ids =([pad_token_segment_id] * padding_length)+ segment_ids
else:
input_ids = input_ids +([pad_token] * padding_length)
input_mask = input_mask +([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids +([pad_token_segment_id] * padding_length)
assert len(input_ids)== max_seq_length
assert len(input_mask)== max_seq_length
assert len(segment_ids)== max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
return InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
label_map = {label : i for i, label in enumerate(label_list)}
examples = [(example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id)for example in examples]
process_count = 1
with Pool(process_count)as p:
features = list(tqdm(p.imap(convert_example_to_feature, examples, chunksize=100), total=len(examples)))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a)+ len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a)> len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
processors = {
"binary": BinaryProcessor
}
output_modes = {
"binary": "classification"
}
GLUE_TASKS_NUM_LABELS = {
"binary": 2
}<install_modules> | result_df = pd.DataFrame(columns=['Accuracy','Variance'], index=['Naive Bayes', 'Logistic Regression', 'Random Forest', 'SVC', 'KNN', 'XGBoost'] ) | Titanic - Machine Learning from Disaster |
4,060,422 | !pip install pytorch_transformers<import_modules> | result_df.iloc[0] = pd.Series({'Accuracy':scores_GNB.mean() ,
'Variance':np.std(scores_GNB)})
result_df.iloc[1] = pd.Series({'Accuracy':scores_LR.mean() ,
'Variance':np.std(scores_LR)})
result_df.iloc[2] = pd.Series({'Accuracy':scores_RF.mean() ,
'Variance':np.std(scores_RF)})
result_df.iloc[3] = pd.Series({'Accuracy':scores_SVC.mean() ,
'Variance':np.std(scores_SVC)})
result_df.iloc[4] = pd.Series({'Accuracy':scores_KNN.mean() ,
'Variance':np.std(scores_KNN)})
result_df.iloc[5] = pd.Series({'Accuracy':scores_XGB.mean() ,
'Variance':np.std(scores_XGB)})
| Titanic - Machine Learning from Disaster |
4,060,422 | from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_transformers import (BertConfig, BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__ )<define_variables> | result_df = result_df.sort_values('Accuracy', ascending = False, axis=0)
result_df.head(6 ) | Titanic - Machine Learning from Disaster |
4,060,422 | args = {
'data_dir': './data/',
'model_type': 'bert',
'model_name': 'bert-base-cased',
'task_name': 'binary',
'output_dir': 'outputs/',
'cache_dir': 'cache/',
'do_train': True,
'do_eval': True,
'fp16': False,
'fp16_opt_level': 'O1',
'max_seq_length': 128,
'output_mode': 'classification',
'train_batch_size': 8,
'eval_batch_size': 8,
'gradient_accumulation_steps': 1,
'num_train_epochs': 3,
'weight_decay': 0,
'learning_rate': 4e-5,
'adam_epsilon': 1e-8,
'warmup_steps': 0,
'max_grad_norm': 1.0,
'logging_steps': 50,
'evaluate_during_training': False,
'save_steps': 2000,
'eval_all_checkpoints': True,
'overwrite_output_dir': False,
'reprocess_input_data': True,
'notes': 'Using Yelp Reviews dataset'
}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )<define_variables> | X_train, X_test, y_train, y_test = train_test_split(X, y.ravel() , test_size=0.2, random_state=4)
print('Train set:', X_train.shape, y_train.shape)
print('Test set:', X_test.shape, y_test.shape ) | Titanic - Machine Learning from Disaster |
4,060,422 | MODEL_CLASSES = {
'bert':(BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet':(XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm':(XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta':(RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
}
config_class, model_class, tokenizer_class = MODEL_CLASSES[args['model_type']]<load_pretrained> | cls = XGBClassifier()
cls.fit(X_train, y_train ) | Titanic - Machine Learning from Disaster |
4,060,422 | config = config_class.from_pretrained(args['model_name'], num_labels=2, finetuning_task=args['task_name'])
tokenizer = tokenizer_class.from_pretrained(args['model_name'] )<load_pretrained> | file3 = '.. /input/test.csv'
dft = pd.read_csv(file3 ) | Titanic - Machine Learning from Disaster |
4,060,422 | model = model_class.from_pretrained(args['model_name'] )<train_model> | dft = dft.drop(['PassengerId', 'Name', 'Ticket'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | model.to(device )<categorify> | dft_hot = pd.concat([dft, pd.get_dummies(dft['Sex'])], axis=1)
dft_hot = dft_hot.drop(['Sex'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | task = args['task_name']
processor = processors[task]()
label_list = processor.get_labels()
num_labels = len(label_list )<load_pretrained> | dft_hot = pd.concat([dft_hot, pd.get_dummies(dft['Pclass'])], axis=1)
dft_hot = dft_hot.drop(['Pclass'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | def load_and_cache_examples(task, tokenizer, evaluate=False):
processor = processors[task]()
output_mode = args['output_mode']
mode = 'dev' if evaluate else 'train'
cached_features_file = os.path.join(args['data_dir'], f"cached_{mode}_{args['model_name']}_{args['max_seq_length']}_{task}")
if os.path.exists(cached_features_file)and not args['reprocess_input_data']:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args['data_dir'])
label_list = processor.get_labels()
examples = processor.get_dev_examples(args['data_dir'])if evaluate else processor.get_train_examples(args['data_dir'])
features = convert_examples_to_features(examples, label_list, args['max_seq_length'], tokenizer, output_mode,
cls_token_at_end=bool(args['model_type'] in ['xlnet']),
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
cls_token_segment_id=2 if args['model_type'] in ['xlnet'] else 0,
pad_on_left=bool(args['model_type'] in ['xlnet']),
pad_token_segment_id=4 if args['model_type'] in ['xlnet'] else 0)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset<train_model> | dft_hot = dft_hot.rename(columns={dft_hot.columns[8]:'Pclass1', dft_hot.columns[9]:'Pclass2', dft_hot.columns[10]:'Pclass3'} ) | Titanic - Machine Learning from Disaster |
4,060,422 | def train(train_dataset, model, tokenizer):
tb_writer = SummaryWriter()
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args['train_batch_size'])
t_total = len(train_dataloader)// args['gradient_accumulation_steps'] * args['num_train_epochs']
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args['weight_decay']},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args['learning_rate'], eps=args['adam_epsilon'])
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args['warmup_steps'], t_total=t_total)
if args['fp16']:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args['fp16_opt_level'])
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args['num_train_epochs'])
logger.info(" Total train batch size = %d", args['train_batch_size'])
logger.info(" Gradient Accumulation steps = %d", args['gradient_accumulation_steps'])
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args['num_train_epochs']), desc="Epoch")
for _ in train_iterator:
epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(device)for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None,
'labels': batch[3]}
outputs = model(input_ids=batch[0],attention_mask=batch[1],token_type_ids=batch[2],labels=batch[3])
loss = outputs[0]
print("\r%f" % loss, end='')
if args['gradient_accumulation_steps'] > 1:
loss = loss / args['gradient_accumulation_steps']
if args['fp16']:
with amp.scale_loss(loss, optimizer)as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args['max_grad_norm'])
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters() , args['max_grad_norm'])
tr_loss += loss.item()
if(step + 1)% args['gradient_accumulation_steps'] == 0:
scheduler.step()
optimizer.step()
model.zero_grad()
global_step += 1
if args['logging_steps'] > 0 and global_step % args['logging_steps'] == 0:
if args['evaluate_during_training']:
results = evaluate(model, tokenizer)
for key, value in results.items() :
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr() [0], global_step)
tb_writer.add_scalar('loss',(tr_loss - logging_loss)/args['logging_steps'], global_step)
logging_loss = tr_loss
if args['save_steps'] > 0 and global_step % args['save_steps'] == 0:
output_dir = os.path.join(args['output_dir'], 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module')else model
model_to_save.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
return global_step, tr_loss / global_step<train_model> | le = LabelEncoder()
T = dft_hot['Embarked']
encoded = le.fit_transform(np.ravel(T)) | Titanic - Machine Learning from Disaster |
4,060,422 | if args['do_train']:
train_dataset = load_and_cache_examples(task, tokenizer)
global_step, tr_loss = train(train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss )<save_to_csv> | dft_hot = pd.concat([dft_hot, pd.DataFrame(encoded, columns = ['Emb'])], axis=1)
dft_hot = dft_hot.drop(['Embarked'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 | def submit(pred_ids):
sub = pd.read_csv(prefix+'sample_submission.csv')
sub['target'] = list(map(int,pred_ids))
sub.to_csv('submission.csv', index=False )<compute_train_metric> | dft_hot = dft_hot[['SibSp', 'Parch', 'Fare', 'Emb', 'Cabin', 'Age',
'female', 'male', 'Pclass1', 'Pclass2', 'Pclass3']] | Titanic - Machine Learning from Disaster |
4,060,422 | def get_mismatched(labels, preds):
mismatched = labels != preds
examples = processor.get_dev_examples(args['data_dir'])
wrong = [i for(i, v)in zip(examples, mismatched)if v]
return wrong
def get_eval_report(labels, preds):
mcc = matthews_corrcoef(labels, preds)
tn, fp, fn, tp = confusion_matrix(labels, preds ).ravel()
return {
"mcc": mcc,
"tp": tp,
"tn": tn,
"fp": fp,
"fn": fn
}, get_mismatched(labels, preds)
def compute_metrics(task_name, preds, labels):
assert len(preds)== len(labels)
return get_eval_report(labels, preds)
def evaluate(model, tokenizer, prefix=""):
eval_output_dir = args['output_dir']
results = {}
EVAL_TASK = args['task_name']
eval_dataset = load_and_cache_examples(EVAL_TASK, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args['eval_batch_size'])
preds_ = []
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args['eval_batch_size'])
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
preds_ = []
for batch in tqdm_notebook(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(device)for t in batch)
with torch.no_grad() :
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None,
'labels': batch[3]}
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
for i in range(logits.size(0)) :
preds_.append(logits[i,:].cpu().max(0)[1].item())
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy() , axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy() , axis=0)
submit(preds_)
eval_loss = eval_loss / nb_eval_steps
if args['output_mode'] == "classification":
preds = np.argmax(preds, axis=1)
elif args['output_mode'] == "regression":
preds = np.squeeze(preds)
result, wrong = compute_metrics(EVAL_TASK, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w")as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s
" %(key, str(result[key])))
return results, wrong<load_from_csv> | dft_hot = dft_hot.drop(['Cabin'], axis=1 ) | Titanic - Machine Learning from Disaster |
4,060,422 |
<import_modules> | train_cols = list(dft_hot)
| Titanic - Machine Learning from Disaster |
4,060,422 | nltk.download('stopwords')
stop_words = stopwords.words('english')
nlp = spacy.load('en' )<install_modules> | dft_i = pd.DataFrame(KNN(k=5 ).fit_transform(dft_hot))
dft_i.columns = train_cols | Titanic - Machine Learning from Disaster |
4,060,422 | !pip install transformers<import_modules> | X_test = np.asarray(dft_i ) | Titanic - Machine Learning from Disaster |
4,060,422 | from transformers import RobertaModel, RobertaTokenizer
from transformers import RobertaForSequenceClassification, RobertaConfig, AdamW, get_linear_schedule_with_warmup<set_options> | X_test = preprocessing.StandardScaler().fit(X_test ).transform(X_test ) | Titanic - Machine Learning from Disaster |
4,060,422 | print(torch.cuda.is_available() )<load_from_csv> | y_pred = cls.predict(X_test ) | Titanic - Machine Learning from Disaster |
4,060,422 | data = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")[["text", "target"]]
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
data.head()<categorify> | dft2 = pd.read_csv(file3 ) | Titanic - Machine Learning from Disaster |
4,060,422 | def clean_text(text):
text = re.sub(r"\x89Û_", "", text)
text = re.sub(r"\x89ÛÒ", "", text)
text = re.sub(r"\x89ÛÓ", "", text)
text = re.sub(r"\x89ÛÏWhen", "When", text)
text = re.sub(r"\x89ÛÏ", "", text)
text = re.sub(r"China\x89Ûªs", "China's", text)
text = re.sub(r"let\x89Ûªs", "let's", text)
text = re.sub(r"\x89Û÷", "", text)
text = re.sub(r"\x89Ûª", "", text)
text = re.sub(r"\x89Û\x9d", "", text)
text = re.sub(r"å_", "", text)
text = re.sub(r"\x89Û¢", "", text)
text = re.sub(r"\x89Û¢åÊ", "", text)
text = re.sub(r"fromåÊwounds", "from wounds", text)
text = re.sub(r"åÊ", "", text)
text = re.sub(r"åÈ", "", text)
text = re.sub(r"JapÌ_n", "Japan", text)
text = re.sub(r"Ì©", "e", text)
text = re.sub(r"å¨", "", text)
text = re.sub(r"Surṳ", "Suruc", text)
text = re.sub(r"åÇ", "", text)
text = re.sub(r"å£3million", "3 million", text)
text = re.sub(r"åÀ", "", text)
text = re.sub(r'https?://\S+|www\.\S+', '', text)
text = re.sub(r'<.*?>', '', text)
text = re.sub(r'[\d]+', ' ', text)
return text<feature_engineering> | dft2 = pd.concat([dft2['PassengerId'], pd.DataFrame(y_pred, columns = ['Survived'] ).astype(int)], axis=1 ) | Titanic - Machine Learning from Disaster |