kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
14,498,987
data_all['first_word_description_match'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0].split(" ")[0],x.split('\t')[2]))<create_dataframe>
Tabular Playground Series - Jan 2021
14,498,987
data_all_copy1 = data_all.copy()<drop_column>
df_shuffle = df.sample(frac=1, random_state=seed ).reset_index(drop=True) df_y = df_shuffle.pop('target') df_X = df_shuffle X_train, X_test, y_train, y_test = skms.train_test_split(df_X, df_y, train_size=0.9, random_state=seed) print(f"Train set has {X_train.shape[0]} records out of {len(df_shuffle)} which is {round(X_train.shape[0]/len(df_shuffle)*100)}%") print(f"Test set has {X_test.shape[0]} records out of {len(df_shuffle)} which is {round(X_test.shape[0]/len(df_shuffle)*100)}%" )
Tabular Playground Series - Jan 2021
14,498,987
data_all = data_all.drop(['attr','product_info'],axis=1 )<feature_engineering>
import sklearn.linear_model as sklm
Tabular Playground Series - Jan 2021
14,498,987
data_all["prod_desc_merge"] = data_all["product_description"].map(str)+' '+data_all["brand"].fillna('')+ ' ' + data_all["attribute"].fillna('' ).map(str )<choose_model_class>
scaler = skp.MinMaxScaler() X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns) X_test = pd.DataFrame(scaler.transform(X_test), columns=X_train.columns) X_train.describe()
Tabular Playground Series - Jan 2021
14,498,987
tfidf = TfidfVectorizer(ngram_range=(1, 2), stop_words='english') svd = TruncatedSVD(n_components=100, random_state = 2019 )<choose_model_class>
def expm1(x): return np.expm1(x) def getRmse(y_train, y_train_pred): print(skm.mean_squared_error(y_train, y_train_pred))
Tabular Playground Series - Jan 2021
14,498,987
pipe = Pipeline(steps=[('tfidf', tfidf),('svd', svd)] )<categorify>
lmr = sklm.Ridge(alpha=0.001) lmr.fit(X_train, y_train) y_train_pred = lmr.predict(X_train) y_test_pred = lmr.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
start_time = time.time() data_all["product_title"]=pipe.fit_transform(data_all["product_title"]) data_all["search_term"]=pipe.fit_transform(data_all["search_term"]) data_all["prod_desc_merge"]=pipe.fit_transform(data_all["prod_desc_merge"]) print("--- %s seconds ---" %(time.time() - start_time))<drop_column>
params = {'alpha': [0.0001, 0.001, 0.005, 0.01, 0.03, 0.05, 0.1, 0.5, 1.0, 5.0, 10]} ridge = sklm.Ridge() model_cv_ridge = skms.GridSearchCV(estimator = ridge, n_jobs=-1, param_grid = params, scoring= 'neg_mean_squared_error', cv = 5, return_train_score=True, verbose = 3) model_cv_ridge.fit(X_train, y_train) print(model_cv_ridge.best_estimator_) y_train_pred = model_cv_ridge.predict(X_train) y_test_pred = model_cv_ridge.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
data_all.drop(["product_description","brand","attribute"],axis=1,inplace=True )<filter>
cbr = cb.CatBoostRegressor(loss_function='RMSE', verbose=0) cbr.fit(X_train, y_train, eval_set=(X_test, y_test)) print(cbr.best_score_) y_train_pred = cbr.predict(X_train) y_test_pred = cbr.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
missing = pd.DataFrame(data_all.isnull().sum()) missing[missing[0]>0]<define_variables>
xgb = ske.GradientBoostingRegressor(criterion='mse', random_state=1) xgb.fit(X_train, y_train) y_train_pred = xgb.predict(X_train) y_test_pred = xgb.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
num_train=train.shape[0]<split>
xgb = ske.ExtraTreesRegressor(criterion='mse', random_state=1) xgb.fit(X_train, y_train) y_train_pred = xgb.predict(X_train) y_test_pred = xgb.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
df_train = data_all.iloc[:num_train] df_test = data_all.iloc[num_train:]<compute_test_metric>
xgb = ske.RandomForestRegressor(criterion='mse', random_state=1) xgb.fit(X_train, y_train) y_train_pred = xgb.predict(X_train) y_test_pred = xgb.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
def fmean_squared_error(ground_truth, predictions): fmean_squared_error_ = mean_squared_error(ground_truth, predictions)**0.1 return fmean_squared_error_<compute_test_metric>
xgb = xg.XGBRegressor(objective ='reg:squarederror', random_state=1) xgb.fit(X_train, y_train) y_train_pred = xgb.predict(X_train) y_test_pred = xgb.predict(X_test) getRmse(y_train, y_train_pred) getRmse(y_test, y_test_pred )
Tabular Playground Series - Jan 2021
14,498,987
RMSE = make_scorer(fmean_squared_error, greater_is_better=False )<prepare_x_and_y>
print("TF version:-", tf.__version__) tf.random.set_seed(seed )
Tabular Playground Series - Jan 2021
14,498,987
df_test_1 = df_test.copy() df_train_1 = df_train.copy() id_test = df_test_1['id'] y_train = df_train_1['relevance'].values X_train = df_train_1.drop(['id','relevance'],axis=1 ).values X_test = df_test_1.drop(['id','relevance'],axis=1 ).values<choose_model_class>
epochs = 40 model_1 = k.models.Sequential([ k.layers.Dense(512, activation='relu', input_shape=(X_train.shape[1],)) , k.layers.Dropout(0.2), k.layers.Dense(256, activation='relu'), k.layers.Dropout(0.2), k.layers.Dense(1, activation='linear'), ]) print(model_1.summary()) model_1.compile(optimizer='adam', loss='mse', metrics='mse' ) history = model_1.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, callbacks=[callbacks_list] )
Tabular Playground Series - Jan 2021
14,498,987
gbr = GradientBoostingRegressor()<define_search_space>
def getTestResults(m=None): df_final = df.sample(frac=1, random_state=1 ).reset_index(drop=True) test_cols = [x for x in df.columns if 'target' not in x] df_final_test = df_test[test_cols] df_y = df_final.pop('target') df_X = df_final scaler = skp.MinMaxScaler() df_X = pd.DataFrame(scaler.fit_transform(df_X), columns=df_X.columns) X_test = pd.DataFrame(scaler.transform(df_final_test), columns=df_X.columns) if m is None: lmr = cb.CatBoostRegressor(loss_function='RMSE', verbose=0) lmr.fit(df_X, df_y) else: lmr = m y_train_pred = lmr.predict(df_X) y_test_pred = lmr.predict(X_test) if m is not None: y_test_pred = [y[0] for y in y_test_pred] getRmse(df_y, y_train_pred) return y_test_pred results = getTestResults()
Tabular Playground Series - Jan 2021
14,498,987
param_grid = { 'n_estimators' : np.array([int(e)for e in np.linspace(90, 100, 11)]), 'max_depth': np.array([int(e)for e in np.linspace(3, 6, 4)]) }<define_search_space>
submission = pd.DataFrame({ 'id': df_test['id'], 'target': results, }) submission.head()
Tabular Playground Series - Jan 2021
14,498,987
<choose_model_class><EOS>
submission.to_csv('./submission_Catboost.csv', index=False )
Tabular Playground Series - Jan 2021
14,163,313
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<train_model>
%matplotlib inline
Tabular Playground Series - Jan 2021
14,163,313
%time _ = gs.fit(X_train, y_train )<find_best_params>
!pip install optuna
Tabular Playground Series - Jan 2021
14,163,313
gs.best_params_, gs.best_score_<train_model>
train = pd.read_csv(DATA / "train.csv") test = pd.read_csv(DATA / "test.csv") smpl_sub = pd.read_csv(DATA / "sample_submission.csv") print("train: {}, test: {}, sample sub: {}".format( train.shape, test.shape, smpl_sub.shape ))
Tabular Playground Series - Jan 2021
14,163,313
gbr1 = GradientBoostingRegressor(max_depth=5,n_estimators=94) gbr1.fit(X_train, y_train )<predict_on_test>
features = ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7', 'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14'] datax = pd.concat([train[features],test[features]], axis=0) datax1 = datax.abs() def featx(data, features): cx=int(1000000) for f in features: data[f] = data[f]*cx dmm = data[f].max() -data[f].min() data[f] =(data[f]-data[f].min())/dmm return data data = featx(datax, features) data.head()
Tabular Playground Series - Jan 2021
14,163,313
y_pred = gbr1.predict(X_test) for i in range(len(y_pred)) : if y_pred[i] > 3: y_pred[i] = 3<compute_train_metric>
X = data[:300000] Xtest = data[300000:] y = train['target']
Tabular Playground Series - Jan 2021
14,163,313
def caculator_RMSE(X_train, y_train, model): return np.sqrt(((y_train - model.predict(X_train)) ** 2 ).mean() )<compute_test_metric>
Tabular Playground Series - Jan 2021
14,163,313
print('RMSE using GBRegression : ' , caculator_RMSE(X_train, y_train, gbr1))<save_to_csv>
params={'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'boosting_type': 'gbdt', 'learning_rate': 0.04, 'feature_pre_filter': False, 'min_data_in_leaf': 100, 'lambda_l1': 9.486545871986378, 'lambda_l2': 6.887206686817597, 'num_leaves': 230, 'feature_fraction': 0.4, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 20}
Tabular Playground Series - Jan 2021
14,163,313
pd.DataFrame({"id": id_test, "relevance": y_pred} ).to_csv('submission.csv',index=False )<train_on_grid>
N_FOLDS = 12 oof = np.zeros(len(y)) preds = np.zeros(len(Xtest)) params['learning_rate'] = 0.004 params['num_iterations'] = 12000 i=0 for train_ind, test_ind in tqdm(KFold(n_splits = N_FOLDS,shuffle=True,random_state=21 ).split(X), total=N_FOLDS, desc="k-fold"): Xtrain = X.iloc[train_ind] Xval = X.iloc[test_ind] ytrain = y.iloc[train_ind] yval = y.iloc[test_ind] model = LGBMRegressor(**params) model.fit(Xtrain, ytrain, eval_set =(( Xval,yval)) , early_stopping_rounds = 120, verbose = 1000) p = model.predict(Xval) oof[test_ind] = p preds += model.predict(Xtest)/N_FOLDS i+=1 if i>5: print(f'MSE::: {np.round(mean_squared_error(y, oof, squared=False),8)}') else: pass
Tabular Playground Series - Jan 2021
14,163,313
<save_to_csv>
submission = test[['id']] submission['target'] = preds submission.to_csv('submission.csv', index = False )
Tabular Playground Series - Jan 2021
14,310,043
<load_from_csv>
import lightgbm as lgb import optuna.integration.lightgbm as oplgb from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error from tqdm.notebook import tqdm import matplotlib.pyplot as plt import seaborn as sns
Tabular Playground Series - Jan 2021
14,310,043
!unzip.. /input/home-depot-product-search-relevance/attributes.csv.zip !unzip.. /input/home-depot-product-search-relevance/product_descriptions.csv.zip !unzip.. /input/home-depot-product-search-relevance/test.csv.zip !unzip.. /input/home-depot-product-search-relevance/train.csv.zip<install_modules>
df_train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") df_test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") df_sample = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv" )
Tabular Playground Series - Jan 2021
14,310,043
!pip install bs4<import_modules>
train_id = df_train["id"] test_id = df_test["id"] df_train.drop("id", axis=1, inplace=True) df_test.drop("id", axis=1, inplace=True )
Tabular Playground Series - Jan 2021
14,310,043
import pandas as pd import re import math from nltk.stem.snowball import SnowballStemmer from sklearn.feature_extraction.text import CountVectorizer import numpy as np from bs4 import BeautifulSoup import lxml import re import nltk from nltk.corpus import stopwords from nltk.metrics import edit_distance from string import punctuation from collections import Counter import numpy as np<load_from_csv>
feature_cols = [c for c in df_train.columns if c != "target"]
Tabular Playground Series - Jan 2021
14,310,043
train_data = pd.read_csv("train.csv", encoding="ISO-8859-1") test_data = pd.read_csv("test.csv", encoding="ISO-8859-1") attribute_data = pd.read_csv('attributes.csv') descriptions = pd.read_csv('product_descriptions.csv' )<count_duplicates>
train_x = df_train[feature_cols] train_y = df_train.target test_x = df_test
Tabular Playground Series - Jan 2021
14,310,043
train_data[train_data.duplicated(keep='first')].shape<count_duplicates>
folds = KFold(n_splits=10, shuffle=True, random_state=2021 )
Tabular Playground Series - Jan 2021
14,310,043
test_data[test_data.duplicated(keep='first')].shape<count_duplicates>
class FoldsAverageLGBM: def __init__(self, folds): self.folds = folds self.models = [] def fit(self, lgb_params, train_x, train_y): oof_preds = np.zeros_like(train_y) self.train_x = train_x.values self.train_y = train_y.values for tr_idx, va_idx in tqdm(folds.split(train_x)) : tr_x, va_x = self.train_x[tr_idx], self.train_x[va_idx] tr_y, va_y = self.train_y[tr_idx], self.train_y[va_idx] lgb_train_dataset = lgb.Dataset(tr_x, tr_y) lgb_valid_dataset = lgb.Dataset(va_x, va_y) model = lgb.train(lgb_params, lgb_train_dataset, valid_sets=[lgb_valid_dataset], verbose_eval=100) self.models.append(model) oof_pred = model.predict(va_x) oof_preds[va_idx] = oof_pred self.oof_preds = oof_preds def predict(self, test_x): preds = [] for model in tqdm(self.models): pred = model.predict(test_x) preds.append(pred) preds = np.mean(preds, axis=0) return preds
Tabular Playground Series - Jan 2021
14,310,043
attribute_data[attribute_data.duplicated(keep='first')] attribute_data = attribute_data.drop_duplicates()<count_duplicates>
best_lgb_params = { 'seed': 2021, 'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'feature_pre_filter': False, 'lambda_l1': 6.540486456085813, 'lambda_l2': 0.01548480538099245, 'num_leaves': 256, 'feature_fraction': 0.52, 'bagging_fraction': 0.6161835249194311, 'bagging_freq': 7, 'min_child_samples': 20 } best_lgb_params["learning_rate"] = 0.001 best_lgb_params["early_stopping_round"] = 1000 best_lgb_params["num_iterations"] = 20000
Tabular Playground Series - Jan 2021
14,310,043
descriptions[descriptions.duplicated(keep='first')].shape<count_values>
folds_average_lgbm = FoldsAverageLGBM(folds )
Tabular Playground Series - Jan 2021
14,310,043
print('total data has html tags in',descriptions.product_description.str.count('<br$' ).values.sum() )<rename_columns>
folds_average_lgbm.fit(best_lgb_params, train_x, train_y )
Tabular Playground Series - Jan 2021
14,310,043
df_brand = attribute_data[attribute_data.name == "MFG Brand Name"][["product_uid", "value"]].rename(columns={"value": "brand"} )<feature_engineering>
np.sqrt(mean_squared_error(df_train.target, folds_average_lgbm.oof_preds))
Tabular Playground Series - Jan 2021
14,310,043
df_attr_stripped = attribute_data df_attr_stripped['name'] = df_attr_stripped['name'].astype(str) df_attr_stripped['name'] = df_attr_stripped['name'].apply(lambda s: re.sub(r"Bullet([0-9]+)", "", s)) df_attr_stripped['attribute'] = df_attr_stripped['name'] + " " + df_attr_stripped['value'] df_attr_test = df_attr_stripped.groupby('product_uid' ).agg({'attribute': lambda s : ' '.join(s.astype(str)) } ).reset_index() <concatenate>
y_pred = folds_average_lgbm.predict(test_x )
Tabular Playground Series - Jan 2021
14,310,043
<merge><EOS>
sub = df_sample.copy() sub["target"] = y_pred sub.to_csv("submission_lgbm_fold_10.csv", index=False) sub.head()
Tabular Playground Series - Jan 2021
14,011,238
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<merge>
!pip3 install -qq optuna sns.set()
Tabular Playground Series - Jan 2021
14,011,238
data_all = pd.merge(data_all, df_brand, how = 'left', on = 'product_uid' )<merge>
train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") print(train.shape) print(test.shape) X, y = train.iloc[:,1:-1].values, train.iloc[:,-1].values feat_names = list(train.columns[1:-1]) kfold = KFold(n_splits=5,random_state=2021, shuffle=True) X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.15, random_state=2021 )
Tabular Playground Series - Jan 2021
14,011,238
data_all = pd.merge(data_all, df_attr_test, how = 'left', on = 'product_uid' )<categorify>
baseline_score = mean_squared_error(y,np.mean(y)* np.ones(y.shape[0]),squared=False) lr = LinearRegression() lr_scores = cross_val_score(lr,X, y, cv=kfold,scoring='neg_root_mean_squared_error') lasso = Lasso() lasso_scores = cross_val_score(lasso,X, y, cv=kfold,scoring='neg_root_mean_squared_error') ridge = Ridge() ridge_scores = cross_val_score(ridge,X, y, cv=kfold,scoring='neg_root_mean_squared_error') print(-lr_scores, np.mean(-lr_scores)) print(-lasso_scores, np.mean(-lasso_scores)) print(-ridge_scores, np.mean(-ridge_scores)) fig, ax = plt.subplots(ncols=4,nrows=2, figsize=(20, 10)) ridge.fit(X, y)X lr.fit(X,y) feat_names2, coef_order = zip(*sorted(zip(feat_names,ridge.coef_), key=lambda k : abs(k[1]), reverse=True)) sns.barplot(list(coef_order),list(feat_names2), ax=ax[0,0]) sns.scatterplot(y - ridge.predict(X), y,ax=ax[0,1]) sns.distplot(y - ridge.predict(X),ax=ax[0,2]) sns.distplot(ridge.predict(X),ax=ax[0,3]) feat_names2, coef_order2 = zip(*sorted(zip(feat_names,lr.coef_), key=lambda k : abs(k[1]), reverse=True)) sns.barplot(list(coef_order2),list(feat_names2), ax=ax[1,0]) sns.scatterplot(y - lr.predict(X), y,ax=ax[1,1]) sns.distplot(y - lr.predict(X),ax=ax[1,2]) sns.distplot(lr.predict(X),ax=ax[1,3]) print("Ridge on full dataset",mean_squared_error(y, ridge.predict(X), squared=False))
Tabular Playground Series - Jan 2021
14,011,238
def string_edit(s): if isinstance(s, str): s = re.sub(r"(\w)\.( [A-Z])", r"\1 \2", s) s = s.lower() s = s.replace(" "," ") s = s.replace(",","") s = s.replace("$"," ") s = s.replace("?"," ") s = s.replace("-"," ") s = s.replace("//","/") s = s.replace(".. ",".") s = s.replace(" / "," ") s = s.replace(" \\ "," ") s = s.replace(".",".") s = re.sub(r"(^\.|/)", r"", s) s = re.sub(r"(\.|/)$", r"", s) s = re.sub(r"([0-9] )([a-z])", r"\1 \2", s) s = re.sub(r"([a-z] )([0-9])", r"\1 \2", s) s = s.replace(" x "," xbi ") s = re.sub(r"([a-z] )(*)\.( * )([a-z])", r"\1 \4", s) s = re.sub(r"([a-z] )(*)/(* )([a-z])", r"\1 \4", s) s = s.replace("*"," xbi ") s = s.replace(" by "," xbi ") s = re.sub(r"([0-9] )(*)\.( * )([0-9])", r"\1.\4", s) s = re.sub(r"([0-9]+ )(* )(inches|inch|in|')\.?", r"\1in.", s) s = re.sub(r"([0-9]+ )(* )(foot|feet|ft|'')\.?", r"\1ft.", s) s = re.sub(r"([0-9]+ )(* )(pounds|pound|lbs|lb)\.?", r"\1lb.", s) s = re.sub(r"([0-9]+ )(* )(square|sq)?\.?(feet|foot|ft)\.?", r"\1sq.ft.", s) s = re.sub(r"([0-9]+ )(* )(cubic|cu)?\.?(feet|foot|ft)\.?", r"\1cu.ft.", s) s = re.sub(r"([0-9]+ )(* )(gallons|gallon|gal)\.?", r"\1gal.", s) s = re.sub(r"([0-9]+ )(* )(ounces|ounce|oz)\.?", r"\1oz.", s) s = re.sub(r"([0-9]+ )(* )(centimeters|cm)\.?", r"\1cm.", s) s = re.sub(r"([0-9]+ )(* )(milimeters|mm)\.?", r"\1mm.", s) s = s.replace("°"," degrees ") s = re.sub(r"([0-9]+ )(* )(degrees|degree)\.?", r"\1deg.", s) s = s.replace(" v "," volts ") s = re.sub(r"([0-9]+ )(* )(volts|volt)\.?", r"\1volt.", s) s = re.sub(r"([0-9]+ )(* )(watts|watt)\.?", r"\1watt.", s) s = re.sub(r"([0-9]+ )(* )(amperes|ampere|amps|amp)\.?", r"\1amp.", s) s = s.replace(" "," ") s = s.replace("."," ") s = re.sub(r"zero\.?", r"0 ", s) s = re.sub(r"one\.?", r"1 ", s) s = re.sub(r"two\.?", r"2 ", s) s = re.sub(r"three\.?", r"3 ", s) s = re.sub(r"four\.?", r"4 ", s) s = re.sub(r"five\.?", r"5 ", s) s = re.sub(r"six\.?", r"6 ", s) s = re.sub(r"seven\.?", r"7 ", s) s = re.sub(r"eight\.?", r"8 ", s) s = 
re.sub(r"nine\.?", r"9 ", s) return s else: return "null"<feature_engineering>
%%time i = 0 for train_index, test_index in tqdm(kfold.split(X)) : cr = CatBoostRegressor(verbose=False,iterations=2000, learning_rate=0.01, random_strength=52, max_depth=12,random_seed=2021) cr.fit(X[train_index,:], y[train_index]) train[f'target{i}'] = cr.predict(X) test[f'target{i}'] = cr.predict(test.iloc[:,1:].values) i += 1 train['target_final'] = train.loc[:,["target"+str(i)for i in range(5)]].mean(axis=1) print("In Sample: ", mean_squared_error(y, train['target_final'], squared=False)) test['target'] = test[["target"+str(i)for i in range(5)]].mean(axis=1) test[['id','target']].to_csv("submission.csv",index=False )
Tabular Playground Series - Jan 2021
14,011,238
data_all['search_term'] = data_all['search_term'].map(lambda x:string_edit(str(x))) data_all['product_title'] = data_all['product_title'].map(lambda x:string_edit(str(x))) data_all['product_description'] = data_all['product_description'].map(lambda x:string_edit(str(x)) )<feature_engineering>
Tabular Playground Series - Jan 2021
14,332,086
data_all['attribute'] = data_all['attribute'].map(lambda x:string_edit(str(x))) data_all['brand'] = data_all['brand'].map(lambda x:string_edit(str(x)) )<string_transform>
train = pd.read_csv(input_path / 'train.csv', index_col='id' )
Tabular Playground Series - Jan 2021
14,332,086
def remove_html_tag(text): soup = BeautifulSoup(text, 'lxml') text = soup.get_text().replace('Click here to review our return policy for additional information regarding returns', '') return text def str_stemmer(doc): tokens = doc.split() table = str.maketrans('', '', punctuation) tokens = [w.translate(table)for w in tokens] tokens = [word for word in tokens if word.isalpha() ] stop_words = set(stopwords.words('english')) tokens = [w for w in tokens if not w in stop_words] tokens = [word for word in tokens if len(word)> 1] return ' '.join(tokens) def str_stemmer_title(s): return " ".join(map(stemmer.stem, s.lower().split())) def str_common_word(str1, str2): whole_set = set(str1.split()) return sum(int(str2.find(word)>=0)for word in whole_set) def get_shared_words(row_data): return np.sum([str_common_word(*row_data[:-1]), str_common_word(*row_data[1:])] )<feature_engineering>
test = pd.read_csv(input_path / 'test.csv', index_col='id' )
Tabular Playground Series - Jan 2021
14,332,086
data_all['search_term'] = pd.Series(data_all['search_term'].map(lambda x:str_stemmer(str(x)))) data_all['product_title'] = pd.Series(data_all['product_title'].map(lambda x:str_stemmer(str(x)))) data_all['product_description'] = pd.Series(data_all['product_description'].map(lambda x:str_stemmer(str(x)))) data_all['attribute'] = pd.Series(data_all['attribute'].map(lambda x:str_stemmer(str(x))))<feature_engineering>
submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id' )
Tabular Playground Series - Jan 2021
14,332,086
data_all['search_term'] = pd.Series(data_all['search_term'].map(lambda x:remove_html_tag(str(x)))) data_all['product_title'] = pd.Series(data_all['product_title'].map(lambda x:remove_html_tag(str(x)))) data_all['product_description'] = pd.Series(data_all['product_description'].map(lambda x:remove_html_tag(str(x))))<feature_engineering>
target = train.pop('target') X_train, X_test, y_train, y_test = train_test_split(train, target, train_size=0.60 )
Tabular Playground Series - Jan 2021
14,332,086
data_all['search_term_tokens'] = data_all.search_term.str.lower().str.split() data_all['product_title_tokens'] = data_all.product_title.str.lower().str.split() data_all['product_description_tokens'] = data_all.product_description.str.lower().str.split()<feature_engineering>
from xgboost import XGBRegressor import xgboost as xgb
Tabular Playground Series - Jan 2021
14,332,086
data_all["edistance_sprot"] = [edit_distance(word1, word2)for word1, word2 in data_all[["search_term","product_title"]].values.tolist() ] data_all["edistance_sd"] = [edit_distance(word1, word2)for word1, word2 in data_all[["search_term","product_description"]].values.tolist() ]<concatenate>
regressor = xgb.XGBRegressor(colsample_bytree=0.5, alpha=0.01, reg_lambda=0.003, learning_rate=0.01, max_depth=15, min_child_weight=257, n_estimators=1000, subsample=0.7, random_state=2020, metric_period=100, silent=1) regressor.fit(X_train, y_train, early_stopping_rounds=6, eval_set=[(X_test, y_test)], verbose=1) y_pred = regressor.predict(X_test) plot_results("XGBRegressor", y_test, y_pred)
Tabular Playground Series - Jan 2021
14,332,086
def get_jaccard_sim(columns): str1, str2 = columns[0], columns[1] a = set(str1) b = set(str2) c = a.intersection(b) return float(len(c)) /(len(a)+ len(b)- len(c))<feature_engineering>
import lightgbm as lgb
Tabular Playground Series - Jan 2021
14,332,086
data_all['j_dis_sqt'] = [get_jaccard_sim(rows)for rows in data_all[["search_term_tokens","product_title_tokens"]].values] data_all['j_dis_sqd'] = [get_jaccard_sim(rows)for rows in data_all[["search_term_tokens","product_description_tokens"]].values]<feature_engineering>
lgb_train = lgb.Dataset(X_train, y_train) lgb_valid = lgb.Dataset(X_test, y_test) param = { 'seed': 2021, 'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'feature_pre_filter': False, 'lambda_l1': 6.540486456085813, 'lambda_l2': 0.01548480538099245, 'num_leaves': 256, 'feature_fraction': 0.52, 'bagging_fraction': 0.6161835249194311, 'bagging_freq': 7, 'min_child_samples': 20, 'learning_rate' : 0.001, 'early_stopping_round' : 1000, 'num_iterations' : 20000 } lgb_model = lgb.train(param, lgb_train, valid_sets=lgb_valid, num_boost_round=5000, early_stopping_rounds=100) plot_results('LightGBM', y_test, y_pred )
Tabular Playground Series - Jan 2021
14,332,086
data_all['search_query_length'] = data_all.search_term.str.len() data_all['number_of_words_in_descr'] = data_all.product_description.str.count("\\w+" )<feature_engineering>
import torch from torch import nn
Tabular Playground Series - Jan 2021
14,332,086
data_all['number_of_words_in_search'] = data_all['search_term_tokens'].map(lambda x: len(x)) data_all['number_of_words_in_title'] = data_all['product_title_tokens'].map(lambda x: len(x)) data_all['length_product_description'] = data_all.product_description.str.len() data_all['length_product_title'] = data_all.product_title.str.len() data_all['length_attribute'] = data_all.attribute.str.len()<data_type_conversions>
class linearRegression(torch.nn.Module): def __init__(self, inputSize, outputSize): super(linearRegression, self ).__init__() self.head = torch.nn.Linear(inputSize, inputSize//2) self.out = torch.nn.Linear(inputSize//2, outputSize) def forward(self, x): y = self.out(self.head(x)) return y
Tabular Playground Series - Jan 2021
14,332,086
data_all['len_of_attribute'] = data_all['attribute'].map(lambda x:len(str(x ).split())).astype(np.int64) data_all['len_of_brand'] = data_all['brand'].map(lambda x:len(str(x ).split())).astype(np.int64 )<compute_test_metric>
model = linearRegression(X_train.shape[-1], 1 )
Tabular Playground Series - Jan 2021
14,332,086
def cosineSim(v1, v2): sumxx, sumxy, sumyy = 0, 0, 0 for i in range(len(v1)) : x = v1[i] y = v2[i] sumxx += x*x sumyy += y*y sumxy += x*y return sumxy/math.sqrt(sumxx*sumyy )<choose_model_class>
learningRate = 3e-4 criterion = torch.nn.MSELoss() optimizer = torch.optim.AdamW(model.parameters() , lr=learningRate )
Tabular Playground Series - Jan 2021
14,332,086
arr1 = [] arr2 = [] arr3 = [] for i in range(len(data_all)) : product_title_i = data_all['product_title'][i] description_i = data_all['product_description'][i] attribute_i = data_all['attribute'][i] search_term_i = data_all['search_term'][i] TfidfVectorizer1 = TfidfVectorizer(ngram_range=(1, 2), stop_words='english') TfidfVectorizer2 = TfidfVectorizer(ngram_range=(1, 2), stop_words='english') TfidfVectorizer3 = TfidfVectorizer(ngram_range=(1, 2), stop_words='english') tf_idf1 = TfidfVectorizer1.fit_transform([product_title_i, search_term_i]) tf_idf2 = TfidfVectorizer2.fit_transform([description_i, search_term_i]) tf_idf3 = TfidfVectorizer3.fit_transform([attribute_i, search_term_i]) cosineSim_title_search = cosineSim(tf_idf1.toarray() [0], tf_idf1.toarray() [1]) arr1.append(cosineSim_title_search) cosineSim_description = cosineSim(tf_idf2.toarray() [0], tf_idf2.toarray() [1]) arr2.append(cosineSim_description) cosineSim_attribute = cosineSim(tf_idf3.toarray() [0], tf_idf3.toarray() [1]) arr3.append(cosineSim_attribute )<feature_engineering>
if torch.cuda.is_available() : inputs = torch.Tensor(np.array(X_train)).cuda() labels = torch.Tensor(np.array(y_train)).cuda() else: inputs = torch.Tensor(np.array(X_train)) labels = torch.Tensor(np.array(y_train))
Tabular Playground Series - Jan 2021
14,332,086
data_all['tfidf_cosineSim_search_title'] = arr1 data_all['tfidf_cosineSim_search_description'] = arr2 data_all['tfidf_cosineSim_search_attribute'] = arr3<feature_engineering>
for epoch in range(10): for i in range(0, len(labels), 64): X = inputs[i:i+64] target = labels[i:i+64] optimizer.zero_grad() outputs = model(X ).view(len(target)) loss = criterion(outputs, target) loss.backward() optimizer.step() print('epoch {}, loss {}'.format(epoch, loss.item()))
Tabular Playground Series - Jan 2021
14,332,086
data_all['tfidf_cosineSim_search_title'] = data_all['tfidf_cosineSim_search_title'].fillna(data_all['tfidf_cosineSim_search_title'].sum() /len(data_all)) data_all['tfidf_cosineSim_search_description'] = data_all['tfidf_cosineSim_search_description'].fillna(data_all['tfidf_cosineSim_search_description'].sum() /len(data_all))<feature_engineering>
with torch.no_grad() : if torch.cuda.is_available() : predicted = model(torch.Tensor(np.array(X_test)).cuda() ).view(len(y_test)).cpu() else: predicted = model(torch.Tensor(np.array(X_test)) ).view(len(y_test)) print(criterion(predicted, torch.Tensor(np.array(y_test))))
Tabular Playground Series - Jan 2021
14,332,086
arr1 = [] arr2 = [] arr3 = [] for i in range(len(data_all)) : product_title_i = data_all['product_title'][i] description_i = data_all['product_description'][i] attribute_i = data_all['attribute'][i] search_term_i = data_all['search_term'][i] vectorizer1 = CountVectorizer() vectorizer2 = CountVectorizer() vectorizer3 = CountVectorizer() tf1 = vectorizer1.fit_transform([product_title_i, search_term_i]) tf2 = vectorizer2.fit_transform([description_i, search_term_i]) tf3 = vectorizer3.fit_transform([attribute_i, search_term_i]) cosineSim_title_search = cosineSim(tf1.toarray() [0], tf1.toarray() [1]) arr1.append(cosineSim_title_search) cosineSim_description = cosineSim(tf2.toarray() [0], tf2.toarray() [1]) arr2.append(cosineSim_description) cosineSim_attribute = cosineSim(tf3.toarray() [0], tf3.toarray() [1]) arr3.append(cosineSim_attribute )<feature_engineering>
submission['target'] = model(torch.Tensor(np.array(test)) ).view(-1 ).detach() submission.to_csv('res.csv' )
Tabular Playground Series - Jan 2021
14,190,729
data_all['cosineSim_search_title'] = arr1 data_all['cosineSim_search_description'] = arr2 data_all['cosineSim_search_attribute'] = arr3<feature_engineering>
from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error import optuna
Tabular Playground Series - Jan 2021
14,190,729
data_all['product_info'] = data_all['search_term']+'\t'+data_all['product_title']+'\t'+data_all['product_description']+'\t'+data_all['attribute'] data_all['word_in_title'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0],str(x ).split('\t')[1])) data_all['word_in_description'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0],str(x ).split('\t')[2])) data_all['word_in_attributes'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0],str(x ).split('\t')[3])) data_all['ratio_title'] = data_all['word_in_title']/data_all['number_of_words_in_search'] data_all['ratio_description'] = data_all['word_in_description']/data_all['number_of_words_in_search'] data_all['ratio_attributes'] = data_all['word_in_attributes']/data_all['number_of_words_in_search'] data_all['title_length'] = data_all.product_title.str.len()<feature_engineering>
SAMPLE_RATE = 0.4 RANDOM_SEED = 1 EARLY_STOPPING_ROUND = 100
Tabular Playground Series - Jan 2021
14,190,729
data_all['attr'] = str(data_all['search_term'])+"\t"+str(data_all['brand']) data_all['brand_in_search'] = data_all['attr'].map(lambda x:str_common_word(str(x ).split('\t')[0],str(x ).split('\t')[1])) data_all['ratio_brand'] = data_all['brand_in_search']/data_all['len_of_brand']<feature_engineering>
train = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') test = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv') sample_sub = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv' )
Tabular Playground Series - Jan 2021
14,190,729
data_all['ratio_title'] = data_all['ratio_title'].fillna(data_all['ratio_title'].sum() /len(data_all)) data_all['ratio_description'] = data_all['ratio_description'].fillna(data_all['ratio_description'].sum() /len(data_all)) data_all['ratio_attributes'] = data_all['ratio_attributes'].fillna(data_all['ratio_attributes'].sum() /len(data_all))<normalization>
features = [c for c in train.columns if c not in('id', 'target')] features
Tabular Playground Series - Jan 2021
14,190,729
def fuzzy_partial_ratio(string_1, string_2):
    """Thin wrapper around fuzz.partial_ratio, named for use in Series.map pipelines."""
    return fuzz.partial_ratio(string_1, string_2)
X = train_sample[features] y = train_sample.target X_test = test[features]
Tabular Playground Series - Jan 2021
14,190,729
def fuzzy_token_sort_ratio(string_1, string_2):
    """Thin wrapper around fuzz.token_sort_ratio, named for use in Series.map pipelines."""
    return fuzz.token_sort_ratio(string_1, string_2)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=RANDOM_SEED )
Tabular Playground Series - Jan 2021
14,190,729
data_all['fuzzy_ratio_in_title'] = data_all['product_info'].map(lambda x:fuzzy_partial_ratio(x.split('\t')[0],x.split('\t')[1])) data_all['fuzzy_ratio_in_description'] = data_all['product_info'].map(lambda x:fuzzy_partial_ratio(x.split('\t')[0],x.split('\t')[2])) data_all['fuzzy_ratio_in_attribute'] = data_all['product_info'].map(lambda x:fuzzy_partial_ratio(x.split('\t')[0],x.split('\t')[3]))<feature_engineering>
X_train, X_eval, y_train, y_eval = train_test_split(X_train, y_train, test_size=0.1, random_state=RANDOM_SEED )
Tabular Playground Series - Jan 2021
14,190,729
data_all['fuzzy_token_sort_ratio_in_title'] = data_all['product_info'].map(lambda x:fuzzy_token_sort_ratio(x.split('\t')[0],x.split('\t')[1])) data_all['fuzzy_token_sort_ratio_in_description'] = data_all['product_info'].map(lambda x:fuzzy_token_sort_ratio(x.split('\t')[0],x.split('\t')[2])) data_all['fuzzy_token_sort_ratio_in_attribute'] = data_all['product_info'].map(lambda x:fuzzy_token_sort_ratio(x.split('\t')[0],x.split('\t')[3]))<feature_engineering>
def objective(trial):
    """Optuna objective: fit a CatBoost regressor with sampled hyper-parameters
    and return the MSE on the held-out validation split (lower is better).

    Reads X_train/y_train, X_eval/y_eval, X_valid/y_valid and the
    RANDOM_SEED / EARLY_STOPPING_ROUND constants from the enclosing notebook.
    """
    params = {
        # Searched hyper-parameters.
        'learning_rate': trial.suggest_discrete_uniform('learning_rate', 0.001, 0.02, 0.001),
        'depth': trial.suggest_int('depth', 9, 15),
        'l2_leaf_reg': trial.suggest_discrete_uniform('l2_leaf_reg', 1.0, 5.5, 0.5),
        'min_child_samples': trial.suggest_categorical('min_child_samples', [1, 4, 8, 16, 32]),
        # Fixed settings: depth-wise growth, RMSE metric, iteration-based
        # overfitting detector with a 20-round patience window.
        'grow_policy': 'Depthwise',
        'iterations': 10000,
        'use_best_model': True,
        'eval_metric': 'RMSE',
        'od_type': 'iter',
        'od_wait': 20,
        'random_state': RANDOM_SEED,
        'logging_level': 'Silent',
    }
    model = CatBoostRegressor(**params)
    # .copy() guards against CatBoost mutating the shared frames between trials.
    model.fit(X_train.copy(), y_train.copy(),
              eval_set=[(X_eval.copy(), y_eval.copy())],
              early_stopping_rounds=EARLY_STOPPING_ROUND)
    return mean_squared_error(y_valid, model.predict(X_valid.copy()))
Tabular Playground Series - Jan 2021
14,190,729
# Mean TF-IDF weight of each text field.
# FIX: the original triple-append loop also built an `arr2` list that was
# never used; dropped here.
# NOTE(review): a fresh vocabulary is fitted on every single document, so each
# "mean" reflects only that one document's term counts -- confirm this is the
# intended feature before relying on it.
tf = TfidfVectorizer()

def _mean_tfidf(text):
    """Mean entry of the one-document TF-IDF matrix for `text`."""
    return tf.fit_transform([text]).mean()

data_all['mean_product_des'] = [_mean_tfidf(t) for t in data_all['product_description']]
data_all['mean_product_title'] = [_mean_tfidf(t) for t in data_all['product_title']]
data_all['mean_attribute'] = [_mean_tfidf(t) for t in data_all['attribute']]
%%time study = optuna.create_study(study_name=f'catboost-seed{RANDOM_SEED}') study.optimize(objective, n_trials=10000, n_jobs=-1, timeout=24000 )
Tabular Playground Series - Jan 2021
14,190,729
data_all['last_word_title_match'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0].split(" ")[-1],str(x ).split('\t')[1]))<feature_engineering>
study.best_value
Tabular Playground Series - Jan 2021
14,190,729
data_all['last_word_description_match'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0].split(" ")[-1],str(x ).split('\t')[2]))<feature_engineering>
study.best_params
Tabular Playground Series - Jan 2021
14,190,729
data_all['first_word_title_match'] = data_all['product_info'].map(lambda x:str_common_word(str(x ).split('\t')[0].split(" ")[0],str(x ).split('\t')[1]))<feature_engineering>
%%time optimized_regressor = CatBoostRegressor(learning_rate=study.best_params['learning_rate'], depth=study.best_params['depth'], l2_leaf_reg=study.best_params['l2_leaf_reg'], min_child_samples=study.best_params['min_child_samples'], grow_policy='Depthwise', iterations=10000, use_best_model=True, eval_metric='RMSE', od_type='iter', od_wait=20, random_state=RANDOM_SEED, logging_level='Silent') optimized_regressor.fit(X_train.copy() , y_train.copy() , eval_set=[(X_eval.copy() , y_eval.copy())], early_stopping_rounds=EARLY_STOPPING_ROUND) pred_train = optimized_regressor.predict(X_train.copy()) pred_valid = optimized_regressor.predict(X_valid.copy() )
Tabular Playground Series - Jan 2021
14,190,729
# Overlap between the FIRST word of the search term and the product description.
# FIX: the second argument used bare x.split where every sibling feature
# (last_word_title_match, first_word_title_match, ...) wraps the cell in
# str() first; aligned for consistency and to survive non-string cells.
data_all['first_word_description_match'] = data_all['product_info'].map(
    lambda x: str_common_word(str(x).split('\t')[0].split(" ")[0], str(x).split('\t')[2]))
mean_squared_error(y_train, pred_train )
Tabular Playground Series - Jan 2021
14,190,729
data_all_test = data_all<drop_column>
mean_squared_error(y_valid, pred_valid )
Tabular Playground Series - Jan 2021
14,190,729
data_all = data_all.drop(['product_title', 'search_term', 'product_description', 'search_term_tokens', 'product_title_tokens', 'product_description_tokens', 'product_info', 'attr', 'attribute', 'brand'], axis = 1 )<prepare_x_and_y>
%%time sample_sub['target'] = optimized_regressor.predict(X_test )
Tabular Playground Series - Jan 2021
14,190,729
train = data_all[:74067] test = data_all[74067:]<prepare_x_and_y>
sample_sub.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
14,101,366
y_train = train['relevance'].values X_train = train.drop(['id', 'relevance'], axis = 1 ).values<drop_column>
PATH = '.. /input/tabular-playground-series-jan-2021/' train = pd.read_csv(PATH + 'train.csv') test = pd.read_csv(PATH + 'test.csv') sample = pd.read_csv(PATH + 'sample_submission.csv') print(train.shape, test.shape )
Tabular Playground Series - Jan 2021
14,101,366
id_test = test['id'] X_test = test.drop(['id', 'relevance'], axis = 1 ).values<train_on_grid>
FEATURES = train.drop(['id', 'target'], 1 ).columns FEATURES
Tabular Playground Series - Jan 2021
14,101,366
xgb = XGBRegressor() param_grid = {'max_depth':[5, 6], 'n_estimators': [130, 150, 170], 'learning_rate' : [0.1]} model_xgb = sklearn.model_selection.GridSearchCV(estimator = xgb, param_grid = param_grid, n_jobs = -1) model_xgb.fit(X_train, y_train) y_pred = model_xgb.predict(X_test )<feature_engineering>
cv = KFold(n_splits=5, shuffle=True) cv
Tabular Playground Series - Jan 2021
14,101,366
# Cap predicted relevance at the task maximum of 3.
# Vectorized in-place clip replaces the per-element Python loop
# (y_pred comes from model_xgb.predict, i.e. a numpy array).
y_pred[y_pred > 3] = 3
model = lgb.LGBMRegressor() model
Tabular Playground Series - Jan 2021
14,101,366
pd.DataFrame({"id": id_test, "relevance": y_pred} ).to_csv('submission.csv',index=False )<compute_train_metric>
X = train.drop(['id', 'target'], 1) y = train['target'] print(X.shape, y.shape )
Tabular Playground Series - Jan 2021
14,101,366
def caculator_RMSE(X_train, y_train, model):
    """Root-mean-squared error of `model`'s predictions against y_train.

    (Name kept as-is -- it is called under this spelling elsewhere.)
    """
    residuals = y_train - model.predict(X_train)
    return np.sqrt((residuals ** 2).mean())
params = { 'n_estimators' : [100, 200, 500, 1000], 'learning_rate' : [0.01, 0.05, 0.1], 'subsample' : [1, 0.9, 0.8], 'feature_fraction' : [1, 0.9, 0.8] } clf = GridSearchCV( estimator=model, cv=cv, scoring='neg_root_mean_squared_error', param_grid=params, verbose=10 )
Tabular Playground Series - Jan 2021
14,101,366
print('RMSE using XGBRegression : ' , caculator_RMSE(X_train, y_train, model_xgb))<split>
clf.fit(X, y) print(clf.best_score_) print(clf.best_estimator_ )
Tabular Playground Series - Jan 2021
14,101,366
test1 = data_all_test[:74067] test2 = data_all_test[74067:]<count_missing_values>
preds = clf.predict(test.drop(['id'], 1)) preds
Tabular Playground Series - Jan 2021
14,101,366
test2.isnull().sum()<drop_column>
sample['target']= preds sample
Tabular Playground Series - Jan 2021
14,101,366
<prepare_x_and_y><EOS>
sample.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
14,199,994
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<drop_column>
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error import lightgbm as lgb
Tabular Playground Series - Jan 2021
14,199,994
id_test = test2['id'] X_test = test2.drop(['id','relevance','product_title', 'search_term', 'product_description', 'search_term_tokens', 'product_title_tokens', 'product_description_tokens', 'product_info', 'attr', 'attribute', 'brand'], axis = 1 )<train_on_grid>
path = '.. /input/tabular-playground-series-jan-2021/' seed = 42 splits = 5
Tabular Playground Series - Jan 2021
14,199,994
model = LinearRegression().fit(X, y) y_pred = model.predict(X_test) print("RMSE using LinearRegressor:", caculator_RMSE(X, y, model))<train_model>
train = pd.read_csv(path + 'train.csv') test = pd.read_csv(path + 'test.csv') sample_submission = pd.read_csv(path + 'sample_submission.csv' )
Tabular Playground Series - Jan 2021
14,199,994
regressor = RandomForestRegressor(n_estimators=20, random_state=0) regressor.fit(X, y) y_pred = regressor.predict(X_test) print("RMSE using RandomForest:", caculator_RMSE(X, y, regressor))<train_on_grid>
def model_lgb():
    """Factory for the fixed-hyper-parameter LightGBM regressor used in the CV loop."""
    return lgb.LGBMRegressor(
        boosting_type='gbdt',
        num_leaves=31,
        max_depth=11,
        learning_rate=0.1,
        n_estimators=750,
        subsample_for_bin=200000,
        objective=None,
        class_weight=None,
        min_split_gain=0.0,
        min_child_weight=0.001,
        min_child_samples=20,
        subsample=1.0,
        subsample_freq=0,
        colsample_bytree=1.0,
        reg_alpha=0.0,
        reg_lambda=0.0,
        random_state=None,
        n_jobs=-1,
        silent=True,
    )
Tabular Playground Series - Jan 2021
14,199,994
param_grid = { 'loss' : ['ls'], 'n_estimators' : [3], 'max_depth' : [9], 'max_features' : ['auto'] } gbr = GradientBoostingRegressor() model_gbr = sklearn.model_selection.GridSearchCV(estimator = gbr, n_jobs = -1, param_grid = param_grid) model_gbr.fit(X, y) y_pred = model_gbr.predict(X_test) print("RMSE using GBR:", caculator_RMSE(X, y, model_gbr))<train_on_grid>
# K-fold CV: train LightGBM on each fold, report per-fold RMSE, and average
# the test-set predictions over the folds.
# FIX: current scikit-learn raises ValueError for
# KFold(random_state=seed, shuffle=False) because the seed has no effect
# without shuffling; the seed is dropped -- split order is deterministic anyway.
mean_error = 0
all_predict = 0
cv = KFold(n_splits=splits, shuffle=False)
for train_index, test_index in cv.split(train):
    fold_train = train.loc[train_index, ::]
    fold_valid = train.loc[test_index, ::]
    y_train, y_test = fold_train.target, fold_valid.target
    x_train = fold_train.drop(['id', 'target'], axis=1)
    x_test = fold_valid.drop(['id', 'target'], axis=1)
    model = model_lgb().fit(x_train, y_train)
    # squared=False -> RMSE directly.
    error = mean_squared_error(y_test, model.predict(x_test), squared=False)
    print(error, " ")
    mean_error += error
    all_predict += model.predict(test.drop(['id'], axis=1))
print('mean error', mean_error / splits)
predict = all_predict / splits
Tabular Playground Series - Jan 2021
14,199,994
<set_options><EOS>
sample_submission['target'] = predict sample_submission.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
14,045,167
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<load_from_csv>
%matplotlib inline
Tabular Playground Series - Jan 2021
14,045,167
train = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') train.describe()<drop_column>
train_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') test_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv' )
Tabular Playground Series - Jan 2021
14,045,167
train_ID = train['Id'] test_ID = test['Id'] train.drop("Id", axis = 1, inplace = True) test.drop("Id", axis = 1, inplace = True )<drop_column>
y = train_df['target'] X_lr = train_df.drop(['id', 'target'], axis=1) scaler = StandardScaler() scaler.fit(X_lr) X_lr = scaler.transform(X_lr) X_lr_test = scaler.transform(test_df.drop('id', axis=1 ).values) X_train_lr, X_val_lr, y_train_lr, y_val_lr = train_test_split(X_lr, y, test_size=0.3, random_state=17, shuffle=False )
Tabular Playground Series - Jan 2021
14,045,167
train = train.drop(train[(train['GrLivArea']>4000)&(train['SalePrice']<300000)].index )<feature_engineering>
y = train_df['target'] X = train_df.drop(['target'], axis=1) X_train_df, X_val_df, y_train_df, y_val_df = train_test_split(X, y, test_size =0.3, shuffle=False) del train_df
Tabular Playground Series - Jan 2021
14,045,167
train["SalePrice"] = np.log1p(train["SalePrice"]) check_skewness('SalePrice' )<prepare_x_and_y>
m1 = Lasso(alpha=0.001, random_state=123 )
Tabular Playground Series - Jan 2021
14,045,167
ntrain = train.shape[0] ntest = test.shape[0] y_train = train.SalePrice.values all_data = pd.concat(( train, test)).reset_index(drop=True) all_data.drop(['SalePrice'], axis=1, inplace=True) print("all_data size is : {}".format(all_data.shape))<create_dataframe>
m1_fit = m1.fit(X_train_lr, y_train_lr) print('Score reached: {} '.format(m1.score(X_train_lr, y_train_lr)))
Tabular Playground Series - Jan 2021
14,045,167
all_data_na =(all_data.isnull().sum() / len(all_data)) * 100 all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({'Missing Ratio' :all_data_na} )<filter>
X_test_lr = test_df.drop(['id'], axis=1) y_test_lasso = m1.predict(X_test_lr )
Tabular Playground Series - Jan 2021
14,045,167
all_data.PoolQC.loc[all_data.PoolQC.notnull() ]<data_type_conversions>
m2 = Ridge(alpha=0.1 )
Tabular Playground Series - Jan 2021