kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
13,668,673 | <load_from_csv><EOS> | df_test = pd.read_csv('/kaggle/input/titanic/test.csv', dtype = data_type)
df_test.Name = df_test.Name.apply(lambda x: x.lower())
df_test.Name = df_test.Name.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))
df_test.Name = df_test.Name.apply(lambda x: x.translate(str.maketrans('', '', string.digits)))
df_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
df_test.drop(columns=['PassengerId'], inplace=True)
for col in categorical_cols:
df_test[col] = df_test[col].astype('category')
df_test[col] = df_test[col].cat.add_categories('Unknown')
df_test[col] = df_test[col].fillna('Unknown')
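# CatBoost's Pool wraps the frame together with explicit categorical and text column declarations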
test_pool = Pool(data=df_test, cat_features=categorical_cols, text_features = text_feature)
predictions = model_with_text.predict(test_pool)
df_submission['Survived'] = predictions
df_submission.to_csv('cb_with_text.csv', index=False ) | Titanic - Machine Learning from Disaster |
13,086,423 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<count_missing_values> | warnings.filterwarnings('ignore')
%matplotlib inline
| Titanic - Machine Learning from Disaster |
13,086,423 | train.isnull().sum()<count_missing_values> | train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
combine = [train_df, test_df]
| Titanic - Machine Learning from Disaster |
13,086,423 | test.isnull().sum()<feature_engineering> | train_df[['Pclass', 'Survived']].groupby(['Pclass'] ).mean() | Titanic - Machine Learning from Disaster |
13,086,423 | train['reviews_per_month'] = train['reviews_per_month'].fillna(0)
test['reviews_per_month'] = test['reviews_per_month'].fillna(0 )<feature_engineering> | target_count = train_df['Survived'].value_counts()
target_count | Titanic - Machine Learning from Disaster |
13,086,423 | train['last_review'] = train['last_review'].fillna('2019-12-31')
test['last_review'] = test['last_review'].fillna('2019-12-31' )<feature_engineering> | print(train_df.isnull().sum())
print()
print(test_df.isnull().sum())
| Titanic - Machine Learning from Disaster |
13,086,423 | train['last_review'] = pd.to_datetime(train['last_review'])
test['last_review'] = pd.to_datetime(test['last_review'])
train['year'] = train['last_review'].dt.year-2010
test['year'] = test['last_review'].dt.year-2010
train['month'] = train['last_review'].dt.month-1
test['month'] = test['last_review'].dt.month-1<feature_engineering> | for dataset in combine:
dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\.',\
expand=False ) | Titanic - Machine Learning from Disaster |
13,086,423 | train['latitude'] = train['latitude']-40
train['longitude'] = train['longitude']+75
test['latitude'] = test['latitude']-40
test['longitude'] = test['longitude']+75
train['waru'] = train['latitude'] / train['longitude']
train['kakeru'] = train['latitude'] * train['longitude']
test['waru'] = test['latitude'] / test['longitude']
test['kakeru'] = test['latitude'] * test['longitude']<feature_engineering> | for dataset in combine:
dataset['Title'] = dataset['Title'].\
replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr',\
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs' ) | Titanic - Machine Learning from Disaster |
13,086,423 | train['minimum_nights_log'] = np.log1p(train['minimum_nights'])
test['minimum_nights_log'] = np.log1p(test['minimum_nights'])
train['calculated_host_listings_count_log'] = np.log1p(train['calculated_host_listings_count'])
test['calculated_host_listings_count_log'] = np.log1p(test['calculated_host_listings_count'])
train['availability_365_log'] = np.log1p(train['availability_365'])
test['availability_365_log'] = np.log1p(test['availability_365'])
train['number_of_reviews_log'] = np.log1p(train['number_of_reviews'])
test['number_of_reviews_log'] = np.log1p(test['number_of_reviews'])
train['reviews_per_month_log'] = np.log1p(train['reviews_per_month'])
test['reviews_per_month_log'] = np.log1p(test['reviews_per_month'] )<define_variables> | train_df[['Title', 'Survived']].groupby(['Title'] ).mean() | Titanic - Machine Learning from Disaster |
13,086,423 | target = 'price'
list_cols = ['neighbourhood_group', 'neighbourhood', 'room_type',]
target_columns = ['neighbourhood_group', 'neighbourhood', 'latitude', 'longitude', 'room_type',
'minimum_nights', 'number_of_reviews', 'reviews_per_month', 'calculated_host_listings_count', 'availability_365', 'waru', 'kakeru', 'minimum_nights_log',
'calculated_host_listings_count_log', 'availability_365_log',
'number_of_reviews_log', 'reviews_per_month_log', 'year', 'month']
ce_ohe = ce.OneHotEncoder(cols=list_cols)
train_onehot = ce_ohe.fit_transform(train[target_columns] )<count_missing_values> | for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Title)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | train_onehot.isnull().sum()<categorify> | train_df = train_df.drop(['Name', 'Title'], axis=1)
test_df = test_df.drop(['Name', 'Title'], axis=1)
combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
13,086,423 | test_onehot = ce_ohe.transform(test[target_columns] )<categorify> | for dataset in combine:
dataset['Sex'] = dataset['Sex'].\
map({'female': 1, 'male': 0} ).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | pca2 = PCA(n_components=5)
pca2_results = pca2.fit_transform(train_onehot)
train_onehot['pca0']=pca2_results[:,0]
train_onehot['pca1']=pca2_results[:,1]
train_onehot['pca2']=pca2_results[:,2]
train_onehot['pca3']=pca2_results[:,3]
train_onehot['pca4']=pca2_results[:,4]
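# transform the test set with the PCA fitted on train so both share the same component space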
pca2_results = pca2.transform(test_onehot)
test_onehot['pca0']=pca2_results[:,0]
test_onehot['pca1']=pca2_results[:,1]
test_onehot['pca2']=pca2_results[:,2]
test_onehot['pca3']=pca2_results[:,3]
test_onehot['pca4']=pca2_results[:,4]<prepare_x_and_y> | guess_ages = np.zeros(( 2,3),dtype=int)
guess_ages | Titanic - Machine Learning from Disaster |
13,086,423 | y_train = np.log1p(train["price"] )<split> | for idx,dataset in enumerate(combine):
if idx==0:
print('Working on Training Data set\n')
else:
print('-'*35)
print('Working on Test Data set\n')
print('Guess values of age based on sex and pclass of the passenger...')
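# fill each missing Age with the median age of the matching Sex x Pclass group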
for i in range(0, 2):
for j in range(0,3):
guess_df = dataset[(dataset['Sex'] == i)\
&(dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess)
print('Guess_Age table:\n',guess_ages)
print('\nAssigning age values to NAN age values in the dataset...')
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull())&(dataset.Sex == i)\
&(dataset.Pclass == j+1),'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
print()
print('Done!\n')
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | kf = KFold(n_splits=5, shuffle=True, random_state=1)
s = list(kf.split(train_onehot, y_train))<choose_model_class> | for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']=4
train_df.head()
| Titanic - Machine Learning from Disaster |
13,086,423 | gbk_models = []
for train_i, val_i in s:
gbk = GradientBoostingRegressor()
gbk.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = gbk.predict(train_onehot.iloc[val_i])
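# expm1 undoes the log1p target transform so the error is computed in raw price units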
mse_gbk = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(mse_gbk)
gbk_models.append(gbk )<train_model> | train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | mlp_models = []
for train_i, val_i in s:
mlp = MLPRegressor(max_iter=100, hidden_layer_sizes=(100,100),
activation='relu', learning_rate_init=0.01)
mlp.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = mlp.predict(train_onehot.iloc[val_i])
mse_mlp = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(mse_mlp)
mlp_models.append(mlp )<find_best_model_class> | for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=True)\
.mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
13,086,423 | rfc_models = []
for train_i, val_i in s:
rfc = RandomForestRegressor()
rfc.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = rfc.predict(train_onehot.iloc[val_i])
mse_rfc = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(mse_rfc)
rfc_models.append(rfc )<predict_on_test> | for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=True ).mean() | Titanic - Machine Learning from Disaster |
13,086,423 | models = gbk_models + rfc_models
preds = np.array([np.expm1(model.predict(test_onehot)) for model in models])
preds = preds.mean(axis=0)
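# prices cannot be negative, so floor the averaged ensemble predictions at zero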
preds = np.where(preds < 0 , 0, preds )<save_to_csv> | for df in combine:
df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1, inplace=True)
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | ans["price"] = preds
ans.to_csv("heroz_nakai.csv", index=False )<set_options> | for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head() | Titanic - Machine Learning from Disaster |
13,086,423 | %matplotlib inline
<import_modules> | train_df[['Age*Class', 'Survived']].groupby(['Age*Class'], as_index=True ).mean() | Titanic - Machine Learning from Disaster |
13,086,423 | import pandas as pd
import numpy as np<import_modules> | for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=True)\
.mean().sort_values(by='Survived', ascending=False)
| Titanic - Machine Learning from Disaster |
13,086,423 | import pandas as pd
import numpy as np<save_to_csv> | for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Embarked)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | def create_download_link(df, title = "Download CSV file",count=[0]):
count[0] = count[0]+1
filename = "data"+str(count[0])+".csv"
csv = df.to_csv(index=False)
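# base64-encode the CSV so it can be embedded in a data: URI and downloaded straight from the notebook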
b64 = base64.b64encode(csv.encode())
payload = b64.decode()
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
html = html.format(payload=payload,title=title,filename=filename)
return HTML(html )<load_from_csv> | for dataset in combine:
dataset.drop('Embarked', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
13,086,423 | df = pd.read_csv("/kaggle/input/dmassign1/data.csv")
df_test = df.iloc[1300:]
df_train = df.iloc[:1300]<drop_column> | test_df['Fare'].fillna(test_df['Fare'].dropna().median() , inplace=True)
test_df.head() | Titanic - Machine Learning from Disaster |
13,086,423 | class_labels={}
class_id = {}
for i,j in df_train.iterrows() :
class_labels[j["ID"]] = j["Class"]
class_id[i]=j["ID"]
labels = df_train["Class"]
df_train1 = df_train.drop(["ID","Class"],axis=1)
df_test1 = df_test.drop(["ID","Class"],axis=1 )<concatenate> | for dataset in combine:
dataset['Fare']=pd.qcut(train_df['Fare'],4,labels=np.arange(4))
dataset['Fare'] = dataset['Fare'].astype(int)
train_df[['Fare','FareBand']].head() | Titanic - Machine Learning from Disaster |
13,086,423 | df_full = df_train1.append(df_test1)
df_full.replace("?",np.nan,inplace=True )<count_unique_values> | train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
13,086,423 | a = df_full.nunique()
print(a)
a = dict(a)
for i,j in a.items() :
a[i] = [j,df_full[i].dtype]
<count_values> | train_df.head(7 ) | Titanic - Machine Learning from Disaster |
13,086,423 | for i,j in a.items() :
t,q = j
if q == np.dtype('O'):
if t <100:
print(Counter(df_full[i]))<categorify> | X = train_df.drop("Survived", axis=1)
Y = train_df["Survived"]
X_submission = test_df.drop("PassengerId", axis=1 ).copy()
print(X.shape, Y.shape ) | Titanic - Machine Learning from Disaster |
13,086,423 | def myconverter(string):
if string=="me":return "me"
elif string=="ME":return "me"
elif string=="M.E.":return "me"
else:
try:
return string.lower()
except:
return string<categorify> | np.random.seed(1337)
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape ) | Titanic - Machine Learning from Disaster |
13,086,423 | le = preprocessing.LabelEncoder()
scaler = StandardScaler()
ab=[]
for i,j in a.items() :
t,q = j
if q == np.dtype('O'):
if t <100:
df_full[i] = df_full[i].apply(lambda x:myconverter(x))
df_full[i].fillna(df_full[i].mode() [0], inplace=True)
ab.append(i)
else:
df_full[i]= df_full[i].astype("float64")
df_full[i].fillna(df_full[i].mean() ,inplace=True)
df_full[i] = scaler.fit_transform(df_full[i].values.reshape(-1,1))
else:
df_full[i] = scaler.fit_transform(df_full[i].values.reshape(-1,1))
df_full = pd.get_dummies(df_full, prefix=ab)
<feature_engineering> | logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_val)
acc_logreg = sum(Y_pred == Y_val)/len(Y_val)*100
print('Logistic Regression labeling accuracy:', str(round(acc_logreg,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | modified_PCA = PCA(n_components = 0.99,svd_solver="full" ).fit_transform(df_full.values )<create_dataframe> | acc_log_2 = logreg.score(X_val, Y_val)
print('Logistic Regression using built-in method:', str(round(acc_log_2*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | PCA_df = pd.DataFrame(data=modified_PCA )<find_best_params> | knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
acc_knn = knn.score(X_val, Y_val)
print('K-Nearest Neighbors labeling accuracy:', str(round(acc_knn*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | n_clusters=144
model = KMeans(n_clusters=n_clusters,random_state=42)
pred = model.fit_predict(PCA_df.values)
cntr = Counter(pred)
mapping={}
res={0:0,1:0,2:0,3:0,4:0,5:0}
score = [defaultdict(int)for i in range(n_clusters)]
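# majority vote: tally true labels among the 1300 labeled points in each cluster, then map the cluster to its most frequent class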
for i,j in enumerate(pred[:1300]):
score[j][labels[i]]+=1
for j,i in enumerate(score):
t =[(j,k)for k,j in i.items() ]
t.sort(reverse=True)
t = [(j,k)for k,j in t]
try:
mapping[j]=int(t[0][0])
res[int(t[0][0])] +=cntr[j]
except:
random_int = 0
mapping[j]=random_int
res[random_int]+=cntr[j]
print(res )<prepare_output> | svc = SVC()
svc.fit(X_train, Y_train)
acc_svc = svc.score(X_val, Y_val)
print('Support Vector Machines labeling accuracy:', str(round(acc_svc*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | PCA_df["pred"] = pred<categorify> | perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
acc_perceptron = perceptron.score(X_val, Y_val)
print('Perceptron labeling accuracy:', str(round(acc_perceptron*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | def mapper(x,mapping):
y = mapping[x]
if y==0:return np.random.randint(1,6)
else:return y<feature_engineering> | gradboost = xgb.XGBClassifier(n_estimators=1000)
gradboost.fit(X_train, Y_train)
acc_xgboost = gradboost.score(X_val, Y_val)
print('XGBoost labeling accuracy:', str(round(acc_xgboost*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | PCA_df["pred"]=PCA_df["pred"].apply(lambda x:mapper(x,mapping))<count_values> | random_forest = RandomForestClassifier(n_estimators=500)
random_forest.fit(X_train, Y_train)
acc_rf = random_forest.score(X_val, Y_val)
print('Random Forest labeling accuracy:', str(round(acc_rf*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | Counter(PCA_df["pred"] )<split> | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense | Titanic - Machine Learning from Disaster |
13,086,423 | df_submission = PCA_df.iloc[1300:]<prepare_output> | model = Sequential()
model.add(Dense(units=300, activation='relu', input_shape=(14,)))
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=50, activation='relu'))
model.add(Dense(units=1, activation='sigmoid')) | Titanic - Machine Learning from Disaster |
13,086,423 | predictions = df_submission["pred"]<create_dataframe> | model.compile(loss = 'binary_crossentropy', optimizer = 'adam',\
metrics = ['accuracy'])
model.fit(X_train.values, Y_train.values, epochs = 50, batch_size= 50 ) | Titanic - Machine Learning from Disaster |
13,086,423 | submission = pd.DataFrame(data={"ID": df["ID"][1300:], "Class": predictions } )<count_values> | print('Neural Network accuracy:',str(round(model.evaluate(X_val.values, \
Y_val.values, batch_size=50,verbose=False)[1]*100,2)) ,'%' ) | Titanic - Machine Learning from Disaster |
13,086,423 | Counter(submission["Class"] )<load_pretrained> | base_model = RandomForestClassifier(n_jobs=-1,
random_state=42)
parameters = {
"max_depth": [None, 1,2,3,4,5,6,7,8,9,10,11,12,13,14],
"n_estimators": [350, 400, 450, 500, 550, 600,650,700,750],
"max_features": ["auto", 1,2,3,4,5,6,7,8,9,10,11,12,13,14],
"criterion": ["gini", "entropy"],
"bootstrap": [False, True],
"class_weight": [None, "balanced"]
}
optimal_model = RandomizedSearchCV(base_model,
param_distributions=parameters,
n_iter=30,
cv=3,
verbose=2,
scoring="accuracy")
optimal_model.fit(X_train, Y_train)
optimal_model.best_estimator_
pred = optimal_model.predict(X_val ) | Titanic - Machine Learning from Disaster |
13,086,423 | create_download_link(submission )<save_to_csv> | Y_submission = optimal_model.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False ) | Titanic - Machine Learning from Disaster |
13,633,060 | submission.to_csv("Final.csv",index=False )<set_options> | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv" ) | Titanic - Machine Learning from Disaster |
13,633,060 | warnings.filterwarnings('ignore')
sns.set(style="white")
%matplotlib inline<save_to_csv> | Col_with_missing = [col for col in train_data.columns if train_data[col].isnull().any() ]
print(Col_with_missing ) | Titanic - Machine Learning from Disaster |
13,633,060 | def rmse(y_test, y_pred):
return np.sqrt(((y_test - y_pred)** 2 ).mean())
def rmse_cv(estimator, X_test, y_test):
return - np.sqrt(((estimator.predict(X_test)- y_test)** 2 ).mean())
def rmse_log(estimator, X_test, y_test):
return - np.sqrt(((np.exp(estimator.predict(X_test)) - np.exp(y_test)) ** 2 ).mean())
def submit_file(y_pred, filename):
pd.Series(y_pred, name='DelayTime' ).to_csv('{}.csv'.format(filename),
index_label='id', header=True)
def hacking_score(pred, y_train):
zeros_submit_score = 68.79140
return pred + zeros_submit_score - np.sqrt(np.square(y_train.values ).mean() )<load_from_csv> | Col_with_missing = [col for col in test_data.columns if test_data[col].isnull().any() ]
print(Col_with_missing ) | Titanic - Machine Learning from Disaster |
train_df = pd.read_csv('../input/train_features.csv')
test_df = pd.read_csv('../input/test_features.csv')
target = pd.read_csv('../input/train_target.csv', index_col='id')
train_df.head()<feature_engineering> | s =(train_data.dtypes == 'object')
object_cols = list(s[s].index)
print("Categorical variables:")
print(object_cols ) | Titanic - Machine Learning from Disaster |
13,633,060 | train_df['DepHour'] = train_df['CRSDepTime'] // 100
train_df['DepHour'].replace(to_replace=24, value=0, inplace=True)
train_df['DepTime'] = train_df['DepHour'] + train_df['CRSDepTime'] % 100 / 60
test_df['DepHour'] = test_df['CRSDepTime'] // 100
test_df['DepHour'].replace(to_replace=24, value=0, inplace=True)
test_df['DepTime'] = test_df['DepHour'] + test_df['CRSDepTime'] % 100 / 60
train_df['ArrHour'] = train_df['CRSArrTime'] // 100
train_df['ArrHour'].replace(to_replace=24, value=0, inplace=True)
train_df['ArrTime'] = train_df['ArrHour'] + train_df['CRSArrTime'] % 100 / 60
test_df['ArrHour'] = test_df['CRSArrTime'] // 100
test_df['ArrHour'].replace(to_replace=24, value=0, inplace=True)
test_df['ArrTime'] = test_df['ArrHour'] + test_df['CRSArrTime'] % 100 / 60
test_df.drop(['CRSDepTime', 'CRSArrTime', 'Year', ], axis=1, inplace=True)
train_df.drop(['CRSDepTime', 'CRSArrTime', 'Year', ], axis=1, inplace=True)
train_df['target'] = target<drop_column> | feature_name=['Pclass','Sex','Age','SibSp','Parch','Fare','Embarked']
X=train_data[feature_name]
y=train_data["Survived"]
X_test=test_data[feature_name] | Titanic - Machine Learning from Disaster |
13,633,060 | test_df.drop(['CRSElapsedTime', ], axis=1, inplace=True)
train_df.drop(['CRSElapsedTime', ], axis=1, inplace=True )<data_type_conversions> | X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2 ) | Titanic - Machine Learning from Disaster |
13,633,060 | train_df.fillna('EVIL', inplace=True)
test_df.fillna('EVIL', inplace=True )<data_type_conversions> | my_imputer=SimpleImputer(strategy="most_frequent")
imputed_X_train= pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_test=pd.DataFrame(my_imputer.transform(X_test))
imputed_X_valid=pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.index = X_train.index
imputed_X_valid.index = X_valid.index
imputed_X_test.index = X_test.index
imputed_X_train.columns=X_train.columns
imputed_X_valid.columns=X_valid.columns
imputed_X_test.columns=X_test.columns | Titanic - Machine Learning from Disaster |
13,633,060 |
<categorify> | Col_with_missing_2 = [col for col in imputed_X_test.columns if imputed_X_test[col].isnull().any() ]
print(Col_with_missing_2 ) | Titanic - Machine Learning from Disaster |
13,633,060 | OHE = OneHotEncoder(handle_unknown='ignore')
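# handle_unknown='ignore' encodes categories unseen during fit as all zeros instead of raising an error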
ohe_x_train = OHE.fit_transform(train_df.iloc[:, [0,1,2,3,4,5,6,7,9,11]])
num_x_train = train_df.iloc[:, [8, 10, 12]]
ohe_x_test = OHE.transform(test_df.iloc[:, [0,1,2,3,4,5,6,7,9,11]])
num_x_test = test_df.iloc[:, [8, 10, 12]]
X = hstack([ohe_x_train, num_x_train] ).tocsr()
X_test = hstack([ohe_x_test, num_x_test] ).tocsr()
y = train_df.target.values
<find_best_model_class> | New_feature_train = imputed_X_train['Sex'] + "_" + imputed_X_train['Embarked']
New_feature_valid = imputed_X_valid['Sex'] + "_" + imputed_X_valid['Embarked']
New_feature_test = imputed_X_test['Sex'] + "_" + imputed_X_test['Embarked'] | Titanic - Machine Learning from Disaster |
13,633,060 | def CB_cv_scoring(X_train, y_train, X_test, nfolds=5):
predict = np.zeros(X_test.shape[0])
oof = np.zeros(X_train.shape[0])
folds = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=870)
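# out-of-fold CV: each fold's held-out rows are predicted by a model that never trained on them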
for fold_,(ind_trn, ind_valid)in enumerate(folds.split(X_train, y_train)) :
print("Fold {}".format(fold_))
clf = CatBoostRegressor(
random_seed=42,
logging_level='Silent',
early_stopping_rounds=200,
)
categorical_features_indices = [0,1,2,3,4,5,6,7,9,11]
clf.fit(
X_train.iloc[ind_trn], y_train.iloc[ind_trn],
cat_features=categorical_features_indices,
eval_set=(X_train.iloc[ind_valid], y_train.iloc[ind_valid]),
);
oof[ind_valid] = clf.predict(X_train.iloc[ind_valid],)
pred = clf.predict(X_test)/ nfolds
predict += pred
print("CV score: {:<8.5f}".format(rmse(y_train, oof)))
return np.array(predict )<compute_test_metric> | imputed_X_train["Sex_Embarked"]=New_feature_train
imputed_X_valid["Sex_Embarked"]=New_feature_valid
imputed_X_test["Sex_Embarked"]=New_feature_test | Titanic - Machine Learning from Disaster |
13,633,060 | lgb_pred = LGB_cv_scoring(X, y, X_test)
submit_file(lgb_pred, 'lgb_pred' )<compute_test_metric> | Cat_cols=['Sex','Embarked','Sex_Embarked'] | Titanic - Machine Learning from Disaster |
13,633,060 | cat_pred = CB_cv_scoring(train_df.drop('target', axis=1), train_df.target, test_df)
submit_file(cat_pred, 'cat_pred' )<train_model> | OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(imputed_X_train[Cat_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(imputed_X_valid[Cat_cols]))
OH_cols_test = pd.DataFrame(OH_encoder.transform(imputed_X_test[Cat_cols]))
OH_cols_train.index = imputed_X_train.index
OH_cols_valid.index = imputed_X_valid.index
OH_cols_test.index = imputed_X_test.index
num_X_train = imputed_X_train.drop(Cat_cols, axis =1)
num_X_valid = imputed_X_valid.drop(Cat_cols, axis =1)
num_X_test = imputed_X_test.drop(Cat_cols, axis =1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)
OH_X_test = pd.concat([num_X_test, OH_cols_test], axis=1 ) | Titanic - Machine Learning from Disaster |
13,633,060 | mix_pred =(lgb_pred + cat_pred)/ 2
submit_file(mix_pred, 'mix_pred' )<compute_test_metric> | OH_X_train = OH_X_train.apply(pd.to_numeric)
OH_X_valid = OH_X_valid.apply(pd.to_numeric)
OH_X_test = OH_X_test.apply(pd.to_numeric)
OH_X_train=OH_X_train.rename(columns={0:"Sex1", 1:"Sex2"})
OH_X_train=OH_X_train.rename(columns={2:"C", 3:"Q",4:"S"})
OH_X_valid=OH_X_valid.rename(columns={0:"Sex1", 1:"Sex2"})
OH_X_valid=OH_X_valid.rename(columns={2:"C", 3:"Q",4:"S"})
OH_X_test=OH_X_test.rename(columns={0:"Sex1", 1:"Sex2"})
OH_X_test=OH_X_test.rename(columns={2:"C", 3:"Q",4:"S"} ) | Titanic - Machine Learning from Disaster |
13,633,060 | hack_mix_pred = hacking_score(mix_pred, y)
submit_file(hack_mix_pred, 'hack_mix_pred' )<import_modules> | my_model = AdaBoostClassifier(n_estimators=200, learning_rate=0.05)
my_model.fit(OH_X_train, y_train)
y_pred5 = my_model.predict(OH_X_valid)
print("Accuracy:",metrics.accuracy_score(y_valid, y_pred5)) | Titanic - Machine Learning from Disaster |
13,633,060 | <load_from_csv><EOS> | predictions2 = my_model.predict(OH_X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions2})
output.to_csv('my_submission_02_06.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
13,737,929 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<split> | pd.plotting.register_matplotlib_converters()
%matplotlib inline
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
| Titanic - Machine Learning from Disaster |
13,737,929 | train_data, dev_data = train_test_split(data, test_size=0.1, random_state=0 )<prepare_x_and_y> | train_path = "../input/titanic/train.csv"
train_data = pd.read_csv(train_path)
train_data = train_data.set_index("PassengerId")
train_data[0:20] | Titanic - Machine Learning from Disaster |
13,737,929 | X_train, y_train = train_data.drop(columns=['G1', 'G2', 'G3']), train_data['G3']
X_dev, y_dev = dev_data.drop(columns=['G1', 'G2', 'G3']), dev_data['G3']<define_variables> | test_path = "../input/titanic/test.csv"
test_data = pd.read_csv(test_path)
test_data = test_data.set_index("PassengerId")
X_test = test_data
X_test.head() | Titanic - Machine Learning from Disaster |
13,737,929 | num_features = ['age', 'absences', 'failures', 'studytime', 'Medu', 'Fedu', 'goout', 'absences', 'freetime']
cat_features = ['sex']<train_model> | y = train_data.Survived
X = train_data.drop(['Survived','Name','Ticket'], axis = 1)
X_test = X_test.drop(['Name', 'Ticket'], axis = 1)
X.head() | Titanic - Machine Learning from Disaster |
13,737,929 | class FeatureSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.columns]
class OutlierQuantileRemover(BaseEstimator, TransformerMixin):
def __init__(self, column, q=0.99):
self.column = column
self.q = q
self.quantile = None
def fit(self, X, y=None):
self.quantile = X[self.column].quantile(self.q)
return self
def transform(self, X, y=None):
X.loc[X[self.column] > self.quantile, self.column] = self.quantile
return X
class OutlierThresholdRemover(BaseEstimator, TransformerMixin):
def __init__(self, column, t):
self.column = column
self.t = t
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X.loc[X[self.column] > self.t, self.column] = self.t
return X
pipeline = Pipeline([
('selector', FeatureSelector(columns=num_features+cat_features)) ,
('out', OutlierThresholdRemover(column='failures', t=3)) ,
('out2', OutlierQuantileRemover(column='freetime')) ,
('pre', make_column_transformer(( OneHotEncoder(handle_unknown='ignore'), cat_features), remainder='passthrough')) ,
('clf', LinearRegression(normalize=True))
])
pipeline.fit(X_train, y_train )<predict_on_test> | print("Column, dtype, null, unique")
for column in X:
print(column,X[column].dtype,X[column].isnull().sum() ,X[column].nunique())
print("Percentage of null Cabin values: " + str(X.Cabin.isnull().sum() /X.Cabin.isnull().count())) | Titanic - Machine Learning from Disaster |
13,737,929 | y_pred_train = pipeline.predict(X_train)
y_pred_dev = pipeline.predict(X_dev)
print('train: ', mean_absolute_error(y_train, y_pred_train))
print('dev: ', mean_absolute_error(y_dev, y_pred_dev))<load_from_csv> | print("Column, dtype, null")
for column in X_test:
print(column,X_test[column].dtype,X_test[column].isnull().sum())
print("Percentage of null Cabin values in test data: " + str(X_test.Cabin.isnull().sum() /X_test.Cabin.isnull().count())) | Titanic - Machine Learning from Disaster |
13,737,929 | X_test = pd.read_csv("/kaggle/input/machine-learning-lab-cas-data-science-hs-20/test-data.csv", index_col=0 )<predict_on_test> | X = X.drop(['Cabin'], axis = 1 ) | Titanic - Machine Learning from Disaster |
13,737,929 | y_test_pred = pipeline.predict(X_test )<save_to_csv> | X_test = X_test.drop(['Cabin'], axis = 1 ) | Titanic - Machine Learning from Disaster |
13,737,929 | X_test_submission = pd.DataFrame(index=X_test.index)
X_test_submission['G3'] = y_test_pred
X_test_submission.to_csv('submission_outlier_pipeline.csv', header=True, index_label='id' )<set_options> | enco = LabelEncoder()
X_label = train_data.copy()
enco.fit_transform(X.Sex.unique())
X_label['Sex'] = enco.transform(train_data['Sex'])
X_label.Embarked = X_label.Embarked.fillna(value = 'A')
X_labeled = X_label.copy()
enco2 = LabelEncoder()
enco2.fit_transform(X_label.Embarked.unique())
X_labeled['Embarked'] = enco2.transform(X_label['Embarked'] ) | Titanic - Machine Learning from Disaster |
13,737,929 | warnings.simplefilter(action='ignore', category=FutureWarning )<load_from_csv> | imp = SimpleImputer(strategy = 'most_frequent')
X_imp = pd.DataFrame(imp.fit_transform(X))
X_test_imp = pd.DataFrame(imp.transform(X_test))
X_imp.columns = X.columns
X_test_imp.columns = X_test.columns
cols = ['Sex','Embarked']
enco = OneHotEncoder(handle_unknown = 'ignore', sparse = False)
X_label = pd.DataFrame(enco.fit_transform(X_imp[cols]))
X_test_label = pd.DataFrame(enco.transform(X_test_imp[cols]))
X_label.index = X.index
X_test_label.index = X_test.index
X = X.drop(cols, axis=1)
X_test = X_test.drop(cols, axis=1)
X = pd.concat([X, X_label], axis=1)
X_test = pd.concat([X_test, X_test_label], axis=1)
X = X.rename(columns={0: "female", 1: "male", 2: "C", 3: "Q", 4: "S"})
X_test = X_test.rename(columns={0: "female", 1: "male", 2: "C", 3: "Q", 4: "S"} ) | Titanic - Machine Learning from Disaster |
13,737,929 | train = pd.read_csv('../input/insta_train.csv')
test = pd.read_csv('../input/insta_test.csv')<categorify> | X_new = X.drop(['Parch','SibSp'], axis = 1)
X_test = X_test.drop(['Parch','SibSp'], axis = 1)
X_train, X_val, y_train, y_val = train_test_split(X_new, y, train_size=0.8, test_size=0.2, random_state=0)
X_train.head() | Titanic - Machine Learning from Disaster |
13,737,929 | v = TfidfVectorizer(max_features=None, max_df=0.1, stop_words='english', min_df=0.0003)
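# learn the TF-IDF vocabulary from training captions only, then apply it to both train and test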
x = v.fit(train['caption'].fillna('' ).values)
a = v.transform(train['caption'].fillna('' ).values)
b = v.transform(test['caption'].fillna('' ).values )<categorify> | def testing(bs):
model = keras.Sequential([
layers.BatchNormalization(input_shape = [8]),
layers.Dense(units = 8, activation = 'relu'),
layers.Dense(units = 8, activation = 'relu'),
layers.Dense(units = 1, activation = 'sigmoid')
])
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['binary_accuracy'])
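# EarlyStopping halts once val loss improves by less than 0.001 for 25 straight epochs, restoring the best weights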
early_stopping = keras.callbacks.EarlyStopping(patience = 25, min_delta = 0.001, restore_best_weights = True)
history = model.fit(X_train, y_train, validation_data =(X_val, y_val), batch_size = bs, epochs = 500, callbacks = [early_stopping], verbose = 0)
history = pd.DataFrame(history.history)
print(bs)
print(( "Best Validation Loss: {:0.4f}" +\
"
Best Validation Accuracy: {:0.4f}")\
.format(history['val_loss'].min() ,
history['val_binary_accuracy'].max())) | Titanic - Machine Learning from Disaster |
13,737,929 | <concatenate><EOS> | pred = model.predict(X_test)
preds = []
for i in range(0,418):
if np.isnan(pred[i][0]):
pred[i][0] = 1
preds.append(int(round(pred[i][0])))
output = pd.DataFrame({'PassengerId': X_test.index,'Survived': preds})
output.to_csv('submission.csv', index=False)
preds
| Titanic - Machine Learning from Disaster |
13,587,551 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model> | warnings.filterwarnings("ignore")
%matplotlib inline | Titanic - Machine Learning from Disaster |
13,587,551 | kmeans = KMeans(
n_clusters=30,
random_state=0
)
kmeans_train = kmeans.fit(a )<predict_on_test> | train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
submission = pd.read_csv("../input/titanic/gender_submission.csv")
data = train.append(test ) | Titanic - Machine Learning from Disaster |
13,587,551 | train['cluster'] = kmeans_train.labels_
test['cluster'] = kmeans.predict(b )<prepare_x_and_y> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | y = train['likes']<feature_engineering> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | def ing(df):
df['user_len'] = df['user'].str.len()
df['username_len'] = df['username'].str.len()
df['img_len'] = df['img'].str.len()
df['description_len'] = df['description'].str.len()
df['caption_upper'] = df['caption'].str.findall(r'[A-Z]' ).str.len()
df['po_co'] = df['posts']/(df['comments']+1)
df['po_co_pow'] = df['posts']*(df['comments'])
df['fol'] = df['followers']/(df['followings']+1)
df['fol_pow'] = df['followers']*(df['followings'])
df['act'] = df['comments']/(df['followers']+1)
df['pos'] = df['posts']/(df['followers']+1)
df['pof'] = df['posts']/(df['followings']+1)
df['user_count'] = df.groupby('user')['posts'].transform('count')
df['language_encoded'] = df.groupby('user')['language_encoded'].transform('median')
df['hashtag_max'] = df.groupby('user')['hashtag'].transform('max')
df['hashtag_min'] = df.groupby('user')['hashtag'].transform('min')
df['hashtag_mean'] = df.groupby('user')['hashtag'].transform('mean')
df['hashtag_std'] = df.groupby('user')['hashtag'].transform('std')
df['hashtag_mean_diff'] = df['hashtag'] /(df['hashtag_mean']+1)
df['comments_max'] = df.groupby('user')['comments'].transform('max')
df['comments_min'] = df.groupby('user')['comments'].transform('min')
df['comments_mean'] = df.groupby('user')['comments'].transform('mean')
df['comments_std'] = df.groupby('user')['comments'].transform('std')
df['comments_mean_diff'] = df['comments'] /(df['comments_mean']+1)
df['bot'] = df['user'].str.count('_|
df['bot_caption'] = pd.to_numeric(df['caption'].str.count('_|
df['numbers_user'] = df['user'].str.contains('0|1|2|3|4|5|6|7|8|9' ).astype(int)
df['numbers_caption'] = pd.to_numeric(df['caption'].str.contains('0|1|2|3|4|5|6|7|8|9'), errors='coerce')
df['s1080.1080'] = pd.to_numeric(df['img'].str.contains('1080\.1080'), errors='coerce')
df['e35'] = pd.to_numeric(df['img'].str.contains('e35'), errors='coerce')
return df<drop_column> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | train = ing(train ).select_dtypes(exclude=['object'] ).drop(['likes'], axis=1 ).fillna(0)
test = ing(test ).select_dtypes(exclude=['object'] ).fillna(0 )<choose_model_class> | test.isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | lgbm_model = lgb.LGBMRegressor(
application='mae',
metric='mape',
learning_rate=0.005,
n_estimators = 5500,
num_leaves = 10,
colsample_bytree = 0.59,
subsample = 0.5,
max_bin = 100,
reg_alpha = 1,
bagging_freq = 5,
reg_lambda=10,
min_data = 1)
param_grid = {
}
def smape(A, F):
A = 10**(A)-1
F = 10**(F)-1
return 100/len(A)* np.sum(2 * np.abs(F - A)/(np.abs(A)+ np.abs(F)))
scoring = {'smape': make_scorer(smape, greater_is_better=False)}
<choose_model_class> | train.groupby('Pclass' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | xgb_model = xgb.XGBRegressor(max_depth=7,
learning_rate=0.01,
n_estimators=870,
booster='gbtree',
n_jobs=-1,
min_child_weight=5,
subsample=0.8,
colsample_bytree=0.8,
base_score=0.5,
random_state=42,
reg_alpha=0.2,
reg_lambda=3
)<train_model> | train['family'] = train['SibSp'] + train['Parch'] + 1 | Titanic - Machine Learning from Disaster |
13,587,551 | VotReg = VotingRegressor(estimators=[('lgbm', lgbm_model),('xgb', xgb_model)])
VotReg.fit(train, np.log10(y+1))<save_to_csv> | train.groupby('family' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | print(10**(VotReg.predict(test)) -1)
prediction = pd.read_csv('../input/sample_submission.csv')
prediction['likes'] =(10**(VotReg.predict(test)))-1
prediction.to_csv('exp_submission.csv', index=False )<install_modules> | for i in range(len(train)) :
if(train['family'][i] > 1):
train['family'][i] = 1
else:
train['family'][i] = 0 | Titanic - Machine Learning from Disaster |
13,587,551 | !pip install gdown
!gdown https://drive.google.com/uc?id=1-2ayU2W8YnVgvfHEukbpT9-HyrLF5vXt<install_modules> | train['Name'] = train.Name.str.extract('([A-Za-z]+)\.',expand = False ) | Titanic - Machine Learning from Disaster |
13,587,551 | !pip install torchsummary<set_options> | top6 = train['Name'].value_counts() [:6].index.to_list()
top6 | Titanic - Machine Learning from Disaster |
13,587,551 | %matplotlib inline<set_options> | train['Name'] = train['Name'].apply(lambda x: x if x in top6 else 'Other' ) | Titanic - Machine Learning from Disaster |
13,587,551 | torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
np.random.seed(1337)
random.seed(1337 )<load_from_csv> | train.groupby('Name' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | class ImageDataset(Dataset):
def __init__(self, csv_file, root_dir, transform=None):
self.data_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.data_frame)
def __getitem__(self, idx):
img_name = os.path.join(self.root_dir, self.data_frame['Id'][idx])
image = Image.open(img_name ).convert('RGB')
label = np.array(self.data_frame['Category'][idx])
if self.transform:
image = self.transform(image)
sample =(image, label)
return sample<choose_model_class> | train['Cabin'].fillna(0,inplace = True ) | Titanic - Machine Learning from Disaster |
13,587,551 | class FocalLoss(nn.Module):
def __init__(self, num_class, alpha=None, gamma=2, balance_index=-1, smooth=None, size_average=True):
super(FocalLoss, self ).__init__()
self.num_class = num_class
self.alpha = alpha
self.gamma = gamma
self.smooth = smooth
self.size_average = size_average
if self.alpha is None:
self.alpha = torch.ones(self.num_class, 1)
elif isinstance(self.alpha,(list, np.ndarray)) :
assert len(self.alpha)== self.num_class
self.alpha = torch.FloatTensor(alpha ).view(self.num_class, 1)
self.alpha = self.alpha / self.alpha.sum()
elif isinstance(self.alpha, float):
alpha = torch.ones(self.num_class, 1)
alpha = alpha *(1 - self.alpha)
alpha[balance_index] = self.alpha
self.alpha = alpha
else:
raise TypeError('Not support alpha type')
if self.smooth is not None:
if self.smooth < 0 or self.smooth > 1.0:
raise ValueError('smooth value should be in [0,1]')
def forward(self, logit, target):
if logit.dim() > 2:
logit = logit.view(logit.size(0), logit.size(1), -1)
logit = logit.permute(0, 2, 1 ).contiguous()
logit = logit.view(-1, logit.size(-1))
target = target.view(-1, 1)
epsilon = 1e-10
alpha = self.alpha
idx = target.cpu().long()
one_hot_key = torch.FloatTensor(target.size(0), self.num_class ).zero_()
one_hot_key = one_hot_key.scatter_(1, idx, 1)
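# scatter_ places a 1 at each target index, converting integer labels to one-hot rows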
if one_hot_key.device != logit.device:
one_hot_key = one_hot_key.to(logit.device)
if self.smooth:
one_hot_key = torch.clamp(
one_hot_key, self.smooth/(self.num_class-1), 1.0 - self.smooth)
pt =(one_hot_key * logit ).sum(1)+ epsilon
logpt = pt.log()
gamma = self.gamma
alpha = alpha[idx]
alpha=alpha.to(device)
loss = -1 * alpha * torch.pow(( 1 - pt), gamma)* logpt
if self.size_average:
loss = loss.mean()
else:
loss = loss.sum()
return loss
<init_hyperparams> | train['Cabin'] = train['Cabin'].apply(lambda x: 1 if x != 0 else 0 ) | Titanic - Machine Learning from Disaster |
13,587,551 | class SubsetSampler(Sampler):
def __init__(self, indices):
self.num_samples = len(indices)
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return self.num_samples<split> | train.groupby('Cabin' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | def train_valid_split(dataset, test_size = 0.25, shuffle = False, random_seed = 0):
length = dataset.__len__()
indices = list(range(length))
if shuffle == True:
random.seed(random_seed)
random.shuffle(indices)
if type(test_size)is float:
split = floor(test_size * length)
elif type(test_size)is int:
split = test_size
else:
raise ValueError('test_size should be an int or a float')
return indices[split:], indices[:split]<categorify> | train['fare_val'] = pd.qcut(train['Fare'],5,labels=False)
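# labels=False makes qcut return integer bin codes 0-4 for five equal-frequency fare buckets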
train['fare_val'] = train['fare_val'].astype(int ) | Titanic - Machine Learning from Disaster |
13,587,551 | transform_augmented = transforms.Compose([
transforms.RandomResizedCrop(224),
PowerPIL() ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
transform_raw = transforms.Compose([
transforms.Resize(( 224,224)) ,
transforms.ToTensor() ,
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
<load_from_csv> | Titanic - Machine Learning from Disaster | |
13,587,551 | trainset = ImageDataset(csv_file = './../input/train.csv', root_dir = './../input/data/data', transform=transform_augmented)
valset = ImageDataset(csv_file = './../input/train.csv', root_dir = './../input/data/data', transform=transform_raw)
accset = ImageDataset(csv_file = './../input/train.csv', root_dir = './../input/data/data', transform=transform_raw )<load_from_csv> | train.groupby('fare_val' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | df=pd.read_csv("./../input/train.csv" )<feature_engineering> | train['ticket'] = train['Ticket'].apply(lambda x: x.replace('/','' ).replace('.','' ).split() [0] if not x.isdigit() else 'N' ) | Titanic - Machine Learning from Disaster |
13,587,551 | class_freq=defaultdict(int)
freq=[]
for ind,val in df.iterrows() :
class_freq[int(val["Category"])]+=1
for ind,val in class_freq.items() :
freq.append([ind,val])
freq.sort()
freq=[i[1] for i in freq]
wt_per_class=[0.]*67
N=float(sum(freq))
for i in range(67):
wt_per_class[i] = N/float(freq[i])
weight=[0]*len(df)
for ind,val in df.iterrows() :
cat=val["Category"]
weight[ind]=wt_per_class[cat]<load_pretrained> | train['SibSp'] = pd.cut(train['SibSp'],bins=[0,1,100],labels=[0,1],right=False ).astype(int)
train['Parch'] = pd.cut(train['Parch'], bins=[0,1,100], labels=[0,1], right=False ).astype(int ) | Titanic - Machine Learning from Disaster |
13,587,551 | train_idx, valid_idx = train_valid_split(trainset, 0.25)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetSampler(valid_idx)
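# SubsetRandomSampler reshuffles training indices every epoch; the plain SubsetSampler keeps validation order fixed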
train_loader = DataLoader(trainset,batch_size=50,sampler=train_sampler,num_workers=4)
valid_loader = DataLoader(valset,batch_size=200,sampler=valid_sampler,num_workers=4)
acc_loader = DataLoader(accset,batch_size=200,num_workers=4)
<set_options> | train.groupby('Parch' ).Survived.value_counts() | Titanic - Machine Learning from Disaster |
13,587,551 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )<load_pretrained> | test['family'] = test['SibSp'] + test['Parch'] + 1
for i in range(len(test)) :
if(test['family'][i] > 1):
test['family'][i] = 1
else:
test['family'][i] = 0
test['Name'] = test['Name'].apply(lambda x: x if x in top6 else 'Other')
test['Cabin'].fillna(0,inplace = True)
test['Cabin'] = test['Cabin'].apply(lambda x: 1 if x != 0 else 0)
test['ticket'] = test['Ticket'].apply(lambda x: x.replace('/','' ).replace('.','' ).split() [0] if not x.isdigit() else 'N')
test['fare_val'] = pd.qcut(test['Fare'],5,labels=False)
test['fare_val'] = test['fare_val'].fillna(value = test['fare_val'].mode() [0])
test['fare_val'] = test['fare_val'].astype(int)
test['SibSp'] = pd.cut(test['SibSp'],bins = [0,1,200],labels=[0,1],right = False ).astype(int)
test['Parch'] = pd.cut(test['Parch'],bins = [0,1,200],labels=[0,1],right = False ).astype(int ) | Titanic - Machine Learning from Disaster |
13,587,551 | model_ft = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x16d_wsl')
<choose_model_class> | features = [
'Pclass',
'Name',
'Sex',
'Age',
'SibSp',
'Parch',
'fare_val',
]
target = ['Survived'] | Titanic - Machine Learning from Disaster |
13,587,551 | avgpool = model_ft.avgpool
l4 = model_ft.layer4
l3 = model_ft.layer3<find_best_params> | train[features].isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | for param in model_ft.parameters() :
param.requires_grad = False
<choose_model_class> | test[features].isnull().sum() | Titanic - Machine Learning from Disaster |
13,587,551 | model_ft.fc = nn.Sequential(nn.Dropout(p=0.5),nn.Linear(2048, 1024),nn.LeakyReLU(inplace=True),nn.Linear(1024,67),nn.Softmax(dim=1))
<train_model> | ; | Titanic - Machine Learning from Disaster |
13,587,551 | model_ft.to(device )<choose_model_class> | Age_mean = data['Age'].mean()
Age_std = data['Age'].std()
train['Age'] = train['Age'].fillna(np.random.randint(Age_mean-Age_std,Age_mean+Age_std))
test['Age'] = test['Age'].fillna(np.random.randint(Age_mean-Age_std,Age_mean+Age_std)) | Titanic - Machine Learning from Disaster |