Dataset schema:
kernel_id     int64           values 24.2k – 23.3M
prompt        string          lengths 8 – 1.85M
completetion  string          lengths 1 – 182k
comp_name     string          lengths 5 – 57
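A minimal sketch of how a dump with this schema could be loaded and sanity-checked in pandas, assuming it has been exported to a CSV file; the file name kernels.csv is hypothetical:

import pandas as pd

df = pd.read_csv("kernels.csv")  # hypothetical export of this table
print(df.dtypes)                                       # kernel_id should be int64
print(df["prompt"].str.len().agg(['min', 'max']))      # cf. 8 – 1.85M
print(df["completetion"].str.len().min())              # column name as spelled in the schema
print(df["comp_name"].str.len().agg(['min', 'max']))   # cf. 5 – 57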
14,556,854
submission = pd.DataFrame({'ImageId':img_ids, 'Class':F_results}) submission = submission.sort_values(['ImageId']) submission.to_csv("submission.csv", index=False) submission.tail()<categorify>
train.drop('Cabin',inplace = True,axis =1 )
Titanic - Machine Learning from Disaster
14,556,854
class VowelConsonantDataset(Dataset): def __init__(self, file_path,train=True,transform=None): self.transform = transform self.file_path=file_path self.train=train self.file_names=[file for _,_,files in os.walk(self.file_path) for file in files] self.len = len(self.file_names) if self.train: self.classes_mapping=self.get_classes() def __len__(self): return len(self.file_names) def __getitem__(self, index): file_name=self.file_names[index] image_data=self.pil_loader(self.file_path+"/"+file_name) if self.transform: image_data = self.transform(image_data) if self.train: file_name_splitted=file_name.split("_") Y1 = self.classes_mapping[file_name_splitted[0]] Y2 = self.classes_mapping[file_name_splitted[1]] z1,z2=torch.zeros(10),torch.zeros(10) z1[Y1-10],z2[Y2]=1,1 label=torch.stack([z1,z2]) return image_data, label else: return image_data, file_name def pil_loader(self,path): with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB') def get_classes(self): classes=[] for name in self.file_names: name_splitted=name.split("_") classes.extend([name_splitted[0],name_splitted[1]]) classes=list(set(classes)) classes_mapping={} for i,cl in enumerate(sorted(classes)): classes_mapping[cl]=i return classes_mapping<set_options>
sex = pd.get_dummies(train['Sex'],drop_first=True) embark = pd.get_dummies(train['Embarked'],drop_first=True )
Titanic - Machine Learning from Disaster
14,556,854
train_on_gpu = torch.cuda.is_available()<categorify>
train = pd.concat([train,sex,embark],axis=1 )
Titanic - Machine Learning from Disaster
14,556,854
transform_train = transforms.Compose([ transforms.ColorJitter() , transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor() , ] )<load_pretrained>
train.drop(['Sex','Embarked','PassengerId','Name','Ticket'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
14,556,854
full_data = VowelConsonantDataset("../input/train/train",train=True,transform=transform_train) train_size = int(0.9 * len(full_data)) test_size = len(full_data) - train_size train_data, validation_data = random_split(full_data, [train_size, test_size]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=50, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=50, shuffle=True)<create_dataframe>
st = StandardScaler()
Titanic - Machine Learning from Disaster
14,556,854
test_data = VowelConsonantDataset("../input/test/test",train=False,transform=transform_train) test_loader = torch.utils.data.DataLoader(test_data, batch_size=20,shuffle=False)<set_options>
feature_scale = ['Age','Fare'] train[feature_scale] = st.fit_transform(train[feature_scale] )
Titanic - Machine Learning from Disaster
14,556,854
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device )<choose_model_class>
x = train.drop(['Survived'],axis=1) y = train['Survived']
Titanic - Machine Learning from Disaster
14,556,854
class MyModel(nn.Module): def __init__(self, num_classes1, num_classes2): super(MyModel, self ).__init__() self.model_snet = models.mobilenet_v2(pretrained=True) self.model_snet.classifier = nn.Sequential(nn.Dropout(p=0.2)) self.fc1 = nn.Linear(1280, num_classes1,bias=True) torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.zeros_(self.fc1.bias) self.fc2 = nn.Linear(1280, num_classes2,bias=True) torch.nn.init.xavier_uniform_(self.fc2.weight) torch.nn.init.zeros_(self.fc2.bias) def forward(self, x): x = self.model_snet(x) out1 = self.fc1(x) out2 = self.fc2(x) return out1, out2<choose_model_class>
from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
14,556,854
my_model = MyModel(10,10 )<train_model>
tree = DecisionTreeClassifier() tree.fit(x,y) tree.score(x,y )
Titanic - Machine Learning from Disaster
14,556,854
my_model = my_model.to(device )<categorify>
test = pd.read_csv('/kaggle/input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
14,556,854
def evaluation(dataloader,model): total,correct=0,0 for data in dataloader: inputs,labels=data inputs,labels=inputs.to(device),labels.to(device) out1,out2=model(inputs) _,pred1=torch.max(out1.data,1) _,pred2=torch.max(out2.data,1) _,labels1=torch.max(labels[:,0,:].data,1) _,labels2=torch.max(labels[:,1,:].data,1) total+=labels.size(0) fin1=(pred1==labels1) fin2=(pred2==labels2) correct+=(fin1 & fin2).sum().item() return 100*correct/total<choose_model_class>
test2 = test.copy()
Titanic - Machine Learning from Disaster
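The evaluation cell above originally counted a sample as correct when fin1 == fin2, which is also true when both heads are wrong; it now requires fin1 & fin2, i.e. both predictions right. A standalone sketch of the difference, with illustrative values only:

import torch

head1_ok = torch.tensor([True, False, False])  # head 1 correct per sample?
head2_ok = torch.tensor([True, False, True])   # head 2 correct per sample?
print((head1_ok == head2_ok).sum().item())  # 2: counts the doubly-wrong sample as correct
print((head1_ok & head2_ok).sum().item())   # 1: only the sample both heads got right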
14,556,854
loss_fn = nn.CrossEntropyLoss() plist = [ {'params': my_model.fc1.parameters() , 'lr': 5e-3}, {'params': my_model.fc2.parameters() , 'lr': 5e-3} ] lr=0.01 opt = optim.SGD(my_model.parameters() ,lr=0.01,momentum=0.9,nesterov=True) <train_model>
test['Age']=test[['Age','Pclass']].apply(impute_age,axis = 1 )
Titanic - Machine Learning from Disaster
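In the optimizer cell above, plist defines per-head parameter groups but is never passed to SGD; the optimizer is built from my_model.parameters() with a single global lr instead. If the intent was to fine-tune the heads at 5e-3, SGD accepts the group list directly, as in this hedged, self-contained sketch (a toy module stands in for MyModel):

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)  # illustrative stand-in for the two-head model
plist = [{'params': model.parameters(), 'lr': 5e-3}]
opt = optim.SGD(plist, momentum=0.9, nesterov=True)  # each group's own lr is honored
print(opt.param_groups[0]['lr'])  # 5e-3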
14,556,854
loss_epoch_arr = [] loss_arr = [] max_epochs = 10 min_loss = 1000 batch_size = 50 n_iters = np.ceil(9000/batch_size) for epoch in range(max_epochs): for i, data in enumerate(train_loader, 0): my_model.train() images, labels = data images = images.to(device) targetnp=labels[:,0,:].cpu().numpy() targetnp1 = labels[:,1,:].cpu().numpy() new_targets1 = np.argmax(targetnp,axis=1) new_targets2 = np.argmax(targetnp1,axis=1) new_targets1=torch.LongTensor(new_targets1) new_targets2=torch.LongTensor(new_targets2) new_targets1 = new_targets1.to(device) new_targets2 = new_targets2.to(device) opt.zero_grad() out = my_model.forward(images) loss_fc1 = loss_fn(out[0], new_targets1) loss_fc2 = loss_fn(out[1],new_targets2) loss = torch.add(loss_fc1,loss_fc2) loss.backward() opt.step() if min_loss > loss.item(): min_loss = loss.item() best_model = copy.deepcopy(my_model.state_dict()) print('Min loss %0.2f' % min_loss) if i % 100 == 0: print('Iteration: %d/%d, Loss: %0.2f' %(i, n_iters, loss.item())) del images, labels, out torch.cuda.empty_cache() loss_arr.append(loss.item()) print("Epoch number :",epoch) print("Train Accuracy :",evaluation(train_loader,my_model)) print("Validation Accuracy :", evaluation(validation_loader,my_model)) print('Learning Rate :',lr) loss_epoch_arr.append(loss.item()) plt.plot(loss_arr) plt.show()<compute_test_metric>
test.drop('Cabin',inplace = True,axis =1 )
Titanic - Machine Learning from Disaster
14,556,854
evaluation(validation_loader,my_model )<find_best_params>
sex = pd.get_dummies(test['Sex'],drop_first=True) embark = pd.get_dummies(test['Embarked'],drop_first=True )
Titanic - Machine Learning from Disaster
14,556,854
my_model.eval() plist=[] fn_list=[] for inputs_test, fn in test_loader: inputs_test=inputs_test.to(device) out1,out2=my_model.forward(inputs_test) _,pred1=torch.max(out1,1) pred1=pred1.tolist() _,pred2=torch.max(out2,1) pred2=pred2.tolist() for x,y,z in zip(pred1,pred2,fn): p="V"+str(x)+"_"+"C"+str(y) plist.append(p) fn_list.append(z )<save_to_csv>
test = pd.concat([test,sex,embark],axis=1 )
Titanic - Machine Learning from Disaster
14,556,854
submission = pd.DataFrame({"ImageId":fn_list, "Class":plist}) submission.to_csv('submission.csv', index=False )<categorify>
test.drop(['Sex','Embarked','PassengerId','Name','Ticket'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
14,556,854
class VowelConsonantDataset(Dataset): def __init__(self, file_path,train=True,transform=None): self.transform = transform self.file_path=file_path self.train=train self.file_names=[file for _,_,files in os.walk(self.file_path) for file in files] self.len = len(self.file_names) if self.train: self.classes_mapping=self.get_classes() def __len__(self): return len(self.file_names) def __getitem__(self, index): file_name=self.file_names[index] image_data=self.pil_loader(self.file_path+"/"+file_name) if self.transform: image_data = self.transform(image_data) if self.train: file_name_splitted=file_name.split("_") Y1 = self.classes_mapping[file_name_splitted[0]] Y2 = self.classes_mapping[file_name_splitted[1]] z1,z2=torch.zeros(10),torch.zeros(10) z1[Y1-10],z2[Y2]=1,1 label=torch.stack([z1,z2]) return image_data, label else: return image_data, file_name def pil_loader(self,path): with open(path, 'rb') as f: img = Image.open(f) return img.convert('RGB') def get_classes(self): classes=[] for name in self.file_names: name_splitted=name.split("_") classes.extend([name_splitted[0],name_splitted[1]]) classes=list(set(classes)) classes_mapping={} for i,cl in enumerate(sorted(classes)): classes_mapping[cl]=i return classes_mapping<set_options>
test['Fare'].fillna(test['Fare'].mean() ,inplace=True )
Titanic - Machine Learning from Disaster
14,556,854
train_on_gpu = torch.cuda.is_available()<categorify>
feature_scale = ['Age','Fare'] test[feature_scale] = st.transform(test[feature_scale])
Titanic - Machine Learning from Disaster
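Note on the cell above: the StandardScaler was already fit on the training features in an earlier cell (st.fit_transform(train[feature_scale])), so the test set should only be transformed; refitting on test would leak test statistics into the scaling. A minimal sketch of the pattern, with illustrative arrays:

import numpy as np
from sklearn.preprocessing import StandardScaler

st = StandardScaler()
train_ages = np.array([[22.0], [38.0], [26.0]])
test_ages = np.array([[30.0]])
st.fit_transform(train_ages)    # learn mean/std from train only
print(st.transform(test_ages))  # reuse the train statistics on test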
14,556,854
transform_train = transforms.Compose([ transforms.ColorJitter() , transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor() , ] )<split>
from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier
Titanic - Machine Learning from Disaster
14,556,854
full_data = VowelConsonantDataset("../input/padhai-hindivowelconsonantdataset/train/train",train=True,transform=transform_train) train_size = int(0.9 * len(full_data)) test_size = len(full_data) - train_size train_data, validation_data = random_split(full_data, [train_size, test_size]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=50, shuffle=True) validation_loader = torch.utils.data.DataLoader(validation_data, batch_size=50, shuffle=True)<create_dataframe>
level1 = LogisticRegression() model = StackingClassifier(estimators=level0,final_estimator=level1,cv=5 )
Titanic - Machine Learning from Disaster
14,556,854
test_data = VowelConsonantDataset("../input/padhai-hindivowelconsonantdataset/test/test",train=False,transform=transform_train) test_loader = torch.utils.data.DataLoader(test_data, batch_size=20,shuffle=False) print(len(test_data))<set_options>
level1 = LogisticRegression() model = StackingClassifier(estimators=level0,final_estimator=level1,cv=5 )
Titanic - Machine Learning from Disaster
14,556,854
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device )<choose_model_class>
model.fit(x,y )
Titanic - Machine Learning from Disaster
14,556,854
class MyModel(nn.Module): def __init__(self, num_classes1, num_classes2): super(MyModel, self ).__init__() self.model_snet = models.mobilenet_v2(pretrained=True) self.model_snet.classifier = nn.Sequential(nn.Dropout(p=0.2)) self.fc1 = nn.Linear(1280, num_classes1,bias=True) torch.nn.init.xavier_uniform_(self.fc1.weight) torch.nn.init.zeros_(self.fc1.bias) self.fc2 = nn.Linear(1280, num_classes2,bias=True) torch.nn.init.xavier_uniform_(self.fc2.weight) torch.nn.init.zeros_(self.fc2.bias) def forward(self, x): x = self.model_snet(x) out1 = self.fc1(x) out2 = self.fc2(x) return out1, out2<choose_model_class>
y_predicted = model.predict(test )
Titanic - Machine Learning from Disaster
14,556,854
my_model = MyModel(10,10 )<train_model>
submission = pd.DataFrame({ "PassengerId":test2['PassengerId'], "Survived":y_predicted } )
Titanic - Machine Learning from Disaster
14,556,854
my_model = my_model.to(device )<categorify>
submission.to_csv('first_kaggale_titanic_submission.csv',index=False )
Titanic - Machine Learning from Disaster
14,410,390
def evaluation(dataloader,model): total,correct=0,0 for data in dataloader: inputs,labels=data inputs,labels=inputs.to(device),labels.to(device) out1,out2=model(inputs) _,pred1=torch.max(out1.data,1) _,pred2=torch.max(out2.data,1) _,labels1=torch.max(labels[:,0,:].data,1) _,labels2=torch.max(labels[:,1,:].data,1) total+=labels.size(0) fin1=(pred1==labels1) fin2=(pred2==labels2) correct+=(fin1 & fin2).sum().item() return 100*correct/total<choose_model_class>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
14,410,390
loss_fn = nn.CrossEntropyLoss() plist = [ {'params': my_model.fc1.parameters() , 'lr': 5e-3}, {'params': my_model.fc2.parameters() , 'lr': 5e-3} ] lr=0.01 opt = optim.SGD(my_model.parameters() ,lr=0.01,momentum=0.9,nesterov=True) <train_model>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
14,410,390
loss_epoch_arr = [] loss_arr = [] max_epochs = 2 min_loss = 1000 batch_size = 50 n_iters = np.ceil(9000/batch_size) for epoch in range(max_epochs): for i, data in enumerate(train_loader, 0): my_model.train() images, labels = data images = images.to(device) targetnp=labels[:,0,:].cpu().numpy() targetnp1 = labels[:,1,:].cpu().numpy() new_targets1 = np.argmax(targetnp,axis=1) new_targets2 = np.argmax(targetnp1,axis=1) new_targets1=torch.LongTensor(new_targets1) new_targets2=torch.LongTensor(new_targets2) new_targets1 = new_targets1.to(device) new_targets2 = new_targets2.to(device) opt.zero_grad() out = my_model.forward(images) loss_fc1 = loss_fn(out[0], new_targets1) loss_fc2 = loss_fn(out[1],new_targets2) loss = torch.add(loss_fc1,loss_fc2) loss.backward() opt.step() if min_loss > loss.item(): min_loss = loss.item() best_model = copy.deepcopy(my_model.state_dict()) print('Min loss %0.2f' % min_loss) if i % 100 == 0: print('Iteration: %d/%d, Loss: %0.2f' %(i, n_iters, loss.item())) del images, labels, out torch.cuda.empty_cache() loss_arr.append(loss.item()) print("Epoch number :",epoch) print("Train Accuracy :",evaluation(train_loader,my_model)) print("Validation Accuracy :", evaluation(validation_loader,my_model)) print('Learning Rate :',lr) loss_epoch_arr.append(loss.item()) plt.plot(loss_arr) plt.show()<load_from_disk>
train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch'] + 1 test_data['FamilySize'] = test_data['SibSp'] + test_data['Parch'] + 1
Titanic - Machine Learning from Disaster
14,410,390
my_model.load_state_dict(best_model )<compute_test_metric>
train_data['Fare'].fillna(train_data['Fare'].median() , inplace=True) test_data['Fare'].fillna(test_data['Fare'].median() , inplace=True)
Titanic - Machine Learning from Disaster
14,410,390
evaluation(validation_loader,my_model )<find_best_params>
women = train_data.loc[train_data['Sex']=='female']['Survived'] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women )
Titanic - Machine Learning from Disaster
14,410,390
my_model.eval() plist=[] fn_list=[] for inputs_test, fn in test_loader: inputs_test=inputs_test.to(device) out1,out2=my_model.forward(inputs_test) _,pred1=torch.max(out1,1) pred1=pred1.tolist() _,pred2=torch.max(out2,1) pred2=pred2.tolist() for x,y,z in zip(pred1,pred2,fn): p="V"+str(x)+"_"+"C"+str(y) plist.append(p) fn_list.append(z )<save_to_csv>
men = train_data.loc[train_data['Sex']=='male']['Survived'] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men )
Titanic - Machine Learning from Disaster
14,410,390
submission = pd.DataFrame({"ImageId":fn_list, "Class":plist}) submission.to_csv('submission.csv', index=False )<load_from_csv>
train_data.Sex = pd.get_dummies(train_data.Sex) test_data.Sex = pd.get_dummies(test_data.Sex )
Titanic - Machine Learning from Disaster
14,410,390
df = pd.read_csv('../input/train.csv')<prepare_x_and_y>
train_data['Age'].fillna(train_data['Age'].median() , inplace=True) test_data['Age'].fillna(test_data['Age'].median() , inplace=True)
Titanic - Machine Learning from Disaster
14,410,390
y = df['Volume'] X = df.drop(['Volume','Date'],axis=1 )<import_modules>
train_data['IsAlone'] = 0 train_data.loc[train_data['FamilySize'] == 1, 'IsAlone'] = 1 test_data['IsAlone'] = 0 test_data.loc[test_data['FamilySize'] == 1, 'IsAlone'] = 1 train_data['Age*Class'] = train_data.Age * train_data.Pclass test_data['Age*Class'] = test_data.Age * test_data.Pclass
Titanic - Machine Learning from Disaster
14,410,390
from sklearn.linear_model import LinearRegression<choose_model_class>
old = train_data.loc[train_data['Age'] > 45]['Survived'] rate_old = sum(old)/len(old) print("% of old people who survived:", rate_old )
Titanic - Machine Learning from Disaster
14,410,390
reg = LinearRegression()<train_model>
mid_age = train_data.loc[(train_data['Age'] <= 45)&(train_data['Age'] >= 15)]['Survived'] rate_mid = sum(mid_age)/len(mid_age) print("% of middle aged people who survived:", rate_mid )
Titanic - Machine Learning from Disaster
14,410,390
reg.fit(X,y )<load_from_csv>
young = train_data.loc[train_data['Age'] < 15]['Survived'] rate_young = sum(young)/len(young) print("% of young people who survived:", rate_young )
Titanic - Machine Learning from Disaster
14,410,390
test = pd.read_csv('../input/test.csv')<drop_column>
train_data['Age'] = pd.cut(train_data['Age'], bins=[0., 10., 25., 50, 80, np.inf], labels=[0,1,2,3,4] ).astype(int) test_data['Age'] = pd.cut(test_data['Age'], bins=[0., 10., 25., 50, 80, np.inf], labels=[0,1,2,3,4] ).astype(int)
Titanic - Machine Learning from Disaster
14,410,390
testdf = test.drop(['Date'],axis=1 )<predict_on_test>
print(train_data.isnull().sum()) train_data[train_data.Embarked.isnull() ]
Titanic - Machine Learning from Disaster
14,410,390
prediction = reg.predict(testdf )<save_to_csv>
pd.options.mode.chained_assignment = None train_data.Embarked = train_data.Embarked.fillna('None') test_data.Embarked = test_data.Embarked.fillna('None') label_encoder = LabelEncoder() train_data.Embarked = label_encoder.fit_transform(train_data.Embarked) test_data.Embarked = label_encoder.transform(test_data.Embarked)
Titanic - Machine Learning from Disaster
14,410,390
serial = test['Date'] data = {'Date': serial, 'Volume': prediction} submission = pd.DataFrame(data) submission.to_csv('Submission.csv', index=False )<load_from_csv>
y = train_data["Survived"] features = ["Pclass", "Sex", "Fare", "SibSp", "Parch","FamilySize", "Embarked", "IsAlone"] X = train_data[features] X_test = test_data[features] print(X) scoring_method = "f1"
Titanic - Machine Learning from Disaster
14,410,390
sample = pd.read_csv('/kaggle/input/mlbio1/sample_submission.csv') test = pd.read_csv('/kaggle/input/mlbio1/test.csv') train = pd.read_csv('/kaggle/input/mlbio1/train.csv') <import_modules>
rf_model = RandomForestClassifier() rf_params ={ 'bootstrap': [True, False], 'max_depth': [10, None], 'max_features': ['auto', 'sqrt'], 'min_samples_leaf': [1, 2, 4], 'min_samples_split': [2, 5, 10], 'n_estimators': [100]} rf_gs = GridSearchCV(rf_model, rf_params, scoring=scoring_method, cv=8, n_jobs=4) rf_gs.fit(X, y) print(rf_gs.best_params_) print(rf_gs.best_score_ )
Titanic - Machine Learning from Disaster
14,410,390
%matplotlib inline mpl.style.use('ggplot') sns.set_style('white') flatui = [" pl = sns.color_palette(flatui )<sort_values>
cross_val_score(rf_gs, X, y, cv=5 )
Titanic - Machine Learning from Disaster
14,410,390
<drop_column><EOS>
predictions = random_forest.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your pipeline submission was successfully saved!" )
Titanic - Machine Learning from Disaster
13,505,880
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<data_type_conversions>
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns
Titanic - Machine Learning from Disaster
13,505,880
mean_bmi = train['bmi'].mean() train['bmi'] = train['bmi'].fillna(mean_bmi) test['bmi'] = test['bmi'].fillna(mean_bmi )<groupby>
train_data = pd.read_csv('../input/titanic/train.csv') test_data = pd.read_csv('../input/titanic/test.csv') train = train_data.copy() test = test_data.copy()
Titanic - Machine Learning from Disaster
13,505,880
train.groupby('stroke' ).size().reset_index(name='counts' )<sort_values>
train.drop(['PassengerId'],axis=1,inplace=True) test.drop(['PassengerId'],axis=1,inplace=True) pred = train_data['Survived']
Titanic - Machine Learning from Disaster
13,505,880
train[['gender', 'stroke']].groupby(['gender'], as_index=False ).mean().sort_values(by='stroke', ascending=False )<count_values>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,505,880
train['gender'].value_counts()<categorify>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,505,880
gender_target_dict =(train.groupby(['gender'])['stroke'].agg(['mean'])).to_dict() ['mean'] gender_target_dict['Other'] = global_mean_stroke train['gender_target_enc'] = train['gender'].replace(gender_target_dict) test['gender_target_enc'] = test['gender'].replace(gender_target_dict )<sort_values>
test.isnull().sum()
Titanic - Machine Learning from Disaster
13,505,880
work_type_data = train[['work_type', 'stroke']].groupby(['work_type'], as_index=False ).mean().sort_values(by='stroke', ascending=False) work_type_data<count_values>
test.isnull().sum()
Titanic - Machine Learning from Disaster
13,505,880
train['work_type'].value_counts()<categorify>
sex1 = pd.get_dummies(train['Sex']) sex2 = pd.get_dummies(test['Sex']) train.drop(['Sex'],axis=1,inplace=True) test.drop(['Sex'],axis=1,inplace=True) train = pd.concat([train,sex1],axis=1) test = pd.concat([test,sex2],axis=1 )
Titanic - Machine Learning from Disaster
13,505,880
n = train.groupby('work_type' ).size() mean_by_work_type = train.groupby('work_type')['stroke'].mean() m = 10 work_type_target_dict =(( mean_by_work_type*n + global_mean_stroke*m)/(n+m)).to_dict() train['work_type_smoothed_target_enc'] = train['work_type'].replace(work_type_target_dict) test['work_type_smoothed_target_enc'] = test['work_type'].replace(work_type_target_dict) <feature_engineering>
train.drop(['female'],axis=1,inplace=True) test.drop(['female'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
13,505,880
for work_type in train['work_type'].unique() : train['work_type_is_{}'.format(work_type)] =(train['work_type'] == work_type)*1 test['work_type_is_{}'.format(work_type)] =(test['work_type'] == work_type)*1<drop_column>
plt.figure(figsize=(8, 5)) sns.set_style('whitegrid') sns.countplot(x = 'Pclass',hue='Survived',data=train_data,palette='deep' )
Titanic - Machine Learning from Disaster
13,505,880
train = train.drop('Residence_type', axis=1) test = test.drop('Residence_type', axis=1 )<categorify>
Fare_0 = [] Fare_1 = [] for i in range(0,891): if train_data["Survived"][i] == 0: Fare_0.append(train["Fare"][i]) else: Fare_1.append(train["Fare"][i] )
Titanic - Machine Learning from Disaster
13,505,880
train['ever_married'] = train['ever_married'].replace({'Yes':1, 'No':0 }) test['ever_married'] = test['ever_married'].replace({'Yes':1, 'No':0 } )<categorify>
train["Embarked"].fillna("S", inplace = True) test["Embarked"].fillna("S", inplace = True )
Titanic - Machine Learning from Disaster
13,505,880
features = [ 'age', 'hypertension', 'heart_disease', 'ever_married', 'avg_glucose_level', 'gender_target_enc', 'work_type_smoothed_target_enc'] X_embedded = TSNE(n_components=2, random_state=21 ).fit_transform(train[features] )<find_best_model_class>
embark1 = pd.get_dummies(train['Embarked']) embark2 = pd.get_dummies(test['Embarked']) train.drop(['Embarked'],axis=1,inplace=True) test.drop(['Embarked'],axis=1,inplace=True) train = pd.concat([train,embark1],axis=1) test = pd.concat([test,embark2],axis=1 )
Titanic - Machine Learning from Disaster
13,505,880
def your_cross_validation_for_roc_auc(clf, X, y ,cv=5): X = np.array(X.copy()) y = np.array(y.copy()) kf = KFold(n_splits=cv) kf.get_n_splits(X) scores = [] for train_index, test_index in kf.split(X): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] clf.fit(X_train, y_train) prediction_on_this_fold = clf.predict_proba(X_test)[:,1] score = roc_auc_score(y_score=prediction_on_this_fold, y_true=y_test) scores.append(score) return scores<choose_model_class>
def fam(x): if(x['SibSp'] + x['Parch'])> 0: return 1 else: return 0 train['Family'] = train.apply(fam, axis = 1) test['Family'] = test.apply(fam, axis = 1 )
Titanic - Machine Learning from Disaster
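The hand-rolled your_cross_validation_for_roc_auc above reproduces what scikit-learn ships out of the box; a hedged equivalent using cross_val_score (a sketch with stand-in data; swap in the notebook's real clf, X, y):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score

X_demo, y_demo = make_classification(n_samples=200, random_state=0)  # illustrative stand-ins
clf = LogisticRegression(max_iter=1000)

# Same 5-fold split and ROC-AUC metric; the 'roc_auc' scorer calls
# predict_proba (or decision_function) internally, as the manual loop does.
scores = cross_val_score(clf, X_demo, y_demo, cv=KFold(n_splits=5), scoring='roc_auc')
print(scores.mean())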
13,505,880
models = [ SGDClassifier(loss='log', penalty = 'elasticnet', max_iter=50), XGBClassifier(n_estimators=45, max_depth=3), RandomForestClassifier(), ] models_names = dict(zip(models, ['sgd', 'xgb', 'rfc']))<define_variables>
train = train.drop(['SibSp','Parch'],axis=1) test = test.drop(['SibSp','Parch'],axis=1 )
Titanic - Machine Learning from Disaster
13,505,880
all_features = [ 'age', 'hypertension', 'heart_disease', 'ever_married', 'avg_glucose_level', 'bmi', 'work_type_is_children', 'work_type_is_Private', 'work_type_is_Never_worked', 'work_type_is_Self-employed', 'work_type_is_Govt_job', 'gender_target_enc', 'work_type_smoothed_target_enc']<feature_engineering>
train["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'X' for i in train['Cabin'] ]) test["Cabin"] = pd.Series([i[0] if not pd.isnull(i)else 'X' for i in test['Cabin'] ] )
Titanic - Machine Learning from Disaster
13,505,880
scores = {} for model in models: features_scores = {} print(models_names[model]) for f in all_features: scores = your_cross_validation_for_roc_auc(model, train[[f]] , train['stroke']) print(f, np.mean(scores)) features_scores[f] = np.mean(scores) <define_variables>
train["Cabin"] = train["Cabin"].map({"X":0, "A":1, "B" : 2 , "C":3, "D":4, "E":5, "F":6, "G":7,"T":0}) train["Cabin"] = train["Cabin"].astype(int) test["Cabin"] = test["Cabin"].map({"X":0, "A":1, "B" : 2 , "C":3, "D":4, "E":5, "F":6, "G":7,"T":0}) test["Cabin"] = test["Cabin"].astype(int )
Titanic - Machine Learning from Disaster
13,505,880
features_target = ['age', 'hypertension', 'heart_disease', 'ever_married', 'avg_glucose_level', 'bmi', 'work_type_smoothed_target_enc', 'gender_target_enc']<define_variables>
train_title = [i.split(",")[1].split(".")[0].strip() for i in train["Name"]] train["Title"] = pd.Series(train_title) test_title = [i.split(",")[1].split(".")[0].strip() for i in test["Name"]] test["Title"] = pd.Series(test_title )
Titanic - Machine Learning from Disaster
13,505,880
features_onehot = [ 'age', 'hypertension', 'heart_disease', 'ever_married', 'avg_glucose_level', 'bmi', 'work_type_is_children', 'work_type_is_Private', 'work_type_is_Never_worked', 'work_type_is_Self-employed', 'work_type_is_Govt_job', 'gender_target_enc']<choose_model_class>
train = train.drop(['Name'], axis = 1) test = test.drop(['Name'], axis = 1 )
Titanic - Machine Learning from Disaster
13,505,880
models = [ SGDClassifier(loss='log', penalty = 'elasticnet', max_iter=100), XGBClassifier(n_estimators=75, max_depth=3), RandomForestClassifier(n_estimators=75, max_depth=3), ] models_names = dict(zip(models, ['sgd', 'xgb', 'rfc'])) for model in models: print(models_names[model]) scores_target = your_cross_validation_for_roc_auc(model, train[features_target], train['stroke']) scores_onehot = your_cross_validation_for_roc_auc(model, train[features_onehot], train['stroke']) print(np.mean(scores_target), np.mean(scores_onehot))<save_to_csv>
train["Title"] = train["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') train["Title"] = train["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) train["Title"] = train["Title"].astype(int) test["Title"] = test["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') test["Title"] = test["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) test["Title"] = test["Title"].astype(int )
Titanic - Machine Learning from Disaster
13,505,880
best_model = XGBClassifier(n_estimators=75, max_depth=3) best_model.fit(train[features_target], train['stroke']) Y_pred = pd.DataFrame(best_model.predict_proba(test[features_target]))[1] submission = pd.DataFrame({ "id": test["id"], "stroke": Y_pred }) submission.to_csv("../working/submit.csv", index=False) submission.sample(10)<import_modules>
Ticket1 = [] for i in list(train.Ticket): if not i.isdigit() : Ticket1.append(i.replace(".","" ).replace("/","" ).strip().split(' ')[0]) else: Ticket1.append("X") train["Ticket"] = Ticket1 Ticket2 = [] for j in list(test.Ticket): if not j.isdigit() : Ticket2.append(j.replace(".","" ).replace("/","" ).strip().split(' ')[0]) else: Ticket2.append("X") test["Ticket"] = Ticket2
Titanic - Machine Learning from Disaster
13,505,880
import pandas as pd import numpy as np import sklearn from sklearn import preprocessing from sklearn.decomposition import PCA from sklearn.ensemble import IsolationForest from sklearn import svm import statistics import matplotlib.pyplot as plt from sklearn.neural_network import MLPClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier from sklearn.naive_bayes import GaussianNB from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis <load_from_csv>
np.union1d(train["Ticket"], test["Ticket"] )
Titanic - Machine Learning from Disaster
13,505,880
t_data = pd.read_csv("../input/train.csv") ts_data = pd.read_csv("../input/test.csv")<drop_column>
train= pd.get_dummies(train, columns = ["Ticket"], prefix="T") test = pd.get_dummies(test, columns = ["Ticket"], prefix="T" )
Titanic - Machine Learning from Disaster
13,505,880
ddata = t_data.drop(["PlayerID","Name"], axis=1) sdata = ts_data.drop(["PlayerID","Name"], axis=1 )<normalization>
train = train.drop(['T_SP','T_SOP','T_Fa','T_LINE','T_SWPP','T_SCOW','T_PPP','T_AS','T_CASOTON'],axis = 1) test = test.drop(['T_SCA3','T_STONOQ','T_AQ4','T_A','T_LP','T_AQ3'],axis = 1 )
Titanic - Machine Learning from Disaster
13,505,880
ddata = ddata.interpolate() ddata = ddata.replace([np.inf], np.finfo(np.float64).max) ddata = ddata.replace([-np.inf], np.finfo(np.float64).min) features = ddata.loc[:, ddata.columns.values[:len(ddata.columns.values)-1]].values labels = ddata.loc[:, ['TARGET_5Yrs']].values st_features = preprocessing.StandardScaler().fit_transform(features) sdata = sdata.interpolate() sdata = sdata.replace([np.inf], np.finfo(np.float64).max) sdata = sdata.replace([-np.inf], np.finfo(np.float64).min) sfeatures = sdata.loc[:, sdata.columns.values].values st_sfeatures = preprocessing.StandardScaler().fit_transform(sfeatures)<categorify>
train.drop(['Survived'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
13,505,880
pca = PCA(n_components=10) pca.fit(features) principalComponents = pca.transform(features) test_principalComponenta = pca.transform(sfeatures) print(principalComponents.shape, " ", test_principalComponenta.shape )<predict_on_test>
print(train.isnull().sum()) print("Number of columns are :",train.isnull().sum().count() )
Titanic - Machine Learning from Disaster
13,505,880
testf = sfeatures <choose_model_class>
print(test.isnull().sum()) print("Number of columns are :",test.isnull().sum().count() )
Titanic - Machine Learning from Disaster
13,505,880
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process", "Decision Tree", "Random Forest", "Neural Net", "AdaBoost", "Naive Bayes", "QDA"] classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025), SVC(gamma=2, C=1), GaussianProcessClassifier(1.0 * RBF(1.0)) , DecisionTreeClassifier(max_depth=5), RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1), MLPClassifier(alpha=1), AdaBoostClassifier() , GaussianNB() , QuadraticDiscriminantAnalysis() ] res = [] res2 = [] for name, clf in zip(names, classifiers): clf.fit(inputf, labels) res.append(clf.predict(testf)) temp = clf.predict(inputf) res2.append(temp) print(name, " : ", sklearn.metrics.accuracy_score(labels, temp)) <define_variables>
sc = StandardScaler() train2 = sc.fit_transform(train) test2 = sc.transform(test )
Titanic - Machine Learning from Disaster
13,505,880
res2 = np.array(res2) res3 = [] for i in range(res2.shape[1]): s = 0 for j in res2: s = s + j[i] if s >= 5: res3.append(1) else: res3.append(0 )<compute_test_metric>
KFold_Score = pd.DataFrame() classifiers = ['Linear SVM', 'Radial SVM', 'LogisticRegression', 'RandomForestClassifier', 'AdaBoostClassifier', 'XGBoostClassifier', 'KNeighborsClassifier','GradientBoostingClassifier'] models = [svm.SVC(kernel='linear'), svm.SVC(kernel='rbf'), LogisticRegression(max_iter = 1000), RandomForestClassifier(n_estimators=200, random_state=0), AdaBoostClassifier(random_state = 0), xgb.XGBClassifier(n_estimators=100), KNeighborsClassifier() , GradientBoostingClassifier(random_state=0) ] j = 0 for i in models: model = i cv = KFold(n_splits=5, random_state=0, shuffle=True) KFold_Score[classifiers[j]] =(cross_val_score(model, train, np.ravel(pred), scoring = 'accuracy', cv=cv)) j = j+1
Titanic - Machine Learning from Disaster
13,505,880
sklearn.metrics.accuracy_score(labels, res3 )<define_variables>
mean = pd.DataFrame(KFold_Score.mean() , index= classifiers) KFold_Score = pd.concat([KFold_Score,mean.T]) KFold_Score.index=['Fold 1','Fold 2','Fold 3','Fold 4','Fold 5','Mean'] KFold_Score.T.sort_values(by=['Mean'], ascending = False )
Titanic - Machine Learning from Disaster
13,505,880
res = np.array(res) res3 = [] for i in range(res.shape[1]): s = 0 for j in res: s = s + j[i] if s >= 5: res3.append(1) else: res3.append(0 )<save_to_csv>
col_name1[0],col_name1[2] = col_name1[2],col_name1[0] col_name2[0],col_name2[2] = col_name2[2],col_name2[0]
Titanic - Machine Learning from Disaster
13,505,880
cols = { 'PlayerID': [i+901 for i in range(440)] , 'TARGET_5Yrs': res3 } submission = pd.DataFrame(cols) submission.to_csv("submission.csv", index=False) print(submission )<import_modules>
train_new = train[col_name1] test_new = test[col_name2]
Titanic - Machine Learning from Disaster
13,505,880
print(os.listdir("../input"))<load_from_csv>
train_new = train_new.drop(['Cabin'],axis = 1) test_new = test_new.drop(['Cabin'],axis = 1 )
Titanic - Machine Learning from Disaster
13,505,880
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") <prepare_x_and_y>
sc = StandardScaler() train3 = sc.fit_transform(train_new) test3 = sc.transform(test_new )
Titanic - Machine Learning from Disaster
13,505,880
<data_type_conversions>
rfc = RandomForestClassifier(random_state=0 )
Titanic - Machine Learning from Disaster
13,505,880
X = train.values[:,2:20] y = train.values[:,21] y = y.astype('int') <normalization>
param_grid = { 'n_estimators': [ 200,300], 'max_features': ['auto', 'sqrt'], 'max_depth' : [6,7,8], 'criterion' :['gini', 'entropy'] }
Titanic - Machine Learning from Disaster
13,505,880
from sklearn.preprocessing import Imputer imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0) imputer = imputer.fit(X) X = imputer.transform(X)<categorify>
CV_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv= 5) CV_rfc.fit(train3,pred) CV_rfc.best_params_
Titanic - Machine Learning from Disaster
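Imputer here is the pre-0.22 scikit-learn API (hence the import added above); current scikit-learn removed it in favor of sklearn.impute.SimpleImputer, which imputes column-wise without an axis argument. A minimal modern equivalent, with illustrative data:

import numpy as np
from sklearn.impute import SimpleImputer

X_demo = np.array([[1.0, np.nan], [3.0, 4.0]])
imputer = SimpleImputer(strategy='mean')  # column means, like the old axis=0
print(imputer.fit_transform(X_demo))      # NaN becomes the column mean 4.0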
13,505,880
Xt = test.values[:,2:20] imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0) imputer = imputer.fit(Xt) Xt = imputer.transform(Xt) <normalization>
rfc1=RandomForestClassifier(random_state=0, n_estimators= 200, criterion = 'gini',max_features = 'auto',max_depth = 8) rfc1.fit(train3, pred )
Titanic - Machine Learning from Disaster
13,505,880
sc = StandardScaler() sct = StandardScaler() X = sc.fit_transform(X) Xt = sc.transform(Xt) <train_model>
pred3= rfc1.predict(test3) print(pred3 )
Titanic - Machine Learning from Disaster
13,505,880
<import_modules><EOS>
pred_test = pred3 output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': pred_test}) output.to_csv('./submission.csv', index=False )
Titanic - Machine Learning from Disaster
13,668,673
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
!pip install sweetviz
Titanic - Machine Learning from Disaster
13,668,673
train = pd.read_csv('/kaggle/input/anokha-ai-adept/train.csv') train.head() train['label'].value_counts().plot.bar()<normalization>
data_type={ 'PassengerId': 'int64', 'Survived' : 'bool', 'Pclass' : 'category', 'Name' : 'string', 'Sex' : 'category', 'Age' : 'float64', 'SibSp' : 'int64', 'Parch' : 'category', 'Ticket' : 'string', 'Fare' : 'float64', 'Cabin' : 'category', 'Embarked' : 'category', }
Titanic - Machine Learning from Disaster
13,668,673
train_image = [] for i in tqdm(range(train.shape[0])) : img = image.load_img('/kaggle/input/anokha-ai-adept/Train/'+train['filename'][i], target_size=(64,64,1), grayscale=True) img = image.img_to_array(img) img = img/255 train_image.append(img) X = np.array(train_image )<load_pretrained>
df_train=pd.read_csv('/kaggle/input/titanic/train.csv', dtype=data_type )
Titanic - Machine Learning from Disaster
13,668,673
y=train['label'].values class_weights = class_weight.compute_class_weight('balanced',np.unique(y),y) class_weights = {0:1.4 , 1: 0.8, 2:1}<categorify>
X = df_train.drop(columns=['Survived','Name', 'PassengerId'], axis=1) di = {False: 0, True: 1} y = df_train.replace({"Survived": di} ).Survived
Titanic - Machine Learning from Disaster
13,668,673
y = to_categorical(y )<split>
categorical_cols = X.select_dtypes(include=['category', object, 'string'] ).columns.to_list() for col in categorical_cols: X[col] = X[col].astype('category') X[col] = X[col].cat.add_categories('Unknown') X[col] = X[col].fillna('Unknown' )
Titanic - Machine Learning from Disaster
13,668,673
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.3 )<choose_model_class>
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size = 0.3, stratify=y, random_state = 123 )
Titanic - Machine Learning from Disaster
13,668,673
model = Sequential() model.add(Conv2D(256, kernel_size=(3, 3),activation='relu',input_shape=(64,64,1))) model.add(Conv2D(128,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128,(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(3, activation='softmax')) <train_model>
train_pool = Pool(data=X_train, label = y_train, cat_features = categorical_cols) eval_pool = Pool(data=X_validation, label = y_validation, cat_features = categorical_cols )
Titanic - Machine Learning from Disaster
13,668,673
def recall_m(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives /(possible_positives + K.epsilon()) return recall def precision_m(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives /(predicted_positives + K.epsilon()) return precision def f1_m(y_true, y_pred): precision = precision_m(y_true, y_pred) recall = recall_m(y_true, y_pred) return 2*(( precision*recall)/(precision+recall+K.epsilon())) model.compile(loss='categorical_crossentropy',optimizer='rmsprop',metrics=['acc',f1_m,precision_m, recall_m] )<choose_model_class>
model = CatBoostClassifier(custom_loss=['Accuracy'],iterations = 30000,od_type = 'Iter', od_wait= 1000,task_type = 'GPU', random_seed=42,) model.fit(train_pool, eval_set = eval_pool, use_best_model=True, verbose = False )
Titanic - Machine Learning from Disaster
13,668,673
earlystop = EarlyStopping(restore_best_weights=True )<choose_model_class>
df_test = pd.read_csv('/kaggle/input/titanic/test.csv', dtype = data_type) df_submission = pd.read_csv('/kaggle/input/titanic/gender_submission.csv') df_test.drop(columns=['Name', 'PassengerId'], axis=1, inplace=True) for col in categorical_cols: df_test[col] = df_test[col].astype('category') df_test[col] = df_test[col].cat.add_categories('Unknown') df_test[col] = df_test[col].fillna('Unknown') test_pool = Pool(data=df_test, cat_features=categorical_cols) predictions = model.predict(test_pool) df_submission['Survived'] = predictions df_submission.to_csv('catboost_submission_no_cv.csv', index=False )
Titanic - Machine Learning from Disaster
13,668,673
learning_rate_reduction = ReduceLROnPlateau(monitor='val_f1_m', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )<define_variables>
grid = {'learning_rate': [0.03, 0.1, 0.01], 'iterations' : [500, 1000, 3000, 5000, 10000, 20000], 'depth': [4, 6, 10], 'l2_leaf_reg': [1, 3, 5, 7, 9]} model = CatBoostClassifier(task_type = 'GPU', eval_metric = 'Accuracy') model_randomized_search_result = model.randomized_search(grid, Pool(X, y, cat_features=categorical_cols), cv = 3, plot=True )
Titanic - Machine Learning from Disaster
13,668,673
callbacks = [earlystop, learning_rate_reduction]<train_model>
best_params = model_randomized_search_result['params'] print('Best parms found', best_params )
Titanic - Machine Learning from Disaster
13,668,673
model.fit(X_train, y_train, epochs=45, validation_data=(X_test, y_test), batch_size=16, class_weight=class_weights )<load_from_csv>
best_model_params = best_params.copy() best_model_params.update({ 'task_type': 'GPU' }) model = CatBoostClassifier(**best_model_params) model.fit(train_pool, eval_set = eval_pool, use_best_model=True, verbose = False )
Titanic - Machine Learning from Disaster
13,668,673
test_file = pd.read_csv('/kaggle/input/anokha-ai-adept/test.csv' )<normalization>
predictions = model.predict(test_pool) df_submission['Survived'] = predictions df_submission.to_csv('cb_submission_with_cv_param.csv', index=False )
Titanic - Machine Learning from Disaster
13,668,673
test_image = [] for i in tqdm(range(test_file.shape[0])) : img = image.load_img('/kaggle/input/anokha-ai-adept/Test/'+test_file['filename'][i], target_size=(64,64,1), grayscale=True) img = image.img_to_array(img) img = img/255 test_image.append(img) test = np.array(test_image )<predict_on_test>
X = df_train.drop(columns=['Survived','PassengerId'], axis=1) for col in categorical_cols: X[col] = X[col].astype('category') X[col] = X[col].cat.add_categories('Unknown') X[col] = X[col].fillna('Unknown') di = {False: 0, True: 1} y = df_train.replace({"Survived": di} ).Survived text_feature = ['Name']
Titanic - Machine Learning from Disaster
13,668,673
prediction = model.predict_classes(test )<save_to_csv>
X.Name = X.Name.apply(lambda x: x.lower()) X.Name = X.Name.apply(lambda x: x.translate(str.maketrans('', '', string.punctuation))) X.Name = X.Name.apply(lambda x: x.translate(str.maketrans('', '', string.digits)))
Titanic - Machine Learning from Disaster
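In Python 3, str.translate expects a translation table rather than a raw string, so passing string.punctuation directly (as the cell above originally did) deletes nothing; str.maketrans('', '', chars) builds a table that removes every character in chars. A quick illustration:

import string

name = "Braund, Mr. Owen Harris"
table = str.maketrans('', '', string.punctuation)
print(name.translate(table))  # Braund Mr Owen Harris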
13,668,673
sample = pd.read_csv('/kaggle/input/anokha-ai-adept/SampleSolution.csv') sample['label'] = prediction sample.to_csv('sample1.csv', header=True, index=False )<install_modules>
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size = 0.3, stratify=y, random_state = 123) train_text_pool = Pool(data=X_train, label = y_train, cat_features = categorical_cols, text_features = text_feature, feature_names=list(X_train), ) eval_text_pool = Pool(data=X_validation, label = y_validation, cat_features = categorical_cols, text_features = text_feature, feature_names=list(X_validation), ) model_with_text = CatBoostClassifier( custom_loss=['Accuracy'], iterations = 30000, od_type = 'Iter', od_wait= 2000, task_type = 'GPU', random_seed=42,) model_with_text.fit(train_text_pool, eval_set = eval_text_pool, use_best_model=True, metric_period=1000, verbose = True )
Titanic - Machine Learning from Disaster