Dataset schema (column · dtype · value/length range):

kernel_id    int64     values 24.2k – 23.3M
prompt       string    lengths 8 – 1.85M
completion   string    lengths 1 – 182k
comp_name    string    lengths 5 – 57
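Each row below pairs a prompt cell with its completion, a cell-type tag embedded in the text (e.g. <train_model>), and the competition name. A minimal sketch of loading and sanity-checking a dump with this schema — the file name kaggle_cells.csv and the pandas route are assumptions, not part of the dataset card:

```python
import pandas as pd

# Hypothetical file name; substitute the actual export of this dataset.
df = pd.read_csv("kaggle_cells.csv")

# Sanity-check the columns against the schema above.
assert df["kernel_id"].dtype == "int64"
print(df["prompt"].str.len().min(), df["prompt"].str.len().max())

# Inspect which competitions the cells come from.
print(df[["kernel_id", "comp_name"]].head())
```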
13,587,551
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model_ft.parameters(), lr=0.00001, momentum=0.9)<load_pretrained>
lbl = LabelEncoder()
train['Sex'] = lbl.fit_transform(train[['Sex']].values.ravel())
test['Sex'] = lbl.fit_transform(test[['Sex']].values.ravel())
Titanic - Machine Learning from Disaster
13,587,551
model_ft_wts = torch.load("best_model_resnext_16d_2048_1024_dropout_0.5_b.pkl")<load_from_disk>
lbl2 = LabelEncoder()
train['Name'] = lbl2.fit_transform(train[['Name']].values.ravel())
test['Name'] = lbl2.fit_transform(test[['Name']].values.ravel())
Titanic - Machine Learning from Disaster
13,587,551
model_ft.load_state_dict(model_ft_wts)<find_best_params>
Titanic - Machine Learning from Disaster
13,587,551
best_acc = 0.84516
best_model_wts = copy.deepcopy(model_ft.state_dict())<train_model>
train['Embarked'] = train['Embarked'].fillna(value=train['Embarked'].mode()[0])
test['Embarked'] = test['Embarked'].fillna(value=test['Embarked'].mode()[0])
Titanic - Machine Learning from Disaster
13,587,551
num_epochs = 0
for epoch in range(num_epochs):
    print("Epoch : " + str(epoch))
    print("-" * 10)
    running_loss = 0.0
    running_corrects = 0
    wrong = 0
    model_ft.train()
    for inp, labels in train_loader:
        inp = inp.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            outputs = model_ft(inp)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
        running_loss += loss.item()
    epoch_loss = running_loss / (len(train_loader) * 1.0)
    print('TRAINING SET Loss: {}'.format(epoch_loss))
    if True:
        correct = 0
        wrong = 0
        model_ft.eval()
        for inp, labels in valid_loader:
            inp = inp.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            with torch.no_grad():
                outputs = model_ft(inp)
            _, preds = torch.max(outputs.data, 1)
            correct += torch.sum(preds == labels)
            wrong += torch.sum(preds != labels)
        acc = (correct.float()) / ((correct + wrong).float())
        print('VALIDATION SET Correct: {} Wrong {} Acc {}'.format(correct, wrong, acc))
        if acc > best_acc:
            best_acc = acc
            best_model_wts = copy.deepcopy(model_ft.state_dict())
    running_loss = 0.0
    running_correct = 0
model_ft.load_state_dict(best_model_wts)
torch.save(model_ft.state_dict(), "best_model_resnext_16d_2048_1024_dropout_0.5_c_wts.pkl")
print('------ Finished Training -----')<load_from_disk>
train_ds = train[features + target]
test_ds = test[features]
Titanic - Machine Learning from Disaster
13,587,551
model_ft.load_state_dict(best_model_wts)<find_best_params>
Titanic - Machine Learning from Disaster
13,587,551
if True:
    correct = 0
    wrong = 0
    model_ft.eval()
    for inp, labels in acc_loader:
        inp = inp.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        with torch.no_grad():
            outputs = model_ft(inp)
        _, preds = torch.max(outputs.data, 1)
        correct += torch.sum(preds == labels)
        wrong += torch.sum(preds != labels)
    acc = (correct.float()) / ((correct + wrong).float())
    print('ACCURACY SET Correct: {} Wrong {} ACC {}'.format(correct, wrong, acc))<load_from_csv>
Titanic - Machine Learning from Disaster
13,587,551
submission = pd.read_csv('../input/sample_sub.csv')
submission.head()<load_from_csv>
torch.manual_seed(7)
Titanic - Machine Learning from Disaster
13,587,551
testset = ImageDataset(csv_file='../input/sample_sub.csv', root_dir='../input/data/data/', transform=transform_raw)
testloader = torch.utils.data.DataLoader(testset, batch_size=300, shuffle=False, num_workers=4)
res_predict = []<find_best_params>
class Titanicnn(nn.Module):
    def __init__(self, in_size, hidden_size1, hidden_size2, hidden_size3, hidden_size4, num_classes):
        super().__init__()
        self.linear1 = nn.Linear(in_size, hidden_size1)
        self.linear2 = nn.Linear(hidden_size1, hidden_size2)
        self.linear3 = nn.Linear(hidden_size2, hidden_size3)
        self.linear4 = nn.Linear(hidden_size3, hidden_size4)
        self.linear5 = nn.Linear(hidden_size4, num_classes)
        self.dropout = nn.Dropout(0.2)

    def forward(self, xb):
        xb = xb.view(-1, xb.size(1)).float()
        out = self.linear1(xb)
        out = F.relu(out)
        out = self.linear2(out)
        out = self.dropout(out)
        out = self.linear3(out)
        out = F.relu(out)
        out = self.linear4(out)
        out = self.dropout(out)
        out = self.linear5(out)
        out = self.dropout(out)
        return out

    def training_step(self, batch):
        features, labels = batch
        out = self(features)
        loss = F.cross_entropy(out, labels)
        return loss

    def validation_step(self, batch):
        features, labels = batch
        out = self(features)
        loss = F.cross_entropy(out, labels)
        acc = accuracy(out, labels)
        return {'valid_loss': loss, 'valid_acc': acc}

    def validation_epoch_end(self, outputs):
        batch_losses = [x['valid_loss'] for x in outputs]
        epoch_loss = torch.stack(batch_losses).mean()
        batch_accs = [x['valid_acc'] for x in outputs]
        epoch_acc = torch.stack(batch_accs).mean()
        return {'valid_loss': epoch_loss.item(), 'valid_acc': epoch_acc.item()}

    def epoch_end(self, epoch, result):
        print("Epoch [{}], valid_loss: {:.4f}, valid_acc: {:.4f}".format(epoch, result['valid_loss'], result['valid_acc']))
Titanic - Machine Learning from Disaster
13,587,551
model_ft.eval()
for inp, label in testloader:
    inp = inp.to(device)
    label = label.to(device)
    optimizer.zero_grad()
    with torch.no_grad():
        outputs = model_ft(inp)
    _, preds = torch.max(outputs.data, 1)
    for i in range(len(preds)):
        res_predict.append(int(preds[i]))
submission['Category'] = res_predict<save_to_csv>
def accuracy(outputs, labels):
    _, preds = torch.max(outputs, dim=1)
    return torch.tensor(torch.sum(preds == labels).item() / len(preds))
Titanic - Machine Learning from Disaster
13,587,551
submission.to_csv('submission.csv', index=False, encoding='utf-8')<set_options>
input_size = train_ds.shape[1] - 1
hidden_size1 = 128
hidden_size2 = 1024
hidden_size3 = 512
hidden_size4 = 128
num_classes = 2
Titanic - Machine Learning from Disaster
13,587,551
%matplotlib inline<load_from_csv>
model = Titanicnn(input_size, hidden_size1=hidden_size1, hidden_size2=hidden_size2, hidden_size3=hidden_size3, hidden_size4=hidden_size4, num_classes=num_classes)
model
Titanic - Machine Learning from Disaster
13,587,551
df = pd.read_csv('../input/train.csv')
df.head()<load_from_csv>
def convert_to_tensors(ds, valid_size=0.30, train_set=True):
    scaler = StandardScaler()
    batch_size = ds.shape[0]
    if train_set == True:
        targets_t = ds.Survived.values
        features_t = ds.drop(labels=['Survived'], axis=1).values
        features_t = scaler.fit_transform(features_t)
        targetsTrain = torch.from_numpy(targets_t).type(torch.LongTensor)
        featuresTrain = torch.from_numpy(features_t)
        train_tensor = torch.utils.data.TensorDataset(featuresTrain, targetsTrain)
        valid_size = round(len(train_tensor) * (valid_size))
        train_size = len(train_tensor) - valid_size
        ttrain_ds, tvalid_ds = random_split(train_tensor, [train_size, valid_size])
        print("train_size:- ", len(ttrain_ds), "test_size:- ", len(tvalid_ds))
        train_loader = DataLoader(ttrain_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
        valid_loader = DataLoader(tvalid_ds, batch_size=batch_size * 2, num_workers=4, pin_memory=True)
        return train_loader, valid_loader
    else:
        featuresTest = ds.values
        features_test = scaler.fit_transform(featuresTest)
        featuresTensor = torch.from_numpy(features_test)
        print(len(featuresTensor))
        test_loader = DataLoader(featuresTensor, batch_size * 2, num_workers=4, pin_memory=True)
        return test_loader
Titanic - Machine Learning from Disaster
13,587,551
class ImageDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.data_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.data_frame['Id'][idx])
        image = Image.open(img_name).convert('RGB')
        label = np.array(self.data_frame['Category'][idx])
        if self.transform:
            image = self.transform(image)
        sample = (image, label)
        return sample<define_variables>
train_loader, valid_loader = convert_to_tensors(train_ds)
Titanic - Machine Learning from Disaster
13,587,551
for i in range(len(trainset)):
    sample = trainset[i]
    print(i, sample[0].size(), " | Label: ", sample[1])
    if i == 9:
        break<train_model>
for data, labels in train_loader:
    outputs = model(data)
    print(labels.shape)
    loss = F.cross_entropy(outputs, labels)
    acc = accuracy(outputs, labels)
    print('Loss:', loss.item())
    print('Initial Acc:', float(acc), "%")
    break
print('outputs.shape : ', outputs.shape)
print('Sample outputs : ', outputs[:5].data)
print('Sample labels : ', labels[:5].data)
Titanic - Machine Learning from Disaster
13,587,551
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU...')
else:
    print('CUDA is available! Training on GPU...')<choose_model_class>
torch.cuda.is_available()
Titanic - Machine Learning from Disaster
13,587,551
dir(models)
model = models.resnext101_32x8d(pretrained=True)
model<find_best_params>
def get_default_device():
    if torch.cuda.is_available():
        return torch.device('cuda')
    else:
        return torch.device('cpu')
Titanic - Machine Learning from Disaster
13,587,551
for param in model.parameters():
    param.requires_grad = False<choose_model_class>
def to_device(data, device):
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    return data.to(device, non_blocking=True)
Titanic - Machine Learning from Disaster
13,587,551
model.fc = nn.Sequential(
    nn.Linear(2048, 512),
    nn.ReLU(),
    nn.Dropout(0.05),
    nn.Linear(512, 67),
)
model.cuda()
model<choose_model_class>
for xd, yd in train_loader:
    print(xd.shape)
    xds = to_device(xd, device)
    print(xds.device)
    break
Titanic - Machine Learning from Disaster
13,587,551
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.fc.parameters(), lr=3e-4)<train_model>
class DeviceDataLoader():
    def __init__(self, ds, device):
        self.ds = ds
        self.device = device

    def __iter__(self):
        for d in self.ds:
            yield to_device(d, self.device)

    def __len__(self):
        return len(self.ds)
Titanic - Machine Learning from Disaster
13,587,551
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')<find_best_params>
train_loader = DeviceDataLoader(train_loader, device)
valid_loader = DeviceDataLoader(valid_loader, device)
Titanic - Machine Learning from Disaster
13,587,551
epochs = 4
iter = 0
for epoch in range(epochs):
    for i, (images, labels) in enumerate(trainloader):
        images = Variable(images.cuda())
        labels = Variable(labels.cuda())
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        iter += 1
        if iter % 500 == 0:
            correct = 0
            total = 0
            for images, labels in trainloader:
                images = Variable(images.cuda())
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted.cpu() == labels.cpu()).sum()
            accuracy = (100 * correct / total)
            print("Iteration {}, Loss {}, Accuracy {}".format(iter, loss.data.item(), accuracy))<find_best_params>
for xd, yd in valid_loader:
    print('xd.device:', xd.device)
    print('yd:', yd)
    break
Titanic - Machine Learning from Disaster
13,587,551
for param in model.layer4.parameters():
    param.requires_grad = True<choose_model_class>
def evaluate(model, valid_loader):
    outputs = [model.validation_step(batch) for batch in valid_loader]
    return model.validation_epoch_end(outputs)
Titanic - Machine Learning from Disaster
13,587,551
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)<train_model>
def fit(epochs, lr, model, train_loader, valid_loader, opt_func=torch.optim.SGD):
    history = []
    optimizer = opt_func(model.parameters(), lr)
    for epoch in tqdm(range(epochs)):
        for batch in train_loader:
            loss = model.training_step(batch)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        result = evaluate(model, valid_loader)
        model.epoch_end(epoch, result)
        history.append(result)
    return history
Titanic - Machine Learning from Disaster
13,587,551
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')<find_best_params>
model = Titanicnn(input_size, hidden_size1=hidden_size1, hidden_size2=hidden_size2, hidden_size3=hidden_size3, hidden_size4=hidden_size4, num_classes=num_classes)
to_device(model, device)
Titanic - Machine Learning from Disaster
13,587,551
for param in model.layer3.parameters():
    param.requires_grad = True<choose_model_class>
history = [evaluate(model, valid_loader)]
history
Titanic - Machine Learning from Disaster
13,587,551
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)<train_model>
history += fit(20, 0.2, model, train_loader, valid_loader)
Titanic - Machine Learning from Disaster
13,587,551
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')<create_dataframe>
history += fit(40, 0.001, model, train_loader, valid_loader)
Titanic - Machine Learning from Disaster
13,587,551
test_transforms = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
testset = ImageDataset(csv_file='../input/sample_sub.csv', root_dir='../input/data/data/', transform=test_transforms)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False, num_workers=4)<load_from_csv>
history += fit(100, 0.01, model, train_loader, valid_loader)
Titanic - Machine Learning from Disaster
13,587,551
submission = pd.read_csv('../input/sample_sub.csv')
submission.head()<save_to_csv>
history += fit(300, 0.01, model, train_loader, valid_loader)
Titanic - Machine Learning from Disaster
13,587,551
submission.to_csv('submission.csv', index=False, encoding='utf-8')<set_options>
history += fit(30, 0.001, model, train_loader, valid_loader)
Titanic - Machine Learning from Disaster
13,587,551
%matplotlib inline <load_from_csv>
test_loader = convert_to_tensors(test_ds, train_set=False)
Titanic - Machine Learning from Disaster
13,587,551
df = pd.read_csv('../input/train.csv')
df.head()<load_from_csv>
def predict(pred_x, model):
    for pred in pred_x:
        y = model(pred)
        _, y_ = torch.max(y, dim=1)
        return y_
Titanic - Machine Learning from Disaster
13,587,551
class ImageDataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=None):
        self.data_frame = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.data_frame['Id'][idx])
        image = Image.open(img_name).convert('RGB')
        label = np.array(self.data_frame['Category'][idx])
        if self.transform:
            image = self.transform(image)
        sample = (image, label)
        return sample<create_dataframe>
test_loader_gpu = DeviceDataLoader(test_loader, device)
Titanic - Machine Learning from Disaster
13,587,551
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
])
trainset = ImageDataset(csv_file='../input/train.csv', root_dir='../input/data/data/', transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=0)<load_from_csv>
test_preds = predict(test_loader_gpu, model)
Titanic - Machine Learning from Disaster
13,587,551
<define_variables>
passId = test[['PassengerId']].values
Titanic - Machine Learning from Disaster
13,587,551
for i in range(len(trainset)):
    sample = trainset[i]
    print(i, sample[0].size(), " | Label: ", sample[1])
    if i == 9:
        break<train_model>
final_pred = final_pred.data.numpy()
Titanic - Machine Learning from Disaster
13,587,551
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU...')
else:
    print('CUDA is available! Training on GPU...')<choose_model_class>
sub = {'PassengerId': passId.ravel(), 'Survived': final_pred}
Titanic - Machine Learning from Disaster
13,587,551
<choose_model_class>
submission_csv = pd.DataFrame(sub)
Titanic - Machine Learning from Disaster
13,587,551
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.00007)
if train_on_gpu:
    criterion = criterion.cuda()<find_best_params>
submission_csv.to_csv('final_sub_titanic_pth.csv', index=False)
Titanic - Machine Learning from Disaster
13,587,551
epochs = 14
iter = 0
for epoch in range(epochs):
    for i, (images, labels) in enumerate(trainloader):
        if train_on_gpu:
            images = Variable(images.cuda())
            labels = Variable(labels.cuda())
        else:
            images = Variable(images)
            labels = Variable(labels)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        iter += 1
        if iter % 100 == 0:
            print(iter, end="...")
    print(" ")
    print("<train_model>
x = pd.read_csv("./final_sub_titanic_pth.csv")
Titanic - Machine Learning from Disaster
13,587,551
model.eval() <load_from_csv>
filename = "./titanic_pytorch.pth"
Titanic - Machine Learning from Disaster
13,587,551
submission = pd.read_csv('../input/sample_sub.csv')
submission.head()<load_from_csv>
torch.save(model.state_dict(), filename)
Titanic - Machine Learning from Disaster
13,587,551
<find_best_params><EOS>
loaded_model = Titanicnn(input_size, hidden_size1=hidden_size1, hidden_size2=hidden_size2, hidden_size3=hidden_size3, hidden_size4=hidden_size4, num_classes=num_classes)
to_device(loaded_model, device)
loaded_model.load_state_dict(torch.load(filename))
loaded_model.eval()
Titanic - Machine Learning from Disaster
13,261,128
<SOS> metric: categorization accuracy. Kaggle data source: titanic-machine-learning-from-disaster<prepare_output>
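The row above states the evaluation metric as categorization accuracy. A minimal sketch of that metric, assuming plain label arrays (the function name is illustrative, not from the dataset):

```python
import numpy as np

def categorization_accuracy(y_true, y_pred):
    # Fraction of predictions that exactly match the ground-truth labels.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return float((y_true == y_pred).mean())

# Example: three of four predictions correct -> 0.75
print(categorization_accuracy([0, 1, 1, 0], [0, 1, 0, 0]))
```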
sns.set()
%matplotlib inline
Titanic - Machine Learning from Disaster
13,261,128
predictions = []
for data, target in testloader:
    if train_on_gpu:
        data, target = data.cuda(), target.cuda()
    output = model(data)
    _, pred = torch.max(output, 1)
    for i in range(len(pred)):
        predictions.append(int(pred[i]))
submission['Category'] = predictions<save_to_csv>
train_data = pd.read_csv('train_prep.csv')
test_data = pd.read_csv('test_prep.csv')
train_data.columns
Titanic - Machine Learning from Disaster
13,261,128
submission.to_csv('submission.csv', index=False, encoding='utf-8')<train_model>
train_data.set_index('PassengerId', inplace=True)
test_data.set_index('PassengerId', inplace=True)
Titanic - Machine Learning from Disaster
13,261,128
X_train = np.load(input_dir + 'X_train.npy')
X_test = np.load(input_dir + 'X_test.npy')
y_train = np.load(input_dir + 'y_train.npy')
print("X_train shape :", X_train.shape)
print("X_test shape :", X_test.shape)
print("y_train shape :", y_train.shape)<save_to_csv>
all_data = pd.concat([train_data, test_data])
Titanic - Machine Learning from Disaster
13,261,128
pred = pd.DataFrame()
pred['Id'] = np.arange(X_test.shape[0])
pred['Prediction'] = np.zeros(X_test.shape[0], dtype=int)
pred.to_csv('pred.csv', index=False)
pred.head()<set_options>
label = LabelEncoder()
Titanic - Machine Learning from Disaster
13,261,128
warnings.filterwarnings('ignore')
np.random.seed(1324)
sns.set(style="white")
%matplotlib inline<import_modules>
all_data['FareBin_Code'] = label.fit_transform(all_data['FareBin'])
all_data['AgeBin_Code'] = label.fit_transform(all_data['AgeBin'])
Titanic - Machine Learning from Disaster
13,261,128
import cv2
import sys
import os
import keras<import_modules>
all_data.drop(columns=['AgeBin'], inplace=True)
all_data.drop(columns=['FareBin'], inplace=True)
Titanic - Machine Learning from Disaster
13,261,128
from keras.models import Sequential
from keras import layers
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import SpatialDropout2D
from keras.layers import Dropout
from keras.layers import Activation, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
from keras.callbacks import TerminateOnNaN, ModelCheckpoint, LearningRateScheduler
from keras.utils.np_utils import to_categorical
import tensorflow as tf
from PIL import Image, ImageEnhance
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator<save_to_csv>
for i in range(1, len(all_data) + 1):
    all_data.loc[i, 'Srate'] = 0.5
Titanic - Machine Learning from Disaster
13,261,128
def submit_file(y_pred, filename):
    submit = pd.Series(y_pred.reshape(y_pred.shape[0]), name='target').replace({0: 'Bird', 1: 'Airplane'})
    submit.to_csv(filename + '.csv', index_label='id', header=True)
    print('file created')<categorify>
for i in range(1, len(all_data) + 1):
    number = all_data.loc[i, 'TicketNumber']
    group = all_data.loc[i, 'TicketGroup']
    if group > 1:
        group_max = all_data.drop(i)[all_data.drop(i)['TicketNumber'] == number]['Survived'].max()
        if not pd.isna(group_max):
            all_data.loc[i, 'Srate'] = group_max
Titanic - Machine Learning from Disaster
13,261,128
def flip_image(array, show=False):
    image_obj = Image.fromarray(array.astype('uint8'))
    rotated_image = image_obj.transpose(Image.FLIP_LEFT_RIGHT)
    flipped_array = np.array(rotated_image, dtype=int)
    if show:
        plt.imshow(flipped_array)
    return flipped_array

def contrast_image(array, c_level=4.0, show=False):
    image_obj = Image.fromarray(array.astype('uint8'))
    enhancer = ImageEnhance.Contrast(image_obj)
    enhanced_im = enhancer.enhance(c_level)
    contrasted_array = np.array(enhanced_im, dtype=int)
    if show:
        plt.imshow(contrasted_array)
    return contrasted_array<load_from_csv>
train_data = all_data[:891] test_data = all_data[891:]
Titanic - Machine Learning from Disaster
13,261,128
train_x = pd.read_csv('../input/train_x.csv', index_col=0, header=None)
train_y = pd.read_csv('../input/train_y.csv', index_col=0)
test_x = pd.read_csv('../input/test_x.csv', index_col=0, header=None)<categorify>
numerical = ["Pclass", "Age", "SibSp", "Parch", "Fare", "TicketGroup", "FamilySize", "FareBin_Code", "AgeBin_Code", "Srate", "TicketNumber"]
categorical = ["Embarked", "Title", "Sex"]
all_features = numerical + categorical
Titanic - Machine Learning from Disaster
13,261,128
train_y.replace(['Bird', 'Airplane'], [0, 1], inplace=True)
X = train_x.values.reshape(7200, 32, 32, 3)
X_test = test_x.values.reshape(4800, 32, 32, 3)
y_train = train_y.values<compute_test_metric>
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import VotingClassifier
Titanic - Machine Learning from Disaster
13,261,128
viz_img(X[100], y_train[100])<compute_test_metric>
y = train_data['Survived']
X = train_data.drop(columns=['Survived'])
Titanic - Machine Learning from Disaster
13,261,128
viz_img(X[452], y_train[452])<split>
log_reg = LogisticRegression(C=1.9)
Titanic - Machine Learning from Disaster
13,261,128
X = X.astype('float32')
X_test = X_test.astype('float32')
x_train, x_test, y_train, y_test = train_test_split(X, y_train, test_size=0.3, random_state=12000)<compute_test_metric>
num_att_LR = ["Pclass", "Age", "TicketNumber", "SibSp", "Parch", "Srate", "Fare"]
cat_att1_LR = ["Sex"]
cat_att2_LR = ["Title"]
drop_att_LR = list(set(all_features) - set(num_att_LR + cat_att1_LR + cat_att2_LR))
full_pipline_LR = ColumnTransformer([
    ("toDrop", 'drop', drop_att_LR),
    ("numerical", StandardScaler(), num_att_LR),
    ("categorical1", OrdinalEncoder(), cat_att1_LR),
    ("categorical2", OneHotEncoder(), cat_att2_LR),
])
Titanic - Machine Learning from Disaster
13,261,128
mean = np.mean(x_train, axis=(0, 1, 2, 3))
std = np.std(x_train, axis=(0, 1, 2, 3))
x_train = (x_train - mean) / (std + 1e-7)
x_test = (x_test - mean) / (std + 1e-7)
X_test = (X_test - mean) / (std + 1e-7)<choose_model_class>
X_prep = full_pipline_LR.fit_transform(X)
cross_val_score(log_reg, X_prep, y, cv=10, scoring='roc_auc').mean()
Titanic - Machine Learning from Disaster
13,261,128
weight_decay = 1e-4
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=x_train.shape[1:]))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()<train_model>
cat_bayes = CategoricalNB(alpha=0.05)
Titanic - Machine Learning from Disaster
13,261,128
datagen = ImageDataGenerator(
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=True,
)
datagen.fit(x_train)<train_model>
num_att_NB = ["FareBin_Code", "AgeBin_Code"]
cat_att_NB = ["Title"]
drop_att_NB = list(set(all_features) - set(num_att_NB + cat_att_NB))
full_pipline_NB = ColumnTransformer([
    ("cat", OrdinalEncoder(), cat_att_NB),
    ("num", 'passthrough', num_att_NB),
    ("drop", 'drop', drop_att_NB),
])
Titanic - Machine Learning from Disaster
13,261,128
batch_size = 64
opt_rms = keras.optimizers.rmsprop(lr=0.001, decay=1e-6)
model.compile(loss='binary_crossentropy', optimizer=opt_rms, metrics=['accuracy'])
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), steps_per_epoch=x_train.shape[0] // batch_size, epochs=125, verbose=1, validation_data=(x_test, y_test), callbacks=[LearningRateScheduler(lr_schedule)])<predict_on_test>
X_prep = full_pipline_NB.fit_transform(X)
cross_val_score(cat_bayes, X_prep, y, cv=10, scoring='roc_auc').mean()
Titanic - Machine Learning from Disaster
13,261,128
y_pred = model.predict_classes(X_test)<set_options>
from sklearn import tree
Titanic - Machine Learning from Disaster
13,261,128
warnings.filterwarnings('ignore')
sns.set()
%config InlineBackend.figure_format = 'svg'
print(os.listdir('../input/'))<load_from_csv>
num_att_DT = ["Pclass", "FamilySize", "Srate", "AgeBin_Code", "FareBin_Code"]
cat_att_DT = ["Sex"]
drop_att_DT = list(set(all_features) - set(num_att_DT + cat_att_DT))
full_pipline_DT = ColumnTransformer([
    ("droping", 'drop', drop_att_DT),
    ("num", 'passthrough', num_att_DT),
    ("cat", OrdinalEncoder(), cat_att_DT),
])
Titanic - Machine Learning from Disaster
13,261,128
df = pd.read_csv('../input/train.csv')
df.head()<load_from_csv>
X_prep = full_pipline_DT.fit_transform(X)
Titanic - Machine Learning from Disaster
13,261,128
test_df = pd.read_csv('../input/test.csv')<feature_engineering>
criterion = ["gini", "entropy"]
max_depth = list(range(1, 12, 1))
max_features = [1, 2, 3, 4, 5, 6]
splitter = ["best", "random"]
min_samples_split = [2, 3]
hyperparams = {'criterion': criterion, 'max_depth': max_depth, 'max_features': max_features, 'splitter': splitter, 'min_samples_split': min_samples_split}
gd = GridSearchCV(estimator=tree.DecisionTreeClassifier(random_state=1), param_grid=hyperparams, verbose=0, cv=10, scoring="roc_auc")
Titanic - Machine Learning from Disaster
13,261,128
df['char_count'] = df['review'].apply(len)
df['word_count'] = df['review'].apply(lambda x: len(x.split()))
df['word_density'] = df['char_count'] / (df['word_count'] + 1)
df['punctuation_count'] = df['review'].apply(lambda x: len("".join(_ for _ in x if _ in string.punctuation)))<split>
gd.fit(X_prep, y)
print(gd.best_score_)
print(gd.best_estimator_)
Titanic - Machine Learning from Disaster
13,261,128
X_train, X_val, y_train, y_val = train_test_split(df['review'], df['sentiment'], test_size=0.3, random_state=17)<feature_engineering>
DT_clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=5, max_features=6, random_state=1, splitter='random')
Titanic - Machine Learning from Disaster
13,261,128
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(df['review'])
xtrain_tfidf = tfidf_vect.transform(X_train)
xvalid_tfidf = tfidf_vect.transform(X_val)
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2, 3), max_features=5000)
tfidf_vect_ngram.fit(df['review'])
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(X_train)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(X_val)
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\w{1,}', ngram_range=(2, 3), max_features=5000)
tfidf_vect_ngram_chars.fit(df['review'])
xtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(X_train)
xvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(X_val)
xtest_tfidf = tfidf_vect.transform(test_df['review'])
xtest_tfidf_ngram = tfidf_vect_ngram.transform(test_df['review'])
xtest_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(test_df['review'])<feature_engineering>
svm_clf = LinearSVC(C=0.1, loss='hinge', max_iter=100000, random_state=1)
Titanic - Machine Learning from Disaster
13,261,128
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(df['review'])
xtrain_count = count_vect.transform(X_train)
xvalid_count = count_vect.transform(X_val)
xtest_count = count_vect.transform(test_df['review'])<choose_model_class>
num_att_SVM = numerical
cat_att1_SVM = ["Sex"]
cat_att2_SVM = ["Title"]
drop_att_SVM = list(set(all_features) - set(num_att_SVM + cat_att1_SVM + cat_att2_SVM))
full_pipline_SVM = ColumnTransformer([
    ("drop", 'drop', drop_att_SVM),
    ("cat", OrdinalEncoder(), cat_att1_SVM),
    ("num", StandardScaler(), num_att_SVM),
    ("cat2", OneHotEncoder(), cat_att2_SVM),
])
Titanic - Machine Learning from Disaster
13,261,128
model1 = linear_model.LogisticRegression()
model1.fit(xtrain_count, y_train)
accuracy = model1.score(xvalid_count, y_val)
print('Accuracy Count LR:', accuracy)
test_pred1 = model1.predict(xtest_count)
model2 = linear_model.LogisticRegression()
model2.fit(xtrain_tfidf, y_train)
accuracy = model2.score(xvalid_tfidf, y_val)
print('Accuracy TFIDF LR:', accuracy)
test_pred2 = model2.predict(xtest_tfidf)
model3 = linear_model.LogisticRegression()
model3.fit(xtrain_tfidf_ngram, y_train)
accuracy = model3.score(xvalid_tfidf_ngram, y_val)
print('Accuracy TFIDF NGRAM LR:', accuracy)
test_pred3 = model3.predict(xtest_tfidf_ngram)<concatenate>
X_prep = full_pipline_SVM.fit_transform(X)
cross_val_score(svm_clf, X_prep, y, cv=10, scoring='roc_auc').mean()
Titanic - Machine Learning from Disaster
13,261,128
final_pred = np.array([])
for i in range(0, len(test_df['review'])):
    final_pred = np.append(final_pred, np.argmax(np.bincount([test_pred1[i], test_pred2[i], test_pred3[i]])))<create_dataframe>
LR_pipe = Pipeline([('logReg pipline', full_pipline_LR), ('log_reg', log_reg)])
cross_val_score(LR_pipe, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
sub_df = pd.DataFrame()
sub_df['Id'] = test_df['Id']
sub_df['sentiment'] = [int(i) for i in final_pred]<save_to_csv>
NB_pipe = Pipeline([('NaiveBayes pipline', full_pipline_NB), ('cat_bayes', cat_bayes)])
cross_val_score(NB_pipe, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
sub_df.to_csv('my_submission.csv', index=False)<import_modules>
DT_pipe = Pipeline([('DecisionTree pipline', full_pipline_DT), ('Decision Tree', DT_clf)])
cross_val_score(DT_pipe, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
random_seed = 1213
np.random.seed(random_seed)<load_from_csv>
SVM_pipe = Pipeline([('SVM pipline', full_pipline_SVM), ('SVM', svm_clf)])
cross_val_score(SVM_pipe, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
train = pd.read_csv('../input/ciproject/persianMNIST_train.csv')
test = pd.read_csv('../input/ciproject/persianMNIST_test.csv')
Y = train['0']
X = train.drop(labels="0", axis=1)
X = X.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.10, random_state=random_seed)
print(X.shape, test.shape)<choose_model_class>
voting_clf_4 = VotingClassifier(
    estimators=[('Logistic Regresion', LR_pipe), ('SVM', SVM_pipe), ('Naive Bayes', NB_pipe), ('Decision Tree', DT_pipe)],
    voting='hard'
)
cross_val_score(voting_clf_4, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.0001)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)

def new_model(hidden=512, learning_rate=0.00128):
    INPUT = Input((28, 28, 1))
    inputs = Conv2D(64, (7, 7), activation='relu', padding='same')(INPUT)
    inputs = MaxPool2D(pool_size=(5, 5), strides=(2, 2))(inputs)
    inputs = BatchNormalization()(inputs)
    inputs = Activation('relu')(inputs)
    inputs = Dropout(0.5)(inputs)
    tower_1 = Conv2D(64, (1, 1), activation='relu', padding='same')(inputs)
    tower_1 = Conv2D(64, (2, 2), activation='relu', padding='same')(tower_1)
    tower_1 = Conv2D(64, (3, 3), activation='relu', padding='same')(tower_1)
    tower_1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(tower_1)
    tower_1 = BatchNormalization()(tower_1)
    tower_2 = Conv2D(64, (2, 2), activation='relu', padding='same')(inputs)
    tower_2 = Conv2D(64, (3, 3), activation='relu', padding='same')(tower_2)
    tower_2 = Conv2D(64, (5, 5), activation='relu', padding='same')(tower_2)
    tower_2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(tower_2)
    tower_2 = BatchNormalization()(tower_2)
    tower_3 = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
    tower_3 = Conv2D(64, (3, 3), activation='relu', padding='same')(tower_3)
    tower_3 = Conv2D(64, (5, 5), activation='relu', padding='same')(tower_3)
    tower_3 = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(tower_3)
    tower_3 = BatchNormalization()(tower_3)
    x = Add()([tower_1, tower_2, tower_3])
    x = Activation('relu')(x)
    x = Conv2D(128, (5, 5), activation='relu', padding='same')(x)
    x = MaxPool2D(pool_size=(5, 5), strides=(3, 3))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(hidden, activation='relu')(x)
    x = Dropout(0.5)(x)
    preds = Dense(10, activation='softmax', name='preds')(x)
    model = Model(inputs=INPUT, outputs=preds)
    optimizer = Adam(lr=learning_rate)
    model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc'])
    return model

model = new_model()<train_model>
voting_clf_SVM_DT = VotingClassifier(
    estimators=[('SVM', SVM_pipe), ('Decision Tree', DT_pipe)],
    voting='hard'
)
cross_val_score(voting_clf_SVM_DT, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=5,
    zoom_range=0.05,
    shear_range=0.02,
    width_shift_range=0.05,
    height_shift_range=0.05,
    horizontal_flip=False,
    vertical_flip=False,
)<train_model>
voting_clf_NB_DT = VotingClassifier(
    estimators=[('Naive Bayes', NB_pipe), ('Decision Tree', DT_pipe)],
    voting='hard'
)
cross_val_score(voting_clf_NB_DT, X, y, cv=10, scoring='accuracy').mean()
Titanic - Machine Learning from Disaster
13,261,128
epochs = 200
batch_size = 128
print("Learning Properties: Epoch:%i \t Batch Size:%i" % (epochs, batch_size))
predict_accumulator = np.zeros(model.predict(test).shape)
accumulated_history = []
for i in range(1, 6):
    X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.20, random_state=random_seed * i)
    model = new_model(100, 0.008 / i)
    datagen.fit(X_train)
    history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=3, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction, es], workers=4)
    loss, acc = model.evaluate(X, Y)
    if acc > 0.75:
        predict_accumulator += model.predict(test) * acc
        accumulated_history.append(history)
    print("Current Predictions on fold number %i" % i)
    print(*np.argmax(predict_accumulator, axis=1), sep='\t')<save_to_csv>
X_test = test_data.drop(columns=['Survived'])
Titanic - Machine Learning from Disaster
13,261,128
print("Completed Training.")
results = np.argmax(predict_accumulator, axis=1)
results = pd.Series(results, name="result")
print("Saving prediction to output...")
submission = pd.concat([pd.Series(range(0, test.shape[0]), name="ids"), results], axis=1)
submission.to_csv('prediction.csv', index=False)<import_modules>
voting_clf_NB_DT.fit(X, y)
Titanic - Machine Learning from Disaster
13,261,128
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from skimage.transform import rescale
from sklearn import preprocessing
from keras.models import load_model
from keras.layers import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from sklearn.model_selection import train_test_split
from tqdm import tqdm<define_variables>
predictions = voting_clf_NB_DT.predict(X_test)
predictions = pd.to_numeric(predictions, downcast='integer')
output = pd.DataFrame({'PassengerId': list(X_test.index), 'Survived': predictions})
Titanic - Machine Learning from Disaster
13,261,128
img_rows = 40
img_cols = 40<load_from_csv>
output.to_csv('Submission_NB_DT.csv', index=False)
Titanic - Machine Learning from Disaster
13,482,615
<categorify>
train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
Titanic - Machine Learning from Disaster
13,482,615
encoder = preprocessing.LabelEncoder()
encoder.fit(np.load("../input/tmpfory/trainY.npy"))<load_pretrained>
train.isnull().sum()
Titanic - Machine Learning from Disaster
13,482,615
T = np.load("../input/inputtestdata40x40/testT.npy")<choose_model_class>
test.isnull().sum()
Titanic - Machine Learning from Disaster
13,482,615
model = Sequential()
model.add(Conv2D(64, kernel_size=3, activation='relu', input_shape=(img_rows, img_cols, 1)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(128, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(256, kernel_size=4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1000, activation='softmax'))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
print(model.summary())<create_dataframe>
train.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'], inplace=True)
test.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin'], inplace=True)
Titanic - Machine Learning from Disaster
13,482,615
datagen = ImageDataGenerator(
    rotation_range=8,
    zoom_range=0.10,
    width_shift_range=0.1,
    height_shift_range=0.1,
)<choose_model_class>
train.drop(columns=['SibSp', 'Parch'], inplace=True)
test.drop(columns=['SibSp', 'Parch'], inplace=True)
Titanic - Machine Learning from Disaster
13,482,615
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x)<categorify>
a = train.groupby('Pclass').median()['Age'].iloc[0]
b = train.groupby('Pclass').median()['Age'].iloc[1]
c = train.groupby('Pclass').median()['Age'].iloc[2]

def fillAge_train(x):
    age = x[0]
    pclass = x[1]
    if pd.isnull(age):
        if pclass == 1:
            return a
        elif pclass == 2:
            return b
        else:
            return c
    else:
        return age

a_test = test.groupby('Pclass').median()['Age'].iloc[0]
b_test = test.groupby('Pclass').median()['Age'].iloc[1]
c_test = test.groupby('Pclass').median()['Age'].iloc[2]

def fillAge_test(x):
    age = x[0]
    pclass = x[1]
    if pd.isnull(age):
        if pclass == 1:
            return a_test
        elif pclass == 2:
            return b_test
        else:
            return c_test
    else:
        return age

train['Age'] = train[['Age', 'Pclass']].apply(fillAge_train, axis=1)
test['Age'] = test[['Age', 'Pclass']].apply(fillAge_test, axis=1)
Titanic - Machine Learning from Disaster
13,482,615
X_train2 = np.load("../input/rescalinginputdata/X_train2.npy")
X_val2 = np.load("../input/rescalinginputdata/X_val2.npy")
Y_train2 = np_utils.to_categorical(encoder.transform(np.load("../input/rescalinginputdata/Y_train2.npy")))
Y_val2 = np_utils.to_categorical(encoder.transform(np.load("../input/rescalinginputdata/Y_val2.npy")))<train_model>
a = test.groupby('Pclass').median()['Fare'].iloc[0]
b = test.groupby('Pclass').median()['Fare'].iloc[1]
c = test.groupby('Pclass').median()['Fare'].iloc[2]

def fillFare(x):
    fare = x[0]
    pclass = x[1]
    if pd.isnull(fare):
        if pclass == 1:
            return a
        elif pclass == 2:
            return b
        else:
            return c
    else:
        return fare

test['Fare'] = test[['Fare', 'Pclass']].apply(fillFare, axis=1)
Titanic - Machine Learning from Disaster
13,482,615
history = model.fit_generator(datagen.flow(X_train2, Y_train2, batch_size=64), epochs=18, steps_per_epoch=X_train2.shape[0] // 64, validation_data=(X_val2, Y_val2), callbacks=[annealer], verbose=0, initial_epoch=0)<save_to_csv>
train['Embarked'].fillna(train['Embarked'].mode()[0], inplace=True)
Titanic - Machine Learning from Disaster
13,482,615
history = history.history
file = open('info.csv', 'w')
file.write(str(history))
file.close()<predict_on_test>
train = pd.get_dummies(train, columns=['Sex', 'Embarked', 'TravelAlone'], drop_first=True)
test = pd.get_dummies(test, columns=['Sex', 'Embarked', 'TravelAlone'], drop_first=True)
Titanic - Machine Learning from Disaster
13,482,615
predictions = model.predict(T)
predictions = np.argmax(predictions, axis=1)
res = encoder.inverse_transform(predictions)<save_to_csv>
train['AgeCateg'] = pd.cut(train['Age'], 5)
train[['AgeCateg', 'Survived']].groupby(['AgeCateg'], as_index=False).mean().sort_values(by='AgeCateg', ascending=True)
train['FareCateg'] = pd.qcut(train['Fare'], 4)
train[['FareCateg', 'Survived']].groupby(['FareCateg'], as_index=False).mean().sort_values(by='FareCateg', ascending=True)

def encodeAgeFare(train):
    train.loc[train['Age'] <= 16, 'Age'] = 0
    train.loc[(train['Age'] > 16) & (train['Age'] <= 32), 'Age'] = 1
    train.loc[(train['Age'] > 32) & (train['Age'] <= 48), 'Age'] = 2
    train.loc[(train['Age'] > 48) & (train['Age'] <= 64), 'Age'] = 3
    train.loc[(train['Age'] > 64) & (train['Age'] <= 80), 'Age'] = 4
    train.loc[train['Fare'] <= 7.91, 'Fare'] = 0
    train.loc[(train['Fare'] > 7.91) & (train['Fare'] <= 14.454), 'Fare'] = 1
    train.loc[(train['Fare'] > 14.454) & (train['Fare'] <= 31.0), 'Fare'] = 2
    train.loc[(train['Fare'] > 31.0) & (train['Fare'] <= 512.329), 'Fare'] = 3

encodeAgeFare(train)
encodeAgeFare(test)
train.drop(columns=['AgeCateg', 'FareCateg'], inplace=True)
Titanic - Machine Learning from Disaster
13,482,615
file = open('ansEpochs.csv', 'w')
file.write('Id,Category\n')
for i in range(res.size):
    file.write(str(i + 1) + ',' + str(res[i]) + '\n')
file.close()<categorify>
X = train.drop('Survived', axis=1)
y = train['Survived']
Titanic - Machine Learning from Disaster
13,482,615
def Education_to_numeric(bp):
    if bp == 'Bachelors ':
        return 4
    if bp == 'Partial College':
        return 3
    if bp == 'High School':
        return 2
    if bp == 'Graduate Degree':
        return 1
    if bp == 'Partial High School':
        return 0

edu = train_data.Education
edut = test_data.Education
edupp = edu.apply(Education_to_numeric)
edud = edut.apply(Education_to_numeric)
train_data.Education = edupp
test_data.Education = edud

def occupation_to_numeric(b):
    if b == 'Clerical':
        return 1
    if b == 'Professional':
        return 1
    if b == 'Manual':
        return 0
    if b == 'Skilled Manual':
        return 0
    if b == 'Management':
        return 1

p = train_data.Occupation
pt = test_data.Occupation
d = p.apply(occupation_to_numeric)
dt = pt.apply(occupation_to_numeric)
train_data.Occupation = d
test_data.Occupation = dt

def COUNTRY_to_numeric(d):
    if d == 'Canada':
        return 3
    if d == 'France':
        return 4
    if d == 'Australia':
        return 1
    if d == 'United Kingdom':
        return 2
    if d == 'United States':
        return 5
    if d == 'Germany':
        return 6

pon = train_data.CountryRegionName
ptu = test_data.CountryRegionName
qa = pon.apply(COUNTRY_to_numeric)
qaw = ptu.apply(COUNTRY_to_numeric)
train_data.CountryRegionName = qa
test_data.CountryRegionName = qaw

def marital_to_numeric(ip):
    if ip == 'M':
        return 0
    if ip == 'S':
        return 1

pl = train_data.MaritalStatus
pl_t = test_data.MaritalStatus  # renamed from "plt", which shadowed matplotlib.pyplot
ml = pl.apply(marital_to_numeric)
mlt = pl_t.apply(marital_to_numeric)
train_data.MaritalStatus = ml
test_data.MaritalStatus = mlt

def region_to_numeric(a):
    if a == 'M':
        return 2
    if a == 'F':
        return 1

z = train_data.Gender
zt = test_data.Gender
c = z.apply(region_to_numeric)
ct = zt.apply(region_to_numeric)
train_data.Gender = c
test_data.Gender = ct

yy = train_data.BirthDate
now = pd.Timestamp(DT.datetime.now())
nuu = pd.to_datetime(yy)
nuu = (now - nuu) / 365
ny = nuu.dt.days
train_data.BirthDate = ny

yyt = test_data.BirthDate
nowt = pd.Timestamp(DT.datetime.now())
nuut = pd.to_datetime(yyt)
nuut = (nowt - nuut) / 365
nyt = nuut.dt.days
test_data.BirthDate = nyt

train_data.PostalCode = train_data.TotalChildren.subtract(train_data.NumberChildrenAtHome, fill_value=0)
test_data.PostalCode = test_data.TotalChildren.subtract(test_data.NumberChildrenAtHome, fill_value=0)
gh = train_data.YearlyIncome - train_data.TotalChildren

def age_to_numeric(qa):
    if qa <= 37:
        return 3
    if 51 > qa >= 44:
        return 4
    if 58 > qa >= 51:
        return 1
    if 65 > qa >= 58:
        return 2
    if qa >= 65:
        return 5

train_data.BirthDate = 100 / train_data.BirthDate ** 3.9
test_data.BirthDate = 100 / test_data.BirthDate ** 3.9

def ave_to_numeric(qa):
    if qa < 40:
        return 1
    if 50 > qa >= 40:
        return 2
    if 59 > qa >= 50:
        return 3
    if 65 > qa >= 59:
        return 4
    if qa >= 65:
        return 5

train_data.AveMonthSpend = (np.log(train_data.AveMonthSpend)) * 12
test_data.AveMonthSpend = (np.log(test_data.AveMonthSpend)) * 12
train_data.Suffix = (train_data.YearlyIncome ** 3.3) - (train_data.AveMonthSpend ** 3)
test_data.Suffix = (test_data.YearlyIncome ** 3.3) - (test_data.AveMonthSpend ** 3)
train_data.YearlyIncome = (train_data.YearlyIncome ** 3.3)
test_data.YearlyIncome = (test_data.YearlyIncome ** 3.3)
train_data.TotalChildren = (train_data.TotalChildren * 2)
test_data.TotalChildren = (test_data.TotalChildren * 2)
train_data.City = train_data.AveMonthSpend * 30
test_data.City = test_data.AveMonthSpend * 30
train_data.PostalCode = train_data.TotalChildren.subtract(train_data.NumberChildrenAtHome, fill_value=0)
test_data.PostalCode = test_data.TotalChildren.subtract(test_data.NumberChildrenAtHome, fill_value=0)
train_data.PostalCode = 1 / train_data.CustomerID ** 1 / 2
test_data.PostalCode = 1 / test_data.CustomerID ** 1 / 2

y = train_data.BikeBuyer
features = ['TotalChildren', 'YearlyIncome', 'HomeOwnerFlag', 'NumberChildrenAtHome', 'Gender', 'MaritalStatus', 'BirthDate', 'CountryRegionName', 'Occupation', 'PostalCode', 'Suffix', 'Education', 'PostalCode']
train_data.AveMonthSpend.plot.hist(color='blue', bins=50)
plt.show()
X = train_data[features]
textX = test_data[features]
corr = train_data.corr()
f, ax = plt.subplots(figsize=(10, 5))
sns.heatmap(corr, cmap='coolwarm', linewidths=2.0, annot=True)
my_imputer = SimpleImputer()
X = my_imputer.fit_transform(X)
textX = my_imputer.fit_transform(textX)<train_model>
np.random.seed(0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
Titanic - Machine Learning from Disaster
13,482,615
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
xgb = XGBClassifier(n_estimators=380, learning_rate=0.049, random_state=1, min_child_weight=6)
training_start = time.perf_counter()
xgb.fit(train_X, train_y)
training_end = time.perf_counter()
prediction_start = time.perf_counter()
preds = xgb.predict(val_X)
prediction_end = time.perf_counter()
acc_xgb = (preds == val_y).sum().astype(float) / len(preds) * 100
xgb_train_time = training_end - training_start
xgb_prediction_time = prediction_end - prediction_start
print("XGBoost's prediction accuracy is: %3.2f" % (acc_xgb))
print("Time consumed for training: %4.3f" % (xgb_train_time))
print("Time consumed for prediction: %6.5f seconds" % (xgb_prediction_time))
pre = xgb.predict(textX)
final = pre.astype(int)
output = pd.DataFrame({'CustomerID': test_data.CustomerID, 'BikeBuyer': final})
output.to_csv('Sample Submission file clf.csv', index=False)<install_modules>
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RandomizedSearchCV
Titanic - Machine Learning from Disaster
13,482,615
!pip install --upgrade efficientnet tensorflow_addons tensorflow<import_modules>
xgbclassifier = XGBClassifier()
xgbclassifier.fit(X_train, y_train)
pred_xgb = xgbclassifier.predict(X_test)
Titanic - Machine Learning from Disaster
13,482,615
import math
import re
import random
import os
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import tensorflow.keras.backend as K
import efficientnet.tfkeras as efn
import efficientnet
import itertools
import matplotlib
import scipy
import pandas as pd
import sklearn
from matplotlib import pyplot as plt
from datetime import datetime<import_modules>
randomfc = RandomForestClassifier(n_estimators=100)
randomfc.fit(X_train, y_train)
pred_rf = randomfc.predict(X_test)
Titanic - Machine Learning from Disaster
13,482,615
print(f'Numpy version : {np.__version__}')
print(f'Tensorflow version : {tf.__version__}')
print(f'Tensorflow Addons version : {tfa.__version__}')
print(f'EfficientNet (library) version : {efficientnet.__version__}')
print(f'Matplotlib version : {matplotlib.__version__}')
print(f'Scipy version : {scipy.__version__}')
print(f'Pandas version : {pd.__version__}')
print(f'Scikit-Learn version : {sklearn.__version__}')<install_modules>
lightgb = LGBMClassifier()
lightgb.fit(X_train, y_train)
pred_lgb = lightgb.predict(X_test)
Titanic - Machine Learning from Disaster
13,482,615
!pip freeze > requirements.txt<feature_engineering>
ada = AdaBoostClassifier(n_estimators=50, learning_rate=1)
ada.fit(X_train, y_train)
pred_ada = ada.predict(X_test)
Titanic - Machine Learning from Disaster