kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
1,266,101
train_X, test_X, train_y, word_index = load_and_prec() embedding_matrix_1 = load_glove(word_index) embedding_matrix_3 = load_para(word_index )<compute_test_metric>
df = pd.read_csv('.. /input/train.csv') df.head()
Titanic - Machine Learning from Disaster
1,266,101
embedding_matrix = np.mean([embedding_matrix_1, embedding_matrix_3], axis = 0) np.shape(embedding_matrix) def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in [i * 0.01 for i in range(100)]: score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score search_result = {'threshold': best_threshold, 'f1': best_score} return search_result DATA_SPLIT_SEED = 2018 clr = CyclicLR(base_lr=0.001, max_lr=0.002, step_size=300., mode='exp_range', gamma=0.99994 )<find_best_model_class>
get_missing_data_table(df )
Titanic - Machine Learning from Disaster
1,266,101
train_meta = np.zeros(train_y.shape) test_meta = np.zeros(test_X.shape[0]) splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=DATA_SPLIT_SEED ).split(train_X, train_y)) for idx,(train_idx, valid_idx)in enumerate(splits): X_train = train_X[train_idx] y_train = train_y[train_idx] X_val = train_X[valid_idx] y_val = train_y[valid_idx] model = model_lstm_atten(embedding_matrix) pred_val_y, pred_test_y, best_score = train_pred(model, X_train, y_train, X_val, y_val, epochs = 8, callback = [clr,]) train_meta[valid_idx] = pred_val_y.reshape(-1) test_meta += pred_test_y.reshape(-1)/ len(splits) sub = pd.read_csv('.. /input/sample_submission.csv') sub.prediction = test_meta > 0.33 sub.to_csv("submission.csv", index=False )<import_modules>
df = df.drop('Cabin', axis='columns') df = delete_null_observations(df, column='Embarked') df = df.reset_index(drop=True) df['Age'] = df['Age'].fillna(value=1000) get_missing_data_table(df )
Titanic - Machine Learning from Disaster
1,266,101
tqdm.pandas(desc='Progress') <define_variables>
df['Family Size'] = df['SibSp'] + df['Parch'] df = df.drop('SibSp', axis='columns') df = df.drop('Parch', axis='columns') df.head(5 )
Titanic - Machine Learning from Disaster
1,266,101
embed_size = 300 max_features = 120000 maxlen = 70 batch_size = 512 n_epochs = 5 n_splits = 5 SEED = 1029<set_options>
titles = name_row.tolist() for i in range(len(titles)) : title = titles[i] if title != 'Master' and title != 'Miss' and title != 'Mr' and title !='Mrs': titles[i] = 'Other' name_row = pd.DataFrame(titles, columns=['Title']) df['Title'] = name_row.copy() df = df.drop('Name', axis='columns') df.head(5 )
Titanic - Machine Learning from Disaster
1,266,101
def seed_everything(seed=1029): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()<features_selection>
test_df = df.copy() test_df = pd.DataFrame([df['Age'].tolist() , df['Title'].tolist() ] ).transpose() test_df.columns = ['Age','Title'] test_df_list = test_df.values for i in range(len(test_df_list)) : age = test_df_list[i][0] title = test_df_list[i][1] if age == 1000: if title == 'Master': test_df_list[i][0] = 5.19 elif title == 'Miss': test_df_list[i][0] = 21.87 elif title == 'Mr': test_df_list[i][0] = 32.18 elif title == 'Mrs': test_df_list[i][0] = 35.48 else: test_df_list[i][0] = 42.81 df['Age'] = test_df['Age'].copy() df['Age'] = df['Age'].astype('float64') df['Age'].describe()
Titanic - Machine Learning from Disaster
1,266,101
def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300] embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.005838499,0.48782197 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.0053247833,0.49346462 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<load_from_csv>
df = df.drop('Ticket', axis='columns') df = df.drop('PassengerId', axis='columns') df.head(5 )
Titanic - Machine Learning from Disaster
1,266,101
df_train = pd.read_csv(".. /input/train.csv") df_test = pd.read_csv(".. /input/test.csv") df = pd.concat([df_train ,df_test],sort=True )<feature_engineering>
df = transform_dummy_variables(df,['Sex','Pclass','Embarked','Title']) df.head(5 )
Titanic - Machine Learning from Disaster
1,266,101
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab vocab = build_vocab(df['question_text'] )<define_variables>
X_train = df.iloc[:,1:].values y = df.iloc[:,0].values sc = StandardScaler() X_train = sc.fit_transform(X_train) print('X_train: {0}'.format(X_train[0:5])) print('y: {0}'.format(y[0:5]))
Titanic - Machine Learning from Disaster
1,266,101
sin = len(df_train[df_train["target"]==0]) insin = len(df_train[df_train["target"]==1]) persin =(sin/(sin+insin)) *100 perinsin =(insin/(sin+insin)) *100 print(" print("<feature_engineering>
classifier = XGBClassifier() classifier.fit(X_train, y )
Titanic - Machine Learning from Disaster
1,266,101
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab def known_contractions(embed): known = [] for contract in contraction_mapping: if contract in embed: known.append(contract) return known def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x def unknown_punct(embed, punct): unknown = '' for p in punct: if p not in embed: unknown += p unknown += ' ' return unknown def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def add_lower(embedding, vocab): count = 0 for word in vocab: if word in embedding and word.lower() not in embedding: embedding[word.lower() ] = embedding[word] count += 1 print(f"Added {count} words to embedding" )<define_variables>
params = { 'min_child_weight': [1, 5, 10], 'gamma': [0.5, 1, 1.5, 2, 5], 'subsample': [0.6, 0.8, 1.0], 'colsample_bytree': [0.6, 0.8, 1.0], 'max_depth': [3, 4, 5] } folds = 4 param_comb = 5 skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001) random_search = RandomizedSearchCV(classifier, param_distributions=params, n_iter=param_comb, scoring='roc_auc', n_jobs=4, cv=skf.split(X_train,y), verbose=3, random_state=1001, iid=True) random_search.fit(X_train, y) xgboost_classifier = random_search.best_estimator_
Titanic - Machine Learning from Disaster
1,266,101
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", 
"shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 
'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text )<feature_engineering>
classifier = RandomForestClassifier() classifier.fit(X_train, y )
Titanic - Machine Learning from Disaster
1,266,101
def add_features(df): df['question_text'] = df['question_text'].progress_apply(lambda x:str(x)) df['total_length'] = df['question_text'].progress_apply(len) df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_words'] = df.question_text.str.count('\S+') df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df def load_and_prec() : train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ train = add_features(train_df) test = add_features(test_df) features = train[['caps_vs_length', 'words_vs_unique']].fillna(0) test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0) ss = StandardScaler() ss.fit(np.vstack(( features, test_features))) features = ss.transform(features) test_features = ss.transform(test_features) tokenizer = 
Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] features = features[trn_idx] return train_X, test_X, train_y, features, test_features, tokenizer.word_index <train_model>
params = { 'n_estimators': [5, 10, 15], 'criterion': ['gini', 'entropy'], 'max_features': ['auto', 'sqrt', 'log2', None], 'max_depth': [None, 3, 4, 5] } folds = 4 param_comb = 5 skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001) random_search = RandomizedSearchCV(classifier, param_distributions=params, n_iter=param_comb, scoring='roc_auc', n_jobs=4, cv=skf.split(X_train,y), verbose=3, random_state=1001, iid=True) random_search.fit(X_train, y) randomforest_classifier = random_search.best_estimator_
Titanic - Machine Learning from Disaster
1,266,101
x_train, x_test, y_train, features, test_features, word_index = load_and_prec() <save_model>
classifier = SVC(probability=True) classifier.fit(X_train, y )
Titanic - Machine Learning from Disaster
1,266,101
np.save("x_train",x_train) np.save("x_test",x_test) np.save("y_train",y_train) np.save("features",features) np.save("test_features",test_features) np.save("word_index.npy",word_index )<load_pretrained>
params = { 'C': [0.5, 1, 1.5], 'kernel': ['rbf', 'linear', 'poly', 'sigmoid'], 'gamma': [0.001, 0.0001], 'class_weight': [None, 'balanced'] } folds = 4 param_comb = 5 skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001) random_search = RandomizedSearchCV(classifier, param_distributions=params, n_iter=param_comb, scoring='roc_auc', n_jobs=4, cv=skf.split(X_train,y), verbose=3, random_state=1001, iid=True) random_search.fit(X_train, y) svc_classifier = random_search.best_estimator_
Titanic - Machine Learning from Disaster
1,266,101
x_train = np.load("x_train.npy") x_test = np.load("x_test.npy") y_train = np.load("y_train.npy") features = np.load("features.npy") test_features = np.load("test_features.npy") word_index = np.load("word_index.npy" ).item()<normalization>
classifier = VotingClassifier(estimators=[('xgb', xgboost_classifier),('rf',randomforest_classifier),('svc',svc_classifier)], voting='soft') classifier.fit(X_train, y )
Titanic - Machine Learning from Disaster
1,266,101
seed_everything() glove_embeddings = load_glove(word_index) paragram_embeddings = load_para(word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0) del glove_embeddings, paragram_embeddings gc.collect() np.shape(embedding_matrix )<split>
accuracies = cross_val_score(estimator=classifier, X=X_train, y=y, cv=5) print('accuracy mean: {0}'.format(accuracies.mean())) print('accuracy std: {0}'.format(accuracies.std()))
Titanic - Machine Learning from Disaster
1,266,101
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED ).split(x_train, y_train)) splits[:3]<choose_model_class>
df_test = pd.read_csv('.. /input/test.csv') df_test.describe()
Titanic - Machine Learning from Disaster
1,266,101
class CyclicLR(object): def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3, step_size=2000, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', last_batch_iteration=-1): if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer ).__name__)) self.optimizer = optimizer if isinstance(base_lr, list)or isinstance(base_lr, tuple): if len(base_lr)!= len(optimizer.param_groups): raise ValueError("expected {} base_lr, got {}".format( len(optimizer.param_groups), len(base_lr))) self.base_lrs = list(base_lr) else: self.base_lrs = [base_lr] * len(optimizer.param_groups) if isinstance(max_lr, list)or isinstance(max_lr, tuple): if len(max_lr)!= len(optimizer.param_groups): raise ValueError("expected {} max_lr, got {}".format( len(optimizer.param_groups), len(max_lr))) self.max_lrs = list(max_lr) else: self.max_lrs = [max_lr] * len(optimizer.param_groups) self.step_size = step_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.batch_step(last_batch_iteration + 1) self.last_batch_iteration = last_batch_iteration def batch_step(self, batch_iteration=None): if batch_iteration is None: batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr def _triangular_scale_fn(self, x): return 1. 
def _triangular2_scale_fn(self, x): return 1 /(2.**(x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): step_size = float(self.step_size) cycle = np.floor(1 + self.last_batch_iteration /(2 * step_size)) x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1) lrs = [] param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs) for param_group, base_lr, max_lr in param_lrs: base_height =(max_lr - base_lr)* np.maximum(0,(1 - x)) if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration) lrs.append(lr) return lrs <choose_model_class>
get_missing_data_table(df_test )
Titanic - Machine Learning from Disaster
1,266,101
embedding_dim = 300 embedding_path = '.. /save/embedding_matrix.npy' use_pretrained_embedding = True hidden_size = 60 gru_len = hidden_size Routings = 4 Num_capsule = 5 Dim_capsule = 5 dropout_p = 0.25 rate_drop_dense = 0.28 LR = 0.001 T_epsilon = 1e-7 num_classes = 30 class Embed_Layer(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None, embedding_dim=300): super(Embed_Layer, self ).__init__() self.encoder = nn.Embedding(vocab_size + 1, embedding_dim) if use_pretrained_embedding: self.encoder.weight.data.copy_(t.from_numpy(embedding_matrix)) def forward(self, x, dropout_p=0.25): return nn.Dropout(p=dropout_p )(self.encoder(x)) class GRU_Layer(nn.Module): def __init__(self): super(GRU_Layer, self ).__init__() self.gru = nn.GRU(input_size=300, hidden_size=gru_len, bidirectional=True) def init_weights(self): ih =(param.data for name, param in self.named_parameters() if 'weight_ih' in name) hh =(param.data for name, param in self.named_parameters() if 'weight_hh' in name) b =(param.data for name, param in self.named_parameters() if 'bias' in name) for k in ih: nn.init.xavier_uniform_(k) for k in hh: nn.init.orthogonal_(k) for k in b: nn.init.constant_(k, 0) def forward(self, x): return self.gru(x) class Caps_Layer(nn.Module): def __init__(self, input_dim_capsule=gru_len * 2, num_capsule=Num_capsule, dim_capsule=Dim_capsule, \ routings=Routings, kernel_size=(9, 1), share_weights=True, activation='default', **kwargs): super(Caps_Layer, self ).__init__(**kwargs) self.num_capsule = num_capsule self.dim_capsule = dim_capsule self.routings = routings self.kernel_size = kernel_size self.share_weights = share_weights if activation == 'default': self.activation = self.squash else: self.activation = nn.ReLU(inplace=True) if self.share_weights: self.W = nn.Parameter( nn.init.xavier_normal_(t.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule))) else: self.W = nn.Parameter( t.randn(BATCH_SIZE, input_dim_capsule, self.num_capsule * self.dim_capsule)) 
def forward(self, x): if self.share_weights: u_hat_vecs = t.matmul(x, self.W) else: print('add later') batch_size = x.size(0) input_num_capsule = x.size(1) u_hat_vecs = u_hat_vecs.view(( batch_size, input_num_capsule, self.num_capsule, self.dim_capsule)) u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3) b = t.zeros_like(u_hat_vecs[:, :, :, 0]) for i in range(self.routings): b = b.permute(0, 2, 1) c = F.softmax(b, dim=2) c = c.permute(0, 2, 1) b = b.permute(0, 2, 1) outputs = self.activation(t.einsum('bij,bijk->bik',(c, u_hat_vecs))) if i < self.routings - 1: b = t.einsum('bik,bijk->bij',(outputs, u_hat_vecs)) return outputs def squash(self, x, axis=-1): s_squared_norm =(x ** 2 ).sum(axis, keepdim=True) scale = t.sqrt(s_squared_norm + T_epsilon) return x / scale class Capsule_Main(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None): super(Capsule_Main, self ).__init__() self.embed_layer = Embed_Layer(embedding_matrix, vocab_size) self.gru_layer = GRU_Layer() self.gru_layer.init_weights() self.caps_layer = Caps_Layer() self.dense_layer = Dense_Layer() def forward(self, content): content1 = self.embed_layer(content) content2, _ = self.gru_layer( content1) content3 = self.caps_layer(content2) output = self.dense_layer(content3) return output <normalization>
df_test = imput_nan_values(df_test,'Fare','median') df_test['Age'] = df_test['Age'].fillna(value=1000) name_row = df_test['Name'].copy() name_row = pd.DataFrame(name_row.str.split(', ',1 ).tolist() , columns = ['Last name', 'Name']) name_row = name_row['Name'].copy() name_row = pd.DataFrame(name_row.str.split('.',1 ).tolist() ,columns=["Title","Name"]) name_row = name_row['Title'].copy() titles = name_row.tolist() for i in range(len(titles)) : title = titles[i] if title != 'Master' and title != 'Miss' and title != 'Mr' and title !='Mrs': titles[i] = 'Other' name_row = pd.DataFrame(titles, columns=['Title']) df_test['Title'] = name_row.copy() test_df = df_test.copy() test_df = pd.DataFrame([df_test['Age'].tolist() , df_test['Title'].tolist() ] ).transpose() test_df.columns = ['Age','Title'] test_df_list = test_df.values for i in range(len(test_df_list)) : age = test_df_list[i][0] title = test_df_list[i][1] if age == 1000: if title == 'Master': test_df_list[i][0] = 5.19 elif title == 'Miss': test_df_list[i][0] = 21.87 elif title == 'Mr': test_df_list[i][0] = 32.18 elif title == 'Mrs': test_df_list[i][0] = 35.48 else: test_df_list[i][0] = 42.81 df_test['Age'] = test_df['Age'].copy() get_missing_data_table(df_test )
Titanic - Machine Learning from Disaster
1,266,101
class Attention(nn.Module): def __init__(self, feature_dim, step_dim, bias=True, **kwargs): super(Attention, self ).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.step_dim = step_dim self.features_dim = 0 weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(step_dim)) def forward(self, x, mask=None): feature_dim = self.feature_dim step_dim = self.step_dim eij = torch.mm( x.contiguous().view(-1, feature_dim), self.weight ).view(-1, step_dim) if self.bias: eij = eij + self.b eij = torch.tanh(eij) a = torch.exp(eij) if mask is not None: a = a * mask a = a / torch.sum(a, 1, keepdim=True)+ 1e-10 weighted_input = x * torch.unsqueeze(a, -1) return torch.sum(weighted_input, 1) class NeuralNet(nn.Module): def __init__(self): super(NeuralNet, self ).__init__() fc_layer = 16 fc_layer1 = 16 self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = nn.Dropout2d(0.1) self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True) self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm_attention = Attention(hidden_size * 2, maxlen) self.gru_attention = Attention(hidden_size * 2, maxlen) self.linear = nn.Linear(hidden_size*8+3, fc_layer1) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.1) self.fc = nn.Linear(fc_layer**2,fc_layer) self.out = nn.Linear(fc_layer, 1) self.lincaps = nn.Linear(Num_capsule * Dim_capsule, 1) self.caps_layer = Caps_Layer() def forward(self, x): h_embedding = self.embedding(x[0]) h_embedding = torch.squeeze( self.embedding_dropout(torch.unsqueeze(h_embedding, 0))) h_lstm, _ = self.lstm(h_embedding) h_gru, _ = 
self.gru(h_lstm) content3 = self.caps_layer(h_gru) content3 = self.dropout(content3) batch_size = content3.size(0) content3 = content3.view(batch_size, -1) content3 = self.relu(self.lincaps(content3)) h_lstm_atten = self.lstm_attention(h_lstm) h_gru_atten = self.gru_attention(h_gru) avg_pool = torch.mean(h_gru, 1) max_pool, _ = torch.max(h_gru, 1) f = torch.tensor(x[1], dtype=torch.float ).cuda() conc = torch.cat(( h_lstm_atten, h_gru_atten,content3, avg_pool, max_pool,f), 1) conc = self.relu(self.linear(conc)) conc = self.dropout(conc) out = self.out(conc) return out<categorify>
df_test = df_test.drop('Cabin', axis='columns') df_test['Family Size'] = df_test['SibSp'] + df_test['Parch'] df_test = df_test.drop('SibSp', axis='columns') df_test = df_test.drop('Parch', axis='columns') df_test = df_test.drop('Name', axis='columns') df_test = df_test.drop('Ticket', axis='columns') df_test = df_test.drop('PassengerId', axis='columns') df_test['Age'] = df_test['Age'].astype('float64') df_test = transform_dummy_variables(df_test,['Sex','Pclass','Embarked','Title']) df_test.head(5 )
Titanic - Machine Learning from Disaster
1,266,101
<define_variables><EOS>
X_test = df_test.values sc = StandardScaler() X_test = sc.fit_transform(X_test) pred = classifier.predict(X_test) test_dataset = pd.read_csv('.. /input/test.csv') ps_id = test_dataset.iloc[:,0].values d = {'PassengerId':ps_id, 'Survived':pred} df = pd.DataFrame(data=d) df = df.set_index('PassengerId') df.to_csv('predictions.csv') df.head(15 )
Titanic - Machine Learning from Disaster
9,687,592
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<data_type_conversions>
import numpy as np import pandas as pd import seaborn as sns
Titanic - Machine Learning from Disaster
9,687,592
for i,(train_idx, valid_idx)in enumerate(splits): x_train = np.array(x_train) y_train = np.array(y_train) features = np.array(features) x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() kfold_X_features = features[train_idx.astype(int)] kfold_X_valid_features = features[valid_idx.astype(int)] x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() model = NeuralNet() model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum') step_size = 300 base_lr, max_lr = 0.001, 0.003 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr) scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size=step_size, mode='exp_range', gamma=0.99994) train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train = MyDataset(train) valid = MyDataset(valid) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') for epoch in range(n_epochs): start_time = time.time() model.train() avg_loss = 0. for i,(x_batch, y_batch, index)in enumerate(train_loader): f = kfold_X_features[index] y_pred = model([x_batch,f]) if scheduler: scheduler.batch_step() loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(( len(df_test))) avg_val_loss = 0. 
for i,(x_batch, y_batch, index)in enumerate(valid_loader): f = kfold_X_valid_features[index] y_pred = model([x_batch,f] ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time)) avg_losses_f.append(avg_loss) avg_val_losses_f.append(avg_val_loss) for i,(x_batch,)in enumerate(test_loader): f = test_features[i * batch_size:(i+1)* batch_size] y_pred = model([x_batch,f] ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits) print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f),np.average(avg_val_losses_f))) <compute_test_metric>
train_raw_data=pd.read_csv('.. /input/titanic/train.csv') test_raw_data=pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
9,687,592
def bestThresshold(y_train,train_preds): tmp = [0,0,0] delta = 0 for tmp[0] in tqdm(np.arange(0.1, 0.501, 0.01)) : tmp[1] = f1_score(y_train, np.array(train_preds)>tmp[0]) if tmp[1] > tmp[2]: delta = tmp[0] tmp[2] = tmp[1] print('best threshold is {:.4f} with F1 score: {:.4f}'.format(delta, tmp[2])) return delta delta = bestThresshold(y_train,train_preds )<save_to_csv>
trainrow=train_raw_data.shape[0] testrow=test_raw_data.shape[0] y_train=train_raw_data['Survived'].copy() train_raw_data=train_raw_data.drop(['Survived'],1 )
Titanic - Machine Learning from Disaster
9,687,592
submission = df_test[['qid']].copy() submission['prediction'] =(test_preds > delta ).astype(int) submission.to_csv('submission.csv', index=False )<import_modules>
combine=pd.concat([train_raw_data,test_raw_data]) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
from sklearn.model_selection import GridSearchCV,StratifiedKFold from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import cross_val_score,train_test_split from scipy import stats from sklearn import metrics from keras.models import Sequential from keras.layers import Dense from keras.preprocessing.text import Tokenizer import string,re from collections import Counter import nltk from nltk.corpus import stopwords import spacy from spacy.lang.en.stop_words import STOP_WORDS from spacy.lang.en import English<import_modules>
combine.isnull().sum()
Titanic - Machine Learning from Disaster
9,687,592
import time from tqdm import tqdm import math from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D,CuDNNLSTM from keras.layers import Bidirectional, GlobalMaxPool1D from keras.models import Model from keras import initializers, regularizers, constraints, optimizers, layers from sklearn import model_selection as ms from sklearn.model_selection import KFold,StratifiedKFold from sklearn.feature_extraction.text import CountVectorizer from sklearn.svm import LinearSVC from keras.layers import Input, Dense, Embedding, concatenate from keras.layers import CuDNNGRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D from keras.preprocessing import text, sequence from keras.layers import Reshape, Flatten, Concatenate, Dropout, SpatialDropout1D from keras.optimizers import Adam from keras.models import Model from keras import backend as K from keras.engine.topology import Layer from keras import initializers, regularizers, constraints, optimizers, layers from keras.layers import * from keras.callbacks import *<load_from_csv>
combine['Embarked']=combine['Embarked'].fillna(combine['Embarked'].value_counts().index[0] )
Titanic - Machine Learning from Disaster
9,687,592
df_train = pd.read_csv(".. /input/train.csv") df_test = pd.read_csv(".. /input/test.csv") print("train data shape --",df_train.shape) print("test data shape --",df_test.shape )<feature_engineering>
combine['Cabin']=combine['Cabin'].fillna('U') combine['Cabin'].value_counts() combine['Cabin']=combine['Cabin'].astype(str ).str[0] combine.head()
Titanic - Machine Learning from Disaster
9,687,592
df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('.',' fullstop ')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('?',' endofquestion ')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace(',',' comma ')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('!',' exclamationmark ')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('-',' hyphen ')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('/',' backslash '))<feature_engineering>
combine.loc[combine['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
9,687,592
df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('fullstop','.')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('endofquestion','?')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('comma',',')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('exclamationmark','!')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('hyphen','-')) df_train["question_text"] = df_train["question_text"].apply(lambda x: x.replace('backslash','/'))<feature_engineering>
combine['Fare']=combine['Fare'].fillna(combine.loc[(combine['Pclass']==3)&(combine['Sex']=="male")&(combine['Age']<65)&(combine['Age']>55)].dropna() ['Fare'].mean() )
Titanic - Machine Learning from Disaster
9,687,592
df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('.',' fullstop ')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('?',' endofquestion ')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace(',',' comma ')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('!',' exclamationmark ')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('-',' hyphen ')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('/',' backslash '))<feature_engineering>
passengerids=test_raw_data['PassengerId'] combine=combine.drop(['PassengerId','Ticket'],1 )
Titanic - Machine Learning from Disaster
9,687,592
df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('fullstop','.')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('endofquestion','?')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('comma',',')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('exclamationmark','!')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('hyphen','-')) df_test["question_text"] = df_test["question_text"].apply(lambda x: x.replace('backslash','/'))<concatenate>
combine['familysize']=combine['SibSp']+combine['Parch']+1 combine.head()
Titanic - Machine Learning from Disaster
9,687,592
df_combined = pd.concat([df_train,df_test],axis=0) print("combined shape ",df_combined.shape )<define_variables>
combine['Title'] = combine.Name.str.extract('([A-Za-z]+)\.', expand=False) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
embed_size = 300 max_features = 60000 maxlen = 60 total_X = df_combined["question_text"].values<feature_engineering>
combine['Title'].value_counts()
Titanic - Machine Learning from Disaster
9,687,592
tokenizer = Tokenizer(num_words=max_features,filters='" ',) tokenizer.fit_on_texts(list(total_X))<count_values>
combine=combine.drop(['Name'],1) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
WORDS = tokenizer.word_counts print(len(WORDS))<prepare_x_and_y>
combine=combine.drop(['SibSp','Parch'],1) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
train_X = df_train["question_text"].values test_X = df_test["question_text"].values<string_transform>
combine['Sex']=combine['Sex'].map({'male':0,'female':1}) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X )<prepare_x_and_y>
for i in range(0,2): for j in range(0,3): print(i,j+1) temp_dataset=combine[(combine['Sex']==i)&(combine['Pclass']==j+1)]['Age'].dropna() print(temp_dataset) combine.loc[(combine.Age.isnull())&(combine.Sex==i)&(combine.Pclass==j+1),'Age']=int(temp_dataset.median() )
Titanic - Machine Learning from Disaster
9,687,592
train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = df_train['target'].values<categorify>
combine.isnull().sum()
Titanic - Machine Learning from Disaster
9,687,592
def get_embeddings(embedtype): if embedtype is "glove": EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' elif embedtype is "fastext": EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' elif embedtype is "paragram": EMBEDDING_FILE = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE,encoding="latin")if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() vocab_size = all_embs.shape[0] embed_size = all_embs.shape[1] word_index = tokenizer.word_index print("embed size-->",embed_size) print("total words in embeddings-->",vocab_size) print("total words in data-->",len(word_index)) nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) count_common_words =0 for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector count_common_words = count_common_words+1 print(count_common_words," common words fount in ",embedtype) print("{0}% common words found in {1}".format(( count_common_words*100/len(word_index)) ,embedtype)) return embedding_matrix <choose_model_class>
combine_checkpoint=combine.copy() combine.head()
Titanic - Machine Learning from Disaster
9,687,592
embedding_glove = get_embeddings(embedtype="glove" )<choose_model_class>
combine['Age_Band']=pd.cut(combine['Age'],5) combine['Age_Band'].unique()
Titanic - Machine Learning from Disaster
9,687,592
embedding_paragram = get_embeddings(embedtype="paragram" )<train_model>
combine.loc[(combine['Age']<=16.136),'Age']=1 combine.loc[(combine['Age']>16.136)&(combine['Age']<=32.102),'Age']=2 combine.loc[(combine['Age']>32.102)&(combine['Age']<=48.068),'Age']=3 combine.loc[(combine['Age']>48.068)&(combine['Age']<=64.034),'Age']=4 combine.loc[(combine['Age']>64.034)&(combine['Age']<=80.) ,'Age']=5 combine['Age'].unique()
Titanic - Machine Learning from Disaster
9,687,592
mean_gl_par_embedding = np.mean([embedding_glove,embedding_paragram],axis=0) print("mean glove paragram embedding shape--> ",mean_gl_par_embedding.shape )<set_options>
combine=combine.drop(['Age_Band'],1 )
Titanic - Machine Learning from Disaster
9,687,592
class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self ).__init__(**kwargs) def build(self, input_shape): assert len(input_shape)== 3 self.W = self.add_weight(( input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight(( input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x,(-1, features_dim)) , K.reshape(self.W,(features_dim, 1))),(-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True)+ K.epsilon() , K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim<choose_model_class>
combine['Fare_Band']=pd.cut(combine['Fare'],3) combine['Fare_Band'].unique()
Titanic - Machine Learning from Disaster
9,687,592
class CyclicLR(Callback): def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'): super(CyclicLR, self ).__init__() self.base_lr = base_lr self.max_lr = max_lr self.step_size = step_size self.mode = mode self.gamma = gamma if scale_fn == None: if self.mode == 'triangular': self.scale_fn = lambda x: 1. self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = lambda x: 1/(2.**(x-1)) self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = lambda x: gamma**(x) self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.clr_iterations = 0. self.trn_iterations = 0. self.history = {} self._reset() def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None): if new_base_lr != None: self.base_lr = new_base_lr if new_max_lr != None: self.max_lr = new_max_lr if new_step_size != None: self.step_size = new_step_size self.clr_iterations = 0. def clr(self): cycle = np.floor(1+self.clr_iterations/(2*self.step_size)) x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1) if self.scale_mode == 'cycle': return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(cycle) else: return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(self.clr_iterations) def on_train_begin(self, logs={}): logs = logs or {} if self.clr_iterations == 0: K.set_value(self.model.optimizer.lr, self.base_lr) else: K.set_value(self.model.optimizer.lr, self.clr()) def on_batch_end(self, epoch, logs=None): logs = logs or {} self.trn_iterations += 1 self.clr_iterations += 1 self.history.setdefault('lr', [] ).append(K.get_value(self.model.optimizer.lr)) self.history.setdefault('iterations', [] ).append(self.trn_iterations) for k, v in logs.items() : self.history.setdefault(k, [] ).append(v) K.set_value(self.model.optimizer.lr, self.clr() )<choose_model_class>
combine.loc[(combine['Fare']<=170.776),'Fare']=1 combine.loc[(combine['Fare']>170.776)&(combine['Fare']<=314.553),'Fare']=2 combine.loc[(combine['Fare']>314.553)&(combine['Fare']<=513),'Fare']=3 combine=combine.drop(['Fare_Band'],1 )
Titanic - Machine Learning from Disaster
9,687,592
def build_model() : inp = Input(shape=(maxlen,)) x = Embedding(max_features, embed_size, weights=[mean_gl_par_embedding],trainable=False )(inp) x = SpatialDropout1D(rate=0.1 )(x) x1 = Bidirectional(CuDNNGRU(200, return_sequences=True))(x) x2 = Bidirectional(CuDNNGRU(128, return_sequences=True))(x) atten_1 = Attention(maxlen )(x1) atten_2 = Attention(maxlen )(x2) x = concatenate([atten_1,atten_2]) x = Dense(128, activation="relu" )(x) x = Dropout(0.2 )(x) x = BatchNormalization()(x) x = Dense(1, activation="sigmoid" )(x) model = Model(inputs=inp, outputs=x) model.compile(loss='binary_crossentropy', optimizer=Adam() ,) return model<compute_test_metric>
combine['Fare'].value_counts()
Titanic - Machine Learning from Disaster
9,687,592
def f1_smart(y_true, y_pred): args = np.argsort(y_pred) tp = y_true.sum() fs =(tp - np.cumsum(y_true[args[:-1]])) / np.arange(y_true.shape[0] + tp - 1, tp, -1) res_idx = np.argmax(fs) return 2 * fs[res_idx],(y_pred[args[res_idx]] + y_pred[args[res_idx + 1]])/ 2<split>
combine=pd.get_dummies(columns=['Pclass','Sex','Cabin','Embarked','Title','Age','Fare'],data=combine) combine.head()
Titanic - Machine Learning from Disaster
9,687,592
kfold = StratifiedKFold(n_splits=5, random_state=1990, shuffle=True) bestscore = [] y_test = np.zeros(( test_X.shape[0],)) filepath="weights_best_mean.h5" for i,(train_index, valid_index)in enumerate(kfold.split(train_X, train_y)) : X_train, X_val, Y_train, Y_val = train_X[train_index], train_X[valid_index], train_y[train_index], train_y[valid_index] checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min') reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.6, patience=1, min_lr=0.0001, verbose=2) earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, verbose=2, mode='auto') callbacks = [clr,] model = build_model() if i == 0:print(model.summary()) model.fit(X_train, Y_train, batch_size=512, epochs=2, validation_data=(X_val, Y_val), verbose=1, callbacks=[checkpoint,clr,]) model.load_weights(filepath) y_pred = model.predict([X_val], batch_size=1024, verbose=1) y_test += np.squeeze(model.predict([test_X], batch_size=1024, verbose=2)) /5 f1, threshold = f1_smart(np.squeeze(Y_val), np.squeeze(y_pred)) print('Optimal F1: {:.4f} at threshold: {:.4f}'.format(f1, threshold)) bestscore.append(threshold )<compute_test_metric>
x_train=combine.iloc[:trainrow] x_test=combine.iloc[trainrow:]
Titanic - Machine Learning from Disaster
9,687,592
print("mean threshold--> ",np.mean(bestscore))<save_to_csv>
from sklearn.preprocessing import StandardScaler
Titanic - Machine Learning from Disaster
9,687,592
print(y_test.shape) pred_test_y =(y_test>np.mean(bestscore)).astype(int) out_df = pd.DataFrame({"qid":df_test["qid"].values}) out_df['prediction'] = pred_test_y out_df.to_csv("submission.csv", index=False )<import_modules>
scaler=StandardScaler() scaler.fit(x_train) x_scaled_train=scaler.transform(x_train) x_scaled_train
Titanic - Machine Learning from Disaster
9,687,592
tqdm.pandas(desc='Progress') <set_options>
x_scaled_test=scaler.transform(x_test) x_scaled_test
Titanic - Machine Learning from Disaster
9,687,592
def seed_everything(seed=1029): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything() SEED=12345<define_variables>
reg=LogisticRegression() reg.fit(x_scaled_train,y_train) print(reg.score(x_scaled_train,y_train)) y_pred=reg.predict(x_scaled_test) y_pred
Titanic - Machine Learning from Disaster
9,687,592
embed_size = 300 max_features = 120000 maxlen = 80 batch_size = 256 n_epochs = 5 n_splits = 5 <train_model>
xgb=XGBClassifier() xgb.fit(x_scaled_train,y_train,early_stopping_rounds=5, eval_set=[(x_scaled_train, y_train)], verbose=False) print(xgb.score(x_scaled_train,y_train)) y_pred=xgb.predict(x_scaled_test )
Titanic - Machine Learning from Disaster
9,687,592
token = Tokenizer() token.fit_on_texts(["Let us learn on a example"]) print(token.texts_to_sequences(["Let us learn on a example"])) print(token.texts_to_sequences(["Let us hopefully learn on a example"]))<drop_column>
rfc=RandomForestClassifier(random_state=4,n_estimators=500,warm_start=True,max_depth=6,min_samples_leaf=2,max_features='sqrt') rfc.fit(x_scaled_train,y_train) print(rfc.score(x_scaled_train,y_train)) y_pred=rfc.predict(x_scaled_test )
Titanic - Machine Learning from Disaster
9,687,592
del token<load_from_csv>
submission = pd.DataFrame({ "PassengerId": passengerids, "Survived": y_pred }) submission
Titanic - Machine Learning from Disaster
9,687,592
df_train = pd.read_csv(".. /input/quora-insincere-questions-classification/train.csv") df_test = pd.read_csv(".. /input/quora-insincere-questions-classification/test.csv") df = pd.concat([df_train ,df_test],sort=True )<feature_engineering>
submission.to_csv('submission1.csv', index=False )
Titanic - Machine Learning from Disaster
1,472,711
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab def known_contractions(embed): known = [] for contract in contraction_mapping: if contract in embed: known.append(contract) return known def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x def unknown_punct(embed, punct): unknown = '' for p in punct: if p not in embed: unknown += p unknown += ' ' return unknown def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def add_lower(embedding, vocab): count = 0 for word in vocab: if word in embedding and word.lower() not in embedding: embedding[word.lower() ] = embedding[word] count += 1 print(f"Added {count} words to embedding" )<define_variables>
print(os.listdir(".. /input")) warnings.filterwarnings('ignore') plt.rcParams['figure.figsize'] =(16,9) sns.set_palette('gist_earth' )
Titanic - Machine Learning from Disaster
1,472,711
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", 
"shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 
'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text )<feature_engineering>
df_train = pd.read_csv('.. /input/train.csv') df_test = pd.read_csv('.. /input/test.csv') full = pd.concat([df_train, df_test], axis = 0, sort=True) full.set_index('PassengerId', drop = False, inplace=True) train = full[:891] display(full.head(3)) print(f"Dataset contains {full.shape[0]} records, with {full.shape[1]} variables." )
Titanic - Machine Learning from Disaster
1,472,711
def add_features(df): df['question_text'] = df['question_text'].progress_apply(lambda x:str(x)) df['total_length'] = df['question_text'].progress_apply(len) df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_words'] = df.question_text.str.count('\S+') df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df<load_from_csv>
def parse_Cabin(cabin): if type(cabin)== str: m = re.search(r'([A-Z])+', cabin) return m.group(1) else: return 'X' full['Cabin_short'] = full['Cabin'].map(parse_Cabin )
Titanic - Machine Learning from Disaster
1,472,711
def load_and_prec() : train_df = pd.read_csv(".. /input/quora-insincere-questions-classification/train.csv") test_df = pd.read_csv(".. /input/quora-insincere-questions-classification/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ train = add_features(train_df) test = add_features(test_df) features = train[['caps_vs_length', 'words_vs_unique']].fillna(0) test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0) ss = StandardScaler() ss.fit(np.vstack(( features, test_features))) features = ss.transform(features) test_features = ss.transform(test_features) tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(123) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] features = features[trn_idx] return train_X, test_X, train_y, features, test_features, tokenizer.word_index <split>
dict_fare_by_Pclass = dict(full.groupby('Pclass' ).Fare.mean()) missing_fare = full.loc[full.Fare.isnull() ,'Pclass'].map(dict_fare_by_Pclass) full.loc[full.Fare.isnull() ,'Fare'] = missing_fare
Titanic - Machine Learning from Disaster
1,472,711
x_train, x_test, y_train, features, test_features, word_index = load_and_prec()<save_model>
features = pd.DataFrame() features['Pclass'] = full['Pclass'] features['Fare'] = full['Fare'] features['Sex'] = full['Sex']
Titanic - Machine Learning from Disaster
1,472,711
np.save("x_train",x_train) np.save("x_test",x_test) np.save("y_train",y_train) np.save("features",features) np.save("test_features",test_features) np.save("word_index.npy",word_index )<load_pretrained>
features['A5'] =(full['Ticket_short'] == 'A5' ).astype(int) features['PC'] =(full['Ticket_short'] == 'PC' ).astype(int )
Titanic - Machine Learning from Disaster
1,472,711
x_train = np.load("x_train.npy") x_test = np.load("x_test.npy") y_train = np.load("y_train.npy") features = np.load("features.npy") test_features = np.load("test_features.npy") word_index = np.load("word_index.npy" ).item()<load_from_csv>
dict_Title = {"Capt": "Officer", "Col": "Officer", "Major": "Officer", "Jonkheer": "Royalty", "Don": "Royalty", "Sir" : "Royalty", "Dr": "Officer", "Rev": "Officer", "the Countess":"Royalty", "Dona": "Royalty", "Mme": "Mrs", "Mlle": "Miss", "Ms": "Mrs", "Mr" : "Mr", "Mrs" : "Mrs", "Miss" : "Miss", "Master" : "Master", "Lady" : "Royalty" } title = title.map(dict_Title) plt.figure(figsize =(14,6)) sns.violinplot(x = title, y = full['Age']);
Titanic - Machine Learning from Disaster
1,472,711
with open(".. /input/glove-wiki-twitter2550/glove.twitter.27B.50d.txt")as f: lines = f.readlines() lines = [line.rstrip().split() for line in lines] print(len(lines)) print(len(lines[0])) print(lines[99][0]) print(lines[99][1:]) print(len(lines[99][1:]))<set_options>
df_title = pd.DataFrame(title ).join(full[['Age','Survived']]) dict_age = df_title.groupby('Name' ).Age.mean() idx = full.Age.isnull() full.loc[idx,'Age'] = df_title.loc[idx, 'Name'].map(dict_age )
Titanic - Machine Learning from Disaster
1,472,711
del lines gc.collect()<statistical_test>
features['Title'] = df_title['Name'] features['Child'] =(full['Age'] <= 14 ).astype(int )
Titanic - Machine Learning from Disaster
1,472,711
def load_glove(word_index): EMBEDDING_FILE = '.. /input/quora-insincere-questions-classification/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/quora-insincere-questions-classification/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/quora-insincere-questions-classification/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<normalization>
def parse_surname(name):
    """Extract the family name from the 'Surname, Title. Given' format."""
    surname, _, _ = name.partition(',')
    return surname

# Family-level bookkeeping: nuclear-family size plus surname statistics.
family = pd.DataFrame(full[['Parch', 'SibSp', 'Ticket']])
family['Family_size'] = family.Parch + family.SibSp + 1
family['Surname'] = full.Name.map(parse_surname)

# Per-surname member count and a dense integer code per surname.
dict_scount = dict(family.groupby('Surname').Family_size.count())
dict_scode = {surname: code for code, surname in enumerate(dict_scount)}
family['Surname_code'] = family['Surname'].map(dict_scode)
family['Surname_count'] = family['Surname'].map(dict_scount)
display(full[family.Surname == 'Smith'])
Titanic - Machine Learning from Disaster
1,472,711
seed_everything() glove_embeddings = load_glove(word_index) paragram_embeddings = load_para(word_index) fasttext_embeddings = load_fasttext(word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings, fasttext_embeddings], axis=0) del glove_embeddings, paragram_embeddings, fasttext_embeddings gc.collect() np.shape(embedding_matrix )<split>
surname2chk = family[family['Family_size'] < family['Surname_count']].Surname.unique() family['Surname_adj'] = family['Surname'] for s in surname2chk: family_regroup = family[family['Surname'] == s] fam_code_dict = tick2fam_gen(family_regroup) for idx in family_regroup.index: curr_ticket = full.loc[idx].Ticket fam_code = fam_code_dict[curr_ticket] if family_regroup.loc[idx, 'Family_size'] == 1: if family_regroup.Ticket.value_counts() [curr_ticket] > 1: family.loc[idx, 'Surname_adj'] = s + '-hidfam' + fam_code else: family.loc[idx, 'Surname_adj'] = s + '-single' + fam_code else: family.loc[idx, 'Surname_adj'] = s + '-fam' + fam_code display(family[family.Surname == 'Smith'] )
Titanic - Machine Learning from Disaster
1,472,711
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED ).split(x_train, y_train)) splits[:3]<choose_model_class>
dict_fcount = dict(family.groupby('Surname_adj' ).Family_size.count()) dict_fcode = dict(zip(dict_fcount.keys() , range(len(dict_fcount)))) family['Family_code'] = family['Surname_adj'].map(dict_fcode) family['Family_count'] = family['Surname_adj'].map(dict_fcount) print(f"No.of Family Before Regrouping: {len(family.Surname_code.unique())}") print(f"No.of Family After Regrouping: {len(family.Family_code.unique())}" )
Titanic - Machine Learning from Disaster
1,472,711
class CyclicLR(object):
    """Cyclical learning-rate scheduler (Smith, 2015, arXiv:1506.01186).

    Oscillates each param group's lr between ``base_lr`` and ``max_lr`` in a
    triangular wave with half-period ``step_size`` batches.  Modes:
      - 'triangular':  constant amplitude
      - 'triangular2': amplitude halved every cycle
      - 'exp_range':   amplitude scaled by gamma**iteration
    A custom ``scale_fn`` overrides the mode.  Call ``batch_step()`` once per
    batch (not once per epoch).

    NOTE(review): this block was garbled in the export — half of the class
    body had fallen outside the class scope; reconstructed here.
    """

    def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,
                 step_size=2000, mode='triangular', gamma=1.,
                 scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer

        # base_lr / max_lr may be scalars or one value per param group.
        if isinstance(base_lr, list) or isinstance(base_lr, tuple):
            if len(base_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} base_lr, got {}".format(
                    len(optimizer.param_groups), len(base_lr)))
            self.base_lrs = list(base_lr)
        else:
            self.base_lrs = [base_lr] * len(optimizer.param_groups)
        if isinstance(max_lr, list) or isinstance(max_lr, tuple):
            if len(max_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} max_lr, got {}".format(
                    len(optimizer.param_groups), len(max_lr)))
            self.max_lrs = list(max_lr)
        else:
            self.max_lrs = [max_lr] * len(optimizer.param_groups)

        self.step_size = step_size
        if mode not in ['triangular', 'triangular2', 'exp_range'] \
                and scale_fn is None:
            raise ValueError('mode is invalid and scale_fn is None')
        self.mode = mode
        self.gamma = gamma

        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode

        # Prime the optimizer's lr for iteration 0, then rewind the counter
        # so the first external batch_step() lands on iteration 0 again.
        self.batch_step(last_batch_iteration + 1)
        self.last_batch_iteration = last_batch_iteration

    def batch_step(self, batch_iteration=None):
        """Advance one batch and write the new lr into every param group."""
        if batch_iteration is None:
            batch_iteration = self.last_batch_iteration + 1
        self.last_batch_iteration = batch_iteration
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def _triangular_scale_fn(self, x):
        return 1.

    def _triangular2_scale_fn(self, x):
        return 1 / (2. ** (x - 1))

    def _exp_range_scale_fn(self, x):
        return self.gamma ** (x)

    def get_lr(self):
        """Return the lr for each param group at self.last_batch_iteration."""
        step_size = float(self.step_size)
        cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))
        # x ramps 1 -> 0 -> 1 over one cycle (0 exactly at the peak).
        x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)
        lrs = []
        param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)
        for param_group, base_lr, max_lr in param_lrs:
            base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))
            if self.scale_mode == 'cycle':
                lr = base_lr + base_height * self.scale_fn(cycle)
            else:
                lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)
            lrs.append(lr)
        return lrs
group = pd.DataFrame(family[['Surname_code','Surname_count','Family_code','Family_count']]) dict_tcount = dict(full.groupby('Ticket' ).PassengerId.count()) dict_tcode = dict(zip(dict_tcount.keys() ,range(len(dict_tcount)))) group['Ticket_code'] = full.Ticket.map(dict_tcode) group['Ticket_count'] = full.Ticket.map(dict_tcount) print(f"No.of Tickets Identified: {len(group['Ticket_code'].unique())}") display(full[(full.Ticket == 'A/4 48871')|(full.Ticket == 'A/4 48873')] )
Titanic - Machine Learning from Disaster
1,472,711
class Attention(nn.Module):
    """Additive soft-attention pooling over the step (time) dimension.

    Scores each timestep with a learned vector, softmax-normalises the
    scores (manually, so an optional 0/1 mask can zero out padded steps)
    and returns the weighted sum of the inputs:
    (batch, step_dim, feature_dim) -> (batch, feature_dim).

    NOTE(review): reconstructed from a garbled one-line export; the dead
    ``features_dim = 0`` attribute (a typo twin of ``feature_dim`` that was
    never read) has been dropped.
    """

    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True  # Keras-style flag kept for parity
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        weight = torch.zeros(feature_dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        # Unnormalised score per timestep: x . w (+ optional bias), squashed.
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight
        ).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        # Manual softmax; the +1e-10 keeps fully-masked rows finite.
        a = a / torch.sum(a, 1, keepdim=True) + 1e-10
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)
def ChainCombineGroups(df, colA, colB):
    """Assign a connected-component id to every row of ``df``.

    Two rows are linked when they share a value in ``colA`` OR in ``colB``;
    components are the transitive closure of that relation (families chained
    to co-travellers through shared tickets).  Ids are dense integers
    numbered in order of each component's first row.

    Returns a numpy int array aligned with ``df``'s row order; ``df`` is
    not modified.

    Rewritten from the original O(n^2) scan-and-concat loop: precomputed
    value -> row-index maps give the same grouping in near-linear time, the
    dead ``pool_A_uniq``/``pool_B_uniq`` work is gone, and an empty
    DataFrame no longer raises (the original crashed with a KeyError on the
    never-created 'Group_' column).
    """
    if df.empty:
        return np.array([], dtype=int)

    # value -> set of row labels holding that value, for each link column.
    # groupby drops NaN keys, matching the original (NaN never links rows).
    members = {
        col: {val: set(idx) for val, idx in df.groupby(col).groups.items()}
        for col in (colA, colB)
    }

    assigned = {}
    group_count = 0
    for start in df.index:
        if start in assigned:
            continue
        # Flood fill over rows reachable through shared colA/colB values.
        frontier = [start]
        component = {start}
        while frontier:
            row = frontier.pop()
            for col in (colA, colB):
                for neighbour in members[col].get(df.at[row, col], ()):
                    if neighbour not in component:
                        component.add(neighbour)
                        frontier.append(neighbour)
        for row in component:
            assigned[row] = group_count
        group_count += 1

    return np.array([assigned[row] for row in df.index])
Titanic - Machine Learning from Disaster
1,472,711
embedding_dim = 300 embedding_path = '.. /save/embedding_matrix.npy' use_pretrained_embedding = True hidden_size = 60 gru_len = hidden_size Routings = 4 Num_capsule = 5 Dim_capsule = 5 dropout_p = 0.25 rate_drop_dense = 0.28 LR = 0.001 T_epsilon = 1e-7 num_classes = 30 class Embed_Layer(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None, embedding_dim=300): super(Embed_Layer, self ).__init__() self.encoder = nn.Embedding(vocab_size + 1, embedding_dim) if use_pretrained_embedding: self.encoder.weight.data.copy_(t.from_numpy(embedding_matrix)) def forward(self, x, dropout_p=0.25): return nn.Dropout(p=dropout_p )(self.encoder(x)) class GRU_Layer(nn.Module): def __init__(self): super(GRU_Layer, self ).__init__() self.gru = nn.GRU(input_size=300, hidden_size=gru_len, bidirectional=True) def init_weights(self): ih =(param.data for name, param in self.named_parameters() if 'weight_ih' in name) hh =(param.data for name, param in self.named_parameters() if 'weight_hh' in name) b =(param.data for name, param in self.named_parameters() if 'bias' in name) for k in ih: nn.init.xavier_uniform_(k) for k in hh: nn.init.orthogonal_(k) for k in b: nn.init.constant_(k, 0) def forward(self, x): return self.gru(x) class Caps_Layer(nn.Module): def __init__(self, input_dim_capsule=gru_len * 2, num_capsule=Num_capsule, dim_capsule=Dim_capsule, \ routings=Routings, kernel_size=(9, 1), share_weights=True, activation='default', **kwargs): super(Caps_Layer, self ).__init__(**kwargs) self.num_capsule = num_capsule self.dim_capsule = dim_capsule self.routings = routings self.kernel_size = kernel_size self.share_weights = share_weights if activation == 'default': self.activation = self.squash else: self.activation = nn.ReLU(inplace=True) if self.share_weights: self.W = nn.Parameter( nn.init.xavier_normal_(t.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule))) else: self.W = nn.Parameter( t.randn(BATCH_SIZE, input_dim_capsule, self.num_capsule * self.dim_capsule)) 
def forward(self, x): if self.share_weights: u_hat_vecs = t.matmul(x, self.W) else: print('add later') batch_size = x.size(0) input_num_capsule = x.size(1) u_hat_vecs = u_hat_vecs.view(( batch_size, input_num_capsule, self.num_capsule, self.dim_capsule)) u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3) b = t.zeros_like(u_hat_vecs[:, :, :, 0]) for i in range(self.routings): b = b.permute(0, 2, 1) c = F.softmax(b, dim=2) c = c.permute(0, 2, 1) b = b.permute(0, 2, 1) outputs = self.activation(t.einsum('bij,bijk->bik',(c, u_hat_vecs))) if i < self.routings - 1: b = t.einsum('bik,bijk->bij',(outputs, u_hat_vecs)) return outputs def squash(self, x, axis=-1): s_squared_norm =(x ** 2 ).sum(axis, keepdim=True) scale = t.sqrt(s_squared_norm + T_epsilon) return x / scale class Capsule_Main(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None): super(Capsule_Main, self ).__init__() self.embed_layer = Embed_Layer(embedding_matrix, vocab_size) self.gru_layer = GRU_Layer() self.gru_layer.init_weights() self.caps_layer = Caps_Layer() self.dense_layer = Dense_Layer() def forward(self, content): content1 = self.embed_layer(content) content2, _ = self.gru_layer( content1) content3 = self.caps_layer(content2) output = self.dense_layer(content3) return output <choose_model_class>
group['Group_code'] = ChainCombineGroups(group, 'Family_code', 'Ticket_code') dict_gcount = dict(group.groupby('Group_code' ).Family_code.count()) group['Group_count'] = group.Group_code.map(dict_gcount) print(f"Family: {len(family['Family_code'].unique())}") print(f"Group: {len(group['Ticket_code'].unique())}") print(f"Combined: {len(group['Group_code'].unique())} ") print('An example of grouping the both friends and family under a same group:') display(pd.concat([full['Ticket'],family[['Surname','Family_code']],group[['Ticket_code','Group_code']]], axis = 1)[group['Group_code'] == 458] )
Titanic - Machine Learning from Disaster
1,472,711
class NeuralNet(nn.Module): def __init__(self): super(NeuralNet, self ).__init__() fc_layer = 16 fc_layer1 = 16 self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = nn.Dropout2d(0.1) self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True) self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm_attention = Attention(hidden_size * 2, maxlen) self.gru_attention = Attention(hidden_size * 2, maxlen) self.bn = nn.BatchNorm1d(16, momentum=0.5) self.linear = nn.Linear(hidden_size*8+3, fc_layer1) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.1) self.fc = nn.Linear(fc_layer**2,fc_layer) self.out = nn.Linear(fc_layer, 1) self.lincaps = nn.Linear(Num_capsule * Dim_capsule, 1) self.caps_layer = Caps_Layer() def forward(self, x): h_embedding = self.embedding(x[0]) h_embedding = torch.squeeze( self.embedding_dropout(torch.unsqueeze(h_embedding, 0))) h_lstm, _ = self.lstm(h_embedding) h_gru, _ = self.gru(h_lstm) content3 = self.caps_layer(h_gru) content3 = self.dropout(content3) batch_size = content3.size(0) content3 = content3.view(batch_size, -1) content3 = self.relu(self.lincaps(content3)) h_lstm_atten = self.lstm_attention(h_lstm) h_gru_atten = self.gru_attention(h_gru) avg_pool = torch.mean(h_gru, 1) max_pool, _ = torch.max(h_gru, 1) f = torch.tensor(x[1], dtype=torch.float ).cuda() conc = torch.cat(( h_lstm_atten, h_gru_atten,content3, avg_pool, max_pool,f), 1) conc = self.relu(self.linear(conc)) conc = self.bn(conc) conc = self.dropout(conc) out = self.out(conc) return out<categorify>
group_final = pd.concat([family[['Surname_code','Surname_count','Family_code','Family_count']], group[['Ticket_code','Ticket_count','Group_code','Group_count']], full['Survived']], axis = 1 )
Titanic - Machine Learning from Disaster
1,472,711
class MyDataset(Dataset):
    """Dataset wrapper that also yields each sample's index.

    Wraps a (data, target) dataset so batches carry the positions needed to
    look up the matching rows of the engineered-features matrix during
    training.

    NOTE(review): reconstructed from a garbled one-line export (a class body
    cannot legally share a line with its header in Python).
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def __getitem__(self, index):
        data, target = self.dataset[index]
        return data, target, index

    def __len__(self):
        return len(self.dataset)
for param in [('Surname_code','Surname_count'), ('Family_code','Family_count'), ('Ticket_code','Ticket_count'), ('Group_code','Group_count')]: n_member_survived_by_gp = group_final.groupby(param[0] ).Survived.sum() n_mem_survived = group_final[param[0]].map(n_member_survived_by_gp) n_mem_survived_adj = n_mem_survived - group_final.Survived.apply(lambda x: 1 if x == 1 else 0) n_member_dead_by_gp = group_final.groupby(param[0] ).Survived.count() - group_final.groupby(param[0] ).Survived.sum() n_mem_dead = group_final[param[0]].map(n_member_dead_by_gp) n_mem_dead_adj = n_mem_dead - group_final.Survived.apply(lambda x: 1 if x == 0 else 0) unknown_factor =(group_final[param[1]] - n_mem_survived_adj - n_mem_dead_adj)/group_final[param[1]] confidence = 1 - unknown_factor key = 'Confidence_member_survived'+'_'+param[0] ratio =(1/group_final[param[1]])*(n_mem_survived_adj - n_mem_dead_adj) group_final[key] = confidence * ratio plt.barh(group_final.corr().Survived[-4:].index, group_final.corr().Survived[-4:]) plt.xlabel('Correlation with Survived'); features['Cf_mem_survived'] = group_final['Confidence_member_survived_Group_code']
Titanic - Machine Learning from Disaster
1,472,711
train_preds = np.zeros(( len(x_train))) test_preds = np.zeros(( len(df_test))) seed_everything() x_test_cuda = torch.tensor(x_test, dtype=torch.long ).cuda() test = torch.utils.data.TensorDataset(x_test_cuda) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) avg_losses_f = [] avg_val_losses_f = [] def sigmoid(x): return 1 /(1 + np.exp(-x)) <data_type_conversions>
features['Parch'] = full['Parch'] features['SibSp'] = full['SibSp'] features['Group_size'] = group['Group_count'] features.head()
Titanic - Machine Learning from Disaster
1,472,711
for i,(train_idx, valid_idx)in enumerate(splits): x_train = np.array(x_train) y_train = np.array(y_train) features = np.array(features) x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() kfold_X_features = features[train_idx.astype(int)] kfold_X_valid_features = features[valid_idx.astype(int)] x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() model = NeuralNet() model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum') step_size = 400 base_lr, max_lr = 0.001, 0.004 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr) scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size=step_size, mode='exp_range', gamma=0.99994) train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train = MyDataset(train) valid = MyDataset(valid) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') for epoch in range(n_epochs): start_time = time.time() model.train() avg_loss = 0. for i,(x_batch, y_batch, index)in enumerate(train_loader): f = kfold_X_features[index] y_pred = model([x_batch,f]) if scheduler: scheduler.batch_step() loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(( len(df_test))) avg_val_loss = 0. 
for i,(x_batch, y_batch, index)in enumerate(valid_loader): f = kfold_X_valid_features[index] y_pred = model([x_batch,f] ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time)) avg_losses_f.append(avg_loss) avg_val_losses_f.append(avg_val_loss) for i,(x_batch,)in enumerate(test_loader): f = test_features[i * batch_size:(i+1)* batch_size] y_pred = model([x_batch,f] ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits) print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f),np.average(avg_val_losses_f))) <compute_test_metric>
scalar = StandardScaler() features_z_transformed = features.copy() continuous = ['Fare'] features_z_transformed[continuous] = scalar.fit_transform(features_z_transformed[continuous]) features_z_transformed.Sex = features_z_transformed.Sex.apply(lambda x: 1 if x == 'male' else 0) features_final = pd.get_dummies(features_z_transformed) encoded = list(features_final.columns) print("{} total features after one-hot encoding.".format(len(encoded))) features_final_train = features_final[:891] features_final_test = features_final[891:]
Titanic - Machine Learning from Disaster
1,472,711
def bestThresshold(y_train, train_preds):
    """Grid-search the decision threshold (0.10-0.50, step 0.01) maximising
    F1 on the out-of-fold predictions; print and return the best one."""
    best_threshold = 0
    best_f1 = 0
    for candidate in tqdm(np.arange(0.1, 0.501, 0.01)):
        score = f1_score(y_train, np.array(train_preds) > candidate)
        if score > best_f1:
            best_threshold = candidate
            best_f1 = score
    print('best threshold is {:.4f} with F1 score: {:.4f}'.format(best_threshold, best_f1))
    return best_threshold
delta = bestThresshold(y_train, train_preds)
X_train, X_test, y_train, y_test = train_test_split(features_final_train, train.Survived, test_size = 0.2, random_state = 0 )
Titanic - Machine Learning from Disaster
1,472,711
# Binarise the averaged fold predictions at the tuned threshold and write
# the competition submission file.
flags = (test_preds > delta).astype(int)
submission = df_test[['qid']].copy()
submission['prediction'] = flags
submission.to_csv('submission.csv', index=False)
clf_A = GradientBoostingClassifier(random_state = 0) clf_B = LogisticRegression(random_state= 0) clf_C = RandomForestClassifier(random_state= 0) samples_100 = len(y_train) samples_10 = int(len(y_train)/2) samples_1 = int(len(y_train)/10) results = {} for clf in [clf_A, clf_B, clf_C]: clf_name = clf.__class__.__name__ results[clf_name] = {} for i, samples in enumerate([samples_1, samples_10, samples_100]): results[clf_name][i] = \ train_predict(clf, samples, X_train, y_train, X_test, y_test )
Titanic - Machine Learning from Disaster
1,472,711
tqdm.pandas()<load_from_csv>
warnings.filterwarnings('ignore') clf = RandomForestClassifier(random_state = 0, oob_score = True) parameters = {'criterion' :['gini'], 'n_estimators' : [350], 'max_depth':[5], 'min_samples_leaf': [4], 'max_leaf_nodes': [10], 'min_impurity_decrease': [0], 'max_features' : [1] } scorer = make_scorer(accuracy_score) grid_obj = GridSearchCV(clf, parameters, scoring = scorer, cv = 10) grid_fit = grid_obj.fit(X_train,y_train) best_clf = grid_fit.best_estimator_ predictions =(clf.fit(X_train, y_train)).predict(X_test) best_predictions = best_clf.predict(X_test) print("Unoptimized model ------") print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))) print("Oob score on testing data: {:.4f}".format(clf.oob_score_)) print(" Optimized Model ------") print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))) print("Final oob score on the testing data: {:.4f}".format(best_clf.oob_score_)) print(" Best Parameters ------") best_clf
Titanic - Machine Learning from Disaster
1,472,711
<set_options><EOS>
final_predict = best_clf.predict(features_final_test) prediction = pd.DataFrame(full[891:].PassengerId) prediction['Survived'] = final_predict.astype('int') prediction.to_csv('predict.csv',index = False )
Titanic - Machine Learning from Disaster
8,119,418
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
titanic = pd.read_csv("/kaggle/input/titanic/train.csv", sep=",") titanic_sub = pd.read_csv("/kaggle/input/titanic/test.csv", sep="," )
Titanic - Machine Learning from Disaster
8,119,418
def threshold_search(y_true, y_proba):
    """Sweep cut-offs 0.00-0.99 (step 0.01) and return the one with the
    best F1 as {'threshold': ..., 'f1': ...}."""
    best = {'threshold': 0, 'f1': 0}
    for cutoff in tqdm([step * 0.01 for step in range(100)]):
        score = f1_score(y_true=y_true, y_pred=y_proba > cutoff)
        if score > best['f1']:
            best = {'threshold': cutoff, 'f1': score}
    return best
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42) for train_index, test_index in split.split(titanic, titanic['Sex']): train_set = titanic.loc[train_index] test_set = titanic.loc[test_index] train_set = train_set.reset_index(drop=True )
Titanic - Machine Learning from Disaster
8,119,418
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); accepts scalars or numpy arrays."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
np.nanmean(train_set['Age'].loc[Title[Title == 'Miss'].index] )
Titanic - Machine Learning from Disaster
8,119,418
embed_size = 300 max_features = 95000 maxlen = 70<define_variables>
np.nanmean(train_set['Age'].loc[Title[Title == 'Mrs'].index] )
Titanic - Machine Learning from Disaster
8,119,418
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: x = x.replace(punct, f' {punct} ') return x<feature_engineering>
np.corrcoef(Family_members, People_on_ticket )
Titanic - Machine Learning from Disaster
8,119,418
train_df["question_text"] = train_df["question_text"].str.lower() test_df["question_text"] = test_df["question_text"].str.lower() train_df["question_text"] = train_df["question_text"].apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) x_train = train_df["question_text"].fillna("_ x_test = test_df["question_text"].fillna("_ tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(x_train)) x_train = tokenizer.texts_to_sequences(x_train) x_test = tokenizer.texts_to_sequences(x_test) x_train = pad_sequences(x_train, maxlen=maxlen) x_test = pad_sequences(x_test, maxlen=maxlen) y_train = train_df['target'].values<statistical_test>
sum(People_on_ticket<Family_members+1 )
Titanic - Machine Learning from Disaster
8,119,418
def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300] embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.005838499,0.48782197 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.0053247833,0.49346462 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<normalization>
sum(People_on_ticket>Family_members+1 )
Titanic - Machine Learning from Disaster
8,119,418
seed_everything() glove_embeddings = load_glove(tokenizer.word_index) paragram_embeddings = load_para(tokenizer.word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0) np.shape(embedding_matrix )<split>
print("Average age of lone passenger: ", round(np.nanmean(train_set_lone['Age']),0), sep="") print("Number of missing Age values: ", sum(np.isnan(train_set_lone['Age'])) , " - ", round(( sum(np.isnan(train_set_lone['Age'])) *100/len(train_set_lone['Age'])) ,2), "% of train_set_lone.", sep="") print("The number of missing values is ", round(sum(np.isnan(train_set_lone['Age'])) *100/sum(np.isnan(train_set['Age'])) ,2), "% of entire missing values in Age variable in train_set.", sep="" )
Titanic - Machine Learning from Disaster
8,119,418
# Precompute 5-fold stratified CV (train_idx, valid_idx) pairs over the
# padded training sequences; fixed random_state for reproducibility.
splits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=10).split(x_train, y_train))
# Drop the row(s) carrying the single maximum Fare as an outlier, then
# reindex so positional indices stay contiguous.
outlier_ind = train_set.loc[train_set['Fare'] == max(train_set['Fare'])].index
train_set = train_set.drop(outlier_ind)
train_set = train_set.reset_index(drop=True)
Titanic - Machine Learning from Disaster
8,119,418
class Attention(nn.Module):
    """Additive attention pooling over the time axis.

    Scores each of the ``step_dim`` timesteps of a
    (batch, step_dim, feature_dim) input with a learned vector,
    normalises the scores, and returns the weighted sum over time with
    shape (batch, feature_dim).

    Parameters
    ----------
    feature_dim : int
        Size of the last input dimension.
    step_dim : int
        Number of timesteps (the axis that is pooled away).
    bias : bool
        If True, add a learned per-timestep bias to the raw scores.
    """

    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0  # kept for compatibility with callers that read it
        weight = torch.zeros(feature_dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        # Raw per-timestep scores: (batch*step, feature) @ (feature, 1),
        # reshaped back to (batch, step).
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight
        ).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        # BUG FIX: the epsilon must sit inside the denominator.  The
        # original `a / torch.sum(...) + 1e-10` evaluated 0/0 -> NaN
        # whenever a mask zeroed out every timestep; dividing by
        # (sum + 1e-10) degrades gracefully to an all-zero weighting.
        a = a / (torch.sum(a, 1, keepdim=True) + 1e-10)
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)
print("Minimum price for ticket in first class: ", min(train_set.loc[train_set['Pclass']==1]['Fare']), sep="" )
Titanic - Machine Learning from Disaster
8,119,418
class NeuralNet(nn.Module):
    """BiLSTM -> BiGRU text classifier with attention and pooling heads.

    Relies on module-level globals defined elsewhere in this file:
    ``max_features``, ``embed_size``, ``embedding_matrix`` (pretrained
    word vectors, frozen), ``maxlen`` and the ``Attention`` layer.
    Returns raw logits (pair with BCEWithLogitsLoss).
    """

    def __init__(self):
        super(NeuralNet, self).__init__()
        hidden_size = 40
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False  # embeddings stay frozen during training
        self.embedding_dropout = nn.Dropout2d(0.1)
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm_attention = Attention(hidden_size * 2, maxlen)
        self.gru_attention = Attention(hidden_size * 2, maxlen)
        # 320 = four concatenated 80-dim heads (2 attentions + avg/max pool).
        self.linear = nn.Linear(320, 16)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.out = nn.Linear(16, 1)

    def forward(self, x):
        h_embedding = self.embedding(x)
        # Dropout2d over an unsqueezed batch axis acts as spatial
        # (per-channel) dropout on the embeddings.
        h_embedding = torch.squeeze(
            self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
        h_lstm, _ = self.lstm(h_embedding)
        h_gru, _ = self.gru(h_lstm)
        h_lstm_atten = self.lstm_attention(h_lstm)
        h_gru_atten = self.gru_attention(h_gru)
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)
        conc = torch.cat((h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        out = self.out(conc)  # raw logits, shape (batch, 1)
        return out
first_class = train_set.loc[train_set['Pclass']==1] second_class = train_set.loc[train_set['Pclass']==2] third_class = train_set.loc[train_set['Pclass']==3] p_class = list(train_set['Pclass']) fare = list(train_set['Fare']) Fare_class = list() for i in range(0,len(p_class)) : if(p_class[i] == 1): if(fare[i] < statistics.mean(second_class['Fare'])) : Fare_class.append("cheap") else: Fare_class.append("normal") elif(p_class[i] == 2): if(fare[i] < statistics.mean(third_class['Fare'])) : Fare_class.append("cheap") elif(fare[i] > statistics.mean(first_class['Fare'])) : Fare_class.append("expensive") else: Fare_class.append("normal") else: if(fare[i] > statistics.mean(second_class['Fare'])) : Fare_class.append("expensive") else: Fare_class.append("normal") Fare_class = pd.Series(Fare_class) sns.countplot(Fare_class, hue='Survived', data=train_set) plt.legend(['No','Yes'], title="Survived", loc="upper right") plt.title("Fare_class: Survived vs Dead") plt.show()
Titanic - Machine Learning from Disaster
8,119,418
# Mini-batch size and number of training epochs per CV fold.
batch_size = 512
n_epochs = 6
train_set_no_cabins = train_set.loc[np.where(pd.isnull(train_set['Cabin'])) ] train_set_cabins = train_set.loc[~train_set.index.isin(train_set_no_cabins.index)] train_set_cabins['Pclass'].value_counts()
Titanic - Machine Learning from Disaster
8,119,418
class CyclicLR(object):
    """Cyclical learning-rate scheduler for a torch Optimizer.

    Oscillates each param group's lr between ``base_lr`` and ``max_lr``
    with a triangular wave of half-period ``step_size`` batches.
    ``mode`` selects the amplitude scaling ('triangular': constant,
    'triangular2': halves each cycle, 'exp_range': decays by
    ``gamma`` per iteration), or a custom ``scale_fn`` may be given.

    NOTE(review): expects ``Optimizer`` (presumably
    torch.optim.Optimizer) to be imported at module level elsewhere in
    this file — confirm.
    """

    def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,
                 step_size=2000, mode='triangular', gamma=1.,
                 scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        # base_lr / max_lr may be scalars (broadcast to every param
        # group) or per-group sequences of matching length.
        if isinstance(base_lr, list) or isinstance(base_lr, tuple):
            if len(base_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} base_lr, got {}".format(
                    len(optimizer.param_groups), len(base_lr)))
            self.base_lrs = list(base_lr)
        else:
            self.base_lrs = [base_lr] * len(optimizer.param_groups)
        if isinstance(max_lr, list) or isinstance(max_lr, tuple):
            if len(max_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} max_lr, got {}".format(
                    len(optimizer.param_groups), len(max_lr)))
            self.max_lrs = list(max_lr)
        else:
            self.max_lrs = [max_lr] * len(optimizer.param_groups)
        self.step_size = step_size
        if mode not in ['triangular', 'triangular2', 'exp_range'] \
                and scale_fn is None:
            raise ValueError('mode is invalid and scale_fn is None')
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        # Prime the optimizer's lrs for the first batch.
        self.batch_step(last_batch_iteration + 1)
        self.last_batch_iteration = last_batch_iteration

    def batch_step(self, batch_iteration=None):
        """Advance one batch and write the new lr into every param group."""
        if batch_iteration is None:
            batch_iteration = self.last_batch_iteration + 1
        self.last_batch_iteration = batch_iteration
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def _triangular_scale_fn(self, x):
        # Constant amplitude every cycle.
        return 1.

    def _triangular2_scale_fn(self, x):
        # Amplitude halves on every cycle.
        return 1 / (2.**(x - 1))

    def _exp_range_scale_fn(self, x):
        # Amplitude decays exponentially per iteration.
        return self.gamma**(x)

    def get_lr(self):
        """Compute the current lr for each param group from the
        triangular wave position and the selected scale function."""
        step_size = float(self.step_size)
        cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))
        # x in [0, 1]: distance from the cycle's peak.
        x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)
        lrs = []
        param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)
        for param_group, base_lr, max_lr in param_lrs:
            base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))
            if self.scale_mode == 'cycle':
                lr = base_lr + base_height * self.scale_fn(cycle)
            else:
                lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)
            lrs.append(lr)
        return lrs
print("Average age of Southampton passenger:", round(np.nanmean(train_set[train_set['Embarked'] == 'S']['Age']))) print("Average age of Queenstown passenger:", round(np.nanmean(train_set[train_set['Embarked'] == 'Q']['Age']))) print("Average age of Cherbourg passenger:", round(np.nanmean(train_set[train_set['Embarked'] == 'C']['Age'])) )
Titanic - Machine Learning from Disaster
8,119,418
def f1_smart(y_true, y_pred):
    """Grid-search the decision threshold over [0.10, 0.50] in steps of
    0.01 and return (best_f1, best_threshold) for probabilities y_pred."""
    thresholds = []
    for thresh in np.arange(0.1, 0.501, 0.01):
        thresh = np.round(thresh, 2)
        res = metrics.f1_score(y_true, (y_pred > thresh).astype(int))
        thresholds.append([thresh, res])
    # Best F1 first.
    thresholds.sort(key=lambda x: x[1], reverse=True)
    best_thresh = thresholds[0][0]
    best_f1 = thresholds[0][1]
    return best_f1, best_thresh

def f1_smart_torch(y_true, y_pred):
    """Same as f1_smart, but y_true is a torch tensor (moved to CPU first)."""
    y_true = y_true.cpu().data.numpy()
    return f1_smart(y_true, y_pred)

def save(m, info):
    # Persist the model object and a small metadata dict side by side.
    torch.save(info, 'best_model.info')
    torch.save(m, 'best_model.m')

def load():
    """Reload the model/metadata pair written by save()."""
    m = torch.load('best_model.m')
    info = torch.load('best_model.info')
    return m, info
train_set_known_age = train_set_age[~np.isnan(train_set_age['Age'])] train_set_known_age = train_set_known_age[(( train_set_known_age['Pclass']==3)& (train_set_known_age['Is_alone']==1)) |(train_set_known_age['Pclass']==3)] print("Percentage of single and/or third class passengers with a known age: ", round(len(train_set_known_age.index)*100/len(train_set.index), 2), "%", sep="" )
Titanic - Machine Learning from Disaster
8,119,418
train_preds = np.zeros(( len(train_df))) test_preds = np.zeros(( len(test_df))) seed_everything() x_test_cuda = torch.tensor(x_test, dtype=torch.long ).cuda() test = torch.utils.data.TensorDataset(x_test_cuda) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) best_thresholds = [] best_f1 = [] for i,(train_idx, valid_idx)in enumerate(splits): x_train_fold = torch.tensor(x_train[train_idx], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx, np.newaxis], dtype=torch.float32 ).cuda() x_val_fold = torch.tensor(x_train[valid_idx], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx, np.newaxis], dtype=torch.float32 ).cuda() model = NeuralNet() model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum') base_lr, max_lr = 0.0001, 0.003 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr) train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') min_val_loss = 1000000000 step_size=300 best_thresh = 0 max_f1 = 0 for epoch in range(n_epochs): start_time = time.time() scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size=step_size, mode='exp_range', gamma=0.99994) model.train() avg_loss = 0. step = 0 for x_batch, y_batch in tqdm(train_loader, disable=True): if scheduler: scheduler.batch_step() step += 1 y_pred = model(x_batch) loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(( len(test_df))) avg_val_loss = 0. 
for i,(x_batch, y_batch)in enumerate(valid_loader): y_pred = model(x_batch ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] val_f1, threshold = f1_smart_torch(y_val_fold, valid_preds_fold) print('Optimal F1: {} at threshold: {}'.format(val_f1, threshold)) if avg_val_loss < min_val_loss: print("Saving model with minimum Loss") save(m=model, info={'epoch': epoch, 'val_loss': avg_val_loss, 'val_f1': val_f1}) min_val_loss = avg_val_loss best_thresh = threshold max_f1 = val_f1 elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time)) best_thresholds.append(best_thresh) best_f1.append(max_f1) model,_ = load() for i,(x_batch,)in enumerate(test_loader): y_pred = model(x_batch ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits )<compute_test_metric>
from sklearn.base import BaseEstimator, TransformerMixin from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import StandardScaler
Titanic - Machine Learning from Disaster
8,119,418
# Tune the probability cut-off maximising F1 on the out-of-fold train
# predictions; `threshold_search` is defined earlier in this file.
search_result = threshold_search(y_train, train_preds)
search_result
class TitleSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def get_title(self, obj): title =(( obj.rsplit(',', 1)[1] ).rsplit('.', 1)[0] ).strip() return title def transform(self, X): X.loc[:, 'Title'] = X[self._attribute_names].apply(self.get_title) X = X.drop(self._attribute_names, 1) return X.values class TitleCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_title(self, obj): if(obj == 'Mr'): title_c = 'Mr' elif(obj == 'Mrs'): title_c = 'Mrs' elif(obj == 'Miss'): title_c = 'Miss' elif(obj == 'Master'): title_c = 'Master' else: title_c = 'Other' return title_c def transform(self, X): X.loc[:, 'Title_c'] = X[self._attribute_names].apply(self.code_title) X = X.drop(self._attribute_names, 1) return X.values class AgeCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_age(self, obj): if(obj <= 15): age_c = 'young' elif(obj < 50): age_c = 'mid' elif(obj >= 50): age_c = 'old' else: age_c = 'unknown' return age_c def transform(self, X): X.loc[:, 'Age_c'] = X[self._attribute_names].apply(self.code_age) X = X.drop(self._attribute_names, 1) return X.values class SibSpCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_SibSp(self, obj): if(obj == 0): SibSp_c = 'zero' elif(obj == 1): SibSp_c = 'one' else: SibSp_c = 'more' return SibSp_c def transform(self, X): X.loc[:, 'SibSp_c'] = X[self._attribute_names].apply(self.code_SibSp) X = X.drop(self._attribute_names, 1) return X.values class ParchCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): 
return self def code_Parch(self, obj): if(obj == 0): Parch_c = 'zero' elif(obj == 1 or obj == 2): Parch_c = 'one/two' else: Parch_c = 'more' return Parch_c def transform(self, X): X.loc[:, 'Parch_c'] = X[self._attribute_names].apply(self.code_Parch) X = X.drop(self._attribute_names, 1) return X.values class Family_members(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): X.loc[:, 'Family_members'] = X[self._attribute_names] + X['Parch'] return X.values class Family_membersCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_Family_members(self, obj): if(obj == 0): Family_members_c = 'zero' elif(obj == 1 or obj == 2): Family_members_c = 'one/two' elif(obj == 3): Family_members_c = 'three' else: Family_members_c = 'more' return Family_members_c def transform(self, X): X.loc[:, 'Family_members_c'] = X[self._attribute_names].apply(self.code_Family_members) X = X.drop(self._attribute_names, 1) return X.values class People_on_ticket(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): counts = list() tickets = list(X[self._attribute_names]) for i in tickets: counts.append(ticket_base.count(i)) X.loc[:, 'People_on_ticket'] = counts return X.values class People_on_ticketCoder(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_People_on_ticket(self, obj): if(obj == 1): People_on_ticket_c = 'one' elif(obj == 2): People_on_ticket_c = 'two' elif(obj == 3): People_on_ticket_c = 'three' elif(obj == 4): People_on_ticket_c = 'four' else: People_on_ticket_c = 'more' return People_on_ticket_c def transform(self, X): X.loc[:, 
'People_on_ticket_c'] = X[self._attribute_names].apply(self.code_People_on_ticket) X = X.drop(self._attribute_names, 1) return X.values class Is_Alone(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): family_members = list(X['Family_members']) ppl_on_ticket = list(X[self._attribute_names]) is_alone = list() for i in range(0,len(ppl_on_ticket)) : if(ppl_on_ticket[i] == 1 and family_members[i] == 0): is_alone.append(1) else: is_alone.append(0) X.loc[:, 'Is_alone'] = is_alone return X.values class Is_Cabin(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def code_Cabin(self, obj): if(pd.isnull(obj)) : is_cabin = 0 else: is_cabin = 1 return is_cabin def transform(self, X): X.loc[:, 'Is_cabin'] = X[self._attribute_names].apply(self.code_Cabin) X = X.drop(self._attribute_names, 1) return X.values class FareClass(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self._attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): first_class = X.loc[X['Pclass']==1] second_class = X.loc[X['Pclass']==2] third_class = X.loc[X['Pclass']==3] p_class = list(X['Pclass']) fare = list(X[self._attribute_names]) fare_class = list() for i in range(0,len(p_class)) : if(p_class[i] == 1): if(fare[i] < statistics.mean(second_class['Fare'])) : fare_class.append("cheap") else: fare_class.append("normal") elif(p_class[i] == 2): if(fare[i] < statistics.mean(third_class['Fare'])) : fare_class.append("cheap") elif(fare[i] > statistics.mean(first_class['Fare'])) : fare_class.append("expensive") else: fare_class.append("normal") else: if(fare[i] > statistics.mean(second_class['Fare'])) : fare_class.append("expensive") else: fare_class.append("normal") X.loc[:, 'Fare_class'] = fare_class return X.values
Titanic - Machine Learning from Disaster
8,119,418
# Binarise the fold-averaged test predictions with the tuned threshold
# and write the Kaggle submission file.
submission = test_df[['qid']].copy()
submission['prediction'] = test_preds > search_result['threshold']
submission.to_csv('submission.csv', index=False)
name = 'Name' name_pipeline = Pipeline(steps=[ ('get_title', TitleSelector(name)) ]) title = 'Title' title_pipeline = Pipeline(steps=[ ('code_title', TitleCoder(title)) ]) age = 'Age' age_pipeline = Pipeline(steps=[ ('code_age', AgeCoder(age)) ]) sibsp = 'SibSp' sibsp_pipeline = Pipeline(steps=[ ('code_sibsp', SibSpCoder(sibsp)) ]) sibsp_pipeline2 = Pipeline(steps=[ ('create_family_members', Family_members(sibsp)) ]) parch = 'Parch' parch_pipeline = Pipeline(steps=[ ('code_parch', ParchCoder(parch)) ]) family_members = 'Family_members' family_members_pipeline = Pipeline(steps=[ ('code_family_members', Family_membersCoder(family_members)) ]) ticket = 'Ticket' ticket_pipeline = Pipeline(steps=[ ('create_People_on_ticket', People_on_ticket(ticket)) , ]) people_on_ticket = 'People_on_ticket' ticket_pipeline2 = Pipeline(steps=[ ('code_People_on_ticket', People_on_ticketCoder(people_on_ticket)) , ]) is_alone_pipeline = Pipeline(steps=[ ('create_is_alone', Is_Alone(people_on_ticket)) , ]) cabin = 'Cabin' cabin_pipeline = Pipeline(steps=[ ('code_cabin', Is_Cabin(cabin)) , ]) fare = 'Fare' fare_pipeline = Pipeline(steps=[ ('fare_class', FareClass(fare)) , ]) attribs1 = ['Pclass', 'Title_c', 'Sex', 'Age_c', 'Family_members_c', 'Embarked', ] attribs1_pipeline = Pipeline(steps=[ ('imputer', SimpleImputer(strategy="most_frequent")) , ('encoder', OneHotEncoder(sparse=False)) , ('standarizer', StandardScaler()) ]) attribs2 = ['SibSp', 'Parch', 'Fare', 'Is_cabin'] attribs2_pipeline = Pipeline(steps=[ ('imputer', SimpleImputer(strategy="median")) , ('standarizer', StandardScaler()) ]) preprocessor = ColumnTransformer( remainder = 'drop', transformers=[ ('first', attribs1_pipeline, attribs1), ('second', attribs2_pipeline, attribs2) ]) full_pipeline = FeatureUnion([ ('name_pipeline', name_pipeline), ('title_pipeline', title_pipeline), ('age_pipeline', age_pipeline), ('sibsp_pipeline2', sibsp_pipeline2), ('family_members_pipeline', family_members_pipeline), ('cabin_pipeline', 
cabin_pipeline), ('preprocessor', preprocessor) ]) full_pipeline2 = FeatureUnion([ ('preprocessor', preprocessor) ] )
Titanic - Machine Learning from Disaster
8,119,418
%matplotlib inline<load_from_csv>
train_set_prepared = full_pipeline.fit_transform(train_set) train_set_prepared = full_pipeline2.fit_transform(train_set) X_train_prepared = train_set_prepared y_train_prepared = train_set['Survived'] test_set_prepared = full_pipeline.fit_transform(test_set) test_set_prepared = full_pipeline2.fit_transform(test_set) X_test_prepared = test_set_prepared y_test_prepared = test_set['Survived']
Titanic - Machine Learning from Disaster
8,119,418
test = pd.read_csv(".. /input/covid19-global-forecasting-week-4/test.csv") train = pd.read_csv(".. /input/covid19-global-forecasting-week-4/train.csv") test = test[test.Date > "2020-04-14"] all_data = pd.concat([train, test],ignore_index=True ).sort_values(by=['Country_Region','Province_State','Date']) all_data['ConfirmedCases'] = all_data['ConfirmedCases'].fillna(-1) all_data['Fatalities'] = all_data['Fatalities'].fillna(-0) countries = pd.read_csv(".. /input/countries/countries.csv") countries.drop(['iso_alpha3','iso_numeric','official_name','name','iso_alpha2'], axis='columns',inplace=True) data_quarantine = pd.read_csv(".. /input/countries/quarantine_dates.csv") data_quarantine = data_quarantine.groupby("Country" ).max().loc[:,"Start date"] data_quarantine.loc["Russia"] = "2020-03-30" countries = countries.set_index("ccse_name", drop=True) countries["Quarantine"] = data_quarantine countries_mean = countries.mean() all_data = all_data.merge(countries, how ="left" , left_on="Country_Region", right_on='ccse_name') all_data['fertility_rate'] = all_data['fertility_rate'].fillna(countries_mean['fertility_rate']) all_data['median_age'] = all_data['median_age'].fillna(countries_mean['median_age']) all_data['migrants'] = all_data['migrants'].fillna(countries_mean['migrants']) all_data['urban_pop_rate'] = all_data['urban_pop_rate'].fillna(countries_mean['urban_pop_rate']) all_data['density'] = all_data['density'].fillna(countries_mean['density']) all_data['land_area'] = all_data['land_area'].fillna(countries_mean['land_area']) all_data['population'] = all_data['population'].fillna(countries_mean['population']) all_data['world_share'] = all_data['world_share'].fillna(countries_mean['world_share']) all_data['Quarantine'] = all_data['Quarantine'].fillna("2020-04-01") all_data.drop(['Id'], axis='columns',inplace=True) all_data['Province_State'] = all_data['Province_State'].fillna("zzz" )<data_type_conversions>
# Exhaustive hyper-parameter search for a random forest (5-fold CV,
# accuracy scoring).
# NOTE(review): `bootstrap` is given the *strings* 'True'/'False', both
# of which are truthy — presumably the booleans True/False were
# intended; confirm against sklearn's RandomForestClassifier docs.
rf_model = RandomForestClassifier(random_state=42)
params_grid = [
    {'n_estimators': [100, 200, 300, 400, 500],
     'criterion': ['gini', 'entropy'],
     'min_samples_split': [2, 3, 4, 5],
     'max_features': ['auto', 'log2', None],
     'bootstrap': ['True', 'False']}
]
grid_search = GridSearchCV(rf_model, params_grid, cv=5, scoring="accuracy", n_jobs=1)
grid_search.fit(X_train_prepared, y_train_prepared)
grid_search.best_params_
Titanic - Machine Learning from Disaster
8,119,418
data2 = all_data data2 = data2[data2.ConfirmedCases != 0] data2.loc[data2.ConfirmedCases == -1,"ConfirmedCases"] = 0 data2["Date"] = pd.to_datetime(data2.Date) data4 = data2[["Country_Region","Date"]].groupby("Country_Region" ).min() data4.columns = ["Date_min"] data2 = data2.merge(data4, how = 'left', left_on='Country_Region', right_on='Country_Region') data2["days"] =(data2.Date - data2.Date_min ).dt.days data2["days_mart"] =(data2.Date - pd.to_datetime("2020-03-1")).dt.days data2["days_after_Quarantine"] =(data2.Date - pd.to_datetime(data2.Quarantine)).dt.days data2.drop(['Date_min'], axis='columns',inplace=True) data2.Date = data2["Date"].apply(lambda x: pd.Series(x.strftime("%m-%d"))) data2 = data2.rename(columns={"ConfirmedCases": "confirmed","Fatalities":"deaths","Country_Region":"countries"}) data2[data2.countries == "Russia"].iloc[0:70]<filter>
params_grid2 = [ {'n_estimators': [120, 140, 160, 180, 200, 220, 240, 260, 280], 'criterion': ['gini', 'entropy'], 'min_samples_split': [4, 5], 'max_features': ['auto', None], 'bootstrap': ['True']} ] grid_search2 = GridSearchCV(rf_model, params_grid2, cv=5, scoring="accuracy", n_jobs=1) grid_search2.fit(X_train_prepared, y_train_prepared) def indices_of_top_k(arr, k): return np.sort(np.argpartition(np.array(arr), -k)[-k:]) def fs_calculate_results() : train_prediction = list() test_prediction = list() knn_model = KNeighborsClassifier() feature_importances = grid_search2.best_estimator_.feature_importances_ for i in range(1,26): indices_of_top = indices_of_top_k(feature_importances, i) X_train_restricted = X_train_prepared[:, indices_of_top] X_test_restricted = X_test_prepared[:, indices_of_top] params_grid_fs = [ {'n_neighbors': [20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'], 'metric': ['minkowski', 'euclidean', 'manhattan']} ] grid_search_fs = GridSearchCV(knn_model, params_grid_fs, cv=5, scoring="accuracy", n_jobs=1) grid_search_fs.fit(X_train_restricted, y_train_prepared) train_prediction.append(grid_search_fs.best_score_) knn_final_model = grid_search_fs.best_estimator_ knn_predictions = knn_final_model.predict(X_test_restricted) test_prediction.append(accuracy_score(y_test_prepared, knn_predictions)) return train_prediction, test_prediction train_pred, test_pred = fs_calculate_results() fs_results = pd.DataFrame({'train_set': train_pred, 'test_set': test_pred}) fs_results['train-test_difference'] = fs_results['train_set'] - fs_results['test_set'] fs_results.index += 1 fs_results = fs_results.sort_values('train-test_difference') fs_results
Titanic - Machine Learning from Disaster