kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
14,356,555
df.ingredients = df.ingredients.astype('str') df.ingredients = df.ingredients.str.replace("["," ") df.ingredients = df.ingredients.str.replace("]"," ") df.ingredients = df.ingredients.str.replace("'"," ") df.ingredients = df.ingredients.str.replace(","," " )<data_type_conversions>
alldata['titles'] = pd.Categorical(alldata['titles'])
Titanic - Machine Learning from Disaster
14,356,555
testset.ingredients = testset.ingredients.astype('str') testset.ingredients = testset.ingredients.str.replace("["," ") testset.ingredients = testset.ingredients.str.replace("]"," ") testset.ingredients = testset.ingredients.str.replace("'"," ") testset.ingredients = testset.ingredients.str.replace(","," " )<feature_engineering>
alldata['titles'] = alldata['titles'].cat.codes
Titanic - Machine Learning from Disaster
14,356,555
vect = TfidfVectorizer()<feature_engineering>
alldata.drop('titles1',axis=1 )
Titanic - Machine Learning from Disaster
14,356,555
features = vect.fit_transform(df.ingredients )<categorify>
alldata.loc[alldata['Sex']=='male','Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
testfeatures = vect.transform(testset.ingredients )<categorify>
alldata.loc[alldata['Sex']=='female','Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
encoder = LabelEncoder() labels = encoder.fit_transform(df.cuisine )<split>
alldata.loc[alldata['Pclass']==1,'Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.2 )<compute_test_metric>
alldata.loc[alldata['Pclass']==2,'Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
<choose_model_class>
alldata.loc[alldata['Pclass']==3,'Embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
<compute_test_metric>
alldata['Embarked']=alldata['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
14,356,555
<compute_test_metric>
alldata.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
<split>
pclass_list = list(alldata['Pclass'].unique()) df_averages = [] for classes in pclass_list: df_averages.append(( alldata.loc[alldata['Pclass']==classes]['Age'].mean())) averages = pd.DataFrame(df_averages,index=pclass_list,columns=['Average age in class']) averages
Titanic - Machine Learning from Disaster
14,356,555
<import_modules>
pclass_list = list(alldata['Pclass'].unique()) df_fares = [] for fares in pclass_list: df_fares.append(( alldata.loc[alldata['Pclass']==fares]['Fare'].median())) averages2 = pd.DataFrame(df_fares,index=pclass_list,columns=['Average fare in class']) averages2
Titanic - Machine Learning from Disaster
14,356,555
<compute_test_metric>
alldata.loc[(alldata['Age'].isnull())&(alldata['Pclass']== 1),'Age'] = averages.loc[1,'Average age in class'] alldata.loc[(alldata['Age'].isnull())&(alldata['Pclass']== 2),'Age'] = averages.loc[2,'Average age in class'] alldata.loc[(alldata['Age'].isnull())&(alldata['Pclass']== 3),'Age'] = averages.loc[3,'Average age in class'] alldata.loc[(alldata['Fare'].isnull())&(alldata['Pclass']== 1),'Fare'] = averages2.loc[1,'Average fare in class'] alldata.loc[(alldata['Fare'].isnull())&(alldata['Pclass']== 2),'Fare'] = averages2.loc[2,'Average fare in class'] alldata.loc[(alldata['Fare'].isnull())&(alldata['Pclass']== 3),'Fare'] = averages2.loc[3,'Average fare in class'] alldata.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
<import_modules>
farebandlist = list(alldata['FareBand'].unique()) farelist =[] for fares in farebandlist: farelist.append(alldata.loc[(alldata['type']=='train')&(alldata['FareBand']== fares),'Survived'].value_counts()) farelist = pd.DataFrame(farelist,index=farebandlist) farelist.columns
Titanic - Machine Learning from Disaster
14,356,555
import lightgbm as lgb<train_model>
farelist[[0.0, 1.0]] = farelist[[0.0, 1.0]].apply(lambda x: x/x.sum() , axis=1) farelist
Titanic - Machine Learning from Disaster
14,356,555
gbm = lgb.LGBMClassifier(objective="mutliclass",n_estimators=10000,num_leaves=512) gbm.fit(X_train,y_train,verbose = 300 )<predict_on_test>
alldata['FareBand'] = alldata['FareBand'].astype(np.int64) alldata.info()
Titanic - Machine Learning from Disaster
14,356,555
pred = gbm.predict(testfeatures )<categorify>
sex1 = pd.get_dummies(alldata['Sex'],drop_first=True) embarked1 = pd.get_dummies(alldata['Embarked'],drop_first=True) alldata.drop(['Sex','Embarked'],axis=1,inplace=True)
Titanic - Machine Learning from Disaster
14,356,555
predconv = encoder.inverse_transform(pred )<create_dataframe>
alldata = pd.concat([alldata,sex1,embarked1],axis=1 )
Titanic - Machine Learning from Disaster
14,356,555
sub = pd.DataFrame({'id':testset.id,'cuisine':predconv} )<define_variables>
alldata.drop(['Ticket','Fare','Age'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
14,356,555
output = sub[['id','cuisine']]<save_to_csv>
train = alldata.loc[alldata['type']=='train'] test = alldata.loc[alldata['type']=='test']
Titanic - Machine Learning from Disaster
14,356,555
output.to_csv("outputfile.csv",index = False )<import_modules>
train = train.drop(['type'],axis=1)
Titanic - Machine Learning from Disaster
14,356,555
%matplotlib inline init_notebook_mode(connected=True) warnings.filterwarnings("ignore") notebookstart= time.time()<load_from_disk>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
train_df = pd.read_json('.. /input/train.json') test_df = pd.read_json('.. /input/test.json') train=train_df train.head(15 )<sort_values>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
train=train_df total = train.isnull().sum().sort_values(ascending = False) percent =(train.isnull().sum() /train.isnull().count() *100 ).sort_values(ascending = False) missing_train_data = pd.concat([total, percent], axis=1, keys=['Total missing', 'Percent missing']) print(" print(missing_train_data.head() )<categorify>
test = test.drop(['type'],axis=1 )
Titanic - Machine Learning from Disaster
14,356,555
train_df['seperated_ingredients'] = train_df['ingredients'].apply(','.join) test_df['seperated_ingredients'] = test_df['ingredients'].apply(','.join) train_df['for ngrams']=train_df['seperated_ingredients'].str.replace(',',' ') def generate_ngrams(text, n): words = text.split(' ') iterations = len(words)- n + 1 for i in range(iterations): yield words[i:i + n] def net_diagram(*cuisines): ngrams = {} for title in train_df[train_df.cuisine==cuisines[0]]['for ngrams']: for ngram in generate_ngrams(title, 2): ngram = ','.join(ngram) if ngram in ngrams: ngrams[ngram] += 1 else: ngrams[ngram] = 1 ngrams_mws_df = pd.DataFrame.from_dict(ngrams, orient='index') ngrams_mws_df.columns = ['count'] ngrams_mws_df['cusine'] = cuisines[0] ngrams_mws_df.reset_index(level=0, inplace=True) ngrams = {} for title in train_df[train_df.cuisine==cuisines[1]]['for ngrams']: for ngram in generate_ngrams(title, 2): ngram = ','.join(ngram) if ngram in ngrams: ngrams[ngram] += 1 else: ngrams[ngram] = 1 ngrams_mws_df1 = pd.DataFrame.from_dict(ngrams, orient='index') ngrams_mws_df1.columns = ['count'] ngrams_mws_df1['cusine'] = cuisines[1] ngrams_mws_df1.reset_index(level=0, inplace=True) cuisine1=ngrams_mws_df.sort_values('count',ascending=False)[:25] cuisine2=ngrams_mws_df1.sort_values('count',ascending=False)[:25] df_final=pd.concat([cuisine1,cuisine2]) g = nx.from_pandas_dataframe(df_final,source='cusine',target='index') cmap = plt.cm.RdYlGn colors = [n for n in range(len(g.nodes())) ] k = 0.35 pos=nx.spring_layout(g, k=k) nx.draw_networkx(g,pos, node_size=df_final['count'].values*8, cmap = cmap, node_color=colors, edge_color='grey', font_size=15, width=3) plt.title("Top 25 Bigrams for %s and %s" %(cuisines[0],cuisines[1]), fontsize=30) plt.gcf().set_size_inches(30,30) plt.show() plt.savefig('network.png') net_diagram('french','cajun_creole' )<prepare_x_and_y>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
df = pd.read_json('.. /input/train.json' ).set_index('id') test_df = pd.read_json('.. /input/test.json' ).set_index('id') traindex = df.index testdex = test_df.index y = df.cuisine.copy() df = pd.concat([df.drop("cuisine", axis=1), test_df], axis=0) df_index = df.index del test_df; gc.collect() ; vect = CountVectorizer() dummies = vect.fit_transform(df.ingredients.str.join(' ')) df = pd.DataFrame(dummies.todense() ,columns=vect.get_feature_names()) df.index= df_index X = df.loc[traindex,:] test_df = df.loc[testdex,:] del df; gc.collect() ;<load_from_csv>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,356,555
def read_dataset(path): return json.load(open(path)) train = read_dataset('.. /input/train.json') test = read_dataset('.. /input/test.json') def generate_text(data): text_data = [" ".join(doc['ingredients'] ).lower() for doc in data] return text_data train_text = generate_text(train) test_text = generate_text(test) target = [doc['cuisine'] for doc in train] tfidf = TfidfVectorizer(binary=True) def tfidf_features(txt, flag): if flag == "train": x = tfidf.fit_transform(txt) else: x = tfidf.transform(txt) x = x.astype('float16') return x X2 = tfidf_features(train_text, flag="train") X_test3 = tfidf_features(test_text, flag="test") lb = LabelEncoder() y2 = lb.fit_transform(target )<train_on_grid>
X = train.drop(['Survived','PassengerId'],axis=1) y = train['Survived']
Titanic - Machine Learning from Disaster
14,356,555
def compareAccuracy(a, b): print(' Compare Multiple Classifiers: ') print('K-Fold Cross-Validation Accuracy: ') names = [] models = [] resultsAccuracy = [] models.append(( 'LR', LogisticRegression())) models.append(( 'LSVM', LinearSVC())) models.append(( 'RF', RandomForestClassifier())) for name, model in models: model.fit(a, b) kfold = model_selection.KFold(n_splits=10, random_state=7) accuracy_results = model_selection.cross_val_score(model, a,b, cv=kfold, scoring='accuracy') resultsAccuracy.append(accuracy_results) names.append(name) accuracyMessage = "%s: %f(%f)" %(name, accuracy_results.mean() , accuracy_results.std()) print(accuracyMessage) fig = plt.figure() fig.suptitle('Algorithm Comparison: Accuracy') ax = fig.add_subplot(111) plt.boxplot(resultsAccuracy) ax.set_xticklabels(names) ax.set_ylabel('Cross-Validation: Accuracy Score') plt.show() def defineModels() : print(' LR = LogisticRegression') print('LSVM = LinearSVM') print('RF = RandomForestClassifier' )<compute_test_metric>
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0 )
Titanic - Machine Learning from Disaster
14,356,555
compareAccuracy(X2,y2) defineModels()<save_to_csv>
from sklearn import preprocessing
Titanic - Machine Learning from Disaster
14,356,555
model = LinearSVC() model.fit(X, y) submission = model.predict(test_df) submission_df = pd.Series(submission, index=testdex ).rename('cuisine') submission_df.to_csv("recipe_submission.csv", index=True, header=True) model.fit(X2, y2) y_test3 = model.predict(X_test3) y_pred = lb.inverse_transform(y_test3) test_id = [doc['id'] for doc in test] sub = pd.DataFrame({'id': test_id, 'cuisine': y_pred}, columns=['id', 'cuisine']) sub.to_csv('recipe_submission2.csv', index=False )<set_options>
scaler = preprocessing.StandardScaler().fit(x_train )
Titanic - Machine Learning from Disaster
14,356,555
os.environ['PYTHONHASHSEED'] = '10000' np.random.seed(10001) random.seed(10002) tf.set_random_seed(10003) wordnet_lemmatizer = WordNetLemmatizer() stop_words = set(stopwords.words('english')) <define_variables>
X_scaled = scaler.transform(x_train )
Titanic - Machine Learning from Disaster
14,356,555
path = '.. /input/' embedding_path = '.. /input/embeddings/' cores = 4 max_text_length=50 do_submission = False min_df_one=1 keep_only_words_in_embedding = True keep_unknown_words_in_keras_sequence_as_zeros = True contraction_mapping = {u"ain't": u"is not", u"aren't": u"are not",u"can't": u"cannot", u"'cause": u"because", u"could've": u"could have", u"couldn't": u"could not", u"didn't": u"did not", u"doesn't": u"does not", u"don't": u"do not", u"hadn't": u"had not", u"hasn't": u"has not", u"haven't": u"have not", u"he'd": u"he would", u"he'll": u"he will", u"he's": u"he is", u"how'd": u"how did", u"how'd'y": u"how do you", u"how'll": u"how will", u"how's": u"how is", u"I'd": u"I would", u"I'd've": u"I would have", u"I'll": u"I will", u"I'll've": u"I will have", u"I'm": u"I am", u"I've": u"I have", u"i'd": u"i would", u"i'd've": u"i would have", u"i'll": u"i will", u"i'll've": u"i will have",u"i'm": u"i am", u"i've": u"i have", u"isn't": u"is not", u"it'd": u"it would", u"it'd've": u"it would have", u"it'll": u"it will", u"it'll've": u"it will have",u"it's": u"it is", u"let's": u"let us", u"ma'am": u"madam", u"mayn't": u"may not", u"might've": u"might have",u"mightn't": u"might not",u"mightn't've": u"might not have", u"must've": u"must have", u"mustn't": u"must not", u"mustn't've": u"must not have", u"needn't": u"need not", u"needn't've": u"need not have",u"o'clock": u"of the clock", u"oughtn't": u"ought not", u"oughtn't've": u"ought not have", u"shan't": u"shall not", u"sha'n't": u"shall not", u"shan't've": u"shall not have", u"she'd": u"she would", u"she'd've": u"she would have", u"she'll": u"she will", u"she'll've": u"she will have", u"she's": u"she is", u"should've": u"should have", u"shouldn't": u"should not", u"shouldn't've": u"should not have", u"so've": u"so have",u"so's": u"so as", u"this's": u"this is",u"that'd": u"that would", u"that'd've": u"that would have", u"that's": u"that is", u"there'd": u"there would", u"there'd've": u"there would have", 
u"there's": u"there is", u"here's": u"here is",u"they'd": u"they would", u"they'd've": u"they would have", u"they'll": u"they will", u"they'll've": u"they will have", u"they're": u"they are", u"they've": u"they have", u"to've": u"to have", u"wasn't": u"was not", u"we'd": u"we would", u"we'd've": u"we would have", u"we'll": u"we will", u"we'll've": u"we will have", u"we're": u"we are", u"we've": u"we have", u"weren't": u"were not", u"what'll": u"what will", u"what'll've": u"what will have", u"what're": u"what are", u"what's": u"what is", u"what've": u"what have", u"when's": u"when is", u"when've": u"when have", u"where'd": u"where did", u"where's": u"where is", u"where've": u"where have", u"who'll": u"who will", u"who'll've": u"who will have", u"who's": u"who is", u"who've": u"who have", u"why's": u"why is", u"why've": u"why have", u"will've": u"will have", u"won't": u"will not", u"won't've": u"will not have", u"would've": u"would have", u"wouldn't": u"would not", u"wouldn't've": u"would not have", u"y'all": u"you all", u"y'all'd": u"you all would",u"y'all'd've": u"you all would have", u"y'all're": u"you all are",u"y'all've": u"you all have",u"you'd": u"you would", u"you'd've": u"you would have", u"you'll": u"you will", u"you'll've": u"you will have", u"you're": u"you are", u"you've": u"you have" }<string_transform>
scaler = preprocessing.StandardScaler().fit(x_test )
Titanic - Machine Learning from Disaster
14,356,555
def load_glove_words() : EMBEDDING_FILE = embedding_path+'glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, 1 embeddings_index1 = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) EMBEDDING_FILE = embedding_path+'paragram_300_sl999/paragram_300_sl999.txt' embeddings_index2 = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding='utf-8', errors='ignore')) return(set(list(embeddings_index1.keys())) ).union(set(list(embeddings_index2.keys()))) embeddings_index = load_glove_words() print(len(embeddings_index)) <string_transform>
X_testscaled = scaler.transform(x_test )
Titanic - Machine Learning from Disaster
14,356,555
def clean_str(text): text = re.sub(u"\[math\].*\[\/math\]", u" math ", text) text = re.sub(u"\S*@\S*\.\S*", u" email ", text) text = u" ".join(re.sub(u"^\d+(?:[.,]\d*)?$", u"number", w)for w in text.split(" ")) specials = [u"’", u"‘", u"´", u"`", u"\u2019"] for s in specials: text = u" ".join(w.replace(s, u"'")for w in text.split(" ")) text = u" ".join([w.strip() if w in embeddings_index else clean(w ).strip() for w in text.split(" ")]) text = re.sub(u"\s+", u" ", text ).strip() text = u" ".join([w.strip() for w in text.split(" ")if w in embeddings_index ]) return text def clean(text): try: if text in contraction_mapping: return contraction_mapping[text] for i, j in [(u"é", u"e"),(u"ē", u"e"),(u"è", u"e"),(u"ê", u"e"),(u"à", u"a"), (u"â", u"a"),(u"ô", u"o"),(u"ō", u"o"),(u"ü", u"u"),(u"ï", u"i"), (u"ç", u"c"),(u"\xed", u"i")]: text = re.sub(i, j, text) if text in embeddings_index: return text text = text.lower() if text in embeddings_index: return text text = re.sub(u"[^a-z\s0-9]", u" ", text) text = u" ".join(re.sub(u"^\d+(?:[.,]\d*)?$", u"number", w)for w in text.split(" ")) text = re.sub(u"[^a-z\s]", u" ", text) text = re.sub(u"\s+", u" ", text ).strip() text = u" ".join([ wordnet_lemmatizer.lemmatize(w)if w not in embeddings_index else w for w in text.split() ]) except: print('ERROR') text = '' return text def parallelize_dataframe(df, func): df_split = np.array_split(df, cores) pool = Pool(cores) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def clean_str_df(df): return df.apply(lambda s : clean_str(s)) def prepare_data(df_data, train=True): print('Prepare data.... 
') df_data['question_text'] = parallelize_dataframe(df_data['question_text'], clean_str_df) return df_data def create_vocabulary(df): start_time = time.time() word_frequency_dc=defaultdict(np.uint32) def word_count(text): text = set(text.split(' ')) if keep_only_words_in_embedding: for w in text: if w in embeddings_index: word_frequency_dc[w]+=1 else: for w in text: word_frequency_dc[w]+=1 df['question_text'].apply(lambda x : word_count(x)) print('[{}] Finished COUNTING WORDS FOR question_text...'.format(time.time() - start_time)) start_time = time.time() vocabulary_dc = word_frequency_dc.copy() cpt=1 for key in vocabulary_dc: vocabulary_dc[key]=cpt cpt+=1 print('[{}] Finished CREATING VOCABULARY...'.format(time.time() - start_time)) return word_frequency_dc, vocabulary_dc def tokenize(text): return [w for w in text.split() ] def load_glove(word_index): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') EMBEDDING_FILE = embedding_path+'glove.840B.300d/glove.840B.300d.txt' embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean1,emb_std1 = all_embs.mean() , all_embs.std() EMBEDDING_FILE = embedding_path+'paragram_300_sl999/paragram_300_sl999.txt' embeddings_index2 = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding='utf-8', errors='ignore')) all_embs = np.stack(embeddings_index2.values()) emb_mean2,emb_std2 = all_embs.mean() , all_embs.std() embedding_matrix = np.zeros(( len(word_index)+2, 300), dtype=np.float32) for word, i in word_index.items() : embedding_vector = None if(word in embeddings_index)&(word in embeddings_index2): embedding_vector =(embeddings_index.get(word)+(embeddings_index2.get(word)-emb_mean2+emb_mean1)) /2. 
else: if(word in embeddings_index): embedding_vector = embeddings_index.get(word) else: if(word in embeddings_index2): embedding_vector = embeddings_index2.get(word)-emb_mean2+emb_mean1 if embedding_vector is not None: embedding_matrix[i] = embedding_vector.astype(np.float32) return embedding_matrix def preprocess_keras(text): if keep_unknown_words_in_keras_sequence_as_zeros: return [ vocabulary_dc[w] if(( w in embeddings_index)&(word_frequency_dc[w]>=min_df_one)) else 0 for w in(text.split())] else: return [ vocabulary_dc[w] for w in(text.split())if w in embeddings_index ] def preprocess_keras_df(df): return df.apply(preprocess_keras) def get_keras_data(df): X = { 'num_data' : num_FE(df), 'question_text_in_glove': pad_sequences(df['seq_question_text_in_glove'], maxlen=max_text_length, value=0, padding='post'), 'question_text_mask_in_glove':(pad_sequences(df['seq_question_text_in_glove'], maxlen=max_text_length, value=0, padding='post')>0 ).astype(int ).reshape(-1,max_text_length,1), 'starting_words': pad_sequences(df['seq_question_text_in_glove'].apply(lambda x : x[:10]), maxlen=10, value=0, padding='post'), 'ending_words': pad_sequences(df['seq_question_text_in_glove'].apply(lambda x : x[-10:]), maxlen=10, value=0, padding='post') } return X def num_FE(temp_df): df = pd.DataFrame() df['total_length'] = temp_df['question_text_original'].apply(len) df['capitals'] = temp_df['question_text_original'].apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_exclamation_marks'] = temp_df['question_text_original'].apply(lambda comment: comment.count('!')) df['num_question_marks'] = temp_df['question_text_original'].apply(lambda comment: comment.count('?')) df['num_punctuation'] = temp_df['question_text_original'].apply( lambda comment: sum(comment.count(w)for w in '.,;:')) df['num_symbols'] = temp_df['question_text_original'].apply( lambda comment: 
sum(comment.count(w)for w in '* df['num_words'] = temp_df['question_text_original'].apply(lambda comment: len(comment.split())) df['num_unique_words'] = temp_df['question_text_original'].apply( lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df.values.astype(np.float32 )<compute_train_metric>
params_to_test = { 'n_estimators':[50,100,150,170,180,190,200,210], 'max_depth':[3,5,6] } rf_model = RandomForestClassifier(random_state=42) grid_search = GridSearchCV(rf_model, param_grid=params_to_test, cv=10, scoring='f1_macro', n_jobs=4) grid_search.fit(X_scaled, y_train) best_params = grid_search.best_params_ best_model = RandomForestClassifier(**best_params )
Titanic - Machine Learning from Disaster
14,356,555
def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in [i * 0.01 for i in range(10,70)]: score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score search_result = {'threshold': best_threshold, 'f1': best_score} return search_result class My_Callback(Callback): def __init__(self, save_path, factor=0.5, patience_reduce=10, patience_stop=30, min_lr=1e-4): self.scores_list = [] self.scores_list_stop = [] self.thresh_list = [] self.all_preds = None self.best_score = 0. self.best_threshold = 0.5 self.best_epoch = 0 self.best_lr = 0 self.save_path = save_path self.factor = factor self.min_lr = min_lr self.patience_reduce = patience_reduce self.patience_stop = patience_stop def on_epoch_end(self, epoch, logs={}): all_preds = self.all_preds ep = epoch best_score = self.best_score preds_valid = self.model.predict(test_keras , batch_size=2048) thresholds = threshold_search(dtest_y, preds_valid) best_thres = thresholds['threshold'] score_all = thresholds['f1'] print(ep+1, ' F1 : ', "{0:.4f}".format(score_all), "{0:.4f}".format(best_thres), ' AUC : ', "{0:.4f}".format(roc_auc_score(dtest_y, preds_valid>=best_thres))) self.scores_list.append(score_all) self.thresh_list.append(best_thres) self.scores_list_stop.append(score_all) if score_all > best_score: self.best_score = score_all self.best_threshold = best_thres self.best_epoch = ep+1 self.best_lr = np.float32(K.get_value(self.model.optimizer.lr)) self.model.save_weights(self.save_path, overwrite=True) print("Score improved from ", "{0:.4f}".format(best_score), ' to ', "{0:.4f}".format(score_all),\ 'threshold : ', "{0:.4f}".format(self.best_threshold), ' lr : ', self.best_lr, ' epoch : ', self.best_epoch) else: print("Score didnt improve, current best score is : ", "{0:.4f}".format(best_score),\ 'with threshold : ', "{0:.4f}".format(self.best_threshold), ' lr : ', self.best_lr, ' epoch : ', self.best_epoch) pos = 
np.argmax(self.scores_list) if len(self.scores_list)- pos>self.patience_reduce: old_lr = np.float32(K.get_value(self.model.optimizer.lr)) if old_lr > self.min_lr: new_lr = old_lr * self.factor new_lr = max(new_lr, self.min_lr) self.model.load_weights(self.save_path) K.set_value(self.model.optimizer.lr, new_lr) self.scores_list = self.scores_list[:pos+1] self.thresh_list = self.thresh_list[:pos+1] print('*'*70) print('Reducing LR from ', old_lr, ' to ', new_lr) print('Loading Last best score : ', self.scores_list[-1], 'with threshold : ',self.thresh_list[-1]) print('*'*70) pos = np.argmax(self.scores_list_stop) if len(self.scores_list_stop)- pos>self.patience_stop: print('TRAINING STOP, NO MORE IMPROVEMENT') print('LOADING BEST WEIGHTS.... ') self.model.load_weights(self.save_path) self.stopped_epoch = ep self.model.stop_training = True print('') return class DataGenerator(keras.utils.Sequence): 'Generates data for Keras' def __init__(self, data, labels, shuffle=True, seed=None): self.epoch = 0 self.seed = seed if self.seed is None: self.seed = self.epoch np.random.seed(self.seed) self.list_IDs = np.random.permutation(len(labels)) self.data_num = data['num_data'] self.data_start = data['starting_words'] self.data_end = data['ending_words'] self.data_glove = data['question_text_in_glove'] self.data_mask = data['question_text_mask_in_glove'] self.labels = labels self.shuffle = shuffle self.on_epoch_end() def __len__(self): 'Denotes the number of batches per epoch' return int(np.floor(len(self.list_IDs)/ batch_size)) def __getitem__(self, index): 'Generate one batch of data' indexes = self.indexes[index*batch_size:(index+1)*batch_size] list_IDs_temp = [self.list_IDs[k] for k in indexes] X, y = self.__data_generation(list_IDs_temp) return(X, y) def on_epoch_end(self): 'Updates indexes after each epoch' self.epoch += 1 self.seed += 1 np.random.seed(self.seed) self.list_IDs = np.random.permutation(len(self.labels)) self.indexes = np.arange(len(self.list_IDs)) if 
self.shuffle == True: np.random.shuffle(self.indexes) def __data_generation(self, list_IDs_temp): X = { 'num_data' : self.data_num[list_IDs_temp], 'starting_words': self.data_start[list_IDs_temp], 'ending_words': self.data_end[list_IDs_temp], 'question_text_in_glove': self.data_glove[list_IDs_temp], 'question_text_mask_in_glove': self.data_mask[list_IDs_temp].reshape(-1,max_text_length,1), } return X , self.labels[list_IDs_temp] def new_rnn_model() : sequence_in_glove = Input(shape=[train_keras["question_text_in_glove"].shape[1]], name="question_text_in_glove") mask_in_glove = Input(shape=[train_keras["question_text_mask_in_glove"].shape[1],1], name="question_text_mask_in_glove") starting_words = Input(shape=[train_keras["starting_words"].shape[1]], name="starting_words") ending_words = Input(shape=[train_keras["ending_words"].shape[1]], name="ending_words") num_data = Input(shape=[train_keras["num_data"].shape[1]], name="num_data") num_data_normed = BatchNormalization()(num_data) shared_embedding = Embedding(glove_weights.shape[0], glove_weights.shape[1], weights=[glove_weights], trainable=False, name='emb') emb_sequence_in_glove = shared_embedding(sequence_in_glove) emb_sequence_in_glove = multiply([emb_sequence_in_glove, mask_in_glove]) emb_sequence_in_glove = SpatialDropout1D(0.1 )(emb_sequence_in_glove) shared_embedding2 = Embedding(glove_weights.shape[0], 20, trainable=True, name='emb2') emb_sequence_in_glove2 = shared_embedding(sequence_in_glove) emb_sequence_in_glove2 = multiply([emb_sequence_in_glove2, mask_in_glove]) emb_sequence_in_glove2 = SpatialDropout1D(0.2 )(emb_sequence_in_glove2) lstm2 = CuDNNLSTM(64, return_sequences=True )(emb_sequence_in_glove2) lstm2 = SpatialDropout1D(0.2 )(lstm2) max_lstm2 = GlobalMaxPooling1D()(lstm2) avg_lstm2 = Lambda(lambda x : K.sum(x, axis=1))(lstm2) avg_lstm2 = BatchNormalization()(avg_lstm2) starting_emb = shared_embedding(starting_words) ending_emb = shared_embedding(ending_words) avg_emb_start = Lambda(lambda x : 
K.sum(x, axis=1))(starting_emb) avg_emb_start = BatchNormalization()(avg_emb_start) avg_emb_end = Lambda(lambda x : K.sum(x, axis=1))(ending_emb) avg_emb_end = BatchNormalization()(avg_emb_end) avg_emb = Lambda(lambda x : K.sum(x, axis=1))(emb_sequence_in_glove) avg_emb = BatchNormalization()(avg_emb) lstm = CuDNNLSTM(128, return_sequences=True )(emb_sequence_in_glove) lstm = SpatialDropout1D(0.1 )(lstm) max_lstm = GlobalMaxPooling1D()(lstm) avg_lstm = Lambda(lambda x : K.sum(x, axis=1))(lstm) avg_lstm = BatchNormalization()(avg_lstm) main_l = concatenate([ max_lstm, avg_lstm, num_data_normed, avg_emb]) main_l = Dense(196 )(main_l) main_l = Activation('relu' )(main_l) main_l = Dense(64 )(main_l) main_l = Activation('relu' )(main_l) output = Dense(1, activation="sigmoid" )(main_l) model = Model([sequence_in_glove, mask_in_glove, starting_words, ending_words, num_data], output) return model<load_from_csv>
best_model.fit(X_scaled,y_train )
Titanic - Machine Learning from Disaster
14,356,555
df_sub = pd.read_csv(path+'test.csv', encoding='utf-8', engine='python') df_sub['target'] = -99 df_sub['question_text'].fillna(u'unknownstring', inplace=True) df_train_all = pd.read_csv(path+'train.csv', encoding='utf-8', engine='python') df_train_all['question_text'].fillna(u'unknownstring', inplace=True) df_sub['question_text_original'] = df_sub['question_text'].copy() df_sub = prepare_data(df_sub) df_train_all['question_text_original'] = df_train_all['question_text'].copy() df_train_all = prepare_data(df_train_all) word_frequency_dc, vocabulary_dc = create_vocabulary(df_train_all[['question_text']].append(df_sub[['question_text']])) print(len(vocabulary_dc)) glove_weights = load_glove(vocabulary_dc) print(glove_weights.shape )<create_dataframe>
betterpred = best_model.predict(X_testscaled )
Titanic - Machine Learning from Disaster
14,356,555
df_train_all['seq_question_text_in_glove'] = parallelize_dataframe(df_train_all['question_text'], preprocess_keras_df) train_keras = get_keras_data(df_train_all) df_sub['seq_question_text_in_glove'] = parallelize_dataframe(df_sub['question_text'], preprocess_keras_df) sub_keras = get_keras_data(df_sub) <train_model>
from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
14,356,555
scale = 1. BATCH_SIZE = int(scale*1024) lr1 = scale*2e-3 lr2 = scale*1e-3 batch_size = BATCH_SIZE epochs = 5 save_model_name='./model32.h5' all_preds_sub = [ ] print("Fitting RNN model...") for bag in range(7): train_generator = DataGenerator(train_keras, df_train_all['target'].values, shuffle=True, seed=bag) rnn_model = new_rnn_model() rnn_model.get_layer('emb' ).trainable=False optimizer = Adam(lr=lr1) rnn_model.compile(loss="binary_crossentropy", optimizer=optimizer) rnn_model.fit_generator(generator=train_generator, workers=3, verbose=2, epochs=epochs) finetune=True if finetune: rnn_model.get_layer('emb' ).trainable=True optimizer = Adam(lr=lr2) rnn_model.compile(loss="binary_crossentropy", optimizer=optimizer) rnn_model.fit_generator(generator=train_generator, workers=3, verbose=2,epochs=1) preds_sub = rnn_model.predict(sub_keras , batch_size=2048 ).squeeze() all_preds_sub.append(preds_sub) del rnn_model gc.collect() print('*******************************************************') <save_to_csv>
predacc = round(accuracy_score(betterpred, y_test)* 100, 2) print(predacc,'%' )
Titanic - Machine Learning from Disaster
14,356,555
subThreshold = 0.33 mean_preds = np.array(all_preds_sub ).transpose() mean_preds = mean_preds.mean(axis=1) sub_df = pd.DataFrame() sub_df['qid'] = df_sub.qid.values sub_df['prediction'] =(mean_preds>subThreshold ).astype(int) sub_df.to_csv('submission.csv', index=False )<set_options>
scale1 = preprocessing.StandardScaler().fit(X) X_scaled1 = scale1.transform(X )
Titanic - Machine Learning from Disaster
14,356,555
warnings.filterwarnings('ignore' )<import_modules>
X_test = test.drop(['Survived','PassengerId'],axis=1 )
Titanic - Machine Learning from Disaster
14,356,555
from nltk.tokenize import TweetTokenizer from gensim.models import KeyedVectors from sklearn.metrics import f1_score from sklearn.model_selection import StratifiedKFold, train_test_split<import_modules>
predscale1 = preprocessing.StandardScaler().fit(X_test) pred_scale = predscale1.transform(X_test )
Titanic - Machine Learning from Disaster
14,356,555
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torch import torchtext from torchtext import data, vocab from torchtext.data import Dataset<import_modules>
testpred = best_model.predict(pred_scale )
Titanic - Machine Learning from Disaster
14,356,555
torchtext.vocab.tqdm = tqdm_notebook<define_variables>
output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': testpred}) output.info() output['Survived'] = output['Survived'].astype('int64') output.head()
Titanic - Machine Learning from Disaster
14,356,555
path = ".. /input" emb_path = ".. /input/embeddings" n_folds = 5 bs = 512 device = 'cuda'<set_options>
output['Survived'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
seed = 7777 random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True<choose_model_class>
from sklearn.svm import SVC
Titanic - Machine Learning from Disaster
14,356,555
tknzr = TweetTokenizer(strip_handles=True, reduce_len=True )<define_variables>
model = SVC()
Titanic - Machine Learning from Disaster
14,356,555
mispell_dict = { "can't" : "can not", "tryin'":"trying", "'m": " am", "'ll": " 'll", "'d" : " 'd'", ".. ": " ",".": ".", ",":" , ", "'ve" : " have", "n't": " not","'s": " 's", "'re": " are", "$": " $","’": " ' ", "y'all": "you all", 'metoo': 'me too', 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'centerd': 'centered', 'theatre': 'theater','cancelled': 'canceled','labour': 'labor', 'organisation': 'organization','wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ','Qoura': 'Quora','sallary': 'salary','Whta': 'What', 'narcisist': 'narcissist','howdo': 'how do','whatare': 'what are', 'howcan': 'how can','howmuch': 'how much','howmany': 'how many', 'whydo': 'why do', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'doI': 'do I', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', '2k17': '2017', '2k18': '2018','qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp':'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization','bigdata': 'big data', 'Quorans': 'Questions','quorans': 'questions','quoran':'question','Quoran':'Question', 'Skripal':'russian spy','Doklam':'Tibet', 'BNBR':'Be Nice Be Respectful', 'Brexit': 'British exit', 'Bhakts':'fascists','bhakts':'fascists','Bhakt':'fascist','bhakt':'fascist', 'SJWs':'Social Justice Warrior','SJW':'Social Justice Warrior', 'Modiji':'Prime Minister of India', 'Ra apist': 'Rapist', ' apist ':' ape ', 'wumao':'commenters','cucks': 'cuck', 'Strzok':'stupid phrase','strzok':'stupid phrase', ' s.p ': ' ', ' S.P ': ' ', 'U.s.p': '', 'U.S.A.': 'USA', 'u.s.a.': 'USA', 'U.S.A': 'USA', 'u.s.a': 'USA', 'U.S.': 'USA', 'u.s.': 'USA', ' U.S ': ' USA ', ' u.s ': ' USA ', 'U.s.': 'USA', ' U.s ': 'USA', ' u.S ': ' USA ', ' fu.k': ' fuck', 
'U.K.': 'UK', ' u.k ': ' UK ', ' don t ': ' do not ', 'bacteries': 'batteries', ' yr old ': ' years old ', 'Ph.D': 'PhD', 'cau.sing': 'causing', 'Kim Jong-Un': 'The president of North Korea', 'savegely': 'savagely', '2fifth': 'twenty fifth', '2third': 'twenty third', '2nineth': 'twenty nineth', '2fourth': 'twenty fourth', ' 'Trumpcare': 'Trump health care system', '4fifth': 'forty fifth', 'Remainers': 'remainder', 'Terroristan': 'terrorist', 'antibrahmin': 'anti brahmin','culturr': 'culture', 'fuckboys': 'fuckboy', 'Fuckboys': 'fuckboy', 'Fuckboy': 'fuckboy', 'fuckgirls': 'fuck girls', 'fuckgirl': 'fuck girl', 'Trumpsters': 'Trump supporters', '4sixth': 'forty sixth', 'weatern': 'western', '4fourth': 'forty fourth', 'emiratis': 'emirates', 'trumpers': 'Trumpster', 'indans': 'indians', 'mastuburate': 'masturbate', ' f**k': ' fuck', ' F**k': ' fuck', ' F**K': ' fuck', ' u r ': ' you are ', ' u ': ' you ', '操你妈 ': 'fuck your mother', ' e.g.': ' for example', 'i.e.': 'in other words', '...': '.', 'et.al': 'elsewhere', 'anti-Semitic': 'anti-semitic', ' f***': ' fuck', ' f**': ' fuc', ' F***': ' fuck', ' F**': ' fuck', ' a****': ' assho', 'a**': 'ass', ' h***': ' hole', 'A****': 'assho', ' A**': ' ass', ' H***': ' hole', ' s***': ' shit', ' s**': 'shi', ' S***': ' shit', ' S**': ' shi', ' Sh**': 'shit', ' p****': ' pussy', ' p*ssy': ' pussy', ' P****': ' pussy', ' p***': ' porn', ' p*rn': ' porn', ' P***': ' porn',' Fck': ' Fuck',' fck': ' fuck', ' st*up*id': ' stupid', ' d***': 'dick', ' di**': ' dick', ' h*ck': ' hack', ' b*tch': ' bitch', 'bi*ch': ' bitch', ' bit*h': ' bitch', ' bitc*': ' bitch', ' b****': ' bitch', ' b***': ' bitc', ' b**': ' bit', ' b*ll': ' bull',' FATF': 'Western summit conference', 'Terroristan': 'terrorist Pakistan', 'terroristan': 'terrorist Pakistan', ' incel': ' involuntary celibates', ' incels': ' involuntary celibates', 'emiratis': 'Emiratis', 'weatern': 'western', 'westernise': 'westernize', 'Pizzagate': 'debunked conspiracy theory', 
'naïve': 'naive', ' HYPSM': ' Harvard, Yale, Princeton, Stanford, MIT', ' HYPS': ' Harvard, Yale, Princeton, Stanford', 'kompromat': 'compromising material', ' Tharki': ' pervert', ' tharki': 'pervert', 'Naxali ': 'Naxalite ', 'Naxalities': 'Naxalites','Mewani': 'Indian politician Jignesh Mevani', ' Wjy': ' Why', 'Fadnavis': 'Indian politician Devendra Fadnavis', 'Awadesh': 'Indian engineer Awdhesh Singh', 'Awdhesh': 'Indian engineer Awdhesh Singh', 'Khalistanis': 'Sikh separatist movement', 'madheshi': 'Madheshi','Stupead': 'stupid', 'narcissit': 'narcissist', } def clean_latex(text): text = re.sub(r'\[math\]', ' LaTex math ', text) text = re.sub(r'\[\/math\]', ' LaTex math ', text) pattern_to_sub = { r'\\mathrm': ' LaTex math mode ', r'\\mathbb': ' LaTex math mode ', r'\\boxed': ' LaTex equation ', r'\\begin': ' LaTex equation ', r'\\end': ' LaTex equation ', r'\\left': ' LaTex equation ', r'\\right': ' LaTex equation ', r'\\(over|under)brace': ' LaTex equation ', r'\\text': ' LaTex equation ', r'\\vec': ' vector ', r'\\var': ' variable ', r'\\theta': ' theta ', r'\\mu': ' average ', r'\\min': ' minimum ', r'\\max': ' maximum ', r'\\sum': ' + ', r'\\times': ' * ', r'\\cdot': ' * ', r'\\hat': ' ^ ', r'\\frac': ' / ', r'\\div': ' / ', r'\\sin': ' Sine ', r'\\cos': ' Cosine ', r'\\tan': ' Tangent ', r'\\infty': ' infinity ', r'\\int': ' integer ', r'\\in': ' in ', } pattern_dict = {k.strip('\'): v for k, v in pattern_to_sub.items() } patterns = pattern_to_sub.keys() pattern_re = re.compile('(%s)' % '|'.join(patterns)) def _replace(match): try: word = pattern_dict.get(match.group(0 ).strip('\')) except KeyError: word = match.group(0) print('!!Error: Could Not Find Key: {}'.format(word)) return word return pattern_re.sub(_replace, text) def correct_spelling(s, dic): for key, corr in dic.items() : s = s.replace(key, dic[key]) return s def tweet_clean(text): text = re.sub(r'[^A-Za-z0-9!.,?$'"]+', ' ', text) return text def tokenizer(s): s = clean_latex(s) s = 
correct_spelling(s, mispell_dict) s = tweet_clean(s) return tknzr.tokenize(s )<compute_test_metric>
model.fit(X_scaled,y_train )
Titanic - Machine Learning from Disaster
14,356,555
def find_threshold(y_t, y_p, floor=-1., ceil=1., steps=41): thresholds = np.linspace(floor, ceil, steps) best_val = 0.0 for threshold in thresholds: val_predict =(y_p > threshold) score = f1_score(y_t, val_predict) if score > best_val: best_threshold = threshold best_val = score return best_threshold<split>
predictions = model.predict(X_testscaled )
Titanic - Machine Learning from Disaster
14,356,555
def splits_cv(data, cv, y=None): for indices in cv.split(range(len(data)) , y): (train_data, val_data)= tuple([data.examples[i] for i in index] for index in indices) yield tuple(Dataset(d, data.fields)for d in(train_data, val_data)if d )<load_from_csv>
svcacc = round(accuracy_score(predictions, y_test)* 100, 2) print(svcacc,'%' )
Titanic - Machine Learning from Disaster
14,356,555
skf = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = seed) scores = pd.read_csv('.. /input/train.csv') target = scores.target.values scores = scores.set_index('qid') scores.drop(columns=['question_text'], inplace=True) subm = pd.read_csv('.. /input/test.csv') subm = subm.set_index('qid') subm.drop(columns='question_text', inplace=True )<define_variables>
testpredictions = model.predict(pred_scale )
Titanic - Machine Learning from Disaster
14,356,555
txt_field = data.Field(sequential=True, tokenize=tokenizer, include_lengths=False, use_vocab=True) label_field = data.Field(sequential=False, use_vocab=False, is_target=True) qid_field = data.RawField() train_fields = [ ('qid', qid_field), ('question_text', txt_field), ('target', label_field) ] test_fields = [ ('qid', qid_field), ('question_text', txt_field), ]<load_from_csv>
output2 = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': testpredictions}) output2.info() output2['Survived'] = output['Survived'].astype('int64') output2.head()
Titanic - Machine Learning from Disaster
14,356,555
train_ds = data.TabularDataset(path=os.path.join(path, 'train.csv'), format='csv', fields=train_fields, skip_header=True) test_ds = data.TabularDataset(path=os.path.join(path, 'test.csv'), format='csv', fields=test_fields, skip_header=True )<define_variables>
output['Survived'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
test_ds.fields['qid'].is_target = False train_ds.fields['qid'].is_target = False<define_variables>
output2['Survived'].value_counts()
Titanic - Machine Learning from Disaster
14,356,555
<choose_model_class><EOS>
output2.to_csv('my_submission.csv', index=False )
Titanic - Machine Learning from Disaster
13,420,831
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_train_metric>
%matplotlib inline
Titanic - Machine Learning from Disaster
13,420,831
def OOF_preds(test_df, target, embs_vocab, epochs = 4, alias='prediction', cv=skf, loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean', pos_weight=(torch.Tensor([2.7])).to(device)) , bs = 512, embedding_dim = 300, bidirectional=True, n_hidden = 64): print('Embedding vocab size: ', embs_vocab.size() [0]) test_df[alias] = 0. for train, _ in splits_cv(train_ds, cv, target): train = data.BucketIterator(train, batch_size=bs, device=device, sort_key=lambda x: len(x.question_text), sort_within_batch=True, shuffle=True, repeat=False) model = RecNN(embs_vocab, n_hidden, dropout=0., bidirectional=bidirectional ).to(device) opt = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), 1e-3, betas=(0.75, 0.999), eps=1e-08, weight_decay=0, amsgrad=True) print(' ') for epoch in range(epochs): y_true_train = np.empty(0) y_pred_train = np.empty(0) total_loss_train = 0 model.train() for(_, x), y in train: y = y.type(dtype=torch.cuda.FloatTensor) opt.zero_grad() pred = model(x) loss = loss_fn(pred, y) loss.backward() opt.step() y_true_train = np.concatenate([y_true_train, y.cpu().data.numpy() ], axis = 0) y_pred_train = np.concatenate([y_pred_train, pred.cpu().squeeze().data.numpy() ], axis = 0) total_loss_train += loss.item() tacc = f1_score(y_true_train, y_pred_train>0) tloss = total_loss_train/len(train) print(f'Epoch {epoch+1}: Train loss: {tloss:.4f}, F1: {tacc:.4f}') preds = torch.empty(0) qids = [] for(y, x), _ in test_loader: pred = model(x) qids.append(y) preds = torch.cat([preds, pred.detach().cpu() ]) preds = torch.sigmoid(preds ).numpy() qids = [item for sublist in qids for item in sublist] test_df.at[qids, alias] = test_df.loc[qids][alias].values + preds/n_folds gc.enable() ; del train gc.collect() ; gc.enable() ; del embs_vocab, model gc.collect() ; return test_df<load_pretrained>
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize, StandardScaler
Titanic - Machine Learning from Disaster
13,420,831
def preload_gnews() : vector_google = KeyedVectors.load_word2vec_format(os.path.join(emb_path, embs_file['gnews']), binary=True) stoi = {s:idx for idx, s in enumerate(vector_google.index2word)} itos = {idx:s for idx, s in enumerate(vector_google.index2word)} cache='cache/' path_cache = os.path.join(cache, 'GoogleNews-vectors-negative300.bin') file_suffix = '.pt' path_pt = path_cache + file_suffix torch.save(( itos, stoi, torch.from_numpy(vector_google.vectors), vector_google.vectors.shape[1]), path_pt) embs_file = {} embs_file['wiki'] = 'wiki-news-300d-1M/wiki-news-300d-1M.vec' embs_file['gnews'] = 'GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin' embs_file['glove'] = 'glove.840B.300d/glove.840B.300d.txt' embs_file['gram'] = 'paragram_300_sl999/paragram_300_sl999.txt' embs_vocab = {} !mkdir cache preload_gnews() vec = vocab.Vectors(os.path.join(emb_path, embs_file['gnews']), cache='cache/') txt_field.build_vocab(train_ds, test_ds, max_size=350000, vectors=vec) embs_vocab['gnews'] = train_ds.fields['question_text'].vocab.vectors !rm -r cache vec = vocab.Vectors(os.path.join(emb_path, embs_file['wiki']), cache='cache/') txt_field.build_vocab(train_ds, test_ds, max_size=350000, vectors=vec) embs_vocab['wiki'] = train_ds.fields['question_text'].vocab.vectors vec = vocab.Vectors(os.path.join(emb_path, embs_file['glove']), cache='cache/') txt_field.build_vocab(train_ds, test_ds, max_size=350000, vectors=vec) embs_vocab['glove'] = train_ds.fields['question_text'].vocab.vectors print('Embedding loaded, vocab size: ', embs_vocab['glove'].size() [0]) !rm -r cache gc.enable() del vec gc.collect() ;<feature_engineering>
from sklearn.metrics import mean_absolute_error as MAE from sklearn.model_selection import cross_val_score from sklearn.preprocessing import OneHotEncoder
Titanic - Machine Learning from Disaster
13,420,831
def fill_unknown(vector): data = torch.zeros_like(vector) data.copy_(vector) idx = torch.nonzero(data.sum(dim=1)== 0) data[idx] = embs_vocab['glove'][idx] idx = torch.nonzero(data.sum(dim=1)== 0) data[idx] = embs_vocab['wiki'][idx] idx = torch.nonzero(data.sum(dim=1)== 0) data[idx] = embs_vocab['gnews'][idx] return data<compute_train_metric>
from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import LogisticRegression from sklearn import svm from sklearn.neighbors import KNeighborsClassifier from sklearn.ensemble import GradientBoostingClassifier
Titanic - Machine Learning from Disaster
13,420,831
%%time subm = OOF_preds(subm, target, epochs = 5, alias='wiki', embs_vocab=fill_unknown(embs_vocab['wiki']), cv = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = seed), embedding_dim = 300, bidirectional=True, n_hidden = 64 )<compute_train_metric>
from catboost import CatBoostClassifier, Pool, cv
Titanic - Machine Learning from Disaster
13,420,831
%%time subm = OOF_preds(subm, target, epochs = 5, alias='glove', embs_vocab=fill_unknown(embs_vocab['glove']), cv = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = seed+15), bs = 512, embedding_dim = 300, bidirectional=True, n_hidden = 64 )<compute_train_metric>
from sklearn.ensemble import VotingClassifier
Titanic - Machine Learning from Disaster
13,420,831
%%time subm = OOF_preds(subm, target, epochs = 5, alias='gnews', embs_vocab=fill_unknown(embs_vocab['gnews']), cv = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = seed+25), bs = 512, embedding_dim = 300, bidirectional=True, n_hidden = 64 )<feature_engineering>
from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV
Titanic - Machine Learning from Disaster
13,420,831
submission = np.mean(subm.values, axis = 1 )<save_to_csv>
train_data_path = ".. /input/titanic/train.csv" test_data_path = ".. /input/titanic/test.csv"
Titanic - Machine Learning from Disaster
13,420,831
subm['prediction'] = submission > 0.55 subm.prediction = subm.prediction.astype('int') subm.to_csv('submission.csv', columns=['prediction'] )<import_modules>
train_data = pd.read_csv(train_data_path) test_data = pd.read_csv(test_data_path )
Titanic - Machine Learning from Disaster
13,420,831
tqdm.pandas(desc='Progress') <define_variables>
num_var = ['Age', 'SibSp', 'Parch', 'Fare'] cat_var = ['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked']
Titanic - Machine Learning from Disaster
13,420,831
embed_size = 300 max_features = 120000 maxlen = 70 batch_size = 512 n_epochs = 5 n_splits = 5 SEED = 10 debug =0<choose_model_class>
df_num = train_data[num_var] df_cat = train_data[cat_var]
Titanic - Machine Learning from Disaster
13,420,831
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum' )<set_options>
pd.pivot_table(train_data, index='Survived', values = num_var )
Titanic - Machine Learning from Disaster
13,420,831
def seed_everything(seed=10): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()<features_selection>
[i for i in cat_var if i not in ['Survived', 'Ticket']]
Titanic - Machine Learning from Disaster
13,420,831
def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300] embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.005838499,0.48782197 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector else: embedding_vector = embeddings_index.get(word.capitalize()) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.0053247833,0.49346462 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<feature_engineering>
train_data['cabin_count'] = train_data.Cabin.apply(lambda x: 0 if pd.isna(x)else len(x.split())) train_data['cabin_count'].value_counts()
Titanic - Machine Learning from Disaster
13,420,831
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab def known_contractions(embed): known = [] for contract in contraction_mapping: if contract in embed: known.append(contract) return known def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x def unknown_punct(embed, punct): unknown = '' for p in punct: if p not in embed: unknown += p unknown += ' ' return unknown def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def add_lower(embedding, vocab): count = 0 for word in vocab: if word in embedding and word.lower() not in embedding: embedding[word.lower() ] = embedding[word] count += 1 print(f"Added {count} words to embedding") <define_variables>
pd.pivot_table(train_data, index='Survived', columns='cabin_count', values='Ticket', aggfunc='count' )
Titanic - Machine Learning from Disaster
13,420,831
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: if punct in x: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): if bool(re.search(r'\d', x)) : x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": 
"ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 
'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text) <concatenate>
train_data['cabin_adv'] = train_data.Cabin.apply(lambda x: str(x)[0]) train_data['cabin_adv'].value_counts()
Titanic - Machine Learning from Disaster
13,420,831
def parallelize_apply(df,func,colname,num_process,newcolnames): pool =Pool(processes=num_process) arraydata = pool.map(func,tqdm(df[colname].values)) pool.close() newdf = pd.DataFrame(arraydata,columns = newcolnames) df = pd.concat([df,newdf],axis=1) return df def parallelize_dataframe(df, func): df_split = np.array_split(df, 4) pool = Pool(4) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def count_regexp_occ(regexp="", text=None): return len(re.findall(regexp, text)) def add_features(df): df['question_text'] = df['question_text'].progress_apply(lambda x:str(x)) df["lower_question_text"] = df["question_text"].apply(lambda x: x.lower()) df['total_length'] = df['question_text'].progress_apply(len) df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_words'] = df.question_text.str.count('\S+') df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df def load_and_prec() : if debug: train_df = pd.read_csv(".. /input/train.csv")[:80000] test_df = pd.read_csv(".. /input/test.csv")[:20000] else: train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. 
/input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train = parallelize_dataframe(train_df, add_features) test = parallelize_dataframe(test_df, add_features) train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ features = train[['num_unique_words','words_vs_unique']].fillna(0) test_features = test[['num_unique_words','words_vs_unique']].fillna(0) ss = StandardScaler() pc = PCA(n_components=5) ss.fit(np.vstack(( features, test_features))) features = ss.transform(features) test_features = ss.transform(test_features) tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] features = features[trn_idx] return train_X, test_X, train_y, features, test_features, tokenizer.word_index <categorify>
pd.pivot_table(train_data, index='Survived', columns='cabin_adv', values='Ticket', aggfunc='count' )
Titanic - Machine Learning from Disaster
13,420,831
start = time.time() x_train, x_test, y_train, features, test_features, word_index = load_and_prec() print(time.time() -start )<normalization>
train_data['numeric_ticket'] = train_data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0) train_data['numeric_ticket']
Titanic - Machine Learning from Disaster
13,420,831
seed_everything() if debug: paragram_embeddings = np.random.randn(120000,300) glove_embeddings = np.random.randn(120000,300) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0) else: glove_embeddings = load_glove(word_index) paragram_embeddings = load_para(word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0 )<choose_model_class>
pd.pivot_table(train_data, index='Survived', columns='numeric_ticket', values='Ticket', aggfunc='count' )
Titanic - Machine Learning from Disaster
13,420,831
class CyclicLR(object):
    """Cyclical learning-rate scheduler (Smith, 2017), port of the Keras CLR callback.

    Oscillates each param group's lr between ``base_lr`` and ``max_lr`` over
    ``2 * step_size`` batch iterations.  Modes:
      - 'triangular':   constant amplitude each cycle
      - 'triangular2':  amplitude halves every cycle
      - 'exp_range':    amplitude decays by ``gamma ** iteration``
    Call :meth:`batch_step` once per training batch (not per epoch).
    """

    def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,
                 step_size=2000, mode='triangular', gamma=1.,
                 scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(type(optimizer).__name__))
        self.optimizer = optimizer
        # base_lr / max_lr may be scalars (broadcast to all param groups) or
        # per-group sequences whose length must match optimizer.param_groups.
        if isinstance(base_lr, list) or isinstance(base_lr, tuple):
            if len(base_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} base_lr, got {}".format(
                    len(optimizer.param_groups), len(base_lr)))
            self.base_lrs = list(base_lr)
        else:
            self.base_lrs = [base_lr] * len(optimizer.param_groups)
        if isinstance(max_lr, list) or isinstance(max_lr, tuple):
            if len(max_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} max_lr, got {}".format(
                    len(optimizer.param_groups), len(max_lr)))
            self.max_lrs = list(max_lr)
        else:
            self.max_lrs = [max_lr] * len(optimizer.param_groups)
        self.step_size = step_size
        # A custom scale_fn overrides `mode`; without one, `mode` must be known.
        if mode not in ['triangular', 'triangular2', 'exp_range'] \
                and scale_fn is None:
            raise ValueError('mode is invalid and scale_fn is None')
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        # Initialise the optimizer's lr immediately (iteration 0 by default).
        self.batch_step(last_batch_iteration + 1)
        self.last_batch_iteration = last_batch_iteration

    def batch_step(self, batch_iteration=None):
        """Advance one batch iteration and write the new lr into each param group."""
        if batch_iteration is None:
            batch_iteration = self.last_batch_iteration + 1
        self.last_batch_iteration = batch_iteration
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def _triangular_scale_fn(self, x):
        # Constant amplitude.
        return 1.

    def _triangular2_scale_fn(self, x):
        # Amplitude halves every cycle: 1, 1/2, 1/4, ...
        return 1 / (2. ** (x - 1))

    def _exp_range_scale_fn(self, x):
        # Exponential decay per iteration (scale_mode == 'iterations').
        return self.gamma ** (x)

    def get_lr(self):
        """Return the list of learning rates for the current iteration, one per param group."""
        step_size = float(self.step_size)
        # cycle counts completed half-periods; x is the triangular wave in [0, 1].
        cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))
        x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)
        lrs = []
        param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)
        for param_group, base_lr, max_lr in param_lrs:
            base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))
            if self.scale_mode == 'cycle':
                lr = base_lr + base_height * self.scale_fn(cycle)
            else:
                lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)
            lrs.append(lr)
        return lrs
# NOTE(review): ratios of hard-coded counts (presumably class splits from the
# pivot/value_counts above) — verify the magic numbers against the data.
print(142/88, 407/254)
Titanic - Machine Learning from Disaster
13,420,831
class MyDataset(Dataset):
    """Wrap a (data, target) dataset so each item also carries its own index.

    The index lets downstream code scatter per-sample predictions back into
    position even when the DataLoader shuffles.
    """

    def __init__(self, dataset):
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        sample, label = self.dataset[index]
        return sample, label, index
# Extract the honorific between ',' and '.' in the passenger name
# (e.g. ' Mr', ' Mrs' — note the leading space is kept).
train_data['name_title'] = train_data.Name.apply(lambda x: x.split(',', 1)[1].split('.', 1)[0])
train_data['name_title']
Titanic - Machine Learning from Disaster
13,420,831
def pytorch_model_run_cv(x_train, y_train, features, x_test, model_obj, feats=False, clip=True):
    """Train a fresh copy of ``model_obj`` on each stratified K-fold split.

    Args:
        x_train: padded token-id matrix for training questions.
        y_train: binary targets.
        features: extra engineered feature matrix, used when ``feats`` is True.
        x_test:  padded token-id matrix for the test set.
        model_obj: template nn.Module; deep-copied per fold.
        feats: if True, the model is called as ``model([tokens, features])``.
        clip:  if True, gradient norms are clipped to 1.

    Returns:
        (train_preds, test_preds): out-of-fold sigmoid probabilities for the
        training set, and test probabilities averaged over folds.

    Relies on module-level globals: seed_everything, n_splits, SEED,
    batch_size, n_epochs, test_loader, test_features, sigmoid, CyclicLR,
    MyDataset.  Requires CUDA.
    """
    seed_everything()
    avg_losses_f = []
    avg_val_losses_f = []
    train_preds = np.zeros((len(x_train)))
    test_preds = np.zeros((len(x_test)))
    splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED).split(x_train, y_train))
    for i, (train_idx, valid_idx) in enumerate(splits):
        # Different deterministic seed per fold.
        seed_everything(i * 1000 + i)
        x_train = np.array(x_train)
        y_train = np.array(y_train)
        if feats:
            features = np.array(features)
        x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long).cuda()
        y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()
        if feats:
            kfold_X_features = features[train_idx.astype(int)]
            kfold_X_valid_features = features[valid_idx.astype(int)]
        x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long).cuda()
        y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()
        # Fresh, identically-initialised model per fold.
        model = copy.deepcopy(model_obj)
        model.cuda()
        # Sum-reduced BCE on logits; divided by len(loader) below, so the
        # reported loss is per-batch-sum averaged over batches.
        loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum')
        step_size = 300
        base_lr, max_lr = 0.001, 0.003
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr)
        scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr,
                             step_size=step_size, mode='exp_range', gamma=0.99994)
        # MyDataset yields (x, y, dataset_index); the index is used to place
        # validation predictions correctly under a shuffled loader.
        train = MyDataset(torch.utils.data.TensorDataset(x_train_fold, y_train_fold))
        valid = MyDataset(torch.utils.data.TensorDataset(x_val_fold, y_val_fold))
        train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)
        print(f'Fold {i + 1}')
        for epoch in range(n_epochs):
            start_time = time.time()
            model.train()
            avg_loss = 0.
            # NOTE(review): this inner `i` shadows the fold index above; the
            # fold index is not used again inside the loop, so behavior is
            # unaffected, but it is fragile — verify before refactoring.
            for i, (x_batch, y_batch, index) in enumerate(train_loader):
                if feats:
                    f = kfold_X_features[index]
                    y_pred = model([x_batch, f])
                else:
                    y_pred = model(x_batch)
                if scheduler:
                    scheduler.batch_step()
                loss = loss_fn(y_pred, y_batch)
                optimizer.zero_grad()
                loss.backward()
                if clip:
                    nn.utils.clip_grad_norm_(model.parameters(), 1)
                optimizer.step()
                avg_loss += loss.item() / len(train_loader)
            model.eval()
            valid_preds_fold = np.zeros((x_val_fold.size(0)))
            test_preds_fold = np.zeros((len(x_test)))
            avg_val_loss = 0.
            for i, (x_batch, y_batch, index) in enumerate(valid_loader):
                if feats:
                    f = kfold_X_valid_features[index]
                    y_pred = model([x_batch, f]).detach()
                else:
                    y_pred = model(x_batch).detach()
                avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
                valid_preds_fold[index] = sigmoid(y_pred.cpu().numpy())[:, 0]
            elapsed_time = time.time() - start_time
            print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format(
                epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time))
        avg_losses_f.append(avg_loss)
        avg_val_losses_f.append(avg_val_loss)
        # Predict the test set with the model from the final epoch of this fold.
        # NOTE(review): uses the global `test_loader` / `test_features`, not
        # the `x_test` argument directly — verify they describe the same data.
        for i, (x_batch,) in enumerate(test_loader):
            if feats:
                f = test_features[i * batch_size:(i + 1) * batch_size]
                y_pred = model([x_batch, f]).detach()
            else:
                y_pred = model(x_batch).detach()
            test_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
        train_preds[valid_idx] = valid_preds_fold
        test_preds += test_preds_fold / len(splits)
    print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f), np.average(avg_val_losses_f)))
    return train_preds, test_preds
# Frequency of each extracted honorific.
train_data['name_title'].value_counts()
Titanic - Machine Learning from Disaster
13,420,831
class Alex_NeuralNet_Meta(nn.Module):
    """BiLSTM -> BiGRU text classifier that also consumes engineered features.

    ``forward`` expects ``x = [token_ids, extra_features]``; the final linear
    head sees the concatenation of the GRU's last hidden states, average pool,
    max pool, and the extra features (width ``hidden_size*6 + features.shape[1]``).

    Relies on module-level globals: max_features, embed_size, embedding_matrix,
    features.  Requires CUDA (forward moves the feature tensor with .cuda()).
    """

    def __init__(self, hidden_size, lin_size, embedding_matrix=embedding_matrix):
        super(Alex_NeuralNet_Meta, self).__init__()
        self.hidden_size = hidden_size
        drp = 0.1
        # Frozen pre-trained embeddings.
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        # Dropout2d on a reshaped view drops whole embedding channels
        # (spatial-dropout style) rather than individual activations.
        self.embedding_dropout = nn.Dropout2d(0.1)
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        # Explicit init: zero biases, Kaiming for input weights, orthogonal
        # for recurrent weights.
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        for name, param in self.gru.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)
        self.linear = nn.Linear(hidden_size * 6 + features.shape[1], lin_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(drp)
        self.out = nn.Linear(lin_size, 1)

    def forward(self, x):
        # x[0]: LongTensor of token ids; x[1]: engineered feature matrix.
        h_embedding = self.embedding(x[0])
        # Reshape so Dropout2d treats the embedding dim as channels.
        embeddings = h_embedding.unsqueeze(2)
        embeddings = embeddings.permute(0, 3, 2, 1)
        embeddings = self.embedding_dropout(embeddings)
        embeddings = embeddings.permute(0, 3, 2, 1)
        h_embedding = embeddings.squeeze(2)
        h_lstm, _ = self.lstm(h_embedding)
        h_gru, hh_gru = self.gru(h_lstm)
        # Concatenate the two directions' final hidden states.
        hh_gru = hh_gru.view(-1, 2 * self.hidden_size)
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)
        f = torch.tensor(x[1], dtype=torch.float).cuda()
        conc = torch.cat((hh_gru, avg_pool, max_pool, f), 1)
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        out = self.out(conc)  # raw logit; pair with BCEWithLogitsLoss
        return out
def feature_engineer_df(df):
    """Add engineered Titanic features to *df* in place (returns None).

    Adds: cabin_count, cabin_adv (first char of Cabin, 'n' for NaN),
    numeric_ticket, name_title (honorific, keeps its leading space),
    age_missing / fare_missing indicator flags, norm_sibsp and norm_fare
    (log1p-style transforms).  Imputes Age with the mean and Fare with the
    median, and drops rows whose Embarked is missing.
    """
    print("adding column 'cabin_count'...")
    df['cabin_count'] = df.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split()))
    print("adding column 'cabin_adv'...")
    df['cabin_adv'] = df.Cabin.apply(lambda x: str(x)[0])
    print("adding column 'numeric_ticket'...")
    df['numeric_ticket'] = df.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
    print("adding column 'name_title'...")
    df['name_title'] = df.Name.apply(lambda x: x.split(',', 1)[1].split('.', 1)[0])
    print('imputing null values...')
    print("adding column 'age missing'...")
    df['age_missing'] = df.Age.apply(lambda x: 1 if pd.isnull(x) else 0)
    df.Age = df.Age.fillna(df.Age.mean())
    print("adding column 'fare_missing'...")
    df['fare_missing'] = df.Fare.apply(lambda x: 1 if pd.isnull(x) else 0)
    df.Fare = df.Fare.fillna(df.Fare.median())
    print("dropping rows with missing 'Embarked'...")
    print(f"{np.count_nonzero(df['Embarked'].isnull().values)} rows with missing 'Embarked' will be dropped")
    # BUG FIX: the original did `df.Embarked = df.dropna(subset=['Embarked'])`,
    # which assigns an entire DataFrame to a single column instead of removing
    # the rows.  Drop them in place so the caller's frame actually shrinks.
    df.dropna(subset=['Embarked'], inplace=True)
    print("normalizing 'sibsp'...")
    df['norm_sibsp'] = np.log(df.SibSp + 1)
    print("normalizing 'fare'...")
    df['norm_fare'] = np.log(df.Fare + 1)
Titanic - Machine Learning from Disaster
13,420,831
def sigmoid(x):
    """Element-wise logistic function mapping logits to (0, 1)."""
    return 1 / (1 + np.exp(-x))

# Re-seed, then build a non-shuffled GPU DataLoader over the padded test ids;
# consumed later by pytorch_model_run_cv via the global `test_loader`.
seed_everything()
x_test_cuda = torch.tensor(x_test, dtype=torch.long).cuda()
test = torch.utils.data.TensorDataset(x_test_cuda)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
def solve_mismatch(df_train, df_test):
    """One-hot encode train and test together so their dummy columns match.

    Marks each frame with a ``train_data`` flag (mutating the inputs), adds a
    dummy ``Survived`` to the test frame, concatenates, string-ifies Pclass so
    it is dummy-encoded, then splits the encoded frame back apart.

    Returns:
        (new_train, new_test): encoded frames; new_train keeps 'Survived',
        neither keeps the temporary 'train_data' flag.
    """
    df_train['train_data'] = 1
    df_test['train_data'] = 0
    df_test['Survived'] = 0  # placeholder so the frames align for concat
    combined_df = pd.concat([df_train, df_test])
    # Treat passenger class as categorical so get_dummies expands it.
    combined_df.Pclass = combined_df.Pclass.astype(str)
    combined_dummy = pd.get_dummies(combined_df[['PassengerId', 'Survived', 'Pclass', 'Sex', 'Age',
                                                 'SibSp', 'Parch', 'norm_fare', 'Embarked', 'cabin_adv',
                                                 'cabin_count', 'numeric_ticket', 'name_title',
                                                 'age_missing', 'fare_missing', 'train_data']])
    # FIX: the original called .drop(..., inplace=True) on boolean-mask slices,
    # which raises SettingWithCopyWarning; chaining .drop on the slice produces
    # the identical result without mutating a view.
    new_train = combined_dummy[combined_dummy['train_data'] == 1].drop(columns=['train_data'])
    new_test = combined_dummy[combined_dummy['train_data'] == 0].drop(columns=['train_data', 'Survived'])
    return (new_train, new_test)
Titanic - Machine Learning from Disaster
13,420,831
# Cross-validated training of the LSTM+GRU meta model (hidden=70, lin=16) with
# the engineered features enabled; yields OOF train preds and averaged test preds.
train_preds, test_preds = pytorch_model_run_cv(x_train, y_train, features, x_test, Alex_NeuralNet_Meta(70, 16, embedding_matrix=embedding_matrix), feats=True)
def ft_splitted(df, drop=('Survived', 'PassengerId')):
    """Split *df* into a feature frame and a target series.

    Args:
        df: input DataFrame.
        drop: columns removed from the feature frame; the FIRST entry is also
            used as the target column.

    Returns:
        [X, y]: features without the dropped columns, and the target series.

    Note: the default is a tuple rather than the original mutable list — a
    mutable default argument is a Python anti-pattern; behavior is unchanged.
    """
    X = df.drop(list(drop), axis=1)
    y = df[drop[0]]
    return [X, y]
Titanic - Machine Learning from Disaster
13,420,831
def bestThresshold(y_train, train_preds):
    """Grid-search the decision threshold in [0.1, 0.5] that maximises F1.

    Scans thresholds in steps of 0.01, prints the winner, and returns
    (best_threshold, best_f1).
    """
    preds = np.array(train_preds)
    best_thr, best_f1 = 0, 0
    for thr in tqdm(np.arange(0.1, 0.501, 0.01)):
        score = f1_score(y_train, preds > thr)
        if score > best_f1:
            best_thr, best_f1 = thr, score
    print('best threshold is {:.4f} with F1 score: {:.4f}'.format(best_thr, best_f1))
    return best_thr, best_f1

delta, _ = bestThresshold(y_train, train_preds)
# Mutates train_data in place, adding the engineered columns.
feature_engineer_df(train_data)
Titanic - Machine Learning from Disaster
13,420,831
# Build the Kaggle submission: re-read test qids (truncated in debug mode) and
# binarise the averaged test predictions at the tuned threshold `delta`.
if debug:
    df_test = pd.read_csv(".. /input/test.csv")[:20000]
else:
    df_test = pd.read_csv(".. /input/test.csv")
submission = df_test[['qid']].copy()
submission['prediction'] = (test_preds > delta).astype(int)
submission.to_csv('submission.csv', index=False)
# How many training rows had a missing Age before imputation.
train_data['age_missing'].value_counts()
Titanic - Machine Learning from Disaster
13,420,831
from numpy import array from numpy import asarray from numpy import zeros from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers import LSTM from keras.layers import Dense from keras.layers import Flatten from keras.layers import Dropout from keras.layers import Bidirectional from keras.layers import CuDNNGRU from keras.layers.embeddings import Embedding<load_from_csv>
# Apply the same in-place feature engineering to the test set.
feature_engineer_df(test_data)
Titanic - Machine Learning from Disaster
13,420,831
# Load the Quora training data.
# NOTE(review): the path contains a space ('.. /input') — looks like an
# extraction artifact of '../input'; verify before running.
df_train = pd.read_csv(".. /input/train.csv")
# How many test rows had a missing Age before imputation.
test_data['age_missing'].value_counts()
Titanic - Machine Learning from Disaster
13,420,831
# Load the Quora test data (same '.. /input' path caveat as the train load).
df_test = pd.read_csv(".. /input/test.csv")
# Replace both frames with their jointly one-hot-encoded versions.
train_data, test_data = solve_mismatch(train_data, test_data)
Titanic - Machine Learning from Disaster
13,420,831
# Build the word index from the training questions only.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(df_train['question_text'])
# Split the encoded training frame into features X_train and target y_train.
X_train, y_train = ft_splitted(train_data)
Titanic - Machine Learning from Disaster
13,420,831
# +1 because Keras reserves index 0 for padding.
vocab_size = len(tokenizer.word_index) + 1
def scale_data(X):
    """Return a copy of X with the numeric columns standardised to zero
    mean / unit variance; all other columns are left untouched."""
    numeric_cols = ['Age', 'SibSp', 'Parch', 'norm_fare']
    scaled = X.copy()
    scaled[numeric_cols] = StandardScaler().fit_transform(scaled[numeric_cols])
    return scaled
Titanic - Machine Learning from Disaster
13,420,831
# Convert training questions to integer id sequences.
ts_train = tokenizer.texts_to_sequences(df_train['question_text'])
# Standardise the numeric training features.
X_train_scaled = scale_data(X_train)
X_train_scaled.head()
Titanic - Machine Learning from Disaster
13,420,831
# Convert test questions to integer id sequences (same fitted tokenizer).
ts_test = tokenizer.texts_to_sequences(df_test['question_text'])
# Standardise the test features; PassengerId is dropped first (kept aside for
# the submission).  NOTE(review): scale_data re-fits the scaler on the test
# set rather than reusing the train statistics — verify this is intended.
X_test_scaled = scale_data(test_data.drop(['PassengerId'], axis=1))
X_test_scaled.head()
Titanic - Machine Learning from Disaster
13,420,831
# Pad/truncate every training sequence to exactly 135 tokens (zeros appended).
X_train_vectorized = pad_sequences(ts_train, maxlen=135, padding='post')
def get_model_accuracy(model, cv=5):
    """Cross-validate *model* on the global X_train_scaled / y_train, print the
    per-fold scores and the mean accuracy, and return the mean."""
    scores = cross_val_score(model, X_train_scaled, y_train, cv=cv)
    mean_score = scores.mean()
    print(scores)
    print(f"{model.__class__.__name__}({mean_score * 100:.2f}%)")
    return mean_score
Titanic - Machine Learning from Disaster
13,420,831
# Pad/truncate every test sequence to the same 135-token length.
X_test_vectorized = pad_sequences(ts_test, maxlen=135, padding='post')
# Baseline Gaussian Naive Bayes with defaults, scored by 5-fold CV.
GNB = GaussianNB()
get_model_accuracy(GNB)
Titanic - Machine Learning from Disaster
13,420,831
# Binary insincere-question labels.
y_train = df_train['target']
# Baseline logistic regression with defaults, scored by 5-fold CV.
LR = LogisticRegression()
get_model_accuracy(LR)
Titanic - Machine Learning from Disaster
13,420,831
# Load the 300-d GloVe vectors into a dict: token -> np.ndarray of 300 floats.
# FIXES vs. original: the file is opened with a context manager so the handle
# is closed even on error, and multi-word tokens are rebuilt with ' '.join
# (the original ''.join concatenated the pieces without separators).
# NOTE(review): the path contains a space ('.. /input') — looks like an
# extraction artifact of '../input'; verify before running.
embeddings_index = {}
with open('.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt', encoding='utf8') as f:
    for line in f:
        values = line.split()
        # Everything except the trailing 300 floats is the token itself.
        word = ' '.join(values[:-300])
        coefs = np.asarray(values[-300:], dtype='float32')
        embeddings_index[word] = coefs
print('Loaded %s word vectors.' % len(embeddings_index))
# Baseline SVM; probability=True enables predict_proba for soft voting later.
SVC = svm.SVC(probability=True)
get_model_accuracy(SVC)
Titanic - Machine Learning from Disaster
13,420,831
# Dense embedding matrix aligned with the tokenizer's word indices; rows for
# out-of-vocabulary words stay all-zero.
embedding_matrix = zeros((vocab_size, 300))
for word, i in tokenizer.word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# Baseline k-nearest-neighbours with defaults, scored by 5-fold CV.
KNN = KNeighborsClassifier()
get_model_accuracy(KNN)
Titanic - Machine Learning from Disaster
13,420,831
# Frozen GloVe embedding -> BiLSTM -> flatten -> dense head, single sigmoid
# output for binary classification.
model = Sequential()
e = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=135, trainable=False)
model.add(e)
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Flatten())
model.add(Dense(128, activation='sigmoid'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
print(model.summary())
# Baseline random forest (fixed seed for reproducibility), 5-fold CV.
RFC = RandomForestClassifier(random_state=42)
get_model_accuracy(RFC)
Titanic - Machine Learning from Disaster
13,420,831
# Train silently (verbose=0) for 3 epochs on the padded training sequences.
model.fit(X_train_vectorized, y_train, epochs=3, batch_size=1024, verbose=0)
# Baseline gradient boosting (fixed seed for reproducibility), 5-fold CV.
GB = GradientBoostingClassifier(random_state=42)
get_model_accuracy(GB)
Titanic - Machine Learning from Disaster
13,420,831
# Sigmoid probabilities for the test set.
predictions = model.predict(X_test_vectorized)
# Soft-voting ensemble over all six baseline models.
voting_clf_all = VotingClassifier(
    estimators=[('GNB', GNB), ('LR', LR), ('SVC', SVC), ('KNN', KNN), ('RFC', RFC), ('GB', GB)],
    voting='soft')
get_model_accuracy(voting_clf_all)
Titanic - Machine Learning from Disaster
13,420,831
# Binarise the sigmoid outputs at the chosen 0.33 threshold.
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `int` produces the same 0/1 integer array.
preds_class = (predictions > 0.33).astype(int)
# Soft-voting ensemble restricted to the three strongest baselines.
voting_clf_best_3 = VotingClassifier(
    estimators=[('SVC', SVC), ('RFC', RFC), ('KNN', KNN)],
    voting='soft')
get_model_accuracy(voting_clf_best_3)
Titanic - Machine Learning from Disaster
13,420,831
# Question ids for the submission file.
qid = df_test['qid']
def clf_performance(classifier):
    """Print a short tuning summary (name, best CV score, best params) for a
    fitted hyper-parameter search object such as GridSearchCV."""
    summary = (
        classifier.__class__.__name__,
        f"Best Score: {classifier.best_score_}",
        f"Best Parameters: {classifier.best_params_}",
    )
    for line in summary:
        print(line)
Titanic - Machine Learning from Disaster
13,420,831
# NOTE(review): `prediction` is not defined anywhere in this chunk — it is
# presumably a Series/DataFrame named 'prediction' built from `preds_class`
# in an unseen cell; verify before running.
submission_df = pd.concat([qid, prediction], axis=1)
# Exhaustive grid search over LogisticRegression hyper-parameters (parallel,
# 5-fold CV).  NOTE(review): 'l1' penalty is incompatible with the 'lbfgs'
# solver, so some grid cells will fail to fit — verify this is intended.
param_grid = {
    'random_state': [42],
    'max_iter': [100, 500, 2000],
    'penalty': ['l1', 'l2'],
    'C': np.logspace(-4, 4, 20),
    'solver': ['liblinear', 'lbfgs']
}
clf_LR = GridSearchCV(LR, param_grid=param_grid, cv=5, verbose=True, n_jobs=-1)
best_clf_LR = clf_LR.fit(X_train_scaled, y_train)
clf_performance(best_clf_LR)
Titanic - Machine Learning from Disaster
13,420,831
# Write the submission (the explicit `columns` argument is redundant — it
# lists all columns — but harmless).
submission_df.to_csv("submission.csv", columns=submission_df.columns, index=False)
# NOTE(review): `print_valid_params` is not defined in this chunk — presumably
# a helper that echoes/validates the winning parameter string; verify.
print_valid_params("'C': 11.288378916846883, 'max_iter': 100, 'penalty': 'l1', 'random_state': 42, 'solver': 'liblinear'")
Titanic - Machine Learning from Disaster