kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
6,488,543
# Optional tweet-cleaning pipeline; disabled by default.
PROCESS_TWEETS = False

if PROCESS_TWEETS:
    # Each step rewrites the 'text' column in place, in order.
    total['text'] = total['text'].apply(str.lower)
    total['text'] = total['text'].apply(
        lambda x: re.sub(r'https?://\S+|www\.\S+', '', x, flags=re.MULTILINE))
    for step in (remove_punctuation, remove_stopwords, remove_less_than,
                 remove_non_alphabet, spell_check):
        total['text'] = total['text'].apply(step)
# Load the Titanic training and test splits.
# Fix: the original paths contained a stray space ('.. /input'), an
# extraction artifact; corrected to the standard Kaggle input layout.
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

# Keep the test-set ids for building the submission file later.
PassengerId = test['PassengerId']

train.head(3)
Titanic - Machine Learning from Disaster
6,488,543
# Map of English contractions to their (sometimes ambiguous) expansions.
contractions = { "ain't": "am not / are not / is not / has not / have not", "aren't": "are not / am not", "can't": "cannot", "can't've": "cannot have", "'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "he had / he would", "he'd've": "he would have", "he'll": "he shall / he will", "he'll've": "he shall have / he will have", "he's": "he has / he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how has / how is / how does", "I'd": "I had / I would", "I'd've": "I would have", "I'll": "I shall / I will", "I'll've": "I shall have / I will have", "I'm": "I am", "I've": "I have", "isn't": "is not", "it'd": "it had / it would", "it'd've": "it would have", "it'll": "it shall / it will", "it'll've": "it shall have / it will have", "it's": "it has / it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she had / she would", "she'd've": "she would have", "she'll": "she shall / she will", "she'll've": "she shall have / she will have", "she's": "she has / she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as / so is", "that'd": "that would / that had", "that'd've": "that would have", "that's": "that has / that is", "there'd": "there had / there would", "there'd've": "there would have", "there's": "there has / there is", "they'd": "they had / they would", "they'd've": "they would have", "they'll": "they shall / they will", "they'll've": "they shall have / they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we had / we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what shall / what will", "what'll've": "what shall have / what will have", "what're": "what are", "what's": "what has / what is", "what've": "what have", "when's": "when has / when is", "when've": "when have", "where'd": "where did", "where's": "where has / where is", "where've": "where have", "who'll": "who shall / who will", "who'll've": "who shall have / who will have", "who's": "who has / who is", "who've": "who have", "why's": "why has / why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you had / you would", "you'd've": "you would have", "you'll": "you shall / you will", "you'll've": "you shall have / you will have", "you're": "you are", "you've": "you have" }

# Fix: Python regex alternation is leftmost-first, so without sorting
# longest-first, "can't've" would be matched by the shorter "can't"
# entry and leave a dangling "'ve". Keys are also escaped defensively.
contractions_re = re.compile('(%s)' % '|'.join(
    re.escape(key) for key in sorted(contractions, key=len, reverse=True)))

def expand_contractions(s, contractions=contractions):
    """Replace every known contraction in *s* with its expansion."""
    def replace(match):
        return contractions[match.group(0)]
    return contractions_re.sub(replace, s)

expand_contractions("can't stop won't stop")
# Titanic feature engineering applied to both splits.
full_data = [train, test]

# Length of the passenger's name — a weak proxy for social status.
train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)

# Cabin is NaN (a float) when missing; flag presence as 0/1.
train['Has_Cabin'] = train["Cabin"].apply(lambda x: 0 if type(x) == float else 1)
test['Has_Cabin'] = test["Cabin"].apply(lambda x: 0 if type(x) == float else 1)

for dataset in full_data:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
for dataset in full_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
for dataset in full_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')
for dataset in full_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'], 4)

for dataset in full_data:
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    # Impute missing ages with random draws from [mean-std, mean+std].
    age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std,
                                             size=age_null_count)
    # Fix: use .loc instead of the original chained indexing
    # dataset['Age'][np.isnan(...)] = ..., which assigns through a
    # temporary and can silently fail (SettingWithCopyWarning).
    dataset.loc[np.isnan(dataset['Age']), 'Age'] = age_null_random_list
    dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.cut(train['Age'], 5)

def get_title(name):
    """Extract the honorific (e.g. 'Mr', 'Miss') from a full name."""
    title_search = re.search(r'([A-Za-z]+)\.', name)  # raw string: '\.' is a literal dot
    if title_search:
        return title_search.group(1)
    return ""

for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)
for dataset in full_data:
    # Collapse rare honorifics and normalise French/abbreviated forms.
    dataset['Title'] = dataset['Title'].replace(
        ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev',
         'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in full_data:
    dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)
    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
    # Bucket fares and ages into the ordinal bins derived above.
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4
Titanic - Machine Learning from Disaster
6,488,543
# Expand contractions in every tweet (e.g. "can't" -> "cannot").
total['text'] = total['text'].map(expand_contractions)
# Remove identifier/raw-text columns and the helper binning columns that
# were only needed to derive the ordinal Fare/Age features.
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis=1)
train = train.drop(['CategoricalAge', 'CategoricalFare'], axis=1)
test = test.drop(drop_elements, axis=1)
Titanic - Machine Learning from Disaster
6,488,543
# clean(tweet): normalizes tweet text for the disaster-tweets task by
# expanding weather-station shorthand, hashtags, usernames and slang
# into plain English through a long ORDERED chain of regex
# substitutions (order matters, e.g. "w/e" must be replaced before
# "w/"), then applies it to the 'text' column of `total`.
# NOTE(review): several string literals below are broken across lines
# (e.g. "Clayton / Bryant") — this looks like an extraction artifact
# from the original notebook; confirm against the source kernel before
# running, as the code is not valid Python as laid out here.
def clean(tweet): tweet = re.sub(r"tnwx", "Tennessee Weather", tweet) tweet = re.sub(r"azwx", "Arizona Weather", tweet) tweet = re.sub(r"alwx", "Alabama Weather", tweet) tweet = re.sub(r"wordpressdotcom", "wordpress", tweet) tweet = re.sub(r"gawx", "Georgia Weather", tweet) tweet = re.sub(r"scwx", "South Carolina Weather", tweet) tweet = re.sub(r"cawx", "California Weather", tweet) tweet = re.sub(r"usNWSgov", "United States National Weather Service", tweet) tweet = re.sub(r"MH370", "Malaysia Airlines Flight 370", tweet) tweet = re.sub(r"okwx", "Oklahoma City Weather", tweet) tweet = re.sub(r"arwx", "Arkansas Weather", tweet) tweet = re.sub(r"lmao", "laughing my ass off", tweet) tweet = re.sub(r"amirite", "am I right", tweet) tweet = re.sub(r"w/e", "whatever", tweet) tweet = re.sub(r"w/", "with", tweet) tweet = re.sub(r"USAgov", "USA government", tweet) tweet = re.sub(r"recentlu", "recently", tweet) tweet = re.sub(r"Ph0tos", "Photos", tweet) tweet = re.sub(r"exp0sed", "exposed", tweet) tweet = re.sub(r"<3", "love", tweet) tweet = re.sub(r"amageddon", "armageddon", tweet) tweet = re.sub(r"Trfc", "Traffic", tweet) tweet = re.sub(r"WindStorm", "Wind Storm", tweet) tweet = re.sub(r"16yr", "16 year", tweet) tweet = re.sub(r"TRAUMATISED", "traumatized", tweet) tweet = re.sub(r"IranDeal", "Iran Deal", tweet) tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet) tweet = re.sub(r"camilacabello97", "camila cabello", tweet) tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet) tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet) tweet = re.sub(r"TrapMusic", "Trap Music", tweet) tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet) tweet = re.sub(r"PantherAttack", "Panther Attack", tweet) tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet) tweet = re.sub(r"socialnews", "social news", tweet) tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet) tweet = re.sub(r"ArtistsUnited", "Artists United", tweet) tweet = re.sub(r"ClaytonBryant", "Clayton 
Bryant", tweet) tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet) tweet = re.sub(r"justinbieber", "justin bieber", tweet) tweet = re.sub(r"Time2015", "Time 2015", tweet) tweet = re.sub(r"djicemoon", "dj icemoon", tweet) tweet = re.sub(r"LivingSafely", "Living Safely", tweet) tweet = re.sub(r"FIFA16", "Fifa 2016", tweet) tweet = re.sub(r"thisiswhywecanthavenicethings", "this is why we cannot have nice things", tweet) tweet = re.sub(r"bbcnews", "bbc news", tweet) tweet = re.sub(r"UndergroundRailraod", "Underground Railraod", tweet) tweet = re.sub(r"c4news", "c4 news", tweet) tweet = re.sub(r"MUDSLIDE", "mudslide", tweet) tweet = re.sub(r"NoSurrender", "No Surrender", tweet) tweet = re.sub(r"NotExplained", "Not Explained", tweet) tweet = re.sub(r"greatbritishbakeoff", "great british bake off", tweet) tweet = re.sub(r"LondonFire", "London Fire", tweet) tweet = re.sub(r"KOTAWeather", "KOTA Weather", tweet) tweet = re.sub(r"LuchaUnderground", "Lucha Underground", tweet) tweet = re.sub(r"KOIN6News", "KOIN 6 News", tweet) tweet = re.sub(r"LiveOnK2", "Live On K2", tweet) tweet = re.sub(r"9NewsGoldCoast", "9 News Gold Coast", tweet) tweet = re.sub(r"nikeplus", "nike plus", tweet) tweet = re.sub(r"david_cameron", "David Cameron", tweet) tweet = re.sub(r"peterjukes", "Peter Jukes", tweet) tweet = re.sub(r"MikeParrActor", "Michael Parr", tweet) tweet = re.sub(r"4PlayThursdays", "Foreplay Thursdays", tweet) tweet = re.sub(r"TGF2015", "Tontitown Grape Festival", tweet) tweet = re.sub(r"realmandyrain", "Mandy Rain", tweet) tweet = re.sub(r"GraysonDolan", "Grayson Dolan", tweet) tweet = re.sub(r"ApolloBrown", "Apollo Brown", tweet) tweet = re.sub(r"saddlebrooke", "Saddlebrooke", tweet) tweet = re.sub(r"TontitownGrape", "Tontitown Grape", tweet) tweet = re.sub(r"AbbsWinston", "Abbs Winston", tweet) tweet = re.sub(r"ShaunKing", "Shaun King", tweet) tweet = re.sub(r"MeekMill", "Meek Mill", tweet) tweet = re.sub(r"TornadoGiveaway", "Tornado Giveaway", tweet) tweet = 
re.sub(r"GRupdates", "GR updates", tweet) tweet = re.sub(r"SouthDowns", "South Downs", tweet) tweet = re.sub(r"braininjury", "brain injury", tweet) tweet = re.sub(r"auspol", "Australian politics", tweet) tweet = re.sub(r"PlannedParenthood", "Planned Parenthood", tweet) tweet = re.sub(r"calgaryweather", "Calgary Weather", tweet) tweet = re.sub(r"weallheartonedirection", "we all heart one direction", tweet) tweet = re.sub(r"edsheeran", "Ed Sheeran", tweet) tweet = re.sub(r"TrueHeroes", "True Heroes", tweet) tweet = re.sub(r"ComplexMag", "Complex Magazine", tweet) tweet = re.sub(r"TheAdvocateMag", "The Advocate Magazine", tweet) tweet = re.sub(r"CityofCalgary", "City of Calgary", tweet) tweet = re.sub(r"EbolaOutbreak", "Ebola Outbreak", tweet) tweet = re.sub(r"SummerFate", "Summer Fate", tweet) tweet = re.sub(r"RAmag", "Royal Academy Magazine", tweet) tweet = re.sub(r"offers2go", "offers to go", tweet) tweet = re.sub(r"ModiMinistry", "Modi Ministry", tweet) tweet = re.sub(r"TAXIWAYS", "taxi ways", tweet) tweet = re.sub(r"Calum5SOS", "Calum Hood", tweet) tweet = re.sub(r"JamesMelville", "James Melville", tweet) tweet = re.sub(r"JamaicaObserver", "Jamaica Observer", tweet) tweet = re.sub(r"TweetLikeItsSeptember11th2001", "Tweet like it is september 11th 2001", tweet) tweet = re.sub(r"cbplawyers", "cbp lawyers", tweet) tweet = re.sub(r"fewmoretweets", "few more tweets", tweet) tweet = re.sub(r"BlackLivesMatter", "Black Lives Matter", tweet) tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet) tweet = re.sub(r"onlinecommunities", "online communities", tweet) tweet = re.sub(r"humanconsumption", "human consumption", tweet) tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet) tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet) tweet = re.sub(r"facialabuse", "facial abuse", tweet) tweet = re.sub(r"LakeCounty", "Lake County", tweet) tweet = re.sub(r"BeingAuthor", "Being Author", tweet) tweet = re.sub(r"withheavenly", "with heavenly", tweet) tweet = 
re.sub(r"thankU", "thank you", tweet) tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet) tweet = re.sub(r"OffensiveContent", "Offensive Content", tweet) tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet) tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet) tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet) tweet = re.sub(r"animalrescue", "animal rescue", tweet) tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet) tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet) tweet = re.sub(r"GodsLove", "God's Love", tweet) tweet = re.sub(r"bookboost", "book boost", tweet) tweet = re.sub(r"ibooklove", "I book love", tweet) tweet = re.sub(r"NestleIndia", "Nestle India", tweet) tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet) tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet) tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet) tweet = re.sub(r"weathernetwork", "weather network", tweet) tweet = re.sub(r"GOPDebate", "GOP Debate", tweet) tweet = re.sub(r"RickPerry", "Rick Perry", tweet) tweet = re.sub(r"frontpage", "front page", tweet) tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet) tweet = re.sub(r"ViralSpell", "Viral Spell", tweet) tweet = re.sub(r"til_now", "until now", tweet) tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet) tweet = re.sub(r"ZippedNews", "Zipped News", tweet) tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet) tweet = re.sub(r"53inch", "53 inch", tweet) tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet) tweet = re.sub(r"abstorm", "Alberta Storm", tweet) tweet = re.sub(r"Beyhive", "Beyonce hive", tweet) tweet = re.sub(r"RockyFire", "Rocky Fire", tweet) tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet) tweet = re.sub(r"ArtistsUnited", "Artists United", tweet) tweet = re.sub(r"ENGvAUS", "England vs Australia", tweet) tweet = re.sub(r"ScottWalker", "Scott Walker", tweet) return tweet total['text'] = total['text'].apply(clean )<define_variables>
ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0      # reproducibility seed shared by all base models
NFOLDS = 5    # number of CV folds for out-of-fold predictions

# Old-style sklearn (<0.18) KFold: iterating it yields (train_idx, test_idx).
kf = KFold(ntrain, n_folds=NFOLDS, random_state=SEED)

class SklearnHelper(object):
    """Thin wrapper giving assorted sklearn classifiers a uniform
    train/predict interface for the stacking ensemble.

    Parameters
    ----------
    clf : classifier *class* (not an instance)
    seed : forwarded to the classifier as random_state
    params : dict of constructor kwargs, or None
    """

    def __init__(self, clf, seed=0, params=None):
        # Fix: copy the dict so the caller's params are not mutated, and
        # tolerate params=None (the original raised TypeError on None).
        params = dict(params or {})
        params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)

    def fit(self, x, y):
        return self.clf.fit(x, y)

    def feature_importances(self, x, y):
        # NOTE: prints and returns None — callers storing the result get None.
        print(self.clf.fit(x, y).feature_importances_)
Titanic - Machine Learning from Disaster
6,488,543
# Materialize the cleaned tweets, then cut the combined frame back into
# its train / test portions (row order was preserved by the concat).
tweets = list(total['text'])
train = total[:len(train)]
test = total[len(train):]
def get_oof(clf, x_train, y_train, x_test):
    """Out-of-fold predictions for stacking.

    Returns (oof_train, oof_test) as column vectors: oof_train holds,
    for each training row, the prediction of the fold that did NOT see
    it; oof_test averages the per-fold predictions on the test set.
    Relies on the module-level `kf`, `ntrain`, `ntest`, `NFOLDS`.
    """
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    fold_test_preds = np.empty((NFOLDS, ntest))

    for fold, (tr_idx, te_idx) in enumerate(kf):
        clf.train(x_train[tr_idx], y_train[tr_idx])
        oof_train[te_idx] = clf.predict(x_train[te_idx])
        fold_test_preds[fold, :] = clf.predict(x_test)

    oof_test[:] = fold_test_preds.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
Titanic - Machine Learning from Disaster
6,488,543
def generate_ngrams(text, n_gram=1):
    """Tokenize *text* (lower-cased, whitespace-split, empty tokens and
    wordcloud stopwords removed) and return its n-grams as
    space-joined strings."""
    tokens = [t for t in text.lower().split(' ')
              if t != '' and t not in wordcloud.STOPWORDS]
    ngrams = zip(*[tokens[i:] for i in range(n_gram)])
    return [' '.join(ng) for ng in ngrams]

def _ngram_counts(texts, n_gram):
    # Count n-gram frequencies and return a DataFrame sorted
    # most-frequent-first — the layout the original built inline eight
    # times over (the duplicated loops also shadowed `word` as both the
    # text and the n-gram variable).
    counts = defaultdict(int)
    for text in texts:
        for ng in generate_ngrams(text, n_gram=n_gram):
            counts[ng] += 1
    return pd.DataFrame(sorted(counts.items(), key=lambda kv: kv[1])[::-1])

# NOTE(review): indexing `total` with a mask built from `train['target']`
# assumes their indices align — confirm, since `total` is the train+test
# concatenation.
_disaster_texts = total[train['target'] == 1]['text']
_regular_texts = total[train['target'] == 0]['text']

disaster_unigrams = _ngram_counts(_disaster_texts, 1)
nondisaster_unigrams = _ngram_counts(_regular_texts, 1)
disaster_bigrams = _ngram_counts(_disaster_texts, 2)
nondisaster_bigrams = _ngram_counts(_regular_texts, 2)
disaster_trigrams = _ngram_counts(_disaster_texts, 3)
nondisaster_trigrams = _ngram_counts(_regular_texts, 3)
disaster_4grams = _ngram_counts(_disaster_texts, 4)
nondisaster_4grams = _ngram_counts(_regular_texts, 4)
# Hyper-parameters for the five base learners in the stacking ensemble.
rf_params = {
    'n_jobs': -1, 'n_estimators': 500, 'warm_start': True, 'max_depth': 6,
    'min_samples_leaf': 2, 'max_features': 'sqrt', 'verbose': 0,
}
et_params = {
    'n_jobs': -1, 'n_estimators': 500, 'max_depth': 8,
    'min_samples_leaf': 2, 'verbose': 0,
}
ada_params = {'n_estimators': 500, 'learning_rate': 0.75}
gb_params = {
    'n_estimators': 500, 'max_depth': 5, 'min_samples_leaf': 2, 'verbose': 0,
}
svc_params = {'kernel': 'linear', 'C': 0.025}
Titanic - Machine Learning from Disaster
6,488,543
# Demo of Keras Tokenizer filtering/tokenization behaviour on a sample
# string.
# NOTE(review): this cell is corrupted in extraction — the string
# literals for `to_tokenize` and `text` are truncated mid-line, so the
# code below is not valid Python as written. Recover the original cell
# from the source notebook before running.
to_exclude = '*+-/() % [\\]{|}^_`~\t' to_tokenize = '!" tokenizer = Tokenizer(filters = to_exclude) text = 'Why are you so f% text = re.sub(r'(['+to_tokenize+'])', r' \1 ', text) tokenizer.fit_on_texts([text]) print(tokenizer.word_index )<feature_engineering>
# Instantiate the five base learners through the common wrapper, all
# sharing the same seed.
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)
Titanic - Machine Learning from Disaster
6,488,543
<string_transform>
# Separate the label vector from the feature matrices.
y_train = train['Survived'].ravel()
train = train.drop(['Survived'], axis=1)
x_train = train.values
x_test = test.values
Titanic - Machine Learning from Disaster
6,488,543
# Fit a Keras tokenizer on the full tweet corpus and turn each tweet
# into a padded sequence of word indices.
tokenizer = Tokenizer()
tokenizer.fit_on_texts(tweets)
sequences = tokenizer.texts_to_sequences(tweets)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))

data = pad_sequences(sequences)
labels = train['target']
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)

# The first len(train) rows of the padded matrix are the training split.
nlp_train = data[:len(train)]
nlp_test = data[len(train):]
MAX_SEQUENCE_LENGTH = data.shape[1]
# Generate out-of-fold predictions for every base learner.
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test)
rf_oof_train, rf_oof_test = get_oof(rf, x_train, y_train, x_test)
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test)
gb_oof_train, gb_oof_test = get_oof(gb, x_train, y_train, x_test)
svc_oof_train, svc_oof_test = get_oof(svc, x_train, y_train, x_test)
print("Training is complete")
Titanic - Machine Learning from Disaster
6,488,543
# Build a word -> 200-d vector lookup from the GloVe embeddings file.
# Fixes: corrected the garbled '.. /input' path, added an explicit
# encoding, and dropped the redundant f.close() — the `with` block
# already closes the file.
embeddings_index = {}
with open('../input/glove-global-vectors-for-word-representation/glove.6B.200d.txt',
          'r', encoding='utf-8') as f:
    for line in tqdm(f):
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
print('Found %s word vectors in the GloVe library' % len(embeddings_index))
# Print each tree-based learner's feature importances. The wrapper's
# feature_importances() prints and returns None, so these names end up
# holding None — kept for parity with the original notebook.
rf_feature = rf.feature_importances(x_train, y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train, y_train)
Titanic - Machine Learning from Disaster
6,488,543
# Dimensionality of the GloVe vectors loaded above (glove.6B.200d).
EMBEDDING_DIM = 200
# Feature importances copy-pasted from the printed output of the
# feature_importances() calls above (one value per training column).
rf_features = [0.10474135, 0.21837029, 0.04432652, 0.02249159, 0.05432591,
               0.02854371, 0.07570305, 0.01088129, 0.24247496, 0.13685733,
               0.06128402]
et_features = [0.12165657, 0.37098307, 0.03129623, 0.01591611, 0.05525811,
               0.028157, 0.04589793, 0.02030357, 0.17289562, 0.04853517,
               0.08910063]
ada_features = [0.028, 0.008, 0.012, 0.05866667, 0.032, 0.008, 0.04666667,
                0.0, 0.05733333, 0.73866667, 0.01066667]
gb_features = [0.06796144, 0.03889349, 0.07237845, 0.02628645, 0.11194395,
               0.04778854, 0.05965792, 0.02774745, 0.07462718, 0.4593142,
               0.01340093]
Titanic - Machine Learning from Disaster
6,488,543
# Embedding matrix rows follow the tokenizer's word indices (index 0 is
# reserved); words missing from GloVe keep an all-zero row.
embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
for word, idx in tqdm(word_index.items()):
    vector = embeddings_index.get(word)
    if vector is not None:
        embedding_matrix[idx] = vector
print("Our embedded matrix is of dimension", embedding_matrix.shape)
# Collect each model's importances side by side, one row per feature.
cols = train.columns.values
feature_dataframe = pd.DataFrame({
    'features': cols,
    'Random Forest feature importances': rf_features,
    'Extra Trees feature importances': et_features,
    'AdaBoost feature importances': ada_features,
    'Gradient Boost feature importances': gb_features,
})
Titanic - Machine Learning from Disaster
6,488,543
# Frozen embedding layer initialised with the pre-trained GloVe matrix.
embedding = Embedding(
    len(word_index) + 1,
    EMBEDDING_DIM,
    weights=[embedding_matrix],
    input_length=MAX_SEQUENCE_LENGTH,
    trainable=False,
)
# Average importance across the four models, then preview.
feature_dataframe['mean'] = feature_dataframe.mean(axis=1)
feature_dataframe.head(3)
Titanic - Machine Learning from Disaster
6,488,543
def scale(df, scaler):
    """Fit *scaler* on df's feature columns (everything past the first
    two) and return the transformed array."""
    return scaler.fit_transform(df.iloc[:, 2:])

# Fix: fit the scaler on the training split only and reuse it for the
# test split. The original called fit_transform on both, so the test set
# was scaled with its own statistics — train/test leakage.
_meta_scaler = StandardScaler()
meta_train = _meta_scaler.fit_transform(train.iloc[:, 2:])
meta_test = _meta_scaler.transform(test.iloc[:, 2:])
# First-level (out-of-fold) train predictions gathered for inspection.
base_predictions_train = pd.DataFrame({
    'RandomForest': rf_oof_train.ravel(),
    'ExtraTrees': et_oof_train.ravel(),
    'AdaBoost': ada_oof_train.ravel(),
    'GradientBoost': gb_oof_train.ravel(),
})
base_predictions_train.head()
Titanic - Machine Learning from Disaster
6,488,543
def create_lstm(spatial_dropout, dropout, recurrent_dropout, learning_rate,
                bidirectional=False):
    """Single-layer (bi)LSTM over the frozen GloVe embedding,
    concatenated with the 7 meta-features, ending in a sigmoid unit.

    Fixes: `spatial_dropout` is now actually passed to SpatialDropout1D
    (the original passed `dropout` there and silently ignored the
    parameter — callers in this file pass equal values, so results are
    unchanged for them), and the unused `activation` local was removed.
    """
    nlp_input = Input(shape=(MAX_SEQUENCE_LENGTH,), name='nlp_input')
    meta_input_train = Input(shape=(7,), name='meta_train')

    emb = embedding(nlp_input)
    emb = SpatialDropout1D(spatial_dropout)(emb)

    lstm_layer = LSTM(100, dropout=dropout, recurrent_dropout=recurrent_dropout,
                      kernel_initializer='orthogonal')
    nlp_out = (Bidirectional(lstm_layer) if bidirectional else lstm_layer)(emb)

    x = Concatenate()([nlp_out, meta_input_train])
    x = Dropout(dropout)(x)
    preds = Dense(1, activation='sigmoid',
                  kernel_regularizer=regularizers.l2(1e-4))(x)

    model = Model(inputs=[nlp_input, meta_input_train], outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=['accuracy'])
    return model
# Stack the five OOF prediction columns into second-level features.
first_level_train = (et_oof_train, rf_oof_train, ada_oof_train,
                     gb_oof_train, svc_oof_train)
first_level_test = (et_oof_test, rf_oof_test, ada_oof_test,
                    gb_oof_test, svc_oof_test)
x_train = np.concatenate(first_level_train, axis=1)
x_test = np.concatenate(first_level_test, axis=1)
Titanic - Machine Learning from Disaster
6,488,543
# Build the bidirectional variant and show its architecture.
lstm = create_lstm(spatial_dropout=0.2, dropout=0.2, recurrent_dropout=0.2,
                   learning_rate=3e-4, bidirectional=True)
lstm.summary()
# Second-level XGBoost meta-learner trained on the stacked OOF features.
gbm = xgb.XGBClassifier(
    n_estimators=2000,
    max_depth=4,
    min_child_weight=2,
    gamma=0.9,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='binary:logistic',
    nthread=-1,
    scale_pos_weight=1,
).fit(x_train, y_train)
predictions = gbm.predict(x_test)
Titanic - Machine Learning from Disaster
6,488,543
<choose_model_class><EOS>
# Write the stacked model's predictions in Kaggle submission format.
StackingSubmission = pd.DataFrame({'PassengerId': PassengerId,
                                   'Survived': predictions})
StackingSubmission.to_csv("gender_submission.csv", index=False)
Titanic - Machine Learning from Disaster
7,258,897
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class>
import math, time, random, datetime
Titanic - Machine Learning from Disaster
7,258,897
def create_lstm_2(spatial_dropout, dropout, recurrent_dropout, learning_rate,
                  bidirectional=False):
    """(Bi)LSTM as in create_lstm, plus an extra 100-unit LeakyReLU dense
    layer before the sigmoid output.

    Fix: `spatial_dropout` is now passed to SpatialDropout1D — the
    original used `dropout` there and ignored the parameter (callers in
    this file pass equal values, so their results are unchanged).
    """
    activation = LeakyReLU(alpha=0.01)
    nlp_input = Input(shape=(MAX_SEQUENCE_LENGTH,), name='nlp_input')
    meta_input_train = Input(shape=(7,), name='meta_train')

    emb = embedding(nlp_input)
    emb = SpatialDropout1D(spatial_dropout)(emb)

    lstm_layer = LSTM(100, dropout=dropout, recurrent_dropout=recurrent_dropout,
                      kernel_initializer='orthogonal')
    nlp_out = (Bidirectional(lstm_layer) if bidirectional else lstm_layer)(emb)

    x = Concatenate()([nlp_out, meta_input_train])
    x = Dropout(dropout)(x)
    x = Dense(100, activation=activation,
              kernel_regularizer=regularizers.l2(1e-4),
              kernel_initializer='he_normal')(x)
    x = Dropout(dropout)(x)
    preds = Dense(1, activation='sigmoid',
                  kernel_regularizer=regularizers.l2(1e-4))(x)

    model = Model(inputs=[nlp_input, meta_input_train], outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=['accuracy'])
    return model
# Notebook-global plotting and warning configuration. NOTE: the
# %matplotlib line is an IPython magic — this cell is not valid plain
# Python outside a notebook.
%matplotlib inline plt.style.use('seaborn-whitegrid') warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
7,258,897
# Deeper-regularised variant: all dropout rates at 0.4.
lstm_2 = create_lstm_2(spatial_dropout=0.4, dropout=0.4, recurrent_dropout=0.4,
                       learning_rate=3e-4, bidirectional=True)
lstm_2.summary()
# Load the Titanic splits.
# Fix: the original paths contained a stray space ('.. /input'), an
# extraction artifact; corrected to the standard Kaggle input layout.
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
Titanic - Machine Learning from Disaster
7,258,897
# Train the second LSTM on text + meta features; hold out 20% for validation.
history2 = lstm_2.fit([nlp_train, meta_train], labels,
                      validation_split=0.2, epochs=30, batch_size=21,
                      verbose=1)
# Bookkeeping, then stack train+test for joint preprocessing.
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train['Survived'].values
passId = test['PassengerId']
data = pd.concat((train, test))
print("data size is: {}".format(data.shape))
Titanic - Machine Learning from Disaster
7,258,897
# Threshold the LSTM probabilities at 0.5 into hard 0/1 labels.
# NOTE(review): `test_id` is not defined anywhere in this file — it
# looks like it should be the test-set id column; confirm against the
# source notebook.
submission_lstm = pd.DataFrame()
submission_lstm['id'] = test_id
submission_lstm['prob'] = lstm_2.predict([nlp_test, meta_test])
submission_lstm['target'] = submission_lstm['prob'].apply(lambda p: 0 if p < 0.5 else 1)
submission_lstm.head(10)
# Per-column count of missing values in the combined frame.
data.isnull().sum()
Titanic - Machine Learning from Disaster
7,258,897
def create_dual_lstm(spatial_dropout, dropout, recurrent_dropout, learning_rate,
                     bidirectional=False):
    """Two stacked (bi)LSTM layers over the frozen embedding, merged
    with the 7 meta-features, ending in a sigmoid unit.

    Fixes: the second LSTM now consumes the first layer's output — in
    the original both LSTMs were applied to `emb`, so the first LSTM
    (and the dropout between them) was dead code — and `spatial_dropout`
    is actually used instead of being silently ignored.
    """
    nlp_input = Input(shape=(MAX_SEQUENCE_LENGTH,), name='nlp_input')
    meta_input_train = Input(shape=(7,), name='meta_train')

    emb = embedding(nlp_input)
    emb = SpatialDropout1D(spatial_dropout)(emb)

    def _lstm(return_sequences):
        # One-line helper: a fresh (optionally bidirectional) LSTM layer.
        layer = LSTM(100, dropout=dropout, recurrent_dropout=recurrent_dropout,
                     kernel_initializer='orthogonal',
                     return_sequences=return_sequences)
        return Bidirectional(layer) if bidirectional else layer

    nlp_out = _lstm(True)(emb)
    nlp_out = SpatialDropout1D(spatial_dropout)(nlp_out)
    nlp_out = _lstm(False)(nlp_out)

    x = Concatenate()([nlp_out, meta_input_train])
    x = Dropout(dropout)(x)
    preds = Dense(1, activation='sigmoid',
                  kernel_regularizer=regularizers.l2(1e-4))(x)

    model = Model(inputs=[nlp_input, meta_input_train], outputs=preds)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=learning_rate),
                  metrics=['accuracy'])
    return model
# True when at least one Age value is still missing.
data.Age.isnull().any()
Titanic - Machine Learning from Disaster
7,258,897
# Train the stacked dual-LSTM model with a 20% validation split.
history3 = dual_lstm.fit([nlp_train, meta_train], labels,
                         validation_split=0.2, epochs=25, batch_size=21,
                         verbose=1)
# Survivor counts broken down by passenger class.
train.groupby(['Pclass', 'Survived'])['Survived'].count()
Titanic - Machine Learning from Disaster
7,258,897
# Hard 0/1 labels from the dual-LSTM probabilities.
# NOTE(review): `test_id` is never defined in this file — verify against
# the source notebook.
submission_lstm2 = pd.DataFrame()
submission_lstm2['id'] = test_id
submission_lstm2['prob'] = dual_lstm.predict([nlp_test, meta_test])
submission_lstm2['target'] = submission_lstm2['prob'].apply(lambda p: 0 if p < 0.5 else 1)
submission_lstm2.head(10)
# Mean survival rate per passenger class.
train.groupby('Pclass').Survived.mean()
Titanic - Machine Learning from Disaster
7,258,897
# Training / architecture switches for the transformer experiments.
BATCH_SIZE = 32
EPOCHS = 2
USE_META = True       # concatenate meta-features with the transformer output
ADD_DENSE = False     # optional dense head before the output layer
DENSE_DIM = 64
ADD_DROPOUT = False   # optional dropout before the output layer
DROPOUT = 0.2
# Frequency of each passenger name in the combined frame.
data.Name.value_counts()
Titanic - Machine Learning from Disaster
7,258,897
!pip install --quiet transformers <categorify>
# Extract the honorific ("Initial") from each name into a working copy.
# Fixes: dropped the original `for i in train:` wrapper — it merely
# repeated the identical whole-column assignment once per column name —
# and made the regex a raw string.
temp = data.copy()
temp['Initial'] = data.Name.str.extract(r'([A-Za-z]+)\.')
Titanic - Machine Learning from Disaster
7,258,897
# Sanity-check the BERT tokenizer with an encode/decode round-trip.
TOKENIZER = AutoTokenizer.from_pretrained("bert-large-uncased")
enc = TOKENIZER.encode("Encode me!")
dec = TOKENIZER.decode(enc)
print("Encode: " + str(enc))
print("Decode: " + str(dec))
def survpct(a):
    """Mean survival rate of `temp` grouped by column(s) *a*."""
    return temp.groupby(a).Survived.mean()

survpct('Initial')
Titanic - Machine Learning from Disaster
7,258,897
def bert_encode(data, maximum_len):
    """Tokenize data.text with the global TOKENIZER, padding/truncating
    each row to *maximum_len*; returns (input_ids, attention_masks)
    as numpy arrays."""
    input_ids = []
    attention_masks = []
    for i in range(len(data.text)):
        encoded = TOKENIZER.encode_plus(data.text[i],
                                        add_special_tokens=True,
                                        max_length=maximum_len,
                                        pad_to_max_length=True,
                                        return_attention_mask=True)
        input_ids.append(encoded['input_ids'])
        attention_masks.append(encoded['attention_mask'])
    return np.array(input_ids), np.array(attention_masks)
# Mean age per honorific — used below to impute missing ages.
temp.groupby('Initial')['Age'].mean()
Titanic - Machine Learning from Disaster
7,258,897
def build_model(model_layer, learning_rate, use_meta = USE_META, add_dense = ADD_DENSE, dense_dim = DENSE_DIM, add_dropout = ADD_DROPOUT, dropout = DROPOUT): input_ids = tf.keras.Input(shape=(60,),dtype='int32') attention_masks = tf.keras.Input(shape=(60,),dtype='int32') meta_input = tf.keras.Input(shape =(meta_train.shape[1],)) transformer_layer = model_layer([input_ids,attention_masks]) output = transformer_layer[1] if use_meta: output = tf.keras.layers.Concatenate()([output, meta_input]) if add_dense: print("Training with additional dense layer...") output = tf.keras.layers.Dense(dense_dim,activation='relu' )(output) if add_dropout: print("Training with dropout...") output = tf.keras.layers.Dropout(dropout )(output) output = tf.keras.layers.Dense(1,activation='sigmoid' )(output) if use_meta: print("Training with meta-data...") model = tf.keras.models.Model(inputs = [input_ids,attention_masks, meta_input],outputs = output) else: print("Training without meta-data...") model = tf.keras.models.Model(inputs = [input_ids,attention_masks],outputs = output) model.compile(tf.keras.optimizers.Adam(lr=learning_rate), loss='binary_crossentropy', metrics=['accuracy']) return model<load_from_csv>
temp['Newage']=temp['Age'] def newage(k,n): temp.loc[(temp.Age.isnull())&(temp.Initial==k),'Newage']= n newage('Capt',int(70.000000)) newage('Col',int(54.000000)) newage('Countess',int(33.000000)) newage('Don',int(40.000000)) newage('Dona',int(39.000000)) newage('Dr',int(43.571429)) newage('Jonkheer',int(38.000000)) newage('Lady',int(48.000000)) newage('Major',int(48.500000)) newage('Master',int(5.482642)) newage('Miss',int(21.774238)) newage('Mlle',int(24.000000)) newage('Mme',int(24.000000)) newage('Mr',int(32.252151)) newage('Mrs',int(36.994118)) newage('Ms',int(28.000000)) newage('Rev',int(41.250000)) newage('Sir',int(49.000000))
Titanic - Machine Learning from Disaster
7,258,897
train = pd.read_csv('.. /input/nlp-getting-started/train.csv') test = pd.read_csv('.. /input/nlp-getting-started/test.csv' )<categorify>
groupmean('Age_Range', 'Survived' )
Titanic - Machine Learning from Disaster
7,258,897
bert_large = TFAutoModel.from_pretrained('bert-large-uncased') TOKENIZER = AutoTokenizer.from_pretrained("bert-large-uncased") train_input_ids,train_attention_masks = bert_encode(train,60) test_input_ids,test_attention_masks = bert_encode(test,60) print('Train length:', len(train_input_ids)) print('Test length:', len(test_input_ids)) BERT_large = build_model(bert_large, learning_rate = 1e-5) BERT_large.summary()<train_model>
temp['Gender']= temp['Sex'] for n in range(1,4): temp.loc[(temp['Sex'] == 'male')&(temp['Pclass'] == n),'Gender']= 'm'+str(n) temp.loc[(temp['Sex'] == 'female')&(temp['Pclass'] == n),'Gender']= 'w'+str(n) temp.loc[(temp['Gender'] == 'm3'),'Gender']= 'm2' temp.loc[(temp['Gender'] == 'w3'),'Gender']= 'w2' temp.loc[(temp['Age'] <= 1.0),'Gender']= 'baby' temp.loc[(temp['Age'] > 75.0),'Gender']= 'old'
Titanic - Machine Learning from Disaster
7,258,897
history_bert = BERT_large.fit([train_input_ids,train_attention_masks, meta_train], train.target, validation_split =.2, epochs = EPOCHS, callbacks = [checkpoint], batch_size = BATCH_SIZE )<predict_on_test>
groupmean('Gender', 'Survived' )
Titanic - Machine Learning from Disaster
7,258,897
BERT_large.load_weights('large_model.h5') preds_bert = BERT_large.predict([test_input_ids,test_attention_masks,meta_test] )<prepare_output>
temp['Agroup']=0 temp.loc[temp['Newage']<1.0,'Agroup']= 1 temp.loc[(temp['Newage']>=1.0)&(temp['Newage']<=3.0),'Agroup']= 2 temp.loc[(temp['Newage']>3.0)&(temp['Newage']<11.0),'Agroup']= 7 temp.loc[(temp['Newage']>=11.0)&(temp['Newage']<15.0),'Agroup']= 13 temp.loc[(temp['Newage']>=15.0)&(temp['Newage']<18.0),'Agroup']= 16 temp.loc[(temp['Newage']>=18.0)&(temp['Newage']<= 20.0),'Agroup']= 18 temp.loc[(temp['Newage']> 20.0)&(temp['Newage']<=22.0),'Agroup']= 21 temp.loc[(temp['Newage']>22.0)&(temp['Newage']<=26.0),'Agroup']= 24 temp.loc[(temp['Newage']>26.0)&(temp['Newage']<=30.0),'Agroup']= 28 temp.loc[(temp['Newage']>30.0)&(temp['Newage']<=32.0),'Agroup']= 31 temp.loc[(temp['Newage']>32.0)&(temp['Newage']<=34.0),'Agroup']= 33 temp.loc[(temp['Newage']>34.0)&(temp['Newage']<=38.0),'Agroup']= 36 temp.loc[(temp['Newage']>38.0)&(temp['Newage']<=52.0),'Agroup']= 45 temp.loc[(temp['Newage']>52.0)&(temp['Newage']<=75.0),'Agroup']= 60 temp.loc[temp['Newage']>75.0,'Agroup']= 78
Titanic - Machine Learning from Disaster
7,258,897
submission_bert = pd.DataFrame() submission_bert['id'] = test_id submission_bert['prob'] = preds_bert submission_bert['target'] = np.round(submission_bert['prob'] ).astype(int) submission_bert.head(10 )<save_to_csv>
groupmean('Agroup', 'Survived' )
Titanic - Machine Learning from Disaster
7,258,897
submission_bert = submission_bert[['id', 'target']] submission_bert.to_csv('submission_bert.csv', index = False) print('Blended submission has been saved to disk' )<install_modules>
groupmean('Agroup', 'Age' )
Titanic - Machine Learning from Disaster
7,258,897
!pip install bert-for-tf2<import_modules>
temp['Alone']=0 temp.loc[(temp['SibSp']==0)&(temp['Parch']==0),'Alone']= 1
Titanic - Machine Learning from Disaster
7,258,897
import numpy as np import pandas as pd import re import tensorflow as tf from tensorflow_core.python.keras.layers import Dense, Input from tensorflow.keras.optimizers import Adam from tensorflow_core.python.keras.models import Model from tensorflow_core.python.keras.callbacks import ModelCheckpoint import tensorflow_hub as hub from bert.tokenization import bert_tokenization import math import seaborn as sns from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt from sklearn.model_selection import StratifiedKFold <string_transform>
temp['Family']=0 for i in temp: temp['Family'] = temp['Parch'] + temp['SibSp'] +1
Titanic - Machine Learning from Disaster
7,258,897
def clean_text(text): new_text = [] for each in text.split() : if each.isalpha() : new_text.append(each) cleaned_text = ' '.join(new_text) cleaned_text = re.sub(r'https?:\/\/t.co\/[A-Za-z0-9]+','',cleaned_text) return cleaned_text<categorify>
bag('Parch','Survived','Survived per Parch','Parch Survived vs Not Survived' )
Titanic - Machine Learning from Disaster
7,258,897
def bert_encode(texts, tokenizer, max_len =512): all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[:max_len-2] input_sequence = ['[CLS]'] + text +['[SEP]'] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0]*pad_len pad_masks = [1]*len(input_sequence)+ [0]*pad_len segment_ids = [0]*max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments) def build_bert(bert_layer, max_len =512): adam = Adam(lr=6e-6) input_word_ids = Input(shape =(max_len,),dtype ='int32') input_mask = Input(shape =(max_len,),dtype ='int32') segment_ids = Input(shape =(max_len,),dtype ='int32') pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids]) clf_output = sequence_output[:,0,:] out = Dense(1, activation ='sigmoid' )(clf_output) model = Model(inputs =[input_word_ids, input_mask, segment_ids], outputs =out) model.compile(optimizer=adam ,loss = 'binary_crossentropy', metrics =['accuracy']) return model bert_layer = hub.KerasLayer('https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2', trainable=True) train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = bert_tokenization.FullTokenizer(vocab_file, do_lower_case) train_data['text'] = train_data['text'].apply(lambda x: clean_text(x)) test_data['text'] = test_data['text'].apply(lambda x: clean_text(x)) all_models = [] all_loss = [] skf = StratifiedKFold(n_splits = 5, random_state =42, shuffle=True) for fold,(train_idx,val_idx)in enumerate(skf.split(train_data['text'],train_data['target'])) : print('Fold:'+str(fold)) train_input = 
bert_encode(train_data.loc[train_idx,'text'], tokenizer, max_len=100) train_labels = train_data.loc[train_idx,'target'] valid_input = bert_encode(train_data.loc[val_idx,'text'], tokenizer, max_len=100) valid_labels = train_data.loc[val_idx,'target'] model = build_bert(bert_layer, max_len=100) model.fit(train_input, train_labels,epochs =3, batch_size = 16) valid_loss, valid_acc = model.evaluate(valid_input,valid_labels, batch_size =16) all_models.append(model) all_loss.append(valid_loss) <load_from_csv>
temp.Ticket.isnull().any()
Titanic - Machine Learning from Disaster
7,258,897
test_text = list(test_data['text']) test_input = bert_encode(test_text, tokenizer, max_len=100) min_loss_index = all_loss.index(min(all_loss)) results = all_models[min_loss_index].predict(test_input) submission_data = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv') submission_data['target'] = results.round().astype('int') submission_data.to_csv('/kaggle/working/submission.csv',index=False, header=True) <load_from_csv>
temp['Initick'] = 0 for s in temp: temp['Initick']=temp.Ticket.str.extract('^([A-Za-z]+)') for s in temp: temp.loc[(temp.Initick.isnull()),'Initick']='X' temp.head()
Titanic - Machine Learning from Disaster
7,258,897
train = pd.read_csv('.. /input/nlp-getting-started/train.csv') test = pd.read_csv('.. /input/nlp-getting-started/test.csv') sample = pd.read_csv('.. /input/nlp-getting-started/sample_submission.csv' )<count_duplicates>
train['Tgroup'] = 0 temp['Tgroup'] = 0 temp.loc[(temp['Initick']=='X')&(temp['Pclass']==1),'Tgroup']= 1 temp.loc[(temp['Initick']=='X')&(temp['Pclass']==2),'Tgroup']= 2 temp.loc[(temp['Initick']=='X')&(temp['Pclass']==3),'Tgroup']= 3 temp.loc[(temp['Initick']=='Fa'),'Tgroup']= 3 temp.loc[(temp['Initick']=='SCO'),'Tgroup']= 4 temp.loc[(temp['Initick']=='A'),'Tgroup']= 5 temp.loc[(temp['Initick']=='CA'),'Tgroup']= 6 temp.loc[(temp['Initick']=='W'),'Tgroup']= 7 temp.loc[(temp['Initick']=='S'),'Tgroup']= 8 temp.loc[(temp['Initick']=='SOTON'),'Tgroup']= 9 temp.loc[(temp['Initick']=='LINE'),'Tgroup']= 10 temp.loc[(temp['Initick']=='STON'),'Tgroup']= 11 temp.loc[(temp['Initick']=='C'),'Tgroup']= 12 temp.loc[(temp['Initick']=='P'),'Tgroup']= 13 temp.loc[(temp['Initick']=='WE'),'Tgroup']= 14 temp.loc[(temp['Initick']=='SC'),'Tgroup']= 15 temp.loc[(temp['Initick']=='F'),'Tgroup']= 16 temp.loc[(temp['Initick']=='PP'),'Tgroup']= 17 temp.loc[(temp['Initick']=='PC'),'Tgroup']= 17 temp.loc[(temp['Initick']=='SO'),'Tgroup']= 18 temp.loc[(temp['Initick']=='SW'),'Tgroup']= 19
Titanic - Machine Learning from Disaster
7,258,897
sns.countplot(train.text.duplicated() )<count_duplicates>
groupmean('Tgroup', 'Survived' )
Titanic - Machine Learning from Disaster
7,258,897
duplicate_index = train[train.text.duplicated() ].index train.drop(index = duplicate_index, inplace = True) train.reset_index(drop = True, inplace = True )<define_variables>
temp['Fgroup']=0 temp.loc[temp['Fare']<= 7.125,'Fgroup']=5.0 temp.loc[(temp['Fare']>7.125)&(temp['Fare']<=7.9),'Fgroup']= 7.5 temp.loc[(temp['Fare']>7.9)&(temp['Fare']<=8.03),'Fgroup']= 8.0 temp.loc[(temp['Fare']>8.03)&(temp['Fare']<10.5),'Fgroup']= 9.5 temp.loc[(temp['Fare']>=10.5)&(temp['Fare']<23.0),'Fgroup']= 16.0 temp.loc[(temp['Fare']>=23.0)&(temp['Fare']<=27.8),'Fgroup']= 25.5 temp.loc[(temp['Fare']>27.8)&(temp['Fare']<=51.0),'Fgroup']= 38.0 temp.loc[(temp['Fare']>51.0)&(temp['Fare']<=73.5),'Fgroup']= 62.0 temp.loc[temp['Fare']>73.5,'Fgroup']= 100.0
Titanic - Machine Learning from Disaster
7,258,897
shortforms = {"ain't": "am not", "aren't": "are not", "can't": "cannot", "can't've": "cannot have", "'cause": "because", "could've": "could have", "couldn't": "could not", "couldn't've": "could not have", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hadn't've": "had not have", "hasn't": "has not", "haven't": "have not", "he'd": "He had", "he'd've": "He would have", "he'll": "He will", "he'll've": "He will have", "he's": "He is", "how'd": "How did", "how'd'y": "How do you", "how'll": "How will", "how's": "How is", "i'd": "I had", "i'd've": "I would have", "i'll": "I will", "i'll've": "I will have", "i'm": "I am", "i've": "I have", "isn't": "is not", "it'd": "It had", "it'd've": "It would have", "it'll": "It will", "it'll've": "It will have", "it's": "It is", ".it's": "It is", "let's": "Let us", "ma'am": "Madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "She had", "she'd've": "She would have", "she'll": "She will", "she'll've": "She will have", "she's": "She is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so is", "that'd": "that had", "that'd've": "that would have", "that's": "that is", "there'd": "There had", "there'd've": "There would have", "there's": "There has", "they'd": "They had", "they'd've": "They would have", "they'll": "They will", "they'll've": "They will have", "they're": "They are", "they've": "They have", "to've": "to have", "wasn't": "was not", "we'd": "We had", "we'd've": "We would have", "we'll": "We will", "we'll've": "We will have", "we're": "We are", "we've": "We have", 
"weren't": "were not", "what'll": "What will", "what'll've": "What will have", "what're": "What are", "what's": "What is", "what've": "What have", "when's": "When is", "when've": "When have", "where'd": "Where did", "where's": "Where is", "where've": "Where have", "who'll": "Who will", "who'll've": "Who will have", "who's": "Who is", "who've": "Who have", "why's": "Why is", "why've": "Why have", "will've": "ill have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "You all", "y'all'd": "You all would", "y'all'd've": "You all would have", "y'all're": "You all are", "y'all've": "You all have", "you'd": "You had", "you'd've": "You would have", "you'll": "You will", "you'll've": "You will have", "you're": "You are", "you've": "You have" }<string_transform>
temp.Cabin.value_counts()
Titanic - Machine Learning from Disaster
7,258,897
def cleaner(text): text = str(text ).lower() text = re.sub(r'<*?>',' ',text) text = re.sub(r'https?://\S+|www\.\S+',' ',text) text = ' '.join([shortforms[word] if word in shortforms.keys() else word for word in text.split() ]) text = str(text ).lower() text = re.sub(r'^\s','',text) text = re.sub(r'\s+',' ',text) return(text )<feature_engineering>
temp.Cabin.isnull().sum()
Titanic - Machine Learning from Disaster
7,258,897
%%time train['cleaner_text'] = train.text.progress_apply(lambda x: cleaner(x)) test['cleaner_text'] = test.text.progress_apply(lambda x: cleaner(x))<load_pretrained>
temp['Inicab'] = 0 for i in temp: temp['Inicab']=temp.Cabin.str.extract('^([A-Za-z]+)') temp.loc[(( temp.Cabin.isnull())&(temp.Pclass.values == 1)) ,'Inicab']='X' temp.loc[(( temp.Cabin.isnull())&(temp.Pclass.values == 2)) ,'Inicab']='Y' temp.loc[(( temp.Cabin.isnull())&(temp.Pclass.values == 3)) ,'Inicab']='Z'
Titanic - Machine Learning from Disaster
7,258,897
case = 'roberta-base' tokenizer = RobertaTokenizer.from_pretrained(case) config = AutoConfig.from_pretrained(case, output_attentions = True, output_hidden_states = True) model = TFAutoModel.from_pretrained(case, config = config) bert = TFRobertaMainLayer(config )<categorify>
temp.Inicab.value_counts()
Titanic - Machine Learning from Disaster
7,258,897
%%time def convert2token(all_text): token_id, attention_id = [], [] for i, sent in tqdm.tqdm(enumerate(all_text)) : token_dict = tokenizer.encode_plus(sent, max_length=60, pad_to_max_length=True, return_attention_mask=True, return_tensors='tf', add_special_tokens= True) token_id.append(token_dict['input_ids']) attention_id.append(token_dict['attention_mask']) token_id = np.array(token_id, dtype='int32') attention_id = np.array(attention_id, dtype='int32') return(token_id, attention_id) train_token_id, train_attention_id = convert2token(train.cleaner_text.values) test_token_id, test_attention_id = convert2token(test.cleaner_text.values )<choose_model_class>
temp['Inicab'].replace(['A','B', 'C', 'D', 'E', 'F', 'G','T', 'X', 'Y', 'Z'],[1,2,3,4,5,6,7,8,9,10,11],inplace=True )
Titanic - Machine Learning from Disaster
7,258,897
def building_model(need_emb): inp_1 = tf.keras.layers.Input(shape =(60,), name = 'token_id', dtype = 'int32') inp_2 = tf.keras.layers.Input(shape =(60,), name = 'mask_id', dtype = 'int32') x1 = tf.keras.layers.Reshape(( 60,))(inp_1) x2 = tf.keras.layers.Reshape(( 60,))(inp_2) if need_emb: emb = model(x1, attention_mask = x2)[0] x = tf.keras.layers.Dense(256, activation = 'relu' )(emb[:,0,:]) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Dropout(0.3 )(x) x = tf.keras.layers.Dense(32, activation = 'relu' )(x) x = tf.keras.layers.BatchNormalization()(x) x = tf.keras.layers.Dropout(0.3 )(x) else : emb = bert(x1, attention_mask = x2)[0] x = tf.keras.layers.Dropout(0.2 )(emb[:,0,:]) out = tf.keras.layers.Dense(1, activation = 'sigmoid' )(x) Emb_Model = tf.keras.models.Model(inputs = [inp_1, inp_2], outputs = out) callback = ModelCheckpoint(filepath = 'best.hdf5', monitor = 'val_loss', save_best_only = True, verbose = 1) if need_emb : for layer in Emb_Model.layers[:5]: layer.trainable = False return(Emb_Model, callback )<train_model>
temp.loc[(temp.Embarked.isnull())]
Titanic - Machine Learning from Disaster
7,258,897
Emb_Model.compile(metrics=['accuracy'], optimizer=tf.keras.optimizers.Adam(learning_rate = 4e-5), loss='binary_crossentropy') Emb_Model.fit([np.reshape(train_token_id,(7503,60)) , np.reshape(train_attention_id,(7503,60)) ], train.target, epochs=10, batch_size=64, validation_split=0.20, shuffle = True )<predict_on_test>
temp.loc[(temp.Ticket == '113572')]
Titanic - Machine Learning from Disaster
7,258,897
%%time Emb_Model_Answer = Emb_Model.predict([np.reshape(test_token_id,(3263,60)) , np.reshape(test_attention_id,(3263,60)) ] )<train_model>
temp.sort_values(['Ticket'], ascending = True)[35:45]
Titanic - Machine Learning from Disaster
7,258,897
Tune_Bert.compile(metrics=['accuracy'], optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy') Tune_Bert.fit([np.reshape(train_token_id,(7503,60)) , np.reshape(train_attention_id,(7503,60)) ], train.target, epochs=10, batch_size=64, validation_split=0.20, shuffle = True, callbacks = [callback] )<predict_on_test>
temp.loc[(train.Embarked.isnull()),'Embarked']='S'
Titanic - Machine Learning from Disaster
7,258,897
Tune_Bert.load_weights('best.hdf5') Tune_answer = Tune_Bert.predict([np.reshape(test_token_id,(3263,60)) , np.reshape(test_attention_id,(3263,60)) ] )<create_dataframe>
temp.sort_values(['Ticket'], ascending = True)[35:45]
Titanic - Machine Learning from Disaster
7,258,897
answer_Emb = pd.DataFrame({'id': sample.id, 'target': np.where(Emb_Model_Answer>0.5,1,0 ).reshape(Emb_Model_Answer.shape[0])}) answer_tune = pd.DataFrame({'id': sample.id, 'target': np.where(Tune_answer>0.5,1,0 ).reshape(Tune_answer.shape[0])} )<save_to_csv>
temp.groupby('Initial' ).Survived.mean()
Titanic - Machine Learning from Disaster
7,258,897
answer_Emb.to_csv('submission_emb.csv', index = False) answer_tune.to_csv('submission_tune.csv', index = False )<load_from_url>
temp['Initial'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dona' , 'Dr', 'Jonkheer', 'Lady', 'Major', 'Master', 'Miss' ,'Mlle', 'Mme', 'Mr', 'Mrs', 'Ms', 'Rev', 'Sir'],[1, 2, 3, 4, 5, 6, 4, 3, 2, 8, 9, 3, 3, 4, 5, 3, 1, 3 ],inplace=True )
Titanic - Machine Learning from Disaster
7,258,897
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py<import_modules>
temp.groupby('Initial' ).Survived.mean()
Titanic - Machine Learning from Disaster
7,258,897
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os from wordcloud import WordCloud from nltk.corpus import stopwords from tqdm.notebook import tqdm import tensorflow as tf from tensorflow.keras.layers import Dense, Input from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow_hub as hub import tokenization from sklearn.model_selection import train_test_split<set_options>
temp.groupby('Embarked' ).Survived.mean()
Titanic - Machine Learning from Disaster
7,258,897
pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) plt.style.use('fivethirtyeight' )<load_from_csv>
temp["Embarked"].replace(['C','Q', 'S'], [1,2,3], inplace =True )
Titanic - Machine Learning from Disaster
7,258,897
train_data = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") test_data = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv" )<count_missing_values>
temp["Gender"].replace(['baby','m1', 'm2', 'old', 'w1', 'w2'], [1,2,3,4,5,6], inplace =True )
Titanic - Machine Learning from Disaster
7,258,897
print("Shape of the training dataset: {}.".format(train_data.shape)) print("Shape of the testing dataset: {}".format(test_data.shape)) for col in train_data.columns: nan_vals = train_data[col].isna().sum() pcent =(train_data[col].isna().sum() / train_data[col].count())* 100 print("Total NaN values in column '{}' are: {}, which is {:.2f}% of the data in that column".format(col, nan_vals, pcent))<categorify>
df = pd.DataFrame()
Titanic - Machine Learning from Disaster
7,258,897
def bert_encode(texts, tokenizer, max_len=512): all_tokens, all_masks, all_segments = [], [], [] for text in tqdm(texts): text = tokenizer.tokenize(text) text = text[:max_len-2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0] * pad_len pad_masks = [1] * len(input_sequence)+ [0] * pad_len segment_ids = [0] * max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments )<choose_model_class>
df.isnull().sum()
Titanic - Machine Learning from Disaster
7,258,897
%%time url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1" bert_layer = hub.KerasLayer(url, trainable=True )<data_type_conversions>
score = df.copy()
Titanic - Machine Learning from Disaster
7,258,897
vocab_fl = bert_layer.resolved_object.vocab_file.asset_path.numpy() lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = tokenization.FullTokenizer(vocab_fl, lower_case )<categorify>
score['Survived'] = temp['Survived']
Titanic - Machine Learning from Disaster
7,258,897
%%time train_input = bert_encode(train_data['text'].values, tokenizer, max_len=160) test_input = bert_encode(test_data['text'].values, tokenizer, max_len=160) train_labels = train_data['target'].values<choose_model_class>
score['Score'] = 0
Titanic - Machine Learning from Disaster
7,258,897
def build_model(transformer, max_len=512): input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name='input_word_ids') input_mask = Input(shape=(max_len,), dtype=tf.int32, name='input_mask') segment_ids = Input(shape=(max_len,), dtype=tf.int32, name='segment_ids') _, seq_op = transformer([input_word_ids, input_mask, segment_ids]) class_tkn = seq_op[:, 0, :] op = Dense(1, activation='sigmoid' )(class_tkn) model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=op) model.compile(optimizer=Adam(1e-5), loss='binary_crossentropy', metrics=['accuracy']) return model<choose_model_class>
def see(a): return score.groupby(a ).Survived.mean() see('Pclass' )
Titanic - Machine Learning from Disaster
7,258,897
model = build_model(bert_layer, max_len=160) model.summary()<train_model>
score['Class'] = 0 score['CE'] = 0 score['CN'] = 0 score['CP'] = 0 for i in score: score.loc[(( score.Embarked.values == 1)) ,'CE']=1 score.loc[(( score.Name.values == 2)) ,'CN']=1 score.loc[(( score.Name.values == 3)) ,'CN']=5 score.loc[(( score.Pclass.values == 1)) ,'Class']=1 score.loc[(( score.Pclass.values == 3)) ,'Class']=-1 score['Class'] = score['CE'] + score['CN'] + score['CP'] score.head(3 )
Titanic - Machine Learning from Disaster
7,258,897
checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True) train_history = model.fit( train_input, train_labels, validation_split=0.1, epochs=3, callbacks=[checkpoint], batch_size=16 )<predict_on_test>
score['Wealth'] = 0 score['WC'] = 0 score['WF'] = 0 score['WT'] = 0 for i in score: score.loc[(( score.Cabin.values == 8)) ,'WC']=-5 score.loc[(( score.Cabin.values == 11)) ,'WC']=-1 score.loc[(( score.Cabin.values == 3)) ,'WC']=1 score.loc[(( score.Cabin.values == 6)) ,'WC']=1 score.loc[(( score.Cabin.values == 7)) ,'WC']=1 score.loc[(( score.Cabin.values == 2)) ,'WC']=3 score.loc[(( score.Cabin.values == 4)) ,'WC']=3 score.loc[(( score.Cabin.values == 5)) ,'WC']=3 score.loc[(( score.Fare.values <= 5)) ,'WF']=-5 score.loc[(( score.Fare.values == 9.5)) ,'WF']=-3 score.loc[(( score.Fare.values == 7.5)) ,'WF']=-1 score.loc[(( score.Fare.values == 62)) ,'WF']=1 score.loc[(( score.Fare.values >= 100)) ,'WF']=3 score.loc[(( score.Ticket.values >= 4)&(score.Ticket.values <= 7)) ,'WT']=-5 score.loc[(( score.Ticket.values >= 8)&(score.Ticket.values <= 9)) ,'WT']=-3 score.loc[(( score.Ticket.values == 10)) ,'WT']=-1 score.loc[(( score.Ticket.values == 1)) ,'WT']=1 score.loc[(( score.Ticket.values >= 13)&(score.Ticket.values <= 17)) ,'WT']=1 score.loc[(( score.Ticket.values >= 18)) ,'WT']=5 score['Wealth'] = score['WC'] + score['WF'] + score['WT'] score.head(20 )
Titanic - Machine Learning from Disaster
7,258,897
preds = model.predict(test_input )<save_to_csv>
score['Priority'] = 0 score['PA'] = 0 score['PN'] = 0 score['PS'] = 0 for i in score: score.loc[(( score.Age.values == 1)) ,'PA']=5 score.loc[(( score.Age.values == 13)) ,'PA']=1 score.loc[(( score.Age.values == 2)) ,'PA']=1 score.loc[(( score.Age.values == 31)) ,'PA']=-1 score.loc[(( score.Age.values == 7)) ,'PA']=1 score.loc[(( score.Age.values == 78)) ,'PA']=5 score.loc[(( score.Name.values == 4)) ,'PN']=-1 score.loc[(( score.Name.values == 5)) ,'PN']=3 score.loc[(( score.Name.values == 8)) ,'PN']=1 score.loc[(( score.Name.values == 9)) ,'PN']=1 score.loc[(( score.Sex.values == 1)) ,'PS']=3 score.loc[(( score.Sex.values == 3)) ,'PS']=-3 score.loc[(( score.Sex.values == 4)) ,'PS']=5 score.loc[(( score.Sex.values == 5)) ,'PS']=5 score.loc[(( score.Sex.values >= 6)) ,'PS']=1 score['Priority'] = score['PA'] + score['PN'] + score['PS'] score.head(3 )
Titanic - Machine Learning from Disaster
7,258,897
sub_fl = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") sub_fl['target'] = preds.round().astype(int) sub_fl.to_csv("submission.csv", index=False )<set_options>
score['Situation'] = 0 score['SA'] = 0 score['SF'] = 0 for i in score: score.loc[(( score.Age.values == 36)) ,'SA']=1 score.loc[(( score.Family.values == 2)) ,'SF']=1 score.loc[(( score.Family.values == 3)) ,'SF']=1 score.loc[(( score.Family.values == 4)) ,'SF']=3 score['Situation'] = score['SA'] + score['SF'] score.head(20 )
Titanic - Machine Learning from Disaster
7,258,897
warnings.filterwarnings('ignore' )<randomize_order>
score['Sacrificed'] = 0 score['SN'] = 0 score['FS'] = 0 for i in score: score.loc[(( score.Name.values == 1)) ,'SN']=-5 score.loc[(( score.Family.values == 5)) ,'FS']=-1 score.loc[(( score.Family.values == 6)) ,'FS']=-3 score.loc[(( score.Family.values == 8)) ,'FS']=-5 score.loc[(( score.Family.values >= 9)) ,'FS']=-5 score['Sacrificed'] = score['SN'] + score['FS'] score.head(3 )
Titanic - Machine Learning from Disaster
7,258,897
def seed_everything(seed=0): random.seed(seed) np.random.seed(seed) def df_parallelize_run(func, t_split): num_cores = np.min([N_CORES,len(t_split)]) pool = Pool(num_cores) df = pd.concat(pool.map(func, t_split), axis=1) pool.close() pool.join() return df<categorify>
score['Score'] = score['Class'] + score['Wealth'] + score['Priority'] + score['Situation'] + score['Sacrificed']
Titanic - Machine Learning from Disaster
7,258,897
def get_data_by_store(store): df = pd.concat([pd.read_pickle(BASE), pd.read_pickle(PRICE ).iloc[:,2:], pd.read_pickle(CALENDAR ).iloc[:,2:]], axis=1) df = df[df['store_id']==store] df2 = pd.read_pickle(MEAN_ENC)[mean_features] df2 = df2[df2.index.isin(df.index)] df3 = pd.read_pickle(LAGS ).iloc[:,3:] df3 = df3[df3.index.isin(df.index)] df = pd.concat([df, df2], axis=1) del df2 df = pd.concat([df, df3], axis=1) del df3 features = [col for col in list(df)if col not in remove_features] df = df[['id','d',TARGET]+features] df = df[df['d']>=START_TRAIN].reset_index(drop=True) return df, features def get_base_test() : base_test = pd.DataFrame() for store_id in STORES_IDS: temp_df = pd.read_pickle('test_'+store_id+'.pkl') temp_df['store_id'] = store_id base_test = pd.concat([base_test, temp_df] ).reset_index(drop=True) return base_test def make_lag(LAG_DAY): lag_df = base_test[['id','d',TARGET]] col_name = 'sales_lag_'+str(LAG_DAY) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(LAG_DAY)).astype(np.float16) return lag_df[[col_name]] def make_lag_roll(LAG_DAY): shift_day = LAG_DAY[0] roll_wind = LAG_DAY[1] lag_df = base_test[['id','d',TARGET]] col_name = 'rolling_mean_tmp_'+str(shift_day)+'_'+str(roll_wind) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(shift_day ).rolling(roll_wind ).mean()) return lag_df[[col_name]]<init_hyperparams>
df_new = pd.DataFrame()
Titanic - Machine Learning from Disaster
7,258,897
lgb_params = { 'boosting_type': 'gbdt', 'objective': 'tweedie', 'tweedie_variance_power': 1.1, 'metric': 'rmse', 'subsample': 0.5, 'subsample_freq': 1, 'learning_rate': 0.03, 'num_leaves': 2**11-1, 'min_data_in_leaf': 2**12-1, 'feature_fraction': 0.5, 'max_bin': 100, 'n_estimators': 1400, 'boost_from_average': False, 'verbose': -1, } <init_hyperparams>
df_enc = df_new.apply(LabelEncoder().fit_transform) df_enc.head()
Titanic - Machine Learning from Disaster
7,258,897
VER = 1 SEED = 42 seed_everything(SEED) lgb_params['seed'] = SEED N_CORES = psutil.cpu_count() TARGET = 'sales' START_TRAIN = 0 END_TRAIN = 1913 P_HORIZON = 28 USE_AUX = True remove_features = ['id','state_id','store_id', 'date','wm_yr_wk','d',TARGET] mean_features = ['enc_cat_id_mean','enc_cat_id_std', 'enc_dept_id_mean','enc_dept_id_std', 'enc_item_id_mean','enc_item_id_std'] ORIGINAL = '.. /input/m5-forecasting-accuracy/' BASE = '.. /input/m5-simple-fe/grid_part_1.pkl' PRICE = '.. /input/m5-simple-fe/grid_part_2.pkl' CALENDAR = '.. /input/m5-simple-fe/grid_part_3.pkl' LAGS = '.. /input/m5-lags-features/lags_df_28.pkl' MEAN_ENC = '.. /input/m5-custom-features/mean_encoding_df.pkl' AUX_MODELS = '.. /input/m5-aux-models/' STORES_IDS = pd.read_csv(ORIGINAL+'sales_train_validation.csv')['store_id'] STORES_IDS = list(STORES_IDS.unique()) SHIFT_DAY = 28 N_LAGS = 15 LAGS_SPLIT = [col for col in range(SHIFT_DAY,SHIFT_DAY+N_LAGS)] ROLS_SPLIT = [] for i in [1,7,14]: for j in [7,14,30,60]: ROLS_SPLIT.append([i,j] )<init_hyperparams>
train = df_enc[:ntrain] test = df_enc[ntrain:]
Titanic - Machine Learning from Disaster
7,258,897
if USE_AUX: lgb_params['n_estimators'] = 2 <init_hyperparams>
X_test = test X_train = train
Titanic - Machine Learning from Disaster
7,258,897
for store_id in STORES_IDS: print('Train', store_id) grid_df, features_columns = get_data_by_store(store_id) train_mask = grid_df['d']<=END_TRAIN valid_mask = train_mask&(grid_df['d']>(END_TRAIN-P_HORIZON)) preds_mask = grid_df['d']>(END_TRAIN-100) train_data = lgb.Dataset(grid_df[train_mask][features_columns], label=grid_df[train_mask][TARGET]) train_data.save_binary('train_data.bin') train_data = lgb.Dataset('train_data.bin') valid_data = lgb.Dataset(grid_df[valid_mask][features_columns], label=grid_df[valid_mask][TARGET]) grid_df = grid_df[preds_mask].reset_index(drop=True) keep_cols = [col for col in list(grid_df)if '_tmp_' not in col] grid_df = grid_df[keep_cols] grid_df.to_pickle('test_'+store_id+'.pkl') del grid_df seed_everything(SEED) estimator = lgb.train(lgb_params, train_data, valid_sets = [valid_data], verbose_eval = 100, ) model_name = 'lgb_model_'+store_id+'_v'+str(VER)+'.bin' pickle.dump(estimator, open(model_name, 'wb')) !rm train_data.bin del train_data, valid_data, estimator gc.collect() MODEL_FEATURES = features_columns<define_variables>
scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test )
Titanic - Machine Learning from Disaster
7,258,897
all_preds = pd.DataFrame() base_test = get_base_test() main_time = time.time() for PREDICT_DAY in range(1,29): print('Predict | Day:', PREDICT_DAY) start_time = time.time() grid_df = base_test.copy() grid_df = pd.concat([grid_df, df_parallelize_run(make_lag_roll, ROLS_SPLIT)], axis=1) for store_id in STORES_IDS: model_path = 'lgb_model_'+store_id+'_v'+str(VER)+'.bin' if USE_AUX: model_path = AUX_MODELS + model_path estimator = pickle.load(open(model_path, 'rb')) day_mask = base_test['d']==(END_TRAIN+PREDICT_DAY) store_mask = base_test['store_id']==store_id mask =(day_mask)&(store_mask) base_test[TARGET][mask] = estimator.predict(grid_df[mask][MODEL_FEATURES]) temp_df = base_test[day_mask][['id',TARGET]] temp_df.columns = ['id','F'+str(PREDICT_DAY)] if 'id' in list(all_preds): all_preds = all_preds.merge(temp_df, on=['id'], how='left') else: all_preds = temp_df.copy() print(' ' %0.2f min total |' %(( time.time() - main_time)/ 60), ' %0.2f day sales |' %(temp_df['F'+str(PREDICT_DAY)].sum())) del temp_df all_preds = all_preds.reset_index(drop=True) all_preds<save_to_csv>
ran = RandomForestClassifier(random_state=1) knn = KNeighborsClassifier() log = LogisticRegression() xgb = XGBClassifier() gbc = GradientBoostingClassifier() svc = SVC(probability=True) ext = ExtraTreesClassifier() ada = AdaBoostClassifier() gnb = GaussianNB() gpc = GaussianProcessClassifier() bag = BaggingClassifier() models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores.append(acc.mean() )
Titanic - Machine Learning from Disaster
7,258,897
submission = pd.read_csv(ORIGINAL+'sample_submission.csv')[['id']] submission = submission.merge(all_preds, on=['id'], how='left' ).fillna(0) submission.to_csv('submission_v'+str(VER)+'.csv', index=False )<set_options>
results = pd.DataFrame({ 'Model': ['Random Forest', 'K Nearest Neighbour', 'Logistic Regression', 'XGBoost', 'Gradient Boosting', 'SVC', 'Extra Trees', 'AdaBoost', 'Gaussian Naive Bayes', 'Gaussian Process', 'Bagging Classifier'], 'Score': scores}) result_df = results.sort_values(by='Score', ascending=False ).reset_index(drop=True) result_df.head(11 )
Titanic - Machine Learning from Disaster
7,258,897
warnings.filterwarnings('ignore' )<randomize_order>
fi = {'Features':train.columns.tolist() , 'Importance':xgb.feature_importances_} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
7,258,897
def seed_everything(seed=0): random.seed(seed) np.random.seed(seed) def df_parallelize_run(func, t_split): num_cores = np.min([N_CORES,len(t_split)]) pool = Pool(num_cores) df = pd.concat(pool.map(func, t_split), axis=1) pool.close() pool.join() return df<categorify>
fi = {'Features':train.columns.tolist() , 'Importance':np.transpose(log.coef_[0])} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
7,258,897
def get_data_by_store(store): df = pd.concat([pd.read_pickle(BASE), pd.read_pickle(PRICE ).iloc[:,2:], pd.read_pickle(CALENDAR ).iloc[:,2:]], axis=1) df = df[df['store_id']==store] df2 = pd.read_pickle(MEAN_ENC)[mean_features] df2 = df2[df2.index.isin(df.index)] df3 = pd.read_pickle(LAGS ).iloc[:,3:] df3 = df3[df3.index.isin(df.index)] df = pd.concat([df, df2], axis=1) del df2 df = pd.concat([df, df3], axis=1) del df3 features = [col for col in list(df)if col not in remove_features] df = df[['id','d',TARGET]+features] df = df[df['d']>=START_TRAIN].reset_index(drop=True) return df, features def get_base_test() : base_test = pd.DataFrame() for store_id in STORES_IDS: temp_df = pd.read_pickle('test_'+store_id+'.pkl') temp_df['store_id'] = store_id base_test = pd.concat([base_test, temp_df] ).reset_index(drop=True) return base_test def make_lag(LAG_DAY): lag_df = base_test[['id','d',TARGET]] col_name = 'sales_lag_'+str(LAG_DAY) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(LAG_DAY)).astype(np.float16) return lag_df[[col_name]] def make_lag_roll(LAG_DAY): shift_day = LAG_DAY[0] roll_wind = LAG_DAY[1] lag_df = base_test[['id','d',TARGET]] col_name = 'rolling_mean_tmp_'+str(shift_day)+'_'+str(roll_wind) lag_df[col_name] = lag_df.groupby(['id'])[TARGET].transform(lambda x: x.shift(shift_day ).rolling(roll_wind ).mean()) return lag_df[[col_name]]<init_hyperparams>
gbc_imp = pd.DataFrame({'Feature':train.columns, 'gbc importance':gbc.feature_importances_}) xgb_imp = pd.DataFrame({'Feature':train.columns, 'xgb importance':xgb.feature_importances_}) ran_imp = pd.DataFrame({'Feature':train.columns, 'ran importance':ran.feature_importances_}) ext_imp = pd.DataFrame({'Feature':train.columns, 'ext importance':ext.feature_importances_}) ada_imp = pd.DataFrame({'Feature':train.columns, 'ada importance':ada.feature_importances_}) importances = gbc_imp.merge(xgb_imp, on='Feature' ).merge(ran_imp, on='Feature' ).merge(ext_imp, on='Feature' ).merge(ada_imp, on='Feature') importances['Average'] = importances.mean(axis=1) importances = importances.sort_values(by='Average', ascending=False ).reset_index(drop=True) importances
Titanic - Machine Learning from Disaster
7,258,897
lgb_params = { 'boosting_type': 'gbdt', 'objective': 'tweedie', 'tweedie_variance_power': 1.1, 'metric': 'rmse', 'subsample': 0.5, 'subsample_freq': 1, 'learning_rate': 0.02, 'num_leaves': 2**11-1, 'min_data_in_leaf': 2**12-1, 'feature_fraction': 0.5, 'max_bin': 100, 'n_estimators': 1300, 'early_stopping_rounds': 30, 'boost_from_average': False, 'verbose': -1, } <init_hyperparams>
fi = {'Features':importances['Feature'], 'Importance':importances['Average']} importance = pd.DataFrame(fi, index=None ).sort_values('Importance', ascending=False )
Titanic - Machine Learning from Disaster
7,258,897
VER = 11 SEED = 41 seed_everything(SEED) lgb_params['seed'] = SEED N_CORES = psutil.cpu_count() TARGET = 'sales' START_TRAIN = 30 END_TRAIN = 1941 P_HORIZON = 28 USE_AUX = True remove_features = ['id','state_id','store_id', 'date','wm_yr_wk','d',TARGET] mean_features = ['enc_cat_id_mean','enc_cat_id_std', 'enc_dept_id_mean','enc_dept_id_std', 'enc_item_id_mean','enc_item_id_std'] ORIGINAL = '.. /input/m5-forecasting-accuracy/' BASE = '.. /input/featureextraction/grid_part_1.pkl' PRICE = '.. /input/featureextraction/grid_part_2.pkl' CALENDAR = '.. /input/featureextraction/grid_part_3.pkl' LAGS = '.. /input/lag-rollingfeature/lags_df_28.pkl' MEAN_ENC = '.. /input/other-features/mean_encoding_df.pkl' AUX_MODELS = '.. /input/m5extrafeaturesadd/' STORES_IDS = pd.read_csv(ORIGINAL+'sales_train_evaluation.csv')['store_id'] STORES_IDS = list(STORES_IDS.unique()) SHIFT_DAY = 28 N_LAGS = 15 LAGS_SPLIT = [col for col in range(SHIFT_DAY,SHIFT_DAY+N_LAGS)] ROLS_SPLIT = [] for i in [1,7,14]: for j in [7,14,30,60]: ROLS_SPLIT.append([i,j] )<set_options>
train = train.drop(['Class', 'Pclass', 'Embarked'], axis=1) test = test.drop(['Class', 'Pclass', 'Embarked'], axis=1) X_train = train X_test = test X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test )
Titanic - Machine Learning from Disaster
7,258,897
gc.collect()<define_variables>
ran = RandomForestClassifier(random_state=1) knn = KNeighborsClassifier() log = LogisticRegression() xgb = XGBClassifier(random_state=1) gbc = GradientBoostingClassifier(random_state=1) svc = SVC(probability=True) ext = ExtraTreesClassifier(random_state=1) ada = AdaBoostClassifier(random_state=1) gnb = GaussianNB() gpc = GaussianProcessClassifier() bag = BaggingClassifier(random_state=1) models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores_v2 = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores_v2.append(acc.mean() )
Titanic - Machine Learning from Disaster
7,258,897
for store_id in STORES_IDS: print('Train', store_id) grid_df, features_columns = get_data_by_store(store_id) train_mask = grid_df['d']<=END_TRAIN valid_mask = train_mask&(grid_df['d']>(END_TRAIN-P_HORIZON)) preds_mask = grid_df['d']>(END_TRAIN-100) train_data = lgb.Dataset(grid_df[train_mask][features_columns], label=grid_df[train_mask][TARGET]) train_data.save_binary('train_data.bin') train_data = lgb.Dataset('train_data.bin') valid_data = lgb.Dataset(grid_df[valid_mask][features_columns], label=grid_df[valid_mask][TARGET]) grid_df = grid_df[preds_mask].reset_index(drop=True) keep_cols = [col for col in list(grid_df)if '_tmp_' not in col] grid_df = grid_df[keep_cols] grid_df.to_pickle('test_'+store_id+'.pkl') del grid_df seed_everything(SEED) estimator = lgb.train(lgb_params, train_data, valid_sets = [valid_data], verbose_eval = 100, ) model_name = 'lgb_model_'+store_id+'_v'+str(VER)+'.bin' pickle.dump(estimator, open(model_name, 'wb')) !rm train_data.bin del train_data, valid_data, estimator gc.collect() MODEL_FEATURES = features_columns<define_variables>
Cs = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 50, 100] gammas = [0.001, 0.01, 0.1, 1] hyperparams = {'C': Cs, 'gamma' : gammas} gd=GridSearchCV(estimator = SVC(probability=True), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
all_preds = pd.DataFrame() base_test = get_base_test() main_time = time.time() for PREDICT_DAY in range(1,29): print('Predict | Day:', PREDICT_DAY) start_time = time.time() grid_df = base_test.copy() grid_df = pd.concat([grid_df, df_parallelize_run(make_lag_roll, ROLS_SPLIT)], axis=1) for store_id in STORES_IDS: model_path = 'lgb_model_'+store_id+'_v'+str(VER)+'.bin' if USE_AUX: model_path = AUX_MODELS + model_path estimator = pickle.load(open(model_path, 'rb')) day_mask = base_test['d']==(END_TRAIN+PREDICT_DAY) store_mask = base_test['store_id']==store_id mask =(day_mask)&(store_mask) base_test[TARGET][mask] = estimator.predict(grid_df[mask][MODEL_FEATURES]) temp_df = base_test[day_mask][['id',TARGET]] temp_df.columns = ['id','F'+str(PREDICT_DAY)] if 'id' in list(all_preds): all_preds = all_preds.merge(temp_df, on=['id'], how='left') else: all_preds = temp_df.copy() print(' ' %0.2f min total |' %(( time.time() - main_time)/ 60), ' %0.2f day sales |' %(temp_df['F'+str(PREDICT_DAY)].sum())) del temp_df all_preds = all_preds.reset_index(drop=True) all_preds<save_to_csv>
learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2] n_estimators = [100, 250, 500, 750, 1000, 1250, 1500] hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators} gd=GridSearchCV(estimator = GradientBoostingClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
submission = pd.read_csv(ORIGINAL+'sample_submission.csv')[['id']] submission = submission.merge(all_preds, on=['id'], how='left' ).fillna(0) submission.to_csv('submission_v'+str(VER)+'.csv', index=False )<set_options>
penalty = ['l1', 'l2'] C = np.logspace(0, 4, 10) hyperparams = {'penalty': penalty, 'C': C} gd=GridSearchCV(estimator = LogisticRegression() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
gc.collect()<set_options>
learning_rate = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2] n_estimators = [10, 25, 50, 75, 100, 250, 500, 750, 1000] hyperparams = {'learning_rate': learning_rate, 'n_estimators': n_estimators} gd=GridSearchCV(estimator = XGBClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
!pip install.. /input/kaggle-efficientnet-repo/efficientnet-1.0.0-py3-none-any.whl gc.enable()<categorify>
max_depth = [3, 4, 5, 6, 7, 8, 9, 10] min_child_weight = [1, 2, 3, 4, 5, 6] hyperparams = {'max_depth': max_depth, 'min_child_weight': min_child_weight} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
sz = 256 N = 48 def tile(img): result = [] shape = img.shape pad0,pad1 =(sz - shape[0]%sz)%sz,(sz - shape[1]%sz)%sz img = np.pad(img,[[pad0//2,pad0-pad0//2],[pad1//2,pad1-pad1//2],[0,0]], constant_values=255) img = img.reshape(img.shape[0]//sz,sz,img.shape[1]//sz,sz,3) img = img.transpose(0,2,1,3,4 ).reshape(-1,sz,sz,3) if len(img)< N: img = np.pad(img,[[0,N-len(img)],[0,0],[0,0],[0,0]],constant_values=255) idxs = np.argsort(img.reshape(img.shape[0],-1 ).sum(-1)) [:42] img = img[idxs] return img<choose_model_class>
gamma = [i*0.1 for i in range(0,5)] hyperparams = {'gamma': gamma} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
class ConvNet(tf.keras.Model): def __init__(self, engine, input_shape, weights): super(ConvNet, self ).__init__() self.engine = engine( include_top=False, input_shape=input_shape, weights=weights) self.avg_pool2d = tf.keras.layers.GlobalAveragePooling2D() self.dropout = tf.keras.layers.Dropout(0.5) self.dense_1 = tf.keras.layers.Dense(1024) self.dense_2 = tf.keras.layers.Dense(1) @tf.function def call(self, inputs, **kwargs): x = tf.reshape(inputs,(-1, IMG_SIZE, IMG_SIZE, 3)) x = self.engine(x) shape = x.shape x = tf.reshape(x,(-1, N_TILES, shape[1], shape[2], shape[3])) x = tf.transpose(x, perm=[0, 2, 1, 3, 4]) x = tf.reshape(x,(-1, shape[1], N_TILES*shape[2], shape[3])) x = self.avg_pool2d(x) x = self.dropout(x, training=False) x = self.dense_1(x) x = tf.nn.relu(x) return self.dense_2(x )<choose_model_class>
subsample = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1] colsample_bytree = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1] hyperparams = {'subsample': subsample, 'colsample_bytree': colsample_bytree} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1, gamma=0), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
is_ef = True backbone_name = 'efficientnet-b0' N_TILES = 42 IMG_SIZE = 256 if backbone_name.startswith('efficientnet'): model_fn = getattr(efn, f'EfficientNetB{backbone_name[-1]}') model = ConvNet(engine=model_fn, input_shape=(IMG_SIZE, IMG_SIZE, 3), weights=None) dummy_data = tf.zeros(( 2 * N_TILES, IMG_SIZE, IMG_SIZE, 3), dtype=tf.float32) _ = model(dummy_data )<load_pretrained>
reg_alpha = [1e-5, 1e-2, 0.1, 1, 100] hyperparams = {'reg_alpha': reg_alpha} gd=GridSearchCV(estimator = XGBClassifier(learning_rate=0.0001, n_estimators=10, max_depth=3, min_child_weight=1, gamma=0, subsample=0.6, colsample_bytree=0.9), param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
model.load_weights('.. /input/tpu-training-tensorflow-iafoos-method-42x256x256x3/efficientnet-b0.h5' )<load_from_csv>
n_restarts_optimizer = [0, 1, 2, 3] max_iter_predict = [1, 2, 5, 10, 20, 35, 50, 100] warm_start = [True, False] hyperparams = {'n_restarts_optimizer': n_restarts_optimizer, 'max_iter_predict': max_iter_predict, 'warm_start': warm_start} gd=GridSearchCV(estimator = GaussianProcessClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
TRAIN = '.. /input/prostate-cancer-grade-assessment/train_images/' MASKS = '.. /input/prostate-cancer-grade-assessment/train_label_masks/' BASE_PATH = '.. /input/prostate-cancer-grade-assessment/' train = pd.read_csv(BASE_PATH + "train.csv") train.head()<load_from_csv>
n_estimators = [10, 25, 50, 75, 100, 125, 150, 200] learning_rate = [0.001, 0.01, 0.1, 0.5, 1, 1.5, 2] hyperparams = {'n_estimators': n_estimators, 'learning_rate': learning_rate} gd=GridSearchCV(estimator = AdaBoostClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
sub = pd.read_csv(".. /input/prostate-cancer-grade-assessment/sample_submission.csv") sub.head()<load_from_csv>
n_neighbors = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20] algorithm = ['auto'] weights = ['uniform', 'distance'] leaf_size = [1, 2, 3, 4, 5, 10, 15, 20, 25, 30] hyperparams = {'algorithm': algorithm, 'weights': weights, 'leaf_size': leaf_size, 'n_neighbors': n_neighbors} gd=GridSearchCV(estimator = KNeighborsClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
test = pd.read_csv(".. /input/prostate-cancer-grade-assessment/test.csv") test.head()<define_variables>
n_estimators = [10, 25, 50, 75, 100] max_depth = [3, None] max_features = [1, 3, 5, 7] min_samples_split = [2, 4, 6, 8, 10] min_samples_leaf = [2, 4, 6, 8, 10] hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} gd=GridSearchCV(estimator = RandomForestClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
TEST = '.. /input/prostate-cancer-grade-assessment/test_images/'<define_variables>
n_estimators = [10, 25, 50, 75, 100] max_depth = [3, None] max_features = [1, 3, 5, 7] min_samples_split = [2, 4, 6, 8, 10] min_samples_leaf = [2, 4, 6, 8, 10] hyperparams = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_features': max_features, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf} gd=GridSearchCV(estimator = ExtraTreesClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
PRED_PATH = TEST df = sub t_df = test<concatenate>
n_estimators = [10, 15, 20, 25, 50, 75, 100, 150] max_samples = [1, 2, 3, 5, 7, 10, 15, 20, 25, 30, 50] max_features = [1, 3, 5, 7] hyperparams = {'n_estimators': n_estimators, 'max_samples': max_samples, 'max_features': max_features} gd=GridSearchCV(estimator = BaggingClassifier() , param_grid = hyperparams, verbose=True, cv=5, scoring = "accuracy") gd.fit(X_train, y_train) print(gd.best_score_) print(gd.best_estimator_ )
Titanic - Machine Learning from Disaster
7,258,897
transforms = albumentations.Compose([ albumentations.Transpose(p=0.5), albumentations.VerticalFlip(p=0.5), albumentations.HorizontalFlip(p=0.5), ] )<categorify>
ran = RandomForestClassifier(n_estimators=25, max_depth=3, max_features=3, min_samples_leaf=2, min_samples_split=8, random_state=1) knn = KNeighborsClassifier(algorithm='auto', leaf_size=1, n_neighbors=5, weights='uniform') log = LogisticRegression(C=2.7825594022071245, penalty='l2') xgb = XGBClassifier(learning_rate=0.0001, n_estimators=10, random_state=1) gbc = GradientBoostingClassifier(learning_rate=0.0005, n_estimators=1250, random_state=1) svc = SVC(probability=True) ext = ExtraTreesClassifier(max_depth=None, max_features=3, min_samples_leaf=2, min_samples_split=8, n_estimators=10, random_state=1) ada = AdaBoostClassifier(learning_rate=0.1, n_estimators=50, random_state=1) gpc = GaussianProcessClassifier() bag = BaggingClassifier(random_state=1) models = [ran, knn, log, xgb, gbc, svc, ext, ada, gnb, gpc, bag] scores_v3 = [] for mod in models: mod.fit(X_train, y_train) acc = cross_val_score(mod, X_train, y_train, scoring = "accuracy", cv = 10) scores_v3.append(acc.mean() )
Titanic - Machine Learning from Disaster