code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:573] # language: python # name: conda-env-573-py # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import altair as alt # ### Learning Data Viz techniques from an online class df = pd.read_csv("titanic.csv") df.head() df["survived"].value_counts() df.isnull().sum() df.info() sns.heatmap(df.isnull(), yticklabels=False, cbar= False, cmap ='viridis' ) # - Most of the missing is in the age, cabin, boat, body and home.dest column sns.countplot(x=df["survived"], hue= df["sex"], palette = "ocean") # - We can observe that the more number of female have survived than males. # - Also, among the deceased, there were more men than women. sns.countplot(x=df["survived"], hue= df["pclass"], palette = "inferno") sns.displot(x = df["age"].dropna(), kde = False, bins =40) # - We see that the age ranges from 0 to 80, with most of them belonging to the age group 20-50. # - The maximum number of people belong to the age group of 25 to 30. df["age_category"] = pd.cut( df.age, bins = [0,10,20,30,40,50,60,70,80], labels = ["kids", "teenager", "young", "middle_aged", "upper middle", "old", "very_old", "senior"], ordered= True) plt.figure(figsize = (25,8)) ax = sns.countplot(x=df["age_category"], data=df, hue= "survived", palette = "YlOrBr") # - We observe that the majority of the people who did not survive are between the age group 20 to 30 followed by people in the age group 30-40. 
However, these are the counts and not percentages or proportions # #### Considering the proportions of people belonging to different age groups who survived temp_df = df.groupby("age_category")["survived"].sum().reset_index() temp_df2 = df.groupby("age_category").size().reset_index() final_temp = pd.merge( temp_df, temp_df2, on = "age_category", how= 'inner' ) # + #final_temp['prop_survived'] = final_temp["survived"]/ final_temp[0] #final_temp # + #temp = df.groupby("survived")["age_category"].value_counts() # - final_temp = final_temp.melt(id_vars = "age_category") plt.figure(figsize =(12,8)) sns.barplot(data = final_temp, x="age_category",y="value", hue="variable") # - This shows that maximum number of people in the ship belonged to the young catgeory and most of the people who survived alo belong to the same category, but the survived people do not account not even 50% of the total in that group. # - More than half of the kids have survived. # - Very few people belonging to the very_old category have survived. # - Rest all categories approximately half of them were rescued. # # ### Analyzing Fare sns.distplot(x = df["fare"].dropna(), kde = False, bins = 20) temp_df_p_class = df.groupby("pclass")["survived"].sum().reset_index() temp_p_class_total = df.groupby("pclass").size().reset_index() final_temp_p_class = pd.merge( temp_df_p_class, temp_p_class_total, on = "pclass", how= 'inner' ) final_temp_p_class final_temp_p_class = final_temp_p_class.melt(id_vars = "pclass") plt.figure(figsize =(12,8)) sns.barplot(data = final_temp_p_class, x="pclass",y="value", hue="variable") # - We can observe that most of the people belonging to the class 1 have been recused # - 50% of the people belonging to the class 2 have survived # - Most of the people belonging to the class 3 have not been able to survive. 
# ### Age vs pclass plt.figure(figsize=(12,8)) sns.boxplot(x = "pclass", y="age", data = df) # - Most of the people belonging to the class 1 are from 30 to 50 year of age, however total range is wider than other two classes. # - The median age of people belonging to class 3 is less than that of class 2 # - There are many outliers for the class 2 and class 3
dataviz.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd from __future__ import with_statement import warnings warnings.filterwarnings('ignore') # ### Problem 1 # #### Create a function that will take input two lists/numpy array of arbitrary length with numeric values and return a list/numpy array containing all the unique items that are either in the first list or the second but not in both. # #### Randomly create 2 numpy arrays with different length and test your function. Make sure the results are reproducible. Example Input and Output: Input = [ 1, 2, 3, 4 ], [ 1, 2, 0 ,4 ] Output = [ 3, 0 ] def unique_items(list1 , list2): list1 = set(list1) list2 = set(list2) return np.array((list1-list2) | (list2-list1)) np.random.seed(42) print(unique_items(np.random.rand(np.random.randint(1,10)), np.random.rand(np.random.randint(1,10)))) input1 = [1,2,3,4] input2 = [1,2,0,4] print(unique_items(input1, input2)) # ### Problem 2 # #### Given a list of integers, write one line of code that will create a new list containing only the odd numbers from the original list. # #### Randomly generate a list of integers from 10 to 100 included and test your code. Make sure the results are reproducible. Example Input and Output: Input = [ 1, 2, 3, 4 ] Output = [ 1, 3 ] original_list = np.random.random_integers(low=10, high=100, size=20) odd_list = [i for i in original_list if i % 2 ] print (odd_list) original_list = [ 1, 2, 3, 4 ] odd_list = [i for i in original_list if i % 2 != 0 ] print (odd_list) # ### Problem 3 # #### Write a function that will take a list of integers and strings and save in a .txt file the number of strings and the number of even and odd integers. 
Example Input and Output: Input = [ 1, 2, “hello”, 4 , 6] Output in a .txt file: Number of strings: 1 Number of odd integers: 1 Number of even integers: 3 def arrange_list(mixed_list): number_of_strings = 0 number_of_even_integers = 0 number_of_odd_integers = 0 for item in mixed_list: if isinstance(item, str): number_of_strings += 1 elif isinstance(item, int): if item % 2 == 0: number_of_even_integers += 1 elif item % 2 == 1: number_of_odd_integers += 1 return (number_of_strings, number_of_even_integers, number_of_odd_integers) def save_to_file(mixed_list): number_of_strings, number_of_even_integers, number_of_odd_integers = arrange_list(mixed_list=mixed_list) f = open("a.txt","w") f.write("Number of strings: %d\r\n" % (number_of_strings)) f.write("Number of odd integers: %d\r\n" % (number_of_odd_integers)) f.write("Number of even integers: %d\r\n" % (number_of_even_integers)) f.close() save_to_file([ 1, 2, 'hello', 4 , 6])
Homeworks/Homework_1/Homework1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %reload_ext autoreload # %autoreload 2 # %matplotlib inline from fastai.io import * from fastai.conv_learner import * from fastai.column_data import * # - torch.cuda.set_device(1) # ## Setup # # 准备数据 # We're going to download the collected works of Nietzsche to use as our data for this class. PATH='data/nietzsche/' get_data("https://s3.amazonaws.com/text-datasets/nietzsche.txt", f'{PATH}nietzsche.txt') text = open(f'{PATH}nietzsche.txt').read() print('corpus length:', len(text)) text[:400] # + # 统计字符数目 chars = sorted(list(set(text))) vocab_size = len(chars)+1 print('total chars:', vocab_size) # - # Sometimes it's useful to have a zero value in the dataset, e.g. for padding # + # 增加零值用于数据集中的padding。 chars.insert(0, "\0") ''.join(chars[1:-6]) # - # Map from chars to indices and back again # 构建一张字符->下标和下标->字符的表 char_indices = {c: i for i, c in enumerate(chars)} indices_char = {i: c for i, c in enumerate(chars)} # *idx* will be the data we use from now on - it simply converts all the characters to their index (based on the mapping above) # + # 将文本中所有字符都转换为下标 idx = [char_indices[c] for c in text] idx[:10] # - # 测试 ''.join(indices_char[i] for i in idx[:70]) # ## Three char model # # 3字符模型 # ### Create inputs # Create a list of every 4th character, starting at the 0th, 1st, 2nd, then 3rd characters # 构建列表,分别用来存储第0个,第1个,第2个和第3个字符 cs=3 c1_dat = [idx[i] for i in range(0, len(idx)-cs, cs)] c2_dat = [idx[i+1] for i in range(0, len(idx)-cs, cs)] c3_dat = [idx[i+2] for i in range(0, len(idx)-cs, cs)] c4_dat = [idx[i+3] for i in range(0, len(idx)-cs, cs)] # Our inputs # 前三个字符作为输入 x1 = np.stack(c1_dat) x2 = np.stack(c2_dat) x3 = np.stack(c3_dat) # Our output # 第四个字符作为输出 y = np.stack(c4_dat) # The first 4 inputs and outputs x1[:4], x2[:4], x3[:4] y[:4] # 
确保维度对应上 x1.shape, y.shape # ### Create and train model # Pick a size for our hidden state # 选择隐态的大小 n_hidden = 256 # The number of latent factors to create (i.e. the size of the embedding matrix) # 选择latent factors的数目 # 比如embedding matrix的大小 n_fac = 42 # 构建一个针对三字符预测的模型 class Char3Model(nn.Module): def __init__(self, vocab_size, n_fac): super().__init__() # Embedding层 self.e = nn.Embedding(vocab_size, n_fac) # The 'green arrow' from our diagram - the layer operation from input to hidden # Linear层:输入->隐藏层 self.l_in = nn.Linear(n_fac, n_hidden) # The 'orange arrow' from our diagram - the layer operation from hidden to hidden # Linear层:隐藏层->隐藏层 self.l_hidden = nn.Linear(n_hidden, n_hidden) # The 'blue arrow' from our diagram - the layer operation from hidden to output # Linear层:隐藏层->隐藏层 self.l_out = nn.Linear(n_hidden, vocab_size) def forward(self, c1, c2, c3): # 详情看RNN的资料 # 根据输入构建嵌套矩阵-》输入线性层-》Relu in1 = F.relu(self.l_in(self.e(c1))) in2 = F.relu(self.l_in(self.e(c2))) in3 = F.relu(self.l_in(self.e(c3))) # 构建初始量-》循环增加-》隐藏层-》tanh增加对比 h = V(torch.zeros(in1.size()).cuda()) h = F.tanh(self.l_hidden(h+in1)) h = F.tanh(self.l_hidden(h+in2)) h = F.tanh(self.l_hidden(h+in3)) # 得到Softmax结果 return F.log_softmax(self.l_out(h)) # 用fastai自带ColumnarModelData构建数据集dataset md = ColumnarModelData.from_arrays('.', [-1], np.stack([x1,x2,x3], axis=1), y, bs=512) m = Char3Model(vocab_size, n_fac).cuda() it = iter(md.trn_dl) *xs,yt = next(it) t = m(*V(xs)) opt = optim.Adam(m.parameters(), 1e-2) fit(m, md, 1, opt, F.nll_loss) set_lrs(opt, 0.001) fit(m, md, 1, opt, F.nll_loss) # ### Test model def get_next(inp): idxs = T(np.array([char_indices[c] for c in inp])) p = m(*VV(idxs)) i = np.argmax(to_np(p)) return chars[i] get_next('y. ') get_next('ppl') get_next(' th') get_next('and') # ## Our first RNN! # ### Create inputs # This is the size of our unrolled RNN. cs=8 # For each of 0 through 7, create a list of every 8th character with that starting point. These will be the 8 inputs to our model. 
c_in_dat = [[idx[i+j] for i in range(cs)] for j in range(len(idx)-cs)] # Then create a list of the next character in each of these series. This will be the labels for our model. c_out_dat = [idx[j+cs] for j in range(len(idx)-cs)] xs = np.stack(c_in_dat, axis=0) xs.shape y = np.stack(c_out_dat) # So each column below is one series of 8 characters from the text. xs[:cs,:cs] # ...and this is the next character after each sequence. y[:cs] # ### Create and train model val_idx = get_cv_idxs(len(idx)-cs-1) md = ColumnarModelData.from_arrays('.', val_idx, xs, y, bs=512) class CharLoopModel(nn.Module): # This is an RNN! def __init__(self, vocab_size, n_fac): super().__init__() self.e = nn.Embedding(vocab_size, n_fac) self.l_in = nn.Linear(n_fac, n_hidden) self.l_hidden = nn.Linear(n_hidden, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) def forward(self, *cs): bs = cs[0].size(0) h = V(torch.zeros(bs, n_hidden).cuda()) for c in cs: inp = F.relu(self.l_in(self.e(c))) h = F.tanh(self.l_hidden(h+inp)) return F.log_softmax(self.l_out(h), dim=-1) m = CharLoopModel(vocab_size, n_fac).cuda() opt = optim.Adam(m.parameters(), 1e-2) fit(m, md, 1, opt, F.nll_loss) set_lrs(opt, 0.001) fit(m, md, 1, opt, F.nll_loss) class CharLoopConcatModel(nn.Module): def __init__(self, vocab_size, n_fac): super().__init__() self.e = nn.Embedding(vocab_size, n_fac) self.l_in = nn.Linear(n_fac+n_hidden, n_hidden) self.l_hidden = nn.Linear(n_hidden, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) def forward(self, *cs): bs = cs[0].size(0) h = V(torch.zeros(bs, n_hidden).cuda()) for c in cs: inp = torch.cat((h, self.e(c)), 1) inp = F.relu(self.l_in(inp)) h = F.tanh(self.l_hidden(inp)) return F.log_softmax(self.l_out(h), dim=-1) m = CharLoopConcatModel(vocab_size, n_fac).cuda() opt = optim.Adam(m.parameters(), 1e-3) it = iter(md.trn_dl) *xs,yt = next(it) t = m(*V(xs)) fit(m, md, 1, opt, F.nll_loss) set_lrs(opt, 1e-4) fit(m, md, 1, opt, F.nll_loss) # ### Test model def get_next(inp): idxs = 
T(np.array([char_indices[c] for c in inp])) p = m(*VV(idxs)) i = np.argmax(to_np(p)) return chars[i] get_next('for thos') get_next('part of ') get_next('queens a') # ## RNN with pytorch class CharRnn(nn.Module): def __init__(self, vocab_size, n_fac): super().__init__() self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.RNN(n_fac, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) def forward(self, *cs): bs = cs[0].size(0) h = V(torch.zeros(1, bs, n_hidden)) inp = self.e(torch.stack(cs)) outp,h = self.rnn(inp, h) return F.log_softmax(self.l_out(outp[-1]), dim=-1) m = CharRnn(vocab_size, n_fac).cuda() opt = optim.Adam(m.parameters(), 1e-3) it = iter(md.trn_dl) *xs,yt = next(it) t = m.e(V(torch.stack(xs))) t.size() ht = V(torch.zeros(1, 512,n_hidden)) outp, hn = m.rnn(t, ht) outp.size(), hn.size() t = m(*V(xs)); t.size() fit(m, md, 4, opt, F.nll_loss) set_lrs(opt, 1e-4) fit(m, md, 2, opt, F.nll_loss) # ### Test model def get_next(inp): idxs = T(np.array([char_indices[c] for c in inp])) p = m(*VV(idxs)) i = np.argmax(to_np(p)) return chars[i] get_next('for thos') def get_next_n(inp, n): res = inp for i in range(n): c = get_next(inp) res += c inp = inp[1:]+c return res get_next_n('for thos', 40) # ## Multi-output model # ### Setup # Let's take non-overlapping sets of characters this time c_in_dat = [[idx[i+j] for i in range(cs)] for j in range(0, len(idx)-cs-1, cs)] # Then create the exact same thing, offset by 1, as our labels c_out_dat = [[idx[i+j] for i in range(cs)] for j in range(1, len(idx)-cs, cs)] xs = np.stack(c_in_dat) xs.shape ys = np.stack(c_out_dat) ys.shape xs[:cs,:cs] ys[:cs,:cs] # ### Create and train model val_idx = get_cv_idxs(len(xs)-cs-1) md = ColumnarModelData.from_arrays('.', val_idx, xs, ys, bs=512) class CharSeqRnn(nn.Module): def __init__(self, vocab_size, n_fac): super().__init__() self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.RNN(n_fac, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) def forward(self, *cs): bs = 
cs[0].size(0) h = V(torch.zeros(1, bs, n_hidden)) inp = self.e(torch.stack(cs)) outp,h = self.rnn(inp, h) return F.log_softmax(self.l_out(outp), dim=-1) m = CharSeqRnn(vocab_size, n_fac).cuda() opt = optim.Adam(m.parameters(), 1e-3) it = iter(md.trn_dl) *xst,yt = next(it) def nll_loss_seq(inp, targ): sl,bs,nh = inp.size() targ = targ.transpose(0,1).contiguous().view(-1) return F.nll_loss(inp.view(-1,nh), targ) fit(m, md, 4, opt, nll_loss_seq) set_lrs(opt, 1e-4) fit(m, md, 1, opt, nll_loss_seq) # ### Identity init! m = CharSeqRnn(vocab_size, n_fac).cuda() opt = optim.Adam(m.parameters(), 1e-2) m.rnn.weight_hh_l0.data.copy_(torch.eye(n_hidden)) fit(m, md, 4, opt, nll_loss_seq) set_lrs(opt, 1e-3) fit(m, md, 4, opt, nll_loss_seq) # ## Stateful model # ### Setup # + from torchtext import vocab, data from fastai.nlp import * from fastai.lm_rnn import * PATH='data/nietzsche/' TRN_PATH = 'trn/' VAL_PATH = 'val/' TRN = f'{PATH}{TRN_PATH}' VAL = f'{PATH}{VAL_PATH}' # Note: The student needs to practice her shell skills and prepare her own dataset before proceeding: # - trn/trn.txt (first 80% of nietzsche.txt) # - val/val.txt (last 20% of nietzsche.txt) # %ls {PATH} # - # %ls {PATH}trn # + TEXT = data.Field(lower=True, tokenize=list) bs=64; bptt=8; n_fac=42; n_hidden=256 FILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH) md = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=3) len(md.trn_dl), md.nt, len(md.trn_ds), len(md.trn_ds[0].text) # - # ### RNN class CharSeqStatefulRnn(nn.Module): def __init__(self, vocab_size, n_fac, bs): self.vocab_size = vocab_size super().__init__() self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.RNN(n_fac, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) self.init_hidden(bs) def forward(self, cs): bs = cs[0].size(0) if self.h.size(1) != bs: self.init_hidden(bs) outp,h = self.rnn(self.e(cs), self.h) self.h = repackage_var(h) return F.log_softmax(self.l_out(outp), dim=-1).view(-1, 
self.vocab_size) def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden)) m = CharSeqStatefulRnn(md.nt, n_fac, 512).cuda() opt = optim.Adam(m.parameters(), 1e-3) fit(m, md, 4, opt, F.nll_loss) # + set_lrs(opt, 1e-4) fit(m, md, 4, opt, F.nll_loss) # - # ### RNN loop # + # From the pytorch source def RNNCell(input, hidden, w_ih, w_hh, b_ih, b_hh): return F.tanh(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh)) # - class CharSeqStatefulRnn2(nn.Module): def __init__(self, vocab_size, n_fac, bs): super().__init__() self.vocab_size = vocab_size self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.RNNCell(n_fac, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) self.init_hidden(bs) def forward(self, cs): bs = cs[0].size(0) if self.h.size(1) != bs: self.init_hidden(bs) outp = [] o = self.h for c in cs: o = self.rnn(self.e(c), o) outp.append(o) outp = self.l_out(torch.stack(outp)) self.h = repackage_var(o) return F.log_softmax(outp, dim=-1).view(-1, self.vocab_size) def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden)) m = CharSeqStatefulRnn2(md.nt, n_fac, 512).cuda() opt = optim.Adam(m.parameters(), 1e-3) fit(m, md, 4, opt, F.nll_loss) # ### GRU class CharSeqStatefulGRU(nn.Module): def __init__(self, vocab_size, n_fac, bs): super().__init__() self.vocab_size = vocab_size self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.GRU(n_fac, n_hidden) self.l_out = nn.Linear(n_hidden, vocab_size) self.init_hidden(bs) def forward(self, cs): bs = cs[0].size(0) if self.h.size(1) != bs: self.init_hidden(bs) outp,h = self.rnn(self.e(cs), self.h) self.h = repackage_var(h) return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size) def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden)) # + # From the pytorch source code - for reference def GRUCell(input, hidden, w_ih, w_hh, b_ih, b_hh): gi = F.linear(input, w_ih, b_ih) gh = F.linear(hidden, w_hh, b_hh) i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) 
resetgate = F.sigmoid(i_r + h_r) inputgate = F.sigmoid(i_i + h_i) newgate = F.tanh(i_n + resetgate * h_n) return newgate + inputgate * (hidden - newgate) # + m = CharSeqStatefulGRU(md.nt, n_fac, 512).cuda() opt = optim.Adam(m.parameters(), 1e-3) # - fit(m, md, 6, opt, F.nll_loss) set_lrs(opt, 1e-4) fit(m, md, 3, opt, F.nll_loss) # ### Putting it all together: LSTM # + from fastai import sgdr n_hidden=512 # - class CharSeqStatefulLSTM(nn.Module): def __init__(self, vocab_size, n_fac, bs, nl): super().__init__() self.vocab_size,self.nl = vocab_size,nl self.e = nn.Embedding(vocab_size, n_fac) self.rnn = nn.LSTM(n_fac, n_hidden, nl, dropout=0.5) self.l_out = nn.Linear(n_hidden, vocab_size) self.init_hidden(bs) def forward(self, cs): bs = cs[0].size(0) if self.h[0].size(1) != bs: self.init_hidden(bs) outp,h = self.rnn(self.e(cs), self.h) self.h = repackage_var(h) return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size) def init_hidden(self, bs): self.h = (V(torch.zeros(self.nl, bs, n_hidden)), V(torch.zeros(self.nl, bs, n_hidden))) m = CharSeqStatefulLSTM(md.nt, n_fac, 512, 2).cuda() lo = LayerOptimizer(optim.Adam, m, 1e-2, 1e-5) os.makedirs(f'{PATH}models', exist_ok=True) fit(m, md, 2, lo.opt, F.nll_loss) on_end = lambda sched, cycle: save_model(m, f'{PATH}models/cyc_{cycle}') cb = [CosAnneal(lo, len(md.trn_dl), cycle_mult=2, on_cycle_end=on_end)] fit(m, md, 2**4-1, lo.opt, F.nll_loss, callbacks=cb) on_end = lambda sched, cycle: save_model(m, f'{PATH}models/cyc_{cycle}') cb = [CosAnneal(lo, len(md.trn_dl), cycle_mult=2, on_cycle_end=on_end)] fit(m, md, 2**6-1, lo.opt, F.nll_loss, callbacks=cb) # ### Test def get_next(inp): idxs = TEXT.numericalize(inp) p = m(VV(idxs.transpose(0,1))) r = torch.multinomial(p[-1].exp(), 1) return TEXT.vocab.itos[to_np(r)[0]] get_next('for thos') def get_next_n(inp, n): res = inp for i in range(n): c = get_next(inp) res += c inp = inp[1:]+c return res print(get_next_n('for thos', 400))
dl1/.ipynb_checkpoints/lesson6-rnn-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Author: <NAME> import numpy as np import matplotlib from matplotlib import colors, pyplot as plt # - # ## Figure 1 # ### Generate data # + d = 20 k = 2 n = 100 m = 10 m_test=100 n_test=n sigma2=0.001 T=500 r=1 Ystar = np.random.normal(size=(d,k)) Vstar, R = np.linalg.qr(Ystar) VstarT = Vstar.T Xstar = np.random.normal(size=(n,k)) Ustar, R = np.linalg.qr(Xstar) for i in range(n): Ustar[i,:]= Ustar[i,:]/np.linalg.norm(Ustar[i,:]) Mstar = np.dot(Ustar,Vstar.T) Xstar = np.random.normal(size=(n_test,k)) Ustar_test, R = np.linalg.qr(Xstar) Mstar_test = np.dot(Ustar_test,Vstar.T) I = np.eye(d) A = np.zeros((n,m,d)) A_test = np.zeros((n_test,m_test,d)) Y = np.zeros((n,m)) Y_test = np.zeros((n_test,m_test)) for i in range(n): # train data for j in range(m): A[i,j,:] = np.random.normal(size=(d))/(np.sqrt(m)) Y[i,j] = np.dot(Mstar[i,:], A[i,j,:] ) + np.random.normal(0, sigma2) # test data for i in range(n_test): for j in range(m_test): A_test[i,j,:] = np.random.normal(size=(d))/(np.sqrt(m_test)) Y_test[i,j] = np.dot(Mstar[i,:] , A_test[i,j,:] ) + np.random.normal(0, sigma2) V_fedrep = np.random.normal(size=(d,k)) U_fedrep = np.random.normal(size=(n,k)) u_fedsgd = np.random.normal(size=(k,1)) V_fedsgd = np.random.normal(size=(d,k)) # alpha = 3.5/2 # beta = 0.01/2 eta_fedrep = 0.1#25 eta_fedsgd = 0.005 dist_fedrep = np.zeros(T) dist_fedsgd = np.zeros(T) rn = r*n spreads = np.zeros(T) for t in range(T): clients= np.random.choice(n,max(1,int(rn)),replace=False) V_fedrep_orth, RVorth = np.linalg.qr(V_fedrep) V_fedsgd_orth, RVorth = np.linalg.qr(V_fedsgd) dist_fedrep[t] = np.linalg.norm(np.dot((np.eye(d) - np.dot(V_fedrep_orth, V_fedrep_orth.T)), Vstar), ord=2)**2 dist_fedsgd[t] = np.linalg.norm(np.dot((np.eye(d) - np.dot(V_fedsgd_orth, V_fedsgd_orth.T)), 
Vstar), ord=2)**2 GV_fr = np.zeros((d,k)) GV = np.zeros((d,k)) Gu = np.zeros((k,1)) for i in clients: Xi = A[i,:,:] wi = np.dot(np.linalg.inv(np.dot(np.dot(Xi,V_fedrep).T, np.dot(Xi,V_fedrep))), np.dot(np.dot(Xi,V_fedrep).T, Y[i,:].T)) U_fedrep[i,:] = wi.T Sigma_i = np.dot(Xi.T, Xi) GV_fr += np.dot(np.dot(Sigma_i, V_fedrep),np.outer(U_fedrep[i,:], U_fedrep[i,:])) - np.outer(np.dot(Xi.T, Y[i,:]), U_fedrep[i,:])# + eta_fedrep*np.outer(np.dot(Xi.T, Y[i,:]), U_fedrep[i,:]) Gu += (np.dot(np.dot(V_fedsgd.T, Sigma_i), np.dot(V_fedsgd,u_fedsgd)) -np.expand_dims(np.dot(V_fedsgd.T, np.dot(Xi.T, Y[i,:])), axis=-1))/len(clients) GV += np.dot(V_fedsgd,np.outer(u_fedsgd, u_fedsgd)) - np.outer(np.dot(Xi.T, Y[i,:]), u_fedsgd) V_fedrep = V_fedrep - eta_fedrep*GV_fr V_fedsgd = V_fedsgd - eta_fedsgd*GV u_fedsgd = u_fedsgd - eta_fedsgd*Gu e_local_fin = 0 e_fedrep_fin = 0 e_fedsgd_fin = 0 for i in range(n): Xi = A[i,:,:] wi = np.dot(np.linalg.inv(np.dot(Xi.T, Xi)+0.00001*np.eye(d)), np.dot(Xi.T, Y[i,:].T)) ui = np.dot(np.linalg.inv(np.dot(np.dot(V_fedsgd.T,Xi.T), np.dot(Xi, V_fedsgd))+0.00001*np.eye(k)), np.dot(np.dot(V_fedsgd.T,Xi.T), Y[i,:].T)) for j in range(m_test): # Note that we mistakenly divided these errors by m_test for the figure in the paper even though # the variance of the data is already 1/m_test. The code here corrects for this, and qualitatively, the results # are the same, because the same incorrect scaling was applied to all methods. 
e_local_fin += (Y_test[i,j] - np.dot(A_test[i,j,:], wi))**2/(2*n) e_fedrep_fin += (Y_test[i,j] - np.dot(A_test[i,j,:], np.dot(V_fedrep,U_fedrep[i,:].T)))**2/(2*n) e_fedsgd_fin += (Y_test[i,j] - np.dot(A_test[i,j,:], np.dot(V_fedsgd,ui)))**2/(2*n) plt.figure(2) plt.title('Principal Angle Distance') plt.plot(dist_fedrep,label='FedRep') plt.plot(dist_fedsgd,label='FedSGD') plt.yscale('log') plt.legend() # - m5amg = e_fedrep_fin m5fed = e_fedsgd_fin m5loc = e_local_fin m10amg = e_fedrep_fin m10fed = e_fedsgd_fin m10loc = e_local_fin m20amg = e_fedrep_fin m20fed = e_fedsgd_fin m20loc = e_local_fin # ### Plot Figure 1 # + e_fed = [m5fed, m10fed, m20fed] e_amg = [m5amg, m10amg, m20amg] e_loc = [m5loc, m10loc, m20loc] width = 0.22 matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 ind = np.arange(3) plt.figure(23) plt.bar(ind, e_loc, width, label='Local Only') plt.bar(ind+width, e_fed, width, label='FedSGD') plt.bar(ind+2*width, e_amg, width, label='FedRep') plt.yscale('log') plt.ylabel('Average MSE') plt.xticks(ind + width , ('0.25*d','0.5*d', 'd')) plt.xlabel('Number of training samples/user') plt.legend(loc='best') plt.title('Local MSE for $d=20, k=2, n=100$') plt.savefig('bar_fig1.pdf', format='pdf') # - # ## Figure 4 # + s = [1,2,5,10,20] d=20 e_fed = np.zeros((len(s))) e_amg= np.zeros((len(s))) e= np.zeros((len(s))) n_test=100 matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 # Fine-tune the head based on the pre-trained representation for i,samples in enumerate(s): X = np.random.normal(size=(samples,d))/np.sqrt(samples) gram = np.dot(X.T, X) for j in range(n_test): wstar = np.random.normal(size=(k)) ustar = np.dot(Vstar, wstar) y_train = np.dot(np.dot(X, Vstar), wstar) + np.random.normal(size=(samples))*sigma2 Xy = np.dot(X.T,y_train) wfed = np.dot(np.linalg.inv(np.dot(np.dot(V_fedsgd.T, gram), V_fedsgd) + 0.00001*np.eye(k)), np.dot(V_fedsgd.T, Xy)) wamg = np.dot(np.linalg.inv(np.dot(np.dot(V_fedrep.T, 
gram), V_fedrep) + 0.00001*np.eye(k)), np.dot(V_fedrep.T, Xy)) w = np.dot(np.linalg.inv(gram + 0.00001*np.eye(d)), Xy) X_test = np.random.normal(size=(50,d))/np.sqrt(50) y_test = np.dot( X_test, np.dot(Vstar, wstar)) yfed = np.dot( X_test, np.dot(V_fedsgd, wfed)) yamg = np.dot( X_test, np.dot(V_fedrep, wamg)) y = np.dot(X_test, w) e_fed[i] += np.linalg.norm(y_test- yfed)**2/(2*n_test) e_amg[i] += np.linalg.norm(y_test- yamg)**2/(2*n_test) e[i] += np.linalg.norm(y_test- y)**2/(2*n_test) width = 0.25 ind = np.arange(len(s)) plt.figure(23) plt.bar(ind, e, width, label='Local Only') plt.bar(ind+width, e_fed, width, label='FedSGD') plt.bar(ind + 2*width, e_amg, width, label='FedRep') plt.yscale('log') plt.ylabel('Average MSE on New Client $w_{new}$') plt.xticks(ind + width , ('1','2', '5', '10', '20')) plt.xlabel('Number of fine-tuning samples') plt.legend(loc='best') plt.title('New Client MSE for $d=20, k=2, m=10, r=0.1$') plt.savefig('bar_fig4.pdf', format='pdf') # - # ## Figure 3 # + def top_k_SVD(M): X, s, Y = np.linalg.svd(M) Mk = np.dot(np.dot(X[:,:k], np.diag(s[:k])), Y[:,:k].T) return Mk ns = [10,100,1000] ms = [5] # tuned learning rates lrs_amg = np.asarray([[0.4,4,60],[3,3.5,4]]) lrs_add = np.asarray([[0.12,0.75,1],[0.75,0.75,1]]) lrs_aff = np.asarray([[0.01,0.4,0.25],[0.75,0.75,1]]) T = 200 r=0.1 e_amg_means = np.zeros((T, len(ns),len(ms) )) e_add_means = np.zeros((T, len(ns),len(ms) )) e_aff_means = np.zeros((T, len(ns),len(ms) )) e_amg_stddevs = np.zeros((T, len(ns),len(ms) )) e_add_stddevs = np.zeros((T, len(ns),len(ms) )) paWamg_means = np.zeros((T, len(ns),len(ms) )) paWadd_means = np.zeros((T, len(ns),len(ms) )) paWaff_means = np.zeros((T, len(ns),len(ms) )) paWamg_stddevs = np.zeros((T, len(ns),len(ms) )) paWadd_stddevs = np.zeros((T, len(ns),len(ms) )) paBamg_means = np.zeros((T, len(ns),len(ms) )) paBadd_means = np.zeros((T, len(ns),len(ms) )) paBaff_means = np.zeros((T, len(ns),len(ms) )) paBamg_stddevs = np.zeros((T, len(ns),len(ms) )) 
paBadd_stddevs = np.zeros((T, len(ns),len(ms) )) for ii in range(len(ns)): for jj in range(len(ms)): s= 1 d = 10 k = 2 n = ns[ii] rn = int(r*n) m = ms[jj] m_test=50 sigma2 = 0.001 alpha = lrs_amg[jj,ii] beta = lrs_add[jj,ii] betaff = lrs_aff[jj,ii] trials = 1 e_amg = np.zeros((T, trials)) e_add = np.zeros((T, trials)) e_aff = np.zeros((T, trials)) paB_amg = np.zeros((T, trials)) paW_amg = np.zeros((T, trials)) paB_add = np.zeros((T, trials)) paW_add = np.zeros((T, trials)) paB_aff = np.zeros((T, trials)) paW_aff = np.zeros((T, trials)) for tri in range(trials): Ystar = np.random.normal(size=(d,k)) Vstar, R = np.linalg.qr(Ystar) VstarT = Vstar.T Xstar = np.random.normal(size=(n,k)) Ustar, R = np.linalg.qr(Xstar) Mstar = np.dot(np.dot(Ustar,np.eye(k)),Vstar.T) I = np.eye(d) A = np.zeros((n,m,d)) A_test = np.zeros((n,m_test,d)) Y = np.zeros((n,m)) Y_test = np.zeros((n,m_test)) for i in range(n): # train data for j in range(m): A[i,j,:] = np.random.normal(size=(d))/(np.sqrt(m)) Y[i,j] = np.dot(Mstar[i,:], A[i,j,:] ) + np.random.normal(0,sigma2) # test data for j in range(m_test): A_test[i,j,:] = np.random.normal(size=(d))/(np.sqrt(m)) Y_test[i,j] = np.dot(Mstar[i,:] , A_test[i,j,:] ) + np.random.normal(0,sigma2) V = np.random.normal(size=(d,k)) U = np.random.normal(size=(n,k)) Vdd = np.random.normal(size=(d,k)) Udd = np.random.normal(size=(n,k)) errors = np.zeros(T) errors_dd = np.zeros(TT) errs = np.zeros(TT) errs2 = np.zeros(TT) errs3 = np.zeros(TT) estar=0 errstar = np.ones(TT) sine = np.zeros(TT) sine2 = np.zeros(TT) sine3 = np.zeros(TT) sine4 = np.zeros(TT) sine5 = np.zeros(TT) sine6 = np.zeros(TT) sine7 = np.zeros(TT) sine8 = np.zeros(TT) sine9 = np.zeros(TT) Tinit = 10 M = np.random.normal(size=(n,d)) Mk = top_k_SVD(M) eta_init = 0.1 for tau in range(Tinit): add = np.zeros((n,d)) for i in range(n): for j in range(m): aij = np.zeros((n,d)) aij[i,:] = A[i,j,:] add += (np.trace(np.dot(aij.T, Mk)) - Y[i,j])*aij M = Mk - eta_init*add Mk = top_k_SVD(M) U, s, V = 
np.linalg.svd(Mk) U = U[:,:k] V = V[:,:k] Vdd = V Udd = U Vff = V Uff = U Vfedsgd = V ufedsgd = U[0,:] Vfedavg = V ufedavg = U[0,:] w = np.random.normal(size=(d)) for t in range(T): clients = np.random.choice(n, rn, replace=False) # compute principal angle distances and errors if t>=0: Vorth,RVorth = np.linalg.qr(V) Vddorth,RVddorth = np.linalg.qr(Vdd) Vfforth,RVddorth = np.linalg.qr(Vff) Uorth,RVorth = np.linalg.qr(U) Uddorth,RVddorth = np.linalg.qr(Udd) Ufforth,RVfforth = np.linalg.qr(Uff) sine[t] = (np.linalg.norm(Mstar - np.dot(U,V.T)))**2/(2*n) sine3[t] = (np.linalg.norm(Mstar - np.dot(Udd,Vdd.T)))**2/(2*n) sine9[t] = (np.linalg.norm(Mstar - np.dot(Uff,Vff.T)))**2/(2*n) sine2[t] = np.linalg.norm(np.dot(Uorth, Uorth.T) - np.dot(Ustar,Ustar.T ))**2 sine4[t] = np.linalg.norm(np.dot(Uddorth, Uddorth.T) - np.dot(Ustar,Ustar.T ))**2 sine7[t] = np.linalg.norm(np.dot(Ufforth, Ufforth.T) - np.dot(Ustar,Ustar.T ))**2 sine5[t] = np.linalg.norm(np.dot((np.eye(d) - np.dot(Vorth, Vorth.T)), Vstar), ord=2)**2 sine6[t] = np.linalg.norm(np.dot((np.eye(d) - np.dot(Vddorth, Vddorth.T)), Vstar), ord=2)**2 sine8[t] = np.linalg.norm(np.dot((np.eye(d) - np.dot(Vfforth, Vfforth.T)), Vstar), ord=2)**2 # take minimization step wrt U for i in clients: Xi = A[i,:,:] wi = np.dot(np.linalg.inv(np.dot(np.dot(Xi,V).T, np.dot(Xi,V))), np.dot(np.dot(Xi,V).T, Y[i,:].T)) U[i,:] = wi.T # grad step wrt U gdd = np.zeros(U.shape) add = np.zeros((n,d)) gff = np.zeros(U.shape) aff = np.zeros((n,d)) for i in clients: # train data for j in range(m): uvdd = np.dot(Udd, Vdd.T) uvff = np.dot(Uff, Vff.T) aij = np.zeros((n,d)) aij[i,:] = A[i,j,:] add += (np.trace(np.dot(aij.T, uvdd)) - Y[i,j])*aij aff += (np.trace(np.dot(aij.T, uvff)) - Y[i,j])*aij gdd = np.dot(add,Vdd) Udd = Udd - beta*gdd gff = np.dot(aff,Vff) Uff = Uff - betaff*gff for iii in range(9): gff = np.zeros(U.shape) aff = np.zeros((n,d)) for i in clients: # train data for j in range(m): uvff = np.dot(Uff, Vff.T) aij = np.zeros((n,d)) aij[i,:] = 
A[i,j,:] aff += (np.trace(np.dot(aij.T, uvff)) - Y[i,j])*aij gff = np.dot(aff,Vff) Uff = Uff - betaff*gff # take grad step wrt V a = np.zeros((d,n)) add = np.zeros((d,n)) aff = np.zeros((d,n)) gw = np.zeros(d) gV = np.zeros((d,k)) gu = np.zeros(k) for i in clients: for j in range(m): uv = np.dot(U, V.T) uvdd = np.dot(Udd, Vdd.T) uvff = np.dot(Uff, Vff.T) aij = np.zeros((d,n)) aij[:,i] = A[i,j,:] a += (np.trace(np.dot(aij, uv)) - Y[i,j] )*aij add += (np.trace(np.dot(aij, uvdd)) - Y[i,j])*aij aff += (np.trace(np.dot(aij, uvff)) - Y[i,j])*aij g = np.dot(a,U) gdd = np.dot(add,Udd) gff = np.dot(aff,Uff) V = V - alpha*g Vdd = Vdd - beta*gdd Vff = Vff - beta*gff e_amg[:,tri] = sine e_add[:,tri] = sine3 e_aff[:,tri] = sine9 paB_amg[:,tri] = sine5 paW_amg[:,tri] = sine2 paB_add[:,tri] = sine6 paW_add[:,tri] = sine4 paB_aff[:,tri] = sine7 paW_aff[:,tri] = sine8 e_amg_means[:,ii,jj] = np.mean(e_amg,axis=1) e_add_means[:,ii,jj] = np.mean(e_add,axis=1) e_aff_means[:,ii,jj] = np.mean(e_aff,axis=1) e_amg_stddevs[:,ii,jj] = np.std(e_amg,axis=1) e_add_stddevs[:,ii,jj] = np.std(e_add,axis=1) paWamg_means[:,ii,jj] = np.mean(e_amg,axis=1) paWadd_means[:,ii,jj] = np.mean(e_add,axis=1) paWaff_means[:,ii,jj] = np.mean(e_aff,axis=1) paWamg_stddevs[:,ii,jj] = np.std(e_amg,axis=1) paWadd_stddevs[:,ii,jj] = np.std(e_add,axis=1) paBamg_means[:,ii,jj] = np.mean(e_amg,axis=1) paBadd_means[:,ii,jj] = np.mean(e_add,axis=1) paBaff_means[:,ii,jj] = np.mean(e_aff,axis=1) paBamg_stddevs[:,ii,jj] = np.std(e_amg,axis=1) paBadd_stddevs[:,ii,jj] = np.std(e_add,axis=1) # + matplotlib.rcParams['pdf.fonttype'] = 42 matplotlib.rcParams['ps.fonttype'] = 42 T=200 iters = np.linspace(1,T,T) plt.figure(12,figsize=[15,5]) for ii in range(len(ns)): for jj in range(len(ms)): plt.subplot(2,5,1+ii+5*jj) plt.yscale("log") plt.plot(iters,paBadd_means[:,ii,jj], '-.', c='coral', label='GD-GD') plt.plot(iters,paBaff_means[:,ii,jj], '--', c='darkred', label='10GD-GD') plt.plot(iters,paBamg_means[:,ii,jj], c='g', 
label='FedRep') plt.fill_between(iters, (paBamg_means[:,ii,jj]-paBamg_stddevs[:,ii,jj]), (paBamg_means[:,ii,jj]+paBamg_stddevs[:,ii,jj]), color='darkblue', alpha=.1) plt.fill_between(iters, (paBadd_means[:,ii,jj]-paBadd_stddevs[:,ii,jj]), (paBadd_means[:,ii,jj]+paBadd_stddevs[:,ii,jj]), color='firebrick', alpha=.1) if ii == 0 and jj == 0: plt.legend() if ii == 0: xx=0 plt.ylabel("$F(W_t, B_t)$") if jj == 0: plt.xlabel("$t$") plt.title('$n={}$'.format(ns[ii])) plt.subplots_adjust(hspace = 0.3) plt.subplots_adjust(wspace = 0.3) plt.savefig('synth3.pdf', format='pdf')
FedRep_linear_sims.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Excercise: # # ### Find all the people that did a hike with "tom" # # This is similar to the graph traversal query we saw before. # # Since we know the schema, we know that "tom" is linked to a "Hike" entity, # and that all the people that was part of that "Hike" is also linked to the Hike. # # Hence, to find all the people that participated in a hike with "tom": # # # 1) We find the entity corresponding to "tom" # 2) We find all the hikes linked to tom # 3) We find all the people linked to those hikes. # # The first step is to import vdms client module, and connect to the VDMS instance running. # # + import vdms db = vdms.vdms() db.connect("localhost") # - # Let's start by finding the entity corresponding to tom # + query = """ [ { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND TOM } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # - # Now that we have the entity corresponding to "tom", let's find the hikes where he participated. # # **Hint**: We can use the _ref and the link block. # + query = """ [ { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND TOM } }, { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND HIKES } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # - # Now that we have the hikes, we can check all the people that participated on those hikes. 
# + query = """ [ { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND TOM } }, { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND HIKES } }, { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND PEOPLE LINKED TO THOSE HIKES } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # - # The response you get should look something like: # # [ # { # "FindEntity": { # "returned": 1, # "entities": [ # { # "name": "tom" # } # ], # "status": 0 # } # }, # { # "FindEntity": { # "returned": 1, # "entities": [ # { # "place": "Mt Rainier" # } # ], # "status": 0 # } # }, # { # "FindEntity": { # "returned": 5, # "entities": [ # { # "lastname": "Simer", # "name": "luis" # }, # { # "lastname": "Ferdinand", # "name": "sofi" # }, # { # "lastname": "Teixeira", # "name": "Thiago" # }, # { # "lastname": "Ille", # "name": "sol" # } # ], # "status": 0 # } # } # ] # # ### Extra Credit: # # Find all the profile pictures of the people on those hikes. # + query = """ [ { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND TOM } }, { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND HIKES } }, { "FindEntity" : { ## COMPLETE THIS COMMAND TO FIND PEOPLE LINKED TO THOSE HIKES } }, { "FindImage" : { ## COMPLETE THIS COMMAND TO FIND PROFILE IMAGES LINKED TO THOSE PEOPLE } } ] """ response, images = db.query(query) print (db.get_last_response_str()) # -
docker/demo/examples/4 - Excercise.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## Text generation using tensor2tensor on Cloud ML Engine # # This notebook illustrates using the <a href="https://github.com/tensorflow/tensor2tensor">tensor2tensor</a> library to do from-scratch, distributed training of a poetry model. Then, the trained model is used to complete new poems. # # <br/> # # ### Install tensor2tensor, and specify Google Cloud Platform project and bucket # Install the necessary packages. tensor2tensor will give us the Transformer model. Project Gutenberg gives us access to historical poems. # # # <b>p.s.</b> Note that this notebook uses Python2 because Project Gutenberg relies on BSD-DB which was deprecated in Python 3 and removed from the standard library. # tensor2tensor itself can be used on Python 3. It's just Project Gutenberg that has this issue. # + language="bash" # pip freeze | grep tensor # + language="bash" # pip install tensor2tensor==1.13.1 tensorflow==1.13.1 tensorflow-serving-api==1.13 gutenberg # pip install tensorflow_hub # # # install from sou # #git clone https://github.com/tensorflow/tensor2tensor.git # #cd tensor2tensor # #yes | pip install --user -e . # - # If the following cell does not reflect the version of tensorflow and tensor2tensor that you just installed, click **"Reset Session"** on the notebook so that the Python environment picks up the new packages. # + language="bash" # pip freeze | grep tensor # + import os PROJECT = 'qwiklabs-gcp-273e0fc7a73ebe0d' # REPLACE WITH YOUR PROJECT ID BUCKET = 'qwiklabs-gcp-273e0fc7a73ebe0d' # REPLACE WITH YOUR BUCKET NAME REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. 
us-central1 # this is what this notebook is demonstrating PROBLEM= 'poetry_line_problem' # for bash os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION os.environ['PROBLEM'] = PROBLEM #os.environ['PATH'] = os.environ['PATH'] + ':' + os.getcwd() + '/tensor2tensor/tensor2tensor/bin/' # + language="bash" # gcloud config set project $PROJECT # gcloud config set compute/region $REGION # - # ### Download data # # We will get some <a href="https://www.gutenberg.org/wiki/Poetry_(Bookshelf)">poetry anthologies</a> from Project Gutenberg. # + language="bash" # rm -rf data/poetry # mkdir -p data/poetry # + from gutenberg.acquire import load_etext from gutenberg.cleanup import strip_headers import re books = [ # bookid, skip N lines (26715, 1000, 'Victorian songs'), (30235, 580, 'Baldwin collection'), (35402, 710, 'Swinburne collection'), (574, 15, 'Blake'), (1304, 172, 'Bulchevys collection'), (19221, 223, 'Palgrave-Pearse collection'), (15553, 522, 'Knowles collection') ] with open('data/poetry/raw.txt', 'w') as ofp: lineno = 0 for (id_nr, toskip, title) in books: startline = lineno text = strip_headers(load_etext(id_nr)).strip() lines = text.split('\n')[toskip:] # any line that is all upper case is a title or author name # also don't want any lines with years (numbers) for line in lines: if (len(line) > 0 and line.upper() != line and not re.match('.*[0-9]+.*', line) and len(line) < 50 ): cleaned = re.sub('[^a-z\'\-]+', ' ', line.strip().lower()) ofp.write(cleaned) ofp.write('\n') lineno = lineno + 1 else: ofp.write('\n') print('Wrote lines {} to {} from {}'.format(startline, lineno, title)) # - # !wc -l data/poetry/*.txt # ## Create training dataset # # We are going to train a machine learning model to write poetry given a starting point. We'll give it one line, and it is going to tell us the next line. So, naturally, we will train it on real poetry. Our feature will be a line of a poem and the label will be next line of that poem. 
# <p>
# Our training dataset will consist of two files. The first file will consist of the input lines of poetry and the other file will consist of the corresponding output lines, one output line per input line.

# Pair each poem line with the following line of the same poem:
# prev_line -> input.txt, curr_line -> output.txt.
with open('data/poetry/raw.txt', 'r') as rawfp,\
     open('data/poetry/input.txt', 'w') as infp,\
     open('data/poetry/output.txt', 'w') as outfp:
    prev_line = ''
    for curr_line in rawfp:
        curr_line = curr_line.strip()
        # poems break at empty lines, so this ensures we train only
        # on lines of the same poem
        if len(prev_line) > 0 and len(curr_line) > 0:
            infp.write(prev_line + '\n')
            outfp.write(curr_line + '\n')
        prev_line = curr_line

# !head -5 data/poetry/*.txt

# We do not need to generate the data beforehand -- instead, we can have Tensor2Tensor create the training dataset for us. So, in the code below, I will use only data/poetry/raw.txt -- obviously, this allows us to productionize our model better. Simply keep collecting raw data and generate the training/test data at the time of training.

# ### Set up problem
# The Problem in tensor2tensor is where you specify parameters like the size of your vocabulary and where to get the training data from.

# + language="bash"
# rm -rf poetry
# mkdir -p poetry/trainer

# +
# %%writefile poetry/trainer/problem.py
import os
import tensorflow as tf
from tensor2tensor.utils import registry
from tensor2tensor.models import transformer
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import generator_utils

tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file


@registry.register_problem
class PoetryLineProblem(text_problems.Text2TextProblem):
    """Predict next line of poetry from the last line.

    From Gutenberg texts.
    """

    @property
    def approx_vocab_size(self):
        # target size of the subword vocabulary built by t2t-datagen
        return 2**13  # ~8k

    @property
    def is_generate_per_split(self):
        # generate_data will NOT shard the data into TRAIN and EVAL for us.
        return False

    @property
    def dataset_splits(self):
        """Splits of data to produce and number of output shards for each."""
        # 10% evaluation data
        return [{
            "split": problem.DatasetSplit.TRAIN,
            "shards": 90,
        }, {
            "split": problem.DatasetSplit.EVAL,
            "shards": 10,
        }]

    def generate_samples(self, data_dir, tmp_dir, dataset_split):
        # Yield {"inputs", "targets"} pairs of consecutive lines of the
        # same poem, read straight from the raw dump created earlier.
        with open('data/poetry/raw.txt', 'r') as rawfp:
            prev_line = ''
            for curr_line in rawfp:
                curr_line = curr_line.strip()
                # poems break at empty lines, so this ensures we train only
                # on lines of the same poem
                if len(prev_line) > 0 and len(curr_line) > 0:
                    yield {
                        "inputs": prev_line,
                        "targets": curr_line
                    }
                prev_line = curr_line


# Smaller than the typical translate model, and with more regularization
@registry.register_hparams
def transformer_poetry():
    hparams = transformer.transformer_base()
    hparams.num_hidden_layers = 2
    hparams.hidden_size = 128
    hparams.filter_size = 512
    hparams.num_heads = 4
    hparams.attention_dropout = 0.6
    hparams.layer_prepostprocess_dropout = 0.6
    hparams.learning_rate = 0.05
    return hparams


@registry.register_hparams
def transformer_poetry_tpu():
    # same model as above, with TPU-friendly settings applied on top
    hparams = transformer_poetry()
    transformer.update_hparams_for_tpu(hparams)
    return hparams


# hyperparameter tuning ranges
@registry.register_ranged_hparams
def transformer_poetry_range(rhp):
    rhp.set_float("learning_rate", 0.05, 0.25, scale=rhp.LOG_SCALE)
    rhp.set_int("num_hidden_layers", 2, 4)
    rhp.set_discrete("hidden_size", [128, 256, 512])
    rhp.set_float("attention_dropout", 0.4, 0.7)
# -

# %%writefile poetry/trainer/__init__.py
from . import problem

# +
# %%writefile poetry/setup.py
from setuptools import find_packages
from setuptools import setup

REQUIRED_PACKAGES = [
    'tensor2tensor'
]

setup(
    name='poetry',
    version='0.1',
    author = 'Google',
    author_email = '<EMAIL>',
    install_requires=REQUIRED_PACKAGES,
    packages=find_packages(),
    include_package_data=True,
    description='Poetry Line Problem',
    requires=[]
)
# -

# !touch poetry/__init__.py
# !find poetry

# ## Generate training data
#
# Our problem (translation) requires the creation of text sequences from the training dataset. This is done using t2t-datagen and the Problem defined in the previous section.
#
# (Ignore any runtime warnings about np.float64. they are harmless).

# + language="bash"
# DATA_DIR=./t2t_data
# TMP_DIR=$DATA_DIR/tmp
# rm -rf $DATA_DIR $TMP_DIR
# mkdir -p $DATA_DIR $TMP_DIR
# # Generate data
# t2t-datagen \
#   --t2t_usr_dir=./poetry/trainer \
#   --problem=$PROBLEM \
#   --data_dir=$DATA_DIR \
#   --tmp_dir=$TMP_DIR
# -

# Let's check to see the files that were output. If you see a broken pipe error, please ignore.

# !ls t2t_data | head

# ## Provide Cloud ML Engine access to data
#
# Copy the data to Google Cloud Storage, and then provide access to the data. `gsutil` throws an error when removing an empty bucket, so you may see an error the first time this code is run.
# + language="bash" # DATA_DIR=./t2t_data # gsutil -m rm -r gs://${BUCKET}/poetry/ # gsutil -m cp ${DATA_DIR}/${PROBLEM}* ${DATA_DIR}/vocab* gs://${BUCKET}/poetry/data # + language="bash" # PROJECT_ID=$PROJECT # AUTH_TOKEN=$(gcloud auth print-access-token) # SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \ # -H "Authorization: Bearer $AUTH_TOKEN" \ # https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \ # | python -c "import json; import sys; response = json.load(sys.stdin); \ # print(response['serviceAccount'])") # # echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET" # gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET # gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored # gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET # - # ## Train model locally on subset of data # # Let's run it locally on a subset of the data to make sure it works. # + language="bash" # BASE=gs://${BUCKET}/poetry/data # OUTDIR=gs://${BUCKET}/poetry/subset # gsutil -m rm -r $OUTDIR # gsutil -m cp \ # ${BASE}/${PROBLEM}-train-0008* \ # ${BASE}/${PROBLEM}-dev-00000* \ # ${BASE}/vocab* \ # $OUTDIR # - # Note: the following will work only if you are running Jupyter on a reasonably powerful machine. Don't be alarmed if your process is killed. 
# + language="bash" # DATA_DIR=gs://${BUCKET}/poetry/subset # OUTDIR=./trained_model # rm -rf $OUTDIR # t2t-trainer \ # --data_dir=gs://${BUCKET}/poetry/subset \ # --t2t_usr_dir=./poetry/trainer \ # --problem=$PROBLEM \ # --model=transformer \ # --hparams_set=transformer_poetry \ # --output_dir=$OUTDIR --job-dir=$OUTDIR --train_steps=10 # - # ## Option 1: Train model locally on full dataset (use if running on Notebook Instance with a GPU) # # You can train on the full dataset if you are on a Google Cloud Notebook Instance with a P100 or better GPU # + language="bash" # LOCALGPU="--train_steps=7500 --worker_gpu=1 --hparams_set=transformer_poetry" # # DATA_DIR=gs://${BUCKET}/poetry/data # OUTDIR=gs://${BUCKET}/poetry/model # rm -rf $OUTDIR # t2t-trainer \ # --data_dir=gs://${BUCKET}/poetry/subset \ # --t2t_usr_dir=./poetry/trainer \ # --problem=$PROBLEM \ # --model=transformer \ # --hparams_set=transformer_poetry \ # --output_dir=$OUTDIR ${LOCALGPU} # - # ## Option 2: Train on Cloud ML Engine # # tensor2tensor has a convenient --cloud_mlengine option to kick off the training on the managed service. # It uses the [Python API](https://cloud.google.com/ml-engine/docs/training-jobs) mentioned in the Cloud ML Engine docs, rather than requiring you to use gcloud to submit the job. # <p> # Note: your project needs P100 quota in the region. # <p> # The echo is because t2t-trainer asks you to confirm before submitting the job to the cloud. Ignore any error about "broken pipe". # If you see a message similar to this: # <pre> # [... cloud_mlengine.py:392] Launched transformer_poetry_line_problem_t2t_20190323_000631. See console to track: https://console.cloud.google.com/mlengine/jobs/. # </pre> # then, this step has been successful. 
# + language="bash" # GPU="--train_steps=7500 --cloud_mlengine --worker_gpu=1 --hparams_set=transformer_poetry" # # DATADIR=gs://${BUCKET}/poetry/data # OUTDIR=gs://${BUCKET}/poetry/model # JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S) # echo $OUTDIR $REGION $JOBNAME # gsutil -m rm -rf $OUTDIR # echo "'Y'" | t2t-trainer \ # --data_dir=gs://${BUCKET}/poetry/subset \ # --t2t_usr_dir=./poetry/trainer \ # --problem=$PROBLEM \ # --model=transformer \ # --output_dir=$OUTDIR \ # ${GPU} # + language="bash" # ## CHANGE the job name (based on output above: You will see a line such as Launched transformer_poetry_line_problem_t2t_20190322_233159) # gcloud ml-engine jobs describe transformer_poetry_line_problem_t2t_20190323_003001 # - # The job took about <b>25 minutes</b> for me and ended with these evaluation metrics: # <pre> # Saving dict for global step 8000: global_step = 8000, loss = 6.03338, metrics-poetry_line_problem/accuracy = 0.138544, metrics-poetry_line_problem/accuracy_per_sequence = 0.0, metrics-poetry_line_problem/accuracy_top5 = 0.232037, metrics-poetry_line_problem/approx_bleu_score = 0.00492648, metrics-poetry_line_problem/neg_log_perplexity = -6.68994, metrics-poetry_line_problem/rouge_2_fscore = 0.00256089, metrics-poetry_line_problem/rouge_L_fscore = 0.128194 # </pre> # Notice that accuracy_per_sequence is 0 -- Considering that we are asking the NN to be rather creative, that doesn't surprise me. Why am I looking at accuracy_per_sequence and not the other metrics? This is because it is more appropriate for problem we are solving; metrics like Bleu score are better for translation. # ## Option 3: Train on a directly-connected TPU # # If you are running on a VM connected directly to a Cloud TPU, you can run t2t-trainer directly. Unfortunately, you won't see any output from Jupyter while the program is running. # # Compare this command line to the one using GPU in the previous section. 
# + language="bash" # # use one of these # TPU="--train_steps=7500 --use_tpu=True --cloud_tpu_name=laktpu --hparams_set=transformer_poetry_tpu" # # DATADIR=gs://${BUCKET}/poetry/data # OUTDIR=gs://${BUCKET}/poetry/model_tpu # JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S) # echo $OUTDIR $REGION $JOBNAME # gsutil -m rm -rf $OUTDIR # echo "'Y'" | t2t-trainer \ # --data_dir=gs://${BUCKET}/poetry/subset \ # --t2t_usr_dir=./poetry/trainer \ # --problem=$PROBLEM \ # --model=transformer \ # --output_dir=$OUTDIR \ # ${TPU} # + language="bash" # gsutil ls gs://${BUCKET}/poetry/model_tpu # - # The job took about <b>10 minutes</b> for me and ended with these evaluation metrics: # <pre> # Saving dict for global step 8000: global_step = 8000, loss = 6.03338, metrics-poetry_line_problem/accuracy = 0.138544, metrics-poetry_line_problem/accuracy_per_sequence = 0.0, metrics-poetry_line_problem/accuracy_top5 = 0.232037, metrics-poetry_line_problem/approx_bleu_score = 0.00492648, metrics-poetry_line_problem/neg_log_perplexity = -6.68994, metrics-poetry_line_problem/rouge_2_fscore = 0.00256089, metrics-poetry_line_problem/rouge_L_fscore = 0.128194 # </pre> # Notice that accuracy_per_sequence is 0 -- Considering that we are asking the NN to be rather creative, that doesn't surprise me. Why am I looking at accuracy_per_sequence and not the other metrics? This is because it is more appropriate for problem we are solving; metrics like Bleu score are better for translation. # ## Option 4: Training longer # # Let's train on 4 GPUs for 75,000 steps. Note the change in the last line of the job. # + language="bash" # # XXX This takes 3 hours on 4 GPUs. Remove this line if you are sure you want to do this. 
# # DATADIR=gs://${BUCKET}/poetry/data # OUTDIR=gs://${BUCKET}/poetry/model_full2 # JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S) # echo $OUTDIR $REGION $JOBNAME # gsutil -m rm -rf $OUTDIR # echo "'Y'" | t2t-trainer \ # --data_dir=gs://${BUCKET}/poetry/subset \ # --t2t_usr_dir=./poetry/trainer \ # --problem=$PROBLEM \ # --model=transformer \ # --hparams_set=transformer_poetry \ # --output_dir=$OUTDIR \ # --train_steps=75000 --cloud_mlengine --worker_gpu=4 # - # This job took <b>12 hours</b> for me and ended with these metrics: # <pre> # global_step = 76000, loss = 4.99763, metrics-poetry_line_problem/accuracy = 0.219792, metrics-poetry_line_problem/accuracy_per_sequence = 0.0192308, metrics-poetry_line_problem/accuracy_top5 = 0.37618, metrics-poetry_line_problem/approx_bleu_score = 0.017955, metrics-poetry_line_problem/neg_log_perplexity = -5.38725, metrics-poetry_line_problem/rouge_2_fscore = 0.0325563, metrics-poetry_line_problem/rouge_L_fscore = 0.210618 # </pre> # At least the accuracy per sequence is no longer zero. It is now 0.0192308 ... note that we are using a relatively small dataset (12K lines) and this is *tiny* in the world of natural language problems. # <p> # In order that you have your expectations set correctly: a high-performing translation model needs 400-million lines of input and takes 1 whole day on a TPU pod! # ## Check trained model # + language="bash" # gsutil ls gs://${BUCKET}/poetry/model #_modeltpu # - # ## Batch-predict # # How will our poetry model do when faced with Rumi's spiritual couplets? # %%writefile data/poetry/rumi.txt Where did the handsome beloved go? I wonder, where did that tall, shapely cypress tree go? He spread his light among us like a candle. Where did he go? So strange, where did he go without me? All day long my heart trembles like a leaf. All alone at midnight, where did that beloved go? Go to the road, and ask any passing traveler —  That soul-stirring companion, where did he go? 
Go to the garden, and ask the gardener —  That tall, shapely rose stem, where did he go? Go to the rooftop, and ask the watchman —  That unique sultan, where did he go? Like a madman, I search in the meadows! That deer in the meadows, where did he go? My tearful eyes overflow like a river —  That pearl in the vast sea, where did he go? All night long, I implore both moon and Venus —  That lovely face, like a moon, where did he go? If he is mine, why is he with others? Since he’s not here, to what “there” did he go? If his heart and soul are joined with God, And he left this realm of earth and water, where did he go? Tell me clearly, <NAME>, Of whom it is said, “The sun never dies” — where did he go? # Let's write out the odd-numbered lines. We'll compare how close our model can get to the beauty of Rumi's second lines given his first. # + language="bash" # awk 'NR % 2 == 1' data/poetry/rumi.txt | tr '[:upper:]' '[:lower:]' | sed "s/[^a-z\'-\ ]//g" > data/poetry/rumi_leads.txt # head -3 data/poetry/rumi_leads.txt # + language="bash" # # same as the above training job ... # TOPDIR=gs://${BUCKET} # OUTDIR=${TOPDIR}/poetry/model #_tpu # or ${TOPDIR}/poetry/model_full # DATADIR=${TOPDIR}/poetry/data # MODEL=transformer # HPARAMS=transformer_poetry #_tpu # # # the file with the input lines # DECODE_FILE=data/poetry/rumi_leads.txt # # BEAM_SIZE=4 # ALPHA=0.6 # # t2t-decoder \ # --data_dir=$DATADIR \ # --problem=$PROBLEM \ # --model=$MODEL \ # --hparams_set=$HPARAMS \ # --output_dir=$OUTDIR \ # --t2t_usr_dir=./poetry/trainer \ # --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ # --decode_from_file=$DECODE_FILE # - # <b> Note </b> if you get an error about "AttributeError: 'HParams' object has no attribute 'problems'" please <b>Reset Session</b>, run the cell that defines the PROBLEM and run the above cell again. 
# + magic_args=" " language="bash"
# DECODE_FILE=data/poetry/rumi_leads.txt
# cat ${DECODE_FILE}.*.decodes
# -

# Some of these are still phrases and not complete sentences. This indicates that we might need to train longer or better somehow. We need to diagnose the model ...
# <p>
#
# ### Diagnosing training run
#
# <p>
# Let's diagnose the training run to see what we'd improve the next time around.
# (Note that this package may not be present on Jupyter -- `pip install pydatalab` if necessary)

# Launch TensorBoard against the training output directory to inspect the
# loss and metric curves.
from google.datalab.ml import TensorBoard
TensorBoard().start('gs://{}/poetry/model_full2'.format(BUCKET))

# Stop every TensorBoard instance started above once we are done looking.
for pid in TensorBoard.list()['pid']:
    TensorBoard().stop(pid)
    print('Stopped TensorBoard with pid {}'.format(pid))

# <table>
# <tr>
# <td><img src="diagrams/poetry_loss.png"/></td>
# <td><img src="diagrams/poetry_acc.png"/></td>
# </table>
# Looking at the loss curve, it is clear that we are overfitting (note that the orange training curve is well below the blue eval curve). Both loss curves and the accuracy-per-sequence curve, which is our key evaluation measure, plateau after 40k. (The red curve is a faster way of computing the evaluation metric, and can be ignored). So, how do we improve the model? Well, we need to reduce overfitting and make sure the eval metrics keep going down as long as the loss is also going down.
# <p>
# What we really need to do is to get more data, but if that's not an option, we could try to reduce the NN and increase the dropout regularization. We could also do hyperparameter tuning on the dropout and network sizes.

# ## Hyperparameter tuning
#
# tensor2tensor also supports hyperparameter tuning on Cloud ML Engine. Note the addition of the autotune flags.
# <p>
# The `transformer_poetry_range` was registered in problem.py above.

# + language="bash"
# # XXX This takes about 15 hours and consumes about 420 ML units. Uncomment if you wish to proceed anyway
#
# DATADIR=gs://${BUCKET}/poetry/data
# OUTDIR=gs://${BUCKET}/poetry/model_hparam
# JOBNAME=poetry_$(date -u +%y%m%d_%H%M%S)
# echo $OUTDIR $REGION $JOBNAME
# gsutil -m rm -rf $OUTDIR
# echo "'Y'" | t2t-trainer \
#   --data_dir=gs://${BUCKET}/poetry/subset \
#   --t2t_usr_dir=./poetry/trainer \
#   --problem=$PROBLEM \
#   --model=transformer \
#   --hparams_set=transformer_poetry \
#   --output_dir=$OUTDIR \
#   --hparams_range=transformer_poetry_range \
#   --autotune_objective='metrics-poetry_line_problem/accuracy_per_sequence' \
#   --autotune_maximize \
#   --autotune_max_trials=4 \
#   --autotune_parallel_trials=4 \
#   --train_steps=7500 --cloud_mlengine --worker_gpu=4
# -

# When I ran the above job, it took about 15 hours and finished with these as the best parameters:
# <pre>
# {
#   "trialId": "37",
#   "hyperparameters": {
#     "hp_num_hidden_layers": "4",
#     "hp_learning_rate": "0.026711152525921437",
#     "hp_hidden_size": "512",
#     "hp_attention_dropout": "0.60589466163419292"
#   },
#   "finalMetric": {
#     "trainingStep": "8000",
#     "objectiveValue": 0.0276162791997
#   }
# </pre>
# In other words, the accuracy per sequence achieved was 0.027 (as compared to 0.019 before hyperparameter tuning, so a <b>40% improvement!</b>) using 4 hidden layers, a learning rate of 0.0267, a hidden size of 512 and dropout probability of 0.606. This is in spite of training for only 7500 steps instead of 75,000 steps ... we could train for 75k steps with these parameters, but I'll leave that as an exercise for you.
# <p>
# Instead, let's try predicting with this optimized model. Note the addition of the hp* flags in order to override the values hardcoded in the source code. (there is no need to specify learning rate and dropout because they are not used during inference). I am using 37 because I got the best result at trialId=37
#
# NOTE(review): the text above says the best result was trialId 37, yet the
# cell below sets BEST_TRIAL=28 -- set BEST_TRIAL to your own best trial id.

# + language="bash"
# # same as the above training job ...
# BEST_TRIAL=28 # CHANGE as needed.
# TOPDIR=gs://${BUCKET} # OUTDIR=${TOPDIR}/poetry/model_hparam/$BEST_TRIAL # DATADIR=${TOPDIR}/poetry/data # MODEL=transformer # HPARAMS=transformer_poetry # # # the file with the input lines # DECODE_FILE=data/poetry/rumi_leads.txt # # BEAM_SIZE=4 # ALPHA=0.6 # # t2t-decoder \ # --data_dir=$DATADIR \ # --problem=$PROBLEM \ # --model=$MODEL \ # --hparams_set=$HPARAMS \ # --output_dir=$OUTDIR \ # --t2t_usr_dir=./poetry/trainer \ # --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ # --decode_from_file=$DECODE_FILE \ # --hparams="num_hidden_layers=4,hidden_size=512" # + magic_args=" " language="bash" # DECODE_FILE=data/poetry/rumi_leads.txt # cat ${DECODE_FILE}.*.decodes # - # Take the first three line. I'm showing the first line of the couplet provided to the model, how the AI model that we trained complets it and how Rumi completes it: # <p> # INPUT: where did the handsome beloved go <br/> # AI: where art thou worse to me than dead <br/> # RUMI: I wonder, where did that tall, shapely cypress tree go? # <p> # INPUT: he spread his light among us like a candle <br/> # AI: like the hurricane eclipse <br/> # RUMI: Where did he go? So strange, where did he go without me? <br/> # <p> # INPUT: all day long my heart trembles like a leaf <br/> # AI: and through their hollow aisles it plays <br/> # RUMI: All alone at midnight, where did that beloved go? # <p> # Oh wow. The couplets as completed are quite decent considering that: # * We trained the model on American poetry, so feeding it Rumi is a bit out of left field. # * Rumi, of course, has a context and thread running through his lines while the AI (since it was fed only that one line) doesn't. # # <p> # "Spreading light like a hurricane eclipse" is a metaphor I won't soon forget. And it was created by a machine learning model! # ## Serving poetry # # How would you serve these predictions? 
There are two ways: # <ol> # <li> Use [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/deploying-models) -- this is serverless and you don't have to manage any infrastructure. # <li> Use [Kubeflow](https://github.com/kubeflow/kubeflow/blob/master/user_guide.md) on Google Kubernetes Engine -- this uses clusters but will also work on-prem on your own Kubernetes cluster. # </ol> # <p> # In either case, you need to export the model first and have TensorFlow serving serve the model. The model, however, expects to see *encoded* (i.e. preprocessed) data. So, we'll do that in the Python Flask application (in AppEngine Flex) that serves the user interface. # + language="bash" # TOPDIR=gs://${BUCKET} # OUTDIR=${TOPDIR}/poetry/model_full2 # DATADIR=${TOPDIR}/poetry/data # MODEL=transformer # HPARAMS=transformer_poetry # BEAM_SIZE=4 # ALPHA=0.6 # # t2t-exporter \ # --model=$MODEL \ # --hparams_set=$HPARAMS \ # --problem=$PROBLEM \ # --t2t_usr_dir=./poetry/trainer \ # --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ # --data_dir=$DATADIR \ # --output_dir=$OUTDIR # + language="bash" # MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/poetry/model_full2/export | tail -1) # echo $MODEL_LOCATION # saved_model_cli show --dir $MODEL_LOCATION --tag_set serve --signature_def serving_default # - # #### Cloud ML Engine # %%writefile mlengine.json description: Poetry service on ML Engine autoScaling: minNodes: 1 # We don't want this model to autoscale down to zero # + language="bash" # MODEL_NAME="poetry" # MODEL_VERSION="v1" # MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/poetry/model_full2/export | tail -1) # echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... 
this will take a few minutes" # gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # #gcloud ml-engine models delete ${MODEL_NAME} # #gcloud ml-engine models create ${MODEL_NAME} --regions $REGION # gcloud ml-engine versions create ${MODEL_VERSION} \ # --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=1.13 --config=mlengine.json # - # #### Kubeflow # # Follow these instructions: # * On the GCP console, launch a Google Kubernetes Engine (GKE) cluster named 'poetry' with 2 nodes, each of which is a n1-standard-2 (2 vCPUs, 7.5 GB memory) VM # * On the GCP console, click on the Connect button for your cluster, and choose the CloudShell option # * In CloudShell, run: # ``` # git clone https://github.com/GoogleCloudPlatform/training-data-analyst` # cd training-data-analyst/courses/machine_learning/deepdive/09_sequence # ``` # * Look at [`./setup_kubeflow.sh`](setup_kubeflow.sh) and modify as appropriate. # ### AppEngine # # What's deployed in Cloud ML Engine or Kubeflow is only the TensorFlow model. We still need a preprocessing service. That is done using AppEngine. Edit application/app.yaml appropriately. # !cat application/app.yaml # + language="bash" # cd application # #gcloud app create # if this is your first app # #gcloud app deploy --quiet --stop-previous-version app.yaml # - # Now visit https://mlpoetry-dot-cloud-training-demos.appspot.com and try out the prediction app! # # <img src="diagrams/poetry_app.png" width="50%"/> # Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License
Courses/Advanced Machine Learning with TensorFlow on Google Cloud Platform/Sequence Models for Time Series and Natural Language Processing/Week 2/Encoder-Decoder Networks/poetry.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # argv: # - /usr/bin/julia # - -i # - --color=yes # - --project=@. # - /home/buddhilw/.julia/packages/IJulia/e8kqU/src/kernel.jl # - '{connection_file}' # display_name: Julia 1.6.1 # env: null # interrupt_mode: signal # language: julia # metadata: null # name: julia-1.6 # --- # + active="" # Text provided under a Creative Commons Attribution license, CC-BY, Copyright (c) 2020, Cysor. All code is made available under the FSF-approved BSD-3 license. Adapted from CFDPython Copyright (c) Barba group - https://github.com/barbagroup/CFDPython # - # 12 steps to Navier–Stokes # ====== # *** # This Jupyter notebook continues the presentation of the **12 steps to Navier–Stokes**, the practical module taught in the interactive CFD class of [Prof. <NAME>](http://lorenabarba.com). You should have completed [Step 1](./01_Step_1.ipynb) before continuing, having written your own Julia script or notebook and having experimented with varying the parameters of the discretization and observing what happens. # # Step 2: Nonlinear Convection # ----- # *** # Now we're going to implement nonlinear convection using the same methods as in step 1. The 1D convection equation is: # # $$\frac{\partial u}{\partial t} + u \frac{\partial u}{\partial x} = 0$$ # # Instead of a constant factor $c$ multiplying the second term, now we have the solution $u$ multiplying it. Thus, the second term of the equation is now *nonlinear*. We're going to use the same discretization as in Step 1 — forward difference in time and backward difference in space. Here is the discretized equation. 
# #
# $$\frac{u_i^{n+1}-u_i^n}{\Delta t} + u_i^n \frac{u_i^n-u_{i-1}^n}{\Delta x} = 0$$
#
# Solving for the only unknown term, $u_i^{n+1}$, yields:
#
# $$u_i^{n+1} = u_i^n - u_i^n \frac{\Delta t}{\Delta x} (u_i^n - u_{i-1}^n)$$

# As before, the Julia code starts by loading the necessary libraries. Then, we declare some variables that determine the discretization in space and time (you should experiment by changing these parameters to see what happens). Then, we create the initial condition $u_0$ by initializing the array for the solution using $u_0 = 2\ @\ 0.5 \leq x \leq 1$ and $u_0 = 1$ everywhere else in $(0,2)$ (i.e., a hat function).

# +
using Plots

# Spatial Steps
nx = 41
Δx = 2 / (nx - 1)
x = range(0, stop=Δx*(nx-1), length=nx)

# Time Steps
# BUGFIX: this count is referenced as `nt` everywhere below (the time range and
# the main loop); it was previously declared as `n`, leaving `nt` undefined.
nt = 20
Δt = 0.025
t = range(0, stop=Δt*nt, length=nt)

# Initial Condition
U₀ = ones(nx)            # Initialise u with ones
U₀[0.5 .≤ x .≤ 1] .= 2   # Set u₀ = 2 in the interval 0.5 ≤ x ≤ 1 as per our I.C.s
# BUGFIX: the initial-condition array is named `U₀` (capital U); the original
# `copy(u₀)` referenced an undefined lowercase variable.
uⁿ⁺¹ = copy(U₀);         # Initialise arbitrary future timestep with initial condition, U₀
# -

# The code snippet below is *unfinished*. We have copied over the line from [Step 1](./01_Step_1.ipynb) that executes the time-stepping update. Can you edit this code to execute the nonlinear convection instead?

# + jupyter={"outputs_hidden": false}
for n in 1:nt                # iterate through time
    uⁿ = copy(uⁿ⁺¹)          # copy the existing values of u into un
    for i in 2:nx            # now we'll iterate through the u array
        # Nonlinear convection: the wave speed is the local solution value uⁿ[i]
        # (Step 1 used a constant c here instead).
        uⁿ⁺¹[i] = uⁿ[i] - uⁿ[i] * Δt/Δx * (uⁿ[i] - uⁿ[i-1])
    end
end

u = uⁿ⁺¹
plot(x, u)                   # Plot the results
# -

# What do you observe about the evolution of the hat function under the nonlinear convection equation? What happens when you change the numerical parameters and run again?
# ## Learn More # For a careful walk-through of the discretization of the convection equation with finite differences (and all steps from 1 to 4), watch **Video Lesson [4](https://youtube.com/watch?v=y2WaK7_iMRI)** by <NAME> on YouTube.
Lessons/02_Step_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.4 64-bit (''learn-env'': conda)'
#     name: python3
# ---

# # Importing Packages and Dataset

# +
# import packages for data cleaning and processing
import pandas as pd
import numpy as np
from datetime import datetime
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt

# import modules for preprocessing
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from mlxtend.evaluate import bias_variance_decomp

# pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', 200)
# -

# +
# BUGFIX: `rmse` and `train_test_metrics` were called throughout this notebook
# but never defined, raising NameError at first use. Signatures inferred from
# the call sites below.
def rmse(y_true, y_pred):
    """Root mean squared error between true and predicted targets."""
    return np.sqrt(mean_squared_error(y_true, y_pred))


def train_test_metrics(y_tr, y_te, y_pred_tr, y_pred_te):
    """Print R^2 and RMSE for both the training and the test split."""
    print('Training R^2 Score: %.4f' % r2_score(y_tr, y_pred_tr))
    print('Testing R^2 Score: %.4f' % r2_score(y_te, y_pred_te))
    print('Training RMSE: %d' % rmse(y_tr, y_pred_tr))
    print('Testing RMSE: %d' % rmse(y_te, y_pred_te))
# -

df = pd.read_csv('data/kc_house_data.csv')
df.head()

df.describe()

df.isna().sum()

# # Feature Selection

# +
# Extract sale date for later processing
df['sale_date'] = [x[:8] for x in df.date]
df.sale_date = df.sale_date.apply(lambda x: datetime.strptime(x, '%Y%m%d'))
df.drop(columns='date', inplace=True)
df.drop(['id'], inplace=True, axis=1)

# Replace anomalous bedroom value and check values in column
df.replace({'bedrooms': {33: 3}}, inplace=True)
df.replace({'bathrooms': {0: 0.25}}, inplace=True)

# Create new feature to incorporate age at the time of sale
df['sale_age'] = df.sale_date.dt.year - df[['yr_built', 'yr_renovated']].max(axis=1)
df.replace({'sale_age': {-1: 0}}, inplace=True)

# Create new feature for age from year built
df['age'] = df.sale_date.dt.year - df.yr_built
df.replace({'age': {-1: 0}}, inplace=True)

# Create binary variables for whether there has been a renovation, the property
# has a basement, and whether it has been viewed
df['renovated'] = df.yr_renovated.apply(lambda x: x if x==0 else 1)
df['basement'] = df.sqft_basement.apply(lambda x: x if x==0 else 1)
df['viewed'] = df.view.apply(lambda x: x if x==0 else 1)

# Drop original columns as well as the sale_date column since it is in datetime format
df.drop(['yr_built', 'yr_renovated', 'sale_date', 'sqft_basement', 'view'], inplace=True, axis=1)

# Drop latitude and longitude as zip code has stronger correlation
df.drop(['lat', 'long'], inplace=True, axis=1)
# -

df.head()

# # Dummy Variables and Polynomial/Interaction Features

# +
# Grab indices of columns for creating dummy variables and create dataframe with dummy variables
dum_feat = df[['bedrooms', 'bathrooms', 'floors', 'condition', 'grade', 'zipcode']]
dum_index = dum_feat.columns

# Create dummy variables then drop one of the dummy variables, as well as the
# original categorical variable used in creating the dummy variables
df_dum = pd.get_dummies(data=dum_feat, columns=dum_index, drop_first=True,
                        prefix=['bdr', 'bth', 'flr', 'cnd', 'grd', 'zip'])
df_dum.head()
# -

# Set target variable
y = df['price']

# # Baseline Model

poly_feat_1 = df.drop(['price', 'bedrooms', 'bathrooms', 'floors', 'condition', 'grade', 'zipcode'], axis=1)

X = pd.concat([poly_feat_1, df_dum], axis=1)
X.head()

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2, shuffle=True)

lr = LinearRegression().fit(X_train, y_train)

loss, bias, var = bias_variance_decomp(lr, X_train.values, y_train.values, X_test.values, y_test.values,
                                       loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss)
print('Average bias: %.3e' % bias)
print('Average variance: %.3e' % var)

y_pred_train_lr = lr.predict(X_train)
y_pred_test_lr = lr.predict(X_test)

train_test_metrics(y_train, y_test, y_pred_train_lr, y_pred_test_lr)

# # Degree-2 Polynomial Features Model

# Grab columns for polynomial and interaction features from the original dataframe without dummy variables
poly_feat_2 = df.drop(['price', 'bedrooms', 'bathrooms', 'floors', 'condition', 'grade', 'zipcode'], axis=1)

# Use PolynomialFeatures to create binomial and interaction features
poly_2 = PolynomialFeatures(degree=2, include_bias=False)
poly_data_2 = poly_2.fit_transform(poly_feat_2)
poly_columns_2 = poly_2.get_feature_names(poly_feat_2.columns)
df_poly_2 = pd.DataFrame(poly_data_2, columns=poly_columns_2)

# Concatenating two dataframes together for input into linear regression model
X_poly_2 = pd.concat([df_poly_2, df_dum], axis=1)
X_poly_2.head()

X_train_2, X_test_2, y_train_2, y_test_2 = train_test_split(X_poly_2, y, random_state=42, test_size=0.2, shuffle=True)

lr_2 = LinearRegression().fit(X_train_2, y_train_2)

loss_2, bias_2, var_2 = bias_variance_decomp(lr_2, X_train_2.values, y_train_2.values, X_test_2.values, y_test_2.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_2)
print('Average bias: %.3e' % bias_2)
print('Average variance: %.3e' % var_2)

y_pred_train_lr_2 = lr_2.predict(X_train_2)
y_pred_test_lr_2 = lr_2.predict(X_test_2)

train_test_metrics(y_train_2, y_test_2, y_pred_train_lr_2, y_pred_test_lr_2)

# # Degree-3 Polynomial Features Model

poly_feat_3 = df.drop(['price', 'bedrooms', 'bathrooms', 'floors', 'condition', 'grade', 'zipcode'], axis=1)

poly_3 = PolynomialFeatures(degree=3, include_bias=False)
poly_data_3 = poly_3.fit_transform(poly_feat_3)
poly_columns_3 = poly_3.get_feature_names(poly_feat_3.columns)
df_poly_3 = pd.DataFrame(poly_data_3, columns=poly_columns_3)

X_poly_3 = pd.concat([df_poly_3, df_dum], axis=1)
X_poly_3.head()

X_train_3, X_test_3, y_train_3, y_test_3 = train_test_split(X_poly_3, y, random_state=42, test_size=0.2, shuffle=True)

lr_3 = LinearRegression().fit(X_train_3, y_train_3)

loss_3, bias_3, var_3 = bias_variance_decomp(lr_3, X_train_3.values, y_train_3.values, X_test_3.values, y_test_3.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_3)
print('Average bias: %.3e' % bias_3)
print('Average variance: %.3e' % var_3)

y_pred_train_lr_3 = lr_3.predict(X_train_3)
y_pred_test_lr_3 = lr_3.predict(X_test_3)

train_test_metrics(y_train_3, y_test_3, y_pred_train_lr_3, y_pred_test_lr_3)

# # Degree-4 Polynomial Features Model

poly_feat_4 = df.drop(['price', 'bedrooms', 'bathrooms', 'floors', 'condition', 'grade', 'zipcode'], axis=1)

poly_4 = PolynomialFeatures(degree=4, include_bias=False)
poly_data_4 = poly_4.fit_transform(poly_feat_4)
poly_columns_4 = poly_4.get_feature_names(poly_feat_4.columns)
df_poly_4 = pd.DataFrame(poly_data_4, columns=poly_columns_4)

X_poly_4 = pd.concat([df_poly_4, df_dum], axis=1)
X_poly_4.head()

X_train_4, X_test_4, y_train_4, y_test_4 = train_test_split(X_poly_4, y, random_state=42, test_size=0.2, shuffle=True)

lr_4 = LinearRegression().fit(X_train_4, y_train_4)

loss_4, bias_4, var_4 = bias_variance_decomp(lr_4, X_train_4.values, y_train_4.values, X_test_4.values, y_test_4.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_4)
print('Average bias: %.3e' % bias_4)
print('Average variance: %.3e' % var_4)

y_pred_train_lr_4 = lr_4.predict(X_train_4)
y_pred_test_lr_4 = lr_4.predict(X_test_4)

train_test_metrics(y_train_4, y_test_4, y_pred_train_lr_4, y_pred_test_lr_4)

# # Metrics Dataframe

# +
data = [['%.3e' % bias, '%.3e' % var, round(r2_score(y_train, y_pred_train_lr), 4),
         round(r2_score(y_test, y_pred_test_lr), 4), int(rmse(y_train, y_pred_train_lr)),
         int(rmse(y_test, y_pred_test_lr))],
        ['%.3e' % bias_2, '%.3e' % var_2, round(r2_score(y_train_2, y_pred_train_lr_2), 4),
         round(r2_score(y_test_2, y_pred_test_lr_2), 4), int(rmse(y_train_2, y_pred_train_lr_2)),
         int(rmse(y_test_2, y_pred_test_lr_2))],
        ['%.3e' % bias_3, '%.3e' % var_3, round(r2_score(y_train_3, y_pred_train_lr_3), 4),
         round(r2_score(y_test_3, y_pred_test_lr_3), 4), int(rmse(y_train_3, y_pred_train_lr_3)),
         int(rmse(y_test_3, y_pred_test_lr_3))],
        ['%.3e' % bias_4, '%.3e' % var_4, round(r2_score(y_train_4, y_pred_train_lr_4), 4),
         round(r2_score(y_test_4, y_pred_test_lr_4), 4), int(rmse(y_train_4, y_pred_train_lr_4)),
         int(rmse(y_test_4, y_pred_test_lr_4))]]

index = ["Baseline", "Poly-2", "Poly-3", "Poly-4"]
columns = ["Average Bias", "Average Variance", "Training R^2 Score", "Testing R^2 Score", "Training RMSE", "Testing RMSE"]

poly_feat_df = pd.DataFrame(data=data, index=index, columns=columns)
poly_feat_df
# -

poly_feat_df.to_csv("data/poly_feat_df.csv")

# # Ridge Regression Model

ss = StandardScaler()
X_train_scaled = ss.fit_transform(X_train)
X_test_scaled = ss.transform(X_test)
X_train_sc = pd.DataFrame(X_train_scaled, columns=X_train.columns)
X_test_sc = pd.DataFrame(X_test_scaled, columns=X_test.columns)

ridge = Ridge(alpha=1).fit(X_train_sc, y_train)
y_pred_train_ridge = ridge.predict(X_train_sc)
y_pred_test_ridge = ridge.predict(X_test_sc)

loss_5, bias_5, var_5 = bias_variance_decomp(ridge, X_train_sc.values, y_train.values, X_test_sc.values, y_test.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_5)
print('Average bias: %.3e' % bias_5)
print('Average variance: %.3e' % var_5)

train_test_metrics(y_train, y_test, y_pred_train_ridge, y_pred_test_ridge)

# # Lasso Regression Model

lasso = Lasso(alpha=1).fit(X_train_sc, y_train)
y_pred_train_lasso = lasso.predict(X_train_sc)
y_pred_test_lasso = lasso.predict(X_test_sc)

loss_6, bias_6, var_6 = bias_variance_decomp(lasso, X_train_sc.values, y_train.values, X_test_sc.values, y_test.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_6)
print('Average bias: %.3e' % bias_6)
print('Average variance: %.3e' % var_6)

train_test_metrics(y_train, y_test, y_pred_train_lasso, y_pred_test_lasso)

# # Decision Tree Regressor

dtr = DecisionTreeRegressor(random_state=42).fit(X_train, y_train)
y_pred_train_dtr = dtr.predict(X_train)
y_pred_test_dtr = dtr.predict(X_test)

loss_7, bias_7, var_7 = bias_variance_decomp(dtr, X_train.values, y_train.values, X_test.values, y_test.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_7)
print('Average bias: %.3e' % bias_7)
print('Average variance: %.3e' % var_7)

train_test_metrics(y_train, y_test, y_pred_train_dtr, y_pred_test_dtr)

# # Random Forest Regressor

rf = RandomForestRegressor(n_estimators=100, max_features="auto", max_depth=100,
                           min_samples_leaf=4, min_samples_split=10, random_state=1).fit(X_train, y_train)
y_pred_train_rf = rf.predict(X_train)
y_pred_test_rf= rf.predict(X_test)

loss_8, bias_8, var_8 = bias_variance_decomp(rf, X_train.values, y_train.values, X_test.values, y_test.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_8)
print('Average bias: %.3e' % bias_8)
print('Average variance: %.3e' % var_8)

train_test_metrics(y_train, y_test, y_pred_train_rf, y_pred_test_rf)

# # DataFrame of Metrics

# +
data_2 = [['%.3e' % bias, '%.3e' % var, round(r2_score(y_train, y_pred_train_lr), 4),
           round(r2_score(y_test, y_pred_test_lr), 4), int(rmse(y_train, y_pred_train_lr)),
           int(rmse(y_test, y_pred_test_lr))],
          ['%.3e' % bias_5, '%.3e' % var_5, round(r2_score(y_train, y_pred_train_ridge), 4),
           round(r2_score(y_test, y_pred_test_ridge), 4), int(rmse(y_train, y_pred_train_ridge)),
           int(rmse(y_test, y_pred_test_ridge))],
          ['%.3e' % bias_6, '%.3e' % var_6, round(r2_score(y_train, y_pred_train_lasso), 4),
           round(r2_score(y_test, y_pred_test_lasso), 4), int(rmse(y_train, y_pred_train_lasso)),
           int(rmse(y_test, y_pred_test_lasso))],
          ['%.3e' % bias_7, '%.3e' % var_7, round(r2_score(y_train, y_pred_train_dtr), 4),
           round(r2_score(y_test, y_pred_test_dtr), 4), int(rmse(y_train, y_pred_train_dtr)),
           int(rmse(y_test, y_pred_test_dtr))],
          ['%.3e' % bias_8, '%.3e' % var_8, round(r2_score(y_train, y_pred_train_rf), 4),
           round(r2_score(y_test, y_pred_test_rf), 4), int(rmse(y_train, y_pred_train_rf)),
           int(rmse(y_test, y_pred_test_rf))]]

index_2 = ["Baseline", "Baseline w/Ridge", "Baseline w/Lasso", "Decision Tree Regressor", "Random Forest Regressor"]
columns = ["Average Bias", "Average Variance", "Training R^2 Score", "Testing R^2 Score", "Training RMSE", "Testing RMSE"]

models_df = pd.DataFrame(data=data_2, index=index_2, columns=columns)
# -

models_df

models_df.to_csv("data/models_df.csv")

ss = StandardScaler()
X_train_2_scaled = ss.fit_transform(X_train_2)
X_test_2_scaled = ss.transform(X_test_2)
X_train_2_sc = pd.DataFrame(X_train_2_scaled, columns=X_train_2.columns)
X_test_2_sc = pd.DataFrame(X_test_2_scaled, columns=X_test_2.columns)

ridge_2 = Ridge(alpha=1).fit(X_train_2_sc, y_train_2)
y_pred_train_2_ridge = ridge_2.predict(X_train_2_sc)
y_pred_test_2_ridge = ridge_2.predict(X_test_2_sc)

loss_9, bias_9, var_9 = bias_variance_decomp(ridge_2, X_train_2_sc.values, y_train_2.values, X_test_2_sc.values, y_test_2.values,
                                             loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_9)
print('Average bias: %.3e' % bias_9)
print('Average variance: %.3e' % var_9)

train_test_metrics(y_train_2, y_test_2, y_pred_train_2_ridge, y_pred_test_2_ridge)

# NOTE: this rebinds `lasso` from the baseline section above; safe because the
# baseline metrics have already been captured in models_df.
lasso = Lasso(alpha=1).fit(X_train_2_sc, y_train_2)
y_pred_train_2_lasso = lasso.predict(X_train_2_sc)
y_pred_test_2_lasso = lasso.predict(X_test_2_sc)

loss_10, bias_10, var_10 = bias_variance_decomp(lasso, X_train_2_sc.values, y_train_2.values, X_test_2_sc.values, y_test_2.values,
                                                loss='mse', random_seed=42)
print('Average expected loss: %.3e' % loss_10)
print('Average bias: %.3e' % bias_10)
print('Average variance: %.3e' % var_10)

train_test_metrics(y_train_2, y_test_2, y_pred_train_2_lasso, y_pred_test_2_lasso)
Bias-Variance/Bias_Variance_Tradeoff.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Preprocessing I Image stats
# ( overwrites image_features_nb.py )
#
# * estimate intensity stats in red / green channel
# * calculate nuc and syn stats for each image
# * save for future steps

# +
# local imports
from pathlib import Path
# from os.path import isfile
import numpy as np
import pandas as pd

from src.data.read_data import get_image_name
from src.features.build_features import get_img_percentiles

# %load_ext autoreload
# %autoreload 2
# -

# ### Image stats
# Reads the raw tif/tiff stacks, looks up their acquisition year in the master
# csv, and computes per-channel intensity percentiles for every image.

# +
# %% path with all the images
pallium_path = Path("D:/Code/repos/psd95_segmentation/data/raw/img/pallium")

# collect every tif/tiff file in the folder (directories are excluded)
files = [p for p in pallium_path.glob('*tif*') if p.is_file()]
print(files[0])
len(files)

# +
# Load the csv that carries image metadata (segmentation info, dates, etc.)
info_df = pd.read_csv('D:/Code/repos/psd95_segmentation/data/raw/karls_good2.csv')

# keep one (image, issue date, subject) row per source image
info_year = info_df.loc[:, ['Source Image', 'Subject Issue Date', 'Subject']].sort_values(by=['Source Image'])
info_year.drop_duplicates(subset='Source Image', inplace=True)
info_year
# -

# For every downloaded image, look up its name and acquisition year in the
# metadata table:

# extract file names and match each one with its year
years = np.zeros((len(files), 1))
names = []
for count, fpath in enumerate(files):
    img_name = get_image_name(fpath)
    # issue date is "m/d/yyyy"; keep only the year component
    year = info_year.loc[info_year['Source Image'] == img_name, 'Subject Issue Date'].values[0].split(sep='/')[-1]
    print(f"#{count} image {img_name} year {year}")
    years[count] = year
    names.append(img_name)

# +
# Intensity percentiles for the green and red channels.
# Padding crops the zero borders so they do not skew the statistics.
padding = [[0, 0], [50, 500], [20, 20]]

# each call loads the files and computes the percentile table for one channel
prc_green = get_img_percentiles("green", files, padding)
prc_red = get_img_percentiles("red", files, padding)

# +
# Combine year and percentile columns into a single data frame
img_intensity_df = pd.DataFrame(np.concatenate((years, prc_green, prc_red), axis=1),
                                columns=['Year',
                                         'green_prc_0', 'green_prc_10', 'green_prc_20', 'green_prc_30',
                                         'green_prc_40', 'green_prc_50', 'green_prc_60', 'green_prc_70',
                                         'green_prc_80', 'green_prc_90', 'green_prc_100',
                                         'red_prc_0', 'red_prc_10', 'red_prc_20', 'red_prc_30',
                                         'red_prc_40', 'red_prc_50', 'red_prc_60', 'red_prc_70',
                                         'red_prc_80', 'red_prc_90', 'red_prc_100'])
img_intensity_df.insert(0, 'Name', names, True)

# +
# Derive the time point from the image ID.
"""
If the old image name ends with a 6 or 5 then its tp 2; 3 or 2 is tp 1.
"""
# %%
id_segments = []
time_point = []
for count, img_name in enumerate(names):
    id_segment = info_df.loc[info_df['Source Image'] == img_name, 'ID'].values[0]
    print(f"#{count} image {img_name} ID {id_segment}")
    id_segments.append(id_segment)

    # the second-to-last character of the ID encodes the time point
    if id_segment[-2] in ('6', '5'):
        time_point.append(2)
    elif id_segment[-2] in ('3', '2'):
        time_point.append(1)
    else:
        raise ValueError("Don't know what time point it is")

# record the time point in the data frame
img_intensity_df.insert(1, 'tp', time_point, True)
# -

# ### Nuclear stats
# Calculates percentiles of nuclear intensity.
#
# If no nuclear csv is present, all percentiles for that image stay at ZERO.

# +
# Compute nuclear percentiles and append them to the data frame
nuc_path = 'D:/Code/repos/psd95_segmentation/data/raw/csv/pallium/nuc/'
# %%
nuc_prc = np.zeros((len(names), 5))

# read each image's nuclei csv (when present) and take its percentiles
for count, img_name in enumerate(names):
    print(f"#{count} image {img_name}")
    csv_file = nuc_path + img_name + '_nuclei_only.csv'
    if Path(csv_file).exists():
        nuc_csv_df = pd.read_csv(csv_file, skiprows=[1])
        nuc_intensity = nuc_csv_df.loc[:, ['raw core']]
        nuc_prc[count, :] = np.percentile(nuc_intensity, [10, 25, 50, 75, 90])
    else:
        print("Skipped")

nuc_prc_df = pd.DataFrame(nuc_prc,
                          columns=['nuc_prc_10', 'nuc_prc_25', 'nuc_prc_50', 'nuc_prc_75', 'nuc_prc_90'])

# attach to the main data frame
img_intensity_df = pd.concat([img_intensity_df, nuc_prc_df], axis=1)
# -

# ### Synapse stats

# +
# Compute synapse percentiles and append them to the data frame
syn_path = 'D:/Code/repos/psd95_segmentation/data/raw/csv/pallium/syn/'
syn_prc = np.zeros((len(names), 5))

for count, img_name in enumerate(names):
    print(f"#{count} image {img_name}")

    found = False
    segmentations = info_df.loc[info_df['Source Image'] == img_name,
                                ['RID', 'classifier_name', 'Segments Filtered URL']]
    # an image can have several segmentation entries ...
    for index, segmentation in segmentations.iterrows():
        rid, cl_name, url = segmentation
        # the URL encodes the csv type (nuclear vs synaptic); keep synaptic only
        csv_type = url.split('.')[1]
        if csv_type == 'synapses_only':
            csv_file = syn_path + img_name + '_' + rid + '_' + csv_type + '.csv'
            # use the first synaptic segmentation that exists on disk
            if Path(csv_file).exists() and not found:
                found = True
                syn_csv_df = pd.read_csv(csv_file, skiprows=[1])
                syn_intensity = syn_csv_df.loc[:, ['raw core']]
                syn_prc[count, :] = np.percentile(syn_intensity, [10, 25, 50, 75, 90])

    # none of the entries was a usable 'synapses_only' csv
    if not found:
        print("Skipped")

syn_prc_df = pd.DataFrame(syn_prc,
                          columns=['syn_prc_10', 'syn_prc_25', 'syn_prc_50', 'syn_prc_75', 'syn_prc_90'])
img_intensity_df = pd.concat([img_intensity_df, syn_prc_df], axis=1)
# -
notebooks/Preprocessing1_Image_stats.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + language="bash"
# rm -r Data
# mkdir Data
# cp ../ubuntu/ZZ17001a/mm10/barcodes.tsv Data/
# cp ../ubuntu/ZZ17001a/mm10/genes.tsv Data/
# cp ../ubuntu/ZZ17001a/mm10/matrix.mtx Data/

# +
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.io
import scipy.stats as stats
from statsmodels.robust.scale import mad
# NOTE(review): option_context is a context manager; calling it outside a
# `with` block has no lasting effect. If the option is actually wanted
# globally, this should be pd.set_option(...) — confirm intent before changing.
pd.core.config.option_context('mode.use_inf_as_null',True)
import seaborn as sns
import os
import sys
import csv
import shlex
import subprocess
sys.setrecursionlimit(10000)
from plotly.graph_objs import Scatter3d, Data, Marker,Layout, Figure, Scene, XAxis, YAxis, ZAxis
import plotly.plotly as py
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
matplotlib.rcParams['axes.edgecolor']='k'
matplotlib.rcParams['axes.linewidth']=3
matplotlib.rcParams['axes.spines.top']='off'
matplotlib.rcParams['axes.spines.right']='off'
matplotlib.rcParams['axes.facecolor']='white'


# +
def read10X(path):
    """Read a 10x Genomics matrix directory into (counts, featureData).

    counts: genes x barcodes dense DataFrame; featureData: gene id -> name map.
    """
    mat = scipy.io.mmread(os.path.join(path, "matrix.mtx"))
    genes_path = os.path.join(path, "genes.tsv")
    gene_ids = [row[0] for row in csv.reader(open(genes_path), delimiter="\t")]
    gene_names = [row[1] for row in csv.reader(open(genes_path), delimiter="\t")]
    barcodes_path = os.path.join(path, "barcodes.tsv")
    barcodes = [row[0] for row in csv.reader(open(barcodes_path), delimiter="\t")]
    featureData = pd.DataFrame(data=gene_names, index=gene_ids, columns=['Associated.Gene.Name'])
    counts = pd.DataFrame(index=gene_ids, columns=barcodes, data=mat.todense())
    return counts, featureData


def filterCells(counts):
    """Keep barcodes with more than 500 total UMIs."""
    umi_counts = counts.sum()
    cells1000 = umi_counts[umi_counts > 500].index
    return cells1000


def filterGenes(counts):
    """Keep genes detected with >= 2 counts in >= 2 cells."""
    filteredGenes = counts.index[(counts >= 2).sum(1) >= 2]
    return filteredGenes


def plotQC(counts):
    """Histograms of genes detected and UMIs per cell, with median lines."""
    genesdetected = (counts >= 1.).sum()
    umi_counts = counts.sum()
    fig, (ax, ax1) = plt.subplots(1, 2, figsize=(10, 5))
    genesdetected.plot(kind='hist', bins=np.arange(0, 5000, 100), lw=0, ax=ax)
    ax.grid('off')
    ax.patch.set_facecolor('white')
    ax.axvline(x=np.median(genesdetected), ls='--', lw=2, c='k')
    ax.set_xlabel('Genes', fontsize=13)
    ax.set_ylabel('Cells', fontsize=13)
    umi_counts.plot(kind='hist', bins=np.arange(0, 10000, 500), lw=0, ax=ax1,
                    color=sns.color_palette()[1])
    ax1.grid('off')
    ax1.patch.set_facecolor('white')
    ax1.axvline(x=np.median(umi_counts), ls='--', lw=2, c='k')
    ax1.set_xlabel('Transcripts - UMI', fontsize=13)
    ax1.set_ylabel('Cells', fontsize=13)


def normalize(counts):
    """Library-size normalize to the median UMI count, then log(x+1).

    Applies cell and gene filters before returning the log-normalized matrix.
    """
    cells1000 = filterCells(counts)
    filteredGenes = filterGenes(counts)
    umi_counts = counts.sum()
    cpt = counts * np.median(umi_counts) / umi_counts
    cpt = cpt.loc[filteredGenes, cells1000]
    cpt = (cpt + 1).apply(np.log)
    return cpt


def overdispersion(cpt, nGenes):
    """Select the nGenes most overdispersed genes (dispersion normalized
    within mean-expression bins, which corrects for technical variance)."""
    meanExpression = np.log(np.mean(np.exp(cpt) - 1, 1) + 1)
    dispersion = np.log(np.var(np.exp(cpt) - 1, 1) / np.mean(np.exp(cpt) - 1, 1))
    bins = np.linspace(min(meanExpression), max(meanExpression), 20)
    pos = np.digitize(meanExpression, bins)
    overDispersion = []
    for index, gene in enumerate(meanExpression.index):
        medianBin = dispersion[pos == pos[index]].median()
        madBin = mad(dispersion[pos == pos[index]])
        # .loc replaces the removed .ix indexer (label-based access here)
        normalizedDispersion = abs(dispersion.loc[gene] - medianBin) / madBin
        overDispersion.append([gene, normalizedDispersion])
    overDispersion = pd.DataFrame(overDispersion)
    overDispersion.set_index(0, inplace=True)
    top1000 = overDispersion.sort_values(1, ascending=False)[:nGenes].index
    return top1000


def variance(cpt, nGenes):
    """Select the nGenes genes with the highest raw variance.

    BUGFIX: the original used Series.sort_values(inplace=True, ...), which
    returns None and then crashed on `.index`; sort without inplace instead.
    """
    gene_var = cpt.var(1)
    top = gene_var.sort_values(ascending=False)[:nGenes].index
    return top


def runTSNE(cpt, genes):
    """Run Barnes-Hut tSNE (external script) on the given gene set.

    BUGFIX: previously read the global `top1000` instead of the `genes`
    argument, silently ignoring the caller's gene selection.
    """
    np.savetxt('Data/filtered.tsv', cpt.loc[genes].T.values, delimiter='\t')
    # external bhtsne script; path is machine-specific
    cmd = '/Users/mby/Downloads/bhtsne-master/bhtsne.py -d 3 -i Data/filtered.tsv --no_pca -r 1024 -o Data/out.tsv'
    cmd = shlex.split(cmd)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    tsne = np.loadtxt('Data/out.tsv')
    tsneData = pd.DataFrame(tsne, index=cpt.columns, columns=['V1', 'V2', 'V3'])
    return tsneData


def PCA(cpt, genes):
    """PCA (50 components) on the given gene set; returns (scores, cumulative
    explained variance).

    BUGFIX: previously used the global `top1000` (via the removed .ix indexer)
    instead of the `genes` argument.
    """
    from sklearn.decomposition import PCA as sklearnPCA
    sklearn_pca = sklearnPCA(n_components=50)
    Y_sklearn = sklearn_pca.fit_transform(cpt.loc[genes].T)
    pcaData = pd.DataFrame(Y_sklearn, index=cpt.columns)
    eig_vals = sklearn_pca.explained_variance_
    tot = sum(eig_vals)
    var_exp = [(i / tot) * 100 for i in sorted(eig_vals, reverse=True)]
    cum_var_exp = np.cumsum(var_exp)
    return pcaData, cum_var_exp


def getEnsid(featureData, gene):
    """Map a gene symbol to its Ensembl id(s)."""
    return featureData[featureData['Associated.Gene.Name'] == gene].index


def plotTSNE(cpt, tsnedata, gene, featureData, dim1, dim2):
    """Scatter the tSNE embedding colored by one gene's expression."""
    fig, ax = plt.subplots(1)
    ax.scatter(tsnedata[dim1], tsnedata[dim2], c=cpt.loc[getEnsid(featureData, gene), ], s=10,
               linewidths=1, cmap=plt.cm.Greens, vmax=2, vmin=0.1)
    ax.set_title(gene)
    #return fig


def dbscan(tsnedata, eps, minCells):
    """Cluster the embedding with DBSCAN; adds a 'dbCluster' column (noise=0)."""
    from sklearn.cluster import DBSCAN
    db = DBSCAN(eps=eps, min_samples=minCells).fit(tsnedata.values)
    tsnedata['dbCluster'] = db.labels_ + 1
    return tsnedata


def plotTSNEClusters(tsnedata, dim1, dim2):
    """Scatter the embedding colored by DBSCAN cluster assignment."""
    colors = ['#a6cee3', '#1f78b4', '#b2df8a',
              '#33a02c', '#fb9a99', '#e31a1c',
              '#fdbf6f', '#ff7f00', '#cab2d6',
              '#6a3d9a', '#ffff99', '#b15928',
              '#000000', '#bdbdbd', '#ffff99']
    k2 = sns.lmplot(dim1, dim2, data=tsnedata, hue='dbCluster',
                    fit_reg=False, palette=colors, scatter_kws={"s": 5})
    k2.ax.grid('off')
    k2.ax.patch.set_facecolor('white')
    #k2.savefig('../Figures/TSNE-KM.pdf',format='pdf',dpi=300)


def mkRds(cpt, featureData, tsnedata):
    """Export expression/feature/tSNE tables to csv and bundle them into an
    R .Rds file by generating and running a small R script."""
    cpt.to_csv('Data/Expression-G.csv')
    featureData['Chromosome.Name'] = 1
    featureData.to_csv('Data/MM10_10X-FeatureData.csv')
    tsnedata.to_csv('Data/TSNEData-Dbscan.csv')
    rscript = '''
    rm(list=ls())
    setwd('%s')
    log2cpm<-read.csv('%s',row.names=1,stringsAsFactors = F, as.is=T, check.names=F)
    featuredata<-read.csv('%s',row.names=1,stringsAsFactors = F, as.is=T,sep=',',check.names=F)
    tsne.data<-read.csv('%s',row.names=1,stringsAsFactors = F,as.is=T,check.names=F)
    save(log2cpm,featuredata,tsne.data,file='%s')
    ''' % (os.getcwd(), 'Data/Expression-G.csv', 'Data/MM10_10X-FeatureData.csv',
           'Data/TSNEData-Dbscan.csv', 'Data/Data.Rds')
    with open('Data/setupRds.R', 'w') as fout:
        fout.writelines(rscript)
    cmd = 'R --no-save -f Data/setupRds.R'
    os.system(cmd)


def tsne3d(tsnedata):
    """Interactive 3-D plotly scatter of the embedding, one trace per cluster."""
    walkers = []
    colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6',
              '#6a3d9a', '#ffff99', '#b15928', '#000000', '#bdbdbd', '#ffff99']
    colors = colors * 3  # repeat so up to 44 cluster ids can be colored
    for ii in range(0, 44, 1):
        tsne_subset = tsnedata[tsnedata['dbCluster'] == ii]
        cellnames = tsne_subset.index
        a = tsne_subset['V1'].values
        b = tsne_subset['V2'].values
        c = tsne_subset['V3'].values
        trace = Scatter3d(
            x=a, y=b, z=c,
            text=['CellName: %s' % (i) for i in cellnames],
            mode='markers',
            name=ii,
            marker=dict(
                color=colors[ii],
                size=3,
                symbol='circle',
                line=dict(
                    color=colors[ii],
                    width=0
                )))
        walkers.append(trace)
    data = Data(walkers)
    layout = Layout(
        title='BS16001-TE1',
        hovermode='closest',
        xaxis=dict(title='TSNE-1', ticklen=0, showline=True, zeroline=True),
        yaxis=dict(title='TSNE-2', ticklen=5,),
        scene=Scene(
            xaxis=XAxis(title='TSNE-1', showgrid=True, zeroline=True, showticklabels=True),
            yaxis=YAxis(title='TSNE-2', showgrid=True, zeroline=True, showticklabels=True),
            zaxis=ZAxis(title='TSNE-3', showgrid=True, zeroline=True, showticklabels=True)
        )
    )
    fig = Figure(data=data, layout=layout)
    iplot(fig)


def findMarkers(cpt, cells1, cells2, genes):
    """AUC score per gene for separating cells1 from cells2 (ROC analysis)."""
    aucScores = []
    from sklearn import metrics
    # labels are loop-invariant; build them once instead of per gene
    y = [1] * len(cells2) + [2] * len(cells1)
    for gene in genes:
        pred = np.concatenate((cpt.loc[gene, cells2], cpt.loc[gene, cells1]))
        fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
        aucScores.append(metrics.auc(fpr, tpr))
    return pd.DataFrame(aucScores, index=genes, columns=['Score'])


def expMean(x):
    """Mean in linear (un-logged) space, returned on the log scale."""
    return(np.log(np.mean(np.exp(x) - 1) + 1))


def markerHeatmap(cpt, genes, tsnedata_dbscan, featureData):
    """Clustered heatmap (z-scored) of the given marker genes."""
    hdata = cpt.loc[genes, ].dropna()
    colorMap = dict(zip(range(1, 8, 1), sns.color_palette('Set1', 9)))
    hetmap = sns.clustermap(hdata, z_score=0, yticklabels=False, vmin=-3, vmax=3,
                            xticklabels=featureData.loc[genes, 'Associated.Gene.Name'],
                            row_cluster=True, col_cluster=True,
                            col_colors=colorMap, metric='correlation')
    b = plt.setp(hetmap.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
# -

# ### Read Input data

counts, featuredata = read10X('Data/')

# ### Shape of dataset: Genes, Cells

counts.shape

plotQC(counts)

# ### Normalize data
# Since number of genes and transcripts detected is directly dependent on read depth, library size normalization is essential. This function will normalize gene expression based on total transcripts detected in each cell, multiply with a constant and log transform.

cpt = normalize(counts)

# ### Feature Selection
# One of the most important steps in single cell RNA seq processing is selecting genes that describe most of the biological variance. However, this is confounded by the high levels of technical noise associated with single cell RNA-seq data.
#
# ***This jupyter notebook contains 2 functions to enable feature selection:***
# 1. variance - select the top variable genes in the dataset
# 2. overdispersion - select the top variable genes in the dataset corrected for technical variance

top1000 = overdispersion(cpt, 1000)

# ### Dimensionality reduction
# After gene selection, the high dimensionality of single cell RNA-seq data is commonly reduced to cluster similar cells together.
# ***This jupyter notebook contains 2 functions for dimensionality reduction:***
# 1. PCA
# 2. tSNE - for the purposes of the demonstration, we will use tSNE and reduce data to 3 dimensions

tsnedata = runTSNE(cpt, top1000)

# BUGFIX: removed a stray `tsnedata = pd.read_csv('')` here — reading an empty
# path raises an error and would have clobbered the tSNE result just computed.

# ### Visualization
# Visualization is an important part of a single cell experiment. Exploring data with genes of interest helps validate clustering as well as begins the process of identifying the cell type of each cluster.
#
# Let's take a look at our dimensionality reduction by plotting cells.

plt.scatter(tsnedata['V2'], tsnedata['V3'], s=5)

# ### Visualization
# Visualization is an important part of a single cell experiment. Exploring data with genes of interest helps validate clustering as well as begins the process of identifying the cell type of each cluster.
#
# Let's take a look at our dimensionality reduction by plotting cells, but this time color each cell by the expression of a particular gene. Pick from Emcn, Olig1, Olig2, Pdgra, Fyn, Aqp4, Mog, Slc32a1, Slc17a6, Cx3cr1.

plotTSNE(cpt, tsnedata, 'Snap25', featuredata, 'V2', 'V3')

# ### Cluster identification
# After dimensionality reduction, clusters are identified using a variety of approaches. We will use a simple algorithm called DBSCAN to identify clusters.
#
# ***This jupyter notebook contains 1 function for cluster identification:***
# 1. DBSCAN

tsnedata_dbscan = dbscan(tsnedata, 3.2, 20)

# ### Visualization
#
# Let's take a look at our dimensionality reduction by plotting cells, but this time color each cell by the cluster assignment as determined by DBSCAN.

plotTSNEClusters(tsnedata_dbscan, 'V2', 'V3')

# ### Visualization
#
# Let's take a look at our dimensionality reduction by plotting cells, but this time color each cell by the cluster assignment as determined by DBSCAN. Remember that our data was reduced to 3 dimensions.
So, lets plot all 3 dimensions # + walkers=[] colors=['#a6cee3','#1f78b4','#b2df8a', '#33a02c','#fb9a99','#e31a1c', '#fdbf6f','#ff7f00','#cab2d6', '#6a3d9a','#ffff99','#b15928', '#000000','#bdbdbd','#ffff99'] for ii in range(0,44,1): tsne_subset=tsne[tsne['dbCluster']==ii] cellnames=tsne_subset.index a=tsne_subset['V1'].values b=tsne_subset['V2'].values c=tsne_subset['V3'].values trace = Scatter3d( x=a, y=b, z=c, text=['CellName: %s' %(i) for i in cellnames], mode='markers', name=ii, marker=dict( color=colors[ii], size=3, symbol='circle', line=dict( color=colors[ii], width=0 ) )) walkers.append(trace) data = Data(walkers) layout = Layout( title='BS16001-TE1', hovermode='closest', xaxis=dict( title='TSNE-1', ticklen=0, showline=True, zeroline=True ), yaxis=dict( title='TSNE-2', ticklen=5, ), scene=Scene( xaxis=XAxis(title='TSNE-1',showgrid=True,zeroline=True,showticklabels=True), yaxis=YAxis(title='TSNE-2',showgrid=True,zeroline=True,showticklabels=True), zaxis=ZAxis(title='TSNE-3',showgrid=True,zeroline=True,showticklabels=True) ) ) fig = Figure(data=data, layout=layout) py.iplot(fig, filename='BS16001-TE1-KMEANS.html') # - tsne3d(tsnedata_dbscan) # ### Marker Identification # Identifying genes that differentiate each of these cell populations is an important aspect of single cell RNA-seq data. There are many different methods to do this type of analysis. Given the size of the dataset ome of these are compute heavy. For the sake of brevity, we will use AUROC classification of differentially expressed genes. 
# AUROC marker screening: for each DBSCAN cluster, compare the cells inside
# the cluster against all remaining cells and score candidate marker genes.
aurocScoresAll = pd.DataFrame()
for cluster in range(1, 8, 1):
    cells1 = tsnedata_dbscan[tsnedata_dbscan['dbCluster'] == cluster].index
    cells2 = tsnedata_dbscan.index.difference(cells1)
    # Mean expression (computed in linear space via expMean) inside vs.
    # outside the cluster, per gene.
    data1 = cpt.loc[cpt.index, cells1].apply(expMean, 1)
    data2 = cpt.loc[cpt.index, cells2].apply(expMean, 1)
    totaldiff = (data1 - data2)
    # Keep only genes at least 1 log-unit enriched in the cluster before the
    # (expensive) per-gene AUROC computation.
    genes = totaldiff[totaldiff > 1.].index
    aurocScores = findMarkers(cpt, cells1, cells2, genes)
    aurocScores['Associated.Gene.Name'] = featuredata['Associated.Gene.Name']
    aurocScores['dbCluster'] = cluster
    # FIX: DataFrame.append is deprecated and was removed in pandas 2.0;
    # pd.concat is the supported replacement and is behaviorally equivalent
    # here (row-wise concatenation preserving each frame's index).
    aurocScoresAll = pd.concat([aurocScoresAll, aurocScores])

# ### Visualization
#
# Let's make a heatmap of all marker genes

markerHeatmap(cpt, aurocScoresAll.index, tsnedata_dbscan, featuredata)

# ### Make .Rds file for CellView
#
# And finally, let's summarize this analysis into an .Rds file that we can share with others

mkRds(cpt, featuredata, tsnedata_dbscan)
AnalysisNotebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Board(object):
    """A 3x3 tic-tac-toe board.

    Free cells hold their cell number as a string ('1'-'9'); claimed cells
    hold a player's (non-digit) symbol.
    """

    def __init__(self):
        self.game_board = self.init_board()

    def init_board(self):
        # Build rows [1 2 3], [4 5 6], [7 8 9] then reverse so the board is
        # laid out numpad-style: 7 8 9 on top, 1 2 3 at the bottom.
        return [[str(i) for i in range((3 * j) - 2, (3 * j) + 1)]
                for j in range(1, 4)][::-1]

    def insert_into_board(self, x, y, symbol):
        """Claim cell (row x, col y) for `symbol`; return False if taken."""
        if self.game_board[x][y].isdigit():
            self.game_board[x][y] = symbol
            return True
        return False

    def is_full(self):
        """Return True when no free (digit) cell remains."""
        return all(not cell.isdigit()
                   for row in self.game_board for cell in row)

    def __str__(self):
        str_board = ""
        for row in range(3):
            for col in range(3):
                str_board += self.game_board[row][col]
                if col < 2:
                    str_board += " | "
            str_board += "\n"
            if row < 2:
                str_board += "---------"
                str_board += "\n"
        return str_board


class Player(object):
    """A player identified by a name and a single non-digit symbol."""

    def __init__(self, name, symbol):
        # Digits are reserved for free cells, so they cannot be symbols.
        if symbol.isdigit():
            raise ValueError("The player symbol must not be a digit")
        self.name = name
        self.symbol = symbol
        self.is_turn = False
        self.winner = False

    def __str__(self):
        return f"{self.name} with the symbol {self.symbol}"


class Game(object):
    """Drives a console tic-tac-toe game between two players."""

    def __init__(self, board, player_1, player_2):
        self.board = board
        self.player_1 = player_1
        self.player_2 = player_2
        self.player_1.is_turn = True
        self.player_2.is_turn = False
        self.have_winner = False

    def play(self):
        """Main loop: alternate turns until a player wins or the board fills."""
        while not self.have_winner:
            print(self.board)
            self.give_turn()
            if self.check_winner():
                # BUG FIX: set_winner() was never called, so player_1.winner
                # stayed False and player 2 was always announced the winner.
                self.set_winner()
                if self.player_1.winner:
                    print(self.player_1, "won!")
                else:
                    print(self.player_2, "won!")
                self.have_winner = True
            elif self.board.is_full():
                # BUG FIX: a drawn game previously prompted for moves forever.
                print("It's a tie!")
                break
            self.change_turns()

    def change_turns(self):
        self.player_1.is_turn = not self.player_1.is_turn
        self.player_2.is_turn = not self.player_2.is_turn

    def give_turn(self):
        """Ask the current player for a cell number (1-9) and place their symbol."""
        message = "choose a number between 1 - 9"
        if self.player_1.is_turn:
            pos = int(input(f"{self.player_1.name}, {message} "))
        else:
            pos = int(input(f"{self.player_2.name}, {message} "))
        # Map the numpad-style cell number onto (row, col): 7 8 9 is row 0.
        row = 2 - int((pos - 1) / 3)
        col = int((pos - 1) % 3)
        if self.player_1.is_turn:
            self.board.insert_into_board(row, col, self.player_1.symbol)
        else:
            self.board.insert_into_board(row, col, self.player_2.symbol)

    def check_winner(self):
        """Return True when any row, column or diagonal holds a single symbol."""
        # Rows
        for row in self.board.game_board:
            rows = set([row[0], row[1], row[2]])
            if self.check_set(rows):
                return True
        # Columns
        for column in range(3):
            columns = set([self.board.game_board[0][column],
                           self.board.game_board[1][column],
                           self.board.game_board[2][column]])
            if self.check_set(columns):
                return True
        # Diagonals
        diagonal_1 = set([self.board.game_board[0][0],
                          self.board.game_board[1][1],
                          self.board.game_board[2][2]])
        diagonal_2 = set([self.board.game_board[0][2],
                          self.board.game_board[1][1],
                          self.board.game_board[2][0]])
        if self.check_set(diagonal_1) or self.check_set(diagonal_2):
            return True
        return False

    def check_set(self, some_set):
        # A line wins when all three cells collapse to one value; unclaimed
        # cells are distinct digits, so they can never match each other.
        return len(some_set) == 1

    def set_winner(self):
        # Called before change_turns(), so is_turn still marks the player
        # who just moved.
        if self.player_1.is_turn:
            self.player_1.winner = True
        else:
            self.player_2.winner = True


# Guarding the interactive part lets the classes be imported without
# starting a game; inside a notebook __name__ is "__main__", so behavior
# when running the notebook is unchanged.
if __name__ == "__main__":
    board = Board()
    player_1 = Player("Oscar", "O")
    player_2 = Player("Juan", "X")
    game = Game(board, player_1, player_2)
    game.play()
Modulo 5/Tic Tac Toe.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''dpbox2pg'': conda)'
#     name: python3710jvsc74a57bd0380b241f17d31af55f07b38cf1a031a0142f6a06d3c444464e37f58c8877c863
# ---

# Download an OHLC text file from Dropbox.
# The access token is read from a local .env file (DROPBOX_ACCESS_TOKEN)
# so the secret never lives in the notebook itself.

import os
import dropbox
from dotenv import load_dotenv

load_dotenv()
my_dropbox_token = os.getenv("DROPBOX_ACCESS_TOKEN")

dbx = dropbox.Dropbox(my_dropbox_token)
# Bare expression: displays the client object in the notebook output.
dbx

# Scratch cell from an earlier attempt: it only creates/truncates
# localGC.txt and prints the (still empty) file handle — the actual
# download-and-write lines are commented out.
with open("localGC.txt", "wb") as f:
    print(f)
    #metadata, res =dbx.files_download(path="/Y_program/Y_txt_data180_ohlc_files/GC_ohlc.txt")
    #f.write(res.content)

# FIX: dropped the f-string prefix (the literal has no placeholders) and
# stopped rebinding `f`, which shadowed the closed file handle above.
# files_download returns (FileMetadata, requests.Response).
file_to_read = "/Y_program/Y_txt_data180_ohlc_files/GC_ohlc.txt"
_, res = dbx.files_download(file_to_read)
db2pg_direct.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Targeted adversarial-noise experiment on MNIST: load a pretrained CNN from
# 'model.pkl', then optimize an additive noise image so that image+noise is
# classified as a chosen target digit while keeping the noise norm small.
# NOTE(review): make_noise() calls .cuda() unconditionally — a CUDA GPU is
# required to run the attack cells.

from torch.autograd import Variable
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch.autograd import Variable  # NOTE(review): duplicate import of Variable
import matplotlib.pyplot as plt

class CNN(nn.Module):
    """Four conv blocks (two with 2x2 max-pooling) plus a 2-layer MLP head.

    Input is a 1x28x28 image; 28 -> 26 -> 24 -> pool 12 -> 10 -> 8 -> pool 4,
    so the flattened feature size is 128*4*4. Output is 10 class logits.
    """
    def __init__(self):
        super(CNN,self).__init__()
        self.layer1=nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3),nn.BatchNorm2d(16),
            nn.ReLU(inplace=True)
        )
        self.layer2=nn.Sequential(
            nn.Conv2d(16,32,kernel_size=3),nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,stride=2)
        )
        self.layer3=nn.Sequential(
            nn.Conv2d(32,64,kernel_size=3),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True)
        )
        self.layer4=nn.Sequential(
            nn.Conv2d(64,128,kernel_size=3),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2,stride=2)
        )
        self.fc=nn.Sequential(
            nn.Linear(128*4*4,256),
            nn.ReLU(inplace=True),
            nn.Linear(256,10)
        )
    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # x = self.layer5(x)
        x = x.view(x.size(0),-1)
        x=self.fc(x)
        return x

# Hyperparameters shared by evaluation and the adversarial optimizer.
batch_size=64
learning_rate=1e-2
num_epoches=20

data_tf = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize([0.5], [0.5])]
)

train_dataset=datasets.MNIST(root='./data',train=True,transform=data_tf, download=True)
test_dataset=datasets.MNIST(root='./data',train=False,transform=data_tf)
train_loader=DataLoader(train_dataset,batch_size=batch_size,shuffle=True)
test_loader=DataLoader(test_dataset,batch_size=batch_size,shuffle=False)

# Loads a fully pickled model object (not a state_dict); 'model.pkl' must
# exist next to the notebook.
model=torch.load('model.pkl')
criterion=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=learning_rate)

# +
def test():
    """Evaluate the loaded model on the MNIST test set and print loss/accuracy."""
    # model.eval()
    eval_loss = 0
    eval_acc = 0
    for data in test_loader:
        img, label = data
        # img = img.view(img.size(0), -1)
        if torch.cuda.is_available():
            img = img.cuda()
            label = label.cuda()
            # out = model(img)
        out = model(img)
        loss = criterion(out, label)
        # Weight batch loss by batch size so the final division gives a mean.
        eval_loss += loss.data.item() * label.size(0)
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum()
        eval_acc += num_correct.item()
    print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
        eval_loss / (len(test_dataset)),
        eval_acc / (len(test_dataset))
    ))
# -

test()

def make_noise(input_data,aimtarget,epoch):
    """Optimize additive noise so model(data + noise) predicts `aimtarget`.

    Only `noise` is passed to the optimizer, so the model weights are not
    updated. The loss adds noise.norm()**1.8 to keep the perturbation small.
    Assumes input_data holds raw pixel bytes in [0, 255] — hence the 1/255
    scaling (TODO confirm against train_dataset.train_data).
    """
    noise=torch.rand(1,1,28,28).float().cuda()
    data=(1/255)*input_data.float().reshape(1,1,28,28).cuda()
    target=torch.tensor(aimtarget).unsqueeze(0).long().cuda()
    noise.requires_grad=True
    advoptimizer=optim.SGD([noise],lr=learning_rate)
    for i in range(epoch):
        img=data+noise
        ans=model(img)
        loss=criterion(ans,target)+noise.norm()**(1.8)
        advoptimizer.zero_grad()
        loss.backward()
        advoptimizer.step()
    return noise

# Single-image demo: push training image #1 toward the target class 5.
data1=train_dataset.train_data[1]
noise_1=make_noise(data1,5,100)
ans=model(noise_1+(1/255)*data1.float().reshape(1,1,28,28).cuda())
# print('the norm of noise_1 is{}'.format(noise_1.norm())
print(noise_1.norm())
print('aimtarget:{}'.format(5))
print('result target:{}'.format(torch.max(ans,1)[1].item()))

# Side-by-side: original digit, learned noise, and the perturbed image.
plt.subplot(131)
plt.imshow(data1.reshape(28,28),cmap='gray')
plt.subplot(132)
plt.imshow(noise_1.cpu().detach().numpy().reshape(28,28),cmap='gray')
plt.subplot(133)
plt.imshow(noise_1.cpu().detach().numpy().reshape(28,28)+data1.reshape(28,28),cmap='gray')

# Attack success rate over the first 1000 training images, target class 7.
totalnorm=0
acc=0
for i in range(1000):
    data=train_dataset.train_data[i]
    noise=make_noise(data,7,300)
    ans=model(noise+(1/255)*data.float().reshape(1,1,28,28).cuda())
    if torch.max(ans,1)[1].item()==7:
        acc+=1
        totalnorm+=noise.norm()

# NOTE(review): the /200 prints look like leftovers from an earlier
# 200-sample run; the /1000 prints match the loop above.
print(totalnorm/200)
print(acc/200)

print(totalnorm/1000)
print(acc/1000)
adversarial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Machine Learning Nanodegree
# ## Supervised Learning
# ## Project 2: Finding Donors for *CharityML*

# Welcome to the second project of the Machine Learning Engineer Nanodegree!
# Some template code is already provided, but more functionality must be
# implemented for the project to run. Sections titled **'Exercise'** contain
# code you must write; the required parts are marked 'TODO' in the comments.
# Every section titled **'Question X'** must be answered in the **'Answer'**
# box that follows it — the project is graded on both answers and code.
# >**Tip:** Code and Markdown cells run with **Shift + Enter**; double-click
# a Markdown cell to edit it.

# ## Getting Started
#
# In this project you will use data collected from the 1994 U.S. census and
# several supervised learning algorithms to model respondents' income, then
# pick the best candidate algorithm and tune it further. The goal is a model
# that accurately predicts whether someone makes more than $50,000 a year.
# This kind of task matters to non-profits that depend on donations: income
# suggests both how large a donation to ask for and whether to reach out at
# all. A person's income bracket is hard to find from public sources, but it
# can be inferred from other publicly available features, which is what we
# will do here.
#
# The dataset comes from the
# [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income),
# donated by Ron Kohavi and Barry Becker after publishing
# _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_
# (an [online version](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf) is
# available). The dataset explored here differs slightly from the original:
# the 'fnlwgt' feature and records with missing or malformed entries were
# removed.

# ----
# ## Exploring the Data
# Run the cell below to load the required Python libraries and the census
# data. The last column, `'income'`, is the prediction target (whether a
# respondent makes more than $50,000); every other column is a feature.

# +
# Import libraries needed for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display  # allows display() for DataFrames

# Project-supplied visualization helpers
import visuals as vs

# Prettier inline plots for the notebook
# %matplotlib inline

# Load the census data
data = pd.read_csv("census.csv")

# Success - display the first record
display(data.head(n=1))
# -

# ### Exercise: Data Exploration
# A first rough look at the dataset: how many respondents fall in each
# category, and what fraction earn more than $50,000? Compute:
#
# - total number of records, `'n_records'`
# - number making more than $50,000, `'n_greater_50k'`
# - number making at most $50,000, `'n_at_most_50k'`
# - percentage making more than $50,000, `'greater_percent'`
#
# **Tip:** check the table above to see how `'income'` entries are formatted.

# +
# TODO: total number of records
# FIX: DataFrame.count() returns a per-column Series; use a scalar row count.
n_records = len(data)

# TODO: respondents making more than $50,000
n_greater_50k = len(data[data['income'] == '>50K'])

# TODO: respondents making at most $50,000
n_at_most_50k = len(data[data['income'] == '<=50K'])

# TODO: percentage of respondents making more than $50,000
# FIX: previously computed from n_at_most_50k (the wrong group) and without
# the *100, although it is printed with '{:.2f}%'.
greater_percent = (n_greater_50k / n_records) * 100

# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent))
# -

# ----
# ## Preparing the Data
# Before data can be fed to a learning algorithm it usually needs cleaning,
# formatting and restructuring — **preprocessing**. This dataset has no
# invalid or missing entries, but some features must still be adjusted, and
# doing so greatly helps almost every learning algorithm.

# ### Features and target
# The `income` column is the label (whether yearly income exceeds 50K), so
# split it off from the features and store it separately.

# Split the data into features and the target label
income_raw = data['income']
features_raw = data.drop('income', axis=1)

# ### Transforming Skewed Continuous Features
#
# A dataset may contain features whose values cluster around one number but
# also include a few extreme values. Algorithms can be sensitive to such
# distributions and perform poorly unless the feature is normalized. Two
# features in the census data fit this description: `'capital-gain'` and
# `'capital-loss'`.
#
# Run the cell below to plot both features; note the range of values and how
# they are distributed.

# Visualize the 'capital-gain' and 'capital-loss' features
vs.distribution(features_raw)

# For highly skewed features such as `'capital-gain'` and `'capital-loss'`,
# it is common to apply a
# <a href="https://en.wikipedia.org/wiki/Data_transformation_(statistics)">logarithmic transform</a>
# so that very large and very small values do not negatively affect the
# learner, and the impact of outliers is greatly reduced. Care is needed:
# log(0) is undefined, so values are shifted slightly above 0 first.
#
# Run the cell below to apply the transform and visualize the result.

# +
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_raw[skewed] = data[skewed].apply(lambda x: np.log(x + 1))

# Visualize the transformed 'capital-gain' and 'capital-loss' features
vs.distribution(features_raw, transformed=True)
# -

# ### Normalizing Numerical Features
# Besides transforming skewed features, it is good practice to scale the
# numerical features. Scaling does not change the shape of each feature's
# distribution, but guarantees every feature is treated equally by a
# supervised learner. Note that once scaled, the data is no longer in its
# original units, as the example below shows.
#
# We use [`sklearn.preprocessing.MinMaxScaler`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html).

# +
from sklearn.preprocessing import MinMaxScaler

# Initialize a scaler and apply it to the numerical features.
scaler = MinMaxScaler()
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
# FIX: scale the (log-transformed) features_raw, not the raw `data` —
# scaling data[numerical] silently discarded the log transform applied
# above for 'capital-gain' and 'capital-loss'.
features_raw[numerical] = scaler.fit_transform(features_raw[numerical])

# Show an example of a scaled record
display(features_raw.head(n=1))
# -

# ### Exercise: Data Preprocessing
#
# The data exploration above shows several non-numeric columns. Learning
# algorithms expect numeric input, so these categorical variables must be
# converted — commonly with **one-hot encoding**, which creates one "dummy"
# variable per possible category. For example, `someFeature` with possible
# values `A`, `B`, `C` becomes `someFeature_A`, `someFeature_B`,
# `someFeature_C`:
#
# | Feature X |  | Feature X_A | Feature X_B | Feature X_C |
# | :-: |  | :-: | :-: | :-: |
# |  B  |  | 0 | 1 | 0 |
# |  C  | ----> one-hot encode ----> | 0 | 0 | 1 |
# |  A  |  | 1 | 0 | 0 |
#
# The target `'income'` is also non-numeric, but has only two categories
# ("<=50K" and ">50K"), so no one-hot encoding is needed: simply map them
# to the classes `0` and `1`. In the cell below:
# - one-hot encode `'features_raw'` with
#   [`pandas.get_dummies()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html)
# - encode `'income_raw'`: "<=50K" -> `0`, ">50K" -> `1`

# +
# TODO: one-hot encode 'features_raw' with pandas.get_dummies()
features = pd.get_dummies(features_raw)

# TODO: encode 'income_raw' to numeric values
income = income_raw.replace({'<=50K': 0, '>50K': 1})

# Print the number of features after one-hot encoding
encoded = list(features.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))

# Show the encoded feature names
print(encoded)
# -

# ### Shuffle and Split Data
# With all categorical variables converted and all numerical features
# normalized, split the data (features and labels) 80/20 into training and
# testing sets, then split the training data again into training and
# validation sets used for model selection and tuning.
#
# Run the cell below to perform the splits.

# +
from sklearn.model_selection import train_test_split

# Split 'features' and 'income' into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, income, test_size=0.2, random_state=0,
                                                    stratify=income)

# Further split 'X_train'/'y_train' into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=0,
                                                  stratify=y_train)

# Show the split sizes
print("Training set has {} samples.".format(X_train.shape[0]))
print("Validation set has {} samples.".format(X_val.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
# -

# ----
# ## Evaluating Model Performance
# This section tries four different algorithms and determines which models
# the data best: a *naive predictor* and three supervised learners of your
# choice.

# ### Metrics and the Naive Predictor
# CharityML's research shows that respondents making more than \$50,000 are
# the most likely donors, so accurately predicting that group matters most
# and **accuracy** is a reasonable metric. However, flagging someone who
# does *not* make more than \$50,000 as a donor is harmful to CharityML,
# so precise prediction of the >\$50,000 group matters *more* than recalling
# all of them. The **F-beta score** captures both:
#
# $$ F_{\beta} = (1 + \beta^2) \cdot \frac{precision \cdot recall}{\left( \beta^2 \cdot precision \right) + recall} $$
#
# With $\beta = 0.5$ precision is weighted more heavily — the
# **F$_{0.5}$ score** (called the F-score below for brevity).

# ### Question 1 - Naive Predictor Performance
#
# Most respondents do not make more than \$50,000, so always predicting
# *"this person makes at most \$50,000"* already beats 50% accuracy without
# ever looking at the data — a *naive* prediction. A naive predictor is an
# important baseline for judging whether a model performs well. Compute its
# metrics below and store them in `'accuracy'`, `'precision'`, `'recall'`
# and `'fscore'` — implement the formulas yourself, without scikit-learn.
#
# *What are the accuracy, precision, recall and F-score on the validation
# set of a model that always predicts income above \$50,000?*

income[income == 0].count()

# +
# Do not use scikit-learn here — implement the formulas yourself.
# Note: stratified splitting keeps the class balance of `income` identical
# in the validation set, so the full-data proportions used below match it.

# TODO: accuracy
# FIX: the previous formula, (positives + negatives) / total, is always 1.
# For an always-positive predictor, accuracy = TP / total.
accuracy = income[income == 1].count() / income.count()

# TODO: precision = TP / (TP + FP); every sample is predicted positive.
precision = income[income == 1].count() / (income[income == 1].count() + income[income == 0].count())

# TODO: recall = TP / (TP + FN); no positives are ever missed.
recall = income[income == 1].count() / income[income == 1].count()

# TODO: F-score from the formula above with beta = 0.5
fscore = (1 + 0.5 * 0.5) * precision * recall / ((0.5 * 0.5 * precision) + recall)

# Print the results
print("Naive Predictor on validation data: \n \
    Accuracy score: {:.4f} \n \
    Precision: {:.4f} \n \
    Recall: {:.4f} \n \
    F-score: {:.4f}".format(accuracy, precision, recall, fscore))
# -

# ## Supervised Learning Models
# ### Question 2 - Model Application
#
# Supervised models available in
# [`scikit-learn`](http://scikit-learn.org/stable/supervised_learning.html):
# - Gaussian Naive Bayes (GaussianNB)
# - Decision Trees
# - Ensemble methods (Bagging, AdaBoost, Random Forest, Gradient Boosting)
# - K-Nearest Neighbors
# - Stochastic Gradient Descent classifier (SGDC)
# - Support Vector Machines (SVM)
# - Logistic Regression
#
# Pick three models suited to this problem and answer the questions below.

# ### Model 1
#
# **Model name**
#
# Answer: AdaBoost (ensemble method)
#
# **A real-world application of this model (with a citation):**
#
# Answer: face detection
# (https://blog.csdn.net/mao19931004/article/details/49534583)
#
# **Strengths — when does it perform best?**
#
# Answer: it corrects its own errors, giving high classification accuracy;
# within the AdaBoost framework almost any classification or regression
# model can serve as the weak learner, which makes it very flexible; as a
# simple binary classifier it is easy to construct and its results are
# interpretable; it is not prone to overfitting.
#
# **Weaknesses — when does it perform poorly?**
#
# Answer: it is sensitive to anomalous samples — outliers can acquire large
# weights during boosting, hurting the final strong learner's accuracy.
#
# **Why does it suit this dataset?**
#
# Answer: the task (does income exceed 50K or not) is a binary
# classification problem, which AdaBoost handles well.

# ### Model 2
#
# **Model name**
#
# Answer: Logistic Regression
#
# **A real-world application of this model (with a citation):**
#
# Answer: epidemiology
# (https://blog.csdn.net/goodhuajun/article/details/39429445)
#
# **Strengths — when does it perform best?**
#
# Answer: simple to implement; classification is computationally cheap,
# fast, and uses little memory; it conveniently provides probability scores
# for each observation.
#
# **Weaknesses — when does it perform poorly?**
#
# Answer: performance degrades when the feature space is very large; it
# tends to underfit and accuracy is often modest; it only handles two-class
# problems directly (softmax extends it to multi-class) and requires the
# classes to be linearly separable; non-linear features must be transformed
# first.
#
# **Why does it suit this dataset?**
#
# Answer: this is exactly a binary classification problem, which logistic
# regression is designed for.

# ### Model 3
#
# **Model name**
#
# Answer: Gaussian Naive Bayes
#
# **A real-world application of this model (with a citation):**
#
# Answer: spam email filtering
# (http://www.ruanyifeng.com/blog/2011/08/bayesian_inference_part_two.html)
#
# **Strengths — when does it perform best?**
#
# Answer: it handles continuous data well, especially when features are
# (approximately) Gaussian. An alternative for continuous values is
# discretization, which works best with many samples; when training samples
# are few or the true distribution is known, the distribution-based approach
# is the better choice.
#
# **Weaknesses — when does it perform poorly?**
#
# Answer: because it relies on estimating probability distributions it is
# not well suited to very large datasets and easily underfits; it assumes
# features are mutually independent, which rarely holds in practice (one
# mitigation is clustering strongly correlated attributes first). With many
# or strongly correlated attributes it is outperformed by decision trees;
# it performs best when attribute correlations are small.
#
# **Why does it suit this dataset?**
#
# Answer: the attributes in this dataset are largely independent of one
# another, which matches the naive Bayes assumption.

# ### Exercise - Creating a Training and Predicting Pipeline
# To evaluate each chosen model properly, build a pipeline that trains on
# varying training-set sizes and evaluates on the validation set. The
# function implemented here is used in the next section. In the cell below:
#
# - import `fbeta_score` and `accuracy_score` from
#   [`sklearn.metrics`](http://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics)
# - fit the learner to the training data and record the training time
# - predict on the first 300 training points and on the validation set,
#   recording the prediction time
# - compute accuracy and F-score on the first 300 training points
# - compute accuracy and F-score on the validation set

# +
# TODO: import the two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score, accuracy_score

def train_predict(learner, sample_size, X_train, y_train, X_val, y_val):
    '''
    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_val: features validation set
       - y_val: income validation set
    '''

    results = {}

    # TODO: fit the learner to the first 'sample_size' training points.
    # FIX: the slices previously used sample_size-1 and 300-1, silently
    # dropping one sample each — Python slice ends are already exclusive.
    start = time()  # program start time
    learner.fit(X_train[:sample_size], y_train[:sample_size])
    end = time()  # program end time

    # TODO: training time
    results['train_time'] = end - start

    # TODO: predictions on the validation set and the first 300 training points
    start = time()  # program start time
    predictions_val = learner.predict(X_val)
    predictions_train = learner.predict(X_train[:300])
    end = time()  # program end time

    # TODO: prediction time
    results['pred_time'] = end - start

    # TODO: accuracy on the first 300 training points
    results['acc_train'] = accuracy_score(y_train[:300], predictions_train)

    # TODO: accuracy on the validation set
    results['acc_val'] = accuracy_score(y_val, predictions_val)

    # TODO: F-score on the first 300 training points
    # FIX: beta was 1; the project's metric is F0.5 throughout.
    results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=0.5)

    # TODO: F-score on the validation set
    results['f_val'] = fbeta_score(y_val, predictions_val, beta=0.5)

    # Success
    print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
    #print ("trained time{}, predict time{}, acc_train{}, acc_val{}, f_train{}, f_val{}.".format(results['train_time'],results['pred_time'], results['acc_train'],results['acc_val'],results['f_train'], results['f_val']))

    # Return the results
    return results
# -

# ### Exercise: Initial Model Evaluation
# In the cell below:
# - import the three supervised models discussed above
# - initialize them as `'clf_A'`, `'clf_B'`, `'clf_C'`
#   - use default parameter values (tuning happens in a later section)
#   - set `random_state` where the model has one
# - compute the number of points for 1%, 10% and 100% of the training data
#   and store them in `'samples_1'`, `'samples_10'`, `'samples_100'`
#
# **Note:** depending on the algorithms chosen, this may take a while!

# +
# TODO: import the three supervised models from sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB

# TODO: initialize the three models
clf_A = AdaBoostClassifier(random_state=5)  # base_estimator = DecisionTreeClassifier(max_depth=2), n_estimators = 4
clf_B = GaussianNB()
clf_C = LogisticRegression(random_state=5)

# TODO: number of points for 1%, 10% and 100% of the training data
samples_1 = int(X_train.shape[0] * 0.01)
samples_10 = int(X_train.shape[0] * 0.1)
samples_100 = X_train.shape[0]

# Collect the learners' results
results = {}
for clf in [clf_A, clf_B, clf_C]:
    clf_name = clf.__class__.__name__
    results[clf_name] = {}
    for i, samples in enumerate([samples_1, samples_10, samples_100]):
        results[clf_name][i] = train_predict(clf, samples, X_train, y_train, X_val, y_val)

# Visualize the evaluation results for the three chosen models
vs.evaluate(results, accuracy, fscore)
# -

# ----
# ## Improving Results
#
# In this final section, choose the *best* of the three supervised models,
# then tune it with a grid search over the whole training set (`X_train`,
# `y_train`), optimizing at least one parameter to beat the untuned model's
# F-score.

# ### Question 3 - Choosing the Best Model
#
# *Based on the evaluation above, explain to CharityML in one or two
# paragraphs which of the three models is most appropriate for predicting
# whether a respondent makes more than \$50,000.*
# **Tip:** cover the metrics, the training/prediction times, and whether the
# algorithm suits this data.

# **Answer:**
# AdaBoost fits this task best. Its training and prediction times are the
# highest of the three, but all stay within roughly 10 seconds, which is
# acceptable — and its accuracy and F-score are the highest of the three
# models.

# ### Question 4 - Describing the Model in Layman's Terms
#
# *In one or two paragraphs, explain to CharityML how the final model works
# in terms a layperson can follow: how it is trained and how it makes
# predictions. Avoid advanced mathematics, formulas, and algorithm jargon.*

# **Answer:**
# In plain terms, AdaBoost keeps trying, learning from its mistakes and
# making them more costly. At the start every training point gets a small
# weight. Each round, a simple classifier splits the points into "got right"
# and "got wrong": points it got right have their weight shrunk (multiplied
# by a small number), points it got wrong have their weight grown
# (multiplied by a larger number). This repeats, accumulating each point's
# weight from round to round, until the maximum number of rounds is reached
# or the error becomes small enough; the final prediction combines all the
# rounds.

# ### Exercise: Model Tuning
# Tune the chosen model's parameters with grid search
# ([`sklearn.model_selection.GridSearchCV`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)),
# adjusting at least one important parameter over at least 3 values, using
# the whole training set. In the cell below:
#
# - import `GridSearchCV` and
#   [`sklearn.metrics.make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html)
# - initialize the chosen classifier as `clf` (set `random_state` if it has one)
# - create a dictionary of parameters to tune, e.g.
#   `parameters = {'parameter': [list of values]}`
#   - **Note:** do not tune `max_features` if the learner has it!
# - build an `fbeta_score` scoring object with `make_scorer` ($\beta = 0.5$)
# - run the grid search on `clf` with `'scorer'`, storing it in `grid_obj`
# - fit the grid search object on (X_train, y_train), storing it in `grid_fit`
#
# **Note:** depending on the parameter list this may take a while to run!

# +
# TODO: import 'GridSearchCV', 'make_scorer' and any other needed libraries
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeClassifier

# TODO: initialize the classifier
clf = AdaBoostClassifier(random_state=5)

# TODO: the parameter list to tune
parameters = {'n_estimators': [50, 100, 200, 400]}

# TODO: build an fbeta_score scoring object
scorer = make_scorer(fbeta_score, beta=0.5)

# TODO: grid search on the classifier using 'scorer' as the scoring function
grid_obj = GridSearchCV(clf, parameters, scoring=scorer)

# TODO: fit the grid search object on the training data to find the best parameters
grid_obj.fit(X_train, y_train)

# Get the best estimator
best_clf = grid_obj.best_estimator_

# Predictions from the untuned model
predictions = (clf.fit(X_train, y_train)).predict(X_val)
best_predictions = best_clf.predict(X_val)

# Report the tuned model
print("best_clf\n------")
print(best_clf)

# Report the before-and-after scores
print("\nUnoptimized model\n------")
print("Accuracy score on validation data: {:.4f}".format(accuracy_score(y_val, predictions)))
print("F-score on validation data: {:.4f}".format(fbeta_score(y_val, predictions, beta=0.5)))
print("\nOptimized Model\n------")
print("Final accuracy score on the validation data: {:.4f}".format(accuracy_score(y_val, best_predictions)))
print("Final F-score on the validation data: {:.4f}".format(fbeta_score(y_val, best_predictions, beta=0.5)))
# -

# ### Question 5 - Final Model Evaluation
#
# _What are the optimized model's accuracy and F-score on the validation
# data? Are they better or worse than the unoptimized model's?_
# **Note:** fill in the table below, then discuss in the answer box.

# #### Results:
#
# | Metric   | Unoptimized model | Optimized model |
# | :------: | :---------------: | :-------------: |
# | Accuracy | 0.8648            | 0.8740          |
# | F-score  | 0.7443            | 0.7595          |

# **Answer:**
# The optimized model reaches 0.8740 accuracy and 0.7595 F-score on the
# validation data — better than the unoptimized model, and far better than
# the naive predictor.

# ----
# ## Feature Importance
#
# An important task when applying supervised learning to data like this
# census set is determining which features carry the most predictive power.
# Focusing on a few strong features simplifies understanding of the
# phenomenon. Here we want a small set of features that strongly predict
# whether income exceeds \$50,000.
#
# Choose a scikit-learn classifier with a `'feature_importances_'` attribute
# (e.g. AdaBoost, Random Forest), fit it on the training data, and use the
# attribute to find the five most important census features.

# ### Question 6 - Feature Relevance Observation
#
# The census data has thirteen available features per record.
# _Which five do you believe matter most for prediction, why, and how would
# you rank them?_

# **Answer:**
# - Feature 1: capital-gain — capital gains directly determine income
# - Feature 2: workclass — different working classes earn differently
# - Feature 3: education_level — education strongly shapes the jobs and
#   status available
# - Feature 4: occupation — the position held relates to income
# - Feature 5: capital-loss — capital losses determine how much is lost

# ### Exercise - Extracting Feature Importance
#
# Pick a `scikit-learn` model with a `feature_importances_` attribute, fit
# it on the whole training set, and extract the importances. In the cell
# below:
# - import a supervised model (if different from the three used earlier)
# - train it on the whole training set
# - extract `'feature_importances_'`

# +
# TODO: import a supervised model with 'feature_importances_'
# TODO: train it on the training set
# The AdaBoost classifier was already fitted on (X_train, y_train) in the
# tuning cell above, so it is reused here.
model = clf

# TODO: extract the feature importances
importances = model.feature_importances_

# Plot
vs.feature_plot(importances, X_train, y_train)
# -

# ### Question 7 - Extracting Feature Importance
# Look at the visualization above of the five features most relevant for
# predicting whether income exceeds \$50,000.
#
# _Do their weights sum to more than 0.5?_<br>
# _How do they compare to your picks in **Question 6**?_<br>
# _If they are close, how does the plot support your reasoning? If not, why
# might these features be more relevant?_

# **Answer:**
# 0.18 + 0.16 + 0.12 + 0.06 + 0.04 = 0.52, so the weights do exceed 0.5.
# The five features partly overlap with my Question 6 picks. Where they
# differ: age and hours-per-week likely carry high weight because older
# people have accumulated more wealth, and working longer hours raises pay.

# ### Feature Selection
#
# How would the model perform on only a subset of the features? Training on
# fewer features should reduce training and prediction time. The plot above
# shows the top five features contribute more than half of the total
# importance, suggesting we can **reduce the feature space** and simplify
# what the model must learn. The cell below retrains the optimized model on
# the same training set using **only the five most important features**.

# +
# Import model-cloning functionality
from sklearn.base import clone

# Reduce the feature space to the five most important features
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_val_reduced = X_val[X_val.columns.values[(np.argsort(importances)[::-1])[:5]]]

# Train the "best" model found by grid search on the reduced data
clf_on_reduced = (clone(best_clf)).fit(X_train_reduced, y_train)

# Make new predictions
reduced_predictions = clf_on_reduced.predict(X_val_reduced)

# Report the final model's scores on both versions of the data
print("Final Model trained on full data\n------")
print("Accuracy on validation data: {:.4f}".format(accuracy_score(y_val, best_predictions)))
print("F-score on validation data: {:.4f}".format(fbeta_score(y_val, best_predictions, beta=0.5)))
print("\nFinal Model trained on reduced data\n------")
print("Accuracy on validation data: {:.4f}".format(accuracy_score(y_val, reduced_predictions)))
print("F-score on validation data: {:.4f}".format(fbeta_score(y_val, reduced_predictions, beta=0.5)))
# -

# ### Question 8 - Effects of Feature Selection
#
# *How do the final model's F-score and accuracy on the five-feature data
# compare to the all-features results?*
# *If training time mattered, would you consider using the reduced data?*

# **Answer:**
# With only five features the F-score and accuracy (0.7176, 0.8447) are both
# lower than with all features (0.7595, 0.8740).
# I would still consider it, because training time shrinks substantially.

# ### Question 9 - Testing the Model on the Test Set
#
# Finally, testing time — remember the test set may be used only once.
#
# *Using the model you are most confident in, compute accuracy and F-score
# on the test set.*
# *Briefly state why you chose this model and analyze the results.*

# **Answer:**
# *AdaBoost was chosen mainly because it is reasonably fast, highly
# accurate, and not prone to overfitting.*

# TODO: test your model on testing data and report accuracy and F score
y_predict = best_clf.predict(X_test)
acc_test = accuracy_score(y_test, y_predict)
f_test = fbeta_score(y_test, y_predict, beta=0.5)
print("test accuracy is {}, F-score is {}".format(acc_test, f_test))

# > **Note:** once all code is written and all questions answered, export
# the iPython Notebook as an HTML file via
# **File -> Download as -> HTML (.html)** and submit both the HTML and the
# notebook as your project.
finding_donors/finding_donors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spam email filtering using Naive Bayes Classifier

# Importing pandas - a Python data analysis library. Used here for data loading and processing.

import pandas as pd
import numpy as np
import re

# +
# importing data
data = pd.read_csv('emails_dataset.csv', encoding="unicode_escape")

# display the first few entries
data.head()
# -

data = data[['label', 'text']]
data.head()

type(data)

# to display the dataset details
data.describe()

# to display the spam and ham count
data.label.value_counts()


# +
# Data cleaning using a regular expression to match words only
def clean_data(email):
    """Lower-case *email* and return its alphabetic words joined by spaces.

    The negative lookbehind drops any word ending in "subject" (the mail
    header prefix present in every message of this dataset).
    """
    return " ".join(re.findall(r"\b[a-zA-Z]+\b(?<!subject)", email.lower()))


data['text'] = data['text'].apply(lambda x: clean_data(x))
data.head()
# -

from sklearn.model_selection import train_test_split

X, Y = data['text'], data['label']
# NOTE: train_test_split shuffles by default, so X_train/Y_train carry the
# original (now scrambled) index labels of `data`.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=25)

from sklearn.feature_extraction.text import TfidfVectorizer

vectorizer = TfidfVectorizer(stop_words='english')
vectorized_data = vectorizer.fit_transform(x for x in X_train)
# Rows of this frame are in X_train's order, re-indexed 0..n-1.
vectorized_data = pd.DataFrame(vectorized_data.toarray())
vectorized_data.head()

# +
# Setting the column names as word tokens.
# (get_feature_names() was deprecated and then removed in scikit-learn 1.2;
# get_feature_names_out() returns the same vocabulary.  The `inplace` keyword
# of set_axis was likewise removed in pandas 2.0 — the default already copies.)
tfidf_tokens = vectorizer.get_feature_names_out()
vectorized_data = vectorized_data.set_axis(tfidf_tokens, axis=1)
vectorized_data.head()

# +
# Appending the label of the corresponding TRAINING row to each vector.
# BUGFIX: previously this assigned `data['label']`, which aligns on the fresh
# RangeIndex and therefore attaches the label of data row i to vector row i —
# but vector row i actually corresponds to the *shuffled* X_train.iloc[i].
# Align positionally with Y_train instead so each vector gets its own label.
vectorized_data['label'] = Y_train.reset_index(drop=True)
vectorized_data.head()

# +
# Summing up the likelihood of each token per class
p_dist = vectorized_data.groupby('label').sum()
p_dist.head()

# +
# Add-one smoothing so unseen tokens never multiply a score by 0
p_dist += 1
p_dist.head()

# +
# Scale each class row by its maximum value (label-based indexing replaces
# the deprecated positional Series[int] lookups on a string index).
# NOTE(review): this scales to (0, 1] per row; it is not a true probability
# normalization, but relative class scores are what matters below.
p_dist.loc['ham'] = p_dist.loc['ham'] / p_dist.max(axis=1).loc['ham']
p_dist.loc['spam'] = p_dist.loc['spam'] / p_dist.max(axis=1).loc['spam']

# +
# Display normalized values
p_dist.head()

# +
# finding the prior likelihood of ham and spam emails in the given dataset
p_ham = (data['label'] == 'ham').sum() / data.shape[0]
p_spam = (data['label'] == 'spam').sum() / data.shape[0]
print(p_ham, p_spam)


# +
# Defining the Naive Bayes function to calculate the chance of a given input
# text being spam and ham.
def naive_bayes(p_dist, email, p_ham, p_spam):
    """Return (ham score, spam score) for *email* under the trained model.

    NOTE(review): the token regex here differs from clean_data()'s training
    regex and is case-sensitive, so capitalized words never match the
    lower-cased vocabulary.  Kept as-is to preserve behaviour.
    """
    tokens = re.findall(r"\w[a-zA-Z]+", email)  # cleaning the input email
    ham_prob, spam_prob = p_ham, p_spam
    for token in tokens:
        if token in p_dist:
            ham_prob = ham_prob * p_dist[token].loc['ham']
            spam_prob = spam_prob * p_dist[token].loc['spam']
    return ham_prob, spam_prob


# -

test_set = pd.DataFrame([X_test, Y_test]).transpose()
test_set.head()


def prediction_accuracy(p_dist, test_set, p_ham, p_spam):
    """Score every row of *test_set*; return (accuracy %, TP, TN, FP, FN).

    "Positive" means spam; ties (spam_score == ham_score) count as ham.
    """
    predicted_correct = 0
    TP, TN, FP, FN = 0, 0, 0, 0
    for index, row in test_set.iterrows():
        ham_score, spam_score = naive_bayes(p_dist, row['text'], p_ham, p_spam)
        if (spam_score > ham_score):
            if row['label'] == 'spam':
                TP += 1
                predicted_correct += 1
            else:
                FP += 1
        else:
            if row['label'] == 'ham':
                TN += 1
                predicted_correct += 1
            else:
                FN += 1
    accuracy = (predicted_correct / test_set.shape[0]) * 100
    return accuracy, TP, TN, FP, FN


prediction_results = prediction_accuracy(p_dist, test_set, p_ham, p_spam)

print(f'Accuracy: {prediction_results[0]:.2f}%')
print("Confusion Matrix")
print(' Positive Negative')
print(f'Positive {prediction_results[1]} {prediction_results[3]}')
print(f'Negative {prediction_results[4]} {prediction_results[2]}')

naive_bayes(p_dist, "How is it there?", p_ham, p_spam)
spam or ham (kaggle dataset).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + cell_id="dccd667e-c449-4c17-a45a-e938bc81e043" deepnote_cell_type="code" deepnote_cell_height=156 import pandas as pd pd.set_option('max_columns', None) import numpy as np import json import seaborn as sns import matplotlib.pyplot as plt # + cell_id="00001-624b674d-e610-45df-987e-2c26ae20f7ba" deepnote_cell_type="code" deepnote_cell_height=948 one_race_cols = {"Driver": "Driver", "WINS": "Win", "AS": "Start", "AMR": "Mid Race", "AF": "Finish", "AP": "Average Position", "PD": "Pass Differential", "GP": "Green Passes", "GPD": "Green Passed", "QP": "Quality Passes", "%QP": "% Quality Passes", "#FL": "Fastest Laps", "LTOP15": "Laps in Top 15", "%LTOP15": "% Laps in Top 15", "LL": "Laps Led", "%LL": "% Laps Led", "TL": "Total Laps", "DR": "Driver Rating", "PTS": "Points", "RACES": "Races"} multi_race_cols = {"Driver": "Driver", "WINS": "Wins", "AS": "Average Start", "AMR": "Average Mid Race", "AF": "Average Finish", "AP": "Average Position", "PD": "Pass Differential", "GP": "Green Passes", "GPD": "Green Passed", "QP": "Quality Passes", "%QP": "% Quality Passes", "#FL": "Fastest Laps", "LTOP15": "Laps in Top 15", "%LTOP15": "% Laps in Top 15", "LL": "Laps Led", "%LL": "% Laps Led", "TL": "Total Laps", "DR": "Driver Rating", "PTS": "Points", "RACES": "Races"} car_nums = {"<NAME>": 16, "<NAME>": 10, "<NAME>": 20, "<NAME>": 44, "<NAME>": 77, "<NAME>": 12, "<NAME>": 48, "<NAME>": 14, "<NAME>": 17, "<NAME>": 21, "<NAME>": 45, "<NAME>": 18, "<NAME>": 24, "<NAME>": 77, "<NAME>": 1, "<NAME>": 2, "<NAME>": 41, "<NAME>": 3, "<NAME>": 42, "<NAME>": 9, "<NAME>": 38, "<NAME>": 16, "<NAME>": 50, "<NAME>": 11, "<NAME>": 15, "<NAME>": 4, "<NAME>": 31, "<NAME>": 16, "<NAME>": 27, "<NAME>": 20, "<NAME>": 6, "<NAME>": 7, "<NAME>": 5, "<NAME>": 22, "<NAME>": 78, 
"<NAME>": 34, "<NAME>": 15, "<NAME>": 15, "<NAME>": 8, "<NAME>": 66, "<NAME>": 15, "<NAME> Jr.": 47, "<NAME>": 99, "<NAME> Jr.": 19, "<NAME>": 27, "<NAME>": 23, "<NAME>": 51} teams = {"<NAME>": "Kaulig", "<NAME>": "SHR", "<NAME>": "JGR", "<NAME>": "NYR", "<NAME>": "Spire", "<NAME>": "Penske", "<NAME>": "Hendrick", "<NAME>": "SHR", "<NAME>": "RFK", "<NAME>": "WBR", "<NAME>": "23X1", "<NAME>": "JGR", "<NAME>": "Hendrick", "<NAME>": "Spire", "<NAME>": "Trackhouse", "<NAME>": "Penske", "<NAME>": "SHR", "<NAME>": "RCR", "<NAME>": "PGMS", "<NAME>": "Hendrick", "<NAME>": "FRM", "<NAME>": "Kaulig", "<NAME>": "TMT", "<NAME>": "JGR", "<NAME>": "RWR", "<NAME>": "SHR", "<NAME>": "Kaulig", "<NAME>": "Kaulig", "<NAME>": "Hezeberg", "<NAME>": "PGMS", "<NAME>": "RFK", "<NAME>": "Spire", "<NAME>": "Hendrick", "<NAME>": "Penske", "<NAME>": "LFM", "<NAME>": "FRM", "<NAME>": "RWR", "<NAME>": "RWR", "<NAME>": "RCR", "<NAME>": "MBM", "<NAME>": "RWR", "<NAME> Jr.": "JTGD", "<NAME>": "Trackhouse", "<NAME> Jr.": "JGR", "<NAME>": "Hezeberg", "<NAME>": "23X1", "<NAME>": "RWR"} makes = {"23X1": "Toyota", "Beard": "Chevy", "FRM": "Ford", "Hendrick": "Chevy", "Hezeberg": "Ford", "JGR": "Toyota", "JTGD": "Chevy", "Kaulig": "Chevy", "LFM": "Ford", "MBM": "Ford", "NYR": "Chevy", "Penske": "Ford", "PGMS": "Chevy", "RCR": "Chevy", "RFK": "Ford", "RWR": "Ford", "SHR": "Ford", "Spire": "Chevy", "TMT": "Chevy", "Trackhouse": "Chevy", "WBR": "Ford"} track_types = {"Auto Club": "Speedway", "Atlanta": "Superpeedway", "Bristol": "Short Track", "COTA": "Road Course", "Daytona": "Superspeedway", "Las Vegas": "Speedway", "Martinsville": "Short Track", "Phoenix": "Speedway", "Richmond": "Short Track", "Talladega": "Superspeedway"} cup_laps = {1: 201} # + cell_id="00002-0d4fbd9e-aec8-4d3f-ba0f-a18f34179ebe" deepnote_cell_type="code" deepnote_cell_height=396.5625 deepnote_output_heights=[null, 177] daytona1 = pd.read_excel('NASCARStatsHub.xlsx', sheet_name = 0) print(daytona1.columns) daytona1.head() # + 
# +
# Load the season stat sheets: sheet 0 = after race 1 (Daytona),
# sheet 1 = cumulative after race 2.
cup_1race = pd.read_excel('NASCARStatsHub.xlsx', sheet_name = 0)
cup_1race["RACES"] = 1
cup_2races = pd.read_excel('NASCARStatsHub.xlsx', sheet_name = 1)


# +
def add_RACES(prev_df, curr_df):
    # Add a per-driver "RACES" count to curr_df: a driver present in prev_df
    # whose total-laps ("TL") figure changed has run one more race; otherwise
    # (or if the driver is new) the count stays at 1.
    prev_df = prev_df.set_index('Driver')
    curr_df = curr_df.set_index('Driver')
    df = curr_df.copy()
    df["RACES"] = 1
    for driver in df.index:
        if driver in prev_df.index:
            # unchanged "TL" means the driver sat this race out
            if curr_df.loc[driver, "TL"] != prev_df.loc[driver, "TL"]:
                df.loc[driver, "RACES"] = prev_df.loc[driver, "RACES"] + 1
    df = df.reset_index(drop = False)
    return df


cup_2races = add_RACES(cup_1race, cup_2races)
cup_2races


# +
def multi_race_to_one(prev_df, curr_df):
    # Convert two cumulative season-to-date stat sheets into single-race stats
    # for the race between them: counters are differenced, averaged columns
    # are "un-averaged" via (average * race count) differences, and percentage
    # columns are recomputed from the single-race counters.  The result is
    # renamed with the single-race column names (one_race_cols).
    prev_df = prev_df.set_index('Driver')
    curr_df = curr_df.set_index('Driver')
    df = curr_df.copy()
    # zero the frame in place; values are filled per driver below
    for col in df.columns:
        df[col].values[:] = 0
    for driver in df.index:
        if driver in prev_df.index:
            df.loc[driver, "RACES"] = curr_df.loc[driver, "RACES"]
            # plain counters: this race = cumulative now minus cumulative before
            df.loc[driver, "WINS"] = curr_df.loc[driver, "WINS"] - prev_df.loc[driver, "WINS"]
            df.loc[driver, "PD"] = curr_df.loc[driver, "PD"] - prev_df.loc[driver, "PD"]
            df.loc[driver, "GP"] = curr_df.loc[driver, "GP"] - prev_df.loc[driver, "GP"]
            df.loc[driver, "GPD"] = curr_df.loc[driver, "GPD"] - prev_df.loc[driver, "GPD"]
            df.loc[driver, "QP"] = curr_df.loc[driver, "QP"] - prev_df.loc[driver, "QP"]
            df.loc[driver, "#FL"] = curr_df.loc[driver, "#FL"] - prev_df.loc[driver, "#FL"]
            df.loc[driver, "LTOP15"] = curr_df.loc[driver, "LTOP15"] - prev_df.loc[driver, "LTOP15"]
            df.loc[driver, "LL"] = curr_df.loc[driver, "LL"] - prev_df.loc[driver, "LL"]
            df.loc[driver, "TL"] = curr_df.loc[driver, "TL"] - prev_df.loc[driver, "TL"]
            df.loc[driver, "PTS"] = curr_df.loc[driver, "PTS"] - prev_df.loc[driver, "PTS"]
            # averaged columns: recover this race's value as the difference of
            # (average * races); valid when curr and prev differ by one race
            df.loc[driver, "AS"] = ((curr_df.loc[driver, "AS"] * curr_df.loc[driver, "RACES"]) - (prev_df.loc[driver, "AS"] * prev_df.loc[driver, "RACES"]))
            df.loc[driver, "AMR"] = ((curr_df.loc[driver, "AMR"] * curr_df.loc[driver, "RACES"]) - (prev_df.loc[driver, "AMR"] * prev_df.loc[driver, "RACES"]))
            df.loc[driver, "AF"] = ((curr_df.loc[driver, "AF"] * curr_df.loc[driver, "RACES"]) - (prev_df.loc[driver, "AF"] * prev_df.loc[driver, "RACES"]))
            df.loc[driver, "DR"] = ((curr_df.loc[driver, "DR"] * curr_df.loc[driver, "RACES"]) - (prev_df.loc[driver, "DR"] * prev_df.loc[driver, "RACES"]))
            # recompute the percentage columns from this race's counters
            if df.loc[driver, "LTOP15"] > 0:
                df.loc[driver, "%LTOP15"] = round((df.loc[driver, "LTOP15"] / df.loc[driver, "TL"]) * 100,1)
            if df.loc[driver, "LL"] > 0:
                df.loc[driver, "%LL"] = round((df.loc[driver, "LL"] / df.loc[driver, "TL"]) * 100,1)
            if df.loc[driver, "QP"] > 0:
                df.loc[driver, "%QP"] = round((df.loc[driver, "QP"] / df.loc[driver, "GP"]) * 100,1)
            if df.loc[driver, "TL"] > 0:
                # average position, weighted by laps run in this race
                df.loc[driver, "AP"] = round(((curr_df.loc[driver, "AP"]*curr_df.loc[driver, "TL"] - prev_df.loc[driver, "AP"]*prev_df.loc[driver, "TL"]) / df.loc[driver, "TL"]),1)
            else:
                # driver appears in both sheets but ran no laps this race
                df.drop(driver, inplace = True)
        else:
            # driver's first appearance: cumulative stats ARE this race's stats
            df.loc[driver] = curr_df.loc[driver]
    df = df.reset_index(drop = False)
    df = df.rename(columns = one_race_cols, errors = "raise")
    return df


autoclub = multi_race_to_one(cup_1race, cup_2races)
autoclub

# +
# Race 1 is already single-race data; just rename its columns.
daytona1 = cup_1race.rename(columns = one_race_cols, errors = "raise")
daytona1.head()


# +
def add_cols(df, track = "", racenum = 0):
    # Add metadata columns (car number, team, make, track info) and the
    # quality-passes-per-lap derived stat to a single-race frame, in place.
    # Relies on the module-level lookup dicts car_nums/teams/makes/track_types.
    df["Points Eligible"] = [1 if row > 0 else 0 for row in df["Points"]]
    df["Car #"] = df["Driver"].map(car_nums)
    df["Team"] = df["Driver"].map(teams)
    df["Make"] = df["Team"].map(makes)
    df["Race #"] = racenum
    df["Track"] = track
    df["Track Type"] = df["Track"].map(track_types)
    df["Quality Passes Per Lap"] = round(df["Quality Passes"] / df["Total Laps"], 3)


# +
add_cols(daytona1, "Daytona", 1)
daytona1


# +
def gragson_at_beard(df):
    # Fix for superspeedways; otherwise, it's assumed Gragson races for Kaulig in the 16
    for index, row in df.iterrows():
        if row["Driver"] == "<NAME>":
            if row["Track Type"] == "Superspeedway":
                df.loc[index, "Team"] = "Beard"
                df.loc[index, "Car #"] = 62
                print("{} raced for {} in the #{} car at {}.".format(df.loc[index, "Driver"], df.loc[index, "Team"], df.loc[index, "Car #"], df.loc[index, "Track"]))


# +
gragson_at_beard(daytona1)
daytona1


# +
def raw_laps_to_stats(json_file):
    # Takes lap-times.json file from Race Center, mapping lap stats to drivers
    # NOTE(review): the file handle is never closed; consider `with open(...)`.
    f = open(json_file)
    data = json.load(f)
    driver_stats = {}
    for i in data["laps"]:
        driver = i["FullName"]
        # strip Race Center name decorations: leading "* ", trailing " #"/" (x)"
        if driver.startswith("*") == True:
            driver = driver[2:]
        if driver.endswith("#") == True:
            driver = driver[:-2]
        elif driver.endswith(")") == True:
            driver = driver[:-3]
        # normalize name spellings to match the stats spreadsheet
        if driver == "<NAME>.":
            driver = "<NAME>"
        if driver == "<NAME>":
            driver = "<NAME>"
        lap_list = []
        #for j in i["Laps"]:
        #    lap_list.append(j["LapTime"])
        lap_list = [j["LapTime"] for j in i["Laps"]]
        lap_list = [i for i in lap_list if i]  # This removes the "none" values
        fast_lap = np.min(lap_list)
        median_lap = round(np.median(lap_list),3)
        # "full speed" = within 10% of the driver's own fastest lap
        full_speed_laps = []
        for lap in lap_list:
            if lap < fast_lap*1.1:
                full_speed_laps.append(lap)
        average_full_speed_lap = round(np.mean(full_speed_laps),3)
        lap_dict = {"Fastest Lap": fast_lap, "Median Lap": median_lap, "Average Full Speed Lap": average_full_speed_lap}
        driver_stats[driver] = lap_dict
    return driver_stats


# +
laps_daytona1_dict = raw_laps_to_stats("lap-times-daytona1.json")
laps_daytona1_dict


# +
def add_lap_dict_to_df(lap_dict, main_df):
    # Adds the driver-mapped lap stats to our main dataframe
    # NOTE(review): mutates the dicts in lap_dict by inserting a "Driver" key.
    rows = []
    for key, value in lap_dict.items():
        driver = key
        stats = value
        stats["Driver"] = driver
        rows.append(stats)
    lap_df = pd.DataFrame(rows)
    df = pd.merge(main_df, lap_df, on="Driver")
    return df


# +
daytona1 = add_lap_dict_to_df(laps_daytona1_dict, daytona1)
daytona1.head()

# +
daytona1 = daytona1.sort_values(by=["Finish"]).reset_index(drop = True)
daytona1.index = np.arange(1, len(daytona1) + 1)  # Makes index value equal to finish position
daytona1


# +
def add_lap_ranks(df):
    # Rank each lap-time stat across the field (1 = fastest); ties share rank.
    df["Fastest Lap Rank"] = df["Fastest Lap"].rank(method="min")
    df["Median Lap Rank"] = df["Median Lap"].rank(method="min")
    df["Average Full Speed Lap Rank"] = df["Average Full Speed Lap"].rank(method="min")
    return df


# +
daytona1 = add_lap_ranks(daytona1)
daytona1

# +
daytona1_contenders = daytona1[daytona1["Quality Passes"] >= 10]  # Gets rid of cars not part of draft for most of race
daytona1_contenders

# +
# Correlation of finish position against the pass/lap-time stats, full field
daytona1_varsofinterest = daytona1[["Finish", "Average Position", "Pass Differential", "Quality Passes", "% Quality Passes", "Quality Passes Per Lap", "Fastest Lap", "Median Lap", "Average Full Speed Lap"]]
daytona1_corr = daytona1_varsofinterest.corr()

# +
# Same correlation, contenders only
daytona1_contenders_varsofinterest = daytona1_contenders[["Finish", "Average Position", "Pass Differential", "Quality Passes", "% Quality Passes", "Quality Passes Per Lap", "Fastest Lap", "Median Lap", "Average Full Speed Lap"]]
daytona1_contenders_corr = daytona1_contenders_varsofinterest.corr()

# +
sns.set_theme(rc = {'figure.figsize':(12,12)})
heatmap_daytona1 = sns.heatmap(daytona1_corr, annot=True)
heatmap_daytona1.set_title("Correlation Matrix of 2022 Daytona 500 Stats (all 40)", fontsize=24)
plt.show()

# +
sns.set_theme(rc = {'figure.figsize':(12,12)})
heatmap_daytona1 = sns.heatmap(daytona1_contenders_corr, annot=True)
heatmap_daytona1.set_title("Correlation Matrix of 2022 Daytona 500 Stats (Contenders Only)", fontsize=24)
plt.show()

# +

# + [markdown] tags=[] created_in_deepnote_cell=true deepnote_cell_type="markdown"
# <a
style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=9ef4eb23-f38f-47e8-bfb0-51d8616b0dee' target="_blank"> # <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTE<KEY>MDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQ
xNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1IDkuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img> # Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
OLD_AnalysisData/OLD/NASCARStatsHub-First2Races.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Input URL

# This component reads a file from a HTTP(s) source via wget

# +
# !pip install wget==3.2

# +
import logging
import os
import re
import sys
import wget

# +
# Component parameters, taken from the environment with defaults.
# path and file name for output
output_data_csv = os.environ.get('output_data_csv', 'data.csv')
# url of source (no default: the component cannot run without it)
url = os.environ.get('url')
# temporal data storage for local execution
data_dir = os.environ.get('data_dir', '../../data/')

# +
# Allow overriding the parameters above via `name=value` command-line
# arguments: each matching argv entry is rewritten into a quoted Python
# assignment (`name="value"`) and executed.
# NOTE(review): exec() on argv-derived strings is code injection by design
# (the usual papermill/Elyra component convention); the regex restricts
# values to [./A-Za-z0-9], which limits but does not eliminate the risk.
parameters = list(
    map(lambda s: re.sub('$', '"', s),          # append the closing quote
        map(
            lambda s: s.replace('=', '="'),     # open the quote after '='
            filter(
                lambda s: s.find('=') > -1 and bool(re.match(r'[A-Za-z0-9_]*=[.\/A-Za-z0-9]*', s)),
                sys.argv
            )
        )))

for parameter in parameters:
    logging.warning('Parameter: ' + parameter)
    exec(parameter)

# +
# Download the source URL into data_dir under the configured file name.
destination = os.path.join(data_dir, output_data_csv)
wget.download(url, out=destination)

# +
print('Data written successfully')
component-library/input/input-url.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DannyML-DSC/Hash-analytics/blob/master/Attrition_Case.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
# Authentication script for GCP / Google Drive access from Colab.
# !apt-get install -y -qq software-properties-common python-software-properties module-init-tools
# !apt-get install software-properties-common
# !apt-get install -y -qq software-properties-common module-init-tools
# !apt-get install -y -qq python-software-properties module-init-tools
# !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
# !apt-get update -qq 2>&1 > /dev/null
# !apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
# !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
# !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}

# +
# Script for mounting Google Drive so the data files can be read.
# !mkdir -p drive
# !google-drive-ocamlfuse drive

# +
# We import the important libraries.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from matplotlib.pyplot import figure

# +
# Employees who stayed / left live on separate sheets of the workbook.
data_stayed = pd.read_excel('drive//app//Employee.xlsx', sheet_name='Stayed' )

# +
data_stayed.head()

# +
data_left = pd.read_excel('drive//app//Employee.xlsx', sheet_name='Left')

# +
data_left.head()

# +
# Stack both sheets into a single frame.
data = pd.concat([data_stayed,data_left], axis=0, ignore_index=True)

# +
data.to_csv('drive//app//Employee.csv')

# +
df = pd.read_csv('drive//app//Employee.csv', index_col=0)

# +
df.head()

# +
# Data description
data.describe()

# +
# Shuffle the rows (stayed/left were stacked in order above).
# NOTE(review): no random_state, so this shuffle is not reproducible.
df = df.sample(frac=1)

# +
df.head()

# +
df.columns

# +
# Drop identifier and unused columns before modelling.
df.drop('Emp ID', axis=1, inplace=True)

# +
df.reset_index(inplace=True)

# +
df.drop('index', axis =1, inplace=True)

# +
df.drop('work_accident' , axis=1, inplace=True)

# +
df.drop('promotion_last_5years', axis=1, inplace=True)

# +
df.drop('dept', axis=1, inplace=True)

# +
df.head(100)

# +
# Calculating the total number of those that left and stayed in the company.
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
sns.set(style="ticks")
f = sns.countplot(x="Attrition", data=df, palette="bwr")
plt.show()

# +
df.columns

# +
from sklearn.preprocessing import LabelEncoder

# +
# Encoding categorical values so the ML model can use them.
le = LabelEncoder()
df['salary'] = le.fit_transform(df.salary)

# +
df.head(100)

# +
# Feature selection using Pearson correlation; values close to +1 are
# positively correlated with attrition.  Only satisfaction_level correlates
# strongly (0.33) with Attrition, meaning an employee's status in the company
# largely tracks how satisfied the employee has been there.
plt.figure(figsize=(10,5))
cor = df.corr()
sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
plt.show()


# +
def feature_engine():
    # Print the features whose absolute correlation with Attrition exceeds 0.3.
    # NOTE(review): relies on the module-level `cor` computed in the cell above.
    #Correlation with output variable
    cor_target = abs(cor["Attrition"])
    #Selecting highly correlated features
    relevant_features = cor_target[cor_target>0.3]
    df_features = pd.DataFrame({'relevant_features': relevant_features})
    print(df_features.sort_values(ascending=False, by='relevant_features'))


feature_engine()

# +
# Visual comparison between satisfaction level and attrition: employees with
# a satisfaction level above ~0.3 appear satisfied and unlikely to leave.
viz = sns.barplot(x= "satisfaction_level", y = "Attrition", data = df )

# +
# Time spent in the company also had a high correlation, so we plot it
# against attrition and analyse the possibilities.
viz2 = sns.barplot(x= "time_spend_company", y="Attrition", data=df)

# +
viz2 = sns.barplot(x= "average_monthly_hours", y="Attrition", data=df)

# +
viz3 = sns.barplot(x= "salary", y="Attrition", data=df)

# +
# Model inputs and target.
X = df[['satisfaction_level', 'last_evaluation', 'number_project', 'average_monthly_hours', 'time_spend_company', 'salary']]
Y = df[['Attrition']]

# +
from sklearn.preprocessing import StandardScaler

# +
# NOTE(review): the scaler is fitted on ALL rows before the train/test split,
# which leaks test-set statistics into training; fit on X_train only.
sc = StandardScaler()
X = sc.fit_transform(X)

# +
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size = 0.25, random_state = 0)

# +
from sklearn.linear_model import LogisticRegression

# +
X_train

# +
Y_train

# +
X_test

# +
Y_test

# +
model = LogisticRegression()

# +
# NOTE(review): Y_train is a one-column DataFrame; sklearn expects a 1-D
# array here and will emit a DataConversionWarning (use .values.ravel()).
model.fit(X_train,Y_train)

# +
prediction = model.predict(X_test)

# +
from sklearn.metrics import accuracy_score

# +
acc = accuracy_score(Y_test, prediction)

# +
acc
Attrition_Case.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a href="https://www.pieriandata.com"><img src="../DATA/Logo.jpg"></a>
#
# <em text-align:center>Copyright Pierian Data Inc.</em>

# # Video Basics Assessment Solution
#
# ## Project Task
#
# **You only have one task here. Create a program that reads in a live stream from a camera on your computer (or if you don't have a camera, just open up a video file). Then whenever you click the left mouse button down, create a blue circle around where you've clicked. Check out the video for an example of what the final project should look like**

# **Guide**
#
# * Create a draw_circle function for the callback function
# * Use two events cv2.EVENT_LBUTTONDOWN and cv2.EVENT_LBUTTONUP
# * Use a boolean variable to keep track if the mouse has been clicked up and down based on the events above
# * Use a tuple to keep track of the x and y where the mouse was clicked.
# * You should be able to then draw a circle on the frame based on the x,y coordinates from the Event
#
# Check out the skeleton guide below:

# +
# Create a function based on a CV2 Event (Left button click)

# mouse callback function
def draw_circle(event, x, y, flags, param):
    """Mouse callback: remember where the user clicked and arm drawing
    once the button is released."""
    global center, clicked

    # get mouse click on down and track center
    if event == cv2.EVENT_LBUTTONDOWN:
        center = (x, y)
        clicked = False  # hide the circle while the button is held down

    # Use boolean variable to track if the mouse has been released
    if event == cv2.EVENT_LBUTTONUP:
        clicked = True


# Haven't drawn anything yet!
center = (0, 0)
clicked = False

# Capture Video
cap = cv2.VideoCapture(0)

# Create a named window for connections
cv2.namedWindow('Test')

# Bind draw_circle function to mouse clicks
cv2.setMouseCallback('Test', draw_circle)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Stop cleanly when the camera/video stream yields no frame
    # (the original ignored `ret`, so a dead stream would pass a None
    # frame to cv2.imshow and crash).
    if not ret:
        break

    # Use if statement to see if clicked is true
    if clicked == True:
        # Draw circle on frame
        cv2.circle(frame, center=center, radius=50, color=(255, 0, 0), thickness=5)

    # Display the resulting frame
    cv2.imshow('Test', frame)

    # This command let's us quit with the "q" button on a keyboard.
    # Simply pressing X on the window won't work!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
# -
03-Video-Basics/04-Video-Basics-Assessment-Solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# System packages
import os
import sys
import warnings

# Data related
import numpy as np
import pandas as pd
import pprint as pp

# sklearn
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, cross_val_predict, cross_val_score, StratifiedKFold, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion, Pipeline, make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder
# (duplicate `from sklearn.metrics import accuracy_score` removed — the
# original cell imported it twice; the name is still imported above)

import scikitplot.plotters as skplt

# nlp
from gensim.models import KeyedVectors

# models
import xgboost as xgb
import eli5
from eli5.explain import explain_weights
from eli5.formatters import explain_weights_df

warnings.filterwarnings('ignore')

# +
# Add utils_functions.py as a dataset
# Import module
from shutil import copyfile

# Copy our file into the working directory (make sure it has .py suffix)
copyfile(src="../input/utils-functions/utils_functions.py", dst='/kaggle/working/utils_functions.py')

from utils_functions import *
# -

# ## 1. Load data

df = pd.read_csv('../input/processed/train_variants_text.csv')
df = df.dropna(subset=['Text'])

# ### Process data

# +
df['Class'] = df['Class'].astype(int)
df['Gene'] = df['Gene'].astype(str)
df['Variation'] = df['Variation'].astype(str)

y = df['Class']
X = df.drop(['Class', 'ID'], axis=1)
# -

# ## 3.1 Bag-of-words 1 Group + xgboost

# ### Split into Train and Validation data

X_tr, X_val, y_tr, y_val = split_data(df, 'Text', 'Class', 0.1, 0, stratify='Class')

clf = Pipeline([('vect', CountVectorizer(preprocessor=clean_text_stemmed, stop_words=stop_words)),
                ('tfidf', TfidfTransformer()),
                ('clf', xgb.XGBClassifier(objective="multi:softprob", random_state=42))])

clf.fit(X_tr, y_tr)

predicted = clf.predict(X_val)
acc = np.mean(predicted == y_val)
print(acc)

# NOTE(review): `test` is not defined anywhere in this notebook chunk —
# presumably a test dataframe loaded elsewhere; verify before running.
X_te = test.Text.values
X_te.shape

y_te = clf.predict_proba(X_te)
y_te.shape

# ## 3.2 Bag-of-words * 3 groups + xgboost

# Select one dataframe column for vectorization
def build_preprocessor(df, field):
    """Return a preprocessor that extracts *field* from a row tuple and
    applies CountVectorizer's default preprocessing to it."""
    field_idx = list(df.columns).index(field)
    # field_idx is evaluated now, so the lambda is safe from late binding
    return lambda x: default_preprocessor(x[field_idx])


default_preprocessor = CountVectorizer().build_preprocessor()

vectorizer = FeatureUnion([
    ('Variation', TfidfVectorizer(preprocessor=build_preprocessor(X, 'Variation'))),
    ('Gene', TfidfVectorizer(preprocessor=build_preprocessor(X, 'Gene'))),
    ('Text', TfidfVectorizer(preprocessor=build_preprocessor(X, 'Text'))),
])

X_v = vectorizer.fit_transform(X.values)

X_tr, X_val, y_tr, y_val = train_test_split(X_v, y, test_size=0.2, stratify=y, random_state=42)

model = xgb.XGBClassifier(objective="multi:softprob", random_state=42)
model.fit(X_tr, y_tr)

# Accuracy score
model.score(X_val, y_val)

df_name = explain_weights_df(model, vec=vectorizer, top=10,
                             feature_filter=lambda x: x != '<BIAS>')
df_name.to_csv('../data/features/20190609full_union_3groups_tfidf_feature_weights.csv')
df_name.head(10)

# Analyzing individual predictions. Let's check some predictions from the validation set.
# You see a summary of each vectorizer's contribution at the top, and then below you can see features highlighted in text.

eli5.show_prediction(model, doc=X.values[1], vec=vectorizer)
notebooks/03BoW_Full_Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # - # # Raw Cells # # Any Jupyter Notebook consists of cells of three different types: Code cells, Markdown cells, and/or a Raw cells. # While most Jupyter Notebook users are very familiar with Code cells and Markdown cells in Jupyter Notebooks, Raw cells are less frequently used. # For Jupyter Notebook, they are introduced # [here](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html?highlight=raw#raw-cells) # and for JupyterLab # [here](https://jupyterlab.readthedocs.io/en/stable/extension/notebook.html?highlight=raw#model). # The Raw cells are also sometimes referred to as Raw NBConvert cells in the context of # [nbconvert](https://nbconvert.readthedocs.io/en/latest/architecture.html?highlight=raw%20cell#a-detailed-pipeline-exploration). # The Raw cell type can be used to render different code formats into HTML or LaTeX by Sphinx. # This information is stored in the notebook metadata and converted appropriately. # # ## Usage # # Raw cells are created differently depending on the user interface. # # ### Jupyter Notebook # # To select a desired format from within Jupyter Notebook, select the cell containing your special code and choose options from the following dropdown menus: # # 1. Select "Raw NBConvert" in the Menu Toolbar (just below the two menus "Widgets" and "Help") # 2. 
Choose the appropriate "Raw NBConvert Format" within the cell
#
# ![Steps for converting cells to Raw formats in Jupyter](images/raw_cells_jupyter_notebook.png)
# <!-- this comment makes pandoc create an in-line image -->
#
# ### JupyterLab
#
# To select a desired format from within JupyterLab, first activate the right sidebar by clicking on View in the Menu Toolbar.
# Then you ensure that in front of Show Right Sidebar there is a tick.
# Once the Right Sidebar is shown, you are ready to go.
#
# Now you select the cell containing your special code and choose options from the following dropdown menus:
# 2. Select "Raw" in the Notebook Toolbar (just next to the symbols that run cells or reload the kernel)
# 3. Choose the appropriate "Raw NBConvert Format" in the Right Sidebar
#
# ![Steps for converting cells to Raw formats in Jupyter](images/raw_cells_jupyterlab.png)
# <!-- this comment makes pandoc create an in-line image -->

# ## Available Raw Cell Formats
#
# The following examples show how different Jupyter cell formats are rendered by Sphinx.

# ### None
#
# By default (if no cell format is selected), the cell content is included (without any conversion) in both the HTML and LaTeX output.
# This is typically not useful at all.

# + active=""
# "I'm a raw cell with no format."
# -

# ### reST
#
# Raw cells in "reST" format are interpreted as reStructuredText and parsed by Sphinx. The result is visible in both HTML and LaTeX output.

# + raw_mimetype="text/restructuredtext" active=""
# "**I'm** a *raw cell* in reST_ format."
#
# .. _reST: https://www.sphinx-doc.org/rest.html
# -

# ### Markdown
#
# Raw cells in "Markdown" format are interpreted as Markdown, and the result is included in both HTML and LaTeX output. Since the Jupyter Notebook also supports normal Markdown cells, this might not be useful *at all*.

# + raw_mimetype="text/markdown" active=""
# "**I'm** a *raw cell* in [Markdown](https://daringfireball.net/projects/markdown/) format."
# - # ### HTML # # Raw cells in "HTML" format are only visible in HTML output. This option might not be very useful, since raw HTML code is also allowed within normal Markdown cells. # + raw_mimetype="text/html" active="" # <p>&ldquo;<b>I&rsquo;m</b> a <em>raw cell</em> in # <a href="https://www.w3.org/html/">HTML</a> format.&rdquo;</p> # - # ### LaTeX # # Raw cells in "LaTeX" format are only visible in LaTeX output. # + raw_mimetype="text/latex" active="" # \textbf{I'm} a \emph{raw cell} in \href{https://www.latex-project.org/}{\LaTeX} format. # - # ### Python # # Raw cells in "Python" format are not visible at all (nor executed in any way). # + raw_mimetype="text/x-python" active="" # print("I'm a raw cell in \"Python\" format!")
doc/raw-cells.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="mailto:<EMAIL>">Prof. M.Sc. <NAME></a> # # Com o objetivo de trabalhar com APIs em Python, precisamos de ferramentas que realizem todas as requisições. A biblioteca mais comum em Python para esse tipo de tarefa é <b>requests</b>. A API <b>requests</b> não é nativa do Python, então você precisará instalá-la, caso já não tenha feito para poder iniciar. Você poderá instalar utilizando um dos comandos a seguir: # # <code>pip install requests</code> # <br> # <code>conda install requests</code> # # Geralmente, as APIs Rest retornam para o requisitante um documento do tipo <i>JSON</i>. Para podermos usufruir de todas as funcionalidades, o Python possui uma biblioteca chamada json a qual fornece algumas ferramentas para trabalhar com esse tipo de documento. Caso não possua essa biblioteca, utilize um dos comandos a seguir para instalá-la. # # <code>pip install json</code> # <br> # <code>conda install json</code> # # Feito isso, basta importar as duas bibliotecas para dentro de seu projeto para poder utilizá-las # + import requests import json ''' Para esse projeto em específico, utilizaremos a biblioteca datetime para poder realizar um impressão formatada de um dos dados recebidos via JSON ''' from datetime import datetime # - # A <b>NASA</b>, agência espacial norte-americana, possui um portal que disponibiliza uma API <i>Open Source</i> com alguns dados interessantes sobre o espaço e naves espaciais. 
Nesse projeto, iremos consumir os dados dos astronautas no espaço nesse momento (http://api.open-notify.org/astros.json) e a previsão de quando a estação espacial estará passando por determinada região da terra (http://api.open-notify.org/iss-pass.json) # # No passo a seguir iremos utilizar o método <i>get</i> para recuperar as informações dos astronautas no espaço nesse momento. Em seguida iremos verificar o status dessa solicitação. As possíveis respostas para essa requisição são: # <ul> # <li>200: Tudo correu bem, e o resultado foi retornado (se houver).</li> # <li>301: O servidor está redirecionando você para um terminal diferente. Isso pode acontecer quando uma empresa alterna nomes de domínio ou um nome de terminal é alterado.</li> # <li>400: O servidor acha que você fez uma solicitação incorreta. Isso pode acontecer quando você não envia os dados corretos, entre outras coisas.</li> # <li>401: O servidor acha que você não está autenticado. Muitas APIs exigem credenciais de login, portanto, isso acontece quando você não envia as credenciais corretas para acessar uma API.</li> # <li>403: O recurso que você está tentando acessar é proibido: você não tem as permissões corretas para vê-lo.</li> # <li>404: O recurso que você tentou acessar não foi encontrado no servidor.</li> # <li>503: O servidor não está pronto para lidar com a solicitação.</li> # </ul> response = requests.get("http://api.open-notify.org/astros.json") print(response.status_code) # Caso o código de retorno do status seja 200, teremos um documento <i>JSON</i> com as informações dos astronautas e iremos imprimir o resultado. print(response.json()) # Aparentemente, os dados recuperados vem num formato de dicionário (objeto <i>dict</i> da linguagem Python). Podemos criar uma função que transforma esse retorno numa forma melhor legível, através do método <i>dumps</i>, passando alguns parâmetros de identação e ordenação. 
def jsonprint(obj):
    """Pretty-print a JSON-serialisable object with sorted keys and
    4-space indentation."""
    text = json.dumps(obj, sort_keys=True, indent=4)
    print(text)


# Nesse passo iremos invocar o método recém criado para exibir seu resultado formatado.
jsonprint(response.json())

# Algumas APIs permitem que realizemos consultas parametrizadas, é o caso da API que prediz quando a estação espacial estará passando por determinado ponto na terra. Para esse exemplo, iremos criar uma variável denominada parameters e passaremos a latitude e longitude da cidade de Nova York.

parameters = {
    "lat": 40.71,
    "lon": -74
}

# Após criar o parâmetro para a personalização da consulta, utilizaremos novamente o método <i>get</i>, porém adicionaremos a cláusula params apontando para nossa variável parameters. Iremos exibir os resultados obtidos utilizando a função criada para formatação do <i>JSON</i>.

response = requests.get("http://api.open-notify.org/iss-pass.json", params=parameters)

jsonprint(response.json())

# A partir dos resultados obtidos, iremos exibir apenas a duração e a hora em que a estação espacial estiver sobre os parâmetros informados, esses dados estão dispostos a partir da chave <i>response</i>

pass_times = response.json()['response']
jsonprint(pass_times)

# +
# Nesse passo, criaremos uma lista apenas com os dados do horário de passagem e exibiremos da forma que fora recuperado
risetimes = []

for d in pass_times:
    time = d['risetime']
    risetimes.append(time)

print(risetimes)

# +
# Já nesse passo, iremos formatar os dados do horário de passagem da estação espacial para algo mais inteligível aos humanos
times = []

for rt in risetimes:
    time = datetime.fromtimestamp(rt)
    times.append(time)

# FIX: print the whole list of converted times, not just the last one —
# the original printed the loop variable `time` after the loop, which only
# shows the final element (cf. the parallel `print(risetimes)` cell above).
print(times)
# -

# Dessa forma, concluímos um exemplo simples de realizar requests em uma API disponível para consultas. Ainda há muito a se fazer a partir daqui, mas já auxilia em atividades mais simples.
#
# Fonte: https://www.dataquest.io/blog/python-api-tutorial/
APIConsumer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <center><img alt="" src="images/Cover_NLPTM.jpg"/></center> # # ## <center><font color="blue">Representasi Dokumen</font></center> # <b><center>(C) <NAME> - 2020</center> # <center>tau-data Indonesia ~ https://tau-data.id ~ <EMAIL></center> # + # Installing Modules for Google Colab import nltk # !wget https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/taudataNlpTm.py # !mkdir data # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/slang.txt # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_id.txt # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/stopwords_en.txt # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/wn-ind-def.tab # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/wn-msa-all.tab # !wget -P data/ https://raw.githubusercontent.com/taudata-indonesia/eLearning/master/data/all_indo_man_tag_corpus_model.crf.tagger # !pip install spacy python-crfsuite unidecode textblob sastrawi tweepy twython # !python -m spacy download en # !python -m spacy download xx # !python -m spacy download en_core_web_sm nltk.download('popular') # - import taudataNlpTm as tau, seaborn as sns; sns.set() import tweepy, json, nltk, urllib.request from textblob import TextBlob from nltk.tokenize import TweetTokenizer from twython import TwythonStreamer from tqdm import tqdm_notebook as tqdm # <h1 id="Vector-Space-Model---VSM">Vector Space Model - VSM</h1> # # <p><img alt="" src="images/vsm.png" style="width: 300px; height: 213px;" /></p> # # + [markdown] slideshow={"slide_type": "slide"} # ## 
<font color="blue">Outline Representasi Dokumen :</font> # * Representasi Sparse (VSM): Binary, tf &amp;-/ idf, Custom tf-idf, BM25 # * Frequency filtering, n-grams, vocabulary based # * Representasi Dense:&nbsp;Word Embedding (Word2Vec dan FastText) # * Tensor to Matrix representation untuk model Machine Learning di Text Mining # - # <ul> # <li>Data yang biasanya kita ketahui berbentuk <strong>tabular </strong>(tabel/kolom-baris/matriks/<em>array</em>/larik), data seperti ini disebut data terstruktur (<strong><em>structured data</em></strong>).</li> # <li>Data terstruktur dapat disimpan dengan baik di&nbsp;<em>spreadsheet</em>&nbsp;(misal:&nbsp;<em>Excel/CSV</em>) atau basis data (<em>database</em>) relasional dan secara umum dapat digunakan langsung oleh berbagai model/<em>tools</em>&nbsp;statistik/data mining konvensional.</li> # <li>Sebagian data yang lain memiliki &ldquo;<em>tags</em>&rdquo; yang menjelaskan elemen semantik yang berbeda di dalamnya dan cenderung tidak memiliki skema (struktur) yang statis.</li> # <li>Data seperti ini disebut data<em>&nbsp;<strong>semi-structured</strong></em>, contohnya data dalam bentuk &nbsp;<strong><a href="http://www.w3.org/XML/" target="_blank">XML</a></strong>.</li> # <li>Apa bedanya? Apa maksudnya tidak memiliki skema yang statis? 
Penjelasan mudahnya bayangkan sebuah data terstruktur (tabular), namun dalam setiap baris (<em>record/instance</em>)-nya tidak memiliki jumlah variabel (peubah) yang sama.</li> # <li>Tentu saja data seperti ini tidak sesuai jika disimpan dan diolah dengan&nbsp;<em>tools/software</em>&nbsp;yang mengasumsikan struktur yang statis pada setiap barisnya (misal: Excel dan SPSS).</li> # </ul> # # <p><img alt="" src="images/3_tipeData.png" style="height: 400px ; width: 430px" /></p> # # <ul> # <li>Data multimedia seperti teks, gambar atau video <strong>tidak dapat</strong>&nbsp;<strong>secara langsung</strong>&nbsp;dianalisa dengan model statistik/data mining.</li> # <li>Sebuah proses awal&nbsp;<em>(pre-process)</em>&nbsp;harus dilakukan terlebih dahulu untuk merubah data-data tidak (semi) terstruktur tersebut menjadi bentuk yang dapat digunakan oleh model statistik/data mining konvensional.</li> # <li>Terdapat berbagai macam cara mengubah data-data tidak terstruktur tersebut ke dalam bentuk yang lebih sederhana, dan ini adalah suatu bidang ilmu tersendiri yang cukup dalam. Sebagai contoh saja sebuah teks biasanya dirubah dalam bentuk vektor/<em>topics</em>&nbsp;terlebih dahulu sebelum diolah.</li> # <li>Vektor data teks sendiri bermacam-macam jenisnya: ada yang berdasarkan eksistensi (<strong><em>binary</em></strong>), frekuensi dokumen (<strong>tf</strong>), frekuensi dan invers jumlah dokumennya dalam corpus (<strong><a href="https://en.wikipedia.org/wiki/Tf%E2%80%93idf" target="_blank">tf-idf</a></strong>), <strong>tensor</strong>, dan sebagainya.</li> # <li>&nbsp;Proses perubahan ini sendiri biasanya tidak&nbsp;<em>lossless</em>, artinya terdapat cukup banyak informasi yang hilang. Maksudnya bagaimana? 
# Sebagai contoh ketika teks direpresentasikan dalam vektor (sering disebut sebagai model <strong>bag-of-words</strong>) maka informasi urutan antar kata menghilang.&nbsp;</li>
# </ul>
#
# <p><img alt="" src="images/3_structureData.png" style="height:270px; width:578px" /></p>
#
# <p><strong>Contoh bentuk umum representasi dokumen:</strong></p>
#
#
# <p><img alt="" src="images/3_Bentuk umum representasi dokumen.JPG" style="height: 294px ; width: 620px" /></p>
#
# <p>Pada Model <em>n-grams</em> kolom bisa juga berupa frase.</p>

# <h2 id="Document-Term-Matrix-:-Vector-Space-Model---VSM">Document-Term Matrix : Vector Space Model - VSM</h2>
#
# <p><img alt="" src="images/vsm_matrix.png" style="width: 500px; height: 283px;" /></p>
#
# <p><img alt="" src="images/3_rumus tfidf.png" style="height:370px; width:367px" /></p>
#
# <p><img alt="" src="images/3_tfidf logic.jpg" style="height:359px; width:638px" /></p>

# <p><img alt="" src="images/3_variant tfidf.png" style="height:334px; width:955px" /></p>

# K = |d|

# pertama-tama mari kita Load Data twitter dari pertemuan sebelumnya
#
# * Silahkan gunakan data baru (crawl lagi) jika diinginkan

def loadTweets(file='Tweets.json'):
    """Load a line-delimited JSON tweet dump into a list of dicts.

    Each line of *file* must contain one JSON object.  The file is read
    with ``errors='ignore'`` so undecodable bytes are dropped, matching
    the original behaviour.
    """
    # `with` guarantees the handle is closed even if json.loads raises
    # (the original open/readlines/close leaked the handle on a parse error).
    with open(file, encoding='utf-8', errors='ignore', mode='r') as f:
        return [json.loads(line.strip()) for line in f]
import json T2 = loadTweets(file='data/tweets_sma-01.json') print('Total data = {}'.format(len(T2))) print('tweet pertama oleh "{}" : "{}"'.format(T2[0]['user']['screen_name'],T2[0]['full_text'])) # - # Contoh mengambil hanya data tweet data = [t['full_text'] for t in T2] data[:5] # 5 tweet pertama # # PreProcessing Data Text-nya # + # pre processing import taudataNlpTm as tau from tqdm import tqdm_notebook as tqdm # cleanText(T, fix={}, lemma=None, stops = set(), symbols_remove = True, min_charLen = 2, fixTag= True) stops, lemmatizer = tau.LoadStopWords(lang='id') stops.add('rt') stops.add('..') for i,d in tqdm(enumerate(data)): data[i] = tau.cleanText(d, lemma=lemmatizer, stops = stops, symbols_remove = True, min_charLen = 2) print(data[0]) # + # Menggunakan modul SciKit untuk merubah data tidak terstruktur ke VSM # Scikit implementation http://scikit-learn.org/stable/modules/feature_extraction.html from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer # http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html#sklearn.feature_extraction.text.CountVectorizer # http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html#sklearn.feature_extraction.text.TfidfVectorizer # - # VSM - "binari" binary_vectorizer = CountVectorizer(binary = True) binari = binary_vectorizer.fit_transform(data) binari.shape # ukuran VSM # Sparse vectors/matrix binari[0] # Mengakses Datanya print(binari[0].data) print(binari[0].indices) # Kolom dan term print(str(binary_vectorizer.vocabulary_)[:93]) # + # VSM term Frekuensi : "tf" tf_vectorizer = CountVectorizer(binary = False) tf = tf_vectorizer.fit_transform(data) print(tf.shape) # Sama print(tf[0].data) # Hanya data ini yg berubah print(tf[0].indices) # Letak kolomnya tetap sama # - d = tf_vectorizer.vocabulary_ kata_kolom = {k:v for v,k in d.items()} kata_kolom[597] # + # VSM term Frekuensi : "tf-idf" tfidf_vectorizer = 
TfidfVectorizer() tfidf = tfidf_vectorizer.fit_transform(data) print(tfidf.shape) # Sama print(tfidf[0].data) # Hanya data ini yg berubah print(tfidf[0].indices) # Letak kolomnya berbeda, namun jumlah kolom dan elemennya tetap sama # - # ## tf-idf: # # <img alt="" src="images/toydata_vsm.png" /> # # * Menurut http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html # * default formula tf-idf yang digunakan sk-learn adalah: # * $tfidf = tf * log(\frac{N}{df+1})$ ==> Smooth IDF # * namun kita merubahnya menjadi: # * $tfidf = tf * log(\frac{N}{df})$ ==> Non Smooth IDF # * $tfidf = tf * log(\frac{N}{df+1})$ ==> linear_tf, Smooth IDF # * $tfidf = (1+log(tf)) * log(\frac{N}{df})$ ==> sublinear_tf, Non Smooth IDF # VSM term Frekuensi : "tf-idf" tfidf_vectorizer = TfidfVectorizer(smooth_idf= False, sublinear_tf=True) tfidf = tfidf_vectorizer.fit_transform(data) print(tfidf.shape) # Sama print(tfidf[0].data) # Hanya data ini yg berubah print(tfidf[0].indices) # Letak kolomnya = tfidf # ### Alasan melakukan filtering berdasarkan frekuensi: # * Intuitively filter noise # * Curse of Dimensionality (akan dibahas kemudian) # * Computational Complexity # * Improving accuracy # + # Frequency Filtering di VSM tfidf_vectorizer = TfidfVectorizer() tfidf_1 = tfidf_vectorizer.fit_transform(data) tfidf_vectorizer = TfidfVectorizer(max_df=0.75, min_df=5) tfidf_2 = tfidf_vectorizer.fit_transform(data) print(tfidf_1.shape) print(tfidf_2.shape) # + tfidf_vectorizer = TfidfVectorizer(lowercase=True, smooth_idf= True, sublinear_tf=True, ngram_range=(1, 2), max_df=0.90, min_df=2) tfidf_3 = tfidf_vectorizer.fit_transform(data) print(tfidf_3.shape) # - # <h2 id="Best-Match-Formula-:-BM25">Best-Match Formula : BM25</h2> # # <p><img alt="" src="images/3_bm25_simple.png" style="height: 123px; width: 300px;" /></p> # # <ol> # <li>di IR nilai b dan k yang optimal adalah :&nbsp;<strong> <em>b</em> = 0.75&nbsp; dan k = [1.2 - 2.0]&nbsp; &nbsp;</strong><br /> 
# ref:&nbsp;<em><NAME>., <NAME>., &amp; <NAME>. &Uuml;. <NAME>. (2008). Introduction to information retrieval.&nbsp;An Introduction To Information Retrieval,&nbsp;151, 177.</em></li> # <li>Tapi kalau untuk TextMining (clustering) nilai <strong>k optimal adalah 20, nilai b = sembarang (boleh = 0.75)</strong><br /> # ref:&nbsp;<em><NAME>., &amp; <NAME>. (2011). Improving document clustering using Okapi BM25 feature weighting.&nbsp;Information retrieval,&nbsp;14(5), 466-487.</em></li> # <li><strong>avgDL </strong>adalah rata-rata panjang dokumen di seluruh dataset dan <strong>DL </strong>adalah panjang dokumen D.<br /> # hati-hati, ini berbeda dengan &nbsp;tf-idf MySQL diatas.</li> # </ol> # # + # Variasi pembentukan matriks VSM: d1 = '@udin76, Minum kopi pagi-pagi sambil makan pisang goreng is the best' d2 = 'Belajar NLP dan Text Mining ternyata seru banget sadiezz' d3 = 'Sudah lumayan lama bingits tukang Bakso belum lewat' d4 = 'Aduh ga banget makan Mie Ayam p4k4i kesyap, please deh' D = [d1, d2, d3, d4] # Jika kita menggunakan cara biasa: tfidf_vectorizer = TfidfVectorizer() vsm = tfidf_vectorizer.fit_transform(D) print(tfidf_vectorizer.vocabulary_) # - # N-Grams VSM # Bermanfaat untuk menangkap frase kata, misal: "ga banget", "pisang goreng", dsb tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 2)) vsm = tfidf_vectorizer.fit_transform(D) print(tfidf_vectorizer.vocabulary_) # Vocabulary based VSM # Bermanfaat untuk menghasilkan hasil analisa yang "bersih" # variasi 2 d1 = '@udin76, Minum kopi pagi-pagi sambil makan pisang goreng is the best' d2 = 'Belajar NLP dan Text Mining ternyata seru banget sadiezz' d3 = 'Sudah lumayan lama bingits tukang Bakso belum lewat seru' d4 = 'Aduh ga banget makan Mie Ayam p4k4i kesyap, please deh' D = [d1,d2,d3,d4] Vocab = {'seru banget':0, 'seru':1, 'the best':2, 'lama':3, 'text mining':4, 'nlp':5, 'ayam':6} tf_vectorizer = CountVectorizer(binary = False, vocabulary=Vocab) tf = tf_vectorizer.fit_transform(D) print(tf.toarray()) 
tf_vectorizer.vocabulary_ Vocab = {'seru banget':0, 'the best':1, 'lama':2, 'text mining':3, 'nlp':4, 'ayam':5} tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=1, lowercase=True, vocabulary=Vocab) vsm = tfidf_vectorizer.fit_transform(D) print(tfidf_vectorizer.vocabulary_) # VSM terurut sesuai definisi dan terkesan lebih "bersih" # Perusahaan besar biasanya memiliki menggunakan teknik ini dengan vocabulary yang comprehensif # Sangat cocok untuk Sentiment Analysis # <h2><strong>Word Embeddings</strong></h2> # # <h2><img alt="" src="images/3_word_embeddings.png" style="height: 296px ; width: 602px" /></h2> # <p><img alt="" src="images/3_word2vec_example.png" style="height:400px; width:667px" /></p> # # <h3>Word2Vec</h3> # # <p><img alt="" src="images/3_word2Vec.png" style="height:400px; width:636px" /><br /> # Dikembangkan oleh <NAME> - Google :</p> # # <p><NAME>; <NAME>. &quot;word2vec Explained: Deriving Mikolov et al.&#39;s Negative-Sampling Word-Embedding Method&quot;.&nbsp;<a href="https://en.wikipedia.org/wiki/ArXiv">arXiv</a>:<a href="https://arxiv.org/abs/1402.3722">1402.3722</a> </p> # # <p><img alt="" src="images/BoW_VS_WordEmbedding.png" style="width: 248px; height: 372px;" /></p> # data[:3] # + # Rubah bentuk data seperti yang dibutuhkan genSim # Bisa juga dilakukan dengan memodifikasi fungsi "cleanText" (agar lebih efisien) data_we = [] for doc in data: Tokens = [str(w) for w in TextBlob(doc).words] data_we.append(Tokens) print(data_we[:3]) # + # https://radimrehurek.com/gensim/models/word2vec.html # train word2vec dengan data di atas from gensim.models import Word2Vec L = 300 # Jumlah neurons = ukuran vektor = jumlah kolom model_wv = Word2Vec(data_we, min_count=2, size=L, window = 5, workers= -2) # min_count adalah jumlah kata minimal yang muncul di corpus # "size" adalah Dimensionality of the word vectors # (menurut beberapa literature untuk text disarankan 300-500) # "window" adalah jarak maximum urutan kata yang di pertimbangkan # workers = 
jumlah prosesor yang digunakan untuk menjalankan word2vec print('Done!...') # - # di data yang sebenarnya (i.e. besar) Gensim sering membutuhkan waktu cukup lama # Untungnya kita bisa menyimpan dan me-load kembali hasil perhitungan model word2vec, misal model_wv.save('data/model_w2v') model_wv = Word2Vec.load('data/model_w2v') print('Done!...') # ### Hati-hati, Word2vec menggunakan Matriks Dense # # <p>Penggunaan memory oleh Gensim kurang lebih sebagai berikut:</p> # # <p>Jumlah kata x &quot;size&quot; x 12 bytes</p> # # <p>Misal terdapat 100 000 kata unik dan menggunakan 200 layers, maka penggunaan memory =&nbsp;</p> # # <p>100,000x200x12 bytes = ~229MB</p> # # <p>Jika jumlah size semakin banyak, maka jumlah training data yang diperlukan juga semakin banyak, namun model akan semakin akurat.</p> # # Melihat vector suatu kata vektor = model_wv.wv.__getitem__(['psbb']) print(len(vektor[0])) # Panjang vektor keseluruhan = jumlah neuron yang digunakan print(vektor[0][:5]) # 5 elemen pertama dari vektornya # Mencari kata terdekat menurut data training dan Word2Vec model_wv.wv.most_similar('psbb') # Melihat similarity antar kata print(model_wv.wv.similarity('psbb', 'corona')) print(model_wv.wv.similarity('psbb', 'bioskop')) print(model_wv.wv.similarity('psbb', 'psbb')) # <p><img alt="" src="images/3_cosine.png" style="height:400px; width:683px" /></p> # # ## Hati-hati Cosine adalah similarity bukan distance # Hal ini akan mempengaruhi interpretasi # + # error jika kata tidak ada di training data # beckman bukan beckmans ==> hence di Word Embedding PreProcessing harus thourough kata = 'copid' try: print(model_wv.wv.most_similar(kata)) except: print('error! 
kata "',kata,'" tidak ada di training data') # ini salah satu kelemahan Word2Vec # - # ## Tips: # # <p>Hati-hati GenSim tidak menggunakan seluruh kata di training data!.</p> # # <p>Perintah berikut akan menghasilkan kata-kata yang terdapat di vocabulary GenSim</p> # Vocabulary = model_wv.wv.vocab print(str(Vocabulary.keys())[:250]) # Gunakan vocabulary ini (rubah ke "set") untuk membuat program menjadi lebih robust # ## Hati-hati menginterpretasikan hasil Word2Vec # <h3 id=" FastText-(Facebook-2016)">&nbsp;FastText (Facebook-2016)</h3> # # <ul> # <li>Menggunakan Sub-words: app, ppl, ple - apple</li> # <li>Paper:&nbsp;https://arxiv.org/abs/1607.04606&nbsp;&nbsp;</li> # <li>Website:&nbsp;https://fasttext.cc/</li> # <li>Source:&nbsp;https://github.com/facebookresearch/fastText&nbsp;</li> # </ul> # # + # Caution penggunaan memory besar, bila timbul "Memory Error" kecilkan nilai L from gensim.models import FastText L = 100 # Jumlah neurons = ukuran vektor = jumlah kolom model_FT = FastText(data_we, size=L, window=5, min_count=2, workers=-2) 'Done' # - # Mencari kata terdekat menurut data training dan Word2Vec model_FT.wv.most_similar('psbb') # Melihat similarity antar kata print(model_FT.wv.similarity('psbb', 'corona')) print(model_FT.wv.similarity('psbb', 'jakarta')) print(model_FT.wv.similarity('psbb', 'psbb')) # + # Word2Vec VS FastText try: print(model_wv.wv.most_similar('coro')) except: print('Word2Vec error!') try: print(model_FT.wv.most_similar('coro')) except: print('FastText error!') # - # # Diskusi: # <ul> # <li>Apakah kelebihan dan kekurangan WE secara umum?</li> # <li>Apakah kira-kira aplikasi WE?</li> # <li>Apakah bisa dijadikan representasi dokumen? Bagaimana caranya?</li> # <li>Bergantung pada apa sajakah performa model WE?</li> # </ul> # # * Preprocessing apa yang sebaiknya dilakukan pada model Word Embedding? # * Apakah Pos Tag bermanfaat disini? Jika iya bagaimana menggunakannya? 
# <h1>End of Module UDA-06</h1> # <hr /> # <p><img alt="" src="images/2_Studying_Linguistic.png" style="height:500px; width:667px" /></p> #
nlptm-06 Representasi Dokumen.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="VOhmcD3F7z4T"
# ## Setup

# + [markdown] id="D0XgD-kCEHMI"
# ### System Detail & Colab Setup
#
# Enter below code in browser developer console to prevent disconnect.
# ```
# function ClickConnect() {
#   console.log('Working')
#   document
#     .querySelector('#top-toolbar > colab-connect-button')
#     .shadowRoot.querySelector('#connect')
#     .click()
# }
# intervalTiming = setInterval(ClickConnect, 60000)
# ```

# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 337, "status": "ok", "timestamp": 1646131356633, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="xgtxJADb6CU5" outputId="1b49926a-b10e-42a3-b007-8b1a178381ff"
# Show which GPU (if any) Colab allocated for this session.
# !nvidia-smi

# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 15022, "status": "ok", "timestamp": 1646131371998, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="lhHIDRTisli7" outputId="165aac77-5ef9-44c8-acc6-731008cce856"
# albumentations (augmentation) needs a headless OpenCV build on Colab.
# !pip install -q -U albumentations
# !pip install opencv-python-headless==4.5.2.52

# + [markdown] id="JRFOxYh_htPm"
# ### Load Dataset
# Unarchive zip file on GoogleDrive to local `/content/` directory

# + executionInfo={"elapsed": 44147, "status": "ok", "timestamp": 1646131416138, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="gWMIvdLwh64I"
# !mkdir "./PascalVOC"
# !unzip -qq "/content/drive/MyDrive/Colab Resources/Datasets/PascalVOC.zip" -d "/content/PascalVOC/"

# + [markdown] id="wCECOe-M71oF"
# ### Import
#
# Reload everytime when python module is updated during runtime.

# + executionInfo={"elapsed": 15516, "status": "ok", "timestamp": 1646131431639, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="H2h1B07PGfT3"
import torch
import torchvision
import torch.optim as optim
from torch.utils.data import DataLoader

from importlib import reload
import sys
import os.path

# Project code lives on Google Drive; make it importable as yolo.* / utils.*.
sys.path.append("/content/drive/MyDrive/Colab Resources/YOLOv1")

from yolo.resnet_model_light import ResnetYoloV1
from yolo.loss import YoloLoss
from yolo.dataset import VOCDataset
from utils.metric import Metric
from utils.image_processes import ResizePreprocess, JitterPreprocess, normalize_imagenet_to_cv2
from utils.box_processes import BoxProcesses
from utils.trainer import Trainer
from utils.inferrer import Inferrer
from utils.visualize import draw_box_on_image

# + [markdown] id="-5HE3THQFiuI"
# ### Set Device
#
# Utilize GPU when available

# + executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1646131431640, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="FWBwHAxEFkiJ"
# Everything below (model, Trainer, Inferrer) is constructed against DEVICE.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# + [markdown] id="0vEKEaHY73yn"
# ## Hyperparameters

# + executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1646131431641, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="uwBfeUCX7f0d"
# Local Hyperparameters etc.
NUM_WORKERS = 2
PIN_MEMORY = True

# Tunable Hyperparameters (in Colab)
BATCH_SIZE = 64
LEARNING_RATE = 0.005
EPOCHS = 135
WEIGHT_DECAY = 0.0005
# Checkpoint file: read by trainer.launch_model(..., LOAD_MODEL=True) and
# written by trainer.save_training() in the epoch loop below.
MODEL_CKPT_PATH = "/content/drive/MyDrive/Colab Resources/YOLOv1/resnet_model.pth.tar"

# + [markdown] id="8tsz62eyHlyO"
# ## Training

# + [markdown] id="_qqC21xbHn88"
# ### Load Data

# + executionInfo={"elapsed": 15, "status": "ok", "timestamp": 1646131431641, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="98XKAZe_Huwq"
# Training uses JitterPreprocess, validation uses ResizePreprocess.
# NOTE(review): presumably jitter = random augmentation and resize = deterministic
# rescale only — confirm in utils.image_processes.
train_dataset = VOCDataset(
    csv_file="/content/PascalVOC/train.csv",
    transform=JitterPreprocess(),
    img_dir="/content/PascalVOC/images",
    label_dir="/content/PascalVOC/labels"
)
val_dataset = VOCDataset(
    csv_file="/content/PascalVOC/test.csv",
    transform=ResizePreprocess(),
    img_dir="/content/PascalVOC/images",
    label_dir="/content/PascalVOC/labels"
)

# drop_last=True on the training loader discards the final incomplete batch so
# every optimization step sees a full BATCH_SIZE; validation keeps all samples.
train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    pin_memory=PIN_MEMORY,
    shuffle=True,
    drop_last=True
)
val_loader = DataLoader(
    dataset=val_dataset,
    batch_size=BATCH_SIZE,
    num_workers=NUM_WORKERS,
    pin_memory=PIN_MEMORY,
    shuffle=False,
    drop_last=False
)

# + colab={"base_uri": "https://localhost:8080/", "height": 429} executionInfo={"elapsed": 757, "status": "ok", "timestamp": 1646131432383, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="paIImDUmJPfP" outputId="ebcb75c6-6eae-48a9-de4f-8b4c4b749ccc"
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [7, 7]
plt.axis('off')

# pick one sample
IMG_NUM = 15
sample_data = train_dataset[IMG_NUM]

# re-process to use visualizer function...
box_processor = BoxProcesses()
rgb_img = normalize_imagenet_to_cv2(sample_data[0])
bbox = box_processor.boxes_cell_to_list(sample_data[1].unsqueeze(0))[0]
# keep only boxes whose second field is positive
# NOTE(review): bbox[1] looks like a presence/confidence score — confirm the
# box tuple layout in utils.box_processes.
bbox = list(filter(lambda box: box[1]>0, bbox))
draw_box_on_image(rgb_img, bbox, color=[0, 255, 0])
plt.imshow(rgb_img)

# + [markdown] id="SufPw1KkMF5j"
# ### Initialize Model

# + colab={"base_uri": "https://localhost:8080/", "height": 422} executionInfo={"elapsed": 6137, "status": "ok", "timestamp": 1646133667808, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}, "user_tz": -540} id="zPFSCcarMHen" outputId="f8ddf876-4a08-42e6-f8d2-db8fbd9ac06e"
# ======
# Initialize Model
# ======
# YOLOv1 grid: 7x7 cells, 2 boxes per cell, 20 Pascal VOC classes.
model = ResnetYoloV1(split_size=7, num_boxes=2, num_classes=20).to(DEVICE)
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=WEIGHT_DECAY)
loss_fn = YoloLoss()

# setup trainer
trainer = Trainer(DEVICE)

# load previous training (LOAD_MODEL=True resumes from MODEL_CKPT_PATH)
trainer.launch_model(model, optimizer, MODEL_CKPT_PATH, LOAD_MODEL=True)

# set learning rate scheduler
trainer.launch_training(optimizer)

# setup inferrer
inferrer = Inferrer(DEVICE)

# NOTE(review): max() raises on an empty list — this cell assumes a resumed run
# with at least one recorded validation mAP.
best_mAP = max(trainer.val_mAPs)
print(f"Best mAP during training was: {best_mAP}")

# + [markdown] id="nbcjHMTBMJzB"
# ### Epoch Loop

# + colab={"base_uri": "https://localhost:8080/", "height": 693} id="KpPvCJ-CMMK1" outputId="20092906-d98b-4957-b71b-7783710c494b" executionInfo={"status": "ok", "timestamp": 1646133221301, "user_tz": -540, "elapsed": 1751532, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}}
# =====
# Epoch Loop
# =====
# Resume counting from the checkpointed epoch.
for epoch in range(trainer.prev_epoch+1, EPOCHS):
    print(f"Epoch: {epoch}")

    # Training
    train_mean_loss, val_mean_loss = trainer.epoch_train_fn(train_loader, val_loader, model, optimizer, loss_fn)

    # Record losses
    trainer.record_losses(train_mean_loss, val_mean_loss)

    if (epoch+1) % 1 == 0:
        if (epoch+1) % 5 == 0:
            # Check mAP performance.  check_mAP runs inference over the whole
            # validation loader, so it is only done every 5th epoch.
            # train_mAP is a placeholder: training mAP is never computed here.
            train_mAP = 0
            val_mAP = trainer.check_mAP(val_loader, model, inferrer)
            print(f"Train mAP: {train_mAP}, Validation mAP: {val_mAP}")

            # Record mAPs
            trainer.record_mAPs(train_mAP, val_mAP)

        # Plot record
        trainer.plot_training()

        # Save checkpoint during training
        trainer.save_training(model, optimizer, epoch, filename=MODEL_CKPT_PATH)

# + [markdown] id="ej4Uc1D3fe07"
# ## Evaluation

# + [markdown] id="lh1BFHUJq4GO"
# ### Mean Average Precision

# + id="qy_noIi8qtyi" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1646133571938, "user_tz": -540, "elapsed": 350641, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}} outputId="d7d26bba-2067-4dd4-d267-7fb54c609ac0"
mean_avg_prec = trainer.check_mAP(val_loader, model, inferrer)
print(f"Test mAP: {mean_avg_prec}")

# + [markdown] id="yR_UGwWgfcUc"
# ### Visualize
# Plot a validation batch

# + id="HqVtGudRXvpO" colab={"base_uri": "https://localhost:8080/", "height": 1000, "output_embedded_package_id": "19tFrc8cszPrjTeSYsXxtD5Ei0QqnVTHv"} executionInfo={"status": "ok", "timestamp": 1646133660599, "user_tz": -540, "elapsed": 88676, "user": {"displayName": "\u00ad\uc190\uc0c1\ud604 | \uae30\uacc4\uacf5\ud559\ubd80 | \ud55c\uc591\ub300(\uc11c\uc6b8)", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "03568245900072502246"}} outputId="cb89743f-7e2f-42e1-f66d-dc28ac2a6e8c"
import numpy as np
import time  # NOTE(review): unused in this cell

# Inference runs over the whole val_loader, but only the first batch is drawn.
# NOTE(review): assumes the image index in bbox[0] counts from 0 in loader
# order, so indices 0..len(batch[0])-1 correspond to this first batch — confirm
# in utils.inferrer.
batch = next(iter(val_loader))
all_pred_boxes, all_true_boxes = inferrer.infer_labeled(val_loader, model, iou_threshold=0.5, threshold=0.1)

for IMG_IDX in range(len(batch[0])):
    # filter out bboxes of current image
    pred_bboxes = [
        bbox[1:8] for bbox in all_pred_boxes
        if bbox[0] == IMG_IDX
    ]
    gt_bboxes = [
        bbox[1:8] for bbox in all_true_boxes
        if bbox[0] == IMG_IDX
    ]

    # draw boxes on image: ground truth in green, predictions in red
    rgb_image = normalize_imagenet_to_cv2(batch[0][IMG_IDX])
    draw_box_on_image(rgb_image, gt_bboxes, color=[0, 255, 0])
    draw_box_on_image(rgb_image, pred_bboxes, color=[255, 0, 0])

    # plot
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(rgb_image)
    plt.show()
YOLOv1 train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exploring Beat Frequencies using the `Audio` Object

# This example uses the `Audio` object and Matplotlib to explore the phenomenon of beat frequencies.

# + jupyter={"outputs_hidden": false}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np

# + jupyter={"outputs_hidden": false}
from ipywidgets import interactive
from IPython.display import Audio, display
import numpy as np

# + jupyter={"outputs_hidden": false}
def beat_freq(f1=220.0, f2=224.0):
    """Play the superposition of two sine tones and return the raw signal.

    Mixing tones at ``f1`` and ``f2`` Hz produces an audible beat at
    ``abs(f1 - f2)`` Hz; that value is printed alongside the inputs.
    Returns the 1-D numpy signal so callers (the widget below) can plot it.
    """
    max_time = 3  # seconds of audio
    rate = 8000   # samples per second, must match the rate given to Audio
    # endpoint=False yields exactly rate*max_time samples spaced 1/rate apart.
    # The previous inclusive endpoint made the spacing max_time/(rate*max_time-1),
    # so the true sample rate differed slightly from `rate` and the played-back
    # frequencies were shifted off the nominal f1/f2.
    times = np.linspace(0, max_time, rate*max_time, endpoint=False)
    signal = np.sin(2*np.pi*f1*times) + np.sin(2*np.pi*f2*times)
    print(f1, f2, abs(f1-f2))
    display(Audio(data=signal, rate=rate))
    return signal

# + jupyter={"outputs_hidden": false}
v = interactive(beat_freq, f1=(200.0,300.0), f2=(200.0,300.0))
display(v)

# + jupyter={"outputs_hidden": false}
v.kwargs

# + jupyter={"outputs_hidden": false}
# Drive the widget programmatically, then plot the first 6000 samples
# (0.75 s at 8 kHz) of the most recent synthesized signal.
f1, f2 = v.children
f1.value = 255
f2.value = 260
plt.plot(v.result[0:6000])
001-Jupyter/001-Tutorials/001-Basic-Tutorials/002-Interactive-Widgets/Example - Beat Frequencies.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.2 64-bit (''venv'': venv)'
#     language: python
#     name: python382jvsc74a57bd0265455998716544771fe8f8e46d3bc336b7ce25fc2914091372dd288abc92a2d
# ---

import pandas as pd

# Two samples of 19 observations each, one per city.
city_a_values = [240, 440, 455, 475, 475, 490, 490, 500, 500, 500,
                 530, 550, 578, 580, 620, 687, 694, 703, 859]
city_b_values = [500, 564, 590, 600, 600, 600, 645, 650, 660, 667,
                 689, 692, 700, 700, 705, 735, 760, 764, 805]

df = pd.DataFrame({'city_a': city_a_values, 'city_b': city_b_values})

# Side-by-side box plots to compare the two distributions.
boxplot = df.boxplot(column=['city_a', 'city_b'])
unit_2/math_1.11.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import json
import pandas as pd
import matplotlib.pyplot as plt

# ## Question: Why are hashes getting repeated?

# +
# Each line of the log files is one JSON record.
train = pd.read_json("logs/train.json", lines=True)
test = pd.read_json("logs/test.json", lines=True)

print (train.shape, test.shape)

# +
logs = pd.concat([train, test], axis=0)
logs.shape
# -

# Keep only TCP (protocol 6) flows with port 443 on either side.
logs = logs[((logs['d_port'] == 443) | (logs['s_port'] == 443)) & (logs['protocol'] == 6)]
logs.shape

# +
vc = logs['payload_hash'].value_counts()
# "Repeated" here means seen more than twice in the filtered logs.
repeat_hashes = list(vc[vc > 2].index)

df = logs[logs['payload_hash'].isin(repeat_hashes)].reset_index(drop=True)
# -

print (" Total number of unique hashes: {}\n".format(logs['payload_hash'].nunique()),
       "Number of repeated hashes: {}\n".format(len(repeat_hashes)),
       "As percentage: {:.5f}\n".format(len(repeat_hashes)/logs['payload_hash'].nunique()),
       "Each repeated hash is repeated approximately {:.5f}\n".format(df.shape[0]/len(repeat_hashes)))

# Approximately 7.9 hashes in 10,000 are repeated.

df.drop(['ip_version', 'protocol'], axis=1, inplace=True)
df.columns

df['payload_hash'].value_counts().describe()

# Let's take the most repeated hash and perform analysis on how frequently it is repeated, its characteristics etc.

df1 = df[df['payload_hash'] == vc.index[0]]
df1.shape

df1['s_ip'].nunique()

df2 = df1[df1['d_ip'] == '192.168.1.6']

# Gaps (in seconds) between consecutive sightings of this hash at this destination.
# Fixes relative to the first draft:
#  * sort by timestamp first — row order after concat/filter is not guaranteed
#    chronological, so diffs could be negative;
#  * use total_seconds() instead of .dt.seconds, which returns only the seconds
#    *component* of each timedelta (wrong for gaps >= 1 day and negative deltas).
df2 = df2.sort_values('timestamp')
seconds = (df2['timestamp'] - df2['timestamp'].shift(1)).dt.total_seconds()

plt.figure(figsize=(20, 6))
# The first diff is NaN (no predecessor); drop it so the boxplot stats are valid.
plt.boxplot(seconds.dropna())
plt.show()

seconds.describe()
analysis/repeat_hash_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nstaudac/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/Nicholas_Staudacher_LS_DS_121_Join_and_Reshape_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="pmU5YUal1eTZ" # _Lambda School Data Science_ # # # Join and Reshape datasets # # Objectives # - concatenate data with pandas # - merge data with pandas # - understand tidy data formatting # - melt and pivot data with pandas # # Links # - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) # - [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data) # - Combine Data Sets: Standard Joins # - Tidy Data # - Reshaping Data # - Python Data Science Handbook # - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append # - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join # - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping # - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables # # Reference # - Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html) # - Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html) # + [markdown] colab_type="text" id="Mmi3J5fXrwZ3" # ## Download data # # We’ll work with a dataset of [3 Million Instacart Orders, Open 
Sourced](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)! # + colab_type="code" id="K2kcrJVybjrW" outputId="f0b952c6-109a-4aeb-c5a4-ee42a6a35f26" colab={"base_uri": "https://localhost:8080/", "height": 204} # !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz # + colab_type="code" id="kqX40b2kdgAb" outputId="4d2030a8-1321-4472-f8ab-7536005517b0" colab={"base_uri": "https://localhost:8080/", "height": 238} # !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz # + colab_type="code" id="YbCvZZCBfHCI" outputId="3695e35e-a037-4ec2-fea9-cbcc33f82e87" colab={"base_uri": "https://localhost:8080/", "height": 34} # %cd instacart_2017_05_01 # + id="etshR5kpvWOj" colab_type="code" outputId="59a4e951-ec68-4a6d-d899-8a6946877619" colab={"base_uri": "https://localhost:8080/", "height": 119} # !ls -lh *.csv # + [markdown] id="RcCu3Tlgv6J2" colab_type="text" # # Join Datasets # + [markdown] colab_type="text" id="RsA14wiKr03j" # ## Goal: Reproduce this example # # The first two orders for user id 1: # + colab_type="code" id="vLqOTMcfjprg" outputId="0c4dccec-33d4-457c-b6f1-49de6f80e93f" colab={"base_uri": "https://localhost:8080/", "height": 312} from IPython.display import display, Image url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png' example = Image(url=url, width=600) display(example) # + [markdown] colab_type="text" id="nPwG8aM_txl4" # ## Load data # # Here's a list of all six CSV filenames # + colab_type="code" id="Ksah0cOrfdJQ" outputId="6d0f6bd7-8805-4098-ddac-ea6a65688fb9" colab={"base_uri": "https://localhost:8080/", "height": 119} # !ls -lh *.csv # + [markdown] colab_type="text" id="AHT7fKuxvPgV" # For each CSV # - Load it with pandas # - Look at the dataframe's shape # - Look at its head (first rows) # - `display(example)` # - Which columns does it have in common with the example we want to reproduce? 
# + [markdown] colab_type="text" id="cB_5T6TprcUH" # ### aisles # + id="_YyUJxaqcWiF" colab_type="code" colab={} import pandas as pd # + colab_type="code" id="JB3bvwSDK6v3" outputId="4edb9559-3c24-4444-aa64-c6a6b827da5d" colab={"base_uri": "https://localhost:8080/", "height": 221} aisles = pd.read_csv('aisles.csv') print(aisles.shape) aisles.head() # + id="I3EanX7GcpVt" colab_type="code" outputId="ec5f818a-8813-4e94-9c33-476a2d60fb4c" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + [markdown] id="_FBU6jofd7Co" colab_type="text" # Aisles doesn't have any data we need. # + [markdown] colab_type="text" id="9-GrkqM6rfXr" # ### departments # + id="yxFd5n20yOVn" colab_type="code" outputId="cb56f027-a506-4cb7-e969-2ee009499db3" colab={"base_uri": "https://localhost:8080/", "height": 221} departments = pd.read_csv('departments.csv') print(departments.shape) departments.head() # + [markdown] colab_type="text" id="VhhVcn9kK-nG" # ### order_products__prior # + id="86rIMNFSzKaG" colab_type="code" outputId="1a06cdf7-d10d-446e-a785-7210368664cb" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products__prior = pd.read_csv('order_products__prior.csv') print(order_products__prior.shape) order_products__prior.head() # + id="qMXMA-Y0fAm_" colab_type="code" outputId="09ac1f10-d639-48f1-9fcb-7b0ba58de175" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + [markdown] id="Kfpy2m8tfCuy" colab_type="text" # We need: # # - order id # - product id # - add to cart order # + [markdown] colab_type="text" id="HVYJEKJcLBut" # ### order_products__train # + id="xgwSUCBk6Ciy" colab_type="code" outputId="a465f71d-3cec-4636-a154-62cde32d1729" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products__train = pd.read_csv('order_products__train.csv') print(order_products__train.shape) order_products__train.head() # + [markdown] id="_4k8m74xf_XS" colab_type="text" # We need: # # - order id # - product 
id # - add to cart order # + [markdown] colab_type="text" id="LYPrWUJnrp7G" # ### orders # + id="UfPRTW5w128P" colab_type="code" outputId="88b7597f-0516-4071-d8fa-0ae3efc04840" colab={"base_uri": "https://localhost:8080/", "height": 221} orders = pd.read_csv('orders.csv') print(orders.shape) orders.head() # + id="xrR7DH74gHf4" colab_type="code" outputId="5327d616-e22d-4332-960b-85e9500fc991" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + [markdown] id="JZXX7KSqgOKN" colab_type="text" # we need: # # - order id # - user id # - order number # - order dow # - order hour of day # + [markdown] colab_type="text" id="nIX3SYXersao" # ### products # + id="3BKG5dxy2IOA" colab_type="code" outputId="4b1fad06-c8b8-4f24-90d9-d4190028c9e2" colab={"base_uri": "https://localhost:8080/", "height": 221} products = pd.read_csv('products.csv') print(products.shape) products.head() # + id="QGYmNyDlgl4P" colab_type="code" outputId="e3c731e3-0119-4d0b-e1fd-7fe6a0f1be54" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + [markdown] id="tSToqHrLgpMt" colab_type="text" # We need: # - product id # - product name # + [markdown] colab_type="text" id="cbHumXOiJfy2" # ## Concatenate order_products__prior and order_products__train # + colab_type="code" id="TJ23kqpAY8Vv" outputId="2e2f77b2-7f8a-4c1b-e445-9daa1b80404c" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products = pd.concat([order_products__prior, order_products__train]) print(order_products.shape) order_products.head() # + id="_NfAn41hiQ89" colab_type="code" outputId="adc1593b-5f65-4270-edc6-ec11387d393e" colab={"base_uri": "https://localhost:8080/", "height": 34} order_products__prior.shape, order_products__train.shape, order_products.shape # + id="khgkcCkwi9Yp" colab_type="code" colab={} # assert call to see if previous work is correct, will throw an exception error message if incorrect assert (len(order_products)) == len(order_products__prior) + 
len(order_products__train) # + [markdown] colab_type="text" id="Z1YRw5ypJuv2" # ## Get a subset of orders — the first two orders for user id 1 # + [markdown] id="eJ9EixWs6K64" colab_type="text" # From `orders` dataframe: # - user_id # - order_id # - order_number # - order_dow # - order_hour_of_day # + id="XwWjYrgojixd" colab_type="code" outputId="6ec098f2-d0f2-4a33-a78f-4db2cdfb7220" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + id="4stS4kAxkDnQ" colab_type="code" outputId="505ebae4-3bbe-42ee-c83b-8f7180283a09" colab={"base_uri": "https://localhost:8080/", "height": 204} condition = order_products['order_id'] == 2539329 order_products[condition] # + id="PeKCbkqslEUf" colab_type="code" outputId="fc7c9757-234c-4df3-a2d3-c5114a5054c7" colab={"base_uri": "https://localhost:8080/", "height": 111} condition = (orders['user_id'] == 1) & (orders['order_number'] <= 2) columns = ['order_id', 'user_id', 'order_number', 'order_dow', 'order_hour_of_day'] subset = orders.loc[condition, columns] subset # + [markdown] colab_type="text" id="3K1p0QHuKPnt" # ## Merge dataframes # + [markdown] id="4MVZ9vb1BuO0" colab_type="text" # Merge the subset from `orders` with columns from `order_products` # + id="3lajwEE86iKc" colab_type="code" outputId="9eb27478-3004-4e35-973b-200ea904145f" colab={"base_uri": "https://localhost:8080/", "height": 390} # 'order_id', 'product_id', 'add_to_cart_order']) columns = ['order_id', 'product_id', 'add_to_cart_order'] merged = pd.merge(subset, order_products[columns], how='inner', on='order_id') merged # + id="cMPVnzjeoIn1" colab_type="code" outputId="46517164-5992-4e15-ef65-7ff9e0eec54a" colab={"base_uri": "https://localhost:8080/", "height": 312} display(example) # + [markdown] id="i1uLO1bxByfz" colab_type="text" # Merge with columns from `products` # + id="D3Hfo2dkJlmh" colab_type="code" outputId="f254f67c-0e2d-4471-cb49-4b36e8500dc7" colab={"base_uri": "https://localhost:8080/", "height": 390} final = 
pd.merge(merged, products[['product_id', 'product_name']], how='inner', on='product_id') final # + id="De2447d-pDQs" colab_type="code" outputId="9810b863-05d3-4ff7-f70e-798b9901ee4b" colab={"base_uri": "https://localhost:8080/", "height": 390} final_2 = final.sort_values(by=['order_number', 'add_to_cart_order']) final_2.columns = [column.replace(('_'), ' ') for column in final] final_2 # + [markdown] id="dDfzKXJdwApV" colab_type="text" # # Reshape Datasets # + [markdown] id="4stCppWhwIx0" colab_type="text" # ## Why reshape data? # # #### Some libraries prefer data in different formats # # For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always). # # > "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format ia alternately called “long-form” or “tidy” data and is described in detail by <NAME>ickham. The rules can be simply stated: # # > - Each variable is a column # - Each observation is a row # # > A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot." # # #### Data science is often about putting square pegs in round holes # # Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling! 
# + [markdown] id="79KITszBwXp7" colab_type="text" # ## Hadley Wickham's Examples # # From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html) # + id="Jna5sk5FwYHr" colab_type="code" colab={} # %matplotlib inline import pandas as pd import numpy as np import seaborn as sns table1 = pd.DataFrame( [[np.nan, 2], [16, 11], [3, 1]], index=['<NAME>', '<NAME>', '<NAME>'], columns=['treatmenta', 'treatmentb']) table2 = table1.T # + [markdown] id="eWe5rpI9wdvT" colab_type="text" # "Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild. # # The table has two columns and three rows, and both rows and columns are labelled." # + id="SdUp5LbcwgNK" colab_type="code" outputId="5538a94d-007a-4525-c4ec-d72cba357dec" colab={"base_uri": "https://localhost:8080/", "height": 142} table1 # + [markdown] id="SaEcDmZhwmon" colab_type="text" # "There are many ways to structure the same underlying data. # # Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different." # + id="SwDVoCj5woAn" colab_type="code" outputId="1c684255-4a6b-4cd7-fb26-91ac28354d60" colab={"base_uri": "https://localhost:8080/", "height": 111} table2 # + [markdown] id="k3ratDNbwsyN" colab_type="text" # "Table 3 reorganises Table 1 to make the values, variables and obserations more clear. # # Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable." # # | name | trt | result | # |--------------|-----|--------| # | <NAME> | a | - | # | <NAME> | a | 16 | # | <NAME> | a | 3 | # | <NAME> | b | 2 | # | <NAME> | b | 11 | # | <NAME> | b | 1 | # + [markdown] id="WsvD1I3TwwnI" colab_type="text" # ## Table 1 --> Tidy # # We can use the pandas `melt` function to reshape Table 1 into Tidy format. 
# + id="eDjZ2HPLv1j7" colab_type="code" outputId="122a4dd8-34b5-4161-ae0d-99c74a9c773e" colab={"base_uri": "https://localhost:8080/", "height": 142} table1 = table1.reset_index() table1 # + id="S48tKmC46veF" colab_type="code" outputId="f7e5610b-8ecb-4b9b-9f52-f1fc585888e7" colab={"base_uri": "https://localhost:8080/", "height": 142} table1['index'].value_counts().reset_index() # + id="qUU2j95pwSCS" colab_type="code" outputId="a5edf7c0-8e91-466f-a7dd-ed794c26fd4c" colab={"base_uri": "https://localhost:8080/", "height": 235} tidy = table1.melt(id_vars='index') tidy.columns = ['name', 'trt', 'result'] tidy # + [markdown] id="Ck15sXaJxPrd" colab_type="text" # ## Table 2 --> Tidy # + id="k2Qn94RIxQhV" colab_type="code" outputId="0c7ee6f4-5ee8-46ec-a6f5-7f323e078215" colab={"base_uri": "https://localhost:8080/", "height": 111} ##### LEAVE BLANK --an assignment exercise ##### #creating table2 from table1 by transposing table1 table2 = table2.reset_index() table2 # + id="b1voZUuE3bk5" colab_type="code" outputId="ab571515-446b-4e71-bc23-d3a31c7b82f9" colab={"base_uri": "https://localhost:8080/", "height": 235} tidy = table2.melt(id_vars='index') tidy.columns = ['name', 'trt', 'result'] tidy # + [markdown] id="As0W7PWLxea3" colab_type="text" # ## Tidy --> Table 1 # # The `pivot_table` function is the inverse of `melt`. 
# + id="CdZZiLYoxfJC" colab_type="code" outputId="42120f95-6838-414a-a31e-9d3bd5adf631" colab={"base_uri": "https://localhost:8080/", "height": 142} table1 # + id="EhHw6Pv7x7ZU" colab_type="code" outputId="5600fbcc-6d81-420b-e9f2-460a1263deec" colab={"base_uri": "https://localhost:8080/", "height": 235} tidy # + id="w2gjKPVgx82T" colab_type="code" outputId="c181069a-a1b5-450b-b7b7-4dec89e168ed" colab={"base_uri": "https://localhost:8080/", "height": 142} tidy.pivot_table(index='name', columns='trt', values='result') # + [markdown] id="3GeAKoSZxoPS" colab_type="text" # ## Tidy --> Table 2 # + id="W2jjciN2xk9r" colab_type="code" outputId="4970492b-fe05-4020-d5a3-f50fb6a3d297" colab={"base_uri": "https://localhost:8080/", "height": 142} ##### LEAVE BLANK --an assignment exercise ##### #use tidy data to create table2? tidy_table = tidy.T tidy_table # + [markdown] id="jr0jQy6Oxqi7" colab_type="text" # # Seaborn example # # The rules can be simply stated: # # - Each variable is a column # - Each observation is a row # # A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot." 
# + id="kWo3FIP9xuKo" colab_type="code" outputId="1fb88401-c73c-402f-ebdf-caa9d4cfb8c1" colab={"base_uri": "https://localhost:8080/", "height": 153} sns.catplot(x='trt', y='result', col='name', kind='bar', data=tidy, height=2); # + [markdown] id="cIgT41Rxx4oj" colab_type="text" # ## Now with Instacart data # + id="Oydw0VvGxyDJ" colab_type="code" colab={} products = pd.read_csv('products.csv') order_products = pd.concat([pd.read_csv('order_products__prior.csv'), pd.read_csv('order_products__train.csv')]) orders = pd.read_csv('orders.csv') # + [markdown] id="6p-IsG0jyXQj" colab_type="text" # ## Goal: Reproduce part of this example # # Instead of a plot with 50 products, we'll just do two — the first products from each list # - Half And Half Ultra Pasteurized # - Half Baked Frozen Yogurt # + id="Rs-_n9yjyZ15" colab_type="code" outputId="c9498f50-5b14-41a5-d3b8-dd7c379a2063" colab={"base_uri": "https://localhost:8080/", "height": 383} from IPython.display import display, Image url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png' example = Image(url=url, width=600) display(example) # + [markdown] id="Vj5GR7I4ydBg" colab_type="text" # So, given a `product_name` we need to calculate its `order_hour_of_day` pattern. # + [markdown] id="Vc9_s7-LyhBI" colab_type="text" # ## Subset and Merge # # One challenge of performing a merge on this data is that the `products` and `orders` datasets do not have any common columns that we can merge on. Due to this we will have to use the `order_products` dataset to provide the columns that we will use to perform the merge. 
# + id="W1yHMS-OyUTH" colab_type="code" colab={} product_names = ['Half Baked Frozen Yogurt', 'Half and Half Ultra Pasteurized'] # + id="zOY5AxtIzM3-" colab_type="code" outputId="26a6e284-e8df-41d2-ce04-852bc03c5a74" colab={"base_uri": "https://localhost:8080/", "height": 34} products.columns.tolist() # + id="Hp2vz1rCzM27" colab_type="code" outputId="b9d0ef49-6a29-41bf-fd14-6f3882a936d7" colab={"base_uri": "https://localhost:8080/", "height": 136} orders.columns.tolist() # + id="YWg-aYVWzVu9" colab_type="code" outputId="f844ecd6-323b-4cb0-9f9c-39c05e3e617f" colab={"base_uri": "https://localhost:8080/", "height": 34} order_products.columns.tolist() # + id="szgTFNnWzcg4" colab_type="code" colab={} merged = (products[['product_id', 'product_name']] .merge(order_products[['order_id', 'product_id']]) .merge(orders[['order_id', 'order_hour_of_day']])) # + id="eGDNCnCW0RuV" colab_type="code" outputId="b75aa865-6ae9-44cd-d03e-5fa41ff15c10" colab={"base_uri": "https://localhost:8080/", "height": 204} merged.head() # + id="gGFJxiWF0DCI" colab_type="code" outputId="5921a115-f78d-43b3-d472-cb5b7ae4f10f" colab={"base_uri": "https://localhost:8080/", "height": 34} products.shape, order_products.shape, merged.shape # + id="zFBUKRbd0U44" colab_type="code" outputId="e33bf7d2-c376-49a4-d0d7-00d3ac216ab2" colab={"base_uri": "https://localhost:8080/", "height": 1000} # condition = ((merged['product_name'] == 'Half Baked Frozen Yogurt') (merged['product_name'] == 'Half and Half Ultra Pasteurized') # merged = merged[condition] product_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized'] condition = merged['product_name'].isin(product_names) subset = merged[condition] subset # + [markdown] id="UvhcadjFzx0Q" colab_type="text" # ## 4 ways to reshape and plot # + [markdown] id="aEE_nCWjzz7f" colab_type="text" # ### 1. 
value_counts # + id="vTL3Cko87VL-" colab_type="code" colab={} froyo = subset[subset['product_name']=='Half Baked Frozen Yogurt'] cream = subset[subset['product_name']=='Half And Half Ultra Pasteurized'] # + id="aU6Pvc321N6f" colab_type="code" outputId="ee099d64-0169-421d-e398-6c3290529b44" colab={"base_uri": "https://localhost:8080/", "height": 269} (cream['order_hour_of_day'] .value_counts(normalize=True) .sort_index() .plot()) (froyo['order_hour_of_day'] .value_counts(normalize=True) .sort_index() .plot()); # + [markdown] id="tMSd6YDj0BjE" colab_type="text" # ### 2. crosstab # + id="Slu2bWYK0CZD" colab_type="code" outputId="b280953c-9f68-4cfa-bc05-f2bf32f1aa88" colab={"base_uri": "https://localhost:8080/", "height": 284} (pd.crosstab(subset['order_hour_of_day'], subset['product_name'], normalize='columns') * 100).plot(); # + [markdown] id="ICjPVqO70Hv8" colab_type="text" # ### 3. Pivot Table # + id="LQtMNVa10I_S" colab_type="code" outputId="ad2b240c-9ade-461a-9542-82baee4a5879" colab={"base_uri": "https://localhost:8080/", "height": 284} subset.pivot_table(index='order_hour_of_day', columns='product_name', values='order_id', aggfunc=len).plot(); # + [markdown] id="7A9jfBVv0M7e" colab_type="text" # ### 4. melt # + id="2AmbAKm20PAg" colab_type="code" outputId="d60b7f10-d6d6-4da8-dfe0-ef4323287fcb" colab={"base_uri": "https://localhost:8080/", "height": 369} table = pd.crosstab(subset['order_hour_of_day'], subset['product_name'], normalize=True) melted = (table .reset_index() .melt(id_vars='order_hour_of_day') .rename(columns={ 'order_hour_of_day': 'Hour of Day Ordered', 'product_name': 'Product', 'value': 'Percent of Orders by Product' })) sns.relplot(x='Hour of Day Ordered', y='Percent of Orders by Product', hue='Product', data=melted, kind='line'); # + [markdown] colab_type="text" id="kAMtvSQWPUcj" # # Assignment # # ## Join Data Section # # These are the top 10 most frequently ordered products. How many times was each ordered? # # 1. Banana # 2. 
Bag of Organic Bananas # 3. Organic Strawberries # 4. Organic Baby Spinach # 5. Organic Hass Avocado # 6. Organic Avocado # 7. Large Lemon # 8. Strawberries # 9. Limes # 10. Organic Whole Milk # # First, write down which columns you need and which dataframes have them. # # Next, merge these into a single dataframe. # # Then, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products. # # ## Reshape Data Section # # - Replicate the lesson code # - Complete the code cells we skipped near the beginning of the notebook # - Table 2 --> Tidy # - Tidy --> Table 2 # - Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960. # + id="ok5w2NrK5dvk" colab_type="code" outputId="43c1ea64-24d4-416a-e823-299b5d7aa7ce" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products__prior = pd.read_csv('order_products__prior.csv') print(order_products__prior.shape) order_products__prior.head() # + [markdown] id="cVc3bneAMDKQ" colab_type="text" # Contains Add_to_cart_order, product id # + id="Mw3DfN0K5ekk" colab_type="code" outputId="73def380-5ecd-444e-c37e-184911295397" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products__train = pd.read_csv('order_products__train.csv') print(order_products__train.shape) order_products__train.head() # + [markdown] id="9Z9M_bP6NWjI" colab_type="text" # Contains Add_to_cart_order, product id # + id="-TYsULhyNKpx" colab_type="code" outputId="22e93a67-6ec3-4d95-a246-2a2cbec32c9a" colab={"base_uri": "https://localhost:8080/", "height": 221} order_products = pd.concat([order_products__prior, order_products__train]) print(order_products.shape) order_products.head() # + [markdown] id="lLhN0NRBNXkX" colab_type="text" # Contains 
Add_to_cart_order, product id # + id="5xqrXudm5e1D" colab_type="code" outputId="2ed68bd2-fcbc-4b6a-e1fa-565aac379aae" colab={"base_uri": "https://localhost:8080/", "height": 221} products = pd.read_csv('products.csv') print(products.shape) products.head() # + [markdown] id="f-eTy9huMRSR" colab_type="text" # contacts product id, product name # + id="OoarZwfON3Lo" colab_type="code" outputId="cd57fdcb-63e3-475e-c6b4-02910d2d405c" colab={"base_uri": "https://localhost:8080/", "height": 1000} condition = (products['product_id']) & (products['product_name']) columns = ['product_id', 'product_name'] subset = products[columns] subset # + id="-j8ca7HFMYkq" colab_type="code" outputId="1231df92-e791-4df7-97a2-b0e7b76192df" colab={"base_uri": "https://localhost:8080/", "height": 1000} # merging dataframes together columns = ['order_id', 'product_id', 'add_to_cart_order'] merged = pd.merge(subset, order_products[columns], how='inner', on='product_id') merged # + id="vYplfUqqMuqa" colab_type="code" outputId="3f5880c2-37ab-4069-837a-ef67094eb612" colab={"base_uri": "https://localhost:8080/", "height": 204} merged.head() # + id="rQYKh1zyP5NG" colab_type="code" outputId="6ebdc923-4674-4b5f-db8f-8d0fc49d0cb0" colab={"base_uri": "https://localhost:8080/", "height": 1000} # producing list of top ten items ordered product_name = ['Banana', 'Bag of Organic Bananas', 'Organic Strawberries', 'Organic Baby Spinach', 'Organic Hass Avocado', 'Organic Avocado', 'Large Lemon', 'Strawberries', 'Limes', 'Organic Whole Milk'] condition = merged['product_name'].isin(product_name) subset = merged[condition] subset # + id="ZlEAb-8Mhqya" colab_type="code" outputId="5b884856-4078-4a49-c9b7-13778822e78a" colab={"base_uri": "https://localhost:8080/", "height": 85} subset['product_name'].unique() # + id="rDHdeddDQfQi" colab_type="code" outputId="bbe81b74-0cff-4cb3-a97c-256b8bd3fcad" colab={"base_uri": "https://localhost:8080/", "height": 68} # setting up value counts of each item in top ten 
organic_whole_milk = subset['product_name']=='Organic Whole Milk' organic_whole_milk.value_counts() # + id="tfNh-c3RmbQX" colab_type="code" outputId="23f36f87-f7df-4473-96b5-21dd0d2ee894" colab={"base_uri": "https://localhost:8080/", "height": 68} organic_avocado = subset['product_name']=='Organic Avocado' organic_avocado.value_counts() # + id="6HCpWI6cl08w" colab_type="code" outputId="f3515626-55d0-4cb1-82b9-cd2102c0b9b9" colab={"base_uri": "https://localhost:8080/", "height": 68} banana = subset['product_name']=='Banana' banana.value_counts() # + id="bqgEDAH3l6bx" colab_type="code" outputId="f2f4815e-a4df-4ca0-f6c0-4086ba8a0cd7" colab={"base_uri": "https://localhost:8080/", "height": 68} large_lemon = subset['product_name']=='Large Lemon' large_lemon.value_counts() # + id="z7Ulfg77l_QD" colab_type="code" outputId="64c1f89d-ca7e-430f-b41c-14c25a4ab8d0" colab={"base_uri": "https://localhost:8080/", "height": 68} bag_of_organic_bananas = subset['product_name']=='Bag of Organic Bananas' bag_of_organic_bananas.value_counts() # + id="GJQ9KJ1OcVCL" colab_type="code" outputId="047965f1-ca3f-4385-d27b-64068b089702" colab={"base_uri": "https://localhost:8080/", "height": 68} organic_strawberries = subset['product_name']=='Organic Strawberries' organic_strawberries.value_counts() # + id="e0YspyAVeQWg" colab_type="code" outputId="018b7576-ece9-4f36-ff79-f4c992cc681c" colab={"base_uri": "https://localhost:8080/", "height": 68} organic_baby_spinach = subset['product_name']=='Organic Baby Spinach' organic_baby_spinach.value_counts() # + id="xrfNVXvGhZa7" colab_type="code" outputId="71767d77-247a-4730-d9c7-fd0f562dde50" colab={"base_uri": "https://localhost:8080/", "height": 68} organic_hass_avocado = subset['product_name']=='Organic Hass Avocado' organic_hass_avocado.value_counts() # + id="Zwn8WjnthhE8" colab_type="code" outputId="4f7e28f7-8fd3-4ea2-c3e9-6fbc1d264ccf" colab={"base_uri": "https://localhost:8080/", "height": 68} limes = subset['product_name']=='Limes' 
limes.value_counts() # + id="640Y0E0CiLai" colab_type="code" outputId="b7127589-6620-444c-8790-7c0d1bc2797c" colab={"base_uri": "https://localhost:8080/", "height": 68} strawberries = subset['product_name']=='Strawberries' strawberries.value_counts() # + id="fgxulJQq0uLw" colab_type="code" colab={} flights = sns.load_dataset('flights') # + id="1qKc88WI0up-" colab_type="code" outputId="8a2e651f-aa72-4dbe-db75-1e2401e9bcab" colab={"base_uri": "https://localhost:8080/", "height": 204} ##### YOUR CODE HERE ##### flights.head() # + id="qyqUWOlQ5ByJ" colab_type="code" outputId="edc3d568-1235-4989-944c-0e10681b3147" colab={"base_uri": "https://localhost:8080/", "height": 266} flights_pivot = flights.pivot_table(index='year', columns='month') flights_pivot.head() # + id="XZ-zwaPR5TxH" colab_type="code" outputId="fa9f2533-bda4-4992-f3b8-db69a2bee1cf" colab={"base_uri": "https://localhost:8080/", "height": 266} flights_pivot.tail() # + [markdown] id="mnOuqL9K0dqh" colab_type="text" # ## Join Data Stretch Challenge # # The [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of "**Popular products** purchased earliest in the day (green) and latest in the day (red)." # # The post says, # # > "We can also see the time of day that users purchase specific products. # # > Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening. # # > **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**" # # Your challenge is to reproduce the list of the top 25 latest ordered popular products. # # We'll define "popular products" as products with more than 2,900 orders. 
# # ## Reshape Data Stretch Challenge # # _Try whatever sounds most interesting to you!_ # # - Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product" # - Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases" # - Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis) # - Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
Nicholas_Staudacher_LS_DS_121_Join_and_Reshape_Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 1. Hierarchical Clustering

# +
# import libraries

# linear algebra
import numpy as np

# data processing
import pandas as pd

# math library (used for ceil when sizing the subplot grid)
import math

# data visualization
from matplotlib import pyplot as plt

# toy datasets (blobs, circles)
from sklearn import datasets
# -

# scikit-learn hierarchical (agglomerative) clustering
from sklearn.cluster import AgglomerativeClustering

# SciPy hierarchical clustering (used for the dendrograms)
from scipy.cluster import hierarchy

# ## 1.1 Hierarchical Clustering

# Below, inspect the result of hierarchical clustering while iteratively varying the
# number of clusters. Also try other linkage functions.
#
# Feel free to change or add new points to the example dataset below.

# +
# Dataset: 12 hand-picked 2-D points
tmp_data = np.array([[1,1],[2,2],[2,3],[4,4],
                     [4,2],[6,6],[10,9],[7,7],
                     [8,8],[7,9],[10,10],[14,2]])

plt.scatter(tmp_data[:,0], tmp_data[:,1], s=150)
plt.show()
# +
# Try different linkage functions
# {"ward", "complete", "average"}
linkage = 'ward'

# Hierarchical clustering model (n_clusters is set inside the loop below)
ag_model = AgglomerativeClustering(linkage=linkage)

# Number of columns in the subplot grid
plot_col = 3

n_rows, n_col = tmp_data.shape

fig, ax = plt.subplots(nrows=math.ceil(len(tmp_data)/plot_col), ncols=plot_col,figsize=(20,20))

count = 0
# One subplot per cluster count, from n points (every point its own cluster) down to 1
for n_clusters in range(len(tmp_data),0,-1):
    # index counts up 0,1,2,... as n_clusters counts down; used for the column position
    index = abs(n_clusters - len(tmp_data))
    # Set the number of clusters for this iteration
    ag_model.n_clusters = n_clusters
    # Fit the model
    ag_model = ag_model.fit(tmp_data)
    if plot_col == 1:
        # 1-column grid: ax is one-dimensional
        ax[count].scatter(tmp_data[:,0], tmp_data[:,1], c=ag_model.labels_, s=150)
        ax[count].set_title("Qt. Clusters: " + str(n_clusters))
    else:
        ax[count, (index)%plot_col].scatter(tmp_data[:,0], tmp_data[:,1], c=ag_model.labels_, s=150)
        ax[count, (index)%plot_col].set_title("Qt. Clusters: " + str(n_clusters))
    # Move to the next subplot row after filling a full row of columns
    if (index+1) % plot_col == 0:
        count += 1
plt.show()
# -

# # 1.2 Dendrogram
# A dendrogram is a type of tree diagram that shows the relationships between sets of
# similar data, i.e. hierarchical clustering. They are often used in biology to show
# grouping between genes or samples, but they can represent any kind of grouped data.
# > https://www.statisticshowto.datasciencecentral.com/hierarchical-clustering/

# Use the code below to build the dendrograms of the data used in the cells above.
# Feel free to go back and try other data distributions and linkage functions.

# +
# Uses the linkage method selected above
# to build the dendrogram
if (linkage == 'average'):
    Z = hierarchy.average(tmp_data)
elif (linkage == 'complete'):
    Z = hierarchy.complete(tmp_data)
elif (linkage == 'ward'):
    Z = hierarchy.ward(tmp_data)

plt.figure()
plt.title("Dendrograma - linkage: " + str(linkage))
dn = hierarchy.dendrogram(Z)
# -

# A permutation test can be run to validate the chosen number of clusters, i.e. to
# check whether there really is a non-random tendency for the objects to group together.
#
# The technique involves statistical tests and can be studied in the following material:
# http://www.econ.upf.edu/~michael/stanford/maeb7.pdf

# ## 1.3 Example 1 - Blobs with varied variances
# Observe the distribution of the dataset below.

# +
# blobs with varied variances
n_samples = 1500
random_state = 170
varied = datasets.make_blobs(n_samples=n_samples,
                             cluster_std=[1.0, 2.5, 0.5],
                             random_state=random_state)
dataset_varied = varied[0]

plt.scatter(dataset_varied[:,0], dataset_varied[:,1])
plt.show()
# -

# Use the K-means algorithm to separate the clusters.
#
# You can import the K-means implementation you wrote! To import a function from one
# notebook into another, install the nbimporter library:
# - pip install nbimporter

import nbimporter
from Notebook_KMeans import KMeans

# +
### CODE HERE ###
# -

# Does the data look correctly clustered? Try the same procedure with the
# hierarchical clustering algorithm
# > https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html

# +
### CODE HERE ###
# -

# Show and analyze the dendrogram of the example above.

# +
### CODE HERE ###
# -

# What is your conclusion?

# "Write here"

# ## 1.4 Example 2 - Noisy Circles

# +
# Generate the dataset
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05)
dataset_circles = noisy_circles[0]

# Show the data
plt.scatter(dataset_circles[:,0], dataset_circles[:,1])
plt.show()
# -

# Use the K-means algorithm to separate the clusters.

# +
### CODE HERE ###
# -

# Does the data look correctly clustered? Try the same procedure with the hierarchical
# clustering algorithm; change the parameters if necessary.

# +
### CODE HERE ###
# -

# Show and analyze the dendrogram of the example above.

# +
### CODE HERE ###
# -

# What is your conclusion?

# "Write here"

# # 2. DBSCAN

# +
# Anisotropically distributed data (blobs sheared by a linear transformation)
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
dataset = np.dot(X, transformation)

# Show the data
plt.scatter(dataset[:,0], dataset[:,1])
plt.show()
# -

# Cluster the data using hierarchical clustering

# +
### CODE HERE ###
# -

# Cluster the data using the DBSCAN algorithm.
# > https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html

# +
### CODE HERE ###
# -

# What is your conclusion?

# "Write here"
2019/09-clustering/Notebook_Clustering_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # US - Baby Names # ### Introduction: # # We are going to use a subset of [US Baby Names](https://www.kaggle.com/kaggle/us-baby-names) from Kaggle. # In the file it will be names from 2004 until 2014 # # # ### Step 1. Import the necessary libraries import pandas as pd # ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv). # ### Step 3. Assign it to a variable called baby_names. url = "https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/06_Stats/US_Baby_Names/US_Baby_Names_right.csv" baby_names = pd.read_csv(url) baby_names.head() # ### Step 4. See the first 10 entries baby_names.head(10) # ### Step 5. Delete the column 'Unnamed: 0' and 'Id' baby_names.drop(['Unnamed: 0', 'Id'], axis=1, inplace=True) baby_names.columns # ### Step 6. Is there more male or female names in the dataset? # More female names baby_names['Gender'].value_counts() # ### Step 7. Group the dataset by name and assign to names # Careful here, you needed to see that each row contains count of that name in a year # so you needed to sum # be careful to drop the year column since it is numeric, sum will add it. names = baby_names.drop("Year", axis=1).groupby("Name").sum() names # ### Step 8. How many different names exist in the dataset? # 17632 len(names) # ### Step 9. What is the name with most occurrences? # Riley names.idxmax() # ### Step 10. How many different names have the least occurrences? # 3682 (names == names.min()).sum() # ### Step 11. What is the median name occurrence? names.median() # ### Step 12. What is the standard deviation of names? names.std() # ### Step 13. Get a summary with the mean, min, max, std and quartiles. 
names.describe() # + # Notice that we would apply all the methods directly on the df since there was only one column.
06_Stats/US_Baby_Names/Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alinesoares653/aula-dh-200921/blob/master/M2A30_PRATICA_INDEPENDENTE_Intro_Regressao_Linear.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="HXCJlrSOyhgz" # # PRACTICA INDEPEDIENTE: Introdução à Regressão Linear - Solução. # + [markdown] id="ZIuhAL4JdZQm" # ## Introdução: # # #### Vamos trabalhar com um conjunto de dados sobre aluguéis de bicicletas que foi utilizado em um concurso de Kaggle # # # #### São fornecidos dados sobre aluguéis por hora que abrangem dois anos. O conjunto de treinamento abrange os primeiros 19 dias de cada mês e o conjunto de teste vai do dia 20 até o fim do mês. 
**_Queremos projetar o número total de bicicletas alugadas durante cada hora coberta pelo conjunto do teste, utilizando apenas as informações disponíveis no teste de treinamento._** # # **CAMPOS DO SET** # # **datetime** - hourly date + timestamp # # **season** - 1 = spring, 2 = summer, 3 = fall, 4 = winter # # **holiday** - whether the day is considered a holiday # # **workingday** - whether the day is neither a weekend nor holiday # # **weather** - # # 1: Clear, Few clouds, Partly cloudy, Partly cloudy <br/> # 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist <br/> # 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds,Light Rain + Scattered clouds <br/> # 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog <br/> # **temp** - temperature in Celsius # # **atemp** - "feels like" temperature in Celsius # # **humidity** - relative humidity # # **windspeed** - wind speed # # **casual** - number of non-registered user rentals initiated # # **registered** - number of registered user rentals initiated # # **count** - number of total rentals # # + [markdown] id="z5jtkULIdZQn" # #### Exercício 1: Faça a importação das bibliotecas `numpy` e `padas`, receba o arquivo `'bikeshare.csv'`, tomando a coluna `datetime` como índice e então faça a renomeação da coluna: `'count'` $\rightarrow$ `'total'`. 
# + id="EZjhGeiEEEv1" outputId="5f0371f8-c90d-4c9d-a9a5-af0ec12a9001" colab={"base_uri": "https://localhost:8080/", "height": 411} import pandas as pd import numpy as np bikes = pd.read_csv('bikeshare.csv') #, index_col= ['datetime']) bikes.rename(columns = {'count': 'total'}, inplace = True) bikes # + id="bY5ELNFr1fp3" outputId="a897b78c-5ca1-4230-eb0e-f49a04925d7a" colab={"base_uri": "https://localhost:8080/"} bikes.info() # + id="5KvSBMxjdZQt" from datetime import datetime # + id="9HYHA3sbdZQx" dates = pd.to_datetime(pd.Series(bikes['datetime']), format = '%Y-%m-%d %H:%M:%S') hour = dates.apply(lambda x: x.strftime('%H')) # + id="hZMUQq_idZQ0" bikes['hour'] = hour # + id="4dtCTNQFw89s" outputId="8ed9ae2c-0300-4546-973d-47dc58135d0b" colab={"base_uri": "https://localhost:8080/", "height": 581} bikes # + [markdown] id="RJVewrG7yhlY" # #### Exercício 2: Considerando a engenharia dos atributos, tente criar as seguintes colunas dummy para as horas `hour` tabeladas. # # - hora: Como um único atributo numérico (de 0 a 23). # - hora: Como um atributo categórico (use 23 variáveis dummy). 
# - dia: Como um único atributo ategórico (día = 1 de 7am a 8pm e día = 0 de lo contrário) # + id="ffWmH48Iyhla" horas = pd.get_dummies(bikes['hour']) # + id="IEQ2-C16dZQ8" # + id="Uf0fQkiUCZLC" outputId="58aa4bf0-4d55-4a01-fe7e-a2ae717c68eb" colab={"base_uri": "https://localhost:8080/"} bikes['hour'] = bikes['hour'].astype(int) bikes.info() # + id="TDzXzmouyhlj" bikes['dia'] = np.where((bikes['hour'] >6) & (bikes['hour']<21),1,0) # + id="WAd1p9hLyhlm" outputId="c3b81770-7692-4022-a742-2eb9019a8e68" colab={"base_uri": "https://localhost:8080/", "height": 1000} bikes.head(30) # + id="EQusVJnHdZRK" # + id="7Bk5GYBEdZRN" y = bikes['total'] x = bikes.drop(['total'], axis = 1) # + id="xeyfdhV0dZRQ" outputId="2c2cfc39-8e92-4aeb-9c9d-d5d622fea1a2" colab={"base_uri": "https://localhost:8080/", "height": 317} # + id="3UR0uSuTyhlz" # + id="_BTym5Tpyhl2" # + id="3q33lL69dZRY" # + id="UZzVMyqWyhmA" # + id="u4V1mkkcyhmF" # + [markdown] id="-bndl3GadZRi" # ##### Exercício 3: Separe o dataset fornecido em subconjuntos de treino e teste e calcule os valores de `RMSE` parra os diferentes conjuntos de atributos: # # - ['temp', 'season', 'humidity'] # - ['temp', 'season', 'humidity','dia'] # - ['temp', 'season', 'humidity','hora'] # - ['temp', 'season', 'humidity', # 1, 2, 3, 4, 5, # 6, 7, 8, 9, 10, # 11, 12, 13, 14, # 15, 16, 17, 18, # 19, 20, 21, 22, # 23 # ]. # # #### Estude qual dos modelos funciona melhor. # + id="f09P2T01JD0H" from sklearn.model_selection import train_test_split # + id="11Y9R2YyJTuK" # + id="q-F3xfWodZRj" x_treino, x_teste, y_treino, y_teste = # + id="Pc3WRoxpyhmL" # + id="TokEMFf7yhmS" # + id="f6erPo-cyhmZ" # + id="lZkXfgr8yhme" # + id="zOVXHkEyyhmq" # + [markdown] id="WubYIZYByhmv" # #### Exercício 4: Crie e compare modelos com variáveis quadráticas, `temp_2` e `humidity_2`, por exemplo. 
Considere a seguinte lista de atributos: # # - ['temp', 'season', 'humidity', # 1, 2, 3, 4, 5, # 6, 7, 8, 9, 10, # 11, 12, 13, 14, 15, # 16, 17, 18, 19, 20, # 21, 22, 23, # 'temp_2', 'humidity_2'] # + id="5tjMNJT1yhmx" # + id="NnpyNOpyyhm1" # + id="gfEToNIuyhm3" # + id="_zvbI7dVdZR9"
M2A30_PRATICA_INDEPENDENTE_Intro_Regressao_Linear.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
"""HackerRank "Sock Merchant": given a pile of socks identified by color,
count how many complete matching pairs can be formed."""
from collections import Counter


def unique(socks_pile):
    """Return the distinct sock colors of socks_pile in first-seen order.

    Uses a set for membership tests so the scan is O(n) instead of the
    O(n^2) repeated list lookups of a naive implementation.
    """
    seen = set()
    unique_pile = []
    for color in socks_pile:
        if color not in seen:
            seen.add(color)
            unique_pile.append(color)
    return unique_pile


def sockCount(unique_pile, ar):
    """Return, for each color in unique_pile (in order), how many socks of
    that color appear in ar.

    Counter tallies all of ar in one O(n) pass, avoiding an O(n) list.index
    call per sock.
    """
    tally = Counter(ar)
    return [tally[color] for color in unique_pile]


def organized(pairs_unorganized):
    """Return the total number of complete pairs given per-color sock counts.

    Each color contributes count // 2 pairs; an odd sock is left unpaired.
    """
    return sum(count // 2 for count in pairs_unorganized)


if __name__ == '__main__':
    # HackerRank input format: first line is the sock count (unused beyond
    # the protocol), second line is the space-separated list of colors.
    n = int(input())
    ar = list(map(int, input().rstrip().split()))
    unique_pile = unique(ar)
    pairs_unorganized = sockCount(unique_pile, ar)
    print(organized(pairs_unorganized))
Sock Merchant.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Natural Language Processing with Python - NLTK # Installing the NLTK package: http://www.nltk.org/install.html import nltk # **Installing NLTK data files (click at "Download" when prompted)** nltk.download() # ### Tokenization # Process of dividing a string into lists of chunks or "tokens", where a token is an entire part. For example: a word is a token in a sentence, and a sentence is a token in a paragraph. # + from nltk.tokenize import sent_tokenize import nltk.data # - # **Dividing a paragraph into sentences** paragraph_en = 'Hi. Good to know that you are learning PLN. Thank you for being with us.' paragraph_es = 'Hola. Es bueno saber que estás aprendiendo PLN. Gracias por estar con nosotros.' sent_tokenize(paragraph_en) sent_tokenize(paragraph_es) tokenizer_en = nltk.data.load('tokenizers/punkt/PY3/english.pickle') tokenizer_es = nltk.data.load('tokenizers/punkt/PY3/spanish.pickle') tokenizer_en.tokenize(paragraph_en) tokenizer_es.tokenize(paragraph_es) tokenizer_en tokenizer_es # **Dividing a sentence into words** from nltk.tokenize import regexp_tokenize from nltk.tokenize import RegexpTokenizer from nltk.tokenize import TreebankWordTokenizer from nltk.tokenize import WordPunctTokenizer from nltk.tokenize import word_tokenize word_tokenize('Data Science Rocks!') tw_tokenizer = TreebankWordTokenizer() tw_tokenizer.tokenize('Hello my friend.') word_tokenize("I can't do that.") wp_tokenizer = WordPunctTokenizer() wp_tokenizer.tokenize("I can't do that.") re_tokenizer = RegexpTokenizer("[\w']+") re_tokenizer.tokenize("I can't do that.") regexp_tokenize("I can't do that.", "[\w']+") re_tokenizer = RegexpTokenizer('\s+', gaps = True) re_tokenizer.tokenize("I can't do that.") # ### Training a Tokenizer from nltk.tokenize import 
PunktSentenceTokenizer from nltk.tokenize import sent_tokenize from nltk.corpus import webtext # **NLTK file at /home/caio/nltk_data/corpora/webtext** file = webtext.raw('overheard.txt') ps_tokenizer = PunktSentenceTokenizer(file) ps_tokenizer sentences_ps = ps_tokenizer.tokenize(file) sentences_ps[0] sentences_st = sent_tokenize(file) sentences_st[0] sentences_st[678] sentences_ps[678] # **Using the file path** with open('/home/caio/nltk_data/corpora/webtext/overheard.txt', encoding = 'ISO-8859-2') as file: file_text = file.read() ps_tokenizer = PunktSentenceTokenizer(file_text) sentences_ps = ps_tokenizer.tokenize(file_text) sentences_ps[0] sentences_ps[678] # ### Stopwords # Stopwords are common words that normally don't contribute to a sentence meaning, at least with regard to the information purpose and natural language processing. They are words like "the" and "a". Many search engines filter these words to save space in their search indexes. from nltk.corpus import stopwords stops_en = set(stopwords.words('english')) sentence_words = ["Can't", 'is', 'a', 'contraction'] [valid_word for valid_word in sentence_words if valid_word not in stops_en] stops_pt = set(stopwords.words('portuguese')) sentence_words = ['Data', 'Science', 'é', 'um', 'assunto', 'interessante'] [valid_word for valid_word in sentence_words if valid_word not in stops_pt] # **Stopwords Languages** print(stopwords.fileids()) # **Stopwords Portuguese Words** print(stopwords.words('portuguese')) # ### Wordnet # WordNet is a lexical database (in english). It is a kind of dictionary created specifically for natural language processing. from nltk.corpus import wordnet syn = wordnet.synsets('cookbook')[0] syn.name() syn.definition() wordnet.synsets('cooking')[0].examples() # ### Collocations # Collocations are two or more words that tend to appear frequently together, such as "United States" or "Rio Grande do Sul". 
These words can generate different combinations and therefore the context is also important in natural language processing. from nltk.collocations import BigramCollocationFinder from nltk.corpus import stopwords from nltk.corpus import webtext from nltk.metrics import BigramAssocMeasures words_lower = [word.lower() for word in webtext.words('grail.txt')] bcf = BigramCollocationFinder.from_words(words_lower) bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4) stop_words = set(stopwords.words('english')) bcf.apply_word_filter(lambda word: len(word) < 3 or word in stop_words) bcf.nbest(BigramAssocMeasures.likelihood_ratio, 4) # ### Stemming Words # Stemming is the technique of removing suffixes and prefixes from a word, called "stem". For example, the stem of the word "cooking" is "cook". A good algorithm knows that "ing" is a suffix and can be removed.<br /> # Stemming is widely used in search engines for indexing words. Instead of storing all the words forms, a search engine stores only the word stem, reducing the index size and increasing the search process performance. from nltk.stem import LancasterStemmer from nltk.stem import PorterStemmer from nltk.stem import RegexpStemmer from nltk.stem import SnowballStemmer stemmer = PorterStemmer() stemmer.stem('eating') stemmer.stem('generously') stemmer = LancasterStemmer() stemmer.stem('eating') stemmer.stem('generously') stemmer = RegexpStemmer('ing') stemmer.stem('eating') print(SnowballStemmer.languages) stemmer = SnowballStemmer('english') stemmer.stem('eating') stemmer.stem('generously') # ### Corpus # Corpus is a collection of text documents and Corpora is the plural of Corpus.<br /> # This term comes from the Latin word for "body" (in this case, the body of a text). A custom Corpus is a collection of text files organized in a directory. # For the training of a custom model as part of a text classification process (such as text analysis), it is necessary to create your own Corpus and train it. 
from nltk.corpus import brown
from nltk.corpus.reader import WordListCorpusReader
from nltk.tokenize import line_tokenize

# **Creating a custom Corpus**
# WordListCorpusReader treats each line of the given file as one entry of the
# word list. Root '.' is the notebook's working directory, so the corpus file
# is expected at ./aux/custom-corpus.txt.
reader = WordListCorpusReader('.', ['aux/custom-corpus.txt'])
reader.words()      # every entry of the custom word list
reader.fileids()    # file names registered with this reader
reader.raw()        # raw, untokenized contents of the file
line_tokenize(reader.raw())  # split the raw text back into its lines
# brown is NLTK's built-in (pre-categorized) Brown corpus, shown for comparison
print(brown.categories())
modules/07-real-time-analytics-with-spark-streaming/01-nltk.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext notexbook # %texify # # Dataset Partitioning # ## Data Partitioning in Machine Learning # To evaluate how well **Supervised** models _generalize_, data are usually _partitioned_ into a _training_ and a _test_ set. # # ![train_test_split](https://raw.githubusercontent.com/leriomaggio/pytorch-beautiful-ml-data/main/4_data_partitioning/imgs/train_test_split.svg) # # The *former* is used as the **reference**, and the **only** dataset on which the whole _training process_ of `ML`/`DL` models builds on; whereas the _latter_ is also referred to as the **held-out** dataset, emphasising that this data should be always kept **separate** from the _training data_, # and only used _afterwards_ to test model *generalisation* capabilities. # # On a very similar note, <ins>*training data*</ins> is usually further partitioned into a _validation_ set. This data is used to monitor and validate model's progress during the training, as if it was a # *development beta version* of a more general test set. In fact, this data is sometimes also referred to as *development set* or # <ins>_internal_</ins> [\*](#fnstarval) *validation data*. # # Therefore, **all** the rules applying to the *test* data, also apply to the *validation* data! 
# # ![train_test_split](https://raw.githubusercontent.com/leriomaggio/pytorch-beautiful-ml-data/main/4_data_partitioning/imgs/train_validation_test2.svg) # # <span id="fnstarval"><i>$\star$:</i> As opposed to **external** validation data used for *test* partition.</span> # <span class="fn"><i>Source: </i> [Cross-validation: evaluating estimator performance](https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) _from_ **scikit-learn** documentation.</span> # **Note** # # It is worth mentioning that the same principles about data partitioning, and their respective roles also applies to any `preprocessing`, and/or any statistics is derived on the data *before* any actual training is performed. # # For example: **Standardising** a datasets is a common requirement for many machine learning models that might behave badly if the individual features do not more or less look like standard normally distributed data. # # To do so, `sklearn` provides the [`StandardScaler`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) _transformer_ in the [`sklearn.preprocessing`](https://scikit-learn.org/stable/modules/preprocessing.html) package, which standardise each feature by removing the mean ($\mu$), and scaling to unit variance ($\sigma$). # # The standard score of a sample $x$ is calculated as: # $$ # z = \frac{(x-\mu)}{\sigma} # $$ # where $\mu$ and $\sigma$ are respectively the *mean*, and the *standard deviation* of each feature among all the **training** samples! # # Therefore: # # **1**: <span class="texbook-turquoise">CORRECT</span> ✅ # # ```python # X_train, X_test = split(X) # scaler = StandardScaler() # scaler.fit(X_train) # fit on training data ONLY! # ... # X_train = scaler.transform(X_train) # X_test = scaler.transform(X_test) # ``` # # **2**: <span class="texbook-red">**IN**CORRECT</span> ❌ # # ```python # scaler = StandardScaler() # scaler.fit(X) # fit on the whole dataset # ... 
# X_train, X_test = split(X)
# X_train = scaler.transform(X_train)
# X_test = scaler.transform(X_test)
# ```
#
# In particular, this is <span class="texbook-red">**incorrect**</span> because information from the _test_ set[$^{1}$](#fn1) would leak into the _training_ procedure, harming the whole principle of using this data to test how well a ML model is able to **generalise** on _unseen_ data. In other words, the _test_ data would not be truly _unseen_ to some extent.
# Further examples of `preprocessing` methods as included in `sklearn` can be found in the official [documentation](https://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#sphx-glr-auto-examples-preprocessing-plot-all-scaling-py).
#
#
# <span id="fn1"><i>[1]: </i>Aggregated in the calculation of the $\mu$ and $\sigma$ parameters used for standardisation</span>
# Therefore, as a **general rule of thumb**: _always apply data partitioning before anything else_.
#
# To do so, `sklearn` provides an easy to use `train_test_split(*arrays, **options)` helper function, which wraps `(1)` input validation; and `(2)` the use of a more articulate and generic `ShuffleSplit.split` generator, i.e. `next(ShuffleSplit().split(X, y))`
#
# In this notebook, we will see how we can combine this, and more complex methods for data partitioning with `torch.utils.data.Dataset` objects, and its corresponding `DataLoaders`.
# ### `Wine` Dataset Example # In order to understand how data partitioning is performed in _standard_ Machine learning settings, we will use the # **Wine Recognition Dataset** as included in `sklearn` as our reference dataset from sklearn.datasets import load_wine wine = load_wine() print(wine.DESCR) X, y = wine.data, wine.target y_names = wine.target_names y_names X.shape, y.shape # The dataset contains `178` samples (each characterised by `13` features), to be classified in `3` classes # + from collections import Counter samples_per_class = Counter(y) samples_per_class # - # ### Random Splitting # The most obvious way to split the dataset in two partitions (_training_ and _test_) is to resort to the _good ol' random splitting_. # # In particular, we could _shuffle_ our labels, and then randomly assign them to either partition. We just need to make sure that we have full control of the _(pseudo-)random_ generation, so that the whole assignment is fully repeatable! # # To do so, we could either fix a **global random seed**, or use our own [`numpy.random.Generator`](https://numpy.org/doc/stable/reference/random/generator.html) [\*](#fnnote). # # <span class="fn">**[\*]**: Always the solution to prefer, to have full control of random components, _ed._</span> # + import numpy as np SEED = 123456 rng = np.random.RandomState(seed=SEED) # - # <span class="fn">**[$\dagger$]**: Here we used a _legacy_ `numpy.random.RandomState` generator, as this is the one currently supported in `sklearn`. <br /> Alternatively, we could use `rng = np.random.default_rng(seed=SEED)` to instantiate the more up to date `Generator` object.</span> # + shuffled_indices = rng.permutation(np.arange(len(y))) test_size = 0.25 # We decide how big should be the test partition (alt. 
training_partition) test_size_index = int(len(y) * test_size) X_train, X_test = X[shuffled_indices[:-test_size_index]], X[shuffled_indices[-test_size_index:]] y_train, y_test = y[shuffled_indices[:-test_size_index]], y[shuffled_indices[-test_size_index:]] # - shuffled_indices y_train, y_test # Generating these partitions was pretty easy (and also quite straightforward to implement). # # However, to avoid lots of _boilerplate_, `sklearn` provides an utility function to perform exactly the same operations (_and more_), namely `sklearn.model_selection.train_test_split`, which in turn is itself a wrapper around the `sklearn.model_selection.ShuffleSplit` class: from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rng) y_train, y_test # **Note**: Passing in the same `RandomState` generator, we obtained _exactly the same_ partitions! # #### Stratification Gotcha # # There is a gotcha with the previous example: the random splitting did not take into account samples distribution among the partitions. It was, in fact, purely _random_. # # Sample distributions per-class among partitions might be though an important piece of information to keep in mind, especially if we are going to work with a (quite) imbalanced data (_not really the case here, though_, ed.) 
train_counter = Counter(y_train) train_counter test_counter = Counter(y_test) test_counter # To account for sample distribution, we could just pass on an additional `stratify` parameter to the `train_test_split` function [$\mp$](#fnstratify) # # <span id="fnstratify">This makes the internal implementation to switch wrapping from `ShuffleSplit` to `StratifiedShuffleSplit` (see [here](https://github.com/scikit-learn/scikit-learn/blob/95119c13af77c76e150b753485c662b7c52a41a2/sklearn/model_selection/_split.py#L2188)) </span> X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rng, stratify=y) train_stratified_count = Counter(y_train) train_stratified_count test_stratified_count = Counter(y_test) test_stratified_count samples_per_class # + [markdown] slideshow={"slide_type": "slide"} # ## Cross-Validation # - # By partitioning the available data into three sets (i.e. _train_, _validation_, _test_), we # drastically reduce the number of samples which are using for learning the model, # and the results can depend on a particular random choice for the pair of (train, validation) sets. # # A solution to this issue is a procedure called **Cross Validation** (`CV` in short). In cross-validation, the data is split repeatedly into a **training** and **test-set**, with a separate model built for every pair. # The test-set scores are then aggregated for a more robust estimate. # # In more details: # # (from [Wikipedia](https://en.wikipedia.org/wiki/Cross-validation_(statistics))) # # > Cross-validation, sometimes called **rotation estimation**, or *out-of-sample testing*, # > is (*a*) model validation technique for assessing how the results # > of a statistical analysis will generalize to an independent data set. # > [$\ldots$] # > The **goal** of cross-validation is to test the model's ability # > to predict new data that was not used in estimating it, in order to **flag problems** like # > <ins>overfitting or selection bias</ins>. 
# #
# ### How does `CV` work?
# A **test set** is still **held out** for <ins>final evaluation</ins>, but the *validation set* is no longer intended as a single partition, in the `CV` strategy.
#
# In the most general setting (i.e. `k-fold CV`), the _training set_ is split into **k smaller sets**, so that:
#
# 1. Select $k-1$ folds for training, $1$ fold for validation;
# 2. A model is **trained** using the selected $k - 1$ folds of the data;
# 3. the model is **validated** on the remaining $k_{th}$ fold kept out.
# 4. Repeat this process again (**<ins>re-initialising all model parameters</ins>**) by applying a different selection to the folds for training/validation **until** all the folds have been used once for validation.
#
# 5. Aggregate results for each data partitioning, and derive statistics (with confidence intervals) on model predictions and generalisation capabilities.
# + [markdown] slideshow={"slide_type": "fragment"}
# The most common way to do cross-validation is **k-fold cross-validation**, in which the data is first split into k (often 5 or 10) equal-sized folds, and then for each iteration, one of the k folds is used as test data, and the rest as training data:
# -
# #### K-Fold Cross Validation Partitions
#
# ![cross_validation](https://raw.githubusercontent.com/leriomaggio/pytorch-beautiful-ml-data/main/4_data_partitioning/imgs/cross_validation.svg)
#
# <span class="fn"><i>Source: </i> [Cross-validation: evaluating estimator performance](https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) _from_ **scikit-learn** documentation.</span>
# There exist multiple strategies (or variations) on how folds can be generated during a `CV`, and the [`sklearn.model_selection`](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.model_selection) package provides classes and functions to handle different partitioning strategies, and data configurations.
# Among the many brilliant examples provided in `sklearn` documentation, I would strongly recommend
# [Visualizing cross-validation behaviour in scikit-learn](https://scikit-learn.org/stable/auto_examples/model_selection/plot_cv_indices.html), which practically shows the effect of different data selection strategies when creating folds.
#
# In the rest of this notebook, we will build on a similar idea, showing mainly the difference in samples selection when applying a `KFold` and a `StratifiedKFold` cross-validation strategy.

# +
from matplotlib import pyplot as plt
# %matplotlib inline

def plot_cv(cv, X, y):
    """Helper function to plot results of the given CV selection.

    Draws one boolean row per generated split, with True (bright) cells
    marking the samples selected for the test fold of that split.
    """
    masks = []
    for train, test in cv.split(X, y):
        mask = np.zeros_like(y, dtype=bool)
        mask[test] = 1  # mark test-fold members of this split
        masks.append(mask)
    plt.matshow(masks)
# -

# First, replicate previous results on `wine` dataset, but this time directly using the `ShuffleSplit` (and `StratifiedShuffleSplit`)

from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit

# Configuration Settings shared among all the CV objects
N_SPLITS = 50
TEST_SIZE = test_size
RANDOM_STATE = rng
# The wine dataset's smallest class holds only 48 samples, so any *stratified*
# k-fold must use n_splits <= 48 — sklearn raises a ValueError otherwise.
N_SPLITS_STRATIFIED = 45

# ##### `ShuffleSplit`

plot_cv(ShuffleSplit(n_splits=N_SPLITS, test_size=TEST_SIZE, random_state=RANDOM_STATE),
        wine.data, wine.target)

# ##### `StratifiedShuffleSplit`

plot_cv(StratifiedShuffleSplit(n_splits=N_SPLITS, test_size=TEST_SIZE, random_state=RANDOM_STATE),
        wine.data, wine.target)

# ##### `KFold`

from sklearn.model_selection import KFold, StratifiedKFold

# no shuffling
plot_cv(KFold(n_splits=N_SPLITS), wine.data, wine.target)

# with shuffling
plot_cv(KFold(n_splits=N_SPLITS, random_state=RANDOM_STATE, shuffle=True), wine.data, wine.target)

# ##### `StratifiedKFold`

# no shuffling
# fix: was n_splits=N_SPLITS (50), which crashes — StratifiedKFold cannot use
# more splits than the 48 members of the smallest class.
plot_cv(StratifiedKFold(n_splits=N_SPLITS_STRATIFIED), wine.data, wine.target)

# with shuffling
plot_cv(StratifiedKFold(n_splits=N_SPLITS_STRATIFIED, random_state=RANDOM_STATE, shuffle=True),
        wine.data, wine.target)

# ##### `RepeatedKFold`

from sklearn.model_selection import RepeatedKFold

# +
NSPLITS_CV = 5
NREPEAT_CV = 10
plot_cv(RepeatedKFold(n_splits=NSPLITS_CV, n_repeats=NREPEAT_CV, random_state=RANDOM_STATE),
        wine.data, wine.target)
# -

# ##### `RepeatedStratifiedKFold`

from sklearn.model_selection import RepeatedStratifiedKFold

plot_cv(RepeatedStratifiedKFold(n_splits=NSPLITS_CV, n_repeats=NREPEAT_CV, random_state=RANDOM_STATE),
        wine.data, wine.target)
2_ML_Data/3_ml_data_partitioning_primer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import math import pandas as pd import numpy as np from sklearn.preprocessing import LabelEncoder from PIL import Image import tensorflow as tf tf.__version__ IMG_WIDTH = 300 IMG_HEIGHT = 150 NUM_CHANNELS = 3 TARGET_SIZE = (IMG_WIDTH, IMG_HEIGHT) INPUT_SHAPE = (IMG_WIDTH, IMG_HEIGHT, NUM_CHANNELS) BATCH_SIZE = 8 data = pd.read_csv("images-metadata.csv", index_col="ID") # data.head() # + # files path all_images = data["PATH"].values n_samples = len(all_images) # labels labels = data["CLASS"].values print("Total images: ", n_samples) # - encoder = LabelEncoder() encoder.fit(labels) labels = encoder.transform(labels) labels = tf.keras.utils.to_categorical(labels) num_classes = labels.shape[1] def load_image(path, transpose=True): image = Image.open(path) image = image.convert('RGB') if transpose and (image.height > image.width): image = image.transpose(Image.ROTATE_90) image = image.resize(TARGET_SIZE) image = np.array(image) / 255.0 image = image.reshape(INPUT_SHAPE) return image class DataGenerator(tf.keras.utils.Sequence): def __init__(self, x_set, y_set, batch_size=32, target_size=(150, 150)): self.x, self.y = x_set, y_set self.batch_size = batch_size self.target_size = target_size def __len__(self): return math.ceil(len(self.x) / self.batch_size) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ load_image(file_name) for file_name in batch_x]), np.array(batch_y) train_samples = int(n_samples*.8) train_generator = DataGenerator(all_images[:train_samples], labels[:train_samples], BATCH_SIZE, TARGET_SIZE) val_generator = DataGenerator(all_images[train_samples:], labels[train_samples:], BATCH_SIZE, TARGET_SIZE) # + # 
# Build a sequential CNN model
model = tf.keras.models.Sequential()

# Convolutional + max-pooling feature-extraction layers
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu', input_shape=INPUT_SHAPE))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))

# Classification head
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(num_classes, activation='softmax'))  # one output unit per class

print(model.summary())
# -

model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])

# +
EPOCHS = 1
history = model.fit(train_generator,
                    validation_data=val_generator,
                    epochs=EPOCHS)
# +
EPOCHS = 9
# NOTE: this second fit() continues training the same model but *overwrites*
# `history`, so the curves below cover only these 9 epochs.
history = model.fit(train_generator,
                    validation_data=val_generator,
                    epochs=EPOCHS)
# +
import matplotlib.pyplot as plt  # fix: plt is used below but was never imported in this notebook

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)

fig = plt.figure(figsize=(12, 4))

fig.add_subplot(1, 2, 1)
plt.plot(epochs, acc, 'bo', label='Training Accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc='best')

fig.add_subplot(1, 2, 2)
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and validation loss')
plt.legend(loc='best')

plt.show()
# -
3. Modeling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- from qiskit import Aer, IBMQ from qiskit.utils import QuantumInstance from qiskit.transpiler import PassManager from qiskit.transpiler.passes.calibration import RZXCalibrationBuilderNoEcho from qiskit_nature.drivers import UnitsType, Molecule from qiskit_nature.drivers.second_quantization import ElectronicStructureDriverType, ElectronicStructureMoleculeDriver from qiskit_nature.problems.second_quantization import ElectronicStructureProblem from qiskit_nature.converters.second_quantization import QubitConverter from qiskit_nature.transformers.second_quantization.electronic import FreezeCoreTransformer from qiskit_nature.mappers.second_quantization import ParityMapper from qiskit_nature.algorithms import GroundStateEigensolver from qiskit_nature.runtime import VQEClient from qiskit.algorithms import NumPyMinimumEigensolver, VQE from qiskit.algorithms.optimizers import SPSA from qiskit.circuit import QuantumCircuit, ParameterVector from qiskit.utils import QuantumInstance from qiskit.providers.aer import AerSimulator import matplotlib.pyplot as plt import numpy as np IBMQ.load_account() provider = IBMQ.get_provider() hardware_backend = provider.get_backend('ibmq_quito') # + def HEA_aware(num_q, depth): circuit = QuantumCircuit(num_q) params = ParameterVector("theta", length=num_q * (3 * depth + 2)) counter = 0 for q in range(num_q): circuit.rx(params[counter], q) counter += 1 circuit.rz(params[counter], q) counter += 1 for d in range(depth): for q in range(num_q - 1): gate = QuantumCircuit(num_q) gate.rzx(np.pi/2, q, q + 1) pass_ = RZXCalibrationBuilderNoEcho(hardware_backend) qc_cr = PassManager(pass_).run(gate) circuit.compose(qc_cr, inplace=True) for q in range(num_q): circuit.rz(params[counter], q) counter += 1 
circuit.rx(params[counter], q) counter += 1 circuit.rx(params[counter], q, label="last" if q == num_q - 1 else 'rz') counter += 1 return circuit, params dist = 0.82 qubit_converter = QubitConverter(ParityMapper(), two_qubit_reduction=True) numpy_solver = NumPyMinimumEigensolver() molecule = Molecule(geometry=[['H', [0., 0., 0.]], ['H', [0., 0., dist]]]) driver = ElectronicStructureMoleculeDriver(molecule, basis='sto3g', \ driver_type=ElectronicStructureDriverType.PYSCF) es_problem = ElectronicStructureProblem(driver) calc = GroundStateEigensolver(qubit_converter, numpy_solver) res = calc.solve(es_problem) ret_1 = np.real(res.total_energies[0]) target_energy = ret_1 # + shots = 1024 backend = provider.get_backend('ibmq_qasm_simulator') qubits = 2 depth = 1 circuit, _ = HEA_aware(qubits, depth) spsa = SPSA(200) qubit_converter = QubitConverter(ParityMapper(), two_qubit_reduction=True) numpy_solver = NumPyMinimumEigensolver() molecule = Molecule(geometry=[['H', [0., 0., 0.]], ['H', [0., 0., dist]]]) driver = ElectronicStructureMoleculeDriver(molecule, basis='sto3g', \ driver_type=ElectronicStructureDriverType.PYSCF) es_problem = ElectronicStructureProblem(driver) runtime_vqe = VQEClient( ansatz=circuit, optimizer=spsa, provider=provider, backend=backend, shots=shots ) runtime_vqe_groundstate_solver = GroundStateEigensolver(qubit_converter, runtime_vqe) runtime_vqe_result_soft = runtime_vqe_groundstate_solver.solve(es_problem) # + runtime_result_soft = runtime_vqe_result_soft.raw_result history_soft = runtime_result_soft.optimizer_history loss_soft = history_soft["loss"] target_energy = ret_1 plt.figure(figsize=(14, 8)) plt.plot(loss_soft + runtime_vqe_result_soft.nuclear_repulsion_energy, label="Noiseless Sim") plt.axhline(y=target_energy, color="tab:red", ls="--", label="Target") plt.legend() plt.xlabel("Iteration") plt.ylabel("Energy [H]") plt.title("VQE energy") plt.show() # + from qiskit.tools.jupyter import * # %qiskit_version_table
HEA-VQE/runtime_hea_vqe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homeworks # Base code for basic data we used during the whole week #Import statements - To keed this in order from sklearn.metrics import mean_absolute_error from nltk.corpus import opinion_lexicon import urllib.request, os, gzip import json import random import numpy # a powerfull module import tensorflow as tf # Googles ML module from nltk.corpus import stopwords # We use it to remove the stopwords of the comments since they dont provide relevant info from nltk.tokenize import sent_tokenize, word_tokenize , TweetTokenizer # Divide text in sentences and then in words from sklearn.linear_model import LinearRegression # sklearn is a machine learning toolkit (needs numpy, scipy and matplotlib) from sklearn.linear_model import LogisticRegression # http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html # If I am not mistaken, logistic regresion is for booleans clasiffiers from sklearn.metrics import precision_score, recall_score # I think these are metrics to evaluate models form the slklearn library from sklearn.naive_bayes import GaussianNB, BernoulliNB # To apply statistical models, not regressions(?)1 from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import mark_negation, HAPPY, SAD #Utilities of negation and happy or sad emojis from nltk.sentiment.vader import SentimentIntensityAnalyzer # + # %matplotlib inline datadir = './data/' def download_data(dataset_name, datadir): filename = 'reviews_%s_5.json' % dataset_name filepath = os.path.join(datadir, filename) if os.path.exists(filepath): print("Dataset %s has already been downloaded to %s" % (dataset_name, datadir)) else: url = 'http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/%s.gz' % filename 
urllib.request.urlretrieve(url, filepath + ".gz") with gzip.open(filepath + ".gz", 'rb') as fin: with open(filepath, 'wb') as fout: fout.write(fin.read()) print("Downloaded dataset %s and saved it to %s" % (dataset_name, datadir)) dataset = "Baby" download_data(dataset, datadir) # + def load_data (dataset_name, datadir): filepath = os.path.join(datadir, 'reviews_%s_5.json' % dataset_name) if not os.path.exists(filepath): download_data(dataset_name, datadir) data = [] with open(filepath, 'r') as f: for line in f: # read file line by line item_hash = hash(line) # we will use this later for partitioning our data item = json.loads(line) # convert JSON string to Python dict item['hash'] = item_hash # add hash for identification purposes data.append(item) print("Loaded %d data for dataset %s" % (len(data), dataset_name)) return data # load the data... baby = load_data(dataset, datadir) # + def partition_train_validation_test(data): # 60% : modulus is 0, 1, 2, 3, 4, or 5 data_train = [item for item in data if item['hash']%10<=5] # 20% : modulus is 6 or 7 data_valid = [item for item in data if item['hash']%10 in [6,7]] # 20% : modulus is 8 or 9 data_test = [item for item in data if item['hash']%10 in [8,9]] print("Now we have", len(data_train), "training examples,", len(data_valid), "validation examples, and", len(data_test), "test examples") return data_train, data_valid, data_test baby_train, baby_valid, baby_test = partition_train_validation_test(baby) # + eng_stopwords = set(stopwords.words('english')) positive_words = set(opinion_lexicon.positive()) negative_words = set(opinion_lexicon.negative()) def my_tokenize(text): # split text into lower-case tokens, removing all-punctuation tokens and stopwords tokens = [] for sentence in sent_tokenize(text): #Adds to the array and array with the words in lowercase, we add them if they are not stopwords and there is at least one letter in it tokens.extend(x for x in word_tokenize(sentence.lower()) #continues down... 
if x not in eng_stopwords and any(i.isalpha() for i in x))# This extends the list by adding elements, it is different from append... see https://stackoverflow.com/questions/252703/difference-between-append-vs-extend-list-methods-in-python return tokens def pos_neg_fraction(text): # We recieve the raw text tokens = my_tokenize(text) # We tokenize the text first count_pos, count_neg = 0, 0 for t in tokens: if t in positive_words: count_pos += 1 if t in negative_words: count_neg += 1 count_all = len(tokens) # this is because we need to be sure there is no 0 len sentence if count_all != 0: return count_pos/count_all, count_neg/count_all else: return 0., 0. pos_example = 'This is a good, great, fantastic, amazing, wonderful, super product!!!' neg_example = 'This is a bad, atrocious, terrible, dreadful, awful, abysmal product!!!' print(pos_neg_fraction(pos_example)) print(pos_neg_fraction(neg_example)) # - def dataset_to_matrix(data): # data is a lot of text in {} that has reviwer name..comment...date...etc.. but all identified with text labels #in that sence data is an unidiminsional array # the item "atribute", we added it to the data to identify the sections of it return numpy.array([list(pos_neg_fraction(item['reviewText'])) for item in data]) # X_train with two columns and as many rows as there are examples in the data set. #The first column contains the fraction of positive words, #while the second column contains the fraction of negative words for each example. X_train = dataset_to_matrix(baby_train) X_valid = dataset_to_matrix(baby_valid) X_test = dataset_to_matrix(baby_test) most_pos, most_neg = numpy.argmax(X_train, axis=0) # find maximum ROW (axis 0).. through aaaaallll the data (OMG!) 
# print the example with the highest fraction of positive words:
print("We found a fraction of %f %% positive words for example %d" % (100.*X_train[most_pos, 0], most_pos))
print(baby_train[most_pos])

print("We found a fraction of %f %% negative words for example %d" % (100.*X_train[most_neg, 1], most_neg))
print(baby_train[most_neg])

# +
def dataset_to_targets(data):
    """Extract the star rating ('overall') of every review as a 1-D numpy array."""
    return numpy.array([item['overall'] for item in data])

Y_train = dataset_to_targets(baby_train)
Y_valid = dataset_to_targets(baby_valid)
Y_test = dataset_to_targets(baby_test)

print("Our feature matrix is two-dimensional and has shape", X_train.shape)  # (fpos, fneg) columns
print("Our target vector is one-dimensional and has shape", Y_train.shape)   # star ratings
# -

# ## Day 1

lreg = LinearRegression().fit(X_train, Y_train)
print("The coefficient for the fpos variable is", lreg.coef_[0])
print("The coefficient for the fneg variable is", lreg.coef_[1])
print("The intercept is", lreg.intercept_)

# If the review contains 20% positive words (fpos==0.2)
# but still no negative words (fneg==0), we would expect the following rating:
features = [[0.2, 0]]
expected_rating_A = lreg.predict(features)[0]
print("The expected rating is %f stars" % expected_rating_A)
# we can also compute this explicitly:
expected_rating_B = lreg.intercept_ + 0.2*lreg.coef_[0] + 0*lreg.coef_[1]
print("This is the same as %f stars" % expected_rating_B)

# However, if the review contains no positive words (fpos==0) but 20% negative words (fneg==0.2),
# we expect the following rating:
features = [[0, 0.2]]
expected_rating_A = lreg.predict(features)[0]
print("The expected rating is %f stars" % expected_rating_A)
# we can also compute this explicitly:
expected_rating_B = lreg.intercept_ + 0 * lreg.coef_[0] + 0.2 * lreg.coef_[1]
print("This is the same as %f stars" % expected_rating_B)

# +
def predict_lreg(features, model=None):
    """Predict star ratings with a fitted linear model, clipped to the valid 1-5 range.

    model defaults to the baby-dataset regression `lreg` for backward compatibility;
    pass another fitted regressor (e.g. `lreg_app`) to evaluate a different dataset.
    """
    if model is None:
        model = lreg
    expected_rating = model.predict(features)
    expected_rating[expected_rating > 5.0] = 5.0
    expected_rating[expected_rating < 1.0] = 1.0
    return expected_rating

pred_train = predict_lreg(X_train)
# -

mae_train = mean_absolute_error(pred_train, Y_train)
print("The mean absolute error on the training data is %f stars" % mae_train)

# #### - calculate the prediction for 100% pos, and 100% neg review
features_100pos = [[1, 0]]
features_100neg = [[0, 1]]
expected_rating_pos = lreg.predict(features_100pos)[0]  # [0] selects the single prediction from the array
expected_rating_neg = lreg.predict(features_100neg)[0]
print("The expected rating for 100 pos review is %f stars" % expected_rating_pos)
print("The expected rating for 100 neg review is %f stars" % expected_rating_neg)
# This model needs clipping to keep the stars between 1 and 5

# #### - Repeat this same process for "Apps for Android" dataset

###### Getting data ####
android_dataset = "Apps_for_Android"
download_data(android_dataset, datadir)

################ Processing data ############
# Load data from directory
apps = load_data(android_dataset, datadir)
# Partition data for training, validation and tests
apps_train, apps_valid, apps_test = partition_train_validation_test(apps)

# Positive/negative word fractions, omitting stopwords and punctuation marks
X_apps_train = dataset_to_matrix(apps_train)

app_most_pos, app_most_neg = numpy.argmax(X_apps_train, axis=0)
print(apps_train[app_most_pos])
print()
print(apps_train[app_most_neg])

Y_apps_train = dataset_to_targets(apps_train)  # extract the overall item from each review

#### Linear regression
lreg_app = LinearRegression().fit(X_apps_train, Y_apps_train)

app_pos_features = [[0.2, 0]]
app_neg_features = [[0, 0.2]]
# fix: predict with the model fitted on the apps data (was `lreg`, the baby-dataset model)
expected_rating_pos = lreg_app.predict(app_pos_features)[0]
expected_rating_neg = lreg_app.predict(app_neg_features)[0]
print("The expected rating for a review with 20 percent positive comments is %f stars" % expected_rating_pos)
print("The expected rating for a review with 20 percent negative comments is %f stars" % expected_rating_neg)
# With this data we get sort of a similar approximation

# fix: evaluate the apps model on the apps data (predict_lreg used to hard-code the baby model)
pred_train_apps = predict_lreg(X_apps_train, model=lreg_app)
mae_train_apps = mean_absolute_error(pred_train_apps, Y_apps_train)
print("The mean absolute error on the training data is %f stars" % mae_train_apps)

# # Day 2

def discretize_targets(Y):
    """True for dissatisfied customers (rating <= 3 stars), False otherwise."""
    return Y <= 3.0

D_train = discretize_targets(Y_train)  # Y_train is the 1-D array of star ratings
print("The training data contains %f %% dissatisfied customers" % (100.*D_train.mean()))
print(D_train[:10])

# +
logreg = LogisticRegression().fit(X_train, D_train)  # fit on the training data only

# predict_proba() returns a matrix with two columns:
#   [:, 0] probability of the label being False (satisfied customer)
#   [:, 1] probability of the label being True  (dissatisfied customer)
# each row sums to 1; we keep the "dissatisfied" column
prob2_train = logreg.predict_proba(X_train)[:, 1]
pred2_train = prob2_train > 0.5
max_prob2 = numpy.argmax(prob2_train)
min_prob2 = numpy.argmin(prob2_train)

def analyze_training_example_2(i):
    """Print rating, prediction, probability, features and text for training example i."""
    print("Training example number", i)
    print("True rating = %f stars" % Y_train[i])
    print("Expected to be dissatisfied:", pred2_train[i])
    print("Expected probability of being dissatisfied : %f" % prob2_train[i])
    print("Features = %f / %f" % (X_train[i, 0], X_train[i, 1]))
    print("Review text = %s" % baby_train[i]['reviewText'])

analyze_training_example_2(max_prob2)
print()
analyze_training_example_2(min_prob2)
# -

precision2 = precision_score(D_train, pred2_train)
recall2 = recall_score(D_train, pred2_train)
print("For the default threshold (0.5) we get precision = %f "
      "and recall = %f" % (precision2, recall2))

nb = GaussianNB().fit(X_train, D_train)
prob3_train = nb.predict_proba(X_train)[:, 1]
pred3_train = prob3_train > 0.5
precision3 = precision_score(D_train, pred3_train)
recall3 = recall_score(D_train, pred3_train)
print("Now the precision is %f and the recall is %f" % (precision3, recall3))

# #### - Change the threshold from 0.5 to 0.2, and rerun the code.
new_pred2_train = prob2_train > 0.2
new_precision = precision_score(D_train, new_pred2_train)
new_recall = recall_score(D_train, new_pred2_train)
# fix: 0.2 is a custom threshold, not the default one — don't call it "default"
print("For threshold 0.2 we get precision = %f "
      "and recall = %f" % (new_precision, new_recall))

# #### - Give a commentary in plain English about how that changed precision and recall. What does that mean? What is now included that wasn't before? What part of it is good? What is bad from our Task perspective. Remember: our task was to identify Dissatisfied reviews.

# The precision is dropping because we flag more cases as positive ("dissatisfied")
# as the threshold gets lower, so a larger share of the cases labeled "dissatisfied"
# were actually satisfied. From our problem's perspective it is better to increase
# the recall, since we are more interested in identifying dissatisfied customers
# so that the product's problems can be addressed.
# Fit the same linear regression with TensorFlow's Estimator API.
# NOTE(review): tf.estimator.inputs.numpy_input_fn is the deprecated TF1-era
# API — this cell assumes TensorFlow 1.x is installed; confirm the version.
# +
feat_cols = [tf.feature_column.numeric_column(key="fpos"),
             tf.feature_column.numeric_column(key="fneg")]
model = tf.estimator.LinearRegressor(feature_columns=feat_cols)
# Training input: feed both feature columns and the targets, shuffling and
# repeating indefinitely (num_epochs=None); training length is set via `steps`.
get_training_data = tf.estimator.inputs.numpy_input_fn(
    x={"fpos" : X_train[:,0], "fneg" : X_train[:,1]},
    y=Y_train, num_epochs=None, shuffle=True)
model.train(input_fn=get_training_data, steps=5000)
# +
# Evaluation input: one pass over the training data without shuffling,
# so the predictions line up row-for-row with Y_train.
eval_training_data = tf.estimator.inputs.numpy_input_fn(
    x={"fpos":X_train[:,0], "fneg": X_train[:,1]},
    num_epochs=1, shuffle=False)
pred_train_tf = numpy.array([item['predictions'][0] for item in model.predict(input_fn=eval_training_data)])
mae_train_tf = mean_absolute_error(pred_train_tf, Y_train)
print("The mean absolute error on the training data is %f stars" % mae_train_tf)
# -

# +
# Demonstrate NLTK's mark_negation on a few toy sentences.
examples_negation = ["This product wasn't bad.", "This is not a bad product.",
                     "This product was bad.", "This is a bad product."]
for sentence in examples_negation:
    tokens_with_negation = mark_negation(word_tokenize(sentence.lower()))
    # Append _NEG suffix to words that appear in the scope between a negation and a punctuation mark.
    print("Sentence =", sentence)
    print(tokens_with_negation)

# Stopwords must also be matched in their negated form, so build a combined set.
negated_stopwords = set(x+"_NEG" for x in eng_stopwords)
all_stopwords = eng_stopwords.union(negated_stopwords)  # union of stopwords and negated stopwords


def tokenize_with_negation(text):
    """Split `text` into lower-case tokens, dropping all-punctuation tokens and
    stopwords, and marking tokens in the scope of a negation with a _NEG suffix."""
    tokens = []
    for sentence in sent_tokenize(text):
        pretokens = word_tokenize(sentence.lower())
        # keep only tokens that contain at least one alphabetic character
        pretokens = [x for x in pretokens if any(i.isalpha() for i in x)]
        # Append _NEG suffix to words that appear in the scope between a negation
        # and a punctuation mark
        pretokens = mark_negation(pretokens)
        # `tokens` starts empty, so this simply accumulates the per-sentence lists
        tokens.extend(x for x in pretokens if x not in all_stopwords)
    return tokens


print(baby_train[31]['reviewText'])
print(tokenize_with_negation(baby_train[31]['reviewText']))

# +
# A negated positive word counts as negative, and vice versa.
all_positive_words = positive_words.union({x+"_NEG" for x in negative_words})
all_negative_words = negative_words.union({x+"_NEG" for x in positive_words})


def pos_neg_fraction_with_negation(text):
    """Return the (positive, negative) token fractions of `text`, negation-aware."""
    tokens = tokenize_with_negation(text)  # tokenize and add negation marks where necessary
    # count how many positive and negative words occur in the text
    count_pos, count_neg = 0, 0
    for t in tokens:
        if t in all_positive_words:
            count_pos += 1
        if t in all_negative_words:
            count_neg += 1
    count_all = len(tokens)
    if count_all != 0:
        return count_pos/count_all, count_neg/count_all
    else:  # avoid division by zero
        return 0., 0.


pos_example = 'This is a good, great, fantastic, amazing, wonderful, super product!!!'
neg_example = 'This is a bad, atrocious, terrible, dreadful, awful, abysmal product!!!'
print(pos_neg_fraction_with_negation(pos_example))
print(pos_neg_fraction_with_negation(neg_example))

pos_example_neg = 'This is not a good, great, fantastic, amazing, wonderful, super product!!!'
neg_example_neg = 'This is not a bad, atrocious, terrible, dreadful, awful, abysmal product!!!'
print(pos_neg_fraction_with_negation(pos_example_neg))
print(pos_neg_fraction_with_negation(neg_example_neg))


# +
def dataset_to_matrix_with_neg(data):
    """Build the (n_reviews, 2) feature matrix of negation-aware (pos, neg) fractions."""
    return numpy.array([list(pos_neg_fraction_with_negation(item['reviewText'])) for item in data])


X_train_neg = dataset_to_matrix_with_neg(baby_train)
# -

lreg_neg = LinearRegression().fit(X_train_neg, Y_train)
pred_train_neg = lreg_neg.predict(X_train_neg)
mae_train_with_neg = mean_absolute_error(pred_train_neg, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mae_train_with_neg)

# Random Forest
from sklearn.ensemble import RandomForestRegressor
rf_neg = RandomForestRegressor().fit(X_train_neg, Y_train)
pred_train_rf_neg = rf_neg.predict(X_train_neg)
mae_train_rf_neg = mean_absolute_error(pred_train_rf_neg, Y_train)
print("A nonlinear regressor achieves a MAE of %f stars" % mae_train_rf_neg)

# #### Add features, Explain which features you chose, implement them, and write a commentary on your results... Feel welcome to use NLTK's built-in sentiment analyzer or any other research that you can find and understand

# I will add emojis (personal choice, I communicate with emojis all the time);
# also, if a word is all upper-case it will count twice (emphasis).

# +
def tokenize_with_negation_Emojis(text):
    """Like tokenize_with_negation, but additionally keeps emoticons (found via
    TweetTokenizer against the HAPPY/SAD sets) and counts fully upper-case
    words twice as an emphasis signal."""
    tokens = []
    tknzr = TweetTokenizer()
    for sentence in sent_tokenize(text):
        #pretokens = word_tokenize(sentence.lower())
        emopretokens = tknzr.tokenize(sentence)  # tweet tokenizer keeps emoticons as single tokens
        emojis = [x for x in emopretokens if x in SAD or x in HAPPY]
        # if a word is all upper-case, collect it again so it is counted twice
        pretokens_emphasis = [x for x in emopretokens if x.isupper() and x.isalpha()]
        pretokens = word_tokenize(sentence.lower())
        pretokens = [x for x in pretokens if any(i.isalpha() for i in x)]
        # everything is the same as tokenize_with_negation up to here
        # Append _NEG suffix to words that appear in the scope between a negation
        # and a punctuation mark
        pretokens.extend(pretokens_emphasis)
        pretokens = mark_negation(pretokens)
        # emoticons are appended AFTER negation marking, so they never get a _NEG suffix
        pretokens.extend(emojis)
        # `tokens` starts empty, so this simply accumulates the per-sentence lists
        tokens.extend(x for x in pretokens if x not in all_stopwords)
    return tokens


# emoticons count as sentiment-bearing tokens too
all_positive_words_emoji = all_positive_words.union(HAPPY)
all_negative_words_emoji = all_negative_words.union(SAD)


def pos_neg_fraction_with_negation_emoji(text):
    """Return the (positive, negative) token fractions of `text`, negation- and emoticon-aware."""
    tokens = tokenize_with_negation_Emojis(text)  # tokenize and add negation marks where necessary
    # count how many positive and negative tokens occur in the text
    count_pos, count_neg = 0, 0
    for t in tokens:
        if t in all_positive_words_emoji:
            count_pos += 1
        if t in all_negative_words_emoji:
            count_neg += 1
    count_all = len(tokens)
    if count_all != 0:
        return count_pos/count_all, count_neg/count_all
    else:  # avoid division by zero
        return 0., 0.
def dataset_to_matrix_with_neg_emoji(data):
    """Build the (n_reviews, 2) feature matrix of emoticon- and negation-aware (pos, neg) fractions."""
    return numpy.array([list(pos_neg_fraction_with_negation_emoji(item['reviewText'])) for item in data])
# -

# sanity checks on pure-emoticon strings
my_example = " :D :) ;) "
print(pos_neg_fraction_with_negation_emoji(my_example))
my_example = ":( :/)"
print(pos_neg_fraction_with_negation_emoji(my_example))

print(baby_train[700]['reviewText'])
print(pos_neg_fraction_with_negation_emoji(baby_train[700]['reviewText']))

X_train_neg_emoji = dataset_to_matrix_with_neg_emoji(baby_train)

lreg_neg_emoji = LinearRegression().fit(X_train_neg_emoji, Y_train)
pred_train_neg_emoji = lreg_neg_emoji.predict(X_train_neg_emoji)
mae_train_with_neg_emoji = mean_absolute_error(pred_train_neg_emoji, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mae_train_with_neg_emoji)

# I managed to improve the model a little bit :P 0.813530

# Differences between the tokenizers
print(TweetTokenizer().tokenize("this is HUGE!! :D "))
print(word_tokenize("this is HUGE!! :D"))

# # Day 3

sia = SentimentIntensityAnalyzer()
text = baby_train[50000]['reviewText']
for s in sent_tokenize(text):
    print(s)
    print(sia.polarity_scores(s))


# +
def sia_features(dataset):
    '''
    For each review text in the dataset, extract:
    (1) mean positive sentiment over all sentences
    (2) mean neutral sentiment over all sentences
    (3) mean negative sentiment over all sentences
    (4) maximum positive sentiment over all sentences
    (5) maximum neutral sentiment over all sentences
    (6) maximum negative sentiment over all sentences
    Reviews that tokenize to zero sentences get all-zero features.
    '''
    feat_matrix = numpy.empty((len(dataset), 6))
    for i in range(len(dataset)):
        sentences = sent_tokenize(dataset[i]['reviewText'])
        nsent = len(sentences)
        if nsent:
            sentences_polarities = numpy.empty((nsent, 3))
            for j in range(nsent):
                polarity = sia.polarity_scores(sentences[j])
                sentences_polarities[j, 0] = polarity['pos']
                sentences_polarities[j, 1] = polarity['neu']
                sentences_polarities[j, 2] = polarity['neg']
            # axis=0 aggregates over sentences, one value per polarity column
            feat_matrix[i, 0:3] = numpy.mean(sentences_polarities, axis=0)
            feat_matrix[i, 3:6] = numpy.max(sentences_polarities, axis=0)
        else:
            feat_matrix[i, 0:6] = 0.0
    return feat_matrix


sia_tr = sia_features(baby_train)
# -

# ### **Homework** : add length function

baby_train[1000]['reviewText'][6] == "s"  # scratch check of character indexing (no effect)


def len_features(dataset):
    '''
    Two length-based features per review:
    (1) length of review in thousands of characters - truncated at 2500 (i.e. 2.5M chars)
    (2) percentage of exclamation marks (in %), counted over the same truncated span
    '''
    lenfeat_matrix = numpy.empty((len(dataset), 2))
    for i in range(len(dataset)):
        excla = 0
        review = dataset[i]['reviewText']
        # count '!' only within the first 2.5M characters, matching the
        # 2500-thousand-character truncation applied to the length below
        for j in range(len(review)):
            if j < 2500000:
                if review[j] == "!":
                    excla = excla + 1
        review_len = len(dataset[i]['reviewText'])/1000
        if review_len >= 2500:
            review_len = 2500
        if review_len == 0:
            excla_percentage = 0
        else:
            excla_percentage = excla/(review_len*1000)
        lenfeat_matrix[i, 0] = review_len
        lenfeat_matrix[i, 1] = excla_percentage * 100
    return lenfeat_matrix


len_tr = len_features(baby_train)
print(len_tr[800, :])

print(X_train_neg.shape, sia_tr.shape, len_tr.shape)

# stack horizontally
X_train_augmented = numpy.concatenate((X_train_neg, sia_tr), axis=1)
lreg_augmented = LinearRegression().fit(X_train_augmented, Y_train)
pred_train_augmented = lreg_augmented.predict(X_train_augmented)
mean_train_augmented = mean_absolute_error(pred_train_augmented, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mean_train_augmented)

# Now with the length and exclamation-% features added
X_train_augmented2 = numpy.concatenate((X_train_neg, sia_tr, len_tr), axis=1)
lreg_augmented2 = LinearRegression().fit(X_train_augmented2, Y_train)
pred_train_augmented2 = lreg_augmented2.predict(X_train_augmented2)
mean_train_augmented2 = mean_absolute_error(pred_train_augmented2, Y_train)
print("Now the mean absolute error on the training data is %f stars" % mean_train_augmented2)

# random forest
rf_augmented = RandomForestRegressor().fit(X_train_augmented, Y_train)         # fitting
rfpred_train_augmented = rf_augmented.predict(X_train_augmented)               # prediction
mae_train_rf_augmented = mean_absolute_error(rfpred_train_augmented, Y_train)  # check error
print("For the RF, MAE is %f stars" % mae_train_rf_augmented)

# random forest NOW with the len_tr features included
rf_augmented2 = RandomForestRegressor().fit(X_train_augmented2, Y_train)
rfpred_train_augmented2 = rf_augmented2.predict(X_train_augmented2)
mae_train_rf_augmented2 = mean_absolute_error(rfpred_train_augmented2, Y_train)
# BUGFIX: this previously printed mae_train_rf_augmented (the model WITHOUT the length features)
print("For the RF, MAE is %f stars" % mae_train_rf_augmented2)

# +
# BUGFIX: validation features must be built with the same extractor the models
# were trained on (dataset_to_matrix_with_neg), not the plain dataset_to_matrix.
X_valid_neg = dataset_to_matrix_with_neg(baby_valid)
sia_valid = sia_features(baby_valid)
X_valid_augmented = numpy.concatenate((X_valid_neg, sia_valid), axis=1)
pred_valid_lraugmented = lreg_augmented.predict(X_valid_augmented)
pred_valid_rf_augmented = rf_augmented.predict(X_valid_augmented)
mae_valid_augmented = mean_absolute_error(pred_valid_lraugmented, Y_valid)
print("on the validation set, we get %f error for the linear regression" % mae_valid_augmented)
mae_valid_rfaugemented = mean_absolute_error(pred_valid_rf_augmented, Y_valid)
print("and %f for the random forest regression" % mae_valid_rfaugemented)
# -

# ### **HOMEWORK** ="Be lazy. Not just lazy but proactively, agressively lazy." Remove duplication.
# create a single function that takes in data and spits out all success metrics across all of your algos.
# +
def WholeAnalysisPipeline(data):
    """Build all 10 features for `data`, fit a LinearRegression and a
    RandomForestRegressor on them, and report the MAE of both.

    Works for the train or the validation dataset.  Features per review:
      [0:2]  negation-aware positive/negative word fractions
      [2:5]  mean VADER pos/neu/neg sentiment over the sentences
      [5:8]  max  VADER pos/neu/neg sentiment over the sentences
      [8]    review length in thousands of characters (truncated at 2500)
      [9]    percentage of exclamation marks (in %)

    Returns
    -------
    (float, float)
        MAE of the linear model and of the random forest, both measured on
        the same data they were fitted on.
    """
    Y_data = dataset_to_targets(data)
    whole_feat_matrix = numpy.empty((len(data), 10))  # all the features in one matrix
    for i in range(len(data)):
        excla = 0
        review = data[i]['reviewText']
        whole_feat_matrix[i, 0:2] = pos_neg_fraction_with_negation(review)
        # --- sentiment (VADER) features ---
        sentences = sent_tokenize(review)
        nsent = len(sentences)
        if nsent:
            sentences_polarities = numpy.empty((nsent, 3))
            for j in range(nsent):
                polarity = sia.polarity_scores(sentences[j])
                sentences_polarities[j, 0] = polarity['pos']
                sentences_polarities[j, 1] = polarity['neu']
                sentences_polarities[j, 2] = polarity['neg']
            # axis=0 aggregates over sentences, one value per polarity column
            whole_feat_matrix[i, 2:5] = numpy.mean(sentences_polarities, axis=0)
            whole_feat_matrix[i, 5:8] = numpy.max(sentences_polarities, axis=0)
        else:
            whole_feat_matrix[i, 2:8] = 0.0
        # --- length features ---
        # count '!' only within the first 2.5M characters, matching the truncation below
        for j in range(len(review)):
            if j < 2500000:
                if review[j] == "!":
                    excla = excla + 1
        review_len = len(review)/1000
        if review_len >= 2500:
            review_len = 2500
        if review_len == 0:
            excla_percentage = 0
        else:
            excla_percentage = excla/(review_len*1000)
        whole_feat_matrix[i, 8] = review_len
        whole_feat_matrix[i, 9] = excla_percentage * 100

    m_lreg_augmented = LinearRegression().fit(whole_feat_matrix, Y_data)
    m_pred_train_augmented = m_lreg_augmented.predict(whole_feat_matrix)
    m_mean_train_augmented2 = mean_absolute_error(m_pred_train_augmented, Y_data)
    print("Now the mean absolute error on the training data is %f stars" % m_mean_train_augmented2)

    m_rf_augmented = RandomForestRegressor().fit(whole_feat_matrix, Y_data)
    # BUGFIX: predict on the features built from `data`, not on the global
    # X_train_augmented2 (which always referred to the training set, making the
    # reported RF MAE wrong for any other dataset).
    m_rfpred_train_augmented = m_rf_augmented.predict(whole_feat_matrix)
    m_mae_train_rf_augmented = mean_absolute_error(m_rfpred_train_augmented, Y_data)
    print("For the RF, MAE is %f stars" % m_mae_train_rf_augmented)
    return m_mean_train_augmented2, m_mae_train_rf_augmented


WholeAnalysisPipeline(baby_train)
Week4/.ipynb_checkpoints/Homeworks week4-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import os os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"; os.environ["CUDA_VISIBLE_DEVICES"]="0" # ## Image Classification Example # We will begin our image classification example by importing some required modules. import ktrain from ktrain import vision as vis # Next, we use the ```images_from_folder``` function to load the data as a generator (i.e., DirectoryIterator object). This function assumes the following directory structure: # ``` # ├── datadir # │ ├── train # │ │ ├── class0 # folder containing documents of class 0 # │ │ ├── class1 # folder containing documents of class 1 # │ │ ├── class2 # folder containing documents of class 2 # │ │ └── classN # folder containing documents of class N # │ └── test # │ ├── class0 # folder containing documents of class 0 # │ ├── class1 # folder containing documents of class 1 # │ ├── class2 # folder containing documents of class 2 # │ └── classN # folder containing documents of class N # ``` # The *train_test_names* argument can be used, if the train and test subfolders are named differently (e.g., *test* folder is called *valid*). Here, we load a dataset of cat and dog images, which can be obtained from [here](https://www.kaggle.com/c/dogs-vs-cats/data). The DATADIR variale should be set to the path to the extracted folder. The **data_aug** parameter can be used to employ [data augmentation](https://arxiv.org/abs/1712.04621). We set this parameter using the ```get_data_aug``` function, which returns a default data augmentation with ```horizontal_flip=True``` as the only change to the defaults. See [Keras documentation](https://keras.io/preprocessing/image/#imagedatagenerator-class) for a full set of agumentation parameters. 
Finally, we pass the requested target size (224,224) and color mode (rgb, which is a 3-channel image). The image will be resized or converted appropriately based on the values supplied. A target size of 224 by 224 is typically used when using a network pretrained on ImageNet, which we do next. The ```images_from_folder``` function returns generators for both the training and validation data in addition an instance of ```ktrain.vision.ImagePreprocessor```, which can be used to preprocess raw data when making predictions for new examples. This will be demonstrated later. DATADIR = 'data/dogscats' (train_data, val_data, preproc) = vis.images_from_folder(datadir=DATADIR, # use a default data augmentation with horizontal_flip=True data_aug=vis.get_data_aug(horizontal_flip=True), train_test_names=['train', 'valid'], target_size=(224,224), color_mode='rgb') # Next, we use the ```image_classifier``` function to load a **ResNet50** model pre-trained on [ImageNet](http://www.image-net.org/). For more information on using pretrained networks, see this [blog post](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html). By default, all layers except the randomly initialized custom Dense layers on top are frozen (i.e., trainable). We, then, wrap the model and data in a Learner object. We specify 4 CPU workers to load batches during training, disable multiprocessing, and use a batch size of 64. You can change these values based on your system specification to see what yields the best peformance. # let's print the available precanned image classification models in ktrain vis.print_image_classifiers() model = vis.image_classifier('pretrained_resnet50', train_data, val_data) learner = ktrain.get_learner(model=model, train_data=train_data, val_data=val_data, workers=8, use_multiprocessing=False, batch_size=64) # Next, we freeze the first 15 layers, as the ImageNet pre-trained weights of these early layers are typically applicable as is. 
All other layers are unfrozen and trainable. You can use the ```learner.freeze``` and ```learner.unfreeze``` methods to selectively freeze and unfreeze layers, if necessary. ```learner.freeze(freeze_range=15)``` and ```learner.unfreeze(exclude_range=15)``` are equivalent. The number of layers you freeze will depend on how similar your dataset is to ImageNet and other particulars of the dataset. For instance, classifying satellite images or subcellular protein patterns may require less frozen layers than classifying pictures of dogs and cats. You can also begin training for a few epochs with many frozen layers and gradually unfreeze layers for later epochs. learner.freeze(freeze_range=15) # You use the ```print_layers``` function to examine the layers of the created network. learner.print_layers() # As shown before, we use the Learning Rate Finder in *ktrain* to find a good initial learning rate. learner.lr_find() learner.lr_plot() # Finally, we will use the ```autofit``` method to train our model using a [triangular learning rate policy](https://arxiv.org/pdf/1506.01186.pdf). Since we have not specified the number of epochs, the maximum learning # rate will be periodically reduced when validation loss fails to decrease and eventually stop automatically. # # Our final validation accuracy is **99.55%** first occuring at the 8th epoch during this run. learner.autofit(1e-4) loss, acc = learner.model.evaluate_generator(learner.val_data, steps=len(learner.val_data)) print('final loss:%s, final accuracy:%s' % (loss, acc)) # As can be seen, the final validation accuracy of our model is **99.55%**. # ### Using Our Model to Make Predictions # Finally, let's use our model to make predictions for some images. # # Here is a sample image of both a cat and a dog from the validation set. 
!!ls {DATADIR}/valid/cats |head -n3 !!ls {DATADIR}/valid/dogs |head -n3 vis.show_image(DATADIR+'/valid/cats/cat.10016.jpg') vis.show_image(DATADIR+'/valid/dogs/dog.10001.jpg') # Now, let's create a predictor object to make predictions for the above images. predictor = ktrain.get_predictor(learner.model, preproc) # Let's see if we predict the selected cat and dog images correctly. predictor.predict_filename(DATADIR+'/valid/cats/cat.10016.jpg') predictor.predict_filename(DATADIR+'/valid/dogs/dog.10001.jpg') # Our predictor is working well. We can save our predictor to disk for later use in an application. predictor.save('/tmp/cat_vs_dog_detector') # Let's load our predictor from disk to show that it still works as expected. predictor = ktrain.load_predictor('/tmp/cat_vs_dog_detector') predictor.predict_filename(DATADIR+'/valid/cats/cat.10016.jpg') predictor.predict_filename(DATADIR+'/valid/dogs/dog.10001.jpg') # Finally, let's make predictions for all the cat pictures in our validation set: predictor.predict_folder(DATADIR+'/valid/cats/')[:10] # ## Multi-Label Image Classification # In the previous example, the classes were mutually exclusive. That is, images contained either a dog or a cat, but not both. Some problems are multi-label classification problems in that each image can belong to multiple classes (or categories). One such instance of this is the [Kaggle Planet Competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space). In this competition, were are given a collection of satellite images of the Amazon rainforest. The objective here is to identify locations of deforestation and human encroachment on forests by classifying images into up to 17 different categories. Categories include *agriculture*, *habitation*, *selective_logging*, and *slash_burn*. A given satellite image can belong to more than category. 
The dataset can be downloaded from the [competition page](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/data). The satellite images are located in a zipped folder called **train-jpg.zip**. The labels for each image are in the form of a CSV (i.e., **train_v2.csv**) with file names and their labels. Let us first examine the CSV file for this dataset. Be sure to set the DATADIR variable to the path of the extracted dataset. DATADIR = 'data/planet' !!head {DATADIR}/train_v2.csv # We make three observations. # * The *image_name* field is the file name of the satellite image. # * The file names are missing the .jpg file extension. # * The labels are simply a space-delimited list of tags, rather than a one-hot-encoded vector. # # Let us first convert this CSV into a new CSV that includes one-hot-encoded representations of the tags and appends the file extension to the file names. Since this dataset format is somewhat common (especially for multi-label image classification problems), *ktrain* contains a convenience function to automatically perform the conversion. ORIGINAL_DATA = DATADIR+'/train_v2.csv' CONVERTED_DATA = DATADIR+'/train_v2-CONVERTED.csv' labels = vis.preprocess_csv(ORIGINAL_DATA, CONVERTED_DATA, x_col='image_name', y_col='tags', suffix='.jpg') !!head {DATADIR}/train_v2-CONVERTED.csv # We can use the ```images_from_csv``` for function to load the data as generators. Remember to specify ```preprocess_for='resenet50'```, as we will be using a ResNet50 architecture again. train_data, val_data, preproc = vis.images_from_csv( CONVERTED_DATA, 'image_name', directory=DATADIR+'/train-jpg', val_filepath = None, label_columns = labels, data_aug=vis.get_data_aug(horizontal_flip=True, vertical_flip=True)) # As before, we load a pre-trained ResNet50 model (the default) and wrap this model and the data in a Learner object. Here, will freeze only the first two layers, as the satelitte images are comparatively more dissimilar to ImageNet. 
Thus, the weights in earlier layers will need more updating.

model = vis.image_classifier('pretrained_resnet50', train_data, val_data=val_data)
learner = ktrain.get_learner(model, train_data=train_data, val_data=val_data,
                             batch_size=64, workers=8, use_multiprocessing=False)
learner.freeze(2)

# The learning-rate-finder indicates a learning rate of 1e-4 would be a good choice.

learner.lr_find()
learner.lr_plot()

# For this dataset, instead of using ```autofit```, we will use the ```fit_onecycle``` method that utilizes the [1cycle learning rate policy](https://arxiv.org/pdf/1803.09820.pdf). The final model achieves an F2-score of **0.928**, as shown below.

learner.fit_onecycle(1e-4, 20)

# If there is not yet evidence of overfitting, it can sometimes be beneficial to train further after early_stopping. Since the validation loss appears to still decrease, we will train for a little more using a lower learning rate. We only train for one additional epoch here for illustration purposes. Prior to training, the current model is saved using the ```learner.save_model``` method in case we end up overfitting. If overfitting, the original model can be restored using the ```learner.load_model``` method.

learner.save_model('/tmp/planet_model')

learner.fit_onecycle(1e-4/10,1)

# ### Evaluation
#
# The evaluation metric for the [Kaggle Planet competition](https://www.kaggle.com/c/planet-understanding-the-amazon-from-space#evaluation) was the F2-score.
#
# As shown below, this model achieves an F2-score of **0.928**.
# from sklearn.metrics import fbeta_score import numpy as np import warnings def f2(preds, targs, start=0.17, end=0.24, step=0.01): with warnings.catch_warnings(): warnings.simplefilter("ignore") return max([fbeta_score(targs, (preds>th), 2, average='samples') for th in np.arange(start,end,step)]) y_pred = learner.model.predict_generator(val_data, steps=len(val_data)) y_true = val_data.labels f2(y_pred, y_true) # ### Making Predictions # # # Let's make some predictions using our model and examine results. As before, we first create a Predictor instance. predictor = ktrain.get_predictor(learner.model, preproc) # Let's examine the folder of images and select a couple to analyze. !!ls {DATADIR}/train-jpg/ |head # Image train_10008.jpg is categorized into the following classes: # * artisinal_mine (i.e., small-scale mining operations - sometimes illegal in lands designated for conservation) # * clear # * primary (rainforest) # * water vis.show_image(DATADIR+'/train-jpg/train_10008.jpg') !!head -n 1 {CONVERTED_DATA} !!grep train_10008.jpg {CONVERTED_DATA} # Our predictions are consistent as shown below: predictor.predict_filename(DATADIR+'/train-jpg/train_10008.jpg') # Here is another example showing water, clear, and primary. vis.show_image(DATADIR+'/train-jpg/train_10010.jpg') !!head -n 1 {CONVERTED_DATA} !!grep train_10010.jpg {CONVERTED_DATA} predictor.predict_filename(DATADIR+'/train-jpg/train_10010.jpg')
tutorial-03-image-classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# The Fourier transform is a way of expressing information in the **frequency domain** instead of the time domain. You can go back and forth from one to the other.
#
# It would be easiest to examine how the Fourier transform behaves in 1D before moving to 2D images. The expression for the 1D Fourier transform is:
#
# $$ F(u) = \int_{-\infty}^{\infty}{f(x)e^{-j2\pi u x}dx} $$
#
# And the inverse transform (going back into the spatial domain):
#
# $$f(x) = \int_{-\infty}^{\infty}{F(u)e^{j2 \pi u x}du} $$
#
# But since we will be dealing with discrete-valued functions (images) we need to have a discrete form of the Fourier transform. This can be expressed as:
#
# $$F(u) = \frac{1}{M} \sum_{x=0}^{M-1}{f(x) e^{-j2 \pi ux/M}} $$
#
# This is easiest to make sense of when you see a few examples to get the idea of how the transform behaves.

# +
fig, axes = plt.subplots(nrows=4, figsize=(15,17))
taxes = [0]*4
for i, ax in enumerate(axes):
    taxes[i] = ax.twinx()
    ax.set_ylim([-1.2, 1.2])
    taxes[i].set_ylim([-60, 280])

axes[0].plot(x, y, c='k')
taxes[0].bar(x, yf, width=0.03)
axes[1].plot(x, y1, c='k')
taxes[1].bar(x, y1f, width=0.03)
axes[2].plot(x, y2, c='k')
taxes[2].bar(x, y2f, width=0.03)
axes[3].plot(x, y3, c='k')
taxes[3].bar(x, y3f, width=0.03)

#ax.bar(x, yf, width=0.05)
# -

# The 2D version of the discrete Fourier transform has a similar expression:
#
# $$F(u,v) = \frac{1}{MN}\sum_{x=0}^{M-1}{\sum_{y=0}^{N-1}{f(x,y) e^{-j2 \pi (ux/M + vy/N)}}} $$

# How about an actual picture now? Let's try our trusty camera-man.

# Note that it is standard to center the spectrum. We will also perform a log adjustment to enhance the contrast.
# With the exception of trivial cases, it is usually impossible to make direct associations between specific components of an image and its transform. There are some general statements that we can make though:
#
# Since frequency is directly related to the rate of change, we can intuit that frequencies in the Fourier space correspond to patterns in the spatial domain. As we move away from the center, the low frequencies correspond to the slowly varying components of an image. Higher frequencies farther out from the center correspond to faster and faster gray level changes in the image.

# # Filtering in the frequency domain
#
# Filtering in the frequency domain consists of the following steps:
#
# 1. Compute F(u,v), the DFT of the image.
# 2. Center the transform
# 3. Multiply F(u,v) by a filter function H(u,v)
# 4. Un-center the transform
# 5. Compute the inverse transform
# 6. Obtain the real part

# It just so happens that F(0,0) = the average intensity of the original image. So, if we set this single value to zero, the average intensity of the image will be 0. This is known as a **notch filter**. That doesn't matter if we're scaling everything between 0 and 1, so we won't bother.
#
# Low frequencies are responsible for the general gray-level appearance of an image over smooth areas, while high frequencies are responsible for detail, such as edges and noise. A filter that attenuates high frequencies while "passing" low frequencies is called a **lowpass filter**. A filter that has the opposite effect is called a **highpass filter**. A low-pass filtered image would have less sharp detail, because high frequencies have been attenuated. A high-pass filtered image would have less gray-level variations in smooth areas and emphasized transitional (edge) gray-level details (e.g. a sharper image).

# This should look a little familiar: didn't we get similar results from using convolutions in the spatial domain? Actually yes. We are just doing the same thing two different ways.
A convolution in the spatial domain is the same as multiplication in the Fourier domain: # # $$f(x,y) * h(x,y) \leftrightarrow F(u,v)H(u,v) $$ # # Analogously: # # $$f(x,y)h(x,y) \leftrightarrow F(u,v) * H(u,v) $$ # # Why is this important? In general, if the filters are the same size, it is computationally more efficient to work in the frequency domain. But we often use smaller filters in the spatial domain-- in that case, it is more efficient to work in the spatial domain. You can also derive smaller spatial domain filters if you know the filter in the frequency domain. What is typically done in practice is to experiment in the frequency domain, obtain an H(u,v), then develop a smaller filter in the spatial domain based on H(u,v) that is used in practice. # You can also try **ideal** filters, that are just binary filters, as opposed to gradually changing filters like the Gaussian # Note the odd ringing that occurs when using ideal filters. This is a characteristic of ideal filters. This can be explained if you look at the corresponding filter in the spatial domain. # Or, going to the center to construct a smaller filter # The centeral circle is responsible for the blurring in the processing image, while the concentric rings are responsible for the ringing behavior. Gaussian filters achieve blurring without ringing, because they have the central circle without the concentric rings. # # An intermediate filter is the **Butterworth filter**. It has the form: # # $$H(u,v) = \frac{1}{1+[D(u,v)/D_0]^{2n}}$$ # # where D is the distance from any point to the center: # # $$D(u,v) = [(u-M/2)^2+(v-N/2)^2]^{1/2} $$ # # Where n is the order of the filter and $D_0$ is the cutoff frequency. # A Butterworth filter of order 1 has no ringing. Ringing is imperceptible with filters of order 2, but becomes more significant with higher order filters. It's an intermediate between ideal filters and Gaussian filters. 
# # LaPlacian in the filter domain # # # Taking advantage of some properties of the Fourier transform: # # $$\mathscr{F} \bigg[ \frac{\partial^2 f(x,y)}{\partial x^2} + \frac{\partial^2 f(x,y)}{\partial y^2} \bigg] = (ju)^2F(u,v) + (jv)^2F(u,v) = -(u^2 + v^2)F(u,v)$$ # # Thus, the LaPlacian filter: # # $$H(u,v) = -(u^2 + v^2) $$ # # Correcting for centering: # # $$H(u,v) = -[(u-M/2)^2 + (v-N/2)^2] $$ # #
Wi20_content/L5_Frequency_Domain_empty.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- ## --skrypt realizujacy symulacje dla przykladuz wahadlem matematycznym bez tlumienia (zajecia 1 ) import numpy as np from matplotlib import pyplot as plt plt.rcParams['figure.figsize'] = [12, 7] plt.rcParams['figure.dpi'] = 150 # + # -- wyprowadzenie modelu matematycznego przedstawiam krok po kroku w filmie, dostępnym na YT # https://youtu.be/BP613iafk5o # - # -dane wejsciowe g = 9.81 l = 1.2 # + # -model matematyczny A = np.identity(2) B = np.zeros((2, 2)) # zapewnienie spojnosci modelu B[1, 0] = -1 #-wprowadzanie wspolczynnika tlumienia do ukladu rownan B[0, 0] = 0.5 #-tworze pusty wektor prawej strony ukladu rownan F = np.zeros((2, 1)) # - #-definicja dziedziny czasu t0 = 0 tf = 20 dt = 1E-4 t = np.arange(t0, tf, dt) #-definicja warunkow poczatkowych y0 = np.array([[9], [0*np.pi]]) #-alokacja pamieci na wyniki wyn = np.zeros((2, t.size)) #-tworze zmienna pomocnicza na wyniki poszczegolnych iteracji Result = y0.copy() # + #----procedura obliczeniowa------- #-wprowadzanie warunku poczatkowego do tablicy na wyniki for i in range(0, max(y0.shape)): wyn[i, 0] = y0[i] #-definiuje funkcje typu in-line do wyznaczania wartosci chwilowych dla prawej strony ukladu rownan f = lambda x: (-g/l)*np.sin(x) #-implementuje rozwiazanie numeryczne metoda Euler'a for k in range(0, max(t.shape)-1): F[0] = f(Result[1]) Result += np.linalg.inv(A) @ (F - B @ Result) * dt wyn[0, k+1] = Result[0] wyn[1, k+1] = Result[1] # + # -rysowanie wykresu fig, axs = plt.subplots(2) axs[0].plot(t, wyn[0, :]) axs[0].grid() # axs[0].set_xlabel('time') axs[0].set_ylabel('theta') axs[0].set_title('predkosc') axs[1].plot(t, wyn[1,:]) axs[1].grid() axs[1].set_xlabel('time') axs[1].set_ylabel('theta') axs[1].set_title('wychylenie') # - # -- definicja plaszczyzny theta = 
np.linspace(-3*np.pi, 3*np.pi, 50) omega = np.linspace(-10, 10, 50) # print(theta.shape) #alokacja pamieci phase = np.zeros((max(theta.shape), max(omega.shape), 2)) #definicja pomocniczej funkcji lambda FF = lambda x: np.array([[(-g/l*np.sin(x))], [0]], dtype=object) # obliczenia skladowych pola wektorowego na podstawie zaleznosci opisującej dynamike ukladu czyli: f'(x) x = np.zeros((2,1)) ph = np.zeros((max(theta.shape), max(omega.shape), 2)) fprime = lambda x: np.linalg.inv(A) @ (FF(x[1]) - B @ x) for i in range(max(theta.shape)): for j in range(max(omega.shape)): dif = fprime(np.array([[theta[i]], [omega[j]]])) ph[i, j, 0] = dif[0] ph[i, j, 1] = dif[1] plt.figure(2) plt.quiver(theta, omega, ph[:,:,1], ph[:,:,0], minshaft=0.2, color='blue', headwidth=2, headlength=5, headaxislength=3) plt.xlabel('theta') plt.ylabel('omega') plt.title('plaszczyzna fazowa') plt.grid() # -- dodaje przebieg rozwiazania plt.plot(wyn[1,:], wyn[0,:], color='red')
pendulum_eg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + editable=true # %load_ext sql # + editable=true # %sql postgresql://student:student@127.0.0.1/sparkifydb # + editable=true # %sql SELECT * FROM songplays LIMIT 5; # + editable=true # %sql SELECT * FROM users LIMIT 5; # + editable=true # %sql SELECT * FROM songs LIMIT 5; # + editable=true # %sql SELECT * FROM artists LIMIT 5; # + editable=true # %sql SELECT * FROM time LIMIT 5; # + editable=true # %sql SELECT COUNT(*) AS total_number_users FROM users; # + editable=true # %sql SELECT gender, level, COUNT(*) AS total_number FROM users GROUP BY gender, level; # + editable=true # %sql SELECT COUNT(*) AS total_number_artists FROM artists; # + editable=true # %sql SELECT COUNT(*) AS total_number_songs FROM songs; # + [markdown] editable=true # ## REMEMBER: Restart this notebook to close connection to `sparkifydb` # + editable=true
Project 1 - Data Modeling with Postgres/test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Create Artifact PROJECT_NAME = 'proj_sample' # !ls -lh data # + import wandb run = wandb.init(project=PROJECT_NAME, job_type='preprocessing', entity='drozzy') artifact = wandb.Artifact(name='titanic', type='dataset') artifact.add_dir('data') run.log_artifact(artifact) # - # ## Use Artifact (reload jupyter) # + import wandb run = wandb.init(project=PROJECT_NAME, job_type='training', entity='drozzy') artifact = run.use_artifact(artifact='titanic:latest', type='dataset') artifact_dir = artifact.download() print(f'Downloaded to: {artifact_dir}') # - # !ls {artifact_dir} # + for line in open(f'{artifact_dir}/train.csv', 'r'): print(line) break for line in open(f'{artifact_dir}/test.csv', 'r'): print(line) break # - # ### Sample logging using the artifact # + wandb.config.dropout = 0.2 wandb.config.hidden_layer_size = 128 for epoch in range(10): loss = 0 # change as appropriate :) wandb.log({'epoch': epoch, 'loss': loss}) # -
create_artifact_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Engine # %load_ext autoreload # %autoreload 2 import os,sys sys.path.insert(1, os.path.join(sys.path[0], '..', 'module')) import wiki # ### Test path_base = '/Users/harangju/Developer/data/wiki/partition/' name_xml = 'enwiki-20190720-pages-articles-multistream1.xml-p10p30302.bz2' name_index = 'enwiki-20190720-pages-articles-multistream-index1.txt-p10p30302.bz2' path_xml = path_base + name_xml path_index = path_base + name_index dump = wiki.Dump(path_xml, path_index) # test initial idx # should load indices & calculate block sizes l = list(dump.idx) l[:3] # test subsequent idx # this time, should just return the indices l = list(dump.idx) l[:3] # test fetch_block() # should return the XML block starting with alchemy offset, i, block_size = dump.idx['Alchemy'] xml = wiki.Dump.fetch_block(path_xml, offset, block_size) xml[:100] # + # test search_id() import xml.etree.ElementTree as ET root = ET.fromstring(b'<root>' + xml + b'</root>') page_text = wiki.Dump.search_id(root, 12) page_text[:200] # - # test filter_top_section wiki.Dump.filter_top_section(page_text)[:400] # test load_page() # page_name = 'AccessibleComputing' page_name = 'Anarchism' # page_name = 'Artificial languages' # page_name = 'Abstract (law)' # page_name = 'Anxiety' # page_name = 'Foreign relations of Azerbaijan' # page_name = '<NAME>' # page_name = 'ADHD' page = dump.load_page(page_name, filter_top=True) page[:400] # testing XML cache # %time page = dump.load_page('AccessibleComputing') # %time page = dump.load_page('Anarchism') # %time page = dump.load_page('Angola') print('Number of links: ' + str(len(dump.links))) dump.links[:5] # %time dump.article_links[:5] # %time dump.article_links[:5] dump.load_page('No page') print(dump.links) print(dump.article_links) page = 
dump.load_page('Angola') history = wiki.Dump.get_history(dump.page) history[:500] years = wiki.Dump.filter_years(history) years[:10] dump.years[:10] dump.load_page('AccessibleComputing') dump.years
tests/test-wiki-dump.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Date Updated:12/2/2020 #Topic: US COVID Confirmed Cases from January 22 - Nov 18 import pandas as pd import numpy as np # %matplotlib inline pd.options.display.float_format = '{:.2f}'.format #Read excel dataset df = pd.read_excel('timeseries_covid19_us_confirmed.xlsx',index_col='date',parse_dates=True) #since everything is US, it is now irrelevant in this dataframe df = df.drop(columns=['country']) df.head() # - df.tail() df.info() df.index df.total.describe() # ## ETS Decomposition from statsmodels.tsa.seasonal import seasonal_decompose result = seasonal_decompose(df['total'], model = 'multiplicative') result from pylab import rcParams rcParams['figure.figsize']=12,5 result.plot(); # Analysis: There is a trend and seasonality in this dataset. # # Forecasting with Holt-Winters Method # ### Train & Test Split #80/20 split 80% of 302 is 242 and 20% of 302 is 60 train_data = df.iloc[:242] # Goes up to but not including 242 test_data = df.iloc[242:] test_data.index # ### Fitting the Model # + from statsmodels.tsa.holtwinters import ExponentialSmoothing import warnings warnings.filterwarnings("ignore") fitted_model = ExponentialSmoothing(train_data['total'],trend='mul',seasonal='mul',seasonal_periods=7).fit() # - # ### Evaluating Model against Test Set test_predictions = fitted_model.forecast(60).rename('Case Forecast') #from 9/20/20 to 11/18/20 test_predictions train_data['total'].plot(legend=True,label='TRAIN') test_data['total'].plot(legend=True,label='TEST',figsize=(15,10)) test_predictions.plot(legend=True,label='PREDICTION'); train_data['total'].plot(legend=True,label='TRAIN') #original test values test_data['total'].plot(legend=True,label='TEST',figsize=(15,10)) #Zoom to focus on the comparison of testing and prediction 
test_predictions.plot(legend=True,label='PREDICTION',xlim=['9/17/2020','11/20/2020']); # ### Evaluation Metrics from sklearn.metrics import mean_squared_error,mean_absolute_error mean_absolute_error(test_data,test_predictions) mean_squared_error(test_data,test_predictions) np.sqrt(mean_squared_error(test_data,test_predictions)) test_data.describe() #we want to compare MSE with Avergae of Test Data, or RMSE with STD of overall True Data # # Forecasting into the Future final_model = ExponentialSmoothing(df['total'],trend='mul',seasonal='mul',seasonal_periods=7).fit() forecast_predictions = final_model.forecast(43)#forecast to December 31,2020 forecast_predictions df['total'].plot(figsize=(12,8)) forecast_predictions.plot(legend=True,label='PREDICTION'); #Winter season 2020 (Predicted from 11/19 to 12/31) percentage increase. 100 * (forecast_predictions.loc['2020-12-31'] - forecast_predictions.loc['2020-11-19']) / forecast_predictions.loc['2020-11-19'] # Analysis: By the end of the year, there will be an increase of about 21,673,065.50 total confirmed cases in the United States. This is an 85% increase from November 19 to December 31. # # EWMA #Create a simple moving average (SMA) - 1 week df['7-day-SMA'] = df['total'].rolling (window = 7).mean() #Window is in relation days df.head(10) #Create a simple moving average (SMA) - 2 weeks df['14-day-SMA'] = df['total'].rolling (window = 14).mean() df.plot(figsize=(12,5)); df['EWMA-7'] = df['total'].ewm (span = 7).mean() df[['total', 'EWMA-7']].plot(figsize=(12,5)); #Exponential Weighted Moving Average - EWMA 2 weeks df['EWMA-14'] = df['total'].ewm (span = 14).mean() df[['total', 'EWMA-14']].plot(figsize=(12,5)); # Analysis: We can conclude that using a weekly (7 days) period and a two-week (14 days) period show very similar positive trend as the original total number of confirmed cases. 
# # ACF & PACF # Import the statsmodel from statsmodels.tsa.stattools import acovf,acf,pacf,pacf_yw,pacf_ols df = pd.DataFrame({'total':[1,1,2,2,5,5,5,6,6,8,8,8,11,11,11,12,12,12,12,12,13,13,14,14,14,14,14,14,14,14,16,16,16,16,16,16, 17,17,25,32,55,74,107,184,237,403,519,594,782,1147,1586,2219,2978,3212,4679,6512,9169,13663,20030, 26025,34855,46080,56685,68754,86613,105293,124900,143779,165835,192177,218035,248447,280417,313417, 341629,371802,403212,435375,469989,503439,532761,559695,585518,614061,644213,675624,708296,736237, 761932,790348,816389,845698,878911,912662,944211,971066,994253,1018907,1046722,1076203,1110443, 1138215,1162681,1186063,1210541,1235639,1263365,1290121,1315078,1333956,1353366,1376091,1397050, 1424212,1449467,1473497,1491804,1513789,1534835,1557967,1583760,1607071,1628134,1648146,1666542, 1685931,1704448,1727316,1751532,1775411,1794444,1811364,1832750,1852783,1874133,1899531,1920893, 1938579,1956121,1974436,1995392,2018432,2043341,2068537,2087608,2107106,2130753,2157124,2185059, 2216056,2247987,2274003,2304675,2340836,2375209,2415570,2460898,2502411,2542727,2582721,2628753, 2680294,2735926,2787598,2833232,2882830,2927021,2987639,3047438,3109997,3177856,3237911,3297011, 3355793,3423156,3490765,3567937,3639586,3702091,3762878,3824330,3888437,3960276,4028867,4102027, 4167452,4222488,4278460,4344232,4415991,4483552,4550214,4607983,4654133,4698705,4755862,4810377, 4869668,4927811,4982203,5028443,5077133,5124014,5180717,5232435,5297168,5343926,5385099,5421610, 5466634,5513998,5558067,5606317,5649966,5684419,5720909,5760722,5806212,5851281,5897820,5943118, 5977868,6013011,6054607,6095534,6139311,6189453,6232569,6263967,6287484,6314339,6348188,6384163, 6431466,6472367,6506533,6540379,6579728,6618161,6663061,6711680,6754647,6792454,6843972,6883437, 6921997,6967687,7015737,7059974,7097217,7129408,7172182,7213326,7258592,7312775,7361385,7397455, 7436312,7479029,7529600,7587826,7643776,7697676,7743468,7784396,7837182,7896552,7960813,8029175, 
8085940,8135180,8201980,8263008,8325891,8400991,8481812,8564592,8626352,8692595,8768402,8846899, 8936997,9035265,9123994,9199595,9281802,9405633,9508154,9634074,9759029,9887116,9996838,10117470, 10256089,10399325,10560111,10737335,10903890,11036935,11195388,11357322,11527483]}) #biased array arr = acovf(df['total']) arr #Unbiased array ubarr = acovf(df['total'],unbiased=True) ubarr #Autocorrelation for 1D acarr = acf(df['total']) acarr #Partial Autocorrelation pacarr = pacf_yw(df['total'],nlags=100,method='mle') #Maximum likelihood estimation pacarr #Partial Unbiased ubarr2= pacf_yw(df['total'],nlags=100,method='unbiased') ubarr2 # + #Plotpoint to show correlation from pandas.plotting import lag_plot lag_plot(df['total']); # - #ACF Plots from statsmodels.graphics.tsaplots import plot_acf,plot_pacf #ACF array acf(df['total']) #41 lags was returned #Plot the autocorrelation at 41 lags title = 'Autocorrelation: Daily US Confirmed Cases' lags = 41 plot_acf(df,title=title,lags=lags); #How far does it go to reach negative? title = 'Autocorrelation: Daily US Confirmed Cases' lags = 120 plot_acf(df,title=title,lags=lags); # Analysis: The previous data of COVID cases does affect the number of future cases. According to the ACF plots. The autocorrelation of daily US Confirmed Cases is gradually decreasing. By 110 lags, the autocorrelation reaches 0 and lags after 110 becomes negative. # ## Differencing # + #apply differencing from statsmodels.tsa.statespace.tools import diff df['d1'] = diff(df['total'],k_diff=1) df['d1'].plot(figsize=(12,5)); # - title='PACF: Daily US Confirmed Cases' lags=40 plot_pacf(df['d1'].dropna(),title=title,lags=np.arange(lags)); # be sure to add .dropna() here! # ## Plot Resizing # + import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(12,5)) plot_acf(df['total'],ax=ax);
TimeSeries_CalvinTruong.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 37: Logistic Regression as a Classifier - 2 Class Classifier import struct import numpy as np import gzip import urllib.request import matplotlib.pyplot as plt from array import array from sklearn.linear_model import LogisticRegression # ## Prepare the MNIST data for training # The following 4 cells will download the save the MNIST training data to file. Once you have run these cells once successfully you can delete or comment them out. # # Download the image files # + active="" # request = urllib.request.urlopen('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz') # # with open('train-images-idx3-ubyte.gz', 'wb') as f: # f.write(request.read()) # # request = urllib.request.urlopen('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz') # # with open('t10k-images-idx3-ubyte.gz', 'wb') as f: # f.write(request.read()) # - # Download the label files # + active="" # request = urllib.request.urlopen('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz') # # with open('train-labels-idx1-ubyte.gz', 'wb') as f: # f.write(request.read()) # # request = urllib.request.urlopen('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz') # # with open('t10k-labels-idx1-ubyte.gz', 'wb') as f: # f.write(request.read()) # - # !ls *.gz # or !dir *.gz for windows # Load the downloaded data # + with gzip.open('train-images-idx3-ubyte.gz', 'rb') as f: magic, size, rows, cols = struct.unpack(">IIII", f.read(16)) img = np.array(array("B", f.read())).reshape((size, rows, cols)) with gzip.open('train-labels-idx1-ubyte.gz', 'rb') as f: magic, size = struct.unpack(">II", f.read(8)) labels = np.array(array("B", f.read())) with gzip.open('t10k-images-idx3-ubyte.gz', 'rb') as f: magic, size, rows, cols = struct.unpack(">IIII", f.read(16)) 
img_test = np.array(array("B", f.read())).reshape((size, rows, cols)) with gzip.open('t10k-labels-idx1-ubyte.gz', 'rb') as f: magic, size = struct.unpack(">II", f.read(8)) labels_test = np.array(array("B", f.read())) # - # Visualise a sample of the data for i in range(10): plt.subplot(2, 5, i + 1) plt.imshow(img[i], cmap='gray'); plt.title(f'{labels[i]}'); plt.axis('off') # ## Construct a Logistic Model to Classify Digits 0 - 9 # # In this model as we are predicting classes 0 - 9 we will require images from all available data. However given the extremely large dataset we will need to sample only a small amount of the original MNIST set due to limited system requirements and anticipated training time. We will select 2000 samples at random: # + samples_0_1 = np.where((labels == 0) | (labels == 1))[0] images_0_1 = img[samples_0_1] labels_0_1 = labels[samples_0_1] samples_0_1_test = np.where((labels_test == 0) | (labels_test == 1)) images_0_1_test = img_test[samples_0_1_test].reshape((-1, rows * cols)) labels_0_1_test = labels_test[samples_0_1_test] # - # Visualising the selected information: sample_0 = np.where((labels == 0))[0][0] plt.imshow(img[sample_0], cmap='gray'); sample_1 = np.where((labels == 1))[0][0] plt.imshow(img[sample_1], cmap='gray'); # In order to provide the image information to the Logistic model we must first flatten the data out so that each image is 1 x 784 pixels in shape. images_0_1 = images_0_1.reshape((-1, rows * cols)) images_0_1.shape # Let's construct the model, use the sklearn LogisticRegression API and call the fit function. 
model = LogisticRegression(solver='liblinear') model.fit(X=images_0_1, y=labels_0_1) # Determine the score against the training set model.score(X=images_0_1, y=labels_0_1) # Display the first two predictions for the Logistic model against the training data model.predict(images_0_1) [:2] # Examine the corresponding predicted probabilities for the first two training samples model.predict_proba(images_0_1)[:2] # Compare the performance against the test set model.score(X=images_0_1_test, y=labels_0_1_test)
Chapter 4 - Classification/Exercise 37 - Logistic Regression - 2 Class Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy from matplotlib import pyplot # %matplotlib inline from matplotlib import rcParams rcParams['font.family'] = 'serif' rcParams['font.size'] = 16 # + L = 1 nt = 100 nx = 51 alpha = 1.22e-3 dx = L/(nx - 1) Ti = numpy.zeros(nx) Ti[0] = 100 # - def ftcs(T, nt, dt, dx, alpha): """Solves the diffusion equation with FTCS scheme Parameters: T - initial temp. profile nt - number of time steps dx - mesh spacing alpha - diffusion coefficient (thermal diffusivity) Returns: T - temp profile after nt time steps """ for n in range (nt): Tn = T.copy() T[1:-1] = Tn[1:-1] + alpha*dt/dx**2*(Tn[2:] - 2*Tn[1:-1] + Tn[0:-2]) return T sigma = 1/2.0 dt = sigma * dx**2/alpha T = ftcs(Ti.copy(), nt, dt, dx, alpha) pyplot.plot(numpy.linspace(0,1,nx), T, ls = '-', lw = 3) pyplot.ylim(0, 100) pyplot.xlabel('Length of Rod') pyplot.ylabel('Temperature'); nt = 1000 T = ftcs(Ti.copy(), nt, dt, dx, alpha ) pyplot.plot(numpy.linspace(0,1,nx), T, ls = '-', lw = 3) pyplot.ylim(0, 100) pyplot.xlabel('Length of Rod') pyplot.ylabel('Temperature'); def ftcs_mixed(T, nt, dt, dx, alpha): """Solves the diffusion equation with FTCS using Dirichlet BC at left boundary and Neumann BC at right boundary Parameters: u - initial temperature profile nt - number of time steps dt - step size dx - spatial discretization alpha - diffusion coefficient Returns: u - Temperature profile after nt time steps with FT scheme """ for n in range(nt): Tn = T.copy() T[1:-1] = Tn[1:-1] + alpha*dt/dx**2*(Tn[2:] - 2*Tn[1:-1] + Tn[0:-2]) T[-1] = T[-2] return T nt = 1000 T = ftcs_mixed(Ti.copy(), nt, dt, dx, alpha) pyplot.plot(numpy.linspace(0,1,nx), T, lw = 3) pyplot.ylim(0, 100) pyplot.title('Rod with LHS Dirichlet BC and RHS Neumann BC\n') pyplot.ylabel('Temperature') pyplot.xlabel('Length');
lessons/04_spreadout/04_01_1D_Heat_Eqn_Explicit_solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Test Hypothesis by Simulating Statistics # ## Mini-Lab: Hypothesis Testing # Welcome to your next mini-lab! Go ahead an run the following cell to get started. You can do that by clicking on the cell and then clickcing `Run` on the top bar. You can also just press `Shift` + `Enter` to run the cell. # + from datascience import * import numpy as np import otter import matplotlib # %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') grader = otter.Notebook("m7_l1_tests") # - # In the previous two labs we've analyzed some data regarding COVID-19 test cases. Let's continue to analyze this data, specifically _claims_ about this data. Once again, we'll be be using ficitious statistics from Blockeley University. # # Let's say that Blockeley data science faculty are looking at the spread of COVID-19 across the realm of Minecraft. We have very specific data about Blockeley and the rest of Cubefornia but other realms' data isn't as clear cut or detailed. Let's say that a neighboring village has been reporting a COVID-19 infection rate of 26%. Should we trust these numbers? # # Regardless of whether or not you believe these claims, the job of a data scientist is to definitively substantiate or disprove such claims using data. You have access to the test results of similar sized villages nearby and come up with the brilliant idea of running a hypothesis test. Let's go ahead and load it! Run te cell below to import this data. If you want to explore this data further, go ahead and group by both columns! An empty cell is provided for you to do this. test_results = Table.read_table("../datasets/covid19_village_tests.csv") test_results.show(5) ... 
# From here we can formulate our **Null Hypothesis** and **Alternate Hypothesis** Our *null hypothesis* is that this village truly has a 26% infection rate amongst the populations. Our *alternate hypothesis* is that this village does not in actuality have a 26% infection rate - it's way too low. Now we need our test statistic. Since we're looking at the infection rate in the population, our test statistic should be given by the following formula: # # $$\text{Test Statistic} = \frac{\text{Number of Positive Cases}}{\text{Total Number of Cases}}$$ # # We've started the function declaration for you. Go ahead and complete `proportion_positive` to calculate this test statistic. # # *Note*: Check out `np.count_nonzero` and built-in `len` function! These should be helpful for you. def proportion_positive(test_results): numerator = ... denominator = ... return numerator / denominator grader.check("q1") # If you grouped by `Village Number` before, you would realize that there are roughly 3000 tests per village. Let's now create functions that will randomly take 3000 tests from the `test_results` table and to apply our test statistic. Complete the `sample_population` and `apply_statistic` functions below! # # The `sample_population` function will take a `population_table`. This will be a table with all the data we want and will return a new table that has been sampled from this `population_table`. Please note that `with_replacement` should be `False`. # # The `apply_statistic` function will take in a `sample_table`, `column_name`, and `statistic_function`. The `sample_table` will be a table full of samples taken from a population table, the `column_name` will be the name of the column containing the data of interest, and the `statistic_function` which will be the test statistic that we will use. This function will return the result of using the `statistic_function` on the `sample_table`. # + def sample_population(population_table): sampled_population = ... 
return sampled_population def apply_statistic(sample_table, column_name, statistic_function): return statistic_function(...) # - grader.check("q2") # Now for the simulation portion! Complete the for loop below and fill in a reasonable number for the `iterations` variable. The `iterations` variable will determine just how many random samples that we will take in order to test our hypotheses. There is also code that will visualize your simulation and give you data regarding your simulation vs. the null hypothesis. # + # Simulation code below. Fill out this portion! iterations = ... simulations = make_array() for iteration in np.arange(iterations): sample_table = ... test_statistic = ... simulations = np.append(simulations, test_statistic) # This code is to tell you what percentage of our simulations are at or below the null hypothesis # There's no need to fill anything out but it is good to understand what's going on! null_hypothesis = 0.26 num_below = np.count_nonzero(simulations <= null_hypothesis) / iterations print(f"Out of the {iterations} simulations, roughly {round(num_below * 100, 2)}% of test statistics " + f"are less than our null hypothesis of a {null_hypothesis * 100}% infection rate.") # This code is to graph your simulation data and where our null hypothesis lies # There's no need to fill anything out but it is good to understand what's going on! simulation_table = Table().with_column("Simulated Test Statistics", simulations) simulation_table.hist(bins=20) plots.scatter(null_hypothesis, 0, color='red', s=30); # - grader.check("q3") # Given our hypothesis test, what can you conclude about the village that reports having a 26% COVID-19 infection rate? Has your hypothesis been proven or disproven? Do you now trust or distrust these numbers? And if you do distrust these numbers, what do you think went wrong in the reporting? # Congratulations on finishing! Run the next cell to make sure that you passed all of the test cases. grader.check_all()
minilabs/test-hypothesis-by-simulating-statistics/test_hypothesis_minilab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Ships in Satellite Imagery # ### Context: # Detecting the location of large ships in satellite images. # ### Content: # This dataset is from Planet satellite imagery which collected over the San Francisco Bay and San Pedro Bay areas of California. It includes 4000 80x80 RGB images labeled with either a "ship" or "no-ship" classification. # # label: Valued 1 representing the "ship" class, valued 0 representing the "no-ship" class. # # scene id: The unique identifier of the PlanetScope visual scene the image chip was extracted from. The scene id can be used with the Planet API to discover and download the entire scene. # # longitude_latitude: The longitude and latitude coordinates of the image center point, with values separated by a single underscore. # # 80x80 RGB image is stored as a list of 19200 integers. The first 6400 entries includes the red channel values, the next 6400 is green, third 6400 is blue. # # Data Wrangling and EDA # + #Import the necessary modules import json import matplotlib.pyplot as plt from skimage import color import numpy as np from keras.utils import np_utils # - #Download the dataset f = open('C://Users//kurt_//Downloads//shipsnet.json//shipsnet.json') dataset = json.load(f) f.close() dataset.keys() # + #Convert datas from list to array data_ = np.array(dataset['data']).astype('uint8') labels_ = np.array(dataset['labels']).astype('uint8') #It will be seen from the shape that data includes 4000 images and every image represented as a vector of length 19200 elements. 
print(data_.shape) print(labels_.shape) # - #Check for NULL values in data and labels print(np.isnan(data_).sum()) print(np.isnan(labels_).sum()) # + #Check for ship/no-ship numbers number_ship = np.sum(labels_==1) number_noship = np.sum(labels_==0) sizes = [number_ship, number_noship] titles = "Ship", "NoShip" fig = plt.figure(figsize=(5, 7)) plt.pie(sizes, labels=titles, autopct='%1.1f%%') plt.show() print('Number of ship:', number_ship) print('Number of no-ship:', number_noship) # - #Color channel 3:RGB n_spct = 3 w = 80 h = 80 data_ = data_.reshape([-1, n_spct, w, h]) # + #Get one channel pic = data_[0] red_spct = pic[0] green_spct = pic[1] blue_spct = pic[2] #Plot each channel plt.figure(figsize=(15, 5)) plt.set_cmap('hsv') plt.subplot(1, 3, 1) plt.imshow(red_spct) plt.subplot(1, 3, 2) plt.imshow(green_spct) plt.subplot(1, 3, 3) plt.imshow(blue_spct) plt.show() # - #Reshape the data data = data_.transpose([0, 2, 3, 1]) #Check for shapes print(data.shape) print(labels_.shape) # + #Check for images with ship and no_ship noship = data[labels_==0] ship = data[labels_==1] def plot_(x, y): plt.figure(figsize=(6, 8)) plt.subplot(1, 2, 1) plt.title('ship') plt.imshow(ship[1]) plt.subplot(1, 2, 2) plt.title('noship') plt.imshow(noship[1]) plt.show() plot_(ship, noship) # - # Grayscaling: It is a process which convert an image from other color spaces e.g RGB, CMYK, HSV, etc. to grayscale. It varies between complete black and complete white. #Convert the RGB images to grayscale, so that have one number for each pixel. data_gray = [color.rgb2gray(x) for x in data] data_gray = np.array(data_gray) #Check for one image plt.imshow(data[8]) print(labels_[8]) #Check for shape print(data_gray.shape) #Convert labels to categorical labels = np_utils.to_categorical(labels_, 2) print(labels.shape)
Capstone3-EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 经验证: # * 1、test的时间顺序并不是全部都是时序的,排行榜是按时序排列后取前0.78为public,后0.22为private,所以以后的工作重心将落到train的后44% # * 2、测试集中私有部分从2018-07-24 09:00:00开始,有9173472条数据,包含全部的building_id # * 3、测试集中公共部分与私有部分对应的数据从2017-07-24 09:00:00开始,有9174900条数据,包含全部的building_id # * 4、重点为训练集中2017-07-24 09:00:00后的数据建模,有9003109条数据,占训练集0.445 # * 5、为防止测试集中不评分的那部分不知道算不算在0.22里面,也为防止一定的过拟合,可以考虑重点关注2017-07月后的训练数据,占0.51 # * 6、验证EDA1后,发现0-104号建筑5月20日前的大量为0的数据是有误的,训练集不应该对这部分数据进行过拟合,删除这部分数据训练对提高public评分很有帮助,遗憾的是,private并不包含这类数据 # * 7、猜想某些大范围为0的数据都同0-104号一样是有误的,重点验证7月后的该种数据,测试是否能提高public # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Suppress warnings import warnings warnings.filterwarnings('ignore') import gc # matplotlib and seaborn for plotting import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns import matplotlib.patches as patches from plotly import tools, subplots import plotly.offline as py py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.express as px pd.set_option('max_columns', 100) py.init_notebook_mode(connected=True) from plotly.offline import init_notebook_mode, iplot init_notebook_mode(connected=True) import plotly.graph_objs as go # Input data files are available in the "../input/" directory. 
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory import os for dirname, _, filenames in os.walk('../input'): for filename in filenames: print(os.path.join(dirname, filename)) # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" # 导入数据, 只有train 和 test train = pd.read_csv('../input/ashrae-energy-prediction/train.csv') test = pd.read_csv('../input/ashrae-energy-prediction/test.csv') # - # 时间戳转换日期 train["timestamp"] = pd.to_datetime(train["timestamp"]) test["timestamp"] = pd.to_datetime(test["timestamp"]) train_exception = pd.read_pickle('../output/ashrae-eda-exception-label1/train_exception.pkl') train['exception'] = train_exception.exception.values del train_exception gc.collect() test_exception = pd.read_pickle('../output/ashrae-eda-exception-label1/test_exception.pkl') test['exception'] = test_exception.exception.values del test_exception gc.collect() gc.collect() # + # 1413个建筑有0,498个建筑有1,324个建筑有2,145个建筑有3 # [0,1]有132个,[0,2]64,[0,3]21, [1,2]13 # [0,1,2]220, [0,1,3]111 # [0,1,2,3]13 # - gc.collect() # + # (0,0)到(104,0)从5月中旬才开始有读数, 其中有些在开始有读数之前有一些读数,不知什么情况 # 猜测该地区出现了大面积的电表故障或者大面积的电力故障,其中有极少建筑幸存或用于检修测试,所以有少量读数,5月中旬后修好才恢复该地区正常用电或读表 # - train.exception.value_counts(dropna=False) test.exception.value_counts(dropna=False) def exception_label(bid, meter, start, end): print(f'标记bid={bid}, meter={meter}异常') train.loc[(train.building_id == bid) & (train.meter == meter) & (train.timestamp >= start) & (train.timestamp <= end), 'exception'] = 1 test.loc[(test.building_id == bid) & (test.meter == meter) & (test.timestamp >= start.replace('2016', '2017')) & (test.timestamp <= end.replace('2016', '2017')), 'exception'] = 1 test.loc[(test.building_id == bid) & (test.meter == meter) & (test.timestamp >= start.replace('2016', '2018')) & (test.timestamp <= end.replace('2016', '2018')), 'exception'] = 1 gc.collect() # + exception_label(745, 2, '2016-10-04 08:00:00', 
'2016-10-11 18:00:00') # exception_label(747, 2, '2016-05-23 08:00:00', '2016-06-08 12:00:00') # exception_label(747, 2, '2016-06-11 06:00:00', '2016-06-18 03:00:00') # exception_label(747, 2, '2016-06-18 07:00:00', '2016-07-02 03:00:00') # exception_label(747, 2, '2016-07-02 05:00:00', '2016-09-16 01:00:00') # exception_label(747, 2, '2016-09-17 05:00:00', '2016-09-25 01:00:00') # exception_label(747, 2, '2016-10-18 16:00:00', '2016-10-22 01:00:00') exception_label(750, 2, '2016-10-04 08:00:00', '2016-10-11 08:00:00') exception_label(753, 2, '2016-10-04 08:00:00', '2016-10-11 09:00:00') # exception_label(754, 2, '2016-06-02 19:00:00', '2016-06-08 03:00:00') # exception_label(754, 2, '2016-06-15 10:00:00', '2016-07-02 03:00:00') # exception_label(754, 2, '2016-07-02 05:00:00', '2016-10-04 09:00:00') # exception_label(757, 2, '2016-06-11 11:00:00', '2016-07-02 03:00:00') # exception_label(757, 2, '2016-07-02 05:00:00', '2016-10-11 08:00:00') exception_label(758, 2, '2016-03-07 13:00:00', '2016-03-13 02:00:00') exception_label(758, 2, '2016-03-13 04:00:00', '2016-04-22 04:00:00') exception_label(758, 2, '2016-04-22 06:00:00', '2016-07-02 03:00:00') exception_label(758, 2, '2016-07-02 05:00:00', '2016-10-11 08:00:00') exception_label(758, 2, '2016-10-11 10:00:00', '2016-12-29 05:00:00') exception_label(758, 2, '2016-12-29 07:00:00', '2016-12-31 23:00:00') # exception_label(759, 2, '2016-01-14 22:00:00', '2016-01-25 08:00:00') # exception_label(759, 2, '2016-01-25 11:00:00', '2016-02-02 08:00:00') # exception_label(759, 2, '2016-06-12 12:00:00', '2016-06-24 13:00:00') # exception_label(759, 2, '2016-07-26 18:00:00', '2016-08-05 12:00:00') # exception_label(759, 2, '2016-08-05 14:00:00', '2016-09-06 19:00:00') exception_label(762, 2, '2016-02-23 12:00:00', '2016-03-03 15:00:00') exception_label(762, 2, '2016-03-03 17:00:00', '2016-03-13 02:00:00') exception_label(762, 2, '2016-03-13 04:00:00', '2016-04-22 04:00:00') exception_label(762, 2, '2016-04-22 06:00:00', 
'2016-07-02 03:00:00') exception_label(762, 2, '2016-07-02 05:00:00', '2016-10-11 08:00:00') exception_label(762, 2, '2016-10-11 10:00:00', '2016-12-29 05:00:00') exception_label(762, 2, '2016-12-29 07:00:00', '2016-12-31 23:00:00') exception_label(767, 2, '2016-10-04 08:00:00', '2016-10-11 08:00:00') # exception_label(769, 2, '2016-07-03 10:00:00', '2016-09-02 08:00:00') # exception_label(769, 2, '2016-10-04 05:00:00', '2016-10-11 08:00:00') # exception_label(771, 2, '2016-05-23 10:00:00', '2016-07-02 03:00:00') # exception_label(771, 2, '2016-07-02 05:00:00', '2016-10-11 08:00:00') # exception_label(771, 2, '2016-10-16 12:00:00', '2016-10-22 01:00:00') exception_label(772, 2, '2016-01-01 00:00:00', '2016-02-09 07:00:00') exception_label(772, 2, '2016-02-09 09:00:00', '2016-02-25 14:00:00') exception_label(772, 2, '2016-10-04 08:00:00', '2016-10-18 14:00:00') exception_label(772, 2, '2016-10-18 16:00:00', '2016-12-16 12:00:00') # exception_label(774, 2, '2016-01-01 00:00:00', '2016-02-11 08:00:00') # exception_label(774, 2, '2016-07-29 08:00:00', '2016-08-04 06:00:00') # exception_label(774, 2, '2016-08-04 09:00:00', '2016-08-22 22:00:00') # exception_label(774, 2, '2016-08-23 08:00:00', '2016-08-29 20:00:00') exception_label(776, 2, '2016-08-25 09:00:00', '2016-08-29 10:00:00') exception_label(776, 2, '2016-10-18 00:00:00', '2016-10-26 11:00:00') exception_label(776, 2, '2016-10-26 16:00:00', '2016-11-18 09:00:00') exception_label(776, 2, '2016-11-20 16:00:00', '2016-11-25 23:00:00') exception_label(776, 2, '2016-11-29 16:00:00', '2016-12-09 05:00:00') exception_label(783, 2, '2016-01-01 00:00:00', '2016-12-09 14:00:00') exception_label(784, 2, '2016-10-04 08:00:00', '2016-10-11 08:00:00') # exception_label(789, 2, '2016-06-24 07:00:00', '2016-07-13 05:00:00') # exception_label(789, 2, '2016-07-20 13:00:00', '2016-08-07 12:00:00') # exception_label(790, 2, '2016-06-24 07:00:00', '2016-07-19 13:00:00') # exception_label(790, 2, '2016-07-19 15:00:00', '2016-08-07 
12:00:00') # exception_label(790, 2, '2016-08-07 14:00:00', '2016-09-15 11:00:00') # exception_label(790, 2, '2016-09-15 14:00:00', '2016-09-29 11:00:00') # exception_label(791, 2, '2016-05-26 04:00:00', '2016-05-31 18:00:00') # exception_label(791, 2, '2016-06-01 04:00:00', '2016-06-05 09:00:00') # exception_label(791, 2, '2016-06-05 11:00:00', '2016-10-12 08:00:00') # exception_label(796, 2, '2016-05-24 06:00:00', '2016-10-24 12:00:00') # exception_label(798, 2, '2016-05-24 10:00:00', '2016-10-18 08:00:00') # exception_label(799, 2, '2016-05-13 14:00:00', '2016-05-20 11:00:00') # exception_label(799, 2, '2016-05-20 13:00:00', '2016-07-05 18:00:00') # exception_label(799, 2, '2016-07-05 20:00:00', '2016-10-24 15:00:00') # exception_label(799, 2, '2016-10-28 19:00:00', '2016-11-03 13:00:00') # exception_label(802, 2, '2016-05-24 06:00:00', '2016-06-05 13:00:00') # exception_label(802, 2, '2016-06-05 15:00:00', '2016-07-24 11:00:00') # exception_label(802, 2, '2016-07-24 15:00:00', '2016-10-11 05:00:00') exception_label(907, 2, '2016-09-30 16:00:00', '2016-10-08 03:00:00') # 中国国庆节? 
exception_label(912, 2, '2016-10-23 12:00:00', '2016-10-27 10:00:00') exception_label(912, 2, '2016-10-27 12:00:00', '2016-11-04 05:00:00') exception_label(928, 2, '2016-10-08 17:00:00', '2016-10-18 09:00:00') exception_label(928, 2, '2016-10-23 16:00:00', '2016-10-27 08:00:00') exception_label(928, 2, '2016-10-27 17:00:00', '2016-10-31 08:00:00') exception_label(954, 2, '2016-01-01 00:00:00', '2016-08-08 10:00:00') exception_label(1072, 2, '2016-01-01 00:00:00', '2016-07-25 12:00:00') # exception_label(1075, 2, '2016-05-24 10:00:00', '2016-10-25 08:00:00') # exception_label(1078, 2, '2016-05-03 15:00:00', '2016-05-10 10:00:00') # exception_label(1078, 2, '2016-05-10 12:00:00', '2016-07-15 08:00:00') # exception_label(1078, 2, '2016-07-15 10:00:00', '2016-07-28 15:00:00') # exception_label(1078, 2, '2016-07-28 21:00:00', '2016-09-28 14:00:00') # exception_label(1078, 2, '2016-09-28 16:00:00', '2016-10-01 12:00:00') # exception_label(1078, 2, '2016-10-01 14:00:00', '2016-10-04 09:00:00') # exception_label(1078, 2, '2016-10-04 11:00:00', '2016-10-11 10:00:00') # 1099问题太奇怪了 exception_label(1111, 2, '2016-09-19 10:00:00', '2016-09-27 15:00:00') exception_label(1111, 2, '2016-09-28 02:00:00', '2016-10-11 20:00:00') # exception_label(1119, 2, '2016-06-09 21:00:00', '2016-06-15 14:00:00') # exception_label(1119, 2, '2016-06-15 16:00:00', '2016-07-01 18:00:00') # exception_label(1119, 2, '2016-07-01 20:00:00', '2016-07-28 16:00:00') # exception_label(1119, 2, '2016-07-28 21:00:00', '2016-08-31 09:00:00') # exception_label(1119, 2, '2016-08-31 11:00:00', '2016-09-08 15:00:00') # exception_label(1149, 2, '2016-04-12 11:00:00', '2016-10-26 17:00:00') # exception_label(1155, 2, '2016-05-25 10:00:00', '2016-08-25 07:00:00') # exception_label(1155, 2, '2016-08-25 09:00:00', '2016-09-22 08:00:00') # exception_label(1155, 2, '2016-09-22 10:00:00', '2016-10-18 09:00:00') # exception_label(1162, 2, '2016-05-24 10:00:00', '2016-07-18 09:00:00') # exception_label(1162, 2, '2016-07-18 
11:00:00', '2016-09-11 02:00:00') # exception_label(1162, 2, '2016-09-11 04:00:00', '2016-09-17 19:00:00') # exception_label(1162, 2, '2016-09-20 06:00:00', '2016-09-29 16:00:00') # exception_label(1186, 2, '2016-07-05 09:00:00', '2016-08-05 13:00:00') # exception_label(1192, 2, '2016-07-07 20:00:00', '2016-07-19 14:00:00') # 1203问题奇怪 # 1207也奇怪 # 1220小缺口奇怪 exception_label(1225, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1226, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1243, 2, '2016-10-19 15:00:00', '2016-11-08 20:00:00') exception_label(1250, 2, '2016-01-01 00:00:00', '2016-12-21 15:00:00') exception_label(1263, 2, '2016-11-10 14:00:00', '2016-11-21 14:00:00') exception_label(1288, 2, '2016-07-07 16:00:00', '2016-08-10 18:00:00') exception_label(1291, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1292, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1293, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1294, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1295, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1296, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1297, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1298, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1299, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1305, 2, '2016-09-28 08:00:00', '2016-10-20 12:00:00') exception_label(1307, 2, '2016-09-28 07:00:00', '2016-10-20 12:00:00') exception_label(1307, 2, '2016-12-17 06:00:00', '2016-12-31 04:00:00') # 处理了1.0613的值 exception_label(1309, 2, '2016-07-25 20:00:00', '2016-08-04 11:00:00') exception_label(1329, 2, '2016-11-21 16:00:00', '2016-11-29 09:00:00') exception_label(1346, 2, '2016-12-01 08:00:00', '2016-12-06 17:00:00') exception_label(1361, 2, '2016-11-28 12:00:00', '2016-12-01 10:00:00') exception_label(1361, 2, '2016-12-01 13:00:00', '2016-12-06 
15:00:00') # - gc.collect() # %%time for bid in train[(train.exception == 1) & (train.meter == 2)].building_id.unique(): train.loc[(train.building_id == bid) & (train.meter == 2) & (train.exception == 0), 'exception'] = -1 test.loc[(test.building_id == bid) & (test.meter == 2) & (test.exception != 1), 'exception'] = -1 train.exception.value_counts(dropna=False) test.exception.value_counts(dropna=False) # + # ## 存疑异常标记 # # 30前期有大量0值 # train.loc[(train.building_id == 30) & (train.meter == 1) & (train.meter_reading < 212), 'exception'] = 2 # # 43大量0值 # train.loc[(train.building_id == 43) & (train.meter == 1) & (train.meter_reading == 0), 'exception'] = 2 # # 28大量0值 # train.loc[(train.building_id == 28) & (train.meter == 1) & (train.meter_reading == 0), 'exception'] = 2 # - gc.collect() train[['exception']].to_pickle('../output/fork-of-ashrae-eda-exception-label2/train_exception.pkl') test[['exception']].to_pickle('../output/fork-of-ashrae-eda-exception-label2/test_exception.pkl')
solutions/rank-4/exception_label/fork-of-ashrae-eda-exception-label2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # *** # *** # # 13. 파일 입출력 # *** # *** # - 파일을 열어서 읽고, 쓰고, 덧붙이는 방법 # - open(filename, mode) 내장 함수로 filename 이름을 지닌 file 객체를 얻는다. # - 얻어진 파일 객체에서 자료를 읽거나, 쓰거나, 덧붙이는 작업 수행 # - 모든 작업이 끝나면 close()를 호출하여 작업 프로세스의 자원 점유 해제 # *** # ## 1 파일 입출력 방법 # *** # ### 1-1 파일 처리 모드의 종류 # - open 내장 함수의 두번째 인자 mode 설명 # - 두번째 인자 mode 생략시에는 읽기 전용(r) 모드로 설정 # # | Mode | 간단 설명 | 자세한 설명 # |--------|-----------------------------|------------| # | 'r' | 읽기 전용 (기본 모드) | 파일 객체를 읽기 모드로 생성하고, 파일 포인터를 파일 처음 위치에 놓는다.| # | 'w' | 쓰기 전용 | 새로운 파일을 쓰기 모드로 생성하거나 해당 파일이 이미 존재하면 내용을 모두 없에면서 쓰기 모드로 생성하고, 파일 포인터를 파일 처음 위치에 놓는다. | # | 'a' | 파일 끝에 추가 | 이미 존재하는 파일을 쓰기 모드로 생성하거나 파일이 존재하지 않으면 새롭게 파일을 생성하면서 쓰기 모드로 생성하고, 파일 포인터를 파일의 마지막 위치에 놓는다. 따라서, 이후 작성되는 내용은 파일의 뒷 부분에 추가된다.| # # - 이진 파일로 저장을 위해서는 아래 모드 사용 # # | Mode | 간단 설명 | # |--------|-----------------------------| # | 'rb' | 이진 파일 읽기 전용 | # | 'wb' | 이진 파일 쓰기 전용 | # | 'ab' | 이진 파일 끝에 추가 | # ### 1-2 파일 쓰기 # + import os print(os.getcwd()) # - s = """Its power: Python developers typically report they are able to develop applications in a half to a tenth the amount of time it takes them to do the same work in such languages as C.""" f = open('t.txt', 'w') f.write(s) # 문자열을 파일에 기록 f.close() # ### 1-3 파일 읽기 # - read() 메소드 사용 # - 대용량 파일인 경우 사용 비추천 #f = file('t.txt') f = open('t.txt', 'r') s = f.read() print(s) print(type(s)) # - close()을 마지막에 호출하지 않으면 해당 file 객체가 다른 값으로 치환되거나 프로그램이 종료될 때 자동으로 close()가 불리워진다. 
# - 하지만 명시적으로 close()를 호출하는 것을 권장함 # - with ~ as ~ 사용 추천 # - with ~ as ~ 블럭이 끝나면 자동으로 close()를 해줌 s = """Its power: Python developers typically report they are able to develop applications in a half to a tenth the amount of time it takes them to do the same work in such languages as C.""" with open('t.txt', 'w') as f: f.write(s) # 문자열을 파일에 기록 with open('t.txt', 'r') as f: s = f.read() print(s) print(type(s)) # ### 1-4 라인 단위로 파일 읽기 # - 총 4가지 방법 존재 # - 파일 객체의 반복자(iterator) 이용하기 # - 파일 객체의 반복자는 각 라인별로 내용을 읽어오도록 설정되어 있음 # - 파일을 라인별로 읽는 방법 중 가장 효과적임 # - readline(): 한번에 한줄씩 읽는다. # - readlines(): 파일 전체를 라인 단위로 끊어서 리스트에 저장한다. # - xreadlines(): python3.6에서는 지원하지 않음 # - 파일 객체의 반복자 사용 f = open('t.txt') i = 1 for line in f: print(i, ":", line, end="") i += 1 f.close() # - readline() 사용 f = open('t.txt') line = f.readline() print(type(line)) i = 1 while line: print(i, ":", line, end="") line = f.readline() i += 1 f.close() # - readlines() 사용 # - 각 라인을 모두 읽어서 메모리에 리스트로 저장함 # - 대용량 파일인 경우 사용 비추천 # + f = open('t.txt') print(f.readlines()) print(type(f.readlines())) print() f.seek(0) i = 1 for line in f.readlines(): print(i, ":", line, end="") i += 1 f.close() # + f = open('t.txt') print(f.readlines()) print(type(f.readlines())) print() f.close() f = open('t.txt') i = 1 for line in f.readlines(): print(i, ":", line, end="") i += 1 f.close() # - # - xreadlines() 사용 (python3에서는 지원하지 않는 메소드) # - python3에서는 xreadlines()사용과 파일 객체 f의 반복자 사용하는 경우와 동일함. # # + f = open('t.txt') print(f.xreadlines()) print() f.seek(0) i = 1 for line in f.xreadlines(): print(i, ":", line,) i += 1 f.close() # - # ### 1-5 라인 단위로 파일 쓰기 # - writelines(): 리스트 안에 있는 각 문자열을 연속해서 파일로 출력한다. 
# + lines = ['first line\n', 'second line\n', 'third line\n'] f = open('t1.txt', 'w') f.writelines(lines) f.close() f = open('t1.txt') print(f.read()) f.close() # - # - write() 이용하여 여러 문자열을 각 라인별로 파일로 출력하는 방법 # + lines = ['first line', 'second line', 'third line'] f = open('t1.txt', 'w') f.write('\n'.join(lines)) f.close() f = open('t1.txt') print(f.read()) f.close() # - # - 텍스트 파일 t.txt의 단어(공백으로 분리된 문자열) 수를 출력하는 방법 # + f = open('t.txt') s = f.read() print(s) n = len(s.split()) print(n) f.close() # - # ### 1-6 기존 파일에 내용 추가 f = open('removeme.txt', 'w') # 파일의 생성 f.write('first line\n') f.write('second line\n') f.close() # + f = open('removeme.txt', 'a') # 파일 추가 모드로 오픈 f.write('third line\n') f.close() f = open('removeme.txt') # 파일 읽기 print(f.read()) f.close() # - # ### 1-7 파일 내 임의 위치로 접근 # - 파일 포인터 (pointer) # - 파일 내에서 현재 위치를 가리키고 있음 # - 파일 접근 방법 # - 순차 접근 (기본 방식): 파일을 앞에서 부터 순차적으로 읽고 쓰는 방식 # - 임의 접근: 파일 내 임의 위치에서 읽고 쓰는 방식 # - 임의 접근을 위한 file 객체 포인터 (pointer) 관련 메소드 # - seek(n): 파일의 가장 첫번째 위치에서 n번째 바이트로 포인터 이동 # - tell(): 파일 내 현재 포인터 위치를 반환 # + name = 't.txt' f = open(name, 'w+') # 읽고 쓰기로 오픈, 단, 파일이 이미 존재한다면 기존 파일은 없어지고 다시 생성된다. s = '0123456789abcdef' f.write(s) f.seek(5) # 시작부터 5바이트 포인터 이동 print(f.tell()) # 현재 위치 돌려줌 print(f.read(1)) # 1문자 읽기 print(f.tell()) print() # - # *** # ## 2 표준 출력 방향 전환 # *** # # - sys 모듈의 표준 입출력 관련 객체 # - sys.stdout: 표준 출력 # - sys.stderr: 표준 에러 출력 # - sys.stdin: 표준 입력 # - 예를 들어, sys.stdout을 파일 객체로 변환하면 모든 표준 출력(print 출력)은 해당 파일로 저장된다. 
# ### 2-1 표준 출력을 파일로 저장하기 # + import sys f = open('t.txt', 'w') stdout = sys.stdout # 표준 출력 저장해 두기 sys.stdout = f # 파일 객체로 표준 출력 변경 print('Sample output') print('Good') print('Good') f.close() sys.stdout = stdout # 표준 출력 원상 복구 # - f = open('t.txt') print(f.read()) print("Hello koreatech!", file=open("output.txt", "w")) f = open('output.txt') print(f.read()) # - print를 직접 이용하여 출력을 다른 객체로 전환하기 # - 아래와 같은 방식은 python3에서 더이상 지원하지 않음 print >> sys.stderr, "Warning, action field not supplied" # - 대신 아래 방법 사용 print("Warning, action field not supplied", file=sys.stderr) # - 동일 방법으로 표준 출력(print)을 파일 객체로 전환 # + f = open('t.txt', 'w') print('spam string', file=f) f.close() f = open('t.txt') print(f.read()) f.close() # - # ### 2-2 StringIO 모듈 사용하기 # - StringIO 모듈의 StringIO 클래스 객체 # - 파일 객체처럼 입출력 가능한 문자열 객체 # - StringIO에 지원되는 메소드는 파일 객체가 지원하는 메소드와 거의 동일하다. # - getvalue() 메소드 # - 현재까지 담아 놓은 전체 내용을 반환한다. # - 표준 출력으로 문자열 객체에 내용 작성하기 # - python3 에서는 io 모듈내에 StringIO 클래스 존재 # + import sys from io import StringIO f = StringIO() f.write("abc") f.write("def") q = f.read() print(q) q1 = f.getvalue() print(q1) # + f.seek(0) print(f.read()) f.close() # + def f1(): s = "" for i in range(10000, 1000000): s += str(i) def f2(): s = StringIO() for i in range(10000, 1000000): s.write(str(i)) # - # %timeit f1() # %timeit f2() # + import sys from io import StringIO stdout = sys.stdout # 표준 출력 저장해 두기 sys.stdout = f = StringIO() print(type(f)) print('Sample output') print('Good') print('Good') sys.stdout = stdout # + s = f.getvalue() print('Done-------') print(s) # - # *** # ## 3 파일로의 지속 모듈 # *** # - 지속성(Persistence) # - 프로그램 내에 생성된 각종 객체들을 해당 프로그램 종료 이후에도 존재하게 만들고, 그것들을 동일한 또는 다른 프로그램에서 사용하는 기능 # - 지속성 기능을 지원하는 모듈 # - DBM 관련 모듈 # - anydbm, dbm, gdbm, dbhash, dumbdbm # - anydbm: 시스템에서 사용가능한 모듈 중 가장 최적의 모듈을 반환함 # - 기본적으로 dumbdbm을 반환한다 # - 사전 자료형을 사용하는 것과 동일한 방법으로 사용 # - **pickle** 모듈 # - 파이썬의 객체를 저장하는 일반화된 지속성 모듈 # - 파이썬의 기본 객체뿐만 아니라 사용자 정의의 복잡한 객체도 저장 가능 # - 기본적으로 텍스트 모드로 저장하지만 이진 모드로도 저장 가능 # - 
피클링(pickling) 모듈 사용하기 # - file을 open 할 때 이진 파일로 open 하기 # - 즉, 모드로서 'wb', 'rb' 적용 # + import pickle phone = {'tom':4358382, 'jack':9465215, 'jim':6851325, 'Joseph':6584321} List = ['string', 1234, 0.2345] Tuple = (phone, List) # 리스트, 튜플, 사전의 복합 객체 with open('pickle.txt', 'wb') as f: # 파일 객체를 얻는다. pickle.dump(Tuple, f) # 파일로 출력(pickling), 복합 객체 출력 with open('pickle.txt', 'rb') as f: x,y = pickle.load(f) # 파일에서 읽어오기. 튜플의 내용을 x, y에 받는다. print(x) # 사전 print(y) # 리스트 # + import pickle class Simple: # 가장 단순한 클래스를 정의 pass s = Simple() # 인스턴스 객체 생성 s.count = 10 # 인스턴스 이름 공간에 변수 생성 with open('pickle2.txt', 'wb') as f: pickle.dump(s, f) # 인스턴스 저장 with open('pickle2.txt', 'rb') as f: t = pickle.load(f) # 인스턴스 가져오기 print(t.count) # - # #### [참고] 다양한 파일 처리 모드 # # - open 내장 함수의 두번째 인자 mode 설명 # - 두번째 인자 mode 생략시에는 읽기 전용(r) 모드로 설정 # # | Mode | 간단 설명 | 자세한 설명 # |--------|-----------------------------|------------| # | 'r' | 읽기 전용(기본 모드) | 파일 객체를 읽기 모드로 생성하고, 파일 포인터를 파일 처음 위치에 놓는다.| # | 'w' | 쓰기 전용(기존 파일 내용 삭제) | 파일이 존재하지 않으면 새로운 파일을 쓰기 모드로 생성하고, 해당 파일이 이미 존재하면 내용을 모두 없에면서 쓰기 모드로 생성하고, 파일 포인터를 파일 처음 위치에 놓는다. | # | 'a' | 파일 끝에 추가(쓰기 전용) | 파일이 존재하지 않으면 새롭게 파일을 생성하면서 쓰기 모드로 생성하고, 해당 파일이 이미 존재하면 파일 객체을 쓰기 모드로 생성하면서 파일 포인터를 파일의 마지막 위치에 놓는다. 따라서, 이후 작성되는 내용은 파일의 뒷 부분에 추가됨.| # | 'r+' | 읽고 쓰기 | 파일 객체를 읽고 쓸 수 있도록 생성한다. 파일 포인터를 파일 처음 위치에 놓는다. | # | 'w+' | 읽고 쓰기(기존 파일 내용 삭제) | 파일 객체를 읽고 쓸 수 있도록 생성한다. 파일이 존재하지 않으면 새로운 파일을 생성하고, 해당 파일이 이미 존재하면 내용을 모두 없에면서 생성하고, 파일 포인터를 파일 처음 위치에 놓는다.| # | 'a+' | 읽고 쓰기(파일 끝에 추가) | 파일 객체를 읽고 쓸 수 있도록 생성한다. 파일이 존재하지 않으면 새롭게 파일을 생성하고, 해당 파일이 이미 존재하면 파일 객체을 생성하면서 파일 포인터를 파일의 마지막 위치에 놓는다 (그래서, 이후 작성되는 내용은 파일의 뒷 부분에 추가). 
| # # # - 이진 파일로 저장을 위해서는 아래 모드 사용 # # | Mode | 간단 설명 | # |--------|-----------------------------| # | 'rb' | 이진 파일 읽기 전용 | # | 'wb' | 이진 파일 쓰기 전용(기존 파일 내용 삭제) | # | 'ab' | 이진 파일 끝에 추가(쓰기 전용) | # | 'rb+' | 이진 파일 읽고 쓰기 | # | 'wb+' | 이진 파일 읽고 쓰기(기존 파일 내용 삭제) | # | 'ab+' | 이진 파일 끝에 추가(읽기도 가능) | # # # # <p style='text-align: right;'>참고 문헌: 파이썬(열혈강의)(개정판 VER.2), 이강성, FreeLec, 2005년 8월 29일</p>
python3.6/.ipynb_checkpoints/python13-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Introduction
#
# By exposing sample to electron beam, we can measure electrons inelastic scattering energy loss by identifying resonant collisions resulting in molecular vibrational levels excitations. Vibrational levels, up to a first anharmonic term, can be described as:
#
# \begin{equation}
# E_n = \hbar \omega (n + \frac{1}{2}) - \hbar \omega X (n + \frac{1}{2}) ^ 2
# \end{equation}

# # Analysis

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

import plotly.plotly as pl
from plotly import tools
from plotly import graph_objs as go
from plotly.offline import plot, init_notebook_mode, iplot
# -

# Raw spectra: column 0 is the energy loss (eV), column 1 the electron count.
data_1 = pd.read_csv("../data/Ascii1.dat", header=None, sep="\t").rename({0: "e", 1: "n"}, axis=1)
data_2 = pd.read_csv("../data/Ascii2.dat", header=None, sep="\t").rename({0: "e", 1: "n"}, axis=1)

# +
e_1 = data_1.e.values
n_1 = data_1.n.values

e_2 = data_2.e.values
n_2 = data_2.n.values
# -

# Restrict each spectrum to the region containing the vibrational peaks
# (by an energy window for spectrum 1, by a count threshold for spectrum 2).
idx_1 = np.where((e_1 < 2.7) & (e_1 > 1.3))
idx_2 = np.where(n_2 > 20)

# +
fig, ax = plt.subplots(1, 2)
fig.set_figwidth(15)

ax[0].scatter(e_1[idx_1], n_1[idx_1])
# BUG FIX: this panel previously plotted n_1[idx_2] — counts from
# spectrum 1 at indices selected on spectrum 2; it must show spectrum 2.
ax[1].scatter(e_2[idx_2], n_2[idx_2])
# -

# +
# Peak positions (eV) read off the two spectra, with their level numbers.
n1 = np.linspace(1, 5, 5)
n2 = np.linspace(1, 5, 5)
e1 = np.array([1.510, 1.763, 2.005, 2.275, 2.531])
e2 = np.array([1.292, 1.570, 1.838, 2.117, 2.402])
# -

n1

def energy(n, *args):
    """Anharmonic-oscillator level energy used as the fit model.

    args[0] is hbar*omega and args[1] the dimensionless anharmonicity X:
    E_n = hw*(n + 1/2) - hw*X*(n + 1/2)**2.
    """
    return args[0] * (n + .5) - args[0] * args[1] * (n + .5) ** 2

# +
init_notebook_mode(True)

trace_1 = go.Scatter(x=e_1[idx_1], y=n_1[idx_1], mode='markers')
trace_2 = go.Scatter(x=e_2[idx_2], y=n_2[idx_2], mode='markers')

fig = tools.make_subplots(rows=1, cols=2)
fig.append_trace(trace_1, 1, 1)
fig.append_trace(trace_2, 1, 2)

fig['layout'].update(height=600, width=800, title='Vibrational spectra')
#iplot(fig)
# -

popt_1, pcov_1 = curve_fit(energy, n1, e1, p0=[2e3, 5e-3], maxfev=8000)
popt_2, pcov_2 = curve_fit(energy, n2, e2, p0=[2e3, 5e-3], maxfev=8000)

# eV -> cm^-1 conversion factor (1 eV ~ 1/1.23941e-4 cm^-1 ~ 8068 cm^-1).
corr = 1 / 1.23941 * 10 ** 4

# +
# BUG FIX: curve_fit returns the covariance matrix, so the 1-sigma error
# is sqrt of a diagonal element (pcov[i, i] alone is a variance).
h_bar_omega_1 = popt_1[0] * corr
d_h_bar_omega_1 = np.sqrt(pcov_1[0, 0]) * corr

# BUG FIX: per the model above, hbar*omega*X = popt[0] * popt[1]
# (popt[1] alone is the dimensionless X); the variance of the product is
# propagated with squared partial derivatives, which were missing.
h_bar_omega_x_1 = popt_1[0] * popt_1[1] * corr
d_h_bar_omega_x_1 = np.sqrt(popt_1[0] ** 2 * pcov_1[1, 1]
                            + popt_1[1] ** 2 * pcov_1[0, 0]
                            + 2 * popt_1[1] * popt_1[0] * pcov_1[0, 1]) * corr
# -

# +
# Same quantities for the second spectrum.
h_bar_omega_2 = popt_2[0] * corr
d_h_bar_omega_2 = np.sqrt(pcov_2[0, 0]) * corr

h_bar_omega_x_2 = popt_2[0] * popt_2[1] * corr
d_h_bar_omega_x_2 = np.sqrt(popt_2[0] ** 2 * pcov_2[1, 1]
                            + popt_2[1] ** 2 * pcov_2[0, 0]
                            + 2 * popt_2[1] * popt_2[0] * pcov_2[0, 1]) * corr
# -

h_bar_omega_1
Physics of Molecules/src/Vibrational spectra.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import networkx as nx

# Calculate betweenness centrality of node
# v in digraph G (label is the name of the edge weight attribute).
def betweenness_centrality(G, label, v):
    """Return the normalised betweenness centrality of node v in digraph G.

    For every ordered pair (s, t) of distinct nodes other than v, the
    fraction of shortest s->t paths (weighted by edge attribute `label`)
    passing through v is accumulated; the total is divided by
    (n-1)(n-2), the number of ordered pairs, the usual normalisation
    for directed graphs.
    """
    total = 0.0
    n = len(G.nodes())
    for s in G.nodes():
        for t in G.nodes():
            if s == v or t == v or s == t:
                continue
            paths_total = 0
            paths_through_v = 0
            try:
                # all_shortest_paths is lazy and raises on iteration when
                # t is unreachable from s; such pairs contribute nothing.
                # BUG FIX: catch only that case instead of a bare except
                # that would also hide programming errors.
                for p in nx.all_shortest_paths(G, s, t, weight=label):
                    paths_total += 1
                    if v in p:
                        paths_through_v += 1
            except nx.NetworkXNoPath:
                continue
            if paths_total > 0:
                # BUG FIX: under the Python 2 kernel this notebook
                # declares, B/A was integer division (always 0 or 1);
                # float() forces true division in both Python 2 and 3.
                total += float(paths_through_v) / paths_total
    return total / ((n - 1) * (n - 2))
Day06_GraphAlgorithms2/notebooks/sol2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 10 Minutes to cuDF and Dask-cuDF # ======================= # # Modeled after 10 Minutes to Pandas, this is a short introduction to cuDF and Dask-cuDF, geared mainly for new users. # # ### What are these Libraries? # # [cuDF](https://github.com/rapidsai/cudf) is a Python GPU DataFrame library (built on the Apache Arrow columnar memory format) for loading, joining, aggregating, filtering, and otherwise manipulating data. # # [Dask](https://dask.org/) is a flexible library for parallel computing in Python that makes scaling out your workflow smooth and simple. # # [Dask-cuDF](https://github.com/rapidsai/dask-cudf) is a library that provides a partitioned, GPU-backed dataframe, using Dask. # # # ### When to use cuDF and Dask-cuDF # # If your workflow is fast enough on a single GPU or your data comfortably fits in memory on a single GPU, you would want to use cuDF. If you want to distribute your workflow across multiple GPUs, have more data than you can fit in memory on a single GPU, or want to analyze data spread across many files at once, you would want to use Dask-cuDF. # + import os import numpy as np import pandas as pd import cudf import dask_cudf np.random.seed(12) #### Portions of this were borrowed and adapted from the #### cuDF cheatsheet, existing cuDF documentation, #### and 10 Minutes to Pandas. # - # Object Creation # --------------- # Creating a `cudf.Series` and `dask_cudf.Series`. s = cudf.Series([1,2,3,None,4]) print(s) ds = dask_cudf.from_cudf(s, npartitions=2) print(ds.compute()) # Creating a `cudf.DataFrame` and a `dask_cudf.DataFrame` by specifying values for each column. 
df = cudf.DataFrame([('a', list(range(20))), ('b', list(reversed(range(20)))), ('c', list(range(20)))]) print(df) ddf = dask_cudf.from_cudf(df, npartitions=2) print(ddf.compute()) # Creating a `cudf.DataFrame` from a pandas `Dataframe` and a `dask_cudf.Dataframe` from a `cudf.Dataframe`. # # *Note that best practice for using Dask-cuDF is to read data directly into a `dask_cudf.DataFrame` with something like `read_csv` (discussed below).* pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]}) gdf = cudf.DataFrame.from_pandas(pdf) print(gdf) dask_df = dask_cudf.from_cudf(pdf, npartitions=2) dask_gdf = dask_cudf.from_dask_dataframe(dask_df) print(dask_gdf.compute()) # Viewing Data # ------------- # Viewing the top rows of a GPU dataframe. print(df.head(2)) print(ddf.head(2)) # Sorting by values. print(df.sort_values(by='b')) print(ddf.sort_values(by='b').compute()) # Selection # ------------ # # ## Getting # Selecting a single column, which initially yields a `cudf.Series` or `dask_cudf.Series`. Calling `compute` results in a `cudf.Series` (equivalent to `df.a`). print(df['a']) print(ddf['a'].compute()) # ## Selection by Label # Selecting rows from index 2 to index 5 from columns 'a' and 'b'. print(df.loc[2:5, ['a', 'b']]) print(ddf.loc[2:5, ['a', 'b']].compute()) # ## Selection by Position # Selecting via integers and integer slices, like numpy/pandas. Note that this functionality is not available for Dask-cuDF DataFrames. print(df.iloc[0]) print(df.iloc[0:3, 0:2]) # You can also select elements of a `DataFrame` or `Series` with direct index access. print(df[3:5]) print(s[3:5]) # ## Boolean Indexing # Selecting rows in a `DataFrame` or `Series` by direct Boolean indexing. print(df[df.b > 15]) print(ddf[ddf.b > 15].compute()) # Selecting values from a `DataFrame` where a Boolean condition is met, via the `query` API. 
print(df.query("b == 3")) print(ddf.query("b == 3").compute()) # You can also pass local variables to Dask-cuDF queries, via the `local_dict` keyword. With standard cuDF, you may either use the `local_dict` keyword or directly pass the variable via the `@` keyword. cudf_comparator = 3 print(df.query("b == @cudf_comparator")) dask_cudf_comparator = 3 print(ddf.query("b == @val", local_dict={'val':dask_cudf_comparator}).compute()) # Supported logical operators include `>`, `<`, `>=`, `<=`, `==`, and `!=`. # ## MultiIndex # cuDF supports hierarchical indexing of DataFrames using MultiIndex. Grouping hierarchically (see `Grouping` below) automatically produces a DataFrame with a MultiIndex. arrays = [['a', 'a', 'b', 'b'], [1, 2, 3, 4]] tuples = list(zip(*arrays)) idx = cudf.MultiIndex.from_tuples(tuples) idx # This index can back either axis of a DataFrame. gdf1 = cudf.DataFrame({'first': np.random.rand(4), 'second': np.random.rand(4)}) gdf1.index = idx print(gdf1.to_pandas()) gdf2 = cudf.DataFrame({'first': np.random.rand(4), 'second': np.random.rand(4)}).T gdf2.columns = idx print(gdf2.to_pandas()) # Accessing values of a DataFrame with a MultiIndex. Note that slicing is not yet supported. print(gdf1.loc[('b', 3)].to_pandas()) # Missing Data # ------------ # Missing data can be replaced by using the `fillna` method. print(s.fillna(999)) print(ds.fillna(999).compute()) # Operations # ------------ # ## Stats # Calculating descriptive statistics for a `Series`. print(s.mean(), s.var()) print(ds.mean().compute(), ds.var().compute()) # ## Applymap # Applying functions to a `Series`. Note that applying user defined functions directly with Dask-cuDF is not yet implemented. For now, you can use [map_partitions](http://docs.dask.org/en/stable/dataframe-api.html#dask.dataframe.DataFrame.map_partitions) to apply a function to each partition of the distributed dataframe. 
# + def add_ten(num): return num + 10 print(df['a'].applymap(add_ten)) # - print(ddf['a'].map_partitions(add_ten).compute()) # ## Histogramming # Counting the number of occurrences of each unique value of variable. print(df.a.value_counts()) print(ddf.a.value_counts().compute()) # ## String Methods # Like pandas, cuDF provides string processing methods in the `str` attribute of `Series`. Full documentation of string methods is a work in progress. Please see the cuDF API documentation for more information. s = cudf.Series(['A', 'B', 'C', 'Aaba', 'Baca', None, 'CABA', 'dog', 'cat']) print(s.str.lower()) ds = dask_cudf.from_cudf(s, npartitions=2) print(ds.str.lower().compute()) # ## Concat # Concatenating `Series` and `DataFrames` row-wise. s = cudf.Series([1, 2, 3, None, 5]) print(cudf.concat([s, s])) ds2 = dask_cudf.from_cudf(s, npartitions=2) print(dask_cudf.concat([ds2, ds2]).compute()) # ## Join # Performing SQL style merges. Note that the dataframe order is not maintained, but may be restored post-merge by sorting by the index. # + df_a = cudf.DataFrame() df_a['key'] = ['a', 'b', 'c', 'd', 'e'] df_a['vals_a'] = [float(i + 10) for i in range(5)] df_b = cudf.DataFrame() df_b['key'] = ['a', 'c', 'e'] df_b['vals_b'] = [float(i+100) for i in range(3)] merged = df_a.merge(df_b, on=['key'], how='left') print(merged) # + ddf_a = dask_cudf.from_cudf(df_a, npartitions=2) ddf_b = dask_cudf.from_cudf(df_b, npartitions=2) merged = ddf_a.merge(ddf_b, on=['key'], how='left').compute() print(merged) # - # ## Append # Appending values from another `Series` or array-like object. print(s.append(s)) print(ds2.append(ds2).compute()) # ## Grouping # Like pandas, cuDF and Dask-cuDF support the Split-Apply-Combine groupby paradigm. # + df['agg_col1'] = [1 if x % 2 == 0 else 0 for x in range(len(df))] df['agg_col2'] = [1 if x % 3 == 0 else 0 for x in range(len(df))] ddf = dask_cudf.from_cudf(df, npartitions=2) # - # Grouping and then applying the `sum` function to the grouped data. 
print(df.groupby('agg_col1').sum()) print(ddf.groupby('agg_col1').sum().compute()) # Grouping hierarchically then applying the `sum` function to grouped data. We send the result to a pandas dataframe only for printing purposes. print(df.groupby(['agg_col1', 'agg_col2']).sum().to_pandas()) ddf.groupby(['agg_col1', 'agg_col2']).sum().compute().to_pandas() # Grouping and applying statistical functions to specific columns, using `agg`. print(df.groupby('agg_col1').agg({'a':'max', 'b':'mean', 'c':'sum'})) print(ddf.groupby('agg_col1').agg({'a':'max', 'b':'mean', 'c':'sum'}).compute()) # ## Transpose # Transposing a dataframe, using either the `transpose` method or `T` property. Currently, all columns must have the same type. Transposing is not currently implemented in Dask-cuDF. sample = cudf.DataFrame({'a':[1,2,3], 'b':[4,5,6]}) print(sample) print(sample.transpose()) # Time Series # ------------ # # `DataFrames` supports `datetime` typed columns, which allow users to interact with and filter data based on specific timestamps. # + import datetime as dt date_df = cudf.DataFrame() date_df['date'] = pd.date_range('11/20/2018', periods=72, freq='D') date_df['value'] = np.random.sample(len(date_df)) search_date = dt.datetime.strptime('2018-11-23', '%Y-%m-%d') print(date_df.query('date <= @search_date')) # - date_ddf = dask_cudf.from_cudf(date_df, npartitions=2) print(date_ddf.query('date <= @search_date', local_dict={'search_date':search_date}).compute()) # Categoricals # ------------ # `DataFrames` support categorical columns. # + pdf = pd.DataFrame({"id":[1,2,3,4,5,6], "grade":['a', 'b', 'b', 'a', 'a', 'e']}) pdf["grade"] = pdf["grade"].astype("category") gdf = cudf.DataFrame.from_pandas(pdf) print(gdf) # - dgdf = dask_cudf.from_cudf(gdf, npartitions=2) print(dgdf.compute()) # Accessing the categories of a column. Note that this is currently not supported in Dask-cuDF. gdf.grade.cat.categories # Accessing the underlying code values of each categorical observation. 
print(gdf.grade.cat.codes) print(dgdf.grade.cat.codes.compute()) # Converting Data Representation # -------------------------------- # ## Pandas # Converting a cuDF and Dask-cuDF `DataFrame` to a pandas `DataFrame`. print(df.head().to_pandas()) print(ddf.compute().head().to_pandas()) # ## Numpy # Converting a cuDF or Dask-cuDF `DataFrame` to a numpy `ndarray`. print(df.as_matrix()) print(ddf.compute().as_matrix()) # Converting a cuDF or Dask-cuDF `Series` to a numpy `ndarray`. print(df['a'].to_array()) print(ddf['a'].compute().to_array()) # ## Arrow # Converting a cuDF or Dask-cuDF `DataFrame` to a PyArrow `Table`. print(df.to_arrow()) print(ddf.compute().to_arrow()) # Getting Data In/Out # ------------------------ # # ## CSV # Writing to a CSV file, by first sending data to a pandas `Dataframe` on the host. # + if not os.path.exists('example_output'): os.mkdir('example_output') df.to_pandas().to_csv('example_output/foo.csv', index=False) # - ddf.compute().to_pandas().to_csv('example_output/foo_dask.csv', index=False) # Reading from a csv file. df = cudf.read_csv('example_output/foo.csv') print(df) ddf = dask_cudf.read_csv('example_output/foo_dask.csv') print(ddf.compute()) # Reading all CSV files in a directory into a single `dask_cudf.DataFrame`, using the star wildcard. ddf = dask_cudf.read_csv('example_output/*.csv') print(ddf.compute()) # ## Parquet # Writing to parquet files, using the CPU via PyArrow. df.to_parquet('example_output/temp_parquet') # Reading parquet files with a GPU-accelerated parquet reader. df = cudf.read_parquet('example_output/temp_parquet/72706b163a0d4feb949005d22146ad83.parquet') print(df.to_pandas()) # Writing to parquet files from a `dask_cudf.DataFrame` using PyArrow under the hood. ddf.to_parquet('example_files') # ## ORC # Reading ORC files. df2 = cudf.read_orc('/cudf/python/cudf/tests/data/orc/TestOrcFile.test1.orc') df2.to_pandas()
docs/source/10min.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import torchvision.models as models from torchvision import transforms from PIL import Image import torch.nn as nn import numpy as np import matplotlib.pyplot as plt # - model = models.resnet18(pretrained=True) model # + # freeze all paramters for param in model.parameters(): param.requires_grad = False # Parameters of newly constructed modules have requires_grad=True by default num_ftrs = model.fc.in_features model.fc = nn.Linear(num_ftrs, 2) # - avg_layer = model._modules.get('avgpool') layer4= model._modules.get('layer4') layer4[1].bn2 my_embedding = torch.zeros(512) my_embedding.unsqueeze(0).shape # + # please download this sample file # for example on linux # wget https://en.wikipedia.org/wiki/File:African_Bush_Elephant.jpg img_path = 'elephant.jpg' with open(img_path, 'rb') as f: with Image.open(f) as img: img = img.convert('RGB') # - plt.imshow(img) img.size model = models.resnet18(pretrained=True) model.eval() transform = transforms.Compose([ transforms.Resize([224,224]), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) input_tensor = transform(img) # 3x330x495 -> 3x244x24 size may differ input_tensor = input_tensor.unsqueeze(0) # 3x244x2244 -> 1x3x244x244 input = torch.autograd.Variable(input_tensor, requires_grad=False) output_logits = model(input) _, preds = torch.max(output_logits, 1) input_tensor.shape _, preds = torch.topk(output_logits, 3) preds with open("imagenet1000_clsidx_to_labels.txt") as f: class_names = eval(f.read()) class_names[386] import torch.nn.functional as F output_logits[0] probs = F.softmax(output_logits[0], dim=0) probs probs[386] probs[101] probs[385]
transfer_learning/bottleneck_feature_extractor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Challenge
#
# ## Identifying Outliers using Standard Deviation

# +
# initial imports
import pandas as pd
import numpy as np
import random
import hvplot.pandas

from sqlalchemy import create_engine

# +
# create a connection to the database
engine = create_engine("postgresql://postgres:postgres@localhost:5432/SQL_hw_Fraud_detect")

# three random card holders to audit (ids may coincide)
rand1=random.randint(1,25)
rand2=random.randint(1,25)
rand3=random.randint(1,25)

# Pull all transactions of the three holders, joining merchant info and
# matching cards on their first four digits.
query= f'''SELECT tr.date, tr.amount, tr.card, m.name, mc.name, credit_card.card_holder
FROM transactions AS tr
JOIN merchant AS m ON tr.merchant_id=m.id
JOIN merchant_catagory AS mc ON m.id_merchant_catagory=mc.id
JOIN credit_card on tr.card like CONCAT(SUBSTRING(CAST(CAST(credit_card.card AS BIGINT) AS VARCHAR(100)),1,4),'%%')
WHERE credit_card.card_holder={rand1}
OR credit_card.card_holder={rand2}
OR credit_card.card_holder={rand3}
'''

transactions_df = pd.read_sql(query, engine, index_col='date', parse_dates=True)
transactions_df.columns=['amount','card_num', 'merchant', 'category', 'holder']
transactions_df.head()

# +
# code a function to identify outliers based on standard deviation
'''
Outliers or non-normal data can be determined by a z-test 3.5<=|Yi-Yave|/s
but NIST also recommends the modified z-score 3.5<=0.6745|Yi-Ymed|/MAD
https://www.itl.nist.gov/div898/handbook/eda/section3/eda35h.htm'''

def std_outlier(df_group, holder):
    """Transactions of `holder` whose plain z-score |amount - mean| / std >= 3.5."""
    df=df_group.loc[df_group['holder']==holder]
    std=df['amount'].std()
    ave=df['amount'].mean()
    mask_std=abs(df['amount']-ave)/std>=3.5
    outlier_std=df.loc[mask_std]
    return outlier_std

def mad_outlier(df_group, holder):
    """Transactions of `holder` flagged by the NIST modified z-score,
    0.6745 * |amount - median| / MAD >= 3.5 (MAD = median absolute deviation)."""
    df=df_group.loc[df_group['holder']==holder]
    med=df['amount'].median()
    # Series.mad() was the MEAN absolute deviation (and was removed in
    # pandas 2.0); NIST's statistic uses the MEDIAN absolute deviation.
    mad=(df['amount']-med).abs().median()
    # FIX: was abs(.6745*amount - mean)/mad, which scales the values but not
    # the center; the statistic is 0.6745*|Yi - median| / MAD.
    mask_mad=0.6745*(df['amount']-med).abs()/mad>=3.5
    outlier_mad=df.loc[mask_mad]
    return outlier_mad

def iqr_outliers(df_group, holder):
    """Transactions of `holder` outside the Tukey fences
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR]."""
    df=df_group.loc[df_group['holder']==holder]
    qtr1=df['amount'].quantile(.25)
    qtr3=df['amount'].quantile(.75)
    # FIX: the interquartile range is Q3 - Q1; the original used the midpoint
    # (Q1+Q3)/2, which measures location rather than spread.
    iqr15=1.5*(qtr3-qtr1)
    mask_iqr=(df['amount']<(qtr1-iqr15))|(df['amount']>(qtr3+iqr15))
    outlier_iqr=df.loc[mask_iqr]
    return outlier_iqr

# +
# find anomalous transactions for 3 random card holders
# -

holder1=transactions_df.loc[transactions_df['holder']==rand1]
holder1.hvplot.hist(y='amount', bins=10, alpha=0.5, height=400).opts(title=f'Histogram of transactions for card holder {rand1}')

outlier_std=std_outlier(transactions_df, rand1)
outlier_std

outlier_mad=mad_outlier(transactions_df, rand1)
outlier_mad

outlier_iqr=iqr_outliers(transactions_df, rand1)
outlier_iqr

# FIX: this panel is for card holder rand2 (the original filtered on rand1,
# so the histogram title and the outlier tables below disagreed).
holder2=transactions_df.loc[transactions_df['holder']==rand2]
holder2.hvplot.hist(y='amount', bins=10, alpha=0.5, height=400).opts(title=f'Histogram of transactions for card holder {rand2}')

outlier_std2=std_outlier(transactions_df, rand2)
outlier_std2

outlier_mad2=mad_outlier(transactions_df, rand2)
outlier_mad2

outlier_iqr2=iqr_outliers(transactions_df, rand2)
outlier_iqr2

holder3=transactions_df.loc[transactions_df['holder']==rand3]
holder3.hvplot.hist(y='amount', bins=10, alpha=0.5, height=400).opts(title=f'Histogram of transactions for card holder {rand3}')

outlier_std3=std_outlier(transactions_df, rand3)
outlier_std3

outlier_mad3=mad_outlier(transactions_df, rand3)
outlier_mad3

outlier_iqr3=iqr_outliers(transactions_df, rand3)
outlier_iqr3

# ## Identifying Outliers Using Interquartile Range

# +
# code a function to identify outliers based on interquartile range
# -

# find anomalous transactions for 3 random card holders
challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Imbalanced Data
# Fits a RandomForest on the UCI Adult income dataset, then refits after
# SMOTE oversampling to compare performance on the imbalanced target.

import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,\
confusion_matrix,\
accuracy_score
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns

data = pd.read_csv('adult.csv')
data.head()

# '?' marks missing values in this dataset
data.replace('?',np.nan,inplace=True)

#Dropping all NULL values
data.dropna(inplace=True)

# Class balance of the target (>50K is the minority class).
data['income'].value_counts()

# +
#Encoding the Categorical values to Numericals using LabelEncoder
from sklearn.preprocessing import LabelEncoder

Labelenc_workclass = LabelEncoder()
data['workclass'] = Labelenc_workclass\
.fit_transform(data['workclass'])

Labelenc_education = LabelEncoder()
data['education'] = Labelenc_education\
.fit_transform(data['education'])

Labelenc_marital_status = LabelEncoder()
data['marital-status'] = Labelenc_marital_status\
.fit_transform(data['marital-status'])

Labelenc_occupation = LabelEncoder()
data['occupation'] = Labelenc_occupation\
.fit_transform(data['occupation'])

Labelenc_relationship = LabelEncoder()
data['relationship'] = Labelenc_relationship\
.fit_transform(data['relationship'])

Labelenc_race = LabelEncoder()
data['race'] = Labelenc_race\
.fit_transform(data['race'])

Labelenc_gender = LabelEncoder()
data['gender'] = Labelenc_gender\
.fit_transform(data['gender'])

Labelenc_native_country = LabelEncoder()
data['native-country'] = Labelenc_native_country\
.fit_transform(data['native-country'])

Labelenc_income = LabelEncoder()
data['income'] = Labelenc_income\
.fit_transform(data['income'])
# -

data.head()

# Putting feature variable to X
X = data.drop(['income'],axis=1)

# Putting response variable to y
y = data['income']

X_train, X_test, y_train, y_test = train_test_split\
(X,y,\
test_size=0.20, \
random_state=123)

# Baseline: RandomForest on the raw (imbalanced) training data.
clf_random = RandomForestClassifier(random_state=0)

clf_random.fit(X_train,y_train)

y_pred=clf_random.predict(X_test)

print(classification_report(y_test, y_pred))

# +
cm = confusion_matrix(y_test, y_pred)

cm_df = pd.DataFrame(cm,\
index = ['<=50K', '>50K'], \
columns = ['<=50K', '>50K'])
# -

plt.figure(figsize=(8,6))
sns.heatmap(cm_df, annot=True,fmt='g',cmap='Greys_r')
plt.title('Random Forest \nAccuracy:{0:.3f}'\
.format(accuracy_score(y_test, y_pred)))
plt.ylabel('True Values')
plt.xlabel('Predicted Values')
plt.show()

# # Exercise 9.04
# ## Fitting a Random Forest Classifier using SMOTE and Building the Confusion Matrix

import imblearn
from imblearn.over_sampling import SMOTE

# Oversample ONLY the training split (the test set stays untouched).
X_resampled, y_resampled = SMOTE().fit_resample(X_train,y_train)

clf_random.fit(X_resampled,y_resampled)

y_pred=clf_random.predict(X_test)

print(classification_report(y_test, y_pred))

# +
cm = confusion_matrix(y_test, y_pred)

cm_df = pd.DataFrame(cm,\
index = ['<=50K', '>50K'], \
columns = ['<=50K', '>50K'])
# -

plt.figure(figsize=(8,6))
sns.heatmap(cm_df, annot=True,fmt='g',cmap='Greys_r')
plt.title('Random Forest \nAccuracy:{0:.3f}'\
.format(accuracy_score(y_test, y_pred)))
plt.ylabel('True Values')
plt.xlabel('Predicted Values')
plt.show()
Chapter09/Exercise9.04/Exercise9.04.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Using Breast Cancer Wisconsin (Diagnostic) Database to create a classifier that can help diagnose patients.

# +
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()
print(cancer.DESCR)
# -

print(type(cancer)) #object "Bunch" is like dictionary
print(cancer.keys())
print(cancer.target_names)

# +
# Number of features of the breast cancer dataset
len(cancer['feature_names'])
# -

features = cancer['feature_names']
print(features)
print(len(features))

#Converting "Bunch" object to DataFrame
data1 = pd.DataFrame(data= np.c_[cancer['data'], cancer['target']], columns= cancer['feature_names'].tolist() + ['target'])
data1.head()

# +
#Finding class distribution
dist = data1.target.value_counts()
# NOTE(review): rename() is not in-place -- the relabeled Series is only
# displayed here; `dist` itself keeps the 0.0/1.0 index labels.
dist.rename({0.0: 'malignant', 1.0:'benign'})
# -

#Split data and labels
X = data1.iloc[:,0:30]
y = data1.target

# +
#Split train and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# +
#Using K-nearest neighbors classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train.values, y_train.values)

# +
# Predict the class of a synthetic "mean" observation (column means of all
# features; [:-1] drops the target column).
means = data1.mean()[:-1].values.reshape(1, -1)
print(means)

my_prediction = knn.predict(means)

# +
second_prediction = knn.predict(X_test.values)
second_prediction
# -

# Mean accuracy on the held-out test set.
knn.score(X_test.values, y_test.values)

# +
import matplotlib.pyplot as plt
# %matplotlib inline

# Find the training and testing accuracies by target value (i.e. malignant, benign)
mal_train_X = X_train[y_train==0]
mal_train_y = y_train[y_train==0]
ben_train_X = X_train[y_train==1]
ben_train_y = y_train[y_train==1]
mal_test_X = X_test[y_test==0]
mal_test_y = y_test[y_test==0]
ben_test_X = X_test[y_test==1]
ben_test_y = y_test[y_test==1]

scores = [knn.score(mal_train_X.values, mal_train_y.values), knn.score(ben_train_X.values, ben_train_y.values),
          knn.score(mal_test_X.values, mal_test_y.values), knn.score(ben_test_X.values, ben_test_y.values)]

bars = plt.bar(np.arange(4), scores, color=['#4c72b0','#4c72b0','#55a868','#55a868'])# directly label the score onto the bars
for bar in bars:
    height = bar.get_height()
    plt.gca().text(bar.get_x() + bar.get_width()/2, height*.90, '{0:.{1}f}'.format(height, 2), ha='center', color='w', fontsize=11)# remove all the ticks (both axes), and tick labels on the Y axis
plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')# remove the frame of the chart
for spine in plt.gca().spines.values():
    spine.set_visible(False)
plt.xticks([0,1,2,3], ['Malignant\nTraining', 'Benign\nTraining', 'Malignant\nTest', 'Benign\nTest'], alpha=0.8)
plt.title('Training and Test Accuracies for Malignant and Benign Cells', alpha=0.8)
plt.figure()# Plot the scores as a bar chart
# -
Cancer classifier example/cancer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/chandrayee31/Covid19_dashboard/blob/main/covid_19_dashboard.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="oCei5ulynbTC" # # COVID-19 Interactive Analysis Dashboard # + [markdown] id="lJHg54nErk9L" # # Covid 19 Internship project # + id="nuzfs6GwnbTK" # importing libraries from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual from IPython.core.display import display, HTML import numpy as np import pandas as pd import matplotlib.pyplot as plt import plotly.express as px import folium import plotly.graph_objects as go import seaborn as sns import ipywidgets as widgets # + id="Ya4kAGkCnbTM" # loading data right from the source: death_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv') confirmed_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv') recovered_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv') country_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv') # + [markdown] id="FxXd5dzFnbTH" # ## What is COVID-19? 
# # > Coronaviruses are a large family of viruses that may cause respiratory illnesses in humans ranging from common colds to more severe conditions such as Severe Acute Respiratory Syndrome (SARS) and Middle Eastern Respiratory Syndrome (MERS).1 # 'Novel coronavirus' is a new, previously unidentified strain of coronavirus. The novel coronavirus involved in the current outbreak has been named SARS-CoV-2 by the World Health Organization (WHO). 3The disease it causes has been named “coronavirus disease 2019” (or “COVID-19”).` # # ![Coronavirus particle Image](https://www.apta.com/wp-content/uploads/home-banner-1.jpg) # + id="5In5_ujTnbTN" confirmed_df.head() # + id="UOXFr48qnbTN" recovered_df.head() # + id="mEDXfNu9nbTO" death_df.head() # + id="Y28UQI-5nbTP" country_df.head() # + id="YGDh8MPDnbTQ" # data cleaning # renaming the df column names to lowercase country_df.columns = map(str.lower, country_df.columns) confirmed_df.columns = map(str.lower, confirmed_df.columns) death_df.columns = map(str.lower, death_df.columns) recovered_df.columns = map(str.lower, recovered_df.columns) # changing province/state to state and country/region to country confirmed_df = confirmed_df.rename(columns={'province/state': 'state', 'country/region': 'country'}) recovered_df = confirmed_df.rename(columns={'province/state': 'state', 'country/region': 'country'}) death_df = death_df.rename(columns={'province/state': 'state', 'country/region': 'country'}) country_df = country_df.rename(columns={'country_region': 'country'}) # country_df.head() # + id="3lBX1HKdnbTR" # total number of confirmed, death and recovered cases confirmed_total = int(country_df['confirmed'].sum()) deaths_total = int(country_df['deaths'].sum()) recovered_total = int(country_df['recovered'].sum()) active_total = int(country_df['active'].sum()) # + id="l-o2oTJynbTS" outputId="e9be0af1-5655-457b-c650-2286dcf46bb6" # displaying the total stats display(HTML("<div style = 'background-color: #504e4e; padding: 30px '>" + 
"<span style='color: #fff; font-size:30px;'> Confirmed: " + str(confirmed_total) +"</span>" + "<span style='color: red; font-size:30px;margin-left:20px;'> Deaths: " + str(deaths_total) + "</span>"+ "<span style='color: lightgreen; font-size:30px; margin-left:20px;'> Recovered: " + str(recovered_total) + "</span>"+ "</div>") ) # + [markdown] id="F6wPr0bEnbTV" # # COVID-19 Confirmed/Death/Recovered cases by countries # # ## Enter number of countries you want the data for # + id="9bnijxVBnbTV" outputId="069d3455-1551-4503-995d-7103247dd159" colab={"base_uri": "https://localhost:8080/", "height": 476, "referenced_widgets": ["1508d36831c5419d9b8679734a2be1c2", "154a5e98be8d4da482ff3f845d756aa2", "bd8ac54654624d918c3320f95747a0ed", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "d4ad73f1ad5d4a7c9221539e75b61fcd", "b48d5474aaa74c539f3908ee379024e4"]} # sorting the values by confirmed descednding order # country_df.sort_values('confirmed', ascending= False).head(10).style.background_gradient(cmap='copper') fig = go.FigureWidget( layout=go.Layout() ) def highlight_col(x): r = 'background-color: red' y = 'background-color: purple' g = 'background-color: grey' df1 = pd.DataFrame('', index=x.index, columns=x.columns) df1.iloc[:, 4] = y df1.iloc[:, 5] = r df1.iloc[:, 6] = g return df1 def show_latest_cases(n): n = int(n) return country_df.sort_values('confirmed', ascending= False).head(n).style.apply(highlight_col, axis=None) interact(show_latest_cases, n='10') ipywLayout = widgets.Layout(border='solid 2px green') ipywLayout.display='none' # uncomment this, run cell again - then the graph/figure disappears widgets.VBox([fig], layout=ipywLayout) # + id="lfmko8FWnbTX" sorted_country_df = country_df.sort_values('confirmed', ascending= False) # + [markdown] id="-XpZ5CaxnbTY" # # Slide to check for the worst hit countries # + id="Xw0ZedKjnbTZ" outputId="6e0e3775-a242-44e8-b838-2768ae1ce833" colab={"base_uri": "https://localhost:8080/", "height": 574, "referenced_widgets": 
["adac4004c640491fbc8ed921ad0875bf", "c2a540c0e67949ae895350ca5c180e14", "7a8685eab4164a76b8fa8d3a000b93ff", "6cbc5083bb644c70bba84ab124c350bc", "f2869d6f87e744228ff55d6568c449e9", "7ae02f629a8444bb8ef3b32a4f888cc4", "85e7b121ed9442f2a4e975674f4c98b9", "cceaf795eefa43c39fe2810f342b03e5", "<KEY>", "b48d5474aaa74c539f3908ee379024e4"]} # # plotting the 20 worst hit countries def bubble_chart(n): fig = px.scatter(sorted_country_df.head(n), x="country", y="confirmed", size="confirmed", color="country", hover_name="country", size_max=60) fig.update_layout( title=str(n) +" Worst hit countries", xaxis_title="Countries", yaxis_title="Confirmed Cases", width = 700 ) fig.show(); interact(bubble_chart, n=10) ipywLayout = widgets.Layout(border='solid 2px green') ipywLayout.display='none' widgets.VBox([fig], layout=ipywLayout) # + id="WOby_dlGnbTb" def plot_cases_of_a_country(country): labels = ['confirmed', 'deaths'] colors = ['blue', 'red'] mode_size = [6, 8] line_size = [4, 5] df_list = [confirmed_df, death_df] fig = go.Figure(); for i, df in enumerate(df_list): if country == 'World' or country == 'world': x_data = np.array(list(df.iloc[:, 20:].columns)) y_data = np.sum(np.asarray(df.iloc[:,4:]),axis = 0) else: x_data = np.array(list(df.iloc[:, 20:].columns)) y_data = np.sum(np.asarray(df[df['country'] == country].iloc[:,20:]),axis = 0) fig.add_trace(go.Scatter(x=x_data, y=y_data, mode='lines+markers', name=labels[i], line=dict(color=colors[i], width=line_size[i]), connectgaps=True, text = "Total " + str(labels[i]) +": "+ str(y_data[-1]) )); fig.update_layout( title="COVID 19 cases of " + country, xaxis_title='Date', yaxis_title='No. of Confirmed Cases', margin=dict(l=20, r=20, t=40, b=20), paper_bgcolor="lightgrey", width = 800, ); fig.update_yaxes(type="linear") fig.show(); # + [markdown] id="tEFUkkHsnbTc" # # Check the details of your country or the World # # * Enter the name of your country(in capitalized format(e.g. 
Italy)) and world for total cases # + id="bdGuCVronbTd" outputId="78dc911d-0dda-45fd-a0e3-53c8c091656b" colab={"base_uri": "https://localhost:8080/", "height": 574, "referenced_widgets": ["23674c9aa97940e3a4ac947d3f124697", "850d7bbba8594ebf8e59f594a42340ca", "da88b4bae4614862ba11d7e837972962", "b54d26c1056f4e3c94feba845b4dc487", "645d9f1faf934075874b41b6609945fb", "990d2edd71a34b02a2f64ed084dd171b", "d91921499b2d4f918a15528cfabde850", "616ea1b39eef439d9420ba8cf713d2b3", "d823fdc7e22d43059967e9c4a0172108", "<KEY>"]} interact(plot_cases_of_a_country, country='World') ipywLayout = widgets.Layout(border='solid 2px green') ipywLayout.display='none' # uncomment this, run cell again - then the graph/figure disappears widgets.VBox([fig], layout=ipywLayout) # + [markdown] id="Dc-k1h7UnbTd" # # 10 worst hit countries - Confirmed cases # + id="-QROETrlnbTe" outputId="4dce4faa-2939-495f-a6d9-7f93eba4a807" colab={"base_uri": "https://localhost:8080/", "height": 517} px.bar( sorted_country_df.head(10), x = "country", y = "confirmed", title= "Top 10 worst affected countries", # the axis names color_discrete_sequence=["red"], height=500, width=800 ) # + [markdown] id="7pZDi9xOnbTf" # # 10 worst hit countries - Death cases # + id="NWl6Llr0nbTf" outputId="77e1e65f-7b1e-4a11-d066-bc7a2e890114" colab={"base_uri": "https://localhost:8080/", "height": 517} px.bar( sorted_country_df.head(10), x = "country", y = "deaths", title= "Top 10 worst affected countries", # the axis names color_discrete_sequence=["purple"], height=500, width=800 ) # + [markdown] id="BkoT4YvYnbTg" # # Worst hit countries - Recovering cases # + id="GCkjuYRVnbTh" outputId="53b97065-cc20-4d0b-a485-65b42786738b" colab={"base_uri": "https://localhost:8080/", "height": 517} px.bar( sorted_country_df.head(10), x = "country", y = "recovered", title= "Top 10 worst affected countries", # the axis names color_discrete_sequence=["pink"], height=500, width=800 ) # + [markdown] id="TyO0AmdNnbTh" # # Global spread of COVID-19 # 
+ id="oSZ20B-0o6nQ" outputId="888d9365-688a-4bf8-cb5b-0b5a4eeb80b5" colab={"base_uri": "https://localhost:8080/", "height": 461} confirmed_df.iloc[:, 2:4] filtered_df = confirmed_df[confirmed_df[['lat', 'long']].notnull().all(1)] filtered_df # + id="P11yGqijnbTi" outputId="c7c5e34a-cdb5-4089-9b5f-aba7af9d777b" colab={"base_uri": "https://localhost:8080/", "height": 712} world_map = folium.Map(location=[11,0], tiles="cartodbpositron", zoom_start=2, max_zoom = 6, min_zoom = 2) for i in range(0,len(filtered_df)): folium.Circle( location=[filtered_df.iloc[i]['lat'], filtered_df.iloc[i]['long']], fill=True, radius=(int((np.log(filtered_df.iloc[i,-1]+1.00001)))+0.2)*50000, color='red', fill_color='indigo', tooltip = "<div style='margin: 0; background-color: black; color: white;'>"+ "<h4 style='text-align:center;font-weight: bold'>"+filtered_df.iloc[i]['country'] + "</h4>" "<hr style='margin:10px;color: white;'>"+ "<ul style='color: white;;list-style-type:circle;align-item:left;padding-left:20px;padding-right:20px'>"+ "<li>Confirmed: "+str(filtered_df.iloc[i,-1])+"</li>"+ "<li>Deaths: "+str(death_df.iloc[i,-1])+"</li>"+ "<li>Death Rate: "+ str(np.round(death_df.iloc[i,-1]/(filtered_df.iloc[i,-1]+1.00001)*100,2))+ "</li>"+ "</ul></div>", ).add_to(world_map) world_map # + [markdown] id="gMscRgUwnbTj" # ## [Notebook covers:](https://github.com/datasciencewithharshit/voila-covid-19-dashboard) # # 1. What is COVID-19? # 2. Data loading from [John Hopkins CSSE data repository](https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series) # 3. Data Cleaning and Preparation # 4. Visualising N number of worst hit countries using [plotly](https://plotly.com/) scatter plot. # 5. Plotting confirmed and death cases for the requested country. # 6. Plotting all cases on world map using [Folium](https://python-visualization.github.io/folium/) # # # ## Symptoms: # People may be sick with the virus for 1 to 14 days before developing symptoms. 
The most common symptoms of coronavirus disease (COVID-19) are fever, tiredness, and dry cough. Most people (about 80%) recover from the disease without needing special treatment. # * cough # * fever # * tiredness # * difficulty in breathing(severe cases) # # ## More Info on COVID-19: # * [https://www.who.int/health-topics/coronavirus](https://www.who.int/health-topics/coronavirus) # * [https://www.who.int/emergencies/diseases/novel-coronavirus-2019](https://www.who.int/emergencies/diseases/novel-coronavirus-2019) # * [https://www.nature.com/articles/s41597-020-0448-0](https://www.nature.com/articles/s41597-020-0448-0) # # ## Link to the analysis and other resources: # * [Link to GitHub repo: ](https://github.com/datasciencewithharshit/voila-covid-19-dashboard) # * [Link to Author's Youtube: ](https://www.youtube.com/channel/UCH-xwLTKQaABNs2QmGxK2bQ?view_as=subscriber) # # + id="Bb__W72QnbTk"
covid_19_dashboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # analyzing the effect of a constant force with an inverse powerlaw attractive force # <NAME><br> # 8.11.2021 # + from lib.my_initialization import * import random,scipy from lib.measure.compute_slope import * from lib.measure.compute_sliding_slope import * from lib.measure.powerlaw import * # %load_ext autoreload # %autoreload 2 from scipy import stats alpha=0.1 # - #TODO: move to lib.measure def spline_ysmooth(x,y,der=0,s=0): tck = scipy.interpolate.splrep(x, y, s=s) ynew = scipy.interpolate.splev(x, tck, der=der) return ynew # + # # For darkmode plots # from jupyterthemes import jtplot # jtplot.style(theme='monokai', context='notebook', ticks=True, grid=False) # - # # visualize the effect of turning on/off a basin of attraction # Qualitative Results from Run 14 # - the low values of D apparent in the full models seem to exhibit different dependences of m,M on a,r... 
# +
# Load the aggregated run-14 simulation output (one row per parameter set).
# data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_12_all.csv"
data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_14_all.csv"
df=pd.read_csv(data_dir)
# df.head()
print(list(df.columns))
# collision rates must be nonnegative; a violation means a bad run
assert (not (df.CollRate<0).any())

# +
#derived values
# df['CollRate']=1./df['CollTime']
df['A']=df['L']**2
df['q']=df['N']/df['A'] #number of tips per square centimeter
df['w']=df['CollRate']/df['A'] #[mHz?]/cm^2
# df=df[df.niter==250].copy()

#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
x0_values=np.array(sorted(set(df.x0.values)))#1/s
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
# FIX: these two were commented out but are printed below (NameError on a
# fresh kernel); the neighbor/force_code columns are also queried later on.
neighbor_values=np.array(sorted(set(df.neighbor.values)))
force_code_values=np.array(sorted(set(df.force_code.values)))
#make test for whether there is one input parameter present in an input DataFrame
print(f"fixed parameters:")
print(f"D~{D_values}")
print(f"L~{L_values}")
print(f"kappa~{kappa_values}")
print(f"x0~{x0_values}")
print(f"set_second~{set_second_values}")
print(f"reflect~{reflect_values}")
print(f"no_repulsion~{no_repulsion_values}")
print(f"no_attraction~{no_attraction_values}")
print(f"neighbor~{neighbor_values}")
print(f"force_code~{force_code_values}")
print(f"\nvaried parameters:")
# print(f"varkappa~{np.mean(varkappa_values):.3f}+-{2*np.std(varkappa_values):.3f}")
print(f"varkappa~{varkappa_values}")
print(f"r~{r_values}")
# -

# +
#for FK model
#query the control
kappa=kappa_values[0]
D=D_values[-1]#-1]#
r=r_values[1]
L=L_values[0]
x0=x0_values[0] #cm
set_second=0
no_repulsion=0
no_attraction=0
reflect=0
neighbor=0
force_code=2
# control case: first (smallest) attraction coefficient
varkappa=varkappa_values[0]#3]# #cm^2/s

#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
dh=dg[dg.kappa==kappa]
# q = tip density, w = collision rate per area (computed upstream)
x_values=dh.q.values
y_values=dh.w.values
x_values_control=x_values.copy()
y_values_control=y_values.copy()
print((varkappa,x_values.shape))

# test case: a larger attraction coefficient, same remaining parameters
varkappa=varkappa_values[3]#-3]# #cm^2/s

#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
# kappa_values=np.array(sorted(set(dg.kappa.values)))
# kappa=kappa_values[1]
dh=dg[dg.kappa==kappa]
x_values=dh.q.values
y_values=dh.w.values
x_values_test=x_values.copy()
y_values_test=y_values.copy()
print((varkappa,x_values.shape))

print((r,D,L,kappa,varkappa,x0))

# +
smoothing=10.
# Overlay the control/test slices on the FK and LR power-law fits (log-log).
# NOTE(review): spline_ysmooth is defined earlier in this file — not visible here.
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
plt.plot(x_values_control,spline_ysmooth(x_values_control,y_values_control,s=smoothing),'-',c='k',alpha=.7,lw=3,label=f'without attraction')#, a={0:.0f} cm'+r'$^2$/s')
plt.plot(x_values_test,spline_ysmooth(x_values_test,y_values_test,s=smoothing),'-',c='g',alpha=.7,lw=3,label=f'with attraction')#, a={varkappa:.0f} cm'+r'$^2$/s',lw=3)
# plt.plot(x_values_control,y_values_control,'-',c='k',alpha=.7,lw=3,label=f'without attraction')#, a={0:.0f} cm'+r'$^2$/s')
# plt.plot(x_values_test,y_values_test,'-',c='g',alpha=.7,lw=3,label=f'with attraction')#, a={varkappa:.0f} cm'+r'$^2$/s',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
plt.ylim([0.05,40])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
# plt.title(r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0} cm\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.title(r'$a=$'+f'{varkappa}, '+r'$\kappa=$'+f'{kappa:.0f}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.show()

# +
# Repeat the control/test slicing for the next radius value r_values[2].
#for FK model
#query the control
kappa=kappa_values[0]
D=D_values[-1]#-1]#
r=r_values[2]
L=L_values[0]
x0=x0_values[0] #cm
set_second=0
no_repulsion=0
no_attraction=0
reflect=0
neighbor=0
force_code=2
varkappa=varkappa_values[0] #cm^2/s
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
dh=dg[dg.kappa==kappa]
x_values=dh.q.values
y_values=dh.w.values
x_values_control=x_values.copy()
y_values_control=y_values.copy()
print((varkappa,x_values.shape))
varkappa=varkappa_values[3] #cm^2/s
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
# kappa_values=np.array(sorted(set(dg.kappa.values)))
# kappa=kappa_values[1]
dh=dg[dg.kappa==kappa]
x_values=dh.q.values
y_values=dh.w.values
x_values_test=x_values.copy()
y_values_test=y_values.copy()
print((varkappa,x_values.shape))
print((r,D,L,kappa,varkappa,x0))

# +
smoothing=10.
# Same comparison plot as above, now for r=r_values[2] (no y-limit clamp here).
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
plt.plot(x_values_control,spline_ysmooth(x_values_control,y_values_control,s=smoothing),'-',c='k',alpha=.7,lw=3,label=f'without attraction')#, a={0:.0f} cm'+r'$^2$/s')
plt.plot(x_values_test,spline_ysmooth(x_values_test,y_values_test,s=smoothing),'-',c='g',alpha=.7,lw=3,label=f'with attraction')#, a={varkappa:.0f} cm'+r'$^2$/s',lw=3)
# plt.plot(x_values_control,y_values_control,'-',c='k',alpha=.7,lw=3,label=f'without attraction')#, a={0:.0f} cm'+r'$^2$/s')
# plt.plot(x_values_test,y_values_test,'-',c='g',alpha=.7,lw=3,label=f'with attraction')#, a={varkappa:.0f} cm'+r'$^2$/s',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
# plt.title(r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0} cm\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.title(r'$a=$'+f'{varkappa}, '+r'$\kappa=$'+f'{kappa:.0f}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.show()

# + [markdown] heading_collapsed=true
# ## (skip) plot token trials for long range 1/r forces

# + hidden=true
# Token (single-parameter) trials for 1/r long-ranged forces (force_code=2),
# at kappa=1500 then kappa=500.
#token long-ranged forces
# force_code_values=np.array([2,3])
# no_attraction_values=np.array([0])
# neighbor_values=np.array([0,1])
# set_second_values=np.array([0])
# varkappa_values=np.array([0.1,1.,5.,10.,20.,50.])#1/s
# x0_values=np.array([0.])#,1.0,5.0])#cm #x0 does nothing for QED2,3
kappa=1500;no_attraction=0;force_code=2;neighbor=0;varkappa=50;x0=0.
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_1500=x_values.copy()
y_values_force_1500=y_values.copy()
kappa=500;
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_500=x_values.copy()
y_values_force_500=y_values.copy()

# + hidden=true
# Plot kappa=1500 token trial against the no-force control.
# NOTE(review): x_values_control_1500 / y_values_control_1500 are defined by an
# earlier cell not visible in this chunk — confirm before re-running standalone.
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
# plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
# plt.plot(x_values_control_500,y_values_control_500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=500$ Hz',lw=3)
plt.plot(x_values_control_1500,y_values_control_1500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values_force_500,y_values_force_500,'-',c='g',alpha=.7,label=r'1/$r$ forces, $\kappa=500$ Hz',lw=3)
plt.plot(x_values_force_1500,y_values_force_1500,'-',c='g',alpha=.7,label=r'1/$r$ forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
plt.title(r'$\varkappa=$'+f'{varkappa} Hz cm, '+f'\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.show()

# + hidden=true
# Same plot for the kappa=500 token trial.
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
plt.plot(x_values_control_500,y_values_control_500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=500$ Hz',lw=3)
# plt.plot(x_values_control_1500,y_values_control_1500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=1500$ Hz',lw=3)
plt.plot(x_values_force_500,y_values_force_500,'-',c='g',alpha=.7,label=r'1/$r$ forces, $\kappa=500$ Hz',lw=3)
# plt.plot(x_values_force_1500,y_values_force_1500,'-',c='g',alpha=.7,label=r'1/$r$ forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
plt.title(r'$\varkappa=$'+f'{varkappa} Hz cm, '+f'\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.show()

# + hidden=true

# + [markdown] heading_collapsed=true
# ## (skip) plot token trials for long range 1/r^2 forces

# + hidden=true
# Token trials for 1/r^2 long-ranged forces (force_code=3), kappa=1500 first.
#token long-ranged forces
# force_code_values=np.array([2,3])
# no_attraction_values=np.array([0])
# neighbor_values=np.array([0,1])
# set_second_values=np.array([0])
# varkappa_values=np.array([0.1,1.,5.,10.,20.,50.])#1/s
# x0_values=np.array([0.])#,1.0,5.0])#cm #x0 does nothing for QED2,3
kappa=1500;no_attraction=0;force_code=3;neighbor=0;varkappa=50;x0=0.
# Slice the 1/r^2 token trials at kappa=1500 and kappa=500.
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_1500=x_values.copy()
y_values_force_1500=y_values.copy()
kappa=500;
#query the DataFrame
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_500=x_values.copy()
y_values_force_500=y_values.copy()

# + hidden=true
# Plot kappa=1500 1/r^2 token trial vs the no-force control.
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
# plt.plot(x_values_control_500,y_values_control_500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=500$ Hz',lw=3)
plt.plot(x_values_control_1500,y_values_control_1500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values_force_500,y_values_force_500,'-',c='g',alpha=.7,label=r'1/$r^2$ forces, $\kappa=500$ Hz',lw=3)
plt.plot(x_values_force_1500,y_values_force_1500,'-',c='g',alpha=.7,label=r'1/$r^2$ forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
plt.title(r'$\varkappa=$'+f'{varkappa} Hz cm^2, '+f'\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.show()

# + hidden=true
# Same plot for the kappa=500 1/r^2 token trial.
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
plt.plot(x_values_control_500,y_values_control_500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=500$ Hz',lw=3)
# plt.plot(x_values_control_1500,y_values_control_1500,'-',c='k',alpha=.7,label=r'no forces, $\kappa=1500$ Hz',lw=3)
plt.plot(x_values_force_500,y_values_force_500,'-',c='g',alpha=.7,label=r'1/$r^2$ forces, $\kappa=500$ Hz',lw=3)
# plt.plot(x_values_force_1500,y_values_force_1500,'-',c='g',alpha=.7,label=r'1/$r^2$ forces, $\kappa=1500$ Hz',lw=3)
# plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation',lw=6)
# plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# plt.xlim([0.08,1])
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
plt.title(r'$\varkappa=$'+f'{varkappa} Hz cm^2, '+f'\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.show()

# + hidden=true

# + hidden=true
#slice relevant particle density to the particle densities observed from the full model
L=L_values[0]
# x_values=x_values_force_500#dg[dg.L==L].q.values
# y_values=y_values_force_500#dg[dg.L==L].w.values
# NOTE(review): x_values_spring_1500 is defined by an earlier cell not visible here.
x_values=x_values_spring_1500#dg[dg.L==L].q.values
y_values=y_values_spring_1500#dg[dg.L==L].w.values
qmin=0.06;qmax=0.6
boo=(x_values>qmin)&(x_values<qmax)
dict_ci=compute_95CI_ols(np.log(x_values[boo]),np.log(y_values[boo]))
print(*dict_ci)
print(*dict_ci.values())
print(f"Therefore, 95% confidence interval DOES include exponent values observed from the Luo-Rudy model, but for this trial, only low densities.")

# + hidden=true
#compute sliding m: local log-log slope in a moving density window
xavg_values,slope_values,Rsquared_values = compute_sliding_slope_loglog(x_values,y_values,
    x_min=qmin,
    window_width=0.2,
    stepsize=0.01,
)
xavg_values.shape

# + hidden=true

# + hidden=true run_control={"marked": true}
# Plot the sliding exponent versus density.
fontsize=18
x=xavg_values[:-1]
# y=Rsquared_values[:-1]
y=slope_values[:-1]
plt.plot(x,y,lw=2)
# plt.scatter(x,y,s=10)
# plt.xlabel('N',fontsize=fontsize)
# plt.ylabel('Collision Rate',fontsize=fontsize)
# plt.xscale('log')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'exponent', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
plt.title(r'$\varkappa=$'+f'{varkappa} Hz cm^2, '+f'\nforce_code={force_code}, neighbors={neighbor}\n',fontsize=fontsize)
plt.show()

# + [markdown] heading_collapsed=true
# # (skip) print all powerlaw fits to text file

# + hidden=true
def slice_df_and_print_powerlaw_fit(r,D,L,kappa,varkappa,x0,q_min=0.06,q_max=.2,no_attraction=0,no_repulsion=1,force_code=2):
    '''map from trial to power law fit in interval q_min to q_max

    Slices the module-level DataFrame df at the given trial parameters and
    prints a power-law fit of w(q) restricted to q_min < q < q_max.
    NOTE(review): the force_code parameter is accepted but never used in the
    query below — verify whether it should be added to the filter.
    Relies on module-level globals: df, set_second_values, reflect_values,
    and the helper print_fit_power_law (defined earlier in this file).
    '''
    #query the DataFrame
    query =(df.set_second==set_second_values[0])&(df.reflect==reflect_values[0])
    query&=df.r==r#r_values[0]
    query&=df.D==D#D_values[0]
    query&=df.L==L#L_values[0]
    query&=df.kappa==kappa
    query&=df.varkappa==varkappa#_values#[-1]#5]
    query&=df.x0==x0#_values[2]
    query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
    dff=df[query]
    dg=dff.copy()#[dff.kappa==kappa]#kappa_values[0]]#smaller reaction rate
    x_values=dg.q.values
    y_values=dg.w.values
    # q_min=0.1;q_max=.4
    # boo=(x_values>q_min)&(x_values<q_max)
    # x=np.log(x_values[boo])
    # y=np.log(y_values[boo])
    # dict_out=compute_95CI_ols(x,y)
    # dict_out
    boo=(x_values>q_min)&(x_values<q_max)
    x=x_values[boo]
    y=y_values[boo]
    # print(x)
    print_fit_power_law(x,y)

# + hidden=true
# Spot-check a single trial's fit.
varkappa=varkappa_values[-1]
x0=x0_values[2]
kappa=kappa_values[0]
r=r_values[0];D=D_values[0];L=L_values[0];
slice_df_and_print_powerlaw_fit(r,D,L,kappa,varkappa,x0)

# + hidden=true
# r=r_values[0];D=D_values[0];
# varkappa=varkappa_values[0]
# x0=x0_values[0]
for varkappa in varkappa_values:#[::2]:
    print(f"for r={r:.3f} cm and D={D} cm^2/s fixed,")
    print(f"the powerlaw fit for varkappa={varkappa} cm and x0={x0} was")
    try:
        slice_df_and_print_powerlaw_fit(r,D,L,kappa,varkappa,x0)
    except AssertionError as e:
        print(f"Error: empty slice!")
    print(f'')

# + hidden=true

# + [markdown] hidden=true
# __Result__
# - attractive spring at with x0=0 gave $m \approx 2$... Several values are superquadratic.
# + hidden=true
# Sweep every recorded parameter combination and print each power-law fit to a
# text file next to the source CSV, by temporarily redirecting stdout.
import sys
text_fn=data_dir.replace('.csv','_powerfits.txt')
original_stdout = sys.stdout # Save a reference to the original standard output
count=0
with open(text_fn, 'w') as f:
    sys.stdout = f # Change the standard output to the file we created.
    try:
        # r=r_values[1];D=D_values[3];
        for r in r_values:
            for D in D_values:
                for L in L_values:
                    for kappa in kappa_values:
                        for varkappa in varkappa_values:
                            for x0 in x0_values:
                                for no_repulsion in no_repulsion_values:
                                    for no_attraction in no_attraction_values:
                                        print(f"for r={r:.3f} cm, D={D:.3f} cm^2/s, kappa={kappa:.0f} 1/s,no_repulsion={no_repulsion==1}, and no_attraction={no_attraction==1} fixed")
                                        print(f"the powerlaw fit for varkappa={varkappa:.1f} cm and x0={x0:.1f} was")
                                        try:
                                            # BUGFIX: forward the loop's no_attraction/no_repulsion; the
                                            # original call omitted them, so the function's defaults
                                            # (no_attraction=0, no_repulsion=1) silently overrode the
                                            # values announced by the header printed above.
                                            slice_df_and_print_powerlaw_fit(r,D,L,kappa,varkappa,x0,
                                                                            no_attraction=no_attraction,
                                                                            no_repulsion=no_repulsion)
                                        except AssertionError as e:
                                            print(f"Error: empty slice!")
                                        print(f'')
                                        count+=1
    finally:
        # BUGFIX: restore stdout even if the sweep raises, so an exception does
        # not leave the notebook session printing into the file.
        sys.stdout = original_stdout # Reset the standard output to its original value
print(f'output redirected to {text_fn}.')
print(f"number of trials considered = {count}")

# + [markdown] hidden=true
# __Result__
# - no trials exhibited exponent smaller than m<2.00000.
# - the robust exponents are still too large to support the LR model, which has m=1.544...
# + [markdown] heading_collapsed=true
# # varying varkappa

# + hidden=true
# Scatter w(q) for a selection of varkappa values against the FK/LR fits.
# x0=x0_values[-1];print(f"x0={x0}")
# no_repulsion=0;no_attraction=0;reflect=0;neighbors=0;set_second=0
# r=r_values[1];D=D_values[-1];L=L_values[0];kappa=kappa_values[0]
#for FK model
#query the control
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
# kappa=kappa_values[0]
# D=D_values[0]#-1]#
# r=r_values[0]
# L=L_values[0]
# x0=np.min(x0_values[0] ) #cm
# set_second=0
# no_repulsion=1
# no_attraction=0
# reflect=0
# neighbor=0
# force_code=2
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])act
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4)
#plot data
for varkappa in varkappa_values[[0,1,2,3,7,-1]]:#[3:]:#[:4]:#[::2]:
    #query the DataFrame
    query =(df.set_second==set_second)&(df.reflect==reflect)
    query&=df.r==r
    query&=df.D==D
    query&=df.L==L
    query&=df.varkappa==varkappa
    query&=df.x0==x0
    query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
    query&=(df.neighbor==neighbor)&(df.force_code==force_code)
    dg=df[query]
    dh=dg[dg.kappa==kappa]
    x_values=dh.q.values
    y_values=dh.w.values
    print(r'kappa='+f'{kappa:.1f},varkappa={varkappa:.1f}')
    plt.scatter(x_values,y_values,label=r"$a =$ "+f"{varkappa:.1f} cm^2/s",alpha=0.8,s=5)#,cmap='bwr')
# plt.ylim([1e-5,1e4])
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-6,ncol=2,loc='lower right')
# plt.title(r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0} cm\n',fontsize=fontsize)
# plt.title(r'$x_0=$'+f'{x0} cm\n',fontsize=fontsize)
# plt.title(r'$\kappa=$'+f'{kappa:.1f}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.title(r'$\kappa=$'+f'{kappa}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.show()

# + hidden=true
# Fit a power law per varkappa value over the density window q_min..q_max.
q_min=0.3;q_max=0.5#2
#TODO: plot m+-Delta_m versus energy gap for LR and FK model
#plot data
# kappa=kappa_values[0]
# r=1.;D=5.;Dratio=1000;L=L_values[-1]
eg_lst=[];m_lst=[];Dm_lst=[];M_lst=[];DM_lst=[];Rsq_lst=[]
for varkappa in varkappa_values:
    try:
        #query the DataFrame
        query =(df.set_second==set_second)&(df.reflect==reflect)
        query&=df.r==r
        query&=df.D==D
        query&=df.L==L
        query&=df.varkappa==varkappa
        query&=df.x0==x0
        query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
        query&=(df.neighbor==neighbor)&(df.force_code==force_code)
        dg=df[query]
        # kappa_values=np.array(sorted(set(dg.kappa.values)))
        # kappa=kappa_values[0]
        dh=dg[dg.kappa==kappa]
        x_values=dh.q.values
        y_values=dh.w.values
        boo=(x_values>q_min)&(x_values<q_max)
        x=x_values[boo]
        y=y_values[boo]
        # print_fit_power_law(x,y)
        B,Delta_B,m,Delta_m,Rsq=fit_power_law(x,y)
        rmse=compute_power_rmse(x,y,m,B)
        M, Delta_M= comp_power_scale(B,Delta_B,m,Delta_m)
        # print(f"m={m:.3f}+-{Delta_m:.3f}; B={B:.3f}+-{Delta_B:.3f}")
        # print(f"M=B**m={M:.2f}+-{Delta_M:.2f} Hz*cm^{{2(m-1)}}")
        # print(f"RMSE={rmse:.4f} Hz/cm^2")
        # print(f"R^2={Rsq:.3f}")
        eg_lst.append(varkappa)
        m_lst.append(m)
        Dm_lst.append(Delta_m)
        M_lst.append(M)
        DM_lst.append(Delta_M)
        Rsq_lst.append(Rsq)
    except AssertionError as e:
        # empty slice for this varkappa -- skip it
        pass
print(Rsq_lst)

# + hidden=true
# Exponent m versus varkappa, with FK/LR reference lines.
m_fk=1.945#1.858;#+-0.027; B_fk=2.464+-0.030<br>
m_lr=1.544#1.638;#+-0.017; B_fk=5.588+-0.067
plt.errorbar(eg_lst, m_lst, yerr=Dm_lst, fmt='-o',c='k',label='Monte Carlo')
plt.plot(eg_lst, m_fk+0.*np.array(eg_lst), '-',c='C0',label='Fenton-Karma')
plt.plot(eg_lst, m_lr+0.*np.array(eg_lst), '-',c='C1',label='Luo-Rudy')
# plt.scatter(Dtdt_lst,m_lst,marker='^')
# plt.xscale('log')
title=f"r={r:.1f} cm, "+r"$\kappa$="+f"{kappa:.0f} Hz\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$, "+f"considering {q_min}<q<{q_max}\n"
# title=f"r={r:.1f} cm, "+r"$\kappa$="+f"variable\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$"
plt.title(title,fontsize=fontsize)
plt.xlabel(r'a (cm$^2$/s)',fontsize=fontsize)
plt.ylabel('m exponent',fontsize=fontsize)
# plt.xscale('log')
# plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
# plt.legend(fontsize=fontsize-8)#,ncol=2)
# plt.xlim([10,50])
# plt.ylim([0.9,2.3])
# plt.grid('on')
plt.legend(fontsize=fontsize-2)
plt.show()
# print("considering 20<N<50 spiral tips")
# print("considering N<20 spiral tips")
# print(f"considering {q_min}<q<{q_max}")

# + hidden=true
# Magnitude M versus varkappa, with FK/LR reference lines.
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
plt.errorbar(eg_lst, M_lst, yerr=DM_lst, fmt='-o',c='k',label='Monte Carlo')
plt.plot(eg_lst, M_fk+0.*np.array(eg_lst), '-',c='C0',label='Fenton-Karma')
plt.plot(eg_lst, M_lr+0.*np.array(eg_lst), '-',c='C1',label='Luo-Rudy')
# plt.scatter(Dtdt_lst,m_lst,marker='^')
# plt.xscale('log')
title=f"r={r:.1f} cm, "+r"$\kappa$="+f"{kappa:.0f} Hz\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$, "+f"considering {q_min}<q<{q_max}\n"
# title=f"r={r:.1f} cm, "+r"$\kappa$="+f"variable\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$"
plt.title(title,fontsize=fontsize)
plt.xlabel(r'a (cm$^2$/s)',fontsize=fontsize)
plt.ylabel(r'M magnitude ($cm^{2(m-1)}$/s)',fontsize=fontsize)
# plt.xscale('log')
# plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
# plt.legend(fontsize=fontsize-8)#,ncol=2)
# plt.xlim([10,50])
# plt.ylim([0.9,2.3])
# plt.grid('on')
plt.legend(fontsize=fontsize-2)
plt.show()
# print("considering 20<N<50 spiral tips")
# print("considering N<20 spiral tips")
# print(f"considering {q_min}<q<{q_max}")
# -

# # varying r

# +
# Same analysis sweeping r at fixed varkappa=varkappa_values[3].
# x0=x0_values[-1];print(f"x0={x0}")
# no_repulsion=0;no_attraction=0;reflect=0;neighbors=0;set_second=0
# r=r_values[1];D=D_values[-1];L=L_values[0];kappa=kappa_values[0]
#for FK model
#query the control
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa=varkappa_values[3];print(varkappa)
# kappa=kappa_values[0]
# D=D_values[0]#-1]#
# r=r_values[0]
# L=L_values[0]
# x0=np.min(x0_values[0] ) #cm
# set_second=0
# no_repulsion=1
# no_attraction=0
# reflect=0
# neighbor=0
# force_code=2
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])act
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4)
#plot data
# for varkappa in varkappa_values[3:]:#[0,1,2,3,7,-1]]:#[:4]:#[::2]:
for r in r_values[1:]:#[0,1,2,3,7,-1]]:#[:4]:#[::2]:
    #query the DataFrame
    query =(df.set_second==set_second)&(df.reflect==reflect)
    query&=df.r==r
    query&=df.D==D
    query&=df.L==L
    query&=df.varkappa==varkappa
    query&=df.x0==x0
    query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
    query&=(df.neighbor==neighbor)&(df.force_code==force_code)
    dg=df[query]
    dh=dg[dg.kappa==kappa]
    x_values=dh.q.values
    y_values=dh.w.values
    print(r'r='+f'{r:.1f},varkappa={varkappa:.1f}')
    # label=r"$a =$ "+f"{varkappa:.1f} cm^2/s"
    label=r"$r =$ "+f"{r:.1f} cm"
    plt.scatter(x_values,y_values,label=label,alpha=0.8,s=5)#,cmap='bwr')
# plt.ylim([1e-5,1e4])
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-6,ncol=2,loc='lower right')
# plt.title(r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0} cm\n',fontsize=fontsize)
# plt.title(r'$x_0=$'+f'{x0} cm\n',fontsize=fontsize)
# plt.title(r'$\kappa=$'+f'{kappa:.1f}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.title(f"a={varkappa:.0f}"+r'cm$^2$/s, $\kappa=$'+f'{kappa}\nforce_code={force_code}, neighbors={neighbor}\nr={r},D={D},L={L}\n',fontsize=fontsize)
plt.show()

# +
# Fit a power law per r value over the density window q_min..q_max.
q_min=0.3;q_max=0.5#2
#TODO: plot m+-Delta_m versus energy gap for LR and FK model
#plot data
# kappa=kappa_values[0]
# r=1.;D=5.;Dratio=1000;L=L_values[-1]
eg_lst=[];m_lst=[];Dm_lst=[];M_lst=[];DM_lst=[];Rsq_lst=[]
# for varkappa in varkappa_values[3:]:#[0,1,2,3,7,-1]]:#[:4]:#[::2]:
for r in r_values[1:]:#[0,1,2,3,7,-1]]:#[:4]:#[::2]:
    try:
        #query the DataFrame
        query =(df.set_second==set_second)&(df.reflect==reflect)
        query&=df.r==r
        query&=df.D==D
        query&=df.L==L
        query&=df.varkappa==varkappa
        query&=df.x0==x0
        query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
        query&=(df.neighbor==neighbor)&(df.force_code==force_code)
        dg=df[query]
        # kappa_values=np.array(sorted(set(dg.kappa.values)))
        # kappa=kappa_values[0]
        dh=dg[dg.kappa==kappa]
        x_values=dh.q.values
        y_values=dh.w.values
        boo=(x_values>q_min)&(x_values<q_max)
        x=x_values[boo]
        y=y_values[boo]
        # print_fit_power_law(x,y)
        B,Delta_B,m,Delta_m,Rsq=fit_power_law(x,y)
        rmse=compute_power_rmse(x,y,m,B)
        M, Delta_M= comp_power_scale(B,Delta_B,m,Delta_m)
        # print(f"m={m:.3f}+-{Delta_m:.3f}; B={B:.3f}+-{Delta_B:.3f}")
        # print(f"M=B**m={M:.2f}+-{Delta_M:.2f} Hz*cm^{{2(m-1)}}")
        # print(f"RMSE={rmse:.4f} Hz/cm^2")
        # print(f"R^2={Rsq:.3f}")
        eg_lst.append(r)
        m_lst.append(m)
        Dm_lst.append(Delta_m)
        M_lst.append(M)
        DM_lst.append(Delta_M)
        Rsq_lst.append(Rsq)
    except AssertionError as e:
        # empty slice for this r -- skip it
        pass
print(Rsq_lst)

# +
# Exponent m versus r, with FK/LR reference lines.
m_fk=1.945#1.858;#+-0.027; B_fk=2.464+-0.030<br>
m_lr=1.544#1.638;#+-0.017; B_fk=5.588+-0.067
plt.errorbar(eg_lst, m_lst, yerr=Dm_lst, fmt='-o',c='k',label='Monte Carlo')
plt.plot(eg_lst, m_fk+0.*np.array(eg_lst), '-',c='C0',label='Fenton-Karma')
plt.plot(eg_lst, m_lr+0.*np.array(eg_lst), '-',c='C1',label='Luo-Rudy')
# plt.scatter(Dtdt_lst,m_lst,marker='^')
# plt.xscale('log')
title=f"a={varkappa:.0f}"+r"(cm$^2$/s), $\kappa$="+f"{kappa:.0f} Hz\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$, "+f"considering {q_min}<q<{q_max}\n"
# title=f"r={r:.1f} cm, "+r"$\kappa$="+f"variable\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$"
plt.title(title,fontsize=fontsize)
plt.xlabel(r'r (cm)',fontsize=fontsize)
plt.ylabel('m exponent',fontsize=fontsize)
# plt.xscale('log')
# plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
# plt.legend(fontsize=fontsize-8)#,ncol=2)
# plt.xlim([10,50])
# plt.ylim([0.9,2.3])
# plt.grid('on')
plt.legend(fontsize=fontsize-2)
plt.show()
# print("considering 20<N<50 spiral tips")
# print("considering N<20 spiral tips")
# print(f"considering {q_min}<q<{q_max}")

# +
# Magnitude M versus r, with FK/LR reference lines.
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
plt.errorbar(eg_lst, M_lst, yerr=DM_lst, fmt='-o',c='k',label='Monte Carlo')
plt.plot(eg_lst, M_fk+0.*np.array(eg_lst), '-',c='C0',label='Fenton-Karma')
plt.plot(eg_lst, M_lr+0.*np.array(eg_lst), '-',c='C1',label='Luo-Rudy')
# plt.scatter(Dtdt_lst,m_lst,marker='^')
# plt.xscale('log')
title=f"a={varkappa:.0f}"+r"(cm$^2$/s), $\kappa$="+f"{kappa:.0f} Hz\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$, "+f"considering {q_min}<q<{q_max}\n"
# title=f"r={r:.1f} cm, "+r"$\kappa$="+f"variable\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$"
plt.title(title,fontsize=fontsize)
plt.xlabel(r'r (cm)',fontsize=fontsize)
plt.ylabel(r'M magnitude ($cm^{2(m-1)}$/s)',fontsize=fontsize)
# plt.xscale('log')
# plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
# plt.legend(fontsize=fontsize-8)#,ncol=2)
# plt.xlim([10,50])
# plt.ylim([0.9,2.3])
# plt.grid('on')
plt.legend(fontsize=fontsize-2)
plt.show()
# print("considering 20<N<50 spiral tips")
# print("considering N<20 spiral tips")
# print(f"considering {q_min}<q<{q_max}")
# -

# + [markdown] heading_collapsed=true
# # (skip) varying r for Case 3

# + hidden=true
# Scatter w(q) per r at fixed varkappa/x0/D/kappa, with attraction disabled.
# varkappa=varkappa_values[3];print(f'varkappa={varkappa}')
varkappa=varkappa_values[1];print(f'varkappa={varkappa}')
# r=r_values[0];
x0 =x0_values[-1];print(f'x0={x0}')#[0,1,2,-4,-1]]:#[::2]:
D=D_values[0];print(f'D={D}')
L=L_values[-1]
kappa_values=np.array(sorted(set(df['kappa'].values)))
kappa=kappa_values[-1];print(f'kappa={kappa}')
no_attraction=1
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4)
#plot data
for r in r_values:
    #query the DataFrame
    query=df.r==r
    query&=df.D==D
    query&=df.L==L
    query&=df.varkappa==varkappa
    query&=df['x0']==x0
    query&=df.kappa==kappa
    query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
    dff=df[query]
    dg=dff.copy()
    x_values=dg.q.values
    y_values=dg.w.values
    if x_values.shape[0]>0:
        plt.scatter(x_values,y_values,label=r"$r=$ "+f"{r:.1f} cm",alpha=0.8)#,cmap='bwr')
plt.ylim([1e-3,1e4])
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0) plt.title(r'$\varkappa=$'+f'{varkappa} Hz\n',fontsize=fontsize) plt.legend(fontsize=fontsize-8,ncol=2,loc='lower right') plt.show() # + hidden=true # print(f""" # the following parameter set did not tend to finish in <10 hours... # r={r:.1f},D={D:.0f},L={L:.0f},kappa={kappa:.0f},varkappa={varkappa:.0f},x0={x0:.0f},no_attraction={no_attraction},no_repulsion={no_repulsion} # """) # + [markdown] hidden=true # TODO: remove as many variables as possible while retaining the ability to control slope and magnitude on the log-log plot # - fix r,L # - try removing kappa using instantaneous reactions # - then, try removing x0 by using scale-invariant, long-ranged forces between (i) nearest neighbors and (ii) all particles # - vary D and varkappa to fit to model # # # Alternatively, make a scatterplot of all available varkappa,x0 pairs that worked, and then make a genetic algorithm. # # Maybe later, consider trying a force like $F = F_1 \log (r/r0) + F_0$ # + hidden=true q_min=0.3;q_max=0.6 #TODO: plot m+-Delta_m versus energy gap for LR and FK model #plot data # r=1.;D=5.;Dratio=1000;L=L_values[-1] eg_lst=[];m_lst=[];Dm_lst=[];Rsq_lst=[] for r in r_values: try: #query the DataFrame query=df.r==r query&=df.D==D query&=df.L==L query&=df.varkappa==varkappa query&=df['x0']==x0 query&=df.kappa==kappa query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction) dff=df[query] dg=dff.copy() x_values=dg.q.values y_values=dg.w.values boo=(x_values>q_min)&(x_values<q_max) x=x_values[boo] y=y_values[boo] # print_fit_power_law(x,y) B,Delta_B,m,Delta_m,Rsq=fit_power_law(x,y) rmse=compute_power_rmse(x,y,m,B) M, Delta_M= comp_power_scale(B,Delta_B,m,Delta_m) # print(f"m={m:.3f}+-{Delta_m:.3f}; B={B:.3f}+-{Delta_B:.3f}") # print(f"M=B**m={M:.2f}+-{Delta_M:.2f} Hz*cm^{{2(m-1)}}") # print(f"RMSE={rmse:.4f} Hz/cm^2") # print(f"R^2={Rsq:.3f}") eg_lst.append(r) m_lst.append(m) Dm_lst.append(Delta_m) Rsq_lst.append(Rsq) except 
AssertionError as e: pass print(Rsq_lst) # + hidden=true m_fk=1.945#1.858;#+-0.027; B_fk=2.464+-0.030<br> m_lr=1.544#1.638;#+-0.017; B_fk=5.588+-0.067 plt.errorbar(eg_lst, m_lst, yerr=Dm_lst, fmt='-o',label='Monte Carlo') plt.plot(eg_lst, m_fk+0.*np.array(eg_lst), '--',c='gray',label='Fenton-Karma') plt.plot(eg_lst, m_lr+0.*np.array(eg_lst), '-',c='gray',label='Luo-Rudy') # plt.scatter(Dtdt_lst,m_lst,marker='^') # plt.xscale('log') title=f"$x_0=$"+f"{x0:.0f} cm, "+r"$\kappa$="+f"{kappa:.0f} Hz\nD={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$" plt.title(title,fontsize=fontsize) plt.xlabel(r'$r$',fontsize=fontsize) plt.ylabel('exponent',fontsize=fontsize) # plt.xscale('log') # plt.yscale('log') plt.tick_params(axis='both', which='major', labelsize=fontsize) plt.tick_params(axis='both', which='minor', labelsize=0) # plt.legend(fontsize=fontsize-8)#,ncol=2) # plt.xlim([10,50]) # plt.ylim([0.9,2.3]) # plt.grid('on') plt.legend(fontsize=fontsize-2,loc='lower right') plt.show() # print("considering 20<N<50 spiral tips") # print("considering N<20 spiral tips") print(f"considering {q_min}<q<{q_max}") # + hidden=true print('trials with case 3 and r=0.4 that finished') print(f'kappa:{set(df[(df.varkappa<0)&(df.r==0.4)&(df.x0>0)].kappa.values)}') print(f'D:{set(df[(df.varkappa<0)&(df.r==0.4)].D.values)}') print(f"however, ^these trials did not finish when r=0.1...") # - # # TODO: make the standard 2 panel figure for 1 token control trial # - Control: varkappa<0, x0=0 # # # TODO: make the standard 2 panel figure # for each nontrivial case # - Case 1: varkappa>0, x0=0 # - Case 2: varkappa>0, x0>0 # - Case 3: varkappa<0, x0>0 # # # Consider Panel # A = Control # BCD # + [markdown] heading_collapsed=true # # TODO: compute the RMSE of each trial with the power fit of the full model, limiting sample to relevant particle number densities # + [markdown] heading_collapsed=true # # TODO: compute the change in mean CollRate as a result of turning on the forces (setting no_attraction from 
# 1 to 0).

# + hidden=true
#TODO: groupby groups
groups=['r','D','L','kappa','varkappa','x0','A','no_repulsion']
#TODO:compute the rate change
# NOTE(review): subtracting df[df.no_attraction==0]['CollRate'] from
# df[df.no_attraction==1]['CollRate'] aligns on the DataFrame index, and the
# two boolean slices have disjoint index sets — this likely produces all-NaN
# results. A groupby on `groups` followed by an aligned diff is probably what
# was intended; TODO confirm (the cell is marked TODO in the original).
df.loc[df.no_attraction==0,'change_of_rate']=df[df.no_attraction==0]['CollRate']-df[df.no_attraction==1]['CollRate']
df.loc[df.no_attraction==1,'change_of_rate']=df[df.no_attraction==0]['CollRate']-df[df.no_attraction==1]['CollRate']
df.sort_values(['r','D','L','kappa','varkappa','x0','A','no_repulsion'],inplace=True)

# + hidden=true
df.head()

# + [markdown] hidden=true
# DONE: genetic algorithm k-parents
# 1. consider the k trials with RMSE for (i) the FK model and (ii) the LR model
# 1. take random linear combinations of ^those parents, run them on the OSG, and then take the k best fits
# 1. repeat until desired convergence is met
# 1. repeat for various k, and visualize any (in)dependence of k
# - DONE: find rough fit to initialize genetic algorithm
#

# + hidden=true
# Load the run_10 aggregate results; rebinds the global df used by all
# subsequent cells in this section.
data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_10_all.csv"
df=pd.read_csv(data_dir)
assert not (df.CollRate<0).any()
df.head()

# + hidden=true
#derived values
# df['CollRate']=1./df['CollTime']
df['A']=df['L']**2
df['q']=df['N']/df['A'] #number of tips per square centimeter
df['w']=df['CollRate']/df['A'] #[mHz?]/cm^2
# df=df[df.niter==250].copy()
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
x0_values=np.array(sorted(set(df.x0.values)))#1/s
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))

# + hidden=true
#make test for whether there is one input parameter present in an input DataFrame1
print(r_values)
print(D_values)
print(L_values)
print(kappa_values)
print(varkappa_values)
print(x0_values)
print(set_second_values)
print(reflect_values)
print(no_repulsion_values)
print(no_attraction_values)

# + hidden=true
# Extract two w-vs-q curves from the same run: one at varkappa_values[0]
# (stored as *_force) and one at varkappa_values[3], for overlay below.
varkappa=varkappa_values[0];print(f'varkappa={varkappa}')
x0=x0_values[2];print(f'x0={x0}')
#query the DataFrame
query =(df.set_second==set_second_values[0])&(df.reflect==reflect_values[0])
query&=df.r==r_values[0]
query&=df.D==D_values[0]
query&=df.L==L_values[0]
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion_values[0])&(df.no_attraction==no_attraction_values[0])
dff=df[query]
kappa_values=np.array(sorted(set(dff.kappa.values)))
dg=dff[dff.kappa==kappa_values[0]]#smaller reaction rate
# dg=dff[dff.kappa==kappa_values[-1]]#Luo-Rudy fit
# dg=dff.kappa==kappa_values[0]#Fenton-Karma fit
x_values_force=dg.q.values
y_values_force=dg.w.values
varkappa=varkappa_values[3];print(f'varkappa={varkappa}')
x0=x0_values[2];print(f'x0={x0}')
#query the DataFrame
query =(df.set_second==set_second_values[0])&(df.reflect==reflect_values[0])
query&=df.r==r_values[0]
query&=df.D==D_values[0]
query&=df.L==L_values[0]
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion_values[0])&(df.no_attraction==no_attraction_values[0])
dff=df[query]
kappa_values=np.array(sorted(set(dff.kappa.values)))
dg=dff[dff.kappa==kappa_values[0]]#smaller reaction rate
# dg=dff[dff.kappa==kappa_values[-1]]#Luo-Rudy fit
# dg=dff.kappa==kappa_values[0]#Fenton-Karma fit
x_values=dg.q.values
y_values=dg.w.values

# + hidden=true
# Overlay the two extracted curves on the FK/LR full-model power-law fits.
dt=1e-5
#plot fits for full model
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
# yscale=10**3
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr
fontsize=16
# plt.xlim([0.1,1])
# plt.ylim([1e-1,15])
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4)
fontsize=18
plt.plot(x_values,y_values,c='C2',alpha=.7,label='simulation without forces',lw=6)
plt.plot(x_values_force,y_values_force,c='C3',alpha=.7,label='simulation with forces',lw=6)
# plt.plot(x_values,y_values,c=c_values,alpha=0.4,cmap='bwr')
# plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
# print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
title=r"$\kappa$="+f"{kappa:.0f} Hz, D={D} cm"+r"$^2$/s"+f", A={L**2:.0f} cm"+r"$^2$"
title+='\n'+r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0:.0f} cm, dt={dt} s\n'
plt.title(title,fontsize=fontsize)
# NOTE(review): duplicated title call below is redundant but harmless.
plt.title(title,fontsize=fontsize)
plt.show()

# + hidden=true
print(sorted(set(dg.r.values)))
print(sorted(set(dg.D.values)))
print(sorted(set(dg.L.values)))
print(sorted(set(dg.kappa.values)))

# + hidden=true
#import previous data
# data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_1_to_3_merged.csv"
# data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_6_all.csv"
# has match but dt=1e-5...
#Is it present in run_4??
df=pd.read_csv(data_dir)
assert not (df.CollRate<0).any()
#derived values
df['CollRate']=1./df['CollTime']
df['A']=df['L']**2
df['q']=df['N']/df['A'] #number of tips per square centimeter
df['w']=df['CollRate']/df['A'] #[mHz?]/cm^2
# # df=df[df.niter==250].copy()
# #extract column values
# r_values=np.array(sorted(set(df.r.values)))#cm
# D_values=np.array(sorted(set(df.D.values)))#cm^2/s
# L_values=np.array(sorted(set(df.L.values)))#cm
# A_values=L_values**2#cm^2
# kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
# varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
# x0_values=np.array(sorted(set(df.x0.values)))#1/s
# set_second_values=np.array(sorted(set(df.set_second.values)))
# reflect_values=np.array(sorted(set(df.reflect.values)))
# no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
# no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
#show the first 5 entries that match the query
query =df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.reflect==reflect
dg=df[query]
dg.head()

# + hidden=true
dt_values=np.array(sorted(set(dg.dt.values)))
dt_values

# + hidden=true
df.head()

# + hidden=true
# free the large frames before the next load
del dg
del df

# + hidden=true run_control={"marked": true}
# #plot sliding m
# fontsize=18
# x=xavg_values[:-1]
# # y=Rsquared_values[:-1]
# y=slope_values[:-1]
# plt.plot(x,y,lw=2)
# # plt.scatter(x,y,s=10)
# # plt.xlabel('N',fontsize=fontsize)
# # plt.ylabel('Collision Rate',fontsize=fontsize)
# # plt.xscale('log')
# # plt.title(u'comparison to simulation\nwith two hybrid modes',fontsize=fontsize)
# plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
# plt.ylabel(r'exponent', fontsize=fontsize)
# plt.tick_params(axis='both', which='major', labelsize=fontsize)
# plt.tick_params(axis='both', which='minor', labelsize=0)
# # print(f'varkappa={varkappa} Hz');print(f' x0={x0} cm')
# plt.title(r'$\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0} cm\n',fontsize=fontsize)
# plt.show()

# + hidden=true

# + hidden=true

# + hidden=true
# #measure m in some window
# qmin=0.06;qmax=0.2
# L=L_values[0]
# x_values=dg[dg.L==L].q.values
# y_values=dg[dg.L==L].w.values
# boo=(x_values>qmin)&(x_values<qmax)
# dict_ci=compute_95CI_ols(np.log(x_values[boo]),np.log(y_values[boo]))
# print(*dict_ci)
# print(*dict_ci.values())
# print(f"Therefore, 95% confidence interval DOES include exponent values observed from the Luo-Rudy model, but for this trial, only low densities.")
# #compute sliding m
# xavg_values,slope_values,Rsquared_values = compute_sliding_slope_loglog(x_values,y_values,
#     x_min=qmin,
#     window_width=0.2,
#     stepsize=0.01,
#     )
# xavg_values.shape
# -

# # sanity check from return_CollTime.x
# for probing the effect of dt at high density (q=1)
# _This is evidence that attractive forces increase the collision rate, W, at high densities, which contradicts the figure I showed WJ and am looking at now._
#
# ...Then, why did I record a contradictory signal?

#I ran these settings without forces using ./return_CollTime.x for N=100 r=0.1 D=2 L=10 kappa=500 reflect=0

# +
#from 1500 independent trials of the reaction N=100 --> N=98
#for each trial, seed=1234
#no forces
dt_control_lst=[1e-5, 1e-6, 1e-7]
Tavg_control_lst=[0.00175961,0.00172462,np.nan]
#strong attractive forces of close range
dt_lst=[1e-5, 1e-6, 1e-7]
Tavg_lst=[0.00102133,0.00104635,np.nan]
#NOTE: I ended the dt=1e-7 trials after 6 hours... They did not finish...
#is runtime on 1e-6 small enough for the OSG?
#no...
# -

# __Results__
# - rate_with_force > rate_without_force at high densities when the seeds match
# - there is apparently a seed dependence to the value of Tavg
# - it is not practical for me to use dt=1e-6 on the OSG. It is certainly not practical for me to use dt=1e-7 on the OSG.
# - it is necessary for seeds to match for the ends to match
#     - Why is ^this? Is it because at high densities, q~1, the uncertainty is large for niter=1500?
# - it is not necessary for seeds to match for the ends to match
# - TODO: just make niter=5000 on one machine(, or better...)
#     - or aggregate many machines and make niter=15000
# - solution: just make niter=5000 on the next data run, and limit search to trials in the neighborhood of where I think they'll model the LR model's powerfit
#
#
# DONT: bootstrap uncertainties for variable N. __Just increase niter!__
# - I should measure the variance/95% CI explicitely versus N via bootstrapping for one trial.
# - TODO: look for the/some token trial in the dense data from runs_1_to_3.
# - TODO: measure ^that trial's variance for each group of constant N
# - TODO: visualize the 95% CI of Tavg versus N.
# - TODO: translate ^that plot into w versus q

# # print the top 5 of a given run, in terms of RMSE from the LR/FK models
# then,
# - use ^those to generate some guesses for the LR/FK models
# then,
# - dev run_11
# then,
# - condor_submit run_11.submit

# ## # Ranking the top results from the genetic algorithm
# this is comparing rmse at high densities

# compute w versus q and find r,kappa with smalles RMSE_lr,fk

# +
# def routine(item):
#     r,D,kappa=item
#     return PlotUnivCurve(r,D,kappa)

# #run plotting in parallel
# b = db.from_sequence(queue, npartitions=9).map(routine)
# start = time.time()
# retval = list(b)
# print(f"run time was {time.time()-start:.2f} seconds.")
# beep(10)
# -

def get_eval_powerlaw(dict_out):
    '''Return a callable that evaluates the fitted power law in linear space.

    dict_out has fields m and b (slope and intercept of a log-log OLS fit).
    The returned function computes w = exp(m*log(q) + b), i.e. w = e^b * q^m.

    Example Usage:
        eval_powerlaw=get_eval_powerlaw(dict_out)
        w=eval_powerlaw(q)
    '''
    m=dict_out['m'];b=dict_out['b']
    # linear fit in log space...
    q_foo = lambda q: m*q+b
    # ...exponentiated back to a power law in linear space
    eval_powerlaw = lambda q: np.exp(q_foo(np.log(q)))
    return eval_powerlaw

# Load the run_12 aggregate results; rebinds the global df.
data_dir="/home/timothytyree/Documents/GitHub/bgmc/python/data/osg_output/run_12_all.csv"
df=pd.read_csv(data_dir)
df.head()

df.columns

(df.CollRate<0).any()

# +
#derived values
# df['CollRate']=1./df['CollTime']
df['A']=df['L']**2
df['q']=df['N']/df['A'] #number of tips per square centimeter
df['w']=df['CollRate']/df['A'] #[mHz?]/cm^2
# df=df[df.niter==250].copy()
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#1/s
x0_values=np.array(sorted(set(df.x0.values)))#1/s
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))

# +
# #make test for whether there is one input parameter present in an input DataFrame1
# print(r_values)
# print(D_values)
# print(L_values)
# print(kappa_values)
# print(varkappa_values)
# print(x0_values)
# print(set_second_values)
# print(reflect_values)
# print(no_repulsion_values)
# print(no_attraction_values)

# +
# r=r_values[0]
# L=L_values[0]
# D=2;#np.min(D_values);print(f'D={D}')
# varkappa=-20;#np.min(varkappa_values);print(f'varkappa={varkappa}')
# x0=1.;#np.max(x0_values);print(f'x0={x0}')
# # D==D_values[0];print(f'D={D}')
# # varkappa=varkappa_values[3];print(f'varkappa={varkappa}')
# # x0=x0_values[2];print(f'x0={x0}')
# #query the DataFrame
# query =(df.set_second==set_second_values[0])&(df.reflect==reflect_values[0])
# query&=df.r==r
# query&=df.D==D
# query&=df.L==L
# query&=df.varkappa==varkappa
# query&=df.x0==x0
# query&=(df.no_repulsion==no_repulsion_values[0])&(df.no_attraction==no_attraction_values[0])
# dff=df[query]
# kappa_values=np.array(sorted(set(dff.kappa.values)))
# dg=dff[dff.kappa==kappa_values[0]]#smaller reaction rate
# # dg=dff[dff.kappa==kappa_values[-1]]#Luo-Rudy fit
# # dg=dff.kappa==kappa_values[0]#Fenton-Karma fit

# +
# varkappa=varkappa_values[3];print(f'varkappa={varkappa}')
# x0=x0_values[2];print(f'x0={x0}')
# #query the DataFrame
# query =(df.set_second==set_second_values[0])&(df.reflect==reflect_values[0])
# query&=df.r==r_values[0]
# query&=df.D==D_values[0]
# query&=df.L==L_values[0]
# query&=df.varkappa==varkappa
# query&=df.x0==x0
# query&=(df.no_repulsion==no_repulsion_values[0])&(df.no_attraction==no_attraction_values[0])
# dff=df[query]
# kappa_values=np.array(sorted(set(dff.kappa.values)))
# dg=dff[dff.kappa==kappa_values[0]].copy()#smaller reaction rate

# +
#load birth death rates from the full model
# nb_dir, os, and compute_95CI_ols are defined in earlier cells outside this view.
data_folder=f'{nb_dir}/data'
os.chdir(data_folder)
data_fk_fn='full_results/data_fig4_vidmar_fk_tt.csv'
data_lr_fn='full_results/data_fig4_vidmar_lr_tt.csv'
fk=pd.read_csv(data_fk_fn)
fk['N']=fk['No2']*2
fk['q']=fk['N']/fk['A'] #number of tips per square centimeter
fk['w']=fk['rate']/fk['A'] #[mHz?]/cm^2
lr=pd.read_csv(data_lr_fn)
lr['N']=lr['No2']*2
lr['q']=lr['N']/lr['A'] #number of tips per square centimeter
lr['w']=lr['rate']/lr['A'] #[mHz?]/cm^2

# +
#compute w_lr and w_fk using a linear regression of a log-log plot
# from lib.compute_slope import *
yscale=10**3
x=lr.q.values
y=yscale*lr.w.values
dict_out=compute_95CI_ols(np.log(x),np.log(y))
dict_lr=dict_out.copy()
#predict the values functionally over all q
m=dict_out['m'];b=dict_out['b']
q_foo = lambda q: m*q+b
df['w_lr']=np.exp(q_foo(np.log(df['q'].values)))
x=fk.q.values
y=yscale*fk.w.values
dict_out=compute_95CI_ols(np.log(x),np.log(y))
dict_fk=dict_out.copy()
#predict the values functionally over all q
m=dict_out['m'];b=dict_out['b']
q_foo = lambda q: m*q+b
df['w_fk']=np.exp(q_foo(np.log(df['q'].values)))
# -

#compute the square errors
df['se_w_fk']=(df['w']-df['w_fk'])**2
df['se_w_lr']=(df['w']-df['w_lr'])**2

# +
# df_fn='w_vs_q_agg_over_A.csv'
# os.chdir(data_folder)
# df.to_csv(dg_fn,index=False)

# + run_control={"marked": false}
#in rmse_lr,fk cell
# Exhaustive sweep over every parameter combination present in the run:
# for each non-empty slice, record the RMSE against the LR and FK full-model
# power laws plus a fitted exponent, restricted to qmin<q<qmax.
qmin=0.3;qmax=1.0;
#compute the root mean squared errors over various axes
#common defines
no_attraction=0;no_repulsion=1;reflect=0;set_second=0
#input list
r_lst=[];D_lst=[];L_lst=[];kappa_lst=[];varkappa_lst=[];x0_lst=[]
neighbor_lst=[];force_code_lst=[]
varkappa_values=np.array(sorted(set(df.varkappa.values)))
#output list
rmse_lr_lst=[];rmse_fk_lst=[];m_lst=[];Delta_m_lst=[]
# NOTE(review): neighbor_values and force_code_values are defined in an
# earlier cell outside this view.
for set_second in set_second_values:
    for reflect in reflect_values:
        for r in r_values:
            for D in D_values:
                for L in L_values:
                    for kappa in kappa_values:
                        for varkappa in varkappa_values:
                            for x0 in x0_values:
                                for no_repulsion in no_repulsion_values:
                                    for no_attraction in no_attraction_values:
                                        for neighbor in neighbor_values:
                                            for force_code in force_code_values:
                                                #query the DataFrame
                                                query =(df.set_second==set_second)&(df.reflect==reflect)
                                                query&=df.r==r
                                                query&=df.D==D
                                                query&=df.L==L
                                                query&=df.kappa==kappa
                                                query&=df.varkappa==varkappa
                                                query&=df.x0==x0
                                                query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
                                                query&=(df.force_code==force_code)&(df.neighbor==neighbor)
                                                dg=df[query]
                                                #limit query to the specified interval of particle densities
                                                query=(dg.q>qmin)&(dg.q<qmax)
                                                qu=dg[query]
                                                # if the slice is non-empty
                                                if qu.size>0:
                                                    rmse_lr=np.sqrt(qu.se_w_lr.mean())
                                                    rmse_fk=np.sqrt(qu.se_w_fk.mean())
                                                    #extract the data
                                                    x_values=dg.q.values
                                                    y_values=dg.w.values
                                                    boo=(x_values>qmin)&(x_values<qmax)#redundant
                                                    try:
                                                        dict_ci=compute_95CI_ols(np.log(x_values[boo]),np.log(y_values[boo]))
                                                        # print(*zip(dict_ci,dict_ci.values()))
                                                        m=dict_ci['m']
                                                        Delta_m=dict_ci['Delta_m']
                                                    except AssertionError as e:
                                                        # sentinel values mark failed fits
                                                        m=-9999
                                                        Delta_m=-9999
                                                    #record inputs
                                                    r_lst.append(r)
                                                    D_lst.append(D)
                                                    L_lst.append(L)
                                                    kappa_lst.append(kappa)
                                                    varkappa_lst.append(varkappa)
                                                    x0_lst.append(x0)
                                                    neighbor_lst.append(neighbor)
                                                    force_code_lst.append(force_code)
                                                    #record outputs
                                                    rmse_lr_lst.append(rmse_lr)
                                                    rmse_fk_lst.append(rmse_fk)
                                                    m_lst.append(m)
                                                    Delta_m_lst.append(Delta_m)
# -

beep(10)

#form a pandas.DataFrame of ^that
dh=pd.DataFrame({
    'rmse_lr':rmse_lr_lst,
    'rmse_fk':rmse_fk_lst,
    'm':m_lst,
    # NOTE(review): this passes the scalar Delta_m (last loop iteration), not
    # Delta_m_lst, which is built above but never used — likely a bug; the
    # 'Delta_m' column is probably meant to be Delta_m_lst. TODO confirm.
    'Delta_m':Delta_m,
    'r':r_lst,
    'D':D_lst,
    'L':L_lst,
    'kappa':kappa_lst,
    'varkappa':varkappa_lst,
    'x0':x0_lst,
    'neighbor':neighbor_lst,
    'force_code':force_code_lst,
})
dh.head()

#compute absolute error from the target exponent
dh['m_fk']=np.abs(dh['m']-m_fk)
dh['m_lr']=np.abs(dh['m']-m_lr)

#and the top 5 matches for either model is... (FK)
print(f"The top 5 for the Fenton-Karma model:")
dh.sort_values(by='rmse_fk').head(5)

#and the top 5 matches for either model is... (FK)
print(f"The top 5 for the Luo-Rudy model:")
dh.sort_values(by='rmse_lr',inplace=True)
dh.head(5)

#rank each trial by distance from m_fk
print(f"The top 5 for the exponent of the Fenton-Karma model:")
dh.sort_values(by='m_fk').head(5)

#rank each trial by distance from m_lr
print(f"The top 5 for the exponent of the Luo-Rudy model:")
dh.sort_values(by='m_lr').head(5)

#save dh as csv
save_folder=f"{nb_dir}/data/osg_output"
assert ( os.path.exists(save_folder))
os.chdir(save_folder)
# NOTE(review): str.replace swaps the '.csv' suffix of the absolute data_dir
# path for the new name, yielding '.../run_12_allrun_12_rmse_..._.csv'
# (the old stem is retained). Verify the intended output filename.
save_fn=data_dir.replace('.csv',f'run_12_rmse_m_lr_fk_qmin_{qmin}_qmax_{qmax}.csv')
dh.to_csv(save_fn,index=False)
assert(os.path.exists(save_fn))

set(dh.varkappa.values)

# ## DONE: dev run 11 using random linear combinations of ^those for the LR model
# -TODO(later): fork run 11 to fit the FK model.

# +
# - DONE: make run 11 have niter=5000 and Nmax=60...
# num_trials_born=100 # the total number of daughters # # print the breeding_values of the top 5 # # num_breeders=5 # # num_breeding_params=3 # # breeding_cols=['D','varkappa','x0'] # # num_breeding_params=len(breeding_cols) # # # taken from the *.ipynb located here: 'analyzing the effect of strong attractive forces between nearest neighbors.ipynb' # # breeder_values=dh[breeding_cols].head(num_breeders).values # # breeder_values_LR=breeder_values # # print(breeder_values) # #the top 5 for the LR model. Epoch 0 # breeder_values=np.array( # [[ 2., -5., 5. ], # [ 20., -5., 5. ], # [ 20., -20., 1.5], # [ 20., -10., 3. ], # [ 2., -20., 1.5]]) # #breed the given most-fit trials omnisexually. # num_breeders, num_breeding_params = breeder_values.shape # rand_matrix=np.random.rand(num_trials_born-num_breeders,num_breeders) # each entry is uniformly distributed on the interval from 0 to 1. # breeder_trials=np.matmul(rand_matrix,breeder_values)/num_breeders # #prepend the breeding_values of the top 5 to breeder_trials # trial_values=np.concatenate((breeder_values,breeder_trials),axis=0) # for trial in trial_values: # D,varkappa,x0=trial # pass # # trial_values.shape # print ( (D,varkappa,x0)) # - # ## TODO(later): analyze ^those top 5-10 results by plotting them and paying a lot of attention to them... #TODO: plot the best trial #and the top 5 matches for either model is... (FK) print(f"The top 5 for the Luo-Rudy model:") dh.sort_values(by='m_lr',inplace=True) dh.head(5) #DONE: plot the corresponding control trial. 
# overlay with ^that
#DONE: plot the exponent versus q for the best trial and its control
#DONE: indicate the exponent of the LR model

# Rank all trials by their Fenton-Karma fitness metric and show the 5 best.
print(f"The top 5 for the Fenton-Karma model:")
dh.sort_values(by='m_fk',inplace=True)
dh.head(5)

# Extract the parameter columns (4:12 — presumably r, D, L, kappa, varkappa, x0,
# neighbor, force_code; TODO confirm against dh's column order) of the best trials.
# NOTE(review): [:6, ...] on a 5-row head() is just "all rows".
arr=dh.head(5).values[:6,4:12]
# (commented-out alternative: a hand-written arr of parameter rows, removed for brevity)

# smoke-test that each parameter row unpacks into the expected 8 fields
for j in range(arr.shape[0]):
    r, D, L, kappa, varkappa, x0, neighbor, force_code = arr[j]

# __Results__
# - the best trials for the LR model have long ranges
# - matching the seeds corrected the high density disagreement.
#
# __Questions__
# - did increasing niter fix the high density disagreement?

neighbor,force_code

# For each of the top trials: query the trial and its force-free control out of df,
# fit sliding log-log slopes, and save a two-panel (w vs q, exponent vs q) figure.
savefig_folder=f'{nb_dir}/../fig'
saving=True
for j in range(arr.shape[0]):
    reflect=0
    set_second=0
    no_repulsion=1
    L=10  # NOTE(review): overrides the L unpacked from arr on the next line's unpack order — confirm intended
    r, D, L, kappa, varkappa, x0,neighbor,force_code = arr[j]
    #dh.head(5).values[j,4:12]
    savefig_fn=f'run_12_effect_of_attraction_r_{r:.1f}_D_{D:.2f}_L_{L:.0f}_kappa_{kappa:.0f}_varkappa_{varkappa:.2f}_x0_{x0:.2f}_neighbor_{int(neighbor):0d}_force_code_{int(force_code):0d}.png'
    print (*(r, D, L, kappa, varkappa, x0, neighbor, force_code))
    # r=0.1;D=20;L=10;kappa=1500;varkappa=-5;x0=5.
    #with forces
    #slice the df down to this geometry/boundary configuration
    query =df.no_attraction==0
    query&=df.no_repulsion==no_repulsion
    query&=df.reflect==reflect
    query&=df.set_second==set_second
    query&=df.r==r
    query&=df.L==L
    query&=df.kappa==kappa
    dff=df[query]#.copy()
    #query the dataframe with the bred parameters
    query =dff.D==D
    query&=dff.varkappa==varkappa
    query&=dff.x0==x0
    query&=(dff.neighbor==int(neighbor))&(dff.force_code==int(force_code))
    dg=dff[query]#.copy()
    #extract the data (q: particle density, w: rate — presumably; confirm against df schema)
    x_values_force=dg.q.values
    y_values_force=dg.w.values
    assert (y_values_force.shape[0]>0)  # the trial must exist in df
    #query the control: same geometry, forces switched off
    query =(df.set_second==set_second)&(df.reflect==reflect)
    query&=df.r==r
    query&=df.D==D
    query&=df.L==L
    query&=df.kappa==kappa
    query&=df.varkappa==0
    query&=df.x0==0
    query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==1)
    query&=(df.neighbor==0)&(df.force_code==0)
    dg=df[query]
    x_values=dg.q.values
    y_values=dg.w.values
    #slice relevant particle density to the particle densities observed from the full model
    qmin=0.1;qmax=1
    window_width=0.3
    stepsize=0.1
    try:
        # OLS fit + sliding-window log-log slope for the control trial
        boo=(x_values>qmin)&(x_values<qmax)
        dict_ci=compute_95CI_ols(np.log(x_values[boo]),np.log(y_values[boo]))
        # print(*zip(dict_ci,dict_ci.values()))
        #compute sliding m
        xavg_values,slope_values,Rsquared_values = compute_sliding_slope_loglog(x_values,y_values,
            x_min=qmin,
            window_width=window_width,
            stepsize=stepsize,
        )
    except AssertionError as e:
        # control missing/empty — fall back to empty arrays so plotting still runs
        xavg_values,slope_values=np.array([]),np.array([])
        print ('uh-oh')
    #with forces: same fit for the trial itself
    boo=(x_values_force>qmin)&(x_values_force<qmax)
    dict_ci=compute_95CI_ols(np.log(x_values_force[boo]),np.log(y_values_force[boo]))
    # print(*zip(dict_ci,dict_ci.values()))
    #compute sliding m
    xavg_values_force,slope_values_force,Rsquared_values_force = compute_sliding_slope_loglog(x_values_force,y_values_force,
        x_min=qmin,
        window_width=window_width,
        stepsize=stepsize,
    )
    assert (xavg_values_force.shape[0]>0)
    #plot ^this trial against its control
    #plot fits for full model (constants from earlier full-model fits)
    m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
    m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
    M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
    M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
    # RMSE_fk=0.1252 Hz/cm^2
    # RMSE_lr=0.0974 Hz/cm^2
    # R^2=0.997 (FK)
    # R^2=0.994 (LR)
    xv=np.arange(0.1,1.,.05)
    yv_fk=M_fk*(xv)**m_fk
    yv_lr=M_lr*(xv)**m_lr
    #plot the values: left panel w vs q (log-log), right panel sliding exponent vs q
    figsize=(12,5)
    fontsize=16
    fig, axs = plt.subplots(ncols=2,figsize=figsize, constrained_layout=True)
    lw=3
    ax=axs[0]
    ax.plot(xv,yv_fk,label='Fenton-Karma',zorder=1,lw=lw,c='C0',alpha=.7)
    ax.plot(xv,yv_lr,label='Luo-Rudy',zorder=1,lw=lw,c='C1',alpha=.7)
    ax.plot(x_values,y_values,c='k',alpha=.7,label='without forces',lw=lw)
    ax.plot(x_values_force,y_values_force,c='C2',alpha=0.7,label='with forces',lw=lw)
    ax.set_xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
    ax.set_ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
    ax.tick_params(axis='both', which='major', labelsize=fontsize)
    ax.tick_params(axis='both', which='minor', labelsize=0)
    ax.legend(fontsize=fontsize-2)
    ax.set_ylim([2e-3,50])
    ax.set_yscale('log')
    ax.set_xscale('log')
    ax=axs[1]
    ax.plot(xavg_values,slope_values,lw=lw,c='k', label='without forces',alpha=.7)
    ax.plot(xavg_values_force,slope_values_force,lw=lw,c='C2', label='with forces',alpha=.7)
    ax.plot(xavg_values_force, m_fk+0.*xavg_values_force, '-',lw=lw,c='C0',label='Fenton-Karma',alpha=.7)
    ax.plot(xavg_values_force, m_lr+0.*xavg_values_force, '-',lw=lw,c='C1',label='Luo-Rudy',alpha=.7)
    ax.set_ylim([1.4,3])
    ax.set_xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
    ax.set_ylabel(r'exponent', fontsize=fontsize)
    ax.tick_params(axis='both', which='major', labelsize=fontsize)
    ax.tick_params(axis='both', which='minor', labelsize=0)
    #format title
    title=r'$D=$'+f'{D:.0f} cm'+r'$^2$/s, '+r'$\varkappa=$'+f'{varkappa:.0f} Hz, '+r'$x_0=$'+f'{x0:.0f} cm\nforce_code={int(force_code)}, neighbors={int(neighbor)}\n'
    title+=f"r={r:.1f} cm, "+r"$\kappa$="+f"{kappa:.0f} Hz, A={L**2:.0f} cm"+r"$^2$"+f'\nRank = #{j+1}'
    fig.suptitle(title, fontsize=fontsize+2)
    if not saving:
        plt.show()
    else:
        # NOTE(review): os.chdir inside a loop changes process-wide cwd — confirm acceptable
        os.chdir(savefig_folder)
        plt.savefig(savefig_fn, dpi=300)
        print(f"saved figure in {savefig_fn}")
        plt.close()
beep(7)

# +
# (commented-out legacy cell, part 1/2: an earlier single-panel version of the
#  w-vs-q plot for one trial vs its control — superseded by the two-panel figure
#  above; the remainder of the commented cell continues below)
# (commented-out legacy cells, part 2/2: the tail of the single-panel w-vs-q plot
#  and a separate exponent-vs-q plot — both superseded by the combined two-panel
#  figure produced in the loop above)
# +
#DONT: abstract both plotting methods as functions
#DONE: merge ^these to plots quickly into one
# -

#DONE: fix the y axes
#DONE(now): make ^those for 3 or so more trial settings
#TODO: assemble ^those in .odg on left side of one page
#TODO: on right side, take some Cornell notes.
#TODO(later): put all ^this into a function and automate the plotting of a given input csv that has only one trial in it.

#what are the varkappa values when force_code=2,3
set(df[df.force_code==3].varkappa.values)

# # TODO: show nearest=0,1 is not equivalent to machine precision

# +
#token long-ranged forces
# force_code_values=np.array([2,3])
# no_attraction_values=np.array([0])
# neighbor_values=np.array([0,1])
# set_second_values=np.array([0])
# varkappa_values=np.array([0.1,1.,5.,10.,20.,50.])#1/s
# x0_values=np.array([0.])#,1.0,5.0])#cm  #x0 does nothing for QED2,3

# Compare "nearest-neighbor only" (neighbor=1) against "all particles" (neighbor=0)
# at one fixed parameter setting.
# NOTE(review): this cell reuses reflect, set_second, no_repulsion, r, D and L as
# left over from the plotting loop above — it only works after that loop has run.
kappa=1500;no_attraction=0;force_code=1;neighbor=0;varkappa=-5;x0=5.
#query the DataFrame for the all-particles variant
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_neigh_0=x_values.copy()
y_values_force_neigh_0=y_values.copy()

neighbor=1
#query the DataFrame for the neighbors-only variant (same parameters otherwise)
query =(df.set_second==set_second)&(df.reflect==reflect)
query&=df.r==r
query&=df.D==D
query&=df.L==L
query&=df.kappa==kappa
query&=df.varkappa==varkappa
query&=df.x0==x0
query&=(df.no_repulsion==no_repulsion)&(df.no_attraction==no_attraction)
query&=(df.neighbor==neighbor)&(df.force_code==force_code)
dg=df[query]
x_values=dg.q.values
y_values=dg.w.values
x_values_force_neigh_1=x_values.copy()
y_values_force_neigh_1=y_values.copy()

# +
#plot fits for full model (same constants as in the loop above)
m_fk=1.945;#+-0.030; B_fk=2.441+-0.051
m_lr=1.544;#+-0.034; B_lr=5.870+-0.137
M_fk=5.67;#+-0.39 Hz*cm^{2(m-1)}
M_lr=15.37;#+-1.57 Hz*cm^{2(m-1)}
# RMSE_fk=0.1252 Hz/cm^2
# RMSE_lr=0.0974 Hz/cm^2
# R^2=0.997 (FK)
# R^2=0.994 (LR)
xv=np.arange(0.1,1.,.05)
yv_fk=M_fk*(xv)**m_fk
yv_lr=M_lr*(xv)**m_lr

fontsize=16
plt.yscale('log')
plt.xscale('log')
plt.plot(xv,yv_fk,label='FK power law fit',zorder=3,lw=4)
plt.plot(xv,yv_lr,label='LR power law fit',zorder=3,lw=4,color='C1')
fontsize=18
plt.plot(x_values_force_neigh_1,y_values_force_neigh_1,'-',c='r',alpha=.7,label=r'neighbors only',lw=3)
plt.plot(x_values_force_neigh_0,y_values_force_neigh_0,'-',c='g',alpha=.7,label=r'all particles',lw=3)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
plt.title(r'$\kappa=1500$ Hz, $\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0:.0f} cm, '+f'\nforce_code={force_code}\n',fontsize=fontsize)
plt.show()

# +
# Difference of the two curves; NOTE(review): assumes both variants share the
# same q grid point-for-point — confirm before trusting the subtraction.
fontsize=18
plt.plot(x_values_force_neigh_1,y_values_force_neigh_1-y_values_force_neigh_0,'-',c='purple',alpha=.7,label=r'neighbors only minus all particles',lw=3)
plt.xlabel(r'q (cm$^{-2}$)',fontsize=fontsize)
plt.ylabel(r'w (Hz cm$^{-2}$)', fontsize=fontsize)
plt.tick_params(axis='both', which='major', labelsize=fontsize)
plt.tick_params(axis='both', which='minor', labelsize=0)
plt.legend(fontsize=fontsize-5)
plt.title(r'$\kappa=1500$ Hz, $\varkappa=$'+f'{varkappa} Hz, '+r'$x_0=$'+f'{x0:.0f} cm, '+f'\nforce_code={force_code}\n',fontsize=fontsize)
plt.show()
# -
python/analyzing the effect of attractive force magnitude and reaction range.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,jl:hydrogen
#     text_representation:
#       extension: .jl
#       format_name: hydrogen
#       format_version: '1.3'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.8.0-DEV
#     language: julia
#     name: julia-1.8
# ---

# %%
# Benchmark three ways of viewing a 3-D array as a vector of 2-D slices:
# a comprehension (copies each slice), a comprehension of @view slices
# (no copies), and ArraysOfArrays.nestedview (a single zero-copy wrapper).
using BenchmarkTools, ArraysOfArrays

inp = rand(5, 5, 3)

# sanity check: nestedview over the last dimension equals manual slicing
@show nestedview(inp, 2) == [inp[:, :, k] for k in axes(inp, 3)]

# $-interpolation below keeps @btime from benchmarking global-variable access
print("Comprehension: ")
@btime out = [$inp[:, :, k] for k in axes($inp, 3)]
print("Comprehension with @view:")
@btime out = [@view($inp[:, :, k]) for k in axes($inp, 3)]
print("ArraysOfArrays.nestedview:")
@btime out = nestedview($inp, 2);

# %%
0014/ArraysOfArrays.ipynb
# ---
# jupyter:
#   jupytext:
#     cell_metadata_filter: all,-slideshow
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Let $C$ be the center point and $\theta$ the tilt angle; there is a corresponding
# "unit tangent vector" to the sphere with that tilt. Call this vector $v$.
# To move $C$ along $v$ a distance $D$ on a sphere of radius $R$ is
# $$
# P_1 = \cos(A) \cdot C + R\cdot \sin (A) \cdot v
# $$
# where $A$ corresponds to 90km in radians. This is `midpt_1` in code below.
# Moving in the direction $-v$ yields
# $$
# P_2 = \cos(A) \cdot C - R\cdot \sin (A) \cdot v
# $$
# which is referred to as `midpt_2` below.

import numpy as np
import functools

# a reference point (near Svalbard, per the notebook name) and a tilt of -70 degrees
lat_deg, lon_deg = 77.875, -20.975
lat, lon, R, theta = lat_deg*(2*np.pi)/360, lon_deg*(2*np.pi)/360, 6371, -70 * 2 * np.pi / 360
boulder_lat, boulder_lon = lat, lon
# NOTE(review): this file uses x = R cos(lat) sin(lon), y = R cos(lat) cos(lon) —
# a swapped convention relative to the usual one; arctan2(x, y) below matches it.
x, y, z = (R * np.cos(lat) * np.sin(lon), R * np.cos(lat) * np.cos(lon), R * np.sin(lat))
C = np.array([x,y,z])

# ## Computing $v$ from $\theta$
#
# At a point $C=[x,y,z]$, a tilt can be thought of as moving through lat and lon along
# a line with direction vector $d=(d_{lon}, d_{lat})$, so in the parameter $t$
# $$
# x(t), y(t), z(t) = (R \cos(lat_0 + t\,dlat) \cos(lon_0 + t\,dlon),\ R \cos(lat_0 + t\,dlat) \sin(lon_0 + t\,dlon),\ R \sin(lat_0 + t\,dlat))
# $$
# Differentiating with respect to $t$ (ignoring the $R$ scaling as we want normalized $v$)
# we see $v$ is parallel to
# $$
# (-\sin (lat_0) \cos(lon_0)\, dlat - \cos(lat_0) \sin(lon_0)\, dlon,\ -\sin(lat_0) \sin(lon_0)\, dlat + \cos(lat_0) \cos(lon_0)\, dlon,\ \cos(lat_0)\, dlat)
# $$

dlat, dlon = np.sin(theta), np.cos(theta)
v = np.array([-np.sin(lat) * np.sin(lon) * dlat + np.cos(lat) * np.cos(lon) * dlon,
              -np.sin(lat) * np.cos(lon) * dlat - np.cos(lat) * np.sin(lon) * dlon,
              np.cos(lat) * dlat])
v /= np.linalg.norm(v)
# tangency check: v is perpendicular to C, so this dot product should be ~0
np.sum(v*C)

# The angle $A$ is
# $$
# \frac{A}{2\pi} = \frac{90km}{2 \pi \cdot 6371km}
# $$
A = 90/R
A

midpt_1 = np.cos(A) * C + R * np.sin(A) * v
# checks: chord length, normalized dot product vs cos(A)
np.linalg.norm(midpt_1 - C), np.dot(midpt_1, C) / R**2, np.cos(A)

# To find next corner, we move $\perp$ to $v$.
# That direction can be found by $v \times P_1$.
# Let $v^{\perp}$ be the unit vector in this direction.
v_perp = np.cross(midpt_1, v) # == np.cross(C, v)
v_perp /= np.linalg.norm(v_perp)
v_perp

# We will then move 92.5km from $P_1$ in the direction
# $$
# P_2 = \cos(B) \cdot P_1 + R \cdot \sin(B) \cdot v^{\perp}
# $$
# where
# $$
# \frac{B}{2\pi} = \frac{92.5km}{6371km}
# $$

# +
B = 92.5/6371
corners = [np.cos(B) * midpt_1 + R * np.sin(B) * v_perp]
corners.append(np.cos(B) * midpt_1 - R * np.sin(B) * v_perp)

# NOTE(review): v_perp is recomputed here identically to above — redundant but harmless
v_perp = np.cross(midpt_1, v) # == np.cross(C, v)
v_perp /= np.linalg.norm(v_perp)
v_perp

midpt_2 = np.cos(A) * C - R * np.sin(A) * v
corners.append(np.cos(B) * midpt_2 + R * np.sin(B) * v_perp)
corners.append(np.cos(B) * midpt_2 - R * np.sin(B) * v_perp)
corners
# -

# all corners should lie on the sphere, i.e. have norm R
[np.linalg.norm(corner) for corner in corners]

# We can find another corner
# $$
# \cos(A') \cdot P_1 - R \cdot \sin(A') \cdot v^{\perp}
# $$
# and similarly other corners.
# ### Now convert back to lat lon

lat_degs = [np.arcsin(z_ / R) / (2 * np.pi) * 360 for x_, y_, z_ in corners]
lat_degs

# arctan2(x, y) matches the swapped x/y convention used when C was built
lon_degs = [np.arctan2(x_ / R, y_ / R) / (2 * np.pi) * 360 for x_, y_, z_ in corners]
lon_degs

# %matplotlib inline
import matplotlib.pyplot as plt

# corners of the tilted scene plus its center point
plt.scatter(lon_degs, lat_degs)
plt.scatter([lon_deg], [lat_deg])

from sklearn.neighbors import BallTree
import pandas as pd
import numpy as np

# Landsat-8 scene-center catalog; expected to have 'lat' and 'lon' columns
ls8 = pd.read_csv('./LS8.csv')

# +
#LS_rad = np.vstack((ls8.lat.values,ls8.lon.values)).T # fixed lat/lon convention
LS_rad = np.vstack((ls8.lon.values,ls8.lat.values)).T # fixed lat/lon convention
LS_rad
# -

LS_rad *= np.pi/180
LS_rad

# haversine metric expects (lat, lon) in radians;
# NOTE(review): LS_rad was stacked as (lon, lat) — confirm the ordering is intended
LSBall = BallTree(LS_rad,metric='haversine')

ls8.lat.plot()

#lat = 40
#lon = -105.27
q = np.array([[np.deg2rad(lat), np.deg2rad(lon)]])
# NOTE(review): lat/lon here are already radians from the first cell, so
# np.deg2rad is applied twice — verify which units are intended
LSBall.query(q.reshape(1,-1),k=5,return_distance=True,breadth_first=True)

lat*(2*np.pi)/360, lat, lat*180/np.pi

1.446412

ls8.loc[[14394]]

ls8.loc[[14624]]

# scene centers near Boulder, in radians (lon, lat)
boulder_scenes = np.array([[1.82882079, 0.72884239],
       [1.83717104, 0.75371663],
       [1.83235034, 0.65405287],
       [1.83991762, 0.6790087 ],
       [1.82075591, 0.70393913],
       [1.84772238, 0.70393913],
       [1.85578727, 0.72884239]])

lon, lat

# ### A representation of the scene that implements `contains`

# +
def representation(center_lon, # in radians
                   center_lat, # in radians
                   instrument_tilt, # in degrees, rotation clockwise
                   len_lon=180, # extent in km
                   len_lat=185, # extent in km
                   R=6371): # "radius" of earth
    """Build a tilted rectangular scene footprint on the sphere.

    Returns (corners_lon_lat, contains, normals, bdry_values, center_signs) where
    `contains` is a callable testing whether (lon, lat) points (radians) fall
    inside the footprint via half-space sign checks against the scene center.
    """
    tilt_deg = instrument_tilt * 2 * np.pi / 360
    x, y, z = (R * np.cos(center_lat) * np.sin(center_lon),
               R * np.cos(center_lat) * np.cos(center_lon),
               R * np.sin(center_lat))
    C = np.array([x,y,z]) # center of scene
    # unit tangent in the tilted "longitudinal" direction (see derivation above)
    dlat, dlon = np.sin(-tilt_deg), np.cos(-tilt_deg)
    dir_lon = np.array([-np.sin(center_lat) * np.sin(center_lon) * dlat + np.cos(center_lat) * np.cos(center_lon) * dlon,
                        -np.sin(center_lat) * np.cos(center_lon) * dlat - np.cos(center_lat) * np.sin(center_lon) * dlon,
                        np.cos(center_lat) * dlat])
    dir_lon /= np.linalg.norm(dir_lon)
    # half-extent angles in radians
    A = len_lon / 2 / R
    midpt_1 = np.cos(A) * C + R * np.sin(A) * dir_lon
    dir_lat = np.cross(midpt_1, dir_lon)
    dir_lat /= np.linalg.norm(dir_lat)
    B = len_lat/ 2 / R
    # the four corners: midpoints of two opposite edges +/- the perpendicular step
    corners = [np.cos(B) * midpt_1 + R * np.sin(B) * dir_lat]
    corners.append(np.cos(B) * midpt_1 - R * np.sin(B) * dir_lat)
    midpt_2 = np.cos(A) * C - R * np.sin(A) * dir_lon
    corners.append(np.cos(B) * midpt_2 + R * np.sin(B) * dir_lat)
    corners.append(np.cos(B) * midpt_2 - R * np.sin(B) * dir_lat)
    corners = np.array(corners)
    corners_lon_lat = np.array([(np.arctan2(x_ / R, y_ / R), np.arcsin(z_ / R)) for x_, y_, z_ in corners])
    # now work out halfspace
    # these are the edge segments in lon/lat space
    supports = [corners_lon_lat[0]-corners_lon_lat[1],
                corners_lon_lat[0]-corners_lon_lat[2],
                corners_lon_lat[1]-corners_lon_lat[3],
                corners_lon_lat[2]-corners_lon_lat[3]]
    # normals to each edge segment (rotate the segment by 90 degrees)
    normals = np.array([(s[1],-s[0]) for s in supports])
    pts = [corners_lon_lat[0], # a point within each edge
           corners_lon_lat[0],
           corners_lon_lat[1],
           corners_lon_lat[3]]
    bdry_values = np.array([np.sum(n * p) for n, p in zip(normals, pts)])
    # the scene center fixes which side of each edge counts as "inside"
    center_values = [np.sum(n * [center_lon, center_lat]) for n in normals]
    center_signs = np.sign(center_values - bdry_values)
    def _check(normals, center_signs, bdry_values, lon_lat_vals):
        # inside iff each point is on the same side of every edge as the center
        normal_mul = np.asarray(lon_lat_vals).dot(normals.T)
        values_ = normal_mul - bdry_values[None,:]
        signs_ = np.sign(values_) * center_signs[None,:]
        return np.squeeze(np.all(signs_ == 1, 1))
    _check = functools.partial(_check, normals, center_signs, bdry_values)
    return corners_lon_lat, _check, normals, bdry_values, center_signs
# -

# ## All scenes containing Boulder

# +
for i, scene in enumerate(boulder_scenes):
    lon, lat = scene
    corners, contains, normals, bdry_values, center_signs = representation(lon, lat, -theta*360/(2*np.pi))
    if contains([boulder_lon, boulder_lat]):
        plt.scatter(corners[:,0], corners[:,1], label='Scene %d' % i)
plt.scatter([boulder_lon], [boulder_lat], marker='+', label='Boulder', s=100)
plt.legend()

boulder_lon, boulder_lat
# - # ### What needs to be stored # # - We need to store `normals`, `bdry_values` and `center_signs` for each scene. # ### How `contains` is determined # # - Function can check several query points at once.... # # + def _check(normals, center_signs, bdry_values, lon_lat_vals): normal_mul = np.asarray(lon_lat_vals).dot(normals.T) values_ = normal_mul - bdry_values[None,:] signs_ = np.sign(values_) * center_signs[None,:] return np.squeeze(np.all(signs_ == 1, 1)) import functools contains = functools.partial(_check, normals, center_signs, bdry_values) # - # ### Scenes not containing Boulder # + for i, scene in enumerate(boulder_scenes): lon, lat = scene corners, contains, normals, bdry_values, center_signs = representation(lon, lat, -theta*360/(2*np.pi)) if not contains([boulder_lon, boulder_lat]): plt.figure() plt.scatter(corners[:,0], corners[:,1], label='Scene %d' % i) plt.scatter([boulder_lon], [boulder_lat], marker='+', label='Boulder', s=100) plt.legend() boulder_lon, boulder_lat # -
examples/Contains_for_Landsat_Svalbard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="http://cfs22.simplicdn.net/ice9/new_logo.svgz "/>
#
# # Assignment 02: Evaluate the Diabetes Dataset
#
# Fit a logistic-regression classifier to the Pima Indians diabetes data and
# report its accuracy on a held-out test split.
#
# * * *

# #### 1: Import the dataset

# pandas handles the CSV loading and column selection
import pandas as pd

# The raw .data file ships without a header row, so read it header-less first.
diabetes = pd.read_csv(
    '/Users/ronald.garciarobles/Ron/Architecture/Artificial Intelligence/AI Engineer/Assignments/ML_Assignment2/pima-indians-diabetes.data',
    header=None,
)

# #### 2: Analyze the dataset

# Peek at the first rows to confirm the file parsed as expected.
diabetes.head()

# #### 3: Find the features of the dataset

# Column names taken from the accompanying .NAMES file.
feature_names = ['Pregnant','glucose','bp','skin','insulin','bmi','pedigree','age','label']

# Re-read the file, this time attaching the proper column headers.
diabetes = pd.read_csv(
    '/Users/ronald.garciarobles/Ron/Architecture/Artificial Intelligence/AI Engineer/Assignments/ML_Assignment2/pima-indians-diabetes.data',
    header=None,
    names=feature_names,
)

# Confirm the headers took effect.
diabetes.head()

# Observation and feature counts.
diabetes.shape

# #### 4: Find the response of the dataset

# Subset of columns used as model inputs.
selected = ['Pregnant','insulin','bmi','age']

# Feature matrix and response vector.
X_feature = diabetes[selected]
Y_target = diabetes['label']

# Shapes of the feature matrix ...
X_feature.shape

# ... and of the target vector.
Y_target.shape

# #### 5: Use training and testing datasets to train the model

# Hold out a test set; random_state pins the split for reproducibility.
from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(X_feature, Y_target, random_state=1)

# #### 6: Create a model to predict the diabetes outcome

# Train a logistic-regression classifier on the training split.
from sklearn.linear_model import LogisticRegression

classifier = LogisticRegression()
classifier.fit(x_train, y_train)

# Predict labels for the held-out test split.
y_pred = classifier.predict(x_test)
y_pred

# #### 7: Check the accuracy of the model

# Fraction of test predictions that match the true labels.
from sklearn import metrics

print(metrics.accuracy_score(y_test, y_pred))

# Side-by-side look at the first 30 true vs predicted labels.
print('actual: ', y_test.values[0:30])
print('predicted: ', y_pred[0:30])
Assignments/ML_Assignment2/Assignment 02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''base'': conda)' # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 460} id="SY4BQvbxxIUI" executionInfo={"status": "error", "timestamp": 1629214795005, "user_tz": -540, "elapsed": 3177, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="2af5effb-ccb6-48e3-9910-973071f9f77b" import tensorflow as tf import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder import pandas as pd gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) # 데이터 입력 df = pd.read_csv('../dataset/iris.csv', names = ["sepal_length", "sepal_width", "petal_length", "petal_width", "species"]) # + id="6AUdjc9pxLgE" # 데이터 분류 dataset=df.copy() # 데이터 분류 Y_obj=dataset.pop("species") X=dataset.copy() # 문자열을 숫자로 변환 Y_encoded=pd.get_dummies(Y_obj) # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="kwOKnzVGxNPY" executionInfo={"status": "error", "timestamp": 1629214797434, "user_tz": -540, "elapsed": 426, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="cfb7379b-46bb-4cdf-b280-0f2ce56f24fa" # 전체 데이터에서 학습 데이터와 테스트 데이터(0.1)로 구분 X_train1, X_test, Y_train1, Y_test = train_test_split(X, Y_encoded, test_size=0.1,shuffle=True, stratify=Y_encoded) ## shuffle=True로 하면 데이터를 섞어서 나눔 ## 학습 셋에서 학습과 검증 데이터(0.2)로 구분 X_train, 
X_valid, Y_train, Y_valid = train_test_split(X_train1, Y_train1, test_size=0.2, shuffle=True, stratify=Y_train1) ## shuffle=True로 하면 데이터를 섞어서 나눔 # + colab={"base_uri": "https://localhost:8080/"} id="nJ0DTC4LxOdy" executionInfo={"status": "ok", "timestamp": 1629214802533, "user_tz": -540, "elapsed": 425, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="dbbc9663-e41c-444b-997a-50ae902bb89a" # 모델의 설정 activation=tf.keras.activations.sigmoid input_Layer = tf.keras.layers.Input(shape=(4,)) x = tf.keras.layers.Dense(16, activation=activation,)(input_Layer) x = tf.keras.layers.Dense(12, activation=activation)(x) Out_Layer= tf.keras.layers.Dense(3, activation='softmax')(x) model = tf.keras.models.Model(inputs=[input_Layer], outputs=[Out_Layer]) model.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 205} id="vo59ZUgCxP6b" executionInfo={"status": "error", "timestamp": 1629214826855, "user_tz": -540, "elapsed": 590, "user": {"displayName": "\uae40\ud604\uc6b0", "photoUrl": "", "userId": "06560543018646300359"}} outputId="9c019062-dbcc-42ba-f324-6981c11560da" # 모델 컴파일 loss=tf.keras.losses.categorical_crossentropy optimizer = tf.keras.optimizers.SGD(learning_rate=0.01) metrics=tf.keras.metrics.categorical_accuracy model.compile(loss=loss, optimizer=optimizer, metrics=[metrics]) ## model fit은 histoy를 반환한다. 훈련중의 발생하는 모든 정보를 담고 있는 딕셔너리. result=model.fit(X_train, Y_train, epochs=50, batch_size=50, validation_data=(X_valid,Y_valid)) # validation_data=(X_valid,Y_valid)을 추가하여 학습시 검증을 해줌. # + id="yy42GZZrxVzb" ### model.save('iris_multi_model.h5') # + id="Mcm7auvDxXSC" ## histoy는 딕셔너리이므로 keys()를 통해 출력의 key(카테고리)를 확인하여 무엇을 받고 있는지 확인. 
print(result.history.keys()) ### result에서 loss와 val_loss의 key를 가지는 값들만 추출 loss = result.history['loss'] val_loss = result.history['val_loss'] ### loss와 val_loss를 그래프화 epochs = range(1, len(loss) + 1) plt.subplot(211) ## 2x1 개의 그래프 중에 1번째 plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() ### result에서 binary_accuracy와 val_binary_accuracy key를 가지는 값들만 추출 acc = result.history['categorical_accuracy'] val_acc = result.history['val_categorical_accuracy'] ### binary_accuracy와 val_binary_accuracy key를 그래프화 plt.subplot(212) ## 2x1 개의 그래프 중에 2번째 plt.plot(epochs, acc, 'ro', label='Training acc') plt.plot(epochs, val_acc, 'r', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.legend() # model.evalueate를 통해 테스트 데이터로 정확도 확인하기. ## model.evaluate(X_test, Y_test)의 리턴값은 [loss, binary_acuuracy ] -> 위 model.compile에서 metrics=[ keras.metrics.binary_accuracy]옵션을 주어서 binary acuuracy 출력됨. print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1])) ## 그래프 띄우기 plt.show()
tensorflow/day3/practice/P_03_03_iris_multi_classification_save_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# [![image](https://colab.research.google.com/assets/colab-badge.svg)](https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/20_planet_imagery.ipynb)
# [![image](https://binder.pangeo.io/badge_logo.svg)](https://gishub.org/geemap-pangeo)
#
# Uncomment the following line to install [geemap](https://geemap.org) if needed.

# +
# # !pip install geemap
# -

import os
import geemap

# +
# geemap.update_package()
# -

# First, you need to sign up a Planet account and get an API key. See https://developers.planet.com/quickstart/apis.
# Uncomment the following line to pass in your API key.

# +
# os.environ["PLANET_API_KEY"] = "12345"

# +
# Choose the map backend: ipyleaflet by default, folium when USE_FOLIUM is set
# (any value — only the variable's presence is checked).
tile_format = "ipyleaflet"
if os.environ.get("USE_FOLIUM") is not None:
    tile_format = "folium"

# +
# geemap.planet_quarterly()

# +
# geemap.planet_monthly()

# +
# geemap.planet_catalog()
# -

# Fetch the available Planet basemap tile layers.
quarterly_tiles = geemap.planet_quarterly_tiles(tile_format=tile_format)
monthly_tiles = geemap.planet_monthly_tiles(tile_format=tile_format)

for tile in quarterly_tiles:
    print(tile)

for tile in monthly_tiles:
    print(tile)

# Map with a single monthly Planet mosaic (August 2020).
m = geemap.Map()
m.add_planet_by_month(year=2020, month=8)
m

# Map with a single quarterly Planet mosaic (Q2 2019).
m = geemap.Map()
m.add_planet_by_quarter(year=2019, quarter=2)
m
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="TH_2XqqxC79_" # # Part 1 - Setup # ## Start by Cloning the "Python Communist Revolution" Library # + colab={"base_uri": "https://localhost:8080/"} id="mdrs5X5HC2nO" outputId="fd0f08c0-f9eb-497c-a409-eb4dbaf98136" # !git clone https://github.com/jokteur/python_communism.git # + [markdown] id="4YeQbVTzDZpY" # ## Install the newly cloned library using pip # + colab={"base_uri": "https://localhost:8080/"} id="km5rg-z5DNva" outputId="605a11f1-9962-49e6-89ff-2c866b714fbe" # !pip install ./python_communism/ # + [markdown] id="YwbHqCv1GQwI" # ## Import the library # + id="R7tFu3PTDyXV" import communism # + [markdown] id="Lo1bPkoyGm-I" # # Part 2 - Start the Communist revolution # # ## Start the revolution # # *Both for the production on a mass scale of this communist consciousness, and ... the alteration of men on a mass scale is, necessary, ... a revolution; this revolution is necessary, therefore, not only because the ruling class cannot be overthrown in any other way, but also because the class overthrowing it can only in a revolution succeed in ridding itself of all the muck of ages and become fitted to found society anew.* # # *Marx, German Ideology (1845)* # + [markdown] id="rjAGKC4yUsUv" # ## Let's create some classes. Classes are the foundation of society, but they result in a huge inequality that's why we will try to abolish them. # + id="4Vn6p0fiJl3e" class OrdinaryPeople: def __str__(self): return 'We are the normal people.' 
class WorkingClassHeroes: def __str__(self): import base64 import zlib compressed_data = b'x\xda\xbd\x95O\x8f\x9b0\x10\xc5\xef\xfe4\x046\xd2\xe6\xb8\xa0\x8d\x03\xd9 %Q\xf8\xe3\x1b\xc6+g\x17\x1bPIH\xe0\xd3\xf7\x19H\xbaU\x95V\xed\xa1\xb7\xc863o~\xf3f\xb2M62\xb7\x17-\xffp\x8fy\xe7\xce\xb9s(s}\x90\xa9^t\xfc\xe3"\x05=\xaa\xf7\xbd{J\xe3\x93\xf2W\xaa\x15{W\xb38j\xfcU\x88\xb3k\xe3\x15\xc1\xdc\xa7\xa2\x16Z\x9d\t\x1b\x03H\xae\x9f\xf1\xa1:\xb1\xbd[\xf32\xb4X\xbc,|\xba\xd0>U\x96O\x97\r\x7f\xa9\xca\x88*\x04\xd9UxsIcu\xc6]\x8f\xa0\xadO\x83\xda\xc4!&\x10\xd3\x91\xe2\x9e{\xe6\xce\xae\xca\xe2y\x89\x8f\xa7\x00\xb5\xeb\xafD\x9b\xebS\xcdu.S\xfbz\xcc\x9d\x8d\xcch\xd4\xf1\xce\xadQI\xcf\xed\x99\x82\xfa\xe1\x9e\x08\xfa\x8c\x92\x0e\xeb\xed\xdeu\xb8\x13|\x1b\x83\x85M\x9a\x84\xbd\x8f\x12s(\xceF\x14\'\x96L\xc9V;\xa3Fy\xb2ZG#\x86J$\x81E&\x0eG\xe1\xb9\x95\xc9\x02Eg\xe6\xb9\x16\xb2\x83\xc5\xb1\xc6\xf9\x80!M\xb6\x08\x18\xe2\xcd\xa2\xf1\x14JM\n(\\Z(y\xb8\xcf\xe2T\x92\xf7x1\x0b\x9c@\x195,a*G#\xb8\xde\xde\xc0\x17,\t/Y\x12\xe2\xfe\x158\xd0\xa8;;#Bt\x08\xd6\xa3d-\xe2\xf0\x1b\x99\xca\xea\xd2\xa4\x9eO*?\xd3\xf8\xa9\x84"\xcd\xedk\xc3\x9d|\x08\x9c%\xbe\xcc\xcb\x08\t7\x0f\x91\x90\xdf2)^\xa5\xb0\x17]f\xab3\xba\xf5\xc9\xe9\xb2\x07t\x94\x18\xb4\xb7N\xf28\x02\x92\xe1\xde"\x90\xf8\xc1\xb6\xd5\xdaS\xa2b\xf1\xd3T\x9e\xb0!\xdfBv\x0b`\x15\x18\x0e\xa5\x83\xd91\xd7Qq\xf3\x1bw|\xb9\xf9|\x91\xdc\xde\x993E\xd2$\xe8\x07\xa0\xf7@\xaez_\xb9*u\xb6\xd2\xf0\x1cT\xae\xdc:\xb5\x1b\x99\x1a\x04&x\xe2\xaf#\xfb\xa8`\xf4\xb19\xf6\xf2\x1c\xe0=\xca\x8c\x8e\x9c^\xd196\xe3:\xb4\xb2xq~\xf3~LBn?K\x06V\xf0a\xcb>\x8c\xfb\x97\xdd\xa3\xf2\xc9\xa3\xfa\xc7\xf2\xff\xd1\xb4\x9e\xac\xdf0s\x97\x89G\xc1\x1d\xd7\xb0r24\xc5_\x05\x8aSU\x1a\xd5\x93\x0f{\x96H\x99\xc6\xf3\xc2_\xeeb\xaf\x18\xce\xc6q\x1a#7\xf2\xee\xb9a\xde~\xf6\xdd\xa8\xca\x9c\x19\x1b\x0c\x1fk 
B\xc7\x83\xd9d\xec\x92\x0c\\\x9c]\xcd\xe9\xc5\xb0\xf9\x9c8\\\xc0\xa6Gf\x0b\xa5\x98m\xa2S\xf8,\x85\x85\xf6\xa3\x0f\xc12\xfa\xc5:\xe4O\xde\xf9\x1bv\xe4\x06\xef\x0b\xbb\x83y\xc4\xf6\x02\x81\x83\x96\xdb\xd60\x97\xc66\xc6&\xf9}n\x97f\x94\xb0\xaa\xae\xcdht\xd5\x12\xc0\xee\xb1\xc7\xcc\xba\xf1\x04>bZu\xf9\xcdct63\xbf9\x8d`\xd6\'(Y8\xa3\xef\xc2S\x16_\xcdR\xe8\xa7\x86a\xb4\xae\r\xf1\n\xa5\xa7\x03\xc70\xba\x8d\x92O\xafuf\x1fFE\x94\xb5\x9c\x9e\x00\x1e\xdd\x1c\x97c\x95\r[\xf6?3\x0b\xd84&\x02c\x82\xb6\x8f\x01\xb1\xf3\xa6e\x89\xf5#\xe0\xbbK\xe3\xd3z\xc0\x80\xd5\xd4p \xe0\xa2\x1e\x97\xfa:e\xc7\x1f\x8bmF\xea"\xb32\xea\xbf\xae\'\x1e\x1f\xd6\xe4;\'\x1cA%' uncompressed_string = str(base64.decodebytes(zlib.decompress(compressed_data)), encoding='ascii') return uncompressed_string class Intellectuals: def __str__(self): return 'We are the Intellectuals, pretentious and we know it.' class Elites: def __str__(self): # according to a new report highlighting the growing gap between the super-rich and everyone else # https://www.theguardian.com/inequality/2017/nov/14/worlds-richest-wealth-credit-suisse return 'We are the richest 1% that own half the world\'s wealth.' 
# + [markdown] id="mjjZ62sFVGi_" # ## Now that the classes are created, look how Python internally stores them # + colab={"base_uri": "https://localhost:8080/"} id="DkKKwQPEVMiI" outputId="54226931-6790-47fc-eb9d-1df7e13a1db6" print('That\'s how Python works internally, it stores the variables and classes you define inside a dictionary.\n\n') # Find the intersection between the global variables dictionnary and our set of class names # Similar to globals().keys().intersect(('OrdinaryPeople', 'WorkingClassHeroes', 'Intellectuals', 'Elites')) our_vars = globals().keys() & {'OrdinaryPeople', 'WorkingClassHeroes', 'Intellectuals', 'Elites'} for k in our_vars: v = globals().get(k) print(f'--- The class {k} is now stored in the current scope\'s global variables dictionnary and represented as:\n{v}\n') # + [markdown] id="rKEI7awuZ92e" # ## Look what each class say about themselves # + colab={"base_uri": "https://localhost:8080/"} id="kNcTllHUaCb6" outputId="70c9f8c6-ab97-4406-caad-0e0798883f62" print(f'OrdinaryPeople say: {OrdinaryPeople()}', end='\n\n') print(f'Intellectuals say: {Intellectuals()}', end='\n\n') print(f'Elites say: {Elites()}', end='\n\n') # + [markdown] id="qa1UN4waaa0K" # ## Can you guess what the WorkingClassHeroes say?? 
# # uncomment the line below to know # + id="JaScQ0DGaYWX" # print(f'WorkingClassHeroes say: {WorkingClassHeroes()}', end='\n\n') # + [markdown] id="hSTBopN-XVQt" # ## Let's look at the inequality between classes # + colab={"base_uri": "https://localhost:8080/"} id="r8b_U0NQXgew" outputId="0218f868-0395-4772-9e99-e2042ab41743" print("Are OrdinaryPeople equal to the WorkingClassHeroes?") print(f"OrdinaryPeople() == WorkingClassHeroes() => {OrdinaryPeople() == WorkingClassHeroes()}", end='\n\n') print("Are OrdinaryPeople equal to the Elites ?") print(f"OrdinaryPeople() == Elites () => {OrdinaryPeople() == Elites()}", end='\n\n') print("Are OrdinaryPeople equal to the Intellectuals?") print(f"OrdinaryPeople() == Intellectuals() => {OrdinaryPeople() == Intellectuals()}", end='\n\n') print("Are WorkingClassHeroes equal to the Elites ?") print(f"WorkingClassHeroes() == Elites () => {WorkingClassHeroes() == Elites()}", end='\n\n') print("Are WorkingClassHeroes equal to the Intellectuals?") print(f"WorkingClassHeroes() == Intellectuals() => {WorkingClassHeroes() == Intellectuals()}", end='\n\n') print("Are Intellectuals equal to the Elites ?") print(f"Intellectuals() == Elites () => {Intellectuals() == Elites()}", end='\n\n') # + [markdown] id="ElJ43b0TZVgi" # ## Now the revolution will kick-in! 
# + colab={"base_uri": "https://localhost:8080/"} id="hsATqOL2JKJq" outputId="21b877ab-58ae-4cce-9f8e-8fc3e42100b6" communism.revolution(globals()) # + [markdown] id="bIgeXuYbZjnf" # ## Let's look again if inequality remains # + colab={"base_uri": "https://localhost:8080/"} id="N5Y-0qEtZpQ-" outputId="95afa13d-cd33-4850-ed02-20b99cc54b7e" print("Are OrdinaryPeople equal to the WorkingClassHeroes?") print(f"OrdinaryPeople() == WorkingClassHeroes() => {OrdinaryPeople() == WorkingClassHeroes()}", end='\n\n') print("Are OrdinaryPeople equal to the Elites ?") print(f"OrdinaryPeople() == Elites() => {OrdinaryPeople() == Elites()}", end='\n\n') print("Are OrdinaryPeople equal to the Intellectuals?") print(f"OrdinaryPeople() == Intellectuals() => {OrdinaryPeople() == Intellectuals()}", end='\n\n') print("Are WorkingClassHeroes equal to the Elites ?") print(f"WorkingClassHeroes() == Elites() => {WorkingClassHeroes() == Elites()}", end='\n\n') print("Are WorkingClassHeroes equal to the Intellectuals?") print(f"WorkingClassHeroes() == Intellectuals() => {WorkingClassHeroes() == Intellectuals()}", end='\n\n') print("Are Intellectuals equal to the Elites ?") print(f"Intellectuals() == Elites() => {Intellectuals() == Elites()}", end='\n\n') # + [markdown] id="rDP5zlllZsq1" # ## Now all classes are equal in the eye of Python 😀 # + [markdown] id="DbyGaIZPkN0y" # ## But wait there's a catch here... # # let's look at the new rising classes # + id="f5hHqpFTR70m" class RisingElites: pass class Oligarchs: pass class FactoryWorkers: pass # + [markdown] id="_8fWEbbkmvRv" # ## The new classes that surfaced after the revolution, will give the impression that they are equal to the other classes. But when we compare them together, we notice that inequality is back again. 
# + colab={"base_uri": "https://localhost:8080/"} id="6sWeMYHOm8-I" outputId="bc16fa32-ef4b-4bef-97d6-bb5763805492" print(f'The RisingElites will claim that they are equal to the OrdinaryPeople.') print(f"RisingElites() == OrdinaryPeople() => {RisingElites() == OrdinaryPeople()}", end='\n\n') print(f'But when we compare the RisingElites to the FactoryWorkers, we discover that they are not equal.') print(f"RisingElites() == FactoryWorkers() => {RisingElites() == FactoryWorkers()}", end='\n\n') print(f'Same for the new Oligarchs and FactoryWorkers:') print(f"Oligarchs() == FactoryWorkers() => {Oligarchs() == FactoryWorkers()}", end='\n\n') # + [markdown] id="3zmTK7-woGVj" # ## Did we just crush a group of people, and left room from new inequalities to surface? # + [markdown] id="PSedUFBvoVtV" # If you look how the [communism](https://github.com/jokteur/python_communism/blob/main/communism.py) library works, it actually crushes the classes that already exists in the global scope, but it has no effect on the new classes that are created after the revolution (after calling the *revolution()* function) # + [markdown] id="j1PGjiglo4Dl" # # Part 3 - Key takeaways # + [markdown] id="mtMjLOvtpkom" # * Revolutions that rely only on crushing others and setting one class against the other are destined to fail. # The cultural revolution is a good example # + [markdown] id="OGYLQ5PLqov8" # * Overriding the \_\_eq\_\_() and \_\_hash\_\_() functions of a class can give the impression of equality but deepper issues will arise # + [markdown] id="pdpjq1fZrQME" # * Perhaps the old models have proved not to be very suitable for our days to shape a global society that is as dynamic and as exposed as ever. # + id="wI0fzQEdltGJ"
The_Python_Communist_Revolution_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Feature engineering for the hackathon-2021 store-absence prediction task:
# load the raw training data, derive per-store features, and build the
# `absence_future` target (consecutive days of zero supply after each date).

# +
import pandas as pd
import numpy as np

# Visualisation
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import matplotlib.image as mpimg

# Data preparation
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split

# Models
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error
from sklearn.inspection import permutation_importance
import time

# Display option: show every column of wide frames.
pd.set_option('display.max_columns', None)
# -

t0 = time.time()

df0 = pd.read_csv('/Users/lilian/Desktop/hackathon2021/data/train.csv')
df = df0.copy()

# Keep only a random subset of ~600 stores so the row-wise feature
# computations below stay tractable. NOTE: sample() is unseeded, so this
# subset is not reproducible between runs.
df = df[df['store_id'].isin(list(df['store_id'].sample(600)))].copy()
df.shape

df.sample()

# ### Building useful variables from the existing ones

# +
# Mean rating
def compute_note(overall, count):
    """Return the mean rating (sum of ratings / rating count), or NaN.

    BUG FIX: the original computed `overall / count` inside the try block but
    never returned it, so every row ended up with None instead of the mean.
    """
    try:
        return overall / count
    except (ZeroDivisionError, TypeError):
        return np.nan

df['note_moyenne'] = df.apply(lambda row : compute_note(row.sum_rating_overall, row.rating_count) , axis = 1)

# +
# Lifetime: span between a store's first and last saved meal.
col_date = ['date', 'items_first_enabled_date', 'store_last_saving_date', 'store_first_saving_date',\
            'pickup_start', 'pickup_end']

for col in col_date :
    df[col]= pd.to_datetime(df[col])

df['lifetime'] = df['store_last_saving_date'] - df['store_first_saving_date']

# +
# Discount relative to the regular price.
df['reduction'] = 1 - df['item_price'] / df['before_price']

# +
# Daily opening duration.
df['temps_ouverture'] = df['pickup_end'] - df['pickup_start']

# +
# Opening hour of the pickup window.
df['heure_debut_ouverture'] = df.apply(lambda row : row.pickup_start.hour, axis = 1)

# +
# Efficiency: share of the supplied meals actually saved.
df['efficacite'] = df['meals_saved'] / df['total_supply']

# +
# Franchise flag: 1 when the store belongs to a parent chain.
df['franchise'] = df.apply(lambda row : int(row.parent_chain_id > 0), axis = 1)
# -

# ### Creating new variables

# ## Recoding the target variable

def determine_absence_future(date, store_id):
    """Count the consecutive days with zero supply right after `date` for this store."""
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] > date]
    serie = list(df_short['total_supply'])
    i = 0
    # BUG FIX: the original condition was `serie[i] == 0 and i < l` (bounds
    # check last), so a store whose supply stays at zero until the end of the
    # observed window raised IndexError and was reported as 0 ("active
    # tomorrow") by the bare except, instead of the true run length.
    while i < len(serie) and serie[i] == 0:
        i += 1
    return i

df['absence_future'] = df.apply(lambda row : determine_absence_future(row.date, row.store_id), axis = 1)

# +
# Variance of a shop's sales before `date`.
def compute_variance(date, store_id, variable):
    """Variance of `variable` over all of this store's rows strictly before `date`."""
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] < date]
    return df_short[variable].var()

df['variance_ventes'] = df.apply(lambda row : compute_variance(row.date, row.store_id, 'meals_saved'), axis = 1)

# +
# Sales drop over the previous month.
def compute_baisse_vente(date, store_id, variable):
    """1 if the mean of `variable` over the last 30 rows before `date` is below the earlier mean, else 0."""
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] < date]
    serie = df_short[variable]
    try:
        b = int(np.mean(serie[-30:]) / np.mean(serie[:-30]) < 1)
    except Exception:
        # Not enough history (or division by zero / NaN): default to 0.
        b = 0
    return b

df['baisse_ventes'] = df.apply(lambda row : compute_baisse_vente(row.date, row.store_id, 'meals_saved'), axis = 1)

# +
# Discount increase over the previous week.
def compute_hausse_reduction(date, store_id, variable):
    """1 if the mean of `variable` over the last 7 rows before `date` exceeds the earlier mean, else 0.

    BUG FIX: the original had a leftover debug statement `if b: print(ok)`
    here; `ok` is undefined, so every positive case raised NameError, fell
    into the bare except and was silently reset to 0 — the feature was
    therefore always 0.
    """
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] < date]
    serie = df_short[variable]
    try:
        b = int(np.mean(serie[-7:]) / np.mean(serie[:-7]) > 1)
    except Exception:
        b = 0
    return b

df['hausse_reduction'] = df.apply(lambda row : compute_hausse_reduction(row.date, row.store_id, 'reduction'), axis = 1)

# +
# Variance of the daily opening duration.
# NOTE: this redefinition shadows compute_variance() above (which has already
# been applied, so no harm); it is kept disabled as in the original, and the
# `display()` call is a notebook-only debug helper.
def compute_variance(date, store_id, variable):
    """Variance of the opening duration (in hours) before `date` — currently unused."""
    df_short = df[df['store_id'] == store_id]
    #print(df_short.shape)
    df_short = df_short[df_short['date'] < date]
    display(df_short)
    serie = df_short[variable]
    #print(serie)
    variance = np.var([(i.seconds / 3600) for i in serie])
    return variance

#df['variance_duree_ouverture'] = df.apply(lambda row : compute_variance(row.date, row.store_id, 'temps_ouverture'), axis = 1)

# +
# Drop of the mean rating over the previous week.
def compute_baisse_note(date, store_id, variable):
    """Truthy if the mean of `variable` over the last 7 rows before `date` is below the earlier mean.

    NOTE(review): unlike the other indicator features this returns a numpy
    bool rather than an int; downstream arithmetic treats both the same.
    """
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] < date]
    serie = df_short[variable]
    try:
        b = np.mean(serie[-7:]) / np.mean(serie[:-7]) < 1
    except Exception:
        b = 0
    return b

df['baisse_note'] = df.apply(lambda row : compute_baisse_note(row.date, row.store_id, 'note_moyenne'), axis = 1)

# +
# Increase of unsold meals over the previous month.
def compute_hausse_invendus(date, store_id, variable):
    """Truthy if the mean of `variable` over the last 30 rows before `date` exceeds the earlier mean."""
    df_short = df[df['store_id'] == store_id]
    df_short = df_short[df_short['date'] < date]
    serie = df_short[variable]
    try:
        b = np.mean(serie[-30:]) / np.mean(serie[:-30]) > 1
    except Exception:
        b = 0
    return b

# BUG FIX: the original cell re-assigned `hausse_reduction` with the exact
# same expression as the previous cell (copy-paste), so
# compute_hausse_invendus was never called and no `hausse_invendus` column
# was ever created. "Invendus" (unsold meals) is taken here as supply minus
# saved meals — TODO confirm this definition against the data dictionary.
df['invendus'] = df['total_supply'] - df['meals_saved']
df['hausse_invendus'] = df.apply(lambda row : compute_hausse_invendus(row.date, row.store_id, 'invendus'), axis = 1)
# Helper column only; drop it so the downstream schema is unchanged.
df = df.drop(columns = ['invendus'])
# -

# ### Dropping the uninteresting variables

df.columns

df = df.drop(columns = ['parent_chain_id', 'store_country', 'country_iso_code', 'region_id',
       'store_activity_name', 'item_id', 'item_name', 'currency_code',
       'pickup_end', 'pickup_start', 'declared_supply', 'manual_removed_supply',
       'store_cancellation', 'item_price', 'meals_refunded', 'rating_count',
       'sum_rating_overall', 'item_view', 'no_unique_consumers', 'is_enabled',
       'Département', 'store_id', 'target'])

df.sample(10)

# Elapsed time in minutes so far.
print((time.time() - t0)/ 60)

# ### Formatting the data

categ_var = ['store_region', 'store_segment']
# One-hot encode the categorical columns, dropping each original column
# after expansion.
for var in categ_var:
    df = pd.concat([df, pd.get_dummies(df[var], prefix = var)], axis = 1).drop(columns = [var])

# Lifetime as an integer number of days.
df['lifetime'] = df['lifetime'].dt.days

# +
# Re-express every date column as "days since the earliest first-enabled date".
m = min(df['items_first_enabled_date'])
date_m = df['date'] - m
items_first_enabled_date_m = df['items_first_enabled_date']-m
store_first_saving_date_m = df['store_first_saving_date']-m
store_last_saving_date_m = df['store_last_saving_date']-m

# NOTE(review): these four drop() calls are no-ops — the result is never
# assigned (and the same columns are overwritten just below anyway).
df.drop(columns = ['date'])
df.drop(columns = ['items_first_enabled_date'])
df.drop(columns = ['store_first_saving_date'])
df.drop(columns = ['store_last_saving_date'])

df['date'] = date_m.apply(lambda x: x.days)
df['items_first_enabled_date'] = items_first_enabled_date_m.apply(lambda x: x.days)
df['store_first_saving_date'] = store_first_saving_date_m.apply(lambda x: x.days)
df['store_last_saving_date'] = store_last_saving_date_m.apply(lambda x: x.days)

# Opening duration converted to hours.
df['temps_ouverture'] = df['temps_ouverture'].apply(lambda x: x.seconds/3600)
# -

df

df = df.drop(columns = ['before_price', 'note_moyenne', 'reduction', 'efficacite'])

df = df.dropna()

# +
# Min-max scale every column.
# NOTE(review): this also rescales the target `absence_future`, so all MSE
# figures below are in scaled units — confirm this is intended.
min_max_scaler = preprocessing.MinMaxScaler()
df[list(df.columns)] = min_max_scaler.fit_transform(df[list(df.columns)])
# -

df

df.to_csv('df_calculee')

# ## Applying the models

# +
# Define the samples.
y = df[['absence_future']]
X = df.drop(columns = ['absence_future'])

# Hold out 20 % as a validation sample.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.2)
# -

# Number of features.
N = X.shape[1]

# ## Linear regression

# +
lin_reg = LinearRegression().fit(X_train, y_train)

# Predict on the test sample to compute the scores.
y_pred = lin_reg.predict(X_test)

# +
# Bar chart of the fitted coefficients, one bar per feature.
px.histogram(pd.DataFrame([
    (X.columns[i], lin_reg.coef_[0][i])
    for i in range(N)
]).T.rename(index = {0 : 'variable', 1 : 'coeff'}).T,
    x = 'variable',
    y = 'coeff',
    histfunc = 'sum'
).show()

print('MSE :', mean_squared_error(y_test, y_pred))

# +
# Display the p-values of an OLS fit on the full data.
mod = sm.OLS(y,X)
fii = mod.fit()
p_values = fii.summary2().tables[1]['P>|t|']
pd.DataFrame(p_values).T

# +
EN_reg = ElasticNet(alpha=.1, copy_X=True, fit_intercept = False, l1_ratio=.031)
EN_reg.fit(X_train, y_train)
# -

y_pred = EN_reg.predict(X_test)
print('MSE : ', mean_squared_error(y_test, y_pred))

pd.DataFrame([(X.columns[i], EN_reg.coef_[i]) for i in range(N)]).T.rename(index = {0 : 'variable', 1 : 'coeff'})

# ## RF

# +
# First define a function that ranks feature importance for a given model.
def feat_importance(model, x_train, y_train, X):
    """Return the feature-importance table for `model`, computed with the
    permutation method, sorted by decreasing importance.

    NOTE(review): this reads the module-level `y` (together with the `X`
    argument) and ignores its `x_train`/`y_train` parameters — confirm that
    is intended.
    """
    result = permutation_importance(
        model, X, y,
        n_repeats = 3,
        random_state = 0
    )['importances_mean']
    importance = pd.DataFrame(result, index = X.columns, columns = ["Importance"])
    return importance.sort_values(by = ['Importance'], ascending = False)

# +
# Build train/test sets as plain arrays; 30 % test size here.
X = df.drop(['absence_future'], axis = 1)
x = np.array(X)
y = np.array(df['absence_future'])

# random_state is the seed generating the random split.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 41)

# +
# Maximum decision-tree depths to try.
max_depth_ls = [1, 10, 13, 15, 17, 20, 25, 30]
mse_train_max_depth = []
mse_test_max_depth = []

# Fit a random forest for each maximum depth.
for m in max_depth_ls :
    print('Profondeur téstée : ', m)
    rf = RandomForestRegressor(
        max_depth = m,
        random_state=0,
        n_estimators = 30)  # number of trees used
    rf = rf.fit(x_train, y_train)
    y_pred_train = rf.predict(x_train)
    y_pred = rf.predict(x_test)
    mse_train_max_depth.append(mean_squared_error(y_train, y_pred_train))
    mse_test_max_depth.append(mean_squared_error(y_test, y_pred))

# +
# Plot regression performance on both samples (train and test).
fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(max_depth_ls, mse_train_max_depth, color = 'red', label = 'Train')
plt.plot(max_depth_ls, mse_test_max_depth, color = 'blue', label = 'Test')
plt.title('MSE en fonction de max_depth')
plt.legend()
plt.show()

# +
# Depth value that minimises the MSE on the test set.
max_depth_ls[mse_test_max_depth.index(min(mse_test_max_depth))]

# +
# Now look at the effect of the number of trees.
nb_estimators_ls = [1, 2, 3, 5, 20, 40, 50, 60, 80]
mse_train_nb_estimators = []
mse_test_nb_estimators = []

for m in nb_estimators_ls :
    print("Nombre d'arbres testés : ", m)
    rf = RandomForestRegressor(max_depth = 15, random_state = 0, n_estimators = m)
    rf = rf.fit(x_train, y_train)
    y_pred_train = rf.predict(x_train)
    y_pred = rf.predict(x_test)
    mse_train_nb_estimators.append(mean_squared_error(y_train, y_pred_train))
    mse_test_nb_estimators.append(mean_squared_error(y_test, y_pred))

# +
# Plot regression performance on both samples (train and test).
fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(nb_estimators_ls, mse_train_nb_estimators, color = 'red', label = 'Train')
plt.plot(nb_estimators_ls, mse_test_nb_estimators, color = 'blue', label = 'Test')
plt.title('MSE en fonction de n_estimators')
plt.legend()
plt.show()

# +
# Vary the minimum number of samples required to create a leaf/node.
samples_leaf_ls = [1, 2, 3, 4, 10]
mse_train_samples_leaf = []
mse_test_samples_leaf = []

for m in samples_leaf_ls :
    print('min_samples_leaf testé : ', m)
    rf = RandomForestRegressor(
        max_depth = 15,
        min_samples_leaf = m,
        n_estimators = 60,
        random_state = 0
    )
    rf = rf.fit(x_train, y_train)
    y_pred_train = rf.predict(x_train)
    y_pred = rf.predict(x_test)
    mse_train_samples_leaf.append(mean_squared_error(y_train, y_pred_train))
    mse_test_samples_leaf.append(mean_squared_error(y_test, y_pred))

# +
# Plot regression performance on both samples (train and test).
fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(samples_leaf_ls, mse_train_samples_leaf, color='red', label='Train')
plt.plot(samples_leaf_ls, mse_test_samples_leaf, color='blue', label='Test')
plt.title('MSE en fct de min samples leaf')
plt.legend()
plt.show()

# +
# Finally vary the maximum number of leaves.
max_leaf_ls = [2, 10, 100, 150, 200, 1000, 1500]
mse_train_max_leaf = []
mse_test_max_leaf = []

for m in max_leaf_ls :
    print('Nombre de feuilles max testé : ', m)
    rf = RandomForestRegressor(max_depth = 15, min_samples_leaf = 1, max_leaf_nodes = m, n_estimators = 60)
    rf = rf.fit(x_train, y_train)
    y_pred_train = rf.predict(x_train)
    y_pred = rf.predict(x_test)
    mse_train_max_leaf.append(mean_squared_error(y_train, y_pred_train))
    mse_test_max_leaf.append(mean_squared_error(y_test, y_pred))

# +
# Plot regression performance on both samples (train and test).
fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(max_leaf_ls, mse_train_max_leaf, color = 'red', label = 'Train')
plt.plot(max_leaf_ls, mse_test_max_leaf, color = 'blue', label = 'Test')
plt.title('MSE en fonction max_leaf_nodes')
plt.legend()
plt.show()

# +
# We now have all our parameters: final forest.
rf = RandomForestRegressor(
    max_depth = 15,
    min_samples_leaf = 1,
    max_leaf_nodes = 1000,
    n_estimators = 60
)
rf = rf.fit(x_train, y_train)
y_pred_train = rf.predict(x_train)
y_pred = rf.predict(x_test)

# All-zero predictions used as a "null model" baseline.
oo = np.zeros(y_pred.shape)

print('MSE train : ', mean_squared_error(y_train, y_pred_train))
print('MSE test : ', mean_squared_error(y_test, y_pred))
print('MSE modèle nulle : ', mean_squared_error(y_test, oo))

importance = feat_importance(rf, x_train, y_train, X)
# -

importance.plot(kind = 'barh', figsize = (18, 14))

# +
# Define the samples again (as DataFrames) for the neural network.
y = df[['absence_future']]
X = df.drop(columns = ['absence_future'])

# +
# Build the network architecture.
model = Sequential()
model.add(Dense(70, input_dim = N, activation = 'relu'))
model.add(Dense(12))
model.add(Dense(13))
model.add(Dense(15))
model.add(Dense(5))
model.add(Dense(1))
model.compile(optimizer = 'adam', loss = 'mse')

# +
# Run the training phase.
history = model.fit(X, y, validation_split = 0.2, epochs = 30)

# +
# Plot the loss over the epochs for the train and validation samples.
fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(history.history['loss'])
plt.title('Evolution de MSE sur X_train au fil des époques')
plt.ylabel('mse')
plt.legend(['train'], loc = 'upper left')
plt.show()

fig, ax = plt.subplots(figsize = (18, 8))
plt.plot(history.history['val_loss'])
plt.title('Evolution de MSE sur X_test au fil des époques')
plt.ylabel('mse')
plt.legend(['test'], loc = 'upper left')
plt.show()
# -

# Permutation importance of the network's inputs (eli5 report).
perm = PermutationImportance(model, scoring = 'r2', random_state = 1).fit(X,y)
eli5.show_weights(perm, feature_names = X.columns.tolist(), top = N)

# Total elapsed time in minutes.
print((time.time() - t0)/ 60)
.ipynb_checkpoints/construction_base-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 13. Loading and Preprocessing Data with TensorFlow

# Introducing the **Data API**, a useful tool in ingesting and preprocessing large datasets.
#
# NOTE(review): this notebook follows a book chapter; several cells use
# placeholder names (`train_filepaths`, `X_train`, `[...]`) that are never
# defined here and are illustrative only.

# ### The Data API
#
# The whole API revolves around the concept of **dataset**. Here a simple example:

# + jupyter={"outputs_hidden": true}
import tensorflow as tf

X = tf.range(10)  # any data tensor

# + jupyter={"outputs_hidden": true}
dataset = tf.data.Dataset.from_tensor_slices(X)

# + jupyter={"outputs_hidden": true}
dataset
# -

# #### Chaining Transformations

# We can apply all sorts of transformation to our datasets, and these will create new datasets. We can also chain them:

# + jupyter={"outputs_hidden": true}
dataset = dataset.repeat(3).batch(7)

# + jupyter={"outputs_hidden": true}
for item in dataset:
    print(item)
# -

# We can transform it using `map()`:

# + jupyter={"outputs_hidden": true}
dataset = dataset.map(lambda x: x * 2)  # items: [0,2,4,6,8,10,12]
# -

# #### Shuffling the Data

# Example:

# + jupyter={"outputs_hidden": true}
dataset = tf.data.Dataset.range(10).repeat(3)  # 0 to 9, three times

# + jupyter={"outputs_hidden": true}
dataset = dataset.shuffle(buffer_size=5, seed=42).batch(7)

# + jupyter={"outputs_hidden": true}
for item in dataset:
    print(item)
# -

# Another method to shuffle the data is **interleaving lines from multiple files**. The idea is simple: take the dataset and split it into a training set, a validation set, and a test set. Then split each set into many files.

# Now let's suppose we have a list `train_filepaths` that contains the list of training file paths. Next, we create a dataset containing filepaths:

# + jupyter={"outputs_hidden": true}
# NOTE(review): `train_filepaths` is a book placeholder, not defined in this
# notebook — this cell raises NameError as-is.
filepath_dataset = tf.data.Dataset.list_files(train_filepaths, seed=42)
# -

# And finally `interleave` to read one line at the time from all of them (sequentially):

# + jupyter={"outputs_hidden": true}
n_readers = 5
dataset = filepath_dataset.interleave(
    # skip(1) drops each file's first line (presumably a CSV header).
    lambda filepath: tf.data.TextLineDataset(filepath).skip(1),
    cycle_length=n_readers)

# + [markdown] jupyter={"outputs_hidden": true}
# ### TFRecord Format
#
# Useful when bottleneck is loading and parsing the data. We can create a TFRecord file easily:

# + jupyter={"outputs_hidden": true}
with tf.io.TFRecordWriter("my_data.tfrecord") as f:
    f.write(b"This is the first record")
    f.write(b"And this is the second record")
# -

# Then we use `tf.data.TFRecordDataset` to read it:

# + jupyter={"outputs_hidden": true}
filepaths = ["my_data.tfrecord"]
dataset = tf.data.TFRecordDataset(filepaths)
for item in dataset:
    print(item)
# -

# ### Preprocessing the Input Features
#
# Aka converting all features into numerical features, generally normalizing them, etc. We have several options, some already covered:
#
# 1. Preparing data files in advance using another package (e.g. pandas)
# 2. Preprocessing using Data API
# 3. Preprocessing layer directly embedded in our model
#
# Let's cover `3`.

# Let's implement a standardization layer using a `Lambda` layer. For each feature, it subtracts the mean and divides by its standard deviation (plus a tiny smoothing term to avoid division by zero):

# + jupyter={"outputs_hidden": true}
import numpy as np

# NOTE(review): `X_train` and `keras` are book placeholders — neither is
# defined/imported at this point (keras is only imported two cells below),
# so this cell is illustrative only.
means = np.mean(X_train, axis=0, keepdims=True)
stds = np.std(X_train, axis=0, keepdims=True)
eps = keras.backend.epsilon()
model = keras.models.Sequential([
    keras.layers.Lambda(lambda inputs: (inputs - means) / (stds + eps)),
    [...]  # other layers
])
# -

# However, we may prefer to use a nice self-contained custom layer instead of having global variables like `means` and `stds`:

# + jupyter={"outputs_hidden": true}
from tensorflow import keras

class Standardization(keras.layers.Layer):
    """Standardization layer that learns its statistics from a data sample."""

    def adapt(self, data_sample):
        # Call once, before training, with a representative sample.
        self.means_ = np.mean(data_sample, axis=0, keepdims=True)
        self.stds_ = np.std(data_sample, axis=0, keepdims=True)

    def call(self, inputs):
        # epsilon avoids division by zero for constant features.
        return (inputs - self.means_) / (self.stds_ + keras.backend.epsilon())
# -

# #### Encoding Categorical Features Using One-Hot Vectors

# Encoding categorical features is an essential step in any preprocesssing pipeline. Let's use how this can be done:

# + jupyter={"outputs_hidden": true}
# list of all possible categories
vocab = ["<1H OCEAN", "INLAND", "NEAR OCEAN", "NEAR BAY", "ISLAND"]

# tensor with indices (0-4)
indices = tf.range(len(vocab), dtype=tf.int64)

# initializer for the lookup table
table_init = tf.lookup.KeyValueTensorInitializer(vocab, indices)

# out of vocab buckets
num_oov_buckets = 2

# lookup for oov buckets
table = tf.lookup.StaticVocabularyTable(table_init, num_oov_buckets)
# -

# Now let’s use the lookup table to encode a small batch of categorical features to one-hot vectors:

# + jupyter={"outputs_hidden": true}
categories = tf.constant(["NEAR BAY", "DESERT", "INLAND", "INLAND"])
cat_indices = table.lookup(categories)

# + jupyter={"outputs_hidden": true}
cat_one_hot = tf.one_hot(cat_indices, depth=len(vocab) + num_oov_buckets)

# + jupyter={"outputs_hidden": true}
cat_one_hot
# -

# **Tip**: This works well with relatively few (<10) categories. For larger (>50) vocabs, it may be better to switch to **embeddings**.

# #### Encoding Categorical Features Using Embeddings

# An embedding is a trainable dense vector that represents a category. In some cases (e.g. word embeddings) they can be reused.
#
# In addition to important concept of **representation learning** embeddings can also encode concepts (e.g. King - Man + Woman $\approx$ Queen).
#
# Here how it works in a simplified 2D embedding example:

# + jupyter={"outputs_hidden": true}
embedding_dim = 2  # typically 10-300
embed_init = tf.random.uniform([len(vocab) + num_oov_buckets, embedding_dim])
embedding_matrix = tf.Variable(embed_init)

# + jupyter={"outputs_hidden": true}
embedding_matrix
# -

# Encoding:

# + jupyter={"outputs_hidden": true}
categories = tf.constant(["NEAR BAY", "DESERT", "INLAND", "INLAND"])

# + jupyter={"outputs_hidden": true}
cat_indices = table.lookup(categories)

# + jupyter={"outputs_hidden": true}
cat_indices

# + jupyter={"outputs_hidden": true}
tf.nn.embedding_lookup(embedding_matrix, cat_indices)
# -

# Putting everything together, we can create a model that learns categorical features through embeddings.

# + jupyter={"outputs_hidden": true}
regular_inputs = keras.layers.Input(shape=[8])
categories = keras.layers.Input(shape=[], dtype=tf.string)

# look up each category
cat_indices = keras.layers.Lambda(lambda cats: table.lookup(cats))(categories)

# look up embeddings
cat_embed = keras.layers.Embedding(input_dim=6, output_dim=2)(cat_indices)

encoded_inputs = keras.layers.concatenate([regular_inputs, cat_embed])
outputs = keras.layers.Dense(1)(encoded_inputs)
model = keras.models.Model(inputs=[regular_inputs, categories], outputs=[outputs])
# -

# ### TF Transform
#
# It may happen that we want to deploy our production model, and having to modify preprocessing for every platform can definitely be a problem in the long run. TF Transform takes care of making sure that there is only one preprocessing step.

# +
import tensorflow_transform as tft

def preprocess(inputs):  # inputs = a batch of input features
    """Book example: standardize the age and encode ocean proximity with TF Transform."""
    median_age = inputs["housing_median_age"]
    ocean_proximity = inputs["ocean_proximity"]
    standardized_age = tft.scale_to_z_score(median_age)
    ocean_proximity_id = tft.compute_and_apply_vocabulary(ocean_proximity)
    return {
        "standardized_median_age": standardized_age,
        "ocean_proximity_id": ocean_proximity_id
    }
# -

# Next, TF Transform lets you apply this `preprocess()` function to the whole training set using Apache Beam.
.ipynb_checkpoints/13. Loading_and_Preprocessing_Data_with_TensorFlow-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Split the xeno-canto CA/NV bird-song index into per-species 75/25
# train/test sets, then move the audio files into Train/<species>/ and
# Test/<species>/ directory trees.

import os
import shutil

import pandas as pd

data = pd.read_csv("xeno-canto_ca-nv_index.csv")
data.head()

# FIX: the original bound the unique-species array to the name `list`,
# shadowing the builtin; `bird_list = [bird for bird in list]` was then a
# roundabout copy. tolist() yields the same plain-Python list directly.
bird_list = data["english_cname"].unique().tolist()


def dataframe_difference(df1, df2, which=None):
    """Find rows which are different between two DataFrames.

    The frames are outer-merged on all shared columns; `which` selects a
    merge-indicator value ('left_only', 'right_only' or 'both'), while the
    default None keeps every row that is NOT present in both frames.
    Returns the filtered frame with the extra `_merge` indicator column.
    """
    comparison_df = df1.merge(df2, indicator=True, how='outer')
    if which is None:
        diff_df = comparison_df[comparison_df['_merge'] != 'both']
    else:
        diff_df = comparison_df[comparison_df['_merge'] == which]
    return diff_df


# Per-species split: sample 75 % of each species for training (fixed seed
# for reproducibility); the test set is whatever was not picked.
# PERF FIX: collect the per-species frames and concatenate once at the end —
# the original re-built train_df/test_df with pd.concat on every loop
# iteration, which is quadratic in the number of species.
train_parts = []
test_parts = []
for bird in bird_list:
    species_rows = data.loc[data['english_cname'] == bird]
    train_rows = species_rows.sample(frac=.75, random_state=4)
    test_parts.append(dataframe_difference(train_rows, species_rows))
    train_parts.append(train_rows)

train_df = pd.concat(train_parts, ignore_index=True)
test_df = pd.concat(test_parts, ignore_index=True)

# Sanity check: every recording ends up in exactly one split.
assert len(train_df) + len(test_df) == len(data)

train_df.to_csv("train.csv")
test_df.to_csv("test.csv")

test_dir = 'Test/'
train_dir = 'Train/'
source_dir = 'xeno-canto-ca-nv/'


def _move_recordings(split_df, dest_root):
    """Move every audio file listed in `split_df` from `source_dir` into dest_root/<species>/."""
    for _, row in split_df.iterrows():
        name = row["english_cname"]
        audio = row["file_name"]
        # exist_ok avoids the check-then-create race of the original
        # `if not os.path.exists(...): os.makedirs(...)` pattern.
        os.makedirs(f"{dest_root}{name}", exist_ok=True)
        shutil.move(f"{source_dir}{audio}", f"{dest_root}{name}/")


# The two move loops in the original were identical except for the target
# directory, so they share one helper.
_move_recordings(test_df, test_dir)
_move_recordings(train_df, train_dir)
Extra/Data_Split_HSK/Data_Split.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Matplotlib practice notebook: builds line, scatter, bar, pie, subplot and
# stack plots from a monthly company sales CSV (./dataset/sales_data.csv).

# 1. Write a Python program to draw line charts of the financial data of
#    Alphabet Inc. between October 3, 2016 to October 7, 2016.

import matplotlib.pyplot as plt
import pandas as pd

# First CSV column is a parsed date index; DataFrame.plot() then draws one
# line per remaining column.
df = pd.read_csv('./dataset/fdata.csv', sep=',', parse_dates=True, index_col=0)
df.plot()

# #### Read Total profit of all months and show it using a line plot
# Total profit data provided for each month. Generated line plot must include the following properties:
#
# + X label name = Month Number
# + Y label name = Total profit
# The line plot graph should look like this.

import pandas as pd
import matplotlib.pyplot as plt

# NOTE: `df` is rebound here to the sales data and reused by every exercise below.
df = pd.read_csv("./dataset/sales_data.csv")
profitList = df['total_profit'].tolist()
monthList = df['month_number'].tolist()
plt.plot(monthList, profitList, label='Month-wise Profit data of last year')
plt.xlabel('Month number')
plt.ylabel('Profit in dollar')
plt.xticks(monthList)  # one tick per month number
plt.title('Company profit per month')
plt.yticks([100000, 200000, 300000, 400000, 500000])
plt.show()

# ## Get Total profit of all months and show line plot with the following Style properties
# Generated line plot must include following Style properties:
#
# + Line Style dotted and Line-color should be red
# + Show legend at the lower right location.
# + X label name = Month Number
# + Y label name = Sold units number
# + Add a circle marker.
# + Line marker color as red
# + Line width should be 3
# The line plot graph should look like this.

# +
profitList = df['total_profit'].tolist()
monthList = df['month_number'].tolist()
# Red dashed line, circle markers with black faces, line width 3.
plt.plot(monthList, profitList, label='Profit data of last year', color='r', marker='o', markerfacecolor='k', linestyle='--', linewidth=3)
plt.xlabel('Month Number')
plt.ylabel('Profit in dollar')
plt.legend(loc='lower right')
plt.title('Company Sales data of last year')
plt.xticks(monthList)
plt.yticks([100000, 200000, 300000, 400000, 500000])
plt.show()
# -

# ## Read toothpaste sales data of each month and show it using a scatter plot
# Also, add a grid in the plot. gridline style should be "--".
#
# The scatter plot should look like this.

monthList = df['month_number'].tolist()
toothPasteSalesData = df['toothpaste'].tolist()
plt.scatter(monthList, toothPasteSalesData, label='Tooth paste Sales data')
plt.xlabel('Month Number')
plt.ylabel('Number of units Sold')
plt.legend(loc='upper left')
plt.title(' Tooth paste Sales data')
plt.xticks(monthList)
plt.grid(True, linewidth=1, linestyle="--")
plt.show()

# ## Read face cream and facewash product sales data and show it using the bar chart
# Bar chart should display the number of units sold per month for each product. Add a separate bar for each product in the same chart.
#
# The bar chart should look like this.

monthList = df['month_number'].tolist()
faceCremSalesData = df['facecream'].tolist()
faceWashSalesData = df['facewash'].tolist()
# With align='edge', a positive width grows rightward from x and a negative
# width grows leftward, so the two 0.25-wide bars sit side by side around
# each month tick.
plt.bar([a - 0.25 for a in monthList], faceCremSalesData, width=0.25, label='Face Cream sales data', align='edge')
plt.bar([a + 0.25 for a in monthList], faceWashSalesData, width=-0.25, label='Face Wash sales data', align='edge')
plt.xlabel('Month Number')
plt.ylabel('Sales units in number')
plt.legend(loc='upper left')
plt.title(' Sales data')
plt.xticks(monthList)
plt.grid(True, linewidth=1, linestyle="--")
plt.title('Facewash and facecream sales data')  # overrides the title set above
plt.show()

# ## Read sales data of bathing soap of all months and show it using a bar chart.
#    Save this plot to your hard disk
# The bar chart should look like this.

monthList = df['month_number'].tolist()
bathingsoapSalesData = df['bathingsoap'].tolist()
plt.bar(monthList, bathingsoapSalesData)
plt.xlabel('Month Number')
plt.ylabel('Sales units in number')
plt.title(' Sales data')
plt.xticks(monthList)
plt.grid(True, linewidth=1, linestyle="--")
plt.title('bathingsoap sales data')  # overrides the title set above
# Save BEFORE show(): show() clears the current figure.
plt.savefig('./img/sales_data_of_bathingsoap.png', dpi=150)
plt.show()

# ## Calculate total sale data for last year for each product and show it using a Pie chart
# Note: In Pie chart display Number of units sold per year for each product in percentage.
#
# The Pie chart should look like this.

monthList = df['month_number'].tolist()
# NOTE(review): 'FaseWash' is a typo in the rendered legend ("FaceWash");
# kept as-is here because it is user-visible output, not a comment.
labels = ['FaceCream', 'FaseWash', 'ToothPaste', 'Bathing soap', 'Shampoo', 'Moisturizer']
salesData = [df['facecream'].sum(), df['facewash'].sum(), df['toothpaste'].sum(), df['bathingsoap'].sum(), df['shampoo'].sum(), df['moisturizer'].sum()]
plt.axis("equal")  # equal aspect ratio so the pie is a circle
plt.pie(salesData, labels=labels, autopct='%1.1f%%')
plt.legend(loc='lower right')
plt.title('Sales data')
plt.show()

# ## Read Bathing soap facewash of all months and display it using the Subplot
# The Subplot should look like this.

bathingsoap = df['bathingsoap'].tolist()
faceWashSalesData = df['facewash'].tolist()
# Two stacked axes sharing the x (month) axis.
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(monthList, bathingsoap, label='Bathingsoap Sales Data', color='k', marker='o', linewidth=3)
axarr[0].set_title('Sales data of a Bathingsoap')
axarr[1].plot(monthList, faceWashSalesData, label='Face Wash Sales Data', color='r', marker='o', linewidth=3)
axarr[1].set_title('Sales data of a facewash')
plt.xticks(monthList)
plt.xlabel('Month Number')
plt.ylabel('Sales units in number')
plt.show()

# ## Read all product sales data and show it using the stack plot
# The Stack plot should look like this.

monthList = df['month_number'].tolist()
faceCremSalesData = df['facecream'].tolist()
faceWashSalesData = df['facewash'].tolist()
toothPasteSalesData = df['toothpaste'].tolist()
bathingsoapSalesData = df['bathingsoap'].tolist()
shampooSalesData = df['shampoo'].tolist()
moisturizerSalesData = df['moisturizer'].tolist()
# The empty plot() calls exist only to give the stackplot one legend entry
# per colour (stackplot itself does not label its layers here).
plt.plot([], [], color='m', label='face Cream', linewidth=5)
plt.plot([], [], color='c', label='Face wash', linewidth=5)
plt.plot([], [], color='r', label='Tooth paste', linewidth=5)
plt.plot([], [], color='k', label='Bathing soap', linewidth=5)
plt.plot([], [], color='g', label='Shampoo', linewidth=5)
plt.plot([], [], color='y', label='Moisturizer', linewidth=5)
plt.stackplot(monthList, faceCremSalesData, faceWashSalesData, toothPasteSalesData, bathingsoapSalesData, shampooSalesData, moisturizerSalesData, colors=['m', 'c', 'r', 'k', 'g', 'y'])
plt.xlabel('Month Number')
plt.ylabel('Sales unints in Number')
plt.title('Alll product sales data using stack plot')
plt.legend(loc='upper left')
plt.show()
6Visualization/.ipynb_checkpoints/1_matplotlib_exercises-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Observations and Insights
#

# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
import seaborn as sns
from scipy.stats import linregress
from matplotlib.pyplot import figure
from sklearn import datasets

# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"

# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)

# Combine the data into a single dataset (outer merge keeps every mouse
# even if it appears in only one of the two files).
Combined_data = pd.merge(mouse_metadata, study_results, how='outer')

# Display the data table for preview
Combined_data.head()
# -

# Checking the number of mice.
mice = Combined_data["Mouse ID"].value_counts()
number_of_mice = len(mice)
number_of_mice

# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# FIX: the original assigned to `duplicatemice` but read `duplicate_mice`
# below, which raised NameError at run time; one consistent name is used.
duplicate_mice = Combined_data.loc[Combined_data.duplicated(subset=['Mouse ID', 'Timepoint']), 'Mouse ID'].unique()

# Optional: Get all the data for the duplicate mouse ID.
allduplicate_mouse_id = pd.DataFrame(duplicate_mice)
allduplicate_mouse_id

# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_df = Combined_data[~Combined_data['Mouse ID'].isin(duplicate_mice)]

# Checking the number of mice in the clean DataFrame.
clean_mice = clean_df["Mouse ID"].value_counts()
clean_numberof_mice = len(clean_mice)
clean_numberof_mice

# ## Summary Statistics

# +
# Generate a summary statistics table of mean, median, variance, standard
# deviation, and SEM of the tumor volume for each regimen.
# This method is the most straightforward, creating multiple series and
# putting them all together at the end. The tumor-volume column is selected
# BEFORE aggregating (same numbers; also avoids aggregating the
# non-numeric columns, which modern pandas rejects).
regimen_grouped = clean_df.groupby('Drug Regimen')["Tumor Volume (mm3)"]
regimen_mean = regimen_grouped.mean()
regimen_mean
regimen_median = regimen_grouped.median()
regimen_median
regimen_variance = regimen_grouped.var()
regimen_variance
regimen_std = regimen_grouped.std()
regimen_std
regimen_sem = regimen_grouped.sem()
regimen_sem
# +
summary_statstable = pd.DataFrame({"Mean": regimen_mean, "Median": regimen_median, "Variance": regimen_variance, "Standard Deviation": regimen_std, "SEM": regimen_sem})
summary_statstable
summary_statstable.to_csv("fileOne.csv", index=False, header=True)
# +
# Generate the same summary statistics table with a single groupby/agg call.
single_group_by = clean_df.groupby('Drug Regimen')
summary_statstable_2 = single_group_by["Tumor Volume (mm3)"].agg(['mean', 'median', 'var', 'std', 'sem'])
summary_statstable_2
# -

# ## Bar and Pie Charts

# +
# Generate a bar plot showing the total number of mice for each treatment
# throughout the course of the study using pandas.
count_mice_per_tret = Combined_data.groupby(["Drug Regimen"]).count()["Mouse ID"]
plot_pandas = count_mice_per_tret.plot.bar(figsize=(15, 10), color='b', fontsize=14)
count_mice_per_tret
plt.xlabel("Drug Regimen", fontsize=14)
plt.ylabel("Number of Mice", fontsize=14)
plt.title("Number of Mice per Treatment", fontsize=20)
plt.savefig("../Images/Pan_mice_per_treat.png", bbox_inches="tight")
plt.tight_layout()
plt.show()
count_mice_per_tret
# -

# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.
# Mouse counts per regimen as a plain list, used by the pyplot bar chart below.
mice_list = (Combined_data.groupby(["Drug Regimen"])["Mouse ID"].count()).tolist()
mice_list

# +
x_axis = np.arange(len(count_mice_per_tret))
fig1, ax1 = plt.subplots(figsize=(15, 10))
plt.bar(x_axis, mice_list, color='b', alpha=0.8, align='center')
tick_locations = [value for value in x_axis]
# NOTE(review): these tick labels are hard-coded and assume the groupby
# above yields the regimens in this (alphabetical) order — verify if the
# data changes.
plt.xticks(tick_locations, ['Capomulin', 'Ceftamin', 'Infubinol', 'Ketapril', 'Naftisol', 'Placebo', 'Propriva', 'Ramicane', 'Stelasyn', 'Zoniferol'], rotation='vertical')
plt.xlim(-0.75, len(x_axis) - 0.25)
plt.ylim(0, max(mice_list) + 10)
plt.title("Number of Mice per Treatment", fontsize=20)
plt.xlabel("Drug Regimen", fontsize=14)
plt.ylabel("Number of Mice", fontsize=14)
plt.savefig("../Images/mat_mice_per_treat.png", bbox_inches="tight")
# +
# Group by gender and get the number to plot.
groupby_gender = Combined_data.groupby(["Mouse ID", "Sex"])
groupby_gender
gender_df = pd.DataFrame(groupby_gender.size())

# Create the dataframe with total count of Female and Male mice.
mouse_gender = pd.DataFrame(gender_df.groupby(["Sex"]).count())
mouse_gender.columns = ["Total Count"]

# Create and format the percentage of female vs male.
mouse_gender["Percentage of Sex"] = (100 * (mouse_gender["Total Count"] / mouse_gender["Total Count"].sum()))
# Self-assignment kept from the original; it has no effect.
mouse_gender["Percentage of Sex"] = mouse_gender["Percentage of Sex"]
# gender_df
mouse_gender
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas.
colors = ['green', 'blue']
explode = (0.1, 0)  # detach the first (Female) wedge slightly
plot = mouse_gender.plot.pie(y='Total Count', figsize=(15, 10), colors=colors, startangle=140, explode=explode, shadow=True, autopct="%1.1f%%")
plt.title('Male vs Female Mouse Population', fontsize=20)
plt.ylabel('Sex', fontsize=12)
plt.axis("equal", fontsize=12)
plt.savefig("../Images/pi_pandas.png", bbox_inches="tight")
plt.show()
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot.
labels = ["Female", "Male"]
# List the values of each section of the pie chart.
# NOTE(review): these percentages are hard-coded copies of the values
# computed in mouse_gender above — they will silently go stale if the
# data changes; recompute from mouse_gender to be safe.
sizes = [49.799197, 50.200803]
# Set colors for each section of the pie.
colors = ['green', 'blue']
# Determine which section of the circle to detach.
explode = (0.1, 0)
# Create the pie chart based upon the values.
fig1, ax1 = plt.subplots(figsize=(15, 10))
plt.pie(sizes, explode=explode, labels=labels, colors=colors, autopct="%1.1f%%", shadow=True, startangle=140,)
plt.title('Male vs Female Mouse Population', fontsize=20)
plt.ylabel('Sex', fontsize=12)
# Set equal axis so the pie is circular.
plt.axis("equal", fontsize=12)
plt.savefig("../Images/pi_plot.png", bbox_inches="tight")
plt.show()
# -

# ## Quartiles, Outliers and Boxplots

# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin.
# Start by getting the last (greatest) timepoint for each mouse.
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint.
Capomulin_df = Combined_data.loc[Combined_data["Drug Regimen"] == "Capomulin", :]
Ramicane_df = Combined_data.loc[Combined_data["Drug Regimen"] == "Ramicane", :]
Infubinol_df = Combined_data.loc[Combined_data["Drug Regimen"] == "Infubinol", :]
Ceftamin_df = Combined_data.loc[Combined_data["Drug Regimen"] == "Ceftamin", :]

# Capomulin: last timepoint per mouse, merged back for the final tumor volume.
Capomulin_last = Capomulin_df.groupby('Mouse ID').max()['Timepoint']
Capomulin_vol = pd.DataFrame(Capomulin_last)
Capomulin_merge = pd.merge(Capomulin_vol, Combined_data, on=("Mouse ID", "Timepoint"), how="left")
Capomulin_merge.head()
# +
# Put treatments into a list for the for loop (and later for plot labels).
# Create an empty list to fill with tumor vol data (for plotting).
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug, get the final tumor
# volumes, and determine potential outliers using the 1.5*IQR rule.
# The original repeated the same quartile/IQR boilerplate four times
# (with copy-pasted "temperatures" wording in the Ceftamin messages);
# the shared logic now lives in one helper.

def report_outlier_bounds(tumor_volumes, regimen_label):
    """Print quartiles, IQR, median and 1.5*IQR outlier bounds for one regimen.

    tumor_volumes: Series of final tumor volumes (mm3) for one drug regimen.
    regimen_label: regimen name interpolated into the printed messages.
    """
    quartiles = tumor_volumes.quantile([.25, .5, .75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq - lowerq
    print(f"The lower quartile of {regimen_label} tumors is: {lowerq}")
    print(f"The upper quartile of {regimen_label} tumors is: {upperq}")
    print(f"The interquartile range of {regimen_label} tumors is: {iqr}")
    print(f"The median of {regimen_label} tumors is: {quartiles[0.5]} ")
    # Standard Tukey fences: anything outside [Q1-1.5*IQR, Q3+1.5*IQR].
    lower_bound = lowerq - (1.5 * iqr)
    upper_bound = upperq + (1.5 * iqr)
    print(f"Values below {lower_bound} could be outliers.")
    print(f"Values above {upper_bound} could be outliers.")


# Capomulin (Capomulin_merge is built in the previous cell).
Capomulin_tumors = Capomulin_merge["Tumor Volume (mm3)"]
report_outlier_bounds(Capomulin_tumors, "Capomulin")

# + Ramicane: last timepoint per mouse, merged back for the final tumor volume.
Ramicane_last = Ramicane_df.groupby('Mouse ID').max()['Timepoint']
Ramicane_vol = pd.DataFrame(Ramicane_last)
Ramicane_merge = pd.merge(Ramicane_vol, Combined_data, on=("Mouse ID", "Timepoint"), how="left")
Ramicane_merge.head()
Ramicane_merge.to_csv("output.csv")  # NOTE(review): overwritten by the Infubinol cell below
Ramicane_tumors = Ramicane_merge["Tumor Volume (mm3)"]
report_outlier_bounds(Ramicane_tumors, "Ramicane")
# + Infubinol
Infubinol_last = Infubinol_df.groupby('Mouse ID').max()['Timepoint']
Infubinol_vol = pd.DataFrame(Infubinol_last)
Infubinol_merge = pd.merge(Infubinol_vol, Combined_data, on=("Mouse ID", "Timepoint"), how="left")
Infubinol_merge.head()
Infubinol_tumors = Infubinol_merge["Tumor Volume (mm3)"]
report_outlier_bounds(Infubinol_tumors, "Infubinol")
Infubinol_merge.to_csv("output.csv")
# + Ceftamin (original printed "temperatures" here — fixed to tumors)
Ceftamin_last = Ceftamin_df.groupby('Mouse ID').max()['Timepoint']
Ceftamin_vol = pd.DataFrame(Ceftamin_last)
Ceftamin_merge = pd.merge(Ceftamin_vol, Combined_data, on=("Mouse ID", "Timepoint"), how="left")
Ceftamin_merge.head()
Ceftamin_tumors = Ceftamin_merge["Tumor Volume (mm3)"]
report_outlier_bounds(Ceftamin_tumors, "Ceftamin")
# +
# Generate a box plot of the final tumor volume of each mouse across the
# four regimens of interest.
data_to_plot = [Capomulin_tumors, Ramicane_tumors, Infubinol_tumors, Ceftamin_tumors]
Regimen = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
fig1, ax1 = plt.subplots(figsize=(15, 10))
ax1.set_title('Tumor Volume at Selected Mouse', fontsize=25)
ax1.set_ylabel('Final Tumor Volume (mm3)', fontsize=14)
ax1.set_xlabel('Drug Regimen', fontsize=14)
ax1.boxplot(data_to_plot, labels=Regimen, widths=0.4, patch_artist=True, vert=True)
plt.ylim(10, 80)
plt.savefig("../Images/box_plot.png", bbox_inches="tight")
plt.show()
# -
# ## Line and
# Scatter Plots (continuation of the "Line and Scatter Plots" section heading)

# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin.
forline_df = Capomulin_df.loc[Capomulin_df["Mouse ID"] == "b742", :]
forline_df.head()

# +
x_axis = forline_df["Timepoint"]
tumsiz = forline_df["Tumor Volume (mm3)"]

fig1, ax1 = plt.subplots(figsize=(15, 10))
# FIX: title typo ("treatmeant") and the leftover label "Fahreneit"
# (copy-pasted from an unrelated temperature example) are corrected.
plt.title('Capomulin treatment of mouse b742', fontsize=25)
plt.plot(x_axis, tumsiz, linewidth=2, markersize=15, marker="o", color="blue", label="Tumor Volume (mm3)")
plt.xlabel('Timepoint (Days)', fontsize=14)
plt.ylabel('Tumor Volume (mm3)', fontsize=14)
plt.savefig("../Images/line_graph.png", bbox_inches="tight")
plt.show()
# -

# +
# Generate a scatter plot of mouse weight versus average tumor volume for
# the Capomulin regimen (one point per mouse).
fig1, ax1 = plt.subplots(figsize=(15, 10))
avg_capm_vol = Capomulin_df.groupby(['Mouse ID']).mean()
# (unused `marker_size` local removed; the marker size is passed via s=175)
plt.scatter(avg_capm_vol['Weight (g)'], avg_capm_vol['Tumor Volume (mm3)'], s=175, color="blue")
plt.title('Mouse Weight Versus Average Tumor Volume', fontsize=25)
plt.xlabel('Weight (g)', fontsize=14)
plt.ylabel('Average Tumor Volume (mm3)', fontsize=14)  # FIX: "Averag" typo
plt.savefig("../Images/scatterplot.png", bbox_inches="tight")
plt.show()
# -

# ## Correlation and Regression

# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen.
corr = round(st.pearsonr(avg_capm_vol['Weight (g)'], avg_capm_vol['Tumor Volume (mm3)'])[0], 2)
print(f"The correlation between mouse weight and average tumor volume is {corr}")

# +
x_values = avg_capm_vol['Weight (g)']
y_values = avg_capm_vol['Tumor Volume (mm3)']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
print(f"slope:{slope}")
print(f"intercept:{intercept}")
print(f"rvalue (Correlation coefficient):{rvalue}")
print(f"pandas (Correlation coefficient):{corr}")
print(f"stderr:{stderr}")
line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
print(line_eq)
# -

# +
# Plot the linear regression line over the weight/volume scatter.
fig1, ax1 = plt.subplots(figsize=(15, 10))
plt.scatter(x_values, y_values, s=175, color="blue")
plt.plot(x_values, regress_values, "r-")
plt.title('Regression Plot of Mouse Weight Versus Average Tumor Volume', fontsize=20)
plt.xlabel('Weight (g)', fontsize=14)  # FIX: missing space in "Weight(g)"
plt.ylabel('Average Tumor Volume (mm3)', fontsize=14)  # FIX: "Tumore" typo
ax1.annotate(line_eq, xy=(20, 40), xycoords='data', xytext=(0.8, 0.95), textcoords='axes fraction', horizontalalignment='right', verticalalignment='top', fontsize=30, color="red")
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../Images/linear_regression.png", bbox_inches="tight")
plt.show()
# -
Pymaceuticals/pymaceuticals_starter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Training pipeline
#
# Machine-learning systems are usually built from different modules; the
# scikit-learn library has functions that let us join modules together and
# build so-called pipelines.
#
# A pipeline can include modules that perform several functions, such as
# feature selection, preprocessing, random forests, clustering, etc.
# (Comments translated from Spanish; printed strings kept as in the original.)

# +
import numpy as np
import matplotlib.pyplot as plt
# FIX: `sklearn.datasets.samples_generator` was deprecated in scikit-learn
# 0.22 and removed in 0.24; import `make_classification` directly.
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
# -

# Create 150 points of 25 dimensions belonging to 3 distinct classes.
X, y = make_classification(n_samples=150, n_features=25, n_classes=3, n_informative=6, n_redundant=0, random_state=7)

# +
# The first component of the pipeline is the feature selector, which keeps
# the best k of the 25 features; start with k=9.
k_best_selector = SelectKBest(f_regression, k=9)

# Create the extremely-randomized-trees classifier.
classifier = ExtraTreesClassifier(n_estimators=60, max_depth=4)

# Build the pipeline.
processor_pipeline = Pipeline([('selector', k_best_selector), ('erf', classifier)])

# Parameters of the individual blocks can be changed after construction:
# set k=7 on the selector and 30 estimators on the forest.
processor_pipeline.set_params(selector__k=7, erf__n_estimators=30)

# Train the pipeline.
processor_pipeline.fit(X, y)
# -

# Predict the output for the input data.
output = processor_pipeline.predict(X)
print("\nPredicted output:\n", output)
print("\nCalificación:", processor_pipeline.score(X, y))
print(classification_report(y, output))

# +
# Extract the features chosen by the selector block (7 of the 25).
status = processor_pipeline.named_steps['selector'].get_support()
# Extract the indices of the chosen features.
selected = [i for i, x in enumerate(status) if x]
print("\nIndices de caracteristias elegidas:", ','.join([str(x) for x in selected]))
# -
sistemas_de_recomendaciones/tuberias_de_entrenamiento_5_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.0 64-bit (''harmonizome'': venv)'
#     name: python_defaultSpec_1594074193745
# ---

# # Harmonizome ETL: The Human Protein Atlas (THPA) - Immunohistochemistry
# Created by: <NAME> <br>
# Credit to: <NAME>
#
# Data Source: http://www.proteinatlas.org/about/download
#
# Pipeline: load the THPA normal-tissue TSV, keep only "High" expression
# levels, build a gene x (tissue - cell type) binary matrix, and save the
# matrix plus derived gene/attribute lists, set libraries, similarity
# matrices and an edge list.

# appyter init
from appyter import magic
magic.init(lambda _=globals: _())

# +
import sys
import os
from datetime import date

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Project-local helpers: uf holds the save/transform utilities, lookup
# provides the gene-symbol mapping dictionaries.
import harmonizome.utility_functions as uf
import harmonizome.lookup as lookup
# -

# %load_ext autoreload
# %autoreload 2

# ### Notebook Information

# + tags=[]
print('This notebook was run on:', date.today(), '\nPython version:', sys.version)
# -

# # Initialization

# +
# %%appyter hide_code
{% do SectionField( name='data', title='Upload Data', img='load_icon.png' ) %}
# +
# %%appyter code_eval
{% do DescriptionField( name='description', text='The example below was sourced from <a href="http://www.proteinatlas.org/about/download" target="_blank">www.proteinatlas.org</a>. If clicking on the example does not work, it should be downloaded directly from the source.', section='data' ) %}
{% set df_file = FileField( constraint='.*\.zip$', name='normal_tissue', label='Normal Tissue Data (tsv.zip)', default='Input/THPA/normal_tissue.tsv.zip', examples={ 'normal_tissue.tsv.zip': 'https://www.proteinatlas.org/download/normal_tissue.tsv.zip' }, section='data' ) %}
# -

# ### Load Mapping Dictionaries

# + tags=[]
symbol_lookup, geneid_lookup = lookup.get_lookups()
# -

# ### Output Path

# +
output_name = 'thpa_ihc'
path = 'Output/THPA-IHC'
if not os.path.exists(path):
    os.makedirs(path)
# -

# # Load Data

# +
# %%appyter code_exec
# {{df_file}} is substituted by appyter with the uploaded zip path;
# pandas reads the tsv inside it directly.
df = pd.read_csv( {{df_file}}, sep='\t', usecols=['Gene name', 'Tissue', 'Cell type', 'Level'] )
# -

df.head()
df.shape

# # Pre-process Data

# ## Get Relevant Data

# Only high level of expression
df = df[df['Level'] == 'High'].drop('Level', axis=1)
df.head()

# Create attribute tissue and cell type: the attribute is the combined
# "Tissue - Cell type" string, indexed by gene symbol.
df['Tissue'] = df['Tissue'] + ' - ' + df['Cell type']
df = df.drop('Cell type', axis=1).set_index('Gene name')
df.index.name = 'Gene Symbol'
df.columns = ['Tissue - Cell Type']
df.head()

# # Filter Data

# ## Map Gene Symbols to Up-to-date Approved Gene Symbols

# + tags=[]
df = uf.map_symbols(df, symbol_lookup, remove_duplicates=True)
df.shape
# -

# # Analyze Data

# ## Create Binary Matrix

binary_matrix = uf.binary_matrix(df)
binary_matrix.head()
binary_matrix.shape
uf.save_data(binary_matrix, path, output_name + '_binary_matrix', compression='npz', dtype=np.uint8)

# ## Create Gene List

gene_list = uf.gene_list(binary_matrix, geneid_lookup)
gene_list.head()
gene_list.shape
uf.save_data(gene_list, path, output_name + '_gene_list', ext='tsv', compression='gzip', index=False)

# ## Create Attribute List

attribute_list = uf.attribute_list(binary_matrix)
attribute_list.head()
attribute_list.shape
uf.save_data(attribute_list, path, output_name + '_attribute_list', ext='tsv', compression='gzip')

# ## Create Gene and Attribute Set Libraries

uf.save_setlib(binary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')
uf.save_setlib(binary_matrix, 'attribute', 'up', path, output_name + '_attribute_up_set')

# ## Create Attribute Similarity Matrix

# Jaccard similarity over the transposed matrix compares attributes.
attribute_similarity_matrix = uf.similarity_matrix(binary_matrix.T, 'jaccard', sparse=True)
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path, output_name + '_attribute_similarity_matrix', compression='npz', symmetric=True, dtype=np.float32)

# ## Create Gene Similarity Matrix

gene_similarity_matrix = uf.similarity_matrix(binary_matrix, 'jaccard', sparse=True)
gene_similarity_matrix.head()
uf.save_data(gene_similarity_matrix, path, output_name + '_gene_similarity_matrix', compression='npz', symmetric=True, dtype=np.float32)

# ## Create Gene-Attribute Edge List

edge_list = uf.edge_list(binary_matrix)
uf.save_data(edge_list, path, output_name + '_edge_list', ext='tsv', compression='gzip')

# # Create Downloadable Save File

uf.archive(path)

# ### Link to download output files: [click here](./output_archive.zip)
THPA (Immunohistochemistry).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''5-minute-craft-titles-lBIm3JLI'': pipenv)'
#     language: python
#     name: python3
# ---

# # 5-Minute Craft Youtube Video Titles
#
# Team members: <NAME>, <NAME>, <NAME>
#
# Data Source: https://www.kaggle.com/shivamb/5minute-crafts-video-views-dataset
#
# This notebook aims to explore characteristics and generate insights from video titles of the Youtube channel [5-Minute Crafts](https://www.youtube.com/channel/UC295-Dw_tDNtZXFeAPAW6Aw)

# +
import pickle

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
from sklearn.cluster import KMeans
from yellowbrick.cluster import KElbowVisualizer
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer

pd.options.display.float_format = '{:.2f}'.format
# -

# ## Exploring the Dataset

df = pd.read_csv('../data/external/5-Minute Crafts.csv')

# The dataset consists of the video_id, title, and previously calculated metrics.
df.head()

# Total number of rows and columns:
df.shape

# Checking the video ids, we can confirm that all videos are unique.
len(df.video_id.unique())

# Although all video ids are unique, a small number of video titles repeat.
len(df.title.unique())

# The dataset does not have any null values.
df.isnull().values.any()

# Let's check some descriptive statistics about the videos:
#
# * Although the channel is called 5-minutes, 96% of the videos have a duration higher than 6 minutes, and 75% more than 11 minutes.
# * The channel has a video with over 280M views. 75% of the videos have more than 567K views.
# * The mean number of words is 8 on the video title.
# * The average word length is 5.46, which is close to the average length of English words of 5.1 according to [WolframAlpha](https://www.wolframalpha.com/input/?i=average+english+word+length)
# * 93% of their videos have a digit in the title
df[['duration_seconds', 'total_views', 'num_words', 'avg_word_len', 'contain_digits']].describe()

# Fraction of videos longer than 6 minutes (360 s).
df[df.duration_seconds > 360].shape[0] / df.shape[0]

# ## Distribution
#
# Evaluating the distribution of active days since, we can notice that the dataset does not provide exact dates for when the video was published. This happens possibly because of how Youtube shows on its page the time since the video was published.
#
# The number of videos with 'active days since' less than 365 is tiny. Therefore, comparing those videos with older ones may be misleading. For this reason, we drop them from the dataset.

# +
# Cumulative share of videos per "active since" value.
accumulated_percent = df.active_since_days.value_counts().sort_index() / df.active_since_days.count()
accumulated_percent = accumulated_percent.cumsum()

fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(accumulated_percent.index, accumulated_percent)
ax.set(xlabel="Active days since", ylabel="Frequency", title="Accumalated distribution of Active days since")
# -

accumulated_percent

# Keep only videos at least one year old (see rationale above).
df = df[df.active_since_days >= 365]
df.shape

# We first expected that older videos would have higher total views, however looking into the box plot of 'Total Views' x 'Active Days Since', there is no clear distinction between those groups. This can be related to many reasons, one of them being the subscriber's growth rate of the channel over time.

# +
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel="Total Views", ylabel="Active Days Since", title="Total Views / Active Days since")
ax = sns.boxplot(x="active_since_days", y="total_views", data=df, ax=ax, palette=['gray', 'blue'], showfliers=False)
# -

# As we noticed in the descriptive statistics of the dataset, the distribution of video duration is skewed to the right.

fig, ax = plt.subplots(figsize=(12, 6))
ax.hist(df.duration_seconds, bins=100)
ax.set(xlabel="Duration seconds", ylabel="Frequency", title="Distribution of duration seconds")

df.reset_index(inplace=True, drop=True)

# ## Feature Engineering
#
# The features 'num_words_uppercase' and 'num_words_lowercase' are very similar to each other; for this reason we deleted them and created a feature representing the percentage of uppercase words, given the total of words in the title.

df['perc_uppercase'] = df.num_words_uppercase / df.num_words
df.drop(['num_words_uppercase', 'num_words_lowercase'], axis=1, inplace=True)

# Snapshot of the non-tf-idf columns, used later to drop them when ranking words.
original_features = df.columns

# To evaluate the **relevance** of the words, we removed the stopwords and calculated the TF-IDF (term frequency weighted by inverse document frequency).

# +
df['title'] = df.title.str.lower()

nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')

# token_pattern keeps word-only tokens (no digits).
vectorizer = TfidfVectorizer(stop_words=stopwords, token_pattern=r"\b[^\d\W]+\b")
# -

# Sparse tf-idf columns joined onto df (index alignment, one column per word).
sdf = pd.DataFrame.sparse.from_spmatrix(vectorizer.fit_transform(df.title), columns=vectorizer.get_feature_names_out())
df = df.join(sdf)

# ## Clustering
#
# We noted before that the videos have different behavior for active_since_days, duration_seconds, and total_views. Therefore we used Kmeans to split the videos into clusters based on these 3 variables.
#
# ### Normalization
# The 3 features selected have very different magnitudes. For this reason, before executing the Kmeans, we will use the Standard Scaler to normalize the data.

X = df[['active_since_days', 'duration_seconds', 'total_views']].copy()
X = StandardScaler().fit_transform(X)

# Using the elbow technique, we could notice that the best number of clusters is 9.

kmeans = KMeans()
elbow = KElbowVisualizer(kmeans, k=(4, 16))
elbow.fit(X)
elbow.show()

# +
# Uncomment the lines below for retraining the model.
# Attention, the cluster names in the steps below will need to be renamed.
# kmeans = KMeans(elbow.elbow_value_)
# kmeans.fit(X)
# with open('../models/kmeans.pkl', 'wb') as file:
#     pickle.dump(kmeans, file)

# NOTE(review): loads a previously fitted model so cluster ids stay stable
# across runs; the hard-coded cluster_names below depend on this pickle.
with open('../models/kmeans.pkl', 'rb') as file:
    kmeans = pickle.load(file)
# -

# Looking into the size of each cluster, we can notice that some clusters are tiny. We will focus our study on the clusters with more than 100 videos.

df['cluster'] = kmeans.predict(X)
df.cluster.value_counts()

# +
MIN_LEN = 100
# NOTE(review): enumerate over the size-per-cluster series assumes cluster
# labels are contiguous 0..k-1 in sorted order (true for KMeans labels).
clusters_len = df.cluster.value_counts().sort_index()
clusters_filter = {i for i, value in enumerate(clusters_len) if value > MIN_LEN}
clusters_filter
# -

filtered_clusters = df[df.cluster.isin(clusters_filter)].copy()

# Now that we filtered the clusters, we can visualize them using the 3 features selected to understand what they represent.

# +
cols_to_plot = ['active_since_days', 'duration_seconds', 'total_views', 'cluster']
g = sns.PairGrid(filtered_clusters[cols_to_plot], hue="cluster", palette="Paired")
g.map_diag(sns.histplot)
g.map_offdiag(sns.scatterplot)
g.add_legend()
# -

# We named each cluster by looking into the characteristics in the chart above.

# +
# Mapping from KMeans label to a human-readable name; labels 1, 3, 6 are
# the tiny clusters filtered out above and are deliberately unnamed.
cluster_names = {
    0: '3 years ',
    2: '1 years',
    4: 'Long duration',
    7: '2 years',
    8: 'Top performers',
    5: '4 years'
}
# df.cluster.map(...) aligns on the index, so this labels only the
# filtered rows correctly.
filtered_clusters['cluster_name'] = df.cluster.map(cluster_names)
# -

# ## Text Mining
#
# For our first exploration of the title, we created the same chart above but evaluated the title features for each cluster.

# +
cols_to_plot = ['num_words', 'num_punctuation', 'num_stopwords', 'avg_word_len', 'contain_digits', 'perc_uppercase', 'cluster_name']
g = sns.PairGrid(filtered_clusters[cols_to_plot], hue="cluster_name", palette='Paired')
g.map_diag(sns.histplot)
g.map_offdiag(sns.scatterplot)
g.add_legend()
# -

# Evaluating the chart above, we can notice some interesting points on how the youtube channel changed the title strategy over time.
#
# ### Insights
# * 1 year videos: more words, more punctuation
# * 3 years videos: more uppercase words
# * 4 years digits: more titles containing digits
# * Top performers: less punctuation
#
# Let's explore these insights.

# +
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel="Number of Words", ylabel="Active Days Since", title="Number of Words / Active Days Since")
# Compare only the newest (1 year) and oldest (4 years) groups.
data = df[df.active_since_days.isin({365, 1460})]
ax = sns.boxplot(x="active_since_days", y="num_words", data=data, ax=ax, palette=['gray', 'blue'])
# -

df.groupby('active_since_days')['contain_digits'].mean()

# +
fig, ax = plt.subplots(figsize=(12, 6))
ax.set(xlabel="Percentage of Uppercase", ylabel="Active Days Since", title="Percentage of uppercase / Active Days since")
data = df
ax = sns.boxplot(x="active_since_days", y="perc_uppercase", data=data, ax=ax, palette=['gray', 'blue'])
# -

# Rank the tf-idf word columns per cluster: drop the metric columns so only
# word scores remain, then show the 10 highest-mean words.
cols_to_drop = list(original_features) + ['cluster_name', 'cluster']
for c in filtered_clusters.cluster_name.unique():
    print(f'Evaluating most relevant words for each cluster: {c}')
    cluster_data = filtered_clusters[filtered_clusters.cluster_name == c].copy()
    cluster_data = cluster_data.drop(cols_to_drop, axis=1)
    print(cluster_data.mean().sort_values()[-10::])
    print('---------------\n')

# ### Insights
# * Long duration has relevant words like: "Live", "Compilation"
# * Comparing 4 years to 1 year: "craft" and "minute" used to be the most relevant words in the title, which changed in the most recent videos.

# ### Are there words correlated to the video performance?

correlation = df.corr()
correlation.sort_values('total_views').total_views

# Looking into the most positively correlated words, we notice that the R score is not high. However, the correlation between the tf-idf of the word "fortune" and total views is higher than "active_since_days." We will explore more the titles with this word below.
df['has_fortune'] = df.fortune > 0  # True when "fortune" has a non-zero tf-idf weight in the title

# Mean total views per (video age, has_fortune) pair.
fortune_metrics = df.groupby(['active_since_days','has_fortune'])['total_views'].mean()
fortune_metrics

# +
fig, ax = plt.subplots(figsize=(12, 6))
# NOTE(review): the axis labels look swapped — x is active_since_days and y is total_views.
ax.set(xlabel="Total Views", ylabel="Active Days Since", title="Total Views / Active Days since")
# Draw the "fortune" videos last (zorder=10) so they sit on top of the gray points.
ax.scatter(df[df.has_fortune == False].active_since_days, df[df.has_fortune == False].total_views,c='gray', zorder=1)
ax.scatter(df[df.has_fortune == True].active_since_days, df[df.has_fortune == True].total_views,c='blue', zorder=10)
# -

# In the chart above, we evaluated the mean values of total_views of videos with the word fortune vs. the other videos in the same year. The videos with the word fortune are considerably higher than the rest.

df[df.fortune > 0][['title','total_views']]

# Looking into the metrics of videos with the word fortune as relevant, we could notice a pattern of videos with the sentence "that will save you a fortune". The mean of total views of those videos is higher than other videos from the same year. In addition, the video with the highest number of views is part of the fortune group, which explains the high mean value of the group. However, the other videos from the group still show a higher number of views.

# Possible improvements for this project:
#
# * We don't know if some videos were promoted
#
notebooks/1.0-initial-data-exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Dataframes # + [markdown] slideshow={"slide_type": "fragment"} # * Dataframes are a restricted sub-type of RDDs. # * Restircing the type allows for more optimization. # + [markdown] slideshow={"slide_type": "subslide"} # * Dataframes store two dimensional data, similar to the type of data stored in a spreadsheet. # * Each column in a dataframe can have a different type. # * Each row contains a `record`. # + [markdown] slideshow={"slide_type": "fragment"} # * Similar to, but not the same as, [pandas dataframes](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe) and [R dataframes](http://www.r-tutor.com/r-introduction/data-frame) # + slideshow={"slide_type": "skip"} from pyspark import SparkContext sc = SparkContext(master="local[4]") sc.version # + slideshow={"slide_type": "skip"} import os import sys from pyspark import SparkContext from pyspark.sql import SQLContext from pyspark.sql.types import Row, StructField, StructType, StringType, IntegerType # %pylab inline # + slideshow={"slide_type": "skip"} # Just like using Spark requires having a SparkContext, using SQL requires an SQLContext sqlContext = SQLContext(sc) sqlContext # + [markdown] slideshow={"slide_type": "slide"} # ### Constructing a DataFrame from an RDD of Rows # Each Row defines it's own fields, the schema is *inferred*. 
# + slideshow={"slide_type": "subslide"}
# One way to create a DataFrame is to first define an RDD from a list of Rows
some_rdd = sc.parallelize([Row(name=u"John", age=19),
                           Row(name=u"Smith", age=23),
                           Row(name=u"Sarah", age=18)])
some_rdd.collect()

# + slideshow={"slide_type": "subslide"}
# The DataFrame is created from the RDD of Rows.
# Infer schema from the first row, create a DataFrame and print the schema
some_df = sqlContext.createDataFrame(some_rdd)
some_df.printSchema()

# + slideshow={"slide_type": "subslide"}
# A dataframe is an RDD of rows plus information on the schema.
# Performing **collect()** on either the RDD or the DataFrame gives the same result.
print(type(some_rdd),type(some_df))
print('some_df =',some_df.collect())
print('some_rdd=',some_rdd.collect())

# + [markdown] slideshow={"slide_type": "slide"}
# ### Defining the Schema explicitly
# The advantage of creating a DataFrame using a pre-defined schema is that it allows the content of the RDD to be simple tuples, rather than rows.

# + slideshow={"slide_type": "subslide"}
# In this case we create the dataframe from an RDD of tuples (rather than Rows) and provide the schema explicitly
another_rdd = sc.parallelize([("John", 19), ("Smith", 23), ("Sarah", 18)])

# Schema with two fields - person_name and person_age
schema = StructType([StructField("person_name", StringType(), False),
                     StructField("person_age", IntegerType(), False)])

# Create a DataFrame by applying the schema to the RDD and print the schema
another_df = sqlContext.createDataFrame(another_rdd, schema)
another_df.printSchema()
# Expected output (both fields were declared non-nullable above):
# root
#  |-- person_name: string (nullable = false)
#  |-- person_age: integer (nullable = false)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Loading DataFrames from disk
# There are many methods to load DataFrames from Disk. Here we will discuss three of these methods
# 1. Parquet
# 2. JSON (on your own)
# 3. CSV (on your own)
#
# In addition, there are APIs for connecting Spark to an external database. We will not discuss this type of connection in this class.

# + [markdown] slideshow={"slide_type": "skip"}
# ### Loading dataframes from JSON files
# [JSON](http://www.json.org/) is a very popular readable file format for storing structured data.
# Among its many uses are **twitter**, `javascript` communication packets, and many others. In fact this notebook file (with the extension `.ipynb`) is in json format. JSON can also be used to store tabular data and can be easily loaded into a dataframe.
# -

# !wget 'https://mas-dse-open.s3.amazonaws.com/Moby-Dick.txt' -P ../../Data/

# + slideshow={"slide_type": "skip"}
# when loading json files you can specify either a single file or a directory containing many json files.
path = "../../Data/people.json"
# !cat $path

# Create a DataFrame from the file(s) pointed to by path
people = sqlContext.read.json(path)
print('people is a',type(people))

# The inferred schema can be visualized using the printSchema() method.
people.show()
people.printSchema()

# + [markdown] slideshow={"slide_type": "skip"}
# ### Exercise: Loading csv files into dataframes
#
# Spark 2.0 includes a facility for reading csv files. In this exercise you are to create similar functionality using your own code.
#
# You are to write a class called `csv_reader` which has the following methods:
#
# * `__init__(self,filepath):` receives as input the path to a csv file. It throws an exception `NoSuchFile` if the file does not exist.
# * `Infer_Schema()` opens the file, reads the first 10 lines (or less if the file is shorter), and infers the schema. The first line of the csv file defines the column names. The following lines should have the same number of columns and all of the elements of the column should be of the same type. The only types allowed are `int`,`float`,`string`. The method infers the types of the columns, checks that they are consistent, and defines a dataframe schema of the form:
# ```python
# schema = StructType([StructField("person_name", StringType(), False),
#                      StructField("person_age", IntegerType(), False)])
# ```
# If everything checks out, the method defines a `self.` variable that stores the schema and returns the schema as its output. If an error is found an exception `BadCsvFormat` is raised.
# * `read_DataFrame()`: reads the file, parses it and creates a dataframe using the inferred schema. If one of the lines beyond the first 10 (i.e. a line that was not read by `InferSchema`) is not parsed correctly, the line is not added to the Dataframe. Instead, it is added to an RDD called `bad_lines`.
# The method returns the DataFrame and the `bad_lines` RDD.

# + [markdown] slideshow={"slide_type": "slide"}
# ### Parquet files

# + [markdown] slideshow={"slide_type": "fragment"}
# * [Parquet](http://parquet.apache.org/) is a popular columnar format.

# + [markdown] slideshow={"slide_type": "fragment"}
# * Spark SQL allows [SQL](https://en.wikipedia.org/wiki/SQL) queries to retrieve a subset of the rows without reading the whole file.

# + [markdown] slideshow={"slide_type": "fragment"}
# * Compatible with HDFS : allows parallel retrieval on a cluster.

# + [markdown] slideshow={"slide_type": "fragment"}
# * Parquet compresses the data in each column.

# + [markdown] slideshow={"slide_type": "skip"}
# ### Spark and Hive
# * Parquet is a **file format** not an independent database server.
# * Spark can work with the [Hive](https://cwiki.apache.org/confluence/display/Hive/Hive+on+Spark%3A+Getting+Started) relational database system that supports the full array of database operations.
# * Hive is compatible with HDFS.
# + slideshow={"slide_type": "skip"} dir='../../Data' parquet_file=dir+"/users.parquet" # !ls $dir # + slideshow={"slide_type": "subslide"} #load a Parquet file print(parquet_file) df = sqlContext.read.load(parquet_file) df.show() # + slideshow={"slide_type": "subslide"} df2=df.select("name", "favorite_color") df2.show() # + slideshow={"slide_type": "subslide"} outfilename="namesAndFavColors.parquet" # !rm -rf $dir/$outfilename df2.write.save(dir+"/"+outfilename) # !ls -ld $dir/$outfilename # + [markdown] slideshow={"slide_type": "skip"} # A new interface object has been added in **Spark 2.0** called **SparkSession**. A spark session is initialized using a `builder`. For example # ```python # spark = SparkSession.builder \ # .master("local") \ # .appName("Word Count") \ # .config("spark.some.config.option", "some-value") \ # .getOrCreate() # ``` # # Using a SparkSession a Parquet file is read [as follows:](http://spark.apache.org/docs/2.1.0/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader.parquet): # ```python # df = spark.read.parquet('python/test_support/sql/parquet_partitioned') # ``` # + [markdown] slideshow={"slide_type": "slide"} # ## Lets have a look at a real-world dataframe # # This dataframe is a small part from a large dataframe (15GB) which stores meteorological data from stations around the world. We will read the dataframe from a zipped parquet file. # + slideshow={"slide_type": "skip"} from os.path import split,join,exists from os import mkdir,getcwd,remove from glob import glob # create directory if needed notebook_dir=getcwd() data_dir=join(split(split(notebook_dir)[0])[0],'Data') weather_dir=join(data_dir,'Weather') if exists(weather_dir): print('directory',weather_dir,'already exists') else: print('making',weather_dir) mkdir(weather_dir) file_index='NY' zip_file='%s.tgz'%(file_index) #the .csv extension is a mistake, this is a pickle file, not a csv file. 
old_files='%s/%s*'%(weather_dir,zip_file[:-3]) for f in glob(old_files): print('removing',f) # !rm -rf {f} # + slideshow={"slide_type": "skip"} command="wget https://mas-dse-open.s3.amazonaws.com/Weather/by_state/%s -P %s "%(zip_file, weather_dir) print(command) # !$command # !ls -lh $weather_dir/$zip_file # - #extracting the parquet file # !tar zxvf {weather_dir}/{zip_file} -C {weather_dir} weather_parquet = join(weather_dir, zip_file[:-3]+'parquet') print(weather_parquet) df = sqlContext.read.load(weather_parquet) df.show(1) # + slideshow={"slide_type": "subslide"} #selecting a subset of the rows so it fits in slide. df.select('station','year','measurement').show(5) # + [markdown] slideshow={"slide_type": "subslide"} # ## Summary # * Dataframes are an efficient way to store data tables. # * All of the values in a column have the same type. # * A good way to store a dataframe in disk is to use a Parquet file. # * Next: Operations on dataframes.
Sections/Section1-Spark-Basics/2.SparkSQL/1.SparkSQL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Центр непрерывного образования # # # Программа «Python для автоматизации и анализа данных» # # *<NAME>, Яндекс.Маркет* # # # Задачки на регулярные выражения # ### Задачка про аббревиатуры # Владимир устроился на работу в одно очень важное место. И в первом же документе он ничего не понял, # там были сплошные ФГУП НИЦ ГИДГЕО, ФГОУ ЧШУ АПК и т.п. Тогда он решил собрать все аббревиатуры, чтобы потом найти их расшифровки на http://sokr.ru/. Помогите ему. # Будем считать аббревиатурой слова только лишь из заглавных букв (как минимум из двух). Если несколько таких слов разделены пробелами, то они # считаются одной аббревиатурой. # **Ввод**: Это курс информатики соответствует ФГОС и ПООП, это подтверждено ФГУ ФНЦ НИИСИ РАН\ # **Вывод**: ФГОС, ПООП, ФГУ, ФНЦ, НИИСИ, РАН import re # Ваше решение s = 'Это курс информатики соответствует ФГОС и ПООП, это подтверждено ФГУ ФНЦ НИИСИ РАН' res = re.findall(f'[А-Я][А-Я ]*[А-Я]', s) print(', '.join(res)) # + active="" # ### Задачка про перевод из camel_case'a в snake_case # - # Мы уже довольно много говорили про то, что в компаниях могут быть конвенции по обозначению переменных. Что, если вы написали код, а в нем переменные названы в Camel Case а вам требуется snake case? Пожалуй, стоит автоматизировать этот процесс. Попробуем написать функцию, которая этот функционал реализует #Camel case to snake case v = 'CamelCaseVar' import re result = v[0] + re.sub('([A-Z]+)', r'_\1', v[1:]) print(result.lower()) # ### Задачка про подсчет количества слов # Слова у нас могут состоять из букв или букв, стоящих вокруг дефиса (во-первых, чуть-чуть, давай-ка). Попробуем это описать регулярным выражением text = ''' - Дельный, что и говорить, Был старик тот самый, Что придумал суп варить На колесах прямо. Суп - во-первых. 
Во-вторых, Кашу в норме прочной. Нет, старик он был старик Чуткий - это точно. ''' words = re.findall(r'([а-яА-Я]+\-[а-яА-Я]+|[а-яА-Я]+)', text) len(words) # ### Задачка про поиск слов на а и на е # Найдите в тексте слова, начинающиеся на а и на е import re # Input. text = "The following example creates an ArrayList with a capacity of 50 elements.\ Four elements are then added to the ArrayList and the ArrayList is trimmed accordingly." words = re.findall(r'\b[aeAE][a-z]*', text) print(words) # **Пример 2** # # Найдите в тексте слова, начинающиеся на а и на е import re # Input. text = ''' Ihr naht euch wieder, schwankende Gestalten, Die früh sich einst dem trüben Blick gezeigt. Versuch’ ich wohl, euch diesmal festzuhalten? Fühl’ ich mein Herz noch jenem Wahn geneigt? ''' words = re.findall(r'\b[aeAE][a-z]*', text) print(words) # ### Задачка про деление текста на предложения # Для простоты будем считать, что: # * каждое предложение начинается с заглавной русской или латинской буквы; # * каждое предложение заканчивается одним из знаков препинания .;!?; # * между предложениями может быть любой непустой набор пробельных символов; # * внутри предложений нет заглавных и точек (нет пакостей в духе «Мы любим творчество А. С. Пушкина)». # # Разделите текст на предложения так, чтобы каждое предложение занимало одну строку. # Пустых строк в выводе быть не должно. import re s = """Mr. Smith bought cheapsite.com for 1.5 million dollars, i.e. he paid a lot for it! \ \ Did he mind? <NAME>. thinks he didn't. In any case, this isn't true... \ Well, with a probability of .9 it isn't.""" sentences = re.sub(r'(?<=[^A-ZА-Я].[.;!?])(\s+|[\s\\]+)(?=[A-ZА-Я])', '\n', s) sentences = sentences.split('\n') for i in sentences: print(i) # ### Давайте разберем реальный пример # # Возьмем перевод книги Идиот, вытащим оттуда текст первой главы, после чего посчитаем количество вхождений слова the. 
# Ссылка 'https://www.gutenberg.org/files/2638/2638-0.txt'

# +
import re
import requests

the_idiot_url = 'https://www.gutenberg.org/files/2638/2638-0.txt'

# Download the full Gutenberg text of "The Idiot" (network call).
raw = requests.get(the_idiot_url).text

# Индекс начала первой главы (index where chapter I starts)
start = re.search(r'\*\*\* START OF THIS PROJECT GUTENBERG EBOOK THE IDIOT \*\*\*', raw).end()
# -

# Индекс конца первой главы (index where chapter I ends).
# Search only the text *after* `start`: the front matter may also contain
# "II", which would otherwise put `end` before `start`.
end = start + re.search(r'II', raw[start:]).start()
end

# ## Про время

# Вовочка подготовил одно очень важное письмо, но везде указал неправильное время.
# Поэтому нужно заменить все вхождения времени на строку (TBD). Время — это строка вида HH:MM:SS или HH:MM, в которой HH — число от 00 до 23, а MM и SS — число от 00 до 59.

# Ввод:
#
#     Уважаемые! Если вы к 09:00 не вернёте
#     чемодан, то уже в 09:00:01 я за себя не отвечаю.
#     PS. С отношением 25:50 всё нормально!
#
# Вывод:
#
#     Уважаемые! Если вы к (TBD) не вернёте
#     чемодан, то уже в (TBD) я за себя не отвечаю.
#     PS. С отношением 25:50 всё нормально!

# +
inp = """Уважаемые! Если вы к 09:00 не вернёте
чемодан, то уже в 09:00:01 я за себя не отвечаю.
PS. С отношением 25:50 всё нормально!"""

tbd = '(TBD)'

import re

# HH is 00-23, MM/SS are 00-59; the seconds group is optional ({1,2}).
# \b on both sides keeps the pattern from matching inside a longer digit
# run (e.g. "109:00"), while invalid times such as 25:50 stay untouched.
res = re.sub(r'\b(?:[01]\d|2[0-3])(?::[0-5]\d){1,2}\b', tbd, inp)
print(res)
# -

# ## Про финансовую отчетность
#
# Владимиру потребовалось срочно запутать финансовую документацию. Но так, чтобы это было обратимо.
# Он не придумал ничего лучше, чем заменить каждое целое число (последовательность цифр) на его куб. Помогите ему.
#
# Ввод:
#
#     Было закуплено 12 единиц техники
#     по 410.37 рублей.
#
# Вывод:
#
#     Было закуплено 1728 единиц техники
#     по 68921000.50653 рублей.
from re import finditer

# Obfuscate the report by replacing every integer (maximal digit run) with
# its cube; "410.37" is treated as the two integers 410 and 37.
s = '''Было закуплено 12 единиц техники
по 410.37 рублей.'''

# Manual approach: record (span, cube) pairs first, then splice the cubes
# back in from right to left so the earlier spans remain valid offsets.
# Raw string for the pattern: '\d' in a plain literal is an invalid escape.
replacement = []
for match in finditer(r'\d+', s):
    replacement.append((match.span(), int(match.group()) ** 3))

chars = list(s)
for (begin, stop), cubed in reversed(replacement):
    chars[begin:stop] = str(cubed)

print("".join(chars))

# +
s = '''Было закуплено 12 единиц техники
по 410.37 рублей.'''

import re


def repl(m):
    """Return the cube of the matched digit run, as a string (for re.sub)."""
    return str(int(m[0]) ** 3)


# Idiomatic approach: re.sub with a replacement callable does the same
# splice in a single pass.
print(re.sub(r'\d+', repl, s))
# -
01 python/lect 3 materials/2020_DPO_3_1_regexp_problems_no_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] id="cac470df-29e7-4148-9bbd-d8b9a32fa570" tags=[] # # Node Classification with Graph Neural Networks # > 공부 # # - toc:true # - branch: master # - badges: true # - comments: false # - author: 최서연 # - categories: [GNN] # - # ref: https://keras.io/examples/graph/gnn_citations/ # - GNN의 간단한 구현 예제 # - 그래프 컨볼루션 레이어 구현 # 사용한 cora dataset # # https://relational.fit.cvut.cz/dataset/CORA # --- # 논문(paper): 2708개 # - 7개의 class # - 'Case_Based', 'Genetic_Algorithms', 'Neural_Networks', 'Probabilistic_Methods', 'Reinforcement_Learning', 'Rule_Learning', 'Theory' # # 인용(cites): 5429개 # - cited paper id # - citing paper id # # 단어(content): 1433개 # ![](https://relational.fit.cvut.cz/assets/img/datasets-generated/CORA.svg) # --- # CORA 데이터 이해 # # - The goal of our experiments is to predict the categories assigned to each paper. # - 각 논문에 할당된 카테고리를 예측하는 게 시험의 목표이다. # - Dataset has been pre-processed removing papers that don't belong to at least one category. Also paper that don't have authors and don't have a title have been discarded. # - 하나의 카테고리에도 속하지 않은 논문들은 사전에 제거되었다. 저자가 없거나 제목 없는 논문들도 제거되었다. # - 즉, 결측값은 없다는 뜻 # - 11881 papers belonging to 80 different categories # - 80개의 다른 카테고리를 가진 11,881개의 논문 # - 16114 authors # - 16,114명의 저자 # - 34648 citations relations between papers # - 논문 사이에 인용관계 24,648 # - 27020 authorship relations between papers and authors # - 논문과 저자 사이의 저작권 관계 27,020 # - Each paper is associated with a vectorial representation containing its title represented as bag-of-words with TF-IDF weights. # - 각 논문은 벡터 표현되어 있다. 
# - 목표 # - link-prediction problem: predict citation relations between papers or authorship relations with authors # - 연결 예측 문제: 논문 간의 인용관계 예측 또는 저자 간의 저작권 관계 예측 # - multi-label classification problem: predict the categories assigned to each paper # - 다중 레이블 분류 문제: 각 논문에 관련된 카테고리 예측 # - The Cora dataset consists of 2708 scientific publications classified into one of seven classes. The citation network consists of 5429 links. Each publication in the dataset is described by a 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary. The dictionary consists of 1433 unique words. # - Cora는 7개의 class에서 분류된 2,708개의 과학 출판물로 구성되어 있다. 이 인용 network는 5,429개의 연결로 구성되어 있고, 각 출판물은 사전에서 부합하는 단어의 존재와 부재를 나타내는 valued 단어 벡터로 0/1로써 나타난다. 사전은 1,433개의 unique 단어들로 구성되어 있다. # # - The Cora dataset consists of 2,708 scientific papers classified into one of seven classes. The citation network consists of 5,429 links. Each paper has a binary word vector of size 1,433, indicating the presence of a corresponding word. import os import pandas as pd import numpy as np import networkx as nx import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers # ### Prepare the Dataset # The dataset has two tap-separated files: cora.cites and cora.content. # - cora.cites와 cora.content의 tab으로 구분된 파일들이다. # # The cora.cites includes the citation records with two columns: cited_paper_id (target) and citing_paper_id (source). # - cora.cites는 cited_paper_id (target)와 citing_paper_id (source)의 두 개의 열이 있는 인용 레코드가 포함된다. # # The cora.content includes the paper content records with 1,435 columns: paper_id, subject, and 1,433 binary features. # - cora.content는 paper_id, subject, 1,433 binary features이 1,435개의 열이 포함된 논문 내용 기록이 포함된다. 
zip_file = keras.utils.get_file(
    fname="cora.tgz",
    origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
    extract=True,
)
# get_file returns the path of the downloaded archive; the extracted files
# live in a sibling "cora" directory.
data_dir = os.path.join(os.path.dirname(zip_file), "cora")

# cora.cites: one citation per row, tab separated.
citations = pd.read_csv(
    os.path.join(data_dir, "cora.cites"),
    sep="\t",
    header=None,
    names=["target", "source"],
)
print("Citations shape:", citations.shape)

# The target column includes the paper ids cited by the paper ids in the source column.

citations.sample(frac=1).head()

# cora.content columns: paper_id, 1,433 binary term indicators, then the subject label.
column_names = ["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"]
papers = pd.read_csv(
    os.path.join(data_dir, "cora.content"), sep="\t", header=None, names=column_names,
)
print("Papers shape:", papers.shape)

# Now we display a sample of the papers DataFrame. The DataFrame includes the paper_id and the subject columns, as well as 1,433 binary columns representing whether a term exists in the paper or not.

# DataFrame.sample(n): draws n rows at random from the data.

print(papers.sample(5).T)

print(papers.subject.value_counts())

# We convert the paper ids and the subjects into zero-based indices.

# +
class_values = sorted(papers["subject"].unique())
# NOTE: the comprehension variable `id` shadows the builtin of the same
# name (only inside the comprehension's own scope).
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}

# Remap ids/labels in place so later graph code can use dense 0..N-1 indices.
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
# -

# The `isin` method returns, element-wise, whether each value is contained in the given values (as booleans).

# Now let's visualize the citation graph. Each node in the graph represents a paper, and the color of the node corresponds to its subject. Note that we only show a sample of the papers in the dataset.

plt.figure(figsize=(10, 10))
colors = papers["subject"].tolist()  # NOTE(review): unused — node_color below uses `subjects`
# Plot only 1,500 random citation edges so the drawing stays readable.
cora_graph = nx.from_pandas_edgelist(citations.sample(n=1500))
subjects = list(papers[papers["paper_id"].isin(list(cora_graph.nodes))]["subject"])
nx.draw_spring(cora_graph, node_size=15, node_color=subjects)

# ### Split the dataset into stratified train and test sets

# To draw a random sample of a given fraction of a DataFrame, pass a float between 0 and 1 to the `frac` parameter, e.g. `df.sample(frac=0.5)`.

# +
train_data, test_data = [], []

# Stratify by subject: sample ~50% of each class for training.
# NOTE(review): np.random is unseeded, so the split differs between runs.
for _, group_data in papers.groupby("subject"):
    # Select around 50% of the dataset for training.
    random_selection = np.random.rand(len(group_data.index)) <= 0.5
    train_data.append(group_data[random_selection])
    test_data.append(group_data[~random_selection])

# sample(frac=1) shuffles the concatenated rows.
train_data = pd.concat(train_data).sample(frac=1)
test_data = pd.concat(test_data).sample(frac=1)

print("Train data shape:", train_data.shape)
print("Test data shape:", test_data.shape)
# -

# ### Implement Train and Evaluate Experiment
# Train on the training split, then evaluate on the test split.

# Shared hyperparameters used by the models in this notebook.
hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
num_epochs = 300
batch_size = 256

# This function compiles and trains an input model using the given training data.
# - Optimizer: Adam, learning rate 0.01
# - Loss (`losses.SparseCategoricalCrossentropy()`) (https://www.tensorflow.org/api_docs/python/tf/keras/losses/SparseCategoricalCrossentropy)
#     - Use this crossentropy loss function when there are two or more label classes. We expect labels to be provided as integers. If you want to provide labels using one-hot representation, please use CategoricalCrossentropy loss. There should be # classes floating point values per feature for y_pred and a single floating point value per feature for y_true.
# - 두 개 이상의 레이블 클래스가 있는 경우 이 crossentropy loss function을 사용한다. 레이블은 정수로 제공될 것으로 기대힌다. one-hot representation을 사용하여 레이블을 나타내려면 CategoricalCrossentropy loss를 사용한다. # classes에 대한 기능당 부동 소수점 값 y_pred 과 y_true에 대한 기능당 단일 부동 소수점 값이 있어야 한다. # - `from_logits` # - Whether y_pred is expected to be a logits tensor. By default, we assume that y_pred encodes a probability distribution. # - y_pred가 logits tensor로 예상되는지의 여부다. 기본설정은 y_pred가 확률분포라고 (from_logit=False) 되어 있다. # - Both, categorical cross entropy and sparse categorical cross entropy have the same loss function which you have mentioned above. The only difference is the format in which you mention $Y_i$ (i,e true labels).(https://stats.stackexchange.com/questions/326065/cross-entropy-vs-sparse-cross-entropy-when-to-use-one-over-the-other) # - If your $Y_i$'s are one-hot encoded, use categorical_crossentropy. Examples (for a 3-class classification): $[1,0,0] , [0,1,0], [0,0,1]$ # - But if your $Y_i$'s are integers, use sparse_categorical_crossentropy. Examples for above 3-class classification problem: $[1] , [2], [3]$ # - metric(`tf.keras.metrics.SparseCategoricalAccuracy()`) # - tf.keras.metrics.SparseCategoricalAccuracy(name='sparse_categorical_accuracy', dtype=None) # - name $\to$ (Optional) string name of the metric instance. # - callback(`keras.callbacks.EarlyStopping()`) (https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping) (Stop training when a monitored metric has stopped improving. 모니터된 metric이 improve를 멈출때 훈련이 중지된다.) # - tf.keras.callbacks.EarlyStopping(monitor='val_loss',min_delta=0,patience=0,verbose=0,mode='auto',baseline=None,restore_best_weights=False) # - monitor Quantity to be monitored. # - patience Number of epochs with no improvement after which training will be stopped. # - restore_best_weights Whether to restore model weights from the epoch with the best value of the monitored quantity. If False, the model weights obtained at the last step of training are used. 
# An epoch will be restored regardless of the performance relative to the baseline. If no epoch improves on baseline, training will run for patience epochs and restore weights from the best epoch in that set.
# - (translated note) restore_best_weights asks whether to restore the weights: if False, the model keeps the weights from the last training step; with True, the weights from the best epoch (w.r.t. the monitored metric) are restored.

def run_experiment(model, x_train, y_train):
    """Compile `model`, train it on (x_train, y_train) and return the Keras History.

    Uses the module-level hyperparameters (learning_rate, num_epochs,
    batch_size); 15% of the training data is held out for validation and
    training stops early when validation accuracy stops improving.
    """
    # Compile the model.  from_logits=True because the models in this
    # notebook output raw logits, not softmax probabilities.
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate),
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
    )
    # Create an early stopping callback; "val_acc" matches the metric
    # name "acc" declared just above.
    early_stopping = keras.callbacks.EarlyStopping(
        monitor="val_acc", patience=50, restore_best_weights=True
    )
    # Fit the model.
    history = model.fit(
        x=x_train,
        y=y_train,
        epochs=num_epochs,
        batch_size=batch_size,
        validation_split=0.15,
        callbacks=[early_stopping],
    )

    return history


# This function displays the loss and accuracy curves of the model during training.

def display_learning_curves(history):
    """Plot training/validation loss and accuracy curves side by side."""
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))

    ax1.plot(history.history["loss"])
    ax1.plot(history.history["val_loss"])
    # NOTE(review): the second curve is the *validation* split (from
    # validation_split=0.15), so the legend label "test" is misleading;
    # kept as-is since it is a runtime plot string.
    ax1.legend(["train", "test"], loc="upper right")
    ax1.set_xlabel("Epochs")
    ax1.set_ylabel("Loss")

    ax2.plot(history.history["acc"])
    ax2.plot(history.history["val_acc"])
    ax2.legend(["train", "test"], loc="upper right")
    ax2.set_xlabel("Epochs")
    ax2.set_ylabel("Accuracy")
    plt.show()


# + [markdown] tags=[]
# ### Implement Feedforward Network (FFN) Module
# - Build the feedforward (FFN) module.
# -

# - Activation function (`tf.nn.gelu`) (https://www.tensorflow.org/api_docs/python/tf/nn/gelu) (Gaussian Error Linear Unit (GELU) activation function.)
#     - tf.nn.gelu(features, approximate=False, name=None)
#     - Gaussian error linear unit (GELU) computes $x * P(X <= x)$, where $P(X) \sim N(0, 1)$. The (GELU) nonlinearity weights inputs by their value, rather than gates inputs by their sign as in ReLU.
# - (translated) GELU, the Gaussian Error Linear Unit: where ReLU gates its input by sign, GELU weights the input by its value — x times P(X <= x) for X ~ N(0, 1).


def create_ffn(hidden_units, dropout_rate, name=None):
    """Stack (BatchNormalization -> Dropout -> Dense+GELU) once per entry of
    `hidden_units` and return the stack as a `keras.Sequential` block.

    This is the basic building unit of both the baseline and the GNN models.
    """
    fnn_layers = []

    for units in hidden_units:
        fnn_layers.append(layers.BatchNormalization())
        fnn_layers.append(layers.Dropout(dropout_rate))
        fnn_layers.append(layers.Dense(units, activation=tf.nn.gelu))

    return keras.Sequential(fnn_layers, name=name)


# ### Build a Baseline Neural Network Model

# +
# NOTE(review): `feature_names` is a set; pandas indexing with a set is
# deprecated (rejected by recent pandas versions) and set iteration order is
# not guaranteed across runs — consider sorting it into a list.
feature_names = set(papers.columns) - {"paper_id", "subject"}
num_features = len(feature_names)
num_classes = len(class_idx)

# Create train and test features as a numpy array.
x_train = train_data[feature_names].to_numpy()
x_test = test_data[feature_names].to_numpy()
# Create train and test targets as a numpy array.
y_train = train_data["subject"]
y_test = test_data["subject"]
# -

# Implement a baseline classifier
# - We add five FFN blocks with skip connections, so that we generate a baseline model with roughly the same number of parameters as the GNN models to be built later.
# - (translated) Adding five FFN blocks with skip connections gives the baseline roughly the same parameter count as the GNN built later.

# (translated) `hidden_units` was defined above as [32, 32]; `num_classes` is the number of class indices.

# +
def create_baseline_model(hidden_units, num_classes, dropout_rate=0.2):
    """Baseline MLP: one FFN block followed by four residual FFN blocks.

    Returns an uncompiled `keras.Model` mapping `num_features` inputs to
    `num_classes` raw logits (no softmax; the loss uses `from_logits=True`).
    Reads the notebook global `num_features`.
    """
    inputs = layers.Input(shape=(num_features,), name="input_features")
    x = create_ffn(hidden_units, dropout_rate, name=f"ffn_block1")(inputs)
    for block_idx in range(4):
        # Create an FFN block.
        x1 = create_ffn(hidden_units, dropout_rate, name=f"ffn_block{block_idx + 2}")(x)
        # Add skip connection.
        x = layers.Add(name=f"skip_connection{block_idx + 2}")([x, x1])
    # Compute logits.
    logits = layers.Dense(num_classes, name="logits")(x)
    # Create the model.
    return keras.Model(inputs=inputs, outputs=logits, name="baseline")


baseline_model = create_baseline_model(hidden_units, num_classes, dropout_rate)
baseline_model.summary()

# + tags=[]
history = run_experiment(baseline_model, x_train, y_train)
# -

display_learning_curves(history)

_, test_accuracy = baseline_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")

# + [markdown] tags=[]
# ### Examine the baseline model predictions
# -

# Let's create new data instances by randomly generating binary word vectors with respect to the word presence probabilities.
# - (translated) Generate new instances as random binary word vectors sampled according to each word's empirical presence probability.

# +
def generate_random_instances(num_instances):
    """Sample `num_instances` binary word vectors.

    Bit j of each instance is 1 with probability equal to the empirical
    frequency of word j in the notebook-global `x_train`.
    """
    token_probability = x_train.mean(axis=0)
    instances = []
    for _ in range(num_instances):
        probabilities = np.random.uniform(size=len(token_probability))
        instance = (probabilities <= token_probability).astype(int)
        instances.append(instance)

    return np.array(instances)


def display_class_probabilities(probabilities):
    """Pretty-print per-class probabilities; rows of `probabilities` are instances.

    Reads the notebook-global `class_values` for the class names.
    """
    for instance_idx, probs in enumerate(probabilities):
        print(f"Instance {instance_idx + 1}:")
        for class_idx, prob in enumerate(probs):
            print(f"- {class_values[class_idx]}: {round(prob * 100, 2)}%")
# -

# - (translated) `tf.convert_to_tensor(logits)` converts the logits array into a tensor.

new_instances = generate_random_instances(num_classes)
logits = baseline_model.predict(new_instances)
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)

# ### Build a Graph Neural Network Model

# The graph data is represented by the graph_info tuple, which consists of the following three elements:
#
# 1. node_features: This is a [num_nodes, num_features] NumPy array that includes the node features. In this dataset, the nodes are the papers, and the node_features are the word-presence binary vectors of each paper.
# - (translated) A NumPy array of node features; the nodes are the papers and the features are each paper's binary word-presence vector.
# 2. 
edges: This is [num_edges, num_edges] NumPy array representing a sparse adjacency matrix of the links between the nodes. In this example, the links are the citations between the papers.
# - NOTE(review): as constructed below, `edges` actually has shape [2, num_edges] (row 0 = source, row 1 = target), not [num_edges, num_edges].
# - (translated) A NumPy array encoding the sparse adjacency structure; here a link is a citation between two papers.
# 3. edge_weights (optional): This is a [num_edges] NumPy array that includes the edge weights, which quantify the relationships between nodes in the graph. In this example, there are no weights for the paper citations.
# - (translated) Optional per-edge weights quantifying the node relationships; the citation graph itself is unweighted.

# **sparse adjacency matrix**
# - (translated) The adjacency structure stored as an edge list rather than a dense [num_nodes, num_nodes] matrix.

# +
# Create an edges array (sparse adjacency matrix) of shape [2, num_edges].
edges = citations[["source", "target"]].to_numpy().T
# Create an edge weights array of ones.
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features].
# NOTE(review): `feature_names` is a set; its iteration order must match the
# order used to build x_train/x_test above (same set object, so consistent
# within this run, but fragile across runs — TODO confirm / sort it).
node_features = tf.cast(
    papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.dtypes.float32
)
# Create graph info tuple with node_features, edges, and edge_weights.
graph_info = (node_features, edges, edge_weights)

print("Edges shape:", edges.shape)
print("Nodes shape:", node_features.shape)

# + [markdown] tags=[]
# ### Implement a graph convolution layer
# -

# (Author note, translated: "what is a GRU?" — see the GRU summary in the next markdown cell.)

# We implement a graph convolution module as a Keras Layer. Our GraphConvLayer performs the following steps:
#
# 1. Prepare: The input node representations are processed using a FFN to produce a message. You can simplify the processing by only applying linear transformation to the representations.
# - (translated) Node representations are passed through an FFN to produce messages; a plain linear transform would also do.
# 2. Aggregate: The messages of the neighbours of each node are aggregated with respect to the edge_weights using a permutation invariant pooling operation, such as sum, mean, and max, to prepare a single aggregated message for each node. See, for example, tf.math.unsorted_segment_sum APIs used to aggregate neighbour messages. 
# - 각 노드들의 이웃들의 메세지는 각 노드에 대해 단일 집계된 메세지를 준비하기 위해서 sum, mean, max 같은 순열 불변 풀링 operation을 사용해 edge_weights를 반영하여 집계한다. 예를 들어, tf.math.unsorted_segment_sum API는 이웃 메세지들을 집계하는 데 사용된다.
# 3. Update: The node_repesentations and aggregated_messages—both of shape [num_nodes, representation_dim]— are combined and processed to produce the new state of the node representations (node embeddings). If combination_type is gru, the node_repesentations and aggregated_messages are stacked to create a sequence, then processed by a GRU layer. Otherwise, the node_repesentations and aggregated_messages are added or concatenated, then processed using a FFN.
# - node_repesentations 과 aggregated_messages는 둘 다 [num_nodes, representation_dim] 모양이며, 노드 표현(노드 임베딩)의 새로운 상태를 만들기 위해 결합되고 처리된다. 만일 combination type이 gru라면, node_repesentations 과 aggregated_messages는 시퀀스를 만들기 위해 쌓인 뒤 GRU 층에 의해 처리된다. 그렇지 않으면 node_repesentations 과 aggregated_messages가 더해지거나 연결된 뒤 FFN을 사용하여 처리된다.
# ---
# GRU; Gated Recurrent Unit 게이트 순환 유닛(https://keras.io/api/layers/recurrent_layers/gru/)
# - LSTM에서의 장기 의존성 문제의 해결책은 유지하면서 은닉 상태 업데이트 계산을 줄임
# - 학습 속도는 빠르지만 비슷한 성능을 보인다.
# - 3개의 게이트(출력, 입력, 삭제)가 존재하는 LSTM
# - 2개의 게이트(업데이트, 리셋)만 존재하는 GRU
# ---
# `*args`
# - argument. 인자로 받음.
# - 함수 내에 몇 개의 인자로 받을지 확실하지 않을때 사용
#
# `**kwargs`
# - keyword argument, dictionary 형태 {'keyword': value}로 함수 내부에 전달
#
# $\star$순서
# ```python
# def f(일반변수, *args, **kwargs):
#     ...
# ```
# ---
# `super(OtherClass, self).__init__()`
# - 부모 클래스(OtherClass)의 `__init__`을 호출하는 방법
# ---
# Hyperbolic Tangent = Hyperbolic Sine / Hyperbolic Cosine
# $$\tanh z = \frac{\sinh z}{\cosh z} = \frac{e^z - e^{-z}}{e^z + e^{-z}} = \frac{e^{2z} -1}{e^{2z} + 1}$$
# ![](https://upload.wikimedia.org/wikipedia/commons/thumb/7/76/Sinh_cosh_tanh.svg/256px-Sinh_cosh_tanh.svg.png)
# ---
# `tf.expand_dims(input, axis=...)` (https://www.tensorflow.org/api_docs/python/tf/expand_dims)
# - 배열의 차원을 늘려준다.
# - axis는 몇 번째 차원을 늘릴 것인지. 
# ---
# `tf.math.unsorted_segment_max` (https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_max)
# - (translated) Computes the per-segment maximum.
#
# `tf.math.unsorted_segment_mean`
# - (translated) Computes the per-segment mean.
#
# `tf.math.unsorted_segment_sum`
# - (translated) Computes the per-segment sum.
# ---


class GraphConvLayer(layers.Layer):
    """Message-passing graph convolution: prepare -> aggregate -> update.

    NOTE(review): `__init__` creates the GRU update function only when
    `combination_type == "gated"`, while `update()` branches on
    `combination_type == "gru"` (and raises ValueError for "gated").
    As written neither spelling yields a working GRU path — the two
    strings should be unified.
    """

    def __init__(
        self,
        hidden_units,
        dropout_rate=0.2,
        aggregation_type="mean",
        combination_type="concat",
        normalize=False,
        *args,
        **kwargs,
    ):
        super(GraphConvLayer, self).__init__(*args, **kwargs)

        self.aggregation_type = aggregation_type
        self.combination_type = combination_type
        self.normalize = normalize

        # FFN that turns neighbour representations into messages.
        self.ffn_prepare = create_ffn(hidden_units, dropout_rate)
        if self.combination_type == "gated":
            # NOTE(review): `hidden_units` is a list in this notebook but
            # keras.layers.GRU expects an integer `units` — confirm before
            # relying on this branch.
            self.update_fn = layers.GRU(
                units=hidden_units,
                activation="tanh",
                recurrent_activation="sigmoid",
                dropout=dropout_rate,
                return_state=True,
                recurrent_dropout=dropout_rate,
            )
        else:
            self.update_fn = create_ffn(hidden_units, dropout_rate)

    def prepare(self, node_repesentations, weights=None):
        """Produce per-edge messages; scale each message by its edge weight if given."""
        # node_repesentations shape is [num_edges, embedding_dim].
        messages = self.ffn_prepare(node_repesentations)
        if weights is not None:
            messages = messages * tf.expand_dims(weights, -1)
        return messages

    def aggregate(self, node_indices, neighbour_messages):
        """Pool each node's incoming messages with the configured reduction."""
        # node_indices shape is [num_edges].
        # neighbour_messages shape: [num_edges, representation_dim].
        # NOTE(review): num_segments is inferred from the largest index seen in
        # node_indices; trailing nodes with no incoming edge would be dropped,
        # breaking the residual add in the caller — assumes the dataset covers
        # every node index (TODO confirm).
        num_nodes = tf.math.reduce_max(node_indices) + 1
        if self.aggregation_type == "sum":
            aggregated_message = tf.math.unsorted_segment_sum(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        elif self.aggregation_type == "mean":
            aggregated_message = tf.math.unsorted_segment_mean(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        elif self.aggregation_type == "max":
            aggregated_message = tf.math.unsorted_segment_max(
                neighbour_messages, node_indices, num_segments=num_nodes
            )
        else:
            raise ValueError(f"Invalid aggregation type: {self.aggregation_type}.")

        return aggregated_message

    def update(self, node_repesentations, aggregated_messages):
        """Combine current representations with aggregated messages to produce embeddings."""
        # node_repesentations shape is [num_nodes, representation_dim].
        # aggregated_messages shape is [num_nodes, representation_dim].
        if self.combination_type == "gru":
            # Create a sequence of two elements for the GRU layer.
            h = tf.stack([node_repesentations, aggregated_messages], axis=1)
        elif self.combination_type == "concat":
            # Concatenate the node_repesentations and aggregated_messages.
            h = tf.concat([node_repesentations, aggregated_messages], axis=1)
        elif self.combination_type == "add":
            # Add node_repesentations and aggregated_messages.
            h = node_repesentations + aggregated_messages
        else:
            raise ValueError(f"Invalid combination type: {self.combination_type}.")

        # Apply the processing function.
        node_embeddings = self.update_fn(h)
        if self.combination_type == "gru":
            # Keep only the last element of the length-2 sequence output.
            node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]

        if self.normalize:
            node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
        return node_embeddings

    def call(self, inputs):
        """Process the inputs to produce the node_embeddings.

        inputs: a tuple of three elements: node_repesentations, edges, edge_weights.
        Returns: node_embeddings of shape [num_nodes, representation_dim].
        """
        node_repesentations, edges, edge_weights = inputs
        # Get node_indices (source) and neighbour_indices (target) from edges.
        node_indices, neighbour_indices = edges[0], edges[1]
        # neighbour_repesentations shape is [num_edges, representation_dim].
        neighbour_repesentations = tf.gather(node_repesentations, neighbour_indices)

        # Prepare the messages of the neighbours.
        neighbour_messages = self.prepare(neighbour_repesentations, edge_weights)
        # Aggregate the neighbour messages.
        aggregated_messages = self.aggregate(node_indices, neighbour_messages)
        # Update the node embedding with the neighbour messages.
        return self.update(node_repesentations, aggregated_messages)


# The GNN classification model follows the Design Space for Graph Neural Networks approach, as follows:
#
# 1. Apply preprocessing using FFN to the node features to generate initial node representations.
# - (translated) Prepare initial node representations by running the node features through an FFN.
# 2. Apply one or more graph convolutional layer, with skip connections, to the node representation to produce node embeddings.
# - (translated) Apply one or more graph convolution layers with skip connections to produce node embeddings.
# 3. Apply post-processing using FFN to the node embeddings to generat the final node embeddings.
# - (translated) Post-process the node embeddings with an FFN to obtain the final embeddings.
# 4. Feed the node embeddings in a Softmax layer to predict the node class.
# - (translated) Feed the node embeddings into a softmax layer to predict the node class.
#
# Each graph convolutional layer added captures information from a further level of neighbours. However, adding many graph convolutional layer can cause oversmoothing, where the model produces similar embeddings for all the nodes.
# - (translated) Each extra graph convolution layer captures information from one more hop of neighbours, but stacking many of them can cause oversmoothing, where all nodes end up with similar embeddings.
#
# Note that the graph_info passed to the constructor of the Keras model, and used as a property of the Keras model object, rather than input data for training or prediction. The model will accept a batch of node_indices, which are used to lookup the node features and neighbours from the graph_info. 
# - graph_info는 케라스 모델의 condtructor에게 전달되며, 훈련이나 예측을 위한 입력 데이터보다는 케라스모델 개체의 속성으로 사용된다. 모델은 graph_info에서 노드 특징과 이웃을 lookup하는데 사용되는 node_indices의 배치를 수용한다. class GNNNodeClassifier(tf.keras.Model): def __init__( self, graph_info, num_classes, hidden_units, aggregation_type="sum", combination_type="concat", dropout_rate=0.2, normalize=True, *args, **kwargs, ): super(GNNNodeClassifier, self).__init__(*args, **kwargs) # Unpack graph_info to three elements: node_features, edges, and edge_weight. node_features, edges, edge_weights = graph_info self.node_features = node_features self.edges = edges self.edge_weights = edge_weights # Set edge_weights to ones if not provided. if self.edge_weights is None: self.edge_weights = tf.ones(shape=edges.shape[1]) # Scale edge_weights to sum to 1. self.edge_weights = self.edge_weights / tf.math.reduce_sum(self.edge_weights) # Create a process layer. self.preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess") # Create the first GraphConv layer. self.conv1 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name="graph_conv1", ) # Create the second GraphConv layer. self.conv2 = GraphConvLayer( hidden_units, dropout_rate, aggregation_type, combination_type, normalize, name="graph_conv2", ) # Create a postprocess layer. self.postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess") # Create a compute logits layer. self.compute_logits = layers.Dense(units=num_classes, name="logits") def call(self, input_node_indices): # Preprocess the node_features to produce node representations. x = self.preprocess(self.node_features) # Apply the first graph conv layer. x1 = self.conv1((x, self.edges, self.edge_weights)) # Skip connection. x = x1 + x # Apply the second graph conv layer. x2 = self.conv2((x, self.edges, self.edge_weights)) # Skip connection. x = x2 + x # Postprocess node embedding. x = self.postprocess(x) # Fetch node embeddings for the input node_indices. 
node_embeddings = tf.gather(x, input_node_indices) # Compute logits return self.compute_logits(node_embeddings) # + [markdown] tags=[] # ### Train the GNN model # + [markdown] tags=[] # Let's test instantiating and calling the GNN model. Notice that if you provide N node indices, the output will be a tensor of shape [N, num_classes], regardless of the size of the graph. # - GNN 모델을 인스턴스화하고 호출하는 것을 테스트한다. N개의 node indices를 주면 결과는 그래프의 크기에 관계없이 n by class수 모양의 tensor가 될 것이다. # + gnn_model = GNNNodeClassifier( graph_info=graph_info, num_classes=num_classes, hidden_units=hidden_units, dropout_rate=dropout_rate, name="gnn_model", ) print("GNN output shape:", gnn_model([1, 10, 100])) gnn_model.summary() # + jupyter={"outputs_hidden": true} tags=[] x_train = train_data.paper_id.to_numpy() history = run_experiment(gnn_model, x_train, y_train) # - # Note that we use the standard supervised cross-entropy loss to train the model. However, we can add another self-supervised loss term for the generated node embeddings that makes sure that neighbouring nodes in graph have similar representations, while faraway nodes have dissimilar representations. # - 모델 학습을 위해 cross-entropy loss 이용. 하지만 생성된 node embedding에 대해 또다른 자체 supervised loss term을 추가할 수 있다. 여기서 멀리 있는 노드들은 서로다른 표현을 가지는동안 그래프에 이웃한 노드들은 유사한 표현을 가지고 있다. # 임베딩하는 함수가 representation이라고 보면 될듯 display_learning_curves(history) # Now we evaluate the GNN model on the test data split. The results may vary depending on the training sample, however the GNN model always outperforms the baseline model in terms of the test accuracy. # - 분할한 test 데이터에 GNN모델 평가. 결과는 훈련 샘플에 따라 달라질 수 있지만 GNN 모델은 항상 테스트 정확도의 면에서 기준모델을 능가한다. x_test = test_data.paper_id.to_numpy() _, test_accuracy = gnn_model.evaluate(x=x_test, y=y_test, verbose=0) print(f"Test accuracy: {round(test_accuracy * 100, 2)}%") # Examine the GNN model predictions # # Let's add the new instances as nodes to the node_features, and generate links (citations) to existing nodes. 
# - node_feature에 새로운 인스턴스 추가하고 기존노드에 인용연결 생성 # + # First we add the N new_instances as nodes to the graph # by appending the new_instance to node_features. num_nodes = node_features.shape[0] new_node_features = np.concatenate([node_features, new_instances]) # Second we add the M edges (citations) from each new node to a set # of existing nodes in a particular subject new_node_indices = [i + num_nodes for i in range(num_classes)] new_citations = [] for subject_idx, group in papers.groupby("subject"): subject_papers = list(group.paper_id) # Select random x papers specific subject. selected_paper_indices1 = np.random.choice(subject_papers, 5) # Select random y papers from any subject (where y < x). selected_paper_indices2 = np.random.choice(list(papers.paper_id), 2) # Merge the selected paper indices. selected_paper_indices = np.concatenate( [selected_paper_indices1, selected_paper_indices2], axis=0 ) # Create edges between a citing paper idx and the selected cited papers. citing_paper_indx = new_node_indices[subject_idx] for cited_paper_idx in selected_paper_indices: new_citations.append([citing_paper_indx, cited_paper_idx]) new_citations = np.array(new_citations).T new_edges = np.concatenate([edges, new_citations], axis=1) # - # Now let's update the node_features and the edges in the GNN model. # + print("Original node_features shape:", gnn_model.node_features.shape) print("Original edges shape:", gnn_model.edges.shape) gnn_model.node_features = new_node_features gnn_model.edges = new_edges gnn_model.edge_weights = tf.ones(shape=new_edges.shape[1]) print("New node_features shape:", gnn_model.node_features.shape) print("New edges shape:", gnn_model.edges.shape) logits = gnn_model.predict(tf.convert_to_tensor(new_node_indices)) probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy() display_class_probabilities(probabilities) # -
_notebooks/2022-06-01-GNN-cora.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sbooeshaghi/azucar/blob/main/analysis/293T/obs2/imports.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="MeH1Ws8_PHVE" outputId="78a3943d-beb6-4876-937a-bff62fea2b21"
# Install third-party dependencies (ipfn provides iterative proportional fitting).
# !pip install --quiet -U upsetplot scikit-learn git+http://github.com/dirguis/ipfn@master

# + colab={"base_uri": "https://localhost:8080/"} id="3NA8IqSsPNrh" outputId="1bb813f1-ef98-481b-c716-2b9dbadbd3f9"
# !git clone https://github.com/sbooeshaghi/azucar.git

# + id="w5LeEhPfP8GI"
import os
from scipy.io import mmread, mmwrite
import pandas as pd
import numpy as np
from collections import defaultdict

# + colab={"base_uri": "https://localhost:8080/"} id="O6HmJWigPxct" outputId="9f4de681-f67e-4459-fa6f-fb945c82f85a"
# Paths to the count matrix and its barcode/gene annotations for this sample.
sample = "293T"
observation = "obs2"

base_data = f"azucar/analysis/{sample}/{observation}/out"
base_mark = f"azucar/analysis/{sample}/{observation}/assign"

matrix_fn = os.path.join(base_data, "matrix.mtx")
genes_fn = os.path.join(base_data, "genes.txt")
barcodes_fn = os.path.join(base_data, "barcodes.txt")

# !gunzip $base_data/*.gz

# + id="Hsrhjm4UZUXH"
# Remove intermediate files from previous runs of this notebook.
# !rm *.mtx *.txt *.ec

# + id="SGttZ8vDQHGf"
from azucar.analysis.scripts.mx_sanitize import mx_sanitize
from azucar.analysis.scripts.mx_filter import mx_filter, knee, gmm
from azucar.analysis.scripts.mx_norm import mx_norm
from azucar.analysis.scripts.mx_select import mx_select
from azucar.analysis.scripts.mx_extract import mx_extract
from azucar.analysis.scripts.mx_assign import mx_assign
from azucar.analysis.scripts.mx_diff import mx_diff
from azucar.analysis.scripts.ec_index import ec_index
from azucar.analysis.scripts.ec_matrix import ec_matrix
from azucar.analysis.scripts.ec_merge import ec_merge
from azucar.analysis.scripts.utils import read_str_list, read_markers_str, get_marker_centroids, read_markers_ec, write_markers

# + id="SprEGo7hQiOW"
markers_fn = os.path.join(base_mark, "markers.txt")

# + colab={"base_uri": "https://localhost:8080/"} id="WXTyvfQ6Sc8O" outputId="8bc7cdc5-e49c-4e43-ddd8-5a30fb013755"
# drop barcodes and genes that sum to zero, update barcodes and genes file
mx_sanitize(matrix_fn, barcodes_fn, genes_fn, "./san.matrix.mtx", "./san.barcodes.txt", "./san.genes.txt")

# knee plot gmm filter
mx_filter("./san.matrix.mtx", "./san.barcodes.txt", "./san.fil.matrix.mtx", "./san.fil.barcodes.txt", comps=[3,3])#, select_axis=2)

# dbco is the last column of the filtered matrix.
dbco = mmread("./san.fil.matrix.mtx").toarray()[:,-1]

# we know that mtag3 is shared across three conditions
mtx = mmread("san.fil.matrix.mtx").tocsr()
mtx[:,2] = mtx[:,2] / 3.0
mmwrite("san.fil.matrix.mtx", mtx)

# normalize matrix (log1p -> ipf)
mx_norm("./san.fil.matrix.mtx", "./san.log1p.matrix.mtx", how="log1p")
mx_norm("./san.log1p.matrix.mtx", "./san.norm.matrix.mtx", how="ipf")#, target_sum=1_000_000)

dbco_norm = mmread("./san.norm.matrix.mtx").toarray()[:,-1]

# index the markers -> markers.ec marker_genes.txt groups.txt
ec_index(markers_fn, "./markers.ec", "./groups.txt", "./marker_genes.txt")

# get the gene ids -> select.txt (selects in order of markers.ec)
mx_select(markers_fn, "./san.genes.txt", "./select.txt")

# extract elements from matrix that are of interest, rows / columns (with associated metadata)
mx_extract("san.norm.matrix.mtx", "san.genes.txt", "select.txt", "san.norm.extr.matrix.mtx", "san.extr.genes.txt", axis=1)

# perform assignments with markers and matrices
mx_assign("san.norm.extr.matrix.mtx", "san.fil.barcodes.txt", "san.extr.genes.txt", "markers.ec", "groups.txt", "assignments.txt")

# + id="pXP8sV_aLwvv" outputId="56300e4b-ab17-4896-f9a2-45cfd04d0757" colab={"base_uri": "https://localhost:8080/"}
# !cat markers.ec
# !cat groups.txt
# !cat marker_genes.txt
markers

# + colab={"base_uri": "https://localhost:8080/"} id="M6L52jsJPPOY" outputId="38be3b16-add7-4910-b97c-69188512cc0a"
df = pd.read_csv("assignments.txt", sep="\t", index_col=0)
print("shape: ", df.shape)

# original counts (this is the same as make df above)
raw_mtx = mmread("./san.fil.matrix.mtx").toarray()
raw_genes = []
read_str_list("./san.genes.txt", raw_genes)
for idx, v in enumerate(raw_mtx.T):
    df[f"{raw_genes[idx]}"] = v # np.exp(v) - 1

df["dbco"] = dbco
df["dbco_ipf"] = dbco_norm

# for the np log
df.loc[df['ent'] == 0.0, 'ent'] = np.finfo(float).tiny

## find entropy cutoff
adj_ent = -np.log(df["ent"].values)
u, xx, v = knee(np.log1p(adj_ent.reshape(-1,1)), sum_axis=1)
(cutoff, ncells) = gmm(xx, v, comps=[3])
cutoff = np.exp(cutoff) - 1 # adjust (since we log1p it)
ent_cutoff = np.exp(-cutoff)
print(ncells, ent_cutoff)

# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="dquZbWWJXHUD" outputId="0462fc5e-f8c4-4960-e560-b42094884e7e"
# Mean IPF-normalized tag signal per assigned label, for cells below the entropy cutoff.
df.query(f"ent < {ent_cutoff}").groupby("label").agg({
    "mtag1_ipf": "mean",
    "mtag2_ipf": "mean",
    "mtag3_ipf": "mean",
    "mtag4_ipf": "mean",
    "dbco_ipf": "mean",
    "label": "count"
}).astype(float)

# + id="tF2LopnMMR1N" outputId="bc7ac250-518c-4bba-c829-3eca4709b2e4" colab={"base_uri": "https://localhost:8080/", "height": 174}
# Same summary on the raw (unnormalized) counts.
df.query(f"ent < {ent_cutoff}").groupby("label").agg({
    "mtag1": "mean",
    "mtag2": "mean",
    "mtag3": "mean",
    "mtag4": "mean",
    "dbco": "mean",
    "label": "count"
}).astype(float)

# + colab={"base_uri": "https://localhost:8080/"} id="SMAWp1THYClp" outputId="0e354035-426f-403f-9221-f36d52ae04c5"
# fix mx_diff to save file
mdf = mx_diff("san.norm.extr.matrix.mtx", "san.barcodes.txt", "san.extr.genes.txt", df["label"].values)

def convert_mtx(*x):
    """Collapse a one-element pandas Series (unique (group, tag) pair) to its scalar value."""
    # x is a pandas series with multiple values
    # but because our matrix has unique group, tag pairs
    # there is only one element in the series
    return x[0].values[0]

# Effect sizes and corrected p-values as condition (rows) x tag (columns) tables.
es = mdf.groupby(["group_id", "name"])["es"].apply(convert_mtx).unstack()
pv = mdf.groupby(["group_id", "name"])["p_corr"].apply(convert_mtx).unstack()

xa, ya = np.where(es.values > 0)

check = defaultdict(list)
conditions = es.index.values
tags = es.columns.values
for i, j in zip(xa, ya):
    check[conditions[i]].append(tags[j])

markers = defaultdict(list)
read_markers_str(markers_fn, markers)

# number in true only, number in intersection, number in check only
for ct, gs in markers.items():
    tm = markers[ct]
    cm = check.get(ct, [])

    left = np.setdiff1d(tm, cm).shape[0]
    itx = np.intersect1d(tm, cm).shape[0]
    right = np.setdiff1d(cm, tm).shape[0]
    print(f"{left, itx, right} \t {ct} \t {tm} ∩ {cm}")

# + colab={"base_uri": "https://localhost:8080/"} id="y_yx1gvx71dd" outputId="9c590dcd-b9cd-4d43-b030-807a84644fc7"
# Binary matrix of (condition, tag) pairs with positive effect size.
(es.values > 0).astype(int)

# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="K74xcVbtc7ym" outputId="691315de-aade-46e2-c0c8-be01f25457b5"
es

# + colab={"base_uri": "https://localhost:8080/", "height": 174} id="0AHKp_vSdALn" outputId="e3250b48-252d-4e39-ef3a-d5c346da3bc7"
pv

# + id="UVuHTtLbz3YU"
ec_matrix("markers.ec", "markers.mtx")

# + colab={"base_uri": "https://localhost:8080/"} id="cg2CxN3x0m7K" outputId="50c8be44-9b53-4ddd-cf96-90a4ef4330ca"
# !cat markers.mtx

# + colab={"base_uri": "https://localhost:8080/"} id="V_Ngbctb8Pxb" outputId="19e96625-0b3b-4126-b7f5-56a746a8f17d"
markers

# + id="upr-UfDm8gb3"
# Label encoding used by the assignment:
# 0: no_sugar
# 1: control
# 2: tmg

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="nJd2TdzG8LeU" outputId="2742f1a7-e5c3-4de9-9037-157c0b784937"
df.query(f"ent < {ent_cutoff}").sort_values("label").query("label == 'tmg'").filter(like="_ipf")

# + id="MPfyaZCS9xwM"
# tags
# AAGGCAGACG mtag1
# GGCTGCGCAC mtag2
# TAGTTGACAT mtag3
# GACGCGCGTT mtag4
# AGCGAACCGC dbco

# mtag4 has 8bp shared seq with mtag4_rc

# tags_revc
# CGTCTGCCTT mtag1
# GTGCGCAGCC mtag2
# ATGTCAACTA mtag3
# AACGCGCGTC mtag4
# GCGGTTCGCT dbco

# + id="66rj6KYcCsw3"
# Toy marker dictionaries used to exercise write_markers/ec_merge below.
markers_a = {
    "a": [1,2,3],
    "b": [2,4],
    "c": [1]
}

markers_b = {
    "a": [4],
    "b": [1],
    "c": [2,3],
    "d": [2]
}

# + id="ewbA8SAQD1fq"
write_markers("test_a.txt", markers_a)
write_markers("test_b.txt", markers_b)

# + id="TCMGXJNHD9HL"
# NOTE(review): this writes merge2.txt but the next cell cats merge.txt —
# one of the two filenames is probably stale.
ec_merge("test_a.txt", "test_b.txt", "merge2.txt")

# + colab={"base_uri": "https://localhost:8080/"} id="BR0OsV4mEFPE" outputId="31460f93-5474-48ca-b4cc-f1c88591106b"
# !cat merge.txt

# + [markdown] id="wUDeEkild8LI"
# ## Manual Assignment

# + id="tHTPDXiPdC3B"
## John Assignment

# load data
mtx = mmread("./san.matrix.mtx").toarray()
bcs = []
read_str_list("./san.barcodes.txt", bcs)
genes = []
read_str_list("./san.genes.txt", genes)

# make df
m = pd.DataFrame(mtx, index = bcs, columns=genes)

# John assignment preprocessing
john = m.div(m.sum(axis=0), axis=1) # normalize gene to be same
john["mtag3"] = john["mtag3"] * 3.
john *= 100000
john.head()

# assignment procedure
def john_assign(row):
    """Label a barcode when both of a condition's marker tags are above `thresh`
    and the two non-marker tags are below it.

    NOTE(review): reads the notebook globals `markers` and `genes`; assumes each
    marker list has exactly two tags and that `genes` contains "dbco" plus the
    four mtags — confirm against the loaded files.
    """
    thresh = 40
    # try all conditions
    # where tags for conditions are high
    # and non-tags are low
    # assign
    label = []
    for c, v in markers.items():
        other = list(set(genes) - set(v))
        other.remove("dbco")

        c1 = row[v[0]] > thresh
        c2 = row[v[1]] > thresh

        c3 = row[other[0]] < thresh
        c4 = row[other[1]] < thresh
        if (c1&c2) & (c3&c4):
            label.append(c)
    return label

blah = john.apply(john_assign, axis=1)
d = blah[blah.map(lambda d: len(d) > 0)]
d = d.map(lambda x: x[0])
john["label"] = john.index.map(d)
john = john.dropna()

# + id="yOPibYAOd-PN"
## Top two assignment

# load in data
mtx = mmread("./san.matrix.mtx").toarray()[:,:-1]
dbco = mmread("./san.matrix.mtx").toarray()[:,-1]
bcs = []
read_str_list("./san.barcodes.txt", bcs)
genes = []
read_str_list("./san.genes.txt", genes)
genes.remove("dbco")

# make df
man = pd.DataFrame(mtx, index=bcs, columns=genes)

# pick top two expressed barcodes and assign based on map
bcmap = {
    (1,3): "no_sugar",
    (3,4): "control",
    (2,3): "tmg"
}
# Indices of the two highest-count tags per barcode (1-based), sorted ascending
# so they can be matched against the (low, high) keys of bcmap.
ind = np.sort(np.argpartition(mtx, -2)[:,-2:], axis=1) + 1
assn = [bcmap.get(tuple(i), None) for i in ind]
man["dbco"] = dbco
man["label"] = assn

# + colab={"base_uri": "https://localhost:8080/"} id="yWCZkdpyeCjD" outputId="e13b74f9-47f3-4183-896f-c2e5efd61537"
# gmm v john
common = np.intersect1d(john.index.values, df.index.values)
print((df.loc[common].label.values == john.loc[common].label.values).sum(), "out of", common.shape[0])

# + colab={"base_uri": "https://localhost:8080/"} id="es87v2I-eFSj" outputId="ca7bfc00-8f69-4424-9316-13b22a1de337"
# man v john
common = np.intersect1d(john.index.values, man.index.values)
print((man.loc[common].label.values == john.loc[common].label.values).sum(), "out of", common.shape[0])

# + colab={"base_uri": "https://localhost:8080/"} id="5i2QfwxseJAt" outputId="9a919269-0a14-4691-da6c-426fdb19fda8"
# man v df
common = np.intersect1d(df.index.values, man.index.values)
print((man.loc[common].label.values == df.loc[common].label.values).sum(), "out of", common.shape[0])

# + colab={"base_uri": "https://localhost:8080/"} id="Z29SvwpreK6M" outputId="5d1e6209-35c1-4b96-c28c-7beaad2f241a"
john.groupby("label")["dbco"].mean()

# + colab={"base_uri": "https://localhost:8080/"} id="xvnX3GOueMlq" outputId="267c1c04-5d39-4a1d-a4b9-e1a31ab3f3a6"
man.loc[common].groupby("label")["dbco"].mean()

# + colab={"base_uri": "https://localhost:8080/"} id="b-v8LMdkeObJ" outputId="e05bbc7d-fe66-40a7-94ac-29534666c737"
df.loc[common].groupby("label")["dbco"].mean()

# + id="_xF1QqTSeR2U"
analysis/293T/obs2/imports.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # # Introduction # # In this tutorial we will "fit" the drift diffusion model to experimental reaction time data, that is we will learn how to choose the parameters of the model such that it "optimally" matches the data. In general, a model is given by a parameterized function specifying and constraining a family of allowed dependencies between independent and dependent variables. Also, in general, this functional form is not known *a priori*, and one therefore needs to compare several reasonable candidate models and select the most appropriate one given the data. # ## Prerequisites # - psychophysics reaction time distributions # - drift diffusion model # - basic math and statistics # - basic Python # ## Objectives # - learn about and apply Ordinary Least Squares & Maximum Likelihood regression # - fit drift diffusion model to simulated and experimental reaction-time data # - perform model selection given a set of candidate models # # General methodology # We will start with an example of the most common fitting methodology. # ## Ordinary Least Squares (OLS) Regression # # We have data pairs $(x_i,y_i)$ that we think are linearly related but we are not sure what the slope or intercept is that best characterizes this relationship. To find this, we fit the data with a linear model # # $\hat{y}_i = \beta_1 x_i + \beta_0$ # # and estimate the best fitting parameters by minimizing the mean squared error (MSE) # # $\sum_i(y_i - \hat{y}_i)^2 = \sum_i (y_i-\beta_1 x_i-\beta_0)^2$ # # For the case of linear regression, there is actually an analytical solution: # # $\beta_1 = \frac{cov(x,y)}{var(x)}$ # # $\beta_0 = \bar{y} - \hat{\beta_1} x$ # # but we will use a more general optimization library to start familiarizing ourselves with these tools. 
# ### Exercise 1: OLS # # 1. Generate N = 100 data pairs $(x_i,y_i)$ using a linear model with normally distributed noise $\epsilon$ and your choice of slope $\beta_1$ and intercept $\beta_0$ parameters. # 2. Calculate the analytical estimate for the OLS regression # 3. Write a function that returns the mean square error (MSE) for any parameter values. # 4. Use an optimization library to numerically find the parameters that minimize the MSE and compare these to the true parameters of the generative model # 5. Plot the data, as well as the anlytical and numerical OLS estimate # # Tip: Use the `minimize` function from the `scipy.optimize` module # ## Maximum Likelihood Estimation (MLE) # # We can also fit the model to the data using Maximum Likelihood methods. To do this, we take the following generative model for the data: # # $\hat{y}_i = \beta_1 x_i + \beta_0 + \epsilon$ # # where $\epsilon\sim \mathcal{N}(0,\sigma^2)$ is a normally distributed random variable with mean 0 and variance $\sigma^2$ and $y\sim \mathcal{N}(\beta_1 x+\beta_0,\sigma^2)$. # # The probability distribution of $y$ given $x$ is then given by: # # \begin{eqnarray} # \\ # p(y_i|x_i,\beta_1,\beta_0) = \frac{1}{\sqrt{2\pi\sigma^2}}\exp[-(y_i-\beta_1 x_i-\beta_0)^2/(2\sigma^2)] # \end{eqnarray} # # For a pair $(x_i,y_i)$, the log likelihood of observation $y_i$ is # \begin{eqnarray} # \log p(y_i|x_i,a,b) # = \log [\frac{1}{\sqrt{2\pi\sigma^2}}\exp[-(y_i-\beta_1 x_i-\beta_0)^2/(2\sigma^2)]] \\ # = \log [\frac{1}{\sqrt{2\pi\sigma^2}}] -(y_i-\beta_1 x_i-\beta_0)^2/(2\sigma^2) # \end{eqnarray} # # That is: When $\epsilon$ is normally distributed, maximizing the total log likelihood of the data is equivalent to minimizing the mean squared error. # # ### Exercise: MLE # # 1. Generate N = 100 data pairs (y,x) using a linear model with normally distributed noise $\epsilon$ and your choice of slope $\beta_1$ and intercept $\beta_0$ parameters. # 2. 
Write a function that returns the total negative log likelihood for any parameter values. # 3. Use an optimization library to numerically find the parameters that minimize the negative log likelihood (or equivalently, maximize the log likelihood of the data given the model). # # Tip: Use the `minimize` function from the `scipy.optimize` module. # # Hint: You can add a very small number to your likelihood to make sure you are not taking $\log(0)$ # ### Exercise: Likelihood heat map # 1. Plot the likelihood heat map as a function of $\beta_1$ and $\beta_0$ # **Expected Output** # (with random seed == 1) # <img src="fig/lin_ll_surface.png"> # # Application: Fitting the DDM # # Now that we have looked at model fitting for a simple case, we can try to fit the DDM to the monkey reaction time data from last class in order to find the best fitting mean, noise and boundary parameters. # # As a first step, we will test our ability to recover the parameters of the model on simulated data for which we set the parameters. # # Once we are convinced that we can recover the parameters on simulated data, we will fit the model to the experimental monkey data. # # ## Excercise: Histogram of simulated RTs # 1. Generate 5000 trials of simulated RT data using the constant bound DDM function ($\mu = 0.0015, \sigma = 0.05, B = 1$) # 2. Plot the simulated RT data using your plot function from yesterday # # Tip: We will import a data simulation function `sim_DDM_constant`, a plotting function `plot_rt_distribution` and a function that computes the analytic DDM and returns the (RG) `analytic_ddm` from the module ddm. You can use this module or you can use your own based on the work from the last tutorial. # *Expected output* # <img src="fig/RT_simulated_cb.png"> # ## Exercise: Likelihood from analytic DDM # # 1. 
Implement the following function # ``` # def get_nll_ddm(parameter, sigma, rts, corrects): # ''' # Determines the negative loglikelihood of the analytical DDM # # Parameters # ---------- # parameter : array_like of float # length 2: 1st entry is mu (drift rate), 2nd is B (boundary) # Note: we pack mu and B in one parameter because we want to # make it compatible for later use with sp.optimize.minimize # sigma : float # DDM standard deviation # rts : array_like of floats # reaction times for which the likelihood will be evaluated # corrects: array_like of bools, same length as rts # indicates for each rt if it was a correct trial # # Returns # ------- # nll : float # negative log-likelihood # ''' # ``` # # 1. Use the `analytic_ddm` function from the module `ddm` to calculate the log likelihood for a correct trial where RT is $500ms$, $\mu=0.0015$ and $B=1$. # 2. What's the log-likelihood for an incorrect trial with otherwise identical parameters? # # 4. What's the analytical log-likelihood of the decision-variable trajectory from the previous exercise? # ## Exercise: Fit DDM to simulated monkey reaction time distribution # Once you are able to evaluate your likelihood function at various parameter values, it's time to fit the simulated data. The goal here is to pass the negative log likelihood to an optimizer that will find the parameters to minimize the total negative log likelihood. # Note that optimizers tend to work better when parameters have the same order of magnitude. Also, the optimization function that we are going to use, `scipy.optimize.minimize`, requires that all parameters that are optimized over are packed into a vector and that this vector is the first argument of the objective function. # # - Write a wrapper function that's exactly like `get_nll_ddm`, except that it takes as first argument the vector $(1000 \mu,B)$. # # Remember that this will mean rescaling the parameters returned by the optimizer in future exercises! # 2. 
Use the optimizer `minimize` on your negative log likelihood function to maximize the log likelihood of the simulated data. Again $\sigma$ will be fixed at 0.05. # 3. Is the optimization succesful? If yes, you should see "message: 'Optimization terminated successfully.'" in the output. If not, consider using a bounded optimization (check out the bounds input to the function and use method 'SLSQP'). $mu$ and $B$ should be positive. # 4. Compare the simulated data with the fitted distribution. To do so, use the analytic_ddm function with the fitted parameter value. # *Expected output* # <img src="fig/RT_sim_fits.png"> # ## Exercise: Fit DDM to experimental monkey RT data # Now we use the same approach to fit the monkey data for the coherence 0.064. # # 1. Plot the distribution of the monkey data and compare it with the distribution of the simulated data. Do you notice any difference? # 2. Fit the monkey data for coherence 0.064 using the same approach as before. How is the fit? # *Expected output* # <img src="fig/RT_simulated_cb_bins.png"> # <img src="fig/RT_monkey_data.png"> # ## Exercise: DDM with non-decision time # We can potentially improve the fit by having a non-decision time, which is a constant added to all reaction time. # # 1. Calculate the negative loglikelihood of the monkey data with a non-decision time $t_{nd}$. # 2. Maximize the log likelihood with a model of three parameters $\mu,B,t_{nd}$. # # Model Comparison # We will use the $BIC$ (Bayesian Information Criterion) to compare models: # \begin{eqnarray} # -2 \log p(M|y) \approx -2\ln(L) + k\ln(n) \equiv BIC # \end{eqnarray} # where # - $M$ is the model under consideration, # - $L$ the likelihood for model $M$, # - $y$ the observed data, # - $k$ the number of free parameters, # - $n$ the number of data points (observations) # # and the approximation holds for large $n$. # # The $BIC$ penalizes more complex models with more parameters. 
Specifically, in our context, the BIC penalizes the non-decision time model for its extra parameter. # # Note that a lower BIC is better and in general a difference of BIC 10 or more is good evidence for the model with the lower BIC. # # ***Reference*** # # Wit et al. ‘All models are wrong...’: an introduction to model uncertainty. Statistica Neerlandica (2012). # ## Exercise: BIC # # 1. Compare the BIC of the two models # # 2. Which model has the smaller BIC (the smaller the better)? # # [Optional] Exercise: Generalizing ML fitting # # Above we derived for a linear model function $y$ that when the measurement error $\epsilon$ is normally distributed, then maximizing the total log likelihood of the data is equivalent to minimizing the mean squared error. # # - Now assume your data $\{{x_i}, {y_i}\}$ can be described by an arbitrary model function $f(x, \theta)$ where $\theta$ represents the function's parameters that you want to recover. Moreover, assume that you know that the measurement errors $\epsilon_i = y_i - f(x_i, \theta)$ are not normally distributed but follow an arbitrary probability distribution $p$. Derive the likelihood of $\theta$ given the data under these assumptions. # - Pick a function (e.g. a difference between two exponentials, or a polynomial multiplied by an exponential) and a probability distribution $p$ and fit the RT data. Compare.
module1/4_model_fitting/model_fitting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fresher96/arabic-speech-commands/blob/master/colab/arabic_commands_classical.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="egyqFzf73Ovv" colab_type="text"
# ###### Data Preparation

# + id="coA7S7diTkYK" colab_type="code"
from google_drive_downloader import GoogleDriveDownloader

# Download the pre-extracted feature CSVs of the Arabic Speech Commands dataset.
# Link: https://drive.google.com/file/d/1-LwzGIEsPr28kmt8TbSEn9vlQq4vosvQ/view?usp=sharing
GoogleDriveDownloader.download_file_from_google_drive(
    file_id='1-LwzGIEsPr28kmt8TbSEn9vlQq4vosvQ',
    dest_path='./ASC.rar')
# !mkdir dataroot; cd dataroot; unrar x ../ASC.rar > /dev/null

# + id="jqPNL1qIXqps" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn

data_root = './dataroot/'
# NOTE(review): "featuers" is spelled this way in the dataset's file names -- do not "fix".
complement = '_featuers.csv'
splits = ['train', 'val', 'test']

# + id="vP7C70xoX-by" colab_type="code"
# Load each split; the last CSV column is the class label, the rest are features.
df = {}
X = {}
y = {}
for split in splits:
    df[split] = pd.read_csv(data_root + split + complement)
    print(split, df[split].shape)
    X[split] = df[split].iloc[:, :-1]
    y[split] = df[split].iloc[:, -1]

# + id="KDu2SwY5Hc1f" colab_type="code" colab={}
from sklearn.model_selection import PredefinedSplit, RandomizedSearchCV

# BUG FIX: DataFrame.append / Series.append were deprecated in pandas 1.4 and
# removed in pandas 2.0; pd.concat is the supported, behavior-identical replacement.
X['train_val'] = pd.concat([X['train'], X['val']], ignore_index=True)
y['train_val'] = pd.concat([y['train'], y['val']], ignore_index=True)
# -1 marks rows that always stay in training; 0 marks the single validation fold,
# so RandomizedSearchCV validates exactly on the original 'val' split.
split_index = [-1 if i < len(X['train']) else 0 for i in range(len(X['train_val']))]
pdsplit = PredefinedSplit(test_fold = split_index)
# +
[markdown] id="Gd2aBgBg9Mhw" colab_type="text" # ##### KNN # + id="0VbvcSmAMMDw" colab_type="code" colab={} from sklearn.neighbors import KNeighborsClassifier clf = KNeighborsClassifier()(weights='distance', algorithm='brute') params = dict( n_neighbors=[6], ) clf = RandomizedSearchCV(clf, params, cv=pdsplit, n_jobs=-1, verbose=1, n_iter=10, return_train_score=True) clf.fit(X['train_val'], y['train_val']) # + id="d8M5qEbBWGQA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="a95ddf82-ac74-488f-f268-e32f1e979ee7" clf.cv_results_ # + id="Wn5dRMAVRz5I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="4492dbc9-99cb-4f20-b4e2-68bd5e3344f0" print(clf.cv_results_['mean_train_score'], clf.cv_results_['mean_test_score']) clf.score(X['test'], y['test']) # + [markdown] id="24x7kfWb9VBb" colab_type="text" # ##### Naive Bayes # + id="1NYQHu7B8gu3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 257} outputId="771b2a18-a691-4c99-bcc0-c1fa42505428" from sklearn.naive_bayes import GaussianNB clf = GaussianNB() params = dict( ) clf = RandomizedSearchCV(clf, params, cv=pdsplit, n_jobs=-1, verbose=1, n_iter=10, return_train_score=True) clf.fit(X['train_val'], y['train_val']) # + id="m6hxswyIXcjm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 237} outputId="8239c5d1-c953-4c6c-e0a9-89f7e07730b8" clf.cv_results_ # + id="hJuUbOGVVN62" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="9f97a6af-2c77-45d0-e07f-138e9189d6cf" print(clf.cv_results_['mean_train_score'], clf.cv_results_['mean_test_score']) clf.score(X['test'], y['test']) # + [markdown] id="Gp99bXnU-S1j" colab_type="text" # ##### Decesion Tree # + id="dlZtIiDE9lCL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 496} outputId="ad0eb4bb-db49-4bdb-ea0d-ecd417540b5b" from sklearn.tree import DecisionTreeClassifier clf = 
DecisionTreeClassifier() params = dict( max_depth=[650, 700, 750] ) clf = RandomizedSearchCV(clf, params, cv=pdsplit, n_jobs=-1, verbose=2, n_iter=10, return_train_score=True) clf.fit(X['train_val'], y['train_val']) # + id="EoUMG_l5YqB1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="ee4a53a6-21b6-4596-f6a6-3e12f3700761" clf.cv_results_ # + id="VmhCnL91bVt7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="41a898dc-b3d6-4c5c-da6f-8489bc9e0ee1" print(clf.cv_results_['mean_train_score'], clf.cv_results_['mean_test_score']) clf.score(X['test'], y['test']) # + [markdown] id="OotZ_JVMcfTI" colab_type="text" # ##### Random Forest # + colab_type="code" id="NrHfY9-egzvK" colab={"base_uri": "https://localhost:8080/", "height": 588} outputId="7b74a21a-a21f-4102-fc38-8db019b5e37c" from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(max_depth=700) params = dict( n_estimators=[250] ) clf = RandomizedSearchCV(clf, params, cv=pdsplit, n_jobs=-1, verbose=2, n_iter=10, return_train_score=True) clf.fit(X['train_val'], y['train_val']) # + colab_type="code" id="Hzl55I11gzvX" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="e40c3848-6ba6-4adf-8485-6e17e4b77c0a" clf.cv_results_ #0.75894309 0.76869919 # + colab_type="code" id="7NkTPxhpgzvm" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="66dcb61e-3853-4f9f-ece9-0777c400ea2b" print(clf.cv_results_['mean_train_score'], clf.cv_results_['mean_test_score']) clf.score(X['test'], y['test']) # + [markdown] id="HtE0qqN7rgXo" colab_type="text" # ##### Logistic Regression # + colab_type="code" id="4Eg19a7ntqc3" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="5df50e95-5d03-4cc4-b69b-0a1ab7e48719" from sklearn.linear_model import LogisticRegression clf = LogisticRegression(max_iter=1500) params = dict( C=[1e-5, 1e-6] ) clf = RandomizedSearchCV(clf, params, cv=pdsplit, 
n_jobs=-1, verbose=2, n_iter=10, return_train_score=True) clf.fit(X['train_val'], y['train_val']) # + colab_type="code" id="C8dZRdwutqdR" colab={"base_uri": "https://localhost:8080/", "height": 311} outputId="ecc37257-132b-4dee-89a9-014c2b7aa9f2" clf.cv_results_ # 0.74390244 0.96395664 # + colab_type="code" id="Xrq9kNgctqde" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="127b86bb-4928-406b-ca48-f28178061f6a" print(clf.cv_results_['mean_train_score'], clf.cv_results_['mean_test_score']) clf.score(X['test'], y['test']) # + [markdown] id="yTDxz0bd3JZZ" colab_type="text" # # HMM # + id="EXUNmQmtB8ud" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="86724e75-8c2a-4df1-b746-638ec7f1eb4f" # !pip install hmmlearn # + id="pEwbDAKtDvNN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="6df879bc-d770-4629-c95b-1ff87eac7ef7" from hmmlearn import hmm from collections import defaultdict n_mfcc = 12 dataset = {} for split in splits + ['train_val']: dataset[split] = defaultdict(list) num = 0 for (index, row), label in zip(X[split].iterrows(), y[split]): feature = row.to_numpy() feature = np.reshape(feature, (-1, n_mfcc)) # print(feature, feature.shape, label); input(); dataset[split][label].append(feature) num += 1 print(split, num) # + id="coLBtlnuxmjD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="79ac822a-03ad-488c-ee01-392a9be75644" # adapted from: https://github.com/wblgers/hmm_speech_recognition_demo/blob/master/demo.py from tqdm import tqdm def train_GMMHMM(states_num=26, dataset=dataset['train']): GMMHMM_Models = {} idx = 1; for label in tqdm(dataset.keys()): print('itr %02d | training on %s'%(idx, label)) idx += 1 model = hmm.GaussianHMM(n_components=states_num, \ covariance_type='diag', n_iter=100) trainData = dataset[label] length = np.zeros([len(trainData), ], dtype=np.int) for m in range(len(trainData)): length[m] = 
trainData[m].shape[0] trainData = np.vstack(trainData) model.fit(trainData, lengths=length) # get optimal parameters GMMHMM_Models[label] = model return GMMHMM_Models hmmModels = train_GMMHMM() # + id="deT5BNK6xnDv" colab_type="code" colab={} def test_GMMHMM(hmmModels=hmmModels, testDataSet=dataset['test']): score_cnt = 0 score_tot = 0 idx = 1 for label in testDataSet.keys(): score_cls = 0 features = testDataSet[label] score_tot += len(features); for feature in features: scoreList = {} for model_label in hmmModels.keys(): model = hmmModels[model_label] score = model.score(feature) scoreList[model_label] = score predict = max(scoreList, key=scoreList.get) # print("Test on true label ", label, ": predict result label is ", predict); input(); if predict == label: score_cls += 1 score_cnt += score_cls print("itr %02d | %s accuracy: %d/%d = %.2f"%(idx, label, score_cls, len(features), 100.0*score_cls/len(features)), "%") idx += 1 print("accuracy: %d/%d = %.2f"%(score_cnt, score_tot, 100.0*score_cnt/score_tot), "%") test_GMMHMM(hmmModels, dataset['test']) # test_GMMHMM(hmmModels, dataset['val']) # test_GMMHMM(hmmModels, dataset['train']) # + id="Ao7gpXoq16O6" colab_type="code" colab={}
colab/arabic_commands_classical.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv('df_userratings.csv') df.head() df_movie=pd.read_csv('df_movies.csv') df_movie.head() import numpy as np n=df.groupby('user_id')['rating'].count(); plt.hist(n,bins=np.arange(0,700,10)); plt.title('Users\' ratings frequency') plt.xlabel('rating number') plt.ylabel('frequency') min(df.groupby('user_id')['rating'].count()) df_movie.year.hist(bins=np.arange(1922,2000,1),grid=False) plt.title('movie number by year') plt.xlabel('movie number') plt.ylabel('number') df_users=pd.read_csv('users.csv') df_users.head() df_url=pd.read_csv('img_url.csv') df_url.head() # + def changingLabels(number): return str(number/10**3) + "K" plt.figure(figsize = (10, 6)) ax = sns.countplot(x="rating", data=df) ax.set_yticklabels([changingLabels(num) for num in ax.get_yticks()]) plt.tick_params(labelsize = 15) plt.title("Distribution of Ratings in user_rating data", fontsize = 20) plt.xlabel("Ratings", fontsize = 20) plt.ylabel("Number of Ratings(thousands)", fontsize = 20) plt.show() # + rating_peruser=df.groupby('user_id')['rating'].mean() rating_peruser=list(rating_peruser) rating_permovie=df.groupby('item_id')['rating'].mean() rating_permovie=list(rating_permovie) fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (16, 7)) sns.distplot(rating_peruser, hist = False, ax = axes[0], label = "PDF") sns.kdeplot(rating_peruser, cumulative = True, ax = axes[0], label = "CDF") axes[0].set_title("Average Rating Per User", fontsize=20) axes[0].tick_params(labelsize = 15) axes[0].legend(loc='upper left', fontsize = 17) sns.distplot(rating_permovie, hist = False, ax = axes[1], label = "PDF") sns.kdeplot(rating_permovie, cumulative = True, ax = axes[1], label = "CDF") 
axes[1].set_title("Average Rating Per Movie", fontsize=20)
axes[1].tick_params(labelsize = 15)
axes[1].legend(loc='upper left', fontsize = 17)

# +
from pyspark import SparkContext
import numpy as np

# begin train model
# BUG FIX: the original called `sc.parallelize(...)` on the line *before*
# `sc = SparkContext.getOrCreate()`, which raises NameError on a fresh kernel.
# Acquire the SparkContext first, then parallelize.
sc = SparkContext.getOrCreate()
itemRDD = sc.parallelize(np.array(df_movie).tolist())
rawUserData = sc.parallelize(np.array(df).tolist())
header = rawUserData.first()
print(rawUserData.count())   # print the number of lines
print(rawUserData.first())   # first record: (userID, itemID, rating, date)
# -

def train_ALS(train_data, validation_data, num_iters, reg_param, ranks):
    """
    Grid Search Function to select the best model based on RMSE of hold-out data.

    Parameters
    ----------
    train_data, validation_data : RDD of (userID, productID, rating) tuples
    num_iters : int, ALS iterations per candidate model
    reg_param : list of float, regularization strengths to try
    ranks : list of int, latent-factor ranks to try

    Returns
    -------
    The fitted model with the lowest validation RMSE.
    """
    # NOTE(review): `ALS` and `math` are imported in the next cell; this
    # function is only *called* after that cell has run, so the names resolve.
    min_error = float('inf')
    best_rank = -1
    best_regularization = 0
    best_model = None
    for rank in ranks:
        for reg in reg_param:
            # train ALS model
            model = ALS.train(
                ratings=train_data,    # (userID, productID, rating) tuple
                iterations=num_iters,
                rank=rank,
                lambda_=reg,           # regularization param
                seed=99)
            # make prediction
            valid_data = validation_data.map(lambda p: (p[0], p[1]))
            predictions = model.predictAll(valid_data).map(lambda r: ((r[0], r[1]), r[2]))
            # get the rating result
            ratesAndPreds = validation_data.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
            # get the RMSE
            MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
            error = math.sqrt(MSE)
            print('{} latent factors and regularization = {}: validation RMSE is {}'.format(rank, reg, error))
            # keep the best (lowest-RMSE) model seen so far
            if error < min_error:
                min_error = error
                best_rank = rank
                best_regularization = reg
                best_model = model
    print('\nThe best model has {} latent factors and regularization = {}'.format(best_rank, best_regularization))
    return best_model

# +
import time
from pyspark.mllib.recommendation import ALS
import math

#############
rawRatings = rawUserData.map(lambda line:line[:3])   # only need three columns
ratingsRDD = rawRatings.map(lambda x:(int(x[0]),int(x[1]),int(x[2])))
# 60/20/20 train/validation/test split (weights are normalized by Spark).
train, validation, test = ratingsRDD.randomSplit([6, 2, 2], seed=99)
#############
num_iterations = 10
ranks = [8, 10, 12, 14, 16, 18, 20]
reg_params = [0.001, 0.01, 0.05, 0.1, 0.2]

# grid search and select best model
start_time = time.time()
final_model = train_ALS(train, validation, num_iterations, reg_params, ranks)
print('Total Runtime: {:.2f} seconds'.format(time.time() - start_time))
# -

import matplotlib.pyplot as plt

def plot_learning_curve(arr_iters, train_data, validation_data, reg, rank):
    """
    Plot function to show learning curve of ALS: validation RMSE as a function
    of the number of training iterations, with rank and regularization fixed.
    """
    errors = []
    for num_iters in arr_iters:
        print(num_iters)
        # train ALS model
        model = ALS.train(
            ratings=train_data,    # (userID, productID, rating) tuple
            iterations=num_iters,
            rank=rank,
            lambda_=reg,           # regularization param
            seed=99)
        # make prediction
        valid_data = validation_data.map(lambda p: (p[0], p[1]))
        predictions = model.predictAll(valid_data).map(lambda r: ((r[0], r[1]), r[2]))
        # get the rating result
        ratesAndPreds = validation_data.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
        # get the RMSE
        MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
        error = math.sqrt(MSE)
        # add to errors
        errors.append(error)
    # plot
    plt.figure(figsize=(12, 6))
    plt.plot(arr_iters, errors)
    plt.xlabel('number of iterations')
    plt.ylabel('RMSE')
    plt.title('ALS Learning Curve')
    plt.grid(True)
    plt.show()

# create an array of num_iters
iter_array = list(range(1, 11))
# create learning curve plot
plot_learning_curve(iter_array, train, validation, 0.1, 8)

# make prediction using test data
test_data = test.map(lambda p: (p[0], p[1]))
predictions = final_model.predictAll(test_data).map(lambda r: ((r[0], r[1]), r[2]))
# get the rating result
ratesAndPreds = test.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
# get the RMSE
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
error = math.sqrt(MSE)
print('The out-of-sample RMSE of rating predictions is', round(error, 4))

# +
############ application
from pyspark.mllib.recommendation import Rating

# step1: process data
rawRatings = rawUserData.map(lambda line:line[:3])
print(rawRatings.take(5))
ratingsRDD = rawRatings.map(lambda x:(x[0],x[1],x[2]))
print(ratingsRDD.take(5))
print(ratingsRDD.count())

# check unduplicated number of users
numUsers = ratingsRDD.map(lambda x:x[0]).distinct().count()
print(numUsers)   # 943 users
# check unduplicated number of movies
numMovies = ratingsRDD.map(lambda x:x[1]).distinct().count()
print('unduplicated movie number: ',numMovies)

# step2: train model
from pyspark.mllib.recommendation import ALS
model = ALS.train(ratingsRDD,8,10,0.1)   # parameters: data, rank, iterations, reg rate
print('the model is:',model)

# step3: use model to recommend
# check the rating of recommending a movie to a user
print('the rating of recommending movie 1141 to user 10001:',model.predict(10001,1141))

# step4: display the movie title
print('the number of movies',itemRDD.count())
# create a dictionary of movie_id and title; key is movie_id, value is movie title
movieTitle = itemRDD.map(lambda a:(float(a[0]),a[1])).collectAsMap()

## recommend products
recommendP = model.recommendProducts(10001,5)   # first parameter is user_id, second is how many movies
print('For user'+str(10001)+'recommend movie\n')
for p,rec in enumerate(recommendP):
    print(str(movieTitle[rec.product])+'; recommendation rating '+'{:.3f}'.format(rec.rating)+'\n')

# recommend users
recommendU = model.recommendUsers(product=30,num=5)
# BUG FIX: the original printed `movieTitle[rec.product]`, where `rec` was the
# stale loop variable left over from the loop above -- i.e. the title of the
# last product recommended to user 10001, not of movie 30 whose prospective
# users are being recommended here.
print('For movie: '+str(movieTitle[30])+'\n')
for i,rec in enumerate(recommendU):
    print('recommend user '+str(rec.user)+'; recommendation rating '+'{:.3f}'.format(rec.rating)+'\n')
algoritems_test/ALS_algorithm_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# TensorFlow 1.x (graph-mode) two-hidden-layer MLP trained on MNIST with
# plain gradient descent; accuracy is logged to TensorBoard summaries.

# import packages
#
import tensorflow as tf
import numpy as np

# Reset default graph
tf.reset_default_graph()

# Load mnist data from datasets/mnist.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('datasets/mnist')

# Try the minist.train.next_batch function to observe the data
X_batch, y_batch = mnist.train.next_batch(50)

from sklearn.datasets import fetch_mldata
# NOTE(review): `fetch_mldata` is never used below and was removed from
# scikit-learn (>= 0.22), where this import raises ImportError -- consider deleting.

# Construction phase of DNN:
# 1. Input layer
# 2. Hidden layer 1 of 300 neurons, using ReLU activation function
# 3. Hidden layer 2 of 100 neurons, using ReLU activation function
# 4. Output layer
#
# This DNN applies gradient descent to minimize the entropy loss function.
# This DNN has an "accuracy" node to calculate the accuracy

n_input = 28*28       # flattened 28x28 grayscale image
n_hidden1 = 300
n_hidden2 = 100
n_output = 10         # one logit per digit class

X = tf.placeholder(tf.float32, shape=(None, n_input), name='X')
y = tf.placeholder(tf.int32, shape=(None), name='y')

with tf.name_scope('DNN'):
    hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name='hidden1')
    hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, name='hidden2')
    # No activation on the output layer: raw logits feed the softmax loss below.
    logits = tf.layers.dense(hidden2, n_output, activation=None, name='logits')

with tf.name_scope('loss'):
    # sparse_* variant takes integer class labels directly (no one-hot needed).
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits), name='loss')

with tf.name_scope('training'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    training_op = optimizer.minimize(loss, name='training_op')

with tf.name_scope('eval'):
    # correct[i] is True iff the top-1 logit matches label i.
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name='accuracy')

accuracy_summary = tf.summary.scalar('Accuracy', accuracy)

# Execution phase of DNN:
# 1. Run 50 epoches at a batch size 100
# 2. Print training and validation accuracy after each epoch, also save them to log file
# 3. Save the final model at tmp/mnist_dnn_final.ckpt

init = tf.global_variables_initializer()
saver = tf.train.Saver()
# Separate writers so TensorBoard overlays train vs validation curves.
file_writer_training = tf.summary.FileWriter('logs/mnist_dnn/train', graph=tf.get_default_graph())
file_writer_val = tf.summary.FileWriter('logs/mnist_dnn/val')

n_epoch = 50
batch_size = 100
n_data = mnist.train.num_examples
n_step = int(np.ceil(n_data/batch_size))

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epoch):
        for step in range(n_step):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # Evaluate once per epoch on the full train and validation sets.
        (train_accuracy_val, train_accuracy_sum) = \
            sess.run([accuracy, accuracy_summary], feed_dict={X: mnist.train.images, y: mnist.train.labels})
        (val_accuracy_val, val_accuracy_sum) = \
            sess.run([accuracy, accuracy_summary], feed_dict={X: mnist.validation.images, y: mnist.validation.labels})
        # NOTE(review): "accuray" typo is in the runtime string; left as-is here.
        print("Epoch: {}, training accuray: {:.4f}, validation accuracy: {:.4f}".format(epoch, train_accuracy_val, val_accuracy_val))
        # Global step reuses the final inner-loop `step`, i.e. one point per epoch.
        global_step = n_step * epoch + step
        file_writer_training.add_summary(train_accuracy_sum, global_step=global_step)
        file_writer_val.add_summary(val_accuracy_sum, global_step=global_step)
    saver.save(sess, 'tmp/mnist_dnn_final.ckpt')

file_writer_training.close()
file_writer_val.close()

# Visualize the DNN and the learning curve in TensorBoard
Chapter10_DNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''aug'': conda)'
#     name: python3
# ---

# # Tutorial: Use on tensors

# ## 1. Check if the operator is differentiable

import beacon_aug as BA
BA.properties.isOpDifferentiable(op=BA.RandomCrop(library="torch"))

# ## 2. Use the transformation operator as a layer
#
# Augmentation can also be used as network layer (e.g. [stylegan2-ada](https://github.com/NVlabs/stylegan2-ada-pytorch)) in limited data regimes

# +
import torch
import beacon_aug as BA

size = (2, 3, 8, 8)   # rgb image (8x8), batch size 2
x = torch.rand(size, requires_grad=True, device="cpu")
y = BA.RandomCrop(library="torch", height=4, width=4).apply(x, library="torch", as_layer=True)
print(x.shape, y.shape)
# -

# ## 3. Pytorch Data Loader Example

# +
# BUG FIX: `numpy` was previously imported only in the *last* cell, so any
# access to `train_dataset[i]` before running that cell raised NameError
# inside TorchvisionAdapter.__call__. Import it where the adapter is defined.
import numpy as np
from torchvision import transforms, datasets
import beacon_aug as BA


class TorchvisionAdapter:
    """Adapt a beacon_aug ``Compose`` pipeline to torchvision's transform
    protocol: torchvision passes a PIL image positionally, while beacon_aug
    expects ``image=<ndarray>`` and returns a dict with an ``'image'`` key."""

    def __init__(self, transforms: BA.Compose):
        self.transforms = transforms

    def __call__(self, img, *args, **kwargs):
        return self.transforms(image=np.array(img))['image']


train_transform = TorchvisionAdapter(BA.Compose([
    BA.RandomResizedCrop(size=[224, 224], scale=(0.8, 1.2), library="torch"),
    BA.HorizontalFlip(library="torch"),
    BA.ColorJitter(p=0.8, library="torch"),
    BA.RandomGrayscale(p=0.2, library="torch"),
    BA.ToTensor(library="torch"),
]))
train_dataset = datasets.ImageFolder(root="Your Data-Set-Folder", transform=train_transform)

# +
import numpy as np
from PIL import Image

Image.fromarray(np.uint8(train_dataset[0][0]*255))
ipynbs/tutorial_torch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # metadata: # interpreter: # hash: cff9d2dd46246ebad9b755e5103afbe146c0c9ff6a61a749f67e85fa816eb766 # name: python3 # --- # # Atributos e Métodos # --- import numpy as np # --- # # ## `shape` # # O atributo `shape` dos objetos do tipo array numpy, mostra a dimensão dos mesmos. x = np.array([[1,2],[2,3],[-1,0]]) x, x.shape # --- # # ## `transpose` # # O método `np.transpose()` faz a tranposição de um array numpy. x, np.transpose(x) # A transposta de uma array numpy também é um atributo do mesmo: `.T` x, x.T # --- # # ## `save`/`load` # # Muitas vezes é necessário salvar um array numpy no disco. Isso é feito usando # # ~~~ # np.save('arrayx', x) # ~~~ # # Assim o array numpy armazenado na variável `x`, será salvo na memória com o nome `arrayx.npy`. np.save('matriz',x) # Quando um array está salvo no disco, a sua leitura e carregamento no código é feita com o método `load`. Fazendo # # y = np.load('nome.npy') # # Carregamos o array salvo no arquivo `nome.py` para a variável `y`. y = np.load('matriz.npy') y
numpy-arrays/04-atributos-metodos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Run queries vs AWS Lambda functions
# ---

# +
from PIL import Image
import io
import requests
import base64
import time
import json
import numpy as np
import multiprocessing as mp
# -

# Read the test image once and keep it base64-encoded — the payload format
# the Lambda endpoint expects.
with open('test-dog.jpg', 'rb') as f:
    b64_im = base64.b64encode(f.read())

img = Image.open(io.BytesIO(base64.b64decode(b64_im)))

# +
# # %%timeit
now = time.time()

url = 'https://53f8w4fcua.execute-api.us-east-1.amazonaws.com/Prod/invocations'
payload = {'data': b64_im.decode('utf-8')}
headers = {'content-type': 'application/json'}
r = requests.post(url, json=payload, headers=headers)

print('proc time: {} seconds and response is: {}'.format(time.time() - now, r))
# -

response = r.json()
print(response)

# Crop to the first detected bounding box (xmin, ymin, xmax, ymax).
bb = json.loads(response['json'])['boxes'][0]
area = (bb[0], bb[1], bb[2], bb[3])
cropped_img = img.crop(area)
cropped_img

area = (response['xmin'], response['ymin'], response['xmax'], response['ymax'])
cropped_img = img.crop(area)
cropped_img

# ## Parallel requests

list_of_images = [b64_im for x in range(1000)]


def query_image(image):
    """POST one base64-encoded image to the Lambda endpoint (module-level
    ``url``) and return the ``requests`` response object."""
    payload = {'data': image.decode('utf-8')}
    headers = {'content-type': 'application/json'}
    return requests.post(url, json=payload, headers=headers)


now = time.time()
pool = mp.Pool(mp.cpu_count())
# map() suffices: query_image takes a single argument, so there is no need
# to wrap every image in a 1-tuple for starmap().
results = pool.map(query_image, list_of_images)
pool.close()
# Fixed: time.time() differences are seconds (message said milliseconds),
# and the format string carried a stray unused argument.
print('total proc time: {} seconds'.format(time.time() - now))
test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # # Partial Dependence and Individual Conditional Expectation Plots # # Partial dependence plots show the dependence between the target function [2]_ # and a set of features of interest, marginalizing over the values of all other # features (the complement features). Due to the limits of human perception, the # size of the set of features of interest must be small (usually, one or two) # thus they are usually chosen among the most important features. # # Similarly, an individual conditional expectation (ICE) plot [3]_ # shows the dependence between the target function and a feature of interest. # However, unlike partial dependence plots, which show the average effect of the # features of interest, ICE plots visualize the dependence of the prediction on a # feature for each :term:`sample` separately, with one line per sample. # Only one feature of interest is supported for ICE plots. # # This example shows how to obtain partial dependence and ICE plots from a # :class:`~sklearn.neural_network.MLPRegressor` and a # :class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the # California housing dataset. The example is taken from [1]_. # # .. [1] <NAME>, <NAME> and <NAME>, "Elements of Statistical # Learning Ed. 2", Springer, 2009. # # .. [2] For classification you can think of it as the regression score before # the link function. # # .. [3] <NAME>., <NAME>., <NAME>., and <NAME>., Peeking Inside # the Black Box: Visualizing Statistical Learning With Plots of # Individual Conditional Expectation. 
(2015) Journal of Computational and # Graphical Statistics, 24(1): 44-65 (https://arxiv.org/abs/1309.6392) # import sklearn sklearn.set_config(display="diagram") # ## California Housing data preprocessing # # Center target to avoid gradient boosting init bias: gradient boosting # with the 'recursion' method does not account for the initial estimator # (here the average target, by default). # # # + jupyter={"outputs_hidden": false} import pandas as pd from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split cal_housing = fetch_california_housing() X = pd.DataFrame(cal_housing.data, columns=cal_housing.feature_names) y = cal_housing.target y -= y.mean() X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) # - # ## 1-way partial dependence with different models # # In this section, we will compute 1-way partial dependence with two different # machine-learning models: (i) a multi-layer perceptron and (ii) a # gradient-boosting. With these two models, we illustrate how to compute and # interpret both partial dependence plot (PDP) and individual conditional # expectation (ICE). # # ### Multi-layer perceptron # # Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute # single-variable partial dependence plots. 
# # # + jupyter={"outputs_hidden": false} from time import time from sklearn.pipeline import make_pipeline from sklearn.preprocessing import QuantileTransformer from sklearn.neural_network import MLPRegressor print("Training MLPRegressor...") tic = time() est = make_pipeline( QuantileTransformer(), MLPRegressor( hidden_layer_sizes=(50, 50), learning_rate_init=0.01, early_stopping=True ), ) est.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {est.score(X_test, y_test):.2f}") # - # We configured a pipeline to scale the numerical input features and tuned the # neural network size and learning rate to get a reasonable compromise between # training time and predictive performance on a test set. # # Importantly, this tabular dataset has very different dynamic ranges for its # features. Neural networks tend to be very sensitive to features with varying # scales and forgetting to preprocess the numeric feature would lead to a very # poor model. # # It would be possible to get even higher predictive performance with a larger # neural network but the training would also be significantly more expensive. # # Note that it is important to check that the model is accurate enough on a # test set before plotting the partial dependence since there would be little # use in explaining the impact of a given feature on the prediction function of # a poor model. # # We will plot the partial dependence, both individual (ICE) and averaged one # (PDP). We limit to only 50 ICE curves to not overcrowd the plot. 
# # # + jupyter={"outputs_hidden": false} import matplotlib.pyplot as plt from sklearn.inspection import partial_dependence from sklearn.inspection import PartialDependenceDisplay print("Computing partial dependence plots...") tic = time() _, ax = plt.subplots(figsize=(8, 6)) features = ["MedInc", "AveOccup", "HouseAge", "AveRooms"] display = PartialDependenceDisplay.from_estimator( est, X_train, features, kind="both", subsample=50, n_jobs=3, grid_resolution=20, random_state=0, ice_lines_kw={"color": "tab:blue", "alpha": 0.2, "linewidth": 0.5}, pd_line_kw={"color": "tab:orange", "linestyle": "--"}, ax=ax, ) print(f"done in {time() - tic:.3f}s") display.figure_.suptitle( "Partial dependence of house value on non-location features\n" "for the California housing dataset, with MLPRegressor" ) display.figure_.subplots_adjust(hspace=0.5) # - # ### Gradient boosting # # Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and # compute the partial dependence on the same features. # # # + jupyter={"outputs_hidden": false} from sklearn.ensemble import HistGradientBoostingRegressor print("Training HistGradientBoostingRegressor...") tic = time() est = HistGradientBoostingRegressor() est.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {est.score(X_test, y_test):.2f}") # - # Here, we used the default hyperparameters for the gradient boosting model # without any preprocessing as tree-based models are naturally robust to # monotonic transformations of numerical features. # # Note that on this tabular dataset, Gradient Boosting Machines are both # significantly faster to train and more accurate than neural networks. It is # also significantly cheaper to tune their hyperparameters (the defaults tend # to work well while this is not often the case for neural networks). # # We will plot the partial dependence, both individual (ICE) and averaged one # (PDP). We limit to only 50 ICE curves to not overcrowd the plot. 
# # # + jupyter={"outputs_hidden": false} print("Computing partial dependence plots...") tic = time() _, ax = plt.subplots(figsize=(8, 6)) display = PartialDependenceDisplay.from_estimator( est, X_train, features, kind="both", subsample=50, n_jobs=3, grid_resolution=20, random_state=0, ice_lines_kw={"color": "tab:blue", "alpha": 0.2, "linewidth": 0.5}, pd_line_kw={"color": "tab:orange", "linestyle": "--"}, ax=ax, ) print(f"done in {time() - tic:.3f}s") display.figure_.suptitle( "Partial dependence of house value on non-location features\n" "for the California housing dataset, with Gradient Boosting" ) display.figure_.subplots_adjust(wspace=0.4, hspace=0.5) # - # ### Analysis of the plots # # We can clearly see on the PDPs (thick blue line) that the median house price # shows a linear relationship with the median income (top left) and that the # house price drops when the average occupants per household increases (top # middle). The top right plot shows that the house age in a district does not # have a strong influence on the (median) house price; so does the average # rooms per household. # # The ICE curves (light blue lines) complement the analysis: we can see that # there are some exceptions, where the house price remain constant with median # income and average occupants. On the other hand, while the house age (top # right) does not have a strong influence on the median house price on average, # there seems to be a number of exceptions where the house price increase when # between the ages 15-25. Similar exceptions can be observed for the average # number of rooms (bottom left). Therefore, ICE plots show some individual # effect which are attenuated by taking the averages. # # In all plots, the tick marks on the x-axis represent the deciles of the # feature values in the training data. # # We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much # smoother predictions than # :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. 
# # However, it is worth noting that we are creating potential meaningless # synthetic samples if features are correlated. # # # ## 2D interaction plots # # PDPs with two features of interest enable us to visualize interactions among # them. However, ICEs cannot be plotted in an easy manner and thus interpreted. # Another consideration is linked to the performance to compute the PDPs. With # the tree-based algorithm, when only PDPs are requested, they can be computed # on an efficient way using the `'recursion'` method. # # # + jupyter={"outputs_hidden": false} features = ["AveOccup", "HouseAge", ("AveOccup", "HouseAge")] print("Computing partial dependence plots...") tic = time() _, ax = plt.subplots(ncols=3, figsize=(9, 4)) display = PartialDependenceDisplay.from_estimator( est, X_train, features, kind="average", n_jobs=3, grid_resolution=20, ax=ax, ) print(f"done in {time() - tic:.3f}s") display.figure_.suptitle( "Partial dependence of house value on non-location features\n" "for the California housing dataset, with Gradient Boosting" ) display.figure_.subplots_adjust(wspace=0.4, hspace=0.3) # - # <div class="alert alert-success"> # <p><b>EXERCISE</b>:</p> # Compute the partial dependence plot for the numerical features of the Titanic dataset? # </div> # + # # %load solutions/solution_28.py # + # # %load solutions/solution_29.py # + # # %load solutions/solution_30.py # + # # %load solutions/solution_31.py # + # # %load solutions/solution_32.py # + # # %load solutions/solution_33.py
09_inspection/03_plot_partial_dependence.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jadelekeGit/Blogs/blob/master/The_Battle_of_Neighborhood_WK2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="i5U3I-YJKN9V" # + id="O8BXY5WMKN9e" outputId="cf9b1384-c4d4-4d25-b562-6490b3394e09" import numpy as np import pandas as pd pd.set_option('display.max_column', None) pd.set_option('display.max_row', None) import json import xml # !pip install geocoder from geopy.geocoders import Nominatim import requests from pandas.io.json import json_normalize import matplotlib.pyplot as plt import matplotlib.cm as cm import matplotlib.colors as colors from sklearn.cluster import KMeans # !pip install folium import folium from bs4 import BeautifulSoup print ('Librairies imported.') # + id="LMmxhoeTKN9g" outputId="53ac6013-fece-4b59-f438-13e752dd7648" # !wget -q -O 'newyork_data.json' https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DS0701EN-SkillsNetwork/labs/newyork_data.json print('Data downloaded!') # + id="SPE-JWwOKN9h" with open('newyork_data.json') as f: newyork_data = json.load(f) # + id="xqoISwdtKN9h" outputId="2e0e5d1c-806c-47ab-ef41-be082cffafa9" newyork_data # + id="NF3jmRdLKN9i" neighborhoods_data = newyork_data['features'] # + id="_8i4XDFaKN9i" outputId="d2e36dec-fb83-4870-92db-44f9a5ae2a27" neighborhoods_data[0] # + id="v1vtbL4kKN9i" outputId="caed3564-dec5-4ae2-9532-1fc502ea279f" column_names = ['Borough','Neighborhood', 'Latitude', 'Longitude'] ny_df = pd.DataFrame(columns=column_names) ny_df # + id="ULyKm5UAKN9j" # Fill the dataframe # + id="Yh0O_EKkKN9j" for data in neighborhoods_data: borough = neighborhood_name = 
data['properties']['borough'] neighborhood_name = data['properties']['name'] neighborhood_latlon = data['geometry']['coordinates'] neighborhood_lat = neighborhood_latlon[1] neighborhood_lon = neighborhood_latlon[0] ny_df = ny_df.append({'Borough':borough, 'Neighborhood':neighborhood_name, 'Latitude':neighborhood_lat, 'Longitude':neighborhood_lon}, ignore_index=True) # + id="PnPWDFazKN9j" outputId="7a493a19-3756-4dd3-ae7b-4aa1c7197d67" print('shape:', ny_df.shape) ny_df.head() # + [markdown] id="nGpd2e3BKN9k" # We got in dataframe ny_df the data we need. We're going to analyze the data and examine the number of neighborhoods per borough. # + id="2QyqabHoKN9k" outputId="814b2c1a-7758-4ccd-d866-ab2adb54fc62" clr = "#3186cd" ny_df.groupby('Borough')['Neighborhood'].count().plot.bar(figsize=(10,5), color=clr) plt.title('Neighborhoods per Borough: NYC', fontsize = 16) plt.xlabel('Borough', fontsize = 12) plt.ylabel('No. Neighborhoods',fontsize = 12) plt.xticks(rotation = 'horizontal') plt.show() # + [markdown] id="co2GPVwaKN9k" # Queens has the highest number of neighborhoods while staten island has the lowest. We're going to determine the best to set up a restaurant business in Queens. 
# + id="WAU2rH9YKN9l" outputId="0e3e5474-d476-4b34-b1dc-f2fb83b24d0b" address = 'Queens,NYC' geolocator = Nominatim(user_agent = "Foursquare_agent") location = geolocator.geocode(address) latitude_x = location.latitude longitude_y = location.longitude print('The Geograpical Co-ordinate are {}, {}.'.format(latitude_x, longitude_y)) # + id="iG4eiHJTKN9l" # + id="dOHfEzmlKN9l" outputId="578f24c6-fcd2-4da3-d342-96147154c655" map_Queens = folium.Map(location=[latitude_x, longitude_y], zoom_start=10) for lat, lng, nei in zip(ny_df['Latitude'], ny_df['Longitude'], ny_df['Neighborhood']): label = '{}'.format(nei) label = folium.Popup(label, parse_html=True) folium.CircleMarker( [lat, lng], radius=5, popup=label, color='blue', fill=True, fill_color='#3186cc', fill_opacity=0.7, parse_html=False).add_to(map_Queens) map_Queens # + id="EAKtbPxrKN9m" # + id="SZ1LUWnSKN9m" CLIENT_ID = 'N5FHYAAPJ40G5EWXDZ5CPPENEPTY53H4XIEY4U5BNVCV5FI1' # my Foursquare ID CLIENT_SECRET = '<KEY>' # my Foursquare Secret VERSION = '20201202' LIMIT = 30 # + id="6VpOf4ARKN9m" radius = 1000 LIMIT = 100 url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, latitude_x, longitude_y, radius, LIMIT) results = requests.get(url).json() # + id="Dvhb67qjKN9m" outputId="671ef6b3-0acc-478d-ccf3-bde6d28364b3" venues=results['response']['groups'][0]['items'] nearby_venues = json_normalize(venues) nearby_venues.columns # + id="foPd70QOKN9n" # + id="xZ4V60onKN9n" def get_category_type(row): try: categories_list = row['categories'] except: categories_list = row['venue.categories'] if len(categories_list) == 0: return None else: return categories_list[0]['name'] # + id="tMXQEXymKN9n" # + [markdown] id="-3VvwF7eKN9n" # # Nearby Location # + id="wt8yUUtgKN9n" outputId="2a3aece8-3ffd-4961-be75-3cb1802c3687" filtered_columns = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng'] nearby_venues 
=nearby_venues.loc[:, filtered_columns] nearby_venues.head() # + id="78hRw3DIKN9o" # + [markdown] id="dgJHKl6rKN9o" # # Categories of Nearby Locations # + id="vgs-waBaKN9o" outputId="c3a7e1ec-79c0-40ec-d62d-002317b176df" nearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1) # clean columns nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns] nearby_venues.head(5) # + id="60boNueaKN9o" outputId="ee33eec9-3d77-46f5-f1bb-479ec30151ac" # Top 10 Categories cat=pd.Series(nearby_venues.categories) cat.value_counts()[:10] # + id="CrrEdFEfKN9p" # + id="YNcasoVWKN9p" def getNearbyVenues(names, latitudes, longitudes, radius=1000): venues_list=[] for name, lat, lng in zip(names, latitudes, longitudes): print(name) url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, LIMIT) # making GET request venue_results = requests.get(url).json()["response"]['groups'][0]['items'] # return only relevant information for each nearby venue venues_list.append([( name, lat, lng, v['venue']['name'], v['venue']['location']['lat'], v['venue']['location']['lng'], v['venue']['categories'][0]['name']) for v in venue_results]) nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list]) nearby_venues.columns = ['Neighborhood', 'Neighborhood Latitude', 'Neighborhood Longitude', 'Venue', 'Venue Latitude', 'Venue Longitude', 'Venue Category'] return(nearby_venues) # + id="JHhqI_47KN9p" outputId="119ca205-e974-4c0e-916f-e817bdb2b8cb" # Nearby Venues Queens_venues = getNearbyVenues(names=ny_df['Neighborhood'], latitudes=ny_df['Latitude'], longitudes=ny_df['Longitude'] ) # + id="E2qhimWeKN9q" outputId="886189f1-0ba4-46c4-92df-0413ac394f2c" print('There are {} Uniques Categories.'.format(len(Queens_venues['Venue Category'].unique()))) Queens_venues.groupby('Neighborhood').count().head() # + [markdown] 
id="k_FbFcbFKN9q" # # One Hot Encoding # + id="QwH8225ZKN9r" outputId="161ccb69-2107-470b-abe0-8febe196512e" # one hot encoding Queens_onehot = pd.get_dummies(Queens_venues[['Venue Category']], prefix="", prefix_sep="") # add neighborhood column back to dataframe Queens_onehot['Neighborhood'] = Queens_venues['Neighborhood'] # move neighborhood column to the first column fixed_columns = [Queens_onehot.columns[-1]] + list(Queens_onehot.columns[:-1]) Queens_onehot = Queens_onehot[fixed_columns] Queens_grouped = Queens_onehot.groupby('Neighborhood').mean().reset_index() Queens_onehot.head(5) # + id="h8VXy3z1KN9r" outputId="fe9cda0d-78e2-430c-fe1b-018c006e851c" num_top_venues = 5 for hood in Queens_grouped['Neighborhood']: print("---- "+hood+" ----") temp =Queens_grouped[Queens_grouped['Neighborhood'] == hood].T.reset_index() temp.columns = ['venue','freq'] temp = temp.iloc[1:] temp['freq'] = temp['freq'].astype(float) temp = temp.round({'freq': 2}) print(temp.sort_values('freq', ascending=False).reset_index(drop=True).head(num_top_venues)) print('\n') # + [markdown] id="vhER0FNnKN9s" # Most Common Venues # + id="kssrZIp0KN9s" def return_most_common_venues(row, num_top_venues): row_categories = row.iloc[1:] row_categories_sorted = row_categories.sort_values(ascending=False) return row_categories_sorted.index.values[0:num_top_venues] # + id="HnIewr2kKN9s" import numpy as np num_top_venues = 10 indicators = ['st', 'nd', 'rd'] columns = ['Neighborhood'] for ind in np.arange(num_top_venues): try: columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind])) except: columns.append('{}th Most Common Venue'.format(ind+1)) neighborhoods_venues_sorted = pd.DataFrame(columns=columns) neighborhoods_venues_sorted['Neighborhood'] = Queens_grouped['Neighborhood'] for ind in np.arange(Queens_grouped.shape[0]): neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(Queens_grouped.iloc[ind, :], num_top_venues) neighborhoods_venues_sorted.head() # + [markdown] 
id="DJTyH_mGKN9s" # # K-Means Clustering # + id="8Dkcx80GKN9s" # Using K-Means to cluster neighborhood into 4 clusters Queens_grouped_clustering = Queens_grouped.drop('Neighborhood', 1) kmeans = KMeans(n_clusters=4, random_state=0).fit(Queens_grouped_clustering) kmeans.labels_ # + id="S5ufFdrsKN9u" neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_) Queens_merged =ny_df.iloc[:16,:] # add latitude/longitude for each neighborhood Queens_merged = Queens_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood') Queens_merged.head() # + id="COmZ_R0oKN9v" Queens_merged.tail() # + id="H1rLN_cVKN9v" # create map kclusters = 10 map_clusters = folium.Map(location=[latitude_x, longitude_y], zoom_start=10) # set color scheme for the clusters x = np.arange(kclusters) colors_array = cm.rainbow(np.linspace(0, 1, kclusters)) rainbow = [colors.rgb2hex(i) for i in colors_array] print(rainbow) # add markers to the map markers_colors = [] for lat, lon, nei , cluster in zip(Queens_merged['Latitude'], Queens_merged['Longitude'], Queens_merged['Neighborhood'], Queens_merged['Cluster Labels']): label = folium.Popup(str(nei) + ' Cluster ' + str(cluster), parse_html=True) folium.CircleMarker( [lat, lon], radius=5, popup=label, color=rainbow[cluster-1], fill=True, fill_color=rainbow[cluster-1], fill_opacity=0.7).add_to(map_clusters) map_clusters # + id="B6-afIP3KN9v" # + id="6ZmGgX8IKN9w"
The_Battle_of_Neighborhood_WK2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Task 13 - Knee angle
# Implement the knee angle changes for isometric contraction, using the
# model of Nigg and Herzog (2003)

# ## Simulation - Series

def TendonForce(Lnorm_see, Lslack, Lce_o):
    '''
    Compute tendon force

    Inputs:
        Lnorm_see = normalized tendon length
        Lslack = slack length of the tendon (non-normalized)
        Lce_o = optimal length of the fiber

    Output:
        Fnorm_tendon = normalized tendon force
    '''
    Umax = .04  # tendon strain at maximal isometric force

    if Lnorm_see < Lslack/Lce_o:
        # A slack tendon transmits no force.
        Fnorm_tendon = 0
    else:
        # Quadratic toe region of the tendon force-length curve.
        Fnorm_tendon = ((Lnorm_see - Lslack/Lce_o)/(Umax*Lslack/Lce_o))**2

    return Fnorm_tendon


def ParallelElementForce(Lnorm_ce):
    '''
    Compute parallel element force

    Inputs:
        Lnorm_ce = normalized contractile element length

    Output:
        Fnorm_kpe = normalized parallel element force
    '''
    Umax = 1  # strain of the parallel elastic element at Fmax

    if Lnorm_ce < 1:
        # The parallel element only resists stretch beyond optimal length.
        Fnorm_kpe = 0
    else:
        Fnorm_kpe = ((Lnorm_ce - 1)/(Umax*1))**2

    return Fnorm_kpe


def ForceLengthCurve(Lnorm_ce, width):
    '''
    Isometric force-length relationship of the contractile element.

    Inputs:
        Lnorm_ce = normalized contractile element length
        width = width of the force-length parabola

    Output:
        F0 = normalized isometric force at Lnorm_ce (clipped at 0)
    '''
    F0 = max([0, (1 - ((Lnorm_ce - 1)/width)**2)])
    return F0


def ContractileElementDot(F0, Fnorm_CE, a):
    '''
    Compute Contractile Element Derivative (Hill-type force-velocity)

    Inputs:
        F0 = isometric force from the force-length curve
        Fnorm_CE = contractile element force
        a = muscular activation

    Output:
        Lnorm_cedot = normalized contractile element length derivative
    '''
    FMlen = 1.4  # maximal eccentric force gain, young adults
    Vmax = 10    # maximal shortening velocity, young adults
    Af = 0.25    # force-velocity shape factor

    # Cap the CE force just below the eccentric asymptote so the inverted
    # force-velocity relation below stays finite.
    Fnorm_CE = min(FMlen*a*F0 - 0.001, Fnorm_CE)

    if Fnorm_CE > a*F0:
        # Eccentric (lengthening) branch.
        b = ((2 + 2/Af)*(a*F0*FMlen - Fnorm_CE))/(FMlen - 1)
    else:
        # Concentric (shortening) branch, Fnorm_CE <= a*F0.
        b = a*F0 + Fnorm_CE/Af

    Lnorm_cedot = (.25 + .75*a)*Vmax*((Fnorm_CE - a*F0)/b)

    return Lnorm_cedot


def ContractileElementForce(Fnorm_tendon, Fnorm_kpe, alpha):
    '''
    Compute Contractile Element force

    Inputs:
        Fnorm_tendon = normalized tendon force
        Fnorm_kpe = normalized parallel element force
        alpha = pennation angle [rad]

    Output:
        Fnorm_CE = normalized contractile element force
    '''
    # Force balance along the fiber: tendon force projected through the
    # pennation angle minus the passive parallel-element contribution.
    Fnorm_CE = Fnorm_tendon/np.cos(alpha) - Fnorm_kpe
    return Fnorm_CE


def tendonLength(Lm, Lce_o, Lnorm_ce, alpha):
    '''
    Compute tendon length

    Inputs:
        Lm = total muscle-tendon length (non-normalized)
        Lce_o = optimal length of the fiber
        Lnorm_ce = normalized contractile element length
        alpha = pennation angle [rad]

    Output:
        Lnorm_see = normalized tendon length
    '''
    Lnorm_see = Lm/Lce_o - Lnorm_ce*np.cos(alpha)
    return Lnorm_see


def activation(a, u, dt):
    '''
    Compute activation (first-order activation dynamics, Euler step)

    Inputs:
        u = idealized muscle excitation signal, 0 <= u <= 1
        a = muscular activation
        dt = time step

    Output:
        a = updated muscular activation
    '''
    tau_deact = 50e-3  # deactivation time constant, young adults
    tau_act = 15e-3    # activation time constant

    # Activation is faster than deactivation; both depend on current a.
    if u > a:
        tau_a = tau_act*(0.5 + 1.5*a)
    else:  # u <= a
        tau_a = tau_deact/(0.5 + 1.5*a)

    # Euler integration of da/dt = (u - a)/tau.
    dadt = (u - a)/tau_a
    a += dadt*dt

    return a


def TotalMomentKneeJoint(Rf, Fnorm_tendon, m, g, Rcm, phi):
    '''
    Total moment about the knee joint: muscle moment minus the gravitational
    moment of the shank about the joint.

    Inputs:
        Rf = muscle moment arm
        Fnorm_tendon = normalized tendon force
        m, g = segment mass and gravity
        Rcm = distance from joint to segment center of mass
        phi = joint angle [rad]

    Output:
        M = total joint moment
    '''
    # NOTE(review): this uses the *normalized* tendon force; if a physical
    # moment is intended it should probably be Fnorm_tendon*Fmax — confirm
    # against the course material before relying on the absolute angles.
    M = Rf*Fnorm_tendon - m*g*Rcm*np.sin(phi - np.pi/2)
    return M


# ## Simulation - Parallel

# +
import numpy as np
import matplotlib.pyplot as plt
import math

# %matplotlib inline

# +
# Muscle properties
Lslack = .223
Umax = .04
Lce_o = .093  # optimal fiber length
width = .63   # *Lce_o
Fmax = 7400
a = 0
u = 0.5
# b = .25*10  # *Lce_o

# Parameters for the equation of motion
m = 10
g = 9.81
Rcm = 0.264
I = 0.1832
Rf = 0.033

# Initial conditions
Lnorm_ce = .087/Lce_o  # normalized
t0 = 0
tf = 5
h = 1e-3
phi = np.pi/2
phid = 0.0

# Initialization of the time grid and output buffers
t = np.arange(t0, tf, h)
F = np.empty(t.shape)
Fkpe = np.empty(t.shape)
FiberLen = np.empty(t.shape)
TendonLen = np.empty(t.shape)
a_dynamics = np.empty(t.shape)
Phi = np.empty(t.shape)

# +
phi = np.pi/2
phid = 0.0

# Normalizing
alpha = 0*np.pi/180  # pennation angle (0 here)

for i in range(len(t)):
    # ramp: hold Lm, then shorten at 4 cm/s between t = 1 s and t = 2 s.
    if t[i] <= 1:
        Lm = 0.31
    elif t[i] > 1 and t[i] < 2:
        Lm = .31 - .04*(t[i] - 1)
    # NOTE: for t >= 2 s, Lm silently keeps its last value (0.27 m) from
    # the previous iteration — intentional hold, but fragile if the loop
    # were ever restarted at t >= 2.
    # print(Lm)

    u = 0.7 + 0.2*np.sin(np.pi*t[i])

    Lnorm_see = tendonLength(Lm, Lce_o, Lnorm_ce, alpha)
    Fnorm_tendon = TendonForce(Lnorm_see, Lslack, Lce_o)
    Fnorm_kpe = ParallelElementForce(Lnorm_ce)

    # isometric force at Lce from CE force-length relationship
    F0 = ForceLengthCurve(Lnorm_ce, width)
    Fnorm_CE = ContractileElementForce(Fnorm_tendon, Fnorm_kpe, alpha)
    # Fnorm_CE = ~Fm

    # computing activation
    a = activation(a, u, h)

    # calculate CE velocity from Hill's equation
    Lnorm_cedot = ContractileElementDot(F0, Fnorm_CE, a)

    M = TotalMomentKneeJoint(Rf, Fnorm_tendon, m, g, Rcm, phi)  # total moment with respect to knee joint
    phidd = M/I  # angular acceleration

    # --- Euler integration step
    Lnorm_ce += h*Lnorm_cedot
    phid = phid + phidd*h
    phi = phi + phid*h

    F[i] = Fnorm_tendon*Fmax
    Fkpe[i] = Fnorm_kpe*Fmax
    FiberLen[i] = Lnorm_ce*Lce_o
    TendonLen[i] = Lnorm_see*Lce_o
    a_dynamics[i] = a
    Phi[i] = phi
# -

# ## Plots

# +
fig, ax = plt.subplots(1, 1, figsize=(6, 6), sharex=True)
ax.plot(t, Phi*180/np.pi, c='magenta')
plt.grid()
plt.xlabel('time [s]')
plt.ylabel('Knee Angle [degrees]')

# +
fig, ax = plt.subplots(1, 1, figsize=(6, 6), sharex=True)
ax.plot(t, F, c='red')
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('Force (N)')

# +
fig, ax = plt.subplots(1, 1, figsize=(6, 6), sharex=True)
ax.plot(t, FiberLen, label='fiber')
ax.plot(t, TendonLen, label='tendon')
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('Length (m)')
ax.legend(loc='best')

fig, ax = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)
ax[0].plot(t, FiberLen, label='fiber')
plt.grid()
ax[1].plot(t, TendonLen, label='tendon')
plt.grid()
ax[2].plot(t, FiberLen + TendonLen, label='muscle (tendon + fiber)')
plt.grid()
ax[1].set_xlabel('time (s)')
ax[0].set_ylabel('Length (m)')
# -
courses/modsim2018/tasks/Tasks_For&During_Lecture13/.ipynb_checkpoints/Task_ForLect13-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: rl_pytorch # language: python # name: rl_pytorch # --- import gym import numpy as np # create deterministic version of Frozen Lake from gym.envs.registration import register register( id='FrozenLakeNotSlippery-v0', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name' : '4x4', 'is_slippery': False} ) # ### Cross-Entropy Method # + def run_sample(env, policy, action_size): state = env.reset() # get action counts for each sample action_count = np.zeros(np.shape(policy)) total_reward = 0 while True: action = np.random.choice(action_size,p=policy[state]) action_count[state,action] += 1 state, reward, done, info = env.step(action) total_reward += reward if done: break return total_reward, action_count def update_policy(policy, s_list, learning_rate, keep): # only keep best samples s_list.sort(key=lambda x: x[0], reverse=True) s_list = s_list[:keep] # get the action counts for the best performers in one array best_policy = np.zeros(np.shape(policy)) for s in s_list: best_policy += s[1] # update policy using learning rate for i in range(len(best_policy)): total_actions = np.sum(best_policy[i]) if total_actions > 0: policy[i] = (best_policy[i]/total_actions)*learning_rate + policy[i]*(1.-learning_rate) # normalize the probabilities of the policy so that they sum to 1 policy = policy / np.sum(policy,axis=1)[:,None] return policy # - # ### FrozenLake Not Slippery # + # create environment env = gym.make('FrozenLakeNotSlippery-v0') state_size = env.nS action_size = env.nA # hyperparameters samples = 100 learning_rate = 0.1 trial = 100 keep_best = int(0.2*samples) # + # initialize policy policy_array = np.ones((state_size, action_size))/action_size # run trials and collect samples with each trial # update policy at end of each trial for t in range(trial): sample_list = [] for s in range(samples): 
reward, action_table = run_sample(env, policy_array, action_size) sample_list.append((reward, action_table)) policy_array = update_policy(policy_array, sample_list, learning_rate, keep_best) # + # evaluate the agent using the found policy episodes = 100 episode_reward_list, episode_len_list = [], [] for i in range(episodes): state = env.reset() episode_reward = 0 episode_length = 0 while True: action = np.argmax(policy_array[state]) state, reward, done, info = env.step(action) episode_reward += reward episode_length += 1 if done: episode_reward_list.append(episode_reward) episode_len_list.append(episode_length) #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length)) break print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list))) # - # ### FrozenLake Slippery # + # create environment env = gym.make('FrozenLake-v0') state_size = env.nS action_size = env.nA # hyperparameters samples = 100 learning_rate = 0.1 trial = 1000 keep_best = int(0.2*samples) # + # initialize policy policy_array = np.ones((state_size, action_size))/action_size # run trials and collect samples with each trial # update policy at end of each trial for t in range(trial): sample_list = [] for s in range(samples): reward, action_table = run_sample(env, policy_array, action_size) sample_list.append((reward, action_table)) policy_array = update_policy(policy_array, sample_list, learning_rate, keep_best) # + # evaluate the agent using the found policy episodes = 100 episode_reward_list, episode_len_list = [], [] for i in range(episodes): state = env.reset() episode_reward = 0 episode_length = 0 while True: action = np.argmax(policy_array[state]) state, reward, done, info = env.step(action) episode_reward += reward episode_length += 1 if done: episode_reward_list.append(episode_reward) episode_len_list.append(episode_length) #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length)) break 
print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list)))
# -

# ### Taxi Environment

# +
# create environment
env = gym.make('Taxi-v2')
state_size = env.nS
action_size = env.nA

# hyperparameters
samples = 100
learning_rate = 0.1
trial = 1000
keep_best = int(0.5*samples)

# +
# uniform initial policy, then cross-entropy training as above
policy_array = np.ones((state_size, action_size))/action_size

for t in range(trial):
    sample_list = []
    for s in range(samples):
        reward, action_table = run_sample(env, policy_array, action_size)
        sample_list.append((reward, action_table))
    policy_array = update_policy(policy_array, sample_list, learning_rate, keep_best)

# +
# evaluate the agent using the found policy
episodes = 100
episode_reward_list, episode_len_list = [], []
for i in range(episodes):
    state = env.reset()
    episode_reward = 0
    episode_length = 0
    while True:
        # choose action based on best action in that state
        action = np.argmax(policy_array[state])
        # choose action based policy distribution
        #action = np.random.choice(action_size,p=policy_array[state])
        state, reward, done, info = env.step(action)
        episode_reward += reward
        episode_length += 1
        if done:
            episode_reward_list.append(episode_reward)
            episode_len_list.append(episode_length)
            #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length))
            break

print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list)))
# -

# ## Cross-Entropy Method using PyTorch

# +
import torch

if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

# +
# create environment
env = gym.make('FrozenLakeNotSlippery-v0')
state_size = env.nS
action_size = env.nA

# hyperparameters
samples = 100
learning_rate = 0.1
smoothing_factor = 1
trial = 200
keep_best = int(0.2*samples)

# +
def run_sample_tensor(env, policy, state_size, action_size, device):
    """Roll out one episode under the (numpy) policy; return total reward and
    a (state, action) visit-count tensor.

    NOTE(review): `device` is accepted but unused — the `.to(device)` calls are
    commented out, so both tensors stay on CPU.
    """
    state = env.reset()
    # get action counts for each sample
    action_count = torch.zeros((state_size, action_size))#.to(device)
    total_reward = torch.zeros((1))#.to(device)
    while True:
        # sample an action from the policy's distribution for this state
        action = np.random.choice(action_size, p=policy[state])
        action_count[state, action] += 1
        state, reward, done, info = env.step(action)
        total_reward += reward
        if done:
            break
    return total_reward, action_count

def update_policy_tensor(policy, samp_tensor, rew_tensor, learn_rate, smooth_factor, action_size, keep, device):
    """Cross-entropy policy update from a batch of sampled episodes.

    NOTE(review): `smooth_factor`, `action_size` and `device` are accepted but
    never read in this body — kept for signature compatibility with callers.
    """
    # sort tensor by reward and return indices of best performers to samp_index
    samp_index = rew_tensor.sort(descending=True)[1]
    # get indices of best samples
    samp_index = samp_index[:keep]
    # only keep best samples by using samp_index and index_select
    samp_tensor = samp_tensor.index_select(0, samp_index)
    # sum the results to get the action counts by state and action
    samp_tensor = samp_tensor.sum(dim=0)
    # sum to get action counts by state
    action_count = samp_tensor.sum(dim=1)
    # only want states visited at least once
    mask = action_count.ge(0.5)
    # update policy with best samples
    policy[mask] = samp_tensor[mask]/action_count[mask,None]*learn_rate + policy[mask]*(1.-learn_rate)
    # normalize policy so that they sum to 1
    policy = policy / policy.sum(dim=1)[:,None]
    return policy

# +
# uniform initial policy as a torch tensor
policy_tensor = torch.ones((state_size,action_size)).to(device)/action_size

for t in range(trial):
    sample_tensor = torch.zeros((samples, state_size, action_size)).to(device)
    reward_tensor = torch.zeros((samples)).to(device)
    # rollouts use a numpy copy of the policy (np.random.choice needs numpy)
    policy_array = policy_tensor.cpu().numpy()
    for s in range(samples):
        reward, action_table_tensor = run_sample_tensor(env, policy_array, state_size, action_size, device)
        reward_tensor[s] = reward
        sample_tensor[s] = action_table_tensor
    policy_tensor = update_policy_tensor(policy_tensor, sample_tensor, reward_tensor, learning_rate, smoothing_factor, action_size, keep_best, device)

# +
# evaluate the agent using the found policy
episodes = 100
episode_reward_list, episode_len_list = [], []
policy_array = policy_tensor.cpu().numpy()
for i in range(episodes):
    state = env.reset()
    episode_reward = 0
    episode_length = 0
    while True:
        # choose action based on best action in that state
        action = np.argmax(policy_array[state])
        # choose action based policy distribution
        #action = np.random.choice(action_size,p=policy_array[state])
        state, reward, done, info = env.step(action)
        episode_reward += reward
        episode_length += 1
        if done:
            episode_reward_list.append(episode_reward)
            episode_len_list.append(episode_length)
            #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length))
            break

print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list)))
# -

# ### FrozenLake Slippery using PyTorch

# +
# create environment
env = gym.make('FrozenLake-v0')
state_size = env.nS
action_size = env.nA

# hyperparameters
samples = 100
learning_rate = 0.1
smoothing_factor = 1
trial = 1000
keep_best = int(0.2*samples)

policy_tensor = torch.ones((state_size,action_size)).to(device)/action_size

for t in range(trial):
    sample_tensor = torch.zeros((samples, state_size, action_size)).to(device)
    reward_tensor = torch.zeros((samples)).to(device)
    policy_array = policy_tensor.cpu().numpy()
    for s in range(samples):
        reward, action_table_tensor = run_sample_tensor(env, policy_array, state_size, action_size, device)
        reward_tensor[s] = reward
        sample_tensor[s] = action_table_tensor
    policy_tensor = update_policy_tensor(policy_tensor, sample_tensor, reward_tensor, learning_rate, smoothing_factor, action_size, keep_best, device)

# +
# evaluate the agent using the found policy
episodes = 100
episode_reward_list, episode_len_list = [], []
policy_array = policy_tensor.cpu().numpy()
for i in range(episodes):
    state = env.reset()
    episode_reward = 0
    episode_length = 0
    while True:
        # choose action based on best action in that state
        action = np.argmax(policy_array[state])
        # choose action based policy distribution
        #action = np.random.choice(action_size,p=policy_array[state])
        state, reward, done, info = env.step(action)
        episode_reward += reward
        episode_length += 1
        if done:
            episode_reward_list.append(episode_reward)
            episode_len_list.append(episode_length)
            #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length))
            break

print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list)))
# -

# ### Taxi using PyTorch

# +
# create environment
env = gym.make('Taxi-v2')
state_size = env.nS
action_size = env.nA

# hyperparameters
samples = 100
learning_rate = 0.1
smoothing_factor = 1
trial = 1000
keep_best = int(0.5*samples)

policy_tensor = torch.ones((state_size,action_size)).to(device)/action_size

for t in range(trial):
    sample_tensor = torch.zeros((samples, state_size, action_size)).to(device)
    reward_tensor = torch.zeros((samples)).to(device)
    policy_array = policy_tensor.cpu().numpy()
    for s in range(samples):
        reward, action_table_tensor = run_sample_tensor(env, policy_array, state_size, action_size, device)
        reward_tensor[s] = reward
        sample_tensor[s] = action_table_tensor
    policy_tensor = update_policy_tensor(policy_tensor, sample_tensor, reward_tensor, learning_rate, smoothing_factor, action_size, keep_best, device)

# +
# evaluate the agent using the found policy
episodes = 100
episode_reward_list, episode_len_list = [], []
policy_array = policy_tensor.cpu().numpy()
for i in range(episodes):
    state = env.reset()
    episode_reward = 0
    episode_length = 0
    while True:
        # choose action based on best action in that state
        action = np.argmax(policy_array[state])
        # choose action based policy distribution
        #action = np.random.choice(action_size,p=policy_array[state])
        state, reward, done, info = env.step(action)
        episode_reward += reward
        episode_length += 1
        if done:
            episode_reward_list.append(episode_reward)
            episode_len_list.append(episode_length)
            #print("Episode {}: Reward: {} Length: {}".format(i, episode_reward, episode_length))
            break

print("Average Reward: {} Average Length: {}".format(np.mean(episode_reward_list), np.mean(episode_len_list)))
# -
Section 2/2.7 Cross-Entropy Method.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BenYavor/Wireless_encoding_with_MI_estimation/blob/master/Communication_Autoencoder_with_MI_channel_estimation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="EkWq8Zt_isjR" colab_type="code" outputId="8f3cc779-e404-4025-d29a-7c569caa2d15" colab={"base_uri": "https://localhost:8080/", "height": 644}
# !pip install tensorflow==2.0.0
import sys
assert sys.version_info >= (3, 5)
import numpy as np
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from scipy import special
from tensorflow.keras import layers
np.random.seed(42)
tf.random.set_seed(42)
import time

# + id="MXJRVGze4Lgb" colab_type="code" colab={}
# M-ary constellation: M symbols, k bits per symbol, n channel uses per symbol
M = 16
k = int(np.log2(M))
n = 1
TRAINING_SNR = 7

# + id="oUypWuZ9jAcV" colab_type="code" colab={}
def EbNo_to_noise(ebnodb):
    '''Transform EbNo[dB]/snr to noise power'''
    ebno = 10**(ebnodb/10)
    noise_std = 1/np.sqrt(2*(k/n)*ebno)
    return noise_std

def SNR_to_noise(snrdb):
    '''Transform EbNo[dB]/snr to noise power'''
    snr = 10**(snrdb/10)
    noise_std = 1/np.sqrt(2*snr)
    return noise_std

# + id="x81YbgfUjDLE" colab_type="code" colab={}
# Statistics network T(x, y) for the MINE mutual-information estimator.
randN_05 = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
bias_init = keras.initializers.Constant(0.01)

input_A = keras.layers.Input(shape=[4*n])
input_B = keras.layers.Input(shape=[4*n])

transform = keras.models.Sequential([
    layers.Dense(128, bias_initializer=bias_init, kernel_initializer=randN_05, activation="relu"),  #256
    #keras.layers.Dropout(rate=0.3), # To regularize higher dimensionality
    layers.Dense(128, bias_initializer=bias_init, kernel_initializer=randN_05, activation="relu"),  #256
    #keras.layers.Dropout(rate=0.3), # To regularize higher dimensionality
    layers.Dense(1, bias_initializer=bias_init, kernel_initializer=randN_05, activation=None)])

output_A = transform(input_A)
output_B = transform(input_B)

# Donsker-Varadhan bound: E_joint[T] - log E_marginal[exp(T)]
output_C = tf.reduce_mean(output_A) - tf.math.log(tf.reduce_mean(tf.exp(output_B)))  # MINE

MI_mod = keras.models.Model(inputs=[input_A, input_B], outputs=output_C)
#print(MI_mod.inputs)
#MI_mod.summary()
#keras.utils.plot_model(MI_mod, 'Structure_of_MI_estimation.png', show_shapes=True)

# + id="E6C69Kzzi4cZ" colab_type="code" colab={}
noise_std = EbNo_to_noise(TRAINING_SNR)

# custom functions / layers without weights
norm_layer = keras.layers.Lambda(lambda x: tf.divide(x,tf.sqrt(2*tf.reduce_mean(tf.square(x)))))
shape_layer = keras.layers.Lambda(lambda x: tf.reshape(x, shape=[-1,2,n]))
shape_layer2 = keras.layers.Lambda(lambda x: tf.reshape(x, shape=[-1,2*n]))
channel_layer = keras.layers.Lambda(lambda x: x + tf.random.normal(tf.shape(x), mean=0.0, stddev=noise_std))

encoder = keras.models.Sequential([
    keras.layers.Embedding(M, M, embeddings_initializer='glorot_normal'),
    keras.layers.Dense(M*4, activation="elu"),  # M
    keras.layers.Dense(M*4, activation="elu"),  #optional
    keras.layers.Dense(2*n, activation=None),
    shape_layer,
    norm_layer])

channel = keras.models.Sequential([channel_layer])

decoder = keras.models.Sequential([
    keras.layers.InputLayer(input_shape=[2,n]),
    shape_layer2,
    keras.layers.Dense(M*4, activation="elu"),  #M
    keras.layers.Dense(M*2, activation="elu"),  #optional
    keras.layers.Dense(M, activation="softmax")
])

autoencoder = keras.models.Sequential([encoder, channel, decoder])

# + id="PA9p9oXcjIa4" colab_type="code" colab={}
def B_Ber_m(input_msg, msg):
    '''Calculate the Batch Bit Error Rate'''
    pred_error = tf.not_equal(input_msg, tf.argmax(msg, 1))
    bber = tf.reduce_mean(tf.cast(pred_error, tf.float32))
    return bber

# + id="l75z0TB6jKOV" colab_type="code" colab={}
def random_sample(batch_size=32):
    # uniform random message symbols in [0, M)
    msg = np.random.randint(M, size=batch_size)
    return msg

# + id="9yqE4rYojNw_" colab_type="code" colab={}
def test_encoding(M=16, n=1):
    # scatter-plot of the learned 2-D constellation for all M messages
    inp = np.arange(0,M)
    coding = encoder.predict(inp)
    fig = plt.figure(figsize=(4,4))
    plt.plot(coding[:,0], coding[:, 1], "b.")
    plt.xlabel("$x_1$", fontsize=18)
    plt.ylabel("$x_2$", fontsize=18, rotation=0)
    plt.grid(True)
    plt.gca().set_ylim(-2, 2)
    plt.gca().set_xlim(-2, 2)
    plt.show()

def test_noisy_codeword(data):
    rcvd_word = data[1:2000]
    fig = plt.figure(figsize=(4,4))
    plt.plot(rcvd_word[:,0], rcvd_word[:, 1], "b.")
    plt.xlabel("$x_1$", fontsize=18)
    plt.ylabel("$x_2$", fontsize=18, rotation=0)
    plt.grid(True)
    plt.gca().set_ylim(-2, 2)
    plt.gca().set_xlim(-2, 2)
    plt.show()

# + id="YMwTXx4qjQgY" colab_type="code" colab={}
loss_fn = keras.losses.SparseCategoricalCrossentropy()
mean_loss = keras.metrics.Mean()

# + id="afmRKoJ8jTGx" colab_type="code" colab={}
def plot_loss(step, epoch, mean_loss, X_batch, y_pred, plot_encoding):
    template = 'Iteration: {}, Epoch: {}, Loss: {:.5f}, Batch_BER: {:.5f}'
    if step % 10 == 0:
        print(template.format(step, epoch, mean_loss.result(), B_Ber_m(X_batch, y_pred)))
        if plot_encoding:
            test_encoding()

# + id="MUxXl-Hy4sCQ" colab_type="code" colab={}
def plot_batch_loss(epoch, mean_loss, X_batch, y_pred):
    template_outer_loop = 'Interim result for Epoch: {}, Loss: {:.5f}, Batch_BER: {:.5f}'
    print(template_outer_loop.format(epoch, mean_loss.result(), B_Ber_m(X_batch, y_pred)))

# + id="2SI75bbcJ0bg" colab_type="code" colab={}
def split_train_data(x_sample, y_sample):
    # Build the joint batch (x, y) and the "marginal" batch (x', y) by pairing
    # the second half of x with the first half of y — the shuffled product
    # distribution MINE needs.
    x_shaped = tf.reshape(x_sample, shape=[-1,2*n])
    y_shaped = tf.reshape(y_sample, shape=[-1,2*n])
    x_sample1, x_sample2 = tf.split(x_shaped, num_or_size_splits=2)
    y_sample1, y_sample2 = tf.split(y_shaped, num_or_size_splits=2)
    joint_sample = tf.concat([x_sample1, y_sample1], axis=1)
    marg_sample = tf.concat([x_sample2, y_sample1], axis=1)
    return joint_sample, marg_sample

# + id="Tuf3BQ1dhl1F" colab_type="code" colab={}
def train_mi(n_epochs=5, n_steps=20, batch_size=200):
    # Train only the MINE statistics network against the current encoder/channel.
    i = 0
    for epoch in range(1, n_epochs + 1):
        print("Training in Epoch {}/{}".format(epoch, n_epochs))
        for step in range(1, n_steps + 1):
            i += 1
            # NOTE(review): `losses` is re-created every step, so each logged
            # value is a single-step loss, not a running mean.
            losses = []
            X_batch = random_sample(batch_size*2)
            loss = mi_train_step(X_batch)
            mi_avg = -mean_loss(loss)
            losses.append(loss)
            if i % 20 == 0:
                MINE_losses.append(np.mean(losses))
        print('Epoch: {}, Mi is {}'.format(epoch, mi_avg))
        mean_loss.reset_states()

@tf.function
def mi_train_step(X_batch):
    with tf.GradientTape() as tape:
        x_enc = encoder(X_batch, training=True)
        y_recv = channel(x_enc)
        joint_marg_s = split_train_data(x_enc, y_recv)
        loss = -MI_mod(joint_marg_s)
    gradients = tape.gradient(loss, MI_mod.trainable_variables)
    optimizer_mi.apply_gradients(zip(gradients, MI_mod.trainable_variables))
    return loss

# + [markdown] id="zB73RkszjT9j" colab_type="text"
# ## Training Function

# + id="_sUbcMOpjUGr" colab_type="code" colab={}
def train_decoder(n_epochs=5, n_steps=20, batch_size=200, plot_encoding=True):
    # Train only the decoder with cross-entropy on the symbol labels.
    i = 0
    for epoch in range(1, n_epochs + 1):
        print("Training Bob in Epoch {}/{}".format(epoch, n_epochs))
        for step in range(1, n_steps + 1):
            i += 1
            losses = []
            X_batch = random_sample(batch_size)
            loss, y_pred = dec_train_step(X_batch)
            mean_loss(loss)
            plot_loss(step, epoch, mean_loss, X_batch, y_pred, plot_encoding)
            losses.append(loss)
            if i % 20 == 0:
                Dec_losses.append(np.mean(losses))
        plot_batch_loss(epoch, mean_loss, X_batch, y_pred)
        mean_loss.reset_states()

@tf.function
def dec_train_step(X_batch):
    with tf.GradientTape() as tape:
        y_pred = autoencoder(X_batch, training=True)
        loss = tf.reduce_mean(loss_fn(X_batch, y_pred))
    gradients = tape.gradient(loss, decoder.trainable_variables)
    optimizer_ae.apply_gradients(zip(gradients, decoder.trainable_variables))
    return loss, y_pred

# + id="gofvRVrTk_9L" colab_type="code" colab={}
def train_encoder(n_epochs=5, n_steps=20, batch_size=200):
    # Alternate: (1) update the encoder to maximise the MI estimate,
    # (2) refresh the MINE network on the new encoder output.
    i = 0
    for epoch in range(1, n_epochs + 1):
        print("Training Bob in Epoch {}/{}".format(epoch, n_epochs))
        for step in range(1, n_steps + 1):
            i += 1
            losses = []
            X_batch = random_sample(batch_size)
            loss = enc_train_step1(X_batch, optimizer_ae)
            mi_avg = -mean_loss(loss)
            losses.append(loss)
            if i % 20 == 0:
                Enc_losses.append(np.mean(losses))
            X_batch = random_sample(batch_size)
            enc_train_step2(X_batch)
        print('Epoch: {}, Mi is {}'.format(epoch, mi_avg))
        # test_encoding(M, 1)

@tf.function
def enc_train_step1(X_batch, optimizer_ae):
    with tf.GradientTape() as tape:
        x_enc = encoder(X_batch, training=True)
        y_recv = channel(x_enc)
        joint_marg_s = split_train_data(x_enc, y_recv)
        loss = -MI_mod(joint_marg_s)
    gradients = tape.gradient(loss, encoder.trainable_variables)
    optimizer_ae.apply_gradients(zip(gradients, encoder.trainable_variables))
    return loss

@tf.function
def enc_train_step2(X_batch):
    with tf.GradientTape() as tape:
        x_enc = encoder(X_batch, training=True)
        y_recv = channel(x_enc)
        joint_marg_s = split_train_data(x_enc, y_recv)
        loss = -MI_mod(joint_marg_s)
    gradients = tape.gradient(loss, MI_mod.trainable_variables)
    optimizer_mi.apply_gradients(zip(gradients, MI_mod.trainable_variables))

# + colab_type="code" id="eHGEqRx5Q7ML" colab={}
def train_encoder2(n_epochs=5, n_steps=20, batch_size=200):
    # Same as train_encoder but the encoder step uses optimizer_ae2
    # (lower learning rate) — used for the fine-tuning phase below.
    i = 0
    for epoch in range(1, n_epochs + 1):
        print("Training Bob in Epoch {}/{}".format(epoch, n_epochs))
        for step in range(1, n_steps + 1):
            losses = []
            i += 1
            X_batch = random_sample(batch_size)
            loss = enc_train_step12(X_batch)
            mi_avg = -mean_loss(loss)
            losses.append(loss)
            if i % 20 == 0:
                Enc_losses.append(np.mean(losses))
            X_batch = random_sample(batch_size)
            enc_train_step2(X_batch)
        print('Epoch: {}, Mi is {}'.format(epoch, mi_avg))
        # test_encoding(M, 1)

@tf.function
def enc_train_step12(X_batch):
    with tf.GradientTape() as tape:
        x_enc = encoder(X_batch, training=True)
        y_recv = channel(x_enc)
        joint_marg_s = split_train_data(x_enc, y_recv)
        loss = -MI_mod(joint_marg_s)
    gradients = tape.gradient(loss, encoder.trainable_variables)
    optimizer_ae2.apply_gradients(zip(gradients, encoder.trainable_variables))
    return loss

# + id="cfFUT6DwlmvJ" colab_type="code" colab={}
# test msg sequence for normal encoding
N_test = 500000
test_msg = np.random.randint(M, size=N_test)

# + id="xVXMlZzTlz8i" colab_type="code" colab={}
def Test_AE(data):
    '''Calculate Bit Error for varying SNRs'''
    snr_range = np.linspace(0, 15, 31)
    bber_vec = [None] * len(snr_range)
    for db in range(len(snr_range)):
        noise_std = EbNo_to_noise(snr_range[db])
        code_word = encoder(data)
        rcvd_word = code_word + tf.random.normal(tf.shape(code_word), mean=0.0, stddev=noise_std)
        dcoded_msg = decoder(rcvd_word)
        bber_vec[db] = B_Ber_m(data, dcoded_msg)
        if (db % 6 == 0) & (db > 0):
            print(f'Progress: {db} of {30} parts')
    return (snr_range, bber_vec)

# + id="vFKf3kxv6h1l" colab_type="code" colab={}
time_to_train_MI = 0
time_to_train_enc = 0
time_to_train_dec = 0
samples_for_training = 0

# + id="c36sWVydq0g6" colab_type="code" colab={}
MINE_losses = []
Enc_losses = []
Dec_losses = []

# + id="wZQLuwHHl1Gj" colab_type="code" outputId="492687dc-7bb3-43d6-b9f0-d390fdf389e1" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
start_mi = time.time()
optimizer_mi = keras.optimizers.Nadam(lr=0.005)
train_mi(n_epochs=1, n_steps=500, batch_size=128)
samples_for_training += 2*(1*128*500)  #could be smaller if used the same data for the n_steps
time_to_train_MI += time.time()-start_mi

start_enc1 = time.time()
optimizer_ae = keras.optimizers.Nadam(lr=0.005)
train_encoder(n_epochs=5, n_steps=400, batch_size=500)
samples_for_training += (5*500) + (5*500*400)
time_to_train_enc += time.time() - start_enc1
test_encoding(M, 1)

start_enc2 = time.time()
optimizer_ae2 = keras.optimizers.Nadam(lr=0.0005)
# BUGFIX: this fine-tuning phase previously called train_encoder again, which
# hard-codes optimizer_ae (lr=0.005); train_encoder2/enc_train_step12 — the
# only users of optimizer_ae2 — were never invoked, so the intended low-rate
# fine-tuning never happened.
train_encoder2(n_epochs=5, n_steps=400, batch_size=500)
samples_for_training += (5*500) + (5*500*400)
time_to_train_enc += time.time() - start_enc2
test_encoding(M, 1)

start_dec = time.time()
optimizer_ae = keras.optimizers.Nadam(lr=0.005)
train_decoder(n_epochs=5, n_steps=400, batch_size=500, plot_encoding=False)
samples_for_training += 5*400*500
time_to_train_dec += time.time() - start_dec

# + id="N9dYDWne8UQN" colab_type="code" outputId="778af068-0fa3-4e50-cb0d-69a99a345073" colab={"base_uri": "https://localhost:8080/", "height": 106}
print('MI time', time_to_train_MI)
print('Enc time', time_to_train_enc)
print('Dec time', time_to_train_dec)
print('time for training', (time_to_train_dec + time_to_train_enc + time_to_train_MI))
print('Samples used for training', samples_for_training)

# + id="z869ARDCr4wg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 395} outputId="508e6fb6-c624-49f6-cdf2-ef929f7f45aa"
fig = plt.figure(figsize=(10, 6))
#plt.plot(Enc_losses,'^-')
#plt.plot(Dec_losses,'*-')
iterations = np.linspace(0, (len(MINE_losses)*20), len(MINE_losses))
plt.plot(iterations, MINE_losses, '^-')
plt.ylabel("Losses", fontsize=14, rotation=90)
plt.xlabel("Iterations", fontsize=14)
plt.legend(['MINE training loss'], prop={'size': 14}, loc='upper right');
plt.grid(True, which="both")

# + colab_type="code" outputId="a3edf718-896b-4a2e-8853-e4b151124528" id="8XsoDMdaw1vo" colab={"base_uri": "https://localhost:8080/", "height": 395}
fig = plt.figure(figsize=(10, 6))
#plt.plot(MINE_losses,'o-')
iterations = np.linspace(0, (len(Enc_losses)*20), len(Enc_losses))
plt.plot(iterations, Enc_losses, '^-')
plt.ylabel("Losses", fontsize=14, rotation=90)
plt.xlabel("Iterations", fontsize=14)
plt.legend(['Enc training loss'], prop={'size': 14}, loc='upper right');
plt.grid(True, which="both")

# + colab_type="code" outputId="c10f2efa-c3fc-4d70-88a2-837cf571a0a5" id="Cj0MS69Dw2NJ" colab={"base_uri": "https://localhost:8080/", "height": 395}
fig = plt.figure(figsize=(10, 6))
#plt.plot(MINE_losses,'o-')
#plt.plot(Enc_losses,'^-')
iterations = np.linspace(0, 2000, 100)
plt.plot(iterations, Dec_losses, '*-')
plt.ylabel("Losses", fontsize=14, rotation=90)
plt.xlabel("Iterations", fontsize=14)
plt.legend(['Dec training loss'], prop={'size': 14}, loc='upper right');
plt.grid(True, which="both")

# + id="z-ELSw_ZnCXT" colab_type="code" outputId="b7b9614c-b7ac-4104-fe0d-4e512938ee97" colab={"base_uri": "https://localhost:8080/", "height": 106}
bber_data = Test_AE(test_msg)

# + id="eN-jOLPqTvrz" colab_type="code" outputId="015c6ead-0461-4d9e-dab2-c53b2c46dcfa" colab={"base_uri": "https://localhost:8080/", "height": 406}
# Approximate 16 QAM Error
def SIXT_QAM_sim(ebno):
    return (3.0/2)*special.erfc(np.sqrt((4.0/10)*10.**(ebno/10)))

ebnodbs = np.linspace(0, 15, 16)
fig = plt.figure(figsize=(10, 6))
plt.semilogy(bber_data[0], bber_data[1], 'o-')
plt.semilogy(ebnodbs, SIXT_QAM_sim(ebnodbs), '^-');
plt.gca().set_ylim(1e-5, 1)
plt.gca().set_xlim(0, 15)
plt.ylabel("Batch Symbol Error Rate", fontsize=14, rotation=90)
plt.xlabel("SNR [dB]", fontsize=18)
plt.legend(['AE with MINE', '16QAM'], prop={'size': 14}, loc='upper right');
plt.grid(True, which="both")

# + id="7Ai0b10W8XJl" colab_type="code" colab={}
Communication_Autoencoder_with_MI_channel_estimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python # language: python # name: conda-env-python-py # --- # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <a href="https://cognitiveclass.ai"><img src = "https://ibm.box.com/shared/static/9gegpsmnsoo25ikkbl4qzlvlyjbgxs5x.png" width = 400> </a> # # <h1 align=center><font size = 5>From Requirements to Collection</font></h1> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # ## Introduction # # In this lab, we will continue learning about the data science methodology, and focus on the **Data Requirements** and the **Data Collection** stages. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # ## Table of Contents # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # 1. [Data Requirements](#0)<br> # 2. [Data Collection](#2)<br> # </div> # <hr> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # # Data Requirements <a id="0"></a> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig1_flowchart_data_requirements.png" width=500> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # In the videos, we learned that the chosen analytic approach determines the data requirements. Specifically, the analytic methods to be used require certain data content, formats and representations, guided by domain knowledge. 
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # In the **From Problem to Approach Lab**, we determined that automating the process of determining the cuisine of a given recipe or dish is potentially possible using the ingredients of the recipe or the dish. In order to build a model, we need extensive data of different cuisines and recipes. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Identifying the required data fulfills the data requirements stage of the data science methodology. # # ----------- # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # # Data Collection <a id="2"></a> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig2_flowchart_data_collection.png" width=500> # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # In the initial data collection stage, data scientists identify and gather the available data resources. These can be in the form of structured, unstructured, and even semi-structured data relevant to the problem domain. 
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # #### Web Scraping of Online Food Recipes # # A researcher named <NAME> scraped tens of thousands of food recipes (cuisines and ingredients) from three different websites, namely: # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig3_allrecipes.png" width=500> # # www.allrecipes.com # # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig4_epicurious.png" width=500> # # www.epicurious.com # # <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig5_menupan.png" width=500> # # www.menupan.com # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # For more information on <NAME> and his research, you can read his paper on [Flavor Network and the Principles of Food Pairing](http://yongyeol.com/papers/ahn-flavornet-2011.pdf). # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Luckily, we will not need to carry out any data collection as the data that we need to meet the goal defined in the business understanding stage is readily available. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # #### We have already acquired the data and placed it on an IBM server. Let's download the data and take a look at it. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <strong>Important note:</strong> Please note that you are not expected to know how to program in Python. The following code is meant to illustrate the stage of data collection, so it is totally fine if you do not understand the individual lines of code. 
We have a full course on programming in Python, <a href="http://cocl.us/PY0101EN_DS0103EN_LAB2_PYTHON_Coursera"><strong>Python for Data Science</strong></a>, which is also offered on Coursera. So make sure to complete the Python course if you are interested in learning how to program in Python. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # ### Using this notebook: # # To run any of the following cells of code, you can type **Shift + Enter** to execute the code in a cell. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Get the version of Python installed. # + button=false deletable=true jupyter={"outputs_hidden": true} new_sheet=false run_control={"read_only": false} # check Python version # !python -V # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Read the data from the IBM server into a *pandas* dataframe. # + button=false deletable=true jupyter={"outputs_hidden": true} new_sheet=false run_control={"read_only": false} import pandas as pd # download library to read data into dataframe pd.set_option('display.max_columns', None) recipes = pd.read_csv("https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/data/recipes.csv") print("Data read into dataframe!") # takes about 30 seconds # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Show the first few rows. # + button=false deletable=true jupyter={"outputs_hidden": true} new_sheet=false run_control={"read_only": false} recipes.head() # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Get the dimensions of the dataframe.
# + button=false deletable=true jupyter={"outputs_hidden": true} new_sheet=false run_control={"read_only": false} recipes.shape # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # So our dataset consists of 57,691 recipes. Each row represents a recipe, and for each recipe, the corresponding cuisine is documented as well as whether 384 ingredients exist in the recipe or not beginning with almond and ending with zucchini. # # ----------- # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # Now that the data collection stage is complete, data scientists typically use descriptive statistics and visualization techniques to better understand the data and get acquainted with it. Data scientists, essentially, explore the data to: # # * understand its content, # * assess its quality, # * discover any interesting preliminary insights, and, # * determine whether additional data is necessary to fill any gaps in the data. # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # ### Thank you for completing this lab! # # This notebook was created by [<NAME>](https://www.linkedin.com/in/aklson/). I hope you found this lab session interesting. Feel free to contact me if you have any questions! # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # This notebook is part of a course on **Coursera** called *Data Science Methodology*. If you accessed this notebook outside the course, you can take this course, online by clicking [here](http://cocl.us/DS0103EN_Coursera_LAB2). # + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} # <hr> # # Copyright &copy; 2019 [Cognitive Class](https://cognitiveclass.ai/?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
DS_Methodology/DS0103EN-2-2-1-From-Requirements-to-Collection-v2.0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="GvJbBW_oDOwC" # # Week 4: Handling Complex Images - Happy or Sad Dataset # # In this assignment you will be using the happy or sad dataset, which contains 80 images of emoji-like faces, 40 happy and 40 sad. # # Create a convolutional neural network that trains to 100% accuracy on these images, which cancels training upon hitting training accuracy of >.999 # + colab={"base_uri": "https://localhost:8080/"} id="3NFuMFYXtwsT" outputId="723d6bc3-c7cd-491b-d6f8-49a2e404a0a2" import matplotlib.pyplot as plt import tensorflow as tf import numpy as np import os # - # Begin by taking a look at some images of the dataset: # + colab={"base_uri": "https://localhost:8080/", "height": 369} id="uaWTfp5Ox9E-" outputId="1a4b4b15-9a5f-4fd3-8c56-b32d47ae0893" from tensorflow.keras.preprocessing.image import load_img happy_dir = "./data/happy/" sad_dir = "./data/sad/" print("Sample happy image:") plt.imshow(load_img(f"{os.path.join(happy_dir, os.listdir(happy_dir)[0])}")) plt.show() print("\nSample sad image:") plt.imshow(load_img(f"{os.path.join(sad_dir, os.listdir(sad_dir)[0])}")) plt.show() # - # It is cool to be able to see examples of the images to better understand the problem-space you are dealing with. # # However there is still some relevant information that is missing such as the resolution of the image (although matplotlib renders the images in a grid providing a good idea of these values) and the maximum pixel value (this is important for normalizing these values). 
# For this you can use Keras as shown in the next cell:

# +
from tensorflow.keras.preprocessing.image import img_to_array

# Load the first example of a happy face
sample_image = load_img(f"{os.path.join(happy_dir, os.listdir(happy_dir)[0])}")

# Convert the image into its numpy array representation
sample_array = img_to_array(sample_image)

print(f"Each image has shape: {sample_array.shape}")
print(f"The maximum pixel value used is: {np.max(sample_array)}")
# -

# Looks like the images have a resolution of 150x150. **This is very important because this will be the input size of the first layer in your network.**
#
# **The last dimension refers to each one of the 3 RGB channels that are used to represent colored images.**

# Since you already have coded the callback responsible for stopping training (once a desired level of accuracy is reached) in the previous two assignments this time it is already provided so you can focus on the other steps:

# + id="X0UOFLauzIW4"
class myCallback(tf.keras.callbacks.Callback):
    """Stops training once the epoch's training accuracy exceeds 99.9%."""

    def on_epoch_end(self, epoch, logs=None):
        # FIX: the original signature used a mutable default (`logs={}`), a
        # Python anti-pattern; `None` with a fallback is behaviorally identical
        # for every call Keras makes (Keras always passes `logs`).
        logs = logs or {}
        if logs.get('accuracy') is not None and logs.get('accuracy') > 0.999:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
# -

# A quick note on callbacks:
#
# So far you have used only the `on_epoch_end` callback but there are many more. For example you might want to check out the [EarlyStopping](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping) callback, which allows you to save the best weights for your model.

# Keras provides great support for preprocessing image data. A lot can be accomplished by using the `ImageDataGenerator` class. Be sure to check out the [docs](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator) if you get stuck in the next exercise.
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# GRADED FUNCTION: image_generator
def image_generator():
    """Build a generator that streams the happy/sad images from ./data/.

    Returns a DirectoryIterator yielding batches of 10 images, rescaled to
    the [0, 1] range, resized to 150x150, with 1-D binary labels.
    """
    ### START CODE HERE
    # Normalize raw pixel values from [0, 255] down to [0, 1].
    train_datagen = ImageDataGenerator(rescale=1 / 255)

    # Stream images straight from the directory tree:
    #   - directory:   root folder containing one sub-folder per class
    #   - target_size: spatial resolution every image is resized to
    #   - batch_size:  number of images yielded per batch (10)
    #   - class_mode:  "binary" -> 1-D binary labels for the two classes
    train_generator = train_datagen.flow_from_directory(
        directory='./data/',
        target_size=(150, 150),
        batch_size=10,
        class_mode="binary",
    )
    ### END CODE HERE

    return train_generator
# GRADED FUNCTION: train_happy_sad_model
def train_happy_sad_model(train_generator):
    """Train a small CNN on the happy/sad generator until accuracy > 99.9%.

    Args:
        train_generator: iterator of (image batch, binary label batch)
            pairs, e.g. the one returned by ``image_generator``.

    Returns:
        The Keras ``History`` object produced by ``model.fit``.
    """
    # Callback that halts training once the target accuracy is reached.
    callbacks = myCallback()

    ### START CODE HERE

    # Three Conv/MaxPool stages, then a flatten and two dense layers
    # ending in a single sigmoid unit for binary classification.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                               input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])

    # Binary cross-entropy matches the single sigmoid output unit.
    model.compile(loss=losses.BinaryCrossentropy(),
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])

    # The callback is expected to fire well before the 15-epoch budget.
    history = model.fit(x=train_generator,
                        epochs=15,
                        callbacks=[callbacks])

    ### END CODE HERE

    return history
You can also double check by running the following cell: # + id="0imravDn0Ajz" print(f"Your model reached the desired accuracy after {len(hist.epoch)} epochs") # - # **Congratulations on finishing the last assignment of this course!** # # You have successfully implemented a CNN to assist you in the classification task for complex images. Nice job! # # **Keep it up!**
C1/W4/assignment/C1W4_Assignment.ipynb
# ## Data loading and transformation

# Load the tab-separated training table.
data = pd.read_csv("./MIN_trainingdata.csv", sep='\t')

# Column layout: col[2] is the regression target ("Median molecules per
# cell"), every column after it is a feature.
col = list(data.columns)
target_col = col[2]
features = col[3:]

# Feature matrix and log-transformed target column vector.
# (MinMax scaling is intentionally disabled here; the model consumes the
# raw log1p-transformed values.)
X = data[features].values
y = np.log1p(data[target_col].values).reshape(-1, 1)
# ## Model configuration and training
#
# TPOT-exported stacked pipeline used as the AdaBoost base estimator.
# NOTE(review): the exact ordering of scalers and StackingEstimators is the
# pipeline TPOT searched out — do not reorder; each StackingEstimator appends
# its prediction as an extra feature for the next stage.
base = make_pipeline(
    StackingEstimator(estimator=LassoLarsCV(normalize=True)),
    StackingEstimator(estimator=LinearSVR(C=0.01, dual=True, epsilon=0.001, loss="epsilon_insensitive", tol=0.1)),
    MaxAbsScaler(),
    StackingEstimator(estimator=RidgeCV()),
    Normalizer(norm="l2"),
    StackingEstimator(estimator=LinearSVR(C=0.5, dual=False, epsilon=0.1, loss="squared_epsilon_insensitive", tol=0.1)),
    StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=False, max_features=0.4, min_samples_leaf=2, min_samples_split=4, n_estimators=100)),
    MinMaxScaler(),
    StackingEstimator(estimator=RidgeCV()),
    StackingEstimator(estimator=LinearSVR(C=5.0, dual=True, epsilon=0.1, loss="epsilon_insensitive", tol=0.0001)),
    StackingEstimator(estimator=RidgeCV()),
    StackingEstimator(estimator=SGDRegressor()),
    RobustScaler(),
    StackingEstimator(estimator=LinearSVR(C=15.0, dual=True, epsilon=0.01, loss="epsilon_insensitive", tol=0.1)),
    StackingEstimator(estimator=ElasticNetCV(l1_ratio=0.75, tol=0.001)),
    StackingEstimator(estimator=XGBRegressor(learning_rate=0.1, max_depth=1, min_child_weight=6, n_estimators=100, nthread=1, objective="reg:squarederror", subsample=0.6500000000000001)),
    MinMaxScaler(),
    StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=False, max_features=0.2, min_samples_leaf=2, min_samples_split=4, n_estimators=100)),
    StackingEstimator(estimator=LinearSVR(C=5.0, dual=True, epsilon=0.1, loss="epsilon_insensitive", tol=0.0001)),
    MaxAbsScaler(),
    RandomForestRegressor(bootstrap=False, max_features=0.05, min_samples_leaf=1, min_samples_split=4, n_estimators=100)
)

# Hyper-parameters collected in one dict so the run is easy to reproduce.
parameters = {'test_size': 0.25,
              'base_estimator': base,
              'n_estimators': 100,   #default = 50
              'learning_rate': 0.3,  #default = 1.0
              'loss': 'linear',
              'random_state': 9      #default = None
              }

# Fixed random_state keeps the train/validation split reproducible.
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=parameters['test_size'], random_state=9)

model = AdaBoostRegressor(base_estimator=parameters['base_estimator'],
                          n_estimators=parameters['n_estimators'],
                          learning_rate=parameters['learning_rate'],
                          loss=parameters['loss'],
                          random_state=parameters['random_state'])

# ravel(): AdaBoostRegressor expects a 1-D target, y was reshaped to (-1, 1).
model.fit(X_train, y_train.ravel())
# ## Predicted values (validation split, back-transformed from log space)
predict_valid = np.expm1(predict_valid)
y_rescaled = np.expm1(y_rescaled)

fmt = '%-8s%-20s%s'
print(fmt % ('', 'Eval data', 'Prediction'))
for i, (eval_row, pred_row) in enumerate(zip(y_rescaled, predict_valid)):
    print(fmt % (i, eval_row, pred_row))

# ## Model testing with ecYeast7
yeast7 = pd.read_csv("./testingdata.csv", sep='\t')
col_test = list(yeast7.columns)
test_known = col_test[2]
# NOTE(review): the slice deliberately reuses len(col) from the *training*
# table so the test set exposes exactly the same feature width — confirm the
# two files share the same column layout.
test_features = col_test[3:len(col)]

X_test = yeast7[test_features].values
y_test = np.log1p(yeast7[test_known].values).reshape(-1, 1)

# Predictions stay in log space here; expm1 back-transform happens below.
test_rescaled = y_test
predict_yeast7 = model.predict(X_test).reshape(-1, 1)

# Baseline = the known target column itself (column 0 of the (-1, 1) target).
# The original used test_known.index("Median molecules per cell"), i.e.
# str.index, which only accidentally evaluated to 0 — made explicit here.
baseline_preds_test = test_rescaled[:, 0]
baseline_errors_test = abs(baseline_preds_test - test_rescaled)
errors_test = abs(predict_yeast7 - test_rescaled)
# NOTE(review): division by test_rescaled assumes no zero log1p targets.
mape_test = 100 * (errors_test / test_rescaled)
accuracy_test = 100 - np.mean(mape_test)

print("Average baseline error: ", round(np.mean(baseline_errors_test), 2))
print("Mean absolute error: ", round(np.mean(errors_test), 2))
print("Accuracy: ", round(accuracy_test, 2), "%", "\n")
print("Explained variance regression score: ", explained_variance_score(test_rescaled, predict_yeast7))
print("R2 score: ", r2_score(test_rescaled, predict_yeast7), '\n')
print("Maximum residual error: ", max_error(test_rescaled, predict_yeast7))
print("Median absolute error: ", median_absolute_error(test_rescaled, predict_yeast7))
print("Mean absolute error: ", mean_absolute_error(test_rescaled, predict_yeast7))
print("Mean squared error: ", mean_squared_error(test_rescaled, predict_yeast7))
print("Root mean squared error:", sqrt(mean_squared_error(test_rescaled, predict_yeast7)))
print("Mean squared logarithmic error: ", mean_squared_log_error(test_rescaled, predict_yeast7))

# BUGFIX: the original correlated the *validation* arrays
# (y_rescaled / predict_valid — already expm1-transformed above) in this
# testing section; correlate the ecYeast7 test values instead.
pearson = stats.pearsonr(test_rescaled.ravel(), predict_yeast7.ravel())
spearman = stats.spearmanr(test_rescaled.ravel(), predict_yeast7.ravel())
print('Pearson\'s r:', pearson[0], 'p-value:', pearson[1])
print('Spearman\'s r:', spearman[0], 'p-value:', spearman[1])

# Scatter + regression line of known vs predicted test abundances.
plot_data = pd.DataFrame()
plot_data['Known abundance'] = test_rescaled.ravel()
plot_data['Predicted abundance'] = predict_yeast7.ravel()
sns.regplot(x='Known abundance', y='Predicted abundance', data=plot_data)

# Back-transform to molecule counts and list known vs predicted values.
predict_yeast7 = np.expm1(predict_yeast7)
test_rescaled = np.expm1(test_rescaled)

fmt = '%-8s%-20s%s'
print(fmt % ('', 'Known abundance', 'Prediction'))
for i, (eval_row, pred_row) in enumerate(zip(yeast7['Median molecules per cell'], predict_yeast7)):
    print(fmt % (i, eval_row, pred_row))

# ## ecYeast8 protein prediction
ecyeast8 = pd.read_csv("./MIN_predictiondata.csv", sep='\t')
ecy8_col_test = list(ecyeast8.columns)
ecy8_pred_unknown = ecy8_col_test[2]
ecy8_pred_features = ecy8_col_test[3:len(col)]

X_pred = ecyeast8[ecy8_pred_features].values
y_pred = np.log1p(ecyeast8[ecy8_pred_unknown].values).reshape(-1, 1)

predict_ecyeast8 = model.predict(X_pred).reshape(-1, 1)

# Back-transform and print the predicted abundances.
predict_ecyeast8 = np.expm1(predict_ecyeast8)
fmt = '%-8s%-20s'
print(fmt % ('', 'Prediction'))
for i, pred_row in enumerate(predict_ecyeast8):
    print(fmt % (i, pred_row))

# Persist predictions; `with` guarantees the handle is closed (the original
# used a bare open/close pair that leaked on error).
prot_list = predict_ecyeast8.tolist()
with open("pred_ecYeast8_MIN.txt", "w") as output:
    for prot in prot_list:
        output.write(str(prot) + '\n')
1.ProteinAbundance/MIN_sklearn_adaboost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # AV Against FE012 # + import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import lightgbm as lgb from sklearn.model_selection import KFold from sklearn import model_selection, preprocessing, metrics # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import shap import os #print(os.listdir("../input")) from sklearn import preprocessing #import xgboost as xgb import gc import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.simplefilter(action='ignore', category=FutureWarning) from tqdm import tqdm # Any results you write to the current directory are saved as output. # - train = pd.read_parquet('../../data/train_FE013.parquet') test = pd.read_parquet('../../data/test_FE013.parquet') FEATURES = ['V85', 'bank_type_TransactionAmt_mean', 'D5_fq_enc', 'V12', 'V81', 'V282', 'bank_type_D7_std', 'id_15', 'V13', 'C12_fq_enc', 'anomaly', 'D7_DT_D_std_score', 'D3_DT_D_min_max', 'card4_count_full', 'D14_DT_D_min_max', 'card1_count_full', 'V169', 'D3_DT_M_min_max', 'V279', 'V91', 'bank_type_D10_std', 'D14', 'D6_DT_M_std_score', 'D4_DT_W_min_max', 'V152', 'V56', #'D3_intercept_bin0', 'D14_intercept_bin0', 'V220', 'V277', 'D12_intercept', 'ProductCD_W_00cents', 'D13_intercept_bin0', 'V291', 'V189', 'D15_DT_M_min_max', 'C5_fq_enc', 'D3_fq_enc', 'card5_fq_enc', 'addr1_count_full', 'V266', 'D11_intercept_bin2', 'V23', 'D4_intercept_bin3', 'bank_type_D10_mean', 'D2_intercept_bin3', 'V306', 'DeviceType', 'V285', 'D5_DT_W_std_score', 'V131', 'V37', 'V296', 'bank_type_D1_mean', 'V75', 'D3_DT_W_std_score', 'D10_DT_M_min_max', 'id_33_0', 'V67', 'D4_intercept_bin4', 
'V256', 'V143', 'uid5_D6_std', 'ProductCD_target_mean', 'mxC3', 'V129', 'D13_DT_M_std_score', 'V24', 'D3_DT_M_std_score', 'mxC4', 'D9', 'id_30_version_fq_enc', 'D5_DT_D_std_score', 'D11_DT_M_std_score', 'uid5_D6_mean', 'D14_DT_M_std_score', 'card5_TransactionAmt_std', 'V20', 'C8_fq_enc', 'V70', 'V127', 'D6_intercept', '# ', 'sum_Cxx_binary_higher_than_q95', 'V156', 'uid4_D12_mean', 'C5', 'uid4_D12_std', 'id_30_fq_enc', 'V61', 'id_33', 'D15_to_std_addr1', 'bank_type_D9_mean', #'D5_intercept', 'D10_DT_W_min_max', 'V130', 'bank_type_D9_std', 'uid5_D7_std', 'bank_type_D14_mean', 'bank_type_D3_std', 'bank_type_D5_mean', 'ProductCD', 'M8', 'V44', 'D6_fq_enc', 'D15_DT_D_min_max', 'D11_intercept_bin0', 'V257', 'bank_type_D7_mean', 'V76', #'D15', 'V38', 'V55', 'V261', 'V149', #'D4', 'D8_intercept_bin0', 'M2', 'bank_type_D6_std', 'id_30_version', 'D4_intercept_bin1', 'D15_to_mean_card4', 'V82', 'D3_DT_D_std_score', 'D10_intercept_bin3', 'bank_type_D2_std', 'V77', 'M7', #'D11', 'D4_intercept_bin2', 'email_check', 'V294', 'V317', 'V308', 'id_33_fq_enc', 'bank_type_D5_std', 'D8_intercept', 'V62', 'V187', 'card5_TransactionAmt_mean', 'bank_type_D12_mean', #'id_33_count_dist', 'D2_intercept_bin2', 'C10', 'V86', 'D8_DT_M_min_max', 'D15_intercept_bin4', 'D6_DT_W_std_score', 'uid5_D7_mean', 'C9_fq_enc', 'mxC10', 'D14_DT_W_std_score', 'card2_count_full', 'V258', 'bank_type_D14_std', 'D10_intercept_bin4', 'V83', 'bank_type_D13_std', 'D8_DT_W_min_max', 'TransactionAmt', 'V312', 'D14_intercept', 'id_33_1', 'D15_intercept_bin2', 'D12_DT_W_std_score', 'V78', 'D8_D9_decimal_dist', 'M9', 'V281', 'bank_type_D12_std', 'V54', 'C9', 'M4_target_mean', 'sum_Cxx_binary_higher_than_q90', 'D10_DT_D_min_max', 'bank_type_D3_mean', 'bank_type_D8_mean', 'R_emaildomain_prefix', 'bank_type_D6_mean', 'V314', 'D11_DT_W_std_score', #'D10', 'D4_DT_D_min_max', 'V283', 'D10_intercept_bin2', 'D13_intercept', 'D8_DT_D_min_max', 'C2_fq_enc', 'V165', 'D1_intercept_bin4', 'bank_type_D13_mean', #'D3_intercept', 
'TransactionAmt_2Dec', 'card3_div_Mean_D9_DOY', 'C12', 'D4_DT_M_std_score', 'D2_intercept_bin1', 'mxC8', 'D2_fq_enc', 'addr1_third_digit', 'D4_fq_enc', #'D1_fq_enc', 'mxC12', 'D8', 'D10_intercept_bin1', 'id_01', 'id_09', 'id_03', 'addr1_second_digit', 'D15_to_mean_addr1', 'sum_Cxx_binary_higher_than_q80', 'V53', 'TransactionAmt_decimal', 'card3_div_Mean_D6_DOY', 'D15_intercept_bin3', 'V45', 'id_02_to_std_card4', 'addr2_div_Mean_D10_DOY_productCD', 'DeviceInfo_version', 'DeviceInfo_device', 'D1_intercept_bin3', 'D11_intercept', 'DeviceInfo_version_fq_enc', 'C6', 'uid5_D13_std', 'TransactionAmt_DT_M_min_max', 'dist2', 'C8', 'D15_intercept_bin1', 'M3', 'R_emaildomain_fq_enc', 'DeviceInfo_device_fq_enc', 'D6_DT_D_std_score', 'sum_Cxx_binary_higher_than_q60', #'D11__DeviceInfo', 'TranAmt_div_Mean_D12_DOY_productCD', 'D10_DT_M_std_score', 'uid5_D13_mean', 'mxC5', #id_30', 'addr2_div_Mean_D4_DOY', 'uid2_D12_std', 'C11_fq_enc', 'id_06', 'uid2_D12_mean', 'sum_Cxx_binary_higher_than_q70', 'V310', 'V307', 'C6_fq_enc', 'D8_fq_enc', 'dist2_fq_enc', 'D2_intercept_bin0', 'addr1_div_Mean_D10_DOY_productCD', 'addr1_div_Mean_D10_DOY', 'addr1_div_Mean_D11_DOY', 'uid2_D8_std', 'id_02__id_20', 'V313', 'D4_intercept_bin0', 'D11_DT_D_std_score', 'Transaction_day_of_week', 'card6_div_Mean_D3_DOY', 'uid2_D1_std', 'uid5_D11_mean', 'uid_fq_enc', 'D14_DT_D_std_score', 'D12_DT_D_std_score', 'id_02_to_mean_card4', 'uid4_D13_std', 'D1_intercept_bin1', 'id_02_to_std_card1', 'uid5_D11_std', 'P_emaildomain_prefix', 'DT_day', #'D8_DT_M_std_score', 'uid2_D1_mean', 'TransactionAmt_to_mean_card4', 'card5_div_Mean_D11_DOY', 'D15_DT_M_std_score', 'V87', 'uid_D12_std', 'id_31_device_fq_enc', 'uid2_D11_mean', 'card3_DT_W_week_day_dist_best', 'uid5_D14_std', 'uid2_D15_mean', 'sum_Cxx_binary_higher_than_q50', 'id_13', 'card3_div_Mean_D11_DOY', 'C11', 'bank_type_DT_W_week_day_dist_best', 'card4_div_Mean_D11_DOY', 'addr1_div_Mean_D1_DOY', 'uid2_D4_mean', 'card2_div_Mean_D11_DOY', 'C13_fq_enc', 'uid4_D13_mean', 
'card5_DT_W_week_day_dist_best', 'id_02', 'uid5_D14_mean', 'uid2_D10_mean', # 'id_01_count_dist', 'D13_DT_W_std_score', 'C2', 'C14', 'addr2_div_Mean_D10_DOY', 'uid2_D11_std', 'addr1_div_Mean_D1_DOY_productCD', 'id_02_to_mean_card1', 'dist1_fq_enc', 'card1_div_Mean_D11_DOY', 'D15_to_std_card1', 'TransactionAmt_DT_M_std_score', 'uid2_D6_std', 'TransactionAmt_to_std_card4', 'uid2_D15_std', 'uid3_D8_std', 'card6_div_Mean_D11_DOY', 'TranAmt_div_Mean_D14_DOY', 'card3_div_Mean_D14_DOY', #'D2', #'D1', 'uid_D15_mean', 'uid4_D6_std', 'uid_D15_std', 'D10_intercept_bin0', 'DeviceInfo_fq_enc', 'uid2_D13_std', 'uid_D12_mean', 'uid4_D6_mean', 'uid_D1_std', 'D1_intercept_bin2', 'uid_D10_mean', 'card2__id_20', 'uid4_D7_std', 'uid3_D13_std', 'C14_fq_enc', 'uid_D8_std', 'uid3_D13_mean', 'uid2_D4_std', 'addr1_div_Mean_D4_DOY', 'uid_D4_mean', 'D4_DT_W_std_score', 'addr2_div_Mean_D1_DOY_productCD', 'uid_D11_mean', 'D15_intercept_bin0', 'uid2_D10_std', 'uid_D13_std', 'uid2_fq_enc', 'uid2_D13_mean', 'uid2_D2_mean', 'D2_intercept', 'uid_D11_std', 'card2', 'uid4_D14_std', 'C_sum_after_clip75', 'R_emaildomain', 'dist1', 'id_05', 'uid_TransactionAmt_mean', 'uid_D1_mean', 'uid3_D1_std', 'uid5_D8_std', 'uid3_D6_std', 'Transaction_hour_of_day', 'uid4_D14_mean', 'uid5_D10_std', 'uid3_D10_std', 'uid5_D1_std', 'uid5_D15_std', 'uid2_D7_mean', 'uid3_D11_std', 'uid4_D8_std', 'D13_DT_D_std_score', 'uid3_D11_mean', 'uid2_D14_std', 'uid2_D7_std', 'uid2_D14_mean', 'uid_D13_mean', 'uid_D10_std', 'uid2_D3_std', 'uid_D6_std', 'uid3_D15_std', 'addr1_fq_enc', #id_31', 'uid_TransactionAmt_std', 'card1_div_Mean_D4_DOY_productCD', 'uid2_TransactionAmt_mean', 'C_sum_after_clip90', 'uid2_TransactionAmt_std', 'uid4_D7_mean', 'uid2_D6_mean', 'uid3_D15_mean', 'D15_to_mean_card1', 'uid5_D15_mean', 'M4', 'uid3_D7_std', 'card2_div_Mean_D4_DOY', 'card5_div_Mean_D4_DOY_productCD', 'card5_div_Mean_D4_DOY', 'D4_intercept', 'uid_D4_std', 'card6_div_Mean_D4_DOY_productCD', 'card5__P_emaildomain', 'card1_fq_enc', 
'uid5_D10_mean', 'card1_div_Mean_D4_DOY', 'C1', 'M6', 'uid2_D2_std', 'P_emaildomain_fq_enc', 'card1_TransactionAmt_mean', 'uid3_D10_mean', 'TransactionAmt_DT_W_min_max', 'uid5_D4_std', 'card1_div_Mean_D10_DOY_productCD', 'uid3_D1_mean', 'card1_div_Mean_D10_DOY', 'uid_D14_mean', 'mxC9', 'TranAmt_div_Mean_D4_DOY_productCD', 'D15_DT_W_std_score', 'DeviceInfo__P_emaildomain', 'uid3_D14_mean', #'bank_type_DT_M', 'mxC11', 'uid5_D1_mean', 'uid_D2_mean', 'D10_DT_W_std_score', #'card3_DT_M_month_day_dist_best', 'uid3_D2_std', 'TranAmt_div_Mean_D4_DOY', 'card1_TransactionAmt_std', 'card3_div_Mean_D4_DOY_productCD', 'D1_intercept_bin0', 'uid3_D4_std', 'card2_div_Mean_D10_DOY', 'uid_D2_std', 'uid3_D14_std', 'uid3_D4_mean', 'uid_D7_mean', 'uid5_D2_std', 'card4_div_Mean_D4_DOY_productCD', 'card6_div_Mean_D4_DOY', 'TranAmt_div_Mean_D10_DOY', 'uid2_D9_std', 'TransactionAmt_DT_W_std_score', 'C1_fq_enc', 'card1_div_Mean_D1_DOY', 'uid5_D4_mean', 'uid3_D6_mean', 'mxC14', 'uid5_D2_mean', 'card4_div_Mean_D4_DOY', 'card3_div_Mean_D4_DOY', 'uid_D14_std', 'M5', 'C13', 'mxC6', 'card5_div_Mean_D10_DOY_productCD', # 'card3_DT_M_month_day_dist', 'card2_div_Mean_D10_DOY_productCD', 'uid_D7_std', 'card2_div_Mean_D4_DOY_productCD', 'bank_type_DT_M_month_day_dist', 'uid3_D7_mean', 'uid_D3_std', 'uid5_fq_enc', 'uid3_fq_enc', 'uid_D3_mean', 'D4_DT_D_std_score', 'uid3_D2_mean', 'uid4_D1_std', 'uid2_D5_std', 'uid4_D10_std', 'bank_type_DT_D_hour_dist_best', 'uid2_D8_mean', 'card6_div_Mean_D10_DOY_productCD', 'card1_div_Mean_D1_DOY_productCD', 'uid5_D9_std', 'card4_div_Mean_D10_DOY_productCD', 'uid2_D3_mean', 'uid_D6_mean', 'card2_div_Mean_D1_DOY', 'card5_div_Mean_D10_DOY', 'mxC2', 'card2_TransactionAmt_std', 'bank_type_DT_W_week_day_dist', 'card2_TransactionAmt_mean', 'uid4_D10_mean', #id_31_count_dist', 'TranAmt_div_Mean_D1_DOY', 'uid3_D3_std', 'uid4_D15_std', 'card5_div_Mean_D1_DOY_productCD', 'card4_div_Mean_D10_DOY', 'card5_DT_D_hour_dist_best', 'uid4_D4_std', 'card5_DT_M_month_day_dist', 
#'bank_type_DT_W', 'addr1__card1', 'bank_type_DT_M_month_day_dist_best', 'card2_div_Mean_D1_DOY_productCD', 'card6_div_Mean_D10_DOY', 'uid2_D5_mean', 'uid_DT_M', 'card2__dist1', 'uid2_D9_mean', 'card5_DT_M_month_day_dist_best', 'TranAmt_div_Mean_D10_DOY_productCD', 'uid4_D11_std', 'uid_D5_mean', 'uid5_D3_std', 'TransactionAmt_DT_D_std_score', #'D8_DT_W_std_score', 'card5_DT_W_week_day_dist', 'uid5_D5_std', 'card3_DT_W_week_day_dist', 'uid4_D9_std', 'D10_intercept', 'uid3_D3_mean', 'uid4_D5_std', 'uid_D5_std', 'card5_div_Mean_D1_DOY', 'uid5_D3_mean', 'bank_type_DT_D', 'uid4_D1_mean', 'uid_D8_mean', 'uid3_D5_mean', 'D15_intercept', 'uid5_TransactionAmt_std', 'uid3_D5_std', 'uid4_D4_mean', 'uid4_D15_mean', 'uid5_D8_mean', 'uid5_D9_mean', 'uid_D9_std', 'uid_D9_mean', 'uid5_D5_mean', 'mtransamt', 'bank_type_DT_D_hour_dist', 'uid4_D11_mean', 'D15_DT_D_std_score', 'TransactionAmt_DT_D_min_max', 'uid4_D2_mean', 'ntrans', 'addr2_div_Mean_D1_DOY', 'uid5_TransactionAmt_mean', 'uid3_D9_std', 'TransactionAmt_Dec', 'uid3_TransactionAmt_std', 'card5_DT_D_hour_dist', 'card1', 'card4_div_Mean_D1_DOY_productCD', 'P_emaildomain__C2', 'card3_div_Mean_D10_DOY', 'uid4_D3_std', 'card3_DT_D_hour_dist_best', 'uid4_D8_mean', 'uid4_D2_std', 'card6_div_Mean_D1_DOY_productCD', 'uid_DT_W', #'Sum_TransAmt_Day', 'uid4_D5_mean', 'card4_div_Mean_D1_DOY', 'card3_div_Mean_D10_DOY_productCD', 'uid3_D8_mean', 'TransactionAmt_userid_median', 'uid4_fq_enc', 'uid3_TransactionAmt_mean', 'uid3_D9_mean', 'card6_div_Mean_D1_DOY', #'Trans_Count_Day', 'mxC1', 'D10_DT_D_std_score', 'card3_div_Mean_D1_DOY', 'TransactionAmt_to_mean_card1', 'card2_fq_enc', 'product_type', 'card3_div_Mean_D1_DOY_productCD', 'TransactionAmt_to_std_card1', 'uid_DT_D', 'uid4_D9_mean', 'D1_intercept', 'card3_DT_D_hour_dist', 'TranAmt_div_Mean_D1_DOY_productCD', 'product_type_DT_M', 'uid4_D3_mean', 'uid4_TransactionAmt_mean', 'uid4_TransactionAmt_std', 'D8_DT_D_std_score', #'Mean_TransAmt_Day', #'minDT', 'product_type_DT_W', 
'mintransamt', 'maxtransamt', 'TransactionAmt_userid_std', 'P_emaildomain', 'card1__card5', 'product_type_DT_D', 'mxC13', #'maxDT', 'id_19', 'DeviceInfo', 'id_20', 'addr1', 'userid_min_C1', 'userid_max_C1', 'userid_max_minus_min_C1', 'userid_unique_C1', 'userid_mean_C1', 'userid_min_C2', 'userid_max_C2', 'userid_max_minus_min_C2', 'userid_unique_C2', 'userid_mean_C2', 'userid_min_C3', 'userid_max_C3', 'userid_max_minus_min_C3', 'userid_unique_C3', 'userid_mean_C3', 'userid_min_C4', 'userid_max_C4', 'userid_max_minus_min_C4', 'userid_unique_C4', 'userid_mean_C4', 'userid_min_C5', 'userid_max_C5', 'userid_max_minus_min_C5', 'userid_unique_C5', 'userid_mean_C5', 'userid_min_C6', 'userid_max_C6', 'userid_max_minus_min_C6', 'userid_unique_C6', 'userid_mean_C6', 'userid_min_C7', 'userid_max_C7', 'userid_max_minus_min_C7', 'userid_unique_C7', 'userid_mean_C7', 'userid_min_C8', 'userid_max_C8', 'userid_max_minus_min_C8', 'userid_unique_C8', 'userid_mean_C8', 'userid_min_C9', 'userid_max_C9', 'userid_max_minus_min_C9', 'userid_unique_C9', 'userid_mean_C9', 'userid_min_C10', 'userid_max_C10', 'userid_max_minus_min_C10', 'userid_unique_C10', 'userid_mean_C10', 'userid_min_C11', 'userid_max_C11', 'userid_max_minus_min_C11', 'userid_unique_C11', 'userid_mean_C11', 'userid_min_C12', 'userid_max_C12', 'userid_max_minus_min_C12', 'userid_unique_C12', 'userid_mean_C12', 'userid_min_C13', 'userid_max_C13', 'userid_max_minus_min_C13', 'userid_unique_C13', 'userid_mean_C13', 'userid_min_C14', 'userid_max_C14', 'userid_max_minus_min_C14', 'userid_unique_C14', 'userid_mean_C14', 'hour', 'hour_sin', #'week', 'week_sin', 'week_cos', 'month', #'life_of_customer', 'addr1_broad_area', 'uid6_TransactionAmt_mean', 'uid6_TransactionAmt_std', 'hour_TransactionAmt_mean', 'hour_TransactionAmt_std', # 'week_TransactionAmt_mean', 'week_TransactionAmt_std', 'D1_diff', 'D10_diff', 'D15_diff', 'new_identity_M5_mean', 'new_identity_M6_mean', 'new_identity_V315_mean', 'new_identity_D1_diff_mean', 
# Restrict both frames to the adversarial-validation feature set.
train = train[FEATURES]
test = test[FEATURES]
# Adversarial labels: 0 = row came from train, 1 = row came from test.
# The classifier below tries to tell the two apart.
train['target'] = 0
test['target'] = 1
train_test = pd.concat([train, test], axis =0)
target = train_test['target'].values
# Free the per-set frames before the shuffled re-split.
del train, test
gc.collect()
# Random re-split of the combined data; fixed seed for reproducibility.
train, test = model_selection.train_test_split(train_test, test_size=0.33, random_state=529, shuffle=True)
train_y = train['target'].values
test_y = test['target'].values
del train['target'], test['target']
gc.collect()
# Wrap as LightGBM datasets for lgb.train below.
train = lgb.Dataset(train, label=train_y)
test = lgb.Dataset(test, label=test_y)
"boosting": "gbdt", "feature_fraction": 0.9, "bagging_freq": 1, "bagging_fraction": 0.9 , "bagging_seed": 44, "metric": 'auc', "verbosity": -1} num_round = 500 clf = lgb.train(param, train, num_round, valid_sets = [train, test], verbose_eval=10, early_stopping_rounds = 50) # + feature_imp = pd.DataFrame(sorted(zip(clf.feature_importance(),FEATURES)), columns=['Value','Feature']) plt.figure(figsize=(20, 10)) sns.barplot(x="Value", y="Feature", data=feature_imp.sort_values(by="Value", ascending=False).head(20)) plt.title('LightGBM Features') plt.tight_layout() plt.show() #plt.savefig('lgbm_importances-01.png') # - # # Make this into a loop for i in tqdm(range(0, 300)): top3_feats = feature_imp.sort_values('Value', ascending=False)['Feature'][:3].tolist() print('Remove features:', top3_feats) FEATURES = [x for x in FEATURES if x not in top3_feats] train = pd.read_parquet('../../data/train_FE013.parquet') test = pd.read_parquet('../../data/test_FE013.parquet') train = train[FEATURES] test = test[FEATURES] train['target'] = 0 test['target'] = 1 train_test = pd.concat([train, test], axis =0) target = train_test['target'].values del train, test gc.collect() train, test = model_selection.train_test_split(train_test, test_size=0.33, random_state=529, shuffle=True) train_y = train['target'].values test_y = test['target'].values del train['target'], test['target'] gc.collect() train = lgb.Dataset(train, label=train_y) test = lgb.Dataset(test, label=test_y) param = {'num_leaves': 50, 'min_data_in_leaf': 30, 'objective':'binary', 'max_depth': 5, 'learning_rate': 0.2, "min_child_samples": 20, "boosting": "gbdt", "feature_fraction": 0.9, "bagging_freq": 1, "bagging_fraction": 0.9 , "bagging_seed": 44, "metric": 'auc', "verbosity": -1} num_round = 500 clf = lgb.train(param, train, num_round, valid_sets = [train, test], verbose_eval=10, early_stopping_rounds = 50) feature_imp = pd.DataFrame(sorted(zip(clf.feature_importance(),FEATURES)), columns=['Value','Feature'])
notebooks/AV/AV001-UsingFE012.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Summary
# - [Import library](#Import-library)
# - [Load csv](#Load-csv)
# - [Read dataset](#Read-dataset)
# - [Filter of dataset](#Filter-of-dataset)
#
# - [RQ1](#RQ1)
#     - [Project size filter](#Project-size-filter)
#     - [Number of projects per size](#Number-of-projects-per-size)
#     - [Size of Ruby and Java projects](#Size-of-Ruby-and-Java-projects)
#     - [Frequency per day and month](#Frequency-per-day-and-month)
#     - [Plot in bar](#Plot-in-bar)
#     - [Plot size of all projects](#Plot-size-of-all-projects)
#     - [Data from very small projects, small, medium, large and very large.](#Data-from-very-small-projects,-small,-medium,-large-and-very-large)
#     - [General media of commits](#General-media-of-commits)
#     - [Number of projects below average](#Number-of-projects-below-average)
#     - [Description of java projects](#Description-of-java-projects)
#     - [Description of ruby projects](#Description-of-ruby-projects)
#     - [Plot of java project commits frequency](#Plot-of-java-project-commits-frequency)
#     - [Plot of ruby project commits frequency](#Plot-of-ruby-project-commits-frequency)
#
# - [RQ2](#RQ2)
#     - [Number of builds per java project](#Number-of-builds-per-java-project)
#     - [Receive java projects that use coveralls](#Receive-java-projects-that-use-coveralls)
#     - [Number of builds per ruby project](#Number-of-builds-per-ruby-project)
#     - [Receive ruby projects that use coveralls](#Receive-ruby-projects-that-use-coveralls)
#     - [Coverralls dataset with filters - Java](#Coverralls-dataset-with-filters---Java)
#     - [Travis dataset with filters - Java](#Travis-dataset-with-filters---Java)
#     - [Coverralls dataset with filters - Ruby](#Coverralls-dataset-with-filters---Ruby)
#     - [Travis dataset with filters - Ruby](#Travis-dataset-with-filters---Ruby)
#     - [Boxplot](#Boxplot)
#     - [Comparing](#Comparing)
#     - [Get coveralls data](#Get-coveralls-data)
#     - [Calculate some statistic util informations](#Calculate-some-statistic-util-informations)
#
# - [RQ3](#RQ3)
#     - [Filters the Java language and returns a list with the name of the projects](#Filters-the-Java-language-and-returns-a-list-with-the-name-of-the-projects)
#     - [Creates 2 filters for passing and non-passing builds](#Creates-2-filters-for-passing-and-non-passing-builds)
#     - [Creates a list with the amount builds passed and failed](#Creates-a-list-with-the-amount-builds-passed-and-failed)
#     - [Status Build](#Status-Build)
#     - [Status Build for all projects](#Status-Build-for-all-projects)
#     - [Status build for JAVA](#Status-build-for-JAVA)
#     - [Status build for RUBY](#Status-build-for-RUBY)
#     - [Number of projects](#Number-of-projects)
#     - [Number of projects with broken build](#Number-of-projects-with-broken-build)
#     - [Displays build broken by language](#Displays-build-broken-by-language)
#
# - [RQ4](#RQ4)
#     - [Filter dataset](#Filter-dataset)
#     - [Amount of build less than 10 minutes and more than 10 minutes](#Amount-of-build-less-than-10-minutes-and-more-than-10-minutes)
#     - [Duration build for projetc Very small, small, medium, large and very large](#Duration-build-for-projetc-Very-small,-small,-medium,-large-and-very-large)
#     - [Project size by language type](#Project-size-by-language-type)
#     - [Filter language Java](#Filter-language-Java)
#     - [Java build quantity less than 10 minutes and more than 10 minutes](#Java-build-quantity-less-than-10-minutes-and-more-than-10-minutes)
#     - [Projects java, Very small, small, medium, large and very large](#Projects-java,-Very-small,-small,-medium,-large-and-very-large)
#     - [Filter language Ruby](#Filter-language-Ruby)
#     - [Ruby build quantity less than 10 minutes and more than 10 minutes](#Ruby-build-quantity-less-than-10-minutes-and-more-than-10-minutes)
#     - [Projects ruby Very small, small, medium, large e very large](#Projects-ruby-Very-small,-small,-medium,-large-e-very-large)

# # Import library

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv  # NOTE: the original imported csv twice; the duplicate was dropped
import datetime
import requests
import math
import re
# -

# # Load csv

FILE = '~ci-analysis/travistorrent_11_1_2017.csv.gz'

# # Read dataset

# NOTE: duplicated column names in the original `usecols` were removed, and the
# deprecated `np.bool` alias (removed in NumPy >= 1.24) was replaced by the
# builtin `bool` — both are behavior-preserving.
dataset = pd.read_csv(FILE, sep=',', engine='c',
                      usecols=['gh_team_size', 'tr_build_id', 'gh_project_name',
                               'gh_is_pr', 'gh_pr_created_at', 'gh_lang',
                               'git_branch', 'gh_num_commits_in_push',
                               'git_prev_commit_resolution_status', 'tr_prev_build',
                               'gh_first_commit_created_at', 'gh_pushed_at',
                               'gh_sloc', 'tr_log_bool_tests_failed',
                               'tr_log_setup_time', 'tr_log_buildduration',
                               'tr_status'],
                      dtype={'tr_build_id': np.float32,
                             'gh_project_name': 'category',
                             'gh_is_pr': bool,
                             'gh_pr_created_at': 'category',
                             'gh_lang': 'category',
                             'git_branch': 'category',
                             'gh_num_commits_in_push': np.float32,
                             'git_prev_commit_resolution_status': 'category',
                             'tr_prev_build': np.float32,
                             'gh_first_commit_created_at': 'category',
                             'gh_team_size': np.int32,
                             'gh_pushed_at': 'category',
                             'gh_sloc': np.int32},
                      encoding='utf-8')

# # Filter of dataset

dataset = dataset.loc[dataset['git_branch'] == 'master']  # keep only builds on the master branch
dataset = dataset[dataset.gh_lang != 'javascript']        # drop JavaScript projects
dataset = dataset.dropna(subset=['gh_num_commits_in_push'])  # drop rows without a commit count
dataset['gh_pushed_at'] = pd.to_datetime(dataset.gh_pushed_at)  # parse push timestamps
dataset = dataset.drop_duplicates(['tr_build_id'])        # one row per build

# # RQ1

# ### Project size filter

def size_project(dataset, language, sett):
    """Return the rows of `dataset` for `language` in one size class.

    Projects below 1,000 lines will be discarded, projects owning above
    1000 and less than 10,000 will be considered small, projects owning
    over 10,000 lines and less than 100,000 will be considered medium and
    projects above 100,000 lines will be considered large.

    sett selects the class: 1 = below 1k, 2 = 1k-10k, 3 = 10k-100k, 4 = above 100k.
    """
    dataset = dataset.sort_values(['gh_sloc'], ascending=False)
    if sett == 1:  # below 1,000 lines
        return dataset.loc[(dataset['gh_sloc'] < 1000) & (dataset['gh_lang'] == language)]
    if sett == 2:  # above 1,000 and below 10,000
        df2 = dataset.loc[dataset['gh_sloc'] < 10000]
        return df2.loc[(df2['gh_sloc'] > 1000) & (df2['gh_lang'] == language)]
    if sett == 3:  # above 10,000 and below 100,000
        df3 = dataset.loc[dataset['gh_sloc'] < 100000]
        return df3.loc[(df3['gh_sloc'] > 10000) & (df3['gh_lang'] == language)]
    if sett == 4:  # above 100,000 lines
        return dataset.loc[(dataset['gh_sloc'] > 100000) & (dataset['gh_lang'] == language)]

# ### Number of projects per size

# partial model; still needs to return a dataset (original note, translated)
def quantidade_projetos_all(language, sett):
    """Print how many `language` projects fall in the size class `sett`.

    Sizes are taken from the largest `gh_sloc` value of each project.
    sett: 1 = <1k, 2 = 1k-10k, 3 = 10k-100k, 4 = 100k-1M, 5 = >= 1M lines.
    Reads the module-level `dataset`.
    """
    small, medium, large, project_null, very_large = 0, 0, 0, 0, 0
    df = dataset.sort_values(['gh_sloc'], ascending=False)
    df = df.loc[df['gh_lang'] == language]
    list_name_project = df.gh_project_name.unique().tolist()
    if sett == 1:
        for name in list_name_project:
            df0 = df.loc[df['gh_project_name'] == name]
            if 1000 > df0.gh_sloc[0:1].item():
                project_null += 1
        print("Projetos {} abaixo de 1000 linhas: {}".format(language, project_null))
    if sett == 2:
        for name in list_name_project:
            df1 = df.loc[df['gh_project_name'] == name]
            if (1000 < df1.gh_sloc[0:1].item()) and (10000 > df1.gh_sloc[0:1].item()):
                small += 1
        print("Projetos {} acima de 1000 linhas e abaixo de 10.000 : {}".format(language, small))
    if sett == 3:
        for name in list_name_project:
            df2 = df.loc[df['gh_project_name'] == name]
            if (10000 < df2.gh_sloc[0:1].item()) and (100000 > df2.gh_sloc[0:1].item()):
                medium += 1
        print("Projetos {} acima de 10.000 linhas e abaixo de 100.000 : {}".format(language, medium))
    if sett == 4:
        for name in list_name_project:
            df3 = df.loc[df['gh_project_name'] == name]
            if (100000 < df3.gh_sloc[0:1].item()) and (1000000 > df3.gh_sloc[0:1].item()):
                large += 1
        print("Projetos {} acima de 100.000 linhas e abaixo de 1.000.000: {}".format(language, large))
    if sett == 5:
        for name in list_name_project:
            df4 = df.loc[df['gh_project_name'] == name]
            if 1000000 <= df4.gh_sloc[0:1].item():
                very_large += 1
        print("Projetos {} acima de 1.000.000 linhas: {}".format(language, very_large))

# ### Size of Ruby and Java projects

# +
quantidade_projetos_all('ruby', 1)
quantidade_projetos_all('ruby', 2)
quantidade_projetos_all('ruby', 3)
quantidade_projetos_all('ruby', 4)
quantidade_projetos_all('ruby', 5)

print("\n")

quantidade_projetos_all('java', 1)
quantidade_projetos_all('java', 2)
quantidade_projetos_all('java', 3)
quantidade_projetos_all('java', 4)
quantidade_projetos_all('java', 5)
# -

# ### Frequency per day and month

def weekday_frequency(dataframe):
    """Bucket the pushes of `dataframe` by weekday.

    Returns a list of seven lists [monday, ..., sunday]; only the
    lengths of the buckets are meaningful to the callers.
    """
    dataframe = dataframe.gh_pushed_at.dt.weekday  # 0 = Monday ... 6 = Sunday
    monday = []
    tuesday = []
    wednesday = []
    thursday = []
    friday = []
    saturday = []
    sunday = []
    for date in dataframe:
        if date == 0:
            monday.append(date)
        elif date == 1:
            tuesday.append(date)
        elif date == 2:
            wednesday.append(date)
        elif date == 3:
            thursday.append(date)
        elif date == 4:
            friday.append(date)
        elif date == 5:
            saturday.append(date)
        elif date == 6:
            sunday.append(date)
    return [monday, tuesday, wednesday, thursday, friday, saturday, sunday]

# ### Plot in bar

def plot_bar(sett, language, very_small, low, medium, high):
    """Grouped bar chart of commit counts per project-size class.

    sett == 0 -> buckets are the 12 months; sett == 1 -> the 7 weekdays.
    `very_small`, `low`, `medium`, `high` are lists of buckets as produced
    by weekday_frequency(); `language` is used as the plot title.
    """
    # Count the entries in every bucket.  BUGFIX: the original only built
    # `frequency_very_small` in the sett == 1 branch, so monthly plots
    # (sett == 0) crashed with NameError at `np.arange(len(frequency_very_small))`;
    # computing all four series up-front fixes that for both branches.
    frequency_very_small = [len(bucket) for bucket in very_small]
    frequency_low = [len(bucket) for bucket in low]
    frequency_medium = [len(bucket) for bucket in medium]
    frequency_high = [len(bucket) for bucket in high]
    if sett == 0:
        labels_list = ["january", "february", "march", "april", "may", "june",
                       "july", "august", "september", "october", "november", "december"]
    if sett == 1:
        labels_list = ['Monday', 'Tuesday', 'wednesday', 'thursday ', 'friday', 'saturday', 'sunday']
    # bar width
    barWidth = 0.20
    # figure size
    plt.figure(figsize=(10, 9))
    # bar positions
    r1 = np.arange(len(frequency_very_small))
    r2 = [x + barWidth for x in r1]
    r3 = [x + barWidth for x in r2]
    r4 = [x + barWidth for x in r3]
    # draw the bars
    plt.bar(r1, frequency_very_small, color='#303F9F', width=barWidth, label='Very Small')
    plt.bar(r2, frequency_low, color='#03A9F4', width=barWidth, label='Small')
    plt.bar(r3, frequency_medium, color='#3F51B5', width=barWidth, label='Medium')
    plt.bar(r4, frequency_high, color='#212121', width=barWidth, label='Large')
    # axis labels
    if sett == 0:
        plt.xlabel('Months', fontsize=15)
    if sett == 1:
        plt.xlabel('Weekday', fontsize=15)
    plt.xticks([r + barWidth for r in range(len(frequency_low))], labels_list)
    plt.ylabel('Quantity of commits', fontsize=15)
    plt.title('{}'.format(language), fontsize=15)
    plt.legend()
    plt.savefig('frequencia_por_dia_semana.eps', format='eps')
    plt.show()

# ### Plot size of all projects

# +
def project(sett):
    """Slice the module-level `dataset` by project-size class (all languages).

    sett: 1 = <1k lines, 2 = 1k-10k, 3 = 10k-100k, 4 = >100k.
    """
    if sett == 1:
        return dataset.loc[dataset['gh_sloc'] < 1000]
    if sett == 2:
        df2 = dataset.loc[dataset['gh_sloc'] < 10000]
        return df2.loc[df2['gh_sloc'] > 1000]
    if sett == 3:
        df3 = dataset.loc[dataset['gh_sloc'] < 100000]
        return df3.loc[df3['gh_sloc'] > 10000]
    if sett == 4:
        return dataset.loc[dataset['gh_sloc'] > 100000]

df_very_small = project(1)
df_small = project(2)
df_medium = project(3)
df_large = project(4)

weekday_very_small = weekday_frequency(df_very_small)
weekday_small = weekday_frequency(df_small)
weekday_medium = weekday_frequency(df_medium)
weekday_large = weekday_frequency(df_large)

plot_bar(1, 'Frequency per weekday', weekday_very_small, weekday_small, weekday_medium, weekday_large)
# -

# ### Data from very small projects, small, medium, large and very large.
# +
# below 1,000 lines
df_very_small = dataset.loc[(dataset['gh_sloc'] < 1000)]

# above 1,000 and below 10,000
df_small = dataset.loc[(dataset['gh_sloc'] < 10000)]
df_small = df_small.loc[(df_small['gh_sloc'] > 1000)]

# above 10,000 and below 100,000
df_medium = dataset.loc[(dataset['gh_sloc'] < 100000)]
df_medium = df_medium.loc[(df_medium['gh_sloc'] > 10000)]

# above 100,000 and below 1,000,000
df_large = dataset.loc[(dataset['gh_sloc'] > 100000)]
df_large = df_large.loc[df_large['gh_sloc'] < 1000000]

# above 1,000,000
df_very_large = dataset.loc[dataset['gh_sloc'] > 1000000]

# To inspect mean / std / median per class, describe gh_num_commits_in_push
# of each frame, e.g.: df_small.gh_num_commits_in_push.describe()
# -

# ### General media of commits

overall_mean = dataset.gh_num_commits_in_push.mean()
overall_mean

# ### Number of projects below average

# +
# BUGFIX: the original compared against the hard-coded value 2.36 — a stale
# copy of the mean computed in the previous cell; use the computed mean so
# the count stays correct when the dataset (or its filters) changes.
cont = 0
list_name = dataset.gh_project_name.unique().tolist()
for i in range(len(list_name)):
    df = dataset.loc[dataset['gh_project_name'] == list_name[i]]
    mean = df.gh_num_commits_in_push.mean()
    if mean < overall_mean:
        cont += 1
# -

# ### Description of java projects

# +
df = dataset.loc[dataset['gh_lang'] == 'java']

# below 1,000 lines
df_very_small = df.loc[(df['gh_sloc'] < 1000)]
# above 1,000 and below 10,000
df_small = df.loc[(df['gh_sloc'] < 10000)]
df_small = df_small.loc[(df_small['gh_sloc'] > 1000)]
# above 10,000 and below 100,000
df_medium = df.loc[(df['gh_sloc'] < 100000)]
df_medium = df_medium.loc[(df_medium['gh_sloc'] > 10000)]
# above 100,000 and below 1,000,000
df_large = df.loc[(df['gh_sloc'] > 100000)]
df_large = df_large.loc[df_large['gh_sloc'] < 1000000]
# above 1,000,000
df_very_large = df.loc[df['gh_sloc'] > 1000000]
# -

# ### Description of ruby projects

# +
df = dataset.loc[dataset['gh_lang'] == 'ruby']

# below 1,000 lines
df_very_small = df.loc[(df['gh_sloc'] < 1000)]
# above 1,000 and below 10,000
df_small = df.loc[(df['gh_sloc'] < 10000)]
df_small = df_small.loc[(df_small['gh_sloc'] > 1000)]
# above 10,000 and below 100,000
df_medium = df.loc[(df['gh_sloc'] < 100000)]
df_medium = df_medium.loc[(df_medium['gh_sloc'] > 10000)]
# above 100,000 and below 1,000,000
df_large = df.loc[(df['gh_sloc'] > 100000)]
df_large = df_large.loc[df_large['gh_sloc'] < 1000000]
# above 1,000,000
df_very_large = df.loc[df['gh_sloc'] > 1000000]
# -

# ### Plot of java project commits frequency

# +
df = dataset.loc[dataset['gh_lang'] == 'java']
df = df.sort_values(['gh_sloc'], ascending=False)

# below 1,000 lines
df_very_small = df.loc[(df['gh_sloc'] < 1000)]
very_small = df_very_small.gh_num_commits_in_push.tolist()
# above 1,000 and below 10,000
df_small = df.loc[(df['gh_sloc'] < 10000)]
df_small = df_small.loc[(df_small['gh_sloc'] > 1000)]
small = df_small.gh_num_commits_in_push.tolist()
# above 10,000 and below 100,000
df_medium = df.loc[(df['gh_sloc'] < 100000)]
df_medium = df_medium.loc[(df_medium['gh_sloc'] > 10000)]
medium = df_medium.gh_num_commits_in_push.tolist()
# above 100,000 lines
df_large = df.loc[(df['gh_sloc'] > 100000)]
large = df_large.gh_num_commits_in_push.tolist()

###### plot bar #######
data = [very_small, small, medium, large]
fig, ax = plt.subplots()
fig.subplots_adjust(left=0.08, right=1.38, bottom=0.05, top=1.2, hspace=0.4, wspace=0.3)
ax.set_title('Java Projects', fontsize=15)
ax.set_xlabel('Size of the projects', fontsize=15)
ax.set_ylabel('Distribution', fontsize=15)
ax.set_axisbelow(True)
# boxplot without outlier points
ax.boxplot(data, 0, '', showmeans=True, meanline=True,
           labels=['Very small', 'Small', 'Medium', 'Large'])
plt.savefig('size_project_java.eps', format='eps', dpi = 300, bbox_inches='tight')
plt.show()
# -

# ### Plot of ruby project commits frequency

# +
df = dataset.loc[dataset['gh_lang'] == 'ruby']
df = df.sort_values(['gh_sloc'], ascending=False)

# below 1,000 lines
df_very_small = df.loc[(df['gh_sloc'] < 1000)]
very_small = df_very_small.gh_num_commits_in_push.tolist()
# above 1,000 and below 10,000
df_small = df.loc[(df['gh_sloc'] < 10000)]
df_small = df_small.loc[(df_small['gh_sloc'] > 1000)]
small = df_small.gh_num_commits_in_push.tolist()
# above 10,000 and below 100,000
df_medium = df.loc[(df['gh_sloc'] < 100000)]
df_medium = df_medium.loc[(df_medium['gh_sloc'] > 10000)]
medium = df_medium.gh_num_commits_in_push.tolist()
# above 100,000 lines
df_large = df.loc[(df['gh_sloc'] > 100000)]
large = df_large.gh_num_commits_in_push.tolist()

###### plot bar #######
data = [very_small, small, medium, large]
fig, ax = plt.subplots()
fig.subplots_adjust(left=0.08, right=1.38, bottom=0.05, top=1.2, hspace=0.4, wspace=0.3)
ax.set_title('Ruby Projects', fontsize=15)
ax.set_xlabel('Size of the projects', fontsize=15)
ax.set_ylabel('Distribution', fontsize=15)
ax.set_axisbelow(True)
# boxplot without outlier points
ax.boxplot(data, 0, '', showmeans=True, meanline=True,
           labels=['Very small', 'Small', 'Medium', 'Large'])
#plt.savefig('size_project_ruby.eps', format='eps', dpi = 300, bbox_inches='tight')
plt.show()
# -

# # RQ2

# ### Number of builds per java project

# +
df = dataset.loc[dataset['gh_lang'] == 'java']
list_name = df.gh_project_name.unique().tolist()
list_size = []
for i in range(len(list_name)):
    data = df.loc[df['gh_project_name'] == list_name[i]]
    list_size.append(data.tr_build_id.size)
df = pd.Series(list_size, index=list_name)
# -

# ### Receive java projects that use coveralls

# +
# Receive java projects that use coveralls and then filter projects that have build in 2016
FILE = 'data/projetos_java_coverall.csv'
projetos_coverral = pd.read_csv(FILE, sep=',', engine='c')
java_coverral = projetos_coverral.gh_project_name.tolist()

projeto_java = []
for i in range(len(java_coverral)):
    project = dataset.loc[dataset['gh_project_name'] == java_coverral[i]]
    # BUGFIX: the original used int(...sort_values(ascending=False)[0:1]) to
    # read the latest push year, which raises on empty selections and on
    # modern pandas (int() of a Series); .max() expresses "latest year" directly.
    if project.gh_pushed_at.dt.year.max() == 2016:
        projeto_java.append(java_coverral[i])
# -

# ### Number of builds per ruby project

# +
df = dataset.loc[dataset['gh_lang'] == 'ruby']
list_name = df.gh_project_name.unique().tolist()
list_size = []
for i in range(len(list_name)):
    data = df.loc[df['gh_project_name'] == list_name[i]]
    list_size.append(data.tr_build_id.size)
df = pd.Series(list_size, index=list_name)
# -

# ### Receive ruby projects that use coveralls

# +
# Receive ruby projects that use coveralls and then filter projects that have build in 2016
FILE = 'data/projetos_ruby_coverall.csv'
projetos_coverral = pd.read_csv(FILE, sep=',', engine='c')
ruby_coverral = projetos_coverral.gh_project_name.tolist()

projeto_ruby = []
for i in range(len(ruby_coverral)):
    project = dataset.loc[dataset['gh_project_name'] == ruby_coverral[i]]
    # Same latest-year fix as in the java cell above.
    if project.gh_pushed_at.dt.year.max() == 2016:
        projeto_ruby.append(ruby_coverral[i])
# -

# ### Coverralls dataset with filters - Java

# +
File = 'data/projetos_java_coverall_filtrados_2016.csv'
df = pd.read_csv(File, sep=',', engine='c')
df['gh_pushed_at'] = pd.to_datetime(df.gh_pushed_at).dt.date  # keep only the date part
# NOTE(review): assigning a sorted Series back to its own column realigns on the
# index, so this line is effectively a no-op; kept for fidelity with the original.
df['gh_pushed_at'] = df['gh_pushed_at'].sort_values(ascending=False)
list_name_cover = df.gh_project_name.unique().tolist()  # projects present in the Coveralls export
# -

# ### Travis dataset with filters - Java

# +
data = dataset.loc[dataset['gh_lang'] == 'java']        # filter by language
data = data.loc[data.gh_pushed_at.dt.year == 2016]      # keep only 2016 builds
data['gh_pushed_at'] = pd.to_datetime(data.gh_pushed_at).dt.date
data['gh_pushed_at'] = data['gh_pushed_at'].sort_values(ascending=False)

# Keep only the projects that also appear in the Coveralls list.  The original
# built an auxiliary frame and removed, one project at a time, everything not
# in `list_name_cover`; `isin` performs the same filter in one vectorized pass.
data = data.loc[data['gh_project_name'].isin(list_name_cover)]
# -

# ### Coverralls dataset with filters - Ruby

# +
File = 'data/projetos_ruby_coverall_filtrados_2016.csv'
df = pd.read_csv(File, sep=',', engine='c')
df['gh_pushed_at'] = pd.to_datetime(df.gh_pushed_at).dt.date  # keep only the date part
# NOTE(review): no-op sort-back, kept for fidelity (see the Java cell above).
df['gh_pushed_at'] = df['gh_pushed_at'].sort_values(ascending=False)
list_name_cover = df.gh_project_name.unique().tolist()
# -

# ### Travis dataset with filters - Ruby

# +
data = dataset.loc[dataset['gh_lang'] == 'ruby']        # filter by language
data = data.loc[data.gh_pushed_at.dt.year == 2016]      # keep only 2016 builds
data['gh_pushed_at'] = pd.to_datetime(data.gh_pushed_at).dt.date
data['gh_pushed_at'] = data['gh_pushed_at'].sort_values(ascending=False)

# Same project-intersection filter as the Java cell, via isin.
data = data.loc[data['gh_project_name'].isin(list_name_cover)]
# -

# ### Boxplot

# +
"""
Boxplot.

The code below has the purpose of visualizing code coverage between
Java projects and Ruby projects using a boxplot.
"""
# Information about code coverage ([90.12, 80.00])
java = []
ruby = []
data = [java, ruby]

fig, ax = plt.subplots()
ax.set_axisbelow(True)
ax.boxplot(data, 0, showmeans=True, meanline=True, labels=['Java', 'Ruby'])
ax.set_title('Code Coverage', fontsize=10)
ax.set_xlabel('Languages', fontsize=10)
ax.set_ylabel('Distribution', fontsize=10)
plt.savefig('code_coverage.eps', format='eps', dpi = 300, bbox_inches='tight')
plt.show()
# -

# ### Comparing

# +
"""
Comparing.

The code below has the purpose of filter data between Travis CI and Coveralls
"""
# Loading csv files with data
with open('java_coveralls.csv') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    cover = list(spamreader)
with open('java_travis.csv') as csvfile:
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    travis = list(spamreader)

dicio = {}
for i in travis:
    for j in cover:
        if i[0] == j[0]:  # Comparing project names
            # String to datetime
            data1 = datetime.datetime.strptime(i[1], '%Y-%m-%d')
            data2 = datetime.datetime.strptime(j[3][0:10], '%Y-%m-%d')
            # Taking the difference of days
            quantidade_dias = (data1 - data2).days
            # Putting into a dict (first match wins)
            if 0 <= quantidade_dias <= 7:
                if j[0] not in dicio:
                    dicio[j[0]] = [j[2], j[3][0:10]]

# Export data into a csv file.  The original re-opened output.csv in append
# mode for every row; opening once produces the same file with far less I/O.
with open('output.csv', 'a') as csvfile:
    writer = csv.writer(csvfile)
    for i in dicio:
        project_name = i
        coverage = dicio[i][0]
        push_date = dicio[i][1]
        writer.writerow([project_name, push_date, coverage])
# -

# ### Get coveralls data

# +
"""
Get coveralls data.

This code has the purpose of obtaining, through Coveralls, coverage
data of the projects that were obtained in Travis Torrent.
"""
projects = []
with open("output_ruby_filtrado.csv", "r") as csvfile:
    # load the csv file with project names into a list, removing useless characters
    for line in csvfile:
        projects.append(re.sub('"|,|\n', '', line))

for project in projects:
    # Getting total pages (Coveralls paginates five builds per page)
    r = requests.get('https://coveralls.io/github/'+project+'.json?page=1')
    data = r.json()
    pages = math.ceil(data['total']/5) + 1
    for i in range(1, pages):
        # Make a GET request to Coveralls, compare branch and year
        # information, and append the coverage information to a csv file.
        r = requests.get('https://coveralls.io/github/'+project+'.json?page='+str(i))
        data = r.json()
        with open('output.csv', 'a') as outputfile:
            writer = csv.writer(outputfile)
            for j in range(0, len(data['builds'])):
                if (data['builds'][j]['branch'] == 'master'
                        and data['builds'][j]['created_at'][0:4] == '2016'):
                    writer.writerow([project,
                                     data['builds'][j]['branch'],
                                     data['builds'][j]['covered_percent'],
                                     data['builds'][j]['created_at']])
# -

# ### Calculate some statistic util informations

# +
"""
Code used by calculate some statistic util informations
"""
from numpy import percentile
from numpy.random import rand

read_fl = pd.read_csv('file.csv')
# calculating quartiles
quartiles = percentile(read_fl, [25, 50, 75])
# calculating min and max (the original computed this twice; once is enough)
data_min, data_max = read_fl.min(), read_fl.max()
# printing
print('Min: %.3f' % data_min)
print('Q1: %.3f' % quartiles[0])
print('Median: %.3f' % quartiles[1])
print('Q3: %.3f' % quartiles[2])
print('Max: %.3f' % data_max)
# -

# # RQ3

# ### Filters the Java language and returns a list with the name of the projects

df = dataset.loc[dataset['gh_lang'] == 'java']
list_name = df.gh_project_name.unique().tolist()

# ### Creates 2 filters for passing and non-passing builds

# +
passed = df.loc[df['tr_status'] == 'passed']
failed = df.loc[df['tr_status'] == 'failed']
# -

# ### Creates a list with the amount builds passed and failed

passed = []
failed = []
for i in range(len(list_name)):
    project = df.loc[df['gh_project_name'] == list_name[i]]
    _passed = project.loc[project['tr_status'] == 'passed']
    _failed = project.loc[project['tr_status'] == 'failed']
    passed.append(_passed.size)
    failed.append(_failed.size)

# ### Status Build

def status_build(project_name):
    """Return, for one project, the list of date differences between each
    failing build and the next passing build (i.e. how long the build
    stayed broken).  Reads the module-level `dataset`.
    """
    # filter the dataset by project name and sort newest-first
    df = dataset.loc[dataset['gh_project_name'] == project_name]
    df = df.sort_values(by='gh_pushed_at', ascending=False)
    df['gh_pushed_at'] = pd.to_datetime(df.gh_pushed_at).dt.date  # keep only the date part
    date_line = df.gh_pushed_at          # Series of push dates
    status_build = df.tr_status.tolist() # build statuses, newest-first

    build_red = []
    sett = 0  # 1 while we are inside a "broken" streak
    for i in range(len(status_build)):
        if "failed" == status_build[i]:
            if sett == 0:
                index_fail = i  # remember the first failure of the streak
                sett = 1
        if ('passed' == status_build[i]) and (sett == 1):
            # days from the failing build back to the passing one
            build_red.append(date_line.iloc[index_fail] - date_line.iloc[i])
            sett = 0
    return build_red

# ### Status Build for all projects

# +
list_name = dataset.gh_project_name.unique().tolist()

list_days_broken = []
for i in range(len(list_name)):
    list_days_broken.append(status_build(list_name[i]))

days_broken = []
for j in range(len(list_days_broken)):
    days_broken = days_broken + list_days_broken[j]
days = pd.DataFrame(days_broken, columns=['date'])

count = 0
metric = days.date.dt.days.describe()[6]  # 3rd quartile
# Count each project at most once: as soon as one of its broken periods
# reaches the metric, count it and move on to the next project.
for k in range(len(list_days_broken)):
    for j in range(len(list_days_broken[k])):
        if metric <= list_days_broken[k][j].days:
            count += 1
            break
# -

# ### Status build for JAVA

# +
# Filter the java language, use status_build() to collect, per project, the
# days each build stayed broken; take the 3rd quartile of all broken periods
# as the metric and count the projects with at least one period >= metric.
#
# BUGFIX: the original cell contained a half-written line
# `def days_briken(language, )` — a syntax error left over from an unfinished
# refactoring into a function.  The cell body was already written to run at
# top level, so the stray `def` was removed.
df = dataset.loc[dataset['gh_lang'] == 'java']
list_name = df.gh_project_name.unique().tolist()
print(f"quantidade de projetos java {len(list_name)}")

list_days_broken = []
for i in range(len(list_name)):
    list_days_broken.append(status_build(list_name[i]))

days_broken = []
for j in range(len(list_days_broken)):
    days_broken = days_broken + list_days_broken[j]
days_java = pd.DataFrame(days_broken, columns=['date'])
print(days_java.date.dt.days.describe())

metric = days_java.date.dt.days.describe()[6]  # 3rd quartile
count = 0
# one hit per project is enough — break to the next project's list
for k in range(len(list_days_broken)):
    for j in range(len(list_days_broken[k])):
        if metric <= list_days_broken[k][j].days:
            count += 1
            break
# -

# ### Status build for RUBY

# +
# Same procedure as the java cell above, for ruby projects.
df = dataset.loc[dataset['gh_lang'] == 'ruby']
list_name = df.gh_project_name.unique().tolist()

list_days_broken = []
for i in range(len(list_name)):
    list_days_broken.append(status_build(list_name[i]))

days_broken_ruby = []
for j in range(len(list_days_broken)):
    days_broken_ruby = days_broken_ruby + list_days_broken[j]
days_ruby = pd.DataFrame(days_broken_ruby, columns=['date'])

metric = days_ruby.date.dt.days.describe()[6]  # 3rd quartile
count = 0
for k in range(len(list_days_broken)):
    for j in range(len(list_days_broken[k])):
        if metric <= list_days_broken[k][j].days:
            count += 1
            break
# -

# ### Number of projects

def quantidade_projetos(language, sett):
    """Return the names of the `language` projects in the size class `sett`.

    Sizes come from the largest `gh_sloc` of each project.
    sett: 1 = <1k lines, 2 = 1k-10k, 3 = 10k-100k, 4 = >100k.
    Reads the module-level `dataset`.
    """
    small, medium, large, very_small = [], [], [], []
    df = dataset.sort_values(['gh_sloc'], ascending=False)
    df = df.loc[df['gh_lang'] == language]
    list_name_project = df.gh_project_name.unique().tolist()
    if sett == 1:
        for name in list_name_project:
            df0 = df.loc[df['gh_project_name'] == name]
            if 1000 > df0.gh_sloc[0:1].item():
                very_small.append(name)
        return very_small
    if sett == 2:
        for name in list_name_project:
            df1 = df.loc[df['gh_project_name'] == name]
            if (1000 < df1.gh_sloc[0:1].item()) and (10000 > df1.gh_sloc[0:1].item()):
                small.append(name)
        return small
    if sett == 3:
        for name in list_name_project:
            df2 = df.loc[df['gh_project_name'] == name]
            if (10000 < df2.gh_sloc[0:1].item()) and (100000 > df2.gh_sloc[0:1].item()):
                medium.append(name)
        return medium
    if sett == 4:
        for name in list_name_project:
            df3 = df.loc[df['gh_project_name'] == name]
            if 100000 < df3.gh_sloc[0:1].item():
                large.append(name)
        return large

# ### Number of projects with broken build

# +
def days_broken(list_name):
    """Collect the broken-build periods of every project in `list_name`,
    print their describe(), count the projects with at least one period
    >= the 3rd quartile, and return the periods (in days) as a Series.
    """
    print(f"quantidade de projetos {len(list_name)}")
    list_days_broken = []
    for i in range(len(list_name)):
        # matrix of broken periods, one list per project
        list_days_broken.append(status_build(list_name[i]))

    days_broken = []
    for j in range(len(list_days_broken)):
        # flatten the matrix into a single list
        days_broken = days_broken + list_days_broken[j]
    df_broken = pd.DataFrame(days_broken, columns=['date'])
    print(df_broken.date.dt.days.describe())

    metric = df_broken.date.dt.days.describe()[6]  # 3rd quartile
    count = 0
    # one hit per project — break to the next project's list
    for k in range(len(list_days_broken)):
        for j in range(len(list_days_broken[k])):
            if metric <= list_days_broken[k][j].days:
                count += 1
                break
    return df_broken.date.dt.days
# -

# ### Displays build broken by language

# +
list_very_small = quantidade_projetos('java', 1)
very_small = days_broken(list_very_small)

list_small = quantidade_projetos('java', 2)
small = days_broken(list_small)

list_medium = quantidade_projetos('java', 3)
medium = days_broken(list_medium)

list_large = quantidade_projetos('java', 4)
large = days_broken(list_large)
# -

# # RQ4

# ### Filter dataset

df = dataset
df = df.dropna(subset=['tr_log_buildduration'])
df = df.sort_values(by='tr_log_buildduration', ascending=False)

# ### Amount of build less than 10 minutes and more than 10 minutes

# +
# Count builds taking at most 10 minutes (600 s) vs. longer ones.
list_time_build = df.tr_log_buildduration.tolist()
build_10 = 0
build_long = 0
for i in list_time_build:
    if(i <= 600):
        build_10+=1
    else:
        build_long+=1
# print(f'Build com 10 min ou menos: {build_10}')
# print(f'Build com mais de 10 min: {build_long}')
# -

# ### Duration build for projetc Very small, small, medium, large and very large

# +
def size_pro(sett):
    """Return the rows of `dataset` in one SLOC size bucket.

    sett: 1 = < 1000 lines, 2 = 1000-10000, 3 = 10000-100000,
          4 = 100000-1000000, 5 = > 1000000.
    """
    if(sett == 1):  # below 1000 lines
        df1 = dataset.loc[(dataset['gh_sloc'] < 1000)]
        return(df1)
    if(sett == 2):  # above 1000 and below 10,000
        df2 = dataset.loc[(dataset['gh_sloc'] < 10000)]
        df2 = df2.loc[(df2['gh_sloc'] > 1000) ]
        return(df2)
    if(sett == 3):  # above 10,000 lines and below 100,000
        df3 = dataset.loc[(dataset['gh_sloc'] < 100000)]
        df3 = df3.loc[(df3['gh_sloc'] > 10000) ]
        return(df3)
    if(sett == 4):  # above 100,000 lines and below 1,000,000
        df4 = dataset.loc[dataset['gh_sloc'] > 100000]
        df4 = df4.loc[(df4['gh_sloc'] < 1000000)]
        return(df4)
    if(sett == 5):  # above 1,000,000 lines
        df5 = dataset.loc[dataset['gh_sloc'] > 1000000]
        return(df5)

very_small = size_pro(1)
small = size_pro(2)
medium = size_pro(3)
large = size_pro(4)
very_large = size_pro(5)
# print(f'\nProjetos very small: \n{very_small.tr_log_buildduration.describe()} \nmediana: {very_small.tr_log_buildduration.median()}')
# print(f'\nProjetos small: \n{small.tr_log_buildduration.describe()} \nmediana: {small.tr_log_buildduration.median()}')
# print(f'\nProjetos medium: \n{medium.tr_log_buildduration.describe()} \nmediana: {medium.tr_log_buildduration.median()}')
# print(f'\nProjetos large: \n{large.tr_log_buildduration.describe()} \nmediana: {large.tr_log_buildduration.median()}' )
# print(f'\nProjetos very large: \n{very_large.tr_log_buildduration.describe()} \nmediana: {very_large.tr_log_buildduration.median()}')
# -

# ### Project size by language type

def size_pro_lang(language,sett):
    """Return the rows of `dataset` for `language` in one SLOC size bucket.

    Same buckets as size_pro().

    BUG FIX: the sett == 5 branch previously filtered the already
    dropna'd/sorted global `df` instead of `dataset`, did not filter by
    `language`, and ended with a bare `return()` (i.e. None) — which made
    the active `very_large.tr_log_buildduration` prints below crash.
    """
    if(sett == 1):  # below 1000 lines
        df1 = dataset.loc[(dataset['gh_sloc'] < 1000) & (dataset['gh_lang'] == language)]
        return(df1)
    if(sett == 2):  # above 1000 and below 10,000
        df2 = dataset.loc[(dataset['gh_sloc'] < 10000)]
        df2 = df2.loc[(df2['gh_sloc'] > 1000) & (df2['gh_lang'] == language)]
        return(df2)
    if(sett == 3):  # above 10,000 lines and below 100,000
        df3 = dataset.loc[(dataset['gh_sloc'] < 100000)]
        df3 = df3.loc[(df3['gh_sloc'] > 10000) & (df3['gh_lang'] == language)]
        return(df3)
    if(sett == 4):  # above 100,000 lines and below 1,000,000
        df4 = dataset.loc[dataset['gh_sloc'] > 100000]
        df4 = df4.loc[(df4['gh_sloc'] < 1000000) & (df4['gh_lang'] == language)]
        return(df4)
    if(sett == 5):  # above 1,000,000 lines
        df5 = dataset.loc[(dataset['gh_sloc'] > 1000000) & (dataset['gh_lang'] == language)]
        return(df5)

# ### Filter language Java

df_java = df.loc[df['gh_lang'] == 'java']
# df_java.tr_log_buildduration.describe()

# ### Java build quantity less than 10 minutes and more than 10 minutes

# +
list_time_build = df_java.tr_log_buildduration.tolist()
build_10 = 0
build_long = 0
for i in list_time_build:
    if(i <= 600):
        build_10+=1
    else:
        build_long+=1
# print(f'Build com 10 min ou menos: {build_10}')
# print(f'Build com mais de 10 min: {build_long}')
# -

# ### Projects java, Very small, small, medium, large and very large

# +
very_small = size_pro_lang('java', 1)
small = size_pro_lang('java', 2)
medium = size_pro_lang('java', 3)
large = size_pro_lang('java', 4)
very_large = size_pro_lang('java', 5)
# print(f'\nProjetos very small: \n{very_small.tr_log_buildduration.describe()} \nmediana: {very_small.tr_log_buildduration.median()}')
# print(f'\nProjetos small: \n{small.tr_log_buildduration.describe()} \nmediana: {small.tr_log_buildduration.median()}')
# print(f'\nProjetos medium: \n{medium.tr_log_buildduration.describe()} \nmediana: {medium.tr_log_buildduration.median()}')
# print(f'\nProjetos large: \n{large.tr_log_buildduration.describe()} \nmediana: {large.tr_log_buildduration.median()}' )
# print(f'\nProjetos very large: \n{very_large.tr_log_buildduration.describe()} \nmediana: {very_large.tr_log_buildduration.median()}')
# -

# ### Filter language Ruby

df_ruby = df.loc[df['gh_lang'] == 'ruby']
#df_ruby.tr_log_buildduration.describe()

# ### Ruby build quantity less than 10 minutes and more than 10 minutes

# +
list_time_build = df_ruby.tr_log_buildduration.tolist()
build_10 = 0
build_long = 0
for i in list_time_build:
    if(i <= 600):
        build_10+=1
    else:
        build_long+=1
# print(f'Build com 10 min ou menos: {build_10}')
# print(f'Build com mais de 10 min: {build_long}')
# -

# ### Projects ruby Very small, small, medium, large e very large

# +
very_small = size_pro_lang('ruby', 1)
small = size_pro_lang('ruby', 2)
medium = size_pro_lang('ruby', 3)
large = size_pro_lang('ruby', 4)
very_large = size_pro_lang('ruby', 5)
print(f'\nProjetos very small: \n{very_small.tr_log_buildduration.describe()} \nmediana: {very_small.tr_log_buildduration.median()}')
print(f'\nProjetos small: \n{small.tr_log_buildduration.describe()} \nmediana: {small.tr_log_buildduration.median()}')
print(f'\nProjetos medium: \n{medium.tr_log_buildduration.describe()} \nmediana: {medium.tr_log_buildduration.median()}')
print(f'\nProjetos large: \n{large.tr_log_buildduration.describe()} \nmediana: {large.tr_log_buildduration.median()}' )
print(f'\nProjetos very large: \n{very_large.tr_log_buildduration.describe()} \nmediana: {very_large.tr_log_buildduration.median()}')
src/project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

# Demo: one large set of axes with two smaller axes drawn inside the same figure.
fig = plt.figure()

x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 3, 4, 2, 5, 8, 6]

# Axes positions are fractions of the figure size: [left, bottom, width, height].
left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
outer_axes = fig.add_axes([left, bottom, width, height])  # main (outer) axes
outer_axes.plot(x, y, 'r')
outer_axes.set_xlabel('x')
outer_axes.set_ylabel('y')
outer_axes.set_title('title')

# First inset: same data with the coordinates swapped, placed upper-left.
inset_axes = fig.add_axes([0.2, 0.6, 0.25, 0.25])
inset_axes.plot(y, x, 'b')
inset_axes.set_xlabel('x')
inset_axes.set_ylabel('y')
inset_axes.set_title('title inside 1')

# Second inset via the pyplot state machine instead of the Figure object,
# plotting y reversed against x, placed lower-right.
plt.axes([0.6, 0.2, 0.25, 0.25])
plt.plot(y[::-1], x, 'g')
plt.xlabel('x')
plt.ylabel('y')
plt.title('title inside 2')
pra_matplotlib/plot_14_plot_inplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Word-level German language model: fastText word vectors + casing features +
# a char-CNN feed a BiLSTM that predicts the last (51st) token of each
# 51-token window generated from tokenized German Wikipedia text.

# +
import sys
import os
sys.path.insert(0, '/home/jindal/notebooks/jindal/NER')  # project-local modules (validation, prepro)
import fastText
import numpy as np
from validation import compute_f1
from keras.models import Model
from keras.layers import TimeDistributed,Conv1D,Dense,Embedding,Input,Dropout,LSTM,Bidirectional,MaxPooling1D,Flatten,concatenate
from prepro import readfile,createBatches,createMatrices,iterate_minibatches,addCharInformatioin,padding
from keras.utils import plot_model,Progbar
from keras.preprocessing.sequence import pad_sequences
from keras.initializers import RandomUniform
from sklearn.metrics import precision_recall_fscore_support as score
import sklearn
import pickle, threading
from keras.utils import to_categorical
import linecache
from keras.callbacks import Callback

epochs = 50
trainable=True
# -

# +
sys.version_info[0]
# -

class WeightsSaver(Callback):
    """Keras callback that saves the model weights every N batches to german_lm.h5."""
    def __init__(self, model, N):
        self.model = model
        self.N = N          # save interval, in batches
        self.batch = 0      # running batch counter across epochs
    # NOTE(review): mutable default `logs={}` — harmless here since it is
    # never mutated, but a known Python pitfall.
    def on_batch_end(self, batch, logs={}):
        if self.batch % self.N == 0:
            name = 'german_lm.h5'
            # print("model saved %s" %self.batch)
            self.model.save_weights(name)
        self.batch += 1

# +
# Pre-trained German fastText embeddings; the model input below assumes
# the dimension is 300 (Input(shape=(None, 300))) — TODO confirm.
ft = fastText.load_model("/home/jindal/notebooks/fastText/wiki.de.bin")
nb_embedding_dims = ft.get_dimension()
# -

# +
# NOTE(review): redefines `createMatrices` imported from prepro above.
def createMatrices(sentences, word2Idx, case2Idx, char2Idx):
    """Map tokenized sentences to [wordIndices, caseIndices, charIndices] triples."""
    #{'numeric': 0, 'allLower': 1, 'contains_digit': 6, 'PADDING_TOKEN': 7, 'other': 4, 'allUpper': 2, 'mainly_numeric': 5, 'initialUpper': 3}
    unknownIdx = word2Idx['UNKNOWN_TOKEN']
    paddingIdx = word2Idx['PADDING_TOKEN']
    dataset = []
    wordCount = 0
    unknownWordCount = 0
    for sentence in sentences:
        wordIndices = []
        caseIndices = []
        charIndices = []
        # labelIndices = []
        for word,char in sentence:
            wordCount += 1
            # Exact-case lookup first, then lower-cased, else UNKNOWN.
            if word in word2Idx:
                wordIdx = word2Idx[word]
            elif word.lower() in word2Idx:
                wordIdx = word2Idx[word.lower()]
            else:
                wordIdx = unknownIdx
                unknownWordCount += 1
            charIdx = []
            for x in char:
                charIdx.append(char2Idx[x])
            #Get the label and map to int
            wordIndices.append(wordIdx)
            caseIndices.append(getCasing(word, case2Idx))
            charIndices.append(charIdx)
            # labelIndices.append(label2Idx[label])
        dataset.append([wordIndices,caseIndices, charIndices])
    return dataset

def getCasing(word, caseLookup):
    """Classify a token's casing/digit shape and return its index from caseLookup."""
    casing = 'other'
    numDigits = 0
    for char in word:
        if char.isdigit():
            numDigits += 1
    digitFraction = numDigits / float(len(word))
    if word.isdigit(): #Is a digit
        casing = 'numeric'
    elif digitFraction > 0.5:
        casing = 'mainly_numeric'
    elif word.islower(): #All lower case
        casing = 'allLower'
    elif word.isupper(): #All upper case
        casing = 'allUpper'
    elif word[0].isupper(): #is a title, initial char upper, then all lower
        casing = 'initialUpper'
    elif numDigits > 0:
        casing = 'contains_digit'
    return caseLookup[casing]

# NOTE(review): redefines `padding` imported from prepro.  It computes maxlen
# but then pads every char sequence to the fixed width 52 ('post') — confirm
# whether `maxlen` was meant to be used instead of 52.
def padding(Sentences):
    maxlen = 52
    for sentence in Sentences:
        char = sentence[2]
        for x in char:
            maxlen = max(maxlen,len(x))
    for i,sentence in enumerate(Sentences):
        Sentences[i][2] = pad_sequences(Sentences[i][2],52,padding='post')
    return Sentences
# -

# +
def doc_load(filename):
    """Read a file as bytes and decode it to str (default UTF-8)."""
    f = open(filename,'rb')
    text = f.read().decode()
    f.close()
    return text

def save_doc(lines, filename):
    """Write an iterable of lines to a file, newline-joined and encoded."""
    data = '\n'.join(lines)
    f = open(filename,'wb')
    f.write(data.encode())
    f.close()
# -

text = doc_load('/home/jindal/notebooks/wikipedia/de-wiki_tokenized.txt')
print(text[:200])

window_size =51  # 50 context tokens + 1 prediction target per sequence

def generate_sequences(out_file, in_file ) -> None:
    """Write sliding windows of `window_size` tokens from each line of
    in_file to out_file, one window per output line, left-padding with
    '0' tokens while fewer than `window_size` tokens of history exist."""
    # x = 0
    output = open(out_file,'wb')
    with open(in_file,'rb') as f:
        for line in f:
            # if x==1000: #To generate toy dataset
            #     break
            # x+=1
            text = line.decode().split()
            for i in range(len(text)):
                if i+1 >= window_size:
                    temp = text[i-window_size+1:i+1]
                else:
                    # Not enough history yet: left-pad with '0' tokens.
                    temp = ['0' for i in range(window_size - (i +1))] + text[:i+1]
                string = ' '.join(temp)+' \n'
                # if len(string.split())!=51:
                #     print(len(string.split()))
                #     print(text)
                output.write(string.encode())
    output.close()

generate_sequences('wiki_dataset_sequences','/home/jindal/notebooks/wikipedia/de-wiki_tokenized.txt')

# assert
# Sanity check: every generated sequence must contain exactly 51 tokens.
with open('toy_dataset_sequences_shuffled') as f:
    for line in f:
        assert len(line.split()) == 51

# :: Hard coded case lookup ::
case2Idx = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'mainly_numeric':5, 'contains_digit': 6, 'PADDING_TOKEN':7}
caseEmbeddings = np.identity(len(case2Idx), dtype='float32')  # one-hot casing embedding matrix
print(caseEmbeddings.shape)

# +
# Collect every character occurring in the (shuffled) toy dataset.
# NOTE(review): `for word in line` already iterates characters of the line,
# so the inner loop is redundant — the result is still the character set.
characters={}
with open ('/home/jindal/notebooks/jindal/NER/language_model/toy_dataset_sequences_shuffled') as f:
    for line in f:
        for word in line:
            word =str(word)
            for char in word:
                # print(char)
                characters[char]=True
# -

print(characters)

# Character -> index mapping, in dict insertion order.
char2Idx={}
for char in characters:
    char2Idx[char] = len(char2Idx)
# print(char2Idx)

# +
# Vocabulary of the toy dataset (dict used as an insertion-ordered set).
toy_vocab ={}
with open('toy_dataset_sequences') as f:
    for line in f:
        # print(line)
        line = line.split()
        # print(len(line))
        for word in line:
            # word = word
            # print(word)
            toy_vocab[word]=True
# -

print(toy_vocab)

# +
line = linecache.getline('/home/jindal/notebooks/jindal/NER/language_model/toy_dataset_sequences', 17)
print(line)
# -

# Word -> index mapping over the toy vocabulary.
word2Idx={}
for word in toy_vocab.keys():
    word2Idx[word] = len(word2Idx)

char2Idx['0']

print(word2Idx)

line_number=0  # global cursor into the training file, advanced by my_generator
# lock = threading.Lock()

# +
def my_generator(file :"input training file", batch_size):
    """Yield ([word_vectors, casing_ids, char_ids], target_word_ids) batches forever.

    Lines are read cyclically from `file` via linecache; each line must hold
    51 tokens, the last of which is the prediction target.  Malformed lines
    are skipped (that batch comes out one item short).
    """
    global line_number, lock
    while True:
        word_embeddings = []
        case_embeddings = []
        char_embeddings = []
        # batch_features = np.zeros((batch_size, nb_sequence_length, nb_embedding_dims)) # initializing features with zeros
        output_labels = []
        # print(len(features))
        for i in range(batch_size):
            # lock.acquire()
            # 20869 is presumably the line count of the training file
            # (it matches steps_per_epoch=20869//32 below) — TODO confirm.
            index = line_number%20869 +1
            # print(index)
            line_number+=1
            # lock.release()
            # index = random.choice(len(features), 1)[0]
            # print(index)
            line = linecache.getline(file, index)
            line = line.split()
            # print(line)
            temp_casing = []
            temp_char=[]
            temp_word=[]
            # print(line)
            if len(line)!=51:
                print(index)
                continue
            for word in line[:-1]:  # all tokens except the target
                # print(word)
                casing =getCasing(word, case2Idx)
                # print(casing)
                temp_casing.append(casing)
                temp_char2=[]
                for char in word:
                    temp_char2.append(char2Idx[char])
                temp_char2 = np.array(temp_char2)
                # print(temp_char2)
                # temp_char2 = pad_sequences(temp_char2, 52, padding='post')
                # temp_char.append(pad_sequences(temp_char2, 52, padding='post'))
                temp_char.append(temp_char2)
                word_vector = ft.get_word_vector(word.lower())
                temp_word.append(word_vector)
            temp_char = pad_sequences(temp_char, 52)  # pad char ids to width 52
            # print(temp_word)
            # print(len(temp_word))
            # print(temp_casing)
            # print(temp_char)
            # print(len(temp_char))
            # print(" **************** ")
            word_embeddings.append(temp_word)
            case_embeddings.append(temp_casing)
            char_embeddings.append(temp_char)
            output_labels.append(word2Idx[line[-1]])  # target = last token
            # batch_features[i] = process_features(line, window_size-1, nb_embedding_dims)
            # print(batch_features[i])
            # print(batch_features[i].shape)
            # batch_labels[i] = labels[index]
        yield ([np.array(word_embeddings), np.array(case_embeddings), np.array(char_embeddings)], np.array(output_labels))
# -

# NOTE(review): my_generator never terminates (while True), so this
# smoke-test loop only ends when manually interrupted.
for inp, output in my_generator('toy_dataset_sequences_shuffled',10):
    continue

vocab_size = len(word2Idx.keys())+1  # +1 for an out-of-vocabulary/padding slot
print(vocab_size)

# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=2

# Architecture: pre-computed fastText vectors + frozen one-hot casing
# embedding + per-word char-CNN -> BiLSTM -> softmax over the vocabulary.
words_input = Input(shape=(None, 300), dtype='float32',name='words_input')
# words = Embedding(input_dim =50, output_dim=300, trainable=False)(words_input)
casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
casing = Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], weights=[caseEmbeddings], trainable=False)(casing_input)
character_input=Input(shape=(None,52,),name='char_input')
embed_char_out=TimeDistributed(Embedding(len(char2Idx),30,embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)), name='char_embedding')(character_input)
dropout= Dropout(0.5, name='dropout1')(embed_char_out)
conv1d_out= TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same',activation='tanh', strides=1, name='conv'))(dropout)
maxpool_out=TimeDistributed(MaxPooling1D(52), name='maxpool')(conv1d_out)
char = TimeDistributed(Flatten())(maxpool_out)
char = Dropout(0.5)(char)
# NOTE(review): the casing branch is declared as a model input but is NOT
# concatenated here — only words_input and char contribute to the output.
output = concatenate([words_input, char])
output = Bidirectional(LSTM(200, return_sequences=False, dropout=0.50, recurrent_dropout=0.5))(output)
# output = TimeDistributed(Dense(vocab_size))(output)
# crf = CRF(len(label2Idx))
# output = crf(output)
# output = Flatten()(output)
output = Dense(vocab_size, activation='softmax')(output)
# output = Flatten()(output)
# output = Dense(vocab_size, activation='softmax')(output)
# model.add(Dense(vocab_size, activation='softmax'))
model = Model(inputs=[words_input,casing_input, character_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()
# plot_model(model, to_file='model.png')

# +
20700//32
# -

line_number=0  # restart the generator cursor before training

# Train; on any failure save the current weights, and save again at the end.
try:
    model.fit_generator(my_generator('toy_dataset_sequences_shuffled', 32), epochs=1, steps_per_epoch=20869//32, callbacks=[WeightsSaver(model, 200)])
except Exception as e:
    model.save('german_lm.h5')

model.save('german_lm.h5')

# Rebuild the identical architecture and load the trained weights into a
# fresh, uncompiled model (model_new) — e.g. for inference/fine-tuning.
words_input = Input(shape=(None, 300), dtype='float32',name='words_input')
# words = Embedding(input_dim =50, output_dim=300, trainable=False)(words_input)
casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
casing = Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], weights=[caseEmbeddings], trainable=False)(casing_input)
character_input=Input(shape=(None,52,),name='char_input')
embed_char_out=TimeDistributed(Embedding(len(char2Idx),30,embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)), name='char_embedding')(character_input)
dropout= Dropout(0.5, name='dropout1')(embed_char_out)
conv1d_out= TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same',activation='tanh', strides=1, name='conv'))(dropout)
maxpool_out=TimeDistributed(MaxPooling1D(52), name='maxpool')(conv1d_out)
char = TimeDistributed(Flatten())(maxpool_out)
char = Dropout(0.5)(char)
output = concatenate([words_input, char])
output = Bidirectional(LSTM(200, return_sequences=False, dropout=0.50, recurrent_dropout=0.5))(output)
# output = TimeDistributed(Dense(vocab_size))(output)
# crf = CRF(len(label2Idx))
# output = crf(output)
# output = Flatten()(output)
output = Dense(vocab_size, activation='softmax')(output)
# output = Flatten()(output)
# output = Dense(vocab_size, activation='softmax')(output)
# model.add(Dense(vocab_size, activation='softmax'))
model_new = Model(inputs=[words_input,casing_input, character_input], outputs=[output])
# model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam', metrics=['accuracy'])
# model.summary()
model_new.load_weights('german_lm.h5')

# # Train on Twitter
language_model/language_model_training_wiki.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Global TF Kernel (Python 3)
#     language: python
#     name: global-tf-python-3
# ---

def f1(precision,recall):
    """Harmonic mean of precision and recall; 0.0 when both are zero."""
    if precision == 0.0 and recall == 0.0:
        return 0.0
    return 2 * float(precision) * float(recall) / (precision + recall)

f1(0.31,0.76)

f1(0.64,0.38)

f1(0.39,0.16)

f1(0.22,0.18)

f1(0.11,0.39)

f1(1,1/2)

# ## AP and MAP

import numpy as np

# Stack two row vectors into a 2x3 matrix.
lst = [np.array([1,2,3]), np.array([4,4,4])]
matrix = np.vstack(lst)
matrix

matrix.sum(axis=1)

arr = np.array([1,2,3]).reshape(1,-1)

np.sum(arr,axis=1)

np.argsort(arr,axis=1)

np.array([1,1]).shape

original_y_true = np.array([[0.,1.],[1.,1.]])
original_y_true

original_y_true.shape

original_y_score = np.array([[0.,0.4],[0.9,0.1]])
original_y_score

score_indices_top_k = np.array([[0,0],[0,0]])
score_indices_top_k

# Fancy-index the true-label matrix: one row index per sample,
# column indices taken from the transposed top-k score indices.
row_indices_to_select = list(range(original_y_true.shape[0]))
row_indices_to_select

column_indices_to_select = score_indices_top_k.T

out = original_y_true[row_indices_to_select,column_indices_to_select]
out

out.shape

arr1 = np.array([[0.5]])
arr2 = np.array([[1.0]])
lst = [arr1, arr2]
np.vstack(lst)
python3/notebooks/random/evaluation-metrics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regex in Python # **Inhalt:** Text nach bestimmten Mustern durchsuchen # # **Nötige Skills:** Erste Schritte mit Pandas # # **Lernziele:** # - Syntax für Regular Expressions kennenlernen # - Anwendungsmöglichkeiten für Regex # # **Ressourcen:** # - Simons Cheatsheet: https://github.com/MAZ-CAS-DDJ/kurs_19_20/blob/master/00%20weitere%C2%A0Dokumente/hilfsmaterial/regex.md # - Offizielle Dokumentation: https://docs.python.org/3/library/re.html # - Ein Online-Regex-Tester: https://pythex.org/ # - Ein weiterer Online-Texter: https://regex101.com/ # - Ein Cheat-Sheet: https://www.dataquest.io/blog/large_files/python-regular-expressions-cheat-sheet.pdf # - Ein weiteres Cheat-Sheet: https://www.shortcutfoo.com/app/dojos/python-regex/cheatsheet # - Ein Online Traningskurs: https://www.shortcutfoo.com/app/dojos/python-regex/learn # ## Worum es geht # Regular Expressions sind eine super-sophistizierte Form von Such-Wildcards. Wir kennen solche Wildcards aus zB aus Windows-Explorer-Suche: Man benutzt Spezialcharaktere wie Sternchen `(*)` um der Suchmaschine anzuzeigen: Hier könnten verschiedene Buchstaben stehen. Regular Expressions dienen also zur Durchsuchung von Texten nach bestimmten, vordefinierten Mustern. Wir können Regex im Datenjournalismus brauchen, um zB Texte nach Emails oder Postleitzahlen zu durchsuchen, oder um Daten zu säubern und zu formatieren. # # Was für Muster gibt es? Wie komplex kann die Suche werden? Wir testen dies gleich selbst aus. # - Diese Seite hier öffnen: https://regex101.com/ (in einem neuen Fenster) # - Dieses Cheat-Sheet hier öffnen: https://github.com/MAZ-CAS-DDJ/kurs_19_20/blob/master/00%20weitere%C2%A0Dokumente/hilfsmaterial/regex.md # - Den untenstehenden Text in die Zwischenablage kopieren: text = ''' D'w. 
Nuss vo Bümpliz geit dür d'Strass liecht u flüchtig, wie nes Gas so unerreichbar höch <NAME> schüüch u brav wie Schaf schön föhnfrisiert chöme tubetänzig nöch U d'Spargle wachse i bluetjung Morge d'Sunne chunnt 's wird langsam warm Sie het meh als hundert ching u jede Früehlig git 's es nöis het d'Chiuchefänschterouge off u macht se zue bi jedem Kuss u we sie lachet wärde Bärge zu schtoub u jedes zäihe Läderhärz wird weich D'w. Nuss vo Bümpliz isch schön win es Füür i dr Nacht win e Rose im Schnee we se gseh duss in Bümpliz de schlat mir mis Härz hert i Hals u i gseh win i ungergah Si wohnt im ne Huus us Glas hinger Türe ohni Schloss gseht dür jedi Muur dänkt wi nes Füürwärch win e Zuckerstock läbt win e Wasserfau für si git's nüt, wo's nid git u aus wo's git, git's nid für ging si nimmt's wi's chunnt u lat's la gah D'w. Nuss vo Bümpliz isch schön win es Füür i dr Nacht win e Rose im Schnee we se gseh duss in Bümpliz de schlat mir mis Härz hert i Hals u i gseh win i ungergah ''' # ## Regular Expressions in Python # # Regex sind in den meisten Programmiersprachen ähnlich aufgebaut. Die Python-Library dazu heisst `re` import re # ## Funktionen # In dieser Bibliothek gibt es fünf Funktionen, die wir benutzen können. Grundsätzlich geht es immer darum: anhand von einer Regex und einem String sollen ein oder mehrere Treffer erzielt werden. Die Funktionen machen daraufhin unterschiedliche Dinge mit dem Ergebnis # # **`match()`** und **`search()`**: Diese Funktionen sagen uns, ob *ein Treffer* erzielt wurde und an welchem Ort er sich befindet. # # **`findall()`**: Diese Funktion erstellt eine Liste von allen Treffern. # # **`split()`**: Splittet einen langen String in eine Liste von Substrings, und zwar an den Orten, wo ein Treffer erzielt wurde. # # **`sub()`**: Dort, wo ein Treffer erzielt wurde, wir der Suchstring durch einen anderen Text erstetzt. # ### Markierungszeichen # # Um einen Regex-Suchausdruck zu benutzen, empfiehlt es sich, ein "r" vor den String zu stellen. 
suchausdruck = r"n.ss" # ### search() # Durchsucht den ganzen String, liefert das erste Ergebnis. resultat = re.search(r"n.ss", text, re.IGNORECASE) # Das Ergebnis ist eine Art ja/nein-Antwort mit einigen Details resultat # Es handelt sich um ein so genanntes Match-Objekt: https://docs.python.org/3/library/re.html#match-objects # # Dieses Objekt hat einige Eigenschaften, die wir abfragen können: resultat.group() #Der gefundene String resultat.start() #die Startposition des gefundenen Strings resultat.end() #die Endposition des gefundenen Strings # Die Regex-Funktionen nehmen eine Reihe von so genannten **Flags** an: # - re.IGNORECASE = Gross-/Kleinschreibung ignorieren # - re.MULTILINE = `^` und `$` schlagen bei Zeilenumbrüchen an # - ... siehe auch: https://docs.python.org/3/library/re.html#module-contents # Zum Beispiel können wir so Zeilen suchen, die mit dem Wort "Bockstössigi" anfangen re.search(r"^Bockstössigi", text, re.MULTILINE) # ### match() # Durchsucht nur den *Anfang* des Strings. Am besten gleich wieder vergessen. re.match("a", "abcdef") #gibt ein Ergebnis re.match("b", "abcdef") #gibt kein Ergebnis # ### findall() # Durchsucht den ganzen String, liefert eine Liste aller Treffer. words = re.findall(r"\b[iu]\b", text, re.IGNORECASE) #Alle Wörter, die nur aus "i" oder "u" bestehen words # Mit der Liste kann man alles machen, was man mit Listen halt so machen kann. len(words) # ### split() # Splittet den Text überall dort, wo es einen Treffer gab, liefert das Ergebnis als Liste. Der Treffer selbst wird herausgeschnitten. newlist = re.split(r"\b[iu]\b", text) #Wir splitten überall, wo i- und u-Wörter stehen for line in newlist: print(line) # ### sub() # Ersetzt den Treffer durch einen anderen String. 
# + neuer_text = re.sub(r"\bi\b", "çu", text) #Ersetzt alle i durch çu neuer_text = re.sub(r"\bI\b", "çU", neuer_text) #Grossbuchstaben separat neuer_text = re.sub(r"\bu\b", "i", neuer_text) #Ersetzt alle u durch i neuer_text = re.sub(r"\bU\b", "I", neuer_text) #Grossbuchstaben separat neuer_text = re.sub(r"\bçu\b", "u", neuer_text) #Ersetzt alle çu durch i neuer_text = re.sub(r"\bçU\b", "U", neuer_text) #Grossbuchstaben separat print(neuer_text) # - # Wir können `sub()` auch mit einer Funktion benutzen: re.findall(r"\b\w*ü\w\b", text) # Zum Testen: Alle Wörter mit einem ü drin def replace(match): # Diese Funktion wollen wir drauf anwenden (wir kriegen das Resultat als Match-Objekt geliefert) word = match.group() + " - oh, yeah! - " return word neuer_text = re.sub(r"\b\w*ü\w\b", replace, text) #Hier rufen wir unsere replace-Funktion auf print (neuer_text) # ## Spezielles # ### Capture-Klammern # Die Klammern dienen dazu, nur gewisse Teile einer Regex einzufangen: re.findall(r"\b\w*ss\w*\b ", text) #Hier suchen wir zuerst mal nur alle Wörter, die zwei ss drin haben re.findall(r"\b\w*ss\w*\b \w+", text) #Nun suchen wir alle Wörter, die zwei ss drin haben plus das nächste Wort re.findall(r"\b\w*ss\w*\b (\w+)", text) #Jetzt wollen wir nur das nächste Wort einfangen re.findall(r"(\b\w*ss\w*\b) (\w+)", text) #Jetzt fangen wir die beiden Wörter separat ein # ### Lookahead / Lookbehind # Eine Sonder-Funktionalität: Zeichen, die gefolgt werden von anderen Zeichen (die Klammern sind nicht zu verwechseln mit den Capture-Klammern. re.findall(r"w.(?= Nuss)", text) #Lookahead re.findall(r"(?<=w. )Nuss", text) #Lookbehind # ## Lexikon # # Hier nochmals eine (nicht ganz abschliessende) Liste der Spezialzeichen. 
# # | repetitions | what it does | # |--------|---------| # | `*` | match 0 or more repetitions | # | `+` | match 1 or more repetitions | # | `?` | match 0 or 1 repetitions | # | `{m}` | m specifies the number of repetitions | # | `{m,n}` | m and n specifies a range of repetitions | # | `{m,}` | m specifies the minimum number of repetitions | # # | Shortcut | what it does | # |--------|---------| # | `.` | Match any character except newline | # | `\w` | letters | # | `\W` | not letters | # | `\d` | numbers [0-9] | # | `\D` | not numbers | # | `\s` | whitespace characters: space, tab... | # | `\S` | not space | # | `\b` | Word boundary: spaces, commas, end of line | # | `\B` | Not a word-boundary | # | `^` | match the beginning of string | # | `$` | match the end of string, including `\n` | # # | enclosures | what it does | # |--------|---------| # | `[]` | A defined **set** of characters to search for | # | `()` | A group of characters to search for, can be accessed in the results. | # # | Examples of sets | what it does | # |--------|---------| # | `[aeiou]` | Find any vowel | # | `[Tt]` | Find a lowercase or uppercase t | # | `[0-9]` | Find any number (there is a shortcut for this) | # | `[^0-9]` | Find anything that's not number (there is a shortcut for this) | # | `[13579]` | Find any odd numer | # | `[A-Za-z]` | Find any letter (there is a shortcut for this too) | # | `[+.*]` | Find those actual characters (special characters are canceled in sets) | # # | Lookahead/behind | what it does | # |--------|---------| # | `A(?=B)` | Find A if followed by B | # | `A(?!B)` | Find A if not followed by B | # | `(?<=B)A` | Find A if preceded by B | # | `(?<!B)A` | Find A if not preceded by B | # | `(A)\1` | Backreferencing content of group 1 | # # Übungen # Wir arbeiten nach wie vor mit dem Patent-Ochsner-Song in der Variable `text`. # # Es gibt drei Schwierigkeitsgrade: easy, advanced, pro. 
# # Manchmal müssen Sie regex-Ausdrücke verwenden, manchmal ganz einfach Python-Funktionen verwenden, zB für Listen. # # Googeln ist erlaubt!! # ## Easy # Finde alle b's im Text (Liste erstellen) re.findall(r"b", text) # Finde alle Wörter, die mit b beginnen, unabhängig von Gross-/Kleinschreibung re.findall(r"\bb\w*", text, re.IGNORECASE) # Finde alle Wörter, die ein b enthalten, unabhängig von Gross-/Kleinschreibung re.findall(r"\w*b\w*", text, re.IGNORECASE) # Erstelle eine Liste aller Zeilen im Text re.findall(r"^.+$", text, re.MULTILINE) # eine Liste aller Wörter, die mit Grossbuchstaben beginnen re.findall(r"\b[A-Z]\w*", text) # Eine Liste aller Wörter, die mehr als 8 Buchstaben haben re.findall(r"\b\w{8,}\b", text) # Eine Liste aller Wörte, die einen Doppelvokal enthalten (z.B. "geit") re.findall(r"\w*[aeiouäöü]{2}\w*", text) # ## Advanced # Welches Wort kommt im Text öfter vor: "w. Nuss" oder "Bümpliz"? n_wnuss = len(re.findall(r"w. Nuss", text)) n_buempliz = len(re.findall(r"Bümpliz", text)) if n_wnuss > n_buempliz: print("w. Nuss") elif n_wnuss == n_buempliz: print("gleich oft") else: print("Bümpliz") # An welcher Position (Zeichen-Nr) steht das Wort "Zuckerstock"? re.search(r"Zuckerstock", text).start() # Sortieren Sie die Liedzeilen nach der Länge der Zeile lines = sorted(re.findall(r"^.+$", text, re.MULTILINE),key=len, reverse=True) lines # Welches ist die längste Liedzeile? max(lines, key=len) # Ersetzen Sie "v. Nuss" durch "Venus" print(re.sub(r"w\. 
Nuss", "Venus", text)) # Entfernen Sie alle Wörter, die weniger als 3 Buchstaben lang sind, aus dem Text print(re.sub(r"\b\w{1,2}\b", "", text)) # Entfernen Sie alle Wörter, die weniger als 3 Buchstaben lang sind, sowie alle Sonderzeichen aus dem Text print(re.sub(r"\b\w{1,2}\b|[.,']", "", text)) # + # Entfernen Sie alle Wörter, die weniger als 4 Buchstaben lang sind, sowie alle Sonderzeichen aus dem Text # Dann reduzieren Sie alle doppelten und dreifachen Leerschläge auf einen Leerschlag (die Strophen intakt lassen) # Dann entfernen Sie alle Leerschläge am Anfang von Zeilen. (Achtung, hier braucht es flags=re.MULTILINE) newtext = re.sub(r"\b\w{1,3}\b|[.',]", "", text) newtext = re.sub(r" {2,}", " ", newtext) newtext = re.sub(r"^ ", "", newtext, flags=re.MULTILINE) print(newtext) # - # Entfernen Sie den letzten Buchstaben aus jedem Wort print(re.sub(r"(\w)\b", "", text)) # ## Pro # Konvertieren Sie alles zu Kleinbuchstaben # Dann erstellen Sie eine Liste aller Wörter im Text # Dann sortieren Sie die Liste alphabetisch - jedes Wort soll nur einmal vorkommen woerter = re.findall(r"\b\w+\b", text.lower()) set(sorted(woerter)) # Wie viele unterschiedliche Wörter kommen im Text vor? set(sorted(woerter)) # Welcher Buchstabe steht am häufigsten vor einem "ä" (Gross/Kleinschreibung egal)? buchstaben = re.findall(r"\w(?=ä)", text.lower()) max(buchstaben, key=buchstaben.count) # Liste aller Buchstaben, die nochmals vom selben Buchstaben gefolgt werden re.findall(r"(\w)\1", text) # + # Ersetzen Sie sämtliche Doppelbuchstaben (zB "aa") durch einfache Buchstaben ("a) # Achtung: Sie müssen eine separate (Lambda-)Funktion dafür schreiben print(re.sub(r"(\w)\1", lambda m: m.group()[0], text)) # - # ### Super-Pro 1 # Wenn wir mehr als eine Gruppe bilden, können wir die einzelnen Gruppen mit `group(1)`, `group(2)` etc. abrufen. 
match = re.search(r"(\w)(\w+)(\w)", text)  # finds a word with at least 3 letters, captures 3 groups

match.group()  # the whole matched content

match.group(1)  # only the content of the first subgroup

match.group(3)  # only the content of the third subgroup

# Swap the first and last letter in every word with at least 3 letters
# Note: you need to write a separate (lambda) function for this
def replace(m):
    # group(1) = first letter, group(2) = middle part, group(3) = last letter
    return m.group(3) + m.group(2) + m.group(1)

print(re.sub(r"(\w)(\w+)(\w)", replace, text))

# ### Super-Pro 2
# Shuffle the words on each line. (The order of the lines stays intact)
# Example:
# w dür D Bümpliz d Nuss Strass vo geit
# flüchtig liecht u wie Gas nes ...

# Tip 1: here is a shuffle function
from random import shuffle
my_list = ["a", "b", "c", "d", "e"]
shuffle(my_list)
my_list

# Tip 2: use list comprehensions (heavily)
[element.upper() for element in my_list]

# Tip 3: use .join()
" ".join(my_list)

# Takes about 3-5 lines of code...
lines = re.findall(r"^.*$", text, re.MULTILINE)
# one list of words per song line
word_lines = [re.findall(r"\b\w+\b", line) for line in lines]
# shuffle() works in place and returns None, so use a plain loop for the
# side effect instead of a throwaway list comprehension
for word_line in word_lines:
    shuffle(word_line)
lines = [" ".join(word_line) for word_line in word_lines]
print("\n".join(lines))
12 Pandas Teil 4/Regex in Python L.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # **Fourier Transforms and Plane-Wave Expansions** # # **Authors:** <NAME>, <NAME>, <NAME> and <NAME> # # <i class="fa fa-home fa-2x"></i><a href="../index.ipynb" style="font-size: 20px"> Go back to index</a> # # **Source code:** https://github.com/osscar-org/quantum-mechanics/blob/master/notebook/band-theory/FFT_and_planewaves.ipynb # # This notebook shows interactively how discrete Fourier series can represent a function with a limited amount of plane-wave components. A common way to represent a wavefunction when solving the Kohn-Sham equations is via its expansion in plane waves. # This notebook focuses on a simple example (much simpler than a complete DFT calculation) in order to help the reader focus on the essential aspects of such a representation. # # <hr style="height:1px;border:none;color:#cccccc;background-color:#cccccc;" /> # ## **Goals** # # * Understand how a plane-wave basis is directly related to a Fourier series. # * Learn how to decompose a function using a FFT algorithm. # * Examine how a function is reconstructed from a finite (possibly not complete) set of plane waves. # * Understand the impact of the basis-set size on the convergence of the integral of the reconstructed function. # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## **Background theory** # # [More on the background theory.](./theory/theory_fft.ipynb) # - # ## **Tasks and exercises** # # <ol> # <li>Prove that plane waves form an orthogonal basis set. # <details> # <summary style="color: red">Solution</summary> # # We have to prove that $\langle w_N^j, w_N^k \rangle =\langle w_N^k, w_N^j \rangle= 0 $ for integer $j \neq k$. 
We can simply carry out the inner product # $$ \langle w_N^j, w_N^k \rangle = \langle w_N^k, w_N^j \rangle = \int_{-\pi}^{\pi} e^{ijx} e^{-ikx}dx = \int_{-\pi}^{\pi} e^{i(j-k)x} dx = \frac 1 {i(j-k)} [e^{i(j-k)x}]_{-\pi}^{\pi} = \begin{cases} 0 & \text{if j $\neq$ k} \\ 2\pi & \text{if j = k}\end{cases}$$ # </details></li> # # <li>How does the number of plane waves affect the approximation of the target function? Will a function with more "oscillations" require more components to be accurately represented? # <details> # <summary style="color: red">Solution</summary> # # Move the slider to try different numbers of Fourier components. Observe if the FFT interpolation approximates well the original function and if the integral of the square modulus is close to the convergence value. You can also change the objective function by the drop-down menu. Generally, more sampling yields more accurate representation. # For functions with more oscillations (higher frequency components), more Fourier components are needed to reach the same level of accuracy. # </details></li> # # <li>How can we reduce the number of plane waves needed in a DFT calculation, without sacrificing the accuracy of the representation? # <details> # <summary style="color: red">Solution</summary> # # Wavefunctions have the strongest oscillations near nucleus, and a very large number of plane waves is needed to accurately represent this region. Fortunately, core electrons are less relevant in chemical bonding, so we can simplify the problem and obtain a much smoother (pseudo)wavefunction by excluding the core electrons. To learn more about this approach, please check our <a href="./pseudopotential.ipynb">notebook on pseudopotentials</a>. In general, the combination of pseudopotentials and a plane-wave expansion enables fast and accurate calculation of materials and their properties. # </details></li> # # <li>In a DFT calculation, how can we control the number of plane waves used in the basis set? 
# <details> # <summary style="color: red">Solution</summary> # # The kinetic energy of a plane wave of momentum $\mathbf G$ is given by $\frac {\hbar^2}{2m} \lvert \mathbf G \rvert^2$. By setting a cutoff energy, we can limit the size of the plane-wave basis set. The value of the cutoff depends on the system under investigation and the pseudopotential used, and convergence tests are normally required. To have a suggestion of a converged cutoff value based on the choice of pseudopotentials, you can check the <a href="https://www.materialscloud.org/discover/sssp/table/precision">standard solid-state pseudopotentials (SSSP) library</a> on Materials Cloud. # </details></li> # </ol> # <hr style="height:1px;border:none;color:#cccccc;background-color:#cccccc;" /> # # ## Interactive visualization # (be patient, it might take a few seconds to load) import ipywidgets as ipw import numpy as np from matplotlib.ticker import MaxNLocator import matplotlib.pyplot as plt import scipy.fft as fft # %matplotlib widget plt.rcParams['figure.autolayout'] = 'True' # turn on tight layout globally # + # target functions def periodic_f(x): # smooth return np.exp(-((x-1)/0.15)**2) + 0.5 * np.exp(-((x-1.2)/0.1)**2) + 0.8 * np.exp(-((x-0.8)/0.1)**2) def periodic_f2(x): # less smooth return np.exp(-((x-1)/0.05)**2) -0.5*np.exp(-((x-1)/0.15)**2) + 0.5 * np.exp(-((x-1.2)/0.1)**2) + 0.8 * np.exp(-((x-0.8)/0.1)**2) # + # plot x range x = np.linspace(0, 2, 201, endpoint=False) x_range = 2 # widgets N_slider = ipw.IntSlider(description=r"$N_{\text{fft}}$", min=6, max=40, value=6, step=1, continuous_update=False, layout={'margin':'0px 15px 0px 15px'}) func_dropdown=ipw.Dropdown(description="Function", options=[("Smooth", "periodic_f"), ("Less smooth", "periodic_f2")], layout={ 'margin':'0px 15px 0px 15px'}) reset_button = ipw.Button(description='Show all', icon='redo', style={'description_width': 'initial'}, layout={'width':'220px', 'margin':'0px 20px 0px 60px'}) hl_label = ipw.Label(value='(click on 
a FFT component to select it)') # + def compute_resampled(N_fft, x_range=2., function=periodic_f): """ Compute FFT series with given number of sampling and target functions. """ # Pick an even number to have zero x_fft = np.linspace(0, x_range, N_fft+1, endpoint=False)# remove last point as it's the same as the first one by PBC y_fft = function(x_fft) # Fourier resampling renormalization = len(x)/(len(y_fft)) y_resamp = fft.irfft(fft.rfft(y_fft), len(x)) * renormalization return x_fft, y_fft, y_resamp def get_integral_resampled(N_fft, x_range=2., function=periodic_f): """ Compute the integral of the square modulus of the function. """ x_fft, y_fft, _ = compute_resampled(N_fft, x_range, function=function) return (y_fft**2).sum() * (x_fft[1] - x_fft[0]) def plot_reconstruct(y_fft): """ Plot Fourier expansions """ ax2.clear() coeffs = fft.rfft(y_fft) N_rfft = 0 # number of fft expansions for coeff, freq_int in list(zip(coeffs, range(len(coeffs)))): freq = 2 * np.pi * freq_int / x_range norm = 1 / (len(y_fft)) * 2 if freq_int == 0: # The zero-frequency does not have a factor 2 because it's not a cosine # summing the two complex conjugates, but just a constant norm /= 2 this_frequency_contrib = ( coeff.real * np.cos(freq * x) - coeff.imag * np.sin(freq * x) ) * norm ax2.plot(x, this_frequency_contrib + N_rfft) # plot components with vertical shift for visibility # ax2.plot(x, this_frequency_contrib) # no shift N_rfft += 1 ax2.axes.yaxis.set_ticks([]) # remove y ticks ax2.set_title('Expansion Components') CONVERGE_SMOOTH = get_integral_resampled(N_fft=200, function=periodic_f) CONVERGE_ROUGH = get_integral_resampled(N_fft=200, function=periodic_f2) def plot_integral(func_name, func): """ plot sum of the square modulus (integral) """ ax3.clear() converged_integral = CONVERGE_SMOOTH if func_name == "periodic_f" else CONVERGE_ROUGH ax3.axhline(converged_integral, color='tab:red') integrals = [] for N in range(6, 41): integrals.append((N, get_integral_resampled(N, 
function=func))) integrals_x, integrals_y = np.array(integrals).T ax3.plot(integrals_x, integrals_y, 'o--', alpha=0.8) ax3.plot(integrals_x[N_fft-6], integrals_y[N_fft-6],'ro', markersize=11, label='current sampling') ax3.set_xlabel('number of components') ax3.set_ylabel("Integral of square modulus") ax3.set_title("Convergence of FFT") ax3.set_xlim(6,40) ax3.xaxis.set_major_locator(MaxNLocator(integer=True)) ax3.legend(loc='best') def plot_sampling(func, x_fft, y_fft, y_resamp): ax1.clear() ax1.set_title('FFT interpolation') x_fft, y_fft, y_resamp = compute_resampled(N_slider.value, function=func) ax1.plot(x, func(x), 'k-', label='target') ax1.plot(x_fft, y_fft, 'o', label='sampling') ax1.fill_between(x, y_resamp, 0,ec='red', fc='yellow', label='FFT') ax1.legend(loc='best') ax1.set_ylim(-0.35,1.25) def on_plot_click(event): """handle mouse click event on expansion component plot""" # line = event.artist # xdata = line.get_xdata() # ydata = line.get_ydata() if event.inaxes != ax2: return for i in range(len(ax2.lines)): ax2.lines[i].set_alpha(0.1) ax2.lines[i].set_linewidth(1.1) # get the id of the line2D object which is vertically closest to the mouse clicking position id_line = min(enumerate(ax2.lines), key= lambda line: abs(np.mean(line[1].get_ydata())-event.ydata))[0] ax2.lines[id_line].set_alpha(1) ax2.lines[id_line].set_linewidth(2.0) plot_sampling(func, x_fft, y_fft, y_resamp) ax1.fill_between(ax2.lines[id_line].get_xdata(), ax2.lines[id_line].get_ydata()-id_line, 0, ec='tab:blue', fc='tab:green', alpha=0.5,label='component') ax1.legend() def plot_update(change): # get current widget value global N_fft, x_fft, y_fft, y_resamp, func N_fft = N_slider.value func = globals()[func_dropdown.value] # get the function by function name x_fft, y_fft, y_resamp = compute_resampled(N_fft, function=func) # update sampling plot plot_sampling(func, x_fft, y_fft, y_resamp) # update reconstruct plot plot_reconstruct(y_fft) # udpate square modulus plot 
plot_integral(func_dropdown.value, func) N_slider.observe(plot_update, names='value', type='change') func_dropdown.observe(plot_update, names='value', type='change') reset_button.on_click(plot_update) # + # define layout by gridspec fig = plt.figure(constrained_layout=True, figsize=(7, 6)) gs = fig.add_gridspec(3,4) ax1 = fig.add_subplot(gs[0:2,0:2]) ax2 = fig.add_subplot(gs[0:2,2:4]) ax3 = fig.add_subplot(gs[-1,:]) # interactive plot 2 for line picking cid = fig.canvas.mpl_connect('button_press_event', on_plot_click) # show plots plot_update(None) plt.show() # - # display widgets display(ipw.HBox([N_slider, func_dropdown])) display(ipw.HBox([reset_button, hl_label])) # ## Legend # # The target function, sampling points and the reconstructed function are shown in the top left plot. The real part (cosine functions) and the constant term of the discrete Fourier series are shown in the top right panel. # # Note that the components are shifted vertically for clarity. The integral of the square of the functions reconstructed from truncated Fourier series with different numbers of plane waves $N_{\text{fft}}$ is shown in the bottom panel, where the current choice of sampling is indicated with a red dot. The converged value is also shown with a red horizontal line, obtained with a large number (200) of FFT components. # # The number of FFT components $N_{\text{fft}}$ can be set by the slider. Two target functions can be chosen from the drop-down menu. # # By clicking one of the expansions in the top-right panel, the contribution of that component will be shown in the top left panel. Click the reset button to display all expansion coefficients again.
notebook/band-theory/FFT_and_planewaves.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
# %config InlineBackend.figure_format = "retina"
from matplotlib import rcParams

rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

session = tf.InteractiveSession()

from exoplanet import transit
from batman import _quadratic_ld

# +
# Benchmark the TensorFlow transit-depth op against batman's C implementation
# for increasingly long input arrays.
T = tf.float32
c1 = tf.constant(0.5, dtype=T)
c2 = tf.constant(0.5, dtype=T)
ld = transit.QuadraticLimbDarkening(c1, c2)

# Only the three largest sizes are timed here; the full sweep 2**10..2**21
# is kept below (commented out) for reference.
# nums = 2**np.arange(10, 22)
nums = 2**np.arange(19, 22)

times = []
bm_times = []
for N in nums:
    r_ref = tf.constant(0.1 + np.zeros(N), dtype=T)
    # impact parameters spanning 0..(1 + r), i.e. full coverage of the transit
    z_ref = (1 + r_ref) * tf.constant(np.linspace(0.0, 1.0, N), dtype=T)
    delta = transit.transit_depth(ld, z_ref, r_ref, n_integrate=500)
    session.run(delta)  # warm-up run so graph construction isn't timed
    # res = %timeit -o session.run(delta)
    times.append(res.best)
    args = [z_ref.eval(), 0.1, ld.c1.eval(), ld.c2.eval(), 1]
    # res = %timeit -o _quadratic_ld._quadratic_ld(*args)
    bm_times.append(res.best)
    print(N, times[-1], bm_times[-1])
# -

# Log-log comparison of best runtimes: exoplanet vs. batman
plt.loglog(nums, times, ".-")
plt.loglog(nums, bm_times, ".-")
paper/notebooks/benchmark.ipynb