code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from math import pi, pow from math import factorial as f # General python stuff assert 2 == 3-1 result = 0 for i in range(10): result += 1 assert result == 10 a = pi * (12.34/2)**2 assert a == 119.59697657024448 x = 3 y = 3 - (pow(x, 3)/f(3)) + (pow(x, 5)/f(5)) - (pow(x, 7)/f(7)) + (pow(x, 9)/f(9)) - (pow(x, 11)/f(11)) + (pow(x, 13)/f(13)) assert y == 0.14113062718531458
python-notebook/tests/notebooks/python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Global Imports # %matplotlib inline import matplotlib.pyplot as plt from matplotlib.pyplot import subplots # ### External Package Imports import os as os import pickle as pickle import pandas as pd # ### Module Imports # Here I am using a few of my own packages, they are availible on Github under [__theandygross__](https://github.com/theandygross) and should all be instalable by <code>python setup.py</code>. # + from Stats.Scipy import * from Stats.Survival import * from Helpers.Pandas import * from Helpers.LinAlg import * from Figures.FigureHelpers import * from Figures.Pandas import * from Figures.Boxplots import * from Figures.Regression import * #from Figures.Survival import draw_survival_curve, survival_and_stats #from Figures.Survival import draw_survival_curves #from Figures.Survival import survival_stat_plot # - import Data.Firehose as FH from Data.Containers import get_run # ### Import Global Parameters # * These need to be changed before you will be able to sucessfully run this code import NotebookImport from Global_Parameters import * # ### Tweaking Display Parameters pd.set_option('precision', 3) pd.set_option('display.width', 300) plt.rcParams['font.size'] = 12 '''Color schemes for paper taken from http://colorbrewer2.org/''' colors = plt.rcParams['axes.color_cycle'] colors_st = ['#CA0020', '#F4A582', '#92C5DE', '#0571B0'] colors_th = ['#E66101', '#FDB863', '#B2ABD2', '#5E3C99'] import seaborn as sns sns.set_context('paper',font_scale=1.5) sns.set_style('white') # ### Read in All of the Expression Data # This reads in data that was pre-processed in the [./Preprocessing/init_RNA](../Notebooks/init_RNA.ipynb) notebook. 
codes = pd.read_hdf(RNA_SUBREAD_STORE, 'codes') matched_tn = pd.read_hdf(RNA_SUBREAD_STORE, 'matched_tn') rna_df = pd.read_hdf(RNA_SUBREAD_STORE, 'all_rna') data_portal = pd.read_hdf(RNA_STORE, 'matched_tn') genes = data_portal.index.intersection(matched_tn.index) pts = data_portal.columns.intersection(matched_tn.columns) rna_df = rna_df.ix[genes] matched_tn = matched_tn.ix[genes, pts] # ### Read in Gene-Sets for GSEA # + from Data.Annotations import unstack_geneset_csv gene_sets = unstack_geneset_csv(GENE_SETS) gene_sets = gene_sets.ix[rna_df.index].fillna(0) # - # Initialize function for calling model-based gene set enrichment # + from rpy2 import robjects from rpy2.robjects import pandas2ri pandas2ri.activate() mgsa = robjects.packages.importr('mgsa') # - gs_r = robjects.ListVector({i: robjects.StrVector(list(ti(g>0))) for i,g in gene_sets.iteritems()}) def run_mgsa(vec): v = robjects.r.c(*ti(vec)) r = mgsa.mgsa(v, gs_r) res = pandas2ri.ri2pandas(mgsa.setsResults(r)) return res # ### Function Tweaks # Running the binomial test across 450k probes in the same test space, we rerun the same test a lot. Here I memoize the function to cache results and not recompute them. This eats up a couple GB of memory but should be reasonable. # + from scipy.stats import binom_test def memoize(f): memo = {} def helper(x,y,z): if (x,y,z) not in memo: memo[(x,y,z)] = f(x,y,z) return memo[(x,y,z)] return helper binom_test_mem = memoize(binom_test) def binomial_test_screen(df, fc=1.5, p=.5): """ Run a binomial test on a DataFrame. df: DataFrame of measurements. Should have a multi-index with subjects on the first level and tissue type ('01' or '11') on the second level. 
fc: Fold-chance cutoff to use """ a, b = df.xs('01', 1, 1), df.xs('11', 1, 1) dx = a - b dx = dx[dx.abs() > np.log2(fc)] n = dx.count(1) counts = (dx > 0).sum(1) cn = pd.concat([counts, n], 1) cn = cn[cn.sum(1) > 0] b_test = cn.apply(lambda s: binom_test_mem(s[0], s[1], p), axis=1) dist = (1.*cn[0] / cn[1]) tab = pd.concat([cn[0], cn[1], dist, b_test], keys=['num_ox', 'num_dx', 'frac', 'p'], axis=1) return tab # - # Added linewidth and number of bins arguments. This should get pushed eventually. def draw_dist(vec, split=None, ax=None, legend=True, colors=None, lw=2, bins=300): """ Draw a smooth distribution from data with an optional splitting factor. """ _, ax = init_ax(ax) if split is None: split = pd.Series('s', index=vec.index) colors = {'s': colors} if colors is not None else None for l,v in vec.groupby(split): if colors is None: smooth_dist(v, bins=bins).plot(label=l, lw=lw, ax=ax) else: smooth_dist(v, bins=bins).plot(label=l, lw=lw, ax=ax, color=colors[l]) if legend and len(split.unique()) > 1: ax.legend(loc='upper left', frameon=False) # Some helper functions for fast calculation of odds ratios on matricies. # + def odds_ratio_df(a,b): a = a.astype(int) b = b.astype(int) flip = lambda v: (v == 0).astype(int) a11 = (a.add(b) == 2).sum(axis=1) a10 = (a.add(flip(b)) == 2).sum(axis=1) a01 = (flip(a).add(b) == 2).sum(axis=1) a00 = (flip(a).add(flip(b)) == 2).sum(axis=1) odds_ratio = (1.*a11 * a00) / (1.*a10 * a01) df = pd.concat([a00, a01, a10, a11], axis=1, keys=['00','01','10','11']) return odds_ratio, df def fet(s): odds, p = stats.fisher_exact([[s['00'],s['01']], [s['10'],s['11']]]) return p # - # #### filter_pathway_hits def filter_pathway_hits(hits, gs, cutoff=.00001): ''' Takes a vector of p-values and a DataFrame of binary defined gene-sets. Uses the ordering defined by hits to do a greedy filtering on the gene sets. 
''' l = [hits.index[0]] for gg in hits.index: flag = 0 for g2 in l: if gg in l: flag = 1 break elif (chi2_cont_test(gs[gg], gs[g2])['p'] < cutoff): flag = 1 break if flag == 0: l.append(gg) hits_filtered = hits.ix[l] return hits_filtered
Notebooks/Imports.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this notebook , we will train a neural network model that generates embeddings for URLs , based on a contrastive learning framework. The idea is to minimize the inner product between URLs having the same label and increasing the distance between URLs having different labels. # # First we import librairies. # + id="GigSpKQ5DDqV" import pandas as pd import os import numpy as np import re import torch import random import torch.nn as nn from sklearn.metrics import accuracy_score device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') pd.set_option("display.max_colwidth",None) # + id="zAYM5P3YDPeb" data_path = '/content/drive/MyDrive/challenges/adot/data' path_to_stopwords = '/content/drive/MyDrive/challenges/adot/stop_words_french.txt' # - # Next we load the data. # + id="9z6DNwjiDJNJ" df_data = pd.DataFrame(columns=['url' , 'target' , 'day']) for filename in os.listdir(data_path) : if filename.endswith('.parquet') : df_data = pd.concat([df_data , pd.read_parquet(os.path.join(data_path , filename), engine='pyarrow')] , ignore_index=True) # + [markdown] id="K0xIQ0icEe3A" # # Dataset preparation # - # Here the steps for preprocessing are the same for the fasttext embedding model. We add another step which is the elimination of rows where the label is unique in the dataset, because in the contrastive learning framework we need pairs that share the same label, so each label must be present at least in two rows. 
# + [markdown] id="irND8_ORGdkf" # ## Labels cleaning # + id="dVMK2H65sOgT" from collections import defaultdict dict_occ = defaultdict(int) for label_list in df_data['target'] : for label in label_list.astype('int64') : dict_occ[label]+=1 single_labels = [k for k,v in dict_occ.items() if v < 2 ] # + id="eGQH7HkxGfWe" def get_mapping_target(df , single_labels) : """ Returns a dict where the keys are old targets and the values are new targets that have the property being successive. """ full_labels = [] for index , row in df.iterrows() : labels = [label for label in row['target'].astype('int64') if label not in single_labels] full_labels += labels full_labels = np.array(full_labels) dict_mapping = dict(zip(np.unique(full_labels) , range(len(np.unique(full_labels))))) return dict_mapping dict_mapping = get_mapping_target(df_data,single_labels) # + [markdown] id="FKk5z1o_IalU" # Now we create a new column in df_data where for each row we get its new targets based on dict_mapping. # + id="W68C_p-HIU8u" def get_new_target(target , dict_mapping) : labels = target.astype('int64') new_label = [dict_mapping[label] for label in labels if label not in single_labels] return new_label df_data['labels'] = df_data['target'].apply(lambda x : get_new_target(x , dict_mapping)) # + colab={"base_uri": "https://localhost:8080/"} id="e47sDrlEem01" outputId="f3ec9b47-02e5-4017-e78d-ce06a71117af" n_classes = len(dict_mapping) print('the number of classes is :', n_classes) # + [markdown] id="wcuyoAb3KVxv" # ## URL preprocessing # + id="AvHgu2EpQiDg" import tldextract from urllib.parse import urlparse from nltk.stem.snowball import FrenchStemmer stemmer = FrenchStemmer() def removing_condition(token , stopwords) : cond = any(c.isdigit() for c in token) or len(token) <=2 or token in stopwords return not(cond) def parse_url (url , stopwords_list) : domain_name = tldextract.extract(url)[1] full_path = urlparse(url).path first_tokens = re.split('[- _ % : , / \. 
\+ ]', full_path) tokens = [] for token in first_tokens : tokens+= re.split('\d+' , token) # remove tokens with numbers tokens = [ stemmer.stem(token.lower()) for token in tokens if removing_condition(token.lower() , stopwords_list) ] tokens = [token for token in tokens if removing_condition(token , stopwords_list)] # return unique elements final_sentence = list(dict.fromkeys([domain_name] + tokens)) return " ".join(final_sentence) # + id="2DB-tJoEmwyC" with open (path_to_stopwords , 'r') as f : lines = f.readlines() lines = [l.replace('\n','') for l in lines] stopwords_list = lines + ['search' , 'article' , 'html' , 'htm' , 'about' , 'fr' , 'id' , 'text', 'lid' , 'pgn' , 'pgs' , 'ms' , 'vhc' , 've' , 'cmp' , 'aa' , 'xca' , 'pr' , 'false'] # + id="8xjH92GCM5Zi" df_data['text_url'] = df_data['url'].apply(lambda x : parse_url(x , stopwords_list)) # + [markdown] id="YZEgASCQbtWL" # Next we split to train and test subsets. # + id="gH7iBmZ2b27F" from sklearn.model_selection import train_test_split df_train, df_test = train_test_split( df_data , test_size=0.2 ) df_train.reset_index(inplace = True , drop = True) df_test.reset_index(inplace = True , drop = True) # + [markdown] id="G4A91rrq-tzg" # We also define label to text dict based on the training data. This ensures sampling URLs having the same label in the trainingloader. # + id="5N9aWG3J-vmm" labels_to_text = defaultdict(list) for idx , rows in df_train.iterrows() : for label in rows['labels'] : labels_to_text[label].append(rows['text_url']) labels_to_text = {k:list(set(v)) for k,v in labels_to_text.items() if len(list(set(v)))>1} # + [markdown] id="f5R8WKcFfB5m" # # define torch dataset and dataloader # # + id="oWAdP3xEfLDt" from torch.utils.data import Dataset , DataLoader class CustomDataset(Dataset): def __init__(self , df , labels_to_text ) : """ This class creates a torch dataset. 
""" self.labels_to_text = labels_to_text self.keys = list(self.labels_to_text.keys()) self.df = df self.n_classes = n_classes def __len__(self): return len(self.labels_to_text) def __getitem__(self, idx): """ sample a pair of URLs sharing the same label. The label corresponds to the rank "idx" in the keys list. """ key = self.keys[idx] anchor , positive = random.sample(self.labels_to_text[key] , k = 2 ) labels_anchor = self.df[self.df.text_url == anchor]['labels'].values[0] labels_anchor = ' '.join([str(tar) for tar in labels_anchor]) labels_pos = self.df[self.df.text_url == positive]['labels'].values[0] labels_pos = ' '.join([str(tar) for tar in labels_pos]) return (anchor , positive , labels_anchor , labels_pos) # - # In the contrastive setting that we will be implementing , we consider an anchor url $a_i$ , a positive url $p_i$ that share the same label with $a_i$ , and negative urls $n_i^1 , .... , n_i^m$ that will have different labels than $a_i$. # For a given URL anchor $a_i$ , the negative urls are the other positives $p_1 , .... , p_n$ except $p_i$ associated with other anchors. This setting is called in-batch setting , and it has the advantage of being fast and simple to implement. However , we need to ensure the negatives associated with $a_i$ do not share labels with it , hence the next collate function. 
# + id="ixQCQpPmgKT3" def collate_fn(data) : used_anchors = set() used_positives = set() anchors = [] positives = [] for anchor , positive , labels_anchor , labels_pos in data : labels_anchor = set([int(val) for val in labels_anchor.split()]) labels_pos = set([int(val) for val in labels_pos.split()]) inters_pos_anchors = set(labels_pos).intersection(used_anchors) inters_anc_positives = set(labels_anchor).intersection(used_positives) if len(inters_pos_anchors) > 0 or len(inters_anc_positives) > 0 : continue anchors.append(anchor) positives.append(positive) used_anchors = used_anchors.union(labels_anchor) used_positives = used_positives.union(labels_pos) return anchors , positives def get_loader (df_train , labels_to_text , batch_size = 32 ) : train_dataset = CustomDataset(df_train , labels_to_text) trainloader = DataLoader (train_dataset, batch_size=batch_size, collate_fn = collate_fn , shuffle = True ) return trainloader # + [markdown] id="cIqtfQmce_FY" # # define model # + [markdown] id="lFLof13dhaQj" # First we create our vocabulary # + id="1aC97cV-h1-P" def get_vocab(training_data) : word_to_ix = {'pad' : 0 } for sent in training_data: for word in sent.split() : if word not in word_to_ix: # word has not been assigned an index yet word_to_ix[word] = len(word_to_ix) return word_to_ix vocab = get_vocab (df_train['text_url']) # + colab={"base_uri": "https://localhost:8080/"} id="qS44PfV3MFRo" outputId="bf22d3b0-3200-4966-a977-dc9a59a15c43" len(vocab) # + id="0YMR6aqUiv2d" def get_words_indices(sent , vocab , train) : if train : return [vocab[word] for word in sent] else : ids = [] n_oov , total = 0 , 0 for word in sent : total+=1 if word not in vocab.keys() : ids.append(random.choice(list(vocab.values()))) else : ids.append(vocab[word]) return ids def encode (list_sentences , train , vocab) : """ encode a list of sentences to its indices in the vocab. 
""" max_length = max([len(sent.split()) for sent in list_sentences]) batch_inputs = torch.empty((len(list_sentences) , max_length) , dtype = torch.int64) list_lengths = [] for p,sent in enumerate(list_sentences) : split_sent = list(reversed(sent.split())) encodings_sentence = get_words_indices(split_sent , vocab , train) + [vocab['pad']] * (max_length - len(split_sent)) batch_inputs[p] = torch.tensor(encodings_sentence) list_lengths.append(len(split_sent)) return batch_inputs # + [markdown] id="a1XcrTey1dUg" # # modeling # + id="mQvGRk_Giv7z" class EmbeddingModule(nn.Module): """ pytorch nn module for embedding. """ def __init__(self, embedding_dim, vocab_size , sentence_embed_dim = 100 ): super(EmbeddingModule, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, embedding_dim , padding_idx=0) self.fc = nn.Linear(embedding_dim , sentence_embed_dim) self.tanh = nn.Tanh() def forward(self, inputs_ ): embeds = self.word_embeddings(inputs_) embeds = torch.mean(embeds , dim = 1) embeds = self.tanh(self.fc(embeds)) return embeds # + colab={"base_uri": "https://localhost:8080/"} id="xAIg6JgY4oLh" outputId="5b6ad260-a894-4e9a-d3ab-5559100893b7" embedding_dim = 512 model = EmbeddingModule(embedding_dim=embedding_dim , vocab_size=len(vocab)) model.to(device) # + id="DyS6Tkbt5ATs" learning_rate = 1e-3 optimizer = torch.optim.Adam(model.parameters() , lr=learning_rate ) # + id="QRv8_TF55Lo-" from tqdm.notebook import tqdm def compute_loss(embeddings_anchors , embeddings_pos) : """ compute contrastive loss """ sim_matrix = torch.cdist(embeddings_anchors, embeddings_pos) loss = - torch.log( torch.exp(torch.diag(sim_matrix))/ torch.sum (torch.exp(sim_matrix) , dim = 1)) mean_loss = torch.mean(loss) return mean_loss def train (loader , vocab ) : """ training function """ model.train() total_loss = 0 for batch_idx , batch in tqdm(enumerate(loader) , total = len(loader)) : anchors , positives = batch anchors = encode(anchors , vocab = vocab , train = True ).to(device) 
positives = encode(positives , vocab = vocab , train = True ).to(device) optimizer.zero_grad() embeddings_anchors = model(anchors) embeddings_positives = model(positives) loss = compute_loss (embeddings_anchors , embeddings_positives) loss.backward() optimizer.step() total_loss += float(loss) print('loss : {} '.format(total_loss / (batch_idx + 1))) # + [markdown] id="vgTLtnUNglUy" # # Main # + id="BDOgP4s3gmqX" trainloader = get_loader(df_train , labels_to_text=labels_to_text, batch_size = 128) # + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["81f6f95bb07a41e48c5a89c6293c61ca", "86a9068b8b8141699e87a94eec380495", "22008ec4230b4515a33e0f1a8f584d01", "af04f0d599934d34a06976ce095ceb69", "<KEY>", "baca6556bfc8481184557c96efe43e24", "<KEY>", "<KEY>", "<KEY>", "0beb8a1d425442068ecb9f76cef9b65c", "<KEY>", "25dca254cdd34b2e8387d5470aa1aa3c", "02008230016e45778ed1d02b31de45c4", "<KEY>", "<KEY>", "9d2e613072e6470db248000901243e69", "<KEY>", "<KEY>", "0b8667d5e0db466eabb9df58555d0183", "<KEY>", "7efc0853c39e466c82346fb0a28505ca", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "b2217c5d36ce4bd1b9558a343d80e278", "<KEY>", "b8466e0f95ef4c9c9027a62ad5f1043c", "<KEY>", "f22d9548335548b8b563459a202d3d35", "<KEY>", "21e31252e68d45e59d5ae448fed901e1", "<KEY>", "<KEY>", "fbecca60346641bba590b70cdb4330ca", "763ac1713565463f965ca44d12a59133", "<KEY>", "dd265817fcb54c8b93b0288fe1a42ab7", "d0ffad7db74a4e3ba7cbe70c5e68221d", "<KEY>", "<KEY>", "2c0b6e7da89d47c48869cfafbb8ad24a", "<KEY>", "2b58cdad2cce4a31b68e0a01458abe2f", "<KEY>", "b9d9c42233754ab49cf32fded511986a", "<KEY>", "efe9f8cbe5ea460cb97bca73de21d951", "0c0d5333e4ce4f1fbbf870581f5aeea4", "74ec294f12ec4223bce1e29c72e01404", "af3be86e91344adc9ab5750b378c1cde", "92edc13e2ff6448e8a80577a16137f20", "eca2aee30d244ed883af889604cec0ad", "<KEY>", "f0f02090de2b4c17a14a91f8c9a14d15", "d5eb1b10a3d647359080e8637e227f0a", "37192c239f8f4ad189354ab3e1721c31", "<KEY>", "f9e4fca73ecd48e0b3822570eaf7ecbd", "<KEY>", 
"<KEY>", "70bf7bd6b9e14cc181c4c641a30404da", "<KEY>", "0644d78d994c4191a7d6964a7ad739b9", "<KEY>", "07182c996d764191a29dd994829d782b", "9f3800dad25a4c4bab274db2becfd144", "28aa3965bede458fbe705cb9bb308612", "531a57a985af4043806efce71f916371", "f6ed0137a212413d90eaf3711fb2b495", "acffda802e894aa7ba3db19e7231051d", "538d23d66d1349b6ab7433f21aff1f9b", "48bee3aecdb5484ea1126d28465c89d1", "<KEY>", "9ff8af0c4c4845ce9e6198e5f9eb7ec1", "839edc55d63a49d49a782c56d28d3245", "<KEY>", "36321a0cdc90470ea606070ddc7335ff", "9ff83a9a799f4a3ba4c8f1c2ac5b422e", "<KEY>", "f52b01d9e1d84e18a95d3b388b996aaa", "e7334b01baf64889ad45db5695884299", "e20800d4c6924912951dbe9ffcb101a4", "b6ef5aef769744ef9a2209b5db3367dd", "16285f0fc59049359075925f107ea28d", "5fdb43ed29b240ab8d446ddd4f25dd0e", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "38d65abebed14bf9aa28a9d421abe23f", "73a56bf115314ea89553d9c3e5d82d9d", "066fd6647e554a0f977557232ca75d35", "ac2617de19bb449eac81c2e7508e01aa", "<KEY>", "<KEY>", "<KEY>", "76d047c2db664afcbe0a488c0ad110ff", "07a8dbb046c5471ca7ca4a61270dc0d3", "<KEY>", "6a1225e07f3345348484b957e7b79450", "<KEY>", "a7389088202c4e43b9a7ead6645d6cdb", "<KEY>", "<KEY>", "8f02e6b32eff42b1b3546480b99dae1b", "<KEY>", "a1b3a75d139e45c9aca2893a5431c82c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "908be80bb96b409f8af72a3d51c29a19", "<KEY>", "<KEY>", "<KEY>", "440918b3b9e6482d936e388af1d28799", "<KEY>", "<KEY>", "<KEY>", "fd332a66522848c1823725e07aabbd9c", "fe60dfa77ff74064a7071e52a7bb689c", "63120f1f61ff47c29dc8e8834c6426c5", "<KEY>", "<KEY>", "1497b84843534e868e0dd28844797e57", "ed62e43ebdb1415abb213ced8ba08bbb", "4f9df83ba8764a9db8813a804243adee", "e5faac095f9941419e0d7f34078297b6", "4dec2156f49e4bebb49a83b642069f75", "4d5315e888b0442686e29babbd79b204", "<KEY>", "<KEY>", "1c014c30e18740feb06f43508f81f8d7", "<KEY>", "d31d9eeac0114d8782af6f2970519d64", "<KEY>", "<KEY>", "fd1bee1248e34551bc35e8748963c361", "f49df6efe59b4583ab13084ee81144c3", 
"3b3ba78e23da40caa79ef62a8e0a86e3", "<KEY>", "5a85371d73d34735a9ca2c0967ee7ce1", "97e5dd83d41b481880bef99a588fbda8", "fc8a5f04d0614d829b8b5e0d2d71e1ef", "cae6cd55e2e4460fb9a02dc88ea0d391", "2a05897115474cee8e9ba760a14eef10", "<KEY>", "43e2792d89be4f6b882431dd3f1da9e9", "<KEY>", "<KEY>", "d10fa28a90164446a204a08790cafca6", "f00e953406964d3493681b31a8370ee1", "<KEY>", "<KEY>", "0820604fdcdf4492bc60d2b0c5a96af7"]} id="fZ_-NJrtgj_G" jupyter={"outputs_hidden": true} outputId="21f127c0-9980-43af-e785-cdd8e18c32c4" for epoch in range(20) : print('##### training ######') train(loader = trainloader , vocab = vocab) # + id="4D9O_yysPTk2" def get_embeddings(df , vocab , train , batch_size = 128 , size= 100 ) : list_urls = df['text_url'].values full_embeddings = torch.empty((0,size)) n_text = 0 while n_text < len(list_urls) : batch = list_urls[n_text:n_text + batch_size] batch = encode(batch , vocab = vocab , train = train ).to(device) with torch.no_grad() : embeddings = model(batch) full_embeddings = torch.vstack((full_embeddings , embeddings.cpu())) n_text += batch_size return full_embeddings.numpy() X_train = get_embeddings(df = df_train , vocab = vocab , train = True) X_test = get_embeddings(df = df_test , vocab = vocab , train = False) # + [markdown] id="YVFyyu4oPMYz" # # classifier # + id="3IRWrOCsPN66" def extract_one_hots(targets) : one_hot_targets = np.zeros(n_classes) one_hot_targets[targets] = 1 return one_hot_targets def get_one_hot_labels(df) : one_hot_labels = [] for index,row in df.iterrows() : one_hot_labels.append(extract_one_hots(row["labels"]) ) one_hot_labels = np.stack(one_hot_labels , axis = 0 ) return one_hot_labels.astype(np.int) # + id="awXV23pmPQ8l" y_train = get_one_hot_labels(df_train) y_test = get_one_hot_labels(df_test) # + id="8NpDiHbzQv3X" from skmultilearn.adapt import MLARAM classifier = MLARAM(threshold=5 * 1e-5, vigilance=0.95) # train classifier.fit(X_train, y_train) # predict predictions = classifier.predict(X_test) # - print('exact 
accuracy score is : ', accuracy_score(y_test,predictions)) def get_IoU_score(y_test, predictions): """ give a target list y_test and the predictions of the multilabel classifier , this function returns the IoU score. """ score = 0 for target, pred in zip(y_test, predictions): target_ones = np.where(target == 1)[0] pred_ones = np.where(np.array(pred) == 1)[0] current_score = len( set(target_ones).intersection(set(pred_ones)) ) / len(set(target_ones).union(set(pred_ones))) score += current_score return score / len(y_test) print('IoU score is : ', get_IoU_score(y_test, predictions))
notebooks/contrastive_embeddings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # DAT210x - Programming with Python for DS # ## Module4- Lab2 # + import math import pandas as pd import matplotlib.pyplot as plt import matplotlib from sklearn import preprocessing # + # Look pretty... # matplotlib.style.use('ggplot') plt.style.use('ggplot') # - # ### Some Boilerplate Code # For your convenience, we've included some boilerplate code here which will help you out. You aren't expected to know how to write this code on your own at this point, but it'll assist with your visualizations. We've added some notes to the code in case you're interested in knowing what it's doing: # ### A Note on SKLearn's `.transform()` calls: # Any time you perform a transformation on your data, you lose the column header names because the output of SciKit-Learn's `.transform()` method is an NDArray and not a daraframe. # # This actually makes a lot of sense because there are essentially two types of transformations: # - Those that adjust the scale of your features, and # - Those that change alter the number of features, perhaps even changing their values entirely. # # An example of adjusting the scale of a feature would be changing centimeters to inches. Changing the feature entirely would be like using PCA to reduce 300 columns to 30. In either case, the original column's units have either been altered or no longer exist at all, so it's up to you to assign names to your columns after any transformation, if you'd like to store the resulting NDArray back into a dataframe. def scaleFeaturesDF(df): # Feature scaling is a type of transformation that only changes the # scale, but not number of features. Because of this, we can still # use the original dataset's column names... 
so long as we keep in # mind that the _units_ have been altered: scaled = preprocessing.StandardScaler().fit_transform(df) scaled = pd.DataFrame(scaled, columns=df.columns) print("New Variances:\n", scaled.var()) print("New Describe:\n", scaled.describe()) return scaled # SKLearn contains many methods for transforming your features by scaling them, a type of pre-processing): # - `RobustScaler` # - `Normalizer` # - `MinMaxScaler` # - `MaxAbsScaler` # - `StandardScaler` # - ... # # http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing # # However in order to be effective at PCA, there are a few requirements that must be met, and which will drive the selection of your scaler. PCA requires your data is standardized -- in other words, it's _mean_ should equal 0, and it should have unit variance. # # SKLearn's regular `Normalizer()` doesn't zero out the mean of your data, it only clamps it, so it could be inappropriate to use depending on your data. `MinMaxScaler` and `MaxAbsScaler` both fail to set a unit variance, so you won't be using them here either. `RobustScaler` can work, again depending on your data (watch for outliers!). So for this assignment, you're going to use the `StandardScaler`. 
Get familiar with it by visiting these two websites: # # - http://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-scaler # - http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler # Lastly, some code to help with visualizations: def drawVectors(transformed_features, components_, columns, plt, scaled): if not scaled: return plt.axes() # No cheating ;-) num_columns = len(columns) # This funtion will project your *original* feature (columns) # onto your principal component feature-space, so that you can # visualize how "important" each one was in the # multi-dimensional scaling # Scale the principal components by the max value in # the transformed set belonging to that component xvector = components_[0] * max(transformed_features[:,0]) yvector = components_[1] * max(transformed_features[:,1]) ## visualize projections # Sort each column by it's length. These are your *original* # columns, not the principal components. important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) } important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True) print("Features by importance:\n", important_features) ax = plt.axes() for i in range(num_columns): # Use an arrow to project each original feature as a # labeled vector on your principal component axes plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75) plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75) return ax # ### And Now, The Assignment # Do * NOT * alter this line, until instructed! scaleFeatures = True # Load up the dataset specified on the lab instructions page and remove any and all _rows_ that have a NaN in them. You should be a pro at this by now ;-) # # **QUESTION**: Should the `id` column be included in your dataset as a feature? # .. your code here .. 
df = pd.read_csv('Datasets/kidney_disease.csv', index_col='id') df.dtypes df #df.apply(pd.to_numeric, errors='coerce') df = df.dropna(axis=0) df.columns #convert wc and rc columns to numeric since they are loaded as type object df.loc[:,['wc']] = pd.to_numeric(df['wc'],errors='coerce') df.loc[:,['rc']] = pd.to_numeric(df['rc'],errors='coerce') # Let's build some color-coded labels; the actual label feature will be removed prior to executing PCA, since it's unsupervised. You're only labeling by color so you can see the effects of PCA: labels = ['red' if i=='ckd' else 'green' for i in df.classification] # Use an indexer to select only the following columns: `['bgr','wc','rc']` # .. your code here .. df = df.loc[:,['bgr','wc','rc']] # Either take a look at the dataset's webpage in the attribute info section of UCI's [Chronic Kidney Disease]() page,: https://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease or alternatively, you can actually look at the first few rows of your dataframe using `.head()`. What kind of data type should these three columns be? Compare what you see with the results when you print out your dataframe's `dtypes`. # # If Pandas did not properly detect and convert your columns to the data types you expected, use an appropriate command to coerce these features to the right type. # .. your code here .. df # PCA Operates based on variance. The variable with the greatest variance will dominate. Examine your data using a command that will check the variance of every feature in your dataset, and then print out the results. Also print out the results of running `.describe` on your dataset. # # _Hint:_ If you do not see all three variables: `'bgr'`, `'wc'`, and `'rc'`, then it's likely you probably did not complete the previous step properly. # .. your code here .. df.describe() df.var(axis=0) # Below, we assume your dataframe's variable is named `df`. If it isn't, make the appropriate changes. 
But do not alter the code in `scaleFeaturesDF()` just yet! # .. your (possible) code adjustment here .. if scaleFeatures: df = scaleFeaturesDF(df) df # Run PCA on your dataset, reducing it to 2 principal components. Make sure your PCA model is saved in a variable called `'pca'`, and that the results of your transformation are saved in another variable `'T'`: # + # .. your code here .. from sklearn.decomposition import PCA pca = PCA(n_components=2, svd_solver='full') pca.fit(df) PCA(copy=True, n_components=2, whiten=False) T = pca.transform(df) # - # Now, plot the transformed data as a scatter plot. Recall that transforming the data will result in a NumPy NDArray. You can either use MatPlotLib to graph it directly, or you can convert it back to DataFrame and have Pandas do it for you. # # Since we've already demonstrated how to plot directly with MatPlotLib in `Module4/assignment1.ipynb`, this time we'll show you how to convert your transformed data back into to a Pandas Dataframe and have Pandas plot it from there. # + # Since we transformed via PCA, we no longer have column names; but we know we # are in `principal-component` space, so we'll just define the coordinates accordingly: ax = drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures) T = pd.DataFrame(T) T.columns = ['component1', 'component2'] T.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax) plt.show() # -
Module4/Module4 - Lab2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import linora as la

# A tiny four-sentence corpus to exercise the text helpers.
corpus = [
    'This is the first document.',
    'This document is the second document.',
    'And this is the third one.',
    'Is this the first document?',
]

# # filter punctuation
corpus = la.text.filter_punctuation(corpus)
corpus

# Tokenize each sentence on single spaces.
corpus = [sentence.split(' ') for sentence in corpus]
corpus

# # CountVectorizer
x, scale = la.text.CountVectorizer(corpus)
x

scale

# # TfidfVectorizer
la.text.TfidfVectorizer(x)

# # word count
word_count_dict = la.text.word_count(corpus)
word_count_dict

# # low freq word
la.text.word_low_freq(word_count_dict, threshold=1)

# # high freq word
la.text.word_high_freq(word_count_dict, threshold=3)

# # filter word -- drop the low-frequency words found above.
la.text.filter_word(corpus, la.text.word_low_freq(word_count_dict, threshold=1))

# # word to index
word_index_dict = la.text.word_to_index(corpus)
word_index_dict

# # word index sequence
word_index_sequence = la.text.word_index_sequence(corpus, word_index_dict)
word_index_sequence

# # select best length -- pick a padding length covering 70% of samples.
best_length = la.text.select_best_length(corpus, sample_rate=0.7)
best_length

# # pad sequences
word_index_sequence = la.text.pad_sequences(word_index_sequence, best_length)
word_index_sequence
example/la.text.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tflearn]
#     language: python
#     name: conda-env-tflearn-py
# ---

# # Machine Learning Recipes with Josh Gordon -- notes
#
# [Video list](https://goo.gl/KewA03)
#
# Notes taken while watching the "Machine Learning Recipes" series.

# # 1

from sklearn import tree

# Toy fruit data: [weight, texture] where texture 1 = smooth, 0 = bumpy;
# label 0 = apple, 1 = orange.
fruit_features = [[140, 1], [130, 1], [150, 0], [170, 0]]
fruit_labels = [0, 0, 1, 1]
clf = tree.DecisionTreeClassifier().fit(fruit_features, fruit_labels)
print(clf.predict([[120, 0]]))

# ## Import Concepts
#
# * How does this work in the real world?
# * How much training data do you need?
# * How is the tree created?
# * What makes a good feature?

# ## 2
#
# ### Many types of classifiers
#
# * Artificial neural network
# * Support Vector Machine
# * Lions / Tigers / Bears -- oh my!

# ### Goals
#
# #### 1. Import dataset
from sklearn.datasets import load_iris
import numpy as np

iris = load_iris()
print(iris.feature_names)
print(iris.target_names)
print(iris.data[0])
print(iris.target[0])

# #### Testing Data
#
# * Examples used to "test" the classifier's accuracy.
# * Not part of the training data.
#
# Just like in programming, testing is a very important part of ML.

# +
# Hold out one example of each of the three classes as a test set.
test_idx = [0, 50, 100]

# training data: everything except the held-out rows
train_target = np.delete(iris.target, test_idx)
train_data = np.delete(iris.data, test_idx, axis=0)
print(train_target.shape)
print(train_data.shape)

# testing data: just the held-out rows
test_target = iris.target[test_idx]
test_data = iris.data[test_idx]
print(test_target.shape)
print(test_data.shape)
# -

# #### 2. Train a classifier
clf = tree.DecisionTreeClassifier()
clf.fit(train_data, train_target)

# #### 3. Predict label for new flower.
print(test_target)
print(clf.predict(test_data))

# #### 4. Visualize the tree.
# viz code
# NOTE: `sklearn.externals.six` was removed in scikit-learn 0.23; the stdlib
# `io.StringIO` is the drop-in replacement.
from io import StringIO
import pydotplus

dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True,
                     impurity=False)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf('iris.pdf')

# ### More to learn
#
# * How are trees built automatically from examples?
# * How well do they work in parctice?

# ## 3 What Makes a Good Feature?

# +
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt

# Simulate dog heights: 500 greyhounds (mean 28") and 500 labs (mean 24"),
# both with stddev 4, and overlay their histograms.
greyhounds = 500
labs = 500

grey_height = 28 + 4 * np.random.randn(greyhounds)
lab_height = 24 + 4 * np.random.randn(labs)

plt.hist([grey_height, lab_height], stacked=True, color=['r', 'b'])
plt.show()
# -

# #### Analysis
# A dog around 35" is almost certainly a greyhound, and around 20" is most
# likely a lab, but near 25" the classes overlap and it is hard to tell which
# breed it is.  So this feature is useful, but not sufficient on its own.
#
# #### So the question is: how many features do we need?
#
# #### Things to watch out for
#
# * Avoid redundant features: e.g. height in inches AND height in centimeters.
# * Features should be easy to understand:
#   e.g. to predict mail delivery time, prefer distance and days-in-transit
#   over raw latitude/longitude coordinates.  Simpler relationships are
#   easier to learn.
#
# #### Ideal features are
#
# * Informative
# * Independent
# * Simple

# ## 4.
# Lets Write a Pipeline

# +
from sklearn import datasets

iris = datasets.load_iris()

X = iris.data    # input: features
y = iris.target  # output: label

# NOTE: `sklearn.cross_validation` was removed in scikit-learn 0.20;
# `sklearn.model_selection` provides the same train_test_split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)

# from sklearn import tree
# my_classifier = tree.DecisionTreeClassifier()

from sklearn.neighbors import KNeighborsClassifier
my_classifier = KNeighborsClassifier()

my_classifier.fit(X_train, y_train)

predictions = my_classifier.predict(X_test)

from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))
# -

# #### what is X, y?
# X: features
# y: labels
#
# ``` python
# def classify(features):
#     # do some logic
#     return label
# ```

# ## 5. Write Our First Classifier

# +
from scipy.spatial import distance


def euc(a, b):
    """Euclidean distance between two feature vectors."""
    return distance.euclidean(a, b)


class ScrappyKNN():
    """A minimal 1-nearest-neighbor classifier (scikit-learn-like API)."""

    def fit(self, X_train, y_train):
        # Memorize the training set; all work happens at predict time.
        self.X_train = X_train
        self.y_train = y_train

    def predict(self, X_test):
        """Return the label of the closest training example for each row."""
        predictions = []
        for row in X_test:
            label = self.closest(row)
            predictions.append(label)
        return predictions

    def closest(self, row):
        # Linear scan for the nearest training point.
        best_dist = euc(row, self.X_train[0])
        best_index = 0
        for i in range(1, len(self.X_train)):
            dist = euc(row, self.X_train[i])
            if dist < best_dist:
                best_dist = dist
                best_index = i
        return self.y_train[best_index]
# -

# +
from sklearn import datasets

iris = datasets.load_iris()

X = iris.data    # input: features
y = iris.target  # output: label

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5)

my_classifier = ScrappyKNN()

my_classifier.fit(X_train, y_train)

predictions = my_classifier.predict(X_test)

from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))
# -

# ## 6. Train an Image Classifier with TensorFlow for Poets

# ## 7. Classifying Handwritten Digits with TF.Learn

# ## 8. Let's Write a Decision Tree Classifier from Scratch
machine_learning/Machine Learning Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import configparser  # v1 OANDA API

# pip install git+https://github.com/oanda/oandapy.git
# Note: You can request a v1 instead of v20 account by contacting Oanda chat support
import oandapy as opy

# Credentials live in oanda.cfg -- never hard-code tokens in the notebook.
config = configparser.ConfigParser()
config.read('oanda.cfg')

oanda = opy.API(environment='practice',
                access_token=config['oanda']['access_token'])
# +
import pandas as pd

# Pull two days of EUR/USD minute bars and index them by timestamp.
data = oanda.get_history(instrument='EUR_USD',  # our instrument
                         start='2016-12-08',    # start data
                         end='2016-12-10',      # end date
                         granularity='M1')      # minute bars

df = pd.DataFrame(data['candles']).set_index('time')
df.index = pd.DatetimeIndex(df.index)
df.info()

# +
import numpy as np

# Log returns of the ask close, then a +1/-1 momentum position for several
# look-back windows (sign of the rolling mean return).
df['returns'] = np.log(df['closeAsk'] / df['closeAsk'].shift(1))

cols = []
for momentum in [15, 30, 60, 120]:
    col = 'position_%s' % momentum
    df[col] = np.sign(df['returns'].rolling(momentum).mean())
    cols.append(col)

# +
# %matplotlib inline
import seaborn as sns; sns.set()

# Strategy return = yesterday's position * today's return; plot cumulative
# performance of each momentum window against buy-and-hold.
strats = ['returns']
for col in cols:
    strat = 'strategy_%s' % col.split('_')[1]
    df[strat] = df[col].shift(1) * df['returns']
    strats.append(strat)

df[strats].dropna().cumsum().apply(np.exp).plot()
# -


class MomentumTrader(opy.Streamer):
    """Streams ticks and trades EUR/USD on a rolling-momentum signal."""

    def __init__(self, momentum, *args, **kwargs):
        opy.Streamer.__init__(self, *args, **kwargs)
        self.ticks = 0          # ticks seen so far
        self.position = 0       # -1 short, 0 flat, +1 long
        self.df = pd.DataFrame()
        self.momentum = momentum  # rolling window (in resampled bars)
        self.units = 100000

    def create_order(self, side, units):
        """Submit a market order and echo the broker response."""
        order = oanda.create_order(config['oanda']['account_id'],
                                   instrument='EUR_USD',
                                   units=units,
                                   side=side,
                                   type='market')
        print('\n', order)

    def on_success(self, data):
        """Handle one streamed tick: store it, resample, and trade the signal."""
        self.ticks += 1
        # print(self.ticks, end=', ')
        # appends the new tick data to the DataFrame object
        # (DataFrame.append was removed in pandas 2.0 -- use concat)
        tick = pd.DataFrame(data['tick'], index=[data['tick']['time']])
        self.df = pd.concat([self.df, tick])
        # transforms the time information to a DatetimeIndex object
        self.df.index = pd.DatetimeIndex(self.df['time'])
        # resamples the data set to a new, homogeneous interval
        dfr = self.df.resample('5s').last()
        # calculates the log returns
        dfr['returns'] = np.log(dfr['ask'] / dfr['ask'].shift(1))
        # derives the positioning according to the momentum strategy
        dfr['position'] = np.sign(dfr['returns'].rolling(
                                  self.momentum).mean())
        # .ix was removed in pandas 1.0; .iloc[-1] is the positional equivalent
        signal = dfr['position'].iloc[-1]
        if signal == 1:  # go long
            if self.position == 0:
                self.create_order('buy', self.units)
            elif self.position == -1:
                self.create_order('buy', self.units * 2)
            self.position = 1
        elif signal == -1:  # go short
            if self.position == 0:
                self.create_order('sell', self.units)
            elif self.position == 1:
                self.create_order('sell', self.units * 2)
            self.position = -1
        if self.ticks == 250:
            # close out the position after 250 ticks
            if self.position == 1:
                self.create_order('sell', self.units)
            elif self.position == -1:
                self.create_order('buy', self.units)
            self.disconnect()


mt = MomentumTrader(momentum=12, environment='practice',
                    access_token=config['oanda']['access_token'])
mt.rates(account_id=config['oanda']['account_id'],
         instruments=['DE30_EUR'], ignore_heartbeat=True)

# SECURITY: the original hard-coded an account id and token-like string here;
# read them from oanda.cfg like everywhere else in this notebook instead.
mystreamer = opy.Streamer(environment='practice',
                          access_token=config['oanda']['access_token'])
mystreamer.rates(account_id=config['oanda']['account_id'],
                 instruments="DE30_EUR")
oandamomentumv1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3-azureml # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # # Text Analytics # # El Procesamiento del lenguaje natural (NLP) es una parte de la inteligencia artificial que engloba el lenguaje escrito y hablado. Puede usar NLP para crear soluciones que extraigan el significado semรกntico de instrucciones habladas o escritas, o que formulen respuestas adecuadas en lenguaje natural. # # Microsoft Azure *Cognitive Services* incluye el servicio *Text Analytics*, que cuenta con algunas funciones de NLP integradas, como la identificaciรณn de frases clave en texto y la clasificaciรณn de texto segรบn opiniones. # # ![Un robot leyendo un cuaderno](./images/NLP.jpg) # # Por ejemplo, supongamos que la organizaciรณn ficticia *Margieโ€™s Travel* solicita a los clientes que den su opiniรณn sobre sus estancias en los hoteles. Puede usar el servicio Text Analytics para resumir las reseรฑas con sus frases clave, determinar quรฉ reseรฑas son positivas y cuรกles negativas o analizar el texto de la reseรฑa en busca de menciones de entidades conocidas, como las ubicaciones o las personas. # # ## Ver los documentos de reseรฑas # # Empezaremos echando un vistazo a algunas reseรฑas de clientes sobre hoteles. # # Las reseรฑas se incluyen en archivos de texto. Para verlas, haga clic en **Run cell** (&#9655;) a la izquierda de la celda y ejecute el cรณdigo siguiente. 
# + gather={"logged": 1599694576263} import os # Read the reviews in the /data/reviews folder reviews_folder = os.path.join('data', 'text', 'reviews') # Create a collection of reviews with id (file name) and text (contents) properties reviews = [] for file_name in os.listdir(reviews_folder): review_text = open(os.path.join(reviews_folder, file_name)).read() review = {"id": file_name, "text": review_text} reviews.append(review) for review_num in range(len(reviews)): # print the review text print('{}\n{}\n'.format(reviews[review_num]['id'], reviews[review_num]['text'])) # - # ## Crear un recurso de Cognitive Services # # Para analizar el texto en estas reseรฑas, puede usar el servicio **Text Analytics**, de Cognitive Services. Para ello, debe aprovisionar un recurso de **Text Analytics** o de **Cognitive Services** en su suscripciรณn de Azure (utilice un recurso de Text Analytics si es el รบnico servicio que piensa usar o si quiere supervisar su uso de forma independiente. Si no es asรญ, puede usar un recurso de Cognitive Services para combinar el servicio Text Analytics y otros servicios, lo que permite que los desarrolladores utilicen un mismo punto de conexiรณn y clave para acceder a los recursos). # # Si no tiene uno, siga estos pasos para crear un recurso de **Cognitive Services** en su suscripciรณn de Azure: # # > **Nota**: Si ya tiene un recurso de Cognitive Services, abra su pรกgina de **Inicio rรกpido** en Azure Portal y copie la clave y el punto de conexiรณn en la siguiente celda. En caso contrario, siga estos pasos para crear uno. # # 1. En la pestaรฑa de otro explorador, abra Azure Portal (https://portal.azure.com) e inicie sesiรณn con su cuenta de Microsoft. # 2. Haga clic en el botรณn **&#65291;Crear un recurso**, busque *Cognitive Services* y cree un recurso de **Cognitive Services** con esta configuraciรณn: # - **Suscripciรณn**: *su suscripciรณn de Azure*. 
#    - **Resource group**: *select or create a resource group with a unique name.*
#    - **Region**: *any available region*.
#    - **Name**: *a unique name*.
#    - **Pricing tier**: S0
#    - **I confirm I have read and understood the notices**: checked.
# 3. Wait for deployment to finish, then open the resource's **Overview**
#    page and follow the link to manage its keys.  Client applications need
#    the endpoint and a key to connect to the Cognitive Services resource.
#
# ### Get the key and endpoint of the Cognitive Services resource
#
# 1. In the Azure portal, on the **Keys and endpoint** page of the resource,
#    copy **Key1** and paste it below in place of **YOUR_COG_KEY**.
# 2. Copy the **Endpoint** and paste it below in place of
#    **YOUR_COG_ENDPOINT**.
# 3. Run the cell below (green <span style="color:green">&#9655;</span>).

# + gather={"logged": 1599694661070}
# Placeholders -- replace with the real key/endpoint before running the
# later cells.
cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'

print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))
# -

# ## Detect language
# We start by identifying the language the reviews are written in.
# + gather={"logged": 1599694675019} import os from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient from msrest.authentication import CognitiveServicesCredentials # Get a client for your text analytics cognitive service resource text_analytics_client = TextAnalyticsClient(endpoint=cog_endpoint, credentials=CognitiveServicesCredentials(cog_key)) # Analyze the reviews you read from the /data/reviews folder earlier language_analysis = text_analytics_client.detect_language(documents=reviews) # print detected language details for each review for review_num in range(len(reviews)): # print the review id print(reviews[review_num]['id']) # Get the language details for this review lang = language_analysis.documents[review_num].detected_languages[0] print(' - Language: {}\n - Code: {}\n - Score: {}\n'.format(lang.name, lang.iso6391_name, lang.score)) # Add the detected language code to the collection of reviews (so we can do further analysis) reviews[review_num]["language"] = lang.iso6391_name # - # ## Extraer frases clave # # Ahora, puede analizar el texto de las reseรฑas de clientes para identificar frases clave que incluyan informaciรณn sobre los puntos clave. # + gather={"logged": 1599694682067} # # Use the client and reviews you created in the previous code cell to get key phrases key_phrase_analysis = text_analytics_client.key_phrases(documents=reviews) # print key phrases for each review for review_num in range(len(reviews)): # print the review id print(reviews[review_num]['id']) # Get the key phrases in this review print('\nKey Phrases:') key_phrases = key_phrase_analysis.documents[review_num].key_phrases # Print each key phrase for key_phrase in key_phrases: print('\t', key_phrase) print('\n') # - # Las frases clave pueden ayudar a comprender mejor los puntos clave de cada reseรฑa. Por ejemplo, una reseรฑa que contenga la frase โ€œbuen personalโ€ o โ€œmal servicioโ€ puede otorgar informaciรณn sobre la experiencia del cliente. 
# ## Determine sentiment
#
# It can be useful to classify reviews as *positive* or *negative* with a
# *sentiment score* from the Text Analytics service.

# + gather={"logged": 1599694685535}
# Use the client and reviews you created previously to get sentiment scores
sentiment_analysis = text_analytics_client.sentiment(documents=reviews)

# Report each review's score, classified as positive when >= 0.5.
for review_num, review in enumerate(reviews):
    sentiment_score = sentiment_analysis.documents[review_num].score
    sentiment = 'negative' if sentiment_score < 0.5 else 'positive'

    # print file name and sentiment
    print('{} : {} ({})'.format(review['id'], sentiment, sentiment_score))
# -

# ## Extract known entities
#
# *Entities* are things mentioned in the text that refer to a known item --
# for example a location, a person, or a date.  The code below pulls out the
# dates and places mentioned in the reviews.

# + gather={"logged": 1599694688496}
# Use the client and reviews you created previously to get named entities
entity_analysis = text_analytics_client.entities(documents=reviews)

# Print DateTime/Location entities per review, with a Wikipedia link when
# the service recognizes the entity.
for review_num, review in enumerate(reviews):
    print(review['id'])
    for entity in entity_analysis.documents[review_num].entities:
        # Only print datetime or location entitites
        if entity.type in ['DateTime', 'Location']:
            if entity.wikipedia_id is not None:
                link = '(' + entity.wikipedia_url + ')'
            else:
                link = ''
            print(' - {}: {} {}'.format(entity.type, entity.name, link))
# -

# Note that some entities are well-known enough to have their own Wikipedia
# page, in which case the Text Analytics service returns that page's URL.
# # ## Mรกs informaciรณn # # Para obtener mรกs informaciรณn sobre el servicio Text Analytics, consulte la [documentaciรณn del servicio Text Analytics](https://docs.microsoft.com/azure/cognitive-services/text-analytics/)
07 - Text Analytics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %pylab inline
import seaborn as sbn
import scipy.stats as stats

current_palette = sbn.color_palette("Set1", 12)
sbn.set_palette(current_palette)


def plot_gaussian_mixture(v, size):
    """Sample and evaluate a two-component Gaussian mixture.

    v is a 6-vector (loc_a, scale_a, size_a, loc_b, scale_b, size_b).
    `size` is kept for interface compatibility but unused.
    Returns (bimodal_pdf, x2_eval, x2): the analytic mixture pdf, the grid it
    was evaluated on, and the raw samples.
    """
    # now the mixed gaussians
    loc_a, scale_a, size_a = (v[0], v[1], int(v[2]))
    loc_b, scale_b, size_b = (v[3], v[4], int(v[5]))
    x2 = np.concatenate([np.random.normal(loc=loc_a, scale=scale_a, size=size_a),
                         np.random.normal(loc=loc_b, scale=scale_b, size=size_b)])
    x2_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
    # Mixture pdf: each component weighted by its share of the samples.
    # BUGFIX: the second component previously used scale=scale_a; it must use
    # the second component's own width, scale_b.
    bimodal_pdf = stats.norm.pdf(x2_eval, loc=loc_a, scale=scale_a) * float(size_a) / x2.size + \
                  stats.norm.pdf(x2_eval, loc=loc_b, scale=scale_b) * float(size_b) / x2.size
    return bimodal_pdf, x2_eval, x2


# Build a sequence of mixture parameterizations: the two modes walk toward
# each other while mass shifts from component a to component b, then walk
# apart again as the shift continues.
mix_values = []
v = np.array([-2.5, 0.5, 9000, 2.5, 0.5, 1000])
mix_values.append(v)
for i in range(4):
    old_v = mix_values[i]
    v = np.array([old_v[0] + 0.5, old_v[1], old_v[2] - 1000,
                  old_v[3] - 0.5, old_v[4], old_v[5] + 1000])
    mix_values.append(v)
for i in range(4, 8):
    old_v = mix_values[i]
    v = np.array([old_v[0] - 0.5, old_v[1], old_v[2] - 1000,
                  old_v[3] + 0.5, old_v[4], old_v[5] + 1000])
    mix_values.append(v)

# Fade component a out and component b in across the sequence.
ascending_alpha = np.linspace(0.2, 0.8, 9)
descending_alpha = np.linspace(0.8, 0.2, 9)

# +
# Reference endpoint distributions, drawn with filled curves.
loc1, scale1, size1 = (-3, 0.5, 1000000)
loc3, scale3, size3 = (3, 0.5, 1000000)
x1 = np.random.normal(loc=loc1, scale=scale1, size=size1)
x3 = np.random.normal(loc=loc3, scale=scale3, size=size3)

x1_eval = np.linspace(x1.min() - 1, x3.max() + 1, 500)
x3_eval = np.linspace(x1.min() - 1, x3.max() + 1, 500)

fig = figure(figsize(17, 5))
plot(x1_eval, stats.norm.pdf(x1_eval, loc1, scale1), color=current_palette[1], lw=3)
plt.fill_between(x1_eval, stats.norm.pdf(x1_eval, loc1, scale1), alpha=0.1,
                 color=current_palette[1])
plot(x3_eval, stats.norm.pdf(x3_eval, loc3, scale3), color=current_palette[4], lw=3)
plt.fill_between(x3_eval, stats.norm.pdf(x3_eval, loc3, scale3), alpha=0.1,
                 color=current_palette[4])

# sbn.kdeplot(x1, shade=True, lw=3, color=current_palette[0])
# sbn.kdeplot(x3, shade=True, lw=3, color=current_palette[1])

# Overlay every other mixture stage, left half in palette[1] fading out and
# right half in palette[4] fading in, cycling line styles.
lines = ["-", "-", "--", "--", "-.", "-.", ":", ":", "-", "-"]
count = 0
for v in mix_values[::2]:
    print(v)
    pdf, x2_eval, x2 = plot_gaussian_mixture(v, 10000)
    plot(x2_eval[:250], pdf[:250], c=current_palette[1],
         alpha=descending_alpha[count], ls=lines[count], lw=2)
    plot(x2_eval[250:], pdf[250:], c=current_palette[4],
         alpha=ascending_alpha[count], ls=lines[count], lw=2)
    count = count + 2

xlim(-4.5, 4.5)
sbn.despine(top=True, right=True, left=True, bottom=True, trim=True)
savefig("myfig.svg")
# -

current_palette.as_hex()

# A single Gaussian "mixture" of two identical components, smoothed with a
# Gaussian KDE (Scott's bandwidth rule).
loc1, scale1, size1 = (-3, 1, 175)
loc2, scale2, size2 = (-3, 1, 175)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
                     np.random.normal(loc=loc2, scale=scale2, size=size2)])

x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)

kde = stats.gaussian_kde(x2)

plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
sbn.despine()
paper/figures/fig7_what_is_lam/what_is_lambda/lambda_illustration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import matplotlib.pyplot as plt


def _read_dat(path):
    """Split a whitespace-delimited .dat file into token rows.

    Uses a context manager so the file handle is closed (the original used
    open(...).readlines() and leaked the handles).
    """
    with open(path) as fh:
        return [line.split() for line in fh]


# file = pd.read_csv("/Users/barradd/Desktop/PDOS_Zr-40.dat",sep=' ')
file_list = _read_dat("/Users/barradd/Desktop/PDOS_Zr-40.dat")
file_df = pd.DataFrame(file_list)

new_header = file_df.iloc[0]   # grab the first row for the header
file_df = file_df[1:]          # take the data less the header row
file_df.columns = new_header   # set the header row as the df header

file_df["#Energy"] = file_df["#Energy"].astype(float, copy=True)
file_df["tot"] = file_df["tot"].astype(float, copy=True)

file_list_2 = _read_dat("/Users/barradd/Desktop/PDOS-O-119.dat")
file_df2 = pd.DataFrame(file_list_2)
file_df2 = file_df2[1:]
file_df2.columns = new_header  # reuse the same header row

file_df2.head()

file_df2["#Energy"] = file_df2["#Energy"].astype(float, copy=True)
file_df2["tot"] = file_df2["tot"].astype(float, copy=True)

file_df.plot(x="#Energy", y="tot", grid=True, xlim=(-6, 6), ylim=(0, 5))

# Side-by-side energy/total-DOS columns for the two species.
df_merge = pd.concat([file_df[["#Energy", "tot"]], file_df2[["#Energy", "tot"]]], axis=1)

file_df.plot(x="#Energy", y="tot", grid=True, xlim=(-6, 6), ylim=(0, 5))

file_df2.plot(x="#Energy", y="tot", grid=True, xlim=(-6, 6), ylim=(0, 5),
              secondary_y=True, style='g')

# NOTE(review): the first file is named PDOS_Zr-40.dat but the column is
# labeled 'Zn' -- confirm whether the element label should be 'Zr'.
df_merge.columns = ['E1', 'Zn', 'E2', 'O']

# +
fig, ax = plt.subplots()
df_merge.plot(x="E1", y="Zn", grid=True, xlim=(-5, 5), ylim=(0, 5), ax=ax)
df_merge.plot(x="E2", y="O", grid=True, xlim=(-5, 5), ylim=(0, 5), style='g', ax=ax)
plt.xlabel("Energy")
plt.ylabel("DOS arbitrary units")
plt.savefig("figue_1.png", format="png", dpi=300)
# -

# Same overlay straight from the two frames (no merge).
fig, ax = plt.subplots()
file_df.plot(x="#Energy", y="tot", grid=True, xlim=(-5, 5), ylim=(0, 5), ax=ax)
file_df2.plot(x="#Energy", y="tot", grid=True, xlim=(-5, 5), ylim=(0, 5), style='g', ax=ax)
plt.xlabel("Energy")
plt.savefig("figue_1.png")
cube_viz/Plot_dat_file_catal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: anaconda3-2020.11
#     language: python
#     name: anaconda3-2020.11
# ---

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import Latex
import snakes.plugins

nets = snakes.plugins.load('gv', 'snakes.nets', 'nets')
from nets import (Place, PetriNet, Transition, MultiSet, Expression,
                  Marking, OneOf, Substitution, Test, Tuple, Variable, Value)
from IPython.display import Image, SVG


# ### Baseline Deterministic Firing Squad Model

def firing_squad_deterministic_petri():
    """Build Pearl's firing-squad causal model as a Petri net.

    Places encode variable/value pairs, transitions encode the causal
    mechanisms, and arc weights of 0.5 on the court's decision encode the
    unknown (uniform) court order.  The repetitive add_* calls of the
    original are expressed as data-driven loops; the resulting net is
    identical.
    """
    n = PetriNet("FiringSquad")

    # One place per variable/value pair.
    for place in ("Init",
                  "Court = NoOrder", "Court = Order",
                  "Captain = Signal", "Captain = NoSignal",
                  "RiflemanA = NoShoot", "RiflemanA = Shoot",
                  "RiflemanB = NoShoot", "RiflemanB = Shoot",
                  "Prisoner = Alive", "Prisoner = Dead"):
        n.add_place(Place(place))

    # One transition per causal mechanism.
    for transition in ("court_decision_is_unknown",
                       "captain_signals_if_court_orders",
                       "captain_does_not_signal_if_court_does_not_order",
                       "rifleman_A_shoots_if_captain_signals",
                       "rifleman_A_does_not_shoot_if_captain_does_not_signal",
                       "rifleman_B_shoots_if_captain_signals",
                       "rifleman_B_does_not_shoot_if_captain_does_not_signal",
                       "prisoner_dies_if_rifleman_A_shoots",
                       "prisoner_dies_if_rifleman_B_shoots",
                       "prisoner_lives_if_riflemen_A_and_B_do_not_shoot"):
        n.add_transition(Transition(transition))

    # Input arcs: (place consumed, transition, weight).
    for place, transition, weight in (
            ("Init", "court_decision_is_unknown", 1),
            ("Court = NoOrder", "captain_does_not_signal_if_court_does_not_order", 1),
            ("Court = Order", "captain_signals_if_court_orders", 1),
            ("Captain = Signal", "rifleman_A_shoots_if_captain_signals", 1),
            ("Captain = NoSignal", "rifleman_A_does_not_shoot_if_captain_does_not_signal", 1),
            ("Captain = Signal", "rifleman_B_shoots_if_captain_signals", 1),
            ("Captain = NoSignal", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
            ("RiflemanA = Shoot", "prisoner_dies_if_rifleman_A_shoots", 1),
            ("RiflemanA = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("RiflemanB = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("RiflemanB = Shoot", "prisoner_dies_if_rifleman_B_shoots", 1)):
        n.add_input(place, transition, Value(weight))

    # Output arcs: (place produced, transition, weight).  The 0.5 weights
    # model the court's unknown decision.
    for place, transition, weight in (
            ("Court = NoOrder", "court_decision_is_unknown", 0.5),
            ("Court = Order", "court_decision_is_unknown", 0.5),
            ("Captain = NoSignal", "captain_does_not_signal_if_court_does_not_order", 1),
            ("Captain = Signal", "captain_signals_if_court_orders", 1),
            ("RiflemanA = Shoot", "rifleman_A_shoots_if_captain_signals", 1),
            ("RiflemanA = NoShoot", "rifleman_A_does_not_shoot_if_captain_does_not_signal", 1),
            ("RiflemanB = Shoot", "rifleman_B_shoots_if_captain_signals", 1),
            ("RiflemanB = NoShoot", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
            ("Prisoner = Dead", "prisoner_dies_if_rifleman_A_shoots", 1),
            ("Prisoner = Alive", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("Prisoner = Dead", "prisoner_dies_if_rifleman_B_shoots", 1)):
        n.add_output(place, transition, Value(weight))

    return n


# +
fs = firing_squad_deterministic_petri()
fs.draw('../images/firing_squad_deterministic_petri.svg')
fs.draw('../images/firing_squad_deterministic_petri.png')
SVG('../images/firing_squad_deterministic_petri.svg')
# -

# ## S1:
# Prediction
# If rifleman A did not shoot, then the prisoner is alive.
#
# $$\lnot A\implies \lnot D$$


def firing_squad_deterministic_petri_condition():
    """Firing-squad net conditioned on the court issuing NO order.

    Identical to the baseline model except the court-decision output arcs:
    Court = NoOrder gets weight 1 and Court = Order gets weight 0, encoding
    the observation used for the S1 prediction.  The repetitive add_* calls
    of the original are expressed as data-driven loops; the net is identical.
    """
    n = PetriNet("FiringSquad")

    # One place per variable/value pair.
    for place in ("Init",
                  "Court = NoOrder", "Court = Order",
                  "Captain = Signal", "Captain = NoSignal",
                  "RiflemanA = NoShoot", "RiflemanA = Shoot",
                  "RiflemanB = NoShoot", "RiflemanB = Shoot",
                  "Prisoner = Alive", "Prisoner = Dead"):
        n.add_place(Place(place))

    # One transition per causal mechanism.
    for transition in ("court_decision_is_unknown",
                       "captain_signals_if_court_orders",
                       "captain_does_not_signal_if_court_does_not_order",
                       "rifleman_A_shoots_if_captain_signals",
                       "rifleman_A_does_not_shoot_if_captain_does_not_signal",
                       "rifleman_B_shoots_if_captain_signals",
                       "rifleman_B_does_not_shoot_if_captain_does_not_signal",
                       "prisoner_dies_if_rifleman_A_shoots",
                       "prisoner_dies_if_rifleman_B_shoots",
                       "prisoner_lives_if_riflemen_A_and_B_do_not_shoot"):
        n.add_transition(Transition(transition))

    # Input arcs: (place consumed, transition, weight).
    for place, transition, weight in (
            ("Init", "court_decision_is_unknown", 1),
            ("Court = NoOrder", "captain_does_not_signal_if_court_does_not_order", 1),
            ("Court = Order", "captain_signals_if_court_orders", 1),
            ("Captain = Signal", "rifleman_A_shoots_if_captain_signals", 1),
            ("Captain = NoSignal", "rifleman_A_does_not_shoot_if_captain_does_not_signal", 1),
            ("Captain = Signal", "rifleman_B_shoots_if_captain_signals", 1),
            ("Captain = NoSignal", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
            ("RiflemanA = Shoot", "prisoner_dies_if_rifleman_A_shoots", 1),
            ("RiflemanA = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("RiflemanB = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("RiflemanB = Shoot", "prisoner_dies_if_rifleman_B_shoots", 1)):
        n.add_input(place, transition, Value(weight))

    # Output arcs: court decision fixed to "no order" (weights 1 / 0).
    for place, transition, weight in (
            ("Court = NoOrder", "court_decision_is_unknown", 1),
            ("Court = Order", "court_decision_is_unknown", 0),
            ("Captain = NoSignal", "captain_does_not_signal_if_court_does_not_order", 1),
            ("Captain = Signal", "captain_signals_if_court_orders", 1),
            ("RiflemanA = Shoot", "rifleman_A_shoots_if_captain_signals", 1),
            ("RiflemanA = NoShoot", "rifleman_A_does_not_shoot_if_captain_does_not_signal", 1),
            ("RiflemanB = Shoot", "rifleman_B_shoots_if_captain_signals", 1),
            ("RiflemanB = NoShoot", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
            ("Prisoner = Dead", "prisoner_dies_if_rifleman_A_shoots", 1),
            ("Prisoner = Alive", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
            ("Prisoner = Dead", "prisoner_dies_if_rifleman_B_shoots", 1)):
        n.add_output(place, transition, Value(weight))

    return n


# +
seeAnoshoot = firing_squad_deterministic_petri_condition()
seeAnoshoot.draw('../images/firing_squad_deterministic_petri_condition.svg')
seeAnoshoot.draw('../images/firing_squad_deterministic_petri_condition.png')
SVG('../images/firing_squad_deterministic_petri_condition.svg')
# -

# ## S4 Action
# If the captain gave no signal and rifleman A decides to shoot, then the
# prisoner will die and B will not shoot.
#
# $$\lnot C\implies D_A \wedge \lnot B_{A}$$


def _build_firing_squad_net(transition_names, arcs):
    """Assemble a "FiringSquad" net from the fixed set of places, the
    given transitions, and arcs given as (direction, place, transition,
    token) tuples, where `direction` is "in" or "out" and `token` is the
    constant SNAKES `Value` carried by the arc (1 = fact holds, 0 = not).
    """
    net = PetriNet("FiringSquad")
    for place_name in ("Init",
                       "Court = NoOrder", "Court = Order",
                       "Captain = Signal", "Captain = NoSignal",
                       "RiflemanA = NoShoot", "RiflemanA = Shoot",
                       "RiflemanB = NoShoot", "RiflemanB = Shoot",
                       "Prisoner = Alive", "Prisoner = Dead"):
        net.add_place(Place(place_name))
    for transition_name in transition_names:
        net.add_transition(Transition(transition_name))
    for direction, place_name, transition_name, token in arcs:
        if direction == "in":
            net.add_input(place_name, transition_name, Value(token))
        else:
            net.add_output(place_name, transition_name, Value(token))
    return net


# Transitions shared by the intervention and counterfactual models:
# rifleman A's own mechanism (his dependence on the captain) is absent,
# because do(A) cuts the arrows into A.
_MUTILATED_TRANSITIONS = ("court_decision_is_unknown",
                          "captain_signals_if_court_orders",
                          "captain_does_not_signal_if_court_does_not_order",
                          "rifleman_B_shoots_if_captain_signals",
                          "rifleman_B_does_not_shoot_if_captain_does_not_signal",
                          "prisoner_dies_if_rifleman_A_shoots",
                          "prisoner_dies_if_rifleman_B_shoots",
                          "prisoner_lives_if_riflemen_A_and_B_do_not_shoot")


def firing_squad_deterministic_petri_intervention():
    """Firing-squad net under the action do(A = Shoot) with no court order.

    The transitions that would set rifleman A from the captain's signal
    are removed (the intervention overrides A's mechanism), and the
    "prisoner lives" rule's `RiflemanA = NoShoot` premise carries token 0,
    so the prisoner cannot be concluded alive while A is forced to shoot.
    """
    arcs = (
        ("in",  "Init",                "court_decision_is_unknown", 1),
        ("out", "Court = NoOrder",     "court_decision_is_unknown", 1),
        ("out", "Court = Order",       "court_decision_is_unknown", 0),
        ("in",  "Court = NoOrder",     "captain_does_not_signal_if_court_does_not_order", 1),
        ("out", "Captain = NoSignal",  "captain_does_not_signal_if_court_does_not_order", 1),
        ("in",  "Court = Order",       "captain_signals_if_court_orders", 1),
        ("out", "Captain = Signal",    "captain_signals_if_court_orders", 1),
        ("in",  "Captain = Signal",    "rifleman_B_shoots_if_captain_signals", 1),
        ("out", "RiflemanB = Shoot",   "rifleman_B_shoots_if_captain_signals", 1),
        ("in",  "Captain = NoSignal",  "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
        ("out", "RiflemanB = NoShoot", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
        ("in",  "RiflemanA = Shoot",   "prisoner_dies_if_rifleman_A_shoots", 1),
        ("out", "Prisoner = Dead",     "prisoner_dies_if_rifleman_A_shoots", 1),
        # Token 0: under do(A = Shoot), "A did not shoot" can never hold.
        ("in",  "RiflemanA = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 0),
        ("in",  "RiflemanB = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
        ("out", "Prisoner = Alive",    "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
        ("in",  "RiflemanB = Shoot",   "prisoner_dies_if_rifleman_B_shoots", 1),
        ("out", "Prisoner = Dead",     "prisoner_dies_if_rifleman_B_shoots", 1),
    )
    return _build_firing_squad_net(_MUTILATED_TRANSITIONS, arcs)


# +
doAshoots = firing_squad_deterministic_petri_intervention()

doAshoots.draw('../images/firing_squad_deterministic_petri_intervention.svg')
doAshoots.draw('../images/firing_squad_deterministic_petri_intervention.png')
SVG('../images/firing_squad_deterministic_petri_intervention.svg')
# -

# ## S5 Counterfactual
#
# If the prisoner is dead, then the prisoner would be dead even if rifleman A had not shot
#
# $$D\implies D_{\lnot A}$$


# NOTE: this builder was originally also named
# `firing_squad_deterministic_petri_intervention`, silently shadowing the
# S4 intervention model above.  It is renamed to match the S5 query.
def firing_squad_deterministic_petri_counterfactual():
    """Firing-squad net for the counterfactual query D => D_{not A}.

    The court's decision token is flipped to `Court = Order` (the abduction
    step: observing D implies the court ordered the execution), and the
    `RiflemanA = Shoot` premise of "prisoner dies if A shoots" carries
    token 0, modelling the counterfactual world where A did not shoot.
    """
    arcs = (
        ("in",  "Init",                "court_decision_is_unknown", 1),
        # Flipped relative to S4: the court DID order the execution.
        ("out", "Court = NoOrder",     "court_decision_is_unknown", 0),
        ("out", "Court = Order",       "court_decision_is_unknown", 1),
        ("in",  "Court = NoOrder",     "captain_does_not_signal_if_court_does_not_order", 1),
        ("out", "Captain = NoSignal",  "captain_does_not_signal_if_court_does_not_order", 1),
        ("in",  "Court = Order",       "captain_signals_if_court_orders", 1),
        ("out", "Captain = Signal",    "captain_signals_if_court_orders", 1),
        ("in",  "Captain = Signal",    "rifleman_B_shoots_if_captain_signals", 1),
        ("out", "RiflemanB = Shoot",   "rifleman_B_shoots_if_captain_signals", 1),
        ("in",  "Captain = NoSignal",  "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
        ("out", "RiflemanB = NoShoot", "rifleman_B_does_not_shoot_if_captain_does_not_signal", 1),
        # Token 0: in the counterfactual world rifleman A did not shoot.
        ("in",  "RiflemanA = Shoot",   "prisoner_dies_if_rifleman_A_shoots", 0),
        ("out", "Prisoner = Dead",     "prisoner_dies_if_rifleman_A_shoots", 1),
        ("in",  "RiflemanA = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
        ("in",  "RiflemanB = NoShoot", "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
        ("out", "Prisoner = Alive",    "prisoner_lives_if_riflemen_A_and_B_do_not_shoot", 1),
        ("in",  "RiflemanB = Shoot",   "prisoner_dies_if_rifleman_B_shoots", 1),
        ("out", "Prisoner = Dead",     "prisoner_dies_if_rifleman_B_shoots", 1),
    )
    return _build_firing_squad_net(_MUTILATED_TRANSITIONS, arcs)


# +
seeDdoAshoots = firing_squad_deterministic_petri_counterfactual()

seeDdoAshoots.draw('../images/firing_squad_deterministic_petri_counterfactual.svg')
seeDdoAshoots.draw('../images/firing_squad_deterministic_petri_counterfactual.png')
SVG('../images/firing_squad_deterministic_petri_counterfactual.svg')
notebooks/firing_squad_deterministic_petri_base.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from sklearn.linear_model import LinearRegression import numpy as np from sklearn.model_selection import train_test_split import time from matplotlib import pyplot as plt # + #import the data data = pd.read_csv("monthly_csv.csv") model = LinearRegression() X = data['Month'] y = data['Price'] #clean the data data['Month'] = data['Month'].str.replace('-', '.') data['Month'] = data['Month'].to_numpy() #Creating a new axis for converting into 2D array X = X[np.newaxis,:] #data reshaping into 2D from 1D array X = np.reshape(X, (142,2)) #Creating a new axis for converting y into 2D array y = y[np.newaxis,:] # Data reshaping into 2D from 1D array for y y = np.reshape(y, (142,2)) #split the data X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) #train the model model.fit(X_train, y_train) #use the trained model to make a prediction pred = model.predict(X_test) class custome_pred: pred1 = model.predict([[2021, 202002]]) import sklearn.metrics as sm print("Mean absolute error =", round(sm.mean_absolute_error(pred, y_test), 2)) print("Mean squared error =", round(sm.mean_squared_error(pred, y_test), 2)) print("Median absolute error =", round(sm.median_absolute_error(pred, y_test), 2)) print("Explain variance score =", round(sm.explained_variance_score(pred, y_test), 2)) print("R2 score =", round(sm.r2_score(pred, y_test), 2)) print(custome_pred.pred1) class visualisation: import pandas as pd from matplotlib import pyplot as plt data = pd.read_csv("monthly_csv.csv") data['Month'] = data['Month'].str.replace('-', '.') X = data['Month'] y = data['Price'] plt.title('Visualisation') plt.xlabel('Month') plt.ylabel('Price') plt.plot(X,y) plt.show() visualisation() # -
CODE.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="s6wRWQbM8YWK" # # Dictionary # + [markdown] colab_type="text" id="CQgSdaRE8YWP" # Python dictionary is an unordered collection of items. While other compound data types have only value as an element, a dictionary has a key: value pair. # + [markdown] colab_type="text" id="QNZdRerx8YWQ" # # Dict Creation # + colab={} colab_type="code" id="PhZ00ce88YWS" outputId="08e7f3c2-24d0-4aab-8869-9c199552f206" #empty dictionary my_dict = {} #dictionary with integer keys my_dict = {1: 'abc', 2: 'xyz'} print(my_dict) #dictionary with mixed keys my_dict = {'name': 'ruhi', 1: ['abc', 'xyz']} print(my_dict) #create empty dictionary using dict() my_dict = dict() my_dict = dict([(1, 'abc'), (2, 'xyz')]) #create a dict with list of tuples print(my_dict) # + [markdown] colab_type="text" id="CsZFln1r8YWf" # # Dict Access # + colab={} colab_type="code" id="nCQLFclM8YWh" outputId="af38adcd-3f05-4018-9912-8e9abe57e92d" my_dict = {'name': 'ruhi', 'age': 20, 'address': 'india'} #get name print(my_dict['name']) # + colab={} colab_type="code" id="BYsuLB548YWl" outputId="d92d3779-af54-41f6-e0c7-7f2ce831e7e4" #if key is not present it gives KeyError print(my_dict['degree']) # + colab={} colab_type="code" id="WNJMH4qp8YWo" outputId="f9a5a7e4-dc63-4c7d-dd77-b71ecb9e51fb" #another way of accessing key print(my_dict.get('address')) # + colab={} colab_type="code" id="fwrZjWaD8YWs" outputId="80d0fab3-fc6e-47c2-bb44-7678b2c6d287" #if key is not present it will give None using get method print(my_dict.get('degree')) # + [markdown] colab_type="text" id="QVQG50dG8YWy" # # Dict Add or Modify Elements # + colab={} colab_type="code" id="Cg9LrSiL8YW0" outputId="035304a8-cc63-4bb2-d5ee-a58a22c19f30" my_dict = {'name': 'ruhi', 'age': 20, 'address': 'india'} 
#update name my_dict['name'] = 'rachna' print(my_dict) # + colab={} colab_type="code" id="CZ2yD3Yv8YW5" outputId="ee35a1c7-d8cd-4a80-8785-bdfffb0481cf" #add new key my_dict['degree'] = 'M.Tech' print(my_dict) # + [markdown] colab_type="text" id="JlbqO1N78YW8" # # Dict Delete or Remove Element # + colab={} colab_type="code" id="8WJsHD9E8YW9" outputId="a186c3a8-e420-41e7-d112-fce1fc4ffffc" #create a dictionary my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'} #remove a particular item print(my_dict.pop('age')) print(my_dict) # + colab={} colab_type="code" id="l65nK-cQ8YXA" outputId="62583228-7758-4d6c-bf6c-76bda6c8df8b" my_dict = {'name': 'satish', 'age': 27, 'address': 'guntur'} #remove an arbitarty key my_dict.popitem() print(my_dict) # + colab={} colab_type="code" id="TY12fl4Q8YXD" outputId="e70eed58-1ce8-4db9-9429-73483d77cb06" squares = {2: 4, 3: 9, 4: 16, 5: 25} #delete particular key del squares[2] print(squares) # + colab={} colab_type="code" id="WaE9o6-j8YXG" outputId="3b26053a-8ce1-4da0-bc7d-8a019b20699c" #remove all items squares.clear() print(squares) # + colab={} colab_type="code" id="LUrIAJo58YXK" outputId="8bc10a7e-15e8-461f-e743-41da6e57c898" squares = {2: 4, 3: 9, 4: 16, 5: 25} #delete dictionary itself del squares print(squares) #NameError because dict is deleted # + [markdown] colab_type="text" id="umrKAvuz8YXP" # # Dictionary Methods # + colab={} colab_type="code" id="pC6eM-Uz8YXQ" outputId="06a4c7f4-113c-44ac-a1fd-086511705807" squares = {2: 4, 3: 9, 4: 16, 5: 25} my_dict = squares.copy() print(my_dict) # + colab={} colab_type="code" id="8smTk2Jr8YXT" outputId="242de0a4-d809-4328-e13f-f72f2ce35aa9" #fromkeys[seq[, v]] -> Return a new dictionary with keys from seq and value equal to v (defaults to None). 
subjects = {}.fromkeys(['Math', 'English', 'Hindi'], 0)
print(subjects)

# + colab={} colab_type="code" id="-wW9T7Id8YXW" outputId="49fac564-9a97-4107-832b-1ab7d4da03dc"
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.items()) #return a new view of the dictionary items (key, value)

# + colab={} colab_type="code" id="YMjrhvUr8YXZ" outputId="1a1eceaf-43f1-433e-95fc-dafb9087d2c0"
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.keys()) #return a new view of the dictionary keys

# + colab={} colab_type="code" id="B2AU0n-Z8YXd" outputId="358facae-79f4-4e30-8e6f-8c388e0441af"
subjects = {2:4, 3:9, 4:16, 5:25}
print(subjects.values()) #return a new view of the dictionary values

# + colab={} colab_type="code" id="Hy5A9de58YXg" outputId="f8ea85e1-81f8-4b35-8a0a-f645fbc286cd"
#get list of all available methods and attributes of dictionary
d = {}
print(dir(d))

# + [markdown] colab_type="text" id="YCcb_-y_8YXo"
# # Dict Comprehension

# + colab={} colab_type="code" id="b8nEYWrB8YXr" outputId="2dcb519c-f2a1-4301-8cc8-b85ffd6b624b"
#Dict comprehensions are just like list comprehensions but for dictionaries

d = {'a': 1, 'b': 2, 'c': 3}
for pair in d.items():
    print(pair)

# + colab={} colab_type="code" id="sKVoZ74V8YXw" outputId="c88ef99f-ba54-49dc-d619-1f7d448aba5c"
#Creating a new dictionary with only pairs where the value is larger than 2
d = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
new_dict = {k:v for k, v in d.items() if v > 2}
print(new_dict)

# + colab={} colab_type="code" id="JE08JjT28YXz" outputId="9236c079-1c83-42f2-9c2e-2209664c42c4"
#We can also perform operations on the key value pairs
d = {'a':1,'b':2,'c':3,'d':4,'e':5}
d = {k + 'c':v * 2 for k, v in d.items() if v > 2}
print(d)

# + colab={} colab_type="code" id="g8b_0DPE8YX1"
Python-101/10-dictionary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="VLUqsUmXvf2_" colab_type="text"
# # Compiling GRF from scratch.
#
# Can be used to create pre-build shared libraries.
#
#

# + id="AakcNSxB-D3P" colab_type="code" outputId="ac3b6f49-2aa5-4569-8738-2b696a9ed7e7" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# System packages required to build the Google Research Football engine.
# ! sudo apt-get update
# ! sudo apt-get install git cmake build-essential libgl1-mesa-dev libsdl2-dev \
#     libsdl2-image-dev libsdl2-ttf-dev libsdl2-gfx-dev libboost-all-dev \
#     libdirectfb-dev libst-dev mesa-utils xvfb x11vnc libsdl-sge-dev python3-pip

# + id="3GjAo6Y6-LJm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 462} outputId="37b447b1-f88a-4db5-f94d-5de10dc6f837"
## Make sure to set the correct BRANCH (-b XXX)
# ! git clone -b v2.4 https://github.com/google-research/football.git
# ! cd football && pip3 install .

# + id="V2MEON2i-Q4s" colab_type="code" outputId="cce73a10-d6fe-485e-8a87-791f5019194b" colab={"base_uri": "https://localhost:8080/", "height": 68}
## Small code example to make sure that everything works.
import gfootball.env as football_env
env = football_env.create_environment(env_name="academy_empty_goal_close",
                                      stacked=False, logdir='/tmp/football',
                                      write_goal_dumps=False,
                                      write_full_episode_dumps=False,
                                      render=False)
env.reset()
steps = 0
# Run random actions until the episode ends, logging reward every 100 steps.
while True:
    obs, rew, done, info = env.step(env.action_space.sample())
    steps += 1
    if steps % 100 == 0:
        print("Step %d Reward: %f" % (steps, rew))
    if done:
        break
print("Steps: %d Reward: %.2f" % (steps, rew))

# + id="OVc80iJfGlWE" colab_type="code" colab={}
# Copy the freshly built engine shared library out of site-packages so it
# can be downloaded and reused as a prebuilt binary.
# ! cp /usr/local/lib/python3.6/dist-packages/gfootball_engine/_gameplayfootball.so /root/prebuilt_gameplayfootball

# + id="rl6KRPLesF70" colab_type="code" colab={}
# Now download the file from:
# /root/prebuilt_gameplayfootball (using the menu on the left side)
gfootball/colabs/gfootball_example_from_scratch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Loads a mid-training value network, extracts/processes its greedy
# policy, and plots a hard-coded "time since last sync" series.

import copy
from keras.models import clone_model
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
import progressbar
import time

import util

np.random.seed(0)

# Checkpoint taken after 300k learning updates.
model_number = 300*1000
model = load_model('../saved_models/value_net_iter{0:06d}.h5'.format(model_number))

# ResultsAnalyzer is a project helper (see util module); the two None
# arguments' meaning is not visible from here.
analyzer = util.ResultsAnalyzer(model, None, None)
end_policy = analyzer.extractPolicy()
analyzer.processPolicy(end_policy)

# Hard-coded measurement series: presumably "time since last sync" per
# 1000 learning updates (matches the axis labels below) — TODO confirm.
x = [5.5973, 7.1863, 5.3510, 3.4371, 3.1367, 3.3230, 2.2831, 2.2678, 2.4155, 2.3401,
     2.4069, 2.3901, 2.4570, 2.5797, 2.4849, 2.5265, 2.5060, 2.5477, 2.5244, 2.5491,
     2.5557, 2.6129, 2.6057, 2.6461, 2.6773, 2.7261, 2.7254, 2.7111, 2.7707, 2.8284,
     2.8250, 2.7686, 2.8788, 2.9524, 2.8266, 2.8878, 2.9929, 2.9490, 2.9261, 2.9443,
     2.9247, 2.9970, 2.9807, 3.0932, 3.0689, 4.1290, 3.1757, 4.1285, 3.2368, 3.1616,
     3.2302, 3.2794, 3.7010, 3.3482, 3.3169, 3.2848, 5.0563, 9.8165, 7.2128, 5.4654,
     6.2152, 6.5929, 4.8255, 4.0505, 4.0116, 4.5165, 4.1764, 4.3960, 4.4577, 4.3646,
     4.1668, 4.0466, 4.2155, 4.4573, 4.3624, 4.3457, 4.5205, 4.1395, 4.7616, 4.4522,
     4.6926, 4.2881, 4.5563, 4.6502, 4.8728, 4.4388, 4.3599, 4.4635, 4.8299, 5.8258,
     4.7094, 4.6218, 4.6954, 5.0666, 4.8969, 5.1419, 4.8821, 4.9591, 5.2063, 4.7708,
     5.2512, 4.8849, 4.8296, 4.9064, 5.3780, 5.3864, 5.6884, 5.5320, 5.5428, 5.3465,
     5.5889, 5.4498, 5.2609, 5.4612, 5.6722, 5.7716, 8.5362, 5.8986, 6.0078, 5.6727,
     5.5540, 5.6885, 5.9872, 5.6949, 5.9246, 5.8335, 5.8406, 5.6864, 6.5043, 6.1763,
     5.9254, 5.9905, 6.0597, 5.9907, 6.3251, 5.8398, 5.8286, 6.4020, 6.2201, 6.2150,
     6.5418, 6.2008, 6.5191, 6.3091, 6.1428, 6.1535, 6.1417, 6.1482, 6.8894, 6.1965,
     6.2831, 6.3784, 6.9101, 6.6076, 6.6850, 6.7986, 6.8689, 7.0552, 6.6367, 6.4755,
     6.5174, 6.5284, 6.6757, 6.8882, 7.0490, 6.9360, 7.3696, 7.2295, 7.1360, 6.8598,
     7.0941, 8.9517, 8.5641, 11.3481, 11.4682, 13.5702, 10.5819, 10.5029, 11.1499, 10.5713,
     10.8807, 11.3593, 11.3678, 10.6601, 13.2213, 11.0494, 9.6540, 8.6962, 8.9942, 8.5755,
     8.2085, 9.2310, 8.6042, 8.8462, 11.1050, 9.3368, 8.9109, 8.5613, 7.9498, 7.9651,
     ]

plt.style.use('fivethirtyeight')
_f, ax = plt.subplots(figsize=(5,5))
# Bare expression — a no-op in a script; in a notebook it only echoes the
# Axes object as cell output.
ax
ax.set_ylabel('time since last sync')
ax.set_xlabel('number of learning updates (/1000)')
ax.plot(x)
plt.show()
proof_of_work/deep_q/v0/experiments/midtraining_policy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Double-feature Models # + import datetime from pathlib import Path import pickle import sys import time import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.compose import make_column_selector, make_column_transformer, ColumnTransformer from sklearn.pipeline import make_pipeline from sklearn.model_selection import GridSearchCV from sklearn.metrics import rand_score dir_generic_files = Path("/home/enrico/shared_virtualbox/phd_projects_Enrico_Gandini/phd_project_similarity_prediction/generic_input_files/") sys.path.append(dir_generic_files.as_posix()) import machine_learning_helpers as mlh # - # ## Define directories and files #The current notebook should be #in the main directory containing queried results. dir_results = Path.cwd() # Select the date when the survey ended, and define the directory containing survey results up to that date. date_end_survey = datetime.date(year=2021, month=6, day=28) dir_queries = Path(dir_results, f"queried_heroku_{date_end_survey}") # Load DataFrame containing aggregated survey answer. The dataset was produced by the "retrieve answers" script, using SQLAlchemy. file_agg = Path(dir_queries, "aggregated_survey_answers.csv") df_agg = pd.read_csv(file_agg, index_col='id_chosenPair') df_agg df_agg.columns # Directory that will contain fitted models. dir_models = Path(dir_results, "models_3Classes") dir_models.mkdir(exist_ok=True) # Directory that will contain analyses and visualization of models. 
dir_models_analysis = Path(dir_models, "analysis")
dir_models_analysis.mkdir(exist_ok=True)

# ## Define variables

# Contained in the input data, and necessary to create nice figures.

# +
# Column names of the aggregated survey DataFrame (df_agg).
colname_score_2d = "tanimoto_cdk_Extended"
colname_score_3d = "TanimotoCombo"

colname_dist = "pchembl_distance"

colname_target = "target_name"
colname_subset = "pair_type"

colname_pair = "id_surveyPair"

colname_n_ans = "n_answers"
colname_n_simil = "n_similar"
colname_frac_simil = "frac_similar"
# -

categories_subset = ["dis2D,dis3D",
                     "dis2D,sim3D",
                     "sim2D,dis3D",
                     "sim2D,sim3D",
                     ]

targets_names = ["HERG",
                 "5HT2B",
                 "CYP2D6",
                 ]

# +
# Human-readable labels for figures/tables.
nicename_score_2d = (colname_score_2d
                     .replace("_", " ")
                     .title()
                     .replace("Cdk", "CDK")
                     )
nicename_score_3d = colname_score_3d

nicenames_scores = {colname_score_2d: nicename_score_2d,
                    colname_score_3d: nicename_score_3d,
                    }

nicename_dist = "pChEMBL Distance"

nicename_target = "Target"
nicename_subset = "Pair Type"

nicename_similar = "Similar"
nicename_experience = "Academic Qualification"

nicename_n_ans = "Number of Answers"
nicename_ans_percent = "Answer Percentage"
nicename_simil_percent = "Similarity Percentage"

nicename_pair = "Pair ID"
nicename_n_pairs = "Number of Pairs"
nicename_percent_pairs_subset = "Pair Percentage in each subset"

# +
FIGSIZE_GOLD = (9.556, 5.906) # Golden Rectangle / (in, in)
FIGSIZE_PAGE = (8.27, 11.69) #A4 page without margins.
FIGSIZE_SQUARE = (FIGSIZE_GOLD[0], FIGSIZE_GOLD[0])

FONTSIZE = 14

kwargs_fig_basic = {"constrained_layout": True,
                    "figsize": FIGSIZE_GOLD}
# -

n_pairs_each_subset = 25

# +
# Axis limits: 2D Tanimoto is in [0, 1], TanimotoCombo in [0, 2].
lim_score_2d = (0, 1)
lim_score_3d = (0, 2)

lims_scores = {colname_score_2d: lim_score_2d,
               colname_score_3d: lim_score_3d,
               }

lim_percent = (0, 100)
# -

# ## Define Training set

# +
colnames_features = [colname_score_2d, colname_score_3d]

X_train = df_agg.loc[:, colnames_features].copy()

n_train = X_train.shape[0]
# -

# Training set `y` labels will also be added to the original DataFrame, for easier analyses.
# +
human_similarity = df_agg[colname_frac_simil] # fraction of human experts that considered a pair of molecules to be similar

# Class thresholds on the fraction of "similar" votes.
thres_low = 0.4
thres_high = 0.6

#(!)The last break is `1.00001` instead of `1.0` since
#I need non-overlapping intervals
#(so, `closed="both"` option in `from_breaks` method is not possible),
#and I need to include the right extreme `1.0`
#(otherwise, pairs with `frac_similar` equals `1.0` would be in a NaN bin).
breaks = [0, thres_low, thres_high, 1.00001]
bins_classes = pd.IntervalIndex.from_breaks(breaks,
                                            closed="left",
                                            )
labels_bins_classes = ["Not Similar", "Uncertain", "Similar"]

# Bin the continuous vote fraction into the three ordered classes.
y_train = pd.cut(human_similarity,
                 bins=bins_classes,
                 )
y_train = y_train.cat.rename_categories(labels_bins_classes)

colname_judged = "judged_similar"
y_train.name = colname_judged
df_agg[colname_judged] = y_train

#Check that all items in `human_similarity` were assigned to a bin.
if y_train.isna().any():
    raise ValueError("NaN values in `y_train`! Check the binning!")

y_train
# -

# Counts of new multi-class labelling.

counts_y_train = (pd.value_counts(y_train)
                  .rename("train_labels")
                  .loc[labels_bins_classes]
                  .to_frame()
                  )
counts_y_train

# ## Define all Logistic Regression models

# Define `kwargs` of Logistic Regression models with all penalty types.

# +
seed_rand = 1
max_iter = 2000
solver = "saga" # Supports all logistic regression penalties.
class_weight = "balanced" #Balance an unbalanced multi-class dataset.

kwargs_base = {"random_state": seed_rand,
               "max_iter": max_iter,
               "solver": solver,
               "class_weight": class_weight,
               }

kwargs_noreg = kwargs_base.copy()
# FIX: this was `kwargs_noreg["penalty"]: "none"` — a bare annotation
# statement that never assigns, so the "noreg" model silently trained
# with the default L2 penalty.  (NOTE(review): sklearn >= 1.2 prefers
# `penalty=None`; the string form matches this notebook's sklearn API.)
kwargs_noreg["penalty"] = "none"

kwargs_l1 = kwargs_base.copy()
kwargs_l1["penalty"] = "l1"

kwargs_l2 = kwargs_base.copy()
kwargs_l2["penalty"] = "l2"

kwargs_enet = kwargs_base.copy()
kwargs_enet["penalty"] = "elasticnet"

#Elasticnet also requires specification of `l1_ratio` hyperparameter.
ratio_default = 0.5
kwargs_enet_default = kwargs_enet.copy()
kwargs_enet_default["l1_ratio"] = ratio_default
# -

# ### Define Decision Tree and Random Forest Models

# +
kwargs_decisiontree = {"random_state": seed_rand,
                       "class_weight": class_weight,
                       }

kwargs_randomforest = kwargs_decisiontree.copy()
# -

# Define models that will be trained with default hyperparameters

initial_models_default = {"noreg": LogisticRegression(**kwargs_noreg),
                          "l1": LogisticRegression(**kwargs_l1),
                          "l2": LogisticRegression(**kwargs_l2),
                          "enet": LogisticRegression(**kwargs_enet_default),
                          "decisiontree": DecisionTreeClassifier(**kwargs_decisiontree),
                          "randomforest": RandomForestClassifier(**kwargs_randomforest),
                          }

# ## Fit Logistic Regression models with default hyperparameters

# +
fitted_models_default = {}

for name, model in initial_models_default.items():
    model.fit(X_train, y_train)
    fitted_models_default[name] = model
    print(f"fitted {name}")
# -

# ### Evaluate models based on default hyperparameters

# On training-set.

# +
df_evals_default = [] #will contain results of all scores for default models

merged_counts_pred_default = []

df_fracs_correct_default = []

for name, model in fitted_models_default.items():
    # Per-pair predictions are stored back into df_agg for later analysis.
    pred = model.predict(X_train)
    df_agg[f"pred_default_{name}"] = pred

    #Count classes predicted by the model.
    # NOTE(review): `pd.value_counts` is deprecated in recent pandas;
    # `pd.Series(pred).value_counts()` is the modern spelling.
    counts_pred = (pd.value_counts(pred)
                   .rename(name)
                   .loc[labels_bins_classes]
                   )
    merged_counts_pred_default.append(counts_pred)

    #Fractions of correct predictions
    fracs_correct = mlh.fracs_correct_predictions(model,
                                                  X_train,
                                                  y_train,
                                                  )
    fracs_correct = (fracs_correct
                     .loc[labels_bins_classes]
                     .rename(name)
                     )
    df_fracs_correct_default.append(fracs_correct)

    correct = (y_train == pred).astype(int)
    n_correct = correct.sum()
    rand_index = rand_score(y_train, pred)
    print(f"Model `{name}` correctly predicted {n_correct}/{n_train} pairs.")
    df_agg[f"correct_default_{name}"] = correct

    tmp = {"model": name,
           "n_correct": n_correct,
           "rand_index": rand_index,
           }

    metrics_dict = mlh.various_metrics_multi_classification(model=model,
                                                            metrics=mlh.METRICS_MULTI_PROBA,
                                                            X=X_train,
                                                            y_true=y_train,
                                                            )
    tmp.update(metrics_dict)
    df_evals_default.append(tmp)

merged_counts_pred_default = pd.DataFrame(merged_counts_pred_default).T
df_fracs_correct_default = pd.DataFrame(df_fracs_correct_default).T

df_evals_default = pd.DataFrame(df_evals_default)
df_evals_default
# -

df_latex_default = (df_evals_default
                    .set_index("model")
                    .rename(columns=mlh.MAP_CLASSIF_METRICS_LATEX)
                    .drop(columns="rand_index")
                    #.T
                    )
print(df_latex_default.to_latex(float_format="%.3f"))

# Add the original true training labels to the prediction counts DataFrame, for clarity.

merged_counts_pred_default = pd.merge(left=counts_y_train,
                                      right=merged_counts_pred_default,
                                      left_index=True,
                                      right_index=True,
                                      )
merged_counts_pred_default

# Correct predictions for each class, normalized by number of true instances of each class.
df_fracs_correct_default

print(df_fracs_correct_default.mul(100).T.to_latex(float_format="%.1f"))

# +
# Persist the evaluation tables of the default-hyperparameter models.
file_evals_default = Path(dir_models_analysis, "metrics_double_feature_default_models.csv")
df_evals_default.to_csv(file_evals_default, index=False)

file_counts_default = Path(dir_models_analysis, "pred_counts_double_feature_default_models.csv")
merged_counts_pred_default.to_csv(file_counts_default)

file_fracs_correct_default = Path(dir_models_analysis, "fracs_correct_double_feature_default_models.csv")
df_fracs_correct_default.to_csv(file_fracs_correct_default)
# -

# ### Save default models

dir_default = Path(dir_models, "double_feature_default_models")
dir_default.mkdir(exist_ok=True)

# One pickle per fitted model, named after its key in the dict.
for name, model in fitted_models_default.items():
    file_model = Path(dir_default, f"{name}.pickle")
    with open(file_model, "wb") as f:
        pickle.dump(model, f)

# Define grids of hyperparameters, that will be later used for hyperparameter optimization.
#
# All hyperparameters of Logistic Regression are about penalty, so the model with no penalty does not have hyperparameters, and hyperparameter optimization is not needed.
# +
# Log-spaced search ranges for the regularization strength and the
# elastic-net mixing parameter.
reg_strengths = np.geomspace(0.01, 100, num=20)
ratios = np.geomspace(0.01, 1, num=25)

grid_l1 = {"C": reg_strengths}
grid_l2 = grid_l1

grid_enet = {"C": reg_strengths,
             "l1_ratio": ratios,
             }

#Hyperparameters for Decision Tree and Random Forest
grid_decisiontree = {"min_samples_leaf": [1, 2, 5, 10],
                     "max_depth": [2, 3, 5, 10],
                     "min_samples_split": [2, 5, 10],
                     }

grid_randomforest = grid_decisiontree.copy()
grid_randomforest["n_estimators"] = [5, 10, 50, 100]

grids_hyperparams = {"l1": grid_l1,
                     "l2": grid_l2,
                     "enet": grid_enet,
                     "decisiontree": grid_decisiontree,
                     "randomforest": grid_randomforest,
                     }

# +
kwargs_for_optim = {"l1": kwargs_l1,
                    "l2": kwargs_l2,
                    "enet": kwargs_enet,
                    "decisiontree": kwargs_decisiontree,
                    "randomforest": kwargs_randomforest,
                    }

n_folds_cv = 10
n_jobs = 5

initial_optimizers = {}

for name, grid in grids_hyperparams.items():
    kwargs_model = kwargs_for_optim[name]

    #Select estimator
    if name == "decisiontree":
        estimator = DecisionTreeClassifier
    elif name == "randomforest":
        estimator = RandomForestClassifier
    elif name in ["l1", "l2", "enet"]:
        estimator = LogisticRegression
    else:
        raise ValueError(f"`{name}` model not valid!")

    optimizer = GridSearchCV(estimator=estimator(**kwargs_model),
                             param_grid=grid,
                             cv=n_folds_cv,
                             n_jobs=n_jobs,
                             )
    initial_optimizers[name] = optimizer
# -

fitted_optimizers = {}

for name, optimizer in initial_optimizers.items():
    start = time.perf_counter()
    optimizer.fit(X_train, y_train)
    delta_time = time.perf_counter() - start
    delta_time = datetime.timedelta(seconds=delta_time)
    # NOTE(review): `delta_time` is a timedelta (H:MM:SS...), so the
    # trailing word "hours" in this message is misleading.
    print(f"Completed hyperparameter search of `{name}` "
          f"using {n_folds_cv}-fold CV in {delta_time} hours with {n_jobs} CPUs.")
    fitted_optimizers[name] = optimizer

# +
df_evals_optim = [] #will contain results of all scores for optim models

merged_counts_pred_optim = []

df_fracs_correct_optim = []

for name, model in fitted_optimizers.items():
    # Same evaluation as for the default models, on the refit best
    # estimator of each GridSearchCV.
    pred = model.predict(X_train)
    df_agg[f"pred_optim_{name}"] = pred

    #Count classes predicted by the model.
    counts_pred = (pd.value_counts(pred)
                   .rename(name)
                   .loc[labels_bins_classes]
                   )
    merged_counts_pred_optim.append(counts_pred)

    #Fractions of correct predictions
    fracs_correct = mlh.fracs_correct_predictions(model,
                                                  X_train,
                                                  y_train,
                                                  )
    fracs_correct = (fracs_correct
                     .loc[labels_bins_classes]
                     .rename(name)
                     )
    df_fracs_correct_optim.append(fracs_correct)

    correct = (y_train == pred).astype(int)
    n_correct = correct.sum()
    rand_index = rand_score(y_train, pred)
    print(f"Optimized `{name}` correctly predicted {n_correct}/{n_train} pairs.")
    df_agg[f"correct_optim_{name}"] = correct

    tmp = {"model": name,
           "n_correct": n_correct,
           "rand_index": rand_index,
           }

    metrics_dict = mlh.various_metrics_multi_classification(model=model,
                                                            metrics=mlh.METRICS_MULTI_PROBA,
                                                            X=X_train,
                                                            y_true=y_train,
                                                            )
    tmp.update(metrics_dict)
    df_evals_optim.append(tmp)

merged_counts_pred_optim = pd.DataFrame(merged_counts_pred_optim).T
df_fracs_correct_optim = pd.DataFrame(df_fracs_correct_optim).T

df_evals_optim = pd.DataFrame(df_evals_optim)
df_evals_optim
# -

df_latex_optim = (df_evals_optim
                  .set_index("model")
                  .rename(columns=mlh.MAP_CLASSIF_METRICS_LATEX)
                  .drop(columns="rand_index")
                  #.T
                  )
print(df_latex_optim.to_latex(float_format="%.3f"))

merged_counts_pred_optim = pd.merge(left=counts_y_train,
                                    right=merged_counts_pred_optim,
                                    left_index=True,
                                    right_index=True,
                                    )
merged_counts_pred_optim

# Correct predictions for each class, normalized by number of true instances of each class.
df_fracs_correct_optim

print(df_fracs_correct_optim.mul(100).T.to_latex(float_format="%.1f"))

# +
# Persist the evaluation tables of the optimized models.
file_evals_optim = Path(dir_models_analysis, "metrics_double_feature_optimized_models.csv")
df_evals_optim.to_csv(file_evals_optim, index=False)

file_counts_optim = Path(dir_models_analysis, "pred_counts_double_feature_optimized_models.csv")
merged_counts_pred_optim.to_csv(file_counts_optim)

file_fracs_correct_optim = Path(dir_models_analysis, "fracs_correct_double_feature_optimized_models.csv")
df_fracs_correct_optim.to_csv(file_fracs_correct_optim)
# -

# Concatenate the two evaluation DataFrames for an easier visual inspection.

# +
#Before concatenation, add prefix to `model` column,
#to specify if models in concatenated DataFrame are default or optimized.
#(Note: this mutates df_evals_default/df_evals_optim in place, so
#re-running this cell keeps prepending prefixes.)
df_evals_default["model"] = "default_" + df_evals_default["model"]
df_evals_optim["model"] = "optimized_" + df_evals_optim["model"]

pd.concat([df_evals_default,
           df_evals_optim],
          ignore_index=True)
# -

# ***Notes***:
# * For optimized models, Elastic Net became identical to L1 (I also checked parameters and hyperparameters) $\Rightarrow$ **do not use optimized Elastic Net**!
# * Best model is Random Forest, followed by Decision Tree.
# * Decision Tree and Random Forest with default parameters are probably overfit (a lot): all correct predictions; they probably memorized each sample in the training-set.
# * After hyperparameter optimization, Decision Tree and Random Forest got worse than default models, but they are now probably not overfit. And they are better than models based on Logistic Regression.
# * Random Forest, after optimization, is quite significantly better than Decision Tree. Let's hope that it is not overfit (and it should not be overfit, since Random Forests are meant to reduce overfitting of Decision Trees).
# ### Save Optimized Models

# Every fitted optimizer's winning estimator is pickled to its own file in a
# dedicated sub-directory of `dir_models`.
dir_optim = Path(dir_models, "double_feature_optimized_models")
dir_optim.mkdir(exist_ok=True)

for name, optimizer in fitted_optimizers.items():
    # `best_estimator_` holds the model with the best hyperparameters,
    # refitted on the whole dataset.
    best_model = optimizer.best_estimator_
    target_file = dir_optim / f"{name}.pickle"
    with open(target_file, "wb") as fh:
        pickle.dump(best_model, fh)
webapp/results_survey_molecular_similarity/.ipynb_checkpoints/create_double_feature_models_3Classes_01-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# + [markdown] origin_pos=0
# # Natural Language Inference and the Dataset
# :label:`sec_natural-language-inference-and-dataset`
#
# In :numref:`sec_sentiment`, we discussed the problem of sentiment analysis.
# That task aims to classify a single text sequence into predefined categories,
# such as a set of sentiment polarities. However, when we need to decide whether
# one sentence can be inferred from another, or to eliminate redundancy between
# sentences by identifying sentences that are semantically equivalent, knowing
# how to classify one text sequence is insufficient. Instead, we need to be able
# to reason over pairs of text sequences.
#
# ## Natural Language Inference
#
# *Natural language inference* studies whether a *hypothesis* can be inferred
# from a *premise*, where both are a text sequence. In other words, natural
# language inference determines the logical relationship between a pair of text
# sequences. Such relationships usually fall into three types:
#
# * *Entailment*: the hypothesis can be inferred from the premise.
# * *Contradiction*: the negation of the hypothesis can be inferred from the premise.
# * *Neutral*: all the other cases.
#
# Natural language inference is also known as the recognizing-textual-entailment
# task. For example, the following pair will be labeled as *entailment* because
# "showing affection" in the hypothesis can be inferred from "hugging one
# another" in the premise.
#
# >Premise: Two women are hugging each other.
#
# >Hypothesis: Two women are showing affection.
#
# The following is an example of *contradiction*, as "running the coding
# example" indicates "not sleeping" rather than "sleeping".
#
# >Premise: A man is running the coding example from Dive Into Deep Learning.
#
# >Hypothesis: The man is sleeping.
#
# The third example shows a *neutrality* relationship, because neither "famous"
# nor "not famous" can be inferred from the fact that "are performing for us".
#
# >Premise: The musicians are performing for us.
#
# >Hypothesis: The musicians are famous.
#
# Natural language inference has long been a central topic in understanding
# natural language. It enjoys wide applications ranging from information
# retrieval to open-domain question answering. To study this problem, we will
# begin by investigating a popular natural-language-inference benchmark dataset.
#
# ## The Stanford Natural Language Inference (SNLI) Dataset
#
# [**The Stanford Natural Language Inference (SNLI) corpus**] is a collection of
# over 500000 labeled English sentence pairs
# :cite:`Bowman.Angeli.Potts.ea.2015`. We download and store the extracted SNLI
# dataset in the path `../data/snli_1.0`.
#

# + origin_pos=1 tab=["mxnet"]
import os
import re
from mxnet import gluon, np, npx
from d2l import mxnet as d2l

npx.set_np()

#@save
d2l.DATA_HUB['SNLI'] = (
    'https://nlp.stanford.edu/projects/snli/snli_1.0.zip',
    '9fcde07509c7e87ec61c640c1b2753d9041758e4')

data_dir = d2l.download_extract('SNLI')

# + [markdown] origin_pos=3
# ### [**Reading the Dataset**]
#
# The original SNLI dataset contains much richer information than what we really
# need in our experiments. Thus, we define a function `read_snli` to extract
# only part of the dataset, then return lists of premises, hypotheses, and their
# labels.
#

# + origin_pos=4 tab=["mxnet"]
#@save
def read_snli(data_dir, is_train):
    """Parse the SNLI dataset into premises, hypotheses, and labels.

    `data_dir` is the extracted dataset directory; `is_train` selects the
    train split when True and the test split otherwise.
    """
    def extract_text(s):
        # Remove information that we will not use: the parse-tree parentheses.
        s = re.sub('\\(', '', s)
        s = re.sub('\\)', '', s)
        # Substitute two or more consecutive whitespace characters with a space.
        s = re.sub('\\s{2,}', ' ', s)
        return s.strip()
    # Map gold labels to integer class ids; rows with any other label
    # (e.g. '-' meaning no gold label) are filtered out below.
    label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
    file_name = os.path.join(data_dir, 'snli_1.0_train.txt'
                             if is_train else 'snli_1.0_test.txt')
    with open(file_name, 'r') as f:
        # Skip the header row; fields are tab-separated.
        rows = [row.split('\t') for row in f.readlines()[1:]]
    premises = [extract_text(row[1]) for row in rows if row[0] in label_set]
    hypotheses = [extract_text(row[2]) for row in rows if row[0] \
                  in label_set]
    labels = [label_set[row[0]] for row in rows if row[0] in label_set]
    return premises, hypotheses, labels

# + [markdown] origin_pos=5
# Now let's [**print the first 3 pairs**] of premises and hypotheses, as well as
# their labels ("0", "1", and "2" correspond to "entailment", "contradiction",
# and "neutral", respectively).
#

# + origin_pos=6 tab=["mxnet"]
train_data = read_snli(data_dir, is_train=True)
for x0, x1, y in zip(train_data[0][:3], train_data[1][:3], train_data[2][:3]):
    print('前提：', x0)
    print('假设：', x1)
    print('标签：', y)

# + [markdown] origin_pos=7
# The training set has about 550000 pairs, and the testing set has about 10000
# pairs. The following shows that the three [**labels "entailment",
# "contradiction", and "neutral" are balanced**] in both the training set and
# the testing set.
#

# + origin_pos=8 tab=["mxnet"]
test_data = read_snli(data_dir, is_train=False)
for data in [train_data, test_data]:
    print([[row for row in data[2]].count(i) for i in range(3)])

# + [markdown] origin_pos=9
# ### [**Defining a Class for Loading the Dataset**]
#
# Below we define a class for loading the SNLI dataset. The variable `num_steps`
# in the class constructor specifies the length of a text sequence so that each
# minibatch of sequences will have the same shape. In other words, tokens after
# the first `num_steps` ones in a longer sequence are trimmed, while the special
# token "&lt;pad&gt;" is appended to a shorter sequence until its length becomes
# `num_steps`. By implementing the `__getitem__` function, we can arbitrarily
# access the premise, hypothesis, and label with the index `idx`.
#

# + origin_pos=10 tab=["mxnet"]
#@save
class SNLIDataset(gluon.data.Dataset):
    """A customized dataset for loading the SNLI dataset."""
    def __init__(self, dataset, num_steps, vocab=None):
        # num_steps: fixed sequence length after truncation/padding.
        self.num_steps = num_steps
        all_premise_tokens = d2l.tokenize(dataset[0])
        all_hypothesis_tokens = d2l.tokenize(dataset[1])
        if vocab is None:
            # Build a vocabulary from both premises and hypotheses.
            self.vocab = d2l.Vocab(all_premise_tokens + \
                                   all_hypothesis_tokens, min_freq=5,
                                   reserved_tokens=['<pad>'])
        else:
            # Reuse a vocabulary built elsewhere (e.g. from the training set).
            self.vocab = vocab
        self.premises = self._pad(all_premise_tokens)
        self.hypotheses = self._pad(all_hypothesis_tokens)
        self.labels = np.array(dataset[2])
        print('read ' + str(len(self.premises)) + ' examples')

    def _pad(self, lines):
        # Truncate or pad every token sequence to exactly num_steps ids.
        return np.array([d2l.truncate_pad(
            self.vocab[line], self.num_steps, self.vocab['<pad>'])
            for line in lines])

    def __getitem__(self, idx):
        return (self.premises[idx], self.hypotheses[idx]), self.labels[idx]

    def __len__(self):
        return len(self.premises)

# + [markdown] origin_pos=12
# ### [**Putting It All Together**]
#
# Now we can invoke the `read_snli` function and the `SNLIDataset` class to
# download the SNLI dataset and return `DataLoader` instances for both the
# training and testing sets, together with the vocabulary of the training set.
# It is noteworthy that we must use the vocabulary constructed from the
# training set as that of the testing set. As a result, any new token from the
# testing set will be unknown to the model trained on the training set.
#

# + origin_pos=13 tab=["mxnet"]
#@save
def load_data_snli(batch_size, num_steps=50):
    """Download the SNLI dataset and return data iterators and the vocabulary."""
    num_workers = d2l.get_dataloader_workers()
    data_dir = d2l.download_extract('SNLI')
    train_data = read_snli(data_dir, True)
    test_data = read_snli(data_dir, False)
    train_set = SNLIDataset(train_data, num_steps)
    # Reuse the training vocabulary so the test split shares token ids.
    test_set = SNLIDataset(test_data, num_steps, train_set.vocab)
    train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True,
                                       num_workers=num_workers)
    test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False,
                                      num_workers=num_workers)
    return train_iter, test_iter, train_set.vocab

# + [markdown] origin_pos=15
# Here we set the batch size to 128 and the sequence length to 50, and invoke
# the `load_data_snli` function to get the data iterators and the vocabulary.
# Then we print the vocabulary size.
#

# + origin_pos=16 tab=["mxnet"]
train_iter, test_iter, vocab = load_data_snli(128, 50)
len(vocab)

# + [markdown] origin_pos=17
# Now we print the shape of the first minibatch. Contrary to sentiment
# analysis, we have two inputs `X[0]` and `X[1]` representing pairs of premises
# and hypotheses.
#

# + origin_pos=18 tab=["mxnet"]
for X, Y in train_iter:
    print(X[0].shape)
    print(X[1].shape)
    print(Y.shape)
    break

# + [markdown] origin_pos=19
# ## Summary
#
# * Natural language inference studies whether a hypothesis can be inferred
#   from a premise, where both are a text sequence.
# * In natural language inference, relationships between premises and
#   hypotheses include entailment, contradiction, and neutrality.
# * The Stanford Natural Language Inference (SNLI) corpus is a relatively
#   popular benchmark dataset of natural language inference.
#
# ## Exercises
#
# 1. Machine translation has long been evaluated based on superficial n-gram
#    matching between an output translation and a ground-truth translation.
#    Can you design a measure for evaluating machine translation results by
#    using natural language inference?
# 1. How can we change hyperparameters to reduce the vocabulary size?
#

# + [markdown] origin_pos=20 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/5721)
#
submodules/resource/d2l-zh/mxnet/chapter_natural-language-processing-applications/natural-language-inference-and-dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Write a Python program to find urls in a string.
#
# Input
# '<p>Contents :</p><a href="https://w3resource.com">Python Examples</a><a href="http://github.com">Even More Examples</a>'
#
# Output
# Urls: ['https://w3resource.com', 'http://github.com']
#
# (Fix: the Input example previously showed a w3schools URL that matched
# neither the code below nor the expected Output; it is now consistent.)

import re

# Pre-compiled pattern for http/https URLs: the scheme followed by one or more
# characters from the classes below (letters, digits, common URL punctuation
# via the $-_ range, a few extra symbols, or a %-escaped byte).
URL_PATTERN = re.compile(
    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
)


def find_urls(text):
    """Return the list of http(s) URLs found in *text*, in order of appearance."""
    return URL_PATTERN.findall(text)


text = '<p>Contents :</p><a href="https://w3resource.com">Python Examples</a><a href="http://github.com">Even More Examples</a>'
urls = find_urls(text)
print("Original string: ", text)
print("Urls: ", urls)
regex/find_url_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In the first chart of the first example, you can see that while one column appears as yellow, the rest of the heatmap appears as green. This column **absorbs all the color variations**. To avoid this, you can **normalize** the data frame. You can normalize on columns or on rows. Several formula can be used, read [this page](https://en.wikipedia.org/wiki/Normalization_(statistics)) to find the one you need. # ## Column normalization # You can compare the charts below in order to see the difference between the initial data frame and the normalized version of it. # + # libraries import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import numpy as np # Create a dataframe where the average value of the second column is higher than others: df = pd.DataFrame(np.random.randn(10,10) * 4 + 3) df[1]=df[1]+40 # If we do a heatmap, we just observe that one column has higher values than others: sns.heatmap(df, cmap='viridis') plt.show() # Now if we normalize it by column: df_norm_col=(df-df.mean())/df.std() sns.heatmap(df_norm_col, cmap='viridis') plt.show() # - # ## Row normalization # The same principle works for row normalization. # + # libraries import seaborn as sns import matplotlib.pyplot as plt import pandas as pd import numpy as np # Create a dataframe where the average value of the second row is higher df = pd.DataFrame(np.random.randn(10,10) * 4 + 3) df.iloc[2]=df.iloc[2]+40 # If we do a heatmap, we just observe that one row has higher values than others: sns.heatmap(df, cmap='viridis') plt.show() # Normalize it by row: df_norm_row = df.apply(lambda x: (x-x.mean())/x.std(), axis = 1) # And see the result sns.heatmap(df_norm_row, cmap='viridis') plt.show()
src/notebooks/94-use-normalization-on-seaborn-heatmap.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # MATH665
#
# _<NAME> and <NAME>_

# + id="CvYHSX8XfUke"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_digits

rng = np.random.default_rng(42)

# + id="rVOrk6RHglC6"
# Data: ten 2-D training points (x1, x2); the first five belong to one class
# and the last five to the other, encoded as one-hot columns of y (2 x 10).
x1 = np.array([0.1, 0.3, 0.1, 0.6, 0.4, 0.6, 0.5, 0.9, 0.4, 0.7])
x2 = np.array([0.1, 0.4, 0.5, 0.9, 0.2, 0.3, 0.6, 0.2, 0.4, 0.6])
y = np.stack(
    [
        np.concatenate([np.ones(5), np.zeros(5)]),
        np.concatenate([np.zeros(5), np.ones(5)])
    ]
)

# + id="_SmRwsxzutOF"
print(rng)

# Initialize weights and biases for a 2 -> 2 -> 3 -> 2 network
# (layer l uses weight W{l} and bias b{l}).
W2 = 0.5 * rng.normal(size=(2, 2))
W3 = 0.5 * rng.normal(size=(3, 2))
W4 = 0.5 * rng.normal(size=(2, 3))
b2 = 0.5 * rng.normal(size=(2, 1))
b3 = 0.5 * rng.normal(size=(3, 1))
b4 = 0.5 * rng.normal(size=(2, 1))

# + id="JslQ8vT1utzf"
# Forward and Back propagate
eta = 0.05                  # learning rate
Niter = int(1e2)            # number of SG iterations
savecost = np.zeros(Niter)  # value of cost function at each iteration

# + id="IvjuGQG36-6T"
def activate(x, W, b):
    """
    Evaluates sigmoid function.

    x is the input vector, y is the output vector
    W contains the weights, b contains the shifts
    The ith component of y is activate((Wx+b)_i)
    where activate(z) = 1/(1+exp(-z))
    """
    return 1 / (1 + np.exp(-(W @ x + b)))


def cost(x1, x2, y, W2, W3, W4, b2, b3, b4):
    """Squared-error cost of the 3-layer network over the whole training set."""
    n = len(x1)
    costvec = np.zeros(n)
    for i in range(n):
        x = np.array([[x1[i]], [x2[i]]])
        a2 = activate(x, W2, b2)
        a3 = activate(a2, W3, b3)
        a4 = activate(a3, W4, b4)
        costvec[i] = np.linalg.norm(y[:, [i]] - a4, 2)
    costval = np.linalg.norm(costvec, 2) ** 2
    return costval


# + id="ji2WLCL2vGgz" tags=[]
# Stochastic-gradient training with the sigmoid activation.
n = len(x1)
for counter in range(Niter):
    k = rng.integers(n)  # choose a training point at random
    x = np.array([[x1[k]], [x2[k]]])

    # Forward pass
    a2 = activate(x, W2, b2)
    a3 = activate(a2, W3, b3)
    a4 = activate(a3, W4, b4)

    # Backward pass: for the sigmoid, sigma'(z) = a * (1 - a).
    delta4 = a4 * (1 - a4) * (a4 - y[:, [k]])
    delta3 = a3 * (1 - a3) * (W4.T @ delta4)
    delta2 = a2 * (1 - a2) * (W3.T @ delta3)

    # Gradient step
    W2 = W2 - eta * delta2 @ x.T
    W3 = W3 - eta * delta3 @ a2.T
    W4 = W4 - eta * delta4 @ a3.T
    b2 = b2 - eta * delta2
    b3 = b3 - eta * delta3
    b4 = b4 - eta * delta4

    # Monitor progress
    newcost = cost(x1, x2, y, W2, W3, W4, b2, b3, b4)
    print(f"{counter + 1}-th iteration costs: {newcost}")  # display cost to screen
    # savecost(counter) = newcost;
# -

# ## New activation function

# + id="IvjuGQG36-6T"
def activate(x, W, b):
    """
    Evaluates TANH function.

    x is the input vector, y is the output vector
    W contains the weights, b contains the shifts
    """
    ep = np.exp(W @ x + b)
    en = np.exp(-(W @ x + b))
    return (ep - en) / (ep + en)


def cost(x1, x2, y, W2, W3, W4, b2, b3, b4):
    """Squared-error cost over the training set (now with tanh activation).

    Note: targets in y are 0/1 while tanh outputs lie in (-1, 1), so the cost
    cannot reach zero exactly, but gradient descent still reduces it.
    """
    n = len(x1)
    costvec = np.zeros(n)
    for i in range(n):
        x = np.array([[x1[i]], [x2[i]]])
        a2 = activate(x, W2, b2)
        a3 = activate(a2, W3, b3)
        a4 = activate(a3, W4, b4)
        costvec[i] = np.linalg.norm(y[:, [i]] - a4, 2)
    costval = np.linalg.norm(costvec, 2) ** 2
    return costval


# + id="ji2WLCL2vGgz" tags=[]
# Stochastic-gradient training with the tanh activation.
n = len(x1)
for counter in range(Niter):
    k = rng.integers(n)  # choose a training point at random
    x = np.array([[x1[k]], [x2[k]]])

    # Forward pass
    a2 = activate(x, W2, b2)
    a3 = activate(a2, W3, b3)
    a4 = activate(a3, W4, b4)

    # Backward pass. FIX: tanh'(z) = 1 - tanh(z)**2 = 1 - a**2; the original
    # code reused the sigmoid derivative a * (1 - a) here, which computes the
    # wrong gradients for the tanh activation defined above.
    delta4 = (1 - a4 ** 2) * (a4 - y[:, [k]])
    delta3 = (1 - a3 ** 2) * (W4.T @ delta4)
    delta2 = (1 - a2 ** 2) * (W3.T @ delta3)

    # Gradient step
    W2 = W2 - eta * delta2 @ x.T
    W3 = W3 - eta * delta3 @ a2.T
    W4 = W4 - eta * delta4 @ a3.T
    b2 = b2 - eta * delta2
    b3 = b3 - eta * delta3
    b4 = b4 - eta * delta4

    # Monitor progress
    newcost = cost(x1, x2, y, W2, W3, W4, b2, b3, b4)
    print(f"{counter + 1}-th iteration costs: {newcost}")  # display cost to screen
    # savecost(counter) = newcost;
# -

# ## New Dataset

# + id="bou4_jqPfaHg"
digits = load_digits()
print(digits.data.shape)

# + id="ekGJlwdbgj4f"
plt.gray()
plt.matshow(digits.images[0])
plt.show()
# -

# One sample per COLUMN: X is (64 pixel features x 1797 samples) and y is the
# one-hot label matrix (10 classes x 1797 samples).
X = digits.data.T
y = pd.get_dummies(digits.target).values.T

# + id="_SmRwsxzutOF"
print(rng)

# Initialize weights and biases for a 64 -> 2 -> 3 -> 10 network.
W2 = 0.5 * rng.normal(size=(2, X.shape[0]))
W3 = 0.5 * rng.normal(size=(3, 2))
W4 = 0.5 * rng.normal(size=(y.shape[0], 3))
b2 = 0.5 * rng.normal(size=(2, 1))
b3 = 0.5 * rng.normal(size=(3, 1))
b4 = 0.5 * rng.normal(size=(y.shape[0], 1))

# + id="JslQ8vT1utzf"
# Forward and Back propagate
eta = 0.05                  # learning rate
Niter = int(1e2)            # number of SG iterations
savecost = np.zeros(Niter)  # value of cost function at each iteration

# + id="IvjuGQG36-6T"
def activate(x, W, b):
    """
    Evaluates sigmoid function.

    x is the input vector, y is the output vector
    W contains the weights, b contains the shifts
    The ith component of y is activate((Wx+b)_i)
    where activate(z) = 1/(1+exp(-z))
    """
    return 1 / (1 + np.exp(-(W @ x + b)))


def cost(X, y, W2, W3, W4, b2, b3, b4):
    """Squared-error cost over every sample (column) of X."""
    # FIX: samples are the COLUMNS of X, so the sample count is X.shape[1]
    # (=1797); the original used X.shape[0] (=64, the pixel-feature
    # dimension) and therefore evaluated the cost on only the first 64 samples.
    n = X.shape[1]
    costvec = np.zeros(n)
    for i in range(n):
        x = X[:, [i]]
        a2 = activate(x, W2, b2)
        a3 = activate(a2, W3, b3)
        a4 = activate(a3, W4, b4)
        costvec[i] = np.linalg.norm(y[:, [i]] - a4, 2)
    costval = np.linalg.norm(costvec, 2) ** 2
    return costval


# + id="ji2WLCL2vGgz" tags=[]
# Stochastic-gradient training on the digits data (sigmoid activation).
# FIX: as in cost(), draw training points from all X.shape[1] columns; the
# original sampled k from range(X.shape[0]) and so only ever trained on the
# first 64 of the 1797 digits.
n = X.shape[1]
for counter in range(Niter):
    k = rng.integers(n)  # choose a training point at random
    x = X[:, [k]]

    # Forward pass
    a2 = activate(x, W2, b2)
    a3 = activate(a2, W3, b3)
    a4 = activate(a3, W4, b4)

    # Backward pass (sigmoid derivative a * (1 - a)).
    delta4 = a4 * (1 - a4) * (a4 - y[:, [k]])
    delta3 = a3 * (1 - a3) * (W4.T @ delta4)
    delta2 = a2 * (1 - a2) * (W3.T @ delta3)

    # Gradient step
    W2 = W2 - eta * delta2 @ x.T
    W3 = W3 - eta * delta3 @ a2.T
    W4 = W4 - eta * delta4 @ a3.T
    b2 = b2 - eta * delta2
    b3 = b3 - eta * delta3
    b4 = b4 - eta * delta4

    # Monitor progress
    newcost = cost(X, y, W2, W3, W4, b2, b3, b4)
    print(f"{counter + 1}-th iteration costs: {newcost}")  # display cost to screen
    # savecost(counter) = newcost;
assignments/MATH665 Final Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Compare oil type volumes by vessel

# +
import pandas
import numpy as np
import matplotlib.pyplot as plt
import yaml
from pathlib import Path

# import functions for querying DOE and monte-carlo dataframes
from monte_carlo_utils import get_montecarlo_oil_byfac, get_montecarlo_oil
from monte_carlo_utils import get_oil_classification, get_DOE_df
from monte_carlo_utils import get_DOE_quantity_byfac, get_DOE_quantity

# +
#~~~~~ User inputs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Dept. of Ecology data files
DOE_dir = Path('/Users/rmueller/Data/MIDOSS/DeptOfEcology/')
DOE_2018_xlsx = DOE_dir/'MuellerTrans4-30-20.xlsx'
DOE_2021_xlsx = DOE_dir/'MuellerTrans_5-26-21.xlsx'

# Facility names and lat/lon information file
facilities_xlsx = Path(
    '/Users/rmueller/Data/MIDOSS/marine_transport_data/'
    'Oil_Transfer_Facilities.xlsx'
)

# 10,000 monte carlo spills
monte_carlo_csv = Path(
    '/Users/rmueller/Data/MIDOSS/monte_carlo/'
    'SalishSeaOilSpills_fixbarge_10000_1.csv'
)

# Oil Attribution file
oil_attribution_file = Path(
    '/Users/rmueller/Data/MIDOSS/marine_transport_data/'
    'oil_attribution.yaml'
)

# location of output excel spreadsheets
output_directory = Path(
    '/Users/rmueller/Data/MIDOSS/DeptOfEcology/'
)
graphics_directory = Path(
    '/Users/rmueller/Projects/MIDOSS/graphics_figures/monte_carlo'
)

# location of output .tex documents for writing tables to file
tex_dir = Path(
    '/Users/rmueller/Library/Mobile Documents/com~apple~CloudDocs/'
    'Documents/Publications/MIDOSS/MIDOSS_MuellerEtAl_paper1/Methods/'
    'notes/python_generated_tables')

data_types = ['total_gallons', 'fraction_of_total', 'number_of_transfers']

# oil types used in our study
oil_types = [
    'ANS','Bunker-C','Diesel','Gasoline','Jet Fuel',
    'Dilbit', 'Other'
]
# one color per entry in oil_types (positional correspondence)
oil_colors = [
    'orange', 'saddlebrown','darkslateblue',
    'steelblue','slateblue','olive',
    'darkgoldenrod'
]
# create a color dictionary for oil types to use in pie charts
colordict={}
for l,c in zip(oil_types,oil_colors):
    colordict[l]=c

# The precision used to calculate oil type weights
# Trial and error showed that a precision of 2 is necessary for weights
# to sum to 1.0
precision = 2

# unit conversions
gal2m3 = 0.00378541
# -

# ### Load data files

# +
# Oil Attribution File
# NOTE(review): yaml.Loader executes arbitrary tags; fine here for a trusted
# local file, but do not use it on untrusted input.
with open(oil_attribution_file) as file:
    oil_attrs = yaml.load(file, Loader=yaml.Loader)

# Facility information
facdf = pandas.read_excel(
    facilities_xlsx,
    sheet_name = 'Washington'
)

# New method converts DOE facility names to monte-carlo facility names
# in `get_DOE_df` function of `monte_carlo_utils.py`, so I now use the
# monte_carlo names uniformly to query both DOE and monte-carlo facilities
facility_names = oil_attrs['categories']['US_origin_destination']
# -

# ### Check to make sure that I got the DOE facility names right

facdf.head(1)

# load DOE data such that the three terminals that are grouped in our
# origin-destination analysis are re-named to the terminal that they
# are grouped with in our analysis.
df = get_DOE_df(DOE_2018_xlsx, facilities_xlsx, group = 'yes')

# Total gallons delivered by / received at each facility, sorted descending,
# with the facility column renamed to a common key for merging below.
df_del = df.loc[
    df.DelivererTypeDescription == 'Facility',
    ['Deliverer','TransferQtyInGallon']
].groupby('Deliverer').sum().sort_values(by='TransferQtyInGallon', ascending=False)
df_del.reset_index(inplace=True)
df_del = df_del.rename(columns={'Deliverer':'Facility_names'})

df_rec = df.loc[
    df.ReceiverTypeDescription == 'Facility',
    ['Receiver','TransferQtyInGallon']
].groupby('Receiver').sum().sort_values(by='TransferQtyInGallon', ascending=False)
df_rec.reset_index(inplace=True)
df_rec = df_rec.rename(columns={'Receiver':'Facility_names'})

# +
doe_del_facs = df_del['Facility_names'].to_list()
doe_rec_facs = df_rec['Facility_names'].to_list()

# NOTE(review): the printed counts are the TOTAL number of delivering /
# receiving facilities, not the number of deliver-only / receive-only ones
# listed beneath each header ("recieve" typo is in the printed output string,
# left unchanged here).
print(f'List of {len(doe_del_facs)} facilities that deliver cargo but do not recieve cargo')
print('--------------------------------------------------------------')
length=0
for facility in doe_del_facs:
    if facility not in doe_rec_facs:
        print(facility)
        length +=1
print('')
print(f'List of {len(doe_rec_facs)} facilities that receive cargo but do not deliver cargo')
print('--------------------------------------------------------------')
for facility in doe_rec_facs:
    if facility not in doe_del_facs:
        print(facility)
        length+=1

# + tags=[]
# merge the two lists into one table of all facilities with their outgoing,
# incoming, and total transfer volumes
all_facilities = pandas.merge(
    left=pandas.DataFrame(doe_del_facs).rename(columns={0:'Facility_names'}),
    right=pandas.DataFrame(doe_rec_facs).rename(columns={0:'Facility_names'}),
    how='outer',
    on='Facility_names'
)
all_facilities = pandas.merge(
    left = all_facilities,
    right = df_del,
    how='left',
    on = 'Facility_names'
)
all_facilities = pandas.merge(
    left = all_facilities,
    right = df_rec,
    how='left',
    on = 'Facility_names'
).fillna(0)
# _x came from df_del (deliveries out), _y from df_rec (deliveries in)
all_facilities = all_facilities.rename(
    columns={
        'TransferQtyInGallon_x':'TransferOutGallons',
        'TransferQtyInGallon_y':'TransferInGallons'}
)
all_facilities['TransferTotalGallons'] = (
    all_facilities['TransferOutGallons'] + \
    all_facilities['TransferInGallons']
)
all_facilities['TransferPercent'] = (
    100 * all_facilities['TransferTotalGallons']/\
    all_facilities['TransferTotalGallons'].sum()
)
# Flag facilities that are represented in the monte-carlo origin/destination set
all_facilities.loc[
    all_facilities['Facility_names'].isin(facility_names),
    'In Monte Carlo?'] = 'yes'
all_facilities.loc[
    ~all_facilities['Facility_names'].isin(facility_names),
    'In Monte Carlo?'] = '--'
percent_represented = all_facilities.loc[
    all_facilities['In Monte Carlo?'] == 'yes',
    ['TransferPercent']
].sum()
print(f'{percent_represented.item():.2f}% of WA oil cargo transfers'
      ' occurs at the marine terminals represented in our study'
)
all_facilities.to_latex(buf=tex_dir/'monte_carlo.tex',float_format="%.2e",index=False)
# all_facilities.to_latex(buf=tex_dir/'monte_carlo.tex',
#                         formatters={0:'s',
#                                     1:'.2e',
#                                     2:'.2e',
#                                     3:'.2e',
#                                     4:'3.0f',
#                                     5:'s'},
#                         index=False
#                        )
all_facilities.sort_values(by='TransferTotalGallons', ascending=False)

# +
# Facilities NOT represented in the monte-carlo set, largest movers first
missing_facilities=all_facilities.loc[
    ~all_facilities['Facility_names'].isin(facility_names)
].sort_values(by='TransferPercent', ascending=False)
missing_facilities.head()
# -

# ### Get DOE and monte-carlo attributions (both facility transfers and all transfers)

# +
#--------------------------------------------------------------------------------
# Sum DOE oil transfers to/from facilities by oil and vessel types
#--------------------------------------------------------------------------------
print('Getting DOE volume transfers by Salish Sea facilities')
exports, imports, combined = get_DOE_quantity_byfac(
    DOE_2018_xlsx, facilities_xlsx, facilities='selected'
)

#--------------------------------------------------------------------------------
# Sum all DOE oil transfers by oil and vessel types
#--------------------------------------------------------------------------------
print('Getting all DOE volume transfers for WA')
exports_all, imports_all, combined_all = get_DOE_quantity(DOE_2018_xlsx, facilities_xlsx)

#--------------------------------------------------------------------------------
# Sum monte-carlo tank capacities in spills file by vessel and oil types
# to estimate oil type traffic based only on transfers to/from marine facilities
#--------------------------------------------------------------------------------
mc_export = {}
mc_import = {}
mc_allUS = {}
for idx,vessel in enumerate(["tanker","atb","barge"]):
    # calculate total cargo_capacity by vessel type and oil type
    print(f'Getting monte-carlo {vessel} exports/imports to/from WA marine terminals')
    mc_export[vessel], mc_import[vessel] = get_montecarlo_oil_byfac(
        vessel,
        monte_carlo_csv
    )
    # calculate total cargo_capacity by vessel type and oil type
    print(f'Getting monte-carlo {vessel} representation of US oil transport')
    mc_allUS[vessel] = get_montecarlo_oil(
        vessel,
        monte_carlo_csv
    )

# Add zero entries for oil types that may not be in the monte-carlo file so
# that the DOE and monte-carlo information share the same format.
# TODO: move this into the monte_carlo_utils.py script; keeping both sources
# as key-value pairs ensures no ordering mis-match mistakes are made.
for vessel in ["tanker","atb","barge"]:
    mc_export[vessel] = dict(mc_export[vessel])
    mc_allUS[vessel] = dict(mc_allUS[vessel])
    # Add oil types missing in US traffic
    # e.g. Dilbit is missing in tanker, ANS is missing in ATBs
    for oil in oil_types:
        if oil not in mc_export[vessel].keys():
            mc_export[vessel][oil] = 0.0
            mc_allUS[vessel][oil] = 0.0
# -

# ### Check: Is there dilbit in monte-carlo

mcdf = pandas.read_csv(monte_carlo_csv)
mcdf.groupby(
    'Lagrangian_template'
).cargo_capacity.sum()

# ### Dilbit isn't showing up in terminal transfer plots (below).
# ID source(s)

dilbit = mcdf.loc[
    (mcdf.Lagrangian_template == 'Lagrangian_dilbit.dat'),
    ['vessel_origin','vessel_dest']
]
dilbit.shape

dilbit

# ### Create: Dictionary for percentage values and comparison

# Inline version: per-oil-type percent of total for DOE exports vs. the
# monte-carlo export attribution, plus their difference — one DataFrame per
# vessel type, using the module-level `exports`/`mc_export` built above.
percent_oil_df = {}
percentages = {}
for idx,vessel in enumerate(["tanker", "atb", "barge"]):
    percentages = {'DOE': [], 'monte-carlo': [], 'DOE_minus_monte-carlo': []}
    percentages['DOE'] = [
        100*exports[vessel][oil]/sum(exports[vessel].values())
        for oil in exports[vessel].keys()
    ]
    percentages['monte-carlo'] = [
        100*mc_export[vessel][oil]/sum(mc_export[vessel].values())
        for oil in exports[vessel].keys()
    ]
    percentages['DOE_minus_monte-carlo'] = [
        percentages['DOE'][idx] - percentages['monte-carlo'][idx]
        for idx in range(len(percentages['DOE']))
    ]
    percent_oil_df[vessel] = pandas.DataFrame(
        data=percentages,
        index=exports[vessel].keys()
    ).rename_axis(index=f'{vessel} export')


def calc_percent_difference(DOE_dict, MC_dict):
    """
    Inputs: dictionaries created by get_montecarlo_oil_byfac,
       get_montecarlo_oil, get_DOE_quantity, get_DOE_quantity_byfac
    Outputs: Dictionary organized by vessel type with columns corresponding
       to oil_type percentages for DOE, monte-carlo csv and difference
    """
    percent_oil_df = {}
    percentages = {}
    for idx,vessel in enumerate(["tanker", "atb", "barge"]):
        percentages = {
            'DOE': [],
            'monte-carlo': [],
            'DOE_minus_monte-carlo': []
        }
        percentages['DOE'] = [
            100*DOE_dict[vessel][oil]/sum(DOE_dict[vessel].values())
            for oil in DOE_dict[vessel].keys()
        ]
        # note: I'm using the DOE dictionary to loop through oil types
        # so that the order of the output lists are identical
        percentages['monte-carlo'] = [
            100*MC_dict[vessel][oil]/\
            sum(MC_dict[vessel].values())
            for oil in DOE_dict[vessel].keys()
        ]
        percentages['DOE_minus_monte-carlo'] = [
            percentages['DOE'][idx] - \
            percentages['monte-carlo'][idx] \
            for idx in range(len(percentages['DOE']))
        ]
        # FIX: index by the DOE_dict argument rather than the module-level
        # `exports` dictionary; the original used the global, which silently
        # produced wrong row labels for any other input dictionaries.
        percent_oil_df[vessel] = pandas.DataFrame(
            data=percentages,
            index=DOE_dict[vessel].keys()
        ).rename_axis(index=f'{vessel} export')
    return percent_oil_df

# ---
# ## Plot monte carlo and DOE representation of oil exports by vessel types
# ---

#--------------------------------------------------------------------------------
# Plot monte-carlo representation of oil export
#--------------------------------------------------------------------------------
# Get cargo exports by vessel type and add up cargo_capacities by oil type
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge"]):
    # add central title
    axes[idx].axis('equal')
    if idx==1:
        axes[idx].set_title(
            ('Monte Carlo exports FROM TERMINALS according to \n'
             'tank capacities and by oil type'),
            fontsize=18
        )
    # plot up results
    pie_wedge_collection = axes[idx].pie(
        mc_export[vessel].values(),
        labels = mc_export[vessel].keys(),
        wedgeprops=dict(width=0.5),
        textprops={'fontsize': 14}
    )
    # make colors uniform across subplots
    for pie_wedge in pie_wedge_collection[0]:
        pie_wedge.set_edgecolor('white')
        pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
    axes[idx].axis('off')
    axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'monte_carlo_oil_exports_v2')

#--------------------------------------------------------------------------------
## Plot department of ecology volume transfers from marine terminals by oil types
#--------------------------------------------------------------------------------
fig, axes = plt.subplots(1, 3, figsize = (15,5))
for idx,vessel in enumerate(["tanker","atb","barge",]):
    # add central title
    axes[idx].axis('equal')
    if idx==1:
        axes[idx].set_title(
            ('DOE exports FROM TERMINALS according to \n'
             'total gallons transferred to cargo vessels by oil type'),
            fontsize=18
        )
    # plot up results
    pie_wedge_collection = axes[idx].pie(
        exports[vessel].values(),
        # commented out version
        # labels = [
        #     f'{oil}({100*exports["atb"][oil]/sum(exports["atb"].values()):0.1f}%)' for oil in exports['atb'].keys()
        # ],
        labels = exports[vessel].keys(),
        wedgeprops=dict(width=0.5),
        textprops={'fontsize': 14}
    )
    # make colors uniform across subplots
    for pie_wedge in pie_wedge_collection[0]:
        pie_wedge.set_edgecolor('white')
        pie_wedge.set_facecolor(colordict[pie_wedge.get_label()])
        # commented out version parses label to get oil name for color dictionary
        #pie_wedge.set_facecolor(colordict[pie_wedge.get_label().split('(')[0]])
    axes[idx].axis('off')
    axes[idx].text(0,0,vessel,ha='center',fontsize=18)
plt.savefig(graphics_directory/'DOE_oil_exports_byterminals.png')

# ### Print: Percentages relating to differences in exports from marine terminals
# (corresponding to above graphic)

percent_test = calc_percent_difference(exports, mc_export)
percent_test

# ## Take aways:
# - We do tanker exports reasonably well. :-)
# - We do jet fuel reasonably well for all vessel
# - We tend to get Bunker-C right for tanker and barges
# - We show Bunker as the dominant ATB export where DOE shows gasoline (We under attribute gasoline and over-attribute bunker-C in atbs)
# - We show less Bunker-C in barge export and more diesel than DOE barge export
#
# Plausible explanations:
# - Our vessel join method is biased in which terminals it captures.
# - Barge tugs may be attributed as ATB tugs and visa versa (hence, attributed differently in our attribution than in DOE data) # --- # ## Plot representations of US oil transport according to our monte carlo and DOE oil transfers # --- # + #-------------------------------------------------------------------------------- # Plot monte-carlo representation of US oil transport #-------------------------------------------------------------------------------- fig, axes = plt.subplots(1, 3, figsize = (15,5)) for idx,vessel in enumerate(["tanker","atb","barge"]): # add central title axes[idx].axis('equal') if idx==1: axes[idx].set_title( ('Monte carlo representation of US oil cargo transport \n' 'according to tank capacities, vessel type and oil types'), fontsize=18 ) # plot up results pie_wedge_collection = axes[idx].pie( mc_allUS[vessel].values(), labels = mc_allUS[vessel].keys(), wedgeprops=dict(width=0.5), textprops={'fontsize': 14} ) # make colors uniform across subplots for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') pie_wedge.set_facecolor(colordict[pie_wedge.get_label()]) axes[idx].axis('off') axes[idx].text(0,0,vessel,ha='center',fontsize=18) plt.savefig(graphics_directory/'monte_carlo_USoil') #-------------------------------------------------------------------------------- ## Plot department of ecology volume transfers from marine terminals by oil types #-------------------------------------------------------------------------------- fig, axes = plt.subplots(1, 3, figsize = (15,5)) for idx,vessel in enumerate(["tanker","atb","barge",]): # add central title axes[idx].axis('equal') if idx==1: axes[idx].set_title( ('DOE representation of US oil cargo transfers \n' 'according to gallons transferred and sorted by ' 'vessel and oil types'), fontsize=18 ) # plot up results pie_wedge_collection = axes[idx].pie( combined_all[vessel].values(), labels = combined_all[vessel].keys(), wedgeprops=dict(width=0.5), textprops={'fontsize': 14} ) # make 
colors uniform across subplots for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') pie_wedge.set_facecolor(colordict[pie_wedge.get_label()]) axes[idx].axis('off') axes[idx].text(0,0,vessel,ha='center',fontsize=18) plt.savefig(graphics_directory/'DOE_oil.png') # - # ### Print: Percentages relating to US oil cargo transport # (and differences shown in above graphic) percent_test = calc_percent_difference(combined_all, mc_allUS) percent_test # --- # ## Plot DOE representation of oil imports and combined imports/exports for the marine terminals in our study # --- # + fig, axes = plt.subplots(1, 3, figsize = (15,5)) for idx,vessel in enumerate(["tanker","atb","barge",]): # calculate total cargo_capacity by vessel type and oil type net_import = imports[vessel].values() # add central title axes[idx].axis('equal') if idx==1: axes[idx].set_title( ('DOE imports TO TERMINALS according to \n' 'tank capacities and by oil type'), fontsize=18 ) # plot up results pie_wedge_collection = axes[idx].pie( net_import, labels = imports[vessel].keys(), wedgeprops=dict(width=0.5), textprops={'fontsize': 14} ) # make colors uniform across subplots for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') pie_wedge.set_facecolor(colordict[pie_wedge.get_label()]) axes[idx].axis('off') axes[idx].text(0,0,vessel,ha='center',fontsize=18) plt.savefig(graphics_directory/'DOE_oil_imports_byterminals') fig, axes = plt.subplots(1, 3, figsize = (15,5)) for idx,vessel in enumerate(["tanker","atb","barge",]): # calculate total cargo_capacity by vessel type and oil type net_combined = combined[vessel].values() # add central title axes[idx].axis('equal') if idx==1: axes[idx].set_title( ('DOE imports & exports combined TO/FROM TERMINALS according to \n' 'transfer quantities and sorted by vessel and oil types'), fontsize=18 ) # plot up results pie_wedge_collection = axes[idx].pie( net_combined, labels = combined[vessel].keys(), wedgeprops=dict(width=0.5), 
textprops={'fontsize': 14} ) # make colors uniform across subplots for pie_wedge in pie_wedge_collection[0]: pie_wedge.set_edgecolor('white') pie_wedge.set_facecolor(colordict[pie_wedge.get_label()]) axes[idx].axis('off') axes[idx].text(0,0,vessel,ha='center',fontsize=18) plt.savefig(graphics_directory/'DOE_oil_combined_byterminals') # - # ### NEXT: # - Plot up all cargo transfers # - Plot up all cargo oil in monte carlo (both terminal and non-terminal transfrrs) # - Plot up combined imports and exports to terminals #
notebooks/monte_carlo_dev/CompareOilTypeVolumeByVessel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fruit-image classifier: a small Conv2D stack trained on 25x25 RGB crops.

# Imports
import os
import matplotlib.pyplot as plt
from PIL import Image
import math

# Imports (TF2 Keras API)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf

# Report whether TensorFlow can see a GPU.
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

# +
# tf.debugging.set_log_device_placement(True)

# Smoke test: a small matmul to confirm TensorFlow executes correctly.
a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
c = tf.matmul(a, b)

print(c)

# +
# Data configuration
training_set_folder = './Training_smaller'
test_set_folder = './Test_smaller'

# Model configuration
batch_size = 25
img_width, img_height, img_num_channels = 25, 25, 3
loss_function = sparse_categorical_crossentropy
no_classes = 10
no_epochs = 25
optimizer = Adam()
verbosity = 1

# +
# Determine shape of the data
input_shape = (img_width, img_height, img_num_channels)

# Create a generator that rescales pixel values into [0, 1].
train_datagen = ImageDataGenerator(
    rescale=1./255
)
# Iterator over the training folder. class_mode='sparse' yields integer
# labels, matching the sparse_categorical_crossentropy loss above.
# (Renamed from `train_datagen` so the generator object is not shadowed
# by its own flow iterator.)
train_generator = train_datagen.flow_from_directory(
    training_set_folder,
    save_to_dir='./Data_generated',
    save_format='jpeg',
    batch_size=batch_size,
    target_size=(25, 25),
    class_mode='sparse')

# +
# Create the model: four conv blocks followed by a small dense head.
model = Sequential()
model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
model.add(Conv2D(32, kernel_size=(5, 5), activation='relu'))
model.add(Conv2D(64, kernel_size=(5, 5), activation='relu'))
model.add(Conv2D(128, kernel_size=(5, 5), activation='relu'))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(no_classes, activation='softmax'))

# Display a model summary
model.summary()

# + jupyter={"outputs_hidden": true}
# Compile the model
model.compile(loss=loss_function,
              optimizer=optimizer,
              metrics=['accuracy'])

# Start training
model.fit(
    train_generator,
    epochs=no_epochs,
    shuffle=False)

# +
# NOTE(review): these environment variables only take effect if set before
# TensorFlow initializes the GPU; here they run after training and are
# effectively a no-op — move this cell to the top if device pinning matters.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
# Fix: tf.ConfigProto and tf.Session were removed in TF2; this file otherwise
# uses the TF2 API, so the legacy session must go through tf.compat.v1.
config = tf.compat.v1.ConfigProto()
sess = tf.compat.v1.Session(config=config)
4_Fruit_classification/.ipynb_checkpoints/Nhap_fruit_class-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="UEDYQfp6SkIf" # # Lab 4. Text Classification with CNN # # In this lab, we are going to finally put all the previous knowledge into use to train our first neural NLP model. In particular, we are going to read the dataset, preprocess it and train a convolutional neural network using pretrained word vectors as inputs. # # Since we are going to build a deep network and we are going to have 25,000 texts, it's recommended that you run this notebook on a Cuda GPU. If you don't have one at your disposal, you can run this notebook on Google Colab. # # To do it, you just need to visit https://colab.research.google.com/ and upload and run this notebook there. Google Colab will allocate a GPU for you for about twelve hours or until you leave it inactive for some period of time. # # Also, if you are running this notebook on Google Colab, don't forget to go to `Runtime -> Change runtime type` and set `Runtime type` to `Python 3` and `Hardware acceleration` to `GPU`. # # This lab is based on [this tutorial](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb), so you can always visit it to get more information. However, we made some changes to the data loading part to make it more profound and flexible to use in other models. The original tutorial uses `torchtext` package to load the data. However, it is too high-level and it may be difficult to understand what is going under the hood. Additionaly, we will have more flexibility in adapting our own custom data loader to different tasks and datasets. # # With the setup being dealt with, we can proceed to building our classifier. # # ## Text classification # # Text classification is one of the most popular NLP tasks. 
It can be used to predict a genre of a document, or establish the authorship of a text. In this lab, we are going to predict a sentiment of a sentence, i.e. try to guess if a text (in our case, a review) is positive or negative. # # Here is the decription of a CNN classifier from the tutorial above: # # > Traditionally, CNNs are used to analyse images and are made up of one or more convolutional layers, followed by one or more linear layers. The convolutional layers use filters (also called kernels or receptive fields) which scan across an image and produce a processed version of the image. This processed version of the image can be fed into another convolutional layer or a linear layer. Each filter has a shape, e.g. a 3x3 filter covers a 3 pixel wide and 3 pixel high area of the image, and each element of the filter has a weight associated with it, the 3x3 filter would have 9 weights. In traditional image processing these weights were specified by hand by engineers, however the main advantage of the convolutional layers in neural networks is that these weights are learned via backpropagation. # # > The intuitive idea behind learning the weights is that your convolutional layers act like feature extractors, extracting parts of the image that are most important for your CNN's goal, e.g. if using a CNN to detect faces in an image, the CNN may be looking for features such as the existance of a nose, mouth or a pair of eyes in the image. # # > So why use CNNs on text? In the same way that a 3x3 filter can look over a patch of an image, a 1x2 filter can look over a 2 sequential words in a piece of text, i.e. a bi-gram. In the previous tutorial we looked at the FastText model which used bi-grams by explicitly adding them to the end of a text, in this CNN model we will instead use multiple filters of different sizes which will look at the bi-grams (a 1x2 filter), tri-grams (a 1x3 filter) and/or n-grams (a 1x$n$ filter) within the text. 
# # > The intuition here is that the appearance of certain bi-grams, tri-grams and n-grams within the review will be a good indication of the final sentiment. # # ## Data # # To train the classifier, we are going to use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment/). It contains 25,000 reviews for training and another 25,000 for testing. Both training and test sets contain 12,500 positive reviews and 12,500 negative reviews. # # In the next steps, we are going to build our own custom dataloader to load, preprocess, split, and batch the data. # + colab_type="code" id="mT16Bz_-LBmg" colab={} import torch from torchtext import data from torchtext import datasets from torch.utils.data import Dataset, DataLoader from torch.utils.data.sampler import SubsetRandomSampler import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import random import numpy as np from pathlib import Path import time # from: https://spacy.io/api/tokenizer from spacy.lang.en import English nlp = English() # Create a Tokenizer with the default settings for English # including punctuation rules and exceptions tokenizer = nlp.Defaults.create_tokenizer(nlp) # Check if we are running on a CPU or GPU device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # + [markdown] colab_type="text" id="9M1EfPLgYwmb" # Run the cell below if you want to save the files and trained models on your Google Drive. # + colab_type="code" id="EBwcjCdvShJ3" colab={} from google.colab import drive drive.mount('/content/drive') # + [markdown] colab_type="text" id="UdftAyx8ZBRu" # Let's download the vector file and unpack in to the `vector_cache/` folder. You can skip this step if you have already done it yourself. 
# + colab_type="code" id="e7WKqu2yMcXr" outputId="98ef21e4-067e-4e1c-9397-a4251c533ad9" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip

# + colab_type="code" id="QlLJUBe6Mx7z" outputId="a3fb274a-f965-40d1-aaf5-6109256aed8a" colab={"base_uri": "https://localhost:8080/", "height": 51}
# !unzip wiki-news-300d-1M.vec.zip -d vector_cache/

# + [markdown] colab_type="text" id="CETGAQ_YZ3mW"
# Let's download the dataset and unpack it into the `data/` folder. You can skip this step if you have already done it yourself.

# + colab_type="code" id="7cXrxb9WNQfq" outputId="f57d58e6-5dc8-4220-e288-136dd0ffa5fe" colab={"base_uri": "https://localhost:8080/", "height": 204}
# !wget https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz

# + colab_type="code" id="HIbXCHFHNg1b" colab={}
# !mkdir data/
# !tar -xzf aclImdb_v1.tar.gz -C data/

# + [markdown] colab_type="text" id="1OyMQ_KEaMfR"
# We are going to define some variables that we are going to need later.
#
# We will need the `<PAD>` and `<UNK>` symbols. `<PAD>` is needed to make the sentences in one batch have the same length. We are going to append this symbol at the end of each sentence to equalize the lengths. `<UNK>` is needed to replace the words for which we don't have a pretrained vector.
#
# We are also going to define the paths for our vector file and data folder, as well as the maximum number of vectors that we want to store.

# + colab_type="code" id="md73ChlvLBnc" colab={}
# Special symbols and their fixed positions at the front of every vocabulary.
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
VOCAB_PREFIX = [PAD, UNK]

# Locations of the pretrained vectors and of the IMDB data.
VEC_PATH = Path('vector_cache') / 'wiki-news-300d-1M.vec'
DATA_PATH = Path('data') / 'aclImdb'
MAX_VOCAB = 25000

# Batching / splitting configuration.
batch_size = 64
validation_split = .3
shuffle_dataset = True
random_seed = 42

# + [markdown] colab_type="text" id="2yUjLPW1bK-q"
# First, let's prepare a vocabulary for our pretrained vectors. Since the input to our model should be an index of a word, we need to build it to map from words to indices.
#
# Below, we define a `PretrainedWordVocab` class that is going to take a list of words and build a vocab based on it. We also define some methods that we are going to use:
#
# - `normalize_unit()` to put the word to lowercase if the `lower` argument is set to `True`.
# - `unit2id()` to return the index of a word in the vocab or an `<UNK>` index otherwise.
# - `id2unit()` to return a word given its index in the vocab.
# - `map()` to return a list of indices given a list of words.
# - `build_vocab()` to initialize the vocab

# + colab_type="code" id="KB0tPL5DLBnn" colab={}
class PretrainedWordVocab:
    """Maps words to integer ids (and back) for a pretrained embedding matrix.

    Ids 0 and 1 are reserved for `<PAD>` and `<UNK>`; the words in `data`
    occupy ids 2 and up in their original order, matching the embedding rows.
    """

    def __init__(self, data, lower=False):
        self.data = data
        self.lower = lower
        self.build_vocab()

    def normalize_unit(self, unit):
        """Lowercase the word when the vocab is case-insensitive."""
        return unit.lower() if self.lower else unit

    def unit2id(self, unit):
        """Return the id of `unit`, or the `<UNK>` id when it is out of vocab."""
        # NOTE(review): only the *lookup* is normalized — the stored words are
        # not — so with lower=True a cased word in `data` becomes unreachable.
        # Confirm the vector file is expected to be lowercase.
        return self._unit2id.get(self.normalize_unit(unit), self._unit2id[UNK])

    def id2unit(self, id):
        """Return the word stored at index `id`."""
        return self._id2unit[id]

    def map(self, units):
        """Convert a list of words into the corresponding list of ids."""
        return [self.unit2id(token) for token in units]

    def build_vocab(self):
        """Build the id<->word tables, prefixed with the special symbols."""
        self._id2unit = VOCAB_PREFIX + self.data
        self._unit2id = {word: idx for idx, word in enumerate(self._id2unit)}

    def __len__(self):
        return len(self._unit2id)

# + [markdown] colab_type="text" id="maj8w5GEcykb"
# Next, we need to create the `Pretrain` class to store the pretrained vectors and the vocab that we defined above. The vectors are going to be stored as a numpy array.
# + colab_type="code" id="Ryz6vtNoLBnt" colab={} class Pretrain: def __init__(self, vec_filename, max_vocab=-1): self._vec_filename = vec_filename self._max_vocab = max_vocab @property def vocab(self): if not hasattr(self, '_vocab'): self._vocab, self._emb = self.read() return self._vocab @property def emb(self): if not hasattr(self, '_emb'): self._vocab, self._emb = self.read() return self._emb def read(self): if self._vec_filename is None: raise Exception("Vector file is not provided.") print(f"Reading pretrained vectors from {self._vec_filename}...") words, emb, failed = self.read_from_file(self._vec_filename, open_func=open) if failed > 0: # recover failure emb = emb[:-failed] if len(emb) - len(VOCAB_PREFIX) != len(words): raise Exception("Loaded number of vectors does not match number of words.") # Use a fixed vocab size if self._max_vocab > len(VOCAB_PREFIX) and self._max_vocab < len(words): words = words[:self._max_vocab - len(VOCAB_PREFIX)] emb = emb[:self._max_vocab] vocab = PretrainedWordVocab(words, lower=True) return vocab, emb def read_from_file(self, filename, open_func=open): """ Open a vector file using the provided function and read from it. """ first = True words = [] failed = 0 with open_func(filename, 'rb') as f: for i, line in enumerate(f): try: line = line.decode() except UnicodeDecodeError: failed += 1 continue if first: # the first line contains the number of word vectors and the dimensionality first = False line = line.strip().split(' ') rows, cols = [int(x) for x in line] emb = np.zeros((rows + len(VOCAB_PREFIX), cols), dtype=np.float32) continue line = line.rstrip().split(' ') emb[i+len(VOCAB_PREFIX)-1-failed, :] = [float(x) for x in line[-cols:]] words.append(' '.join(line[:-cols])) return words, emb, failed # + [markdown] colab_type="text" id="3ktSIbcqdKWO" # Finally, we need to define the dataset class `IMDBDataSet` that is going to load and preprocess our data files. 
# Inside the data folder, we have `train` and `test` folders that have `neg` and `pos` folders inside of them. Each of these folders has a review as a separate file.
#
# We are going to iterate through each file inside these folders, read the text, tokenize it with [Spacy tokenizer](https://spacy.io/api/tokenizer) and replace the words with the indices using the `PretrainedWordVocab` that we created earlier.
#
# We also need our custom class to inherit from the `torch.utils.data.Dataset` class. Finally, we need to define the `__len__()` method to know how big is our dataset and `__getitem__()` method to get one sample at a given index.

# + colab_type="code" id="AHnV7tRJLBnx" colab={}
class IMDBDataSet(Dataset):
    """IMDB reviews as (word-id list, int label) pairs; 0 = neg, 1 = pos."""

    def __init__(self, pretrain, data_folder='.data', test=False):
        # NOTE(review): the default '.data' is a str, but the `/` operator
        # below requires a pathlib.Path — callers in this notebook always
        # pass DATA_PATH, so the default is effectively dead.
        self.pretrain_vocab = pretrain.vocab
        self.label_vocab = {'neg': 0, 'pos': 1}
        if test:
            self.data_folder = data_folder / 'test'
        else:
            self.data_folder = data_folder / 'train'
        self.data = []
        if self.data_folder.exists():
            self.load()
        else:
            raise ValueError("Data path doesn't exist!")

    def load(self):
        """Read every review file under pos/ and neg/, storing (ids, label)."""
        for label in ['pos', 'neg']:
            print(f'Reading {label} sentences...')
            p = self.data_folder / label
            for fname in p.glob('*.txt'):
                with open(fname, 'r', encoding='utf-8') as f:
                    # Each review file holds the whole review on one line.
                    text = [token.text for token in tokenizer(f.readline())]
                    self.data.append(
                        (self.pretrain_vocab.map(text), self.label_vocab[label])
                    )

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

# + [markdown] colab_type="text" id="QBTTvrcgewAE"
# Additionally, we need to define a function to pad all the sentences in the batch to the same length. To do this, we are going to first find the longest sequence in the batch and use its length to create a torch tensor of size `(batch_size, max_len)` filled with `0` that is our padding id. Later, we are just going to copy each sequence into the beginning of the corresponding row of our new batch tensor.
# Don't forget that the `nn.Embedding` layer that we are going to use later requires indices to be of type `long`. We are also going to put the labels, with `0` corresponding to `negative` and `1` to `positive`, into the `labels` tensor of length `batch_size`. To be able to use them to calculate the loss, each label must be of type `float`.
#
# Finally, don't forget to convert all the tensors to the current device with `.to(device)`.

# + colab_type="code" id="xe34jVx2LBn0" colab={}
def pad_sequences(batch):
    """Collate (ids, label) samples into padded id and label tensors.

    Every sequence is right-padded with 0 (the `<PAD>` id) up to the longest
    sequence in the batch. Returns a long tensor of shape (batch, max_len)
    and a float label tensor, both moved to the current device.
    """
    longest = max(len(ids) for ids, _ in batch)
    seq_tensor = torch.zeros((len(batch), longest), dtype=torch.long)
    label_tensor = torch.zeros(len(batch), dtype=torch.float)
    for row, (ids, label) in enumerate(batch):
        seq_tensor[row, :len(ids)] = torch.LongTensor(ids)
        label_tensor[row] = label
    return seq_tensor.to(device), label_tensor.to(device)

# + [markdown] colab_type="text" id="Z-O_HtNqgQfA"
# Now, we can finally load our data and pretrained vectors. It will take some time...

# + colab_type="code" id="rRPKP4YfLBn8" colab={}
pretrain = Pretrain(VEC_PATH, MAX_VOCAB)

# + colab_type="code" id="eCMyYcj1LBoC" outputId="894e2c90-a393-4850-936a-0b30bea10351" colab={"base_uri": "https://localhost:8080/", "height": 68}
train_data = IMDBDataSet(pretrain, DATA_PATH)

# + colab_type="code" id="U674r0iXLBoJ" outputId="cf0b74d3-945d-4851-d704-5846b0ca4922" colab={"base_uri": "https://localhost:8080/", "height": 51}
test_data = IMDBDataSet(pretrain, DATA_PATH, test=True)

# + [markdown] colab_type="text" id="CTiKgSxdhMkO"
# The last step in our data preparation is to define the train and validation splits. We are going to use the validation set to see how the model performs during the training. It is important to be able to see if the model is overfitting or not.
#
# To do that, we will just create a range of indices from `0` to the size of the training data. Then, we are going to define an index on which we are going to split the data.
# Optionally, we can shuffle our indices before splitting.
#
# With these indices for train and validation datasets, we are going to create two corresponding `torch.utils.data.SubsetRandomSampler` objects that we are going to pass to the `torch.utils.data.DataLoader` objects in the next step.

# + colab_type="code" id="F_6Y55v9LBn4" colab={}
# Creating data indices for training and validation splits:
dataset_size = len(train_data)
indices = list(range(dataset_size))
# The first `split` indices become the validation set (30% with the
# validation_split defined above).
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
    # Seed immediately before shuffling so the split is reproducible.
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)

# + [markdown] colab_type="text" id="Zuyq7dLjiRZ1"
# Here, for each set, we are going to create a `DataLoader` object that is going to create a batch iterator for us. We will pass to it our `IMDBDataSet` object as a source of data. Batch size as a `batch_size` argument. To specify train and validation splits, we are going to pass the corresponding `SubsetRandomSampler` objects as a `sampler` argument for the training set. Finally, we need to pass our `pad_sequences()` function as a `collate_fn` argument to tell the data loader how to prepare the batches so that they have the same length.
# + colab_type="code" id="wgn22jTLLBoN" colab={}
# Batch iterators. Train and validation loaders share `train_data` and are
# kept disjoint purely by their samplers; the test loader uses no sampler.
train_loader = DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, collate_fn=pad_sequences)
validation_loader = DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, collate_fn=pad_sequences)
test_loader = DataLoader(test_data, batch_size=batch_size, collate_fn=pad_sequences)

# + [markdown] colab_type="text" id="JyqT79e3jL4r"
# The model description and the code below is taken from [the Build the Model section of this tutorial](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb). Please, refer to it for the necessary details.

# + colab_type="code" id="DnEhkstjLBoQ" colab={}
class CNN(nn.Module):
    """Convolutional sentence classifier over pretrained word embeddings.

    One Conv2d per filter size slides over the embedded sentence (an n-gram
    detector per filter); each feature map is max-pooled over time and the
    pooled features are concatenated and projected to `output_dim` logits.
    """

    def __init__(self, pretrain, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout, pad_idx):
        # `vocab_size` is unused here: the embedding shape is taken entirely
        # from the pretrained matrix below.
        super().__init__()
        # freeze=True keeps the pretrained vectors fixed during training;
        # pad_idx marks the <PAD> row.
        self.embedding = nn.Embedding.from_pretrained(
            torch.from_numpy(pretrain.emb),
            padding_idx=pad_idx,
            freeze=True
        )
        self.convs = nn.ModuleList([
            nn.Conv2d(in_channels = 1, out_channels = n_filters, kernel_size = (fs, embedding_dim))
            for fs in filter_sizes
        ])
        self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, text):
        #text = [batch size, sent len]
        embedded = self.embedding(text)
        #embedded = [batch size, sent len, emb dim]
        embedded = embedded.unsqueeze(1)
        #embedded = [batch size, 1, sent len, emb dim]
        conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
        #conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]
        pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]
        #pooled_n = [batch size, n_filters]
        cat = self.dropout(torch.cat(pooled, dim = 1))
        #cat = [batch size, n_filters * len(filter_sizes)]
        # Raw logits: the sigmoid is applied by BCEWithLogitsLoss / the caller.
        return self.fc(cat)

# + colab_type="code" id="lrzLk7fTLBoV" colab={}
# Hyperparameters; EMBEDDING_DIM is read off the loaded vector matrix.
INPUT_DIM = len(pretrain.vocab)
EMBEDDING_DIM = pretrain.emb.shape[1]
N_FILTERS = 100
FILTER_SIZES = [3,4,5]
OUTPUT_DIM = 1
DROPOUT = 0.5

model = CNN(pretrain, INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_ID)

# + colab_type="code" id="XtCisPr9LBoc" colab={}
# Single-logit output with BCEWithLogitsLoss: sigmoid is fused into the loss.
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)

# + colab_type="code" id="a0sYlvYpLBoi" colab={}
def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float() #convert into float for division
    acc = correct.sum() / len(correct)
    return acc

# + colab_type="code" id="eEvv1JenLBon" colab={}
def train(model, iterator, optimizer, criterion):
    """Run one training epoch; return (mean loss, mean accuracy) per batch."""
    epoch_loss = 0
    epoch_acc = 0
    # Enable training-mode behavior (dropout active).
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        # batch[0] = padded id tensor, batch[1] = float labels (see pad_sequences).
        predictions = model(batch[0]).squeeze(1)
        loss = criterion(predictions, batch[1])
        acc = binary_accuracy(predictions, batch[1])
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

# + colab_type="code" id="7yw_-9W8LBot" colab={}
def evaluate(model, iterator, criterion):
    """Score the model on an iterator without updating weights."""
    epoch_loss = 0
    epoch_acc = 0
    # Evaluation mode disables dropout; no_grad skips gradient bookkeeping.
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch[0]).squeeze(1)
            loss = criterion(predictions, batch[1])
            acc = binary_accuracy(predictions, batch[1])
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)

# + colab_type="code" id="dzP7wNcALBoz" colab={}
# Re-imported here so this cell also runs standalone.
import time

def epoch_time(start_time, end_time):
    """Split an elapsed-seconds interval into whole (minutes, seconds)."""
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs

# + colab_type="code" id="sSmBPJNqLBo5" outputId="0f4aeeca-2ed1-4310-d8cf-ee48997501aa" colab={"base_uri": "https://localhost:8080/", "height": 272}
N_EPOCHS = 5

# Best validation loss seen so far; used below to decide when to checkpoint.
best_valid_loss = float('inf')
# Main training loop: train, validate, and checkpoint on improvement.
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_loader, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, validation_loader, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Save the weights only when validation loss improves.
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'imdb_cnn_classifier.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')

# + id="3C-LI6oGrRF-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="c8b5402d-7da1-4a70-9d59-531dc9295939"
# Final evaluation on the held-out test set.
# NOTE(review): this scores the model as it stands after the LAST epoch, not
# the checkpointed best one — load 'imdb_cnn_classifier.pt' first to evaluate
# the best validation model.
start_time = time.time()
test_loss, test_acc = evaluate(model, test_loader, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: test | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTest Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')

# + [markdown] colab_type="text" id="bJ2k7s81Dony"
# ## User input
#
# Once we trained our model, we can try to predict the sentiment of our own input. We are going to define the `predict_sentiment()` function that is going to take our trained model and a sentence as an argument.
#
# First, we need to switch the model to evaluation mode by calling `model.eval()` on it. Then, we are going to tokenize the sentence the same way as we tokenized the input. If the sentence is shorter than the `min_len` parameter, we are going to add the padding symbols to it, so our model doesn't throw an error. After that, we turn the words into indices with the same vocabulary that we built for training. Finally, we transform the output into a tensor and add an empty dimension in the beginning, imitating a batch of size 1.
#
# As we remember from the training part, `0` was a negative sentiment and `1` was positive.
# Thus, the closer to `0` is our prediction, the more negative is the sentiment and the opposite is true for positive.

# + colab_type="code" id="CS4ZMuw-LBo_" colab={}
def predict_sentiment(model, sentence, min_len = 5):
    """Score a raw sentence with the trained classifier.

    Returns a float in (0, 1): values near 0 are negative, values near 1 are
    positive. Sentences shorter than `min_len` tokens are right-padded with
    `<PAD>` so the widest convolution filter still fits.
    """
    model.eval()
    tokens = [tok.text for tok in tokenizer(sentence)]
    shortfall = min_len - len(tokens)
    if shortfall > 0:
        tokens = tokens + [PAD] * shortfall
    word_ids = pretrain.vocab.map(tokens)
    batch = torch.LongTensor(word_ids).to(device)
    batch = batch.unsqueeze(0)  # fake batch dimension of size 1
    return torch.sigmoid(model(batch)).item()

# + colab_type="code" id="aToaCvbyC7fW" outputId="3a86ff37-a6c0-4009-d2ad-c1bcf2b9c623" colab={"base_uri": "https://localhost:8080/", "height": 34}
predict_sentiment(model, "This film is so bad that I had to wash my eyes with bleach after watching it")

# + colab_type="code" id="HF7hS4-FC_Qe" outputId="7fbda8a8-61df-4d3d-e0a9-85128b7dffec" colab={"base_uri": "https://localhost:8080/", "height": 34}
predict_sentiment(model, "After watching this movie, I felt that I'm in heaven")

# + [markdown] colab_type="text" id="F2WkdWEoFB6Z"
# # References
#
# - [Github tutorial on CNN](https://github.com/bentrevett/pytorch-sentiment-analysis/blob/master/4%20-%20Convolutional%20Sentiment%20Analysis.ipynb)
# - [<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2011, June). Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies-volume 1 (pp. 142-150). Association for Computational Linguistics.](https://dl.acm.org/doi/10.5555/2002472.2002491)
# - [<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). Fasttext. zip: Compressing text classification models. arXiv preprint arXiv:1612.03651.](https://arxiv.org/abs/1612.03651)
#
labs/lab4/Lab4_TextClassificationCNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error import math data = pd.read_csv('house_prices.csv') data x = data[['bedrooms','sqft_living','waterfront','view','yr_renovated','lat','long','grade']] x y = data['price'] y # we use linear Regression + fit() method for training model = LinearRegression() model.fit(x,y) # by deafault 80/20 splited # MSE and R value #regression_model_mse = mean_squared_error(x, y) #print("MSE: ", math.sqrt(regression_model_mse)) print("R square value: ",model.score(x,y)) # we can get the b values after the model fit # this is b1 print(model.coef_)#slop # This is b0 in our model print('*********************') print(model.intercept_)#intersept x.head(2) # predicting the price print("Prediction by model: ", model.predict([[2,1100,0,0,2021,47,-122.00,8]])) len(model.predict(x)) x # visualize the data-set with the fitted model plt.scatter(x['waterfront'],y,color="green") plt.plot(x,model.predict(x),color="black") plt.title("Linear Regression") plt.xlabel("Size") plt.ylabel("Price") plt.show() # visualize the data-set with the fitted model plt.scatter(x['sqft_living'],y,color="green") plt.plot(x,model.predict(x),color="black") plt.title("Linear Regression") plt.xlabel("Size") plt.ylabel("Price") plt.show() # visualize the data-set with the fitted model plt.scatter(x['bedrooms'],y,color="green") plt.plot(x,model.predict(x),color="black") plt.title("Linear Regression") plt.xlabel("Size") plt.ylabel("Price") plt.show()
Multiple Linear Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### ESMA 3016
# ### <NAME>
# ### Lab22: Using the statsmodels module for inference about the mean; z test for proportions.
# #### October 2020

import numpy as np
import statsmodels.api as sm
import statsmodels.stats.api as sms
import statsmodels.formula.api as smf
from statsmodels.stats.proportion import proportions_ztest
import matplotlib.pyplot as plt
# %matplotlib inline

# Case 1: normal population with known variance (sigma^2).
#
# ### Confidence interval for the mean
#
# Example: 90% confidence interval for the population mean cholesterol of all
# cardiac patients, assuming a normal distribution with known standard
# deviation (sigma = 13), based on this sample of 20 patients.
colest = [217, 223, 225, 245, 238, 216, 217, 226, 202, 233, 235, 242,
          219, 221, 234, 199, 236, 248, 218, 224]
sm.stats.zconfint(colest, alpha=.10)

# Hypothesis test:
#   H0: mu = 220
#   Ha: mu > 220  (one-sided, 'larger')
z_stat, p_val = sm.stats.ztest(colest, value=220, alternative='larger')
print("El valor de la prueba estadistica es", z_stat, "El p-values es:", p_val)

# #### Case 2: normal population, unknown variance (sigma^2), small sample.
#
# The sample mean x-bar estimates the population mean mu; the t-based
# 100(1-alpha)% interval is
#   x-bar -/+ t(alpha/2, n-1) * s / sqrt(n)

# Example 1: 95% confidence interval for the mean GPA of all graduating
# students, based on a sample of 14 (population assumed normal).
gpa = [3.15, 2.80, 2.56, 3.17, 3.73, 2.77, 3.18,
       3.12, 3.33, 2.99, 2.71, 2.83, 3.45, 3.55]
sms.DescrStatsW(gpa).tconfint_mean()

# Example 7.5: survival times (in years) of 12 heart-transplant patients.
surv = [3.1, .9, 2.8, 4.3, .6, 1.4, 5.8, 9.9, 6.3, 10.4, 0, 11.5]

# 99% confidence interval for the mean survival time of all heart-transplant
# patients.
sms.DescrStatsW(surv).tconfint_mean(alpha=.01)

# #### Test whether the mean survival time exceeds 4 years
# (one-sided t test; reports statistic, p-value and degrees of freedom = 11).
sms.DescrStatsW(surv).ttest_mean(4, "larger")

# ## Inference for proportions
#
# Example 7.7: in 1990, 2 out of 5 people favoured expanding nuclear power
# (p0 = 0.4).  A 1996 survey of 1225 people found 478 in favour.  Is there
# evidence that opinion changed between 1990 and 1996?
z_prop, p_prop = proportions_ztest(478, 1225, .4)
print("Prueba=", z_prop, "p-value=", p_prop)
# z = -0.702, p-value = 0.4821 -> do not reject the null hypothesis.

# Example 7.8: a hospital director claims 25% of births there are caesarean;
# a physician suspects the true rate is higher.  Data: the 25 births recorded
# during one week.
partos = ['cesarea', 'normal', 'cesarea', 'normal', 'normal',
          'normal', 'normal', 'cesarea', 'normal', 'cesarea',
          'normal', 'cesarea', 'normal', 'normal', 'normal',
          'normal', 'normal', 'cesarea', 'normal', 'normal',
          'cesarea', 'normal', 'normal', 'cesarea', 'normal']
n_cesarea = partos.count('cesarea')
n_births = len(partos)
proportions_ztest(n_cesarea, n_births, value=.25, alternative='larger')
# Approximate z = 0.75, p-value = 0.226 -> do not reject the null hypothesis.
Notebooks/Lab22v3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.13 ('domino')
#     language: python
#     name: python3
# ---

# %load_ext autoreload
# %autoreload 2

import os

import dcbench

# Point dcbench at a local copy of the CelebA images.
dcbench.config.celeba_dir = os.path.expanduser("~/data/celeba")

# Fetch the slice-discovery task and one concrete problem instance from it.
task = dcbench.tasks["slice_discovery"]
problem = task.problems["p_72776"]

# Inspect the available problems and the model artifacts they ship with.
task.problems

task.problems['p_118660']["model"]

problem["model"]
examples/05_evaluate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.2.0
#     language: julia
#     name: julia-1.2
# ---

# # Problem definition
#
# Create some data, in this case a parabola plus white noise.
# The noise amplitude grows linearly with the sample index (i/1000),
# so later points are noisier than earlier ones.

# +
using Plots, Random
Random.seed!(2017)  # fixed seed for a reproducible noise realization

N = 100
x = range(0, stop=1, length=N)
y = x.^2 .+ [i/1000*randn() for i=1:N]

scatter(x, y, xlab="x", ylab="y", label="data")
# -

# Define the problem: estimate :y from the scattered points onto a
# regular 1-D grid spanning [0, 1] with N cells.

# +
using GeoStats

# PointSetData expects coordinates as a matrix with one column per point,
# hence the transpose of the 1-D range.
sdata = PointSetData(Dict(:y => y), transpose(x))
sdomain = RegularGrid((0.,), (1.,), dims=(N,))
problem = EstimationProblem(sdata, sdomain, :y)
# -

# # Solver options
#
# The user can specify the number of neighbors (default to all data locations),
# a kernel (or variogram) function (default to `ExponentialVariogram`), and a
# metric from the [Distances.jl](https://github.com/JuliaStats/Distances.jl)
# package (default to Euclidean).

# +
using LocallyWeightedRegression

# range=3/10 controls how quickly a data point's weight decays with distance.
solver = LocalWeightRegress(
  :y => (variogram=ExponentialVariogram(range=3/10),)
)

solution = solve(problem, solver)

# +
# yhat: point estimates; yvar: estimation variance used as the ribbon width.
yhat, yvar = solution[:y]

scatter(x, y, xlab="x", ylab="y", label="data")
plot!(x, yhat, ribbon=yvar, fillalpha=.5, label="LWR")
docs/Usage.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:percent
#     text_representation:
#       extension: .py
#       format_name: percent
#       format_version: '1.3'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %% [markdown]
# # Covid19dynstat - JupyterDash

# %% [markdown]
# The `jupyter-dash` package makes it easy to develop Plotly Dash apps from
# Jupyter Notebook and JupyterLab: simply use the `jupyter_dash.JupyterDash`
# subclass wherever the standard `dash.Dash` class would be used.

# %%
from jupyter_dash import JupyterDash

# %%
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_player
import pandas as pd

# %% [markdown]
# When running in JupyterHub (or Binder), call `infer_jupyter_proxy_config`
# to detect the proxy configuration, i.e. the proper request_pathname_prefix
# and server_url values to use when displaying Dash apps.  For example:
# server_url = `https://jupyter-jsc.fz-juelich.de`,
# request_pathname_prefix = `/user/j.goebbert@fz-juelich.de/jureca_login/`.
# For details see
# https://github.com/plotly/jupyter-dash/blob/v0.2.1.post1/jupyter_dash/comms.py#L33

# %%
JupyterDash.infer_jupyter_proxy_config()

# %% [markdown]
# **Attention:** this cell may need to run twice: press play, wait a bit,
# then hit play again while it still shows `[*]`.

# %% [markdown]
# #### Create a Dash Flask server
# Requests the browser to load Bootstrap.

# %%
# Select a theme:
#   dbc.themes.BOOTSTRAP -> default (light) theme
#   dbc.themes.CYBORG    -> dark theme
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = 'Covid-19-Interaktionsmodell'

# Expose the underlying Flask server.
server = app.server
#print(app.get_asset_url('aaa'))

# %%
#import base64
from textwrap import dedent
from datetime import datetime as dt, timedelta
from dash.dependencies import Input, Output, State
asset_url=app.get_asset_url('assets') # = $JUPYTERHUB_SERVICE_PREFIX/assets # print(asset_url) # example: "https://jupyter-jsc.fz-juelich.de/user/<user>@fz-juelich.de/<machine>/proxy/8050/assets/" metadata = pd.read_csv("assets/metadata.csv") min_date=dt(2020, 1, 29).date() max_date=dt(2020, 6, 16).date() # dt.today().date() init_date=dt(2020, 6, 16).date() # dt.today().date() init_countyid=1001 deltadays = 26 def get_assets_dir(date): date = dt.strptime(date.split(' ')[0], '%Y-%m-%d') assets_dir = (date -timedelta(days=deltadays)).strftime('%Y_%m_%d') return assets_dir # %% [markdown] # #### Define the top navigation bar # %% ##################### # Header and Footer ##################### # https://dash-bootstrap-components.opensource.faculty.ai/docs/components/navbar/ navbar = dbc.NavbarSimple( brand="Bayessches rรคumlich-zeitliches Interaktionsmodell fรผr Covid-19", brand_href="#", color="dark", fixed="top", dark=True, children=[ dbc.NavItem( dbc.NavLink( "Artikel", href="https://nbviewer.jupyter.org/github/neuroinfo-os/BSTIM-Covid19/blob/master/notebooks/visualization_final.ipynb", ) ), dbc.NavItem( dbc.NavLink( "Quellcode", href="https://github.com/neuroinfo-os/BSTIM-Covid19", ) ), ]) navbar_footer = dbc.NavbarSimple( #brand="", brand_href="#", color="light", #fixed="bottom", #sticky=True, #dark=True, children=[ dbc.NavItem( dbc.NavLink( "Impressum", href="https://www.fz-juelich.de/portal/DE/Service/Impressum/impressum_node.html", ) ), dbc.NavItem( dbc.NavLink( "Datenschutz", href="https://www.fz-juelich.de/portal/DE/datenschutz/_node.html", ) ), ]) # %% ##################### # Disclaimer ##################### disclaimer_modal = html.Div( [ dbc.Button("Disclaimer", id="disclaimer_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="disclaimer_modal", size="xl", children=[ dbc.ModalHeader("Disclaimer"), dbc.ModalBody( children=[ dcc.Markdown( f""" Fรผr die Gesamtzahl der Infektionen pro Bundesland/Landkreis werden die den 
Gesundheitsรคmtern nach Infektionsschutzgesetz gemeldeten Fรคlle verwendet, die dem RKI bis zum jeweiligen Tag um 0 Uhr รผbermittelt wurden. Fรผr die Analyse wird das Meldedatum verwendet, s. [Details zu den Daten](https://experience.arcgis.com/experience/478220a4c454480e823b17327b2bf1d4) Da es in dem Verfahren zu Differenzen zwischen Erkrankungsdatum und Meldedatum, sowie Verzรถgerungen in dem Meldeprozess geben kann, ist die Analyse der Fallzahlen der letzten Woche bereits eine Vorhersage, die auf einer Schรคtzung basiert. Alle hier prรคsentierten Ergebnisse basieren auf statistischen Methoden und bilden damit nicht das reale Geschehen, sondern Schรคtzungen ab, die von der wirklichen Situation abweichen kรถnnen. Dies ist bei der Interpretation der Ergebnisse zu berรผcksichtigen. Fรผr eine detailliertere Analyse der COVID-19-Fรคlle verweisen wir auf den [tรคglichen Lagebericht des RKI](https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Situationsberichte/Gesamt.html). """ ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="disclaimer_modal_close", className="ml-auto") ), ], ), ] ) @app.callback( Output("disclaimer_modal", "is_open"), [Input("disclaimer_modal_open", "n_clicks"), Input("disclaimer_modal_close", "n_clicks")], [State("disclaimer_modal", "is_open")], ) def toggle_modal(n1, n2, is_open): if n1 or n2: return not is_open return is_open # %% ##################### # Date-Tabs (left) ##################### left_date_tab1_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="left_date_tab1_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="left_date_tab1_modal", size="xl", children=[ dbc.ModalHeader("Infektionen"), dbc.ModalBody( children=[ html.Img( id="left_date_modal1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/map.png", style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="left_date_tab1_modal_close", className="ml-auto") ), ], ), ] ) left_date_tab1 = dbc.Card( 
outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="left_date_tab1_img_div", children=[ left_date_tab1_modal, html.Img( id="left_date_tab1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/map.png", style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Die Infektionszahlen pro Tag (Welcher Tag (Nowcast ? Luke ) ) pro Landkreis und gewรคhltem Zeitfenster. " "Der angezeigte Wert entspricht dem Nowcast, also der Schรคtzung der Anzahl der tatsรคchlich Neuinfizierten. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. ", target="left_date_tab1_img", style={"width": "200%"}, placement="left", ), ]), ]), ]) @app.callback( Output("left_date_tab1_modal", "is_open"), [Input("left_date_tab1_img_div", "n_clicks"), Input("left_date_tab1_modal_open", "n_clicks"), Input("left_date_tab1_modal_close", "n_clicks")], [State("left_date_tab1_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open ##################### left_date_tab2_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="left_date_tab2_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="left_date_tab2_modal", size="xl", children=[ dbc.ModalHeader("Interaktionskernel"), dbc.ModalBody( children=[ html.Img( id="left_date_modal2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/interaction_kernel.png", style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="left_date_tab2_modal_close", className="ml-auto") ), ], ), ] ) left_date_tab2 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="left_date_tab2_img_div", children=[ left_date_tab2_modal, html.Img( id="left_date_tab2_img", src=asset_url + 
init_date.strftime('%Y_%m_%d') + "/interaction_kernel.png", style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Der Interaktionskernel schรคtzt ab um wie stark eine gemeldete Infektion eine Neuansteckung in den nรคchsten Tagen " "in einem Umkreis von bis zu 50km beeinflusst. " "Diese Interaktion ist ein zusรคtzlicher Faktor der den Trend in einem Landkreis verstรคrkt oder abschwรคcht. " "Eine warme Farbe indiziert, dass eine Covid-19 Meldung eine erhรถhte Wahrscheinlichkeit einer Neuinfektion " "im Verhรคltnis zum Trend zur Folge hat. " "Eine starke Farben in der Nรคhe kleiner Radien bedeutet, dass das Infektionsgeschehen vor allem Auswirkungen " "in der direkten Nรคhe der gemeldeten Fรคlle zur Folge hat. " "Die Interaktion basiert auf einer Schรคtzung der Bevรถlkerungsdichte und der Form der Landkreise. " "Daten zu den Wohnorten der Infizierten werden in dem Model nicht genutzt. " "Alle hier genutzten Daten sind vollstรคndig anonymisiert (siehe Erklรคrvideo). " "Bei der Interpretation der Interaktionskernel ist dies zu berรผcksichtigen, und wir weisen darauf hin, dass dies nur eine Schรคtzung ist " "die von der Realitรคt abweichen kann.", target="left_date_tab2_img", style={"width": "200%"}, placement="left", ), ]), ]), ]) @app.callback( Output("left_date_tab2_modal", "is_open"), [Input("left_date_tab2_img_div", "n_clicks"), Input("left_date_tab2_modal_open", "n_clicks"), Input("left_date_tab2_modal_close", "n_clicks")], [State("left_date_tab2_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open # %% ##################### # Date-Window Picker (left) ##################### left_date_controls = dbc.FormGroup( children=[ dbc.Label( id='left_date-label', children=["Datumsauswahl:"], ), html.Div( children=[ dcc.DatePickerSingle( id='left_date-picker', display_format='DD. 
MMM YYYY', min_date_allowed=min_date, max_date_allowed=max_date, initial_visible_month=init_date, date=init_date, ), html.Div( id='left_output-container-date-picker', #style={'display': 'none'}, children=[(init_date -timedelta(days=deltadays)).strftime('%Y_%m_%d')], ), ]), ]) # Date Picker @app.callback( Output(component_id='left_output-container-date-picker', component_property='children'), [Input(component_id='left_date-picker', component_property='date')]) def update_left_date_picker(date): if date is not None: return get_assets_dir(date) # Map @app.callback( Output(component_id='left_date_tab1_img', component_property='src'), [Input(component_id='left_date-picker', component_property='date')]) def update_left_date_tab1_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/map.png" @app.callback( Output(component_id='left_date_modal1_img', component_property='src'), [Input(component_id='left_date-picker', component_property='date')]) def update_left_date_modal1_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/map.png" # Interaction Kernel @app.callback( Output(component_id='left_date_tab2_img', component_property='src'), [Input(component_id='left_date-picker', component_property='date')]) def update_left_date_tab2_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/interaction_kernel.png" @app.callback( Output(component_id='left_date_modal2_img', component_property='src'), [Input(component_id='left_date-picker', component_property='date')]) def update_left_date_modal2_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/interaction_kernel.png" # %% ##################### # Date-Tabs (right) ##################### right_date_tab1_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="right_date_tab1_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( 
id="right_date_tab1_modal", size="xl", children=[ dbc.ModalHeader("Infektionen"), dbc.ModalBody( children=[ html.Img( id="right_date_modal1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/map.png", style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="right_date_tab1_modal_close", className="ml-auto") ), ], ), ] ) right_date_tab1 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="right_date_tab1_img_div", children=[ right_date_tab1_modal, html.Img( id="right_date_tab1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/map.png", style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Die Infektionszahlen pro Tag (Welcher Tag (Nowcast ? Luke ) ) pro Landkreis und gewรคhlten Zeitfenster. " "Der anzeigte Wert entspricht dem Nowcast, also der Schรคtzung der Anzahl der tatsรคchlich neuinfizierten. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. 
", target="right_date_tab1_img", style={"width": "200%"}, placement="right", ), ]), ]), ]) @app.callback( Output("right_date_tab1_modal", "is_open"), [Input("right_date_tab1_img_div", "n_clicks"), Input("right_date_tab1_modal_open", "n_clicks"), Input("right_date_tab1_modal_close", "n_clicks")], [State("right_date_tab1_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open ##################### right_date_tab2_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="right_date_tab2_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="right_date_tab2_modal", size="xl", children=[ dbc.ModalHeader("Interaktionskernel"), dbc.ModalBody( children=[ html.Img( id="right_date_modal2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/interaction_kernel.png", style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="right_date_tab2_modal_close", className="ml-auto") ), ], ), ] ) right_date_tab2 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="right_date_tab2_img_div", children=[ right_date_tab2_modal, html.Img( id="right_date_tab2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/interaction_kernel.png", style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Der Interaktionskernel schรคtzt ab um wie stark eine gemeldete Infektion eine Neuansteckung in den nรคchsten Tagen " "in einem Umkreis von bis zu 50km beeinflusst. " "Diese Interaktion ist ein zusรคtzlicher Faktor der den Trend in einem Landkreis verstรคrkt oder abschwรคcht. " "Eine warme Farbe indiziert, dass eine Covid-19 Meldung eine erhรถhte Wahrscheinlichkeit einer Neuinfektion " "im Verhรคltnis zum Trend zur Folge hat. " "Eine starke Farben in der Nรคhe kleiner Radien bedeutet, dass das Infektionsgeschehen vor allem Auswirkungen " "in der direkten Nรคhe der gemeldeten Fรคlle zur Folge hat. 
" "Die Interaktion basiert auf einer Schรคtzung der Bevรถlkerungsdichte und der Form der Landkreise. " "Daten zu den Wohnorten der Infizierten werden in dem Model nicht genutzt. " "Alle hier genutzten Daten sind vollstรคndig anonymisiert (siehe Erklรคrvideo). " "Bei der Interpretation der Interaktionskernel ist dies zu berรผcksichtigen, und wir weisen darauf hin, dass dies nur eine Schรคtzung ist " "die von der Realitรคt abweichen kann.", target="right_date_tab2_img", style={"width": "200%"}, placement="right", ), ]), ]), ]) @app.callback( Output("right_date_tab2_modal", "is_open"), [Input("right_date_tab2_img_div", "n_clicks"), Input("right_date_tab2_modal_open", "n_clicks"), Input("right_date_tab2_modal_close", "n_clicks")], [State("right_date_tab2_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open # %% ##################### # Date-Window Picker (right) ##################### right_date_controls = dbc.FormGroup( children=[ dbc.Label( id='right_date-label', children=["Datumsauswahl:"], ), html.Div( children=[ dcc.DatePickerSingle( id='right_date-picker', display_format='DD. 
MMM YYYY', min_date_allowed=min_date, max_date_allowed=max_date, initial_visible_month=init_date, date=init_date, ), html.Div( id='right_output-container-date-picker', #style={'display': 'none'}, children=[init_date.strftime('%Y_%m_%d')], ), ]), ]) # Date Picker @app.callback( Output(component_id='right_output-container-date-picker', component_property='children'), [Input(component_id='right_date-picker', component_property='date')]) def update_right_date_picker(date): if date is not None: return get_assets_dir(date) # Map @app.callback( Output(component_id='right_date_tab1_img', component_property='src'), [Input(component_id='right_date-picker', component_property='date')]) def update_right_date_tab1_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/map.png" @app.callback( Output(component_id='right_date_modal1_img', component_property='src'), [Input(component_id='right_date-picker', component_property='date')]) def update_right_date_modal1_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/map.png" # Interaction Kernel @app.callback( Output(component_id='right_date_tab2_img', component_property='src'), [Input(component_id='right_date-picker', component_property='date')]) def update_right_date_tab2_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/interaction_kernel.png" @app.callback( Output(component_id='right_date_modal2_img', component_property='src'), [Input(component_id='right_date-picker', component_property='date')]) def update_right_date_modal2_img(date): if date is not None: assets_dir = get_assets_dir(date) return asset_url + assets_dir + "/interaction_kernel.png" # %% ##################### # County-Tabs (left) ##################### left_pos_tab1_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="left_pos_tab1_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( 
id="left_pos_tab1_modal", size="xl", children=[ dbc.ModalHeader("geglรคttet"), dbc.ModalBody( children=[ html.Img( id="left_pos_modal1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="left_pos_tab1_modal_close", className="ml-auto") ), ], ), ] ) left_pos_tab1 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="left_pos_tab1_img_div", children=[ left_pos_tab1_modal, html.Img( id="left_pos_tab1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Analyse und Vorhersage der Infektionszahlen fรผr den ausgewรคhlten Landkreis. " "Der Nowcast entspricht der Schรคtzung der realen aktuellen Neuinfektionen fรผr den angegebenden Tag. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. " "Die Vorhersage nutzt das gleiche Modell um den Verlauf der kommenden 7 Tage, fรผr die noch keine Zahlen vorliegen, vorherzusagen. " "Das geglรคttete Model korrigiert die Ergebnisse bezรผglich eines Wochenrhythmusses bei den Meldeverzรถgerungen (siehe Erklรคrvideo). 
", target="left_pos_tab1_img", style={"width": "600px"}, placement="left", ), ]), ]), ]) @app.callback( Output("left_pos_tab1_modal", "is_open"), [Input("left_pos_tab1_img_div", "n_clicks"), Input("left_pos_tab1_modal_open", "n_clicks"), Input("left_pos_tab1_modal_close", "n_clicks")], [State("left_pos_tab1_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open ##################### left_pos_tab2_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="left_pos_tab2_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="left_pos_tab2_modal", size="xl", children=[ dbc.ModalHeader("geglรคttet"), dbc.ModalBody( children=[ html.Img( id="left_pos_modal2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_trend_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="left_pos_tab2_modal_close", className="ml-auto") ), ], ), ] ) left_pos_tab2 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="left_pos_tab2_img_div", children=[ left_pos_tab2_modal, html.Img( id="left_pos_tab2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_trend_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Analyse und Vorhersage der Infektionszahlen fรผr den ausgewรคhlten Landkreis. " "Der Nowcast entspricht der Schรคtzung der realen aktuellen Neuinfektionen fรผr den angegebenden Tag. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. " "Die Vorhersage nutzt das gleiche Modell um den Verlauf der kommenden 7 Tage, fรผr die noch keine Zahlen vorliegen, vorherzusagen. 
" "Das geglรคttete Model korrigiert die Ergebnisse bezรผglich eines Wochenrhythmusses bei den Meldeverzรถgerungen (siehe Erklรคrvideo). ", target="left_pos_tab2_img", style={"width": "200%"}, placement="left", ), ]), ]), ]) @app.callback( Output("left_pos_tab2_modal", "is_open"), [Input("left_pos_tab2_img_div", "n_clicks"), Input("left_pos_tab2_modal_open", "n_clicks"), Input("left_pos_tab2_modal_close", "n_clicks")], [State("left_pos_tab2_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open # %% ##################### # County Picker (left) ##################### left_pos_controls = dbc.FormGroup( children=[ dbc.Label( id='left_pos-label', children=["Wรคhle Landkreis:"], ), html.Div( children=[ dcc.Dropdown( id="left_pos-variable", value=init_countyid, options=[ {"label": row['LKName'] + " (" + row['LKType'] + ")", "value": row['countyID']} for index, row in metadata.iterrows() ]), html.Div(id='left_output-container-pos-variable'), #, style={'display': 'none'}), ]), ]) # County Picker @app.callback( Output(component_id='left_output-container-pos-variable', component_property='children'), [Input(component_id='left_pos-variable', component_property='value'), Input(component_id='left_output-container-date-picker', component_property='children')]) def update_left_pos_variable(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) # geglรคttet @app.callback( Output(component_id='left_pos_tab1_img', component_property='src'), [Input(component_id='left_pos-variable', component_property='value'), Input(component_id='left_output-container-date-picker', component_property='children')]) def update_left_pos_tab1_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) @app.callback( Output(component_id='left_pos_modal1_img', component_property='src'), 
[Input(component_id='left_pos-variable', component_property='value'), Input(component_id='left_output-container-date-picker', component_property='children')]) def update_left_pos_modal1_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) # ungeglรคttet @app.callback( Output(component_id='left_pos_tab2_img', component_property='src'), [Input(component_id='left_pos-variable', component_property='value'), Input(component_id='left_output-container-date-picker', component_property='children')]) def update_left_pos_tab2_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_{0:05d}.png".format(value) @app.callback( Output(component_id='left_pos_modal2_img', component_property='src'), [Input(component_id='left_pos-variable', component_property='value'), Input(component_id='left_output-container-date-picker', component_property='children')]) def update_left_pos_modal2_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_{0:05d}.png".format(value) # %% ##################### # County-Tabs (right) ##################### right_pos_tab1_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="right_pos_tab1_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="right_pos_tab1_modal", size="xl", children=[ dbc.ModalHeader("geglรคttet"), dbc.ModalBody( children=[ html.Img( id="right_pos_modal1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="right_pos_tab1_modal_close", className="ml-auto") ), ], ), ] ) right_pos_tab1 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="right_pos_tab1_img_div", children=[ right_pos_tab1_modal, html.Img( id="right_pos_tab1_img", src=asset_url + init_date.strftime('%Y_%m_%d') + 
"/curve_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Analyse und Vorhersage der Infektionszahlen fรผr den ausgewรคhlten Landkreis. " "Der Nowcast entspricht der Schรคtzung der realen aktuellen Neuinfektionen fรผr den angegebenden Tag. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. " "Die Vorhersage nutzt das gleiche Modell um den Verlauf der kommenden 7 Tage, fรผr die noch keine Zahlen vorliegen, vorherzusagen. " "Das geglรคttete Model korrigiert die Ergebnisse bezรผglich eines Wochenrhythmusses bei den Meldeverzรถgerungen (siehe Erklรคrvideo). ", target="right_pos_tab1_img", style={"width": "200%"}, placement="right", ), ]), ]), ]) @app.callback( Output("right_pos_tab1_modal", "is_open"), [Input("right_pos_tab1_img_div", "n_clicks"), Input("right_pos_tab1_modal_open", "n_clicks"), Input("right_pos_tab1_modal_close", "n_clicks")], [State("right_pos_tab1_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open ##################### right_pos_tab2_modal = html.Div( [ dbc.Button("VergrรถรŸern", id="right_pos_tab2_modal_open", outline=True, color="secondary", className="mr-1"), dbc.Modal( id="right_pos_tab2_modal", size="xl", children=[ dbc.ModalHeader("geglรคttet"), dbc.ModalBody( children=[ html.Img( id="right_pos_modal2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_trend_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), ] ), dbc.ModalFooter( dbc.Button("SchlieรŸen", id="right_pos_tab2_modal_close", className="ml-auto") ), ], ), ] ) right_pos_tab2 = dbc.Card( outline=True, color="light", className="mt-3", children=[ dbc.CardBody( children=[ html.Div( id="right_pos_tab2_img_div", children=[ right_pos_tab2_modal, html.Img( 
id="right_pos_tab2_img", src=asset_url + init_date.strftime('%Y_%m_%d') + "/curve_trend_{0:05d}.png".format(init_countyid), style={'width':'100%', 'height':'100%'}, ), dbc.Tooltip( "Analyse und Vorhersage der Infektionszahlen fรผr den ausgewรคhlten Landkreis. " "Der Nowcast entspricht der Schรคtzung der realen aktuellen Neuinfektionen fรผr den angegebenden Tag. " "Diese Schรคtzung korrigiert die gemeldeten Zahlen, die aufgrund von Verzรถgerungen im Meldeprozess " "und einem unbekannten Erkrankungsdatum kleiner als die tatsรคchlichen Zahlen sein kรถnnen, auf der Basis einer Vorhersage. " "Die Vorhersage nutzt das gleiche Modell um den Verlauf der kommenden 7 Tage, fรผr die noch keine Zahlen vorliegen, vorherzusagen. " "Das geglรคttete Model korrigiert die Ergebnisse bezรผglich eines Wochenrhythmusses bei den Meldeverzรถgerungen (siehe Erklรคrvideo). ", target="right_pos_tab2_img", style={"width": "200%"}, placement="right", ), ]), ]), ]) @app.callback( Output("right_pos_tab2_modal", "is_open"), [Input("right_pos_tab2_img", "n_clicks"), Input("right_pos_tab2_modal_open", "n_clicks"), Input("right_pos_tab2_modal_close", "n_clicks")], [State("right_pos_tab2_modal", "is_open")], ) def toggle_modal(n1, n2, n3, is_open): if n1 or n2 or n3: return not is_open return is_open # %% ##################### # County Picker (right) ##################### right_pos_controls = dbc.FormGroup( children=[ dbc.Label( id='right_pos-label', children=["Wรคhle Landkreis:"], ), html.Div( children=[ dcc.Dropdown( id="right_pos-variable", value=init_countyid, options=[ {"label": row['LKName'] + " (" + row['LKType'] + ")", "value": row['countyID']} for index, row in metadata.iterrows() ]), html.Div(id='right_output-container-pos-variable'), #, style={'display': 'none'}), ]), ]) # County Picker @app.callback( Output(component_id='right_output-container-pos-variable', component_property='children'), [Input(component_id='right_pos-variable', component_property='value'), 
Input(component_id='right_output-container-date-picker', component_property='children')]) def update_right_pos_variable(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) # geglรคttet @app.callback( Output(component_id='right_pos_tab1_img', component_property='src'), [Input(component_id='right_pos-variable', component_property='value'), Input(component_id='right_output-container-date-picker', component_property='children')]) def update_right_pos_tab1_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) @app.callback( Output(component_id='right_pos_modal1_img', component_property='src'), [Input(component_id='right_pos-variable', component_property='value'), Input(component_id='right_output-container-date-picker', component_property='children')]) def update_right_pos_modal1_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_trend_{0:05d}.png".format(value) # ungeglรคttet @app.callback( Output(component_id='right_pos_tab2_img', component_property='src'), [Input(component_id='right_pos-variable', component_property='value'), Input(component_id='right_output-container-date-picker', component_property='children')]) def update_right_pos_tab2_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_{0:05d}.png".format(value) @app.callback( Output(component_id='right_pos_modal2_img', component_property='src'), [Input(component_id='right_pos-variable', component_property='value'), Input(component_id='right_output-container-date-picker', component_property='children')]) def update_right_pos_modal2_img(value, assets_dir): if value is not None: return asset_url + assets_dir + "/" + "curve_{0:05d}.png".format(value) # %% [markdown] # #### Define the main body of the webpage # https://dash-bootstrap-components.opensource.faculty.ai/docs/components/layout/ # Layout in 
Bootstrap is controlled using the grid system. # The Bootstrap grid has **twelve** columns, and **five** responsive tiers (allowing you to specify different behaviours on different screen sizes, see below). # %% ##################### # Main Structure ##################### tab_height = '5vh' body_layout = dbc.Container( style={"marginTop": 100}, #fluid=True, children=[ ##################### # Introduction ##################### dbc.Row( children=[ dbc.Col( style={ "marginBottom": 10, "width": 12, }, children=[ dcc.Markdown( f""" ##### **Ein Gemeinschaftsprojekt der Arbeitsgruppe [Neuroinformatik an der Universitรคt Osnabrรผck](https://www.ikw.uni-osnabrueck.de/en/research_groups/neuroinformatics/people/prof_dr_gordon_pipa.html)** ##### **und des [Jรผlich Supercomputing Centre](https://www.fz-juelich.de/jsc), auf Basis der Daten des [RKI](https://www.rki.de/DE/Content/Infekt/IfSG/Signale/Projekte/Signale_Projekte_node.html;jsessionid=C61DE534E8208B0D69BEAD299FC753F9.internet091)** """ ), ]), ]), dbc.Row( children=[ dbc.Col( width=4, children=[ html.Img( src='https://www.ikw.uni-osnabrueck.de/fileadmin/templates_global/public/img/header_logo.gif', height='48', # width='500', style={ 'display':'block', 'margin-left': 'auto', 'margin-right': 'auto' }, ), ]), dbc.Col( width=4, children=[ # html.Img( # src='https://www.rki.de/SiteGlobals/StyleBundles/Bilder/Farbschema_A/logo_a.jpg?__blob=normal&v=7', # height='48', # width='500', # style={ # 'display':'block', # 'margin-left': 'auto', # 'margin-right': 'auto' # }, # ), ]), dbc.Col( width=4, children=[ html.Img( src='https://www.vi-hps.org/cms/upload/logos/full/jsc-logo.png', height='48', # width='500', style={ 'display':'block', 'margin-left': 'auto', 'margin-right': 'auto' }, ), ]), ]), dbc.Row( children=[ dbc.Col( style={ "marginTop": 30, "width": 6, }, children=[ dcc.Markdown( f""" ----- ##### BSTIM-Covid19 ----- Aktuelle Daten und Vorhersage der Neuinfizierungen mit COVID-19 fรผr Landkreise in Deutschland. 
Das Model beschreibt die zeitliche Entwicklung der Neuinfizierungen in einen Zeitraum von 4 Wochen. Das Model beschreibt dazu nicht nur die wahrscheinlichste Entwicklung oder die mittlere Entwicklung, sondern schรคtzt die Wahrscheinlichkeit fรผr verschiedene Szenarien ab, die mit der aktuellen Datenlage kompatibel sind. Zudem wir der Interaktionsradius vom Infektionsgeschehen geschรคtzt und als Interaktionskernel dargestellt. Die Arbeit basiert auf einer Adaption des [BSTIM Models](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0225838#pone.0225838.ref009) angepasst an die COVID-19 Situation. Das Model beschreibt die tagesaktuellen Daten basierend auf den [Daten](https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0/data?orderBy=Meldedatum) des RKI. """ ), disclaimer_modal, ]), dbc.Col( style={ "marginTop": 30, "width": 6, }, children=[ dcc.Markdown( f""" ----- ##### Wie funktioniert die Vorhersage und Analyse ----- """ ), html.Div( style={ 'width': '100%', 'float': 'left', 'margin': '0% 0% 5% 0%' # top, right, bottom, left }, children=[ dash_player.DashPlayer( id='video-player', url='https://youtu.be/8-AfYeosBW8', controls=True, width='100%' ), ]), ]), ]), ##################### # Plots Section ##################### dbc.Row( children=[ dbc.Col( dbc.Alert("Basisauswahl", color="primary") ), dbc.Col( dbc.Alert("Vergleichsauswahl", color="primary") ), ] ), dbc.Row( children=[ ##### left plots dbc.Col( children=[ dbc.Card( style={ 'margin': '0% 0% 0% 0%', # top, right, bottom, left 'padding': '0', }, body=True, children=[ # --- Zeitangabe (left) --- dbc.CardHeader( left_date_controls, ), dbc.CardBody( className="mt-3", children=[ dbc.Tabs( id="left_date-card-tabs", active_tab="tab-0", children=[ dbc.Tab(left_date_tab1, label="Infektionen", style={'padding': '0', 'height': '550px'}), dbc.Tab(left_date_tab2, label="Interaktionskernel", style={'padding': '0', 'height': '550px'}), ]), html.P( 
id="left_pos-card-separator", className="card-text", ), # --- Ortsangabe (left) --- dbc.Card( style={ 'margin': '0% 0% 0% 0%', # top, right, bottom, left 'padding': '0', }, children=[ dbc.CardHeader( left_pos_controls, ), dbc.CardBody( className="mt-3", children=[ dbc.Tabs( id="left_pos-card-tabs", active_tab="tab-0", children=[ dbc.Tab(left_pos_tab1, label="geglรคttet", style={'padding': '0', 'height': '300px'}), dbc.Tab(left_pos_tab2, label="ungeglรคttet", style={'padding': '0', 'height': '300px'}), ]), html.P( id="left_pos-card-content", className="card-text", ), ]), ]), ]), ]), ]), ##### right plots dbc.Col( children=[ dbc.Card( style={ 'margin': '0% 0% 0% 0%', # top, right, bottom, left 'padding': '0', }, body=True, children=[ # --- Zeitangabe (left) --- dbc.CardHeader( right_date_controls, ), dbc.CardBody( className="mt-3", children=[ dbc.Tabs( id="right_date-card-tabs", active_tab="tab-0", children=[ dbc.Tab(right_date_tab1, label="Infektionen", style={'padding': '0', 'height': '550px'}), dbc.Tab(right_date_tab2, label="Interaktionskernel", style={'padding': '0', 'height': '550px'}), ]), html.P( id="right_pos-card-separator", className="card-text", ), # --- Ortsangabe (left) --- dbc.Card( style={ 'margin': '0% 0% 0% 0%', # top, right, bottom, left 'padding': '0', }, children=[ dbc.CardHeader( right_pos_controls, ), dbc.CardBody( className="mt-3", children=[ dbc.Tabs( id="right_pos-card-tabs", active_tab="tab-0", children=[ dbc.Tab(right_pos_tab1, label="geglรคttet", style={'padding': '0', 'height': '300px'}), dbc.Tab(right_pos_tab2, label="ungeglรคttet", style={'padding': '0', 'height': '300px'}), ]), html.P( id="right_pos-card-content", className="card-text", ), ]), ]), ]), ]), ]), ]), ]) app.layout = html.Div([navbar, body_layout, navbar_footer]) # %% # multipage example: https://yadi.sk/d/JnM7BvKbJp3EdA #@<EMAIL>.callback(Output('page-content', 'children'), # [Input('url', 'pathname')]) #def display_page(pathname): # if pathname == '/apps/app1': # return 
app1.layout() # else: # return app1.layout() # #<EMAIL>('/static/<path:path>') #def static_file(path): # static_folder = os.path.join(os.getcwd(), 'static') # return send_from_directory(static_folder, path) # %% [markdown] # #### Start the app # %% app.run_server(mode="external") # mode="jupyterlab" -> will open the app in a tab in JupyterLab # mode="inline" -> will open the app below this cell # mode="external" -> will displays a URL that you can click on to open the app in a browser tab # %% [markdown] # -------------------------- # **Attention** # If you get the error "adress in use" this can also be the case because simply your layout has an error so that a dash-app could not been started. Open the app in a new browser-tab with the url # `<base-url>/proxy/<port>` where \<base-url\> derives from the url of your jupyterlab and \<port\> is by default 8050. # For example: `https://jupyter-jsc.fz-juelich.de/user/j.goebbert@fz-juelich.de/jureca_login/proxy/8050` # This will show the full error log. # # -------------------------- # %% [markdown] # Show the Dash Flask server is listening # %% # !echo "COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME" # !lsof -i -P -n | grep LISTEN
002-Methods/004-Dashboards/001-Dash/002-Examples/001-Covid19dynstat/covid19dynstat-dash.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Dynamics for Aircraft Mission Performance
#
# So, what we've demonstrated so far is great for problems where we care about the nitty-gritty of what's happening - we are directly simulating all flight dynamics modes.
#
# For the quadcopter doing a flip, this is a good thing! The vehicle's dynamics have characteristic times that are on the same timescale as the maneuvers of interest, so we should just go ahead and simulate everything.
#
# However, this is overly complex for most aircraft design problems. Most missions of interest are on the order of hours long, while most aircraft flight dynamics modes are on the order of seconds.
#
# We can therefore make some simplifying assumptions:
#
# * The vehicle's short-period modes are negligible, and all modes that don't affect the vehicle energy management (i.e., performance) can be discarded. (On an airplane with classical flight dynamics modes, this would mean that we would ignore all modes other than the phugoid mode.)
#     * This implies that the angular rates $p$, $q$, and $r$ are zero.
# * All lateral modes can be ignored, as we assume that $\beta, \phi, p, r=0$.
# * Because the short-period longitudinal mode is negligible and $q=0$, we assume $\theta$ can be prescribed - so $\theta$ is now a control variable, not a state variable.
# * Our remaining state variables are $x_e, z_e, u, w$. We can re-parameterize this as $x_e, z_e, V, \gamma$ (where $V$ is the airspeed and $\gamma$ is the flight path angle) to reduce coupling.
#
# Here, we can show what that looks like in practice.
# -

# ## Optimal Gliding Problem
#
# Let's say you're my Ph.D. advisor, <NAME>, and you're flying over central Massachusetts in your Cessna 152. All of a sudden, you hear your engine start to sputter - oh no.

# + pycharm={"name": "#%%\n"}
# Engine-out scenario: zero thrust for the rest of the analysis.
thrust = 0

# + [markdown] pycharm={"name": "#%% md\n"}
# Let's determine:
#
# 1. Can he make it back to the airport?
# 2. What's the most energy-optimal trajectory to fly?
#
# ## Performance Estimation
#
# To start, let's roughly define a vehicle geometry here, which we'll then use to estimate vehicle performance. (Both Geometry and Aerodynamics are covered in later tutorial sections.)
#
# ### Geometry Reconstruction
#
# Geometry is roughly given by this three-view:
#
# ![threeview](./assets/cessna150.gif)
#
# Which we can represent as:

# + pycharm={"name": "#%%\n"}
import aerosandbox as asb
import aerosandbox.numpy as np


def ft_to_m(feet, inches=0):  # Converts feet (and inches) to meters
    """Convert a length in feet (plus optional inches) to meters."""
    return 0.3048 * (feet + inches / 12)


naca2412 = asb.Airfoil("naca2412")
naca0012 = asb.Airfoil("naca0012")

# Dimensions below are read off the three-view above, in feet/inches,
# then converted to meters via ft_to_m.
airplane = asb.Airplane(
    name="Cessna 152",
    wings=[
        asb.Wing(
            name="Wing",
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=ft_to_m(5, 4),
                    airfoil=naca2412
                ),
                asb.WingXSec(
                    # z-offset scales with span via sind(1) — presumably a
                    # 1-degree dihedral angle; TODO confirm against the 3-view.
                    xyz_le=[0, ft_to_m(7), ft_to_m(7) * np.sind(1)],
                    chord=ft_to_m(5, 4),
                    airfoil=naca2412
                ),
                asb.WingXSec(
                    xyz_le=[
                        ft_to_m(4, 3 / 4) - ft_to_m(3, 8 + 1 / 2),
                        ft_to_m(33, 4) / 2,
                        ft_to_m(33, 4) / 2 * np.sind(1)
                    ],
                    chord=ft_to_m(3, 8 + 1 / 2),
                    airfoil=naca0012
                )
            ],
            symmetric=True
        ),
        asb.Wing(
            name="Horizontal Stabilizer",
            xsecs=[
                asb.WingXSec(
                    xyz_le=[0, 0, 0],
                    chord=ft_to_m(3, 8),
                    airfoil=naca0012,
                    twist=-2
                ),
                asb.WingXSec(
                    xyz_le=[ft_to_m(1), ft_to_m(10) / 2, 0],
                    chord=ft_to_m(2, 4 + 3 / 8),
                    airfoil=naca0012,
                    twist=-2
                )
            ],
            symmetric=True
        ).translate([ft_to_m(13, 3), 0, ft_to_m(-2)]),
        asb.Wing(
            name="Vertical Stabilizer",
            xsecs=[
                asb.WingXSec(
                    xyz_le=[ft_to_m(-5), 0, 0],
                    chord=ft_to_m(8, 8),
                    airfoil=naca0012,
                ),
                asb.WingXSec(
                    xyz_le=[ft_to_m(0), 0, ft_to_m(1)],
                    chord=ft_to_m(3, 8),
                    airfoil=naca0012,
                ),
                asb.WingXSec(
                    xyz_le=[ft_to_m(0, 8), 0, ft_to_m(5)],
                    chord=ft_to_m(2, 8),
                    airfoil=naca0012,
                ),
            ]
        ).translate([ft_to_m(16, 11) - ft_to_m(3, 8), 0, ft_to_m(-2)])
    ],
    fuselages=[
        asb.Fuselage(
            # Fuselage modeled as a series of circular cross-sections,
            # tapering from a point at the nose to a point at the tail.
            xsecs=[
                asb.FuselageXSec(
                    xyz_c=[0, 0, ft_to_m(-1)],
                    radius=0,
                ),
                asb.FuselageXSec(
                    xyz_c=[0, 0, ft_to_m(-1)],
                    radius=ft_to_m(1.5)
                ),
                asb.FuselageXSec(
                    xyz_c=[ft_to_m(3), 0, ft_to_m(-0.85)],
                    radius=ft_to_m(1.7)
                ),
                asb.FuselageXSec(
                    xyz_c=[ft_to_m(5), 0, ft_to_m(0)],
                    radius=ft_to_m(2.7)
                ),
                asb.FuselageXSec(
                    xyz_c=[ft_to_m(10, 4), 0, ft_to_m(0.3)],
                    radius=ft_to_m(2.3)
                ),
                asb.FuselageXSec(
                    xyz_c=[ft_to_m(21, 11), 0, ft_to_m(0.8)],
                    radius=ft_to_m(0.3)
                ),
            ]
        ).translate([ft_to_m(-5), 0, ft_to_m(-3)])
    ]
)

# Render the geometry with PyVista for a visual sanity check.
mesh = airplane.draw(show=False)

import pyvista as pv

p = pv.Plotter()
p.add_mesh(mesh, show_edges=True)
p.add_axes()
p.add_bounding_box()
p.renderer.show_grid()
# p.camera.enable_parallel_projection()
# p.camera_position='xz'
# p.camera.zoom(2)
# Only use the static backend when running interactively (no __file__ in a notebook).
if '__file__' not in locals():
    p.show(jupyter_backend="static")
# -

# ### Aerodynamic Performance Estimation
#
# We can then use our geometry to estimate polars:

# + pycharm={"name": "#%%\n"}
# Sweep angle of attack across a wide range to build the aero polars.
alpha = np.linspace(-90, 90, 500)

aero = asb.AeroBuildup(
    airplane=airplane,
    op_point=asb.OperatingPoint(
        velocity=107 * 0.514444,  # knots to m/s,
        alpha=alpha,
    )
).run()

from aerosandbox.tools.pretty_plots import plt, show_plot

fig, ax = plt.subplots(2, 2)
plt.sca(ax[0, 0])
plt.plot(alpha, aero["CL"])
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$C_L$")
plt.sca(ax[0, 1])
plt.plot(alpha, aero["CD"])
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$C_D$")
plt.sca(ax[1, 0])
# NOTE(review): this panel plots the drag polar (C_D on x, C_L on y), but the
# axis labels below say alpha / C_D — the labels look wrong; confirm and fix.
plt.plot(aero["CD"], aero["CL"])
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$C_D$")
plt.sca(ax[1, 1])
plt.plot(alpha, aero["CL"] / aero["CD"])
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$C_L/C_D$")
show_plot()

# + pycharm={"name": "#%%\n"}
from aerosandbox.dynamics.dynamics import FreeBodyDynamics
from aerosandbox.tools.pretty_plots import plt, show_plot, mpl, adjust_lightness


class AirplaneDynamics(FreeBodyDynamics):
    # Placeholder subclass — presumably fleshed out later in the tutorial
    # (the visible chunk ends here).
    pass
tutorial/03 - Trajectory Optimization and Optimal Control/03 - The AeroSandbox Dynamics Stack/04 - 2D Aircraft Dynamics for Mission Performance Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.2 64-bit
#     language: python
#     name: python3
# ---

# Imports
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np


# Methods
def show_cv2_image(bgr: np.ndarray):
    """Display a BGR (OpenCV channel order) image with matplotlib, which expects RGB."""
    plt.imshow(cv.cvtColor(bgr, cv.COLOR_BGR2RGB))


# +
# Open images
sections_root = "./static/image/lecture_01"
sections_filenames = [
    "section1.webp", "section2.webp", "section3.webp", "section4.webp",
    "section5.webp", "section6_1.webp", "section6_2.webp", "section6_3.webp"
]
# BUG FIX: the original f-string never interpolated the loop variable
# (`filename` was unused), so every element was read from the same bad path.
sections = [cv.imread(f"{sections_root}/{filename}") for filename in sections_filenames]

print(f"Successfully loaded {len(sections)} images.")

# +
# Section 6
# Recombine channels (the three files each hold one channel's contribution)
section6 = sections[5] + sections[6] + sections[7]
plt.imshow(section6)

# +
# Section 5
# HSV back to BGR
section5 = cv.cvtColor(sections[4], cv.COLOR_HSV2BGR)
plt.imshow(section5)

# +
# Section 4
# RGB to HSV; widen dtype so the channel shifts below can't wrap around in uint8
section4hsv = cv.cvtColor(sections[3], cv.COLOR_RGB2HSV).astype("int64", copy=False)

# Undo shifts
section4hsv[:, :] -= [40, 50, -50]
section4hsv[:, :, 0] %= 180  # Because OpenCV maps 0-360 to 0-179 instead of using the full 8-bit range

# HSV back to BGR
section4flipped = cv.cvtColor(section4hsv.astype("uint8", copy=False), cv.COLOR_HSV2BGR)

# Flip right side up (-1 flips both axes)
section4 = cv.flip(section4flipped, -1)
plt.imshow(section4)

# +
# Section 3
# GBR to RGB (reorder channels)
section3 = sections[2][:, :, [2, 0, 1]]
plt.imshow(section3)

# +
# Section 2
# Stretch back to the original 1100x600 size
section2 = cv.resize(sections[1], dsize=(1100, 600), interpolation=cv.INTER_AREA)
plt.imshow(section2)

# +
# Section 1
# Scale back (vertical squash only)
section1 = cv.resize(sections[0], None, fx=1, fy=0.2)
plt.imshow(section1)

# +
# Image
# Reassemble the full 1360x2500 canvas from the restored sections
image = np.zeros((1360, 2500, 3), dtype="uint8")
image[0:360, 0:1400] = section1
image[0:600, 1400:2500] = section2
image[360:1360, 0:500] = section3
image[360:600, 500:1400] = section4
image[600:1360, 500:2000] = section5
image[600:1360, 2000:2500] = section6
plt.imshow(image)
# -

# Save output
# NOTE(review): the final RGB2BGR conversion implies the assembled canvas is in
# RGB order — consistent with the per-section conversions above, but confirm.
cv.imwrite("lecture_01.webp", cv.cvtColor(image, cv.COLOR_RGB2BGR), (cv.IMWRITE_WEBP_QUALITY, 50))
exercises/lecture_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Benchmarking # ## 0. Setup the logging # # This step sets up logging in our environment to increase our visibility over # the steps that Draco performs. # + import logging; logging.basicConfig(level=logging.INFO) logging.getLogger().setLevel(level=logging.ERROR) logging.getLogger('draco').setLevel(level=logging.INFO) import warnings warnings.simplefilter("ignore") # - # # ## Running the Benchmarking # # The user API for the Draco Benchmarking is the `draco.benchmark.evaluate_templates` function. # # The `evaluate_templates` function accepts the following arguments: # * `templates (list)`: List of templates to try. # * `window_size_rule (list)`: List of tupples (int, str or Timedelta object). # * `metric (function or str)`: Metric to use. If an ``str`` is give it must be one of the metrics defined in the `draco.metrics.METRICS` dictionary. # * `tuning_iterations (int)`: Number of iterations to be used. # * `init_params (dict)`: Initialization parameters for the pipelines. # * `target_times (DataFrame)`: Contains the specefication problem that we are solving, which has three columns: # * `turbine_id`: Unique identifier of the turbine which this label corresponds to. # * `cutoff_time`: Time associated with this target. # * `target`: The value that we want to predict. This can either be a numerical value # or a categorical label. This column can also be skipped when preparing # data that will be used only to make predictions and not to fit any # pipeline. # * `readings (DataFrame)`: Contains the signal data from different sensors, with the following columns: # * `turbine_id`: Unique identifier of the turbine which this reading comes from. # * `signal_id`: Unique identifier of the signal which this reading comes from. 
# * `timestamp (datetime)`: Time where the reading took place, as a datetime. # * `value (float)`: Numeric value of this reading. # * `preprocessing (int, list or dict)`: Number of preprocessing steps to be used. # * `cost (bool)`: Wheter the metric is a cost function (the lower the better) or not. # * `test_size (float)`: Percentage of the data set to be used for the test. # * `cv_splits (int)`: Amount of splits to create. # * `random_state (int)`: Random number of train_test split. # * `output_path (str)`: Path where to save the benchmark report. # * `cache_path (str)`: If given, cache the generated cross validation splits in this folder. Defatuls to ``None``. templates = [ 'lstm_prob_with_unstack', 'dfs_xgb_prob_with_double_normalization' ] window_size_rule = [('1d', '1h'), ('2d', '2h')] init_params = { 'lstm_prob_with_unstack': { 'keras.Sequential.LSTMTimeSeriesClassifier#1': { 'epochs': 1, } } } # + from draco.benchmark import evaluate_templates results = evaluate_templates( templates=templates, window_size_rule=window_size_rule, init_params=init_params, tuning_iterations=3, cv_splits=3, ) # - results
tutorials/03_Benchmarking.ipynb
from pyspark.sql import SparkSession spark=SparkSession.builder.appName('mach2.0').getOrCreate() from pyspark.ml.regression import LinearRegression df=sqlContext.sql("SELECT * FROM ship") df.head(1) from pyspark.ml.linalg import Vectors from pyspark.ml.feature import VectorAssembler df.columns df.printSchema() from pyspark.ml.feature import StringIndexer indexer=StringIndexer(inputCol="Cruise_line",outputCol="index").fit(df) da=indexer.transform(df) da.show() assembler=VectorAssembler(inputCols=['Age','Tonnage','passengers','length','cabins','passenger_density','index'],outputCol='features') output=assembler.transform(da) output.printSchema() output.select('features').show() final=output.select('crew','features') train,test=final.randomSplit([0.68,0.32]) lr=LinearRegression(labelCol='crew') model=lr.fit(train) res=model.evaluate(test) res.residuals.show() res.r2 res.rootMeanSquaredError res.meanAbsoluteError res.meanSquaredError from pyspark.sql.functions import corr df.select(corr('crew','passengers')).show()
Machine project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- import requests from urllib.request import urlopen from urllib.parse import urljoin page = requests.get('https://www.indeed.com/jobs?q=computer+vision+engineer&start=') from time import sleep import time from random import randint from pony_main import * from pony.orm import * from urllib.request import urlopen import json from IPython.display import clear_output from bs4 import BeautifulSoup page.status_code # + starts = list(range(10, 1000, 10)) requests = 0 start = time.time() baseurl = 'https://www.indeed.com/' nlp_jobs = [] for start in starts: my_urls = ('https://www.indeed.com/jobs?q="computer+vision"&start=' + str(start),) my_url = my_urls[0] for my_url in my_urls: uClient = urlopen(my_url) html_input = uClient.read() uClient.close() soup = BeautifulSoup(html_input, "html.parser") cards = soup.findAll('div', {'class':'jobsearch-SerpJobCard'}) it = iter(cards) next(it) # ads next(it) # ads #next(it) for curr in it: try: link = curr.find('h2').find('a', href=True)['href'] except: pass with urlopen(baseurl + link) as uClient: list_url = uClient.read() listing = BeautifulSoup(list_url, 'html.parser') title = listing.find('h3', {'class': 'icl-u-xs-mb--xs icl-u-xs-mt--none jobsearch-JobInfoHeader-title'}) if not title: print('missing content @ ' + baseurl + link) body = listing.find('div', {'class': 'jobsearch-JobComponent-description icl-u-xs-mt--md'} ) if not body: print('missing content @ ' + baseurl + link) requests += 1 sleep(randint(5,7)) end = time.time() #print("Done in", end, "seconds") print('Request: {}; Frequency: {} requests/s'.format(requests, requests/end)) clear_output(wait = True) with db_session: Job(title=str(title), job_description=str(body), job_class='Computer Vision') # - print(nlp_jobs[:200])
Final Capstone/Beautiful Soup for CV.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plot
from sklearn.neural_network import MLPClassifier

# +
# Only run this code snippet if your dataset is located in a different directory than the directory you are running your
# Jupyter Notebook.

# import os
# os.chdir("`")
# -

# Load the bank marketing dataset.
bank = pd.read_csv("bank.csv")
bank.head()

bank.info()

# Drop identifier columns that carry no predictive signal.
bank = bank.drop(['ID', 'ZIP_Code'], axis=1)
bank.head()

from sklearn.model_selection import train_test_split

# Target: whether the customer accepted a personal loan.
y = bank['Personal_Loan']
x = bank.drop(['Personal_Loan'], axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=27)

from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training split only, then apply it to both splits
# to avoid leaking test statistics into training.
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_train

x_test = sc.transform(x_test)
x_test

# Single hidden layer of 100 units, SGD optimizer.
clf = MLPClassifier(hidden_layer_sizes=100, max_iter=5000, solver='sgd', verbose=True, random_state=21, tol=0.01)
clf.fit(x_train, y_train)

y_pred = clf.predict(x_test)

from sklearn.metrics import confusion_matrix, classification_report

cm = confusion_matrix(y_test, y_pred)
cm

# Compare against training-set predictions to gauge overfitting.
pred2 = clf.predict(x_train)
confusion_matrix(y_train, pred2)

print(classification_report(y_test, y_pred))

print(classification_report(y_train, pred2))

# CONSISTENCY FIX: matplotlib.pyplot was imported twice under two different
# aliases (`plot` at the top, `plt` here); all plotting below uses `plt`.
import matplotlib.pyplot as plt


# +
def _plot_roc_auc(y_true, features):
    """Print the AUC and draw the ROC curve for the positive class on one split.

    DUPLICATION FIX: this replaces two copy-pasted AUC/ROC cells (train and
    test) with a single helper.
    """
    from sklearn.metrics import roc_auc_score, roc_curve
    # keep probabilities for the positive outcome only
    probs = clf.predict_proba(features)[:, 1]
    auc = roc_auc_score(y_true, probs)
    print('AUC: %.3f' % auc)
    fpr, tpr, thresholds = roc_curve(y_true, probs)
    plt.plot([0, 1], [0, 1], linestyle='--')  # chance diagonal
    plt.plot(fpr, tpr, marker='.')            # the model's ROC curve
    plt.show()


# AUC and ROC for the training data
_plot_roc_auc(y_train, x_train)

# +
# AUC and ROC for the test data
_plot_roc_auc(y_test, x_test)
# -
M4 Data Mining/W4 Artificial Nural Network/Neural Networks - Video Code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Alfredo-L/daa_2021_1/blob/master/11Enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="3vnafcsJ7gDT" class NodoArbol: def __init__(self, value, left = None, right = None): self.data = value self.left = left self.right = right # + id="cC4CgbJ-8v8M" arbol = NodoArbol("R", NodoArbol("C"), NodoArbol("H")) # + id="eAAdd8la90KO" nodo1 = NodoArbol("C") nodo2 = NodoArbol("H") arbol_v2 = NodoArbol("R", nodo1, nodo2) # + colab={"base_uri": "https://localhost:8080/"} id="clk8YVIG-NYC" outputId="0180e554-3926-4284-a86a-00e2a82efc82" print(arbol.right.data) print(arbol_v2.right.data) # + colab={"base_uri": "https://localhost:8080/"} id="hdDOVR-4-yqb" outputId="55192fc4-f8f4-4ea4-86bf-0f83ff999b29" arbol2 = NodoArbol(4, NodoArbol(3, NodoArbol(2, NodoArbol(2))), NodoArbol(5)) print(arbol2.left.left.left.data) # + colab={"base_uri": "https://localhost:8080/"} id="aT3PNZDnEAXb" outputId="549fd2e1-58a5-4546-d0f7-820a503db5d9" aux = arbol2 while aux.left != None: aux = aux.left print(aux.data) # + id="vZlD6JhwEcVh" arbol3 = NodoArbol("Santi", None, NodoArbol("Jesus", NodoArbol("Pedro", None, NodoArbol("Diana")), None)) # + colab={"base_uri": "https://localhost:8080/"} id="2fCQeabiFr2o" outputId="e0795230-b58d-41da-82a7-7886aabe9968" print(arbol3.right.left.right.data)
11Enero.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Build a Customer Churn Model for Music Streaming App Users: Overview and Data Preparation # # In this demo, you are going to learn how to use various SageMaker functionalities to build, train, and deploy the model from end to end, including data pre-processing steps like ingestion, cleaning and processing, feature engineering, training and hyperparameter tuning, model explainability, and eventually deploy the model. There are two parts of the demo: in part 1: Prepare Data, you will process the data with the help of Data Wrangler, then create features from the cleaned data. By the end of part 1, you will have a complete feature data set that contains all attributes built for each user, and it is ready for modeling. Then in part 2: Modeling and Reference, you will use the data set built from part 1 to find an optimal model for the use case, then test the model predictability with the test data. To start with Part 2, you can either read in data from the output of your Part 1 results, or use the provided 'data/full_feature_data.csv' as the input for the next steps. # # # For how to set up the SageMaker Studio Notebook environment, please check the [onboarding video]( https://www.youtube.com/watch?v=wiDHCWVrjCU&feature=youtu.be). And for a list of services covered in the use case demo, please check the documentation linked in each section. 
# # # ## Content # * [Overview](#Overview) # * [Data Selection](#2) # * [Ingest Data](#4) # * [Data Cleaning and Data Exploration](#5) # * [Pre-processing with SageMaker Data Wrangler](#7) # * [Feature Engineering with SageMaker Processing](#6) # * [Data Splitting](#8) # * [Model Selection](#9) # * [Training with SageMaker Estimator and Experiment](#10) # * [Hyperparameter Tuning with SageMaker Hyperparameter Tuning Job](#11) # * [Deploy the model with SageMaker Batch-transform](#12) # * [Model Explainability with SageMaker Clarify](#15) # * [Optional: Automate your training and model selection with SageMaker Autopilot (Console)](#13) # ## Overview # # ### What is Customer Churn and why is it important for businesses? # Customer churn, or customer retention/attrition, means a customer has the tendency to leave and stop paying for a business. It is one of the primary metrics companies want to track to get a sense of their customer satisfaction, especially for a subscription-based business model. The company can track churn rate (defined as the percentage of customers churned during a period) as a health indicator for the business, but we would love to identify the at-risk customers before they churn and offer appropriate treatment to keep them with the business, and this is where machine learning comes into play. # # ### Use Cases for Customer Churn # # Any subscription-based business would track customer churn as one of the most critical Key Performance Indicators (KPIs). Such companies and industries include Telecom companies (cable, cell phone, internet, etc.), digital subscriptions of media (news, forums, blogposts platforms, etc.), music and video streaming services, and other Software as a Service (SaaS) providers (e-commerce, CRM, Mar-Tech, cloud computing, video conference provider, and visualization and data science tools, etc.) 
#
# ### Define Business problem
#
# To start with, here are some common business problems to consider depending on your specific use cases and your focus:
#
# * Will this customer churn (cancel the plan, cancel the subscription)?
# * Will this customer downgrade a pricing plan?
# * For a subscription business model, will a customer renew his/her subscription?
#
# ### Machine learning problem formulation
#
# #### Classification: will this customer churn?
#
# The goal of classification is to identify the at-risk customers and sometimes their unusual behavior, such as: will this customer churn or downgrade their plan? Is there any unusual behavior for a customer? The latter question can be formulated as an anomaly detection problem.
#
# #### Time Series: will this customer churn in the next X months? When will this customer churn?
#
# You can further explore your users by formulating the problem as a time series one and detecting when the customer will churn.
#
# ### Data Requirements
#
# #### Data collection Sources
#
# Some of the most common data sources used to construct a data set for churn analysis are:
#
# * Customer Relationship Management platform (CRM),
# * engagement and usage data (analytics services),
# * passive feedback (ratings based on your request), and active feedback (customer support request, feedback on social media and review platforms).
#
# #### Construct a Data Set for Churn Analysis
#
# Most raw data collected from the sources mentioned above is huge and often needs a lot of cleaning and pre-processing. For example, usage data is usually event-based log data and can be more than a few gigabytes every day; you can aggregate the data to user-level daily for further analysis. Feedback and review data are mostly text data, so you would need to clean and pre-process the natural language data into normalized, machine-readable data.
If you are joining multiple data sources (especially from different platforms) together, you would want to make sure all data points are consistent, and the user identity can be matched across different platforms. # # #### Challenges with Customer Churn # # * Business related # * Importance of domain knowledge: this is critical when you start building features for the machine learning model. It is important to understand the business enough to decide which features would trigger retention. # * Data issues # * fewer churn data available (imbalanced classes): data for churn analysis is often very imbalanced as most of the customers of a business are happy customers (usually). # * User identity mapping problem: if you are joining data from different platforms (CRM, email, feedback, mobile app, and website usage data), you would want to make sure user A is recognized as the same user across multiple platforms. There are third-party solutions that help you tackle this problem. # * Not collecting the right data for the use case or Lacking enough data # <a id='2'></a> # # ## Data Selection # # You will use generated music streaming data that is simulated to imitate music streaming user behaviors. The data simulated contains 1100 users and their user behavior for one year (2019/10/28 - 2020/10/28). Data is simulated using the [EventSim](https://github.com/Interana/eventsim) and does not contain any real user data. # # * Observation window: you will use 1 year of data to generate predictions. # * Explanation of fields: # * `ts`: event UNIX timestamp # * `userId`: a randomly assigned unique user id # * `sessionId`: a randomly assigned session id unique to each user # * `page`: event taken by the user, e.g. 
"next song", "upgrade", "cancel" # * `auth`: whether the user is a logged-in user # * `method`: request method, GET or PUT # * `status`: request status # * `level`: if the user is a free or paid user # * `itemInSession`: event happened in the session # * `location`: location of the user's IP address # * `userAgent`: agent of the user's device # * `lastName`: user's last name # * `firstName`: user's first name # * `registration`: user's time of registration # * `gender`: gender of the user # * `artist`: artist of the song the user is playing at the event # * `song`: song title the user is playing at the event # * `length`: length of the session # # # * the data will be downloaded from Github and contained in an [Amazon Simple Storage Service](https://aws.amazon.com/s3/) (Amazon S3) bucket. # For this specific use case, you will focus on a solution to predict whether a customer will cancel the subscription. Some possible expansion of the work includes: # # * predict plan downgrading # * when a user will churn # * add song attributes (genre, playlist, charts) and user attributes (demographics) to the data # * add user feedback and customer service requests to the data # # ## Architecture Diagram # # The services covered in the use case and an architecture diagram is shown below. # # <div> # <img src="image/use_case_diagram_v2.png" width="800"/> # # </div> # ## PART 1: Prepare Data # ### Set Up Notebook # !pip install -q 'sagemaker==2.19.0' 'botocore == 1.19.4' 's3fs==0.4.2' 'sagemaker-experiments' 'boto3 == 1.16.4' # s3fs is needed for pandas to read files from S3 import sagemaker import json import pandas as pd import glob import s3fs import boto3 # ### Parameters # The following lists configurable parameters that are used throughout the whole notebook. 
# +
# Notebook-wide SageMaker configuration: session, default artifact bucket,
# execution role, and the S3 key prefix shared by every job in this demo.
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()  # replace with your own bucket name if you have one
s3 = sagemaker_session.boto_session.resource("s3")
region = boto3.Session().region_name
role = sagemaker.get_execution_role()  # IAM role assumed by the SageMaker jobs launched below
smclient = boto3.Session().client("sagemaker")
prefix = "music-streaming"  # S3 key prefix for all artifacts written by this notebook
# -

# %store -r

# %store

# %store bucket

# %store prefix

# <a id='4'></a>
#
# ### Ingest Data
#
# We ingest the simulated data from the public SageMaker S3 training database.

##### Alternative: copy data from a public S3 bucket to your own bucket
##### data file should include full_data.csv and sample.json
#### cell 5 - 7 is not needed; the processing job before data wrangler screenshots is not needed

# !aws s3 cp s3://sagemaker-sample-files/datasets/tabular/customer-churn/customer-churn-data.zip ./data/raw

# !unzip ./data/raw/customer-churn-data.zip -d ./data

# unzip the partitioned data files into the same folder
# !unzip -o data/simu-1.zip -d data/raw
# !unzip -o data/simu-2.zip -d data/raw
# !unzip -o data/simu-3.zip -d data/raw
# !unzip -o data/simu-4.zip -d data/raw

# !rm ./data/raw/*.zip

# +
# #!unzip -o data/sample.zip -d data/raw
# -

# !aws s3 cp ./data/raw s3://$bucket/$prefix/data/json/ --recursive

# <a id='5'></a>
#
# ### Data Cleaning
#
# Due to the size of the data (~2GB), you will start exploring our data starting with a smaller sample, decide which pre-processing steps are necessary, and apply them to the whole dataset.

# +
import os

# if your SageMaker Studio notebook's memory is getting full, you can run the following command to remove the raw data files from the instance and free up some memory.
# You will read data from your S3 bucket onwards and will not need the raw data stored in the instance.
os.remove("data/simu-1.zip")
os.remove("data/simu-2.zip")
os.remove("data/simu-3.zip")
os.remove("data/simu-4.zip")
os.remove("data/sample.zip")
# -

# !aws s3 cp s3://sage-fraud-detect/raw-data/sample.json ./data/raw

# Load a small sample of the raw event log (newline-delimited JSON) for exploration.
sample_file_name = "./data/raw/sample.json"
# s3_sample_file_name = "data/json/sample.json"
# sample_path = "s3://{}/{}/{}".format(bucket, prefix, s3_sample_file_name)
sample = pd.read_json(sample_file_name, lines=True)
sample.head(2)

# #### Remove irrelevant columns
#
# From the first look of data, you can notice that columns `lastName`, `firstName`, `method` and `status` are not relevant features. These will be dropped from the data.

columns_to_remove = ["method", "status", "lastName", "firstName"]
sample = sample.drop(columns=columns_to_remove)

# #### Check for null values
#
# You are going to remove all events without an `userId` assigned since you are predicting which recognized user will churn from our service. In this case, all the rows(events) have a `userId` and `sessionId` assigned, but you will still run this step for the full dataset. For other columns, there are ~3% of data that are missing some demographic information of the users, and ~20% missing the song attributes, which is because the events contain not only playing a song, but also other actions including login and log out, downgrade, cancellation, etc. There are ~3% of users that do not have a registration time, so you will remove these anonymous users from the record.

print("percentage of the value missing in each column is: ")
sample.isnull().sum() / len(sample)

# Keep only events attributable to a known, registered user.
sample = sample[~sample["userId"].isnull()]
sample = sample[~sample["registration"].isnull()]

# ### Data Exploration
#
# Let's take a look at our categorical columns first: `page`, `auth`, `level`, `location`, `userAgent`, `gender`, `artist`, and `song`, and start with looking at unique values for `page`, `auth`, `level`, and `gender` since the other three have many unique values and you will take a different approach.
# Low-cardinality categoricals: print the full value set.
# High-cardinality categoricals: print only the distinct count.
cat_columns = ["page", "auth", "level", "gender"]
cat_columns_long = ["location", "userAgent", "artist", "song", "userId"]

for column_name in cat_columns:
    unique_values = sample[column_name].unique()
    print("The unique values in column {} are: {}".format(column_name, unique_values))

for column_name in cat_columns_long:
    distinct_count = sample[column_name].nunique()
    print("There are {} unique values in column {}".format(distinct_count, column_name))

# #### Key observations from the above information
#
# * There are 101 unique users with 72 unique locations, this information may not be useful as a categorical feature. You can parse this field and only keep State information, but even that will give us 50 unique values in this category, so you can either remove this column or bucket it to a higher level (NY --> Northeast).
# * Artist and song details might not be helpful as categorical features as there are too many categories; you can quantify these to a user level, i.e. how many artists this user has listened to in total, how many songs this user has played in the last week, last month, in 180 days, in 365 days. You can also bring in external data to get song genres and other artist attributes to enrich this feature.
# * In the column `page`, 'Thumbs Down', 'Thumbs Up', 'Add to Playlist', 'Roll Advert','Help', 'Add Friend', 'Downgrade', 'Upgrade', and 'Error' can all be great features to churn analysis. You will aggregate them to user-level later. There is a "cancellation confirmation" value that can be used for the churn indicator.
#
# * Let's take a look at the column `userAgent`:
#
# UserAgent contains little useful information, but if you care about the browser type and mac/windows difference, you can parse the text and extract the information. Sometimes businesses would love to analyze user behavior based on their App version and device type (iOS v.s. Android), so these could be useful information. In this use case, for modeling purpose, we will remove this column. but you can keep it as a filter for data visualization.
columns_to_remove = ["location", "userAgent"]
sample = sample.drop(columns=columns_to_remove)

# Let's take a closer look at the timestamp columns `ts` and `registration`. We can convert the event timestamp `ts` to year, month, week, day, day of the week, and hour of the day. The registration time should be the same for the same user, so we can aggregate this value to user-level and create a time delta column to calculate the time between registration and the newest event.

# Derive calendar features from the millisecond event timestamp.
sample["date"] = pd.to_datetime(sample["ts"], unit="ms")
sample["ts_year"] = sample["date"].dt.year
sample["ts_month"] = sample["date"].dt.month
# Fix: `Series.dt.week` is deprecated (removed in pandas 2.0); the ISO week
# number is now obtained via `dt.isocalendar().week` (UInt32 -> cast back to int).
sample["ts_week"] = sample["date"].dt.isocalendar().week.astype(int)
sample["ts_day"] = sample["date"].dt.day
sample["ts_dow"] = sample["date"].dt.weekday  # 0 = Monday ... 6 = Sunday
sample["ts_hour"] = sample["date"].dt.hour
sample["ts_date_day"] = sample["date"].dt.date
# 1 for Monday-Friday, 0 for Saturday/Sunday.
sample["ts_is_weekday"] = [1 if x in [0, 1, 2, 3, 4] else 0 for x in sample["ts_dow"]]
sample["registration_ts"] = pd.to_datetime(sample["registration"], unit="ms").dt.date

# #### Define Churn
#
# In this use case, you will use `page == "Cancellation Confirmation"` as the indicator of a user churn. You can also use `page == 'downgrade` if you are interested in users downgrading their payment plan. There are ~13% users churned, so you will need to up-sample or down-sample the full dataset to deal with the imbalanced class, or carefully choose your algorithms.

print(
    "There are {:.2f}% of users churned in this dataset".format(
        (
            (sample[sample["page"] == "Cancellation Confirmation"]["userId"].nunique())
            / sample["userId"].nunique()
        )
        * 100
    )
)

# You can label a user by adding a churn label at a event level then aggregate this value to user level.
# Event-level indicator: 1 only on the "Cancellation Confirmation" event itself.
sample["churned_event"] = [1 if x == "Cancellation Confirmation" else 0 for x in sample["page"]]
# User-level label: max over the user's events, i.e. 1 if the user ever churned.
sample["user_churned"] = sample.groupby("userId")["churned_event"].transform("max")

# #### Imbalanced Class
#
# Imbalanced class (much more positive cases than negative cases) is very common in churn analysis. It can be misleading for some machine learning model as the accuracy will be biased towards the majority class. Some useful tactics to deal with imbalanced class are [SMOTE](https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.over_sampling.SMOTE.html), use algorithms that are less sensitive to imbalanced class like a tree-based algorithm or use a cost-sensitive algorithm that penalizes wrongly classified minority class.

# To Summarize every pre-processing steps you have covered:
# * null removals
# * drop irrelevant columns
# * convert event timestamps to features used for analysis and modeling: year, month, week, day, day of week, hour, date, if the day is weekday or weekend, and convert registration timestamp to UTC.
# * create labels (whether the user churned eventually), which is calculated by if one churn event happened in the user's history, you can label the user as a churned user (1).

# #### Exploring Data
#
# Based on the available data, look at every column, and decide if you can create a feature from it. For all the columns, here are some directions to explore:
#
# * `ts`: distribution of activity time: time of the day, day of the week
# * `sessionId`: average number of sessions per user
# * `page`: number of thumbs up/thumbs down, added to the playlist, ads, add friend, if the user has downgrade or upgrade the plan, how many errors the user has encountered.
# * `level`: if the user is a free or paid user # * `registration`: days the user being active, time the user joined the service # * `gender`: gender of the user # * `artist`: average number of artists the user listened to # * `song`: average number of songs listened per user # * `length`: average time spent per day per user # # **Activity Time** # # 1. Weekday v.s. weekend trends for churned users and active users. It seems like churned users are more active on weekdays than weekends whereas active users do not show a strong difference between weekday v.s. weekends. You can create some features from here: for each user, average events per day -- weekends, average events per day -- weekdays. You can also create features - average events per day of the week, but that will be converted to 7 features after one-hot-encoding, which may be less informative than the previous method. # 2. In terms of hours active during a day, our simulated data did not show a significant difference between day and night for both sets of users. You can have it on your checklist for your analysis, and similarly for the day of the month, the month of the year when you have more than a year of data. 
# +
import seaborn as sns
import matplotlib.pyplot as plt

# Average number of events per user per day, split by weekday/weekend.
events_per_day_per_user = (
    sample.groupby(["userId", "ts_date_day", "ts_is_weekday", "user_churned"])
    .agg({"page": "count"})
    .reset_index()
)
events_dist = (
    events_per_day_per_user.groupby(["userId", "ts_is_weekday", "user_churned"])
    .agg({"page": "mean"})
    .reset_index()
)


def trend_plot(
    df, plot_type, x, y, hue=None, title=None, x_axis=None, y_axis=None, xticks=None, yticks=None
):
    """Render a styled seaborn box or bar plot of `df` and show it.

    Parameters
    ----------
    df : DataFrame to plot.
    plot_type : "box" for a horizontal box plot, "bar" for a bar plot.
    x, y : column names for the plot axes.
    hue : optional column name used for color grouping.
    title, x_axis, y_axis : title and axis labels.
    xticks : currently unused; kept for interface compatibility.
    yticks : optional labels for y tick positions [0, 1] (binary churn axis).
    """
    if plot_type == "box":
        # Fix: use the `x` argument instead of the hard-coded "page"
        # (both call sites pass x="page", so behavior is unchanged).
        fig = sns.boxplot(x=x, y=y, data=df, hue=hue, orient="h")
    elif plot_type == "bar":
        fig = sns.barplot(x=x, y=y, data=df, hue=hue)
    sns.set(rc={"figure.figsize": (12, 3)})
    sns.set_palette("Set2")
    sns.set_style("darkgrid")
    plt.title(title)
    plt.xlabel(x_axis)
    plt.ylabel(y_axis)
    # Fix: only relabel the y ticks when labels are supplied; the previous
    # unconditional `plt.yticks([0, 1], yticks)` forced the bar chart's
    # y-axis to show only ticks 0 and 1.
    if yticks is not None:
        plt.yticks([0, 1], yticks)
    return plt.show(fig)


trend_plot(
    events_dist,
    "box",
    "page",
    "user_churned",
    "ts_is_weekday",
    "Weekday V.S. Weekends - Average events per day per user",
    "average events per user per day",
    yticks=["active users", "churned users"],
)
# -

# Average number of events per hour of the day, per user group.
events_per_hour_per_user = (
    sample.groupby(["userId", "ts_date_day", "ts_hour", "user_churned"])
    .agg({"page": "count"})
    .reset_index()
)
events_dist = (
    events_per_hour_per_user.groupby(["userId", "ts_hour", "user_churned"])
    .agg({"page": "mean"})
    .reset_index()
    .groupby(["ts_hour", "user_churned"])
    .agg({"page": "mean"})
    .reset_index()
)
trend_plot(
    events_dist,
    "bar",
    "ts_hour",
    "page",
    "user_churned",
    "Hourly activity - Average events per hour of day per user",
    "hour of the day",
    "average events per user per hour",
)

# **Listening Behavior**
#
# You can look at some basic stats for a user's listening habits. Churned users generally listen to a wider variety of songs and artists and spend more time on the App/be with the App longer.
# * Average total: number of sessions, App usage length, number of songs listened, number of artists listened per user, number of ad days active # * Average daily: number of sessions, App usage length, number of songs listened, number of artists listened per user # # + stats_per_user = ( sample.groupby(["userId", "user_churned"]) .agg( { "sessionId": "count", "song": "nunique", "artist": "nunique", "length": "sum", "ts_date_day": "count", } ) .reset_index() ) avg_stats_group = ( stats_per_user.groupby(["user_churned"]) .agg( { "sessionId": "mean", "song": "mean", "artist": "mean", "length": "mean", "ts_date_day": "mean", } ) .reset_index() ) print( "Average total: number of sessions, App usage length, number of songs listened, number of artists listened per user, days active: " ) avg_stats_group # - stats_per_user = ( sample.groupby(["userId", "ts_date_day", "user_churned"]) .agg({"sessionId": "count", "song": "nunique", "artist": "nunique", "length": "sum"}) .reset_index() ) avg_stats_group = ( stats_per_user.groupby(["user_churned"]) .agg({"sessionId": "mean", "song": "mean", "artist": "mean", "length": "mean"}) .reset_index() ) print( "Average daily: number of sessions, App usage length, number of songs listened, number of artists listened per user: " ) avg_stats_group # **App Usage Behavior** # # You can further explore how the users are using the App besides just listening: number of thumbs up/thumbs down, added to playlist, ads, add friend, if the user has downgrade or upgrade the plan, how many errors the user has encountered. Churned users are slightly more active than other users, and also encounter more errors, listened to more ads, and more downgrade and upgrade. These can be numerical features (number of total events per type per user), or more advanced time series numerical features (errors in last 7 days, errors in last month, etc.). 
# One-hot encode the interesting `page` events, then aggregate counts per user.
events_list = [
    "NextSong",
    "Thumbs Down",
    "Thumbs Up",
    "Add to Playlist",
    "Roll Advert",
    "Add Friend",
    "Downgrade",
    "Upgrade",
    "Error",
]

# snake_case column name for each tracked event, e.g. "Thumbs Up" -> "thumbs_up"
usage_column_name = ["_".join(evt.split()).lower() for evt in events_list]

for evt, col in zip(events_list, usage_column_name):
    sample[col] = [1 if page_event == evt else 0 for page_event in sample["page"]]

app_use_per_user = sample.groupby(["userId", "user_churned"])[usage_column_name].sum().reset_index()
app_use_group = app_use_per_user.groupby(["user_churned"])[usage_column_name].mean().reset_index()
app_use_group

# <a id='7'></a>
#
# ## Pre-processing with SageMaker Data Wrangler
#
# Now that you have a good understanding of your data and decided which steps are needed to pre-process your data, you can utilize the new Amazon SageMaker GUI tool **Data Wrangler**, without writing all the code for the SageMaker Processing Job.
#
# * Here we used a Processing Job to convert the raw streaming data files downloaded from the github repo (`simu-*.zip` files) to a full, CSV formatted file for Data Wrangler Ingestion purpose.

# you are importing the raw streaming data files downloaded from the github repo (`simu-*.zip` files). The raw JSON files were converted to CSV format and combined to one file for Data Wrangler Ingestion purpose.
# +
# %%writefile preprocessing_predw.py
# Processing-job script: read every raw JSON event file mounted under
# /opt/ml/processing/input/data and concatenate them into one CSV.
import argparse
import os
import warnings
import glob
import time
import pandas as pd
import json

from sklearn.exceptions import DataConversionWarning

warnings.filterwarnings(action="ignore", category=DataConversionWarning)

start_time = time.time()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--processing-output-filename")
    args, _ = parser.parse_known_args()
    print("Received arguments {}".format(args))

    input_jsons = glob.glob("/opt/ml/processing/input/data/**/*.json", recursive=True)

    # Fix: collect the frames and concatenate once instead of the deprecated
    # (and quadratic) `DataFrame.append` inside the loop.
    frames = []
    for name in input_jsons:
        print("\nStarting file: {}".format(name))
        df = pd.read_json(name, lines=True)
        frames.append(df)
    df_all = pd.concat(frames) if frames else pd.DataFrame()

    output_filename = args.processing_output_filename
    final_features_output_path = os.path.join("/opt/ml/processing/output", output_filename)
    print("Saving processed data to {}".format(final_features_output_path))
    df_all.to_csv(final_features_output_path, header=True, index=False)

# +
from sagemaker.sklearn.processing import SKLearnProcessor

# Managed scikit-learn container that will run the script above.
sklearn_processor = SKLearnProcessor(
    framework_version="0.23-1", role=role, instance_type="ml.m5.xlarge", instance_count=1
)
# -

# Enumerate the raw JSON objects previously uploaded under the notebook's prefix.
s3_client = boto3.client("s3")
list_response = s3_client.list_objects_v2(Bucket=bucket, Prefix=f"{prefix}/data/json")
s3_input_uris = [f"s3://{bucket}/{i['Key']}" for i in list_response["Contents"]]
s3_input_uris

# +
from sagemaker.processing import ProcessingInput, ProcessingOutput

# One ProcessingInput per raw file, each mounted in its own sub-directory.
processing_inputs = []
for i in s3_input_uris:
    name = i.split("/")[-1].split(".")[0]
    processing_input = ProcessingInput(
        source=i, input_name=name, destination=f"/opt/ml/processing/input/data/{name}"
    )
    processing_inputs.append(processing_input)

# +
# %%time
processing_output_path = f"s3://{bucket}/{prefix}/data/processing"
final_features_filename = "full_data.csv"

sklearn_processor.run(
    code="preprocessing_predw.py",
    inputs=processing_inputs,
    outputs=[
        ProcessingOutput(
            output_name="processed_data",
            source="/opt/ml/processing/output",
            destination=processing_output_path,
        )
    ],
    arguments=["--processing-output-filename", final_features_filename],
)

preprocessing_job_description = sklearn_processor.jobs[-1].describe()
# -

# Now you can initiate a Data Wrangler flow. An example flow (`dw_example.flow`) is provided in the github repo.
#
# From the SageMaker Studio launcher page, choose **New data flow**, then choose **import from S3** and select processing_output_filename.
#
# <div>
# <img src="image/mo1.PNG" width="600"/>
# </div>
#
# You can import any .csv format file with SageMaker Data Wrangler, preview your data, and decide what pre-processing steps are needed.

# <div>
# <img src="image/mo2.PNG" width="600"/>
# </div>

# You can choose your pre-processing steps, including drop columns and rename columns from the pre-built solutions, also customize processing and feature engineering code in the custom Pandas code block.

# <div>
# <img src="image/mo3.PNG" width="600"/>
#
# </div>

# After everything run through, it will create a Processing job notebook for you. You can run through the notebook to kick off the Processing Job and check the status in the console.
#
# <div>
# <img src="image/mo4.PNG" width="600"/>
#
# </div>

# #### Find the data path of the SageMaker Data Wrangler Job
#
# You can get the results from your Data Wrangler Job, check the results, and use it as input for your feature engineering processing job.
# S3 URI of the CSV produced by the processing job above.
processing_output_filename = f"{processing_output_path}/{final_features_filename}"

# %store processing_output_filename

# %store -r

# +
flow_file = "dw_example.flow"

# read flow file and change the s3 location to our `processing_output_filename`
with open(flow_file, "r") as f:
    flow = json.load(f)  # parse directly instead of read() + json.loads()

# Point the flow's first node (the S3 dataset definition) at our CSV.
flow["nodes"][0]["parameters"]["dataset_definition"]["s3ExecutionContext"][
    "s3Uri"
] = processing_output_filename

# Fix: write back through the `flow_file` variable; the original hard-coded
# "dw_example.flow" here, which would silently write the wrong file if
# `flow_file` were ever changed.
with open(flow_file, "w") as f:
    json.dump(flow, f)
# -

flow

# ### Citation
# The data used in this notebook is simulated using the [EventSim](https://github.com/Interana/eventsim).
use-cases/customer_churn/0_cust_churn_overview_dw.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 7.11 Flat heating Coils - Water # Water at 80 degrees Celsius flows through a flat heating coil at a rate of 60 L/min. There are 7 180 degree bends in it. The coil is 8 m long, with 0.5 m of straight length on the inlet and exit. The r/D of the bends is 4. The pipe is schedule 40, 25 mm pipe. # + from math import * from fluids.units import * from thermo.units import Chemical water = Chemical('water', P=2*u.bar, T=80*u.degC) # P assumed rho = water.rho mu = water.mu Q = 60*u.L/u.min L = (1*8 + 0.5*2)*u.m NPS, D_pipe, Do_pipe, t = nearest_pipe(Di=25*u.mm) v = Q/(pi/4*D_pipe**2) Re = Reynolds(rho=rho, mu=mu, D=D_pipe, V=v) fd = friction_factor(Re=Re, eD=0.0018*u.inch/D_pipe) K_elbow = bend_rounded(Di=D_pipe, angle=180*u.degrees, fd=fd, bend_diameters=5) K_friction = K_from_f(fd=fd, L=L, D=D_pipe) K_tot = 7*K_elbow + K_friction dP = dP_from_K(K=K_tot, rho=rho, V=v) print('Pressure drop = %s' %dP.to(u.Pa)) # - # The value presented in the solution is 19609 Pa. They chose a constant friction factor of 0.024 in this calculation. If this were used, the result compares much better. Their friction factor can be obtained at a roughness of 0.05 mm. # + fd = 0.024 K_elbow = bend_rounded(Di=D_pipe, angle=180*u.degrees, fd=fd, bend_diameters=5) K_friction = K_from_f(fd=fd, L=L, D=D_pipe) K_tot = 7*K_elbow + K_friction dP = dP_from_K(K=K_tot, rho=rho, V=v) print('Pressure drop = %s' %dP.to(u.Pa))
docs/Examples/Crane TP 410 Solved Problems/7.11 Flat heating Coils - Water.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd # Data Preprocessing myData = pd.read_csv('Market_Basket_Optimisation.csv', header = None) myData.head() myData.shape transactions = [] for i in range(0, myData.shape[0]): transactions.append([str(myData.values[i,j]) for j in range(0, myData.shape[1])]) print(transactions[:10]) # Training Apriori on the dataset from apyori import apriori rules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2) # Visualising the results results = list(rules) results
Apriori - Market Basket.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Visualization of the data provided with OPEN

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os

# ## Photovoltaic production in June

# +
# Per-unit PV generation at 1-minute resolution for June 2013, one column per unit.
PV_data_path = os.path.join("Building/", "PVpu_1min_2013JUN.csv")
PVpu_raw_smr = pd.read_csv(PV_data_path, index_col=0, parse_dates=True).values
PVtotal_smr = np.sum(PVpu_raw_smr, 1)  # sum across columns -> total PV profile

print(PVtotal_smr.shape)
plt.plot(PVtotal_smr)
plt.show()
# -

# ## Load (consumption) at 1-minute resolution

# +
Loads = pd.read_csv("Loads_1min.csv", index_col=0, parse_dates=True).values
Loads_total = np.sum(Loads, 1)

# Fix: this cell previously printed and plotted PVtotal_smr again (copy-paste
# slip); the section is about the load profile, so show Loads_total.
print(Loads_total.shape)
plt.plot(Loads_total)
plt.show()
# -
Data/OPEN_provided_data_viz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# + [markdown] deletable=true editable=true
# # Viewing the data
# Load and view data from the created `.json` file

# + deletable=true editable=true
# dependencies
# %matplotlib inline
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from sklearn import svm

# + deletable=true editable=true
# config
JSONDATAPATH = './ht_wt_data_2014.json'

# + deletable=true editable=true
# load data from .json file
with open(JSONDATAPATH) as data_file:
    data = json.load(data_file)

# + deletable=true editable=true
# create list of attributes
# Note: in this case; group=sport, attribute=height/weight
attribute_list = []
for group in data:
    for entry in data[group]:
        for attribute in entry:
            if attribute not in attribute_list:
                attribute_list.append(attribute)

# + deletable=true editable=true
# create dict for each outer level group
group_container = {}
for group in data:
    group_container[group] = {}

# create numpy arrays of each attribute (height/weight) for each group (sport)
for attribute in attribute_list:
    for group in data:
        cur_list = []
        for entry in data[group]:
            cur_list.append(int(entry[attribute]))
        group_container[group][attribute] = np.asarray(cur_list)

# + [markdown] deletable=true editable=true
# ## All data loaded to dictionary;
# example: `group_container = {nba: {weight: np.array, height: np.array}, ...}`

# + deletable=true editable=true
# Peek into the data: min/mean/max and sample size per sport, per attribute.
for attribute in attribute_list:
    print(attribute)
    print("\t{0:<8s}{1:<8s}{2:<8s}{3:<8s}".format("MIN","MEAN","MAX", "N"))
    for group in group_container:
        print("{0:<5s} {min:5d} {mean:9.2f} {max:5d} {n:6d}".format(group,
                                                                    min=np.amin(group_container[group][attribute]),
                                                                    mean=np.mean(group_container[group][attribute]),
                                                                    max=np.amax(group_container[group][attribute]),
                                                                    n=len(group_container[group][attribute])))
    print("\n")

# + deletable=true editable=true
# visualize data using a basic histogram array
num_x = len(attribute_list)
num_y = len(group_container)
for attribute in attribute_list:
    fig, axes = plt.subplots(1, num_y, figsize=(12,2))
    fig.suptitle(attribute, fontsize=14, fontweight='bold')
    fig.subplots_adjust(top=.7)
    index = 0
    for group in group_container:
        # Fix: `normed` was removed from matplotlib; `density=True` is the
        # equivalent (normalizes counts to a probability density).
        axes[index].hist(group_container[group][attribute], density=True)
        axes[index].set_title(group)
        if attribute == 'Height':
            axes[index].set_xlabel('in')
        if attribute == 'Weight':
            axes[index].set_xlabel('lbs')
        index += 1
plt.show()

# + [markdown] deletable=true editable=true
# ### Overlay and scale the graphs
# The axis limits aren't currently the same and so it is difficult to see how the data compares to one another

# + deletable=true editable=true
# plot: side by side (height and weight) for each sport
fig, axes = plt.subplots(1, num_x, figsize=(14,4))
for index, attribute in enumerate(attribute_list):
    for group in group_container:
        # plot histogram - `density=True` converts counts to probabilities
        # (replaces the removed `normed` argument)
        axes[index].hist(group_container[group][attribute],
                         density=True,
                         label=group.upper(),
                         histtype='stepfilled',
                         alpha=0.7)
    # axis labels
    if attribute == 'Height':
        axes[index].set_xlabel('Inches')
        axes[index].set_title('Height')
    if attribute == 'Weight':
        axes[index].set_xlabel('Pounds')
        axes[index].set_title('Weight')
    axes[index].set_ylabel('Frequencies')
    axes[index].legend()
plt.show()

# + [markdown] deletable=true editable=true
# ### Box plots
# Curious about standard deviations/outliers

# + deletable=true editable=true
fig, axes = plt.subplots(1, num_x, figsize=(8,4))
for index, attribute in enumerate(attribute_list):
    # create a box plot figure for each attribute
    plot_list = []
    for i, group in enumerate(group_container):
        # create box plot for each group (sport)
        plot_list.append(group_container[group][attribute])
    # plot axis
axes[index].boxplot(plot_list) axes[index].set_xticklabels(group_container) axes[index].set_title(attribute) plt.show() # + [markdown] deletable=true editable=true # ### Height vs Weight # Investigate how the height and weight compare across professional athletes from these sports # + deletable=true editable=true # first glance fig, axes = plt.subplots(1, num_y, figsize=(14,4)) for i, group in enumerate(group_container): axes[i].scatter(group_container[group]['Weight'], group_container[group]['Height'], alpha=0.6) axes[i].set_title(group.upper()) axes[i].set_ylabel('Height') axes[i].set_xlabel('Weight') # + [markdown] deletable=true editable=true # ### Overlay and scale the graphs # Same issue as before. We have to ensure the axis are the same for quick visual comparison # + deletable=true editable=true # place points from each group on the same scatter plot (color coordinated) fig, ax = plt.subplots(figsize=(8,8)) for i, group in enumerate(group_container): ax.scatter(group_container[group]['Weight'], group_container[group]['Height'], alpha=0.5, label=group.upper(), s=50) # axis labels ax.legend() ax.set_ylabel('Height (in)') ax.set_xlabel('Weight (lbs)') plt.show() # + [markdown] deletable=true editable=true # ### Altered view # Let's spread the axis a bit and make the points more transparent # + deletable=true editable=true fig, ax = plt.subplots(figsize=(14,8)) for i, group in enumerate(group_container): ax.scatter(group_container[group]['Weight'], group_container[group]['Height'], alpha=0.15, label=group.upper(), s=80) ax.legend() ax.set_ylabel('Height (in)') ax.set_xlabel('Weight (lbs)') plt.show() # + [markdown] deletable=true editable=true # ### Clusters? 
# Just for fun, it would be interesting to run an svm and see how well these groups could be linearly seperated # + [markdown] deletable=true editable=true # #### Create simple svm plot # + deletable=true editable=true # create X[height, weight] and Y[label] X = [] Y = [] for index, group in enumerate(group_container): X_ = list(zip(group_container[group]['Weight'], group_container[group]['Height'])) X_ = [list(elem) for elem in X_] Y_ = [index]*len(X_) X.extend(X_) Y.extend(Y_) # classifier # doc: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html # > `C` param noted in doc: default value is 1 C = 1.0 # since the dataset is unblanced, we will use the class_weight='balanced' svc = svm.SVC(kernel='linear', C=C, class_weight="balanced") svc.fit(X, Y) # create a mesh/grid that will plot the svm boundaries # values h = 0.1 # step size in the mesh/grid # # +/- 5 lbs to extend the grid to the plot boundry # # +/- 2 in to extend the grid to the plot boundry x_min = min(point[0] for point in X) - 5 y_min = min(point[1] for point in X) - 2 x_max = max(point[0] for point in X) + 5 y_max = max(point[1] for point in X) + 2 # creation xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # + deletable=true editable=true # create figure with color coded regions divided by the svm fig, ax = plt.subplots(figsize=(14,8)) for i, group in enumerate(group_container): Z = svc.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=LinearSegmentedColormap.from_list('mycmap', ['blue','orange','green']), alpha=0.06) ax.scatter(group_container[group]['Weight'], group_container[group]['Height'], alpha=0.6, label=group.upper(), s=40) ax.legend() ax.set_title('Athlete Height vs Weight by Sport') ax.set_ylabel('Height (in)') ax.set_xlabel('Weight (lbs)') plt.show() # + [markdown] deletable=true editable=true # ### Just how 'accurate' is this prediction # Accuracy here isn't really useful -- in the sense that we're 
only calculating the training accuracy. We haven't set aside a validation or test dataset. However, this wasn't really the point. The point was to explore the height vs weight of professional athletes from different sports. Still, what's the accuracy here? # + deletable=true editable=true # calculate "accuracy" meanAcc = svc.score(X, Y) print("{0:.2f}%".format(meanAcc*100)) # + [markdown] deletable=true editable=true # ### Considerations # * Our sample size is small. We only have a ~500 players for each sport # * Since our dataset is unbalanced, a `class_weight="balanced"` argument was used. # * I believe this is working as intended, but this would need to be verified. Without this argument we stay the same at ~82% accuracy # # # ### Notes # * `>80%` accuracy is surprising. # * Trend was anticipated, but wasn't expected to be this significant # # ### Where do we go from here, future directions # * More data -- (from different years?) # * We could explore this data with the addition of the players position (forward vs defender vs goalie) # * Include other sports? # * How similar are these trends in college vs professional? # * What players get the most playing time? Does recreating this experiment with the top players from each sport produce different results? # * How well would a KNN classifer work? # * Do these results provide any meaningful information? # * What if we considered how long each player has been playing? Is there a higher percentage of rookies than veterans that are outliers? # + deletable=true editable=true
explore_ht-vs-wt_sport.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %config Completer.use_jedi = False #for intellisense compatibility w/ Jupyter Notebook
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
# project-local helpers (not on PyPI): vintage-data transformations used below
from alfredhelperfile import find_growth_gap, find_price_growth, find_vintage_percent_chg
# -

# Read in FOMC Meeting Dates

# +
# read in FOMC meeting dates from author's file
df = pd.read_csv('Input_Data/fomc_dates.csv')

# create list of FOMC meetings
FOMC_meets = df['fomc_date'].tolist()

# cast list items to datetime objects for functions (probably could have just passed df to f(n)s)
FOMC_dates = [dt.datetime.strptime(meet,'%m/%d/%Y') for meet in FOMC_meets]

# create df of FOMC meeting dates; used below as the left side of merge_asof
# joins so every series is sampled on meeting dates
FOMC_df = pd.DataFrame(index=FOMC_meets)

# cast the index to datetime
FOMC_df.index = pd.to_datetime(FOMC_df.index)

# set the name of the index for future merge
FOMC_df.index.name = 'observation_date'
# -

# Read in Philly Fed Forecast Dates

# +
# URL: https://www.philadelphiafed.org/-/media/frbp/assets/surveys-and-data/survey-of-professional-forecasters/spf-release-dates.txt?la=en&hash=B0031909EE9FFE77B26E57AC5FB39899
# Note: Download used for this file was 9/10/2021

# read in Philly Fed Real GDP in levels
df = pd.read_excel(io='Input_Data/Median_RGDP_Level.xlsx', sheet_name='Median_Level') #read in data

# read in Philly Fed release dates; the fixed-width-ish file is split on runs
# of 2+ spaces/tabs, skipping the 3 header and 7 footer lines
philly_dates_df = pd.read_csv('Input_Data/spf-release-dates.txt',
                              engine='python',
                              skiprows=3,
                              skipfooter=7,
                              skip_blank_lines=True,
                              sep="[ \t]{2,}")

# NOTE(review): the lines below that build `philly_dates` from this file are
# commented out, but `philly_dates` is consumed two cells down
# (`philly_dates.set_index(df.index)`). As written, a fresh top-to-bottom run
# raises NameError there; presumably these lines (including the [38:123]
# slice) were meant to stay active -- confirm with the author before fixing.
# philly_dates_df.to_csv('Input_Data/poof2.csv')
# philly_dates_df.reset_index(inplace=True)
# philly_dates_df.rename(columns=lambda x: x.strip(), inplace=True)
# philly_dates = philly_dates_df['News Release Date'].str.replace('*', '').to_frame()
# philly_dates = philly_dates[38:123]

print(philly_dates_df.to_string())
# -

# Read in and Format Vintage Real GDP Data from ALFRED File

# +
# URL: https://alfred.stlouisfed.org/series/downloaddata?seid=GDPC1
# Note: Must select 'All' in the Vintage Dates section. Data as of 1/28/2021

# read in ALFRED data ('.' marks missing observations)
df = pd.read_csv('Input_Data/GDPC1_2_Vintages_Starting_1991_12_04.txt', sep='\t', na_values='.')

# set index to observation date
df.set_index('observation_date', inplace=True)

# set df index to datetime
df.index = pd.to_datetime(df.index)

# drop any remaining columns with no observations
df = df.dropna(how='all', axis=1)

# calculate vintage percent changes with helper function
FOMC_gdp_hist = find_vintage_percent_chg(df, FOMC_dates, column_A_name='lg', column_B_name='g')
# -

# Read in and Format Median Real GDP Forecast Data from FRB Philadelphia

# +
# URL: https://www.philadelphiafed.org/surveys-and-data/rgdp
# Note: 'Median Responses' as of 1/10/2021

# read in Philly Fed Real GDP in levels
df = pd.read_excel(io='Input_Data/Median_RGDP_Level.xlsx', sheet_name='Median_Level') #read in data

# drop last col which shows up as NaT in index (not sure why this shows up...)
df = df[df['YEAR'].notna()]

# drop data before the 4th quarter of 1999 (magic row offset tied to this file)
df = df[124:]

# set index to index of Philly data df. this is manually aligned by the authors
# the alignment is done to push a quarterly 'release date' to the corresponding
# observation quarter
# NOTE(review): `philly_dates` is undefined here on a clean run -- see the
# commented-out creation lines in the "Philly Fed Forecast Dates" cell above.
philly_dates = philly_dates.set_index(df.index)

# set df index to philly fed period
df = df.set_index(philly_dates['News Release Date'])

# set df index to datetime
df.index = pd.to_datetime(df.index)

# calculate the one period ahead pct change from levels (annualized: x4)
df['g1'] = df[['RGDP1', 'RGDP2']].apply(lambda row: (row.iloc[1]-row.iloc[0])/row.iloc[0]*100*4, axis=1)

# calculate the two period ahead pct change from levels (annualized: x4)
df['g2'] = df[['RGDP2', 'RGDP3']].apply(lambda row: (row.iloc[1]-row.iloc[0])/row.iloc[0]*100*4, axis=1)

# identify columns to retain
median_GDP_forecasts = ['g1', 'g2']

# filter for wanted columns
median_GDP_forecasts = df.filter(median_GDP_forecasts, axis=1)

# set index to observation date
median_GDP_forecasts.index.name = 'observation_date'

median_GDP_forecasts.to_csv('Input_Data/poof.csv')

# merge dataframes to retain observations on FOMC meeting dates
# NOTE(review): this merge is commented out, but `FOMC_gdp_forecast_median`
# is used in the final `macro_df` join at the bottom of the notebook -- a
# clean run raises NameError there. Confirm intent before re-enabling.
# FOMC_gdp_forecast_median = pd.merge_asof(FOMC_df, median_GDP_forecasts, left_index = True, right_index = True)
# -

# Read in and Format Vintage GDP Chain-Type Price Index Data from ALFRED File (Percent Change from a Year Ago)

# +
# URL: https://alfred.stlouisfed.org/series/downloaddata?seid=GDPCTPI
# NOTE: Must select 'All' in the Vintage Dates section. Data as of 1/28/2021

# read in ALFRED data
df = pd.read_csv('Input_Data/GDPCTPI_2_Vintages_Starting_1996_01_19.txt', sep='\t', na_values='.')

# set index to observation date
df.set_index('observation_date', inplace=True)

# set df index to datetime
df.index = pd.to_datetime(df.index)

# drop any remaining columns with no observations
df = df.dropna(how='all', axis=1)

# calculate vintage percent change with helper function
FOMC_price_hist = find_vintage_percent_chg(df, FOMC_dates, column_A_name='lp', column_B_name='p')
# -

# Read in and Format GDP Price Percent Change (Growth) Forecast from Philly Fed

# +
# URL: https://www.philadelphiafed.org/surveys-and-data/pgdp
# Note: 'Median Responses' as of 1/12/2021

#read in data
df = pd.read_excel(io='Input_Data/Median_PGDP_Growth.xlsx', sheet_name='Median_Growth') #read in data

# read in approximate Philly Fed release dates
philly_dates = pd.read_csv('Input_Data/philly_release_dates.csv')

# drop last col which shows up as NaT in index (not sure why this shows up...)
df = df[df['YEAR'].notna()]

# set length to correct Philly Fed release date length (same magic offset as above)
df = df[124:]

# set index to index of Philly data df. this is manually aligned by the authors
# the alignment is done to push a quarterly 'release date' to the corresponding
# observation quarter
philly_dates = philly_dates.set_index(df.index)

# set df index to philly fed period
df = df.set_index(philly_dates['Period'])

# # set df index to datetime
df.index = pd.to_datetime(df.index)

# remove blank rows
df = df[df['DPGDP2'].notna()]

# rename columns: 1- and 2-quarter-ahead GDP price growth forecasts
df['p1'] = df['DPGDP2']
df['p2'] = df['DPGDP3']

# identify columns to retain
median_price_forecasts = ['p1', 'p2']

# filter for wanted columns
median_price_forecasts = df.filter(median_price_forecasts, axis=1)

# set index to observation date
median_price_forecasts.index.name = 'observation_date'

# merge dataframes to retain observations on FOMC meeting dates
FOMC_price_forecast_median = pd.merge_asof(FOMC_df, median_price_forecasts, left_index = True, right_index = True)
# -

# Read in and Format Vintage PCE Chain-Type Price Index Data from ALFRED File

# +
# URL: https://alfred.stlouisfed.org/
# Note: Must select 'All' in the Vintage Dates section.
# Data as of 2/8/2021

# read in ALFRED data
df = pd.read_csv('Input_Data/JCXFE_2_Vintages_Starting_1999_07_29.txt', sep='\t', na_values='.')

# set index to observation date
df.set_index('observation_date', inplace=True)

# set df index to datetime
df.index = pd.to_datetime(df.index)

# drop any remaining columns with no observations
df = df.dropna(how='all', axis=1)

# calculate vintage percent change with helper function
FOMC_pce_hist = find_vintage_percent_chg(df, FOMC_dates, column_A_name='lp', column_B_name='p')
# -

# Read in and Format Core PCE Median Forecast Data from Philly Fed

# +
# URL: https://www.philadelphiafed.org/surveys-and-data/pgdp
# Note: 'Median Responses' as of 1/12/2021

# read in data from file
df = pd.read_excel(io='Input_Data/Median_COREPCE_Level.xlsx', sheet_name='Median_Level') #read in data

# read in approximate Philly Fed release dates
philly_dates = pd.read_csv('Input_Data/philly_release_dates.csv')

# drop last col which shows up as NaT in index (not sure why this shows up...)
df = df[df['YEAR'].notna()]

# set length to correct Philly Fed release date length
df = df[124:]

# set index to index of Philly data df. this is manually aligned by the authors
# the alignment is done to push a quarterly 'release date' to the corresponding
# observation quarter
philly_dates = philly_dates.set_index(df.index)

# set df index to philly fed period
df = df.set_index(philly_dates['Period'])

# set df index to datetime
df.index = pd.to_datetime(df.index)

# remove blank rows
df = df[df['COREPCE1'].notna()]

# rename columns: 1- and 2-quarter-ahead core PCE forecasts
df['p1'] = df['COREPCE2']
df['p2'] = df['COREPCE3']

# identify columns to retain
median_pce_forecasts = ['p1', 'p2']

# filter for wanted columns
median_pce_forecasts = df.filter(median_pce_forecasts, axis=1)

# set index to observation date
median_pce_forecasts.index.name = 'observation_date'

# merge dataframes to retain observations on FOMC meeting dates
FOMC_pce_forecast_median = pd.merge_asof(FOMC_df, median_pce_forecasts, left_index = True, right_index = True)
# -

# Join GDP Price Data with Core PCE Data When Available

# +
# join historic and forecast gdp price data
price_df = FOMC_price_hist.join(FOMC_price_forecast_median)

# join historic and forecast pce data
pce_df = FOMC_pce_hist.join(FOMC_pce_forecast_median)

# slice gdp price data for pre 2007 (Feb 2007 is the splice point between the
# two inflation measures)
part1 = price_df[:'2007-01-31']

# slice pce data for post 2007
part2 = pce_df['2007-03-21':]

# rename columns to conform to macro_df variables
# NOTE(review): part1/part2 are slices of price_df/pce_df, so inplace rename
# may emit SettingWithCopyWarning -- behavior verified on slices here only by
# the authors' original runs.
part1.rename(columns={"pricel1": "lp", "price": "p", "pricef1med": "p1", "pricef2med": "p2"},inplace = True)
part2.rename(columns={"pcel1": "lp", "pce": "p", "pcef1med": "p1", "pcef2med": "p2"},inplace = True)

# concat parts to single df
FOMC_prices = pd.concat([part1, part2], axis=0)
# -

# Read in and Format Vintage Core PCE Inflation Data -- for Taylor Regression

# +
# read in ALFRED data
# https://alfred.stlouisfed.org/
# data as of 3/5/2021

# read in core pce data from file
df = pd.read_csv('Input_Data/PCEPILFE_2_Vintages_Starting_2000_08_01.txt', sep='\t', na_values='.')

# set index to observation date
df.set_index('observation_date', inplace=True)

# set df index to datetime
df.index = pd.to_datetime(df.index)

# drop any remaining columns with no observations
df = df.dropna(how='all', axis=1)

# calculate vintage percent changes with helper function
# FOMC_dates[4:] skips the earliest meetings that predate this series' vintages
FOMC_corepce_growth = find_price_growth(df, FOMC_dates[4:], column_A_name='corepce')
# -

# Calculate Output Gap from Real GDP and Trend Growth -- for Taylor Regression

# +
# read in ALFRED data
# https://alfred.stlouisfed.org/
# data as of 2/10/2021

# read in real gdp data from file
df1 = pd.read_csv('Input_Data/GDPC1_2_Vintages_Starting_1991_12_04.txt', sep='\t', na_values='.')

# set index to observation date
df1.set_index('observation_date', inplace=True)

# trim dataframe
df1 = df1['1959-07-01':]
# df1 = df1['1980-01-01':]

# remove bad benchmark year
# should build this into the find growth f(n). do this if publishing.
# Check if I wrote this into the gap function...
df1 = df1.drop('GDPC1_19991028', axis = 1)

# drop any remaining columns with no observations
df1 = df1.dropna(how='all', axis=1)

# calculate vintage growth gap with helper function
FOMC_gdp_gap = find_growth_gap(df1, FOMC_dates, column_C_name = 'gdpgap')
# -

# Read in and Format FOMC Target Data from Author's Calculations Based on ALFRED Data

# +
# to save time, this work was imported from a previous calculation. we should rebuild here if published
df = pd.read_csv('Input_Data/fomc_rates.csv', index_col=0)

# name index for future merge
df.index.name = 'observation_date'

# set the index to datetime
df.index = pd.to_datetime(df.index)

# drop any remaining columns with no observations
df = df.dropna(how='all', axis=0)

# name dataframe for join
FOMC_target_hist = df
# -

# Combine Macro Data from ALFRED, FRED, & Philly Fed

# NOTE(review): `FOMC_gdp_forecast_median` is only created in a commented-out
# merge above; this join fails with NameError on a clean run.
macro_df = FOMC_gdp_hist.join([FOMC_gdp_forecast_median, FOMC_prices, FOMC_corepce_growth, FOMC_gdp_gap, FOMC_target_hist])
macro_df.index.names = ['date']

macro_df.to_csv('Output_Data/macro_df.csv')
FOMC.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### 1. What is TensorFlow? (choose all that apply)
# ##### Ans:
# <ul>
# <li>TensorFlow is an open-source, high-performance library for numerical computation that uses directed graphs</li>
# <li>TensorFlow is open source</li>
# </ul>

# #### 2. True or False: When you run a TensorFlow graph (like a + b), you immediately get the output of the graph (the sum of a and b)
# ##### Ans: True — but only if eager execution (tf.eager) is enabled

# #### 3. What do nodes in a TensorFlow graph represent?
# ##### Ans: Mathematical operations

# #### 4. What is a tensor?
# ##### Ans: An n-dimensional array of data (a generalization of a vector)

# #### 5. True or False: You can only run TensorFlow on Google Cloud
# ##### Ans: False

# #### 6. The iterative process where a TensorFlow model can crowdsource and combine model feedback from individual users is called what?
# ##### Ans: Federated learning

# #### 7. What is the high-level API that allows for distributed training in TensorFlow?
# ##### Ans: tf.estimator

# #### 8. How do you run a TensorFlow graph?
# ##### Ans: Call run() on a tf.Session

# #### 9. Why would you call tf.summary.FileWriter?
# ##### Ans: To output statistics and visualize them in a tool like TensorBoard

# #### 10. What is the shape of tf.constant([2, 3, 5])?
# ##### Ans: It is a rank-1 tensor (vector) with three elements, so its shape is (3,)
Coursera/Intro to TensorFlow/Week-1/Quiz/Core-TensorFlow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: NanopolishComp # language: python # name: nanopolishcomp # --- # # Freq_meth_calculate usage # Calculate methylation frequency at genomic CpG sites from the output of `nanopolish call-methylation` # ## Output files format # `Freq_meth_calculate` can generates 2 files, a standard BED file and a tabulated file containing extra information # #### BED file # Standard genomic BED6 (https://genome.ucsc.edu/FAQ/FAQformat.html#format1). The score correspond to the methylation frequency multiplied by 1000. The file is sorted by coordinates and can be rendered with a genome browser such as [IGV](https://software.broadinstitute.org/software/igv/) # #### Tabulated TSV file # Contrary to the bed file, in the tabulated report, positions are ordered by decreasing methylation frequency. # # The file contains the following fields: # # * **chrom / start / end / strand**: Genomic coordinates of the motif or group of motifs in case split_group was not selected. # * **site_id**: Unique integer identifier of the genomic position. # * **methylated_reads / unmethylated_reads / ambiguous_reads**: Number of reads at a given genomic location with a higher likelyhood of being methylated or unmethylated or with an ambiguous methylation call. # * **sequence**: -5 to +5 sequence of the motif or group of motifs in case split_group was not selected. # * **num_motifs**: Number of motif in the group. # * **meth_freq**: Methylation frequency (out of non anbiguous calls). 
# ## Bash command line usage # ### Command line help # + language="bash" # # # Load local bashrc and activate virtual environment # source ~/.bashrc # workon NanopolishComp # # NanopolishComp Freq_meth_calculate --help # - # ### Example usage # #### From an existing nanopolish call_methylation file output # + language="bash" # # # Load local bashrc and activate virtual environment # source ~/.bashrc # workon NanopolishComp # # NanopolishComp Freq_meth_calculate --verbose -i data/freq_meth_calculate/methylation_calls.tsv -b ./output/freq_meth_calculate/out_freq_meth_calculate.bed -t ./output/freq_meth_calculate/out_freq_meth_calculate.tsv -s Sample1 # # head ./output/freq_meth_calculate/out_freq_meth_calculate.bed # head ./output/freq_meth_calculate/out_freq_meth_calculate.tsv # - # #### Using a fasta index for output coordinates sorting and strand specific sites # + language="bash" # # # Load local bashrc and activate virtual environment # source ~/.bashrc # workon NanopolishComp # # NanopolishComp Freq_meth_calculate --verbose --strand_specific -i data/freq_meth_calculate/methylation_calls.tsv -b ./output/freq_meth_calculate/out_freq_meth_calculate.bed -t ./output/freq_meth_calculate/out_freq_meth_calculate.tsv -s Sample1 -f data/freq_meth_calculate/ref.fa.fai # # head ./output/freq_meth_calculate/out_freq_meth_calculate.bed # head ./output/freq_meth_calculate/out_freq_meth_calculate.tsv # - # #### Changing filtering thresholds (not recommended) # + language="bash" # # # Load local bashrc and activate virtual environment # source ~/.bashrc # workon NanopolishComp # # NanopolishComp Freq_meth_calculate --verbose -i data/freq_meth_calculate/methylation_calls.tsv -b ./output/freq_meth_calculate/out_freq_meth_calculate.bed -t ./output/freq_meth_calculate/out_freq_meth_calculate.tsv --min_depth 5 --min_llr 1 # # head ./output/freq_meth_calculate/out_freq_meth_calculate.bed # head ./output/freq_meth_calculate/out_freq_meth_calculate.tsv # - # ## Python API usage # ### 
# ### Import the package

# +
# Import main program (project package; installed via `python setup.py`)
from NanopolishComp.Freq_meth_calculate import Freq_meth_calculate

# Import helper functions
from NanopolishComp.common import jhelp, head
# -

# ### python API help

# render the signature/docstring of Freq_meth_calculate in the notebook
jhelp(Freq_meth_calculate)

# ### Example usage

# #### basic setting

# +
# Minimal call: parse a nanopolish `call-methylation` TSV and write a BED
# file of per-site methylation frequencies for sample "Sample1".
f = Freq_meth_calculate(
    input_fn="./data/freq_meth_calculate/methylation_calls.tsv",
    output_bed_fn="./output/freq_meth_calculate/out_freq_meth_calculate.bed",
    sample_id="Sample1",
    verbose=True)

# show the first lines of the generated BED file
head("./output/freq_meth_calculate/out_freq_meth_calculate.bed")
# -

# #### Using a fasta index for output coordinates sorting and strand specific sites

# +
# Same input, but a fasta index (.fai) is supplied so output sites are sorted
# in reference order, and calls are kept strand-specific.
f = Freq_meth_calculate(
    input_fn="./data/freq_meth_calculate/methylation_calls.tsv",
    fasta_index="./data/freq_meth_calculate/ref.fa.fai",
    output_bed_fn="./output/freq_meth_calculate/out_freq_meth_calculate.bed",
    sample_id="Sample1",
    verbose=True,
    strand_specific=True)

head("./output/freq_meth_calculate/out_freq_meth_calculate.bed")
# -

# #### Changing filtering threshold (not recommended)

# +
# Loosened thresholds: min_depth=5 reads per site, min_llr=1 log-likelihood
# ratio per call. Tabulated TSV output instead of BED.
f = Freq_meth_calculate(
    input_fn="./data/freq_meth_calculate/methylation_calls.tsv",
    output_tsv_fn="./output/freq_meth_calculate/out_freq_meth_calculate.tsv",
    min_llr=1,
    min_depth=5)

head("./output/freq_meth_calculate/out_freq_meth_calculate.tsv")
docs/demo/Freq_meth_calculate_usage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Fetch recent English tweets about "coronavirus" and score their sentiment
# with a pre-trained Naive Bayes model.
import pandas as pd
import tweepy
import csv
import json

ACCESS_TOKEN = ''
ACCESS_SECRET = ''
CONSUMER_KEY = ''
CONSUMER_SECRET = ''

auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)

c = tweepy.Cursor(api.search, q='coronavirus', lang='en')

count = 201
page_needed = int(count / 15) + 3
# BUGFIX: removed `c.pages(page_needed)` -- Cursor.pages() returns a new
# iterator, so calling it and discarding the result was a no-op; the loop
# below iterates c.items() instead.

id_tweets = []
date = []
full_text_tweet = []
for tweet in c.items():
    try:
        # re-fetch each status in extended mode to get the untruncated text
        tweet = api.get_status(tweet.id, count=200, tweet_mode="extended")
        full_text_tweet.append(tweet.full_text)
        id_tweets.append(tweet.id)
    except Exception:
        # BUGFIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit. Best-effort skip of tweets that can
        # no longer be fetched (deleted/protected/rate errors) is kept.
        pass
    if len(full_text_tweet) == count:
        break

print("Total Data length", len(full_text_tweet))
# -

# BUGFIX: `sklearn.externals.joblib` was removed in scikit-learn 0.23;
# prefer the standalone joblib package and fall back only on legacy installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
import string
import re
import pickle
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

tweets = pd.Series(full_text_tweet)

# +
# crude emoticon patterns (padded with spaces so they match whole tokens)
HAPPY_EMO = r" ([xX;:]-?[dD)]|:-?[\)]|[;:][pP]) "
SAD_EMO = r" (:'?[/|\(]) "


def lemmatize_tokenize(text):
    """Tokenize *text* and WordNet-lemmatize every token."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token) for token in word_tokenize(text)]


class TextPreProc(BaseEstimator, TransformerMixin):
    """sklearn-style transformer normalizing a Series of raw tweet texts.

    Parameters
    ----------
    use_mention : bool
        If True, replace @mentions with the generic token " @tags ";
        otherwise strip them entirely.
    """

    def __init__(self, use_mention=False):
        self.use_mention = use_mention

    def fit(self, X, y=None):
        # stateless transformer: nothing to learn
        return self

    def transform(self, X, y=None):
        # BUGFIX: every `.str.replace` below now passes `regex=` explicitly.
        # pandas 2.0 changed the default to literal matching, which would
        # silently break all of these patterns.
        # We can choose between keeping the mentions or deleting them
        if self.use_mention:
            X = X.str.replace(r"@[a-zA-Z0-9_]* ", " @tags ", regex=True)
        else:
            X = X.str.replace(r"@[a-zA-Z0-9_]* ", "", regex=True)
        # Keeping only the word after the #
        X = X.str.replace("#", "", regex=False)
        X = X.str.replace(r"[-\.\n]", "", regex=True)
        # Removing HTML garbage
        X = X.str.replace(r"&\w+;", "", regex=True)
        # Removing links
        X = X.str.replace(r"https?://\S*", "", regex=True)
        # replace repeated letters with only two occurences
        # heeeelllloooo => heelloo
        X = X.str.replace(r"(.)\1+", r"\1\1", regex=True)
        # mark emoticons as happy or sad
        X = X.str.replace(HAPPY_EMO, " happyemoticons ", regex=True)
        X = X.str.replace(SAD_EMO, " sademoticons ", regex=True)
        X = X.str.lower()
        return X
# -

# NV Model: pre-trained Naive Bayes classifier
NV_model = joblib.load('NB_MODEL.pkl')

# Feature Vector: fitted preprocessing/vectorizing pipeline
with open('NB_tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

tweets = tokenizer.transform(tweets)

pos_count = 0
neg_count = 0
neutral_count = 0

# +
# classify each tweet: P(positive) > 0.6 -> positive, < 0.4 -> negative,
# otherwise neutral
sentiments = []
for i in range(len(full_text_tweet)):
    result = NV_model.predict_proba(tweets[i])[0]
    if result[0] > 0.60:
        sentiments.append("๐Ÿ˜ƒ")
        pos_count = pos_count + 1
        #print("Text : " + full_text_tweet[i] + " Sentiment : ๐Ÿ˜ƒ")
    elif result[0] < 0.40:
        sentiments.append("๐Ÿ˜”")
        neg_count = neg_count + 1
        #print("Text : " + full_text_tweet[i] + " Sentiment : ๐Ÿ˜”")
    else:
        sentiments.append("๐Ÿ˜")
        neutral_count = neutral_count + 1
        #print("Text : " + full_text_tweet[i] + " Sentiment : ๐Ÿ˜")

sentiments = pd.Series(sentiments, name="Sentiment")
full_text_tweet = pd.Series(full_text_tweet, name="Tweet")
result_dataframe = pd.concat([full_text_tweet, sentiments], axis=1)

pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
# BUGFIX: -1 was deprecated and later rejected for this option; None means
# "no truncation".
pd.set_option('display.max_colwidth', None)
# -

result_dataframe

# pie chart of the class distribution
from matplotlib import pyplot as plt
import numpy as np

fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('equal')
langs = ['positive', 'neutral', 'negative']
prediction = [pos_count, neutral_count, neg_count]
ax.pie(prediction, labels=langs, autopct='%1.2f%%')
plt.show()
Prediction/NB_PRED.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ๆˆ‘ไปฌไปŠๅคฉ่ฆ่งฃๅ†ณ็š„้—ฎ้ข˜ๆ˜ฏ**่‡ช็„ถ่ฏญ่จ€ๅค„็†ไธญ็š„ๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜**, ๅœจ็›ฎๅ‰, ๆฏ”่พƒไธปๆต็š„ๆŠ€ๆœฏๆ˜ฏ**่ฏญ่จ€ๆจกๅž‹(ๅฆ‚LSTM, BERT)+CRF(ๆกไปถ้šๆœบๅœบ)**, ไธบไป€ไนˆ่ฟ™ๆ ท็ป„ๅˆๆจกๅž‹ๅ‘ข? ๆˆ‘็จๅŽไผš่ฎฒๅˆฐ. ไฝ†ๆƒณ่ฆไบ†่งฃ**CRF(ๆกไปถ้šๆœบๅœบ)**, ๆˆ‘ๆƒณ้ฆ–ๅ…ˆ่ฎฉๅคงๅฎถไบ†่งฃไธ€ไธ‹**้š้ฉฌๅฐ”ๅฏๅคซๆจกๅž‹(Hidden Markov Model)**, ๆ˜ฏไธ€็งๆฆ‚็އๅ›พๆจกๅž‹, ๅช่ฆ็†่งฃไบ†HMMๆจกๅž‹ๅ’Œ**็ปด็‰นๆฏ”่งฃ็ ็ฎ—ๆณ•(viterbi algorothm)**, ็†่งฃๆกไปถ้šๆœบๅœบๅฐฑๆˆไบ†ๅˆ†ๅˆ†้’Ÿ็š„ไบ‹. # ๅœจ่ฟ™่Š‚่ฏพไธญ, ไฝ **ไธ้œ€่ฆๆœ‰ๆฆ‚็އๅ›พๆจกๅž‹็š„ๅŸบ็ก€**, ๅช่ฆๆœ‰ๅŸบๆœฌ็š„ๆฆ‚็އ่ฎบ็Ÿฅ่ฏ†ๅณๅฏ. # ้ฆ–ๅ…ˆ, ๅ…ˆๆฅ็œ‹ไธ€ไธ‹ไปŠๅคฉ็š„่ฏพ็จ‹ๅฎ‰ๆŽ’: # 0. NER(ๅ‘ฝๅๅฎžไฝ“่ฏ†ๅˆซ)้—ฎ้ข˜ๆฆ‚่ฟฐ; # 1. ไป€ไนˆๆ˜ฏ้š้ฉฌๅฐ”ๅฏๅคซๆจกๅž‹(HMM); # 2. HMMๆจกๅž‹็š„ๅ‚ๆ•ฐ; # 3. ็”จHMM่งฃๅ†ณๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜, HMM็š„ๅญฆไน ็ฎ—ๆณ•; # 4. ็ปด็‰นๆฏ”็ฎ—ๆณ•(Viterbi Algorithm)(HMM็š„้ข„ๆต‹็ฎ—ๆณ•). # ### 0. named entity recognition(ๅ‘ฝๅๅฎžไฝ“่ฏ†ๅˆซ)้—ฎ้ข˜ๆฆ‚่ฟฐ: # ๅ‘ฝๅๅฎžไฝ“่ฏ†ๅˆซ๏ผˆ่‹ฑ่ฏญ๏ผšNamed Entity Recognition๏ผŒ็ฎ€็งฐNER๏ผ‰, ๆ˜ฏๆŒ‡่ฏ†ๅˆซๆ–‡ๆœฌไธญๅ…ทๆœ‰็‰นๅฎšๆ„ไน‰็š„ๅฎžไฝ“๏ผŒไธป่ฆๅŒ…ๆ‹ฌไบบๅใ€ๅœฐๅใ€ๆœบๆž„ๅใ€ไธ“ๆœ‰ๅ่ฏ็ญ‰็ญ‰, ๅนถๆŠŠๆˆ‘ไปฌ้œ€่ฆ่ฏ†ๅˆซ็š„่ฏๅœจๆ–‡ๆœฌๅบๅˆ—ไธญๆ ‡ๆณจๅ‡บๆฅใ€‚ # ไพ‹ๅฆ‚ๆœ‰ไธ€ๆฎตๆ–‡ๆœฌ: **ๆตŽๅ—ๅธ‚ๆˆ็ซ‹่‡ช็”ฑ่ดธๆ˜“่ฏ•้ชŒๅŒบ**. # ๆˆ‘ไปฌ่ฆๅœจไธŠ้ขๆ–‡ๆœฌไธญ่ฏ†ๅˆซไธ€ไบ›**ๅŒบๅŸŸๅ’Œๅœฐ็‚น**, ้‚ฃไนˆๆˆ‘ไปฌ้œ€่ฆ่ฏ†ๅˆซๅ‡บๆฅๅ†…ๅฎนๆœ‰: # **ๆตŽๅ—ๅธ‚(ๅœฐ็‚น), ่‡ช็”ฑ่ดธๆ˜“่ฏ•้ชŒๅŒบ(ๅœฐ็‚น)**. # ๅœจๆˆ‘ไปฌไปŠๅคฉไฝฟ็”จ็š„NERๆ•ฐๆฎ้›†ไธญ, ไธ€ๅ…ฑๆœ‰7ไธชๆ ‡็ญพ: # 1. "B-ORG": ็ป„็ป‡ๆˆ–ๅ…ฌๅธ(organization) # 2. "I-ORG": ็ป„็ป‡ๆˆ–ๅ…ฌๅธ # 3. "B-PER": ไบบๅ(person) # 4. "I-PER": ไบบๅ # 5. "O": ๅ…ถไป–้žๅฎžไฝ“(other) # 6. "B-LOC": ๅœฐๅ(location) # 7. "I-LOC": ๅœฐๅ # # ๆ–‡ๆœฌไธญ**ไปฅๆฏไธชๅญ—ไธบๅ•ไฝ**, ๆฏไธชๅญ—ๅฟ…้กปๅˆ†ๅˆซๅฏนๅบ”ไธŠ้ข็š„ไปปไธ€ๆ ‡็ญพ. 
# **ไฝ†ไธบไป€ไนˆไธŠ้ขๆ ‡็ญพ้™คไบ†"O"(ๅ…ถไป–)ไน‹ๅค–้ƒฝๆ˜ฏไธ€ไธชๅฎžไฝ“็ฑปๅž‹ๅฏนๅบ”ไธคไธชๆ ‡็ญพๅ‘ข?** # ่ฏทๅฐไผ™ไผดไปฌไป”็ป†็œ‹ๆ ‡็ญพๅ‰้ขๆœ‰ๅˆ†ไธบ"B"ๅ’Œ"I"็š„ไธๅŒ, "B"่กจ็คบbegin, ๅฎžไฝ“ๅผ€ๅคด็š„้‚ฃไธชๅญ—ไฝฟ็”จ"B"ๅฏนๅบ”็š„ๆ ‡็ญพๆฅๆ ‡ๆณจ, ๅœจๅฎžไฝ“ไธญ้—ดๆˆ–็ป“ๅฐพ็š„้ƒจๅˆ†, ็”จ"I"ๆฅๆ ‡ๆณจ. # ๆฏ”ๅฆ‚่ฏด"่‡ช่ดธๅŒบ"ๅฏนๅบ”็š„ๆ ‡ๆณจๆ˜ฏ: **่‡ช(B-LOC)่ดธ(I-LOC)ๅŒบ(I-LOC)**, ่ฟ™ไธ‰ไธชๅญ—้ƒฝๅฏนๅบ”ไธ€ไธช"ๅœฐๅ"็š„ๆ ‡็ญพ, ไฝ†ๆ˜ฏ็ฌฌไธ€ไธชๅญ—ๅฑžไบŽๅฎžไฝ“ๅผ€ๅคด็š„ๅญ—, ๆ‰€ไปฅไฝฟ็”จ"B"ๅผ€ๅคด็š„ๆ ‡็ญพ, ๅŽ้ขไธคไธชๅญ—็š„ๆ ‡็ญพ้ƒฝๆ˜ฏ"I"ๅผ€ๅคด. # **ๆณจๆ„**, "B"ๅŽ้ขๆ˜ฏไธๅฏไปฅ่ทŸๅ…ถไป–็ฑปๅž‹็š„"I"็š„, ไพ‹ๅฆ‚: **่‡ช(B-PER)่ดธ(I-LOC)ๅŒบ(I-LOC)** ๅฐฑๆ˜ฏๅฑžไบŽ้”™่ฏฏ็š„ๆ ‡ๆณจ, ๅ› ไธบๅฎžไฝ“ๅผ€ๅคด"B"ๆ ‡ๆณจๆˆไบ†ไบบๅ, ๅณไฝฟๅฎžไฝ“ไธญ้—ดๆ ‡ๆณจๆˆไบ†ๅœฐๅ, ่ฟ™ไธชๅฎžไฝ“็š„ๆ ‡ๆณจๆ–นๆณ•ไนŸๆ˜ฏ้žๆณ•็š„. # ไธŠ้ข็š„ๅŽŸๅ› ๅฐฑๆ˜ฏๆˆ‘ไปฌ่ฆไปŽ่ฏญ่จ€ๆจกๅž‹(ไพ‹ๅฆ‚BERT, LSTM)ๅŽ้ขๅ†ๅŠ ไธŠๆฆ‚็އๅ›พๆจกๅž‹, ไพ‹ๅฆ‚ๆกไปถ้šๆœบๅœบ, ็”จๆฅ็บฆๆŸๆจกๅž‹็š„่พ“ๅ‡บ, ้˜ฒๆญขๅ‡บ็Žฐไธๅˆ่ง„็š„ๆ ‡ๆณจ่พ“ๅ‡บ. # ### 1. ไป€ไนˆๆ˜ฏ้š้ฉฌๅฐ”ๅฏๅคซๆจกๅž‹ $a.k.a.HMM?$ # HMMๆจกๅž‹ๆ˜ฏๆฆ‚็އๅ›พๆจกๅž‹็š„ไธ€็ง, ๅฑžไบŽ็”Ÿๆˆๆจกๅž‹, ็ฌผ็ปŸ็š„่ฏด, ๆˆ‘ไปฌไธŠ้ข่ฏด็š„"BIO"็š„ๅฎžไฝ“ๆ ‡็ญพ, ๅฐฑๆ˜ฏไธ€ไธช**ไธๅฏ่ง‚ๆต‹็š„้š็Šถๆ€**, ่€ŒHMMๆจกๅž‹ๆ่ฟฐ็š„ๅฐฑๆ˜ฏ็”ฑ่ฟ™ไบ›**้š็Šถๆ€ๅบๅˆ—**(ๅฎžไฝ“ๆ ‡่ฎฐ)็”Ÿๆˆ**ๅฏ่ง‚ๆต‹็Šถๆ€**(ๅฏ่ฏปๆ–‡ๆœฌ)็š„่ฟ‡็จ‹. # ๅœจๆˆ‘ไปฌไปŠๅคฉ็š„้—ฎ้ข˜ๅฝ“ไธญ, ้š็Šถๆ€ๅบๅˆ—ๆ˜ฏๅฎžไฝ“ๆ ‡่ฎฐๅบๅˆ—, ่€Œๅฏ่ง‚ๆต‹ๅบๅˆ—ๆ˜ฏๆˆ‘ไปฌๅฏ่ฏป็š„ๅŽŸๅง‹่ฏญๆ–™ๆ–‡ๆœฌๅบๅˆ—. # **ไพ‹ๅฆ‚**: # ้š่—็Šถๆ€ๅบๅˆ—: $B-LOC | I-LOC | I-LOC$ # ่ง‚ๆต‹็Šถๆ€ๅบๅˆ—: $่‡ช \quad \quad \quad \quad ่ดธ \quad \quad \quad \quad ๅŒบ$ # ่ฎพๆˆ‘ไปฌ็š„ๅฏ่ง‚ๆต‹็Šถๆ€ๅบๅˆ—ๆ˜ฏ็”ฑๆ‰€ๆœ‰ๆฑ‰ๅญ—็ป„ๆˆ็š„้›†ๅˆ, ๆˆ‘ไปฌ็”จ$V_{Obsevation}$ๆฅ่กจ็คบ: # $$V_{obs.}=\{v_1, v_2, ... , v_M \}$$ # ไธŠๅผไธญ, $v$่กจ็คบๅญ—ๅ…ธไธญๅ•ไธชๅญ—, ๅ‡่ฎพๆˆ‘ไปฌๅทฒ็Ÿฅ็š„ๅญ—ๆ•ฐไธบ$M$. # ่ฎพๆ‰€ๆœ‰ๅฏ่ƒฝ็š„้š่—็Šถๆ€้›†ๅˆไธบ$Q_{hidden}$, ไธ€ๅ…ฑๆœ‰$N$็ง้š่—็Šถๆ€, ไพ‹ๅฆ‚ๆˆ‘ไปฌ็Žฐๅœจ็š„ๅ‘ฝๅๅฎžไฝ“่ฏ†ๅˆซๆ•ฐๆฎ้‡Œ้ขๅชๆœ‰7็งๆ ‡็ญพ: # $$Q_{hidden} = \{ q_1, q_2, ... 
, q_N\}$$ # ่ฎพๆˆ‘ไปฌๆœ‰่ง‚ๆต‹ๅˆฐ็š„ไธ€ไธฒ่‡ช็„ถ่ฏญ่จ€ๅบๅˆ—ๆ–‡ๆœฌ$O$, ไธ€ๅ…ฑๆœ‰$T$ไธชๅญ—, ๅˆๆœ‰่ฟ™ๆฎต่ง‚ๆต‹ๅˆฐ็š„ๆ–‡ๆœฌๆ‰€ๅฏนๅบ”็š„ๅฎžไฝ“ๆ ‡่ฎฐ, ไนŸๅฐฑๆ˜ฏ้š็Šถๆ€$I$: # $$I=\{i_1, i_2, ... , i_T \}(้š็Šถๆ€) \quad O=\{o_1, o_2, ... , o_T \}(่ง‚ๆต‹)$$ # ๆณจๆ„ไธŠๅผไธญ, ๆˆ‘ไปฌๅธธ็งฐ$t$ไธบ**ๆ—ถๅˆป**, ๅฆ‚ไธŠๅผไธญไธ€ๅ…ฑๆœ‰$T$ไธชๆ—ถๅˆป($T$ไธชๆฑ‰ๅญ—). # ![](./imgs/trellis.jpg) # **HMMๆจกๅž‹ๆœ‰ไธคไธชๅŸบๆœฌๅ‡่ฎพ(้žๅธธ้‡่ฆ)**: # 1. ็ฌฌ$t$ไธช้š็Šถๆ€(ๅฎžไฝ“ๆ ‡็ญพ)ๅช่ทŸๅ‰ไธ€ๆ—ถๅˆป็š„$t-1$้š็Šถๆ€(ๅฎžไฝ“ๆ ‡็ญพ)ๆœ‰ๅ…ณ, ไธŽ้™คๆญคไน‹ๅค–็š„ๅ…ถไป–้š็Šถๆ€(ๅฆ‚$t-2,\ t+3$)ๆ— ๅ…ณ. # ไพ‹ๅฆ‚ไธŠๅ›พไธญ: ่“่‰ฒ็š„้ƒจๅˆ†ๆŒ‡็š„ๆ˜ฏ$i_t$ๅชไธŽ$i_{t-1}$ๆœ‰ๅ…ณ, ่€ŒไธŽ่“่‰ฒๅŒบๅŸŸไน‹ๅค–็š„ๆ‰€ๆœ‰ๅ†…ๅฎน้ƒฝๆ— ๅ…ณ, ่€Œ$P(i_{t}|i_{t-1})$ๆŒ‡็š„ๆ˜ฏ้š็Šถๆ€$i$ไปŽ$t-1$ๆ—ถๅˆป่ฝฌๅ‘$t$ๆ—ถๅˆป็š„ๆฆ‚็އ, ๅ…ทไฝ“่ฝฌๆขๆ–นๅผไธ‹้ขไผš็ป†่ฎฒ. # 2. ่ง‚ๆต‹็‹ฌ็ซ‹็š„ๅ‡่ฎพ, ๆˆ‘ไปฌไธŠ้ข่ฏด่ฟ‡, HMMๆจกๅž‹ไธญๆ˜ฏ็”ฑ**้š็Šถๆ€ๅบๅˆ—(ๅฎžไฝ“ๆ ‡่ฎฐ)็”Ÿๆˆๅฏ่ง‚ๆต‹็Šถๆ€(ๅฏ่ฏปๆ–‡ๆœฌ)็š„่ฟ‡็จ‹**, # ่ง‚ๆต‹็‹ฌ็ซ‹ๅ‡่ฎพๆ˜ฏๆŒ‡ๅœจไปปๆ„ๆ—ถๅˆป่ง‚ๆต‹$o_t$ๅชไพ่ต–ไบŽๅฝ“ๅ‰ๆ—ถๅˆป็š„้š็Šถๆ€$i_t$, ไธŽๅ…ถไป–ๆ—ถๅˆป็š„้š็Šถๆ€ๆ— ๅ…ณ. # ไพ‹ๅฆ‚ไธŠๅ›พไธญ: ็ฒ‰็บข่‰ฒ็š„้ƒจๅˆ†ๆŒ‡็š„ๆ˜ฏ$i_{t+1}$ๅชไธŽ$o_{t+1}$ๆœ‰ๅ…ณ, ่ทŸ็ฒ‰็บข่‰ฒๅŒบๅŸŸไน‹ๅค–็š„ๆ‰€ๆœ‰ๅ†…ๅฎน้ƒฝๆ— ๅ…ณ. # ### 2. HMMๆจกๅž‹็š„ๅ‚ๆ•ฐ: # 1. **HMM็š„่ฝฌ็งปๆฆ‚็އ(transition probabilities):** # ๆˆ‘ไปฌไธŠ้ขๆๅˆฐไบ†$P(i_{t}|i_{t-1})$ๆŒ‡็š„ๆ˜ฏ้š็Šถๆ€$i$ไปŽ$t-1$ๆ—ถๅˆป่ฝฌๅ‘$t$ๆ—ถๅˆป็š„ๆฆ‚็އ, ๆฏ”ๅฆ‚่ฏดๆˆ‘ไปฌ็Žฐๅœจๅฎžไฝ“ๆ ‡็ญพไธ€ๅ…ฑๆœ‰$7$็ง, ไนŸๅฐฑๆ˜ฏ$N=7$(ๆณจๆ„$N$ๆ˜ฏๆ‰€ๆœ‰ๅฏ่ƒฝ็š„ๅฎžไฝ“ๆ ‡็ญพ็ง็ฑป็š„้›†ๅˆ), ไนŸๅฐฑๆ˜ฏ$Q_{hidden} = \{ q_0, q_1, ... , q_6\}$(ๆณจๆ„ๆˆ‘ไปฌๅฎžไฝ“ๆ ‡็ญพ็ผ–ๅทไปŽ$0$็ฎ—่ตท), ๅ‡่ฎพๅœจ$t-1$ๆ—ถๅˆปไปปไฝ•ไธ€็งๅฎžไฝ“ๆ ‡็ญพ้ƒฝๅฏไปฅๅœจ$t$ๆ—ถๅˆป่ฝฌๆขไธบไปปไฝ•ไธ€็งๅ…ถไป–็ฑปๅž‹็š„ๅฎžไฝ“ๆ ‡็ญพ, ๅˆ™ๆ€ปๅ…ฑๅฏ่ƒฝ็š„่ฝฌๆข็š„่ทฏๅพ„ไธ€ๅ…ฑๆœ‰$N^2$็ง, ๆ‰€ไปฅๆˆ‘ไปฌๅฏไปฅๅšไธ€ไธช$N*N$็š„็Ÿฉ้˜ตๆฅ่กจ็คบๆ‰€ๆœ‰ๅฏ่ƒฝ็š„้š็Šถๆ€่ฝฌ็งปๆฆ‚็އ. 
# ![](./imgs/transition.jpg) # ไธŠๅ›พๅฐฑๆ˜ฏ**่ฝฌ็งปๆฆ‚็އ็Ÿฉ้˜ต**, ไนŸๅฐฑๆ˜ฏ$transition \ matrix$, ๆˆ‘ไปฌ่ฎพ่ฟ™ไธช็Ÿฉ้˜ตไธบ$A$็Ÿฉ้˜ต, ๅˆ™$A_{ij}$่กจ็คบ็Ÿฉ้˜ตไธญ็ฌฌi่กŒ็ฌฌjๅˆ—: # $$A_{ij}=P(i_{t+1}= q_j | i_{t} = q_i) \quad q_i \in Q_{hidden}$$ # ไธŠๅผ่กจ็คบๆŒ‡็š„ๆ˜ฏๅœจ$t$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพไธบ$q_i$, ่€Œๅœจ$t+1$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพ่ฝฌๆขๅˆฐ$q_j$็š„ๆฆ‚็އ. # 2. **HMM็š„ๅ‘ๅฐ„ๆฆ‚็އ(emission probabilities):** # ๆˆ‘ไปฌไน‹ๅ‰ๆๅˆฐไบ†ไปปๆ„ๆ—ถๅˆป่ง‚ๆต‹$o_t$ๅชไพ่ต–ไบŽๅฝ“ๅ‰ๆ—ถๅˆป็š„้š็Šถๆ€$i_t$, ไนŸๅฐฑๆ˜ฏ$P(o_t | i_t)$, ไนŸๅซๅšๅ‘ๅฐ„ๆฆ‚็އ, ๆŒ‡็š„ๆ˜ฏ้š็Šถๆ€็”Ÿๆˆ่ง‚ๆต‹็ป“ๆžœ็š„่ฟ‡็จ‹. # ่ฎพๆˆ‘ไปฌ็š„ๅญ—ๅ…ธ้‡Œๆœ‰$M$ไธชๅญ—, $V_{obs.}=\{v_0, v_1, ... , v_{M-1} \}$(ๆณจๆ„่ฟ™้‡Œไธ‹ๆ ‡ไปŽ0็ฎ—่ตท, ๆ‰€ไปฅๆœ€ๅŽ็š„ไธ‹ๆ ‡ๆ˜ฏ$M-1$, ไธ€ๅ…ฑๆœ‰$M$็ง่ง‚ๆต‹), ๅˆ™ๆฏ็งๅฎžไฝ“ๆ ‡็ญพ(้š็Šถๆ€)ๅฏไปฅ็”Ÿๆˆ$M$็งไธๅŒ็š„ๆฑ‰ๅญ—(ไนŸๅฐฑๆ˜ฏ่ง‚ๆต‹), ่ฟ™ไธ€่ฟ‡็จ‹ๅฏไปฅ็”จไธ€ไธช**ๅ‘ๅฐ„ๆฆ‚็އ็Ÿฉ้˜ต**ๆฅ่กจ็คบ, ไป–็š„็ปดๅบฆๆ˜ฏ$N*M$. # ![](./imgs/emission.jpg) # ไธŠๅ›พๅฐฑๆ˜ฏ**ๅ‘ๅฐ„ๆฆ‚็އ็Ÿฉ้˜ต**, ไนŸๅฐฑๆ˜ฏ$emission \ matrix$, ๆˆ‘ไปฌ่ฎพ่ฟ™ไธช็Ÿฉ้˜ตไธบ$B$็Ÿฉ้˜ต, ๅˆ™$B_{jk}$่กจ็คบ็Ÿฉ้˜ตไธญ็ฌฌ$j$่กŒ็ฌฌ$k$ๅˆ—: # $$B_{jk}=P(o_{t}= v_k | i_{t} = q_j) \quad q_i \in Q_{hidden} \quad v_k \in V_{obs.}=\{v_0, v_1, ... , v_{M-1} \}$$ # ไธŠๅผ่กจ็คบๆŒ‡็š„ๆ˜ฏๅœจ$t$ๆ—ถๅˆป็”ฑๅฎžไฝ“ๆ ‡็ญพ(้š็Šถๆ€)$q_j$็”Ÿๆˆๆฑ‰ๅญ—(่ง‚ๆต‹็ป“ๆžœ)$v_k$็š„ๆฆ‚็އ. # 3. **HMM็š„ๅˆๅง‹้š็Šถๆ€ๆฆ‚็އ:** ๅˆ็งฐไธบ$initial \ probabilities$, ๆˆ‘ไปฌ้€šๅธธ็”จ$\pi$ๆฅ่กจ็คบ, ๆณจๆ„่ฟ™้‡Œๅฏไธๆ˜ฏๅœ†ๅ‘จ็އ: # $$\pi=P(i_1=q_i) \quad q_i \in Q_{hidden} = \{ q_0, q_1, ... , q_{N-1}\}$$ # ไธŠๅผๆŒ‡็š„ๆ˜ฏ**่‡ช็„ถ่ฏญ่จ€ๅบๅˆ—ไธญ็ฌฌไธ€ไธชๅญ—**$o_1$็š„ๅฎžไฝ“ๆ ‡่ฎฐๆ˜ฏ$q_i$็š„ๆฆ‚็އ, ไนŸๅฐฑๆ˜ฏๅˆๅง‹้š็Šถๆ€ๆฆ‚็އ. # ### 3. ็”จHMM่งฃๅ†ณๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜, HMM็š„ๅญฆไน ็ฎ—ๆณ•; # ๆˆ‘ไปฌ็Žฐๅœจๅทฒ็ปไบ†่งฃไบ†HMM็š„ไธ‰ๅคงๅ‚ๆ•ฐ$A, \ B, \ \pi$, ๅ‡่ฎพๆˆ‘ไปฌๅทฒ็ป้€š่ฟ‡ๅปบๆจกๅญฆไน , ๅญฆๅˆฐไบ†่ฟ™ไบ›ๅ‚ๆ•ฐ, ๅพ—ๅˆฐไบ†ๆจกๅž‹็š„ๆฆ‚็އ, ๆˆ‘ไปฌๆ€Žไนˆไฝฟ็”จ่ฟ™ไบ›ๅ‚ๆ•ฐๆฅ่งฃๅ†ณๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜ๅ‘ข? 
# ่ฎพ็›ฎๅ‰ๅœจๆ—ถๅˆป$t$, ๆˆ‘ไปฌๆœ‰ๅฝ“ๅ‰ๆ—ถๅˆป็š„่ง‚ๆต‹ๅˆฐ็š„ไธ€ไธชๆฑ‰ๅญ—$o_t=v_k$(ๆŒ‡็š„็ฌฌ$t$ๆ—ถๅˆป่ง‚ๆต‹ๅˆฐ$v_k$), ๅ‡่ฎพๆˆ‘ไปฌ่ฟ˜็Ÿฅ้“ๅœจ$t-1$ๆ—ถๅˆป(ๅ‰ไธ€ๆ—ถๅˆป)ๅฏนๅบ”็š„ๅฎžไฝ“ๆ ‡่ฎฐ็ฑปๅž‹$i_{t-1} = \hat{q}^{t-1}_i$(ๆŒ‡็š„$t-1$ๆ—ถๅˆปๆ ‡่ฎฐไธบ$\hat{q}^{t-1}_i$). ๆˆ‘ไปฌ่ฆๅš็š„ไป…ไป…ๆ˜ฏๅˆ—ไธพๆ‰€ๆœ‰$i_{t}$ๅฏ่ƒฝ็š„ๅฎžไฝ“ๆ ‡่ฎฐ$\hat{q}^{t}_{j}$, ๅนถๆฑ‚ๅฏไปฅไฝฟไธ‹ๅผ่พ“ๅ‡บๅ€ผๆœ€ๅคง็š„้‚ฃไธชๅฎžไฝ“็ฑปๅž‹$q^{t}_{j}$(ไนŸๅฐฑๆ˜ฏ้š็Šถๆ€็ฑปๅž‹): # $$\hat{q}_j^{t} = argmax_{\hat{q}_j^{t} \in Q_{hidden}} # P(i_t = \hat{q}_j^{t} | i_{t-1} = \hat{q}^{t-1}_i) P(o_t=v_k| i_t = \hat{q}_j^{t})$$ # ๅฐ†ๆ‰€ๆœ‰$t$ๆ—ถๅˆป**ๅฝ“ๅ‰ๅฏๅ–็š„ๅฎžไฝ“ๆ ‡็ญพ**ๅธฆๅ…ฅไธ‹ๅผไธญ, ๆ‰พๅ‡บไธ€ไธชๅฏไปฅไฝฟไธ‹ๅผๅ–ๅ€ผๆœ€ๅคง็š„้‚ฃไธชๅฎžไฝ“ๆ ‡็ญพไฝœไธบๅฝ“ๅ‰ๅญ—็š„ๆ ‡ๆณจ: # $$P(ๅฝ“ๅ‰ๅฏๅ–ๅฎžไฝ“ๆ ‡็ญพ|ไธŠไธ€ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพ)P(ๆต‹ๅˆฐ็š„ๆฑ‰ๅญ—|ๅฝ“ๅ‰ๅฏๅ–ๅฎžไฝ“ๆ ‡็ญพ)$$ # **ๆณจๆ„**: ๆˆ‘ไปฌ่ฟ™้‡Œๅช่ฎฒๅˆฐไบ†ๆ€Žๆ ทๆฑ‚็ฌฌ$t$ๆ—ถๅˆป็š„ๆœ€ไผ˜ๆ ‡ๆณจ, ไฝ†ๆ˜ฏๅœจๆฏไธ€ๆ—ถๅˆป่ฟ›่กŒ่ฟ™ๆ ท็š„่ฎก็ฎ—, ๅนถไธไธ€ๅฎš่ƒฝไฟ่ฏๆœ€ๅŽ่ƒฝๅพ—ๅ‡บๅ…จๅฑ€ๆœ€ไผ˜ๅบๅˆ—่ทฏๅพ„, ไพ‹ๅฆ‚ๅœจ็ฌฌ$t$ๆ—ถๅˆปๆœ€ไผ˜ๅฎžไฝ“ๆ ‡็ญพๆ˜ฏ$q_j$, ไฝ†ๅˆฐไบ†ไธ‹ไธ€ๆญฅ, ็”ฑไบŽไปŽ$q_j$่ฝฌ็งปๅˆฐๅ…ถไป–ๆŸไบ›ๅฎžไฝ“ๆ ‡็ญพ็š„่ฝฌ็งปๆฆ‚็އๆฏ”่พƒไฝŽ, ่€Œ้™ไฝŽไบ†็ป่ฟ‡$q_j$็š„่ทฏๅพ„็š„ๆ•ดไฝ“ๆฆ‚็އ, ๆ‰€ไปฅๅˆฐไบ†ไธ‹ไธ€ๆ—ถๅˆปๆœ€ไผ˜่ทฏๅพ„ๅฐฑๆœ‰ๅฏ่ƒฝๅœจ็ฌฌ$t$ๆ—ถๅˆปไธ็ป่ฟ‡$q_j$ไบ†, ๆ‰€ไปฅๆฏไธ€ๆญฅ็š„ๅฑ€้ƒจๆœ€ไผ˜ๅนถไธไธ€ๅฎšๅฏไปฅ่พพๆˆๅ…จๅฑ€ๆœ€ไผ˜, ๆ‰€ไปฅๆˆ‘ไปฌไน‹ๅŽไผš็”จๅˆฐ**็ปด็‰นๆฏ”็ฎ—ๆณ•**ๆฅๆ‰พๅˆฐๅ…จๅฑ€ๆœ€ไผ˜็š„ๆ ‡ๆณจๅบๅˆ—, ่ฟ™ไธชๅŽ้ขไผšๆœ‰่ฏฆ็ป†่ฎฒ่งฃ. # **็”Ÿๆˆๆจกๅž‹ไธŽๅˆคๅˆซๆจกๅž‹**: # ๅฏนไบŽ็”Ÿๆˆๆจกๅž‹ไธŽๅˆคๅˆซๆจกๅž‹, ๅ› ไธบ็ฏ‡ๅน…้—ฎ้ข˜, ๆš‚ไธๅš่ฎฒ่ฟฐ, ็ฝ‘ไธŠๆœ‰ๅพˆๅคš่ต„ๆ–™. # ่ฟ™้‡Œ็จ็จๅ›ž้กพไธ€ไธ‹, ๆˆ‘ไปฌๅ‡่ฎพ$x$ไธบๆ•ฐๆฎ็‚น, $y$ไธบๆ•ฐๆฎๆ ‡่ฎฐ, ๆฏ”ๅฆ‚่ฏด้€ป่พ‘ๅ›žๅฝ’ๅฑžไบŽๅ…ธๅž‹็š„ๅˆคๅˆซๆจกๅž‹, ๆˆ‘ไปฌ่ฆ่ฎก็ฎ—$P(y|x)$ๅนถๅฝขๆˆไธ€ๆกๅˆ†็ฑป่พน็•Œ, ่€ŒๅœจHMMไธญ, ๆˆ‘ไปฌ่ฎก็ฎ—็š„ๆ˜ฏ$P(x|y)$, ่€Œไธ”่ฆ่ฎก็ฎ—ๅ‡บๆ‰€ๆœ‰$y$ๅฏๅ–็š„็ฑปๅž‹, ๅนถๆฏ”่พƒไธ€ไธ‹ๆ‰€ๆœ‰$P(x|y=y_{i})$็š„็ป“ๆžœ, ๅนถๅ–ๅฏไปฅไฝฟ$P(x|y)$ๆœ€ๅคง็š„้‚ฃไธช, ่€Œๅพ—ๅˆฐ้ข„ๆต‹็ป“ๆžœ. 
# **HMMๅ‚ๆ•ฐๅญฆไน (็›‘็ฃๅญฆไน )**: # ๆˆ‘ไปฌไปŠๅคฉ่ฆ็”จHMM่งฃๅ†ณ็š„ๆ˜ฏๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜, ๆ‰€ไปฅๆˆ‘ไปฌ่งฃๅ†ณ็š„ๆ˜ฏ็›‘็ฃๅญฆไน ็š„้—ฎ้ข˜. ไนŸๅฐฑๆ˜ฏ่ฏดๆˆ‘ไปฌ็Žฐๅœจๆœ‰ไธ€ไบ›ๆ–‡ๆœฌๅ’ŒไธŽไน‹ๅฏนๅบ”็š„ๆ ‡ๆณจๆ•ฐๆฎ, ๆˆ‘ไปฌ่ฆ่ฎญ็ปƒไธ€ไธชHMMๆฅๆ‹Ÿๅˆ่ฟ™ไบ›ๆ•ฐๆฎ, ไปฅไพฟไน‹ๅŽ็”จ่ฟ™ไธชๆจกๅž‹่ฟ›่กŒๆ•ฐๆฎๆ ‡ๆณจไปปๅŠก, ๆœ€็ฎ€ๅ•็š„ๆ–นๅผๆ˜ฏ็›ดๆŽฅ็”จ**ๆžๅคงไผผ็„ถไผฐ่ฎก**ๆฅไผฐ่ฎกๅ‚ๆ•ฐ: # 1. ๅˆๅง‹้š็Šถๆ€ๆฆ‚็އ$\pi$็š„ๅ‚ๆ•ฐไผฐ่ฎก: # $$\hat{\pi}_{q_i}=\frac{count(q^{1}_{i})}{count(o_1)}$$ # ไธŠๅผๆŒ‡็š„ๆ˜ฏ, ่ฎก็ฎ—ๅœจ็ฌฌ$1$ๆ—ถๅˆป, ไนŸๅฐฑๆ˜ฏๆ–‡ๆœฌไธญ็ฌฌไธ€ไธชๅญ—, $q^{1}_{i}$ๅ‡บ็Žฐ็š„ๆฌกๆ•ฐๅ ๆ€ป็ฌฌไธ€ไธชๅญ—$o_1$่ง‚ๆต‹ๆฌกๆ•ฐ็š„ๆฏ”ไพ‹, $q^{1}_{i}$ไธŠๆ ‡1ๆŒ‡็š„ๆ˜ฏ็ฌฌ1ๆ—ถๅˆป, ไธ‹ๆ ‡$i$ๆŒ‡็š„ๆ˜ฏ็ฌฌ$i$็งๆ ‡็ญพ(้š็Šถๆ€), $count$ๆ˜ฏ็š„ๆ˜ฏ่ฎฐๅฝ•ๆฌกๆ•ฐ. # 2. ่ฝฌ็งปๆฆ‚็އ็Ÿฉ้˜ต$A$็š„ๅ‚ๆ•ฐไผฐ่ฎก: # ๆˆ‘ไปฌไน‹ๅ‰ๆๅˆฐ่ฟ‡$transition \ matrix$้‡Œ้ข$A_{ij}$(็Ÿฉ้˜ต็š„็ฌฌi่กŒ็ฌฌjๅˆ—)ๆŒ‡็š„ๆ˜ฏๅœจ$t$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพไธบ$q_i$, ่€Œๅœจ$t+1$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพ่ฝฌๆขๅˆฐ$q_j$็š„ๆฆ‚็އ, ๅˆ™่ฝฌ็งปๆฆ‚็އ็Ÿฉ้˜ต็š„ๅ‚ๆ•ฐไผฐ่ฎก็›ธๅฝ“ไธŽไธ€ไธชไบŒๅ…ƒๆจกๅž‹$bigram$, ไนŸๅฐฑๆ˜ฏๆŠŠๆ‰€ๆœ‰็š„ๆ ‡ๆณจๅบๅˆ—ไธญๆฏ็›ธ้‚ป็š„ไธคไธชๅฎžไฝ“ๆ ‡็ญพๅˆ†ๆˆไธ€็ป„, ็ปŸ่ฎกไป–ไปฌๅ‡บ็Žฐ็š„ๆฆ‚็އ: # $$\hat{A}_{ij}=P(i_{t+1}= q_j | i_{t} = q_i)=\frac{count(q_iๅŽ้ขๅ‡บ็Žฐq_j็š„ๆฌกๆ•ฐ)}{count(q_i็š„ๆฌกๆ•ฐ)}$$ # 3. ๅ‘ๅฐ„ๆฆ‚็އ็Ÿฉ้˜ต$B$็š„ๅ‚ๆ•ฐไผฐ่ฎก: # ๆˆ‘ไปฌๆๅˆฐ่ฟ‡$emission \ matrix$ไธญ็š„$B_{jk}$(็Ÿฉ้˜ต็ฌฌj่กŒ็ฌฌkๅˆ—)ๆŒ‡็š„ๆ˜ฏๅœจ$t$ๆ—ถๅˆป็”ฑๅฎžไฝ“ๆ ‡็ญพ(้š็Šถๆ€)$q_j$็”Ÿๆˆๆฑ‰ๅญ—(่ง‚ๆต‹็ป“ๆžœ)$v_k$็š„ๆฆ‚็އ. # $$\hat{B}_{jk}=P(o_{t}= v_k | i_{t} = q_j)=\frac{count(q_jไธŽv_kๅŒๆ—ถๅ‡บ็Žฐ็š„ๆฌกๆ•ฐ)}{count(q_jๅ‡บ็Žฐ็š„ๆฌกๆ•ฐ)}$$ # ๅˆฐๆญคไธบๆญข, ๆˆ‘ไปฌๅฐฑๅฏไปฅ้ๅކๆ‰€ๆœ‰่ฏญๆ–™, ๆ นๆฎไธŠ้ข็š„ๆ–นๅผๅพ—ๅˆฐๆจกๅž‹็š„ๅ‚ๆ•ฐ$A, \ B, \ \pi$็š„ไผฐ่ฎก. # ๆณจๆ„, ้€š่ฟ‡ไธŠ้ข็š„่ฎก็ฎ—่ฟ‡็จ‹, ๆˆ‘ไปฌๅฏไปฅๅพ—ๅ‡บHMM็š„ๅ‚ๆ•ฐ$(A, B, \pi)$ๆœ‰ไปฅไธ‹็‰นๆ€ง: # $$\sum_{i}\pi_{q_i} = 1$$ # $$\sum_{j}A_{ij} = \sum_{j}P(i_{t+1}= q_j | i_{t} = q_i) = 1$$ # $$\sum_{k}B_{jk} = \sum_{k}P(o_{t}= v_k | i_{t} = q_j) =1$$ # ### 4. 
็ปด็‰นๆฏ”็ฎ—ๆณ•(Viterbi Algorithm)(HMM็š„้ข„ๆต‹็ฎ—ๆณ•). # ็ปด็‰นๆฏ”็ฎ—ๆณ•$viterbi \ algorithm$ไฝฟ็”จไบ†ๅŠจๆ€่ง„ๅˆ’็ฎ—ๆณ•ๆฅ่งฃๅ†ณ็ฑปไผผHMMๅ’ŒCRF็š„้ข„ๆต‹้—ฎ้ข˜, ็”จ็ปด็‰นๆฏ”็ฎ—ๆณ•ๅฏไปฅๆ‰พๅˆฐๆฆ‚็އๆœ€ๅคง่ทฏๅพ„, ไนŸๅฐฑๆ˜ฏๆœ€ไผ˜่ทฏๅพ„, ๅœจๆˆ‘ไปฌไปŠๅคฉ่ฆ่งฃๅ†ณ็š„ๅบๅˆ—ๆ ‡ๆณจ้—ฎ้ข˜ไธญ, ๅฐฑ่ฆ้€š่ฟ‡็ปด็‰นๆฏ”็ฎ—ๆณ•, ๆฅๆ‰พๅˆฐๆ–‡ๆœฌๆ‰€ๅฏนๅบ”็š„ๆœ€ไผ˜็š„ๅฎžไฝ“ๆ ‡ๆณจๅบๅˆ—. # ๅฆ‚ๆžœ็”จไธ€ๅฅ่ฏๆฅๆฆ‚ๆ‹ฌ็ปด็‰นๆฏ”็ฎ—ๆณ•, ้‚ฃๅฐฑๆ˜ฏ: # **ๅœจๆฏไธ€ๆ—ถๅˆป, ่ฎก็ฎ—ๅฝ“ๅ‰ๆ—ถๅˆป่ฝๅœจๆฏ็ง้š็Šถๆ€็š„ๆœ€ๅคงๆฆ‚็އ, ๅนถ่ฎฐๅฝ•่ฟ™ไธชๆœ€ๅคงๆฆ‚็އๆ˜ฏไปŽๅ‰ไธ€ๆ—ถๅˆปๅ“ชไธ€ไธช้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„, ๆœ€ๅŽๅ†ไปŽ็ป“ๅฐพๅ›žๆบฏๆœ€ๅคงๆฆ‚็އ, ไนŸๅฐฑๆ˜ฏๆœ€ๆœ‰ๅฏ่ƒฝ็š„ๆœ€ไผ˜่ทฏๅพ„.** ่ฟ™่ฏๅฏนไบŽๆฒกๆœ‰ๅญฆ่ฟ‡็ปด็‰นๆฏ”็ฎ—ๆณ•็š„ๅŒๅญฆๆ˜ฏๆ— ๆณ•็†่งฃ็š„, ไฝ†ๆ˜ฏๆˆ‘่ง‰ๅพ—ไปŠๅคฉๅญฆๅฎŒ็ปด็‰นๆฏ”็ฎ—ๆณ•ไน‹ๅŽๅ†ๆฅ็œ‹่ฟ™ๅฅ่ฏ, ๅฏไปฅๅŠ ๆทฑ่ฎฐๅฟ†. # ๆˆ‘ไปฌ่ฟ™้‡Œไธบไบ†ๅญฆไน ็ปด็‰นๆฏ”ๆ–นไพฟ, ๆ‰€ไปฅ่ฝฌๆขไธ€ไธ‹ๆ ‡็ญพ: # 1. $A_{i, j}^{t-1, t}$, ๆ˜ฏ่ฝฌ็งปๆฆ‚็އ็Ÿฉ้˜ต$A$ไธญ็š„็ฌฌ$i$่กŒ็ฌฌ$j$ๅˆ—(ไธ‹ๆ ‡), ๆŒ‡็š„ๆ˜ฏๅœจ$t-1$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพไธบ$q_i$, ่€Œๅœจ$t$ๆ—ถๅˆปๅฎžไฝ“ๆ ‡็ญพ่ฝฌๆขๅˆฐ$q_j$็š„ๆฆ‚็އ. # 2. $B_{jk}$ๆ˜ฏๅ‘ๅฐ„็Ÿฉ้˜ต็š„็ฌฌj่กŒ็ฌฌkๅˆ—, ๆŒ‡็š„ๆ˜ฏๅœจ็ฌฌ$t$ๆ—ถๅˆป, ็”ฑ้š็Šถๆ€$q_j$็”Ÿๆˆ่ง‚ๆต‹$v_k$็š„ๆฆ‚็އ. # 3. ๆœ‰ไบ†ไธŠ้ขไธค็‚น, ๅˆ™$\hat{q}_j = A_{ij}B_{jk}$่กจ็คบๅœจ$t$ๆ—ถๅˆป็š„้š็Šถๆ€ไธบ$q_j$็š„ๆฆ‚็އไผฐ่ฎก. # ๅœจ่ฟ™้‡Œๆˆ‘ไปฌ็›ดๆŽฅไปฅๅฎžไพ‹็š„ๆ–นๅผๆฅ่ฏดๆ˜Ž็ปด็‰นๆฏ”็ฎ—ๆณ•็š„่ฎก็ฎ—่ฟ‡็จ‹(ๆณจๆ„ๆˆ‘ไปฌๅœจ่ฟ™้‡Œไธ‹ๆ ‡ไปŽ$0$ๅผ€ๅง‹็ฎ—่ตท): # 1. ๅ‡่ฎพๆˆ‘ไปฌ็Žฐๅœจๆœ‰ๆ‰€ๆœ‰ๅฏ่ƒฝ็š„่ง‚ๆต‹็ป“ๆžœ็š„้›†ๅˆ$V_{obs.}=\{v_0, v_1\}$; # 2. ๆ‰€ๆœ‰ๅฏ่ƒฝ็š„้š็Šถๆ€็š„้›†ๅˆ$Q_{hidden}=\{q_0, q_1, q_2\}$; # 3. ๅทฒ็ป่ง‚ๆต‹ๅˆฐ็š„่ง‚ๆต‹็ป“ๆžœๅบๅˆ—$O=(o_1=v_0, \ o_2=v_1, \ o_3 = v_0)$; # 4. ็„ถๅŽๅ‡่ฎพๆˆ‘ไปฌ้€š่ฟ‡HMMๅปบๆจกๅนถๅญฆไน , ๅพ—ๅˆฐไบ†ๆจกๅž‹ๆ‰€ไผฐ่ฎก็š„ๅ‚ๆ•ฐ$(A, B, \pi)$, ๆณจๆ„ไธ‹้ข็š„$A, B$็Ÿฉ้˜ตๆŒ‰่กŒๆฑ‚ๅ’Œไธบ$1$; # ![](./imgs/lambda.jpg) # 5. ๆˆ‘ไปฌ่ฆๆฑ‚ๅ‡บๅฏนๅบ”ๅฝ“ๅ‰่ง‚ๆต‹็ป“ๆžœ$O$็š„ๆœ€ๆœ‰ๅฏ่ƒฝ็š„้š็Šถๆ€ๅบๅˆ—$I=(i_0, i_1, i_2)$. 
# ๆˆ‘ไปฌ็Žฐๅœจ่ฆๅˆๅง‹ๅŒ–ไธคไธชๆš‚ๅญ˜่กจๆ ผ, ๆฅๆš‚ๅญ˜ๆˆ‘ไปฌๅœจๆฏไธ€ๆ—ถๅˆป็š„่ฎก็ฎ—็ป“ๆžœ, ็จๅŽๆˆ‘ไปฌไผš่ฏดๆ˜Žๆ€Žไนˆไฝฟ็”จ่ฟ™ไธคไธช่กจ, ไธ‹้ขๆˆ‘ไปฌ็œ‹ๅˆฐT1่กจๆ ผๅ’ŒT2่กจๆ ผ, ไป–ไปฌ็š„่ง„ๆ ผ้ƒฝๆ˜ฏ$num\_hidden\_states * sequence\_length$, ่ฟ™ไธคไธช่กจๆ ผๅœจๆฏไธ€ๆ—ถๅˆป$t$้ƒฝ็”ฑ$3$ไธชๆ–นๅ—็ป„ๆˆ, $3$ๆ˜ฏๆ‰€ๆœ‰ๅฏ่ƒฝ้š็Šถๆ€็š„ไธชๆ•ฐ, ๅณ$|Q_{hidden}|=3$ # ![](./imgs/t1.jpg) # ่ฎก็ฎ—่ฟ‡็จ‹: # 1. ้ฆ–ๅ…ˆๆˆ‘ไปฌๆœ‰ๅˆๅง‹้š็Šถๆ€ๆฆ‚็އ็Ÿฉ้˜ต$\pi$, ๅ’Œ็ฌฌ1ๆ—ถๅˆป็š„่ง‚ๆต‹็ป“ๆžœ$o_1=v_0$, ๅˆ™ๅœจ็ฌฌไธ€ๆ—ถๅˆป, ็”ฑ้š็Šถๆ€็”Ÿๆˆ่ง‚ๆต‹็ป“ๆžœ็š„ๆฆ‚็އ่ฎก็ฎ—ๅฏไปฅๅ†™ๆˆ$q_j^{t=1} = \pi_{j}B_{jk}$. # # **ๆˆ‘ไปฌ็Žฐๅœจ่ฏดๆ˜Ž$T1, T2$่กจๆ ผ็š„็”จ้€”:** ๅฆ‚ๆžœ$T1, T2$่กจๆ ผๆ˜ฏ$i*j$็š„็Ÿฉ้˜ต, ๅˆ™็Ÿฉ้˜ตไธญ็ฌฌ$j$ๅˆ—ๆŒ‡็š„ๆ˜ฏ็ฌฌ$j$ๆ—ถๅˆป, ็ฌฌ$i$่กŒๆŒ‡็š„ๆ˜ฏ็ฌฌ$i$็ง้š็Šถๆ€, $T1[i, \ j]$ๆŒ‡็š„ๆ˜ฏๅœจ็ฌฌ$j$ๆ—ถๅˆป, ่ฝๅˆฐ้š็Šถๆ€$i$็š„ๆœ€ๅคงๅฏ่ƒฝ็š„ๆฆ‚็އๆ˜ฏๅคšๅฐ‘(ไธ่ฆ็€ๆ€ฅ, ๅˆฐไบ†ไธ‹ไธ€ไธชๆ—ถๅˆปๅฐฑไผšๆ˜Ž็™ฝ**ๆœ€ๅคง**ๆ˜ฏไป€ไนˆๆ„ๆ€), ่€Œ$T2[i, \ j]$่ฎฐๅฝ•็š„ๆ˜ฏ่ฟ™ไธช**ๆœ€ๅคงๅฏ่ƒฝ็š„ๆฆ‚็އ**ๆ˜ฏไปŽ็ฌฌ$j-1$ๆ—ถๅˆป(ไธŠไธ€ๆ—ถๅˆป)็š„ๅ“ชไธ€็ง้š็Šถๆ€$i$่ฝฌ็งป่ฟ‡ๆฅ็š„, ไนŸๅฐฑๆ˜ฏ่ฏดๆˆ‘ไปฌ่ฎฐๅฝ•็š„ๆ˜ฏ**ๆœ€ๅคงๅฏ่ƒฝ็š„ๆฆ‚็އ็š„่ฝฌ็งป่ทฏๅพ„**. # ๆˆ‘ไปฌ็Žฐๅœจๅฐ†็ฌฌไธ€ๆ—ถๅˆป็š„่ฎก็ฎ—็ป“ๆžœๅกซๅ…ฅ$T1, T2$่กจๆ ผ, ๆณจๆ„ๅœจ็ฌฌ$0$ๆ—ถๅˆป็š„้š็Šถๆ€ๆ˜ฏ็”ฑๅˆๅง‹้š็Šถๆ€ๆฆ‚็އ็Ÿฉ้˜ตๆไพ›็š„, ่€Œไธๆ˜ฏไปŽไธŠไธ€ๆ—ถๅˆป็š„้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„, ๆ‰€ไปฅๆˆ‘ไปฌ็›ดๆŽฅๅœจ$T2$่กจๆ ผไธŠ่ฎฐไธบ$NAN(not \ a \ number)$ # ![](./imgs/t2.jpg) # 2. 
ๆˆ‘ไปฌ็Žฐๅœจๆฅๅˆฐ็ฌฌ$1$ๆ—ถๅˆป(ๆ—ถๅˆปไธ‹ๆ ‡ไปŽ$0$่ตท็ฎ—), ้ฆ–ๅ…ˆๆˆ‘ไปฌๅ…ˆ่ฎก็ฎ—$T1[i=0, j=1]$(ไนŸๅฐฑๆ˜ฏ็ฌฌ$j=1$ๆ—ถๅˆป, ่ฝๅˆฐ้š็Šถๆ€$i=q_0$ไธŠ็š„ๆœ€ๅคงๅฏ่ƒฝ็š„ๆฆ‚็އๆ˜ฏๅคšๅฐ‘), ๆˆ‘ไปฌๅฏไปฅ็œ‹ๅ‡บ, ไปŽไธŠไธ€ๆ—ถๅˆปๅˆฐๅฝ“ๅ‰ๆ—ถๅˆป, ่ฆๆƒณ่ฎฉๅฝ“ๅ‰ๆ—ถๅˆป็š„้š็Šถๆ€ไธบ$i_1=q_0$, ๅˆ™ๆœ‰3ๆก่ทฏๅพ„ๅฏ่ตฐ, ๅˆ†ๅˆซๆ˜ฏ: $(i_0=q_0, i_1=q_0), \ (i_0=q_1, i_1=q_0), \ (i_0=q_2, i_1=q_0)$, # ๆˆ‘ไปฌๅœจ$T1[i=0, j=1]$็š„ไฝ็ฝฎๅฐฑๆ˜ฏ่ฆ็ฎ—ๅ‡บ, ่ฟ™ไธ‰ๆก่ทฏๅพ„ๅ“ชไธ€ๆกๆ˜ฏๆœ€ๆœ‰ๅฏ่ƒฝ็š„่ทฏๅพ„, ไนŸๅฐฑๆ˜ฏๅ–ๆฆ‚็އๆœ€ๅคง็š„ไธ€ๆก, ่ฟ™ๆ ท็š„่ฏ, ่ฎก็ฎ—ๅ…ฌๅผไธบ: # $$T1[0, 1]=\max_{i} # (P(i_1 = q_0 | i_0 = q_i) P(o_1=v_1| i_1 = q_0)) = # T1[q_i, time\_step=0] * A_{t-1=q_i, \ t=q_0} * B_{i_1 = q_0, o_1=v_1}$$ # ไธŠๅผๆœ€ๅณ่พน$T1[q_i, time\_step=0]$ไนŸๅฐฑๆ˜ฏ$T1[:, \ 0]$็š„ๆ„ๆ€ๆ˜ฏๅœจ$t-1$ๆ—ถๅˆป(ไนŸๅฐฑๆ˜ฏไธŠไธ€ๆ—ถๅˆป), ๆฏไธช้š็Šถๆ€ๅฏนๅบ”็š„ๆฆ‚็އ, ๆ˜ฏ้•ฟๅบฆไธบ$3$็š„ๅ‘้‡; # $A_{t-1=q_i, \ t=q_0}$ๆ˜ฏ$A$็Ÿฉ้˜ต็š„็ฌฌ$i$่กŒ็ฌฌ$0$ๅˆ—, ๆŒ‡็š„ๆ˜ฏๅœจ$t-1$ๆ—ถๅˆป้š็Šถๆ€ไธบ$q_i$, ่€Œๅœจ$t$ๆ—ถๅˆป้š็Šถๆ€ไธบ$q_0$็š„ๆฆ‚็އ, ไธ€ๅ…ฑๆœ‰ไธ‰็งๅฏ่ƒฝ็š„่ทฏๅพ„, ๆ‰€ไปฅไนŸๆ˜ฏ้•ฟๅบฆไธบ$3$็š„ๅ‘้‡; # $B_{i_1 = q_0, o_1=v_1}$ๆ˜ฏ$B$็Ÿฉ้˜ต็š„็ฌฌ$0$่กŒ็ฌฌ$1$ๅˆ—, ๆŒ‡็š„ๆ˜ฏ้š็Šถๆ€$q_0$็”Ÿๆˆ่ง‚ๆต‹$v_1$็š„ๆฆ‚็އ, ๆ˜ฏไธ€ไธชๆ•ฐๅ€ผ. # ้€š่ฟ‡ๆŸฅ่กจ่ฎก็ฎ—, ๆˆ‘ไปฌ็ฎ—ๅ‡บ: # $$T1[0,1]=max\{0.10 * 0.5 * 0.5, \ 0.16 * 0.3* 0.5, \ 0.28*0.2* 0.5\}=0.028$$ # ๆˆ‘ไปฌไน‹ๅ‰่ฏด่ฟ‡, ๆˆ‘ไปฌ่ฟ˜่ฆ็Ÿฅ้“็›ฎๅ‰่ฎก็ฎ—ๅ‡บๆฅ็š„่ฟ™ไธชๆœ€ๅคงๅฏ่ƒฝ็š„ๆฆ‚็އๅ‰ไธ€ๆ—ถๅˆป็š„ๅ“ชไธ€็ง้š็Šถๆ€$i$่ฝฌ็งป่ฟ‡ๆฅ็š„, ไนŸๅฐฑๆ˜ฏๆˆ‘ไปฌ่ฆๅœจ$T2[0,1]$่ฎฐๅฝ•่ฝฌ็งป่ทฏๅพ„, ่ฎก็ฎ—ๅ…ฌๅผไธบ: # $$T2[0,1]=argmax\{0.10 * 0.5 * 0.5, \ 0.16 * 0.3* 0.5, \ 0.28*0.2* 0.5\}=2$$ # ๆˆ‘ไปฌๆŠŠ่ฎก็ฎ—็ป“ๆžœๅกซๅˆฐ่กจ้‡Œ, ๆณจๆ„ๅœจไธ‹ๅ›พไธญ, ็บข่‰ฒ็š„็บฟ่กจ็คบๆœ€ๅคง็š„่ฝฌ็งป่ทฏๅพ„, ๆ˜ฏไปŽๅ‰ไธ€ๆ—ถๅˆป็š„$q_2$่ฝฌ็งป่ฟ‡ๆฅ็š„. # ![](./imgs/t3.jpg) # 3. 
ๆŽฅไธ‹ๆฅๆˆ‘ไปฌ็”จๅŒๆ ท็š„ๆ–นๅผ, ๆŠŠ่กจๅกซๅฎŒ, ไธ‹้ขๆˆ‘ไปฌๅผ€ๅง‹่ฎฒ็ปด็‰นๆฏ”็ฎ—ๆณ•ๆ˜ฏๆ€Žๆ ท้€š่ฟ‡่ฟ™ไบ›ๆš‚ๅญ˜็š„ๆฆ‚็އๅ’Œ่ทฏๅพ„ๆ‰พๅˆฐๆœ€ไผ˜่ทฏๅพ„็š„: # ![](./imgs/t4.jpg) # ๆœ€ไผ˜่ทฏๅพ„ๆœ‰ไปฅไธ‹็‰นๆ€ง: ๅ‡่ฎพๆˆ‘ไปฌๆœ‰ไธ€ๆกๆœ€ไผ˜่ทฏๅพ„ๅœจ$t$ๆ—ถๅˆป้€š่ฟ‡ไธ€ไธช้š็Šถๆ€$i_t$, ้‚ฃไนˆ่ฟ™ไธ€่ทฏๅพ„ไปŽ$i_t$ๅˆฐๆœ€ไผ˜่ทฏๅพ„็š„็ปˆ็‚น$i_T$็›ธๅฏนไบŽ**ๅœจ่ฟ™ๆฎต่ท็ฆป้‡Œๆ‰€ๆœ‰ๅฏ่ƒฝๅ‡บ็Žฐ็š„่ทฏๅพ„้‡Œ**, ไนŸๅฟ…้กปๆ˜ฏๆœ€ไผ˜็š„. ๅฆๅˆ™ไปŽ$i_t$ๅˆฐ$i_T$ๅฐฑไผšๆœ‰ๆ›ดไผ˜็š„ไธ€ๆก่ทฏๅพ„, ๅฆ‚ๆžœๆŠŠไป–ๅ’ŒไปŽ$i_1$ๅˆฐ$i_t$็š„่ทฏๅพ„(ๆœ€ไผ˜่ทฏๅพ„$i_t$ไน‹ๅ‰็š„้ƒจๅˆ†)่ฟž่ตทๆฅ, ็ญ‰ไบŽๆˆ‘ไปฌๅˆๆœ‰ไธ€ๆกๆ›ดไผ˜่ทฏๅพ„, ่ฟ™ๆ˜ฏ็Ÿ›็›พ็š„. # ๅˆฉ็”จ่ฟ™ไธ€็‰นๆ€ง, ๆˆ‘ไปฌๅช่ฆๆŒ‰ไธŠ้ข็š„ๆญฅ้ชค่ฎก็ฎ—็›ดๅˆฐๅพ—ๅ‡บๆœ€ๅŽไธ€ๆญฅ่พพๅˆฐ็š„ๆœ€ๅคงๆฆ‚็އ็š„้š็Šถๆ€, ๅ†็กฎ่ฎคๆœ€ๅคงๆฆ‚็އๆ˜ฏไปŽๅ‰ไธ€ๆญฅๅ“ชไธ€ไธช้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„, ็„ถๅŽไปŽ$T2$่กจๆ ผ้‡Œ้ข้€’ๆŽจๅ›žๆบฏ็›ดๅˆฐ็ฌฌไธ€ๆ—ถๅˆป(ไนŸๅฐฑๆ˜ฏ$NAN$็š„ๅœฐๆ–น), ๅฐฑๅฏไปฅๆ‰พๅ‡บๆœ€ไผ˜่ทฏๅพ„ไบ†. # ๅ›žๆบฏ็š„่ฎก็ฎ—: # 1. ้ฆ–ๅ…ˆ็ฎ—ๅ‡บๆœ€ๅŽไธ€ๆญฅ่พพๅˆฐๆœ€ๅคง่ทฏๅพ„็š„้š็Šถๆ€, ไนŸๅฐฑๆ˜ฏๅœจ$T1$่กจๆ ผ็š„็ฌฌ$3$ๅˆ—ๆฑ‚$argmax$: # $$i_2 = argmax \ T1[:, \ time\_step = 2] = 2$$ # 2. ไน‹ๅŽๆˆ‘ไปฌ้€š่ฟ‡$T2$่กจๆ ผๅ‘ๅ‰่ฟฝๆบฏไธ€ๆญฅ, ๅฝ“ๅ‰ๆœ€ๅคงๆฆ‚็އๆ˜ฏไปŽๅ‰ไธ€ๆญฅๅ“ชไธช้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„: # $$i_1 = T2[i_2 = 2, \ time\_step = 2] = 2$$ # 3. ๆˆ‘ไปฌๅˆฐ่พพไบ†ๅ€’ๆ•ฐ็ฌฌไธ€ๆญฅ, ๆˆ‘ไปฌ่ฟฝๆบฏๆœ€ไผ˜่ทฏๅพ„ๆ˜ฏไปŽๅ“ชไธช่ตทๅง‹้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„: # $$i_0 = T2[i_1 = 2, \ time\_step = 1] = 2$$ # 4. ่‡ณๆญคๆˆ‘ไปฌๅพ—ๅ‡บไบ†ๆœ€ๆœ‰ๅฏ่ƒฝ็š„้š็Šถๆ€ๅบๅˆ—: # $$I=(q_2, \ q_2, \ q_2)$$ # # **็ป“่ฎบ**: # 1. ๆ—ถ้—ดๅคๆ‚ๅบฆ: ๅ‡่ฎพๆˆ‘ไปฌๆœ‰$N$็ง้š็Šถๆ€, ๅœจๆฏไธชๆ—ถๅˆปไน‹้—ด, ไธ€ๅ…ฑๅฏ่ƒฝ็š„่ทฏๅพ„ไธ€ๅ…ฑๆœ‰$N^2$็ง, ๅ‡่ฎพๆˆ‘ไปฌๆœ‰$T$ไธชๆ—ถๅˆป, ๅˆ™็ปด็‰นๆฏ”็ฎ—ๆณ•็š„ๆ—ถ้—ดๅคๆ‚ๅบฆไธบ$O(TN^2)$. # 2. ๅœจๅฎž้™…็š„้ข„ๆต‹่ฎก็ฎ—ๅฝ“ไธญ, ไธบไบ†้˜ฒๆญข่ฎก็ฎ—็ป“ๆžœไธ‹ๆบข, ๆˆ‘ไปฌ้€šๅธธๅฐ†ไน˜ๆณ•ๅ˜ไธบๅ–ๅฏนๆ•ฐไน‹ๅŽ็š„ๅŠ ๆณ•. # 3. ๅ…ทไฝ“่Œƒไพ‹ไปฃ็ ่ง่ง†้ข‘่ฎฒ่งฃ. 
# ไธ‹้ขไปฃ็ ้ƒจๅˆ†ไธบ็ปด็‰นๆฏ”็ฎ—ๆณ•ไธญๆญฃๅ‘้€’ๆŽจ็ฎ—ๆณ•็š„็Ÿฉ้˜ตๅŒ–็ฎ—ๆณ•, # ๅณไปŽt-1ๆ—ถๅˆปๅˆฐtๆ—ถๅˆปๆฑ‚ๅ‡บ้œ€่ฆๅกซๅ…ฅT1ๅ’ŒT2่กจๆš‚ๅญ˜็š„็ฎ—ๆณ•, # ่ฟ™้‡Œ่ฎก็ฎ—็š„ๆ˜ฏไธŠไพ‹ไปŽ็ฌฌ0ๆ—ถๅˆปๅˆฐ็ฌฌ1ๆ—ถๅˆป็š„่ฎก็ฎ—่ฟ‡็จ‹, # ๅ…ทไฝ“่ฎฒ่งฃๅ‚่งๆ•™ๅญฆ่ง†้ข‘ import numpy as np A = np.array([ [.5, .2, .3], [.3, .5, .2], [.2, .3, .5] ]) B = b = np.array([ [.5,.5], [.4,.6], [.7,.3] ]) pi = np.array([ [.2], [.4], [.4] ]) print("transitions: A") print(A) print("emissions: B") print(B) print("pi:") print(pi) T1_prev = np.array([0.1, 0.16, 0.28]) T1_prev = np.expand_dims(T1_prev, axis=-1) print(T1_prev) print(T1_prev.shape) # ๅ› ไธบ็ฌฌ1ๆ—ถๅˆป็š„่ง‚ๆต‹ไธบv_1, ๆ‰€ไปฅๅ–B็Ÿฉ้˜ต็š„็ฌฌ1ๅˆ—, ๅณๆ‰€ๆœ‰้š็Šถๆ€็”Ÿๆˆ่ง‚ๆต‹v_1็š„ๆฆ‚็އ p_Obs_State = B[:, 1] p_Obs_State = np.expand_dims(p_Obs_State, axis=0) print(p_Obs_State) print(p_Obs_State.shape) T1_prev * p_Obs_State * A # ๅœจ่กŒ็š„็ปดๅบฆๆฑ‚max np.max(T1_prev * p_Obs_State * A, axis=0) # ็œ‹็œ‹ๆ‰€ๅพ—็š„maxๆฆ‚็އ็š„่ทฏๅพ„ๆ˜ฏไปŽๅ“ช้‡Œๆฅ็š„, ๅœจไธŠไธ€ๆญฅไปŽๅ“ชไธช้š็Šถๆ€่ฝฌ็งป่ฟ‡ๆฅ็š„ np.argmax(T1_prev * p_Obs_State * A, axis=0) # ๅ‚่€ƒ่ต„ๆ–™: # 1. ไธญๆ–‡ๅ‘ฝๅๅฎžไฝ“่ฏ†ๅˆซๆ ‡ๆณจๆ•ฐๆฎ: https://github.com/SophonPlus/ChineseNlpCorpus # 2. ็ปŸ่ฎกๅญฆไน ๆ–นๆณ• (็ฌฌ2็‰ˆ) ๆŽ่ˆช ่‘— 193้กต ็ฌฌๅ็ซ  ้š้ฉฌๅฐ”ๅฏๅคซๆจกๅž‹ # 3. wikipedia Viterbi algorithm https://en.wikipedia.org/wiki/Viterbi_algorithm # 4. wikipedia Hidden Markov model https://en.wikipedia.org/wiki/Hidden_Markov_model
05_NER_hidden_markov_model/.ipynb_checkpoints/HMM_NER-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Strings # # ## We can do things with strings # # We've already seen in Data 8 some operations that can be done with strings. first_name = "Franz" last_name = "Kafka" full_name = first_name + last_name print(full_name) # Remember that computers don't understand context. full_name = first_name + " " + last_name print(full_name) # ## Strings are made up of sub-strings # # You can think of strings as a [sequence](https://github.com/dlab-berkeley/python-intensive/blob/master/Glossary.md#sequence) of smaller strings or characters. We can access a piece of that sequence using square brackets `[]`. full_name[1] # <div class="alert alert-danger"> # Don't forget, Python (and many other langauges) start counting from 0. # </div> full_name[0] full_name[4] # ## You can slice strings using `[ : ]` # # If you want a range (or "slice") of a sequence, you get everything *before* the second index, i.e,. Python slicing is *exclusive*: full_name[0:4] full_name[0:5] # You can see some of the logic for this when we consider implicit indices. full_name[:5] full_name[5:] # If we want to find out how long a string is, we can use the `len` function: len(full_name) # ## Strings have methods # # * There are other operations defined on string data. These are called **string [methods](https://github.com/dlab-berkeley/python-intensive/blob/master/Glossary.md#method)**. # * The Jupyter Notebooks lets you do tab-completion after a dot ('.') to see what methods an [object](https://github.com/dlab-berkeley/python-intensive/blob/master/Glossary.md#object) (i.e., a defined variable) has to offer. Try it now! str. # Let's look at the `upper` method. What does it do? Let's take a look at the documentation. 
Jupyter Notebooks let us do this with a question mark ('?') before *or* after an object (again, a defined variable). # + # str.upper? # - # So we can use it to upper-caseify a string. full_name.upper() # You have to use the parenthesis at the end because upper is a method of the string class. # <p></p> # <div class="alert alert-danger"> # Don't forget, simply calling the method does not change the original variable, you must *reassign* the variable: # </div> print(full_name) full_name = full_name.upper() print(full_name) # For what it's worth, you don't need to have a variable to use the `upper()` method, you could use it on the string itself. "<NAME>".upper() # What do you think should happen when you take upper of an int? What about a string representation of an int? 1.upper() "1".upper() # ## Challenge 1: Write your name # # 1. Make two string variables, one with your first name and one with your last name. # 2. Concatenate both strings to form your full name and [assign](https://github.com/dlab-berkeley/python-intensive/blob/master/Glossary.md#assign) it to a variable. # 3. Assign a new variable that has your full name in all upper case. # 4. Slice that string to get your first name again. # ## Challenge 2: Try seeing what the following string methods do: # # * `split` # * `join` # * `replace` # * `strip` # * `find` my_string = "It was a Sunday morning at the height of spring." # ## Challenge 3: Working with strings # Below is a string of <NAME>'s "A Dream Within a Dream": poem = '''Take this kiss upon the brow! And, in parting from you now, Thus much let me avow โ€” You are not wrong, who deem That my days have been a dream; Yet if hope has flown away In a night, or in a day, In a vision, or in none, Is it therefore the less gone? All that we see or seem Is but a dream within a dream. I stand amid the roar Of a surf-tormented shore, And I hold within my hand Grains of the golden sand โ€” How few! 
yet how they creep
Through my fingers to the deep,
While I weep — while I weep!
O God! Can I not grasp
Them with a tighter clasp?
O God! can I not save
One from the pitiless wave?
Is all that we see or seem
But a dream within a dream?'''

# What is the difference between `poem.strip("?")` and `poem.replace("?", "")` ?

# At what index does the word "*and*" first appear? Where does it last appear?

# How can you answer the above accounting for upper- and lowercase?

# ## Challenge 4: Counting Text

# Below is a string of Robert Frost's "The Road Not Taken":

poem = '''Two roads diverged in a yellow wood,
And sorry I could not travel both
And be one traveler, long I stood
And looked down one as far as I could
To where it bent in the undergrowth;

Then took the other, as just as fair,
And having perhaps the better claim,
Because it was grassy and wanted wear;
Though as for that the passing there
Had worn them really about the same,

And both that morning equally lay
In leaves no step had trodden black.
Oh, I kept the first for another day!
Yet knowing how way leads on to way,
I doubted if I should ever come back.

I shall be telling this with a sigh
Somewhere ages and ages hence:
Two roads diverged in a wood, and I—
I took the one less traveled by,
And that has made all the difference.'''

# Using the `len` function and the string methods, answer the following questions:
#
# How many characters (letters) are in the poem?

# How many words?

# How many lines? (HINT: A line break is represented as `\n` )

# How many stanzas?

# How many unique words? (HINT: look up what a `set` is)

# Remove commas and check the number of unique words again. Why is it different?
03-Close-Reading-I/01-Strings.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Creating a Unit # A Unit object is created with a name and both inlet and outlet streams. Unit subclasses (e.g. heat exchangers) may include additional keyword arguments. # ## Key parameters # Initialize a Unit object with an `ID`, `ins` streams, and `outs` streams. Either an iterable of Stream objects, or a Stream object may work for `ins` and `outs`: from biosteam import Unit, Stream, settings, main_flowsheet settings.set_thermo(['Water']) ins = Stream('in0') outs = [Stream('out0')] U1 = Unit(ID='U1', ins=ins, outs=outs) U1.show(data=False) # Passing data as False returns only stream names # You can also view a diagram to check connections: # + tags=["nbval-ignore-output"] U1.diagram() # - # IDs for Stream objects can also be used instead: U2 = Unit('U2', ins='', outs=['']) # Empty strings default unused IDs U2.show(data=False) # ## Parameter defaults # By default, a unique `ID` is chosen, missing streams are given to `ins`, and empty streams to `outs`: unit = Unit() unit.show(data=False) # For either `ins` or `outs`, if None is given, missing streams are initialized. If an empty iterable is given, empty streams are initialized: U4 = Unit('U4', ins=None, outs=None) U4.show(data=False) U5 = Unit('U5', ins=(), outs=()) U5.show(data=False) # The number of default streams is different for each Unit subclass: from biosteam import Mixer, Splitter Mixer().show(data=False) Splitter(split=0.5).show(data=False) # Notice how the starting letter for default IDs vary between unit operations. This is because default names follow the "area naming convention" as explained in the following section. 
# ## Area naming convention
# Default IDs for unit operations follow the area naming convention based on {letter}{area + number} where the letter depends on the unit operation as follows:
#
# * C: Centrifuge
# * D: Distillation column
# * E: Evaporator
# * F: Flash tank
# * H: Heat exchange
# * M: Mixer
# * P: Pump (including conveying belt)
# * R: Reactor
# * S: Splitter (including solid/liquid separator)
# * T: Tank or bin for storage
# * U: Other units
# * J: Junction, not a physical unit (serves to adjust streams)
# * PS: Process specification, not a physical unit (serves to adjust streams)

# Continue creating unit operations following the area naming convention:

Mixer().show(data=False)

Splitter(split=0.5).show(data=False)

Splitter(split=0.5).show(data=False)

# Notice how there were no name conflicts for default IDs.

# Create a mixer following the area naming convention, this time starting from number 101:

Mixer(100).show(data=False)
docs/tutorial/Creating_a_Unit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tips and tricks: # # 1. [Comprehensions (list/dict/set)](#comprehensions) # 2. [String formatting](#string_formatting) # 3. [`lambda` functions](#lambda) # 4. [Variable length arguments (AKA `*args` and `**kwargs`)](#args) # 5. [Disquisitions on Truth and Falsehood](#logical) # 6. [Variable scope and lifetime](#scope) # 7. [Mutable vs immutable](#mutable) # 8. [Floating point arithmetic](#floating) # 9. [Decorators](#decorators) # 10. [Solutions](#solutions) # # <a id=comprehensions></a> # ## 1. Comprehensions # Python provides some "compound data types", such as: # * list = ordered and mutable array of objects, `['abcd', 786 , 2.23, 'john', 70.2]` # * tuple = immutable list, `('abcd', 786 , 2.23, 'john', 70.2)` # * dictionary = unordered and mutable array of key-value pairs, `{'name': 'john', 'code': 6734, 'dept': 'sales'}` # * sets = unordered and mutable set of objects, `{1, 2., 'a'}` # * frozen sets, byte arrays ... # # Comprehensions provide a concise way of creating some of them (lists, dictionaries or sets). They do not necessarily improve performance with respect to traditional *for* loops. # # They reduce the amount of code and make it more readable. # + N_SQUARES = 10 # Don't do this!!! 
ugly_list = [] for i in range(N_SQUARES): ugly_list.append(i**2) print('ugly list = {}'.format(ugly_list)) # You can do the same in one line wonderful_list = [ i**2 for i in range(N_SQUARES) ] print('wonderful list = {}'.format(wonderful_list)) # - # ### if clauses and embedded lists # + # List comprehensions can contain if clauses after the for clause even_list = [ i**2 for i in range(N_SQUARES) if i % 2 == 0] print('even list = {}'.format(even_list)) # List comprehensions can be embedded within one another IN_LEN = 3 embedded_list = [ [ j**2 for j in range(i, i + IN_LEN) ] for i in range(0, N_SQUARES, IN_LEN)] print('embedded list = {}'.format(embedded_list)) # - # ### dictionary and set comprehensions # + # You can use a similar syntax to create dictionaries fancy_dict = {'square of {}'.format(i): i**2 for i in range(N_SQUARES)} print('fancy dict = {}'.format(fancy_dict)) # and sets fancy_set = {i**2 for i in range(N_SQUARES)} print('fancy set = {}'.format(fancy_set)) # - # <a id='exercise_1_1'></a> # ### Exercise 1.1: Can you rewrite this into a single line of code? # For the N lower integers, find all their divisors. N = 100 all_divisors_list = [] for i in range(1, N + 1): divisors_list = [] for j in range(1, i + 1): if i % j == 0: divisors_list.append(j) all_divisors_list.append(divisors_list) print('list of divisors = {}'.format(all_divisors_list)) # + # # %load -r 2:5 solutions/03_02_TipsAndTricks.py # - # <a id="string_formatting"></a> # ## 2. 
String formatting # The built-in `str` type contains a `format` method that allows complex variable substitutions and value formatting # + from datetime import datetime an_int = 1 a_float = 0.123456 a_datetime = datetime.now() a_string = 'foo' a_list = list('abcd') print('''This is a string formatting example: * An integer formatted to a fixed length string filled with leading 0s: {0:06} * A float formatted as a percentage with 2 decimal positions: {1:0.2%}, or as a float with 4 decimal places: {1:0.4f} * Extract attributes from an object: the year "{2.year}" and the month "{2.month:02}" in a date * Align a text filling it with hyphens - to the left: {3:-<32} - to the right: {3:->32} * Access the values in a list: {4[0]}, {4[2]} '''.format(an_int, a_float, a_datetime, a_string, a_list)) # - # You can identify the variables to be replaced by: # * the index of the argument in the required arguments # * the name of the argument in the keyword arguments # + # an example of arguments referenced by their index print('Example 1: 1st arg: {0}, 2nd arg: {1}, referencing the indexes'.format('a', 'b')) # Empty brackets are filled with a list of indexes: print('Example 2: 1st arg: {}, 2nd arg: {}, without referencing the indexes'.format('a', 'b')) # If an argument is not referenced in the string, it is ignored print('Example 3: 1st arg: {0}, 2nd arg: {1}, other arguments are ignored'.format('a', 'b', 'c', 'd', 'e', 'f')) # You can also use keyword arguments: print('Example 4: keyword arg "a": {a}, keyword arg "b": {b}, "c" is ignored'.format(a='a', b='b', c='c')) # You can also mix non-keyword and keyword arguments print('Example 5: 1st arg: {0}, keyword arg "b": {b}'.format('a', 'c', b='b', d='d')) # - # <a id='exercise_2_1'></a> # ### Exercise 2.1: use string formatting to enhance the messaging # Imagine we want to print a message informing about the CI of the estimation of a parameter `mu` in terms of the sample mean (`x`) and the error margin (`m`), with a significance 
level (`r`). x = 12.3456789012345678901234567890 m = 0.98765432109876543210987654321 r = 0.05 print('The CI for mu is {} \xb1 {} with a significance level of {}'.format(x, m, r)) # You could enhance the readability of this message by formatting `x` and `m` to 2 decimal places and `r` as a percentage # # %load -r 7 solutions/03_02_TipsAndTricks.py print('The CI for mu is {:.2f} \xb1 {:.2f} with a significance level of {:.0%}'.format(x, m, r)) # ### Warning # For those familiar with Python 2, be aware that the % operator is still available in Python 3 but " will eventually be removed from the language" so: moan = ''.join([x*5 for x in 'ARGH!']) print('In the future this may crash!!\n\n%s' %moan) # <a id='lambda'></a> # ## 3. Lambda functions (or expression or operator!!??) # Lambda expressions are used to create anonymous functions. The syntax of the a lambda expression is: # ``` # lambda arguments: expression # ``` # This expression yields an unnamed function object. This object behaves like a function object defined with: # # ```def <something>(arguments): # return expression # ``` # + # This expressions are equivalent sum_ = lambda x, y: x + y print('This is the result of the lambda function: {}'.format(sum_(1., 2.))) def sum__(x, y): return x + y print('And this is the result of the standard defined function: {}'.format(sum__(1., 2.))) # - # ### When do you want to use `lambda`? # In some cases, lambda expressions are good for making your code a bit cleaner. # **When?** # >When you want to a fairly **simple function** that is going to be **called once**. # # **simple** means that it contains **only one expression**. # # It is commonly used together with some functions that take another function as an argument: map, reduce, filter, ... # Back to the squares example, using lambda and the map function list_of_squares = list(map(lambda x: x**2, range(10))) # not a very good example ... 
better with comprehensions print(list_of_squares) # Let's try with another one: compute the sum of the squares of all the numbers up to 10 import functools sum_of_list_of_squares = functools.reduce(lambda x, y: x + y, map(lambda x: x**2, range(10))) print(sum_of_list_of_squares) # Let's check if the result is ok sum_of_list_of_squares == sum(list_of_squares) # <a id='exercise_3_1'></a> # ### Exercise 3.1: Use lambda with filter function # Use a lambda function as an argument to the filter function to creat a string with all the vocals (upper and lower case) in *"This course is ridiculous. I wish I had not enrolled"*. # # You can type `filter?` to get some help. # # Be aware that the ouput of the `filter` function is an iterator, so you will have to manipulate it in order to turn it into a string. # + # # %load -r 10,11 solutions/03_02_TipsAndTricks.py # - # <a id='args'></a> # ## 4. Variable length arguments (AKA `*args` and `**kwargs`) # # The special syntax, `*` and `**` in function definitions is used to pass a variable number of arguments to a function. So the definition of the function would look like this: # # ```def foo(arg_1, arg_2, ..., *args, kwarg_1=kwval_1, kwarg_2=kwval_2, ..., **kwargs)``` # # The single asterisk form (\*) is used to pass a non-keyworded, variable-length argument list, and the double asterisk form (\*\*) is used to pass a keyworded, variable-length argument list. # # Inside the function definition, `*args` are tuples and `**kwargs` are dictionaries, so you can access them as usual. # # The names `args` and `kwargs` are not mandatory, but they are the most commonly used. 
# + # A simple example def foo(*bar, **barbar): print('type of bar: {}'.format(type(bar))) print('type of barbar: {}'.format(type(barbar))) foo() # + # An example def sample_function(*args, **kwargs): print('These are the arguments of my function:') for i, arg in enumerate(args): print(' Variable non keyword argument {}: {}'.format(i, arg)) for karg, varg in kwargs.items(): print(' Variable keyword argument: {}:{}'.format(karg, varg)) print('-'*36 + '\n') sample_function(1, 2, kwarg_1=3, kwarg_2=4) sample_function(6, 5, 4, 3, 2, 1) # - # You can also use the `*args` and `**kwargs` syntax in the function calls args = range(5) kwargs = {'kwarg_{}'.format(x): x for x in range(5)} sample_function(*args, **kwargs) # You can mix fixed arguments and variable length arguments # + # We want to force arg1 and arg2 def resample_function(arg1, arg2, *args, **kwargs): return sample_function(arg1, arg2, *args, **kwargs) sample_function() resample_function(1, 2) resample_function() # - # ### When can/shall we use them? # When we want to build a function with variable arguments length # + def multiplication(*args): z = 1 for arg in args: z *= arg return z print(multiplication(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) print(multiplication(0.1, 4, 6.7)) # - # Whenever we inherit a class and override some of the methods of inherited class, we should use `*args` and `**kwargs` and pass the received positional and keyword arguments to the superclass method. # # This could also apply to methods being "re-used" by other methods (if they are not contained in a class). 
# + class ValueLogger(): def print_values(self, *values): print('These are my values:') for value in values: print(' {}'.format(value)) self.print_separator() def print_separator(self): print('-'*64) class AdvancedValueLogger(ValueLogger): def print_values(self, *values): if len(values) == 0: print('There are no values') self.print_separator() else: super().print_values(*values) primitive_logger = ValueLogger() primitive_logger.print_values() primitive_logger.print_values(1, 2, 3, 4, 5) advanced_logger = AdvancedValueLogger() advanced_logger.print_values() # - # <a id='logical'></a> # ## 5. Disquisitions on Truth and Falsehood # ### Boolean operators # These are the Boolean operations, ordered by ascending priority: # # | Operation | Result | Notes | # |:---:|:---:|:--- | # |`x or y`|if x is false, then y, else x|This is a short-circuit operator, so it only evaluates the second argument if the first one is false.| # |`x and y`|if x is false, then x, else y|This is a short-circuit operator, so it only evaluates the second argument if the first one is true.| # |`not x`|if x is false, then True, else False|(not has a lower priority than non-Boolean operators, so not a == b is interpreted as not (a == b), and a == not b is a syntax error.)| # # # # # + ex_dict_1 = {'k_1': {'k_1_1': 1, 'k_1_2':2}, 'k_2': 3} # we can use this to simplify our code # we can rewrite this if ex_dict_1.get('k_1'): if ex_dict_1['k_1'].get('k_1_1'): print(ex_dict_1['k_1']['k_1_1']) else: print(False) else: print(False) # like this print(ex_dict_1.get('k_1') and ex_dict_1['k_1'].get('k_1_1') or False) # - # <a id='exercise_5_1'></a> # ### Exercise 5.1: rewrite in a single line # + ex_dict_2 = {'type': 'sum', 'data': [1, 2, 3, 4, 5] } # This code can be reformatted into a single line if ex_dict_2['type'] == 'raw': res_1 = ex_dict_2['data'] elif ex_dict_2['type'] == 'sum': res_1 = sum(ex_dict_2['data']) else: res_1 = None print(res_1) # + # # %load -r 14,15 solutions/03_02_TipsAndTricks.py # - # 
### Truth value testing # Then tested the truth value of an object (inside an `if`, a `while`, or as a boolean operator), this values are considered false: # # * `None` # * `False` # * zero of any numeric type, for example, `0`, `0.0`, `0j`. # * any empty sequence, for example, `''`, `()`, `[]`. # * any empty mapping, for example, `{}`. # * instances of user-defined classes, if the class defines a `__bool__()` or `__len__()` method, when that method returns the integer zero or bool value False # # All the other ones are considered true. a = '' if a: res = 'true' else: res = 'false' print('{!r} has been interpreted as {}'.format(a, res)) # Be careful when handling this. It can lead to weird behaviours # + def sum_function(list_of_values): if len(list_of_values) > 0: return sum(list_of_values) def wrap_around_sum(list_of_values): s = sum_function(list_of_values) if not s: print('error!!') else: print('Here is your result {}'.format(s)) wrap_around_sum([1, 2, 3, 4, 5]) # - wrap_around_sum([-1, -3, 4]) # ### Comparisons # The difference between equality and identity a = 0 b = 0. c = 4 - 4 print('0 and 0. are equal: {}'.format(a == b)) print('0 and 0. 
are not the same object: {}'.format(a is b)) print('But two different instances of 0, point to the same object: {}'.format(a is c)) # + class Dummy(): def __init__(self, val): self.val = val a = Dummy(0) b = Dummy(0) c = 0 print('Two instances of the same class are not equal: {}'.format(a == b)) print('or two instances of different classes: {}'.format(a == c)) # + # Unless we define the __eq__ method class NotSoDummy(Dummy): def __eq__(self, other): if other == self.val: return True else: return False a = NotSoDummy(0) b = NotSoDummy(0) c = 0 print('Now the two instances of the same class are tested equal: {}'.format(a == b)) print('even the two of different classes: {}'.format(a == c)) print('But they are not the same object: {}'.format(a is b)) # - # In the next example, `a` and `b` point to different memory locations, each of them containing the same information. Whereas `c`, points to the same location as `a`. a = [1, 2, 3] b = [1, 2, 3] c = a print('a and b are equal: {}, but are they the same instance? {}'.format(a == b, a is b)) print('but a and c are both equal: {}, and the same instance: {}'.format(a == c, a is c)) # When we modify a, we modify the memory location where it is pointing. So, when we modify `a`, we also modify `c`. In the other hand, nothing happens to `b`. a[0] = 0 print('c = {}'.format(c)) print('b = {}'.format(b)) # This behaviour does not happen with numbers though a_1 = 1 a_2 = a_1 a_1 = 2 print(a_2) # <a id='scope'></a> # ## 6. Variable scope and lifetime # We call the part of a program where a variable is accessible its **scope**, and the duration for which the variable exists its **lifetime**. # ### Global and local variables # A variable which is defined in the main body of a file is called a **global** variable. It will be visible throughout the file, and also inside any file which imports that file. We should restrict their use. 
Only objects which are intended to be used globally, like functions and classes, should be put in the global namespace. # # A variable which is defined inside a function is **local** to that function. It is accessible from the point at which it is defined until the end of the function, and exists for as long as the function is executing. global_var = 'foo' print('global outside: {}'.format(global_var)) def my_func(): local_var = 'bar' print('global inside: {}'.format(global_var)) print('local inside: {}'.format(local_var)) my_func() print('local outside: {}'.format(local_var)) # You should not declare a local variable with the same name as a global one. # # In this case, within `my_func`, `global_var` is a local variable so, it can't be referenced before its declaration. # + global_var = 'foo' def my_func(): print(global_var) global_var = 'bar' my_func() # - # If what you want to do is modify the global variable, you can use the `global` keyword at the beginning of the function body. # # "You can use" means "you have the possibility", but its a **VERY BAD PRACTICE** # + global_var = 'foo' def my_func(): global global_var print('original global variable value: {}'.format(global_var)) global_var = 'bar' print('new global variable value: {}'.format(global_var)) my_func() # - # ### Class and Instance variables # # **class variables** are those attributes of a class that existed in the class definition. Class variables are shared by all instances of a class. # # **instance variables** are set when the class has been already instantiated and are unique to each instance. 
class MySampleClass(): class_var = 'foo' class_list = [] # wrong placement of a mutable object def __init__(self, instance_var): self.instance_var = instance_var self.instance_list = [] inst_1 = MySampleClass('bar') inst_2 = MySampleClass('bar bar') print('Inst 1 - class var value: {}, instance var value: {}'.format(inst_1.class_var, inst_1.instance_var)) print('Inst 2 - class var value: {}, instance var value: {}'.format(inst_2.class_var, inst_2.instance_var)) # Not using class and instance variables as detailed above, can lead to weird behaviors, specially with mutable objects # + inst_1.class_list.append('foo') inst_1.instance_list.append('foo') inst_2.class_list.append('bar') inst_2.instance_list.append('bar') print('class_list is shared by all instances. inst_1: {}, inst_2: {}'.format(inst_1.class_list, inst_2.class_list)) print('instance_list is not: inst_1: {}, inst_2: {}'.format(inst_1.instance_list, inst_2.instance_list)) # - # <a id='mutable'></a> # ## 7. Mutable vs immutable # An **immutable** object is an object whose state cannot be modified after it is created. This is in contrast to a **mutable** object, which can be modified after it is created. # # Main data types in python: # # | Immutable | Mutable | # |:---:|:---:| # |int|list| # |float|dict| # |decimal|set| # |complex|custom class (by default)| # |bool| | # |string| | # |tuple| | # # This implies that lists can be modified and tuples can not # + my_list = [1, 2, 3] my_tuple = (1, 2, 3) # we can modify a value in a list my_list[2] = 4 # or we can extend it, drop some value, ... my_list.append(5) my_list.remove(1) print('my_list: {}'.format(my_list)) # We can not do that with tuples my_tuple[2] = 4 # Tuples don't have any append, remove,... methods # - # ### Why does mutability matter? # # As long as our code works, why should we matter about mutability? 
#
#
# Let's see an example

# We can do this:

x = 'foo'
print('old value of x: {}'.format(x))
x += ' bar'
print('new value of x: {}'.format(x))

# What have we done here? Have we modified the value of x?
#
# The answer is **NO**
#
# What we have done is:
# * create a string with value 'foo' and point `x` to it
# * create a string with value 'foo bar', point `x` to it and throw away the old one.
#
# In some cases this may add a lot of overhead because we are allocating and throwing lots of large objects.
#
# Let's see an example:

# +
import csv
import time

with open('../resources/iris.csv', 'r') as f:
    reader = csv.reader(f)
    iris_lines = list(reader)

iris_lines = iris_lines*100  # artificially increase the size of the data
print(iris_lines[:10])
# -

# if we want to build a string with the concatenation of all the 'species' we could do this

# NOTE: time.clock() (used in the original) was deprecated since Python 3.3
# and removed in Python 3.8; time.perf_counter() is the recommended
# high-resolution wall-clock timer for measuring elapsed time.
init = time.perf_counter()
# Quadratic-ish concatenation: each += allocates a new string and throws the
# old one away — exactly the overhead this section demonstrates.
species = iris_lines[1][4]
for iris_line in iris_lines[2:]:
    species += ',' + iris_line[4]
end = time.perf_counter()
comp_time = end - init
print('computation took {:0.8} seconds\n'.format(comp_time))

# ### Exercise 7.1
# Try to improve the performance by avoiding to allocate and drop so many objects
#
# **Hint:** use the `str.join` function (call `?str.join` to see its docstring)

# +
# # %load -r 18:25 solutions/03_02_TipsAndTricks.py
# -

# <a id='floating'></a>
# ## 8. Floating point arithmetics
#
# As most of the programming languages (Java, C++, Fortran, Matlab, ...), python floats are mapped to IEEE-754 "double precision" format.
# # In summary, a floating-point number ($f$) is rounded to its nearest binary representation with the expression: # # $$f \simeq s\frac{J}{2^N}$$ # # Where: # # | | Sign (s) | Exponent (N) | Fraction (J) | # |:---:|:---:|:---:|:---:| # | Number of bits | 1 | 11 | 52 | # # You can find more information about floating-point arithmetics in python in the [official python documentation](https://docs.python.org/3/tutorial/floatingpoint.html) and more details on the IEEE-754 standard in [Wikipedia](https://en.wikipedia.org/wiki/IEEE_754) and a [quite extense paper on the subject](https://ece.uwaterloo.ca/~dwharder/NumericalAnalysis/02Numerics/Double/paper.pdf) # # ### Why do we show this? # # This implementation may lead to non-intuitive behaviors: # Some real numbers can not be exactly represented as a float, this may throw some rounding errors 0.1 + 0.1 + 0.1 == 0.3 # Order of operations can matter b = 1e-16 + 1 - 1e-16 c = 1e-16 - 1e-16 + 1 b == c # Since $\pi$ can not be exactly represented it is not surprising that $sin(\pi)$ is not $0$ from math import sin, pi, sqrt sin(pi) # Unexpected cancellation due to loss of precision sqrt(1e-16 + 1) - 1 == 0. # Operating numbers of very different magnitudes 1e+12 + 1e-5 == 1e+12 # Overflow and underflow (reaching the maximum and minimum limits) # **Overflow** Looking for the nth fibonacci number def fib(n): return ((1. + sqrt(5.))**n - (1. - sqrt(5.))**n)/(2**n*sqrt(5.)) print([int(fib(i)) for i in range(1, 20)]) fib(700) # Starting with version 3.1, when printing, python displays the shortest number that maps to the same floating-point representation. # This numbers have all the same binary representation a = 0.1 b = 0.10000000000000000001 c = 0.1000000000000000055511151231257827021181583404541015625 print(a, b, c) # However, they are not exactly 0.1 print('{:0.24} {:0.24} {:0.24}'.format(a, b, c)) # ### Some hints # Use some error margin if you have to check for equality between two floats. 
#
# In Python 3.5, the `math.isclose` function was introduced, which does exactly that:
#
# ```python
# def math.isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0):```
#

from math import isclose, sin, pi
print(isclose(0.1 + 0.1 + 0.1, 0.3))
print(isclose(sin(pi), 0., abs_tol=1e-09))

# In case you really need correctly-rounded decimal floating point arithmetic, check the built-in [decimal](https://docs.python.org/3/library/decimal.html) library.

from decimal import Decimal
a = Decimal(1)
b = Decimal(10)
c = Decimal(3)
print(a/b, c/b)
a/b + a/b + a/b == c/b

# <a id='decorators'></a>
# ## 9. Decorators
#
# Let's see what [the official doc](https://docs.python.org/3/glossary.html#term-decorator) says about decorators:
#
# ----
#
# A function returning another function, usually applied as a function transformation using the @wrapper syntax. Common examples for decorators are `classmethod()` and `staticmethod()`.
#
# The decorator syntax is merely syntactic sugar, the following two function definitions are semantically equivalent:
#
# ```python
# def f(...):
#     ...
# f = staticmethod(f)
#
# @staticmethod
# def f(...):
#     ...
# ```
# The same concept exists for classes, but is less commonly used there. See the documentation for function definitions and class definitions for more about decorators.
#
# #### Example 9.1
# Let's see the simplest possible example (not simple though)

# +
def p_a_d(some_func):
    """Decorator that logs the arguments and the result of every call.

    Fix: ``wrapped`` now *returns* the wrapped function's result. The
    original version discarded it, so any decorated function silently
    returned ``None``. (Production code would also apply
    ``functools.wraps`` to ``wrapped`` to preserve the metadata of
    ``some_func``.)
    """
    def wrapped(*args, **kwargs):
        print("going to run function '{}' with arguments: {}, {}".format(some_func.__name__, args, kwargs))
        res = some_func(*args, **kwargs)
        print("the result of '{}' is: {}".format(some_func.__name__, res))
        return res  # propagate the wrapped function's return value
    return wrapped


@p_a_d
def dummy(i):
    return i*10


dummy(1)
# -

# Remember we are doing: `dummy = p_a_d(dummy)`
#
# So calling:
#
# ```python
# dummy(1)
# ```
#
# is the same as:
#
# ```python
# wrapped_func = p_a_d(dummy)
# wrapped_func(1)
# ```

# #### Example 9.2
# Let's see a more useful (and complicated) example.
# # We will implement a simple authentication system. # # We want to run the functions in our API only if the user is correctly authenticated. Let's assume that all the functions in our API have a `request` dictionary as input. The authentication data is stored in the `user` and `token` keys of the request. And we need to check the autenticity of this information against the information in our backend before running the function. # # We could implement it as follows # + auth_tokens = {'user1': '<KEY>', 'user2': '<KEY>'} def check_authentication(request): ''' Check if the token in the request correspond to the one stored''' user = request.get('user') token = request.get('token') if auth_tokens.get(user) and auth_tokens[user] == token: return True else: return False def authenticate(func): '''Decorator to add authentication checking''' def authenticate_and_call(request): if not check_authentication(request): raise Exception('Authentication Failed.') return func(request) return authenticate_and_call # - # ![GoToHellYouBastard](../resources/monkey-funny.jpg) # It may look complicated (and it is), but don't panic. It's the same as in the previous example. # # We would use the decorator like this: # ```python # @authenticate # def some_func(request): # ... # ``` # and call the decorated function: `some_func(request)` # # # Keep in mind that what you are doing is this: # ```python # def some_func(request): # ... 
# # some_func = authenticate(some_func) # ``` # so `some_func(request)` is the same as `authenticate(some_func)(request)` # # We create a dummy function to test the authentication decorator @authenticate def dummy_sum(request): return request.get('param1', 0.0) + request.get('param2', 0.0) # Let's see what happens if the authentication **is not** correct dummy_sum({'user': 'user1', 'token': '<PASSWORD>1eLHbnwKApay2rggAlrbOk', 'param1': 2.0, 'param2': 3.0}) # And what happens if the authentication **is** correct dummy_sum({'user': 'user1', 'token': '<KEY>', 'param1': 2.0, 'param2': 3.0}) # #### Example 9.3 # # In some cases you may want a decorator that accepts input arguments. # # For example, we can implement a decorator to check if the input arguments of a function have the desired type. Resulting in something like this: # ```python # @accepts(type1, type2, ...) # def some_func(arg1, arg2, ...): # ... # ``` # # That would be called: `some_func(arg1, arg2, ...)` def accepts(*wrapper_args): def wrapper(f): def check_input_and_call(*func_args): for func_arg, wrapper_arg in zip(func_args, wrapper_args): if type(func_arg) != wrapper_arg: raise Exception('wrong type for argument {}'.format(func_arg)) return f(*func_args) return check_input_and_call return wrapper # ![ImGoingToJumpOutTheWindow](../resources/monkey-finger.jpg) # Ok, now you can panic a little bit. # # Remember the definition of decorator. What you are doing is this: # # `some_func = accepts(type1, type2, ...)(some_func)` # # So `some_func(arg1, arg2, ...)` is the same as `accepts(type1, type2, ...)(some_func)(arg1, arg2, ...)` # # Pffffffffff # # We apply this decorator to a function that calculates the root of a float. We want the degree of the root to be an integer @accepts(float, int) def compute_root(base, degree): return base**(1./float(degree)) compute_root(4., 2.) compute_root(4., 2)
notebooks/03_02_TipsAndTricks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Squeezing in atomic qudits # In this notebook we will look into experiments with atomic qudits and reproduce them with pennylane. We first want to look at what we are trying to reproduce with pennylane. The graph below is from a [paper](https://arxiv.org/abs/1507.03782) by <NAME>. In this paper the collective spin of a Bose-Einstein-Condensate is used to observe nonlinearity in spin squeezing. # + import pennylane as qml import numpy as np import matplotlib.pyplot as plt import pandas as pd # %config InlineBackend.figure_format='retina' # + data_strobel_15 = pd.read_csv("Data/Strobel_Data_15ms.csv", names=["dB", "alpha"]) data_strobel_25 = pd.read_csv("Data/Strobel_Data_25ms.csv", names=["dB", "alpha"]) plt.figure(dpi=96) plt.title("Number Squeezing") plt.plot(data_strobel_15.dB, data_strobel_15.alpha, "ro", label="15ms", markersize=4) plt.plot(data_strobel_25.dB, data_strobel_25.alpha, "bo", label="25ms", markersize=4) plt.axhline(y=0, color="g", linestyle="--") plt.ylabel(r"Number Squeezing in $dB$") plt.xlabel(r"tomography angle $\alpha$") _ = plt.legend() # - # This number squeezing is achieved by performing the following Bloch-sphere rotations. # # We prepare the collective spin such that the Bloch-sphere-vector points to one of the poles. # # # 1. __First step__. As a first step the vector is rotated onto the equator. # # ![first step](Bloch_spheres/1.png) # # # 2. __Second step__ . Then the state is being squeezed, such that it starts to wrap around the Bloch-sphere. # # ![second step](Bloch_spheres/2.png) # # # 3. __Third step__ . In the last step we rotate the state around the $X$-axis. This rotation corresponds to the angle $\alpha$ in this notebook. 
# # # ![third step](Bloch_spheres/alpha.png) # We will now simulate the sequence in pennylane. # + import pennylane as qml import numpy as np from pennylane_ls import * # - # import the credentials to access the server and import the device. Make sure that you followed the necessary steps for obtaining the credentials as desribed in the [introduction](https://synqs.github.io/pennylane-ls/intro.html). # + from credentials import username, password nshots = 500 testDevice = qml.device("synqs.sqs", shots=nshots, username=username, password=password) # - # This created a single qudit device, which has the following operations. testDevice.operations # we now define the quantum circuit that implements the experimental sequence @qml.qnode(testDevice) def quantum_circuit(Nat=10, theta=0, kappa=0, alpha=0, Ntrott=2): """ The circuit that simulates the experiments. theta ... angle of the Lx term in the Hamiltonian evolution kappa ... angle of the Lz^2 term in the Hamiltonian evolution apla ... angle of rotation """ # load atoms SingleQuditOps.load(Nat, wires=0) # rotate onto x SingleQuditOps.rLx(np.pi / 2, wires=0) SingleQuditOps.rLz(np.pi / 2, wires=0) # evolution under the Hamiltonian for ii in range(Ntrott): SingleQuditOps.rLx(theta / Ntrott, wires=0) SingleQuditOps.rLz2(kappa / Ntrott, wires=0) # and the final rotation to test the variance SingleQuditOps.rLx(-alpha, wires=0) return qml.var(SingleQuditOps.Z(0)) # the parameters of the experiment. # + Nat = 200 l = Nat / 2 # spin length omegax = 2 * np.pi * 20 t1 = 15e-3 t2 = 25e-3 Lambda = 1.5 # 1.5 chi = Lambda * abs(omegax) / Nat Ntrott = 15; # - # let us visualize it once. quantum_circuit(Nat, omegax * t1, chi * t1, 0, Ntrott) print(quantum_circuit.draw()) # and now calculate the variance as presented above. 
# + alphas = np.linspace(0, np.pi, 15) variances_1 = np.zeros(len(alphas)) variances_2 = np.zeros(len(alphas)) for i in range(len(alphas)): if i % 10 == 0: print("step", i) # Calculate the resulting states after each rotation variances_1[i] = quantum_circuit(Nat, omegax * t1, chi * t1, alphas[i], Ntrott) variances_2[i] = quantum_circuit(Nat, omegax * t2, chi * t2, alphas[i], Ntrott) # + def number_squeezing_factor_to_db(var_CSS, var): return 10 * np.log10(var / var_CSS) f, ax = plt.subplots() ax.set_title("Number Squeezing") plt.plot( np.rad2deg(alphas), number_squeezing_factor_to_db(l / 2, variances_1), "r-", lw=5, label="simulated 15ms", alpha=0.5, ) ax.plot( data_strobel_15.dB, data_strobel_15.alpha, "ro", label="experiment 15ms", markersize=4, ) plt.plot( np.rad2deg(alphas), number_squeezing_factor_to_db(l / 2, variances_2), "b-", lw=5, label="simulated 25ms", alpha=0.5, ) ax.plot( data_strobel_25.dB, data_strobel_25.alpha, "bo", label="experiment 25ms", markersize=4, ) ax.axhline(y=0, color="g", linestyle="--") ax.set_ylabel(r"Number Squeezing in $dB$") ax.set_xlabel(r"tomography angle $\alpha$") ax.legend() # -
examples_before_PR_accept_by_RPB/Fisher_information.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Tuesday, August 4th, 2020

# ### leetCode - Word Pattern (Python)

# ### Problem : https://leetcode.com/problems/word-pattern/
# ### Blog : https://somjang.tistory.com/entry/leetCode-290-Word-Pattern-Python

# ### First attempt

class Solution:
    """LeetCode 290 — Word Pattern."""

    def wordPattern(self, pattern: str, string: str) -> bool:
        """Return True iff `string` follows `pattern`.

        "Follows" means there is a bijection between the letters of
        `pattern` and the space-separated words of `string`: each letter
        maps to exactly one word and no two letters map to the same word.
        """
        words = string.split(' ')
        # A bijection is impossible when the counts differ; checking up
        # front also means zip() below never silently truncates.
        if len(pattern) != len(words):
            return False

        mapping = {}
        for letter, word in zip(pattern, words):
            if letter not in mapping:
                mapping[letter] = word
            elif mapping[letter] != word:
                # Same letter would have to map to two different words.
                return False

        # Reject many-to-one mappings: every letter maps to a distinct word.
        return len(mapping) == len(set(mapping.values()))
DAY 101 ~ 200/DAY180_[leetCode] Word Pattern (Python).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: visualization-curriculum-gF8wUgMm # language: python # name: visualization-curriculum-gf8wugmm # --- # + [markdown] papermill={"duration": 0.015165, "end_time": "2021-12-28T20:05:25.327833", "exception": false, "start_time": "2021-12-28T20:05:25.312668", "status": "completed"} tags=[] # # Changes In The Daily Growth Rate # > Changes in the daily growth rate for select countries. # # - comments: true # - author: <NAME> # - categories: [growth] # - image: images/covid-growth.png # - permalink: /growth-analysis/ # + papermill={"duration": 0.025694, "end_time": "2021-12-28T20:05:25.365060", "exception": false, "start_time": "2021-12-28T20:05:25.339366", "status": "completed"} tags=[] #hide from pathlib import Path loadpy = Path('load_covid_data.py') if not loadpy.exists(): # ! wget https://raw.githubusercontent.com/github/covid19-dashboard/master/_notebooks/load_covid_data.py # + papermill={"duration": 1.091828, "end_time": "2021-12-28T20:05:26.467144", "exception": false, "start_time": "2021-12-28T20:05:25.375316", "status": "completed"} tags=[] #hide # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import matplotlib import pandas as pd import seaborn as sns import load_covid_data sns.set_context('talk') plt.style.use('seaborn-whitegrid') # + papermill={"duration": 46.359004, "end_time": "2021-12-28T20:06:12.838081", "exception": false, "start_time": "2021-12-28T20:05:26.479077", "status": "completed"} tags=[] #hide df = load_covid_data.load_data(drop_states=True) annotate_kwargs = dict( s='Based on COVID Data Repository by Johns Hopkins CSSE ({})\nBy <NAME>'.format(df.index.max().strftime('%B %d, %Y')), xy=(0.05, 0.01), xycoords='figure fraction', fontsize=10) # + papermill={"duration": 0.028542, "end_time": "2021-12-28T20:06:12.877871", "exception": false, "start_time": 
"2021-12-28T20:06:12.849329", "status": "completed"} tags=[] #hide # Country names seem to change quite a bit df.country.unique() # + papermill={"duration": 0.018604, "end_time": "2021-12-28T20:06:12.907361", "exception": false, "start_time": "2021-12-28T20:06:12.888757", "status": "completed"} tags=[] #hide european_countries = ['Italy', 'Germany', 'France (total)', 'Spain', 'United Kingdom (total)', 'Iran'] large_engl_countries = ['US', 'Canada (total)', 'Australia (total)'] asian_countries = ['Singapore', 'Japan', 'Korea, South', 'Hong Kong'] south_american_countries = ['Argentina', 'Brazil', 'Colombia', 'Chile'] country_groups = [european_countries, large_engl_countries, asian_countries, south_american_countries] line_styles = ['-', ':', '--', '-.'] # + papermill={"duration": 0.955742, "end_time": "2021-12-28T20:06:13.874089", "exception": false, "start_time": "2021-12-28T20:06:12.918347", "status": "completed"} tags=[] #hide def plot_countries(df, countries, min_confirmed=100, ls='-', col='confirmed'): for country in countries: df_country = df.loc[(df.country == country) & (df.confirmed >= min_confirmed)] if len(df_country) == 0: continue df_country.reset_index()[col].plot(label=country, ls=ls) sns.set_palette(sns.hls_palette(8, l=.45, s=.8)) # 8 countries max fig, ax = plt.subplots(figsize=(12, 8)) for countries, ls in zip(country_groups, line_styles): plot_countries(df, countries, ls=ls) x = np.linspace(0, plt.xlim()[1] - 1) ax.plot(x, 100 * (1.33) ** x, ls='--', color='k', label='33% daily growth') ax.set(yscale='log', title='Exponential growth of COVID-19 across countries', xlabel='Days from first 100 confirmed cases', ylabel='Confirmed cases (log scale)') ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) ax.legend(bbox_to_anchor=(1.0, 1.0)) ax.annotate(**annotate_kwargs) sns.despine(); # + papermill={"duration": 0.865548, "end_time": "2021-12-28T20:06:14.753776", "exception": false, "start_time": "2021-12-28T20:06:13.888228", 
"status": "completed"} tags=[] #hide fig, ax = plt.subplots(figsize=(12, 8)) for countries, ls in zip(country_groups, line_styles): plot_countries(df, countries, ls=ls) x = np.linspace(0, plt.xlim()[1] - 1) ax.plot(x, 100 * (1.33) ** x, ls='--', color='k', label='33% daily growth') ax.set(title='Exponential growth of COVID-19 across countries', xlabel='Days from first 100 confirmed cases', ylabel='Confirmed cases', ylim=(0, 30000)) ax.legend(bbox_to_anchor=(1.0, 1.0)) ax.annotate(**annotate_kwargs) sns.despine(); # + papermill={"duration": 1.154871, "end_time": "2021-12-28T20:06:15.927699", "exception": false, "start_time": "2021-12-28T20:06:14.772828", "status": "completed"} tags=[] #hide_input plt.rcParams['axes.titlesize'] = 24 smooth_days = 4 fig, ax = plt.subplots(figsize=(14, 8)) df['pct_change'] = (df .groupby('country') .confirmed .pct_change() .rolling(smooth_days) .mean() ) for countries, ls in zip(country_groups, line_styles): (df.set_index('country') .loc[countries] .loc[lambda x: x.confirmed > 100] .reset_index() .set_index('days_since_100') .groupby('country', sort=False)['pct_change'] .plot(ls=ls) ) ax.set(ylim=(0, 1), xlim=(0, 20), title='Are we seeing changes in daily growth rate?', xlabel='Days from first 100 confirmed cases', ylabel='Daily percent change (smoothed over {} days)'.format(smooth_days), ) ax.axhline(.33, ls='--', color='k') ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) ax.legend(bbox_to_anchor=(1.0, .155)) sns.despine() ax.annotate(**annotate_kwargs); # This creates a preview image for the blog post and home page fig.savefig('../images/covid-growth.png') # + [markdown] papermill={"duration": 0.025398, "end_time": "2021-12-28T20:06:15.977968", "exception": false, "start_time": "2021-12-28T20:06:15.952570", "status": "completed"} tags=[] # ## Appendix: German ICU Capacity # + papermill={"duration": 0.784935, "end_time": "2021-12-28T20:06:16.786312", "exception": false, "start_time": 
"2021-12-28T20:06:16.001377", "status": "completed"} tags=[] #hide_input sns.set_palette(sns.hls_palette(8, l=.45, s=.8)) # 8 countries max fig, ax = plt.subplots(figsize=(12, 8)) p_crit = .05 # 28000 ICU beds total, 80% occupied icu_germany = 28000 icu_germany_free = .2 df_tmp = df.loc[lambda x: (x.country == 'Germany') & (x.confirmed > 100)].critical_estimate df_tmp.plot(ax=ax) x = np.linspace(0, 30, 30) pd.Series(index=pd.date_range(df_tmp.index[0], periods=30), data=100*p_crit * (1.33) ** x).plot(ax=ax,ls='--', color='k', label='33% daily growth') ax.axhline(icu_germany, color='.3', ls='-.', label='Total ICU beds') ax.axhline(icu_germany * icu_germany_free, color='.5', ls=':', label='Free ICU beds') ax.set(yscale='log', title='When will Germany run out of ICU beds?', ylabel='Expected critical cases (assuming {:.0f}% critical)'.format(100 * p_crit), ) ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter()) ax.legend(bbox_to_anchor=(1.0, 1.0)) sns.despine() ax.annotate(**annotate_kwargs); # + [markdown] papermill={"duration": 0.027642, "end_time": "2021-12-28T20:06:16.841267", "exception": false, "start_time": "2021-12-28T20:06:16.813625", "status": "completed"} tags=[] # Updated daily by [GitHub Actions](https://github.com/features/actions). # + [markdown] papermill={"duration": 0.029702, "end_time": "2021-12-28T20:06:16.898013", "exception": false, "start_time": "2021-12-28T20:06:16.868311", "status": "completed"} tags=[] # This visualization was made by [<NAME>](https://twitter.com/twiecki)[^1]. # # [^1]: Data sourced from ["2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE"](https://systems.jhu.edu/research/public-health/ncov/) [GitHub repository](https://github.com/CSSEGISandData/COVID-19) and recreates the (pay-walled) plot in the [Financial Times]( https://www.ft.com/content/a26fbf7e-48f8-11ea-aeb3-955839e06441). 
This code is provided under the [BSD-3 License](https://github.com/twiecki/covid19/blob/master/LICENSE). Link to [original notebook](https://github.com/twiecki/covid19/blob/master/covid19_growth.ipynb).
_notebooks/2020-03-14-covid19_growth.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
import music21 as m21
import pandas as pd
import json
import matplotlib.pyplot as plt
import time

np.random.seed(777)
# -

# ## Functions

# Tolerance divisor for note-duration matching: two delta times are considered
# equal when their absolute difference is at most (delta_A + delta_B) / DIV_CONST.
DIV_CONST = 4


def getSongKey(song):
    """Return the key music21 estimates for *song* (a stream/score object)."""
    key = song.analyze("key")
    return key


def getSongKeyFromMelody_W_Times(melody_w_times_in_k):
    """Estimate the key of a melody given as (cum_time, delta, midi, name, pitch) tuples.

    Builds a throwaway single-part score holding one note per tuple (the
    m21 Pitch object stored at index 4) and runs key analysis on it.
    """
    sc_test = m21.stream.Score()
    p0_test = m21.stream.Part()
    p0_test.id = 'part0'
    for pitch_i in melody_w_times_in_k:
        n_i = m21.note.Note(pitch_i[4])
        p0_test.append(n_i)
    sc_test.insert(0, p0_test)
    return getSongKey(sc_test)


# Function to retrieve a list of midi pitch events and its timestamp
def getMelodyDeltaTimes(eventsintrack):
    """Walk raw MIDI events and collect note-on events with timing.

    Returns a list of tuples
    (cumulative_time, delta_since_previous_note_on, midi_number,
     spanish_note_name, m21.pitch.Pitch).
    """
    DeltaTimes = []
    cum_sum = 0          # running absolute time
    prev_deltatime = 0   # time elapsed since the previous note-on
    for ev in eventsintrack:
        if ev.isNoteOn():
            pitch_in_time = m21.pitch.Pitch(ev.pitch)
            DeltaTimes.append((cum_sum, prev_deltatime, pitch_in_time.midi,
                               pitch_in_time.spanish, pitch_in_time))
            # Restart the delta time
            prev_deltatime = 0
        elif str(ev.type) == "DeltaTime":
            # Accumulate both the absolute clock and the gap to the next note.
            cum_sum += ev.time
            prev_deltatime += ev.time
    return DeltaTimes


def get_SCLM_v100(melody_w_times_A, melody_w_times_B):
    """Longest common melodic subsequence (SCLM) of two timed melodies.

    A pair of notes matches when the pitch names are equal and their delta
    times differ by at most (delta_A + delta_B) / DIV_CONST.  Returns one
    optimal alignment as a list of matched index pairs (i, j).
    """
    # Dynamic programming over suffixes; memo[i][j] holds the SCLM length of
    # melody_A[i:] vs melody_B[j:].
    max_len = max(len(melody_w_times_A), len(melody_w_times_B)) + 1
    memo = np.full(shape=(max_len, max_len), fill_value=-1)
    lim_A = len(melody_w_times_A)
    lim_B = len(melody_w_times_B)

    def is_match(i, j):
        """True when notes A[i] and B[j] match in pitch name and timing."""
        tot_delta_time = (float(melody_w_times_A[i][1]) + float(melody_w_times_B[j][1])) / float(DIV_CONST)
        tot_diff_time = np.abs(float(melody_w_times_A[i][1]) - float(melody_w_times_B[j][1]))
        return (melody_w_times_A[i][3] == melody_w_times_B[j][3]) and (tot_diff_time <= tot_delta_time)

    for i in range(lim_A, -1, -1):
        for j in range(lim_B, -1, -1):
            # Base case: an empty suffix contributes nothing.
            if i == lim_A or j == lim_B:
                memo[i][j] = 0
                continue
            curr_value = 0
            if is_match(i, j):
                curr_value = memo[i + 1][j + 1] + 1
            # Best of: take the match, skip a note of A, or skip a note of B.
            curr_value = max(curr_value, max(memo[i + 1][j], memo[i][j + 1]))
            memo[i][j] = curr_value

    # Reconstruct one optimal alignment from the memo table.
    # Bug fix: the original only compared table values here, so a pair (i, j)
    # could be appended even when the notes did not actually match (the value
    # test can hold coincidentally).  The match predicate must be re-checked.
    i = 0
    j = 0
    SCLM = []
    while i != lim_A and j != lim_B:
        if is_match(i, j) and (memo[i + 1][j + 1] + 1) == memo[i][j]:
            SCLM.append((i, j))
            i += 1
            j += 1
        elif memo[i + 1][j] == memo[i][j]:
            i += 1
        elif memo[i][j + 1] == memo[i][j]:
            j += 1
    return SCLM


def get_max_timestamp_dif(melody_w_times_A, melody_w_times_B):
    """Return the larger of the two melodies' total time spans."""
    return max(
        melody_w_times_A[len(melody_w_times_A) - 1][0] - melody_w_times_A[0][0],
        melody_w_times_B[len(melody_w_times_B) - 1][0] - melody_w_times_B[0][0]
    )


def getDifSCLM(melody_w_times_A, melody_w_times_B, sclm):
    """Mean absolute difference of inter-match time gaps along the SCLM.

    For each pair of consecutive matched notes, compares the time elapsed in
    melody A with the time elapsed in melody B and returns the mean of the
    absolute differences.  With fewer than two matches there is no gap to
    compare, so the maximum possible span is returned as a worst case.
    """
    if len(sclm) <= 1:
        return get_max_timestamp_dif(melody_w_times_A, melody_w_times_B)
    T_A = np.zeros(shape=(len(sclm) - 1))
    T_B = np.zeros(shape=(len(sclm) - 1))
    T_C = np.zeros(shape=(len(sclm) - 1))
    for i in range(1, len(sclm)):
        T_A[i - 1] = melody_w_times_A[sclm[i][0]][0] - melody_w_times_A[sclm[i - 1][0]][0]
        T_B[i - 1] = melody_w_times_B[sclm[i][1]][0] - melody_w_times_B[sclm[i - 1][1]][0]
        T_C[i - 1] = np.abs(T_A[i - 1] - T_B[i - 1])
    # NOTE: the original also shifted T_B by this mean and filled a `Dif_`
    # array that was never read; that dead code has been removed — the return
    # value is unchanged.
    return np.mean(T_C)


def get_MTRC_v100_from_melody_w_times(melody_w_times_A, melody_w_times_B):
    """Distance metric v1.00 between two timed melodies.

    Weighted sum of four components (weights W1..W4 are module-level globals):
      D1 scale-name mismatch, D2 mode mismatch, D3 relative SCLM shortfall,
      D4 normalised temporal-spacing difference along the SCLM.
    Returns 1 (maximum distance) when either melody is empty.
    """
    # Assert at least one element for each melody
    if len(melody_w_times_A) == 0 or len(melody_w_times_B) == 0:
        print("EMPTY")
        return 1
    result_value = 0
    # Estimated keys drive the scale/mode components.
    key_A = getSongKeyFromMelody_W_Times(melody_w_times_A)
    key_B = getSongKeyFromMelody_W_Times(melody_w_times_B)
    # D1: Scale
    scale_dif1 = 0
    if key_A.name != key_B.name:
        scale_dif1 = W1
    result_value += scale_dif1
    # D2: Mode
    mode_dif2 = 0
    if key_A.mode != key_B.mode:
        mode_dif2 = W2
    result_value += mode_dif2
    # D3: SCLM length shortfall relative to the longer melody
    sclm = get_SCLM_v100(melody_w_times_A, melody_w_times_B)
    max_len = max(len(melody_w_times_A), len(melody_w_times_B))
    sclmlen_dif3 = ((max_len - len(sclm)) / max_len) * W3
    result_value += sclmlen_dif3
    # D4: temporal spacing difference, normalised by the maximum span
    dif_sclm = getDifSCLM(melody_w_times_A, melody_w_times_B, sclm)
    max_timestamp_dif = get_max_timestamp_dif(melody_w_times_A, melody_w_times_B)
    sclmdif_dif4 = (dif_sclm / max_timestamp_dif) * W4
    result_value += sclmdif_dif4
    return result_value


# ## Traverse DATA

# ## NES ##
NES_DATASET_PATH = "/media/sirivasv/JASON/Saul/MCC/DATASETS/DATASUBSET/nesmdb_midi/"

# Traverse midi files
nes_song_filenames = []
for root, directories, files in os.walk(NES_DATASET_PATH):
    for file in files:
        nes_song_filenames.append(file)

print(nes_song_filenames[:3])
print(len(nes_song_filenames))

# +
# %%time

# Component weights for the metric (only D3, the SCLM-length term, is active).
W1 = 0.0
W2 = 0.0
W3 = 1.0
W4 = 0.0

# Read Files
MAX_LIM_NES_SONGS = len(nes_song_filenames)
len_nes_song_filenames = len(nes_song_filenames)
nes_songs_with_error = []
nes_similarities_for_sort = []

# Query File
# "322_SuperMarioBros__02_03SwimmingAround.mid"
# "322_SuperMarioBros__10_11SavedthePrincess.mid"
# "339_Tetris_00_01TitleScreen.mid"
song_filename_query = "322_SuperMarioBros__03_04BowsersCastle.mid"
song_stream_query = m21.converter.parseFile(os.path.join(NES_DATASET_PATH, song_filename_query))
midi_tracks_query = m21.midi.translate.streamToMidiFile(song_stream_query)
melody_w_times_query = getMelodyDeltaTimes(midi_tracks_query.tracks[1].events)

# We traverse the reduced table
cnt = 1
for song_filename_test in nes_song_filenames:
    try:
        song_stream_test = m21.converter.parseFile(os.path.join(NES_DATASET_PATH, song_filename_test))
        midi_tracks_test = m21.midi.translate.streamToMidiFile(song_stream_test)
        melody_w_times_test = getMelodyDeltaTimes(midi_tracks_test.tracks[1].events)
        similarity_distance = get_MTRC_v100_from_melody_w_times(
            melody_w_times_query, melody_w_times_test)
        nes_similarities_for_sort.append((song_filename_test, similarity_distance))
        # Checkpoint the (unsorted) results after every song so a crash loses nothing.
        with open('./PROP1_query_to_sort_{0}.json'.format(song_filename_query.split(".")[0]), 'w') as outfile:
            json.dump({"data": nes_similarities_for_sort}, outfile)
        print("{0}/{1} - {2} - {3}".format(cnt, len_nes_song_filenames, song_filename_test, similarity_distance))
    except Exception as exc:
        # Bug fix: the original used a bare `except:` and then printed
        # `similarity_distance` in `finally`, which raised NameError whenever
        # the very first file failed before the variable was ever bound.
        print("[ERROR!] {0} - {1}".format(song_filename_test, exc))
        nes_songs_with_error.append(song_filename_test)
    finally:
        cnt += 1
        if cnt == MAX_LIM_NES_SONGS:
            break
# -

nes_similarities_for_sort

# Sort ascending by distance: most similar songs first.
nes_similarities_for_sort.sort(key=lambda x: x[1])

nes_similarities_for_sort

with open('./PROP1_query_sorted_{0}.json'.format(song_filename_query.split(".")[0]), 'w') as outfile:
    json.dump({"data": nes_similarities_for_sort}, outfile)
PROP1/NES_DBGATHERING.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.3 (''venv'': venv)'
#     name: python3
# ---

# ![](assets/2_model_demo.gif)

# # Optimizing two models at once

# One might be interested in optimizing for two "competing" models at the same time. Consider having 3 separate samples A, B, C and we'd be interested in extracting the significance for two out of the three at the same time. Two models would be fitted, e.g. one where A is signal and B & C are backgrounds and one where B is signal and A & C are backgrounds. This example shows how to optimize for both of them at the same time.

# +
import time
import jax
import jax.experimental.optimizers as optimizers
import jax.experimental.stax as stax
import jax.random
from jax.random import PRNGKey
import numpy as np

from functools import partial

from neos import data, makers
from relaxed import infer

rng = PRNGKey(22)
# -


def hists_from_nn_three_samples(
    predict,
    # NOTE(review): the list defaults below are mutable default arguments;
    # harmless here because they are never mutated, but confirm before reuse.
    NMC=500,
    s1_mean=[-2, 2],
    s2_mean=[2, 2],
    s3_mean=[0, -2],
    LUMI=10,
    sig_scale=1,
    bkg_scale=1,
    group=1,
    real_z=False,
):
    """Same as hists_from_nn_three_blobs, but parametrizes which of three
    Gaussian samples plays the signal role.

    Args:
        predict: decision function for a parameterized observable
            (assumed to end in a softmax).
        NMC: Monte-Carlo events drawn per sample.
        s1_mean / s2_mean / s3_mean: 2-d means of the three blobs.
        LUMI, sig_scale, bkg_scale: yield normalisation factors.
        group: which sample (1, 2 or 3) is treated as the signal.
        real_z: unused in this variant.

    Returns:
        hist_maker: callable that draws the three samples once and returns a
        closure mapping network params -> (s, b_tot, b_unc, s_raw, b_hists).
    """

    def get_hists(network, s, bs):
        # Per-bin yields: softmax outputs summed over events, normalised to
        # the sample size and scaled to the luminosity.
        NMC = len(s)
        s_hist = predict(network, s).sum(axis=0) * sig_scale / NMC * LUMI
        # NOTE(review): the first background below is scaled with sig_scale —
        # possibly a copy-paste slip for bkg_scale; confirm intent.
        b_hists = tuple(
            [
                predict(network, bs[0]).sum(axis=0) * sig_scale / NMC * LUMI,
                predict(network, bs[1]).sum(axis=0) * bkg_scale / NMC * LUMI,
            ]
        )
        b_tot = jax.numpy.sum(jax.numpy.asarray(b_hists), axis=0)
        # Poisson-like uncertainty on the summed background.
        b_unc = jax.numpy.sqrt(b_tot)
        # append raw hists for signal and bkg as well
        results = s_hist, b_tot, b_unc, s_hist, b_hists
        return results

    def hist_maker():
        # Draw the three unit-covariance Gaussian blobs once; the returned
        # closure reuses them so every call sees the same events.
        sig1 = np.random.multivariate_normal(s1_mean, [[1, 0], [0, 1]], size=(NMC,))
        sig2 = np.random.multivariate_normal(s2_mean, [[1, 0], [0, 1]], size=(NMC,))
        bkg = np.random.multivariate_normal(s3_mean, [[1, 0], [0, 1]], size=(NMC,))

        def make(network):
            # `group` selects which blob is signal; the other two are background.
            if group == 1:
                return get_hists(network, sig1, (sig2, bkg))
            elif group == 2:
                return get_hists(network, sig2, (sig1, bkg))
            elif group == 3:
                return get_hists(network, bkg, (sig1, sig2))
            else:
                # NOTE(review): raises the UserWarning *class* for an invalid
                # group; a ValueError with a message would be clearer.
                raise UserWarning

        # Expose the raw samples so plotting code can scatter them.
        make.bkg = bkg
        make.sig2 = sig2
        make.sig1 = sig1
        return make

    return hist_maker


# +
import pyhf

# Run pyhf on its JAX backend so the models stay differentiable.
pyhf.set_backend(pyhf.tensor.jax_backend())

from neos import models


def nn_hepdata_like_w_hists(histogram_maker):
    """Analogous to `makers.nn_hepdata_like`, but adapted to the extra
    raw-histogram entries returned by hists_from_nn_three_samples.

    Returns a model maker mapping network params -> (model, bkg-only params).
    """
    hm = histogram_maker()

    def nn_model_maker(hpars):
        # The "hyper-parameters" here are simply the network weights.
        network = hpars
        s, b, db, _, _ = hm(network)  # Changed here: drop the raw-hist extras
        m = models.hepdata_like(s, b, db)  # neos model
        nompars = m.config.suggested_init()
        bonlypars = jax.numpy.asarray([x for x in nompars])
        # Background-only hypothesis: force the signal strength (POI) to 0.
        bonlypars = jax.ops.index_update(bonlypars, m.config.poi_index, 0.0)
        return m, bonlypars

    nn_model_maker.hm = hm
    return nn_model_maker
# -

# ### Initialise network using `jax.experimental.stax`

# Number of network outputs = number of histogram bins.
NOUT = 3

init_random_params, predict = stax.serial(
    stax.Dense(1024),
    stax.Relu,
    stax.Dense(1024),
    stax.Relu,
    stax.Dense(NOUT),
    stax.Softmax,
)

# ### Define histogram and model maker functions

# +
# Model 1: sample 1 is signal; Model 2: sample 2 is signal.
hmaker = hists_from_nn_three_samples(predict, group=1)
nnm = nn_hepdata_like_w_hists(hmaker)

hmaker2 = hists_from_nn_three_samples(predict, group=2)
nnm2 = nn_hepdata_like_w_hists(hmaker2)

loss1 = infer.make_hypotest(nnm, solver_kwargs=dict(pdf_transform=True))
loss2 = infer.make_hypotest(nnm2, solver_kwargs=dict(pdf_transform=True))

# optimize the average significance!
loss = (
    lambda params, test_mu: (
        loss1(params, test_mu)["CLs"] + loss2(params, test_mu)["CLs"]
    )
    / 2
)
# -

# ### Randomly initialise nn weights and check that we can get the gradient of the loss wrt nn params

# +
_, network = init_random_params(jax.random.PRNGKey(2), (-1, 2))

loss(network, 1.0)
# -

nnm.hm(network)

a, b, c, d, e = nnm.hm(network)

# ### Define training loop!
# +
# jit_loss = jax.jit(loss)
opt_init, opt_update, opt_params = optimizers.adam(0.5e-3)


def train_network(N, cont=False, network=None):
    """Generator: run N Adam steps on the averaged CLs loss.

    Yields (network_params, metrics_dict, epoch_time) after every step so the
    caller can animate/inspect progress.  With cont=True, training is meant to
    continue from the supplied *network* instead of a fresh initialisation.
    """
    if not cont:
        _, network = init_random_params(jax.random.PRNGKey(4), (-1, 2))
    # NOTE(review): this branch is a no-op (`network = network`) — presumably
    # leftover from an earlier continue-training code path.
    if network is not None:
        network = network
    losses = []
    cls_vals = []
    state = opt_init(network)

    # parameter update function
    # @jax.jit
    def update_and_value(i, opt_state, mu, loss_choice):
        net = opt_params(opt_state)
        value, grad = jax.value_and_grad(loss_choice)(net, mu)
        # NOTE(review): updates the *enclosing* `state`, not the `opt_state`
        # argument; equivalent here because the caller always passes `state`,
        # but confirm before enabling jax.jit on this function.
        return opt_update(i, grad, state), value, net

    for i in range(N):
        start_time = time.time()
        loss_choice = loss
        # test_mu fixed at 1.0 (nominal signal strength).
        state, value, network = update_and_value(i, state, 1.0, loss_choice)
        epoch_time = time.time() - start_time
        losses.append(value)
        metrics = {"loss": losses}
        yield network, metrics, epoch_time
# -

# ### Plotting helper function for awesome animations :)

# +
# Choose colormap
import matplotlib.pylab as pl
from matplotlib.colors import ListedColormap


def to_transp(cmap):
    """Return a copy of *cmap* whose alpha channel ramps linearly 0 -> 0.7."""
    # cmap = pl.cm.Reds_r
    my_cmap = cmap(np.arange(cmap.N))
    # my_cmap[:,-1] = np.geomspace(0.001, 1, cmap.N)
    my_cmap[:, -1] = np.linspace(0, 0.7, cmap.N)
    # my_cmap[:,-1] = np.ones(cmap.N)
    return ListedColormap(my_cmap)


def plot(axarr, network, metrics, hm, hm2, maxN, ith):
    """Draw one animation frame onto the flattened axes array *axarr*.

    Panels: [0] decision surface + scatter of the three samples,
    [1] loss (CLs) history, [2] raw stacked histograms,
    [3] model 1 (sig1 vs rest), [4] model 2 (sig2 vs rest).
    *ith* is the current epoch (legends are drawn only on frame 0).
    """
    xlim = (-5, 5)
    ylim = (-5, 5)
    # 101x101 evaluation grid over the 2-d input plane.
    g = np.mgrid[xlim[0] : xlim[1] : 101j, ylim[0] : ylim[1] : 101j]
    levels = np.linspace(0, 1, 20)
    ax = axarr[0]
    # One translucent contour set per softmax output channel.
    ax.contourf(
        g[0],
        g[1],
        predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, NOUT)[:, :, 0],
        levels=levels,
        cmap=to_transp(pl.cm.Reds),
    )
    ax.contourf(
        g[0],
        g[1],
        predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, NOUT)[:, :, 1],
        levels=levels,
        cmap=to_transp(pl.cm.Greens),
    )
    if NOUT > 2:
        ax.contourf(
            g[0],
            g[1],
            predict(network, np.moveaxis(g, 0, -1)).reshape(101, 101, 3)[:, :, 2],
            levels=levels,
            cmap=to_transp(pl.cm.Blues),
        )
    # print(list(map(len, [hm.sig1[:, 0], hm.sig2[:, 0], hm.bkg[:, 0]])))
    ax.scatter(hm.sig1[:, 0], hm.sig1[:, 1], alpha=0.25, c="C9", label="sig1")
    # NOTE(review): label "bkg2" is used for both scatters below — the first
    # is probably meant to be "sig2"/"bkg1"; confirm.
    ax.scatter(hm.sig2[:, 0], hm.sig2[:, 1], alpha=0.17, c="C8", label="bkg2")
    ax.scatter(hm.bkg[:, 0], hm.bkg[:, 1], alpha=0.17, c="C1", label="bkg2")
    ax.set_xlim(*xlim)
    ax.set_ylim(*ylim)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    # Loss history with the 0.05 CLs exclusion threshold marked.
    ax = axarr[1]
    ax.axhline(0.05, c="slategray", linestyle="--")
    ax.plot(metrics["loss"][:ith], c="steelblue", linewidth=2.0)
    ax.set_ylim(0, metrics["loss"][0])
    ax.set_xlim(0, maxN)
    ax.set_xlabel("epoch")
    ax.set_ylabel(r"$cl_s$")
    # Raw stacked yields for the three samples.
    ax = axarr[2]
    s, b, db, sig, bs = hm(network)
    ytop = np.max(np.sum([s, b], axis=0)) * 1.3
    ax.bar(range(NOUT), sig, bottom=bs[0] + bs[1], color="C9", label="Sample 1")
    ax.bar(range(NOUT), bs[0], bottom=bs[1], color="C8", label="Sample 2")
    ax.bar(range(NOUT), bs[1], color="C1", label="Sample 3")
    ax.set_ylabel("frequency")
    ax.set_xlabel("nn output")
    ax.set_title("Raw histograms")
    ax.set_ylim(0, ytop)
    if ith == 0:
        ax.legend()
    # Model 1 view: sig1 vs combined background, with uncertainty band.
    ax = axarr[3]
    s, b, db, sig, bs = hm(network)
    ax.bar(range(NOUT), s, bottom=b, color="#722620", label="sig", alpha=0.9)
    ax.bar(range(NOUT), b, color="#F2BC94", label="bkg")
    ax.bar(
        range(NOUT),
        db,
        bottom=b - db / 2.0,
        alpha=0.3,
        color="black",
        label="bkg error",
        hatch="////",
    )
    ax.set_ylabel("frequency")
    ax.set_xlabel("nn output")
    ax.set_title("Model 1: sig1 vs (sig2 + bkg)")
    ax.set_ylim(0, ytop)
    if ith == 0:
        ax.legend()
    # Model 2 view: sig2 vs combined background.
    ax = axarr[4]
    s, b, db, sig, bs = hm2(network)
    ax.bar(range(NOUT), s, bottom=b, color="#722620", label="sig")
    ax.bar(range(NOUT), b, color="#F2BC94", label="bkg")
    ax.bar(
        range(NOUT),
        db,
        bottom=b - db / 2.0,
        alpha=0.3,
        color="black",
        label="bkg error",
        hatch="////",
    )
    ax.set_ylabel("frequency")
    ax.set_xlabel("nn output")
    ax.set_title("Model 2: sig2 vs (sig1 + bkg)")
    ax.set_ylim(0, ytop)
    if ith == 0:
        ax.legend()
# -

# ### Let's run it!!
# + import numpy as np from matplotlib import pyplot as plt from celluloid import Camera from IPython.display import HTML plt.rcParams.update( { "axes.labelsize": 13, "axes.linewidth": 1.2, "xtick.labelsize": 13, "ytick.labelsize": 13, "figure.figsize": [12.0, 8.0], "font.size": 13, "xtick.major.size": 3, "ytick.major.size": 3, "legend.fontsize": 11, } ) fig, axarr = plt.subplots(2, 3, dpi=120) axarr = axarr.flatten() # fig.set_size_inches(15, 10) camera = Camera(fig) maxN = 20 # make me bigger for better results! animate = True # animations fail tests # Training for i, (network, metrics, epoch_time) in enumerate(train_network(maxN)): print(f"epoch {i}:", f'CLs = {metrics["loss"][-1]}, took {epoch_time}s') if animate: plot(axarr, network, metrics, nnm.hm, nnm2.hm, maxN=maxN, ith=i) plt.tight_layout() camera.snap() if animate: camera.animate().save("animation.gif", writer="imagemagick", fps=10)
nbs/demo_2models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Model Performance Metrics
#
# Note: Notebook has been heavily adapted from <NAME>, MD, MPH work on sequential severity prediction for critically ill patients (Source: https://github.com/cosgriffc/seq-severityscore)
#
# <hr />
#
# ## Environment

# +
import pandas as pd
import numpy as np
import pickle

from model_analysis import APACHEWrapper, SVCWrapper
from model_analysis import gen_auc_plot
from model_analysis import opr_table, sens_spec_table

# Load seaborn to set plot styles
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_style('whitegrid')
# -

# ## Load Models and Data

# +
# Training split (only its column layout is used downstream).
train_X = pd.read_csv('../extraction/data/train_X.csv').set_index('patientunitstayid')
train_y = pd.read_csv('../extraction/data/train_y.csv').values.ravel()
cols = train_X.columns

# APACHE baseline: drop patients without an APACHE probability, and keep the
# same boolean mask to filter the test set below so the rows stay aligned.
test_apache = pd.read_csv('../extraction/data/test_apache.csv')
missing_apache_prob = (test_apache['apache_prediction'].isna())
test_apache = test_apache[~missing_apache_prob].values.ravel()
apache_model = APACHEWrapper(test_apache)

test_X = pd.read_csv('../extraction/data/test_X.csv').set_index('patientunitstayid')
test_y = pd.read_csv('../extraction/data/test_y.csv')
# NOTE(review): assumes test_X/test_y rows are in the same order as
# test_apache — confirm against the extraction pipeline.
test_X = test_X[list(~missing_apache_prob)]
test_y = test_y[list(~missing_apache_prob)].values.ravel()


def _load_pickle(path):
    """Unpickle *path*, closing the file handle.

    Bug fix: the original `pickle.load(open(path, 'rb'))` calls leaked three
    open file handles.
    """
    with open(path, 'rb') as fh:
        return pickle.load(fh)


# ML models
logit_full = _load_pickle('./logit_full')
xgb_full = _load_pickle('./xgb_full')
rf_full = _load_pickle('./rf_full')
# -

# ## Plot Receiver Operator Characteristic Curves

gen_auc_plot(models=[apache_model, xgb_full, rf_full, logit_full],
             names=['APACHE', 'XGB', 'RF', 'Logit'],
             title='Receiver Operator Characteristic Curves',
             X=test_X, y=test_y, ci_level=0.95,
             save_name='auroc_all_models_test')

gen_auc_plot(models=[apache_model, xgb_full],
             names=['APACHE', 'XGB'],
             title='Receiver Operator Characteristic Curves',
             X=test_X, y=test_y, ci_level=0.95,
             save_name='auroc_test')

# ## Observed-to-Predicted Mortality Ratios

opr = opr_table(models=[apache_model, logit_full, rf_full, xgb_full],
                names=['APACHE', 'Logit', 'RF', 'XGB'],
                X=test_X, y=test_y)
opr.to_csv('./opr-table.csv')
opr

# ## Threshold for High Sensitivity

# +
# ~95% Sensitivity (0.945 keeps the operating point just under the target).
spec_at_high_sens = sens_spec_table(models=[apache_model, logit_full, rf_full, xgb_full],
                                    names=['APACHE', 'Logit', 'RF', 'XGB'],
                                    X=test_X, y=test_y, sens=0.945)
spec_at_high_sens

# +
# ~100% Sensitivity (0.995 operating point).
spec_at_high_sens = sens_spec_table(models=[apache_model, logit_full, rf_full, xgb_full],
                                    names=['APACHE', 'Logit', 'RF', 'XGB'],
                                    X=test_X, y=test_y, sens=0.995)
spec_at_high_sens
# -
modeling/model_metrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shashwat1001/Python/blob/master/decisiontree.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="cEg7L95w_UWm" colab_type="code" colab={}
import pandas as pd

# Toy symptom dataset: 12 patients, yes/no symptom flags, three diagnoses.
data = pd.DataFrame({"Patient_ID": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"],
                     "Travel_History": ["Yes", "Yes", "No", "Yes", "Yes", "No", "Yes", "Yes", "Yes", "Yes", "No", "No"],
                     "Fever": ["Yes", "Yes", "Yes", "Yes", "Yes", "No", "Yes", "Yes", "Yes", "Yes", "Yes", "No"],
                     "Cough": ["Yes", "Yes", "Yes", "No", "Yes", "No", "Yes", "Yes", "Yes", "Yes", "No", "Yes"],
                     "Dyspnoca": ["Yes", "Yes", "No", "No", "Yes", "No", "Yes", "No", "Yes", "Yes", "No", "No"],
                     "Fatigue": ["Yes", "No", "Yes", "Yes", "No", "Yes", "Yes", "Yes", "No", "Yes", "Yes", "No"],
                     "Sore_throat": ["No", "Yes", "No", "No", "No", "Yes", "Yes", "No", "Yes", "No", "No", "Yes"],
                     "Diagnosis": ["Sars-Cov-2", "Sars-Cov-2", "Seasonal Influenza", "Seasonal Influenza", "Sars-Cov-2", "Common Cold", "Sars-Cov-2", "Seasonal Influenza", "Sars-Cov-2", "Sars-Cov-2", "Seasonal Influenza", "Common Cold"]},
                    columns=["Patient_ID", "Travel_History", "Fever", "Cough", "Dyspnoca", "Sore_throat", "Fatigue", "Diagnosis"])

features = data[["Travel_History", "Fever", "Cough", "Dyspnoca", "Sore_throat", "Fatigue"]]
target = data[["Diagnosis"]]

# + id="Uyie7M1y_UW1" colab_type="code" colab={}
import graphviz
import pydotplus
import pandas as pd
from sklearn import tree  # bug fix: `tree` is used below but was never imported
from sklearn.tree import DecisionTreeClassifier  # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split  # Import train_test_split function
from sklearn import metrics  # Import scikit-learn metrics module for accuracy calculation

# + id="D5I6itHG_UW-" colab_type="code" outputId="deee16a1-4acc-418f-f506-f5d92bc52bf5" colab={"base_uri": "https://localhost:8080/", "height": 195}
data.head()

# + id="QQDSLYJI_UXM" colab_type="code" colab={}
X = data.iloc[:, 1:-1]  # Features (drop Patient_ID and Diagnosis)
y = target  # Target variable

# + id="AOVP6Dpt_UXY" colab_type="code" colab={}
X

# + id="OOeErx6607Mq" colab_type="code" colab={}
# One-hot encoding of the features (shown for comparison only).
data1 = pd.get_dummies(data.iloc[:, 1:-1])

# + id="9iM-Dv8x1QYt" colab_type="code" colab={}
data1.head()

# + id="lHeiLiNW_UXl" colab_type="code" colab={}
from sklearn.preprocessing import LabelEncoder

# + id="U9fEf4_u_UXv" colab_type="code" colab={}
# Encode every Yes/No column to 0/1 integers column by column.
X = X.apply(LabelEncoder().fit_transform)

# + id="GcPVj3GN_UX-" colab_type="code" colab={}
X
# 60/40 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)

# + id="HIRA-dzH_UYK" colab_type="code" colab={}
# Create Decision Tree classifer object
clf = DecisionTreeClassifier(criterion="entropy")

# Train Decision Tree Classifer
clf = clf.fit(X_train, y_train)

# Predict the response for test dataset
y_pred = clf.predict(X_test)

# + id="OXBAJH4FTj7l" colab_type="code" colab={}
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))

# + id="Ptdzka3ZDTzI" colab_type="code" colab={}
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))

# + id="WYpkW2_O_UYS" colab_type="code" colab={}
one_hot_data = pd.get_dummies(data[['Diagnosis', 'Travel_History', 'Fever', 'Cough', 'Dyspnoca', 'Sore_throat']])

# + id="fMaZQkwv_UYe" colab_type="code" colab={}
one_hot_data

# + id="0x8T4_2H_UYo" colab_type="code" colab={}
clf1 = DecisionTreeClassifier(criterion="entropy")

# Training the Decision Tree.
# Bug fix: the original fitted `clf1.fit(one_hot_data, data)`, i.e. against
# the entire frame (including Patient_ID) instead of the Diagnosis labels.
# NOTE(review): one_hot_data still contains the Diagnosis dummy columns, so
# the labels leak into the features — review whether they should be dropped.
clf_train = clf1.fit(one_hot_data, target)

# + id="u5TgdVD0_UY1" colab_type="code" outputId="b0237f41-872d-4fb3-9643-062b1d6565b8" colab={"base_uri": "https://localhost:8080/", "height": 141}
print(clf_train, None)

# + id="K8S5_rEzuNRK" colab_type="code" outputId="47ea9320-86ed-45f0-9728-942afe6d6783" colab={"base_uri": "https://localhost:8080/", "height": 320}
# Refit on the hold-out split and draw the resulting tree.
tree.plot_tree(clf1.fit(X_test, y_test))

# + id="uUytkH1C_UZJ" colab_type="code" outputId="8fe91efe-180c-4bf3-a33e-9680d89ea3dc" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Export the fitted tree to Graphviz and render it to "decision.pdf".
dot_data = tree.export_graphviz(clf1, out_file=None)
graph = graphviz.Source(dot_data)
graph.render("decision")
decisiontree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sts
# %matplotlib inline

# # Choosing the density function
# (original heading: "Задание функции плотности")

cc = 4  # gamma shape parameter; for gamma(a=cc): mean = cc, variance = cc
gamma_rv = sts.gamma(a = cc)
sample = gamma_rv.rvs(1000)

# # Theoretical density and the corresponding sample histogram
# (original heading: "Теоретическая функция плотности и соответствующая гистограмма выборки")

x = np.linspace(min(sample), max(sample), 1000)
gamma_pdf = gamma_rv.pdf(x)
plt.plot(x, gamma_pdf, label = "theoretical DF")
plt.hist(sample, density=True, label = "histogram")
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')


def check_clt(n, trials=1000):
    """Plot the CLT check for sample size *n*.

    Draws *trials* means of size-*n* gamma(cc) samples, overlays the normal
    density N(cc, sqrt(cc/n)) predicted by the central limit theorem, and
    returns the array of sample means.  Extracted from three copy-pasted
    cells (n = 5, 10, 30) in the original notebook.
    """
    data = np.zeros(trials)
    for i in range(trials):
        data[i] = np.mean(gamma_rv.rvs(n))
    # CLT: the mean of n iid gamma(cc) draws is approximately
    # N(loc=cc, scale=sqrt(cc/n)).
    norm_rv = sts.norm(loc = cc, scale = (cc / n) ** 0.5)
    x = np.linspace(min(data), max(data), 1000)
    plt.plot(x, norm_rv.pdf(x), label = "theoretical DF")
    plt.hist(data, density = True, label = "histogram")
    plt.ylabel('$f(x)$')
    plt.xlabel('$x$')
    plt.legend(loc='upper left')
    return data


# # CLT check for n = 5  (was: "Проверка ЦПТ при n = 5")
check_clt(5)

# # CLT check for n = 10  (was: "Проверка ЦПТ при n = 10")
check_clt(10)

# # CLT check for n = 30  (was: "Проверка ЦПТ при n = 30")
check_clt(30)

# # Conclusions
# (translated from the Russian original) In practice the CLT works well for
# the many distributions tried here (gamma, log-gamma, Lorentz-like, etc.);
# already at n = 5 the normal approximation is good.  The main lesson was
# understanding exactly which parameters are passed when a distribution is
# defined: the scipy.stats parameterisations sometimes differ from
# Wikipedia's, so be careful when computing the theoretical mean and
# variance.
Yandex data science/1/Week 4/Untitled.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Keyboard Input: Converting Fahrenheit temperatures Celsius
#
# ### This program allows a user to enter keyboard data in response to a request for a Fahrenheit temperature
# - This program utilizes a "Try & Except" conditional statement to error trap non-numeric values entered into a program designed to convert numeric integer or float Fahrenheit degree values into Celsius degree values.
#
# - If an integer or float numeric value is entered, the program will convert it into a float value and calculate the degrees Celsius
#
# - If a non-numeric value is entered, the program will capture the error and generate an output statement: "The data you entered is unable to be converted to a Celsius temperature. Please input a numeric value, thank you!"


def fahrenheit_to_celsius(degrees_f):
    """Convert a Fahrenheit temperature (int or float) to degrees Celsius."""
    return (float(degrees_f) - 32.0) * 5 / 9


def main():
    """Prompt for a Fahrenheit value and print its Celsius equivalent.

    Non-numeric input is trapped with a narrow `except ValueError` (the
    original caught every Exception) and answered with a friendly message.
    """
    F_input = input('Please enter temperature in Fahrenheit:')
    try:
        # Attempt the user-input -> float conversion; only this line can raise.
        F_input = float(F_input)
    except ValueError:
        # Input could not be parsed as a number.
        print('The data you entered is unable to be converted to a Celsius temperature. Please input a numeric value, thank you!')
    else:
        print('The Celsius equivalent of the temperature you entered,', F_input, 'degrees Fahrenheit, is', fahrenheit_to_celsius(F_input), 'degrees Celsius. Please select SHIFT + ENTER to enter another value, thank you!')


if __name__ == "__main__":
    main()
draftfolder/Module2Assignment-Gordon.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# # Student Alcohol Consumption

# ### Introduction:
#
# This time you will download a dataset from the UCI.
#
# ### Step 1. Import the necessary libraries

import pandas as pd

# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/Students_Alcohol_Consumption/student-mat.csv).

# ### Step 3. Assign it to a variable called df.

url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/Students_Alcohol_Consumption/student-mat.csv'
df = pd.read_csv(url)
df.head()

# ### Step 4. For the purpose of this exercise slice the dataframe from 'school' until the 'guardian' column

df = df.loc[:, 'school':'guardian']
df.head()

# ### Step 5. Create a lambda function that will capitalize strings.

cap = lambda string: string.upper()

# ### Step 6. Capitalize both Mjob and Fjob

df['Mjob'] = df['Mjob'].apply(cap)
df['Fjob'] = df['Fjob'].apply(cap)
df.head()

# ### Step 7. Print the last elements of the data set.

df.tail()

# ### Step 8. Did you notice the original dataframe is still lowercase? Why is that? Fix it and capitalize Mjob and Fjob.

# ### Step 9. Create a function called majority that returns a boolean value to a new column called legal_drinker (Consider majority as older than 17 years old)

majority = lambda age: age > 17
df['legal_drinker'] = df['age'].apply(majority)
df.tail()

# ### Step 10. Multiply every number of the dataset by 10.
# ##### I know this makes no sense, don't forget it is just an exercise

# Scale only the integer columns by 10, then place them back beside the
# untouched (non-numeric) columns.
new = pd.DataFrame(df[entry] * 10 for entry in df if df[entry].dtype == int).transpose()
# Bug fix: the original used pd.concat(..., ignore_index=True) along the
# default axis=0, which stacked the scaled columns *under* the other columns
# and produced NaN blocks; the scaled columns belong side by side (axis=1).
pd.concat([df.drop(new.columns, axis=1), new], axis=1)
new
04_Apply/Students_Alcohol_Consumption/Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # best&less account stuff # # Python command line script to register, lookup, edit, delete accounts. The accounts are sorted by email address. # # Changes from current: # # ask twice for email and password # # force captive # # For every account: # # Creates a config file. Creates a json object. Creates a markdown blog post with python-nikola. Creates socially awkward seal GOODBYE FIRSTNAME, HELLO FIRSTNAME meme. # # 3 database record of previous entries. # # gender - display male.jpg for male and female.jpg for female # # Gets art quote of the day and replaces art with fashion. add fashion to middle of authors name. add quote to top of blog post. # TODO: display quote as image # # random choice of clothes. Pregenerated random, recommended. currently random choice of - import requests import bs4 import getpass import json import shutil import PIL import json from PIL import ImageDraw, ImageFont import os #import arrow import configparser myusr = getpass.getuser() import arrow # + loginda = input('Enter first name: ') loglast = input('Enter last name: ') print('Suggestions {}{}'.format(loginda, loglast)) userset = input('Enter username: ') logemail = input('Enter email: ') emailtwic = input('Enter email again: ') if logemail == emailtwic: print('Email are the same') else: print('Email is not the same') bodenter = input('date of birth: YEAR/MNTH/DAY: ') datebith = arrow.get(bodenter) datebithz = datebith.strftime('%Y') passent = getpass.getpass('Enter password: ') passagain = getpass.getpass('Enter password again: ') if passent == passagain: print('They are correct') else: print('They are not correct') imgend = input('gender: ') #import ConfigParser config = configparser.RawConfigParser() # When adding sections or items, add them in the reverse order 
of # how you want them to be displayed in the actual file. # In addition, please note that using RawConfigParser's and the raw # mode of ConfigParser's respective set functions, you can assign # non-string values to keys internally, but will receive an error # when attempting to write to a file or when you get it in non-raw # mode. SafeConfigParser does not allow such assignments to take place. config.add_section(logemail) config.set(logemail, 'firstname', loginda) config.set(logemail, 'lastname', loglast) config.set(logemail, 'email', logemail) # Writing our configuration file to 'example.cfg' with open('/home/pi/.config/bestless.ini', 'w') as configfile: config.write(configfile) # - import os import random # + if imgend == 'male': genpat = ('/galleries/male.jpg') rancho = random.choice(os.listdir('/home/pi/memetest/galleries/male/')) genshit = ('/galleries/male/{}'.format(rancho)) #gmal = requests.get('https://api.gilt.com/v1/sales/men/upcoming.json?apikey=bb7cf716ec52e7a7737705f0129ed4282a35239a0a6b8a821e68f30a00ecc1a7') elif imgend == 'female': genpat = ('/galleries/female.jpg') rancho = random.choice(os.listdir('/home/pi/memetest/galleries/female/')) #gmal = requests.get('https://api.gilt.com/v1/sales/women/upcoming.json?apikey=bb7cf716ec52e7a7737705f0129ed4282a35239a0a6b8a821e68f30a00ecc1a7') genshit = ('/galleries/female/{}'.format(rancho)) # + #salejs = gmal.json() # + #salelen = len(salejs['sales']) # + #for salele in range(salelen): # print(salejs['sales'][salele]['description']) # print(salejs['sales'][salele]['image_urls']['686x374'][0]['url']) # + #requests.get('https://api.gilt.com/v1/sales/men/upcoming.json?apikey=bb7cf716ec52e7a7737705f0129ed4282a35239a0a6b8a821e68f30a00ecc1a7') # + #https://api.gilt.com/v1/sales/women/active.json?apikey=bb7cf716ec52e7a7737705f0129ed4282a35239a0a6b8a821e68f30a00ecc1a7 # - genshit # + #import random # + #ranum = random.randint(100,1000) # + #for it in range(100,1000): # print('suggestion: {}{}{}'.format(loginda, 
loglast, it)) # + #ranum # + #reqgif = requests.get('http://api.giphy.com/v1/gifs/search?q={}+fashion&api_key=ee58ff1d10c54fd29ddb0388126c2bcd'.format(datebith)) # + #gifjs = reqgif.json() # + #for himg in range(25): # img_data = requests.get(gifjs['data'][himg]['images']['fixed_height']['url']).content # with open('{}.gif'.format(str(himg)), 'wb') as handler: # handler.write(img_data) # print(gifjs['data'][himg]['images']['fixed_height']['url']) # - with open('/home/{}/account.json'.format(myusr), 'r') as accdict: readd = accdict.read() readdict = json.loads(readd) # + #readdict = json.loads(readd) # + #print(readdict) # - emailup = input('Email to lookup: ') print(readdict[emailup]) emailcont = ('Hello {},\n\nToday we have sale on {}. It is suitable for someone born {}.\n\nHave a great day,\n\nbest&less.'.format(loginda, imgend, datebith.humanize(), str(datebithz))) emailcont lenid = len(readdict) nexid = lenid + 1 # + textzero = 'BYE ' + loginda textone = 'HELLO ' + loginda upzero = textzero.upper() botzero = textone.upper() # In[ ]: # - botzero # + #gheigh = (gtm['height']) #gwth = (gtm['width']) #response = requests.get(gtm['url'], stream=True) #with open('{}{}-reference.jpg'.format(repathz, str(rdz.author)), 'wb') as out_file: # shutil.copyfileobj(response.raw, out_file) # del response #with open('/home/{}/memetest/galleries/{}.png'.format(myusr, gtm['id']), 'wb') as out_file: # shutil.copyfileobj(response.raw, out_file) # del response img = PIL.Image.open('/home/{}/Downloads/seal.jpg'.format(myusr)) imageSize = img.size print(imageSize) # find biggest font size that works fontSize = int(imageSize[1]/5) print(fontSize) font = ImageFont.truetype("/home/{}/impact.ttf".format(myusr), fontSize) topTextSize = font.getsize(upzero) bottomTextSize = font.getsize(botzero) print(topTextSize) while topTextSize[0] > imageSize[0]-20 or bottomTextSize[0] > imageSize[0]-20: fontSize = fontSize - 1 font = ImageFont.truetype("/home/{}/impact.ttf".format(myusr), fontSize) 
topTextSize = font.getsize(upzero) bottomTextSize = font.getsize(botzero) print(bottomTextSize) # find top centered position for top text topTextPositionX = (imageSize[0]/2) - (topTextSize[0]/2) topTextPositionY = 0 topTextPosition = (topTextPositionX, topTextPositionY) # find bottom centered position for bottom text bottomTextPositionX = (imageSize[0]/2) - (bottomTextSize[0]/2) bottomTextPositionY = imageSize[1] - bottomTextSize[1] -10 bottomTextPosition = (bottomTextPositionX, bottomTextPositionY) draw = ImageDraw.Draw(img) outlineRange = int(fontSize/15) for x in range(-outlineRange, outlineRange+1): for y in range(-outlineRange, outlineRange+1): draw.text((topTextPosition[0]+x, topTextPosition[1]+y), upzero, (0,0,0), font=font) draw.text((bottomTextPosition[0]+x, bottomTextPosition[1]+y), botzero, (0,0,0), font=font) draw.text(topTextPosition, upzero, (255,255,255), font=font) draw.text(bottomTextPosition, botzero, (255,255,255), font=font) img.save("/home/{}/memetest/galleries/{}.jpg".format(myusr, str(nexid))) #print(gtm['id']) #filemh = gtm['id'] #print('hello') # + #reqote = requests.get('https://www.goodreads.com/quotes/tag/clothes') # - somequote = requests.get('http://quotes.rest/qod.json?category=art') quotejs = (somequote.json()) myqute = (quotejs['contents']['quotes'][0]['quote']) lenqute = (quotejs['contents']['quotes'][0]['length']) qutefas = myqute.replace('art', 'fashion') qutefas quoteauth = quotejs['contents']['quotes'][0]['author'] auspit = quoteauth.split(' ') fashauthz = (auspit[0] + " 'fashion' " + auspit[1]) bothquote = '"' + qutefas + '" - ' + fashauthz print(bothquote) print(lenqute) lenqute #if lenqute < 20: # + #print(quotejs['contents']['quotes'][0]['id']) # + #float(lenqute) /2 # + #gheigh = (gtm['height']) #gwth = (gtm['width']) #response = requests.get(gtm['url'], stream=True) #with open('{}{}-reference.jpg'.format(repathz, str(rdz.author)), 'wb') as out_file: # shutil.copyfileobj(response.raw, out_file) # del response #with 
open('/home/{}/memetest/galleries/{}.png'.format(myusr, gtm['id']), 'wb') as out_file: # shutil.copyfileobj(response.raw, out_file) # del response img = PIL.Image.open('/home/pi/Downloads/seal.jpg') imageSize = img.size print(imageSize) # find biggest font size that works fontSize = int(imageSize[1]/5) print(fontSize) font = ImageFont.truetype("/home/{}/impact.ttf".format(myusr), fontSize) topTextSize = font.getsize(quotejs['contents']['quotes'][0]['quote']) #bottomTextSize = font.getsize(quotejs['contents']['quotes'][0]['quote']) print(topTextSize) while topTextSize[0] > imageSize[0]-20: fontSize = fontSize - 1 font = ImageFont.truetype("/home/{}/impact.ttf".format(myusr), fontSize) topTextSize = font.getsize(quotejs['contents']['quotes'][0]['quote']) #bottomTextSize = font.getsize(quotejs['contents']['quotes'][0]['quote']) #print(bottomTextSize) # find top centered position for top text topTextPositionX = (imageSize[0]/2) - (topTextSize[0]/2) topTextPositionY = 100 topTextPosition = (topTextPositionX, topTextPositionY) # find bottom centered position for bottom text #bottomTextPositionX = (imageSize[0]/2) - (bottomTextSize[0]/2) #bottomTextPositionY = imageSize[1] - bottomTextSize[1] -10 #bottomTextPosition = (bottomTextPositionX, bottomTextPositionY) draw = ImageDraw.Draw(img) outlineRange = int(fontSize/15) for x in range(-outlineRange, outlineRange+1): for y in range(-outlineRange, outlineRange+1): draw.text((topTextPosition[0]+x, topTextPosition[1]+y), quotejs['contents']['quotes'][0]['quote'], (0,0,0), font=font) #draw.text((bottomTextPosition[0]+x, bottomTextPosition[1]+y), quotejs['contents']['quotes'][0]['quote'], (0,0,0), font=font) draw.text(topTextPosition, quotejs['contents']['quotes'][0]['quote'], (255,255,255), font=font) #draw.text(bottomTextPosition, quotejs['contents']['quotes'][0]['quote'], (255,255,255), font=font) img2 = img.crop((0, 80, 610,200)) #img2.save("img2.jpg") img2.save("/home/{}/memetest/galleries/{}.jpg".format(myusr, 
str(quotejs['contents']['quotes'][0]['id']))) #print(gtm['id']) #filemh = gtm['id'] #print('hello') # - # + #print(reqote) # + #quotbs = bs4.BeautifulSoup(reqote.text) # + #print(quotbs) # + #allquote = quotbs.find('div', {'class' : "quoteText"}) # + #print(quotbs.find_all('a')) # + #for allq in allquote: # print(allq) # - accdict = ({logemail : dict({'firstname' : loginda, 'lastname' : loglast, 'email' : logemail, 'password' : '<PASSWORD>', 'gender': imgend, 'agehuman' : datebith.humanize(),'dob' : bodenter, 'id' : nexid, 'image' : '/galleries/{}.jpg', 'username' : userset, 'post' : '/posts/{}.md'.format(str(nexid), str(nexid))})}) # + #print(accdict) # - import json z = {**readdict, **accdict} z # + #json.dumps(accdict) # - with open('/home/pi/account.json', 'w') as blacc: blacc.write(json.dumps(z)) # + # #cat /home/pi/account.json # - timnow = arrow.now() timnowz = timnow.datetime # + #print(timnowz) # + with open('/home/{}/memetest/posts/{}.md'.format(myusr, str(nexid)), 'w') as resulmd: resulmd.write('{}\n\n![{}](/galleries/{}.jpg)\n\n First name: {}\n\nLast name: {}\n\nEmail: {}\n\nGender: {}\n\n{}\n\n![gender]({})\n\n![cloth]({})'.format(bothquote, str(nexid), str(nexid), loginda, loglast, logemail, imgend, emailcont, genpat, genshit)) with open ('/home/{}/memetest/posts/{}.meta'.format(myusr, str(nexid)), 'w') as opmetat: #opmetat.write("{}".format(str(curtim)) #for arage in alltags: # print(arage) opmetat.write('.. title: {}\n.. slug: {}\n.. date: {}\n.. tags: \n.. link:\n.. description:\n.. 
type: text'.format(str(nexid), str(nexid), timnowz)) # - #with open('/home/pi/hugosite/content/post/{}.md'.format(str(nexid)), 'w') as hupost: # hupost.write('+++\ndate = "{}"\ntitle = {}\n\n+++\n\nFirst name: {}\n\nLast name: {}\n\nUsername: {}\n\nEmail: {}\n\nGender: {}'.format(timnowz, str(nexid), loginda, loglast, logemail, imgend)) #hupost.write('+++\ndate = "{}"\ntitle = {}\n\n+++\n\nFirst name: '.format(timnowz)) # + #for salele in range(salelen): # descript = (salejs['sales'][salele]['description']) # imurl = (salejs['sales'][salele]['image_urls']['686x374'][0]['url']) # with open('/home/{}/memetest/posts/{}.md'.format(myusr, str(nexid)), 'a') as resulmd: # resulmd.write('\n\n![photo]({})\n\n'.format(imurl)) # - # + # #cat /home/pi/hugosite/content/post/6.md # -
posts/bestandless.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Anaconda) # language: python # name: anaconda3 # --- # Name: <NAME>, <NAME> # # Student ID: 2299636, 2307470 # # Email: <EMAIL>, <EMAIL> # # Course: CS510 Fall 2017 # # Assignment: Classwork 6 import cplane_np import pandas as pd import numpy as np from abscplane import AbsComplexPlane # ## Creating the Array Complex Plane Class # First, we created the class ArrayComplexPlane. The purpose of the class is to store and manipulate a grid of complex points. Our class inherited from AbsComplexPlane, which is an abstract base class with the general structure of our final class. The data was stored in a 2D array containing grid points. # # The only difference between this classwork assignment and last week's is that we used an array instead of a list to store our grid points. To do this, we imported numpy. # ## The Grid Structure # This method differs from the way we previously created the grid by using the numpy linspace and meshgrid methods along with creating a DataFrame with pandas. Using linspace and meshgrid, we eliminate the for loop implemented before. With pandas, we also are able to label our data nicely. We chose to label our columns as $x_i$ and our rows as $y_i\cdot1j$. def __create_grid(self): rx = np.linspace(self.xmin, self.xmax, self.xlen) ry = np.linspace(self.ymin, self.ymax, self.ylen) x,y = np.meshgrid(rx,ry) grid = x + y*1j col_names = ['x'+str(i+1) for i in range(self.xlen)] row_names = ['y{0}*i'.format(str(i+1)) for i in range(self.ylen)] grid = pd.DataFrame(grid, index=row_names, columns=col_names) return grid # ## The Apply Function # The apply function takes our points $z$, where $z=(x + y\cdot1j)$ and performs $f(z)$. In testing our program, we chose $f(z)=z+2$. 
#
# Using pandas, we were able to use less than half of the amount of code we had used in the previous classwork. We simply used the applymap method to apply our function f to our array. This allowed us to get rid of our for loop.

self.plane = self.plane.applymap(f)
self.fs.append(f)

# ## The Refresh Function

# The refresh function regenerates the complex plane and clears all functions that transform the plane. We did this by simply making fs an empty list and resetting the parameters for the 2D grid of points to regenerate the grid.
#
# This function did not differ much from the previous classwork's function.

self.fs = []
self.plane = self.__create_grid()

# ## The Zoom Function

# The zoom function takes new values for our xmin, xmax, xlen, ymin, ymax, and ylen values and applies $f(z)$ to our new values.
#
# This function also used the applymap method from pandas. However, we could not avoid the for loop over fs, because fs is a list of functions that cannot be put in an array.

for f in self.fs:
    self.plane = self.plane.applymap(f)

# ## Test Cases

# Our test module tests each of our functions.
#
# First, we tested our plane to make sure our program outputs the correct points.
#
# Next, we tested our apply function to test that our points carried out the function $f(z) = z + 2$ correctly.
#
# Then, we tested the zoom function. This test function applies our function $f(z)$ and then carries out our zoom function with new points. The zoom function also applies $f(z)$ to these new points. This test function checks that we get the correct output for this as well.
#
# Finally, we tested the refresh function. This test function applies $f(z)$ and then applies the refresh function. The refresh function should give us the original points for our plane. This test function checks that we get our original points.
cplane_np.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/G750cloud/20MA573/blob/master/HW9.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="eU0Cd5UQxeN8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="34891b61-3e91-4a07-aea1-5e8afbff0566"
import numpy as np


def init_value(N):
    """Initial iterate on an (N+1) x (N+1) grid over the unit square.

    Boundary nodes carry the exact boundary data
    (x - 1/2)^2 + (y - 1/2)^2; interior nodes start at zero.
    """
    g = np.arange(N + 1) / N                    # grid coordinates i/N
    x, y = np.meshgrid(g, g, indexing="ij")
    v = (x - 0.5) ** 2 + (y - 0.5) ** 2
    v[1:-1, 1:-1] = 0.0                         # zero initial guess inside
    return v


def F(u, N):
    """One sweep of the fixed-point (Jacobi-style) update.

    Boundary entries are copied unchanged from ``u``; every interior
    entry is rebuilt from its four neighbours plus the source term
    (h^2/2) * (x^2 + y^2 - x - y - 3/2), scaled by 2/(2 + h^2).
    Vectorised with array slices instead of the original per-node loops.
    """
    h = 1 / N
    g = np.arange(N + 1) * h
    x, y = np.meshgrid(g, g, indexing="ij")
    v = u.copy()                                # keeps boundary rows/cols
    src = (h ** 2 / 2) * (x ** 2 + y ** 2 - x - y - 3 / 2)
    nbrs = u[2:, 1:-1] + u[1:-1, 2:] + u[1:-1, :-2] + u[:-2, 1:-1]
    v[1:-1, 1:-1] = (2 / (2 + h ** 2)) * (src[1:-1, 1:-1] + nbrs / 4)
    return v


def VI(N, tolerance):
    """Iterate v <- F(v, N) until successive iterates differ by at most
    ``tolerance`` in the max norm.

    Returns [final_difference, iteration_count, solution_grid] -- the
    same list layout as before, so existing code indexing VI(...)[2]
    still works.
    """
    v = init_value(N)
    error = 1
    step = 0
    while error > tolerance:
        step += 1
        u = v
        v = F(u, N)
        error = np.max(np.abs(u - v))
    return [error, step, v]


def exact_value(N):
    """Exact solution sampled on the grid: (x - 1/2)^2 + (y - 1/2)^2."""
    g = np.arange(N + 1) / N
    x, y = np.meshgrid(g, g, indexing="ij")
    return (x - 0.5) ** 2 + (y - 0.5) ** 2


for i in range(4):
    N = 2 ** (i + 2)
    h = 1 / N
    # Solve once per N and reuse the result; the original called
    # VI(N, 0.0001) twice (once for the solution, once for the error),
    # doubling the work of the most expensive step.
    soln = VI(N, 0.0001)[2]
    error = np.max(np.abs(soln - exact_value(N)))
    print('h=' + str(h) + ',the CFD solution is \n ', soln)
    print('result error ', error)
HW9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np App_data = pd.read_csv('APP_data.csv') Soup_data = pd.read_csv('SP_data.csv') Salad_data = pd.read_csv('SLD_data.csv') Breakfast_data = pd.read_csv('BB_data.csv') Soup_data.shape
Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:spike_basico_CR2] * # language: python # name: conda-env-spike_basico_CR2-py # --- import pandas_gbq def cargamos_serie_semanal(): query = ''' WITH base_comunal_concentracion as( SELECT Time, comuna, region, AVG(PM25) as concentracion_pm25 --promediamos la concentraciรณn por comuna por hora FROM CR2.concentraciones_PM25_w_geo GROUP BY Time, comuna, region ), base_comunal_emision as( SELECT Time, comuna, region, SUM(EMI_PM25) as emision_pm25 --las emisiones las sumamos FROM CR2.emisiones_PM25_w_geo GROUP BY Time, comuna, region ), base_agrupada as( SELECT con.*, emi.emision_pm25 FROM base_comunal_concentracion as con JOIN base_comunal_emision as emi ON con.comuna=emi.comuna AND con.Time=emi.Time WHERE con.region!="Zona sin demarcar" ) SELECT EXTRACT(DAYOFWEEK from Time) as day, comuna, region, AVG(emision_pm25) as avg_emisionpm25, STDDEV(emision_pm25) as stddev_emisionpm25, AVG(concentracion_pm25) as avg_concentracionpm25, STDDEV(concentracion_pm25) as stddev_concentracionpm25 FROM base_agrupada GROUP BY day, comuna, region ''' # df = pandas_gbq.read_gbq(query, project_id='spike-sandbox', use_bqstorage_api=True) return df def filtrar_serie_daily_por_resolucion(df, resolucion, temporal = 'hora'): if resolucion == 'Todas las regiones': _df = df.groupby(['region',temporal])[['avg_emisionpm25', 'avg_concentracionpm25', 'stddev_emisionpm25', 'stddev_concentracionpm25']].mean().reset_index().copy() col_a_mirar = 'region' elif resolucion == 'Todas las comunas': _df = df.copy() col_a_mirar = 'comuna' else: _df = df.query('region==@resolucion') col_a_mirar = 'comuna' _df = simplificar_nombre_region(_df) return _df, col_a_mirar def simplificar_nombre_region(df): diccionario_regiones = {'Regiรณn de Coquimbo': 'Coquimbo' , 'Regiรณn de Valparaรญso': 'Valparaรญso' , 'Regiรณn Metropolitana de 
Santiago': 'Metropolitana' , "Regiรณn del Libertador Bernardo O'Higgins": "Lib B O'Higgins" , 'Regiรณn del Maule': 'Maule', 'Regiรณn de ร‘uble': 'ร‘uble' , 'Regiรณn del Bรญo-Bรญo': 'Bรญo-Bรญo' , 'Regiรณn de La Araucanรญa': 'La Araucanรญa' , 'Regiรณn de Los Rรญos': 'Los Rรญos' , 'Regiรณn de Los Lagos': 'Los Lagos', 'Regiรณn de Aysรฉn del Gral.Ibaรฑez del Campo': 'Aysรฉn' , 'Regiรณn de Magallanes y Antรกrtica Chilena': 'Magallanes'} return df.replace(diccionario_regiones).copy() import streamlit as st df = cargamos_serie_semanal() import plotly.graph_objects as go colores = [['rgb(31, 119, 180)', 'rgba(31, 119, 180, 0.2)'], ['rgb(255, 127, 14)', 'rgba(255, 127, 14, 0.2)'], ['rgb(44, 160, 44)', 'rgba(44, 160, 44, 0.2)'], ['rgb(214, 39, 40)', 'rgba(214, 39, 40, 0.2)'], ['rgb(148, 103, 189)', 'rgba(148, 103, 189, 0.2)'], ['rgb(140, 86, 75)', 'rgba(140, 86, 75, 0.2)'], ['rgb(227, 119, 194)', 'rgba(227, 119, 194, 0.2)'], ['rgb(127, 127, 127)', 'rgba(127, 127, 127, 0.2)'], ['rgb(188, 189, 34)', 'rgba(188, 189, 34, 0.2)'], ['rgb(23, 190, 207)', 'rgba(23, 190, 207, 0.2)']] TEMPLATE = "plotly_white" import pandas as pd def plot_weekly_curves(df : pd.DataFrame, resolucion = 'Todas las regiones', tipo = 'emision', leyenda_h=True): _df, col_a_mirar = filtrar_serie_daily_por_resolucion(df, resolucion, temporal = 'day') fig = go.Figure() x = list(df.sort_values(by='day').day.unique()) x_rev = x[::-1] y, y_upper, y_lower = {}, {}, {} k = 0 if tipo == 'emision': columnas = ['avg_emisionpm25', 'stddev_emisionpm25'] yaxis_title = r"$\mu g/m^3$" elif tipo == 'concentracion': columnas = ['avg_concentracionpm25', 'stddev_concentracionpm25'] yaxis_title = 'concentraciรณn promedio de pm25 [?]' for comuna in _df[col_a_mirar].unique(): if col_a_mirar == 'comuna': aux = _df.query('comuna==@comuna').copy() else: aux = _df.query('region==@comuna').copy() y[comuna] = list(aux[columnas[0]].values) y_upper[comuna] = list(y[comuna] + aux[columnas[1]].values) y_lower[comuna] = list(y[comuna] - 
aux[columnas[1]].values) y_lower[comuna] = y_lower[comuna][::-1] color = colores[k%len(colores)] k+=1 fig.add_trace(go.Scatter(x=x+x_rev, y=y_upper[comuna]+y_lower[comuna], fill='toself', fillcolor=f'{color[1]}', line_color=f'rgba(255,255,255,0)', showlegend=False, name=comuna, )) fig.add_trace(go.Scatter(x=x, y=y[comuna], line_color=f'{color[0]}', name=comuna, )) fig.update_traces(mode='lines') fig.update_layout(height=600, width=1000, yaxis=dict(title=yaxis_title), xaxis = dict(tickmode = 'array', tickvals = [1,2,3,4,5,6,7], ticktext = ['Lunes','Martes','Miรฉrcoles','Jueves','Viernes','Sรกbado','Domingo']), legend_title_text=col_a_mirar, template=TEMPLATE) if leyenda_h: fig.layout.update(legend=dict(x=-0.1, y=1.5)) fig.layout.update(legend_orientation="h",) fig.show() resolucion = 'Todas las regiones' leyenda_h = True plot_weekly_curves(df, resolucion=resolucion, tipo='emision', leyenda_h=leyenda_h)
app/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # APPLE APP STORE - Transforming Raw to Clean Data # _______________________ # # * Consideration: source data was scraped from the web # # ## Objectives: # # * Create a cleaned up version of the Apple Store Source Data by filtering: # # - Games with no reviews # - Duplicates # - Converting all ratings, reviews, installs, and price to uniform types and formats by column # # # * Subsequently, make sure there's no duplicate app names or double counting / aggegration; organize by apps, and remove exact duplicates, and or take the higher of the two # # # * Final Product consist of a Clean CSV File, Pie Chart and Bar Graph, # %matplotlib notebook import pandas as pd import numpy as np import matplotlib.pyplot as plt # Displaying manually clean dataset #read = "./resources/final_clean/Final_Apple.csv" read = "./resources/original_raw_data/appleappstore.csv" apple_df = pd.read_csv(read) apple_df.head() # Preview columns and count of each apple_df.count() # Drop unnecessary columns cols = [0, 1, 3, 7, 9, -7, -6, -4, -3, -2, -1] apple_df.drop(apple_df.columns[cols], axis=1, inplace=True) apple_df.head() # Rename Columns apple_df.rename(columns={'track_name': 'App', 'currency':'Currency', 'price':'Price', 'rating_count_tot': 'Reviews', 'user_rating': 'Ratings', 'prime_genre': 'Category'}, inplace=True) apple_df.head() # Rearrange columns to match Google Play Store Set Up apple_df = apple_df[['App','Category','Ratings','Reviews', 'Price', 'Currency']] apple_df.head() apple_df['Category'].value_counts() # Sort Values by Reviews apple_df = apple_df.sort_values(by= ['Reviews'], ascending=False) apple_df.head(10) #Elimanting values in our dataset (lower 75%) in order to get a smaller sample size top_quartile = np.percentile(apple_df['Reviews'], 75) top_quartile 
#Creating a new dataframe which shows the top 25% top_quartile_data_df = apple_df.loc[apple_df['Reviews'] > top_quartile] top_quartile_data_df # + #Combining certain Categories in order to clean up the data even more and reducing the number of category for our pie and bar charts top_quartile_data_df['Category'] = [x.replace("Book","Education") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("News","Education") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Catalogs","Education") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Food & Drink","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Music","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Shopping","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Health & Fitness","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Entertainment","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Medical","Lifestyle") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Social Networking","Social") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Weather","Travel") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Navigation","Travel") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Reference","Utility") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Photo & Video","Utility") for x in top_quartile_data_df['Category']] top_quartile_data_df['Category'] = [x.replace("Utilities","Utility") for x in top_quartile_data_df['Category']] 
top_quartile_data_df['Category'] = [x.replace("Finance","Business") for x in top_quartile_data_df['Category']] # - # Extract unique categories and respective total counts, and sort by index (Category Name) in Ascending Order category_sizes = top_quartile_data_df['Category'].value_counts() category_sizes = list(category_sizes.sort_index(ascending=True)) print(category_sizes) # Extract all unique category names only, and put in ascending order categories = top_quartile_data_df['Category'].unique() categories_list = list(categories) categories.sort() print(categories) # + # Creating the Pie Chart plt.figure(1, figsize=(6.5,6.5)) #members = [16, 21, 377, 98, 15, 32, 15, 24, 43] #categories = ["Business","Education","Games","Lifestyle","Productivity","Social Networking","Sports","Travel","Utlity"] explode = (0, 0, 0.05, 0, 0, 0, 0, 0, 0) plt.pie(category_sizes, labels=categories, explode=explode, shadow=False, startangle=45) plt.title("Apple App Store \n Data As Of: June 2018") plt.axis("equal") #Saving the pie chart plt.savefig("./images/Apple_Pie_Chart.png", bbox_inches='tight') # - #Exporting the final clean data to it's own CSV file top_quartile_data_df.to_csv('./resources/final_clean/final_apple_data.csv', encoding='utf-8') # Read in final clean data and drop the unnecessary first column that results from reading in a csvfile top_quartile_data_df = pd.read_csv("./resources/final_clean/final_apple_data.csv", encoding='utf-8') top_quartile_data_df.drop(top_quartile_data_df.columns[0], axis=1, inplace=True) top_quartile_data_df.head(10) facebook_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Facebook'] facebook_rating instagram_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Instagram'] instagram_rating coc_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Clash of Clans'] coc_rating templerun_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Temple Run'] templerun_rating pandora_rating = 
top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Pandora - Music & Radio'] pandora_rating pinterest_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Pinterest'] pinterest_rating bible_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Bible'] bible_rating candycrushsaga_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Candy Crush Saga'] candycrushsaga_rating spotify_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Spotify Music'] spotify_rating angrybirds_rating = top_quartile_data_df.loc[top_quartile_data_df['App'] == 'Angry Birds'] angrybirds_rating # # Creating bar charts for Categories v Average Rating #For our bar chart we want the x-axis = categories, y-axis = average rating #Creating a new dataframe with just Category and Specific Average Ratings new_df = top_quartile_data_df[['Category', 'Ratings']].copy() new_df.head() # Extract unique categories and respective total counts, and sort by index (Category Name) in Ascending Order category_sizes = top_quartile_data_df['Category'].value_counts() category_sizes = list(category_sizes.sort_index(ascending=True)) print(category_sizes) # + #Finding the average for each category #Business business = float(new_df[new_df['Category'].str.contains("Business")].mean()) #Education education = float(new_df[new_df['Category'].str.contains("Education")].mean()) #Games games = float(new_df[new_df['Category'].str.contains("Games")].mean()) #Lifestyle lifestyle = float(new_df[new_df['Category'].str.contains("Lifestyle")].mean()) #Productivity productivity = float(new_df[new_df['Category'].str.contains("Productivity")].mean()) #Social Networking social = float(new_df[new_df['Category'].str.contains("Social")].mean()) #Sports sports = float(new_df[new_df['Category'].str.contains("Sports")].mean()) #Travel travel = float(new_df[new_df['Category'].str.contains("Travel")].mean()) #Utility utility = 
float(new_df[new_df['Category'].str.contains("Utility")].mean()) ys = [business, education, games, lifestyle, productivity, social, sports, travel, utility] print(ys) # - # Extract all unique category names only, and put in ascending order categories_b = new_df['Category'].unique() categories_b = list(categories) categories_b.sort() print(categories_b) # + #Charting out the bar graph plt.figure(2, figsize=(6,5)) #x = ["Business","Education","Games","Lifestyle","Productivity","Social Networking","Sports","Travel","Utlity"] #y = [4.41, 4.02, 4.30, 4.14, 4.37, 3.70, 3.47, 4.0, 3.91 ] plt.bar(categories_b, ys, color='royalblue', alpha=.6, align="center") plt.grid(color='grey', linestyle='--', linewidth=2, axis='y', alpha=.7) plt.xticks(rotation="45") plt.title("Apple Store: Categories vs Average Rating") plt.xlabel("Categories") plt.ylabel("Average Rating") plt.show() plt.savefig("./images/Apple_Bar_Graph.png", bbox_inches='tight')
.ipynb_checkpoints/Final_Apple_Code-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hide_input=true import numpy as np import matplotlib.pyplot as plt from scipy.constants import m_e, m_p, pi fsize = 12 mi = m_p*39.948 # argon def phi_Te(g): return g/(g-1)*( 1 - (2*pi*g*m_e/mi)**((g-1)/(g+1))) # + hide_input=true from scipy.optimize import fsolve def eq(g, chi, f=1): return (1 + chi )*(1-chi)**((g-1)/2) - (2*f*np.pi*g*m_e/mi)**((g-1)/2) def dphi(g, f=1): chi_initial_guess = -2 chi_solution = fsolve(lambda c:eq(g,c,f), chi_initial_guess)[0] return -chi_solution*g/(g-1) # - gvect = np.linspace(1.000001,2.5,100) dphivect = np.array([ dphi(g) for g in gvect]) plt.style.use("presentation") # + hide_input=true f, ax = plt.subplots(1, 1, figsize = (6.4, 4)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) for f in [0.1, 0.64, 1]: dphivect = np.array([ dphi(g, f=f) for g in gvect]) ax.plot(gvect, dphivect, '-b', linewidth = 2, label="new") ax.set_xlabel("$\gamma$", fontsize = fsize) ax.set_ylabel("$e\phi_s / (k_B T_{e,0})$", fontsize = fsize) ax.set_ylim(0,5) ax.set_xlim(1,1.7) ax.grid() # + hide_input=true f, ax = plt.subplots(1, 1, figsize = (6.4, 4)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) ax.plot(gvect, dphivect, '-b', linewidth = 2, label="new") ax.set_xlabel("$\gamma$", fontsize = fsize) ax.set_ylabel("$e\phi_s / (k_B T_{e,0})$", fontsize = fsize) ax.set_ylim(0,5) ax.set_xlim(1,1.7) ax.grid() # plt.savefig("../figures/phinorm_theory.pdf") # - # # PIC values # + hide_input=true from astropy.constants import m_e, e, k_B, m_p from scipy.constants import pi k = k_B.value me = m_e.value q = e.value mi = 40* m_p import numpy as np import matplotlib.pyplot as plt # %matplotlib notebook import json with open("PIC_data.dat", "r") as f: pass with 
open("NewPic1D.dat", "r") as f: data = json.load(f) # + code_folding=[0] def findPhi(k='0'): Te = np.array(data[k]["Te2"]) phi = np.array(data[k]["phi"]) ne = np.array(data[k]["ne"]) ni = np.array(data[k]["ni"]) indexsheath = np.argwhere(np.abs(ni - ne)/ne.mean() < 0.1)[0][0] phinorm = phi/Te print(indexsheath, phinorm[indexsheath]) for k in ["0","1","2", "3"]: findPhi(k) # + code_folding=[0, 11, 27] def gammaofK(k): """Fit the polytropic coef""" Te = np.array(data[k]["Te2"]) ne = np.array(data[k]["ne"]) ln_n = np.log(ne/ne.max()) ln_p = np.log((ne*Te)/(ne*Te).max()) gamma, a = np.polyfit(ln_n, ln_p, 1) return gamma def indexUb(k): """return the best index for the modified bhom criteriom""" Te = np.array(data[k]["Te2"]) Ji = np.array(data[k]["Ji"]) ni = np.array(data[k]["ni"]) # E = np.gradient(phi,x*0.01) # electrif field in V/m vi = Ji/(ni * q) gamma = gammaofK(k) ub = np.sqrt(gamma*q*Te/mi.value) tmp = np.argmin(np.abs(np.abs(vi) -ub)[:len(ni)//2]) return tmp def indexquasineutral(k, rtol = 0.1): """return the best index for the quisineutrality""" ne = np.array(data[k]["ne"]) ni = np.array(data[k]["ni"]) tmp = np.argwhere(np.abs(ni - ne)/ne.mean()< rtol)[0][0] print(tmp) return tmp # + k = "0" Te = np.array(data[k]["Te2"]) ne = np.array(data[k]["ne"]) Ji = np.array(data[k]["Ji"]) ni = np.array(data[k]["ni"]) phi = np.array(data[k]["phi"]) # E = np.gradient(phi,x*0.01) # electrif field in V/m vi = Ji/(ni * q) gamma = gammaofK(k) ub = np.sqrt(gamma*q*Te/mi.value) # + code_folding=[6] klist = ['0','1','2',"3","4", "5"] Nk = len(klist) simu_gamma = np.zeros(Nk) simu_phiUb = np.zeros(Nk) simu_phiqn = np.zeros(Nk) for i,k in enumerate(klist): simu_gamma[i] = gammaofK(k) Te = np.array(data[k]["Te2"]) phi = np.array(data[k]["phi"]) phinorm = phi/Te i1 = indexUb(k) simu_phiUb[i] = phinorm[i1] i2 = indexquasineutral(k, rtol=0.05) simu_phiqn[i] = phinorm[i2] print(simu_gamma) print(simu_phiUb) theo = np.array([ dphi(g) for g in simu_gamma]) print(theo) 
print(np.mean((simu_phiUb - 4.68)/4.68*100)) print(np.mean((simu_phiUb -theo)/4.68*100)) print((simu_phiqn - 4.68)/4.68*100) print((simu_phiqn - theo)/theo*100) # + f, ax = plt.subplots(1, 1, figsize = (4, 4)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) ax.plot(gvect, dphivect, '-b', linewidth = 2, label="Theory") ax.errorbar(simu_gamma, simu_phiUb, yerr=0.1*simu_phiUb, xerr=0.0*simu_gamma, ls="", marker="o",c="r", label="PIC results") if False: ax.errorbar(simu_gamma, simu_phiqn, yerr=0.1*simu_phiqn, xerr=0.05*simu_gamma, ls="", marker="o",c="g", label="PIC quasiNeutrality") ax.set_xlabel("$\gamma$", fontsize = fsize) ax.set_ylabel("$e\phi_s / (k_B T_{e,0})$", fontsize = fsize) ax.set_ylim(0,5) ax.set_xlim(1,2.3) ax.grid() ax.legend() plt.tight_layout() plt.savefig("../figures/phinorm_theoryAndPIC.pdf") # - # # Fitting from scipy import optimize # %matplotlib notebook # + gvect = np.linspace(1.000001,2,120) dphivect = np.array([ dphi(g) for g in gvect]) f, ax = plt.subplots(1, 1, figsize = (6.4, 4)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) ax.plot(gvect, dphivect, '-b', linewidth = 2, label="Theory") # + def test_func(x, a, b,c): return a + b/x**c x_data = np.array(gvect) y_data = np.array(dphivect) params, params_covariance = optimize.curve_fit(test_func, x_data, y_data, p0=[2, 2,1]) ax.plot(x_data, test_func(x_data, *params), label='Fitted function') params = [0.7, 4.1, 1.7] ybar = y_data.mean() sstot = ((y_data - ybar)**2).sum() ssreg = ((test_func(x_data, *params) - ybar)**2).sum() print(ssreg / sstot) # - # # Electron mean energy from astropy import units as u # + with open("ICP_data.dat", "r") as f: dataICP = json.load(f) klist = ['0'] with open("NewPic1D.dat", "r") as f: data = json.load(f) klist = ['0','1','2',"3","4","ICP"] data["ICP"] = dataICP["0"] # + code_folding=[] Nk = len(klist) simu_v = np.zeros(Nk) simu_e = np.zeros(Nk) simu_gamma = np.zeros(Nk) for i,k in 
enumerate(klist): simu_gamma[i] = gammaofK(k) Te = np.array(data[k]["Te2"]) phi = np.array(data[k]["phi"]) phinorm = phi/Te i1 = indexUb(k) Te0 = Te[i1] simu_v[i] = ((data[k]["vwall"]*u.m/u.s)**2/(Te0*u.eV) *m_e).decompose()/2 simu_e[i] = (data[k]["energwall"]*u.eV/(Te0*u.eV)).decompose() print( data[k]["energwall"], Te0) # + f, ax = plt.subplots(1, 1, figsize = (4, 4)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) ax.plot(gvect,2*( 1 - (gvect-1)/gvect*dphivect), '-b', linewidth = 2, label="Eq. 39") #ax.plot(simu_gamma,simu_v, 'ko', linewidth = 2, label="PIC simulations") ax.errorbar(simu_gamma[:-1],simu_v[:-1],yerr=0.15*simu_v[:-1],c="crimson", fmt="o", label="PIC simulations ${\\bf M1}$") #ax.errorbar(simu_gamma[:-1],simu_e[:-1]/3,yerr=0.15*simu_v[:-1],c="b", fmt="o", label="PIC simulations ${\\bf M1}$") #ax.errorbar(simu_gamma[-1:],simu_v[-1:],yerr=0.15*simu_v[-1:],c="b", fmt="o", label="PIC simuations ${\\bf M2}$") #ax.errorbar(simu_gamma,simu_e,yerr=0.15*simu_v,c="b", fmt="o", label="PIC simuations (E)") ax.set_xlabel("$\gamma$", fontsize = fsize) ax.set_ylabel("$Q_e/(\Gamma_e {\\rm T}_{e0})$", fontsize = fsize) ax.set_ylim(0,2.5) ax.set_xlim(1,2) ax.grid() ax.legend() plt.grid() plt.tight_layout() plt.savefig("../figures/meanelectronenergy_PIC.pdf") # + f, ax = plt.subplots(1, 1, figsize = (3.5, 3.5)) plt.subplots_adjust(left=0.2, bottom=0.12, right=0.84, top=0.97, wspace=0.0, hspace=.0) ax.plot(gvect,2*( 1 - (gvect-1)/gvect*dphivect), '-b', linewidth = 2, label="Eq. 38") ax.set_xlabel("polytropic index $\gamma$", fontsize = fsize) ax.set_ylabel("$Q_e/(\Gamma_e T_{e,0})$", fontsize = fsize) ax.set_ylim(0,2.5) ax.set_xlim(1,2) ax.grid() ax.legend() plt.grid() plt.tight_layout() plt.savefig("../figures/meanelectronenergy.pdf") # -
src/Chapitre3/figure/dphinorm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# Exploratory analysis of the NYPD historic arrests dataset:
# load, clean, derive date parts, then plot arrest counts by
# year / month / offence / age group / gender.

# +
#install.packages("readr")
#install.packages("plyr")
#install.packages("dplyr")
#install.packages("ggplot2")
#install.packages("corrplot")
# -

# Load packages.
# NOTE: plyr is attached before dplyr, so dplyr::count() masks plyr::count();
# all count() calls below therefore use the dplyr version.
library(readr)
library(plyr)
library(dplyr)
library(ggplot2)
library(corrplot)

# Set working directory
setwd("/Users/asamra/dev/R_Project")

# Import CSV of historic NYPD arrest records
nyc_crime <- read_csv("/Users/asamra/dev/R_Project/NYPD_Arrests_Data__Historic_.csv")

# FIX: every transformation below operates on `nyc_crime2`, which was never
# defined in the original script (it failed at the first assignment).
# Create it as a working copy so the raw import stays untouched.
nyc_crime2 <- nyc_crime

# View the first records (head() shows 6 rows by default)
head(nyc_crime)

# View statistical summary
summary(nyc_crime)

# Change ARREST_DATE in nyc_crime2 from character to date
nyc_crime2$ARREST_DATE <- as.Date(nyc_crime2$ARREST_DATE, format = "%m/%d/%Y")

# Order ARREST_DATE by ascending
nyc_crime2 <- nyc_crime2[order(nyc_crime2$ARREST_DATE),]

# Order ARREST_DATE by descending
nyc_crime2 <- nyc_crime2[rev(order(nyc_crime2$ARREST_DATE)),]

# Create new columns for Year Month Day
nyc_crime2$ARREST_YEAR <- format(as.Date(nyc_crime2$ARREST_DATE, format = "%m/%d/%Y"),"%Y")
nyc_crime2$ARREST_MONTH <- format(as.Date(nyc_crime2$ARREST_DATE, format = "%m/%d/%Y"),"%m")
nyc_crime2$ARREST_DAY <- format(as.Date(nyc_crime2$ARREST_DATE, format = "%m/%d/%Y"),"%d")

# Count all rows
count(nyc_crime2)

# Count rows based on condition
# Shows count of all arrests in 2006
length(which(nyc_crime2$ARREST_YEAR == "2006"))

# Count / Group rows by year, month and day using dplyr::count()
count(nyc_crime2, ARREST_YEAR)
count(nyc_crime2, ARREST_MONTH)
count(nyc_crime2, ARREST_DAY)

# Filter using dplyr and create new df
df_2006_dplyr <- filter(nyc_crime2, (ARREST_YEAR == 2006))

# Filter all rows by multiple conditions using dplyr
# (KY_CD 235 is the offence code used for drug arrests throughout)
df_2006_drugs <- filter(nyc_crime2, (ARREST_YEAR == 2006 & KY_CD == 235))

# Plot total arrests per year
nyc_crime2 %>%
    group_by(ARREST_YEAR) %>%
    summarize(total_arrests = n()) %>%
    ggplot( aes ( x = ARREST_YEAR, y = total_arrests, group = 1 ) ) +
    geom_line()

# Plot total drug arrests for all years
nyc_crime2 %>%
    filter(KY_CD == 235) %>%
    group_by(ARREST_YEAR) %>%
    summarize(drug_arrests = n()) %>%
    ggplot( aes ( x = ARREST_YEAR, y = drug_arrests, group = 1 ) ) +
    geom_line()

# Plot total arrests with line for each year
nyc_crime2 %>%
    group_by(ARREST_YEAR, ARREST_MONTH) %>%
    summarize(total_arrests = n()) %>%
    ggplot( aes ( x = ARREST_MONTH, y = total_arrests, group = ARREST_YEAR, color = ARREST_YEAR) ) +
    geom_line()

# Plot total drug arrests by year bar
nyc_crime2 %>%
    filter(KY_CD == 235) %>%
    group_by(ARREST_YEAR) %>%
    summarize(drug_arrests = n()) %>%
    ggplot( aes ( x = ARREST_YEAR, y = drug_arrests, group = ARREST_YEAR) ) +
    geom_bar(stat = 'identity', fill = 'steelblue')

# Top 10 crimes by name
crimes_by_OFNS_DESC <- nyc_crime2 %>%
    #filter(ARREST_YEAR == 2018) %>%
    group_by(OFNS_DESC) %>%
    summarize(total_arrests = n())

# order top 10 crimes desc
top10_name <- top_n(crimes_by_OFNS_DESC, 10, total_arrests) %>%
    arrange(desc(total_arrests))

# Plot total arrests of top crimes
top10_name %>%
    group_by(OFNS_DESC) %>%
    #summarize(total_arrests = n()) %>%
    ggplot( aes ( x = OFNS_DESC, y = total_arrests, group = 1) ) +
    geom_bar(stat = 'identity', fill = 'steelblue') +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))

# Plot by filtering by age groups
nyc_crime2 %>%
    filter(AGE_GROUP %in% c("25-44","65+","45-64","18-24","<18")) %>%
    group_by(AGE_GROUP) %>%
    summarize(total_arrests = n()) %>%
    ggplot( aes ( x = AGE_GROUP, y = total_arrests) ) +
    geom_bar(stat = 'identity', fill = 'steelblue')

# Pie plot arrests by gender
nyc_crime2 %>%
    group_by(PERP_SEX) %>%
    summarize(total_arrests = n()) %>%
    ggplot(aes(x = "", y = total_arrests, fill=PERP_SEX)) +
    geom_bar(stat = 'identity', width=1) +
    coord_polar('y', start=0) +
    theme_void()
R/nyc_crime.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.12 ('matlab')
#     language: python
#     name: python3
# ---

# Baseline MLP for the tabular-playground competition: load the engineered
# feature set named in config/default.json, standardize it, and fit a small
# (units x layers) PReLU/BatchNorm/Dropout network with a 10-class softmax head.

from sklearn.preprocessing import StandardScaler
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras.models import Sequential, load_model
from tensorflow.keras.layers import BatchNormalization
from keras.layers.core import Activation, Dense, Dropout
from keras.layers.advanced_activations import PReLU
import numpy as np
import pandas as pd
import pandas_profiling as pdp
import matplotlib.pyplot as plt
import seaborn as sns
import models
from models.evaluation import cross_validation_predict, cross_validation_score
from sklearn.metrics import accuracy_score
from utils import load_datasets, load_target, save_submission
import json
# NOTE: the original re-imported np_utils here; the duplicate was removed.

sns.set_style('darkgrid')
# %matplotlib inline

# FIX: use a context manager so the config file handle is closed
# (the original `json.load(open(...))` leaked it).
with open('./config/default.json') as _cfg:
    config = json.load(_cfg)

# X_train, X_test = load_datasets(["Age", "AgeSplit", "EducationNum"])
X_train, X_test = load_datasets(config['features'])
y_train = load_target('target')

# +
scaler = StandardScaler()
scaler.fit(X_train)

# Hyperparameters (deliberately tiny -- this notebook is a smoke test).
patience = 1
layers = 2
dropout = 0.1
units = 3
nb_epoch = 3
batch_size = 128

model = Sequential()
# First hidden block needs the input shape; subsequent blocks infer it.
model.add(Dense(units, input_shape=(X_train.shape[1], )))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(dropout))
for l in range(layers - 1):
    model.add(Dense(units))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(dropout))
# 10-way softmax output, matching num_classes=10 in to_categorical below.
model.add(Dense(10))
model.add(Activation('softmax'))

# NOTE(review): monitoring 'loss' (training loss) because fit() is called
# without validation data; with validation this should monitor 'val_loss'.
early_stopping = EarlyStopping(
    monitor='loss', patience=patience, verbose=0, restore_best_weights=True)

model.compile(loss='categorical_crossentropy',
              optimizer='adagrad', metrics=['accuracy'])
# -

# Train on the standardized features against one-hot targets.
history = model.fit(scaler.transform(X_train),
                    np_utils.to_categorical(y_train, num_classes=10),
                    epochs=nb_epoch,
                    batch_size=batch_size,
                    verbose=0,
                    callbacks=[early_stopping])

y_train.shape

model.predict(scaler.transform(X_train))

np_utils.to_categorical(y_train, num_classes=10)
tabular-playground-series-feb-2022/test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Analyze A/B Test Results
#
# ## Table of Contents
# - [Introduction](#intro)
# - [Part I - Probability](#probability)
# - [Part II - A/B Test](#ab_test)
# - [Part III - Regression](#regression)
#
#
# <a id='intro'></a>
# ### Introduction
#
# A/B tests are very commonly performed by data analysts and data scientists. It is important to get some practice working with the difficulties of these.
#
# For this project, I will be working to understand the results of an A/B test run by an e-commerce website. The goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.
#
# <a id='probability'></a>
# #### Part I - Probability
#
# To get started, let's import the libraries.

# +
# to download the data
import requests
import zipfile
from io import BytesIO
import os

# for the analysis
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
# %matplotlib inline

# FIX: the simulations below draw from np.random.binomial, so NumPy's
# generator must be seeded for reproducible results -- seeding only the
# stdlib `random` module (as the original did) had no effect on them.
random.seed(42)
np.random.seed(42)
# -

# > Download the data

# +
url = "https://d17h27t6h515a5.cloudfront.net/topher/2017/December/5a32c9a0_analyzeabtestresults-2/analyzeabtestresults-2.zip"

# download data
req = requests.get(url)
file = zipfile.ZipFile(BytesIO(req.content))

# extract file
file.extract("AnalyzeABTestResults 2/ab_data.csv")
file.extract("AnalyzeABTestResults 2/countries.csv")

# clean up: move the CSVs to the working directory and drop the zip folder
os.rename("AnalyzeABTestResults 2/ab_data.csv", "ab_data.csv")
os.rename("AnalyzeABTestResults 2/countries.csv", "countries.csv")
os.rmdir("AnalyzeABTestResults 2")
# -

# > Read the dataset and look at some generic information

df = pd.read_csv("ab_data.csv")
df.head()

print(f"The dataset contains {df.shape[0]} rows")
print(f"There are {df.user_id.unique().shape[0]} unique users in the dataset")

# check how many times on average there is a duplicated user
df[df.user_id.duplicated()].user_id.value_counts().mean()

# +
# count the occurrences of converted = 1 and converted = 0
not_conv, conv = df.drop_duplicates("user_id").converted.value_counts()

print(f"There are {conv} unique users converted which corresponds to {conv/(conv+not_conv)*100}%")

# +
# rows where group and landing_page disagree (mis-assigned observations)
lineup_err = df.query("(group == 'treatment' and landing_page != 'new_page') or"
                      "(group == 'control' and landing_page != 'old_page')").shape[0]

print(f"There are {lineup_err} rows where the new_page and treatment don't line up.")
# -

df.info()

# > **All rows contain values as we can see from the Non-Null Count.**
# > For the rows where **treatment** is not aligned with **new_page** or **control** is not aligned with **old_page**, we cannot be sure if this row truly received the new or old page. For this reason we will drop those rows.

df2 = df.query("(group == 'treatment' and landing_page == 'new_page') or"
               "(group == 'control' and landing_page == 'old_page')")

# Double Check all of the correct rows were removed - this should be 0
df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]

# > How many unique **user_id**s are in the newly created dataset (df2)?

print(f"There are {df2.user_id.unique().shape[0]} unique users out of {df2.shape[0]} rows")

# > Let's find out which is the **user_id** repeated in **df2**.

df2.user_id[df2.user_id.duplicated()]

df2.query("user_id == 773192").head()

# > At this point we can remove the duplicate.

df2 = df2.drop(2893)
df2.query("user_id == 773192").head()

# > What is the probability of an individual converting regardless of the page they receive?

# +
not_conv, conv = df2.converted.value_counts()

print(f"The probability of an individual converting regardless of the page they receive"
      f" is {conv/(conv+not_conv):.4f}")
# -

# > Given that an individual was in the `control` group, what is the probability they converted?

# +
not_conv, conv = df2.query("group == 'control'").converted.value_counts()

print(f"The probability of an individual converting in the control group "
      f"is {conv/(conv+not_conv):.4f}")
# -

# > Given that an individual was in the `treatment` group, what is the probability they converted?

# +
not_conv, conv = df2.query("group == 'treatment'").converted.value_counts()

print(f"The probability of an individual converting in the treatment group "
      f"is {conv/(conv+not_conv):.4f}")
# -

# > What is the probability that an individual received the new page?

# +
new, not_new = df2.landing_page.value_counts()

print(f"The probability of an individual receiving the new page "
      f" is {new/(new+not_new):.4f}")
# -

# > **We can see that the probability of an individual receiving the new page is ~50%, almost perfectly balanced, so this means that there is no difference in conversion based on giving more opportunities. With that in mind, if we focus our attention on the difference in conversion probability between control and treatment we see a conversion decrease of ~0.16% in the group treatment. This is not yet evidence since we need to check if this delta is statistically significant. In addition to that, we should also take into consideration the time in which the test ran and see if there are influences associated with time.**

# <a id='ab_test'></a>
# ### Part II - A/B Test
#
# Notice that because of the time stamp associated with each event, we could technically run a hypothesis test continuously as each observation was observed.
#
# However, then the hard question is do we stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time?  How long do you run to render a decision that neither page is better than another?
#
# These questions are the difficult parts associated with A/B tests in general.
#
# For now, consider we need to make the decision just based on all the data provided. If we want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, the null and alternative hypotheses would then be
# $$H_0: P_{new} - P_{old} \leq 0$$
# $$H_1: P_{new} - P_{old} > 0$$

# Let's assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have "true" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, let's assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. <br><br>
#
# We will now go through a step by step process to perform A/B testing.

# > What is the **convert rate** for $p_{new}$ under the null?

p_new = df2['converted'].mean()
p_new

# > What is the **convert rate** for $p_{old}$ under the null? <br><br>

p_old = df2['converted'].mean()
p_old

# > What is $n_{new}$?

n_new = len(df2.query("landing_page == 'new_page'"))
n_new

# > What is $n_{old}$?

n_old = len(df2.query("landing_page == 'old_page'"))
n_old

# > We can now simulate $n_{new}$ transactions with a convert rate of $p_{new}$ under the null.

new_page_converted = np.random.binomial(n_new,p_new)

# > We will now simulate $n_{old}$ transactions with a convert rate of $p_{old}$ under the null.

old_page_converted = np.random.binomial(n_old,p_old)

# > We now find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).

diff = new_page_converted/n_new - old_page_converted/n_old
diff

# > Now we simulate 10,000 $p_{new}$ - $p_{old}$ values using this same process above and store all 10,000 values in a numpy array called **p_diffs**.

# +
iterations = 10000
p_diffs = np.zeros(iterations)
for i in range(10000):
    new_page_converted = np.random.binomial(n_new,p_new)
    old_page_converted = np.random.binomial(n_old, p_old)
    p_diffs[i] = new_page_converted/n_new - old_page_converted/n_old
# -

plt.hist(p_diffs);

# > What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?

# +
# observation difference in ab_data.csv
obs_diff = df2.query("group == 'treatment'").converted.mean() - df2.query("group == 'control'").converted.mean()
print("observation difference: ", obs_diff)

# Compute p-value
print("P-value: ", (p_diffs > obs_diff).mean())
# -

# > **We have just calculated the P-value, which is a number describing how likely it is that the data would have occurred under the null hypothesis.**
#
# > **The closer the P-value is to zero, the stronger is the evidence against the null hypothesis. In this case we fail to reject the null hypothesis since the P-value is above 0.05.**

# > We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance.

# +
import statsmodels.api as sm

convert_old = len(df2.query(" landing_page == 'old_page' and converted == 1"))
convert_new = len(df2.query(" landing_page == 'new_page' and converted == 1"))
n_old = len(df2.query("landing_page == 'old_page'"))
n_new = len(df2.query("landing_page == 'new_page'"))
# -

# alternative='smaller' tests H1: p_old < p_new, matching the one-sided setup above
z_score, p_value = sm.stats.proportions_ztest([convert_old, convert_new], [n_old, n_new], alternative='smaller')
print(z_score, p_value)

# > **We can see that we are getting the same results if we use the statsmodels API. In this case we got in addition the Z-score, which is a numerical measurement that describes a value's relationship to the mean of a group of values and it is measured in terms of numbers of standard deviations from the mean.**

# <a id='regression'></a>
# ### Part III - A regression approach
#
# In this final part, we will see that the result we achieved in the previous A/B test can also be achieved by performing regression.<br><br>
#
# Since each row is either a conversion or no conversion, we will be performing a simple logistic regression.

df2["intercept"] = 1
df2["ab_page"] = pd.get_dummies(df2["group"]).drop("control", axis=1)
df2.head()

model = sm.Logit(df2.converted,df2[["intercept" ,"ab_page"]])
results = model.fit()
results.summary()

# > **P-value = 0.19**
#
# > **In this case we can see that the P-value is different because it considers the null and alternative hypotheses as follow**:
#
# $$H_0: P_{new} - P_{old} = 0$$
# $$H_1: P_{new} - P_{old} \neq 0$$
#
# > **Since we are testing for the hypothesis to be not equal in this case, also the p-value changes.**

# > **Other factors more specific to the individual's profile might have a stronger influence. For example the age or gender of the user and the nationality.**
#
# > **Adding additional terms may bring some disadvantages, for example multicollinearity. If we add highly correlated predictor variables, it leads to unreliable and unstable estimates of regression coefficients, which affect our model.**

# > Now along with testing if the conversion rate changes for different pages, we will also add an effect based on which country a user lives.

countries_df = pd.read_csv('./countries.csv')
df_new = countries_df.set_index('user_id').join(df2.set_index('user_id'), how='inner')
df_new.head()

df_new.country.unique()

# one-hot encode country, keeping UK and US (the third country is the baseline)
df_new[["UK","US"]] = pd.get_dummies(df_new["country"])[["UK","US"]]
df_new.head()

# > Though we have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there are significant effects on conversion.

model = sm.Logit(df_new.converted, df_new[["intercept", "UK", "US"]])
results = model.fit()
results.summary()

np.exp(results.params)

# ## Conclusion
# By analyzing the coefficients in this logistic regression model, we can see a minimal change between the 3 countries. In addition we can see the p-values of UK and US being above 5%.
#
# Based on the evidence found in the data and the same outcome from multiple techniques, we fail to reject the null hypothesis, meaning we have to keep the old page.
#
# **Notice:** The conclusion is derived from the provided data. The conclusion might change if more data from a different source were provided.
Analyse_AB_Test_Results/Analyze_ab_test_results_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] tags=["papermill-error-cell-tag"] # <span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at '<a href="#papermill-error-cell">In [2]</a>'.</span> # + [markdown] papermill={"duration": 0.031109, "end_time": "2022-01-03T06:06:19.546321", "exception": false, "start_time": "2022-01-03T06:06:19.515212", "status": "completed"} tags=[] # # PA005: High Value Customer Identification ( Insiders ) # + [markdown] papermill={"duration": 0.03036, "end_time": "2022-01-03T06:06:19.605897", "exception": false, "start_time": "2022-01-03T06:06:19.575537", "status": "completed"} tags=[] # # 0.0 Imports # + papermill={"duration": 4.266636, "end_time": "2022-01-03T06:06:23.902305", "exception": false, "start_time": "2022-01-03T06:06:19.635669", "status": "completed"} tags=[] import re import sqlite3 import pandas as pd import numpy as np import seaborn as sns import umap.umap_ as umap from sqlalchemy import create_engine from datetime import datetime from matplotlib import pyplot as plt from sklearn import cluster as c from sklearn import metrics as met from sklearn import decomposition as dd from sklearn import ensemble as en from sklearn import mixture as mx from plotly import express as px from sklearn import preprocessing as pp import warnings warnings.filterwarnings("ignore") # + [markdown] papermill={"duration": 0.029689, "end_time": "2022-01-03T06:06:23.963682", "exception": false, "start_time": "2022-01-03T06:06:23.933993", "status": "completed"} tags=[] # ## 0.2 Load Dataset # # + [markdown] tags=["papermill-error-cell-tag"] # <span id="papermill-error-cell" style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">Execution 
using papermill encountered an exception here and stopped:</span> # + papermill={"duration": 0.133398, "end_time": "2022-01-03T06:06:24.126489", "exception": true, "start_time": "2022-01-03T06:06:23.993091", "status": "failed"} tags=[] # load data path = '/Users/felip/repositorio/pa005_insiders_clustering/insiders_clustering/' df_raw = pd.read_csv(path + 'data/raw/Ecommerce.csv', encoding='cp1252') # # drop extra column df_raw = df_raw.drop(columns=['Unnamed: 8'] , axis=1) # + [markdown] heading_collapsed=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 1.0 Descriรงรฃo dos dados </font> # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df1 = df_raw.copy() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.1 Rename Columns # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] cols_new = ['invoice_no', 'stock_code', 'description', 'quantity', 'invoice_date','unit_price', 'customer_id', 'country'] df1.columns = cols_new # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.2 Data dimensios # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] print('Number of rows: {}'.format(df1.shape[0])) print('Number of columns: {}'.format(df1.shape[1])) # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.3 Data types # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, 
"start_time": null, "status": "pending"} tags=[] df1.dtypes # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.4 Check NA # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df1.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.5 Replace NA # # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df_missing = df1.loc[df1['customer_id'].isna(), :] df_not_missing = df1.loc[~df1['customer_id'].isna(), :] # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # create referance df_backup = pd.DataFrame(df_missing['invoice_no'].drop_duplicates()) df_backup['customer_id'] = np.arange(19000 , 19000+len(df_backup), 1) df_backup.head() # merge original with reference dataframe df1 = pd.merge(df1, df_backup , on='invoice_no' , how='left') #coalesce df1['customer_id'] = df1['customer_id_x'].combine_first(df1['customer_id_y']) # drop extra columns df1 = df1.drop(columns=['customer_id_x' , 'customer_id_y' ] , axis = 1) df1.head() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.6 Changes dtypes # # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # invoice date df1['invoice_date'] = pd.to_datetime(df1['invoice_date'] , format='%d-%b-%y') # customer_id df1['customer_id'] = df1['customer_id'].astype('int64') # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, 
"exception": null, "start_time": null, "status": "pending"} tags=[] # ## 1.7 Descriptive Statistics # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] num_attributes = df1.select_dtypes(include=['int64' , 'float64']) cat_attributes = df1.select_dtypes(exclude=['int64' , 'float64','datetime64[ns]']) # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 1.7.1 Numerical Attributs # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # central tendency - mean, median mean = pd.DataFrame(num_attributes.apply(np.mean)).T median = pd.DataFrame(num_attributes.apply(np.median)).T # dispersion - std , min , max , range , skew , kurtosis std = pd.DataFrame(num_attributes.apply(np.std)).T mi = pd.DataFrame(num_attributes.apply(np.min)).T ma = pd.DataFrame(num_attributes.apply(np.max)).T ran = pd.DataFrame(num_attributes.apply(lambda x: x.max() - x.min())).T skew = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T kurtosis = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T # concatenate m = pd.concat([mi , ma ,ran, mean , median , std, skew , kurtosis]).T.reset_index() m.columns = ['Attributes' , 'min' , 'max', 'range', 'mean', 'median', 'std', 'skew', 'kurtoses'] m # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 1.7.2 Categorical Attributs # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Invoice No # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # identificaรงรฃo: 
df_letter_invoices = df1.loc[df1['invoice_no'].apply(lambda x: bool(re.search('[^0-9]+', x))) , :] df_letter_invoices.head() print('Total number of invoices: {}'.format(len(df_letter_invoices))) print('Total number of negative quantity: {}'.format(len(df_letter_invoices['quantity'] < 0))) # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # #### Stock Code # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # check stock code only characters df1.loc[cat_attributes['stock_code'].apply(lambda x: bool(re.search('^[a-zA-Z]+$', x))),'stock_code'].unique() # Aรงรฃo: ## 1. Remove stock_code in ['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY','DCGSSGIRL', 'PADS', 'B', 'CRUK'] # + [markdown] heading_collapsed=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 2.0 Filtragem de variaveis </font> # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df2 = df1.copy() # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ==== Numerical Attributes ==== df2 = df2.loc[df2['unit_price'] > 0.04 , :] # ==== Categorical Attributes ==== df2 = df2[~df2['stock_code'].isin(['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY','DCGSSGIRL', 'PADS', 'B', 'CRUK'])] # description df2 = df2.drop('description', axis =1) # map df2 = df2[~df2['country'].isin (['European Community' , 'Unspecified'])] # bad users df2 = df2[~df2['customer_id'].isin( [16446]) ] ## quantity df2_returns = df2.loc[df2['quantity'] < 0, :] df2_purchase = df2.loc[df2['quantity'] >= 0, :] # + [markdown] heading_collapsed=true papermill={"duration": null, "end_time": null, 
"exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 3.0 Feature Engeneering </font> # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df3 = df2.copy() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 3.1 Feature Creation # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # data reference df_ref = df3.drop(['invoice_no','stock_code','quantity', 'invoice_date','unit_price','country'], axis = 1 ).drop_duplicates(ignore_index = True) # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.1. Gross Revenue # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Gross Revenue ( Faturamento = quantity * price) df2_purchase.loc[: , 'gross_revenue'] = df2_purchase.loc[: , 'quantity'] * df2_purchase.loc[: , 'unit_price'] df_monetary = df2_purchase.loc[: , ['customer_id' , 'gross_revenue']].groupby('customer_id').sum().reset_index() df_ref = pd.merge(df_ref, df_monetary,on='customer_id',how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.2. 
Recency - Day from last purchase # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Recency - Last day purchase df_recency = df2_purchase.loc[: ,[ 'customer_id', 'invoice_date']].groupby('customer_id').max().reset_index() df_recency['recency_days'] = (df2['invoice_date'].max() - df_recency['invoice_date']).dt.days df_recency = df_recency[['customer_id','recency_days']].copy() df_ref = pd.merge(df_ref , df_recency , on='customer_id', how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.3. Quantity of purchased # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Numero de produtos df_freq = (df2_purchase.loc[: ,['customer_id','invoice_no']].drop_duplicates() .groupby('customer_id') .count() .reset_index() .rename(columns={'invoice_no' : 'qtde_invoices'})) df_ref = pd.merge(df_ref , df_freq , on='customer_id', how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.4. Quantity total of items purchased # # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df_freq = df2_purchase.loc[: ,['customer_id','quantity']].groupby('customer_id').sum().reset_index().rename(columns = {'quantity' : 'qtde_items'}) df_ref = pd.merge(df_ref , df_freq , on='customer_id', how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.5. 
Quantity of products purchased # # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df_freq = df2_purchase.loc[: ,['customer_id','stock_code']].groupby('customer_id').count().reset_index().rename(columns = {'stock_code' : 'qtde_products'}) df_ref = pd.merge(df_ref , df_freq , on='customer_id', how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.6. Avg Ticket Value # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Avg ticket df_avg_ticket = df2_purchase.loc[: ,['customer_id' , 'gross_revenue']].groupby('customer_id').mean().reset_index().rename(columns={'gross_revenue':'avg_ticket'}) #df_avg_ticket['avg_ticket'] = np.round(df_avg_ticket['avg_ticket'] , 2) df_ref = pd.merge(df_ref, df_avg_ticket,on='customer_id',how='left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.8. 
Frequency Purchase # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df3_purchase = df2_purchase.copy() # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df_aux = df3_purchase[['invoice_no','customer_id', 'invoice_date']].drop_duplicates().groupby('customer_id').agg(max_ = ('invoice_date', 'max'), min_ = ('invoice_date', 'min'), days_ = ('invoice_date', lambda x: (( x.max() - x.min() ).days)+ 1), buy_ = ('invoice_date', 'count')).reset_index() # Frequency df_aux['frequency'] = df_aux[['buy_', 'days_']].apply( lambda x: x['buy_'] / x['days_'] if x['days_'] != 0 else 0, axis = 1) # Merge df_ref = pd.merge(df_ref , df_aux[['customer_id', 'frequency']], on='customer_id', how = 'left') df_ref.isna().sum() # + [markdown] heading_collapsed=true hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### 3.1.9. 
Numbers of Returns # + hidden=true papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df_returns = df2_returns[['customer_id' , 'quantity']].groupby('customer_id').sum().reset_index().rename( columns={'quantity' : 'qtde_returns'}) df_returns['qtde_returns'] = df_returns['qtde_returns'] * -1 df_ref = pd.merge(df_ref , df_returns, on ='customer_id' , how='left') df_ref.loc[df_ref['qtde_returns'].isna() , 'qtde_returns'] = 0 df_ref.isna().sum() # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 4.0 EDA (Exploratory Data Analysis) </font> # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df4 = df_ref.dropna().copy() # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 4.3 Estudo do espaรงo # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Selected Dataset cols_selected = ['customer_id', 'gross_revenue','recency_days', 'qtde_products', 'frequency', 'qtde_returns' ] df43 = df4[cols_selected].drop(columns = 'customer_id') # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] mm = pp.MinMaxScaler() df43['gross_revenue'] = mm.fit_transform(df43[['gross_revenue']]) df43['recency_days'] = mm.fit_transform(df43[['recency_days']]) df43['qtde_products'] = mm.fit_transform(df43[['qtde_products']]) df43['frequency'] = mm.fit_transform(df43[['frequency']]) df43['qtde_returns'] = mm.fit_transform(df43[['qtde_returns']]) X = df43.copy() # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # training dataset X = df43.drop(columns=[ 'gross_revenue'], axis = 1) Y = 
df43['gross_revenue'] # model definition rf_model = en.RandomForestRegressor(n_estimators = 300 , random_state = 42 ) # model training rf_model.fit(X,Y) # dataframe Leaf df_leaf = pd.DataFrame(rf_model.apply(X)) # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # tree num_comp=[2,3,4,5] components_list1=[] components_list2=[] components_list3=[] components_list4=[] df_tree_1 = pd.DataFrame() df_tree_2 = pd.DataFrame() df_tree_3 = pd.DataFrame() df_tree_4 = pd.DataFrame() for i in num_comp: # reduzer dimensionality reducer = umap.UMAP(random_state = 42, n_components = i) embedding = reducer.fit_transform(df_leaf) if i == 2: components_list1.append(embedding) # embedding df_tree_1['embedding_x'] = embedding[:,0] df_tree_1['embedding_y'] = embedding[:,1] if i == 3: components_list2.append(embedding) # embedding df_tree_2['embedding_x'] = embedding[:,0] df_tree_2['embedding_y'] = embedding[:,1] df_tree_2['embedding_z'] = embedding[:,2] if i == 4: components_list3.append(embedding) df_tree_3['embedding_x'] = embedding[:,0] df_tree_3['embedding_y'] = embedding[:,1] df_tree_3['embedding_z'] = embedding[:,2] df_tree_3['embedding_w'] = embedding[:,3] if i == 5: components_list4.append(embedding) df_tree_4['embedding_x'] = embedding[:,0] df_tree_4['embedding_y'] = embedding[:,1] df_tree_4['embedding_z'] = embedding[:,2] df_tree_4['embedding_w'] = embedding[:,3] df_tree_4['embedding_l'] = embedding[:,4] # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 5.0 Data Preparation </font> # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # UMAP EMbedding df5 = df_tree_2.copy() df5.to_csv('../src/data/tree_based_embedding.csv' , index = False) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, 
"status": "pending"} tags=[] # # <font color ='red'> 5.0 Hyperparameter Fine-Tunning </font> # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] x = df5.copy() # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] clusters = np.arange(2,25,1) # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] kmeans_list = [] for k in clusters: # model definition kmeans_model = c.KMeans(n_clusters = k , random_state=42) # model training kmeans_model.fit(x) # model predict labels = kmeans_model.predict(x) # model performance sil = met.silhouette_score(x , labels , metric='euclidean') kmeans_list.append(sil) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 6.0 Model Training </font> # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df8 = x.copy() # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 6.1 GMM # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # model definition k = 11 kmeans = c.KMeans(init='random', n_clusters=k, n_init=10, max_iter=300, random_state=42) # model training kmeans.fit(df8) # clustering labels = kmeans.labels_ # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 6.2 Cluster Validation # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] print('SS value: {}'.format(met.silhouette_score(df8, labels, metric='euclidean'))) # + [markdown] papermill={"duration": null, "end_time": null, "exception": 
null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 7.0 Cluster Analysis </font> # + hide_input=false papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df92 = df4[cols_selected].copy() df92['cluster'] = labels df92['last_training_timestamp'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # change dtypes df92['recency_days'] = df92['recency_days'].astype('int64') df92['qtde_products'] = df92['qtde_products'].astype('int64') df92['qtde_returns'] = df92['qtde_returns'].astype('int64') df92['last_training_timestamp'] =pd.to_datetime(df92['last_training_timestamp']) # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 7.2 Cluster Profile # # + hide_input=false papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # Number of customer df_cluster = df92[['customer_id','cluster']].groupby('cluster').count().reset_index() df_cluster['perc_customers'] = 100*(df_cluster['customer_id'] / df_cluster['customer_id'].sum()) # Avg Gross revenue df_avg_gross_revenue = df92[['gross_revenue','cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster , df_avg_gross_revenue , how='inner' , on='cluster') # Avg recency days df_avg_recency_days = df92[['recency_days','cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster , df_avg_recency_days , how='inner' , on='cluster') # Avg invoice_no df_qtde_products = df92[['qtde_products','cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster , df_qtde_products , how='inner' , on='cluster') # Frequency df_frequency = df92[['frequency','cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster , df_frequency , how='inner' , on='cluster') # Returns df_qtde_returns = 
df92[['qtde_returns','cluster']].groupby('cluster').mean().reset_index() df_cluster = pd.merge(df_cluster , df_qtde_returns , how='inner' , on='cluster') df_cluster # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # 1 cluster Insiders # 4 cluster more products # 7 cluster spend money # 9 cluster even more products # 2 cluster less days # 10 cluster less 1k # 8 cluster Stop Returners # 9 cluster more buy # 5 cluster even more buy # 3 cluster weak # 0 cluster latecomer # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ### Cluster 01: ( Candidato a Insiders) # # - Nรบmero de customers: 665 (12% do customers) # - Faturamento mรฉdio: 7503 # - Recรชncia mรฉdia: 52 dias # - Mรฉdia de produtos comprados: 371 produtos # - Frequรชncia de produtos comprados: 0.23 produtos/dia # - Receita em mรฉdia: $ 7503.14,00 dรณlares # # # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # # <font color ='red'> 8.0 Deploy to production </font> # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] df92.dtypes # + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # ## 8.1. 
Insert into SQLITE # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # database connection conn = create_engine('sqlite:///insiders_db.sqlite') # drop table # query_drop_insiders=""" # DROP TABLE Insiders # """ # create Table # query_create_table_insiders = """ # CREATE TABLE Insiders ( # customer_id INTEGER, # gross_revenue REAL, # recency_days INTEGER, # qtde_products INTEGER, # frequency REAL, # qtde_returns INTEGER, # cluster INTEGER, # last_training_timestamp TIMESTAMP # ) # """ # conn.execute(query_create_table_insiders) # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # insert data into df92.to_sql('Insiders', con=conn, if_exists='append' , index=False) # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # get query # query_collect=""" # SELECT * from Insiders # """ # df = pd.read_sql_query(query_collect , conn) # df.head() # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[] # + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
reports/c9.0-fsp-deploy-cloud-2022-01-03T03:06:18.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Overlay a transparent PNG watermark onto a background photograph and save
# the blended result.

# +
import numpy as np
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# -

# Load the background; OpenCV reads BGR, so convert to RGB for matplotlib.
bg = cv2.imread("josh-berquist-0gNZxc5ds58.jpg")
bg = cv2.cvtColor(bg, cv2.COLOR_BGR2RGB)
plt.imshow(bg)

# +
# Flag -1 (IMREAD_UNCHANGED) keeps the PNG's alpha channel.
watermark = cv2.imread("pngwing lamborghini.png", -1)
wh, ww, wc = watermark.shape
# plt.imshow(watermark[watermark==255])
print(watermark.shape)
# -

# Append a fully opaque alpha plane so background and watermark are both RGBA.
(h, w, c) = bg.shape
image = np.dstack([bg, np.full((h, w), 255)])
print(image.shape)
plt.imshow(image)

# +
# Paste the watermark onto an otherwise black canvas at offset (20, 20).
x1, y1 = 20, 20
x2, y2 = x1 + ww, y1 + wh
overlay = np.zeros_like(image)
overlay[y1:y2, x1:x2] = watermark
# -

plt.imshow(overlay)

# +
# Blend 90% of the overlay on top of the unmodified background, then save.
combined = image.copy()
combined = cv2.addWeighted(overlay, 0.9, combined, 1.0, 0.0)
plt.imsave("watermark_img.png", combined.astype("uint8"))
plt.imshow(combined)
# -
Notebooks/Watermark on Image or Video/Watermark on Image or Video.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="YJYDHq1TAOdQ"
# # Exercises
#
# We have prepared five exercises in this chapter:
#
# 1. Modify the HCM code to work for three groups. This exercise can be divided into four tasks:
#    - modify the parameters,
#    - modify the calculate_u function,
#    - execute the clustering,
#    - plot the results.
# 2. For density clustering, plot the feature space with every element marked with a different color, depending on the cluster that it is assigned to. You should do the following tasks:
#    - fill the get_color method,
#    - fill the plot code.
# 3. Build a method that plots, based on dendrograms_history and pydot, a dendrogram for the divisive clustering method. You should base it on the agglomerative method, but keep in mind that it works top-down instead of bottom-up. This exercise needs just one function to be implemented:
#    - show_tree_divisive.
#    You should loop over the dendrogram_history variable and loop over the children.
# 4. Implement the $s_{2}$ metric
# 5. Draw the borders between clusters in the output image

# + [markdown] colab_type="text" id="mcbtsSKgAOdR"
# ## Libraries
#
# To solve the exercises, we need the following libraries to load in the first place.

# + colab={} colab_type="code" id="tfe5klKkAOdS"
import numpy
import random
import numpy as np
import pandas as pd
from math import sqrt
import matplotlib.image as img
import PIL.Image
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from IPython.display import Image
import pydot

# + [markdown] colab_type="text" id="pPKB4s_CAOdV"
# ## Load data

# + [markdown] colab_type="text" id="d1eek3-3AOdd"
# ## Exercise 1: Modify the HCM code to work for three groups
#
# The obvious part is the variable ```groups```, but most of the changes need to be done here:

# + colab={} colab_type="code" id="-x1jZ9fHAOde"
# Restore the 2-D data set prepared in the previous notebook.
get_ipython().run_line_magic('store', '-r data_set')

# change here: three clusters instead of two
groups = 3
error_margin = 0.01
m = 2  # fuzzifier exponent used when weighting memberships

assignation = np.zeros((len(data_set), groups))
# Three hand-picked initial centers, one per group.
centers = np.array([[0.01229673, 0.25183492],
                    [0.3689626, 0.61904127],
                    [0.95732769, 0.45059586]])


def calculate_distance(x, v):
    """Euclidean distance between two 2-D points."""
    return sqrt((x[0] - v[0])**2 + (x[1] - v[1])**2)


def calculate_new_centers(u):
    """Recompute every group center as the membership-weighted mean of the data.

    ``u`` is the assignment matrix (one row per data point, one column per group).
    """
    new_centers = []
    for c in range(groups):
        u_x_vector = np.zeros(2)
        u_scalar = 0.0
        for i in range(len(data_set)):
            u_scalar = u_scalar + (u[i][c]**m)
            u_x_vector = np.add(
                u_x_vector, np.multiply(u[i][c]**m, data_set[i]))
        new_centers.append(np.divide(u_x_vector, u_scalar))
    return new_centers


def calculate_differences(new_assignation, assignation):
    """Total absolute change between two assignment matrices."""
    return np.sum(np.abs(np.subtract(assignation, new_assignation)))


def cluster_hcm(assignation, centers):
    """Run hard c-means until assignments change by less than ``error_margin``.

    Returns the final ``(assignation, centers)`` pair.
    """
    difference_limit_not_achieved = True
    new_centers = centers
    iteration = 0  # renamed from ``iter`` to avoid shadowing the builtin
    while difference_limit_not_achieved:
        new_assignation = []
        for i in range(len(data_set)):
            new_assignation.append(calculate_u_three(data_set[i], new_centers))
        new_centers = calculate_new_centers(new_assignation)
        if iteration > 0:
            if calculate_differences(new_assignation, assignation) < error_margin:
                difference_limit_not_achieved = False
        assignation = new_assignation
        iteration = iteration + 1
    return new_assignation, new_centers


# + [markdown] colab_type="text" id="WAcl4FL3AOdg"
# ### Modify the ``calculate_u`` function
#
# Fill the gap below to make the function work for more groups than two. The goal
# here is to calculate the distance between ``x`` and the center of each group and
# assign ``x`` to the closest one.

# + colab={} colab_type="code" id="fRh48eywAOdi"
def calculate_u_three(x, centers):
    """Return a one-hot vector assigning *x* to the nearest of ``centers``."""
    u_array = np.zeros(groups)
    distances = [calculate_distance(x, centers[group]) for group in range(groups)]
    u_array[np.argmin(distances)] = 1
    return u_array


# + [markdown] colab_type="text" id="7Yg2nzpSAOdl"
# ### Execute the clustering
#
# As in the previous example we need to cluster it.

# + colab={"base_uri": "https://localhost:8080/", "height": 142} colab_type="code" id="cQNTgw6dAOdm" outputId="a5a83b29-ebd1-4c6c-c4e5-56e712b7fa8d"
new_assignation_hcm3, new_centers_hcm3 = cluster_hcm(assignation, centers)
pd.DataFrame(new_centers_hcm3)

# + [markdown] colab_type="text" id="CHoxZO7AAOdv"
# ### Plot the results

# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="jLU8Y62HAOdw" outputId="b8bd3a71-e38b-41f9-c6a8-afc118094336"
# Split the data set by the one-hot column each point was assigned to.
red = data_set[np.where(np.array(new_assignation_hcm3)[:, 0] == 1)]
blue = data_set[np.where(np.array(new_assignation_hcm3)[:, 1] == 1)]
green = data_set[np.where(np.array(new_assignation_hcm3)[:, 2] == 1)]

fig, ax = plt.subplots()
ax.scatter(blue[:, 0], blue[:, 1], c='blue')
ax.scatter(red[:, 0], red[:, 1], c='red')
ax.scatter(green[:, 0], green[:, 1], c='green')
ax.scatter(np.array(new_centers_hcm3)[:, 0],
           np.array(new_centers_hcm3)[:, 1], c='black')
ax.set(xlabel='Seats count', ylabel='Distance range (km)',
       title='Aircrafts (clusters)')
ax.grid()
plt.show()

# + [markdown] colab_type="text" id="BT5-lyyeAOdz"
# ## Exercise 2: Plot the density clusters
#
# Use the code below to plot the results. You can play with the max_distance
# variable to get more or fewer groups.

# + colab={} colab_type="code" id="7AJ5soJZAOd0"
get_ipython().run_line_magic('store', '-r new_assignation_density')
get_ipython().run_line_magic('store', '-r data_set')

# + [markdown] colab_type="text" id="6SCyGmV6AOd3"
# ### Fill the ``get_group_objects`` method
#
# Only one line needs to be updated. The ``get_group_objects`` function should
# return the objects of a given group.

# + colab={} colab_type="code" id="AXXN5F9zAOd3"
colors = ['red', 'blue', 'green', 'orange', 'black', 'yellow']


def get_group_objects(color_id):
    """Return (as an array) the data points assigned to cluster ``color_id``."""
    return np.array([data_set[idx]
                     for idx, density in enumerate(new_assignation_density)
                     if density == color_id])


# + [markdown] colab_type="text" id="LntzwsMUAOd6"
# ### Fill the plot code
#
# If done properly the code below should return a plot of two clusters and the noise.

# + colab={"base_uri": "https://localhost:8080/", "height": 376} colab_type="code" id="lNJnPaKcAOd7" outputId="886763c4-35d3-4718-9026-01ebcc984ba2"
colors = ['red', 'blue', 'green', 'orange', 'black', 'yellow']
fig, ax = plt.subplots()
assigned_groups = new_assignation_density
for group in np.unique(assigned_groups):
    small_set = get_group_objects(group)
    # One color per cluster; a green circle marks each point's neighborhood.
    ax.scatter(small_set[:, 0], small_set[:, 1], c=colors.pop(0))
    for circle in small_set:
        circle1 = plt.Circle((circle[0], circle[1]), 0.25,
                             color='green', fill=False)
        ax.add_artist(circle1)
ax.set(xlabel='Seats count', ylabel='Distance range (km)',
       title='Aircrafts (clusters)')
ax.grid()
plt.show()

# + [markdown] colab_type="text" id="tlWlDgtbAOeA"
# ## Exercise 3: Build a dendrogram using dendrograms_history and pydot
#
# In this exercise we are going to use the variable dendrograms_history and pydot.
# Below we restore the variable and initialize the dendrogram graph.
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="B3kfa7eRAOeB" outputId="0e4e8fba-b479-4a47-ee15-e6f5ae98c000"
# Restore the divisive-clustering history computed in a previous notebook and
# create an undirected pydot graph to draw the dendrogram into.
get_ipython().run_line_magic('store', '-r dendrogram_hist_diana')
print(dendrogram_hist_diana)
tree = pydot.Dot(graph_type='graph')

# + [markdown] colab_type="text" id="Vwva8m22AOeE"
# ### Fill ``show_tree_divisive`` function
#
# The function ``show_tree_divisive`` goes through each history entry and builds
# an edge between the parent node and each of its children.

# + colab={} colab_type="code" id="Ka5XkmU5AOeM"
def show_tree_divisive(output='tree.png'):
    """Render the divisive-clustering dendrogram to *output* as a PNG.

    Walks the global ``dendrogram_hist_diana`` (a list of one-element lists,
    each holding a dict with an ``acesor`` parent id and its ``childs``) and
    adds one edge per parent/child pair to the global ``tree`` graph.

    The *output* path parameter defaults to ``'tree.png'``, so existing callers
    are unaffected.
    """
    for item in dendrogram_hist_diana:
        parent = str(item[0]["acesor"])
        for child in item[0]["childs"]:
            tree.add_edge(pydot.Edge(parent, str(child)))
    tree.write(output, format='png')


show_tree_divisive()

# + [markdown] colab_type="text" id="So4SrYcuAOeS"
# Take a look if you did it properly:

# + colab={"base_uri": "https://localhost:8080/", "height": 556} colab_type="code" id="ssypTjWNAOeT" outputId="dc8fcb7a-c360-46a4-bacb-5c4c35c8299c"
Image(filename='tree.png')

# + [markdown] colab_type="text" id="TNcb332YAOeW"
# ## Exercise 4: Implement the $s_{2}$ metric
#
# The $s_{2}$ metric gives a better understanding of the distances between centers.
#
# \begin{equation}
# s_{2}(c_{i},c_{j})=d(c_{i},c_{j}).
# \end{equation}
#
# Let's restore the centers from HCM grouped by two and initialize the values for three groups as below.

# + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="IdhOdBJPAOeX" outputId="49e28eda-b0d8-45da-9675-30e993f58367"
get_ipython().run_line_magic('store', '-r new_centers_hcm')
new_centers_hcm = np.array(new_centers_hcm)
# Hard-coded example centers for the three-group case.
new_centers_hcm3 = np.array(
    [[0.42239686, 0.38503185],
     [0.07858546, 0.17832272],
     [0.82907662, 0.97059448]])
print(new_centers_hcm)
print(new_centers_hcm3)

# + [markdown] colab_type="text" id="zuq6HCqcAOeZ"
# Measure the distance between each center.
# + colab={} colab_type="code" id="NBF4s5PKAOea"
def calculate_s_2(centers):
    """Return the list of distances d(c_i, c_j) for every distinct pair of centers.

    The inner loop is cut off at ``center_2 == center_1``, so each unordered
    pair is measured exactly once and self-distances are skipped.
    """
    s2 = []
    for center_1 in range(len(centers)):
        for center_2 in range(len(centers)):
            if center_1 == center_2:
                break
            s2.append(calculate_distance(centers[center_1], centers[center_2]))
    return s2


# + [markdown] colab_type="text" id="ZdZNRopZELuj"
# Test:

# + colab={} colab_type="code" id="KyprMKY8EJY4"
print(calculate_s_2(new_centers_hcm))
print(calculate_s_2(new_centers_hcm3))

# + [markdown] colab_type="text" id="2Vx94NBmAOef"
#
# ## Exercise 5: Modify the output image with borders between clusters
#
# We use the ``Segmentation`` class as in the previous example.

# + colab={} colab_type="code" id="n5h3JbSuAOeg"
class Segmentation:
    """Hard c-means (HCM) colour segmentation over a set of RGB feature vectors."""

    def __init__(self, feature_matrix, groups):
        self.__data_set = feature_matrix
        self.__groups = groups
        # RGB value ranges; kept for reference, not used by the algorithm.
        self.__space = [[0, 255], [0, 255], [0, 255]]
        self.__error_margin = 0.5
        self.assignation = np.zeros((len(self.__data_set), self.__groups))
        self.centers = []
        self.select_centers()

    def select_centers(self):
        """Pick one random RGB centre (scaled to [0, 1]) per group, only once."""
        if len(self.centers) == 0:
            for _ in range(self.__groups):
                self.centers.append((random.randrange(0, 255) * 1.0 / 255,
                                     random.randrange(0, 255) * 1.0 / 255,
                                     random.randrange(0, 255) * 1.0 / 255))

    def calculate_distance(self, x, v):
        """Euclidean distance between two RGB vectors."""
        return sqrt((x[0] - v[0])**2 + (x[1] - v[1])**2 + (x[2] - v[2])**2)

    def calculate_u(self, x, i):
        """Return a one-hot membership vector assigning *x* to its nearest centre.

        The *i* argument (iteration number) is unused; it is kept so existing
        positional callers keep working.
        """
        smallest_distance = float(self.calculate_distance(x, self.centers[0]))
        smallest_id = 0
        for group in range(1, self.__groups):
            distance = self.calculate_distance(x, self.centers[group])
            if distance < smallest_distance:
                smallest_id = group
                smallest_distance = distance
        membership = np.zeros(self.__groups)
        membership[smallest_id] = 1
        return membership

    def calculate_new_centers(self, u):
        """Recompute each centre as the mean of the pixels assigned to it."""
        new_centers = []
        for c in range(self.__groups):
            u_x_vector = np.zeros(len(self.centers[0]))
            u_scalar = 0
            for i in range(len(u)):
                u_scalar = u_scalar + u[i][c]
                u_x_vector = np.add(u_x_vector,
                                    np.multiply(u[i][c], self.__data_set[i]))
            new_centers.append(np.divide(u_x_vector, u_scalar))
        self.centers = new_centers

    def calculate_differences(self, new_assignation):
        """Total absolute change in memberships since the previous iteration."""
        diff = 0
        for i in range(len(self.assignation)):
            for j in range(self.__groups):
                diff = diff + abs(float(new_assignation[i][j])
                                  - float(self.assignation[i][j]))
        return diff

    def do_segmentation(self):
        """Alternate assignment/update steps until memberships stop changing."""
        difference_limit_not_achieved = True
        iteration = 0  # renamed from ``iter`` to avoid shadowing the builtin
        while difference_limit_not_achieved:
            new_assignation = []
            for i in range(len(self.__data_set)):
                new_assignation.append(
                    self.calculate_u(self.__data_set[i], iteration))
            self.calculate_new_centers(new_assignation)
            if iteration > 0:
                if self.calculate_differences(new_assignation) < self.__error_margin:
                    difference_limit_not_achieved = False
            self.assignation = new_assignation
            iteration = iteration + 1

    def get_results(self):
        """Return the final ``(centers, assignation)`` pair."""
        return self.centers, self.assignation


# + [markdown] colab_type="text" id="rRfOnlOJAOei"
# ### Change ``save_image`` method
#
# Add an if statement in the code below. It should consider the change of the
# ``current_pixel`` variable. Please keep in mind that there should be three
# states considered.

# + colab={} colab_type="code" id="5xELUO8HAOej"
class ImageConversion:
    """Helpers to turn an image into a colour feature matrix and write the
    segmented result back to disk."""

    def get_image_from_url(self, img_url):
        """Read a local image file into a numpy array.

        Uses a ``with`` block so the file handle is closed; the original opened
        the file and never closed it (a resource leak).
        """
        with open(img_url, 'rb') as image:
            return img.imread(image)

    def get_unique_colours(self, image_matrix):
        """Return ``(unique colour vectors, flat per-pixel colour list)``."""
        feature_matrix = []
        for i in range(len(image_matrix)):
            for j in range(len(image_matrix[0])):
                feature_matrix.append(image_matrix[i, j])
        feature_matrix_np = np.array(feature_matrix)
        # Deduplicate via string keys (np.unique on rows would need axis=0).
        uniques, index = np.unique(
            [str(i) for i in feature_matrix_np], return_index=True)
        return feature_matrix_np[index], feature_matrix

    def save_image(self, size, pixel_matrix, unique_matrix,
                   assignation_matrix, colours, output):
        """Write the segmented image to *output*, painting a black pixel
        wherever the cluster id changes between consecutive pixels (the
        border required by exercise 5).
        """
        image_out = PIL.Image.new("RGB", size)
        pixels = []
        current_pixel = None  # cluster id of the previously emitted pixel
        for i in range(len(pixel_matrix)):
            pixel_list = pixel_matrix[i].tolist()
            for j in range(len(unique_matrix)):
                if pixel_list == unique_matrix[j].tolist():
                    for k in range(len(colours)):
                        if assignation_matrix[j][k] == 1:
                            segmented_colours = [int(v)
                                                 for v in (colours[k] * 255)]
                            if current_pixel is not None and current_pixel != k:
                                # Cluster changed -> draw a black border pixel.
                                segmented_colours = [0, 0, 0]
                            current_pixel = k
                            pixels.append(tuple(segmented_colours))
        image_out.putdata(pixels)
        image_out.save(output)

    # NOTE(review): the original pasted copies of ``Segmentation.do_segmentation``
    # and ``Segmentation.get_results`` here. They referenced name-mangled
    # ``Segmentation`` attributes (``_Segmentation__data_set`` etc.) that can
    # never exist on an ``ImageConversion`` instance, so any call would raise
    # AttributeError; those dead copies are removed.


# + [markdown] colab_type="text" id="W_pjFsAAAOel"
# Execute segmentation without any changes:

# + colab={"base_uri": "https://localhost:8080/", "height": 350} colab_type="code" id="CPUM5TIWAOem" outputId="85083caa-403f-4acd-e7bd-49fbdb2c3655"
image_to_segment = "images/logo_krakow.png"
image_converter = ImageConversion()
image_data = image_converter.get_image_from_url(image_to_segment)
unique_image_data, image_data_list = image_converter.get_unique_colours(image_data)

groups = 3

segmentation = Segmentation(unique_image_data, groups)
segmentation.do_segmentation()
centers, assignation_matrix = segmentation.get_results()

image_size = (232, 258)
# -

image_converter.save_image(image_size, image_data_list, unique_image_data,
                           assignation_matrix, centers,
                           "images/segmented_hw.png")

# + [markdown] colab_type="text" id="wt7QnTXSAOe2"
# The image should have black borders between one segment and the other.

# + colab={} colab_type="code" id="XwOsb9OXAOe2" outputId="d33cec1a-f1fe-4c06-fc20-e29346fc09f4"
Image("images/segmented_hw.png")
# -
Lab4/hw/hw-047Clustering_Exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Plant Seedlings Competition # https://www.kaggle.com/c/plant-seedlings-classification/ # %reload_ext autoreload # %autoreload 2 # %matplotlib inline from fastai.imports import * from fastai.transforms import * from fastai.conv_learner import * from fastai.model import * from fastai.dataset import * from fastai.sgdr import * from fastai.plots import * PATH = "./data/plant-seedlings/" sz = 300 # + [markdown] heading_collapsed=true # ## Exploration # + hidden=true # !ls {PATH} # + hidden=true # !ls {PATH}valid # + hidden=true # files = !ls {PATH}valid/Black-grass | head files # + hidden=true img = plt.imread(f'{PATH}valid/Black-grass/{files[0]}') plt.imshow(img); # + [markdown] hidden=true # Here is how the raw data looks like # + hidden=true img.shape # + hidden=true img[:4,:4] # - # ## Modelling # Uncomment the below if you need to reset your precomputed activations # !rm -rf {PATH}tmp arch = resnet50 data = ImageClassifierData.from_paths(PATH, tfms=tfms_from_model(arch, sz), test_name='test') learn = ConvLearner.pretrained(arch, data, precompute=True) learn.fit(0.1, 3) # + [markdown] heading_collapsed=true # ## Analyzing results: looking at pictures # + [markdown] hidden=true # As well as looking at the overall metrics, it's also a good idea to look at examples of each of: # 1. A few correct labels at random # 2. A few incorrect labels at random # 3. The most correct labels of each class (ie those with highest probability that are correct) # 4. The most incorrect labels of each class (ie those with highest probability that are incorrect) # 5. The most uncertain labels (ie those with probability closest to 0.5). 
# + hidden=true # This is the label for a val data data.val_y # + hidden=true data.classes # + hidden=true # this gives prediction for validation set. Predictions are in log scale log_preds = learn.predict() log_preds.shape # + hidden=true log_preds[:10] # + hidden=true preds = np.argmax(log_preds, axis=1) # from log probabilities to 0 or 1 probs = np.exp(log_preds[:,1]) # pr(dog) # + hidden=true def rand_by_mask(mask): return np.random.choice(np.where(mask)[0], 4, replace=False) def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct) # + hidden=true def plot_val_with_title(idxs, title): imgs = np.stack([data.val_ds[x][0] for x in idxs]) title_probs = [probs[x] for x in idxs] print(title) return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs) # + hidden=true def plots(ims, figsize=(12,6), rows=1, titles=None): f = plt.figure(figsize=figsize) for i in range(len(ims)): sp = f.add_subplot(rows, len(ims)//rows, i+1) sp.axis('Off') if titles is not None: sp.set_title(titles[i], fontsize=16) plt.imshow(ims[i]) # + hidden=true def load_img_id(ds, idx): return np.array(PIL.Image.open(PATH+ds.fnames[idx])) def plot_val_with_title(idxs, title): imgs = [load_img_id(data.val_ds,x) for x in idxs] title_probs = [probs[x] for x in idxs] print(title) return plots(imgs, rows=1, titles=title_probs, figsize=(16,8)) # + hidden=true # 1. A few correct labels at random plot_val_with_title(rand_by_correct(True), "Correctly classified") # + hidden=true # 2. 
# A few incorrect labels at random
plot_val_with_title(rand_by_correct(False), "Incorrectly classified")

# + hidden=true
def most_by_mask(mask, mult):
    """Return up to 4 validation indices selected by `mask`, ordered by
    `mult * probs` ascending (so mult=-1 means "highest probability first")."""
    idxs = np.where(mask)[0]
    return idxs[np.argsort(mult * probs[idxs])[:4]]

def most_by_correct(y, is_correct):
    """Indices of the most confidently (in)correct predictions for class `y`.

    BUG FIX: in Python `&` binds tighter than `==`, so the original
    `(preds == data.val_y)==is_correct & (data.val_y == y)` parsed as
    `(preds == data.val_y) == (is_correct & (data.val_y == y))`,
    producing the wrong mask. The parentheses restore the intended
    "(prediction correctness matches is_correct) AND (label is y)".
    """
    mult = -1 if (y==1)==is_correct else 1
    return most_by_mask(((preds == data.val_y) == is_correct) & (data.val_y == y), mult)

# + hidden=true
plot_val_with_title(most_by_correct(0, True), "Most correct Black-grass")

# + hidden=true
plot_val_with_title(most_by_correct(1, True), "Most correct Charlock")

# + hidden=true
plot_val_with_title(most_by_correct(0, False), "Most incorrect Black-grass")

# + hidden=true
plot_val_with_title(most_by_correct(1, False), "Most incorrect Charlock")

# + hidden=true
most_uncertain = np.argsort(np.abs(probs -0.5))[:4]
plot_val_with_title(most_uncertain, "Most uncertain predictions")

# + [markdown] heading_collapsed=true
# ## Choosing a learning rate

# + [markdown] hidden=true
# The *learning rate* determines how quickly or how slowly you want to update the *weights* (or *parameters*). Learning rate is one of the most difficult parameters to set, because it significantly affect model performance.
#
# The method `learn.lr_find()` helps you find an optimal learning rate. It uses the technique developed in the 2015 paper [Cyclical Learning Rates for Training Neural Networks](http://arxiv.org/abs/1506.01186), where we simply keep increasing the learning rate from a very small value, until the loss starts decreasing. We can plot the learning rate across batches to see what this looks like.
#
# We first create a new learner, since we want to know how to set the learning rate for a new (untrained) model.
# + hidden=true learn = ConvLearner.pretrained(arch, data, precompute=True) # + hidden=true lrf=learn.lr_find() # + [markdown] hidden=true # Our `learn` object contains an attribute `sched` that contains our learning rate scheduler, and has some convenient plotting functionality including this one: # + hidden=true learn.sched.plot_lr() # + [markdown] hidden=true # Note that in the previous plot *iteration* is one iteration (or *minibatch*) of SGD. In one epoch there are # (num_train_samples/num_iterations) of SGD. # # We can see the plot of loss versus learning rate to see where our loss stops decreasing: # + hidden=true learn.sched.plot() # + [markdown] hidden=true # The loss is still clearly improving at lr=1e-2 (0.01), so that's what we use. Note that the optimal learning rate can change as we training the model, so you may want to re-run this function from time to time. # - # ## Improving our model # ### Data augmentation tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1) def get_augs(): data = ImageClassifierData.from_paths(PATH, bs=2, tfms=tfms, test_name='test', num_workers=1) x,_ = next(iter(data.aug_dl)) return data.trn_ds.denorm(x)[1] ims = np.stack([get_augs() for i in range(6)]) plots(ims, rows=2) data = ImageClassifierData.from_paths(PATH, tfms=tfms, test_name='test') learn = ConvLearner.pretrained(arch, data, precompute=True) learn.fit(1e-1, 1) learn.precompute=False learn.fit(1e-1, 3, cycle_len=1) learn.sched.plot_lr() learn.save('224_lastlayer') learn.load('224_lastlayer') # ### Fine-tuning and Differential Learning Rate Annealing learn.unfreeze() lr = np.array([1e-4,1e-3,1e-1]) learn.fit(lr, 5, cycle_len=1, cycle_mult=2) learn.sched.plot_lr() learn.save('224_all') learn.load('224_all') # ## Evaluation val_log_preds, val_y = learn.TTA() accuracy(val_log_preds, val_y) test_log_preds, test_y = learn.TTA(is_test=True) # + test_labels = [] test_preds = np.argmax(test_log_preds, axis=1) for i in range(test_preds.size): 
test_labels.append(data.classes[test_preds[i]]) # - outp = pd.DataFrame({'file': [f[5:] for f in data.test_dl.dataset.fnames], 'species': [''.join(l) for l in test_labels]}) outp.head() outp.to_csv(PATH + 'tmp/subm.csv.gz', compression='gzip', index=None) from IPython.display import FileLink FileLink(PATH + 'tmp/subm.csv.gz')
plant-seedlings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# ### 1 - Greg was 14, Marcia was 12, Peter was 11, Jan was 10, Bobby was 8, and Cindy was 6 when they started playing the Brady kids on The Brady Bunch. <NAME> was 8 years old when he joined the show. What are the mean, median, and mode of the kids' ages when they first appeared on the show? What are the variance, standard deviation, and standard error?

import pandas as pd
import numpy as np
from scipy import stats

# Build the dataset of the seven kids and their ages.
df = pd.DataFrame()
df['Name'] = ['Greg', 'Marcia', 'Peter', 'Jan', 'Bobby', 'Cindy', 'Oliver']
df['Age'] = [14, 12, 11, 10, 8, 6, 8]
df

df.describe()

df['Age'].mean()

np.median(df['Age'])

# FIX: `stats.mode(df['Age'], axis=0)[0][0]` relied on the pre-1.11 SciPy API
# where mode returned arrays; on current SciPy the [0][0] indexing raises.
# pandas' Series.mode is version-stable (both return the smallest value on ties).
df['Age'].mode().iloc[0]

print('The mean, median, and mode are:\nmean: {}\nmedian: {}\nmode: {}'.format(df['Age'].mean(), np.median(df['Age']), df['Age'].mode().iloc[0]))

# Population (ddof=0) variance and standard deviation, and the standard
# error of the mean (sd / sqrt(n)).
vari = df['Age'].var(ddof=0)
std_dev = np.std(df['Age'], ddof=0)
std_err = std_dev / np.sqrt(len(df['Age']))
print('The variance, standard deviation, and standard error are:\nvariance: {}\nstandard deviation: {}\nstandard error: {}'.format(vari, std_dev, std_err))

import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

# ### 2 - Using these estimates, if you had to choose only one estimate of central tendency and one estimate of variance to describe the data, which would you pick and why?

# FIX: seaborn removed `distplot`; `histplot` is the documented replacement.
sns.histplot(df['Age'], bins=7)

# For central tendency, I'd pick the median because with such a small dataset, any large outliers would influence the mean too much. But for now, the mean and median are similar. For variance, I'd choose standard deviation since we don't need the uncertainty of the central tendency.

# ### 3 - Next, Cindy has a birthday. Update your estimates- what changed, and what didn't?
# Cindy's birthday: her age (row index 5) goes from 6 to 7.
df.loc[5, 'Age'] = 7
df

# FIX: `stats.mode(...)[0][0]` relied on the pre-1.11 SciPy array-returning
# API; pandas' Series.mode is version-stable and gives the same result.
print('The mean, median, and mode are:\nmean: {}\nmedian: {}\nmode: {}'.format(df['Age'].mean(), np.median(df['Age']), df['Age'].mode().iloc[0]))

# Mean increased from 9.85 to 10

# +
# Recompute the population spread statistics after the update.
vari = df['Age'].var(ddof=0)
std_dev = np.std(df['Age'], ddof=0)
std_err = std_dev / np.sqrt(len(df['Age']))
print('The variance, standard deviation, and standard error are:\nvariance: {}\nstandard deviation: {}\nstandard error: {}'.format(vari, std_dev, std_err))
# -

# The variance, standard deviation and standard error all decreased

# ### 4 - Nobody likes <NAME>. Maybe the network should have used an even younger actor. Replace <NAME> with 1-year-old Jessica, then recalculate again. Does this change your choice of central tendency or variance estimation methods?

df.drop(df[df['Name']=='Oliver'].index[0], inplace = True)
# FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
# documented replacement (ignore_index avoids a duplicate index label).
df = pd.concat([df, pd.DataFrame([{'Name': 'Jessica', 'Age': 1}])], ignore_index=True)
df

print('The mean, median, and mode are:\nmean: {}\nmedian: {}\nmode: {}'.format(df['Age'].mean(), np.median(df['Age']), df['Age'].mode().iloc[0]))

# +
vari = df['Age'].var(ddof = 0)
std_dev = np.std(df['Age'], ddof=0)
std_err = std_dev / np.sqrt(len(df['Age']))
print('The variance, standard deviation, and standard error are:\nvariance: {}\nstandard deviation: {}\nstandard error: {}'.format(vari, std_dev, std_err))
# -

# As suspected, an outlier affected the mean but not the median. I would still choose to use the median. Variance, std dev and std error all increased, but none as drastically as variance. I will also still continue to use std dev.

# ### 5 - On the 50th anniversary of The Brady Bunch, four different magazines asked their readers whether they were fans of the show. The answers were: TV Guide 20% fans Entertainment Weekly 23% fans Pop Culture Today 17% fans SciPhi Phanatic 5% fans

# Based on these numbers, what percentage of adult Americans would you estimate were Brady Bunch fans on the 50th anniversary of the show?
# Survey results: percentage of each magazine's readers who said they were fans.
df_fans = pd.DataFrame({
    'Magazine': ['TV Guide', 'Entertainment Weekly', 'Pop Culture Today', 'SciPhi Phanatic'],
    'Percent': [20, 23, 17, 5],
})
df_fans.head()

# Taking the raw mean of all four percentages gives an estimate of 16.25%.
# However, SciPhi Phanatic's readership is STEM-minded rather than
# entertainment/pop-culture oriented, so it is unrepresentative of adult
# Americans and it is reasonable to drop it — which raises the estimate to 20%.

# Mean over every magazine.
df_fans['Percent'].mean()

# Mean excluding the SciPhi Phanatic outlier.
df_fans[df_fans['Magazine'] != 'SciPhi Phanatic']['Percent'].mean()
Exercises/Basic_Stats_and_Probability/1 - Summary Statistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ## How to use Error Generator ? # # # ### Clone the project # # ``` git clone https://github.com/BigDaMa/error-generator.git ``` # ### Run setup # please open the command line and put # # # ``` python setup.py install ``` # ### Importing project from error_generator import Explicit_Missing_Value from error_generator import Implicit_Missing_Value from error_generator import White_Noise from error_generator import Gaussian_Noise from error_generator import Random_Active_Domain from error_generator import Similar_Based_Active_Domain from error_generator import Typo_Keyboard from error_generator import Typo_Butterfingers from error_generator import Word2vec_Nearest_Neighbor from error_generator import Value_Selector from error_generator import List_selected from error_generator import Read_Write from error_generator import Error_Generator # ### Load the dataset dataset,dataframe = Read_Write.read_csv_dataset("./datasets/test.csv") # ### Choose your strategy # + mymethod=Typo_Keyboard() # mymethod=Typo_Butterfingers() # mymethod=Similar_Based_Active_Domain() # mymethod=Random_Active_Domain() # mymethod=White_Noise() # mymethod=Gaussian_Noise() # mymethod=Implicit_Missing_Value() # mymethod=Explicit_Missing_Value() # mymethod=Word2vec_Nearest_Neighbor() # - # ### Choose your selector myselector=List_selected() # ### Choose your generator mygen=Error_Generator() # ### Run the program # - Percentage : How much do you need error? # - Mute column: Columns that should be safe in error generator proccess new_dataset=mygen.error_generator(method_gen=mymethod,selector=myselector,percentage=50,dataset=dataset,mute_column=[1,2]) # ### save the output Read_Write.write_csv_dataset("./outputs/{}.csv".format(mymethod.name), new_dataset)
Help.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Reading binary data (MNIST IDX format)

import struct
import numpy as np
import matplotlib.pyplot as plt

filename = 'mnist/t10k-images-idx3-ubyte'
fp = open(filename, 'rb')

# FIX: struct.unpack always returns a tuple; the original bound the whole
# tuple to `magic` and only printed correctly because `"%d" % tuple` treats
# the tuple as the argument list. Unpack the single value explicitly.
(magic,) = struct.unpack('>i', fp.read(4))
print('magic number is "%d"' % (magic))

# IDX header: image count, then rows (height) and columns (width),
# all big-endian 32-bit integers.
n_images, height, width = struct.unpack('>iii', fp.read(4 * 3))
print('#image: %d' % (n_images))
print('size: %d x %d' % (width, height))

# Read the first image: height*width unsigned bytes, one per pixel.
n_pixels = width * height
pixels = struct.unpack('>' + 'B' * n_pixels, fp.read(1 * n_pixels))
pixels = np.asarray(pixels, dtype='uint8')
pixels = pixels.reshape((height, width))

# ## Displaying the data

plt.imshow(pixels)
plt.show()

plt.imshow(pixels, cmap='gray')
plt.show()

# Read the next 100 images for the tiling demos below.
images = []
for i in range(100):
    pixels = struct.unpack('>' + 'B' * n_pixels, fp.read(1 * n_pixels))
    pixels = np.asarray(pixels, dtype='uint8')
    pixels = pixels.reshape((height, width))
    images.append(pixels)

# FIX: the file handle was never closed; everything below only uses the
# already-read `images`, so close it here.
fp.close()

# +
# Tiling with Matplotlib
fig = plt.figure(figsize=(10, 10))
for i in range(10):
    for j in range(10):
        index = i * 10 + j
        image = images[index]
        # axis number `index` of the 10x10 grid (subplot indices are 1-based)
        ax = fig.add_subplot(10, 10, index + 1)
        ax.imshow(image, cmap='gray')
        ax.axis('off')
plt.show()

# +
# A slightly more clever tiling

# Add a margin to every image
margin = 1
pad_images = [np.pad(image, (margin, margin), mode='constant', constant_values=255) for image in images]

# Convert the list of images to a NumPy array and reshape
pad_images = np.asarray(pad_images).reshape((10, 10, height + margin * 2, width + margin * 2))

# Stitch the rows and columns together
image_grid = np.transpose(pad_images, axes=(0, 2, 1, 3))
image_grid = np.reshape(image_grid, (10 * (height + margin * 2), 10 * (width + margin * 2)))

# Display
plt.figure(figsize=(10, 10))
# Render the stitched 10x10 grid as one grayscale image.
plt.imshow(image_grid, cmap='gray')
plt.axis('off')
plt.tight_layout()
plt.show()
_programs/python/logistic/read_binary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # Hypothesis Testing # ================== # # Copyright 2016 <NAME> # # License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/) # + from __future__ import print_function, division import numpy import scipy.stats import matplotlib.pyplot as pyplot from ipywidgets import interact, interactive, fixed import ipywidgets as widgets import first # seed the random number generator so we all get the same results numpy.random.seed(19) # some nicer colors from http://colorbrewer2.org/ COLOR1 = '#7fc97f' COLOR2 = '#beaed4' COLOR3 = '#fdc086' COLOR4 = '#ffff99' COLOR5 = '#386cb0' # %matplotlib inline # - # ## Part One # Suppose you observe an apparent difference between two groups and you want to check whether it might be due to chance. # # As an example, we'll look at differences between first babies and others. The `first` module provides code to read data from the National Survey of Family Growth (NSFG). live, firsts, others = first.MakeFrames() live # We'll look at a couple of variables, including pregnancy length and birth weight. The effect size we'll consider is the difference in the means. # # Other examples might include a correlation between variables or a coefficient in a linear regression. The number that quantifies the size of the effect is called the "test statistic". def TestStatistic(data): group1, group2 = data test_stat = abs(group1.mean() - group2.mean()) return test_stat # For the first example, I extract the pregnancy length for first babies and others. The results are pandas Series objects. group1 = firsts.prglngth group2 = others.prglngth # The actual difference in the means is 0.078 weeks, which is only 13 hours. 
# Observed effect size on the real data.
actual = TestStatistic((group1, group2))
actual

# The null hypothesis is that there is no difference between the groups. We can model that by forming a pooled sample that includes first babies and others.

n, m = len(group1), len(group2)
pool = numpy.hstack((group1, group2))

# Then we can simulate the null hypothesis by shuffling the pool and dividing it into two groups, using the same sizes as the actual sample.

def RunModel():
    """Simulate the null hypothesis once: shuffle the pooled values in place
    and split them into two groups of the original sizes.
    Uses the module-level globals `pool` and `n`."""
    numpy.random.shuffle(pool)
    data = pool[:n], pool[n:]
    return data

# The result of running the model is two NumPy arrays with the shuffled pregnancy lengths:

RunModel()

# Then we compute the same test statistic using the simulated data:

TestStatistic(RunModel())

# If we run the model 1000 times and compute the test statistic, we can see how much the test statistic varies under the null hypothesis.

test_stats = numpy.array([TestStatistic(RunModel()) for i in range(1000)])
test_stats.shape

# Here's the sampling distribution of the test statistic under the null hypothesis, with the actual difference in means indicated by a gray line.

pyplot.vlines(actual, 0, 300, linewidth=3, color='0.8')
pyplot.hist(test_stats, color=COLOR5)
pyplot.xlabel('difference in means')
pyplot.ylabel('count')
None

# The p-value is the probability that the test statistic under the null hypothesis exceeds the actual value.

# True division is safe here thanks to `from __future__ import division` above.
pvalue = sum(test_stats >= actual) / len(test_stats)
pvalue

# In this case the result is about 15%, which means that even if there is no difference between the groups, it is plausible that we could see a sample difference as big as 0.078 weeks.
#
# We conclude that the apparent effect might be due to chance, so we are not confident that it would appear in the general population, or in another sample from the same population.
#
# # STOP HERE
# ---------

# Part Two
# ========
#
# We can take the pieces from the previous section and organize them in a class that represents the structure of a hypothesis test.
class HypothesisTest(object):
    """Represents a hypothesis test."""

    def __init__(self, data):
        """Initializes.

        data: data in whatever form is relevant
        """
        self.data = data
        self.MakeModel()
        self.actual = self.TestStatistic(data)
        self.test_stats = None

    def PValue(self, iters=1000):
        """Computes the distribution of the test statistic and p-value.

        iters: number of iterations

        returns: float p-value
        """
        self.test_stats = numpy.array([self.TestStatistic(self.RunModel())
                                       for _ in range(iters)])

        count = sum(self.test_stats >= self.actual)
        return count / iters

    def MaxTestStat(self):
        """Returns the largest test statistic seen during simulations.
        """
        return max(self.test_stats)

    def PlotHist(self, label=None):
        """Draws a histogram with a vertical line at the observed test stat.
        """
        # BUG FIX: the original read the module-level global `ht`
        # (pyplot.hist(ht.test_stats, ...)), which silently plotted another
        # instance's simulated statistics; use self.test_stats instead.
        ys, xs, patches = pyplot.hist(self.test_stats, color=COLOR4)
        pyplot.vlines(self.actual, 0, max(ys), linewidth=3, color='0.8')
        pyplot.xlabel('test statistic')
        pyplot.ylabel('count')

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: data in whatever form is relevant
        """
        # FIX: the original raised `UnimplementedMethodException`, which is
        # never defined in this notebook (it comes from thinkstats2), so the
        # raise itself would have been a NameError.
        raise NotImplementedError()

    def MakeModel(self):
        """Build a model of the null hypothesis.
        """
        pass

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data
        """
        raise NotImplementedError()

# `HypothesisTest` is an abstract parent class that encodes the template. Child classes fill in the missing methods. For example, here's the test from the previous section.

class DiffMeansPermute(HypothesisTest):
    """Tests a difference in means by permutation."""

    def TestStatistic(self, data):
        """Computes the test statistic.

        data: data in whatever form is relevant
        """
        group1, group2 = data
        test_stat = abs(group1.mean() - group2.mean())
        return test_stat

    def MakeModel(self):
        """Build a model of the null hypothesis.
        """
        group1, group2 = self.data
        self.n, self.m = len(group1), len(group2)
        self.pool = numpy.hstack((group1, group2))

    def RunModel(self):
        """Run the model of the null hypothesis.

        returns: simulated data
        """
        numpy.random.shuffle(self.pool)
        data = self.pool[:self.n], self.pool[self.n:]
        return data

# Now we can run the test by instantiating a DiffMeansPermute object:

data = (firsts.prglngth, others.prglngth)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('\nmeans permute pregnancy length')
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())

# And we can plot the sampling distribution of the test statistic under the null hypothesis.

ht.PlotHist()

# ### Difference in standard deviation
#
# **Exercise 1**: Write a class named `DiffStdPermute` that extends `DiffMeansPermute` and overrides `TestStatistic` to compute the difference in standard deviations. Is the difference in standard deviations statistically significant?

# +
# Solution goes here
# -

# Here's the code to test your solution to the previous exercise.
# NOTE: this cell only runs once you have defined DiffStdPermute above.

data = (firsts.prglngth, others.prglngth)
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=1000)
print('\nstd permute pregnancy length')
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())

# ### Difference in birth weights
#
# Now let's run DiffMeansPermute again to see if there is a difference in birth weight between first babies and others.

data = (firsts.totalwgt_lb.dropna(), others.totalwgt_lb.dropna())
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('\nmeans permute birthweight')
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())

# In this case, after 1000 attempts, we never see a sample difference as big as the observed difference, so we conclude that the apparent effect is unlikely under the null hypothesis. Under normal circumstances, we can also make the inference that the apparent effect is unlikely to be caused by random sampling.
#
# One final note: in this case I would report that the p-value is less than 1/1000 or less than 0.001.
I would not report p=0, because the apparent effect is not impossible under the null hypothesis; just unlikely.
hypothesis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="J_o2L3Io9t4c" # #Deep Computer Vision # # In this guide we will learn how to peform *image classification and object detection/recognition* using deep computer vision with something called a **convolutional neural network**. # # The goal of our convolutional neural networks will be to classify and detect images or specific objects from within the image. We will be using image data as our features and a label for those images as our label or output. # # We already know how neural networks work so we can skip through the basics and move right into explaining the following concepts. # - Image Data # - Convolutional Layer # - Pooling Layer # - CNN Architectures # # The major differences we are about to see in these types of neural networks are the layers that make them up. # + [markdown] id="tdqlqfhLCHZl" # ##Image Data # So far, we have dealt with pretty straight forward data that has 1 or 2 dimensions. Now we are about to deal with image data that is usually made up of 3 dimensions. These 3 dimensions are as follows: # - image height # - image width # - color channels # # The only item in the list above you may not understand is **color channels**. The number of color channels represents the depth of an image and coorelates to the colors used in it. For example, an image with three channels is likely made up of rgb (red, green, blue) pixels. So, for each pixel we have three numeric values in the range 0-255 that define its color. For an image of color depth 1 we would likely have a greyscale image with one value defining each pixel, again in the range of 0-255. # # ![alt text](http://xrds.acm.org/blog/wp-content/uploads/2016/06/Figure1.png) # # Keep this in mind as we discuss how our network works and the input/output of each layer. 
# # # + [markdown] id="9mqznmTh--v2" # ##Convolutional Neural Network # **Note:** I will use the term *convnet* and convolutional neural network interchangably. # # Each convolutional neural network is made up of one or many convolutional layers. These layers are different than the *dense* layers we have seen previously. Their goal is to find patterns from within images that can be used to classify the image or parts of it. But this may sound familiar to what our densly connected neural network in the previous section was doing, well that's becasue it is. # # The fundemental difference between a dense layer and a convolutional layer is that dense layers detect patterns globally while convolutional layers detect patterns locally. When we have a densly connected layer each node in that layer sees all the data from the previous layer. This means that this layer is looking at all the information and is only capable of analyzing the data in a global capacity. Our convolutional layer however will not be densly connected, this means it can detect local patterns using part of the input data to that layer. # # *Let's have a look at how a densly connected layer would look at an image vs how a convolutional layer would.* # # This is our image; the goal of our network will be to determine whether this image is a cat or not. # ![alt text](https://img.webmd.com/dtmcms/live/webmd/consumer_assets/site_images/article_thumbnails/reference_guide/cat_weight_ref_guide/1800x1200_cat_weight_ref_guide.jpg) # # **Dense Layer:** A dense layer will consider the ENTIRE image. It will look at all the pixels and use that information to generate some output. # # **Convolutional Layer:** The convolutional layer will look at specific parts of the image. In this example let's say it analyzes the highlighted parts below and detects patterns there. 
# ![alt text](https://drive.google.com/uc?export=view&id=1M7v7S-b-zisFLI_G4ZY_RdUJQrGpJ3zt) # # Can you see why this might make these networks more useful? # # # # + [markdown] id="CIQvxFu_FB3h" # ###How They Work # A dense neural network learns patterns that are present in one specific area of an image. This means if a pattern that the network knows is present in a different area of the image it will have to learn the pattern again in that new area to be able to detect it. # # *Let's use an example to better illustrate this.* # # We'll consider that we have a dense neural network that has learned what an eye looks like from a sample of dog images. # # ![alt text](https://drive.google.com/uc?export=view&id=16FJKkVS_lZToQOCOOy6ohUpspWgtoQ-c) # # Let's say it's determined that an image is likely to be a dog if an eye is present in the boxed off locations of the image above. # # Now let's flip the image. # ![alt text](https://drive.google.com/uc?export=view&id=1V7Dh7BiaOvMq5Pm_jzpQfJTZcpPNmN0W) # # Since our densly connected network has only recognized patterns globally it will look where it thinks the eyes should be present. Clearly it does not find them there and therefore would likely determine this image is not a dog. Even though the pattern of the eyes is present, it's just in a different location. # # Since convolutional layers learn and detect patterns from different areas of the image, they don't have problems with the example we just illustrated. They know what an eye looks like and by analyzing different parts of the image can find where it is present. # # # + [markdown] id="20J29gz-NroA" # ###Multiple Convolutional Layers # In our models it is quite common to have more than one convolutional layer. Even the basic example we will use in this guide will be made up of 3 convolutional layers. These layers work together by increasing complexity and abstraction at each subsequent layer. 
The first layer might be responsible for picking up edges and short lines, while the second layer will take as input these lines and start forming shapes or polygons. Finally, the last layer might take these shapes and determine which combiantions make up a specific image. # # # # # + [markdown] id="ii-a9rXzRwNi" # ##Feature Maps # You may see me use the term *feature map* throughout this tutorial. This term simply stands for a 3D tensor with two spacial axes (width and height) and one depth axis. Our convolutional layers take feature maps as their input and return a new feature map that reprsents the prescence of spcific filters from the previous feature map. These are what we call *response maps*. # + [markdown] id="OScABB-ScXHx" # ##Layer Parameters # A convolutional layer is defined by two key parameters. # # ####**Filters** # A filter is a m x n pattern of pixels that we are looking for in an image. The number of filters in a convolutional layer reprsents how many patterns each layer is looking for and what the depth of our response map will be. If we are looking for 32 different patterns/filters than our output feature map (aka the response map) will have a depth of 32. Each one of the 32 layers of depth will be a matrix of some size containing values indicating if the filter was present at that location or not. # # Here's a great illustration from the book "Deep Learning with Python" by <NAME> (pg 124). # ![alt text](https://drive.google.com/uc?export=view&id=1HcLvvLKvLCCGuGZPMvKYz437FbbCC2eB) # # ####**Sample Size** # This isn't really the best term to describe this, but each convolutional layer is going to examine n x m blocks of pixels in each image. Typically, we'll consider 3x3 or 5x5 blocks. In the example above we use a 3x3 "sample size". This size will be the same as the size of our filter. 
# # Our layers work by sliding these filters of n x m pixels over every possible position in our image and populating a new feature map/response map indicating whether the filter is present at each location. # # # # + [markdown] id="vnzqr8Dzjchd" # ##Borders and Padding # The more mathematical of you may have realized that if we slide a filter of let's say size 3x3 over our image well consider less positions for our filter than pixels in our input. Look at the example below. # # *Image from "Deep Learning with Python" by <NAME> (pg 126).* # ![alt text](https://drive.google.com/uc?export=view&id=1OEfXrV16NBjwAafgBfYYcWOyBCHqaZ5M) # # This means our response map will have a slightly smaller width and height than our original image. This is fine but sometimes we want our response map to have the same dimensions. We can accomplish this by using something called *padding*. # # **Padding** is simply the addition of the appropriate number of rows and/or columns to your input data such that each pixel can be centered by the filter. # + [markdown] id="yDwH2eOMmt_N" # ##Strides # In the previous sections we assumed that the filters would be slid continously through the image such that it covered every possible position. This is common but sometimes we introduce the idea of a **stride** to our convolutional layer. The stride size reprsents how many rows/cols we will move the filter each time. These are not used very frequently so we'll move on. # + [markdown] id="nCsVC-4UnfC8" # ##Pooling # You may recall that our convnets are made up of a stack of convolution and pooling layers. # # The idea behind a pooling layer is to downsample our feature maps and reduce their dimensions. They work in a similar way to convolutional layers where they extract windows from the feature map and return a response map of the max, min or average values of each channel. Pooling is usually done using windows of size 2x2 and a stride of 2. 
This will reduce the size of the feature map by a factor of two and return a response map that is 2x smaller. # + [markdown] id="9qo85O0LsxbB" # ##A More Detailed Look # Please refer to the video to learn how all of this happens at the lower level! # + [markdown] id="xqLsm2XzNQSE" # ##Creating a Convnet # # Now it is time to create our first convnet! This example is for the purpose of getting familiar with CNN architectures, we will talk about how to improves its performance later. # # *This tutorial is based on the following guide from the TensorFlow documentation: https://www.tensorflow.org/tutorials/images/cnn* # # ###Dataset # The problem we will consider here is classifying 10 different everyday objects. The dataset we will use is built into tensorflow and called the [**CIFAR Image Dataset.**](https://www.cs.toronto.edu/~kriz/cifar.html) It contains 60,000 32x32 color images with 6000 images of each class. # # The labels in this dataset are the following: # - Airplane # - Automobile # - Bird # - Cat # - Deer # - Dog # - Frog # - Horse # - Ship # - Truck # # We'll load the dataset and have a look at some of the images below. 
# # # # + id="bnIbwiK7Ohv2" # %tensorflow_version 2.x # this line is not required unless you are in a notebook import tensorflow as tf from tensorflow.keras import datasets, layers, models import matplotlib.pyplot as plt # + id="49wbEaM1PCCR" # LOAD AND SPLIT DATASET (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() # Normalize pixel values to be between 0 and 1 train_images, test_images = train_images / 255.0, test_images / 255.0 class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # + id="Bp0yAAcuPHFN" # Let's look at a one image IMG_INDEX = 7 # change this to look at other images plt.imshow(train_images[IMG_INDEX] ,cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[IMG_INDEX][0]]) plt.show() # + [markdown] id="aPqeddhcPwpc" # ##CNN Architecture # A common architecture for a CNN is a stack of Conv2D and MaxPooling2D layers followed by a few denesly connected layers. To idea is that the stack of convolutional and maxPooling layers extract the features from the image. Then these features are flattened and fed to densly connected layers that determine the class of an image based on the presence of features. # # We will start by building the **Convolutional Base**. # + id="ibuJZqAXQrWJ" model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) # + [markdown] id="tybTBoi_Qtxl" # **Layer 1** # # The input shape of our data will be 32, 32, 3 and we will process 32 filters of size 3x3 over our input data. We will also apply the activation function relu to the output of each convolution operation. # # **Layer 2** # # This layer will perform the max pooling operation using 2x2 samples and a stride of 2. 
# # **Other Layers** # # The next set of layers do very similar things but take as input the feature map from the previous layer. They also increase the frequency of filters from 32 to 64. We can do this as our data shrinks in spacial dimensions as it passed through the layers, meaning we can afford (computationally) to add more depth. # + id="_QahwuduSEDG" model.summary() # let's have a look at our model so far # + [markdown] id="ZXw-sreaSzTW" # After looking at the summary you should notice that the depth of our image increases but the spacial dimensions reduce drastically. # + [markdown] id="zjtADcfmSI9q" # ##Adding Dense Layers # So far, we have just completed the **convolutional base**. Now we need to take these extracted features and add a way to classify them. This is why we add the following layers to our model. # # # + id="A9TMZH_oSULo" model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10)) # + id="fEzHX-7ESeCl" model.summary() # + [markdown] id="dxfqtdDbSf4W" # We can see that the flatten layer changes the shape of our data so that we can feed it to the 64-node dense layer, follwed by the final output layer of 10 neurons (one for each class). # # # + [markdown] id="wdPxFvHdTLRK" # ##Training # Now we will train and compile the model using the recommended hyper paramaters from tensorflow. # # *Note: This will take much longer than previous models!* # + id="5loIug93TW1E" model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) history = model.fit(train_images, train_labels, epochs=4, validation_data=(test_images, test_labels)) # + [markdown] id="JkdRKQnETgLv" # ##Evaluating the Model # We can determine how well the model performed by looking at it's performance on the test data set. 
# + id="6I2vJFiiTkQE" test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2) print(test_acc) # + [markdown] id="-lKwDlvvUbIm" # You should be getting an accuracy of about 70%. This isn't bad for a simple model like this, but we'll dive into some better approaches for computer vision below. # # # + [markdown] id="cstpZFVaY7YH" # ##Working with Small Datasets # In the situation where you don't have millions of images it is difficult to train a CNN from scratch that performs very well. This is why we will learn about a few techniques we can use to train CNN's on small datasets of just a few thousand images. # + [markdown] id="8D4iWJ17ZRt_" # ###Data Augmentation # To avoid overfitting and create a larger dataset from a smaller one we can use a technique called data augmentation. This is simply performing random transofrmations on our images so that our model can generalize better. These transformations can be things like compressions, rotations, stretches and even color changes. # # Fortunately, keras can help us do this. Look at the code below to an example of data augmentation. 
# # # + id="_sOet0hQZ-gR" from keras.preprocessing import image from keras.preprocessing.image import ImageDataGenerator # creates a data generator object that transforms images datagen = ImageDataGenerator( rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest') # pick an image to transform test_img = train_images[20] img = image.img_to_array(test_img) # convert image to numpy arry img = img.reshape((1,) + img.shape) # reshape image i = 0 for batch in datagen.flow(img, save_prefix='test', save_format='jpeg'): # this loops runs forever until we break, saving images to current directory with specified prefix plt.figure(i) plot = plt.imshow(image.img_to_array(batch[0])) i += 1 if i > 4: # show 4 images break plt.show() # + [markdown] id="nc9RyHPYUnSK" # ###Pretrained Models # You would have noticed that the model above takes a few minutes to train in the NoteBook and only gives an accuaracy of ~70%. This is okay but surely there is a way to improve on this. # # In this section we will talk about using a pretrained CNN as apart of our own custom network to improve the accuracy of our model. We know that CNN's alone (with no dense layers) don't do anything other than map the presence of features from our input. This means we can use a pretrained CNN, one trained on millions of images, as the start of our model. This will allow us to have a very good convolutional base before adding our own dense layered classifier at the end. In fact, by using this techique we can train a very good classifier for a realtively small dataset (< 10,000 images). This is because the convnet already has a very good idea of what features to look for in an image and can find them very effectively. So, if we can determine the presence of features all the rest of the model needs to do is determine which combination of features makes a specific image. 
# # # + [markdown] id="u10oZO1oXT6Y" # ###Fine Tuning # When we employ the technique defined above, we will often want to tweak the final layers in our convolutional base to work better for our specific problem. This involves not touching or retraining the earlier layers in our convolutional base but only adjusting the final few. We do this because the first layers in our base are very good at extracting low level features lile lines and edges, things that are similar for any kind of image. Where the later layers are better at picking up very specific features like shapes or even eyes. If we adjust the final layers than we can look for only features relevant to our very specific problem. # # # + [markdown] id="XolyariNdj5p" # ##Using a Pretrained Model # In this section we will combine the tecniques we learned above and use a pretrained model and fine tuning to classify images of dogs and cats using a small dataset. # # *This tutorial is based on the following guide from the TensorFlow documentation: https://www.tensorflow.org/tutorials/images/transfer_learning* # # # # + id="2nRe9qWmgxm7" #Imports import os import numpy as np import matplotlib.pyplot as plt import tensorflow as tf keras = tf.keras # + [markdown] id="lUx4I_4jg2Tc" # ###Dataset # We will load the *cats_vs_dogs* dataset from the modoule tensorflow_datatsets. # # This dataset contains (image, label) pairs where images have different dimensions and 3 color channels. 
# # # + id="PuGu50NlgreO" import tensorflow_datasets as tfds tfds.disable_progress_bar() # split the data manually into 80% training, 10% testing, 10% validation (raw_train, raw_validation, raw_test), metadata = tfds.load( 'cats_vs_dogs', split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'], with_info=True, as_supervised=True, ) # + id="Mk_MpiQyh-as" get_label_name = metadata.features['label'].int2str # creates a function object that we can use to get labels # display 2 images from the dataset for image, label in raw_train.take(5): plt.figure() plt.imshow(image) plt.title(get_label_name(label)) # + [markdown] id="XCdodmcYiPOF" # ###Data Preprocessing # Since the sizes of our images are all different, we need to convert them all to the same size. We can create a function that will do that for us below. # # # + id="tcoKn1VUieqx" IMG_SIZE = 160 # All images will be resized to 160x160 def format_example(image, label): """ returns an image that is reshaped to IMG_SIZE """ image = tf.cast(image, tf.float32) image = (image/127.5) - 1 image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE)) return image, label # + [markdown] id="wwIB21lailXh" # Now we can apply this function to all our images using ```.map()```. # + id="0E8iqYOAipdU" train = raw_train.map(format_example) validation = raw_validation.map(format_example) test = raw_test.map(format_example) # + [markdown] id="QORLTVNaiqym" # Let's have a look at our images now. # + id="dU5JIa2Jiv9U" for image, label in train.take(2): plt.figure() plt.imshow(image) plt.title(get_label_name(label)) # + [markdown] id="iFnFVaNQi7Vq" # Finally we will shuffle and batch the images. # + id="v5ZIhkFPi_Pb" BATCH_SIZE = 32 SHUFFLE_BUFFER_SIZE = 1000 train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE) validation_batches = validation.batch(BATCH_SIZE) test_batches = test.batch(BATCH_SIZE) # + [markdown] id="6QxI-fOAjDzC" # Now if we look at the shape of an original image vs the new image we will see it has been changed. 
# + id="zyqrCYNOjY9v" for img, label in raw_train.take(2): print("Original shape:", img.shape) for img, label in train.take(2): print("New shape:", img.shape) # + [markdown] id="NMpKJ3Xbj4BW" # ###Picking a Pretrained Model # The model we are going to use as the convolutional base for our model is the **MobileNet V2** developed at Google. This model is trained on 1.4 million images and has 1000 different classes. # # We want to use this model but only its convolutional base. So, when we load in the model, we'll specify that we don't want to load the top (classification) layer. We'll tell the model what input shape to expect and to use the predetermined weights from *imagenet* (Googles dataset). # # # + id="2a09os6dkokI" IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3) # Create the base model from the pre-trained model MobileNet V2 base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet') # + id="uRvMuWoFR2CO" base_model.summary() # + [markdown] id="ckYqfl7Vky3S" # At this point this *base_model* will simply output a shape (32, 5, 5, 1280) tensor that is a feature extraction from our original (1, 160, 160, 3) image. The 32 means that we have 32 layers of differnt filters/features. # + id="yojo6ONzlFGF" for image, _ in train_batches.take(1): pass feature_batch = base_model(image) print(feature_batch.shape) # + [markdown] id="oQ2kn1P_lhsg" # ###Freezing the Base # The term **freezing** refers to disabling the training property of a layer. It simply means we wonโ€™t make any changes to the weights of any layers that are frozen during training. This is important as we don't want to change the convolutional base that already has learned weights. # # # + id="6hXctqtYl8o5" base_model.trainable = False # + id="1jIGFXOrl9wc" base_model.summary() # + [markdown] id="b7UJLbJ7mJzw" # ###Adding our Classifier # Now that we have our base layer setup, we can add the classifier. 
Instead of flattening the feature map of the base layer we will use a global average pooling layer that will average the entire 5x5 area of each 2D feature map and return to us a single 1280 element vector per filter. # # # + id="3uUwG5wrnFD6" global_average_layer = tf.keras.layers.GlobalAveragePooling2D() # + [markdown] id="ejxd7rjInIRp" # Finally, we will add the predicition layer that will be a single dense neuron. We can do this because we only have two classes to predict for. # # # # + id="GA-iVZj9nH_N" prediction_layer = keras.layers.Dense(1) # + [markdown] id="Dn9G9KiFnXu6" # Now we will combine these layers together in a model. # + id="E_IJucQNnXBK" model = tf.keras.Sequential([ base_model, global_average_layer, prediction_layer ]) # + id="fLYdAL2uSt_a" model.summary() # + [markdown] id="NHepCsPXnpYZ" # ###Training the Model # Now we will train and compile the model. We will use a very small learning rate to ensure that the model does not have any major changes made to it. # + id="GQhg2WxHnxra" base_learning_rate = 0.0001 model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy']) # + id="8Fx9nySdoZuL" # We can evaluate the model right now to see how it does before training it on our new images initial_epochs = 3 validation_steps=20 loss0,accuracy0 = model.evaluate(validation_batches, steps = validation_steps) # + id="edMXObctojl6" # Now we can train it on our images history = model.fit(train_batches, epochs=initial_epochs, validation_data=validation_batches) acc = history.history['accuracy'] print(acc) # + id="VUUt3AxA2lf2" model.save("dogs_vs_cats.h5") # we can save the model and reload it at anytime in the future new_model = tf.keras.models.load_model('dogs_vs_cats.h5') # + [markdown] id="2095EQ4Y3qJk" # And that's it for this section on computer vision! 
# + [markdown] id="m8YcdmWUvYae" # ##Object Detection # If you'd like to learn how you can perform object detection and recognition with tensorflow check out the guide below. # # https://github.com/tensorflow/models/tree/master/research/object_detection # + [markdown] id="oEiX-D2f2tvI" # ##Sources # 1. โ€œConvolutional Neural Network (CNN) &nbsp;: &nbsp; TensorFlow Core.โ€ TensorFlow, www.tensorflow.org/tutorials/images/cnn. # 2. โ€œTransfer Learning with a Pretrained ConvNet &nbsp;: &nbsp; TensorFlow Core.โ€ TensorFlow, www.tensorflow.org/tutorials/images/transfer_learning. # 3. <NAME>. Deep Learning with Python. Manning Publications Co., 2018. # #
AI-ML/Tensorflow fcc/Instructor notebooks/Computer Vision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fzantalis/colab_collection/blob/master/Toonify_yourself_ttmai.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="4_s8h-ilzHQc" colab_type="text" # # Toonify yourself! # # ฮ‘ฯ…ฯ„ฮฎ ฮตฮฏฮฝฮฑฮน ฮผฮฏฮฑ ฮตฮปฮฑฯ†ฯฯŽฯ‚ ฮฑฯ€ฮปฮฟฯ€ฮฟฮนฮทฮผฮญฮฝฮท ฮญฮบฮดฮฟฯƒฮท ฯ„ฮฟฯ… notebook ฯ€ฮฟฯ… ฯ€ฮฑฯฮญฯ‡ฮตฯ„ฮฑฮน ฮฑฯ€ฯŒ ฯ„ฮฟ original project: # https://www.justinpinkney.com/toonify-yourself/ # # # ฮ ฯฮนฮฝ ฮพฮตฮบฮนฮฝฮฎฯƒฮฟฯ…ฮผฮต, ฮฒฮตฮฒฮฑฮนฯ‰ฮธฮตฮฏฯ„ฮต ฯŒฯ„ฮน ฮญฯ‡ฮตฯ„ฮต ฮตฯ€ฮนฮปฮญฮพฮตฮน ฯ„ฮฟ **GPU** ฯƒฮฑฮฝ *Runtime Type*. # # (Runtime --> change runtime type --> Hardware accelerator: GPU) # + [markdown] id="vsAbxjfEkUOD" colab_type="text" # # ฮ‘ฯฯ‡ฮนฮบฮฌ ฯ€ฮฑฯ„ฮฌฮผฮต ฯ„ฮฑ ฯ€ฮฑฯฮฑฮบฮฌฯ„ฯ‰ ฮบฮฟฯ…ฮผฯ€ฮฌฮบฮนฮฑ ฮณฮนฮฑ ฮฝฮฑ ฮบฮฑฯ„ฮตฮฒฮฌฯƒฮฟฯ…ฮผฮต ฯ„ฮฟฮฝ ฮบฯŽฮดฮนฮบฮฑ ฯ„ฮทฯ‚ ฮตฯ†ฮฑฯฮผฮฟฮณฮฎฯ‚ ฮบฮฑฮน ฮฝฮฑ ฮตฯ„ฮฟฮนฮผฮฌฯƒฮฟฯ…ฮผฮต ฯ„ฮฟ ฯ€ฮตฯฮนฮฒฮฌฮปฮปฮฟฮฝ ฮผฮฑฯ‚. # + id="PzDuIoMcqfBT" colab_type="code" colab={} # %tensorflow_version 1.x # + id="cuMEHnpmI1Mj" colab_type="code" colab={} # !git clone https://github.com/justinpinkney/stylegan2 # %cd stylegan2 # !nvcc test_nvcc.cu -o test_nvcc -run # + id="7YFk46FLM9qo" colab_type="code" colab={} # !mkdir raw # !mkdir aligned # !mkdir generated # + [markdown] id="3IppG8Z8O19R" colab_type="text" # ## ฮ ฮฑฯ„ฮฎฯƒฯ„ฮต ฯ„ฮฟ ฯ€ฮฑฯฮฑฮบฮฌฯ„ฯ‰ ฮบฮฟฯ…ฮผฯ€ฮฌฮบฮน ฮณฮนฮฑ ฮฝฮฑ ฮฑฮฝฮตฮฒฮฌฯƒฮตฯ„ฮต ฯ„ฮทฮฝ ฮตฮนฮบฯŒฮฝฮฑ ฯƒฮฑฯ‚. 
# # ฮ”ฮตฮฝ ฯ‡ฯฮตฮนฮฌฮถฮตฯ„ฮฑฮน ฮฝฮฑ ฮบฮฌฮฝฮตฯ„ฮต ฮบฮฌฯ€ฮฟฮนฮฑ ฮตฯ€ฮตฮพฮตฯฮณฮฑฯƒฮฏฮฑ ฯƒฯ„ฮทฮฝ ฮตฮนฮบฯŒฮฝฮฑ ฯƒฮฑฯ‚, ฮฑฮปฮปฮฌ ฮณฮนฮฑ ฮฒฮญฮปฯ„ฮนฯƒฯ„ฮฟ ฮฑฯ€ฮฟฯ„ฮญฮปฮตฯƒฮผฮฑ, ฮตฮฏฮฝฮฑฮน ฮบฮฑฮปฯŒ ฮท ฮตฮนฮบฯŒฮฝฮฑ ฯƒฮฑฯ‚ ฮฝฮฑ ฮญฯ‡ฮตฮน ฮฑฮฝฮฌฮปฯ…ฯƒฮท ฮผฮตฮณฮฑฮปฯฯ„ฮตฯฮท ฮฑฯ€ฯŒ 1024x1024 pixels. # # + id="Gj89vqL2l-ix" colab_type="code" colab={} from google.colab import files uploaded = files.upload() for photo in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=photo, length=len(uploaded[photo]))) # + [markdown] id="soJxP7HUm7_j" colab_type="text" # ## ฮšฮฑฯ„ฮตฮฒฮฌฮถฮฟฯ…ฮผฮต ฯ„ฮฑ ฮตฮบฯ€ฮฑฮนฮดฮตฯ…ฮผฮญฮฝฮฑ ฮผฮฟฮฝฯ„ฮญฮปฮฑ # + id="cwVXBFaSuoIU" colab_type="code" colab={} import pretrained_networks blended_url = "https://drive.google.com/uc?id=1L3ek9B4GJ7pv-QcCFK52hqn5DFxRpg6o" ffhq_url = "http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl" _, _, Gs_blended = pretrained_networks.load_networks(blended_url) _, _, Gs = pretrained_networks.load_networks(ffhq_url) # + id="MLUH060th5oQ" colab_type="code" colab={} # !mv {photo} raw/. 
# !python align_images.py raw aligned # + id="ldHXNMYhnYC5" colab_type="code" colab={} # !python project_images.py --num-steps 550 aligned generated # + [markdown] id="SHPPSqgInfAp" colab_type="text" # ## ฮœฮตฯ„ฮฑฯ„ฯฮฟฯ€ฮฎ ฮตฮนฮบฯŒฮฝฮฑฯ‚ ฯƒฮต Cartoon # + id="EHQgAO2yqaew" colab_type="code" colab={} import numpy as np from PIL import Image import dnnlib import dnnlib.tflib as tflib from pathlib import Path latent_dir = Path("generated") latents = latent_dir.glob("*.npy") for latent_file in latents: latent = np.load(latent_file) latent = np.expand_dims(latent,axis=0) synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8) images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs) Image.fromarray(images.transpose((0,2,3,1))[0], 'RGB').save(latent_file.parent / (f"{latent_file.stem}-toon.jpg")) # + [markdown] id="zR2v6nqVnkj4" colab_type="text" # ## ฮ ฯฮฟฮฒฮฟฮปฮฎ ฯ„ฮทฯ‚ ฮตฮนฮบฯŒฮฝฮฑฯ‚ # ฮฃฮทฮผฮตฮฏฯ‰ฯƒฮท. ฮ ฮฑฯฮฑฮบฮฌฯ„ฯ‰ ฮธฮฑ ฮดฮตฮฏฯ„ฮต ฮฝฮฑ ฯ€ฮฑฯฮฟฯ…ฯƒฮนฮฌฮถฮฟฮฝฯ„ฮฑฮน 2 ฮตฮนฮบฯŒฮฝฮตฯ‚. # # ฮ— ฯ€ฯฯŽฯ„ฮท ฮตฮนฮบฯŒฮฝฮฑ ฮตฮฏฮฝฮฑฮน ฮฟ ฯ„ฯฯŒฯ€ฮฟฯ‚ ฯ€ฮฟฯ… ฮท ฮตฯ†ฮฑฯฮผฮฟฮณฮฎ ฮฑฮฝฮฑฯƒฯ…ฮฝฮธฮญฯ„ฮตฮน ฯ„ฮฟ ฯ€ฯฯŒฯƒฯ‰ฯ€ฮฟ ฯ„ฮทฯ‚ ฯ†ฯ‰ฯ„ฮฟฮณฯฮฑฯ†ฮฏฮฑฯ‚ ฯ€ฮฟฯ… ฯ„ฮฟฯ… ฮดฯŽฯƒฮฑฮผฮต ฮผฮต ฮดฮตฮดฮฟฮผฮญฮฝฮฑ ฯ€ฮฟฯ… ฮญฯ‡ฮตฮน ฮฑฯ€ฮฟ ฯ„ฮฟ ฮตฮบฯ€ฮฑฮนฮดฮตฯ…ฮผฮญฮฝฮฟ ฮผฮฟฮฝฯ„ฮญฮปฮฟ ฯ„ฮฟฯ…. # # ฮœฮตฯ„ฮฌ, ฮฑฯ†ฮฟฯ ฮญฯ‡ฮตฮน ฮผฮฌฮธฮตฮน ฮฝฮฑ ฯƒฯ…ฮฝฮธฮญฯ„ฮตฮน ฯ„ฮฟ ฯ€ฯฯŒฯƒฯ‰ฯ€ฮฟ ฮผฮฑฯ‚, ฮดฮฏฮฝฮตฮน ฯ„ฮนฯ‚ ฮฟฮดฮทฮณฮฏฮตฯ‚ ฮณฮนฮฑ ฮฝฮฑ ฯ€ฮฑฯฮฌฮณฮตฮน ฯ„ฮทฮฝ ฮบฮฑฯฯ„ฮฟฯ…ฮฝฮฏฯƒฯ„ฮนฮบฮท ฮตฮบฮดฮฟฯ‡ฮฎ ฯ€ฮฟฯ… ฮธฮญฮปฮฟฯ…ฮผฮต. # + id="tcWXgS5DXata" colab_type="code" colab={} from IPython.display import Image embedded = Image(filename="generated/" + photo.split(".")[0] + "_01.png", width=256) display(embedded) tooned = Image(filename="generated/" + photo.split(".")[0] + "_01-toon.jpg", width=256) display(tooned) # + id="yYPXfOsZpHAR" colab_type="code" colab={}
Toonify_yourself_ttmai.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Getting the test dataset prepared in Lab `1-DataPrep` # Let's load the test dataset from the previous lab 1-DataPrep. # # (If you want, just run all cells. Go to the top toolbar click on `Run -> Run All Cells`) # + import pandas as pd import boto3 import sagemaker sess = boto3.Session() sm = sess.client('sagemaker') role = sagemaker.get_execution_role() # + # Set the paths for the datasets saved locally local_test_path = 'test.csv' test_df = pd.read_csv(local_test_path, header=None) pd.set_option('display.max_columns', 500) # Make sure we can see all of the columns pd.set_option('display.max_rows', 10) # Keep the output on one page test_df.head() # - # If you remember from the previous data preparation lab, we saved the CSV without headers and the features engineered. # Now we'll upload the file to S3 for testing. 
# %store -r bucket # %store -r prefix test_dir = f"{prefix}/data/test" # Upload test dataset to S3 s3uri_test = sagemaker.s3.S3Uploader.upload(local_test_path, f"s3://{bucket}/{test_dir}") s3uri_test # Moving the data to the main directory of this lab: # !cp test-dataset.csv ../ # ### Uploading the model trained in the previous 2-Modeling lab to S3 s3uri_model = sagemaker.s3.S3Uploader.upload("model.tar.gz", f"s3://{bucket}/{prefix}/model") s3uri_model # ### Saving variables to use in the main notebook for this lab # If you want to see in the console, go to S3 and verify the 2 CSV files are there: from IPython.core.display import display, HTML s3_url_placeholder = "https://s3.console.aws.amazon.com/s3/buckets/{}?&prefix={}/" display(HTML(f"<a href={s3_url_placeholder.format(bucket,test_dir)}>S3 Test object</a>")) # ### Saving variables to use in the main notebook for this lab # %store test_dir # %store s3uri_test # %store s3uri_model # [You can now go back to evaluation.ipynb](../evaluation.ipynb)
3-Evaluation/config/pre_setup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ExternalSource operator
#
# In this example, we will see how to use `ExternalSource` operator with Paddle DALI iterator, that allows us to
# use an external data source as an input to the Pipeline.
#
# In order to achieve that, we have to define a Iterator or Generator class which `next` function will
# return one or several `numpy` arrays.

# +
import types
import collections
import numpy as np
from random import shuffle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali as dali
import nvidia.dali.fn as fn

batch_size = 3
epochs = 3
# -

# ### Defining the Iterator


class ExternalInputIterator(object):
    """Iterator feeding (encoded JPEG bytes, label) batches to DALI's external_source.

    Reads ``file_list.txt`` (one ``<jpeg_filename> <label>`` pair per line) and
    shards the file list across devices so that each GPU iterates over a
    disjoint slice of the data set.
    """

    def __init__(self, batch_size, device_id, num_gpus):
        self.images_dir = "../../data/images/"
        self.batch_size = batch_size
        with open(self.images_dir + "file_list.txt", 'r') as f:
            # FIX: the original filter was ``if line is not ''`` — an identity
            # comparison against a string literal (SyntaxWarning on modern
            # Python) that never filtered anything, because lines read from a
            # file still carry their trailing newline.  A truthiness check on
            # the stripped line actually skips blank lines.
            self.files = [line.rstrip() for line in f if line.strip()]
        # whole data set size
        self.data_set_len = len(self.files)
        # based on the device_id and total number of GPUs - world size
        # get proper shard
        self.files = self.files[self.data_set_len * device_id // num_gpus:
                                self.data_set_len * (device_id + 1) // num_gpus]
        self.n = len(self.files)

    def __iter__(self):
        # Restart the epoch: reset the cursor and reshuffle the shard in place.
        self.i = 0
        shuffle(self.files)
        return self

    def __next__(self):
        batch = []
        labels = []

        if self.i >= self.n:
            # Epoch exhausted: prepare the next epoch, then signal the end of
            # this one to the consumer.
            self.__iter__()
            raise StopIteration

        for _ in range(self.batch_size):
            jpeg_filename, label = self.files[self.i % self.n].split(' ')
            batch.append(np.fromfile(self.images_dir + jpeg_filename, dtype=np.uint8))  # we can use numpy
            labels.append(np.uint8([int(label)]))
            self.i += 1
        return (batch, labels)

    def __len__(self):
        return self.data_set_len

    next = __next__

# ### Defining the Pipeline
#
# Now let's define our pipeline.
# We need an instance of ``Pipeline`` class and some operators which will define the processing graph. Our external source provides 2 outputs which we can conveniently unpack by specifying ``num_outputs=2`` in the external source operator.


def ExternalSourcePipeline(batch_size, num_threads, device_id, external_data):
    """Build a DALI pipeline that decodes, resizes and casts images from ``external_data``.

    ``external_data`` is any iterable yielding (encoded JPEGs, labels) pairs,
    e.g. an ``ExternalInputIterator`` instance.  Returns the constructed
    (but not yet built) ``Pipeline`` with two outputs: images and labels.
    """
    pipe = Pipeline(batch_size, num_threads, device_id)
    with pipe:
        # One external_source node with two unpacked outputs (images, labels).
        jpegs, labels = fn.external_source(source=external_data, num_outputs=2)
        # "mixed" device: JPEG decoding starts on CPU and finishes on GPU.
        images = fn.decoders.image(jpegs, device="mixed")
        images = fn.resize(images, resize_x=240, resize_y=240)
        output = fn.cast(images, dtype=dali.types.UINT8)
        pipe.set_outputs(output, labels)
    return pipe

# ### Using the Pipeline
#
# In the end, let us see how it works.
#
# `last_batch_padded` and `last_batch_policy` are set here only for the demonstration purposes. The user may write any custom code and change the epoch size from epoch to epoch. In that case, it is recommended to set `size` to -1 and let the iterator just wait for StopIteration exception from `iter_setup`.
#
# The `last_batch_padded` here tells the iterator that the difference between dataset size and batch size alignment is padded by real data that could be skipped when provided to the framework (`last_batch_policy`):

# +
from nvidia.dali.plugin.paddle import DALIClassificationIterator as PaddleIterator
from nvidia.dali.plugin.paddle import LastBatchPolicy

# Single-GPU demo: device_id=0 out of 1 GPU, so the iterator sees the full data set.
eii = ExternalInputIterator(batch_size, 0, 1)
pipe = ExternalSourcePipeline(batch_size=batch_size, num_threads=2, device_id = 0,
                              external_data = eii)
pii = PaddleIterator(pipe, last_batch_padded=True, last_batch_policy=LastBatchPolicy.PARTIAL)

for e in range(epochs):
    for i, data in enumerate(pii):
        # data[0]["data"] holds the image batch; its length is the real
        # (possibly partial) batch size for this iteration.
        print("epoch: {}, iter {}, real batch size: {}".format(e, i, len(np.array(data[0]["data"]))))
    # reset() must be called after each epoch so the iterator can restart.
    pii.reset()
docs/examples/frameworks/paddle/paddle-external_input.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # ## Deep Learning # # ## Project: Build a Traffic Sign Recognition Classifier # --- # ## Step 0: Load The Data # + import pickle import os data_dir = "./traffic-signs-data/" training_file = os.path.join(data_dir, "train.p") validation_file= os.path.join(data_dir, "valid.p") testing_file = os.path.join(data_dir, "test.p") with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] print("Loaded data.") # - # --- # # ## Step 1: Dataset Summary & Exploration # # The pickled data is a dictionary with 4 key/value pairs: # # - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). # - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. # - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image. # - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. 
THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** # ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas # + import numpy as np n_train = len(X_train) n_validation = len(X_valid) n_test = len(X_test) image_shape = X_train[0].shape classes = sorted(np.unique(y_train)) n_classes = len(classes) print("Number of training examples =", n_train) print("Number of validation examples =", n_validation) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) # - # ### Build Class Mapping # + import pandas class_to_description = pandas.DataFrame.from_csv("./traffic-signs-data/signnames.csv").to_dict('index') # - # ### Exploratory Visualization # Visualize the German Traffic Signs Dataset using the pickled file(s). # + import matplotlib.pyplot as plt # %matplotlib inline frequencies, classes, *_ = plt.hist(y_train, bins=np.arange(y_train.min(), y_train.max() + 1)) for (freq, cls) in reversed(sorted(zip(frequencies, classes))): print("{}: {} ({})".format( str(cls).rjust(2, " "), str(int(freq)).rjust(4, " "), class_to_description[cls]["SignName"], )) plt.savefig("./artifacts/training_data_distribution.png") # + frequencies, classes, *_ = plt.hist(y_valid, bins=np.arange(y_valid.min(), y_valid.max() + 1)) for (freq, cls) in reversed(sorted(zip(frequencies, classes))): print("{}: {} ({})".format( str(cls).rjust(2, " "), str(int(freq)).rjust(4, " "), class_to_description[cls]["SignName"], )) plt.savefig("./artifacts/validation_data_distribution.png") # + frequencies, classes, *_ = plt.hist(y_test, bins=np.arange(y_test.min(), y_test.max() + 1)) for (freq, cls) in reversed(sorted(zip(frequencies, classes))): print("{}: {} ({})".format( str(cls).rjust(2, " "), str(int(freq)).rjust(4, " "), class_to_description[cls]["SignName"], )) plt.savefig("./artifacts/test_data_distribution.png") # - # ---- # # ## Step 2: Design and Test a Model Architecture # # Design and 
implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). # # The LeNet-5 implementation is my starting point. # # There are various aspects to consider when thinking about this problem: # # - Neural network architecture (is the network over or underfitting?) # - Play around preprocessing techniques (normalization, rgb to grayscale, etc) # - Number of examples per label (some have more than others). # - Generate fake data. # ### Pre-process # + from sklearn.utils import shuffle def preprocess(X, y): """ Pipeline for pre-processing images along with their labels """ # Normalizing values to be [-1, 1] X_ret = np.subtract(X, 128.) / 128 # Shuffling data return X_ret, y X_train, y_train = preprocess(X_train, y_train) # - # ### Model Architecture # + import tensorflow as tf from tensorflow.contrib.layers import flatten def Model(x, dropout_rate: "tensor", initial_shape, n_classes=43, mu=0, sigma=0.1): # Layer 1: Convolutional. in_x, in_y, in_depth = initial_shape conv1_W = tf.Variable(tf.truncated_normal( shape=(5, 5, in_depth, 10), mean=mu, stddev=sigma, )) conv1_b = tf.Variable(tf.zeros(10)) conv1 = tf.nn.conv2d( x, conv1_W, strides=[1, 1, 1, 1], padding='VALID' ) + conv1_b conv1 = tf.nn.relu(conv1) conv1 = tf.nn.max_pool( conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', ) # Layer 2: Convolutional. conv2_W = tf.Variable(tf.truncated_normal( shape=(5, 5, 10, 16), mean=mu, stddev=sigma, )) conv2_b = tf.Variable(tf.zeros(16)) conv2 = tf.nn.conv2d( conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID', ) + conv2_b conv2 = tf.nn.relu(conv2) conv2 = tf.nn.max_pool( conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', ) # Flatten. fc0 = flatten(conv2) # Layer 3: Fully Connected. 
fc1_W = tf.Variable(tf.truncated_normal( shape=(400, 190), mean=mu, stddev=sigma, )) fc1_b = tf.Variable(tf.zeros(190)) fc1 = tf.matmul(fc0, fc1_W) + fc1_b fc1 = tf.nn.relu(fc1) fc1 = tf.nn.dropout(fc1, dropout_rate) # Layer 4: Fully Connected. fc2_W = tf.Variable(tf.truncated_normal( shape=(190, 90), mean=mu, stddev=sigma, )) fc2_b = tf.Variable(tf.zeros(90)) fc2 = tf.matmul(fc1, fc2_W) + fc2_b fc2 = tf.nn.relu(fc2) fc2 = tf.nn.dropout(fc2, dropout_rate) # Layer 5: Fully Connected. fc3_W = tf.Variable(tf.truncated_normal( shape=(90, n_classes), mean=mu, stddev=sigma, )) fc3_b = tf.Variable(tf.zeros(n_classes)) logits = tf.matmul(fc2, fc3_W) + fc3_b return logits # - # ### Train and Validate # + rate = 0.001 initial_shape = (32, 32, 3) x = tf.placeholder(tf.float32, (None, *initial_shape)) y = tf.placeholder(tf.int32, (None)) one_hot_y = tf.one_hot(y, n_classes) keep_prob = tf.placeholder(tf.float32) logits = Model(x, keep_prob, initial_shape, n_classes=n_classes) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() def evaluate(X_data, y_data, kp, batch_size=128): """Run inside a session""" num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, batch_size): batch_x, batch_y = X_data[offset:offset + batch_size], y_data[offset:offset + batch_size] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: kp}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples print("Ready.") # + EPOCHS = 40 BATCH_SIZE = 512 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) 
num_examples = len(X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5}) validation_accuracy = evaluate(X_valid, y_valid, 1.0, batch_size=BATCH_SIZE) print("EPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() if (validation_accuracy > .93): saver.save(sess, "./models/model.{:.3f}.model".format(validation_accuracy)) print("Model saved") # - # ### Test and Retrieve Accuracies # + from matplotlib import image as mpimg training_accuracy = 0. validation_accuracy = 0. test_accuracy = 0. with tf.Session() as sess: saver.restore(sess, "./models/model.0.950.model") # manually selected training_accuracy = evaluate(X_train, y_train, 1.0, batch_size=BATCH_SIZE) validation_accuracy = evaluate(X_valid, y_valid, 1.0, batch_size=BATCH_SIZE) test_accuracy = evaluate(X_test, y_test, 1.0, batch_size=BATCH_SIZE) print("Training Data Accuracy: {:.1f}".format(training_accuracy * 100.)) print("Validation Data Accuracy: {:.1f}".format(validation_accuracy * 100.)) print("Test Data Accuracy: {:.1f}".format(test_accuracy * 100.)) # - # --- # # ## Step 3: Test a Model on New Images # # New images are from the internet, downloaded from here: http://benchmark.ini.rub.de/?section=gtsdb&subsection=dataset (had to manually edit the images to focus on a sign and be 32x32 before feeding into my classifier). The website's full dataset download also includes a csv mapping signs in images to classifications, so I can compare the answers (looks like the same class-to-sign-name mapping is used - talk about consistency). # # The file `signnames.csv` is useful here because it contains mappings from the class id to the actual sign name. I built the mapping near the beginning. 
# +
from matplotlib import image as mpimg
from glob import glob

# Load the hand-cropped 32x32 sample images in a deterministic (sorted) order
# so their positions line up with the `correct` label list below.
image_files = sorted(glob("./sample_images/*"))
images = list(map(mpimg.imread, image_files))

# Top-5 softmax probabilities per image: element 0 of the run result holds the
# probabilities, element 1 the corresponding class ids.
soft = tf.nn.softmax(logits)
top_k = tf.nn.top_k(soft, 5)
top_k_value = None

with tf.Session() as sess:
    saver.restore(sess, "./models/model.0.950.model") # manually selected
    # `0.950` in the checkpoint name indicates 95.0% accuracy on the validation set
    top_k_value = sess.run(
        top_k,
        feed_dict={x: images, keep_prob: 1.0}  # keep_prob 1.0: no dropout at inference
    )

ranking_assuredness = top_k_value[0]  # top-5 softmax probabilities per image
ranking_indexes = top_k_value[1]      # top-5 predicted class ids per image

correct = [11, 40, 39, 4, 21] # obtained from downloaded csv with classifications

def show_image(index):
    """Display sample image `index` with its true label and the model's top-5 guesses."""
    plt.imshow(images[index])
    print("Actual: {} ({})".format(correct[index], class_to_description[correct[index]]["SignName"]))
    for (cls, assuredness) in zip(ranking_indexes[index], ranking_assuredness[index]):
        print("{0} (class: {1}) {2:.2f} {3}".format(class_to_description[cls]["SignName"], cls, assuredness, "(correct)" if correct[index] == cls else ""))
# -

show_image(0)

show_image(1)

show_image(2)

show_image(3)

show_image(4)

# Accuracy on 5 images:

# Idiomatic top-1 accuracy: sum of boolean matches / number of images
# (replaces the previous map/filter/lambda chains; identical result).
picks = [ranking[0] for ranking in ranking_indexes]
matches = [pick == actual for pick, actual in zip(picks, correct)]
print(sum(matches) / float(len(matches)))
Traffic_Sign_Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/vrry/learning-phyton/blob/learning-tuples/PY0101EN_2_1_Tuples.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="bCBvPP7GLMRr" colab_type="text"
# <img src = "https://i.imgur.com/AKXoLze.jpg" align = "center">

# + [markdown] id="Z106EbNrLMRs" colab_type="text"
#
# <h1>Tuples in Python</h1>

# + [markdown] id="gBXL_BoyLMRt" colab_type="text"
# <p><strong>Welcome!</strong> This notebook will teach you about tuples in the Python Programming Language. By the end of this lab, you'll know the basic tuple operations in Python, including indexing, slicing and sorting.</p>

# + [markdown] id="MXxMSIBELMRu" colab_type="text"
#
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#     <ul>
#         <li>
#             <a href="#dataset">About the Dataset</a>
#         </li>
#         <li>
#             <a href="#tuple">Tuples</a>
#             <ul>
#                 <li><a href="#index">Indexing</a></li>
#                 <li><a href="#slice">Slicing</a></li>
#                 <li><a href="#sort">Sorting</a></li>
#             </ul>
#         </li>
#         <li>
#             <a href="#quiz">Quiz on Tuples</a>
#         </li>
#     </ul>
#     <p>
#         Estimated time needed: <strong>15 min</strong>
#     </p>
# </div>
#
# <hr>

# + [markdown] id="6rlsqKcFLMRv" colab_type="text"
#
# <h2 id="dataset">About the Dataset</h2>

# + [markdown] id="AFK5PWOSLMRw" colab_type="text"
# Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album.
# # The table has one row for each movie and several columns: # # - **artist** - Name of the artist # - **album** - Name of the album # - **released_year** - Year the album was released # - **length_min_sec** - Length of the album (hours,minutes,seconds) # - **genre** - Genre of the album # - **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/) # - **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/) # - **date_released** - Date on which the album was released # - **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N) # - **rating_of_friends** - Indicates the rating from your friends from 1 to 10 # <br> # <br> # # The dataset can be seen below: # # <font size="1"> # <table font-size:xx-small style="width:25%"> # <tr> # <th>Artist</th> # <th>Album</th> # <th>Released</th> # <th>Length</th> # <th>Genre</th> # <th>Music recording sales (millions)</th> # <th>Claimed sales (millions)</th> # <th>Released</th> # <th>Soundtrack</th> # <th>Rating (friends)</th> # </tr> # <tr> # <td><NAME></td> # <td>Thriller</td> # <td>1982</td> # <td>00:42:19</td> # <td>Pop, rock, R&B</td> # <td>46</td> # <td>65</td> # <td>30-Nov-82</td> # <td></td> # <td>10.0</td> # </tr> # <tr> # <td>AC/DC</td> # <td>Back in Black</td> # <td>1980</td> # <td>00:42:11</td> # <td>Hard rock</td> # <td>26.1</td> # <td>50</td> # <td>25-Jul-80</td> # <td></td> # <td>8.5</td> # </tr> # <tr> # <td><NAME></td> # <td>The Dark Side of the Moon</td> # <td>1973</td> # <td>00:42:49</td> # <td>Progressive rock</td> # <td>24.2</td> # <td>45</td> # <td>01-Mar-73</td> # <td></td> # <td>9.5</td> # </tr> # <tr> # <td><NAME></td> # <td>The Bodyguard</td> # <td>1992</td> # <td>00:57:44</td> # <td>Soundtrack/R&B, soul, pop</td> # <td>26.1</td> # <td>50</td> # <td>25-Jul-80</td> # <td>Y</td> # <td>7.0</td> # </tr> # <tr> # <td>Meat Loaf</td> # <td>Bat Out of Hell</td> 
# <td>1977</td> # <td>00:46:33</td> # <td>Hard rock, progressive rock</td> # <td>20.6</td> # <td>43</td> # <td>21-Oct-77</td> # <td></td> # <td>7.0</td> # </tr> # <tr> # <td>Eagles</td> # <td>Their Greatest Hits (1971-1975)</td> # <td>1976</td> # <td>00:43:08</td> # <td>Rock, soft rock, folk rock</td> # <td>32.2</td> # <td>42</td> # <td>17-Feb-76</td> # <td></td> # <td>9.5</td> # </tr> # <tr> # <td>Bee Gees</td> # <td>Saturday Night Fever</td> # <td>1977</td> # <td>1:15:54</td> # <td>Disco</td> # <td>20.6</td> # <td>40</td> # <td>15-Nov-77</td> # <td>Y</td> # <td>9.0</td> # </tr> # <tr> # <td>Fleetwood Mac</td> # <td>Rumours</td> # <td>1977</td> # <td>00:40:01</td> # <td>Soft rock</td> # <td>27.9</td> # <td>40</td> # <td>04-Feb-77</td> # <td></td> # <td>9.5</td> # </tr> # </table></font> # + [markdown] id="JV6QZAP3LMRx" colab_type="text" # <hr> # + [markdown] id="atolV-ZlLMRy" colab_type="text" # # <h2 id="tuple">Tuples</h2> # + [markdown] id="M-ra_wHrLMRz" colab_type="text" # In Python, there are different data types: string, integer and float. These data types can all be contained in a tuple as follows: # + [markdown] id="cZQHnY3ZLMR0" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesType.png" width="750" align="center" /> # + [markdown] id="f7ppmLu3LMR0" colab_type="text" # Now, let us create your first tuple with string, integer and float. # + id="5A1Dyu6uLMR1" colab_type="code" outputId="8e9c3afa-462f-499a-bae6-a19efb3aa44f" colab={"base_uri": "https://localhost:8080/", "height": 34} # Create your first tuple tuple1 = ("disco",10,1.2 ) tuple1 # + [markdown] id="PCN2RBScLMR6" colab_type="text" # The type of variable is a **tuple**. 
# + id="HqcMtsD5LMR7" colab_type="code" outputId="6c47b4e1-52d1-4190-e623-a3b1161592fe" colab={"base_uri": "https://localhost:8080/", "height": 34} # Print the type of the tuple you created type(tuple1) # + [markdown] id="nGkgJjjSLMR-" colab_type="text" # ## <h3 id="index">Indexing</h3> # + [markdown] id="qqisI6BpLMR_" colab_type="text" # Each element of a tuple can be accessed via an index. The following table represents the relationship between the index and the items in the tuple. Each element can be obtained by the name of the tuple followed by a square bracket with the index number: # + [markdown] id="DVjMN-rJLMSA" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesIndex.gif" width="750" align="center"> # + [markdown] id="8H5SnHyNLMSC" colab_type="text" # We can print out each value in the tuple: # + id="a3fEZDCoLMSC" colab_type="code" outputId="d8c1f3ba-cb6c-432e-81c4-d76df8430e92" colab={"base_uri": "https://localhost:8080/", "height": 68} # Print the variable on each index print(tuple1[0]) print(tuple1[1]) print(tuple1[2]) # + [markdown] id="0lh5yWRCLMSF" colab_type="text" # We can print out the **type** of each value in the tuple: # # + id="r4WbqrOWLMSH" colab_type="code" outputId="1371caaa-a005-431b-9d69-e5bf3f4354f8" colab={"base_uri": "https://localhost:8080/", "height": 68} # Print the type of value on each index print(type(tuple1[0])) print(type(tuple1[1])) print(type(tuple1[2])) # + [markdown] id="Dr0_u20qLMSL" colab_type="text" # We can also use negative indexing. 
We use the same table above with corresponding negative values: # + [markdown] id="75eDuURYLMSN" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNeg.png" width="750" align="center"> # + [markdown] id="G6Z7T0u3LMSO" colab_type="text" # We can obtain the last element as follows (this time we will not use the print statement to display the values): # + id="G29nBqUULMSQ" colab_type="code" outputId="cbc0ad09-fbda-4e34-af2a-a75b0a17b41e" colab={"base_uri": "https://localhost:8080/", "height": 34} # Use negative index to get the value of the last element tuple1[-1] # + [markdown] id="qHWV54mxLMSU" colab_type="text" # We can display the next two elements as follows: # + id="6EGs_zT_LMSV" colab_type="code" outputId="2bedd7e8-bbb1-4602-e2df-3df804722d4e" colab={"base_uri": "https://localhost:8080/", "height": 34} # Use negative index to get the value of the second last element tuple1[-2] # + id="DmqFZhA3LMSX" colab_type="code" outputId="ae329ed2-15c5-430c-df8f-826e184413f0" colab={"base_uri": "https://localhost:8080/", "height": 34} # Use negative index to get the value of the third last element tuple1[-3] # + [markdown] id="zE3i5-JTLMSb" colab_type="text" # ## <h3 id="concate">Concatenate Tuples</h3> # + [markdown] id="x39WDmz7LMSc" colab_type="text" # We can concatenate or combine tuples by using the **+** sign: # + id="8eQXaMoFLMSe" colab_type="code" outputId="40f25f80-2c00-406d-b226-09821322c627" colab={"base_uri": "https://localhost:8080/", "height": 34} # Concatenate two tuples tuple2 = tuple1 + ("hard rock", 10) tuple2 # + [markdown] id="WZkw1CGyLMSh" colab_type="text" # We can slice tuples obtaining multiple values as demonstrated by the figure below: # + [markdown] id="4Burl5JJLMSi" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesSlice.gif" width="750" align="center"> # + 
[markdown] id="hpIKx5tRLMSj" colab_type="text" # ## <h3 id="slice">Slicing</h3> # + [markdown] id="OeopJYHkLMSk" colab_type="text" # We can slice tuples, obtaining new tuples with the corresponding elements: # + id="kA9im5JyLMSl" colab_type="code" outputId="544db9b3-9a7a-465e-9fee-e151c142c959" colab={"base_uri": "https://localhost:8080/", "height": 34} # Slice from index 0 to index 2 tuple2[0:3] # + [markdown] id="xYrS7ZZPLMSn" colab_type="text" # We can obtain the last two elements of the tuple: # + id="ig-dupGpLMSo" colab_type="code" outputId="1f968263-8409-47c8-c41d-3a1293b5aaf0" colab={"base_uri": "https://localhost:8080/", "height": 34} # Slice from index 3 to index 4 tuple2[3:5] # + [markdown] id="2jQcPoTsLMSs" colab_type="text" # We can obtain the length of a tuple using the length command: # + id="u9eZ0gD7LMSt" colab_type="code" outputId="fc30d31b-0629-4a5d-d283-9d32a34a9a19" colab={"base_uri": "https://localhost:8080/", "height": 34} # Get the length of tuple len(tuple2) # + [markdown] id="W_LvUJsXLMSz" colab_type="text" # This figure shows the number of elements: # + [markdown] id="7L4VuwEULMS0" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesElement.png" width="750" align="center"> # + [markdown] id="UYPSIgYjLMS1" colab_type="text" # ## <h3 id="sort">Sorting</h3> # + [markdown] id="sqib5DqKLMS2" colab_type="text" # Consider the following tuple: # + id="LNGG2lTwLMS3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b933ef6d-74af-40cc-b320-eea23bc27878" # A sample tuple Ratings = (0, 9, 6, 5, 10, 8, 9, 6, 2) Ratings # + [markdown] id="Csrz0XdmLMS6" colab_type="text" # We can sort the values in a tuple and save it to a new tuple: # + id="fU_EvDZSLMS6" colab_type="code" outputId="5b99d0dc-2767-4ace-dd0b-4df0d43648a1" colab={"base_uri": "https://localhost:8080/", "height": 34} # Sort the tuple RatingsSorted = sorted(Ratings) 
RatingsSorted # + [markdown] id="-lr7ji7XLMS-" colab_type="text" # ## <h3 id="nest">Nested Tuple</h3> # + [markdown] id="sBFDa5HzLMS_" colab_type="text" # A tuple can contain another tuple as well as other more complex data types. This process is called 'nesting'. Consider the following tuple with several elements: # + id="1Vlt3R1_LMTA" colab_type="code" colab={} # Create a nest tuple NestedT =(1, 2, ("pop", "rock") ,(3,4),("disco",(1,2))) # + [markdown] id="ydPapAX1LMTD" colab_type="text" # Each element in the tuple including other tuples can be obtained via an index as shown in the figure: # + [markdown] id="0y72yib8LMTE" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestOne.png" width="750" align="center"> # + id="K5h-VBiOLMTF" colab_type="code" outputId="70838eb7-e38b-45a5-ee6c-a0e2c1b99a10" colab={"base_uri": "https://localhost:8080/", "height": 102} # Print element on each index print("Element 0 of Tuple: ", NestedT[0]) print("Element 1 of Tuple: ", NestedT[1]) print("Element 2 of Tuple: ", NestedT[2]) print("Element 3 of Tuple: ", NestedT[3]) print("Element 4 of Tuple: ", NestedT[4]) # + [markdown] id="GK0CSaEzLMTH" colab_type="text" # We can use the second index to access other tuples as demonstrated in the figure: # + [markdown] id="ykExShAyLMTH" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestTwo.png" width="750" align="center"> # + [markdown] id="uo8Y_vPSLMTI" colab_type="text" # We can access the nested tuples : # + id="WRc0ccQMLMTJ" colab_type="code" outputId="650b8c39-7dc4-46b3-b238-f0c3cc8f7fd3" colab={"base_uri": "https://localhost:8080/", "height": 119} # Print element on each index, including nest indexes print("Element 2, 0 of Tuple: ", NestedT[2][0]) print("Element 2, 1 of Tuple: ", NestedT[2][1]) print("Element 3, 0 of Tuple: ", NestedT[3][0]) 
print("Element 3, 1 of Tuple: ", NestedT[3][1]) print("Element 4, 0 of Tuple: ", NestedT[4][0]) print("Element 4, 1 of Tuple: ", NestedT[4][1]) # + [markdown] id="sYTnYgi1LMTM" colab_type="text" # We can access strings in the second nested tuples using a third index: # + id="dDZ4qKtqLMTO" colab_type="code" outputId="7574e62c-3775-492e-ed54-23129d965667" colab={"base_uri": "https://localhost:8080/", "height": 34} # Print the first element in the second nested tuples NestedT[2][1][0] # + id="pO0_qAtRLMTQ" colab_type="code" outputId="587c9fa5-68c8-483c-b6a6-0c0a3b35ca46" colab={"base_uri": "https://localhost:8080/", "height": 34} # Print the second element in the second nested tuples NestedT[2][1][1] # + [markdown] id="z_8ilxrILMTS" colab_type="text" # We can use a tree to visualise the process. Each new index corresponds to a deeper level in the tree: # + [markdown] id="4uYkm5YmLMTT" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestThree.gif" width="750" align="center"> # + [markdown] id="t-Y3txw0LMTU" colab_type="text" # Similarly, we can access elements nested deeper in the tree with a fourth index: # + id="pAH1BrGULMTV" colab_type="code" outputId="42f461c0-8cab-451e-c6ae-952571cd683f" colab={"base_uri": "https://localhost:8080/", "height": 34} # Print the first element in the second nested tuples NestedT[4][1][0] # + id="1XdvI23tLMTY" colab_type="code" outputId="0d26326b-ace6-4876-de6f-34f99c1cbfec" colab={"base_uri": "https://localhost:8080/", "height": 34} # Print the second element in the second nested tuples NestedT[4][1][1] # + [markdown] id="dtNJmV1bLMTa" colab_type="text" # The following figure shows the relationship of the tree and the element <code>NestedT[4][1][1]</code>: # + [markdown] id="jA-Kx5N8LMTc" colab_type="text" # <img 
src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesNestFour.gif" width="750" align="center"> # + [markdown] id="u7kGPbNRLMTe" colab_type="text" # # <h2 id="quiz">Quiz on Tuples</h2> # + [markdown] id="kBG-o-FkLMTf" colab_type="text" # Consider the following tuple: # + id="03MEjWrqLMTg" colab_type="code" outputId="f79d4475-4ab4-4150-b31a-853dfc46d510" colab={"base_uri": "https://localhost:8080/", "height": 153} # sample tuple genres_tuple = ("pop", "rock", "soul", "hard rock", "soft rock", \ "R&B", "progressive rock", "disco") genres_tuple # + [markdown] id="nNtiszsfLMTi" colab_type="text" # Find the length of the tuple, <code>genres_tuple</code>: # + id="_XXyC3ZRLMTi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9c3ebeae-9649-487b-fc9f-7b27db6d3d48" # Write your code below and press Shift+Enter to execute len(genres_tuple) # + [markdown] id="SwoUbZ_fLMTl" colab_type="text" # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/TuplesQuiz.png" width="1100" align="center"> # + [markdown] id="nB4I6mKlLMTl" colab_type="text" # Double-click __here__ for the solution. # # <!-- Your answer is below: # len(genres_tuple) # --> # + [markdown] id="YBMRBDDsLMTm" colab_type="text" # Access the element, with respect to index 3: # + id="byprHW0mLMTm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7e88a7ad-1103-4fdb-90ee-da41ab8461e3" # Write your code below and press Shift+Enter to execute genres_tuple[:3] # + [markdown] id="Sf44diSmLMTo" colab_type="text" # Double-click __here__ for the solution. 
# # <!-- Your answer is below: # genres_tuple[3] # --> # + [markdown] id="1PogEK8KLMTp" colab_type="text" # Use slicing to obtain indexes 3, 4 and 5: # + id="Ud6I8iJ9LMTp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d4a35deb-ca75-4b47-8422-ac52673e1892" # Write your code below and press Shift+Enter to execute genres_tuple[3:6] # + [markdown] id="ut2yAAaSLMTr" colab_type="text" # Double-click __here__ for the solution. # # <!-- Your answer is below: # genres_tuple[3:6] # --> # + [markdown] id="z13HxHvlLMTw" colab_type="text" # Find the first two elements of the tuple <code>genres_tuple</code>: # + id="_YAJrkvZLMTx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="987e6652-17a7-4644-ce32-0d1d3d7a314c" # Write your code below and press Shift+Enter to execute genres_tuple[0][0:2] # + [markdown] id="6C7t186PLMT3" colab_type="text" # Double-click __here__ for the solution. # # <!-- Your answer is below: # genres_tuple[0:2] # --> # + [markdown] id="2AaR31BTLMT4" colab_type="text" # Find the first index of <code>"disco"</code>: # + id="rVXYpykiLMT5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a2c69e70-3561-4004-b606-74717fe3e481" # Write your code below and press Shift+Enter to execute genres_tuple[7][0] temp = genres_tuple.index('disco') genres_tuple[temp][0] # + [markdown] id="HpACIh_rLMUE" colab_type="text" # Double-click __here__ for the solution. 
# # <!-- Your answer is below: # genres_tuple.index("disco") # --> # + [markdown] id="vdrZWWeJLMUG" colab_type="text" # Generate a sorted List from the Tuple <code>C_tuple=(-5, 1, -3)</code>: # + id="yOW1admYLMUJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bf31c80c-94f9-43b9-8785-b1bad96e8746" # Write your code below and press Shift+Enter to execute C_tuple = (-5, 1, -3) sorted(C_tuple) # + [markdown] id="Lgix67QCLMUN" colab_type="text" # Double-click __here__ for the solution. # # <!-- Your answer is below: # C_tuple = (-5, 1, -3) # C_list = sorted(C_tuple) # C_list # --> # + [markdown] id="XPcsJrKbLMUN" colab_type="text" # <hr> # <h2>The last exercise!</h2> # <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. # <hr> # + [markdown] id="bv37r0rDLMUR" colab_type="text" # <p>Copyright &copy; 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
PY0101EN_2_1_Tuples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Introduction to the Interstellar Medium
# ### <NAME>
# ### Figure 11.6: HST image of the R136 super star cluster in the Large Magellanic Cloud
# #### image downloaded from https://www.spacetelescope.org/images/opo0932c/

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline

# this is needed for binder to read jpg using matplotlib
import sys
# !{sys.executable} -m pip install pillow

# +
# Read the three-channel HST jpg of R136.
rgb = mpimg.imread('R136_hst.jpg')

# image is 1.97 x 1.97 arcminutes
print(rgb.shape)
# 1000 x 1000
# => 0.1182 arcsec / pixel
# 100 pix = 2.87 pc for d = 50 kpc

# Crop window (rows 250:850, columns 399:999) and split the color planes:
# channel 0 ~ blue, 1 ~ visual, 2 ~ infrared filters.
row_lo, row_hi = 250, 850
col_lo, col_hi = 399, 999
band_b = rgb[row_lo:row_hi, col_lo:col_hi, 0]
band_v = rgb[row_lo:row_hi, col_lo:col_hi, 1]
band_i = rgb[row_lo:row_hi, col_lo:col_hi, 2]

# Single square panel with no tick marks (pure image).
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])

# scale bar: 70 px corresponds to 2 pc at the assumed 50 kpc distance
bar_len = 70
bar_x0 = 500
bar_x1 = bar_x0 + bar_len
bar_y = 570
half_tick = 6
ax.plot([bar_x0,bar_x1],[bar_y,bar_y], 'w-', lw=2)
ax.plot([bar_x0,bar_x0],[bar_y-half_tick,bar_y+half_tick], 'w-', lw=2)
ax.plot([bar_x1,bar_x1],[bar_y-half_tick,bar_y+half_tick], 'w-', lw=2)
ax.text(0.5*(bar_x0+bar_x1), bar_y-1.5*half_tick, '2 pc', color='white', fontsize=14, ha='center')

# Alternative bands, kept for reference:
#ax.imshow(band_b, cmap='gist_gray')
#ax.text(0.04,0.92,'336+438 nm', {'color': 'w', 'fontsize': 16}, transform=ax.transAxes)
ax.imshow(band_v, cmap='gist_gray')
ax.text(0.04,0.93,'555 nm', {'color': 'w', 'fontsize': 20}, transform=ax.transAxes)
#ax.imshow(band_i, cmap='gist_gray')
#ax.text(0.04,0.92,'814 nm', {'color': 'w', 'fontsize': 16}, transform=ax.transAxes)

plt.tight_layout()
plt.savefig('R136.pdf')
# -
extragalactic/R136.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="TcQnpE9WWLpP" colab_type="code" colab={} import pandas as pd import matplotlib.pyplot as plt from collections import Counter # + id="DliK6DhCQnXO" colab_type="code" outputId="166d0393-1f6f-4aec-e22d-2243412c1256" executionInfo={"status": "ok", "timestamp": 1585771521277, "user_tz": -120, "elapsed": 1006, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # cd "/content/drive/My Drive/Colab Notebooks/dataworkshop_matrix/Matrix_3_road_sign/data" # + id="LzlAfNGYR58p" colab_type="code" outputId="e1979866-5dc5-4c02-ac00-d01120cdee43" executionInfo={"status": "ok", "timestamp": 1585771532726, "user_tz": -120, "elapsed": 1969, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34} # ls # + id="wBeKf3RlTW5D" colab_type="code" colab={} train = pd.read_pickle('train.p') # + id="LYsK6K1nTdZk" colab_type="code" outputId="c803b3e7-2253-4343-8059-c3e35132f273" executionInfo={"status": "ok", "timestamp": 1585771559398, "user_tz": -120, "elapsed": 366, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34} train.keys() # + id="23Yl94icTgMv" colab_type="code" outputId="59733322-0a61-4f84-af9f-716353e376ab" executionInfo={"status": "ok", "timestamp": 1585771572804, "user_tz": -120, "elapsed": 533, "user": 
{"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 34} X_train, y_train = train['features'], train['labels'] X_train.shape, y_train.shape # + id="MGbTWxsjT1XZ" colab_type="code" outputId="890c02f7-73ad-4f5f-c36d-39cafb452123" executionInfo={"status": "ok", "timestamp": 1585771664470, "user_tz": -120, "elapsed": 709, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} colab={"base_uri": "https://localhost:8080/", "height": 283} plt.imshow(X_train[0]) # + id="3aeQuXu_UQyU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="a4f07a1a-e205-47e5-f142-5581e45a0ebd" executionInfo={"status": "ok", "timestamp": 1585771798550, "user_tz": -120, "elapsed": 560, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} sings = pd.read_csv('signnames.csv') sings.head(4) # + id="xCIzy5z3XP07" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="165c9dec-b9e6-4084-ff3c-c7c3f588e727" executionInfo={"status": "ok", "timestamp": 1585771921166, "user_tz": -120, "elapsed": 704, "user": {"displayName": "<NAME>0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} dict_signs = sings.to_dict()['b'] dict_signs[2] # + id="-hi8SnwDXe23" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000, "output_embedded_package_id": "1FnZ9rHVMy3qgjdikJeznH_Gint-ytnLC"} outputId="00491113-d7f8-4163-bb75-71c8650fdc6e" executionInfo={"status": "ok", "timestamp": 1585772747307, 
"user_tz": -120, "elapsed": 39899, "user": {"displayName": "<NAME>0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} for id_sign in dict_signs.keys(): given_sings = X_train[y_train == id_sign] plt.figure(figsize=(15,5)) for i in range(9): plt.subplot('19{0}'.format(i+1)) plt.imshow(given_sings[i]) #plt.axes("off") plt.tight_layout() plt.show() # + id="PciFahfAZFNR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 609} outputId="46c4ec0f-2b5f-438d-fd0d-96f0d13f6526" executionInfo={"status": "ok", "timestamp": 1585773691754, "user_tz": -120, "elapsed": 1143, "user": {"displayName": "<NAME>\u0119dzierski", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ghu1AZoNys9qmixpBnJlZ_8c8mZuipDj1nRYiMPkg=s64", "userId": "14162817076247878771"}} cnt = Counter(y_train).most_common() id_labels, cnt_labels = zip(*cnt) ids = range(len(id_labels)) plt.figure(figsize=(15,5)) plt.bar(ids, cnt_labels) plt.xlabel('Znaki') labels = [dict_signs[id_labels[id_]] for id_ in id_labels] plt.xticks(ids, labels, rotation = 'vertical' ) plt.title('Wykres znakรณw drogowch') plt.show # + id="ZT7J553GbklE" colab_type="code" colab={}
Matrix_3_road_sign/M3_day2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import warnings
import numpy as np
import pandas as pd
import scipy.stats as st
import statsmodels as sm
import matplotlib
import matplotlib.pyplot as plt

# Course names whose grade distributions are analysed below.
# NOTE(review): the strings are reproduced byte-for-byte (they appear
# mojibake-encoded) because they must match the 'nome' column of the CSV.
disciplinas = ['CรLCULO I','CรLCULO II','CรLCULO III','VETORES E GEOMETRIA ANALรTICA','PRร‰-CรLCULO','PROBABILIDADE E ESTATรSTICA',
               'INTRODUร‡รƒO ร€ FรSICA CLรSSICA I','INTRODUร‡รƒO ร€ FรSICA CLรSSICA II','INTRODUร‡รƒO ร€ FรSICA CLรSSICA III','Lร“GICA DE PROGRAMAร‡รƒO',
               'LINGUAGEM DE PROGRAMAร‡รƒO','COMPUTAร‡รƒO NUMร‰RICA','QUรMICA GERAL','รLGEBRA LINEAR','MECร‚NICA DOS Sร“LIDOS','MECร‚NICA DOS FLUIDOS',
               'CIรŠNCIA E TECNOLOGIA DOS MATERIAIS']

# +
# Load the course-grade table and keep one row per (student, class).
colunas = ['discente', 'id_turma', 'media_final', 'nome']
df = pd.read_csv('../data_science/turmas_new.csv')
df = df[colunas].drop_duplicates()
# cont = 0;
# for disciplina in disciplinas:
#     serie = df[df['nome'] == disciplina].media_final
#     plota(serie, cont)
#     cont+=1
# -

df[df['nome']=='CรLCULO I'].shape


# +
def best_fit_distribution(data, bins=700, ax=None):
    """Find the scipy continuous distribution that best fits ``data``.

    The sample is histogrammed (density normalised) and every candidate
    distribution is fitted by maximum likelihood; the candidate whose pdf
    gives the smallest sum of squared errors against the histogram wins.

    Parameters
    ----------
    data : array-like
        Observations (here: final grades of one course).
    bins : int
        Number of histogram bins used to score each fit.
    ax : matplotlib axis, optional
        If given, the pdf of every candidate is drawn on it.

    Returns
    -------
    (str, tuple)
        Name of the best distribution and its fitted parameters in scipy
        order: (shape params..., loc, scale).
    """
    # Density histogram; x becomes the bin centres.
    y, x = np.histogram(data, bins=bins, density=True)
    x = (x + np.roll(x, -1))[:-1] / 2.0

    # Full candidate list kept for reference:
    # DISTRIBUTIONS = [st.alpha, st.anglit, st.arcsine, ..., st.wrapcauchy]

    # Chosen candidates.  Bug fix: st.bernoulli was removed — it is a
    # *discrete* distribution with no .fit() method, so it always raised
    # and was silently swallowed by the except clause below.
    DISTRIBUTIONS = [st.norm, st.t, st.expon, st.gamma]

    best_distribution = st.norm
    best_params = (0.0, 1.0)
    best_sse = np.inf

    for distribution in DISTRIBUTIONS:
        try:
            # .fit() emits many warnings for poorly matching candidates.
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                params = distribution.fit(data)
                # scipy convention: params = (shape..., loc, scale)
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
                sse = np.sum(np.power(y - pdf, 2.0))
                try:
                    if ax:
                        pd.Series(pdf, x).plot(ax=ax)
                except Exception:
                    pass
                if best_sse > sse > 0:
                    best_distribution = distribution
                    best_params = params
                    best_sse = sse
        except Exception:
            # A candidate that cannot be fitted is simply skipped.
            pass

    return (best_distribution.name, best_params)


def make_pdf(dist, params, size=1000):
    """Sample the pdf of ``dist`` between its 1% and 99% quantiles.

    Returns a pandas Series indexed by the x values, ready to plot.
    """
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]
    # Sensible plotting range: central 98% of the distribution's mass.
    start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
    end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
    x = np.linspace(start, end, size)
    y = dist.pdf(x, loc=loc, scale=scale, *arg)
    return pd.Series(y, x)


def plota(serie, cont):
    """Plot the grade histogram of ``serie`` with its best-fit pdf and
    save the figure as ``teste<cont>.png``."""
    notas = pd.Series(serie)
    # Bug fix: ``normed=True`` was removed from matplotlib (>= 3.1) and
    # now raises; ``density=True`` is the supported equivalent.
    notas.plot.hist(bins=50, density=True)
    # Pick the best-fitting distribution.
    best_fit_name, best_fit_params = best_fit_distribution(notas, 200)
    best_dist = getattr(st, best_fit_name)
    # Curve of the fitted distribution.
    pdf = make_pdf(best_dist, best_fit_params)
    # Title showing the fitted parameter values.
    param_names = (best_dist.shapes + ', loc, scale').split(', ') if best_dist.shapes else ['loc', 'scale']
    param_str = ', '.join(['{}={:0.2f}'.format(k, v) for k, v in zip(param_names, best_fit_params)])
    dist_str = '{}({})'.format(best_fit_name, param_str)
    plt.title(u'Melhor distribuicao \n' + dist_str)
    plt.xlabel(u'Notas')
    plt.ylabel('Frequencia')
    pdf.plot().get_figure().savefig('teste' + str(cont) + '.png')
    plt.show()
# -

# One saved histogram per course.
for disciplina in disciplinas:
    fig = plt.figure()
    ax = fig.gca()
    # plt.style.use('ggplot')
    notas = df[df['nome'] == disciplina].media_final
    notas.plot.hist(ax=ax, density=True, bins=20, alpha=.5)
    # x = st.kde.gaussian_kde(notas)
    # x.evaluate(data)
    # print(x.covariance)
    # x1 = np.linspace(0, 10, 1000)
    # y = x.pdf(x1)
    plt.title(disciplina)
    # plt.plot(x1, y)
    plt.savefig('plotTeste' + str(disciplina) + '.png')

# +
from pylab import *
from scipy.optimize import curve_fit, least_squares


def gauss(x, mu, sigma, A):
    """Gaussian bump with amplitude ``A``, centre ``mu``, width ``sigma``.

    Bug fix: the original returned ``A*exp(-(x-mu)*2/2/sigma*2)`` — it
    used ``*2`` (multiplication) where the Gaussian requires ``**2``
    (squaring), so the fitted "model" was an exponential of a linear
    function instead of a bell curve.
    """
    return A * exp(-(x - mu) ** 2 / (2 * sigma ** 2))


def bimodal(x0, x):
    """Sum of two Gaussians; x0 = [mu1, sigma1, A1, mu2, sigma2, A2]."""
    return gauss(x, x0[0], x0[1], x0[2]) + gauss(x, x0[3], x0[4], x0[5])


def bimodal_fit(x0, x, y):
    """Residuals of the bimodal model against the observed densities y."""
    return bimodal(x0, x) - y


notas = df[df['nome'] == 'CรLCULO II'].media_final
y, x, _ = hist(notas, 20, alpha=.3, density=True)
# Use the left bin edges as abscissae (bin centres would be slightly more
# accurate; kept as in the original analysis).
x = x[:-1]
x0 = [np.mean(x), np.std(y), np.mean(y), np.mean(x), np.std(y), np.mean(y)]
# NOTE(review): with loss='soft_l1', f_scale is the residual magnitude at
# which the loss turns robust; 1000 is far above these density residuals,
# making the loss effectively quadratic — confirm whether ~0.1 was meant.
res_robust = least_squares(bimodal_fit, x0, loss='soft_l1', f_scale=1000, args=(x, y))
for a, b in zip(['Media', 'Desvio', 'Amplitude'] * 2, res_robust.x):
    print('{}: {}'.format(a, b))
x1 = np.linspace(np.min(x), np.max(x), 1000)
plt.plot(x1, bimodal(res_robust.x, x1), color='red', lw=3, label='model')
# -
back/api/distribuicao.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="U7sjmwi3rlk0" # # Scatter Plots # + id="2Mc-dgJ4rnzm" import numpy as np import pandas as pd import matplotlib from matplotlib import pyplot as plt # %matplotlib inline # + [markdown] id="kRr-mZnjryrq" # ## 1. Read in the data # # These two data sets are from https://voteview.com/data and use the using the [DW-NOMINATE method](https://en.wikipedia.org/wiki/NOMINATE_(scaling_method)) to evaluate the political characteristics of individuals on a scale from 0 to 1. Each row in the data is for a different congress person and contains the name, and "x" value and an "alt" value. The horizonal axis, "x", measures the level of liberal (low "X") or conservative (high "x") ideology and can also be interpreted as the position on government intervention in the economy. The vertical axis, "alt" can be interpreted as the position on cross-cutting, salient issues of the day. Most experts agree that the "x" dimension explains the vast majority of differences in voting behaviors. 
# # *I'm using the python library pandas to read in the data.* # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="XnMcvE7brt3C" outputId="19873c87-8e1a-40ce-9d92-2144c8f6ac1c" url90 = 'https://raw.githubusercontent.com/ageller/IDEAS_FSS-Vis/master/matplotlib/scatter/congress90.csv' c90 = pd.read_csv(url90) c90 # + colab={"base_uri": "https://localhost:8080/", "height": 423} id="BHk3ZxBGrx5U" outputId="7e0aea9f-02ce-45cb-f61a-f7d800ecbd57" url116 = 'https://raw.githubusercontent.com/ageller/IDEAS_FSS-Vis/master/matplotlib/scatter/congress116.csv' c116 = pd.read_csv(url116) c116 # + [markdown] id="wP6YovQasXk9" # ## 2 Let's plot these as two subplots # # *Is there anything that we should improve upon here?* # + colab={"base_uri": "https://localhost:8080/", "height": 446} id="9IuItkOOsUjz" outputId="86a68332-253c-4d63-b6c8-5bc7d05861a8" #define the subplots and figure size f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,7)) #plot the data with better colors ax1.scatter(c90['x'], c90['alt']) ax2.scatter(c116['x'], c116['alt']) # + [markdown] id="gKxc17pk7rsq" # ## 3 Let's improve this # * We need to force the axes to have the same limits # * Let's add some descriptive labels to the axes # * In that case, they could share a y axis # * We can improve the colors # + colab={"base_uri": "https://localhost:8080/", "height": 508} id="PCefnoX6sdLa" outputId="0d28a39d-9692-4dbd-a3e2-497c05f65f23" #define the subplots and figure size f, (ax1, ax2) = plt.subplots(1, 2, figsize = (14, 6.5), sharey = True) #plot the data with better colors ax1.scatter(c90['x'], c90['alt'], edgecolors = 'black', zorder = 3, c = c90['x'], cmap = 'bwr', vmin = -1, vmax = 1) ax2.scatter(c116['x'], c116['alt'], edgecolors = 'black', zorder = 3, c = c116['x'], cmap = 'bwr' , vmin = -1, vmax = 1) #add axes labels,and define the limits ax1.set_ylabel('Position on salient issues',fontsize = 18) #lim = 1.07 lim = 1.02 for a in [ax1, ax2]: a.set_xlabel(r'Liberal $\rightarrow$ Conservative', 
fontsize = 18) a.set_xlim(-lim, lim) a.set_ylim(-lim, lim) #add titles f.suptitle('The US Congress Has Become More Politically Polarized', fontsize=28, y=1.06) #ax1.set_title('Congress 90', fontsize = 24) #ax2.set_title('Congress 116', fontsize = 24) ax1.set_title('1967 - 1969', fontsize = 24, y = 1.01) ax2.set_title('2019 - 2021', fontsize = 24, y = 1.01) #add a grid? (and darker lines crossing the origin) #t = np.arange(9)/4. - 1 #t = np.arange(5)/2. - 1 #maybe even remove the ticks entirely, since they don't really have much physical meaning t = [] for a in [ax1, ax2]: a.set_yticks(t) a.set_xticks(t) a.grid(color='lightgray', linestyle='-', linewidth=1, zorder = 1, alpha = 0.5) #add a darker central axis to mark the (0,0) origin for a in [ax1, ax2]: a.plot([0,0],[-2,2], color='black', zorder = 2, linewidth=1) a.plot([-2,2],[0,0], color='black', zorder = 2, linewidth=1) #lighten the outer axis? for a in [ax1, ax2]: a.spines['bottom'].set_color('lightgray') a.spines['top'].set_color('lightgray') a.spines['right'].set_color('lightgray') a.spines['left'].set_color('lightgray') #gray out the area where no data can occupy? x = np.linspace(-lim,lim,500) y = np.nan_to_num((1. - x*x)**0.5) for a in [ax1, ax2]: a.fill_between(x, lim, y, color='lightgray', alpha=0.5) a.fill_between(x, -lim, -y, color='lightgray', alpha=0.5) #Fine-tune figure; make subplots close to each other and hide x ticks for f.subplots_adjust(wspace=0.02) #also hide the ticks in the middle ax2.yaxis.set_ticks_position('none') f.savefig('scatter.pdf',format='pdf', bbox_inches = 'tight') # + [markdown] id="8VS14dU9OTGg" # ## 4 Would this be better as two overlapping histograms? 
# # *If we don't really care about the y axis, we don't need to use it.* # + colab={"base_uri": "https://localhost:8080/", "height": 447} id="M80PInFoOPTy" outputId="22cf0d3c-3ae4-49f0-cf39-84f7f87198ea" f, ax = plt.subplots(figsize=(10,5)) color1 = '#386B5D' color2 = '#3D007A' ax.hist(c90['x'], density=True, alpha=0.5, color=color1) ax.hist(c90['x'], density=True, histtype='step', color=color1, linewidth=2) ax.hist(c116['x'], density=True, alpha=0.5, color=color2) ax.hist(c116['x'], density=True, histtype='step', color=color2, linewidth=2) #add title and labels f.suptitle('The US Congress Has Become More\nPolitically Polarized.', fontsize=28, y=1.17, x=0.52) ax.set_title('Conservatives have moved further to the right.', fontsize=18, y=1.05) ax.set_xlabel(r'Liberal $\rightarrow$ Conservative', fontsize = 18) #set the limits ax.set_xlim(-1, 1) ax.set_ylim(0, 1.6) #remove the ticks entirely, since they don't really have much physical meaning t = [] ax.set_yticks(t) ax.set_xticks(t) #add a center line ax.plot([0,0],[0,1.6], color = 'black') #remove a few of the axes? ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_visible(False) #add annotations #ax.annotate("1967 - 1969", xy = (0.5,1.5), fontsize = 14, color = color1) #ax.annotate("2019 - 2021", xy = (0.5,1.37), fontsize = 14, color = color2) ax.annotate("1967 - 1969", xy = (0.06,0.75), fontsize = 14, color = color1) ax.annotate("2019 - 2021", xy = (0.43,0.75), fontsize = 14, color = color2) f.savefig('hist.pdf',format='pdf', bbox_inches = 'tight') # + id="iDnnv7oKOs2J"
matplotlib/scatter/scatter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import random

# ## Import data

# King County house-price data, used as a quick regression demo.
df = pd.read_csv("https://raw.githubusercontent.com/kishore-s-gowda/Machine_Learning/main/Data/kc_house_data.csv")
df.head()

# Drop identifier columns that carry no predictive signal.
df = df.drop(['id','date'], axis=1)
df.shape

# Lets reduce the data as this is just an example to
# NOTE(review): sample() is not seeded, so each run works on a different
# 10% subset — consider random_state=... for reproducibility.
df = df.sample(frac=0.1)

# ## Splitting of data to get df_features and df_target

df_features = df.drop('price', axis=1)
df_target = df["price"]

# ## Install library

# Bug fix: the bare line ``pip install fastreport==0.0.6`` is a shell
# command, which is a SyntaxError in the .py form of this notebook; it must
# be written as an escaped notebook magic:
# !pip install fastreport==0.0.6

# ## Import library

import report

# ## Report

# %%time
report.report_regression(df_features, df_target, change_data_type=False)
Regression Problem.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="-GesvmV0L6jX" # # Python List # # --- # # # + id="E6DddC80MAF1" x=[1,True,'s',[1,2,3,4,5],6.7] # + id="PkcxUE4UMDXe" type(x) # + id="Bs8OoWGwSX3c" dir(x) # + [markdown] id="A7E230I5MfrB" # In python, we can store many type of data in one variable using <a>list</a>.<br> # Each element can be accesed using index(starting from 0) subscription. For example # + id="-zBz6fhuMWkO" x[0] # + id="Asof33gcNAh9" x[1] # + id="stHTQvwUNEuU" x[6] # + [markdown] id="Qz5pkyTNNMye" # Since x[3] is list too, its element can be accessed by addtional subscript index. # + id="zRTEgDg5NcvI" x[3][2] # + [markdown] id="IkKNVEZgNofO" # The length of data stored in a list can be found by function len() # + id="RcYeSaDMNv8z" len(x) # + id="qmVf77waNze8" x=[1,2,3] len(x) # + [markdown] id="oEPcZMTMN9sV" # We can also check if there is a element we want in a list using a keyword in # + id="JO5ug5KhOLu_" 2 in x # + id="xLjlM_5LON93" 20 in x # + [markdown] id="oo2eXwsgcz5j" # We can modify the element in our list if we know its index # + id="J0bEX2Kcc9FR" x=[1,2,3,4,5] print(x) x[3]=2 print(x) # + [markdown] id="rMR_tlbJUprF" # ## List: Append, Addition and Extension<br> # # + id="RXoki_x2VB4K" x=[1,2,3,4,5,6,7,8] print(x) # + id="NbZvO0DSVI-T" x.append([1,2,3]) print(x) # + id="k8pmvfisVN4G" print(x+[10]) # + id="0COPdJWtVSR1" print(x) # + id="-lH3lnSUZuFk" x=x+[10] print(x) # + id="k9cpowgxVnh3" x=[1,2,3,4] y=[5,6,7,8] print(x+y) # + id="utjqxmsLVt86" x.append(y) print(x) # + id="qwQNIINUbFsb" x # + id="5eoE0zftbP89" print(x.append(1)) # + id="vCLhC8ebbcEc" x # + [markdown] id="cSOfjRQPck1O" # ## Insert and remove an element # # --- # # We can add/remove element to the middle of existing list using <a>insert</a> or <a>del</a> # + id="5q9dUbugdx9N" x=[1,2,3,4,5,6] print(x) 
x.insert(1,100) print(x) # + id="S3m3ul4He6Di" x.insert(5,50) print(x) # + id="xrBFeaGQcaB1" # ?list.insert # + id="3AaLngTdc4c2" x # + id="hruW2CFEcvm_" x.insert(20,1) # + id="qPT0a4dLc1hK" len(x) # + id="q9Ki73E_HeC9" x=[1,1,1,2,3,4,5,1,2,3,1,1,2] print(x) # + id="bMbtTYWjdR27" x.remove(100) print(x) # + id="dS4Kn04qdryB" print(x) del x[2] print(x) # + id="0gyR6wuNeFE4" del x print(x) # + [markdown] id="KaXOHYvjbhk7" # ## List slicing # # --- # We can subset the list using subsription [] # # + id="XgjGPFG9bzVu" x=[1,2,3,4,5,6,7,8,9,10] print('x[:] = ',x[:]) print('x[0:] = ',x[0:]) print('x[1:5] = ',x[1:5]) # + id="YfQ3QIFCbqjI" x[::2] # + id="0rcpDX-LgdNK" x[:6:3] # + id="amHY_9l4gis9" x[1:3:2] # + id="8bLWO1KOfN--" x[-1] # + id="CqH3dvBlfQ7z" x[-1::-1] # + [markdown] id="U1nrdLK9aUvd" # We cannot use <a>append</a> method to join list together # + id="L3zO7kjPbJ3X" x=[1,2,3,4,5,6,7] y=[8,9,10] x.append(y) x # + id="zax3QwvGaIfe" x=[1,2,3,4,5,6,7] y=[[1,2,3,4],2,3,4] y[0].extend(x) y # + id="vO0vV8FxhBZ1" x+2 # + [markdown] id="vBAP4Jf3CpMI" # ## List sort # # --- # # # + id="EMHE_JdRCwiU" x=[1,5,7,2,3,4,8,9,4,1,20,4,15,222] x.sort() print(x) # + id="s9AWLttVGczz" x=[1,5,7,2,3,4,8,9,4,1,20,4,15,222,[500,'y']] x.sort(reverse=True) print(x) # + id="I7tr0vGDhslv" x=['1','ab','bb','aa','ba'] x.sort() x # + id="tFEhlSRqGwii" x=[1,1,2,2,[1,2],4,1,2,4,6,7,1,2,3,4,1,2,6,7,8,2,3,4,1,2,6,1,2,3,8,3,1,8,3,8] print(x.count(1)) # + id="EFbJ-Sgziamm" x.count(1) # + [markdown] id="U6R7D69CPQzo" # # range() # # <b>range(start,end, step)</b> is a function that returns a sequence of integer in the interval [<b>start,end</b>) with the separation of <b>step</b> # + id="8AlL9kGEPzQn" print(range(0,10,2)) # + id="8YUjBG4VQfqy" for i in range(0,10,2): print(i) # + id="T9gwP8JWPuKY" type(range(10)) # + id="AR0d1qk2P0Hd" x=range(1,100,3) dir(x) # + id="ud18jpxEP59k" print(x.__getitem__(2))
Coding_Hour2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pachterlab/GRNP_2020/blob/master/notebooks/test/Tests.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="dhydD96df06z" colab_type="text" # **Unit tests** # # Steps: # 1. Clone the code repo # 2. Prepare the R environment # 3. Run the tests # # + [markdown] id="h8RnKVMXgbzr" colab_type="text" # **1. Clone the code repo and download data to process** # + id="doUAtCxIyOiI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="5ffc680c-ca8d-47b1-9701-9f781a41689f" ![ -d "GRNP_2020" ] && rm -r GRNP_2020 # !git clone https://github.com/pachterlab/GRNP_2020.git # + [markdown] id="sCmhNVdYgkWH" colab_type="text" # **2. Prepare the R environment** # + id="5Gt6rQkSXriM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="361ccf81-5511-4109-de21-70fe5a3f962d" #switch to R mode # %reload_ext rpy2.ipython # + id="jJ3rQJCdgeJa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ee8a8a11-755f-4e24-dcc2-f6497917df52" #install the R packages # %%R install.packages("qdapTools") install.packages("dplyr") install.packages("stringdist") install.packages("stringr") install.packages("preseqR") # + [markdown] id="x56fjfCSicrp" colab_type="text" # **3. 
Run the tests** # # All logical values presented should be TRUE # + id="V37XLBAO68oR" colab_type="code" colab={} #First set some path variables # %%R source("GRNP_2020/RCode/pathsGoogleColab.R") # + id="R6kuhOmzZL_X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="57d66a8f-fc7e-44a4-8c0c-9596c7299025" #Run the tests # %%R source(paste0(sourcePath,"ButterflyHelpers.R")) source(paste0(sourcePath,"preseqHelpers.R")) source(paste0(sourcePath,"GenBugSummary.R")) source(paste0(sourcePath,"BUGProcessingHelpers.R")) test_fig_data_path = paste0(sourcePath, "test/tmp/") #TCR0001 - ClosestDists: ############################################## bugTest = read.table(paste0(sourcePath, "test/TestClosestDists.txt"), header = T, stringsAsFactors = F) print(all(ClosestDists(bugTest, bugTest[bugTest$gene=="g1",], 10) == c(2,0,1,3,0,0,0,0,0,0))) #ok print(all(ClosestDists(bugTest, bugTest[bugTest$gene=="g2",], 10) == c(1,0,0,0,1,0,0,0,0,0))) #ok print(all(ClosestDists(bugTest, bugTest[bugTest$gene=="g3",], 10) == c(0,0,0,0,1,2,0,0,0,0))) #ok #TCR0002 - geneIndices2Symbols ##################################### #31 == ENSMUSG00000076800.1 == Trav6n-5 res = geneIndices2Symbols(31, paste0(sourcePath, "test/smallBug/bus_output/coll.genes.txt"), paste0(sourcePath, "test/smallBug/bus_output/transcripts_to_genes.txt")) print(res == "Trav6n-5") # ok #TCR0003 - createStandardBugsData #test 1 simple bug only #so, there are 8 lines in the bug. 
The two last lines should be discarded, #one because it is multimapped and one because its cell has only one read ##################################### #create the files for the dataset in the tmp folder createStandardBugsData(paste0(sourcePath, "test/smallBug/"), "smallBug", c(0.5,1), UmisPerCellLimit = 1, fig_data_path = test_fig_data_path) #check full bug file briefly loadBug("smallBug", 1, fig_data_path = test_fig_data_path) smallBug = getBug("smallBug",1) print(all(dim(smallBug) == c(6,4))) #the size of the bug, ok print(smallBug[[4,3]] == "Trbd2") #check of one value, ok rmBug("smallBug", 1) #check downsampled bug file briefly loadBug("smallBug", 0.5, fig_data_path = test_fig_data_path) smallBug50 = getBug("smallBug",0.5) print(sum(smallBug50[,4]) == 15) #downsampling, OK rmBug("smallBug", 0.5) #check that the stats are ok loadStats("smallBug", fig_data_path = test_fig_data_path) res = statssmallBug print(res$UMIs_smallBug_d_100[1] == 4) #UMIs Gene 1, ok print(res$UMIs_smallBug_d_100[2] == 2) #UMIs Gene 2, ok print(res$Counts_smallBug_d_100[1] == 18) #counts Gene 1, ok print(res$Counts_smallBug_d_100[2] == 12) #counts Gene 2, ok print(abs(res$CPM_smallBug_d_100[1] - 10^6*4/6) < 0.0001) #CPM Gene 1, ok print(abs(res$CPM_smallBug_d_100[2] - 10^6*2/6) < 0.0001) #CPM Gene 2, ok print(res$FracOnes_smallBug_d_100[1] == 0.25) #fracOnes Gene 1, ok print(res$FracOnes_smallBug_d_100[2] == 0) #fracOnes Gene 2, ok print(res$CountsPerUMI_smallBug_d_100[1] == 18/4) #counts per UMI Gene 1, ok print(res$CountsPerUMI_smallBug_d_100[2] == 12/2) #counts per UMI Gene 2, ok print(sum(res$Counts_smallBug_d_50) == 15) #checking downsampling again, ok print(res$gene[1] == "Trbd1") #ok print(res$gene[2] == "Trbd2") #ok #TCR0005 - Good-Toulmin ##################################### dat = c(1,1,1,2,2,3,4) h = hist(dat, breaks=seq(0.5, 4.5, by=1), plot = F) print(goodToulmin(h,2) == 8) #3-2+1-1 + existing number, ok #TCR0006 - downSampleManyTimesAndGetHist 
##################################### #just check that we get the right total number of counts loadBug("smallBug", 1, fig_data_path = paste0(sourcePath, "test/tmp/")) smallBug = getBug("smallBug",1) histMany = downSampleManyTimesAndGetHist(smallBug, 0.5, numTimes=20) histAllGenes = colSums(histMany) totCounts = sum(histAllGenes * 1:100) print(totCounts == 30*20*0.5) #The dataset has 30 counts, downsampled to 0.5, repeated 20 times. Ok rmBug("smallBug", 1) #TCR0007 - getDsHist ##################################### #just check that we get the right total number of counts loadBug("smallBug", 1, fig_data_path = paste0(sourcePath, "test/tmp/")) smallBug = getBug("smallBug",1) h = getDsHist(smallBug) print(all(h[1,1:10] == c(1,0,0,2,0,0,0,0,1,0))) #ok print(all(h[2,1:10] == c(0,0,0,0,0,2,0,0,0,0))) #ok rmBug("smallBug", 1) #TCR0008 - downSampleBUGNTimes ##################################### #test that the expression is halved and multiplied with n if we have only single-copy molecules bugOnlyOnes = as_tibble(read.table(paste0(sourcePath, "test/bugOnlyOnes.txt"), header=T, stringsAsFactors=F)) m = downSampleBUGNTimes(bugOnlyOnes, 0.5, 5) print(sum(m$n) == 25) #ok #test that if we have one molecule from gene 0 with 3 copies and one from gene 1 with 1 copy, #Gene 0 is not affected by downsampling by 50%. bugUneven = as_tibble(read.table(paste0(sourcePath, "test/bugUneven.txt"), header=T, stringsAsFactors=F)) m = downSampleBUGNTimes(bugUneven, 0.5, 5) print(m$n[1] == 5) #ok #TCR0009 - poolPrediction ##################################### #We use 3 different genes, which are all the same in the bug (4 UMIs, hist 3 1), not much amplified. 
#Gene1 is well amplified in the pool, and has 100 UMIs #Gene2 has the same amplification as the bug, and has 100 UMIs #Gene3 is well amplified in the pool, but only 1 UMI (that is weird, but should work) #so, Gene1 is not expected to increase much, Gene2 a lot, Gene3 somewhere in between bugPP = as_tibble(read.table(paste0(sourcePath, "test/bugPooled.txt"), header=T, stringsAsFactors=F)) UMIs = as_tibble(read.table(paste0(sourcePath, "test/pooledUMIs.txt"), header=T, stringsAsFactors=F)) h1 = as.matrix(read.table(paste0(sourcePath, "test/pooledHist1.txt"), header=F, row.names=1, stringsAsFactors=F)) h2 = as.matrix(read.table(paste0(sourcePath, "test/pooledHist2.txt"), header=F, row.names=1, stringsAsFactors=F)) pHList = list(UMIs, list(h1,h2)) pred = poolPrediction(bugPP, t=10, pHList, usePoolLimit = 100000) pv = pred[[2]] print(pv[1] < pv[2])#ok print(pv[1] < pv[3])#ok print(pv[2] > pv[3])#ok #TCR0010 - genBugSummary ##################################### createStandardBugsData(paste0(sourcePath, "test/statsBug/"), "statsBug", c(0.5,1), UmisPerCellLimit = 1, fig_data_path = paste0(sourcePath, "test/tmp/")) loadBug("statsBug", fig_data_path = paste0(sourcePath, "test/tmp/")) genBugSummary("statsBug", "Gene1", "Gene2", 10, fig_data_path = paste0(sourcePath, "test/tmp/")) #now read the summary file and check that it reported the expected values: con = file(paste0(test_fig_data_path, "statsBug/ds_summary.txt")) lines = readLines(con) #print(lines) close(con) print(unlist(strsplit(lines[[3]], "\\s+"))[2] == 13) #num UMIs, ok print(unlist(strsplit(lines[[4]], "\\s+"))[2] == 3) #num cells, ok print(unlist(strsplit(lines[[5]], "\\s+"))[2] == 37) #counts, ok print(unlist(strsplit(lines[[5]], "\\s+"))[2] == 37) #counts, ok print(unlist(strsplit(lines[[6]], "\\s+"))[2] == 37/13) #counts per UMI, ok print(unlist(strsplit(lines[[7]], "\\s+"))[2] == 13/3) #UMIs per cell, ok print(unlist(strsplit(lines[[8]], "\\s+"))[2] == 37/3) #counts per cell, ok 
print(unlist(strsplit(lines[[9]], "\\s+"))[2] == 5/13) #totFracOnes, ok #Gene1 is low, Gene2 is high print(all(unlist(strsplit(lines[[10]], "\\s+"))[2:4] == c("1,","1,","3,"))) # f1H print(all(unlist(strsplit(lines[[11]], "\\s+"))[2:3] == c("1,","4,"))) # f1L print(all(unlist(strsplit(lines[[12]], "\\s+"))[2:4] == c("0.2,","0.2,","0.6,"))) # f1HFrac print(all(unlist(strsplit(lines[[13]], "\\s+"))[2:3] == c("0.2,","0.8,"))) # f1LFrac print(all(unlist(strsplit(lines[[14]], "\\s+"))[2:4] == c("1,","3,", "1,"))) # 1cpy print(all(unlist(strsplit(lines[[15]], "\\s+"))[2:4] == c("1,","1,", "1,"))) # 2cpy print(all(unlist(strsplit(lines[[16]], "\\s+"))[2:4] == c("0,","4,", "1,"))) # >3cpy print(all(unlist(strsplit(lines[[17]], "\\s+"))[2:4] == c("0.2,","0.6,", "0.2,"))) # 1cpy frac #skip the rest of the frac, it is a trivial calculation and they have a lot of decimals
notebooks/test/Tests.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Robust Kalman Filtering for Path Retrieval
#
# #### <NAME>
#
# Please consult the accompanying report for a fuller explanation of the goal of this notebook.
#
# _References:_ [Robust Kalman filtering for vehicle tracking](https://www.cvxpy.org/examples/applications/robust_kalman.html)

import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cvx

# ### Simulating the data
#
# Across all experiments, we simulate 1000 time steps for a total duration of 60 seconds. The damping factor is set to 0.05. We shall vary the parametrizations of the unknown input vector and of the measurement noise.
#
# _Experiment 1_
#
# $$\textbf{u}(k)\sim \mathcal{N}(0, I_2)$$
#
# $$
# \begin{equation}
# \boldsymbol{\eta}(k) \sim P(X),\text{ where }
# P(X)=
# \begin{cases}
# \mathcal{N}(0, I_2), & \text{if}\ \rho > 0.2 \\
# \mathcal{N}(0, 15I_2), & \text{otherwise}
# \end{cases}
# \end{equation}
# $$
# $$\rho \sim \mathcal{U}(0, 1)$$
#
# _Experiment 2_
#
# $$\textbf{u}(k)\sim \mathcal{N}(0, I_2)$$
#
# $$
# \begin{equation}
# \boldsymbol{\eta}(k) \sim P(X),\text{ where }
# P(X)=
# \begin{cases}
# \mathcal{N}(0, I_2), & \text{if}\ \rho > 0.4 \\
# \mathcal{N}(0, 15I_2), & \text{otherwise}
# \end{cases}
# \end{equation}
# $$
#
# $$\rho \sim \mathcal{U}(0, 1)$$
#
# _Experiment 3_
#
# $$\textbf{u}(k)\sim \mathcal{N}(0, 3I_2)$$
#
# $$
# \begin{equation}
# \boldsymbol{\eta}(k) \sim P(X),\text{ where }
# P(X)=
# \begin{cases}
# \mathcal{N}(0, 3I_2), & \text{if}\ \rho > 0.4 \\
# \mathcal{N}(0, 15I_2), & \text{otherwise}
# \end{cases}
# \end{equation}
# $$
#
# $$\rho \sim \mathcal{U}(0, 1)$$

# +
n_experiments = 3
n = 1000  # timesteps
T = 60  # total duration
ts, delta = np.linspace(0, T, n, endpoint=True, retstep=True)
gamma = .05  # damping factor

# Discrete-time double-integrator dynamics with damping:
#   state = (x, y, vx, vy);  state' = A state + B u;  measurement = C state.
A = np.zeros((4,4))
B = np.zeros((4,2))
C = np.zeros((2,4))

A[0,0] = 1
A[1,1] = 1
A[0,2] = (1-gamma*delta/2)*delta
A[1,3] = (1-gamma*delta/2)*delta
A[2,2] = 1 - gamma*delta
A[3,3] = 1 - gamma*delta

B[0,0] = delta**2/2
B[1,1] = delta**2/2
B[2,0] = delta
B[3,1] = delta

C[0,0] = 1
C[1,1] = 1

# +
np.random.seed(164)

# Per-experiment noise configuration (see the markdown above).
outlier_ps = [0.2, 0.4, 0.4]
input_sigmas = [1, 1, 3]
noise_sigmas = [1, 1, 3]
outliers_sigmas = [15, 15, 15]

positions, measurements, inputs = [], [], []

for i, (p, input_s, noise_s, out_s) in enumerate(zip(outlier_ps, input_sigmas, noise_sigmas, outliers_sigmas)):
    positions.append(np.zeros((4,n+1)))
    positions[i][:,0] = [0,0,0,0]
    measurements.append(np.zeros((2,n)))

    # generate random input and noise vectors
    inputs.append(input_s*np.random.randn(2,n))
    noise = noise_s*np.random.randn(2,n)

    # add outliers to noise: with probability p a sample is replaced by a
    # draw from the wide outlier distribution
    inds = np.random.rand(n) <= p
    noise[:,inds] = out_s*np.random.randn(2,n)[:,inds]

    # simulate the system forward in time
    for t in range(n):
        measurements[i][:,t] = C@positions[i][:,t] + noise[:,t]
        positions[i][:,t+1] = A@positions[i][:,t] + B@inputs[i][:,t]
# -

def experiment_plot_helper(true, observed, axis):
    """Plot the true (top row) and measured (bottom row) trajectories of
    each experiment side by side; ``axis`` holds one
    [xmin, xmax, ymin, ymax] list per experiment."""
    colors = ['ro', 'bo', 'go']
    n_cols = len(true)
    fig, ax = plt.subplots(2, 3, figsize=(15, 10))
    for i in range(n_cols):
        ax[0,i].plot(true[i][0,:], true[i][1,:], colors[i], alpha=.1)
        ax[1,i].plot(observed[i][0,:], observed[i][1,:], colors[i], alpha=.1)
        ax[0,i].axis(axis[i])
        ax[1,i].axis(axis[i])
        ax[0,i].set_title("Experiment {}: True".format(i+1))
        ax[1,i].set_title("Experiment {}: Measured".format(i+1))
    plt.show()

experiment_plot_helper(positions, measurements, [[-1, 16, -60, 5], [-2, 33, -6, 1], [-55, 3, -60, 12]])

# +
# We copy the true trajectory since we will now predict it
positions_true = [p.copy() for p in positions]

def simulator(filtering, tau = .1, tau_robust = 2, rho = 2):
    """Recover the trajectories of all experiments by solving the (robust)
    Kalman smoothing problem as a convex program.

    Parameters
    ----------
    filtering : str
        "Kalman" (quadratic noise penalty) or "Robust Kalman" (Huber).
    tau : float
        Noise-penalty weight used by the standard filter.
    tau_robust : float
        Noise-penalty weight used by the robust filter.
    rho : float
        Huber threshold used by the robust filter.

    Returns
    -------
    list of np.ndarray
        One estimated 4 x (n+1) state trajectory per experiment.

    Raises
    ------
    ValueError
        If ``filtering`` names an unknown filter type.
    """
    if filtering not in ["Kalman", "Robust Kalman"]:
        # Bug fix: the original executed ``raise("Invalid filter type.")``,
        # which itself fails with "TypeError: exceptions must derive from
        # BaseException" instead of reporting the real problem.
        raise ValueError("Invalid filter type.")

    # Parameters
    tau = tau if filtering == "Kalman" else tau_robust

    positions_pred = []
    for i in range(n_experiments):
        # Optimization variables
        positions = cvx.Variable(shape=(4, n+1))
        inputs = cvx.Variable(shape=(2, n))
        noise = cvx.Variable(shape=(2, n))

        # Objective: inputs penalised quadratically; noise penalised
        # quadratically (Kalman) or through a Huber loss (Robust Kalman).
        obj = cvx.Minimize(cvx.sum_squares(inputs) + tau*cvx.sum_squares(noise)) \
            if filtering == "Kalman" else \
            cvx.Minimize(cvx.sum_squares(inputs) + \
                cvx.sum([tau*cvx.huber(cvx.norm(noise[:,t]), rho) for t in range(n)]))

        # Dynamics and measurement constraints couple the variables.
        constr = []
        for t in range(n):
            constr += [ positions[:,t+1] == A*positions[:,t] + B*inputs[:,t] ,
                        measurements[i][:,t] == C*positions[:,t] + noise[:,t] ]

        cvx.Problem(obj, constr).solve()
        positions_pred.append(np.array(positions.value))

    return positions_pred

# +
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)

pos_results = {}
# (filter name, tau) for the standard filter;
# (filter name, None, tau, rho) for the robust one.
parametrizations = [("Kalman", 0.01), ("Kalman", 0.1), ("Kalman", 1), ("Kalman", 10),
                    ("Robust Kalman", None, 1, 1), ("Robust Kalman", None, 1, 10),
                    ("Robust Kalman", None, 10, 1), ("Robust Kalman", None, 10, 10)]
for par in parametrizations:
    pos_results[par] = simulator(*par)

# +
# 6 x 4 grid: for each experiment, one row of standard-filter results and
# one row of robust-filter results.
fig, ax = plt.subplots(6, 4, figsize=(20, 30))
clr = ['r', 'b', 'g']
for k, ax_lim in enumerate([[-1, 16, -60, 5], [-2, 33, -6, 1], [-55, 3, -60, 12]]):
    for i, par in enumerate(parametrizations):
        if i < 4:
            ax[2*k, i].plot(positions_true[k][0,:], positions_true[k][1,:], clr[k]+'-')
            ax[2*k, i].plot(measurements[k][0,:], measurements[k][1,:], clr[k]+'o', alpha=.05)
            ax[2*k, i].plot(pos_results[par][k][0,:], pos_results[par][k][1,:], 'k--', alpha=.8)
            ax[2*k, i].axis(ax_lim)
            ax[2*k, i].set_title(r"Normal Filter. $\tau = {:.2f}$".format(par[1]))
        else:
            ax[2*k+1, i - 4].plot(positions_true[k][0,:], positions_true[k][1,:], clr[k]+'-')
            ax[2*k+1, i - 4].plot(measurements[k][0,:], measurements[k][1,:], clr[k]+'o', alpha=.05)
            ax[2*k+1, i - 4].plot(pos_results[par][k][0,:], pos_results[par][k][1,:], 'k--', alpha=.8)
            ax[2*k+1, i - 4].axis(ax_lim)
            ax[2*k+1, i - 4].set_title(r"Robust Filter. $\tau = {:.2f}$, $\varrho = {:.2f}$".format(par[2], par[3]))
plt.show()
Robust Kalman Filtering for Path Retrieval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests
import json

# +
"""
    get_category_items.py

    MediaWiki API Demos
    Demo of `Categorymembers` module : List twenty items in a category

    MIT License
"""

# One HTTP session reused for every request.  (Performance fix: the
# original created a brand-new requests.Session inside the per-page loop,
# defeating connection pooling.)
S = requests.Session()

URL = "https://hi.wikipedia.org/w/api.php"

# List the members of the education category on Hindi Wikipedia.
# NOTE(review): cmtitle reproduced byte-for-byte (it appears
# mojibake-encoded); it must match what the API expects.
PARAMS = {
    "action": "query",
    "cmtitle": "เคถเฅเคฐเฅ‡เคฃเฅ€:เคถเคฟเค•เฅเคทเคพ",
    "cmlimit": "100",
    "list": "categorymembers",
    "format": "json"
}

# +
R = S.get(url=URL, params=PARAMS)
DATA = R.json()
PAGES = DATA['query']['categorymembers']

# Titles of every page in the category.
page_names = [page['title'] for page in PAGES]
# -

# Fetch the plain-text introduction of every page.
p = []
for i in page_names:
    PARAMS = {
        "format": "json",
        "action": "query",
        "prop": "extracts",
        "exlimit": "max",
        "explaintext": True,
        "exintro": True,
        "titles": i}
    R = S.get(url=URL, params=PARAMS)
    DATA = R.json()
    p.append(DATA["query"]["pages"])

# Flatten the per-request page dicts into a list of extract strings.
doc_data = []
for i in p:
    for key in i:
        doc_data.append(i[key]["extract"])

# Drop empty extracts.
o = doc_data
doc_data = [extract for extract in o if extract != ""]

import codecs,string

def detect_language(character):
    """Return 'hindi' if ``character`` lies in the Devanagari Unicode block
    (U+0900 - U+097F); otherwise return None implicitly (callers compare
    the result against the string 'hindi')."""
    maxchar = max(character)
    if u'\u0900' <= maxchar <= u'\u097f':
        return 'hindi'

# Keep only Devanagari characters and spaces from every document.
# (Performance/idiom fix: the original grew each string with repeated
# '+=', which is quadratic, and shadowed the imported ``string`` module
# with a local variable; ''.join over a generator does one linear pass.)
updated_data = []
for doc in doc_data:
    cleaned = ''.join(ch for ch in doc
                      if ch == " " or detect_language(ch) == "hindi")
    updated_data.append(cleaned)

# Write each cleaned document to hindi_data/<index>.txt.
# NOTE(review): mode "a" appends, so re-running the notebook duplicates
# content in existing files — confirm whether "w" was intended.
for idx, doc in enumerate(updated_data):
    filename = "hindi_data/" + str(idx) + ".txt"
    # Robustness fix: use a context manager instead of manual
    # open()/close() so the handle is released even on error.
    with open(filename, "a") as out_file:
        out_file.write(doc)
Data_Extraction_From_Wikipedia.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import the necessary libraries import numpy as np import matplotlib.pyplot as plt from scipy.io import wavfile from scipy.signal import fftconvolve from IPython.display import Audio import pyroomacoustics as pra import itertools as it import ast # + # allows us to convert string to list # corners = ast.literal_eval(user_corners) # user_max_order = int(input("Enter max order: ")) # user_absorption = float(input("Enter an absorption: ")) # or just do it manually for debugging corners = [[0,0], [0,3], [5,3], [5, 1], [3,1], [3,0]] user_max_order = 8 user_absorption = 1 # find the max of x and y coordinates x_vals = [x for x,y in corners] x_max = max(x_vals) + 1 y_vals = [y for x,y in corners] y_max = max(y_vals) + 1 # use itertools to find all coordinates in the box all_coords = list(it.product([i for i in range(x_max)], [j for j in range(y_max)])) # set up pyroomacoustics variables np_corners = np.array(corners).T # specify a signal source fs, signal = wavfile.read("FCJF0_SA1.wav") # + for coord in all_coords: # set max_order to a low value for a quick (but less accurate) RIR room = pra.Room.from_corners(np_corners, fs=fs, max_order=user_max_order, absorption=user_absorption) # add source and set the signal to WAV file content room.add_source([1., 1.], signal=signal) # in 2-D # add two-microphone array # R = np.array([[3.5, 3.6], [2., 2.]]) # [[x], [y], [z]] # or instead add circular microphone array R = pra.circular_2D_array(center=[2., 2.], M=6, phi0=0, radius=0.1) room.add_microphone_array(pra.MicrophoneArray(R, room.fs)) # compute image sources room.image_source_model(use_libroom=True) # fig, ax = room.plot(img_order=6) # fig.set_size_inches(16 / 2, 9 / 2) # room.plot_rir() # fig = plt.gcf() # # adjust the figure to show the plots in a viewable 
manner # fig.set_size_inches(10, 5) # plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,wspace=0.35) # plt.suptitle(coord) # ax = plt.gca() # line = ax.lines[0] # simulate signal # - room.simulate() print("Original WAV:") Audio(signal, rate=fs) print("Simulated propagation to mic: ", coord) Audio(room.mic_array.signals[0,:], rate=fs)
Tests/pyroomtest1fin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import math

# Candidate values of n to evaluate.
values = [400000, 420000, 440000, 460000, 480000]


def func(n):
    """Print the comparison ``0.05 <= n**10`` for the given *n*.

    NOTE(review): the original body first computed
    ``rhs = (8 * n**10) * math.exp(0.125 * 0.0025 * n)`` and then
    immediately overwrote it with ``rhs = n**10`` (a dead store), so
    only ``n**10`` was ever reported; the dead assignment has been
    removed.  Whether the exponential formula was the intended bound
    cannot be determined from this file -- confirm against the
    assignment text before restoring it.
    """
    rhs = n ** 10
    lhs = 0.05
    # print(...) with a single argument is valid in both Python 2 and
    # Python 3, so this cell now parses under either interpreter while
    # producing identical output.
    print("{0} less than or equal to {1}".format(lhs, rhs))


# For n this large math.exp(-n) underflows to 0.0, so every term is 0.0.
[(n ** 10) * math.exp(-n) for n in values]
week_4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 (tensorflow) # language: python # name: rga # --- # <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # T81-558: Applications of Deep Neural Networks # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # # **Module 10 Assignment: Time Series Neural Network** # # **Student Name: <NAME>** # # Assignment Instructions # # For this assignment, you will use an LSTM to predict a time series contained in the data file **[series-31-num.csv](https://data.heatonresearch.com/data/t81-558/datasets/series-31-num.csv)**. The code that you will use to complete this will be similar to the sunspots example from the course module. This data set contains two columns: *time* and *value*. Create an LSTM network and train it with a sequence size of 5 and a prediction window of 1. If you use a different sequence size, you will not have the correct number of submission rows. Train the neural network, the data set is relatively simple, and you should easily be able to get an RMSE below 1.0. FYI, I generate this dataset by fitting a cubic spline to a series of random points. # # This file contains a time series data set, do not randomize the order of the rows! For your training data, use all *time* values less than 3000, and for the test, use the remaining amounts greater than or equal to 3000. For the submit file, please send me the results of your test evaluation. 
You should have two columns: *time* and *value*. The column *time* should be the time at the beginning of each predicted sequence. The *value* should be the next value that your neural network predicted for each of the sequences. # # Your submission file will look similar to: # # |time|value| # |-|-| # |3000|37.022846| # |3001|37.030582| # |3002|37.03816| # |3003|37.045563| # |3004|37.0528| # |...|...| # # Google CoLab Instructions # # If you are using Google CoLab, it will be necessary to mount your GDrive so that you can send your notebook during the submit process. Running the following code will map your GDrive to ```/content/drive```. try: from google.colab import drive drive.mount('/content/drive', force_remount=True) COLAB = True print("Note: using Google CoLab") # %tensorflow_version 2.x except: print("Note: not using Google CoLab") COLAB = False # # Assignment Submit Function # # You will submit the ten programming assignments electronically. The following **submit** function can be used to do this. My server will perform a basic check of each assignment and let you know if it sees any underlying problems. # # **It is unlikely that should need to modify this function.** # + import base64 import os import numpy as np import pandas as pd import requests # This function submits an assignment. You can submit an assignment as much as you like, only the final # submission counts. The paramaters are as follows: # data - Pandas dataframe output. # key - Your student key that was emailed to you. # no - The assignment class number, should be 1 through 1. # source_file - The full path to your Python or IPYNB file. This must have "_class1" as part of its name. # . The number must match your assignment number. For example "_class2" for class assignment #2. 
def submit(data, key, no, source_file=None):
    """Submit an assignment DataFrame plus this source file to the grader.

    Parameters
    ----------
    data : pandas.DataFrame
        Submission rows; serialized to CSV and sent in the request body.
    key : str
        Student API key (sent as the ``x-api-key`` header).
    no : int
        Assignment number; ``'_class{no}'`` must appear in the file name.
    source_file : str, optional
        Path to the ``.py``/``.ipynb`` being submitted; defaults to
        ``__file__`` when running as a script.

    Raises
    ------
    Exception
        If no filename can be determined, the required suffix is missing,
        or the extension is not ``.py``/``.ipynb``.
    """
    if source_file is None and '__file__' not in globals():
        raise Exception('Must specify a filename when a Jupyter notebook.')
    if source_file is None:
        source_file = __file__
    suffix = '_class{}'.format(no)
    if suffix not in source_file:
        raise Exception('{} must be part of the filename.'.format(suffix))
    # Read the notebook/script itself so the grader can inspect the code.
    with open(source_file, "rb") as image_file:
        encoded_python = base64.b64encode(image_file.read()).decode('ascii')
    ext = os.path.splitext(source_file)[-1].lower()
    if ext not in ['.ipynb', '.py']:
        raise Exception("Source file is {} must be .py or .ipynb".format(ext))
    r = requests.post(
        "https://api.heatonresearch.com/assignment-submit",
        headers={'x-api-key': key},
        json={'csv': base64.b64encode(
                  data.to_csv(index=False).encode('ascii')).decode("ascii"),
              'assignment': no,
              'ext': ext,
              'py': encoded_python})
    if r.status_code == 200:
        print("Success: {}".format(r.text))
    else:
        print("Failure: {}".format(r.text))
# -

# # Assignment #10 Sample Code
#
# The following code provides a starting point for this assignment.

# +
import numpy as np


def to_sequences(seq_size, obs):
    """Convert a 1-D series into (window, next-value) training pairs.

    Parameters
    ----------
    seq_size : int
        Length of each input window.
    obs : sequence of numbers
        The observed time series.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        ``x`` with shape (n, seq_size, 1) holding the sliding windows,
        and ``y`` with shape (n,) holding the value that follows each
        window.

    BUG FIX: the original body read an undefined global ``SEQUENCE_SIZE``
    instead of the ``seq_size`` parameter (a NameError when called on its
    own), and the inner comprehension shadowed the accumulator ``x``.
    The parameter is now honored and the locals are disambiguated.
    """
    windows = []
    targets = []
    for i in range(len(obs) - seq_size):
        # Reshape each window to (seq_size, 1) for the LSTM input layer.
        window = [[value] for value in obs[i:(i + seq_size)]]
        windows.append(window)
        targets.append(obs[i + seq_size])
    return np.array(windows), np.array(targets)


# This is your student key that I emailed to you at the beginnning of the semester.
key = "<KEY>"  # This is an example key and will not work.

# You must also identify your source file.
(modify for your local setup) # file='/content/drive/My Drive/Colab Notebooks/assignment_yourname_class10.ipynb' # Google CoLab # file='C:\\Users\\jeffh\\projects\\t81_558_deep_learning\\assignments\\assignment_yourname_class10.ipynb' # Windows file='/Users/jheaton/projects/t81_558_deep_learning/assignments/assignment_yourname_class10.ipynb' # Mac/Linux # Read from time series file df = pd.read_csv("https://data.heatonresearch.com/data/t81-558/datasets/series-31-num.csv") print("Starting file:") print(df[0:10]) print("Ending file:") print(df[-10:]) #submit(source_file=file,data=df,key=key,no=10) # -
assignments/assignment_yourname_class10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.10 64-bit (''umda'': conda)' # name: python3710jvsc74a57bd038815272606d4e3b04bbd3a96dc4b085d5a6c2ed1c5ead0b1b607595242e786b # --- import pandas as pd from matplotlib import pyplot as plt from palettable.cartocolors.qualitative import Vivid_5 import numpy as np plt.style.use("publication") tmc1_preds = pd.read_csv("../../scripts/tmc1_results.csv") # + H2_CD = 1e22 tmc1_preds["ObsAbundance"] = tmc1_preds["Column density (cm^-2)"] / H2_CD tmc1_preds["LinearAbundance"] = 10**tmc1_preds["linear"] / H2_CD tmc1_preds["MLError"] = 10**(np.abs(np.log10(tmc1_preds["LinearAbundance"] / tmc1_preds["ObsAbundance"]))) # - molecules = ["HC11N", "H2CS", "H2CO", "C6H5CN", "C11H7N"] labels = ["HC$_{11}$N", "H$_2$CS", "H$_2$CO", "C$_6$H$_5$CN", "C$_{10}$H$_7$CN"] temp = tmc1_preds.loc[tmc1_preds["Formula"].isin(molecules)].drop_duplicates(["Formula"]) obs_abundance = temp["ObsAbundance"].values test = pd.read_csv("../../data/external/nautilus/ab/HC9N.ab", header=None, names=["T", "X"], skiprows=1, delim_whitespace=True) new_df = pd.DataFrame(temp["MLError"].tolist()) new_df.index = labels # + tags=[] top_index = test["X"].argmax() # - def get_molecule_abundance(molecule: str, index: int = 810) -> float: if molecule == "C11H7N": molecule = "C10H7CN" temp = pd.read_csv(f"../../data/external/nautilus/ab/{molecule}.ab", header=None, names=["T", "X"], skiprows=1, delim_whitespace=True) return temp.iloc[index]["X"] new_df["ChemModelError"] = 10**(np.abs(np.log10(np.array(list(map(get_molecule_abundance, molecules))) / obs_abundance))) list(map(get_molecule_abundance, molecules)) (1e-15 - 1.2e-9) / 1.2e-9 new_df.columns = ["Linear regression", "GOTHAM Nautilus"] new_df # + fig, ax = plt.subplots(figsize=(2.5, 2.5)) new_df.sort_values(["GOTHAM Nautilus"], ascending=True).plot(kind="bar", ax=ax) 
ax.set(ylabel="Magnitude error (Pred. / Obs.)", yscale="log", ylim=[1., 1e7]) ax.legend(fontsize="x-small", loc="upper left") # ax.axhline(1., 0., 1., ls="--", alpha=0.3) fig.savefig("../../reports/figures/ml_model_comparison.pdf", dpi=300) # + fig, ax = plt.subplots(figsize=(4., 2.5)) colors = Vivid_5.hex_colors zero, end = 5e4, 1e7 tmc1_preds["X"] = (10**tmc1_preds["linear"]) / 1e22 tmc1_preds["Obs X"] = tmc1_preds["Column density (cm^-2)"] / 1e22 for color, molecule, label in zip(colors, molecules, labels): temp = tmc1_preds.loc[tmc1_preds["Formula"] == molecule] obs_X = temp["Obs X"].values[0] pred_X = temp["X"].values[0] # load in the model data if molecule == "C11H7N": molecule = "C10H7CN" model_data = pd.read_csv(f"../../data/external/nautilus/ab/{molecule}.ab", header=None, names=["T", "X"], skiprows=1, delim_whitespace=True) model_data = model_data.loc[model_data["T"] > zero] model_data["Ratio"] = model_data["X"] / obs_X ax.plot(model_data["T"], model_data["Ratio"], color=color, alpha=0.9) ax.text(model_data["T"].max() * 1.1, model_data["Ratio"].iloc[-1], label, fontsize="x-small", color=color, horizontalalignment="left", verticalalignment="center") ax.scatter(5e5, pred_X / obs_X, color=color, marker="x", s=10.) ax.set(xscale="log", xlim=[zero, end * 3], yscale="log", ylabel="Pred. / Obs.", xlabel="Time (years)") ax.axhline(1., ls="--", alpha=0.6) fig.savefig("../../reports/figures/ml_model_comparison.pdf", dpi=300) # -
notebooks/reports/comparisons-with-chemical-models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="JKI-xvgOd_7P" outputId="d8b01659-a40f-4955-a3f9-596984017b3a" import numpy as np import pandas as pd # build the sequence data original_data = pd.read_csv('Practice_Log_Demographics.csv') # + id="zVPaWNOTd_7U" # build the sequence data import datetime user_id_key = list(original_data['user_id.x'].unique()) # + id="f8fuDvYYd_7V" outputId="2708b603-e7f5-4dcb-a1b6-9eafd9f66cff" len(user_id_key) # + id="cKLySUc4d_7W" import warnings import gc # + id="1lEaczgZd_7W" original_data = original_data.sort_values(by = ['start_practice']) # + id="m0_C7E8vd_7X" # data preprocessing # according to sub_chapter_label and user_id, generate sequences # generate (chapter,sub_chapter,user_id) sequences according to user_id and term original_data['user_id.x'] = original_data['user_id.x'].astype(int) original_data['user_id.x'] = original_data['user_id.x'].astype(str) original_data['user_id.x'] = original_data['term'].str.cat(original_data['user_id.x'], sep=':') original_data['user_id.x'] = original_data['user_id.x'].astype('category') original_data['term'] = original_data['term'].astype('category') original_data['chapter_label'] = original_data['chapter_label'].astype('category') original_data['sub_chapter_label'] = original_data['sub_chapter_label'].astype('category') # + id="c3paSSCId_7X" original_data['start_practice'] = pd.to_datetime(original_data['start_practice'], format = '%Y-%m-%d %H:%M:%S') original_data['end_practice'] = pd.to_datetime(original_data['end_practice'], format = '%Y-%m-%d %H:%M:%S') # + id="54f_cpYXd_7Y" original_data['label'] = original_data['chapter_label'].astype(str) + '_' + original_data['sub_chapter_label'].astype(str) # + id="acy5Bj77d_7Z" user_id_key = list(original_data['user_id.x'].unique()) # + id="yeyU6RDbd_7Z" 
outputId="ba09b3dd-6664-4a58-932f-401fe0d7d993" len(user_id_key) # + id="bFOU08e4d_7Z" user_record_dict = {} for item in user_id_key: user_record_dict[item] = {} # + id="zS1sArSfd_7a" for index, row in original_data.iterrows(): user_id = row['user_id.x'] if row['label'] not in user_record_dict[user_id].keys(): user_record_dict[user_id][row['label']] = [] user_record_dict[user_id][row['label']].append([row['q'],row['label'],row['start_practice'],row['end_practice']]) else: user_record_dict[user_id][row['label']].append([row['q'],row['label'],row['start_practice'],row['end_practice']]) # + id="W1fygSD5d_-q" # construct the record according to students. user_training_set = {} for item in user_id_key: user_training_set[item] = [] user_record_key = user_record_dict[item].keys() for mid_item in user_record_key: mid_sequence = [] for iter_item in user_record_dict[item][mid_item]: mid_sequence.append(iter_item[5]) user_training_set[item].append(mid_sequence) # + id="qFz-rtEed_-9" outputId="9e31d786-14ce-4dad-d8f5-6040bbdca0e1" # train the model and record the params according to students import hmmlearn.hmm as hmm states = ['NotUnderstood', 'Understood'] obs = [0,1] n_states = len(states) user_model = {} count = 0 user_model_params = {} for item in user_id_key: user_model_params[item] = {} model = hmm.MultinomialHMM(n_components=n_states) len_training_set = int(0.8*len(user_training_set[item])) mid_training_set = user_training_set[item][:len_training_set] mid_test_set = user_training_set[item][len_training_set:] all_X = [] length = [] start_probability = 0 for mid_item in mid_training_set: if mid_item[0] == 0: start_probability += 1 if len(all_X) == 0: well_formed_mid_item = [] for iter_item in mid_item: well_formed_mid_item.append([iter_item]) all_X = well_formed_mid_item length.append(len(mid_item)) else: well_formed_mid_item = [] for iter_item in mid_item: well_formed_mid_item.append([iter_item]) all_X = np.concatenate([all_X, well_formed_mid_item]) 
length.append(len(mid_item)) #else: # all_X = np.concatenate([all_X, mid_item]) # length.append(len(mid_item)) start_probability = start_probability / len(length) start_probability = np.array([start_probability, 1-start_probability]) #odel.startprob_ = start_probability #rint(all_X) model.fit(all_X, lengths = length) user_model_params[item]["startprob_"] = model.startprob_ user_model_params[item]["transmat_"] = model.transmat_ user_model_params[item]["emissionprob_"] = model.emissionprob_ user_model_params[item]["test_set"] = mid_test_set #rint(model.startprob_) #rint(model.transmat_) #rint(model.emissionprob_) # + id="5cHK9M1Zd_-_" # predict and calculate auc and acc all_count = 0 prob = [] tag = [] for item in user_id_key: mid_test_set = user_model_params[item]["test_set"] model = hmm.MultinomialHMM(n_components=n_states) model.startprob_ = user_model_params[item]["startprob_"] model.transmat_ = user_model_params[item]["transmat_"] model.emissionprob_ = user_model_params[item]["emissionprob_"] for mid_item in user_model_params[item]["test_set"]: count = 0 mid_prob = 0 while count < len(mid_item): #print(int(mid_item[count])) if count == 0: mid_prob = model.startprob_[0] #print(mid_prob) #print(mid_item[count]) #print(model.startprob_) mid_tag = 0 if mid_prob > 0.5: mid_tag = 0 else: mid_tag = 1 mid_prob = 1 - mid_prob if mid_tag == mid_item[count]: mid_tag = 1 else: mid_tag = 0 prob.append(mid_prob) tag.append(mid_tag) else: #rint(count) #rint(mid_item[:count]) mid_mid_item = mid_item[:count] well_formed_mid_item = [] for iter_item in mid_mid_item: well_formed_mid_item.append([iter_item]) state_sequence = model.predict(well_formed_mid_item) mid_prob = model.transmat_[state_sequence[-1], :] mid_prob = mid_prob.dot(model.emissionprob_) mid_prob = mid_prob[0] mid_tag = 0 if mid_prob > 0.5: mid_tag = 0 else: mid_tag = 1 mid_prob = 1 - mid_prob if mid_tag == mid_item[count]: mid_tag = 1 else: mid_tag = 0 prob.append(mid_prob) tag.append(mid_tag) count += 1 all_count 
+= 1 # + id="M1pnOCitd__A" #f = list(zip(prob,tag)) #print(f) # + id="QZWC0bNXd__A" outputId="7fe499fc-7d0a-453e-c57b-12b3103b4846" # calculate acc sum(tag) / len(tag) # + id="-_eKZ-1pd__B" outputId="901a084d-81e4-4720-ad23-b4c97bbde9f2" type(model.transmat_[state_sequence[-1], :]) # + id="ZRWAgiB1d__B" outputId="f46de8c7-51b1-417f-f78a-98494db78c78" # calculate auc def calAUC(prob,labels): f = list(zip(prob,labels)) rank = [values2 for values1,values2 in sorted(f,key=lambda x:x[0])] values = [values1 for values1,values2 in sorted(f,key=lambda x:x[0])] rankList = [i+1 for i in range(len(rank)) if rank[i]==1] posNum = 0 negNum = 0 for i in range(len(labels)): if(labels[i]==1): posNum+=1 else: negNum+=1 auc = 0 auc = (sum(rankList)- (posNum*(posNum+1))/2)/(posNum*negNum) return auc auc = calAUC(prob, tag) print(auc) # + id="4I4sXoAJd__C"
additional_features/baseline/BKT-experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] school_cell_uuid="84865712f2da41a9a6cf46b87b4e5405" # # Scikit-Learn ํŒจํ‚ค์ง€์˜ ์†Œ๊ฐœ # + [markdown] school_cell_uuid="2ca8e3c3114b44c2962b17ed41b9e68c" # Scikit-Learn ํŒจํ‚ค์ง€๋Š” ๋จธ์‹  ๋Ÿฌ๋‹ ๊ต์œก ๋ฐ ์‹ค๋ฌด๋ฅผ ์œ„ํ•œ ํŒŒ์ด์ฌ ํŒจํ‚ค์ง€๋กœ ๋‹ค์Œ๊ณผ ๊ฐ™์€ ๊ตฌ์„ฑ ์š”์†Œ๋“ค์„ ๊ฐ–์ถ”๊ณ  ์žˆ๋‹ค. # # * ๋ฒค์น˜๋งˆํฌ์šฉ ์ƒ˜ํ”Œ ๋ฐ์ดํ„ฐ ์„ธํŠธ # * ๋ฐ์ดํ„ฐ ์ „์ฒ˜๋ฆฌ(preprocessing) ๊ธฐ๋Šฅ # * Supervised learning # * Unsupervised learning # * ๋ชจํ˜• ํ‰๊ฐ€ ๋ฐ ์„ ํƒ # # # ์ž์„ธํ•œ ๋‚ด์šฉ์€ ๋‹ค์Œ ์›น์‚ฌ์ดํŠธ๋ฅผ ์ฐธ์กฐํ•œ๋‹ค. # # * http://scikit-learn.org # # + [markdown] school_cell_uuid="25898ff3d1fc4b2aa3b1d507503c8643" # ## scikit-learn ํŒจํ‚ค์ง€์—์„œ ์ œ๊ณตํ•˜๋Š” ๋จธ์‹  ๋Ÿฌ๋‹ ๋ชจํ˜• # + [markdown] school_cell_uuid="6d4ed41d565749dd969a4a942067771f" # scikit-learn ํŒจํ‚ค์ง€์˜ ์žฅ์ ์€ ๋‹ค์–‘ํ•œ ๋จธ์‹  ๋Ÿฌ๋‹ ๋ชจํ˜• ์ฆ‰, ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ํ•˜๋‚˜์˜ ํŒจํ‚ค์ง€์—์„œ ๋ชจ๋‘ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋‹ค๋Š” ์ ์ด๋‹ค. ๋‹ค์Œ์€ scikit-learn ํŒจํ‚ค์ง€์—์„œ ์ œ๊ณตํ•˜๋Š” ๋จธ์‹  ๋Ÿฌ๋‹ ๋ชจํ˜•์˜ ๋ชฉ๋ก์ด๋‹ค. ์ด ๋ชฉ๋ก์€ ๋Œ€ํ‘œ์ ์ธ ๊ฒƒ๋“ค๋งŒ์„ ๋‚˜์—ดํ•œ ๊ฒƒ์ด๋ฉฐ ์ง€์†์ ์œผ๋กœ ๋ชจํ˜•๋“ค์ด ์ถ”๊ฐ€๋˜๊ณ  ์žˆ๋‹ค. 
# + [markdown] school_cell_uuid="0519ef9826dd4704ae70ebc05d8140d6" # ### Supervised learning # # * http://scikit-learn.org/stable/supervised_learning.html # # * Generalized Linear Models # * Ordinary Least Squares # * Ridge/Lasso/Elastic Net Regression # * Logistic regression # * Polynomial regression # * Perceptron # * Linear and Quadratic Discriminant Analysis # * Support Vector Machines # * Stochastic Gradient Descent # * Nearest Neighbor Algorithms # * Gaussian Processes # * Naive Bayes # * Decision Trees # * Ensemble methods # * Random Forests # * AdaBoost # + [markdown] school_cell_uuid="0f4218f68f3049b4960b6946de4f8e67" # ### Unsupervised learning # # * http://scikit-learn.org/stable/unsupervised_learning.html # # # # * Gaussian mixture models # * Manifold learning # * Clustering # * K-means # * DBSCAN # * Biclustering # * Decomposing # * Principal component analysis (PCA) # * Factor Analysis # * Independent component analysis (ICA) # * Latent Dirichlet Allocation (LDA) # * Covariance estimation # * Novelty and Outlier Detection # * Density Estimation # + [markdown] school_cell_uuid="49fcd30747184f5da9f3faf669c7476f" # ## scikit-learn์˜ ์„œ๋ธŒ ํŒจํ‚ค์ง€ # + [markdown] school_cell_uuid="7801ef59fef944c2ae4f8615fd8d94f0" # scikit-learn ์€ ์„œ๋ธŒ ํŒจํ‚ค์ง€ ๋‹จ์œ„๋กœ ๋ณ„๋„์˜ ๊ธฐ๋Šฅ์„ ์ œ๊ณตํ•˜๊ณ  ์žˆ๋‹ค. ๋Œ€ํ‘œ์ ์ธ ์„œ๋ธŒ ํŒจํ‚ค์ง€์™€ ๊ธฐ๋Šฅ์„ ๋‚˜์—ดํ•˜๋ฉด ๋‹ค์Œ๊ณผ ๊ฐ™๋‹ค. 
# # * ์ž๋ฃŒ ์ œ๊ณต: # * `sklearn.datasets`: ์ƒ˜ํ”Œ ๋ฐ์ดํ„ฐ ์„ธํŠธ ์ œ๊ณต # # # * ์ž๋ฃŒ ์ „์ฒ˜๋ฆฌ: # * `sklearn.preprocessing`: imputation, encoding ๋“ฑ ๋‹จ์ˆœ ์ „์ฒ˜๋ฆฌ # * `sklearn.feature_extraction`: Feature Extraction # # # * ๋ชจํ˜•: # * `sklearn.base`: Base classes and utility functions # * `sklearn.pipeline`: Pipeline # * `sklearn.linear_model`: Generalized Linear Models # * `sklearn.naive_bayes`: Naive Bayes # * `sklearn.discriminant_analysis`: Discriminant Analysis # * `sklearn.neighbors`: Nearest Neighbors # * `sklearn.mixture`: Gaussian Mixture Models # * `sklearn.svm`: Support Vector Machines # * `sklearn.tree`: Decision Trees # * `sklearn.ensemble`: Ensemble Methods # * `sklearn.cluster`: Clustering # # # * ๋ชจํ˜• ํ‰๊ฐ€: # * `sklearn.metrics`: Metrics # * `sklearn.cross_validation`: Cross Validation # * `sklearn.grid_search`: Grid Search # # + [markdown] school_cell_uuid="8860b5bf8f894087a94d3698a9098199" # ## scikit-learn์˜ Class # + [markdown] school_cell_uuid="fd3d991cf8fb432b86c2b71c71820672" # scikit-learn์„ ์‚ฌ์šฉํ•˜๊ธฐ ์œ„ํ•ด์„œ๋Š” ์›ํ•˜๋Š” ๊ธฐ๋Šฅ์„ ๊ฐ€์ง€๊ณ  ์žˆ๋Š” ํด๋ž˜์Šค ๊ฐ์ฒด๋ฅผ ์ƒ์„ฑํ•ด์•ผ ํ•œ๋‹ค. scikit-learn์€ ๋‹ค์–‘ํ•œ ํด๋ž˜์Šค๋ฅผ ์ œ๊ณตํ•˜์ง€๋งŒ ๋Œ€๋ถ€๋ถ„์˜ ํด๋ž˜์Šค๋Š” ๋‹ค์Œ๊ณผ ๊ฐ™์ด ์„ธ๊ฐ€์ง€ ๊ทธ๋ฃน์œผ๋กœ ๋‚˜๋ˆŒ์ˆ˜ ์žˆ๋‹ค. 
# # # * ์ „์ฒ˜๋ฆฌ์šฉ ํด๋ž˜์Šค # * **Transformer** ํด๋ž˜์Šค # * ์ž๋ฃŒ ๋ณ€ํ™˜ # * ๊ณตํ†ต ๋ฉ”์„œ๋“œ # * `fit()`: ๋ชจํ˜• ๊ณ„์ˆ˜ ์ถ”์ •, ํŠธ๋ ˆ์ด๋‹(training) # * `transform()` : ์ž๋ฃŒ ์ฒ˜๋ฆฌ # * `fit_transform()` : ๋ชจํ˜• ๊ณ„์ˆ˜ ์ถ”์ • ๋ฐ ์ž๋ฃŒ ์ฒ˜๋ฆฌ ๋™์‹œ ์ˆ˜ํ–‰ # # # * ๋จธ์‹ ๋Ÿฌ๋‹ ๋ชจํ˜• ํด๋ž˜์Šค ๊ทธ๋ฃน # * **Regressor** ํด๋ž˜์Šค # * ํšŒ๊ท€๋ถ„์„ # * **Classifier** ํด๋ž˜์Šค # * ๋ถ„๋ฅ˜ # * **Cluster** ํด๋ž˜์Šค # * ํด๋Ÿฌ์Šคํ„ฐ๋ง # * ๊ณตํ†ต ๋ฉ”์„œ๋“œ # * `fit()`: ๋ชจํ˜• ๊ณ„์ˆ˜ ์ถ”์ •, ํŠธ๋ ˆ์ด๋‹(training) # * `predict()`: ์ฃผ์–ด์ง„ x๊ฐ’์— ๋Œ€ํ•ด y ์˜ˆ์ธก # * `score()`: ์„ฑ๊ณผ ๋ถ„์„ # # # # * **Pipeline** ํด๋ž˜์Šค # * ๋ณต์ˆ˜์˜ Preprocessor์™€ Model์„ ์—ฐ๊ฒฐํ•˜์—ฌ ํ•˜๋‚˜์˜ Model์ฒ˜๋Ÿผ ํ–‰๋™ # * Model ํด๋ž˜์Šค๊ฐ€ ์ œ๊ณตํ•˜๋Š” ๊ณตํ†ต ๋ฉ”์„œ๋“œ๋ฅผ ๋ชจ๋‘ ์ œ๊ณต # * pipeline ๋‚ด๋ถ€์—์„œ Preprocessor์—์„œ ์ž๋ฃŒ๋ฅผ ๊ณ„์† ๋ณ€ํ˜•ํ•œ ํ›„ ๋งˆ์ง€๋ง‰์œผ๋กœ Model์— ์ž…๋ ฅ # #
13. Scikit-Learn, Statsmodel/01. Scikit-Learn ํŒจํ‚ค์ง€์˜ ์†Œ๊ฐœ.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Overview # # This notebook introduces the ipyplotly visualization library and demonstrates some of its features. # # ## What is ipyplotly? # ipyplotly wraps the excellent Plotly.js JavaScript plotting library for interactive use as an ipywidget inside the Jupyter Notebook. # # ## Features # # - Traces can be added and updated interactively by simply assigning to properties # - The full Traces and Layout API is generated from the plotly schema to provide a great experience for interactive use in the notebook # - Data validation covering the full API with clear, informative error messages # - Jupyter friendly docstrings on constructor params and properties # - Support for setting array properties as numpy arrays. When numpy arrays are used, ipywidgets binary serialization protocol is used to avoid converting these to JSON strings. 
# - Context manager API for animation # - Export figures to standalone html # - Programmatic export of figures to static SVG, PNG, or PDF images # # Imports # + # ipyplotly from ipyplotly.datatypes import FigureWidget from ipyplotly.callbacks import Points, InputState # pandas import pandas as pd # numpy import numpy as np # scikit learn from sklearn import datasets # ipywidgets from ipywidgets import HBox, VBox, Button # functools from functools import partial # - # Load iris dataset iris_data = datasets.load_iris() feature_names = [name.replace(' (cm)', '').replace(' ', '_') for name in iris_data.feature_names] iris_df = pd.DataFrame(iris_data.data, columns=feature_names) iris_class = iris_data.target + 1 iris_df.head() # Create and display an empty ipyplotly Figure f1 = FigureWidget() f1 # # Tab completion # Entering ``f1.add_<tab>`` displays add methods for all of the supported trace types # + # f1.add_scatter # - # Entering ``f1.add_scatter(<tab>)`` displays the names of all of the top-level properties for the scatter trace type # # Entering ``f1.add_scatter(<shift+tab>)`` displays the signature pop-up. Expanding this pop-up reveals the method doc string which contains the descriptions of all of the top level properties # + # f1.add_scatter( # - # # Add scatter trace scatt1 = f1.add_scatter(x=iris_df.sepal_length.values, y=iris_df.petal_width.values) f1 # + # scatt1.mode? 
# - # That's not what we wanted, change the mode to 'markers' scatt1.mode = 'markers' # Set size to 8 scatt1.marker.size = 8 # Color markers by iris class scatt1.marker.color = iris_class # + # Change colorscale scatt1.marker.cmin = 0.5 scatt1.marker.cmax = 3.5 scatt1.marker.colorscale = [[0, 'red'], [0.33, 'red'], [0.33, 'green'], [0.67, 'green'], [0.67, 'blue'], [1.0, 'blue']] scatt1.marker.showscale = True # - # Fix up colorscale ticks scatt1.marker.colorbar.ticks = 'outside' scatt1.marker.colorbar.tickvals = [1, 2, 3] scatt1.marker.colorbar.ticktext = iris_data.target_names.tolist() # Set colorscale title scatt1.marker.colorbar.title = 'Species' scatt1.marker.colorbar.titlefont.size = 16 scatt1.marker.colorbar.titlefont.family = 'Rockwell' # Add axis labels f1.layout.xaxis.title = 'sepal_length' f1.layout.yaxis.title = 'petal_width' f1 # Hover info scatt1.text = iris_data.target_names[iris_data.target] scatt1.hoverinfo = 'text+x+y' f1.layout.hovermode = 'closest' f1 # ## Animate marker size change # Set marker size based on petal_length with f1.batch_animate(duration=1000): scatt1.marker.size = np.sqrt(iris_df.petal_length.values * 50) # Restore constant marker size with f1.batch_animate(duration=1000): scatt1.marker.size = 8 # ## Set drag mode property callback # Make points more transparent when `dragmode` is `select` or `lasso` def set_opacity(marker, layout, dragmode): if dragmode in ['select', 'lasso']: marker.opacity = 0.5 else: marker.opacity = 1.0 f1.layout.on_change(partial(set_opacity, scatt1.marker), 'dragmode') # ## Configure colorscale for brushing scatt1.marker.colorbar = None scatt1.marker.colorscale = [[0, 'lightgray'], [0.5, 'lightgray'], [0.5, 'red'], [1, 'red']] scatt1.marker.cmin = -0.5 scatt1.marker.cmax = 1.5 scatt1.marker.colorbar.ticks = 'outside' scatt1.marker.colorbar.tickvals = [0, 1] scatt1.marker.colorbar.ticktext = ['unselected', 'selected'] # Reset colors to zeros (unselected) scatt1.marker.color = np.zeros(iris_class.size) 
selected = np.zeros(iris_class.size) # ### Configure brushing callback # Completion helpers trace, points, state = scatt1, Points(), InputState() def brush(trace, points, state): inds = np.array(points.point_inds) if inds.size: selected[inds] = 1 trace.marker.color = selected scatt1.on_selected(brush) # Now box or lasso select points on the figure and see them turn red # Reset brush selected = np.zeros(iris_class.size) scatt1.marker.color = selected # ## Create second plot with different features f2 = FigureWidget(data=[{'type': 'scatter', 'x': iris_df.petal_length.values, 'y': iris_df.sepal_width.values, 'mode': 'markers'}]) f2 # Set axis titles f2.layout.xaxis.title = 'petal_length' f2.layout.yaxis.title = 'sepal_width' # Grab trace reference scatt2 = f2.data[0] # Set marker styles / colorbars to match between figures scatt2.marker = scatt1.marker # + # Configure brush on both plots to update both plots def brush(trace, points, state): inds = np.array(points.point_inds) if inds.size: selected = scatt1.marker.color.copy() selected[inds] = 1 scatt1.marker.color = selected scatt2.marker.color = selected scatt1.on_selected(brush) scatt2.on_selected(brush) # - f2.layout.on_change(partial(set_opacity, scatt2.marker), 'dragmode') # Reset brush def reset_brush(btn): selected = np.zeros(iris_class.size) scatt1.marker.color = selected scatt2.marker.color = selected # Create reset button button = Button(description="clear") button.on_click(reset_brush) # Hide colorbar for figure 1 scatt1.marker.showscale = False # Set dragmode to lasso for both plots f1.layout.dragmode = 'lasso' f2.layout.dragmode = 'lasso' # Display two figures and the reset button f1.layout.width = 600 f2.layout.width = 600 VBox([HBox([f1, f2]), button]) # + # Save figure 2 to a png image in the exports directory #f2.save_image('exports/f2.png') # + # Save figure 1 to a pdf in the exports directory #f1.save_image('exports/f1.pdf') # -
examples/overviews/Overview.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
from ..Template import Template
from ..Library.DailyPct import DailyPct
import pandas as pd


class Model(Template):
    """Score-based trend-following back-test model.

    Aggregates several Bloomberg technical-indicator columns (DMI,
    Bollinger middle band, MACD histogram, Trender, TAS stochastics and
    an EMA(5)/EMA(20) crossover) into one integer score per bar, then
    holds a long (+1), short (-1) or flat (0) position depending on
    whether the score crosses the +/-4 threshold.
    """

    def __init__(self, returns):
        super(Model, self).__init__(returns)
        # Column views over the price/indicator frame prepared by Template.
        self.dmi_plus = self.px['DMI_PLUS']
        self.dmi_minus = self.px['DMI_MINUS']
        self.px_last = self.px['PX_LAST']
        self.bb_ma = self.px['BB_MA']
        self.macd_diff = self.px['MACD_DIFF']
        self.trender_up = self.px['TRENDER_UP']
        self.trender_down = self.px['TRENDER_DN']
        self.tas_k = self.px['TAS_K']
        self.tas_d = self.px['TAS_D']
        self.ema_5 = self.px['EMA(5)']
        self.ema_20 = self.px['EMA(20)']
        self.px_open = self.px['PX_OPEN']
        # NOTE(review): the original assigned self.px_last a second,
        # identical time here; the duplicate assignment was removed.
        # DailyPct presumably yields day-over-day percent changes of the
        # close -- TODO confirm against Library.DailyPct.
        self.cap_dail_p = DailyPct(self.px['PX_LAST'])

    def Core(self):
        """Run the back-test loop over all bars.

        Returns
        -------
        (score, capital, daily_pct, position)
            ``score[i]`` is the aggregate indicator score at bar i,
            ``capital`` is compounded equity starting from 100,
            ``daily_pct`` is the daily-percent-change series, and
            ``position`` holds the -1/0/+1 position states.
        """
        score = []
        capital = [100]
        position = [0, 0]
        for i in xrange(self.dmi_plus.size - 1):
            score.append(0)
            if i > 0:
                # Carry the previous position and equity forward.
                position.append(position[-1])
                capital.append(capital[-1])
            # +/-1 for DMI direction.
            if (self.dmi_plus[i] > self.dmi_minus[i]):
                score[i] += 1
            elif (self.dmi_plus[i] < self.dmi_minus[i]):
                score[i] -= 1
            # +/-1 for close vs. Bollinger middle band.
            if (self.px_last[i] > self.bb_ma[i]):
                score[i] += 1
            elif (self.px_last[i] < self.bb_ma[i]):
                score[i] -= 1
            # MACD histogram is weighted double.
            if (self.macd_diff[i] > 0):
                score[i] += 2
            elif (self.macd_diff[i] < 0):
                score[i] -= 2
            # Bloomberg reports "#N/A N/A" for the inactive Trender side.
            if (self.trender_up[i] == "#N/A N/A"):
                score[i] += 1
            if (self.trender_down[i] == "#N/A N/A"):
                score[i] -= 1
            # +/-1 for stochastic %K vs. %D.
            if (self.tas_k[i] > self.tas_d[i]):
                score[i] += 1
            elif (self.tas_k[i] < self.tas_d[i]):
                score[i] -= 1
            # +/-1 for the EMA(5)/EMA(20) crossover.
            if (self.ema_5[i] > self.ema_20[i]):
                score[i] += 1
            # BUG FIX: the original elif repeated the identical ">"
            # comparison, making this bearish branch unreachable; "<"
            # matches the symmetric pattern of every indicator pair above.
            elif (self.ema_5[i] < self.ema_20[i]):
                score[i] -= 1
            # Mark equity to market while long (+1) or short (-1).
            if (position[-1] == 1 or position[-1] == -1):
                capital[i] = capital[i] * (1 + (self.cap_dail_p[i] * position[-1]))
            # Enter/exit on the +/-4 score threshold.
            if (len(score) > 1):
                if (score[-1] > -4 and score[-1] < 4):
                    position[-1] = 0
                elif (score[-1] <= -4):
                    position[-1] = -1
                else:
                    position[-1] = 1
        return score, capital, self.cap_dail_p, position
# -
TradingSystem/Algorithm/Demo/.ipynb_checkpoints/Demo-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Taxi Cab Classification (prior to TF2) # This notebook presents a simplified version of Kubeflow's *taxi cab clasification* pipeline, built upon TFX components. # # Here all the pipeline components are stripped down to their core to showcase how to run it in a self-contained local Juyter Noteobok. # # Additionally, the pipeline has been upgraded to work with Python3 and all major libraries (Tensorflow, Tensorflow Transform, Tensorflow Model Analysis, Tensorflow Data Validation, Apache Beam) have been bumped to their latests versions. # !pip install tensorflow==1.15.0 --user # !pip install apache_beam tensorflow_transform tensorflow_model_analysis tensorflow_data_validation --user # You may have to restart the workbook after installing these packages # + tags=["imports"] import os import shutil import logging import apache_beam as beam import tensorflow as tf import tensorflow_transform as tft import tensorflow_model_analysis as tfma import tensorflow_data_validation as tfdv from apache_beam.io import textio from apache_beam.io import tfrecordio from tensorflow_transform.beam import impl as beam_impl from tensorflow_transform.beam.tft_beam_io import transform_fn_io from tensorflow_transform.coders.csv_coder import CsvCoder from tensorflow_transform.coders.example_proto_coder import ExampleProtoCoder from tensorflow_transform.tf_metadata import dataset_metadata from tensorflow_transform.tf_metadata import metadata_io # + tags=["functions"] DATA_DIR = 'data/' TRAIN_DATA = os.path.join(DATA_DIR, 'taxi-cab-classification/train.csv') EVALUATION_DATA = os.path.join(DATA_DIR, 'taxi-cab-classification/eval.csv') # Categorical features are assumed to each have a maximum value in the dataset. 
# Assumed maximum value (number of distinct categories) for each categorical
# feature, aligned index-for-index with CATEGORICAL_FEATURE_KEYS.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour',
    'trip_start_day',
    'trip_start_month',
]

# Features kept as dense floats (z-score scaled during preprocessing).
DENSE_FLOAT_FEATURE_KEYS = [
    'trip_miles',
    'fare',
    'trip_seconds',
]

# Number of buckets used by tf.transform for encoding each bucketized feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
    'pickup_latitude',
    'pickup_longitude',
    'dropoff_latitude',
    'dropoff_longitude',
]

# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform.
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
    'pickup_census_tract',
    'dropoff_census_tract',
    'payment_type',
    'company',
    'pickup_community_area',
    'dropoff_community_area',
]

# Features allowed to contain nan values.
OPTIONAL_FEATURES = [
    'dropoff_latitude',
    'dropoff_longitude',
    'pickup_census_tract',
    'dropoff_census_tract',
    'company',
    'trip_seconds',
    'dropoff_community_area',
]

# Target column and the fare column used to derive the label.
LABEL_KEY = 'tips'
FARE_KEY = 'fare'

# + tags=["pipeline-parameters"]
# Training hyper-parameters.
EPOCHS = 1
STEPS = 3
BATCH_SIZE = 32
HIDDEN_LAYER_SIZE = '1500'  # comma-separated layer sizes, parsed before training
LEARNING_RATE = 0.1

# + tags=["functions"]
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# tf.get_logger().setLevel(logging.ERROR)
# -

# #### Data Validation
# For an overview of the TFDV functions: https://www.tensorflow.org/tfx/tutorials/data_validation/chicago_taxi

# + tags=["block:data_validation"]
vldn_output = os.path.join(DATA_DIR, 'validation')

# TODO: Understand why this was used in the conversion to the output json
# key columns: list of the names for columns that should be treated as unique keys.
key_columns = ['trip_start_timestamp']

# Read the first line of the CSV to get an ordered list of column names
# (the inferred Schema does not preserve the feature order).
with open(TRAIN_DATA) as f:
    column_names = f.readline().strip().split(',')

# Generate statistics from the training data and infer a schema from them.
stats = tfdv.generate_statistics_from_csv(data_location=TRAIN_DATA)
schema = tfdv.infer_schema(stats)

# Generate statistics for the evaluation data and validate them against the
# schema inferred from the training data.
eval_stats = tfdv.generate_statistics_from_csv(data_location=EVALUATION_DATA)
anomalies = tfdv.validate_statistics(eval_stats, schema)

# Log anomalies
for feature_name, anomaly_info in anomalies.anomaly_info.items():
    logging.getLogger().error(
        'Anomaly in feature "{}": {}'.format(
            feature_name, anomaly_info.description))

# show inferred schema
tfdv.display_schema(schema=schema)

# +
# Resolve anomalies by relaxing the schema rather than changing the data.
company = tfdv.get_feature(schema, 'company')
company.distribution_constraints.min_domain_mass = 0.9

# Add new value to the domain of feature payment_type.
payment_type_domain = tfdv.get_domain(schema, 'payment_type')
payment_type_domain.value.append('Prcard')

# Validate eval stats after updating the schema; remaining anomalies are shown.
updated_anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(updated_anomalies)
# -

# #### Data Transformation
# For an overview of the TFT functions: https://www.tensorflow.org/tfx/tutorials/transform/simple

# + tags=["block:data_transformation", "prev:data_validation"]
def to_dense(tensor):
    """Convert a tf.SparseTensor into a dense Tensor with a type-matched default.

    Args:
      tensor: tf.SparseTensor. Anything that is not a SparseTensor is returned
        unchanged.

    Returns:
      tf.Tensor where missing entries are filled with '' (string), 0.0
      (float32) or 0 (int32).

    Raises:
      ValueError: if the tensor dtype is not one of the three handled above.
    """
    if not isinstance(tensor, tf.sparse.SparseTensor):
        return tensor
    # Pick the fill value that matches the tensor's dtype.
    if tensor.dtype == tf.string:
        default_value = ''
    elif tensor.dtype == tf.float32:
        default_value = 0.0
    elif tensor.dtype == tf.int32:
        default_value = 0
    else:
        raise ValueError(f"Tensor type not recognized: {tensor.dtype}")

    # NOTE(review): tf.sparse_to_dense is deprecated (the TODO below is its
    # replacement). The [dense_shape[0], 1] output shape assumes at most one
    # value per example -- confirm against the input feature spec.
    return tf.squeeze(tf.sparse_to_dense(tensor.indices, [tensor.dense_shape[0], 1], tensor.values, default_value=default_value), axis=1)
    # TODO: Update to below version
    # return tf.squeeze(tf.sparse.to_dense(tensor, default_value=default_value), axis=1)


def preprocess_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
      Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in DENSE_FLOAT_FEATURE_KEYS:
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs[key] = tft.scale_to_z_score(to_dense(inputs[key]))

    for key in VOCAB_FEATURE_KEYS:
        # Build a vocabulary for this feature.
        if inputs[key].dtype == tf.string:
            vocab_tensor = to_dense(inputs[key])
        else:
            # Non-string features are stringified before the vocabulary lookup.
            vocab_tensor = tf.as_string(to_dense(inputs[key]))
        outputs[key] = tft.compute_and_apply_vocabulary(
            vocab_tensor,
            vocab_filename='vocab_' + key,
            top_k=VOCAB_SIZE,
            num_oov_buckets=OOV_SIZE)

    for key in BUCKET_FEATURE_KEYS:
        outputs[key] = tft.bucketize(to_dense(inputs[key]), FEATURE_BUCKET_COUNT)

    for key in CATEGORICAL_FEATURE_KEYS:
        outputs[key] = tf.cast(to_dense(inputs[key]), tf.int64)

    taxi_fare = to_dense(inputs[FARE_KEY])
    taxi_tip = to_dense(inputs[LABEL_KEY])
    # Test if the tip was > 20% of the fare.
    tip_threshold = tf.multiply(taxi_fare, tf.constant(0.2))
    # Label is True only when the fare is not NaN AND the tip exceeds the
    # 20% threshold.
    outputs[LABEL_KEY] = tf.logical_and(
        tf.logical_not(tf.math.is_nan(taxi_fare)),
        tf.greater(taxi_tip, tip_threshold))

    # Re-encode any boolean output (including the label) as a vocabulary id.
    for key in outputs:
        if outputs[key].dtype == tf.bool:
            outputs[key] = tft.compute_and_apply_vocabulary(
                tf.as_string(outputs[key]),
                vocab_filename='vocab_' + key)

    return outputs


# +
trns_output = os.path.join(DATA_DIR, "transformed")
# Start from a clean output directory on every run.
if os.path.exists(trns_output):
    shutil.rmtree(trns_output)

tft_input_metadata = dataset_metadata.DatasetMetadata(schema)

# Run the whole preprocessing locally with Beam's in-process runner.
runner = 'DirectRunner'
with beam.Pipeline(runner, options=None) as p:
    with beam_impl.Context(temp_dir=os.path.join(trns_output, 'tmp')):
        converter = CsvCoder(column_names, tft_input_metadata.schema)

        # READ TRAIN DATA
        train_data = (
            p
            | 'ReadTrainData' >> textio.ReadFromText(TRAIN_DATA, skip_header_lines=1)
            | 'DecodeTrainData' >> beam.Map(converter.decode))

        # TRANSFORM TRAIN DATA (and get transform_fn function)
        transformed_dataset, transform_fn = (
            (train_data, tft_input_metadata) | beam_impl.AnalyzeAndTransformDataset(preprocess_fn))
        transformed_data, transformed_metadata = transformed_dataset

        # SAVE TRANSFORMED TRAIN DATA
        _ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(
            os.path.join(trns_output, 'train'),
            coder=ExampleProtoCoder(transformed_metadata.schema))

        # READ EVAL DATA
        eval_data = (
            p
            | 'ReadEvalData' >> textio.ReadFromText(EVALUATION_DATA, skip_header_lines=1)
            | 'DecodeEvalData' >> beam.Map(converter.decode))

        # TRANSFORM EVAL DATA (using previously created transform_fn function)
        eval_dataset = (eval_data, tft_input_metadata)
        transformed_eval_data, transformed_metadata = (
            (eval_dataset, transform_fn) | beam_impl.TransformDataset())

        # SAVE EVAL DATA
        _ = transformed_eval_data | 'WriteEvalData' >> tfrecordio.WriteToTFRecord(
            os.path.join(trns_output, 'eval'),
            coder=ExampleProtoCoder(transformed_metadata.schema))

        # SAVE transform_fn FUNCTION FOR LATER USE
        # TODO: check out what is the transform function (transform_fn) that came from previous step
        _ = (transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn(trns_output))

        # SAVE TRANSFORMED METADATA
        # NOTE(review): this persists the *raw input* metadata under 'metadata';
        # confirm whether the transformed metadata was intended here instead.
        metadata_io.write_metadata(
            metadata=tft_input_metadata,
            path=os.path.join(trns_output, 'metadata'))
# -

# #### Train
# Estimator API: https://www.tensorflow.org/guide/premade_estimators

# + tags=["block:train", "prev:data_transformation"]
def training_input_fn(transformed_output, transformed_examples, batch_size, target_name):
    """Build the (features, labels) tensors for estimator training or eval.

    Args:
      transformed_output: tft.TFTransformOutput
      transformed_examples: Base filename of examples
      batch_size: Batch size.
      target_name: name of the target column.

    Returns:
      The input function for training or eval.
    """
    dataset = tf.data.experimental.make_batched_features_dataset(
        file_pattern=transformed_examples,
        batch_size=batch_size,
        features=transformed_output.transformed_feature_spec(),
        reader=tf.data.TFRecordDataset,
        shuffle=True)
    # NOTE(review): make_one_shot_iterator() is TF1-style; it works on
    # TF 1.15 but is removed in TF2.
    transformed_features = dataset.make_one_shot_iterator().get_next()
    # Pop the label out of the feature dict so it can be returned separately.
    transformed_labels = transformed_features.pop(target_name)
    return transformed_features, transformed_labels


def get_feature_columns():
    """Callback that returns a list of feature columns for building a tf.estimator.

    Returns:
      A list of tf.feature_column.
    """
    return (
        # Dense floats are fed to the network directly.
        [tf.feature_column.numeric_column(key, shape=()) for key in DENSE_FLOAT_FEATURE_KEYS] +
        # Vocabulary features were integerized by tf.transform, so identity
        # columns sized vocab + OOV buckets are sufficient.
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=VOCAB_SIZE + OOV_SIZE)) for key in VOCAB_FEATURE_KEYS] +
        # Bucketized features take ids in [0, FEATURE_BUCKET_COUNT).
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0)) for key in BUCKET_FEATURE_KEYS] +
        # Categorical features use the assumed per-feature maximum values as
        # their bucket counts.
        [tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0)) for key, num_buckets in zip(CATEGORICAL_FEATURE_KEYS, MAX_CATEGORICAL_FEATURE_VALUES)]
    )


# +
training_output = os.path.join(DATA_DIR, "training")
# Start from a clean model directory on every run.
if os.path.exists(training_output):
    shutil.rmtree(training_output)

# HIDDEN_LAYER_SIZE is a comma-separated string, e.g. '1500' or '512,256'.
hidden_layer_size = [int(x.strip()) for x in HIDDEN_LAYER_SIZE.split(',')]

tf_transform_output = tft.TFTransformOutput(trns_output)

# Set how often to run checkpointing in terms of steps.
config = tf.estimator.RunConfig(save_checkpoints_steps=1000)

# The label was vocabulary-encoded in preprocess_fn, so the size of that
# vocabulary gives the number of classes.
n_classes = tf_transform_output.vocabulary_size_by_name("vocab_" + LABEL_KEY)

# Create estimator
estimator = tf.estimator.DNNClassifier(
    feature_columns=get_feature_columns(),
    hidden_units=hidden_layer_size,
    n_classes=n_classes,
    config=config,
    model_dir=training_output)
# TODO: Simplify all this: https://www.tensorflow.org/guide/premade_estimators
# -

# NOTE(review): the literal "tips" duplicates LABEL_KEY -- consider using the
# constant here and in the evaluate call below.
estimator.train(input_fn=lambda: training_input_fn(
    tf_transform_output,
    os.path.join(trns_output, 'train' + '*'),
    BATCH_SIZE,
    "tips"),
    steps=STEPS)

# + tags=["block:eval", "prev:train"]
eval_result = estimator.evaluate(input_fn=lambda: training_input_fn(
    tf_transform_output,
    os.path.join(trns_output, 'eval' + '*'),
    BATCH_SIZE,
    "tips"),
    steps=50)
print(eval_result)
# -

# #### Model Analysis
# TF Model Analysis docs: https://www.tensorflow.org/tfx/model_analysis/get_started

# + tags=["skip"]
# TODO: Implement model load and params analysis
def eval_input_receiver_fn(transformed_output):
    """Build everything needed for the tf-model-analysis to run the model.

    Args:
      transformed_output: tft.TFTransformOutput

    Returns:
      EvalInputReceiver function, which contains:
        - Tensorflow graph which parses raw untransformed features, applies
          the tf-transform preprocessing operators.
        - Set of raw, untransformed features.
        - Label against which predictions will be compared.
    """
    # Placeholder fed with serialized tf.Example protos at analysis time.
    serialized_tf_example = tf.compat.v1.placeholder(
        dtype=tf.string, shape=[None], name='input_example_tensor')
    # Parse the raw features, then apply the tf.transform preprocessing graph.
    features = tf.io.parse_example(serialized_tf_example, transformed_output.raw_feature_spec())
    transformed_features = transformed_output.transform_raw_features(features)
    receiver_tensors = {'examples': serialized_tf_example}
    return tfma.export.EvalInputReceiver(
        features=transformed_features,
        receiver_tensors=receiver_tensors,
        labels=transformed_features[LABEL_KEY])


# EXPORT MODEL
eval_model_dir = os.path.join(training_output, 'tfma_eval_model_dir')
tfma.export.export_eval_savedmodel(
    estimator=estimator,
    export_dir_base=eval_model_dir,
    eval_input_receiver_fn=(lambda: eval_input_receiver_fn(tf_transform_output)))
03-pipelines/kale/taxi-cab-classification_[TF]/taxicab_pipeline.ipynb