code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os, glob, json, spacy, pickle import random # + class TemplateGenerator(): def __init__(self): self.templates = json.load(open(r'../data/interim/templates.json', 'r', encoding='utf-8')) entidades = json.load(open(r'../data/interim/ents_para_template.json', 'r', encoding='latin9')) with open(r'../data/interim/despesapessoal.txt', 'r', encoding='utf-8') as f: self.despesa_pessoal = set([x.replace('\n','') for x in f.readlines() if x]) with open(r'../data/interim/despesapublica.txt', 'r', encoding='utf-8') as f: self.despesa_publica = set([x.replace('\n','') for x in f.readlines() if x]) with open(r'../data/interim/substantivos.txt', 'r', encoding='latin9') as f: self.substantivos = list(set([x.replace('\n','') for x in f.readlines() if x])) self.substantivos = list(filter(lambda x:x not in self.despesa_pessoal and x not in self.despesa_publica, self.substantivos)) self.utterances = {} self.utterances['DESPESAPESSOAL'] = [] self.utterances['DESPESAPUBLICA'] = [] self.utterances['GENERICA'] = [] def generate(self): # pessoal self.tpessoal = self.templates['DESPESAPESSOAL'] for t in self.tpessoal: for d in self.despesa_pessoal: self.utterances['DESPESAPESSOAL'].append(t.replace('{ENTIDADE}',d)) self.tpublica = self.templates['DESPESAPUBLICA'] for t in self.tpublica: for d in self.despesa_publica: self.utterances['DESPESAPUBLICA'].append(t.replace('{ENTIDADE}',d)) len_generica = max(len(self.utterances['DESPESAPUBLICA']), len(self.utterances['DESPESAPESSOAL'])) tgen = self.tpessoal + self.tpublica for _ in range(len_generica): t = random.choice(tgen) s = random.choice(self.substantivos) self.utterances['GENERICA'].append(t.replace('{ENTIDADE}',s)) def save(self): with open(r'../data/interim/utterances.json', 'w', encoding='utf-8') as fp: 
fp.write(json.dumps(self.utterances, indent=2, ensure_ascii=False)) def save_train_data(self): self.train_data = [] self.tpessoal = self.templates['DESPESAPESSOAL'] for t in self.tpessoal: for d in self.despesa_pessoal: cnt = t.replace('{ENTIDADE}',d) edict = {'entities':[(cnt.find(d),cnt.find(d)+len(d),"CONTROLEEXTERNO")]} self.train_data.append((cnt, edict)) self.tpublica = self.templates['DESPESAPUBLICA'] for t in self.tpublica: for d in self.despesa_publica: cnt = t.replace('{ENTIDADE}',d) edict = {'entities':[(cnt.find(d),cnt.find(d)+len(d),"CONTROLEEXTERNO")]} self.train_data.append((cnt, edict)) with open(r'../data/interim/synth_train_data.pkl', 'wb') as fp: pickle.dump(self.train_data, fp) # - t = TemplateGenerator() t.generate() t.save_train_data() t.save() # Geração do embedding específico with open(r'data\gazetteers\despesapessoal.txt', 'r', encoding='utf-8') as f: despesa_pessoal = set([x.replace('\n','') for x in f.readlines() if x]) with open(r'data\gazetteers\despesapublica.txt', 'r', encoding='utf-8') as f: despesa_publica = set([x.replace('\n','') for x in f.readlines() if x]) gazetteers = list(despesa_pessoal) + list(despesa_publica) nlp corpus = [[str(x) for x in nlp(g)] for g in gazetteers] model = Word2Vec(corpus, min_count=1, size=30, workers=3, window=2, sg=1) model.wv['siai-dp'] model.wv.save_word2vec_format(r'data\wordemb\specific.txt')
notebooks/02_template_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from lxml.html import fromstring import requests from itertools import cycle import traceback import random import time def get_proxies(): url = 'https://free-proxy-list.net/' response = requests.get(url) parser = fromstring(response.text) proxies = set() for i in parser.xpath('//tbody/tr')[:10]: if i.xpath('.//td[7][contains(text(),"yes")]'): proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]]) proxies.add(proxy) return proxies def get_random_proxies(): proxies = [] ports = ["80", "8080", "3128"] for _ in range(10): ip = f"{random.randint(1,255)}.{random.randint(1,255)}.{random.randint(1,255)}.{random.randint(1,255)}" for port in ports: proxies.append(ip + ":" + port) return set(proxies) #If you are copy pasting proxy ips, put in the list below #proxies = ['172.16.58.3:80', '192.168.3.11:45169', '192.168.127.12:8080', '172.16.31.10:3128', '172.16.31.10:8080', '192.168.127.12:3128', '13.92.196.150:8080'] def get_working_proxies(): working_proxies = [] proxies = get_proxies() proxy_pool = cycle(proxies) url = 'https://httpbin.org/ip' for i in range(len(proxies)): #Get a proxy from the pool proxy = next(proxy_pool) try: response = requests.get(url, proxies={"http": proxy, "https": proxy}) working_proxies.append(proxy) except: #Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work. 
#We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url pass return working_proxies working_proxies = [] for _ in range(15): working_proxies += get_working_proxies() time.sleep(60 * 5) # - first_part = [] for proxy in working_proxies: first_part.append(proxy.split(".")[0]) set(first_part) # + first_part = [] for proxy in working_proxies: first_part.append(proxy.split(":")[1]) keys = {}.fromkeys(list(set(first_part)), 0) for i in first_part: keys[i] += 1 keys # + first_part = [] for proxy in working_proxies: first_part.append(proxy.split(".")[0]) keys = {}.fromkeys(list(set(first_part)), 0) for i in first_part: keys[i] += 1 keys # -
analysis/analyze_proxies.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import nltk tweets=pd.read_csv('Tweets.csv') tweets.head() tweets.shape tweets_df=tweets.drop(tweets[tweets['airline_sentiment_confidence']<0.5].index,axis=0) tweets_df.shape X=tweets_df['text'] y=tweets_df['airline_sentiment'] # Cleaning the text data from nltk.corpus import stopwords nltk.download('stopwords') import string from nltk.stem import PorterStemmer stop_words=stopwords.words('english') punct=string.punctuation stemmer=PorterStemmer() # + import re cleaned_data=[] for i in range(len(X)): tweet=re.sub('[^a-zA-Z]',' ',X.iloc[i]) tweet=tweet.lower().split() tweet=[stemmer.stem(word) for word in tweet if (word not in stop_words) and (word not in punct)] tweet=' '.join(tweet) cleaned_data.append(tweet) # - cleaned_data y # + sentiment_ordering = ['negative', 'neutral', 'positive'] y = y.apply(lambda x: sentiment_ordering.index(x)) # - y.head() from sklearn.feature_extraction.text import CountVectorizer cv=CountVectorizer(max_features=3000,stop_words=['virginamerica','unit']) X_fin=cv.fit_transform(cleaned_data).toarray() X_fin.shape from sklearn.naive_bayes import MultinomialNB from sklearn.model_selection import train_test_split model=MultinomialNB() # + X_train,X_test,y_train,y_test=train_test_split(X_fin,y,test_size=0.3) # - model.fit(X_train,y_train) # + y_pred=model.predict(X_test) # - from sklearn.metrics import classification_report cf=classification_report(y_test,y_pred) print(cf)
Twitter_sentiment_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt import seaborn as sns data = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/04_Exploring_Data_with_Graphs/Data_Files/DownloadFestival.dat', sep='\s+') print(data.head()) # boxplot using seaborn _ = sns.boxplot(x=data['gender'],y=data['day1']) plt.show() # ## Without outliers in data data2 = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/04_Exploring_Data_with_Graphs/Data_Files/DownloadFestival1.dat', sep='\s+') # day=1 _ = sns.boxplot(x=data2['gender'],y=data2['day1']) plt.show() # boxplot using pandas _ = data2.boxplot(column='day1', by='gender') plt.show() # day=2 _ = sns.boxplot(x=data2['gender'],y=data2['day2']) plt.show() # day=3 _ = sns.boxplot(x=data2['gender'],y=data2['day3']) plt.show()
Python/statistics_with_Python/04_Exploring_Data_with_Graphs/Markdown_notebook/04_boxplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip3 install geograpy3 import pandas as pd import requests import json import os import pprint from bs4 import BeautifulSoup from io import StringIO import csv import geograpy from tqdm.notebook import tqdm # # STEP 1 : Extract ip address from individual parsed email if there is any PATH = '/Users/naturbiz/Desktop/DSCI550/DSCI-550-Assignment-1/data/separated_by_email/' ip = [] for ipaddress in os.listdir(PATH): with open(PATH+ipaddress)as f: jsonfile=json.load(f) try: location = jsonfile['MboxParser-x-originating-ip'] location = location.split(" ") location=location[0].lstrip("[").rstrip("]") if "]" in location: location = location.split("]") ip.append(location[0]) else: ip.append(location) except: ip.append("None") # # STEP 2: Extract email contents emails = [] for file in os.listdir(PATH): with open(PATH+file)as f: f=json.load(f) try: emails.append(f['X-TIKA:content']) except: emails.append("No email content") # # STEP 3: Extract Location From Email Content and use geocode api to retrieve lng and lad email_location = [] for index,value in tqdm(enumerate(ip)): if value == "None": location = geograpy.get_place_context(text = emails[index]) email_location.append(location.country_cities) else: email_location.append(value) one_location = [] for i in email_location: if len(i) == 0 or type(i)==str: continue else: one_location.append(i) # # STEP 4: Use Google Geocode API to retrieve the lag and lng of country and its corresponding city city_country = [] url = "https://maps.googleapis.com/maps/api/geocode/json?" 
API_KEY = "<KEY>" for location in email_location: try: for keys,values in location.items(): possible_location = [] for city in values: address = city+", "+keys params = {'key':API_KEY,"address":address} req = requests.get(url,params=params) possible_location.append(req.json()["results"][0]["geometry"]["location"]) city_country.append(possible_location) except: city_country.append(location) for index,value in enumerate(city_country): if len(value)== 0: city_country[index] = "Location unknown" city_country city_country_2 = [] for i in city_country: if type(i)==str and i != "Location unknown": city_country_2.append(i) elif type(i) == list and len(i) ==1: city_country_2.append(i[0]) elif type(i) == list and len(i) > 1: city_country_2.append(i[0]) elif type(i) == dict: first_key = next(iter(i)) new_dict = {first_key:i[first_key][0]} city_country_2.append(new_dict) city_country_2 city_country_3 = [] for i in city_country_2: if type(i)!=str: city_country_3.append(i) city_country_3 dict_list= [] third_list= [] for i in city_country_2: if type(i)==dict and next(iter(i))=='lat': dict_list.append(i) elif type(i) == dict: third_list.append(i) dict_list third_list loc_list = [] for i in third_list: for keys,values in i.items(): loc_dict = {"country":keys,"city":values} loc_list.append(loc_dict) API_KEY = "<KEY>" for i in loc_list: city = i['city'] country = i["country"] url = f'https://maps.googleapis.com/maps/api/geocode/json?address={city}+{country}&key={API_KEY}' response = requests.get(url) try: info=response.json()['results'][0]["geometry"]["location"] i.update(info) except: continue loc_df = pd.DataFrame(loc_list) loc_df API_KEY = "<KEY>" location_list = [] for location in dict_list: address = [] lat= location["lat"] lng=location['lng'] complete_url = f'https://maps.googleapis.com/maps/api/geocode/json?latlng={lat},{lng}&key={API_KEY}' req = requests.get(complete_url) info = req.json()['results'][0]['address_components'] location_list.append(info) try_1 = [] for info in 
location_list: try_2 = {} for i in info: if 'administrative_area_level_1' in i["types"]: try_2["city"] = i['long_name'] elif 'country' in i["types"]: try_2["country"] =i["long_name"] try_1.append(try_2) # for dicts in info: # print(dicts) # break try_location = dict_list for index,value in enumerate(try_1): value.update(try_location[index]) try_1 coordinates_df = pd.DataFrame(try_1) coordinates_df # # STEP 4 : Use IP address to search for lng and lad import requests countryAndCity = [] key = "c71a09e8533eda727d6c8e4fe025ea79" for location in city_country_2: if isinstance(location,str): ip_info = requests.get("http://api.ipstack.com/"+location+"?access_key="+key+"&ouput=json") ip_info = ip_info.json() countryAndCity.append({"continent":ip_info['continent_name'],"lat":ip_info['latitude'],"lng":ip_info["longitude"],"country":ip_info['country_name'],"city":ip_info['city']}) else: continue ip_df = pd.DataFrame(countryAndCity) ip_df ip_df.drop(columns=['continent'],inplace=True) country_geo = pd.concat([coordinates_df,loc_df,ip_df],ignore_index=True) # + df['Counts'] = df.groupby(['Color'])['Value'].transform('count') # - country_geo['count'] = country_geo.groupby(['country'])['city'].transform('count') country_geo country_geo.to_csv("country.csv",index=False)
notebooks/vis5conversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Ruby 3.0.0 # language: ruby # name: ruby # --- # ## Roll Options and Futures # # **IB-Ruby** provides helpers to roll existing positions into other strikes and/or other expiries # require_relative '../lib/iruby_init.rb' # List Portfoliopositions G.get_account_data puts G.clients.last.contracts.as_table # The _Ford_-Option will be rolled into the next month ford = G.clients.last.contracts.detect{|x| x.symbol == 'F'} # ------------------------------------------------- rolling_contract = ford.roll expiry: '+1m' # ------------------------------------------------- puts rolling_contract.as_table puts rolling_contract.legs.as_table puts rolling_contract.combo_legs.to_human # * Option#roll takes the properties of the target option as parameters # * Thus, `:expiry, :strike, :trading_class` might be specified # * Instead of _YYYYMM_ or _YYYYMMDD_ for `:expiry``[+/-]{n}{w|m}`may be specified, too # * The existing position is **bougth**! # # The `Option#roll`helper is designed to close a short-Options position and to open another one with slightly changed attributes. # # ### Rolling Futures # # To roll a future is equivalent to setup a calendar-spread es = Symbols::Futures.es.verify.first puts es.as_table rolling_future = es.roll expiry: "+12m" puts rolling_future.as_table puts rolling_future.legs.as_table puts rolling_future.combo_legs.to_human # To roll an existing long-position, the `rolling_future` has to be **sold**! # # To roll an existing short-position the `rolling_future` has to be **bought**! # # eg: `G.client.last.place contract: rolling_future, order: Limit_order( size: 1, price: 45, action: :sell ) ` to roll a long-Future to the other expiry
contract/notebooks/roll-options.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import glob base_dir = os.path.join('F:/0Sem 7/B.TECH PROJECT/0Image data/cell_images') infected_dir = os.path.join(base_dir,'Parasitized') healthy_dir = os.path.join(base_dir,'Uninfected') infected_files = glob.glob(infected_dir+'/*.png') healthy_files = glob.glob(healthy_dir+'/*.png') print("Infected samples:",len(infected_files)) print("Uninfected samples:",len(healthy_files)) # + import numpy as np import pandas as pd np.random.seed(42) files_df = pd.DataFrame({ 'filename': infected_files + healthy_files, 'label': ['malaria'] * len(infected_files) + ['healthy'] * len(healthy_files) }).sample(frac=1, random_state=42).reset_index(drop=True) files_df.head() # + from sklearn.model_selection import train_test_split from collections import Counter train_files, test_files, train_labels, test_labels = train_test_split(files_df['filename'].values, files_df['label'].values, test_size=0.3, random_state=42) train_files, val_files, train_labels, val_labels = train_test_split(train_files, train_labels, test_size=0.1, random_state=42) print(train_files.shape, val_files.shape, test_files.shape) print('Train:', Counter(train_labels), '\nVal:', Counter(val_labels), '\nTest:', Counter(test_labels)) # + import cv2 from concurrent import futures import threading def get_img_shape_parallel(idx, img, total_imgs): if idx % 5000 == 0 or idx == (total_imgs - 1): print('{}: working on img num: {}'.format(threading.current_thread().name, idx)) return cv2.imread(img).shape ex = futures.ThreadPoolExecutor(max_workers=None) data_inp = [(idx, img, len(train_files)) for idx, img in enumerate(train_files)] print('Starting Img shape computation:') train_img_dims_map = ex.map(get_img_shape_parallel, [record[0] for record in data_inp], [record[1] for record in data_inp], 
[record[2] for record in data_inp]) train_img_dims = list(train_img_dims_map) print('Min Dimensions:', np.min(train_img_dims, axis=0)) print('Avg Dimensions:', np.mean(train_img_dims, axis=0)) print('Median Dimensions:', np.median(train_img_dims, axis=0)) print('Max Dimensions:', np.max(train_img_dims, axis=0)) # + IMG_DIMS = (32, 32) def get_img_data_parallel(idx, img, total_imgs): if idx % 5000 == 0 or idx == (total_imgs - 1): print('{}: working on img num: {}'.format(threading.current_thread().name, idx)) img = cv2.imread(img) img = cv2.resize(img, dsize=IMG_DIMS, interpolation=cv2.INTER_CUBIC) img = np.array(img, dtype=np.float32) return img ex = futures.ThreadPoolExecutor(max_workers=None) train_data_inp = [(idx, img, len(train_files)) for idx, img in enumerate(train_files)] val_data_inp = [(idx, img, len(val_files)) for idx, img in enumerate(val_files)] test_data_inp = [(idx, img, len(test_files)) for idx, img in enumerate(test_files)] print('Loading Train Images:') train_data_map = ex.map(get_img_data_parallel, [record[0] for record in train_data_inp], [record[1] for record in train_data_inp], [record[2] for record in train_data_inp]) train_data = np.array(list(train_data_map)) print('\nLoading Validation Images:') val_data_map = ex.map(get_img_data_parallel, [record[0] for record in val_data_inp], [record[1] for record in val_data_inp], [record[2] for record in val_data_inp]) val_data = np.array(list(val_data_map)) print('\nLoading Test Images:') test_data_map = ex.map(get_img_data_parallel, [record[0] for record in test_data_inp], [record[1] for record in test_data_inp], [record[2] for record in test_data_inp]) test_data = np.array(list(test_data_map)) train_data.shape, val_data.shape, test_data.shape # + import matplotlib.pyplot as plt # %matplotlib inline plt.figure(1 , figsize = (8 , 8)) n = 0 for i in range(16): n += 1 r = np.random.randint(0 , train_data.shape[0] , 1) plt.subplot(4 , 4 , n) plt.subplots_adjust(hspace = 0.5 , wspace = 0.5) 
plt.imshow(train_data[r[0]]/255.) plt.title('{}'.format(train_labels[r[0]])) plt.xticks([]) , plt.yticks([]) # + BATCH_SIZE = 32 NUM_CLASSES = 2 EPOCHS = 25 INPUT_SHAPE = (32, 32, 3) train_imgs_scaled = train_data / 255. val_imgs_scaled = val_data / 255. # encode text category labels from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) train_labels_enc = le.transform(train_labels) val_labels_enc = le.transform(val_labels) print(train_labels[:6], train_labels_enc[:6]) # - import tensorflow as tf # + vgg = tf.keras.applications.densenet.DenseNet121(include_top=False, weights='imagenet', input_shape=INPUT_SHAPE) # Freeze the layers vgg.trainable = True set_trainable = False for layer in vgg.layers: layer.trainable = False base_vgg = vgg base_out = base_vgg.output pool_out = tf.keras.layers.Flatten()(base_out) hidden1 = tf.keras.layers.Dense(512, activation='relu')(pool_out) drop1 = tf.keras.layers.Dropout(rate=0.3)(hidden1) hidden2 = tf.keras.layers.Dense(512, activation='relu')(drop1) drop2 = tf.keras.layers.Dropout(rate=0.3)(hidden2) out = tf.keras.layers.Dense(1, activation='sigmoid')(drop2) model = tf.keras.Model(inputs=base_vgg.input, outputs=out) from tensorflow.keras.optimizers import Adam adam = Adam(lr=0.0001) model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy']) print("Total Layers:", len(model.layers)) print("Total trainable layers:", sum([1 for l in model.layers if l.trainable])) # - print(model.summary()) history = model.fit(x=train_imgs_scaled, y=train_labels_enc, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(val_imgs_scaled, val_labels_enc), verbose=1) # + f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) max_epoch = len(history.history['accuracy'])+1 epoch_list = list(range(1,max_epoch)) ax1.plot(epoch_list, history.history['accuracy'], label='Train Accuracy') ax1.plot(epoch_list, 
history.history['val_accuracy'], label='Validation Accuracy') ax1.set_xticks(np.arange(1, max_epoch, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(1, max_epoch, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") # - # + test_imgs_scaled = test_data/255. test_labels_enc = le.transform(test_labels) # evaluate the model _, train_acc = model.evaluate(train_imgs_scaled, train_labels_enc, verbose=0) _, test_acc = model.evaluate(test_imgs_scaled, test_labels_enc, verbose=0) print('Train: %.3f, Test: %.3f' % (train_acc, test_acc)) # - print(model.summary())
malaria6_1_denseNet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Validate Tiles from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) # + import cv2 import numpy as np import shutil, os, sys, glob from sklearn.metrics import confusion_matrix, jaccard_similarity_score # - # # Confusion matrix # + datasets = ['xval_set_0', 'xval_set_1', 'xval_set_2', 'xval_set_3', 'xval_set_4', ] appendage = '_1024' size = 300 cfmats = [] for ds in datasets: predictions = sorted(glob.glob('/home/nathan/histo-seg/semantic-pca/analysis_segnet_basic/{}/*.png'.format(ds+appendage))) annotations = sorted(glob.glob('/home/nathan/histo-seg/semantic-pca/data/{}/val/mask/*.png'.format(ds))) print ds, len(predictions), len(annotations) preds = [] annos = [] for predx, annox in zip(predictions, annotations): # predx = predictions[0] # annox = annotations[0] preds.append(cv2.imread(predx,-1)) annos.append(cv2.resize(cv2.imread(annox,-1), dsize=(size,size), interpolation=cv2.INTER_NEAREST)) print len(preds), preds[0].shape#, preds[0].ravel().shape() print len(preds), annos[0].shape#, annos[0].ravel().shape() predimg = np.hstack([pimg.ravel() for pimg in preds]) annoimg = np.hstack([aimg.ravel() for aimg in annos]) cfmat = confusion_matrix(annoimg, predimg) print cfmat cfmats.append(cfmat) cfmat_total = np.sum(cfmats, axis=0) print cfmat_total # - # # Jaccard directly # + datasets = ['xval_set_0', 'xval_set_1', 'xval_set_2', 'xval_set_3', 'xval_set_4', ] appendage = '_1024' size = 300 jaccards = [] preds = [] annos = [] for ds in datasets: predictions = sorted(glob.glob('/home/nathan/histo-seg/semantic-pca/analysis_segnet_basic/{}/*.png'.format(ds+appendage))) annotations = sorted(glob.glob('/home/nathan/histo-seg/semantic-pca/data/{}/val/mask/*.png'.format(ds))) print ds, 
len(predictions), len(annotations) # preds = [] # annos = [] for predx, annox in zip(predictions, annotations): # predx = predictions[0] # annox = annotations[0] preds.append(cv2.imread(predx,-1)) annos.append(cv2.resize(cv2.imread(annox,-1), dsize=(size,size), interpolation=cv2.INTER_NEAREST)) print len(preds), preds[0].shape#, preds[0].ravel().shape() print len(preds), annos[0].shape#, annos[0].ravel().shape() predimg = np.hstack([pimg.ravel() for pimg in preds]) annoimg = np.hstack([aimg.ravel() for aimg in annos]) for k in [0,1,2,3]: anno_ = annoimg==k pred_ = predimg==k print '\t', k, jaccard_similarity_score(anno_, pred_) jaccard = jaccard_similarity_score(annoimg, predimg) print '\t',jaccard jaccards.append(jaccard) # cfmat_total = np.sum(cfmats, axis=0) # print cfmat_total # - np.mean([0.921, 0.8566, 0.9509, 0.8586])
notebooks/validate_tiles.ipynb
# # Hyperparameter tuning by randomized-search # # In the previous notebook, we showed how to use a grid-search approach to # search for the best hyperparameters maximizing the statistical performance # of a predictive model. # # However, a grid-search approach has limitations. It does not scale when # the number of parameters to tune is increasing. Also, the grid will imposed # a regularity during the search which might be problematic. # # In this notebook, we will present the another method to tune hyperparameters # called randomized search. # ## Our predictive model # # Let us reload the dataset as we did previously: # + from sklearn import set_config set_config(display="diagram") # + import pandas as pd adult_census = pd.read_csv("../datasets/adult-census.csv") # - # We extract the column containing the target. target_name = "class" target = adult_census[target_name] target # We drop from our data the target and the `"education-num"` column which # duplicates the information with `"education"` columns. data = adult_census.drop(columns=[target_name, "education-num"]) data.head() # Once the dataset is loaded, we split it into a training and testing sets. # + from sklearn.model_selection import train_test_split data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=42) # - # We will create the same predictive pipeline as seen in the grid-search # section. 
# + from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OrdinalEncoder from sklearn.compose import make_column_selector as selector categorical_columns_selector = selector(dtype_include=object) categorical_columns = categorical_columns_selector(data) categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1) preprocessor = ColumnTransformer([ ('cat-preprocessor', categorical_preprocessor, categorical_columns)], remainder='passthrough', sparse_threshold=0) # + # for the moment this line is required to import HistGradientBoostingClassifier from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.pipeline import Pipeline model = Pipeline([ ("preprocessor", preprocessor), ("classifier", HistGradientBoostingClassifier(random_state=42, max_leaf_nodes=4)), ]) model # - # ## Tuning using a randomized-search # # With the `GridSearchCV` estimator, the parameters need to be specified # explicitly. We already mentioned that exploring a large number of values for # different parameters will be quickly untractable. # # Instead, we can randomly generate the parameter candidates. Indeed, # such approach avoids the regularity of the grid. Hence, adding more # evaluations can increase the resolution in each direction. This is the # case in the frequent situation where the choice of some hyperparameters # is not very important, as for hyperparameter 2 in the figure below. # # ![Randomized vs grid search](../figures/grid_vs_random_search.svg) # # Indeed, the number of evaluation points need to be divided across the # two different hyperparameters. With a grid, the danger is that the # region of good hyperparameters fall between the line of the grid: this # region is aligned with the grid given that hyperparameter 2 has a weak # influence. 
Rather, stochastic search will sample hyperparameter 1 # independently from hyperparameter 2 and find the optimal region. # # The `RandomizedSearchCV` class allows for such stochastic search. It is # used similarly to the `GridSearchCV` but the sampling distributions # need to be specified instead of the parameter values. For instance, we # will draw candidates using a log-uniform distribution because the parameters # we are interested in take positive values with a natural log scaling (.1 is # as close to 1 as 10 is). # # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">Random search (with <tt class="docutils literal">RandomizedSearchCV</tt>) is typically beneficial compared # to grid search (with <tt class="docutils literal">GridSearchCV</tt>) to optimize 3 or more # hyperparameters.</p> # </div> # # We will optimize 3 other parameters in addition to the ones we # optimized above: # # * `max_iter`: it corresponds to the number of trees in the ensemble; # * `min_samples_leaf`: it corresponds to the minimum number of samples # required in a leaf; # * `max_bins`: it corresponds to the maximum number of bins to construct the # histograms. # # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">The <tt class="docutils literal">loguniform</tt> function from SciPy returns a floating number. 
Since we # want to us this distribution to create integer, we will create a class that # will cast the floating number into an integer.</p> # </div> # + from scipy.stats import loguniform class loguniform_int: """Integer valued version of the log-uniform distribution""" def __init__(self, a, b): self._distribution = loguniform(a, b) def rvs(self, *args, **kwargs): """Random variable sample""" return self._distribution.rvs(*args, **kwargs).astype(int) # - # # Now, we can define the randomized search using the different distributions. # Executing 10 iterations of 5-fold cross-validation for random # parametrizations of this model on this dataset can take from 10 seconds to # several minutes, depending on the speed of the host computer and the number # of available processors. # + # %%time from sklearn.model_selection import RandomizedSearchCV param_distributions = { 'classifier__l2_regularization': loguniform(1e-6, 1e3), 'classifier__learning_rate': loguniform(0.001, 10), 'classifier__max_leaf_nodes': loguniform_int(2, 256), 'classifier__min_samples_leaf': loguniform_int(1, 100), 'classifier__max_bins': loguniform_int(2, 255), } model_random_search = RandomizedSearchCV( model, param_distributions=param_distributions, n_iter=10, cv=5, verbose=1, ) model_random_search.fit(data_train, target_train) # - # Then, we can compute the accuracy score on the test set. # + accuracy = model_random_search.score(data_test, target_test) print(f"The test accuracy score of the best model is " f"{accuracy:.2f}") # + from pprint import pprint print("The best parameters are:") pprint(model_random_search.best_params_) # - # # We can inspect the results using the attributes `cv_results` as we did # previously. 
def shorten_param(param_name):
    """Strip the pipeline prefix from a hyperparameter name.

    ``"classifier__learning_rate"`` becomes ``"learning_rate"``; names
    without a ``"__"`` separator are returned unchanged.
    """
    _, separator, suffix = param_name.rpartition("__")
    return suffix if separator else param_name
Thus, we # are able to quickly inspect if there is a range of hyperparameters which is # working or not. # # <div class="admonition note alert alert-info"> # <p class="first admonition-title" style="font-weight: bold;">Note</p> # <p class="last">We <strong>transformed most axis values by taking a log10 or log2</strong> to # spread the active ranges and improve the readability of the plot.</p> # </div> # # In particular for this hyper-parameter search, it is interesting to see that # the yellow lines (top performing models) all reach intermediate values for # the learning rate, that is, tick values between -2 and 0 which correspond to # learning rate values of 0.01 to 1.0 once we invert the log10 transform for # that axis. # # It is possible to **select a range of results by clicking and holding on any # axis** of the parallel coordinate plot. You can then slide (move) the range # selection and cross two selections to see the intersections. You can undo a # selection by clicking once again on the same axis. # # We also observe that it is not possible to select the highest performing # models by selecting lines of on the `max_bins` axis with tick values between # 1 and 3. # # The other hyper-parameters are not very sensitive. We can check that if we # select the `learning_rate` axis tick values between -1.5 and -0.5 and # `max_bins` tick values between 5 and 8, we always select top performing # models, whatever the values of the other hyper-parameters. # # In this notebook, we have seen how randomized search offer a valuable # alternative to grid-search when the number of hyperparameters to tune is more # than two. It also alleviates the regularity imposed by the grid that might be # problematic sometimes.
notebooks/parameter_tuning_randomized_search.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="E08w9hc0TAbI" # # BioMedBERT: BREATHE -> (BERT+BioBERT) # + # # !sudo pip install tensorflow==1.15 # - import os import sys import json import tensorflow as tf tf.__version__ # + [markdown] colab_type="text" id="0uz-18dZ2tDH" # Save model assets and checkpoints to GCS # + colab={} colab_type="code" id="LMZKDdyL1pb3" BUCKET_NAME = "ekaba-assets" MODEL_DIR = "biomedbert_base" tf.io.gfile.mkdir(MODEL_DIR) # + [markdown] colab_type="text" id="2mVR9qBK3V5j" # Hyparameter configuration for BERT BASE # + # VOC_SIZE = 32000 # VOC_FNAME = "biomedbert-8M.txt" # + colab={} colab_type="code" id="oIibj7MY3TH5" # # use this for BERT-base # bert_base_config = { # "attention_probs_dropout_prob": 0.1, # "directionality": "bidi", # "hidden_act": "gelu", # "hidden_dropout_prob": 0.1, # "hidden_size": 768, # "initializer_range": 0.02, # "intermediate_size": 3072, # "max_position_embeddings": 512, # "num_attention_heads": 12, # "num_hidden_layers": 12, # "pooler_fc_size": 768, # "pooler_num_attention_heads": 12, # "pooler_num_fc_layers": 3, # "pooler_size_per_head": 128, # "pooler_type": "first_token_transform", # "type_vocab_size": 2, # "vocab_size": VOC_SIZE # } # with open("{}/bert_config.json".format(MODEL_DIR), "w") as fo: # json.dump(bert_base_config, fo, indent=2) # + # # update vocab # # !cp ../vocabulary/full_text/biomedbert-8M.txt biomedbert_base/biomedbert-8M.txt # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="2QHoAJs63PrO" outputId="c98a8bcb-9ba0-43d1-9fe7-4df5f80419df" # # move to GCS # # !gsutil -m cp -r $MODEL_DIR gs://ekaba-assets/ # - #import bert modules sys.path.append("bert") from bert import modeling, optimization, tokenization from bert.run_pretraining import 
input_fn_builder, model_fn_builder # + colab={} colab_type="code" id="QR5ffWV15OHf" import logging # configure logging log = logging.getLogger('tensorflow') log.setLevel(logging.INFO) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="u0fRU1XT3vu-" outputId="268c16ab-9274-4d9a-e185-b46b556dfd00" PRETRAINING_DIR = "pre_trained_data_full_biomed" VOC_FNAME = "biomedbert-8M.txt" # Input data pipeline config TRAIN_BATCH_SIZE = 128 # 128 -> 12.8K -> 1.2K MAX_PREDICTIONS = 20 #@param {type:"integer"} MAX_SEQ_LENGTH = 128 #@param {type:"integer"} MASKED_LM_PROB = 0.15 #@param # Training procedure config EVAL_BATCH_SIZE = 128 # 64, 128 - 12.8K -> 1.2K LEARNING_RATE = 1e-5 # 2e-5 TRAIN_STEPS = 100000000 # 1M -> 100M SAVE_CHECKPOINTS_STEPS = 25000 # 2500 -> 25K NUM_TPU_CORES = 128 if BUCKET_NAME: BUCKET_PATH = "gs://{}".format(BUCKET_NAME) else: BUCKET_PATH = "." BERT_GCS_DIR = "{}/{}".format(BUCKET_PATH, MODEL_DIR) DATA_GCS_DIR = "{}/{}".format(BUCKET_PATH, PRETRAINING_DIR) VOCAB_FILE = os.path.join(BERT_GCS_DIR, VOC_FNAME) CONFIG_FILE = os.path.join(BERT_GCS_DIR, "bert_config.json") INIT_CHECKPOINT = tf.train.latest_checkpoint(BERT_GCS_DIR) # 'gs://ekaba-assets/biomedbert_base/model.ckpt-20577500 bert_config = modeling.BertConfig.from_json_file(CONFIG_FILE) input_files = tf.io.gfile.glob(os.path.join(DATA_GCS_DIR,'*tfrecord')) log.info("Using checkpoint: {}".format(INIT_CHECKPOINT)) log.info("Using {} data shards".format(len(input_files))) # + [markdown] colab_type="text" id="jEawhTlo5frp" # **Train on TPUs** # + language="bash" # export TPU_NAME='biomedbert-preempt' # echo $TPU_NAME # + colab={"base_uri": "https://localhost:8080/", "height": 275} colab_type="code" id="aM4Vn5RZ3pqk" outputId="b96576de-fb9c-47a2-b770-572e99dba93b" USE_TPU = True model_fn = model_fn_builder( bert_config=bert_config, init_checkpoint=INIT_CHECKPOINT, learning_rate=LEARNING_RATE, num_train_steps=TRAIN_STEPS, num_warmup_steps=10, #10, use_tpu=USE_TPU, 
use_one_hot_embeddings=True ) tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( zone='europe-west4-a', project='ai-vs-covid19', job_name='biomedbert', tpu='biomedbert-preempt') run_config = tf.compat.v1.estimator.tpu.RunConfig( cluster=tpu_cluster_resolver, model_dir=BERT_GCS_DIR, save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS, tpu_config=tf.compat.v1.estimator.tpu.TPUConfig( iterations_per_loop=SAVE_CHECKPOINTS_STEPS, num_shards=NUM_TPU_CORES, per_host_input_for_training=tf.compat.v1.estimator.tpu.InputPipelineConfig.PER_HOST_V2)) estimator = tf.compat.v1.estimator.tpu.TPUEstimator( use_tpu=USE_TPU, model_fn=model_fn, config=run_config, train_batch_size=TRAIN_BATCH_SIZE, eval_batch_size=EVAL_BATCH_SIZE) train_input_fn = input_fn_builder( input_files=input_files, max_seq_length=MAX_SEQ_LENGTH, max_predictions_per_seq=MAX_PREDICTIONS, is_training=True, num_cpu_threads=64 ) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="-3KYolcy5kvn" outputId="03a57f14-eba6-4e78-9325-63677b8cb253" estimator.train(input_fn=train_input_fn, max_steps=TRAIN_STEPS) # -
notebooks/ekaba_full_biomedbert.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: gen2
#     language: python
#     name: gen2
# ---

# CycleGAN inference notebook: load pre-trained weights, translate
# images between the two domains, and time single-image prediction.

# +
import os

# %matplotlib inline
import matplotlib.pyplot as plt

from models.cycleGAN import CycleGAN
from utils.loaders import DataLoader

# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

# +
# run params
SECTION = 'mlz'
RUN_ID = '0002'
DATA_NAME = 'diff_specular'
RUN_FOLDER = 'run/{}/'.format(SECTION)
RUN_FOLDER += '_'.join([RUN_ID, DATA_NAME])

# FIX: use makedirs so missing parent folders ('run/', 'run/mlz/') do not
# make the first run crash; exist_ok keeps re-runs idempotent.
if not os.path.exists(RUN_FOLDER):
    os.makedirs(os.path.join(RUN_FOLDER, 'viz'), exist_ok=True)
    os.makedirs(os.path.join(RUN_FOLDER, 'images'), exist_ok=True)
    os.makedirs(os.path.join(RUN_FOLDER, 'weights'), exist_ok=True)

# FIX: `mode` was only ever assigned in commented-out lines, so the
# `if mode == 'build'` check below raised NameError. Default to loading
# pre-trained weights; set mode = 'build' to start from scratch.
mode = ''  # 'build'
# -

IMAGE_SIZE = 128

data_loader = DataLoader(dataset_name=DATA_NAME, img_res=(IMAGE_SIZE, IMAGE_SIZE))

# +
gan = CycleGAN(
    input_dim = (IMAGE_SIZE,IMAGE_SIZE,3)
    ,learning_rate = 0.0002
    , buffer_max_length = 50
    , lambda_validation = 1
    , lambda_reconstr = 10
    , lambda_id = 2
    , generator_type = 'unet'
    , gen_n_filters = 32
    , disc_n_filters = 32
    )

if mode == 'build':
    gan.save(RUN_FOLDER)
else:
    #gan.load_weights(os.path.join(RUN_FOLDER, 'weights/weights-90.h5'))
    gan.load_weights(os.path.join(RUN_FOLDER, 'weights/weights-66.h5'))

imgs_B = data_loader.load_img('data/diff_specular/trainB/33.jpg')
#imgs_B = data_loader.load_img('data/jpeg/testB/10.jpg')

# Translate one domain-B image to domain A and show both side by side.
fake_A = gan.g_BA.predict(imgs_B)
gen_imgs = np.concatenate([imgs_B, fake_A])

r, c = 1, 2

# Rescale images from [-1, 1] to [0, 1] for display.
gen_imgs = 0.5 * gen_imgs + 0.5
gen_imgs = np.clip(gen_imgs, 0, 1)

titles = ['Original', 'Translated']
fig, axs = plt.subplots(r, c, figsize=(25,12.5))
cnt = 0
for j in range(c):
    axs[j].imshow(gen_imgs[cnt])
    axs[j].set_title(titles[j])
    axs[j].axis('off')
    cnt += 1

# FIX: the 'temps' output folder was never created, which made savefig fail.
os.makedirs(os.path.join(RUN_FOLDER, 'temps'), exist_ok=True)
fig.savefig(os.path.join(RUN_FOLDER, "temps/test.png"))
plt.close()
# -

# +
# Time a single B -> A translation.
import time

imgs_B = data_loader.load_img('data/pbr_data/trainB/11.png')
start_time = time.time()
t_B = gan.g_BA.predict(imgs_B)
out_img = np.squeeze(t_B)
print(out_img.shape)
imgplot = plt.imshow(out_img)
#t_B_A = gan.g_AB.predict(t_B)
#out_B = gan.g_BA.predict(t_B_A)
elapsed_time = time.time() - start_time
print(elapsed_time)
print(t_B.shape)

# +
# Time a round trip A -> B -> A.
import time

imgs_A = data_loader.load_img('data/pbr_data/testA/64.png')
start_time = time.time()
t_B = gan.g_AB.predict(imgs_A)
t_A = gan.g_BA.predict(t_B)
out_img = np.squeeze(t_A)
print(out_img.shape)
imgplot = plt.imshow(out_img)
#t_B_A = gan.g_AB.predict(t_B)
#out_B = gan.g_BA.predict(t_B_A)
elapsed_time = time.time() - start_time
print(elapsed_time)
print(t_B.shape)
# -

# Sanity check that numpy round-trips through the run folder.
x = np.arange(10)
outfile = os.path.join(RUN_FOLDER, 'loss.npy')
np.save(outfile, x)
np.load(outfile)
gan/mlz.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyCharm (claraTrain2.0) # language: python # name: pycharm-e5c8d846 # --- # + [markdown] pycharm={"name": "#%% md\n"} # # AI Assisted Annotation with OHIF # # Zero foot print Dicom viewers are ideal for radiology workflow. # [Open Health Imaging Foundation (OHIF)](http://ohif.org/) Viewer is an open source, web-based, medical imaging viewer. # It can be configured to connect to Image Archives that support DicomWeb, # and offers support for mapping to proprietary API formats. # OHIF maintained extensions add support for viewing, annotating, # and reporting on DICOM images in 2D (slices) and 3D (volumes). # OHIF by it self is a web viewer that requires a PACS on the back end to hold the dicom images. # You can use either [Dcm4chee](https://github.com/dcm4che/dcm4chee-arc-light/wiki) or [Orthanc](https://www.orthanc-server.com/). # # For this we expand on how to use OHIF with AIAA. # This note book will guide your through how to set up # - OHIF web viewer with Nvidia AIAA integration # - Orthanc # - This current notebooks # # + [markdown] pycharm={"metadata": false} # ## Prerequisites # - Nvidia GPU with 8Gb of memory # - Going through [AIAA Notebook](AIAA.ipynb) to get basic understanding of AIAA. # - For trouble shot, basic understanding of docker and docker compose is recommended # # + [markdown] pycharm={"metadata": false} # # 0. Licensing # # ### 0.1 ORTHANC License # This note book will help you set up ORTHANC as a pacs back end to OHIF. 
# Please read through the license of ORTHANC and its plugins # https://book.orthanc-server.com/faq/licensing.html # # Please note: ORTHANC has plugins which in turn has more restrictive licenses as # - Orthanc Core: GPL v3 https://hg.orthanc-server.com/orthanc/file/tip/COPYING # - Web viewer: AGPL v3 https://hg.orthanc-server.com/orthanc-webviewer/file/tip/COPYING # - PostgreSQL Support: AGPL v3 https://hg.orthanc-server.com/orthanc-postgresql/file/tip/COPYING # - DICOMweb Support: AGPL v3 https://hg.orthanc-server.com/orthanc-dicomweb/file/tip/COPYING # - Whole slide imaging: AGPL v3 https://hg.orthanc-server.com/orthanc-wsi/file/tip/COPYING # # ORTHANC will be pulled as a docker image as it is specified in the docker compose file # ``` # orthanc: # image: jodogne/orthanc-plugins:1.9.1 # Orthanc docker image with plugins # ``` # ### 0.2 Nginx License # Docker compose will set up OHIF which in turn uses [Nginx](https://nginx.org/en/) # from their website it states # "The sources and documentation are distributed under the [2-clause BSD-like license](https://nginx.org/LICENSE)" # # + [markdown] pycharm={"metadata": false} # ## 1. Setup OHIF # Nvidia has integrated AIAA into OHIF as a plugin. # We have packaged all under OHIF-Orthanc sub-folder. # This will launch OHIF with Orthanc and this current notebooks.<br> # **Note: You need to close this docker container before doing steps below. # You can do this by running `scripts/stopClaraTrainNoteBooks.sh`** <br> # These steps will launch multiple dockers as shown below # <br><img src="screenShots/ThreeDockers.png" alt="Drawing" style="height: 300px;"/><br> # # 1. Open terminal on your bare bone host # 2. cd into `AIAA/OHIF-Orthanc` sub-folders # 3. (optional) edit docker-compose.yml to change/ map the data dir. # By default it will be inside the sub-folder. # You can simply change the `ConfigLocalPath=./data` in the `.env` file. # 4. Run `./restart.sh`. This will launch this notebooks, OHIF, and orthanc # 5. 
Basic authentication is provided. # # + [markdown] pycharm={"metadata": false} # ### 1.2. Check that everything is running: # Your main landing page is [yourip:3030/start/](http://localhost:3030/start/). # User/password is `<PASSWORD>`. # To change this user/password please refer to exercise section below. # You should see a page as below with links as listed below # <br><img src="screenShots/OHIFmainPage.png" alt="Drawing" style="height: 300px;"/><br> # # # 1. This Notebooks: </br> # Go to your browser at [yourip:3030/notebooks/](http://localhost:3030/notebooks/) # and use the token printed out after running the `restart.sh` script # 2. OHIF: </br> # Go to your browser [yourip:3030](http://localhost:3030/) to see OHIF web page as below # <br><img src="screenShots/OHIFList.png" alt="Drawing" style="height: 300px;"/><br> # 3. Orthanc: </br> # Go to your browser at [yourip:3030/pacs-admin/](http://localhost:3030/pacs-admin) to see Orthanc web UI (username/password is <PASSWORD>/<PASSWORD>) # <br><img src="screenShots/Orthanc.png" alt="Drawing" style="height: 300px;"/><br> # + [markdown] pycharm={"metadata": false} # ## 2. Download Dicom Studies # # OHIF and the back end PACS uses dicom format. # For this you need to have a some Dicom studies locally. # You can run [TCIA Notebook](../Data/TCIA/TCIADownloader.ipynb) # with any of the sample data links. # For basic workflow you can stop right after you download the dicom data and unzipping it. # # You should find the downloaded data under `/claraDevDay/Data/<Dataname>/DCM/` # # + [markdown] pycharm={"metadata": false} # ## 3. Upload a dicom study into the PACS (orthanc) # Now that you have dicom images, # we would want to upload it to our PACS. 
# For this we can use dcmtk library # + pycharm={"metadata": false, "name": "#%%\n"} # install apt-get install dcmtk # !apt-get -y install dcmtk # + [markdown] pycharm={"metadata": false} # As we are using docker compose internal network we can directly upload to orthanc using its intenal name # `orthanc` and its internal port 4242 # + pycharm={"metadata": false, "name": "#%%\n"} # !storescu -v +sd +r -xb -v -aet "fromtest" -aec "ORTHANC" orthanc 4242 /claraDevDay/Data/NSCLC_5/DCM/1.3* # + [markdown] pycharm={"metadata": false} # ## 4. Start AIAA server # You should run the [AIAA Notebook](AIAA.ipynb) and start the AIAA server, then load at least one model. # We recommend the 2D deepgrow model as a start. # + [markdown] pycharm={"metadata": false} # ## 5. Open OHIF and use AIAA # Now that we have dicom data in our PACS, you can open OHIF in your browser and click on a patient to view it as below. # <br><img src="screenShots/OHIF.png" alt="Drawing" style="height: 300px;"/><br> # # # Next you need to setup the AIAA plugin (see image below) : # 1. AIAA server url: # 1. Set to `http://<yourmachineIP>:3030/aiaa/` # 2. Click refresh icon next to it. # 3. You should see the models you have loaded in AIAA server. # 2. Setup fetch from pacs: # 1. Under more AIAA setup # 2. Check Fetch Images From Dicom Server # 3. Dicom server = `orthanc:4242`. This is the internal port exposed through docker network # 4. Set AETitle = `ORTHANC` # <br><img src="screenShots/OHIF-AIAAwRevProxy.png" alt="Drawing" style="height: 300px;"/><br> # # + [markdown] pycharm={"metadata": false} # ## 6. Stop All # To stop all dockers (Notebooks, OHIF and PACS) you should run `./stop.sh`. # ***Note: Running cell below would terminate this notebook*** # # + pycharm={"metadata": false, "name": "#%%\n"} #Note: Running cell below would terminate this notebook # #!./stop.sh` # + [markdown] pycharm={"metadata": false} # # Exercise # + [markdown] pycharm={"metadata": false} # ### 1. 
Remove user/password # In order to have no security at all, you can change the [docker-compose](OHIF-Orthanc/docker-compose.yml) # to use [nginxNoPassword](OHIF-Orthanc/config/nginxNoPassword.conf) instead of [nginxWPassword](OHIF-Orthanc/config/nginxWPassword.conf) # + [markdown] pycharm={"metadata": false} # ### 2. Change user/password file # We provide basic authentication file [`.htpasswd`](OHIF-Orthanc/config/.htpasswd) # with user/password = <PASSWORD>. # In order to change this or create a new file altogether, you can run # + pycharm={"metadata": false, "name": "#%%\n"} htpasswd -c /claraDevDay/AIAA/OHIF-Orthanc/config/.MyNewhtpasswd username1 # you would be prompt for password then again to confirm it # + [markdown] pycharm={"metadata": false} # To a add another user we can use the same command without `-c` flag as # + pycharm={"metadata": false, "name": "#%%\n"} htpasswd /claraDevDay/AIAA/OHIF-Orthanc/config/.MyNewhtpasswd anotherusername2 # + [markdown] pycharm={"metadata": false} # # ### 3. Use https instead of http # Setup above is using http for simplicity that would work on internal secure network. # In order to use this setup on public you should use https to secure communication between client and server. # For this we would need to: # 1. Have a ssl certification and key. For this you can: # 1. Use the sample self certified crt and key files provided # 2. Use your own ssl files # 3. Generate your own self signed by running # ``` # openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /claraDevDay/AIAA/OHIF-Orthanc/config/nginx-selfsigned.key -out /claraDevDay/AIAA/OHIF-Orthanc/config/nginx-selfsigned.crt # ``` # For more options you can follow instructions [here](https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-16-04) # 2. Change the [docker-compose.yml](OHIF-Orthanc/docker-compose.yml) to: # 1. Map port 443 instead of port 80 # 2. Map the crt and key files. # 3. 
Use [`nginxHttpsWPassword.conf`](OHIF-Orthanc/config/nginxHttpsWPassword.conf) # + [markdown] pycharm={"metadata": false} # If you use self signed certification would would get warnings on your first access as shown below. # You would need to click `Advanced` then click `Proceed to _____ (unsafe)` link as shown below # # <br><img src="screenShots/httpsWarning.png" alt="Drawing" style="height: 300px;"/><br> #
PyTorch/NoteBooks/AIAA/AIAAwOHIF.ipynb
## Read a GA parameter file and return its non-comment entries,
## each entry split on ":" into a vector of whitespace-stripped fields.
## Blank lines and lines starting with '#' are ignored.
function _input_parameters(file)
    entries = []
    for raw in eachline(file)
        line = lstrip(raw)
        # skip blank lines and '#' comment lines
        if isempty(line) || startswith(line, '#')
            continue
        end
        push!(entries, strip.(split(line, ":")))
    end
    return entries
end
_parse_input(input_param) let fileParam= "default.cfg" saveFold= "Results/" verbose= true for pair in input_param if pair[1] == "File_Parameters" fileParam = pair[2] elseif pair[1] == "Folder_Results" saveFold= pair[2] elseif pair[1] == "Display_Screen_Results" && pair[2] == "false" verbose= false end end res= _inputCfg(fileParam , saveFold , verbose) return(res) end end # + ## parse parameter file for the subarray constraints function _parse_parameters(input_param) let arraycfg= 0 obs= 0 sub= 0 wei= 0 ga= 0 Array_Configuration_File= "alma.cfg" Result_Folder= "." Display_Verbose= true Observatory_Latitude= -23.0262015 Source_Declination= -30 Source_Hour_Angle = -1 Subarray_Number= 4 ## array of nsubarray elements Pads_Per_Subarray= [] Weight_Subarray= [] Subarray_Name= [] Spatial_Resolution= [] Maximum_Recoverable_Scale= Elongation= [] Sidelobe_Level= [] Weight_Spatial_Resolution= [] Weight_Maximum_Recoverable_Scale= [] Weight_Elongation= [] Weight_Sidelobe_Levels= [] ## GA parameters Number_Iterations = 100 Population_Size= 100 Termination_Condition= false Threshold= -0.05 Mutation_Rate= 0.05 Tournament_Size= 5 Number_Elitism= 5 for pair in input_param println(pair) if pair[1] == "Array_Configuration_File" Array_Configuration_File= pair[2] elseif pair[1] == "Result_Folder" Folder_Results= pair[2] elseif pair[1] == "Display_Verbose" Display_Verbose= pair[2]=="true" ? true : false elseif pair[1] == "Observatory_Latitude" Observatory_Latitude= parse(Float64, pair[2]) elseif pair[1] == "Source_Declination" Source_Declination= parse(Float64, pair[2]) elseif pair[1] == "Source_Hour_Angle" Source_Hour_Angle= parse(Float64, pair[2]) elseif pair[1] == Subarray_Number Subarray_Number= parse(Int, pair[2]) elseif pair[1] == "Number_Iterations" Number_Iterations= parse(Int, pair[2]) elseif pair[1] == "Population_Size" Population_Size= parse(Int, pair[2]) elseif pair[1] == "Termination_Condition" Termination_Condition= pair[2]=="true" ? 
true : false elseif pair[1] == "Threshold" Threshold=parse(Float64, pair[2]) elseif pair[1] == "Mutation_Rate" Mutation_Rate=parse(Float64, pair[2]) elseif pair[1] == "Tournament_Size" Tournament_Size= parse(Int, pair[2]) elseif pair[1] == "Number_Elitism" Number_Elitism= parse(Int, pair[2]) elseif pair[1] == "Pads_Per_Subarray" Pads_Per_Subarray= map(x->(v = tryparse(Int,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Weights_Subarray" Weight_Subarray= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Subarray_Name" Subarray_Name= split(pair[2],",") elseif pair[1] == "Spatial_Resolution" Spatial_Resolution= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Maximum_Recoverable_Scale" Maximum_Recoverable_Scale= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Elongation" Elongation= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Sidelobe_Level" Sidelobe_Level= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Weight_Spatial_Resolution" Weight_Spatial_Resolution= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Weight_Maximum_Recoverable_Scale" Weight_Maximum_Recoverable_Scale= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Weight_Elongation" Weight_Elongation= map(x->(v = tryparse(Float64,x); v==nothing ? 0.0 : v),split(pair[2],",")) elseif pair[1] == "Weight_Sidelobe_Levels" Weight_Sidelobe_Levels=map(x->(v = tryparse(Float64,x); v==nothing ? 
## Validate that the subarray sizes cover the full pad list:
## the sum of Pads_Per_Subarray must equal the number of pads in the
## array configuration table. Throws an error otherwise, returns true.
function _check_consistency(pop::_cfg)
    total_subarray_pads = sum(pop.sub.Pads_Per_Subarray)
    total_pads = nrow(pop.arr)
    total_subarray_pads == total_pads ||
        error("##Error: Number of pads per subarray is not equal to the total of pads.")
    return true
end
notebooks/.ipynb_checkpoints/GASS-inputs-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Limpapat/Python101/blob/main/workshop02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="giPPBVBsIZzY" # # Python 101 : Workshop #02 # by <NAME> # # 14/09/2021 # + [markdown] id="9V8o1VzPlNHa" # **Put your inspiration and idea to present/visualize the following dataset.** # + id="L6cfT5FWKU9r" import numpy as np import pandas as pd import matplotlib.pyplot as plt # + id="QfwBOHmsBexX" # !mkdir mydata # !gdown --id 1_3XIn2QnZ8aBzpMooB4L8Q9HKXKakTDc --output /content/mydata/covid-19.xlsx # + id="FKNSdwCSTR52" DATA_ = pd.read_excel('/content/mydata/covid-19.xlsx') df = DATA_.copy() df.head(10)
source/workshop02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:conda-env] # language: python # name: conda-env-conda-env-py # --- # + # Copyright (c) 2019 ETH Zurich, <NAME>, <NAME>, <NAME> # + import numpy as np # %matplotlib inline import matplotlib.pyplot as plt plt.rc('axes', axisbelow=True) from dataCollect import getModel, getFMs from analysisTools import Analyzer # + #instantiate dataset loader and pre-trained model training = True epoch = None # modelName = 'resnet34' # modelName = 'squeezenet' modelName = 'alexnet' # modelName = 'vgg16' # modelName = 'mobilenet2' # modelName, epoch = 'alexnet-cust', 54 # modelName, epoch = 'mobilenetV2-cust', 200 model, loss_func = getModel(modelName, epoch=epoch) # - # gather sparsity data model.eval() analyzer = Analyzer(quantMethod='fixed16', compressor=None) outputsReLU, _, _, _ = getFMs(model, loss_func, training=training, numBatches=1, batchSize=10)#250) sparsitiesOutp = [[100*analyzer.getSparsity(batchOutp) for batchOutp in layerOutp] for layerOutp in outputsReLU] np.save('results/sparsityByLayer-boxplot-%s.npy' % modelName, sparsitiesOutp) plt.subplots(figsize=(7,4)) layerNames = [lid for lid in range(len(sparsitiesOutp))] # 'Layer %d' plt.boxplot(sparsitiesOutp, labels=layerNames, whis=[1,99]) plt.grid() plt.xlabel('layer index') plt.ylabel('feature map sparsity [%]') plt.ylim(bottom=0, top=100) plt.xticks(rotation=90) plt.savefig('figs/sparsityByLayer-boxplot-%s.pdf' % modelName, bbox_inches='tight', pad_inches=0.0) # using torchvision models: # make sure you run the above code for both networks first, such that the files are created sparsitiesOutpAlexNet = np.load('results/sparsityByLayer-boxplot-alexnet.npy').tolist() sparsitiesOutpMobileNetV2 = np.load('results/sparsityByLayer-boxplot-mobilenet2.npy').tolist() # in the paper: # sparsitiesOutpAlexNet = 
np.load('results/sparsityByLayer-boxplot-alexnet-cust.npy').tolist() # sparsitiesOutpMobileNetV2 = np.load('results/sparsityByLayer-boxplot-mobilenetV2-cust.npy').tolist() fig, axarr = plt.subplots(ncols=2, figsize=(10*7/5,3*7/5), #figsize=(10,3), gridspec_kw={'width_ratios': [1, 3], 'wspace': 0.03}, sharey=True, squeeze=True) for axidx, (sparsitiesOutp, netName) in enumerate(zip([sparsitiesOutpAlexNet, sparsitiesOutpMobileNetV2], ['AlexNet', 'MobileNetV2'])): plt.sca(axarr[axidx]) layerNames = ['%3d' % (lid+1) for lid in range(len(sparsitiesOutp))] # 'Layer %d' plt.boxplot(sparsitiesOutp, labels=layerNames, whis=[1,99], showfliers=False) axarr[axidx].label_outer() plt.grid() plt.xlabel('layer index') if axidx == 0: plt.ylabel('feature map sparsity [%]') plt.ylim(bottom=0, top=100) plt.xticks(rotation=90) plt.title(netName) plt.savefig('figs/sparsityByLayer-boxplot-joint.pdf', bbox_inches='tight', pad_inches=0.0)
algoEvals/sparsityBoxplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!pip install requests # #!pip install beautifulsoup4 # #!pip install pandas import requests from bs4 import BeautifulSoup import pandas as pd titles = [] prices = [] ratings = [] url = 'https://webscraper.io/test-sites/e-commerce/allinone/computers/laptops' request = requests.get(url) soup = BeautifulSoup(request.text, "html.parser") for product in soup.find_all('div', {'class': 'col-sm-4 col-lg-4 col-md-4'}): for pr in product.find_all('div', {'class': 'caption'}): for p in pr.find_all('h4', {'class': 'pull-right price'}): prices.append(p.text) for title in pr.find_all('a' , {'title'}): titles.append(title.get('title')) for rt in product.find_all('div', {'class': 'ratings'}): ratings.append(len(rt.find_all('span', {'class': 'glyphicon glyphicon-star'}))) #build dataframe and export to csv product_df = pd.DataFrame(zip(titles,prices,ratings), columns =['Titles', 'Prices', 'Ratings']) product_df.head() product_df.to_csv("ecommerce.csv",index=False)
Chapter02/02_02_web_scraping_intro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# Notebook: step-by-step construction of the helper functions for a console
# Tic-Tac-Toe game. Each cell defines one helper and immediately exercises it;
# several helpers are deliberately redefined as the notebook iterates on them.

# # Board creation

# Index 0 is a filler so cells 1-9 map onto the numeric-keypad layout.
board = ['0','1','2','3','4','5','6','7','8','9']

def display_board(board):
    """Print the 3x3 board (cells 7-8-9 on top, 1-2-3 at the bottom)."""
    print(' '+board[7]+' | '+board[8]+' | '+board[9]+' ')
    print('-----------')
    print(' '+board[4]+' | '+board[5]+' | '+board[6]+' ')
    print('-----------')
    print(' '+board[1]+' | '+board[2]+' | '+board[3]+' ')

display_board(board)

# +
from IPython.display import clear_output

board = ['#','X','O','X','O','X','O','X','O','X','O']

def display_board(board):
    """Redefinition: clear the cell output before printing the board."""
    clear_output()
    print(' '+board[7]+' | '+board[8]+' | '+board[9]+' ')
    print('-----------')
    print(' '+board[4]+' | '+board[5]+' | '+board[6]+' ')
    print('-----------')
    print(' '+board[1]+' | '+board[2]+' | '+board[3]+' ')
# -

display_board(board)

# # Player Input

def player_input():
    """Prompt until X or O is entered; return (player1_marker, player2_marker)."""
    marker = ' '
    while not (marker == 'X' or marker == 'O'):
        marker = input("Enter a marker X/O : ").upper()
    if marker == 'X':
        return ('X','O')
    else:
        return ('O','X')

player_input()

# # Place Marker

def place_marker(board,marker,position):
    """Write `marker` into board cell `position` (1-9), mutating in place."""
    board[position] = marker

place_marker(board,'X',5)
display_board(board)

# # Win check

# +
def win_check(board,marker):
    """Return True if `marker` occupies any full row, column or diagonal."""
    if ((board[1] == marker and board[2] == marker and board[3] == marker ) or
        (board[4] == marker and board[5] == marker and board[6] == marker ) or
        (board[7] == marker and board[8] == marker and board[9] == marker ) or
        (board[7] == marker and board[4] == marker and board[1] == marker ) or
        (board[8] == marker and board[5] == marker and board[2] == marker ) or
        (board[9] == marker and board[6] == marker and board[3] == marker ) or
        (board[7] == marker and board[5] == marker and board[3] == marker ) or
        (board[9] == marker and board[5] == marker and board[1] == marker )):
        return True
    else:
        return False
# -

win_check(board,'X')
win_check(board,'Y')

# # Select player to start

import random

player = random.randint(1,2)
print ("Player to start:",player)

# +
import random

def choose_first():
    """Coin-flip which player goes first."""
    num = random.randint(0,1)
    if num == 1:
        return "Player1"
    else :
        return "Player 2"
# -

choose_first()

board = ['#','X','O',' ','O','X',' ','X','O','X','O']

def display_board(board):
    clear_output()
    print(' '+board[7]+' | '+board[8]+' | '+board[9]+' ')
    print('-----------')
    print(' '+board[4]+' | '+board[5]+' | '+board[6]+' ')
    print('-----------')
    print(' '+board[1]+' | '+board[2]+' | '+board[3]+' ')

display_board(board)

def empty_check(board,position):
    """First draft: describe a cell's occupancy as a human-readable string."""
    if (board[position] == 'X' or board[position] == 'O' ):
        return "Position is marked"
    else:
        return "Position is empty"

empty_check(board,6)
empty_check(board,1)

def empty_check(board,position):
    """Final version: True when the cell holds a blank space."""
    return board[position] == ' '

empty_check(board,1)
empty_check(board,6)

# # Full board check

board_new = ['#','X','O','X','O','X','O','X','O','X','O']

def display_board(board_new):
    """Display helper for `board_new`.

    FIX: the original body printed the global `board`, silently ignoring
    its `board_new` argument; it now uses the parameter.
    """
    clear_output()
    print(' '+board_new[7]+' | '+board_new[8]+' | '+board_new[9]+' ')
    print('-----------')
    print(' '+board_new[4]+' | '+board_new[5]+' | '+board_new[6]+' ')
    print('-----------')
    print(' '+board_new[1]+' | '+board_new[2]+' | '+board_new[3]+' ')

def board_full(board_new):
    """Return True when no cell is blank (the board is full).

    FIX: the original returned True when *any* cell was still blank,
    i.e. exactly the opposite of what the function's name promises.
    """
    return not (board_new[1] == ' ' or board_new[2] == ' ' or board_new[3] == ' ' or
                board_new[4] == ' ' or board_new[5] == ' ' or board_new[6] == ' ' or
                board_new[7] == ' ' or board_new[8] == ' ' or board_new[9] == ' ')

board_full(board_new)

# # Check if entered space is free to mark

board = ['#','X','O',' ','O','X',' ','X','O','X','O']

def display_board(board):
    clear_output()
    print(' '+board[7]+' | '+board[8]+' | '+board[9]+' ')
    print('-----------')
    print(' '+board[4]+' | '+board[5]+' | '+board[6]+' ')
    print('-----------')
    print(' '+board[1]+' | '+board[2]+' | '+board[3]+' ')

def nxt_pos(board):
    """Prompt until a free cell is chosen; return its index (1-9).

    FIX: the original loop condition used `range(1,9)`, whose half-open
    interval excludes 9 and made that cell impossible to select.
    """
    position = 0
    while not position in range(1,10) or not empty_check(board,position):
        position = int(input("Enter a position:"))
    return position

nxt_pos(board)

# # Check for replay

def play_again():
    """Ask whether to replay; True for any answer starting with y/Y."""
    return input ("Enter Y / N : ").lower().startswith('y')

play_again()
play_again()
Tic-Tac-Toe.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# Notebook: gallery of basic matplotlib/seaborn plots (boxplots, scatter,
# line and bar plots) built on the Chipotle orders and MovieLens users data.

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# FIX: this frame was originally assigned to `dataset`, but every later cell
# references `chipo`, which raised NameError; bind it to the name actually used.
chipo = pd.read_csv('chipotle.tsv',sep='\t')
# chipo.head(10)

users = pd.read_csv('https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user', sep='|')
users

# ## Examples of BoxPlots!
#
# Here we are analyzing the distribution of age per gender

users.boxplot(by='gender', column=['age'],figsize=(5,5))
plt.xlabel("Gender")
plt.ylabel("Age")
plt.title("Distribution of Age by Gender")
plt.suptitle("")

# #### Only beautifying

# +
#If you want, you can set some colors.
properties = dict(boxes="Gray", whiskers="Red", medians="Blue", caps="Black")
medianprops = dict(linestyle='-.', linewidth=1)

users.boxplot(by='gender', column=['age'], patch_artist=True, color=properties, medianprops=medianprops, figsize=(5,5))
plt.xlabel("Gender")
plt.ylabel("Age")
plt.title("Distribution of Age by Gender")
plt.suptitle("")
# -

# #### Same thing using Seaborn library

# +
plt.figure(figsize=(5,5))
boxplot = sns.boxplot(y="age", x="gender", data=users, palette="colorblind", width=0.3)
plt.xlabel("Gender")
plt.ylabel("Age")
plt.title("Distribution of Age by Gender")
plt.suptitle("")
# -

# ### Now: Boxplot for M x F, only for those people older than 40

# +
plt.figure(figsize=(5,5))
boxplot = sns.boxplot(y="age", x="gender", data=users[users["age"]>40], palette="colorblind", width=0.3)
plt.xlabel("Gender")
plt.ylabel("Age")
plt.title("Distribution of Age by Gender")
plt.suptitle("")
# -

# #### Boxplots again
# **Here we are analyzing the distribution of age per Occupation**

# +
users.boxplot(by='occupation', column=['age'],figsize=(15,10))
plt.xlabel("Occupation")
plt.xticks(rotation = 90)
plt.ylabel("Age")
plt.title("Distribution of Age by Occupation")
plt.suptitle("")
# -

plt.figure(figsize=(15,10))
boxplot = sns.boxplot(y="age", x="occupation", data=users, palette="colorblind", width=0.7)
plt.xlabel("Occupation")
plt.xticks(rotation = 90)
plt.ylabel("Age")
plt.title("Distribution of Age by Occupation")
plt.suptitle("")

# ### Scatterplot
# Checking the relationship between price and quantity of items ordered
# *Not grouped by order*

# +
chipo.plot.scatter(x="item_price",y="quantity", figsize=(5,5))
plt.xlabel("Item Price")
plt.ylabel("Items ordered")
plt.title("Number of items ordered per item price")
# -

# **Once again, same thing using Seaborn**

plt.figure(figsize=(5,5))
sns.relplot(x="quantity", y="item_price", sizes=(40, 400), alpha=.5, palette="muted", height=6, data=chipo)

# ### Lineplots
# First, just creating something to mimic a timeline. Instead, we are using the order id to show the temporal behavior of items ordered.
# We group by order, and, then, we sum the quantity.

#just for sake of readability I am getting the last 40 orders
#grouping by order id, leaving order_id as a regular column (not an index)
#and aggregating the quantity of items, summing them. Lastly, sorting by order_id
chipo_subset = chipo.tail(40)[:].groupby("order_id", as_index=False).agg({"quantity":"sum"}).sort_values("order_id")

# +
#Creating the plot, adjusting the sizing
chipo_subset.plot.line(x="order_id", y="quantity", figsize=(15,10))

#beautification
plt.xlabel("Order number")
plt.ylabel("Number of items ordered")

#Working with the ticks. I want the ticks' steps to be "1", all the items appearing; and set the minimum and maximum
#i do this by settin the range of xticks and yticks: (range(start value, end value, step))
plt.xticks(range(chipo_subset['order_id'].min(), chipo_subset['order_id'].max()+1, 1))
plt.yticks(range(0, chipo_subset['quantity'].max()+1, 1))

plt.title("Number of items ordered per order")
# -

# **Seaborn gives us the same graphic**

# +
plt.figure(figsize=(15,10))
sns.lineplot(data=chipo_subset, x="order_id", y="quantity")
plt.xlabel("Order number")
plt.ylabel("Number of items ordered")
plt.xticks(range(chipo_subset['order_id'].min(), chipo_subset['order_id'].max()+1, 1))
plt.yticks(range(0, chipo_subset['quantity'].max()+1, 1))
plt.title("Number of items ordered per order")
# -

# Working with another dataset, showing how to create basic line plots

users_subset = users.groupby("occupation", as_index=False)

users_subset.size().plot.line(figsize=(15,5))
plt.xlabel("Occupation")
plt.xticks(rotation = 90)
plt.ylabel("#")

# #### Comparing occupation per gender

users_subset_men = users[users["gender"]=="M"].groupby("occupation", as_index=False)
users_subset_women = users[users["gender"]=="F"].groupby("occupation", as_index=False)

users_subset_women.size().plot.line(figsize=(15,5))
users_subset_men.size().plot.line(figsize=(15,5)).legend(["Men", "Women"])

# ### Bar plot
# Finally, creating a bar plot show the number of rows that each item appears.

chipo.head(100)['item_name'].value_counts().plot.bar()

# And playing with a different dataset as well...

users['occupation'].value_counts().plot.bar()
notebooks/Graphic_examples.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: conda_python3, language: python, name: conda_python3}
# ---

# # Crop ROI Labels
#
# Use the labeling information to split into train/val/test and then assign the relevant label
#
# Assume that a label exists in an image if it's at least 10 pixels away from the horizontal edges

# +
import pandas as pd
import numpy as np
import os
import s3fs  # for reading from S3FileSystem
import json  # for working with JSON files
import matplotlib.pyplot as plt

# show full column contents
# NOTE(review): the -1 form is deprecated in newer pandas (use None) — confirm version
pd.set_option('max_colwidth', -1)
# -

# # Load Reference CSVs
# ## Labels and Crops Info

SAGEMAKER_PATH = r'/home/ec2-user/SageMaker'
SPLIT_PATH = os.path.join(SAGEMAKER_PATH, 'classify-streetview', 'split-train-test')
MINI_PATH = os.path.join(SAGEMAKER_PATH, 'classify-streetview', 'mini-crops')

df_crops = pd.read_csv(os.path.join(MINI_PATH, 'Crops_with_Labels.csv'))
print(df_crops.shape)
print(df_crops.columns)
df_crops.head()

df_crops['label_in_crop'].value_counts()

# pull the heading out of '<img_id>_<heading>.jpg' filenames (second capture group)
df_crops['heading'] = df_crops['filename'].str.extract('(.*)_(.*).jpg', expand = True)[1]

crops_cols = ['img_id', 'heading', 'crop_num', 'present_ramp', 'missing_ramp', 'label_name', 'sv_image_x', 'sv_image_y', 'xpt_minus_xleft', 'xright_minus_xpt', 'ypt_minus_ytop', 'ybottom_minus_ypt']
df_crops = df_crops[crops_cols]
df_crops.head()

# Add a column for jpg_name
df_crops['jpg_name'] = df_crops['img_id'].astype(str) + '_' + df_crops['heading'].astype(str) + '_' + df_crops['crop_num'].astype(str) + '.jpg'

df_crops['label_name'].value_counts()

# ### Determine if within 10px ROI

# edge crops ('A' leftmost, 'F' rightmost — presumably; verify against crop layout)
# get no margin requirement on their outer side
df_margin = pd.DataFrame({'crop_num': df_crops['crop_num'], 'left_margin': np.where(df_crops['crop_num'] == 'A', 0, 10), 'right_margin' : np.where(df_crops['crop_num'] == 'F', 0, 10)})
df_margin.head()

# a label point is "in ROI" when it clears both horizontal margins
df_crops['in_10px_roi'] = (df_margin['left_margin'] <= df_crops['xpt_minus_xleft']) & (df_margin['right_margin'] <= df_crops['xright_minus_xpt'])
df_crops['in_10px_roi'].value_counts()
df_crops.head()
df_crops.to_csv('labels_in_10px_roi.csv', index = False)

# ## Flatten to 1 row per jpg_name
df_crops_roi = df_crops.loc[df_crops['in_10px_roi']]
feature_cols = ['present_ramp', 'missing_ramp']
# per-crop counts of present/missing ramp labels inside the ROI
df_crops_roi_group = df_crops_roi.groupby(['jpg_name', 'img_id', 'heading', 'crop_num'])[feature_cols].sum()
df_crops_roi_group['total_count'] = df_crops_roi_group[feature_cols].sum(axis = 1)
df_crops_roi_group = df_crops_roi_group.reset_index()
df_crops_roi_group.head()
df_crops_roi_group.shape
df_crops_roi_group['total_count'].value_counts()
df_crops_roi_group['includes_both'] = (df_crops_roi_group['present_ramp'] > 0) & (df_crops_roi_group['missing_ramp'] > 0)
df_crops_roi_group['includes_both'].value_counts()
df_crops_roi_group['present_ramp'].value_counts()
df_crops_roi_group['missing_ramp'].value_counts()

# ## Apply Labels Logic
# precedence: present > multiple_present > only_missing > 1_null (later
# np.where calls overwrite earlier assignments where their mask holds)
true_missing_mask = (df_crops_roi_group['includes_both'] == False) & (df_crops_roi_group['missing_ramp'] > 0)
df_crops_roi_group['ground_truth'] = np.where(df_crops_roi_group['present_ramp'] > 0, 'present', '1_null')
df_crops_roi_group['ground_truth'] = np.where(df_crops_roi_group['present_ramp'] > 1, 'multiple_present', df_crops_roi_group['ground_truth'])
df_crops_roi_group['ground_truth'] = np.where(true_missing_mask, 'only_missing', df_crops_roi_group['ground_truth'])
df_crops_roi_group['ground_truth'].value_counts()
df_crops_roi_group['img_id'].unique().shape
df_crops_roi_group.dtypes
df_crops_roi_group.columns

# ### Create Null Columns
# cartesian product of all img_id x heading x crop_num combinations so crops
# without any ROI label still get a row (filled as '1_null' below)
img_id_list = list(df_crops_roi_group['img_id'].unique())
heading_list = list(df_crops_roi_group['heading'].unique())
crop_num_list = list(df_crops_roi_group['crop_num'].unique())
df_mesh = pd.DataFrame(np.array(np.meshgrid(img_id_list, heading_list, crop_num_list, )).T.reshape(-1,3))
df_mesh.columns = ['img_id', 'heading', 'crop_num']
df_mesh['jpg_name'] = df_mesh['img_id'].astype(str) + '_' + df_mesh['heading'].astype(str) + '_' + df_mesh['crop_num'].astype(str) + '.jpg'
# meshgrid output is all-object; restore a numeric img_id so the merges align
df_mesh['img_id'] = pd.to_numeric(df_mesh['img_id'], downcast = 'integer')
print(len(img_id_list) * len(heading_list) * len(crop_num_list))
print(df_mesh.shape)
df_mesh.head()
df_mesh.dtypes

on_cols = list(df_mesh.columns)
df_all_crops = df_mesh.merge(df_crops_roi_group, how = 'left', left_on = on_cols, right_on = on_cols)
# Fill NAs
counts_cols = ['present_ramp', 'missing_ramp', 'total_count']
df_all_crops[counts_cols] = df_all_crops[counts_cols].fillna(0)
df_all_crops['includes_both'] = df_all_crops['includes_both'].fillna(False)
df_all_crops['ground_truth'] = df_all_crops['ground_truth'].fillna('1_null')
df_all_crops.head()
df_all_crops['ground_truth'].value_counts()

# ## Img_id class split
# +
# Get class list
df_split = pd.read_csv(os.path.join(SPLIT_PATH, 'train-validation-test-imgid-list.csv'))
df_split = df_split[['img_id', 'train/val/test']]
print(df_split.shape)
df_split.head()
# -

df_split['train/val/test'].value_counts()

# # Merge Ground Truth with train/val/test
df_merge = df_all_crops.merge(df_split, how = 'left', left_on = 'img_id', right_on = 'img_id')
df_merge.head()
df_merge['ground_truth'].value_counts()
df_merge.groupby(['train/val/test', 'ground_truth'])['jpg_name'].count()
os.getcwd()
df_merge.to_csv('imgid_groundtruth_trainvaltest.csv', index = False)
mini-crops/2020-04-05-CropROILabels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# +
# This cell is added by sphinx-gallery
# !pip install mrsimulator --quiet
# %matplotlib inline
import mrsimulator
print(f'You are using mrsimulator v{mrsimulator.__version__}')
# -

# # NiCl₂.2D₂O, ²H (I=1) Shifting-d echo
#
# ²H (I=1) 2D NMR CSA-Quad 1st order correlation spectrum.
#
# The following is an example of fitting static shifting-*d* echo NMR correlation
# spectrum of $\text{NiCl}_2\cdot 2\text{D}_2\text{O}$ crystalline solid. The
# spectrum used here is from Walder `et al.` [#f1]_.

# +
import numpy as np
import csdmpy as cp
import matplotlib.pyplot as plt
from lmfit import Minimizer

from mrsimulator import Simulator, Site, SpinSystem
from mrsimulator.methods import Method2D
from mrsimulator import signal_processing as sp
from mrsimulator.utils import spectral_fitting as sf
from mrsimulator.utils import get_spectral_dimensions
# -

# ## Import the dataset

# +
filename = "https://sandbox.zenodo.org/record/830903/files/NiCl2.2D2O.csdf"
experiment = cp.load(filename)

# standard deviation of noise from the dataset
sigma = 7.500

# For spectral fitting, we only focus on the real part of the complex dataset
experiment = experiment.real

# Convert the coordinates along each dimension from Hz to ppm.
_ = [item.to("ppm", "nmr_frequency_ratio") for item in experiment.dimensions]

# plot of the dataset.
max_amp = experiment.max()
levels = (np.arange(29) + 1) * max_amp / 30  # contours are drawn at these levels.
options = dict(levels=levels, linewidths=0.5)  # plot options

plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.set_xlim(1000, -1000)
ax.set_ylim(1500, -1500)
plt.grid()
plt.tight_layout()
plt.show()
# -

# ## Create a fitting model
# **Guess model**
#
# Create a guess list of spin systems.

# +
site = Site(
    isotope="2H",
    isotropic_chemical_shift=-90,  # in ppm
    shielding_symmetric={
        "zeta": -610,  # in ppm
        "eta": 0.15,
        "alpha": 0.7,  # in rads
        "beta": 2.0,  # in rads
        "gamma": 3.0,  # in rads
    },
    quadrupolar={"Cq": 75.2e3, "eta": 0.9},  # Cq in Hz
)
spin_systems = [SpinSystem(sites=[site])]
# -

# **Method**
#
# Use the generic 2D method, `Method2D`, to generate a shifting-d echo method. The
# reported shifting-d 2D sequence is a correlation of the shielding frequencies to the
# first-order quadrupolar frequencies. Here, we create a correlation method using the
# :attr:`~mrsimulator.method.event.freq_contrib` attribute, which acts as a switch
# for including the frequency contributions from interaction during the event.
#
# In the following method, we assign the ``["Quad1_2"]`` and
# ``["Shielding1_0", "Shielding1_2"]`` as the value to the ``freq_contrib`` key. The
# *Quad1_2* is an enumeration for selecting the first-order second-rank quadrupolar
# frequency contributions. *Shielding1_0* and *Shielding1_2* are enumerations for
# the first-order shielding with zeroth and second-rank tensor contributions,
# respectively. See `freq_contrib_api` for details.

# +
# Get the spectral dimension parameters from the experiment.
spectral_dims = get_spectral_dimensions(experiment)

shifting_d = Method2D(
    channels=["2H"],
    magnetic_flux_density=9.395,  # in T
    spectral_dimensions=[
        {
            **spectral_dims[0],
            "label": "Quadrupolar frequency",
            "events": [
                {
                    "rotor_frequency": 0,
                    "transition_query": {"P": [-1]},
                    "freq_contrib": ["Quad1_2"],
                }
            ],
        },
        {
            **spectral_dims[1],
            "label": "Paramagnetic shift",
            "events": [
                {
                    "rotor_frequency": 0,
                    "transition_query": {"P": [-1]},
                    "freq_contrib": ["Shielding1_0", "Shielding1_2"],
                }
            ],
        },
    ],
    experiment=experiment,  # also add the measurement to the method.
)

# Optimize the script by pre-setting the transition pathways for each spin system from
# the method.
for sys in spin_systems:
    sys.transition_pathways = shifting_d.get_transition_pathways(sys)
# -

# **Guess Spectrum**

# +
# Simulation
# ----------
sim = Simulator(spin_systems=spin_systems, methods=[shifting_d])
sim.config.integration_volume = "hemisphere"
sim.run()

# Post Simulation Processing
# --------------------------
processor = sp.SignalProcessor(
    operations=[
        # Gaussian convolution along both dimensions.
        sp.IFFT(dim_index=(0, 1)),
        sp.apodization.Gaussian(FWHM="5 kHz", dim_index=0),  # along dimension 0
        sp.apodization.Gaussian(FWHM="5 kHz", dim_index=1),  # along dimension 1
        sp.FFT(dim_index=(0, 1)),
        sp.Scale(factor=5e8),
    ]
)
processed_data = processor.apply_operations(data=sim.methods[0].simulation).real

# Plot of the guess Spectrum
# --------------------------
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.contour(processed_data, colors="r", linestyles="--", **options)
ax.set_xlim(1000, -1000)
ax.set_ylim(1500, -1500)
plt.grid()
plt.tight_layout()
plt.show()
# -

# ## Least-squares minimization with LMFIT
# Use the :func:`~mrsimulator.utils.spectral_fitting.make_LMFIT_params` for a quick
# setup of the fitting parameters.

params = sf.make_LMFIT_params(sim, processor)
print(params.pretty_print(columns=["value", "min", "max", "vary", "expr"]))

# **Solve the minimizer using LMFIT**

minner = Minimizer(sf.LMFIT_min_function, params, fcn_args=(sim, processor, sigma))
result = minner.minimize()
result

# ## The best fit solution

# +
best_fit = sf.bestfit(sim, processor)[0]

# Plot the spectrum
plt.figure(figsize=(4.25, 3.0))
ax = plt.subplot(projection="csdm")
ax.contour(experiment, colors="k", **options)
ax.contour(best_fit, colors="r", linestyles="--", **options)
ax.set_xlim(1000, -1000)
ax.set_ylim(1500, -1500)
plt.grid()
plt.tight_layout()
plt.show()
# -

# ## Image plots with residuals

# +
residuals = sf.residuals(sim, processor)[0]

fig, ax = plt.subplots(
    1, 3, sharey=True, figsize=(10, 3.0), subplot_kw={"projection": "csdm"}
)
vmax, vmin = experiment.max(), experiment.min()
for i, dat in enumerate([experiment, best_fit, residuals]):
    ax[i].imshow(dat, aspect="auto", cmap="gist_ncar_r", vmax=vmax, vmin=vmin)
    ax[i].set_xlim(1000, -1000)
ax[0].set_ylim(1500, -1500)
plt.tight_layout()
plt.show()
# -

# .. [#f1] <NAME>, <NAME>., <NAME>, and <NAME>
#       Hydrogen motional disorder in crystalline iron group chloride dihydrates
#       spectroscopy, J. Chem. Phys. (2018) **149**, 084503.
#       `DOI: 10.1063/1.5037151 <https://doi.org/10.1063/1.5037151>`_
docs/notebooks/fitting/2D_fitting/plot_4_NiCl2.2D2O_shifting-d.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Seaborn: jointplot # + import seaborn as sns from matplotlib import pyplot as plt import matplotlib # - sns.__version__ matplotlib.__version__ geyser = sns.load_dataset('geyser') geyser.head() geyser.shape # ## Intro Visuals plt.rc('xtick', labelsize=12) plt.rc('ytick', labelsize=12) plt.rc('axes', labelsize=14) # + sns.set_style('white') import numpy as np my_color = '#643db8' # - j = sns.jointplot(x='waiting', y='duration', data=geyser, color=my_color) j.ax_joint.set_xlabel('') j.ax_joint.set_ylabel(''); j = sns.jointplot(x='waiting', y='duration', data=geyser.iloc[:, :-1]+np.random.normal(0, 0.15, size=geyser.iloc[:, :-1].shape), #adding "jitter" color='white') j.plot_marginals(sns.rugplot, height=.5, color=my_color) j.ax_joint.set_xlabel('') j.ax_joint.set_ylabel(''); j = sns.jointplot(x='waiting', y='duration', data=geyser, color=my_color, kind='kde') j.ax_joint.set_xlabel('') j.ax_joint.set_ylabel(''); # ### Thumbnail Figure j = sns.jointplot(x='waiting', y='duration', data=geyser.iloc[:, :-1]+np.random.normal(0, 0.15, size=geyser.iloc[:, :-1].shape), #adding "jitter" color= my_color) j.plot_marginals(sns.rugplot, height=-.15, color='black', clip_on=False) j.plot_joint(sns.kdeplot, color='black', levels=5) j.ax_joint.set_xlabel('') j.ax_joint.set_ylabel('') j.ax_joint.set_ylim(0.5, 6) j.ax_joint.set_xlim(30, 110); plt.rc('xtick', labelsize=10) plt.rc('ytick', labelsize=10) plt.rc('axes', labelsize=10) # ## Basics sns.set_style('darkgrid') sns.jointplot(x=geyser.waiting, y=geyser.duration); sns.jointplot(x='waiting', y='duration', data=geyser); # ## Plot Kinds # # There are currently six kinds of jointplots allowed in seaborn: # 1. scatter (default) # 2. kde # 3. reg # 4. hist # 5. hex # 6. 
resid # ### scatter (Default) sns.jointplot(x='waiting', y='duration', data=geyser, kind='scatter' ); # ### kde sns.jointplot(x='waiting', y='duration', data=geyser, kind='kde' ); # ### reg sns.jointplot(x='waiting', y='duration', data=geyser, kind='reg' ); # ### hist sns.jointplot(x='waiting', y='duration', data=geyser, kind='hist' ); # ### Returns JointGrid g = sns.jointplot(x='waiting', y='duration', data=geyser); type(g) g.plot_joint(sns.kdeplot, color='gray', levels=5); # ## Categorical Variables geyser.head() # ### hue sns.jointplot(x='waiting', y='duration', data=geyser); sns.jointplot(x='waiting', y='duration', data=geyser, hue='kind' ); sns.jointplot(x='waiting', y='duration', data=geyser, hue='kind', kind='kde' ); # ## Styling # ### color, palette sns.jointplot(x='waiting', y='duration', data=geyser, color='green' ); sns.jointplot(x='waiting', y='duration', data=geyser, hue='kind', palette='autumn' ); # ### height, space, ratio sns.jointplot(x='waiting', y='duration', data=geyser, height=8, #Default: 6 space=0.1, #Default: 0.2 ratio=2 #Default: 5 ); # ### joint_kws # # Joint keywords should match the figure type in the middle of the jointplot. sns.jointplot(x='waiting', y='duration', data=geyser, joint_kws={'marker': 5} ); sns.jointplot(x='waiting', y='duration', data=geyser, kind='reg', joint_kws={'ci': None, 'order': 2} ); # ### marginal_kws # # Marginal keywords should match the type of plot for the marginal distribution plots (the outside plots). sns.jointplot(x='waiting', y='duration', data=geyser, marginal_kws={'color': 'xkcd:golden'} ); sns.jointplot(x='waiting', y='duration', data=geyser, kind='kde', marginal_kws={'lw': 4, 'shade': True} );
13_jointplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] pycharm={"name": "#%% md\n"} # <br> # # # Introdução # - # <br> # # ## Install Locals # !pip install ../dist/traquitanas*.tar.gz # <br> # # ## Install PyPi # + # #!pip3 uninstall -y traquitanas # #!pip3 install pycep_correios --upgrade # - # Install / Upgrade from PyPi # !pip3 install traquitanas --upgrade # <br> # # ## Import Locals # + pycharm={"name": "#%%\n"} # Imports Locais #from src.traquitanas.geo import converts #from src.traquitanas import numbers # - # <br> # # ## Import PyPi # + pycharm={"name": "#%%\n"} #from traquitanas.geo import converts from traquitanas import geo # + pycharm={"name": "#%%\n"} geo.converts.dms2dd('23°06’12,48”S') # + pycharm={"name": "#%%\n"} #import traquitanas.geo.converts # - # <br> # # ## Geo # + pycharm={"name": "#%%\n"} from traquitanas import geo geo.converts.dms2dd('23°06’12,48”S') geo.converts.dms2dd_infoaguas('22 13 52') # - # <br> # # ## Utils # + pycharm={"name": "#%%\n"} from traquitanas import utils as tt tt.predict_encoding() # + pycharm={"name": "#%%\n"}
test/codes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# ## This example shows some utilities for post-processing head results from MODFLOW

# +
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# run installed version of flopy or add local path
try:
    import flopy
except:
    fpth = os.path.abspath(os.path.join("..", ".."))
    sys.path.append(fpth)
    import flopy

from flopy.utils.postprocessing import (
    get_transmissivities,
    get_water_table,
    get_gradients,
)
import flopy.utils.binaryfile as bf

print(sys.version)
print("numpy version: {}".format(np.__version__))
print("matplotlib version: {}".format(mpl.__version__))
print("flopy version: {}".format(flopy.__version__))
# -

mfnam = "EXAMPLE.nam"
model_ws = "../data/mp6/"
heads_file = "EXAMPLE.HED"

# ### Load example model and head results

m = flopy.modflow.Modflow.load(mfnam, model_ws=model_ws)

hdsobj = bf.HeadFile(model_ws + heads_file)
# heads for the chosen (time step, stress period)
hds = hdsobj.get_data(kstpkper=(0, 2))
hds.shape

# ### Plot heads in each layer; export the heads and head contours for viewing in a GIS
# for more information about GIS export, type `help(export_array)`, for example

# +
fig, axes = plt.subplots(2, 3, figsize=(11, 8.5))
axes = axes.flat
grid = m.modelgrid
for i, hdslayer in enumerate(hds):
    im = axes[i].imshow(hdslayer, vmin=hds.min(), vmax=hds.max())
    axes[i].set_title("Layer {}".format(i + 1))
    ctr = axes[i].contour(hdslayer, colors="k", linewidths=0.5)

    # export head rasters
    # (GeoTiff export requires the rasterio package; for ascii grids, just change the extension to *.asc)
    flopy.export.utils.export_array(
        grid, "data/heads{}.tif".format(i + 1), hdslayer
    )

    # export head contours to a shapefile
    flopy.export.utils.export_array_contours(
        grid, "data/heads{}.shp".format(i + 1), hdslayer
    )

fig.delaxes(axes[-1])
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax, label="Head");
# -

# ### Compare rotated arc-ascii and GeoTiff output

# +
grid.set_coord_info(angrot=30.0)
nodata = 0.0
# NOTE(review): `hdslayer` is the leftover loop variable from the cell above,
# i.e. the bottom layer — presumably intentional for this demo.
flopy.export.utils.export_array(
    grid, "data/heads5_rot.asc", hdslayer, nodata=nodata
)

flopy.export.utils.export_array(
    grid, "data/heads5_rot.tif", hdslayer, nodata=nodata
)

# NOTE(review): the .format(i + 1) below is a no-op — the literal has no
# placeholder; the file name is always "data/heads5_rot.asc".
results = np.loadtxt("data/heads5_rot.asc".format(i + 1), skiprows=6)
results[results == nodata] = np.nan
plt.imshow(results)
plt.colorbar();
# -

try:
    import rasterio
except:
    rasterio = None
    print("install rasterio to create GeoTiff output")

if rasterio is not None:
    with rasterio.open("data/heads5_rot.tif") as src:
        print(src.meta)
        plt.imshow(src.read(1))

# ### Get the vertical head gradients between layers

# +
grad = get_gradients(hds, m, nodata=-999)

fig, axes = plt.subplots(2, 3, figsize=(11, 8.5))
axes = axes.flat

for i, vertical_gradient in enumerate(grad):
    im = axes[i].imshow(vertical_gradient, vmin=grad.min(), vmax=grad.max())
    axes[i].set_title(
        "Vertical gradient\nbetween Layers {} and {}".format(i + 1, i + 2)
    )
    ctr = axes[i].contour(
        vertical_gradient,
        levels=[-0.1, -0.05, 0.0, 0.05, 0.1],
        colors="k",
        linewidths=0.5,
    )
    plt.clabel(ctr, fontsize=8, inline=1)

fig.delaxes(axes[-2])
fig.delaxes(axes[-1])
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.03, 0.7])
fig.colorbar(im, cax=cbar_ax, label="positive downward");
# -

# ### Get the saturated thickness of a layer
# `m.modelgrid.saturated_thick()` returns an nlay, nrow, ncol array of saturated thicknesses.

# +
st = m.modelgrid.saturated_thick(hds, mask=-9999.0)

plt.imshow(st[0])
plt.colorbar(label="Saturated thickness")
plt.title("Layer 1");
# -

# ### Get the water table
# `get_water_table()` returns an nrow, ncol array of the water table elevation.
# This method can be useful when HDRY is turned on and the water table is in multiple layers.

# +
wt = get_water_table(heads=hds, nodata=-9999)

plt.imshow(wt)
plt.colorbar(label="Elevation");
# -

# ### Get layer transmissivities at arbitrary locations, accounting for the position of the water table
# * for this method, the heads input is an nlay x nobs array of head results, which could be constructed using the Hydmod package with an observation in each layer at each observation location, for example .
# * x, y values in real-world coordinates can be used in lieu of row, column, provided a correct coordinate information is supplied to the flopy model object's grid.
# * open interval tops and bottoms can be supplied at each location for computing transmissivity-weighted average heads
# * this method can also be used for apportioning boundary fluxes for an inset from a 2-D regional model
# * see `**flopy3_get_transmissivities_example.ipynb**` for more details on how this method works

r = [20, 5]
c = [5, 20]
headresults = hds[:, r, c]
get_transmissivities(headresults, m, r=r, c=c)

r = [20, 5]
c = [5, 20]
sctop = [340, 320]  # top of open interval at each location
scbot = [210, 150]  # top of bottom interval at each location
headresults = hds[:, r, c]
tr = get_transmissivities(headresults, m, r=r, c=c, sctop=sctop, scbot=scbot)
tr

# #### convert to transmissivity fractions

trfrac = tr / tr.sum(axis=0)
trfrac

# #### Layer 3 contributes almost no transmissivity because of its K-value

m.lpf.hk.array[:, r, c]

m.modelgrid.thick[:, r, c]
examples/Notebooks/flopy3_Modflow_postprocessing_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# #!/usr/bin/env python
# coding: utf-8
# Regression-analysis panel plot for five Mariana Trench bathymetric profiles.
import os

import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sb

# NOTE(review): hard-coded, user-specific working directory; Tab-Bathy.csv is
# expected there — confirm before running on another machine.
os.chdir('/Users/pauline/Documents/Python')
df = pd.read_csv("Tab-Bathy.csv")

sb.set_style('darkgrid')
sb.set_context('paper')

# plotting: one shared-axis regression panel per profile column
fig, ax = plt.subplots(1, 5,
                       sharex=True, sharey=True,
                       figsize=(10.0, 4.0), dpi=300
                       )
fig.suptitle('Regression analysis plot: Mariana Trench bathymetry',
             fontsize=10, fontweight='bold',
             x=0.5, y=0.97
             )

# The five panels differ only in the profile column name, so draw them in a
# loop instead of five copy-pasted regplot calls.
profiles = ["profile11", "profile12", "profile13", "profile14", "profile15"]
for i, profile in enumerate(profiles):
    sb.regplot(x="observ", y=profile, data=df, marker=".", ax=ax[i])

# visualizing.  tight_layout() recomputes the margins and the subplots_adjust()
# below then overrides them, so the original's earlier
# subplots_adjust(wspace=0.05, ...) call was dead code and has been dropped.
plt.tight_layout()
plt.subplots_adjust(top=0.92, bottom=0.08,
                    left=0.10, right=0.95,
                    hspace=0.25, wspace=0.35)
plt.savefig('plot_RegrAn.png', dpi=300)
plt.show()
# -
Script-008-RegrAn-5Subplots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Optimisation numérique avec `scipy` # *Prérequis : connaître numpy et les concepts généraux de l'optimisation numérique.* # # Pour l'optimisation, on peut utiliser les fonctions du module `SciPy`. import numpy as np from scipy.optimize import linprog # ## 1. Premier exemple en programmation linéaire # On va résoudre le problème de programmation linéaire: # ``` # Minimiser : c^T * x # Sous contraintes : A_ub * x <= b_ub # A_eq * x == b_eq # ``` # N'oubliez pas qu'on peut toujours demander de l'aide ! help(linprog) # Commençons par un problème basique. c = [40, 10, 30] A_eq = [[1, 1, 1]] b_eq = [30] A_ub = [[0, 1, -1]] b_ub = [0] bounds = [ (0, 20), (0, 20), (0, 20) ] res = linprog(c, A_ub, b_ub, A_eq, b_eq, bounds, options={"disp": True}) print(res) # ## Correction de l'exercice sur les centrales linprog( c=[5, 10], A_eq=[[1, 1]], b_eq=[50], bounds=[[0, 30], [0, 40]] ) # ## 2. Un exemple un peu plus complexe # On a à notre disposition 5 centrales électriques pour produire une puissance de 350 MW pendant une heure. # # * Les coûts des centrales sont de 20 €/MWh, 40 €/MWh, 24 €/MWh, 12 €/MWh et 32 €/MWh. # * Les puissances maximales de chaque centrale sont de 100 MW. c = [20, 40, 24, 12, 32] A = [[1, 1, 1, 1, 1]] b = [350] bounds = (0, 100) res = linprog(c, A_eq=A, b_eq=b, bounds=(bounds, bounds, bounds, bounds, bounds), options={"disp": True}) print(res) # La solution semble effectivement la plus raisonnable (**toujours vérifier la solution !!**) : on commence par prouire avec la centrale la moins chère, puis on augmente. # ## 3. Augmentons un peu la taille du problème # On va monitorer le temps passé. # Pour cela, on utilise la fonction `time.time()` qui nous rend l'heure en secondes. 
import time


def my_opt(n=5):
    """Build and solve a random LP with ``n`` variables; return the solve time.

    A random cost vector, one equality constraint (total production equals a
    random demand proportional to ``n``) and identical ``(0, 100)`` bounds on
    every variable — i.e. a randomised version of the power-plant problem
    above.

    Parameters
    ----------
    n : int, optional
        Number of decision variables (power plants).

    Returns
    -------
    float
        Elapsed real time of the construction + solve, in seconds.
    """
    # perf_counter() is monotonic and high-resolution; time.time() is
    # wall-clock and can jump (NTP adjustment, DST), which makes it the
    # wrong tool for measuring an interval.
    start = time.perf_counter()
    c = np.random.uniform(low=30, high=50, size=n)
    A = np.ones(shape=(1, n))
    b = np.random.uniform(low=0.7, high=1.0, size=1) * n * 100
    bounds = (0, 100)
    linprog(c, A_eq=A, b_eq=b, bounds=bounds)
    return time.perf_counter() - start


# Average the timing over 10 runs for two problem sizes.
for nb in [50, 500]:
    temps_moyen = sum(my_opt(nb) for _ in range(10)) / 10
    print(f"Pour n={nb}, ça prend {temps_moyen:.2f} secondes")

# Let's take the opportunity to draw a few plots with `matplotlib`.
import matplotlib.pyplot as plt

# +
nb_expes = 10
nb_centrales = [2, 20, 200, 2000]

# One list of nb_expes timings per problem size.
tous_les_temps = [[my_opt(nb) for _ in range(nb_expes)]
                  for nb in nb_centrales]

# Display — log scale, since solve times span orders of magnitude.
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.boxplot(tous_les_temps, labels=nb_centrales)
ax.set(yscale='log')
plt.show()
# -
tutoriels/scipy_for_optimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Data-Preparation-for-Plotting" data-toc-modified-id="Data-Preparation-for-Plotting-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Data Preparation for Plotting</a></span></li><li><span><a href="#Total-Deaths-by-Borough" data-toc-modified-id="Total-Deaths-by-Borough-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Total Deaths by Borough</a></span></li><li><span><a href="#Aggregate-plots" data-toc-modified-id="Aggregate-plots-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Aggregate plots</a></span></li></ul></div> # - # # Pedestrian Deaths in NYC # Data Visualization for pedestrians and cyclists killed by motor vehicles in NYC 2012 - 2020 import pandas as pd import numpy as np import datetime from bokeh.plotting import figure, output_notebook, show, gmap from bokeh.models import CategoricalColorMapper, ColumnDataSource, Legend, CDSView, GroupFilter from bokeh.models import CheckboxGroup, HoverTool, GMapOptions, FactorRange, Title from bokeh.layouts import widgetbox, row, column from bokeh.io import curdoc, push_notebook from bokeh.transform import factor_cmap from bokeh.palettes import colorblind output_notebook() # + # import json # with open('keys.json') as f: # keys = json.load(f) # google_api_key = keys['google_map'] # - df = pd.read_csv('data/peds_death_data', index_col=0) df.head() # ## Data Preparation for Plotting df.borough.value_counts(dropna=False) df = df.drop(df[(df.borough == 'NOT NYC') | (df.borough.isna() == True)].index) df = df.drop(['borough_gps', 'location'], axis=1) df.borough.value_counts(dropna=False) df['total_deaths'] = df.number_of_cyclist_killed+df.number_of_pedestrians_killed 
df['month_year'] = pd.to_datetime(df['date']).dt.to_period('M') df['date'] = pd.to_datetime(df['date']) # ## Total Deaths by Borough # See `test_plots.py` for interactive html plot. Run `bokeh serve --show test_plots.py` in terminal for use. boro_list = sorted(df.borough.unique().tolist()) boro_list source = ColumnDataSource(df) # + boro_list = sorted(df.borough.unique().tolist()) idx = pd.date_range(start=df.date.min(), end=df.date.max()) rolling_df_list = [] for boro in boro_list: tmp_df = df[df.borough ==boro] \ .groupby(['date']).sum()['total_deaths'] \ .reindex(idx, fill_value=0) \ .rolling(window=365) \ .sum() \ .reset_index() tmp_df['borough'] = boro rolling_df_list.append(tmp_df) rolling_df = pd.concat(rolling_df_list, axis=0) source = ColumnDataSource(rolling_df) # color_mapper = CategoricalColorMapper(factors=sorted(df.borough.unique().tolist()), # palette=colorblind['Colorblind'][5]) checkbox = CheckboxGroup(labels=df.borough.unique().tolist(), active=[0,1,2,3,4]) # + p = figure(title = "One Year Rolling Total 2012 - 2020", x_axis_label = "Time", y_axis_label = "Number of Deaths", x_axis_type='datetime', plot_width = 800, toolbar_location = 'above', tools='box_zoom, reset') p.add_layout(Title(text="Pedestrian and cyclist deaths in each borough", align="left", text_font_style="normal"),"above") for i in range(5): p.circle(x='index', y='total_deaths', color=colorblind['Colorblind'][5][i], source=source, view=CDSView(source=source, filters=[GroupFilter(column_name='borough', group=boro_list[i])])) # source=ColumnDataSource(rolling_df[rolling_df['borough']==boro_list[i]]), # legend_label=boro_list[i], # ) # p.legend.orientation = "horizontal" # p.legend.location = "top_center" # p.legend.click_policy="hide" hover_glyph = p.circle(x='index', y='total_deaths', source=source, size=3, alpha=0, hover_fill_color='red', hover_alpha=0.5) tooltips = [('Borough', '@borough'), ('Total', '@total_deaths'), ('Date', "@index{%Y-%m-%d}") ] 
p.add_tools(HoverTool(tooltips=tooltips, mode='vline', renderers=[hover_glyph], formatters={'@index':'datetime', })) # color=dict(field='borough', transform=color_mapper), # p.circle(x='month_year', y='total_deaths', # selection_color="blue", # nonselection_fill_color='gray', # nonselection_alpha=0.2, # size=10, # source=source, # color=dict(field='borough', transform=color_mapper), # legend_field='borough', # hover_fill_color='red', # hover_alpha=0.5, # hover_line_color='white') # hover_glyph = p.circle(x='date', y='total_deaths', # source=source, size=11, alpha=0, # hover_fill_color='red', hover_alpha=0.5) # tooltips = [('Borough', '@borough'), # ('Date', '@month_year{%Y-%m}'), # ('Deaths', '@total_deaths')] # p.add_tools(HoverTool(tooltips = tooltips, # mode='vline', # renderers=[hover_glyph], # formatters={'month_year':'datetime', })) # #push_notebook() show(p) # - # ## Aggregate plots df.head() year_df_main = df[(df['year']!= 2012) & (df['year']!= 2020)] # + year_df = year_df_main.groupby(['year']).sum()['total_deaths'].reset_index() source = ColumnDataSource(year_df) y = figure(title = "Total Deaths by Year 2013 - 2019", x_axis_label = "Years", y_axis_label = "Number of Deaths", plot_width = 800, plot_height = 300, toolbar_location = None, tools="") y.line(x='year', y='total_deaths', source=source, color = colorblind["Colorblind"][3][0]) tooltips = [('Deaths', '@total_deaths')] y.add_tools(HoverTool(tooltips = tooltips)) show(y) # + crash_df = df.groupby(['contributing_factor_vehicle_1']).sum()[['total_deaths']] \ .reset_index().sort_values(['total_deaths'], ascending=True) crash_df = crash_df.loc[crash_df["total_deaths"] >= 5] source = ColumnDataSource(crash_df) v = figure(y_range=crash_df.contributing_factor_vehicle_1.unique(), plot_width=500, plot_height=300, title="Contributing Factors to Incident", toolbar_location=None, tools="") v.hbar(y='contributing_factor_vehicle_1', right='total_deaths', height=0.9, source=source, line_color = 'white', 
hover_fill_color='red', hover_alpha=1.0, hover_line_color='gray') tooltips = [('Deaths', '@total_deaths')] v.add_tools(HoverTool(tooltips = tooltips)) show(v) # + crash_df = df.groupby(['vehicle_type_code1']).sum()[['total_deaths']] \ .reset_index().sort_values(['total_deaths'], ascending=True) crash_df = crash_df.loc[crash_df["total_deaths"] >= 5] crash_df.vehicle_type_code1 = crash_df.vehicle_type_code1.apply(lambda x: x.title()) y_range = crash_df.vehicle_type_code1.unique() source = ColumnDataSource(crash_df) v = figure(y_range=y_range, plot_width=500, plot_height=300, title="Vehicle Type in Incident", toolbar_location=None, tools="") v.hbar(y='vehicle_type_code1', right='total_deaths', height=0.9, source=source, line_color = 'white', hover_fill_color='red', hover_alpha=1.0, hover_line_color='gray') tooltips = [('Deaths', '@total_deaths')] v.add_tools(HoverTool(tooltips = tooltips)) show(v) # + pop_df = pd.read_csv('data/pop_borough', index_col=0) pop_df = pop_df[pop_df.year!=2012] pop_df = pop_df.groupby(['borough', 'year']).sum()['population'] \ .reset_index().sort_values(['year'], ascending=True) year_df_main = df[(df['year']!= 2012) & (df['year']!= 2020)] pop_df['year_mean_pop'] = pop_df.groupby('year').transform('mean') crash_df = year_df_main.groupby(['borough', 'year']).sum()[['total_deaths']] \ .reset_index().sort_values(['year'], ascending=True) crash_df['year_mean_deaths'] = crash_df.groupby(["year"]).transform('mean') # + # Grouped bar charts in bokeh are non-trivial totals = [] pops = [] year_avg_deaths =[] year_avg_pops = [] group_data = {} pop_data = {} boros = crash_df.borough.unique().tolist() years = crash_df.year.unique().tolist() year_avg_death = crash_df.year_mean_deaths.tolist()[::5] year_avg_pop = pop_df.year_mean_pop.tolist()[::5] for i in range(5): year_avg_deaths.extend(year_avg_death) year_avg_pops.extend(year_avg_pop) for i in years: # dict; 'year':[total_1, total_2, ..] 
counts = crash_df[crash_df.year == i] \ .sort_values('borough').total_deaths.tolist() populations = pop_df[pop_df.year == i] \ .sort_values('borough').population.tolist() entry_c = {i:counts} entry_p = {i:populations} group_data.update(entry_c) pop_data.update(entry_p) for i in range(5): # list of ordered seq of totals for k, v in group_data.items(): totals.append(v[i]) for i in range(5): # list of ordered seq of populations for k, v in pop_data.items(): pops.append(v[i]) percentage_pop = tuple([round((total/pop)*100000,2) \ for pop, total in zip(pops, totals)]) avg_percent_pop = tuple([round((total/avg_pop)*100000,2) \ for avg_pop, total in zip(year_avg_pops, year_avg_deaths)]) totals = tuple(totals) # bokeh needs tuples strings for grouped bars #list of tuples [ ('Bronx', '2013'), ('brooklyn', '2013')...('Queens', '2018') ] x = [(str(year), boro) for boro in boros for year in years] boro_source = ColumnDataSource(data=dict(x=x, total=totals, avg_total=year_avg_deaths, pop_percent=percentage_pop, avg_percent=avg_percent_pop)) x_range= FactorRange(*x) b = figure(x_range=x_range, plot_width=800, plot_height= 300, title="Deaths per Borough by Year", toolbar_location=None, tools="") b.vbar(x='x', top='total', width=0.9, source=boro_source, line_color = 'white', hover_fill_color='red', hover_alpha=1.0, hover_line_color='gray', fill_color=factor_cmap('x', palette=colorblind['Colorblind'][7], factors=boros, start=1, end=2)) b.y_range.start = 0 b.x_range.range_padding = 0.1 b.xaxis.major_label_orientation = 1 b.xgrid.grid_line_color = None tooltips = [('Deaths', '@total'), ('Average Yearly Deaths', '@avg_total')] b.add_tools(HoverTool(tooltips = tooltips)) b1 = figure(x_range=x_range, plot_width=800, plot_height= 300, title="Percentage Deaths per 100,000", toolbar_location=None, tools="") b1.vbar(x='x', top='pop_percent', width=0.9, source=boro_source, line_color = 'white', hover_fill_color='red', hover_alpha=1.0, hover_line_color='gray', fill_color=factor_cmap('x', 
palette=colorblind['Colorblind'][7], factors=boros, start=1, end=2)) b1.y_range.start = 0 b1.x_range.range_padding = 0.1 b1.xaxis.major_label_orientation = 1 b1.xgrid.grid_line_color = None tooltips = [('Percent Deaths', '@pop_percent'), ('Yearly Avg', '@avg_percent')] b1.add_tools(HoverTool(tooltips = tooltips)) layout = column(b,b1) show(layout) # - def make_victim_column(row): if row.number_of_pedestrians_killed > 1 & \ row.number_of_cyclist_killed == 0: return 'Pedestrian' if row.number_of_cyclist_killed > 1 & \ row.number_of_pedestrians_killed == 0: return 'Cyclist' else: return 'Both' map_df = df.dropna(subset=['latitude', 'longitude']) map_df['victim'] = df.apply(lambda x: make_victim_column(x), axis=1) # + map_options = GMapOptions(lat=40.737, lng=-73.990, map_type="roadmap", zoom=15) color_mapper = CategoricalColorMapper(factors=["Pedestrian", "Cyclist"], palette=[colorblind['Colorblind'][5][1], colorblind['Colorblind'][5][0]]) hover_map = HoverTool(tooltips = [('Date', '@month_year{%Y-%m}'), ('Deaths', '@total_deaths'), ('Vehicle Type', '@vehicle_type_code1'), ('Cause', '@contributing_factor_vehicle_1')], formatters={'month_year':'datetime', }) g = gmap(google_api_key=google_api_key, map_options=map_options, title="NYC Pedestrian and Cyclists Deaths 2012 - 2020", plot_width=800, toolbar_location = 'above') map_source = ColumnDataSource(map_df) g.circle(x='longitude', y='latitude', size=10, fill_alpha=1.0, color=dict(field='victim', transform=color_mapper), legend='victim', source=map_source) g.legend.location = "top_left" g.add_tools(hover_map) show(g) # -
Pedestrian_deaths_dashbord_dev.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Spark 2.0.0 - Scala 2.11 // language: scala // name: spark2-scala // --- // ### Note // // Please view the [README](https://github.com/deeplearning4j/dl4j-examples/tree/overhaul_tutorials/tutorials/README.md) to learn about installing, setting up dependencies, and importing notebooks in Zeppelin // ### Background // // In our previous tutorial, we learned about a very simple neural network model - the logistic regression model. Although you can solve many tasks with a simple model like that, most of the problems require a much complex network configuration. Typical Deep leaning model consists of many layers between the inputs and outputs. In this tutorial, we are going to learn about one of those configuration i.e. Feed-forward neural networks. // // ### Feed-Forward Networks // // Feed-forward networks are those in which there is not cyclic connection between the network layers. The input flows forward towards the output after going through several intermediate layers. A typical feed-forward network looks like this: // // |---|---|---| // |**Feed-forward network** | ![A typical feed-forward network](https://upload.wikimedia.org/wikipedia/en/5/54/Feed_forward_neural_net.gif) | [Source](https://upload.wikimedia.org/wikipedia/en/5/54/Feed_forward_neural_net.gif) | // // Here you can see a different layer named as a hidden layer. The layers in between our input and output layers are called hidden layers. It's called hidden because we don't directly deal with them and hence not visible. There can be more than one hidden layer in the network. // // Just as our softmax activation after our output layer in the previous tutorial, there can be activation functions between each layer of the network. They are responsible to allow (activate) or disallow our network output to the next layer node. 
There are different activation functions such as sigmoid and relu etc. // ### Imports import org.deeplearning4j.nn.api.OptimizationAlgorithm import org.deeplearning4j.nn.conf.graph.MergeVertex import org.deeplearning4j.nn.conf.layers.{DenseLayer, GravesLSTM, OutputLayer, RnnOutputLayer} import org.deeplearning4j.nn.conf.{ComputationGraphConfiguration, MultiLayerConfiguration, NeuralNetConfiguration, Updater} import org.deeplearning4j.nn.graph.ComputationGraph import org.deeplearning4j.nn.multilayer.MultiLayerNetwork import org.deeplearning4j.nn.weights.WeightInit import org.nd4j.linalg.activations.Activation import org.nd4j.linalg.learning.config.Nesterovs import org.nd4j.linalg.lossfunctions.LossFunctions // ### Let's create the feed-forward network configuration val conf = new NeuralNetConfiguration.Builder() .seed(12345) .iterations(1) .weightInit(WeightInit.XAVIER) .updater(Updater.ADAGRAD) .activation(Activation.RELU) .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT) .learningRate(0.05) .regularization(true).l2(0.0001) .list() .layer(0, new DenseLayer.Builder().nIn(784).nOut(250).weightInit(WeightInit.XAVIER).activation(Activation.RELU) //First hidden layer .build()) .layer(1, new OutputLayer.Builder().nIn(250).nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) //Output layer .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .build()) .pretrain(false).backprop(true) .build() // ### What we did here? // // As you can see above that we have made a feed-forward network configuration with one hidden layer. We have used a RELU activation between our hidden and output layer. RELUs are one of the most popularly used activation functions. Activation functions also introduce non-linearities in our network so that we can learn on more complex features present in our data. 
// Hidden layers can learn features from the input layer and send those features on
// to be analyzed by our output layer to get the corresponding outputs.
//
// You can similarly make network configurations with more hidden layers as:

// Just make sure the number of inputs of each layer equals the number of outputs of the previous layer.
//
// NOTE(fix): the original snippet built the two intermediate layers with
// OutputLayer.Builder even though its comments label them "Second/Third hidden
// layer".  In DL4J an OutputLayer is a loss-bearing terminal layer; intermediate
// (hidden) layers must be DenseLayer, as layer 0 already is.  Corrected below —
// layer sizes, activations and the final softmax/NLL output are unchanged.
val conf = new NeuralNetConfiguration.Builder()
    .seed(12345)
    .iterations(1)
    .weightInit(WeightInit.XAVIER)
    .updater(Updater.ADAGRAD)
    .activation(Activation.RELU)
    .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
    .learningRate(0.05)
    .regularization(true).l2(0.0001)
    .list()
    .layer(0, new DenseLayer.Builder().nIn(784).nOut(250).weightInit(WeightInit.XAVIER).activation(Activation.RELU) //First hidden layer
        .build())
    .layer(1, new DenseLayer.Builder().nIn(250).nOut(100).weightInit(WeightInit.XAVIER).activation(Activation.RELU) //Second hidden layer
        .build())
    .layer(2, new DenseLayer.Builder().nIn(100).nOut(50).weightInit(WeightInit.XAVIER).activation(Activation.RELU) //Third hidden layer
        .build())
    .layer(3, new OutputLayer.Builder().nIn(50).nOut(10).weightInit(WeightInit.XAVIER).activation(Activation.SOFTMAX) //Output layer
        .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
        .build())
    .pretrain(false).backprop(true)
    .build()

// ### What's next?
//
// - Check out all of our tutorials available [on Github](https://github.com/deeplearning4j/dl4j-examples/tree/master/tutorials). Notebooks are numbered for easy following.
//
dl4j-examples/tutorials/04. Feed-forward.zepp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kunal-kumar-chaudhary/Global-AI-Challenge/blob/main/Global_AI_Challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="V543l_yS_NvR" # importing our essential libraries # + id="emsOcmqI_NBl" import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="0mjTuDyz_JXr" outputId="425782d7-4e87-4f62-b217-68733c52d5fd" train_data = pd.read_csv("train.csv") train_data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="wCBMmkHkdiY9" outputId="ce8a2c4b-ceb3-490f-cc0e-680f0d2f70ea" test_data = pd.read_csv("test.csv") test_data.head() # + id="s31uag8P9FBG" # !pip install rdkit-pypi -qqq # + id="STrMFE4l-RSt" from rdkit.Chem import rdMolDescriptors # + id="P1Y6aVZ0ALP1" from rdkit import Chem # + id="GNrZ-Jag-GNr" descriptor_names = list(rdMolDescriptors.Properties.GetAvailableProperties()) get_descriptors = rdMolDescriptors.Properties(descriptor_names) # + colab={"base_uri": "https://localhost:8080/"} id="pofdiaqB-8ik" outputId="4fa57e41-5aa9-4aac-bac0-f7ca7aaca110" descriptor_names # + id="0UdBfaEn_IBV" def smi_to_descriptors(smile): mol = Chem.MolFromSmiles(smile) descriptors = [] if mol: descriptors = np.array(get_descriptors.ComputeProperties(mol)) return descriptors # + colab={"base_uri": "https://localhost:8080/"} id="sW9VyyrxB-ep" outputId="445bbcd9-c76d-4d5e-97bf-ab5256d291ac" smi_to_descriptors(train_data["Smiles"][0]) # + id="dGU3emgBC851" train_data[[descriptor_names]] = "" # + colab={"base_uri": "https://localhost:8080/", "height": 305} 
id="66VphNGkDCvS" outputId="4c76fd2f-7c3f-4e40-f7cd-26b89b49d696" train_data.head() # + id="g14yherq_rC2" train_data['descriptors'] = train_data.Smiles.apply(smi_to_descriptors) # + colab={"base_uri": "https://localhost:8080/", "height": 461} id="TVOLN-VHBEGO" outputId="5de5c32a-a670-4ab0-ddf5-d27750376e32" train_data.head() # + colab={"base_uri": "https://localhost:8080/"} id="HMjSZWwqAS0K" outputId="d34e7ea2-2c8d-4c73-9e67-5b444fdf7ac5" train_data['descriptors'].head() # + colab={"base_uri": "https://localhost:8080/"} id="eljDxMKGOkjJ" outputId="b2f4ee58-392d-4517-e22b-e9dddb3b6cdf" for row in range(len(train_data)): for key, value in enumerate(train_data['descriptors'][row]): train_data[descriptor_names[key]][row] = value # + colab={"base_uri": "https://localhost:8080/", "height": 461} id="hM7ZPgsTVgU3" outputId="6adca3f6-6265-4b2c-c3a1-7a5739068f31" train_data.head() # + id="vfJQfDrudX1i" test_data[[descriptor_names]] = "" # + id="4C0gmcOLdyyd" test_data['descriptors'] = test_data.Smiles.apply(smi_to_descriptors) # + colab={"base_uri": "https://localhost:8080/"} id="ee8NuSigdGhl" outputId="d940e7b7-40ba-4bb9-a568-51ed78dee73c" for row in range(len(test_data)): for key, value in enumerate(test_data['descriptors'][row]): test_data[descriptor_names[key]][row] = value # + colab={"base_uri": "https://localhost:8080/", "height": 461} id="FtoXhnOweFuI" outputId="31859cc3-bfb9-4116-ffd7-29c4605499ec" test_data.head() # + id="pYi2BLEcwahy" train_data.to_csv("training data.csv") test_data.to_csv("testing data.csv") # + [markdown] id="uOpfFBqyeMkC" # # DATA CLEANING/ EXPLORATORY DATA ANALYSIS # + colab={"base_uri": "https://localhost:8080/"} id="DD4X4VX5eQ5x" outputId="c4b9400a-761f-4a32-a142-ee7ae224675d" train_data.columns # + id="2nCzXu4PgkgL" train_data.drop(columns=["Unnamed: 0", "Smiles", "descriptors"], inplace=True) test_data.drop(columns=["Unnamed: 0", "Smiles", "descriptors"], inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 207} 
id="UgYVNDrOh7gs" outputId="c750ba77-e64d-4c36-f674-b2758400d6f2" train_data.head(3) # + colab={"base_uri": "https://localhost:8080/"} id="OfGS41f-h_lO" outputId="ed54cfa5-d373-4a5d-d2b8-3cd463db82b5" train_data.info() # + id="0SiS1NQfjhQB" train_data.to_csv("modified data") # + [markdown] id="s-32_Ejfk8mu" # # FEATURE SCALING # + [markdown] id="qIdEciUqlEqF" # seperating data into `features` and `labels` # + id="sAap5gwQlAHt" data_y = train_data['Active'] data_x = train_data.drop(columns=['Active']) # + [markdown] id="1sOFHm7dtRyN" # let's check the distribution of classes in our dataset # + colab={"base_uri": "https://localhost:8080/", "height": 483} id="iUtFb9U6tQjF" outputId="644acd7f-04b7-4788-9433-d7065b27aaf5" plt.figure(figsize=(10,8)) data_y.value_counts().plot.pie(autopct="%.2f") # + [markdown] id="IQ1G54aflk9i" # let's label encode the `data_y` # + colab={"base_uri": "https://localhost:8080/"} id="KaNuIaAilj00" outputId="93c2822a-c6fc-4d7d-cb9d-8f052febe6ae" from sklearn.preprocessing import LabelEncoder le = LabelEncoder() data_y = le.fit_transform(data_y) data_y # + [markdown] id="__Hji2b2lps1" # let's scale our features using `standardization` technique # + colab={"base_uri": "https://localhost:8080/"} id="ksOab_SjmDzL" outputId="2a05632b-eb4f-4834-db9d-c988ad2b7f24" from sklearn.preprocessing import StandardScaler sc = StandardScaler() data_x = sc.fit_transform(data_x) test_data = sc.fit_transform(test_data) data_x # + colab={"base_uri": "https://localhost:8080/"} id="B4nMRGgNmpiC" outputId="706c5a70-142b-435c-9bac-fb3e41f4af5b" test_data # + [markdown] id="d2-3QEmamtfN" # # SPLITTING OUR DATASET INTO TRAINING AND VALIDATION SET # + id="ZXIKo-KYm1jq" from sklearn.model_selection import train_test_split x_train, x_val, y_train, y_val = train_test_split(data_x, data_y, test_size=0.2, random_state = 42, stratify=data_y) # + colab={"base_uri": "https://localhost:8080/"} id="I8G8u4X6o2RY" outputId="25213211-3b18-47ba-febf-cf745ef35f69" len(x_train), 
len(y_train), len(x_val), len(y_val) # + [markdown] id="qMFhRN3XqR9u" # # RANDOM OVERSAMPLING # + colab={"base_uri": "https://localhost:8080/"} id="ZgHBBEZhqgQ1" outputId="e0e31027-9f55-4e83-dde6-a7076eed45cd" # !pip install -U imbalanced-learn from imblearn.over_sampling import SMOTE ruc = SMOTE(sampling_strategy=1) """ this will provide us with the classes in equal ratio. we can change the value of sampling_strategy to change class number ratio. """ x_res, y_res = ruc.fit_resample(x_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="APzzfjXntgSX" outputId="410f11ca-8b73-4c9e-d3da-22ca1312e3d4" y_res # + colab={"base_uri": "https://localhost:8080/", "height": 483} id="vXI05GpVtmu1" outputId="99b309c2-08c4-4907-9558-62daa6a4daca" df = pd.DataFrame(y_res, columns = ["Active"]) plt.figure(figsize=(10,8)) df.value_counts().plot.pie(autopct="%.2f") # + [markdown] id="A6_S30FwuJM4" # now, we can see above that our data is not imbalanced any more. # # we only randomly oversampled our training set to deal with data imbalancement, we did not do anything with our validation data. 
# + [markdown] id="mtxWsoVPuc_x" # # Model 1: Using RUSBoostClassifier # + colab={"base_uri": "https://localhost:8080/"} id="5FvhO5RFubEU" outputId="3a94890e-50b2-4934-b6fe-9456e17bd466" from imblearn.ensemble import RUSBoostClassifier classifier = RUSBoostClassifier(random_state=42) classifier.fit(x_res,y_res) # + colab={"base_uri": "https://localhost:8080/"} id="G_zbk80buuUZ" outputId="9e87e11d-ecf2-48ac-be43-f0ea900975f9" from sklearn.metrics import f1_score y_pred = classifier.predict(x_val) f1 = f1_score(y_val, y_pred) f1 # + [markdown] id="yEvKHU3wvGV2" # # Model 2: Using XGBoost # + colab={"base_uri": "https://localhost:8080/"} id="f89MebGKvFsd" outputId="9b222e06-2431-4815-f4b9-17cab493f76c" import xgboost as xgb XGB = xgb.XGBClassifier() XGB.fit(x_res, y_res) # + colab={"base_uri": "https://localhost:8080/"} id="GqwzU-DqvUFv" outputId="961a4b8d-81a5-4907-d539-374a41c358db" y_pred = XGB.predict(x_val) f1 = f1_score(y_val, y_pred) f1 # + [markdown] id="pMI7DTV7vXhg" # # Model 3: Using Random Forest Classifier # + colab={"base_uri": "https://localhost:8080/"} id="uZfL7M51vWvE" outputId="2f72b5b9-4ab7-46a9-947b-10a758c1a4e6" from sklearn.ensemble import RandomForestClassifier rvc = RandomForestClassifier() rvc.fit(x_res, y_res) # + colab={"base_uri": "https://localhost:8080/"} id="S3xLAQpyvozQ" outputId="8bf16561-68e5-4e6b-dd25-80814f9c7422" y_pred = rvc.predict(x_val) f1 = f1_score(y_val, y_pred) f1 # + [markdown] id="cIOGlqQpv8UF" # # Model 4: USING ANN # + id="3KNapozHwBU2" import tensorflow as tf from tensorflow.keras import layers # + colab={"base_uri": "https://localhost:8080/"} id="6dDyM3b8xM29" outputId="6cf7b3b4-53d0-4b05-948f-ec7f07f197c7" len(x_res[0]) # + id="vb6iLTjEwK1f" model = tf.keras.models.Sequential( [ layers.Dense(8, activation="relu", input_dim = len(x_res[0])), layers.Dense(16, activation = "relu"), # adding a dropout layer layers.Dropout(0.5), layers.Dense(24, activation='relu'), layers.Dense(1, activation="sigmoid") ] ) # compiling the 
model model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall() ]) # + colab={"base_uri": "https://localhost:8080/"} id="6o2ydpABzJ2t" outputId="7ebac05e-2b25-44bd-b75e-ba4bc339b4d1" # let's train our model history = model.fit(x_res, y_res, batch_size=10, epochs=20) # + id="CZeVN8Z7zjee" def cal_f1_score(history): """ this function will take precision and recall and will return the f1-score """ precision = history.history['precision'][-1] recall = history.history['recall'][-1] f1 = 2*((precision*recall)/(precision+recall)) return f1 # + colab={"base_uri": "https://localhost:8080/"} id="Urj3mNTjOLOH" outputId="c277ebd4-62a5-4b2d-8d0d-f834bd1946d3" cal_f1_score(history) # + colab={"base_uri": "https://localhost:8080/"} id="uUJS8PUSPU1w" outputId="d1afe53b-8e90-416b-edfa-7bf7ea088de8" y_prob = model.predict(test_data) y_prob # + colab={"base_uri": "https://localhost:8080/"} id="xr2D5LXdQbQ0" outputId="2180d44d-ca36-41f9-a854-1e42d5829d74" y_pred = tf.argmax(y_prob, axis=1) y_pred # + colab={"base_uri": "https://localhost:8080/", "height": 346} id="h51vCLpLQngj" outputId="08783b03-d067-4a03-a192-95110bed9c4b" sub = pd.read_csv("submission_2.csv") sub.head() # + id="xOAEri9xRghx" sub['Active'] = y_pred # + id="MGYkdDCSRveB" sub.head() # + id="2tNAmNAhRyPl" sub.drop(columns=["Unnamed: 0"], inplace=True) sub.head() # + id="99ou6PUHR3ZX" sub.to_csv("final_submission_2.csv") # + id="2X_Dt7PWW_fW" y_pred = XGB.predict(test_data) y_pred # + id="smrvH5aYXOz3" sub_2 = pd.read_csv("/content/submission_3.csv") sub_2 # + id="OQJ2mT-EXxx_" sub_2['Active'] = y_pred # + id="cI9H8mPPX_Tp" sub_2.head() # + id="w0tZ9JLxYCoM" sub_2.drop(columns=["Unnamed: 0"], inplace=True) sub_2.head() # + id="0hD2ZJvtYNtJ" # sub_2.to_csv("final_submission_3.csv")
Global_AI_Challenge_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"name": "#%%\n"}
import matplotlib.pyplot as plt
from samitorch.inputs.augmentation.transformers import AddBiasField, AddNoise

plt.rc('font', size=25)
import nibabel as nib
import numpy as np
import seaborn as sns
import torch

# + pycharm={"name": "#%%\n"}
# Reconstructed MRBrainS volume and its segmentation from the DCGAN run.
image_2 = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DCGAN_scaled_gaussian_filter_5_disc_ratio_1.50/Reconstructed_Normalized_MRBrainS_Image_90.nii.gz").get_fdata()
mask_2 = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DCGAN_scaled_gaussian_filter_5_disc_ratio_1.50/Reconstructed_Segmented_MRBrainS_Image_90.nii.gz").get_fdata()

# + pycharm={"name": "#%%\n"}
# FIX: the original indexed with `mask`, which is undefined in this notebook
# (NameError); the segmentation loaded above is `mask_2`.
image_2[mask_2 == 0] = 0
# rescale intensities to [0, 1]
image_2 = image_2 / image_2.max()

# + pycharm={"name": "#%%\n"}
plt.imshow(image_2[:, :, 160], cmap="gray")
plt.show()
nib_image = nib.Nifti1Image(image_2, None)
nib.save(nib_image, "new_image_mrbrains.nii.gz")

# + pycharm={"name": "#%%\n"}
# Same processing for the iSEG volume.
image_3 = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DCGAN_scaled_gaussian_filter_5_disc_ratio_1.50/Reconstructed_Normalized_iSEG_Image_90.nii.gz").get_fdata()
mask_3 = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DCGAN_scaled_gaussian_filter_5_disc_ratio_1.50/Reconstructed_Segmented_iSEG_Image_90.nii.gz").get_fdata()

# + pycharm={"name": "#%%\n"}
image_3[mask_3 == 0] = 0
# (removed unused `max = image_3.max()`, which also shadowed the builtin)
image_3 = image_3 / image_3.max()

# + pycharm={"name": "#%%\n"}
plt.imshow(image_3[:, :, 160], cmap="gray")
plt.show()
nib_image = nib.Nifti1Image(image_3, None)
nib.save(nib_image, "new_image_iseg.nii.gz")


# + pycharm={"name": "#%%\n"}
def plot_tissue_histograms(input_mrbrains, input_iseg, image_mrbrains, image_iseg,
                           seg_mrbrains, seg_iseg, gt_mrbrains, gt_iseg,
                           out_path, bins_per_tissue=(128, 128, 128)):
    """Plot per-tissue intensity histograms and save the figure.

    One subplot per tissue class (1 = CSF, 2 = Grey Matter, 3 = White Matter),
    each overlaying four normalized histograms: MRBrainS/iSEG input volumes
    masked by ground truth, and MRBrainS/iSEG normalized volumes masked by the
    predicted segmentation.

    Parameters
    ----------
    input_*, image_* : raw and generator-normalized intensity volumes.
    seg_*, gt_*      : predicted segmentation and ground-truth label volumes.
    out_path         : file path for the saved figure.
    bins_per_tissue  : histogram bin count for the CSF, GM and WM subplots.
    """
    tissues = [(1, "CSF", "CSF"),
               (2, "GM", "Grey Matter"),
               (3, "WM", "White Matter")]
    fig1, axes = plt.subplots(nrows=3, ncols=1, figsize=(15, 15))
    for ax, (label_id, short, long_name), bins in zip(axes, tissues, bins_per_tissue):
        ax.hist(input_mrbrains.flatten()[np.where(gt_mrbrains.flatten() == label_id)],
                bins=bins, range=(0, 1), density=True,
                label="MRBrainS Input " + short, color="darkblue")
        ax.hist(input_iseg.flatten()[np.where(gt_iseg.flatten() == label_id)],
                bins=bins, range=(0, 1), alpha=0.75, density=True,
                label="iSEG Input " + short, color="red")
        ax.hist(image_mrbrains.flatten()[np.where(seg_mrbrains.flatten() == label_id)],
                bins=bins, range=(0, 1), density=True,
                label="MRBrainS Normalized " + short, color="cornflowerblue")
        ax.hist(image_iseg.flatten()[np.where(seg_iseg.flatten() == label_id)],
                bins=bins, range=(0, 1), alpha=0.75, density=True,
                label="iSEG Normalized " + short, color="lightcoral")
        ax.set_xlabel("Intensity")
        ax.set_ylabel("Normalized voxel count")
        ax.set_title("Generated " + long_name + " Histogram")
        ax.legend()
    fig1.tight_layout()
    fig1.savefig(out_path)


# + pycharm={"name": "#%%\n"}
# DCGAN run with discriminator ratio 5.00, image 90.
input_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Input_MRBrainS_Image_90.nii.gz").get_fdata()
input_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Input_iSEG_Image_90.nii.gz").get_fdata()
image_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Normalized_MRBrainS_Image_90.nii.gz").get_fdata()
image_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Normalized_iSEG_Image_90.nii.gz").get_fdata()
seg_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Segmented_MRBrainS_Image_90.nii.gz").get_fdata()
gt_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Ground_Truth_MRBrainS_Image_90.nii.gz").get_fdata()
seg_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Segmented_iSEG_Image_90.nii.gz").get_fdata()
gt_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DCGAN_canada_scaled_gaussian_filter_5_disc_ratio_5_00/Reconstructed_Ground_Truth_iSEG_Image_90.nii.gz").get_fdata()

# + pycharm={"name": "#%%\n"}
plot_tissue_histograms(input_mrbrains, input_iseg, image_mrbrains, image_iseg,
                       seg_mrbrains, seg_iseg, gt_mrbrains, gt_iseg,
                       "histograms.png", bins_per_tissue=(128, 128, 128))

# + pycharm={"name": "#%%\n"}
# Dual UNet run (scaled, dual dataset), image 80.
input_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Input_MRBrainS_Image_80.nii.gz").get_fdata()
input_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Input_iSEG_Image_80.nii.gz").get_fdata()
image_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Normalized_MRBrainS_Image_80.nii.gz").get_fdata()
image_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Normalized_iSEG_Image_80.nii.gz").get_fdata()
seg_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Segmented_MRBrainS_Image_80.nii.gz").get_fdata()
gt_mrbrains = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Ground_Truth_MRBrainS_Image_80.nii.gz").get_fdata()
seg_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Segmented_iSEG_Image_80.nii.gz").get_fdata()
gt_iseg = nib.load("/Users/pierre-luc-delisle/Documents/DualUNet_canada_scaled_dual_dataset/reconstructed_images/Reconstructed_Ground_Truth_iSEG_Image_80.nii.gz").get_fdata()

# + pycharm={"name": "#%%\n"}
# NOTE(review): this overwrites the "histograms.png" written above, exactly as
# the original notebook did — confirm whether a distinct filename was intended.
plot_tissue_histograms(input_mrbrains, input_iseg, image_mrbrains, image_iseg,
                       seg_mrbrains, seg_iseg, gt_mrbrains, gt_iseg,
                       "histograms.png", bins_per_tissue=(128, 256, 256))

# + pycharm={"name": "#%%\n"}
# Dual UNet run (dual dataset, unscaled), image 80.
input_mrbrains = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Input_MRBrainS_Image_80.nii.gz").get_fdata()
input_iseg = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Input_iSEG_Image_80.nii.gz").get_fdata()
image_mrbrains = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Normalized_MRBrainS_Image_80.nii.gz").get_fdata()
image_iseg = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Normalized_iSEG_Image_80.nii.gz").get_fdata()
seg_mrbrains = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Segmented_MRBrainS_Image_80.nii.gz").get_fdata()
gt_mrbrains = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Ground_Truth_MRBrainS_Image_80.nii.gz").get_fdata()
seg_iseg = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Segmented_iSEG_Image_80.nii.gz").get_fdata()
gt_iseg = nib.load("/Users/pierre-luc-delisle/ComputeCanada/ComputeCanada/final_run/DualUNet_canada_dual_dataset/Reconstructed_Ground_Truth_iSEG_Image_80.nii.gz").get_fdata()

# + pycharm={"name": "#%%\n"}
plot_tissue_histograms(input_mrbrains, input_iseg, image_mrbrains, image_iseg,
                       seg_mrbrains, seg_iseg, gt_mrbrains, gt_iseg,
                       "histograms_dual_unet.png", bins_per_tissue=(256, 256, 256))

# + pycharm={"name": "#%%\n"}
import torch

# Visual sanity check of the data-augmentation transformers on a constant
# volume: starting from all-ones, any visible structure in the plotted slice
# comes purely from the bias field and the rician noise.
# NOTE(review): AddBiasField / AddNoise are the samitorch transformers imported
# at the top of this notebook — confirm they accept a raw torch.Tensor.
ones = torch.Tensor().new_ones((1, 256, 256, 192))
from torchvision.transforms import transforms

transforms_ = transforms.Compose(
    [AddBiasField(1.0, alpha=0.5),
     AddNoise(1.0, snr=60, noise_type="rician")])

transformed = transforms_(ones)
# show one mid-volume slice of the augmented tensor
plt.imshow(transformed[0, 128, :, :])
plt.show()

# + pycharm={"name": "#%%\n"}
deepNormalize/postprocessing/histograms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # user authentication

# list of employee names (all lowercase, so user input must be lowercased
# before any membership test)
employees = ["jake", "king", "max", "jacob", "hero", "titan", "ray", "drake",
             "hiroshima", "ogun", "blake", "timothy", "john", "lisa", "david"]

# +
# user input
name = input("What is your name: ")
# does the user exist
# FIX: the original printed `name in employees` without lowercasing, which
# disagreed with the lowercased check used just below it.
is_employee = name.lower() in employees
print(is_employee)
if is_employee:
    print("Good day", name, ". Your identity has been verified")
else:
    print("You don't belong here, bye")

# +
name = input("What is your name: ")
# FIX: the original wrapped this check in `for i in employees:`, re-evaluating
# the identical membership test once per employee (and leaving ID undefined if
# the list were empty). A single membership test is equivalent.
ID = name.lower() in employees
if ID:
    print("welcome")
else:
    print("Get out")
Python week 12/.ipynb_checkpoints/Project 1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div class="alert alert-block alert-success"> # <b><center>NEURAL NETWORK</center></b> # <b><center>Tensorflow 2를 이용한 XOR 문제 풀어 보기</center></b> # </div> # # Configure Learning Environment # # !pip install git+https://github.com/nockchun/rspy --force import rspy as rsp rsp.setSystemWarning(off=True) # + import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf from tensorflow.keras import utils, layers, models, losses # - np.set_printoptions(linewidth=200, precision=2) sns.set_style("whitegrid") tf.__version__ # # Prepare Data dfJuice = pd.read_excel("resources/juice.xlsx") dfJuice # # XOR dfXor = dfJuice.replace({"맛":{"없다":0, "있다":1}}) dfXor data = dfXor[["오랜지", "딸기", "포도"]].to_numpy() label = dfXor[["맛"]].values data.shape, label.shape # ## Generate Model model = models.Sequential([ layers.Input([3]), layers.Dense(30, activation=tf.keras.activations.relu), layers.Dense(1, activation=tf.keras.activations.sigmoid) ]) model.summary() utils.plot_model(model, 'model.png', show_shapes=True) model.compile( loss=tf.keras.losses.BinaryCrossentropy(), optimizer=tf.keras.optimizers.SGD(learning_rate=0.1) ) # ## Training history = model.fit( data, label, epochs=2000, verbose=0 ) plt.xlabel("Epoch") plt.ylabel("Loss") plt.plot(history.history["loss"]) plt.show() # ## Predict predictedAll = np.array([[]]) predictedLabelAll = np.array([[]]) for itemData, itemLabel in zip(data, label): predicted = model.predict([itemData.tolist()]) predictedLabel = np.where(predicted > 0.5, 1, 0) predictedAll = np.hstack((predictedAll, predicted)) predictedLabelAll = np.hstack((predictedLabelAll, predictedLabel)) rsp.showMulti( predictedAll.reshape(-1,1), predictedLabelAll.reshape(-1,1), label )
lecture_source/machine_learning/0209_neural_network_tf2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# download the MovieLens "latest-small" dataset
# !wget http://files.grouplens.org/datasets/movielens/ml-latest-small.zip

# +
# !unzip ml-latest-small.zip

# +
import pandas as pd
from IPython.display import display

download_dir = './ml-latest-small'
links = pd.read_csv(download_dir + '/links.csv')
display(links.head())
tag = pd.read_csv(download_dir + '/tags.csv')
display(tag.head())
ratings = pd.read_csv(download_dir + '/ratings.csv')
display(ratings.head())
movies = pd.read_csv(download_dir + '/movies.csv')
movies.head()

# + [markdown]
# For this dataset, the movies are already clustered into genres, and the time
# of ratings are given. Both are useful information, the former can give a
# direction to building the movie embeddings, the latter can tell us how the
# user profile has changed over years. Therefore, to take into account of
# these factors, we assume rating is a function of user embedding (numeric
# representation of user profile), time, movie embedding and genres. To build
# a baseline model, we try to incorporate these ideas into the architecture.

# +
# check if any user didnt give a rating
ratings.rating.isnull().value_counts()

# +
# left join genres onto ratings table
df = ratings.join(movies.set_index('movieId'), on='movieId')
df.head()

# +
# turn timestamp from seconds to year
change_to_year = lambda x: pd.Timestamp(x, unit='s').year
df.timestamp = df.timestamp.apply(change_to_year)
df.head()

# +
# turn genres into vector representation: collect the unique genre names
# from the pipe-separated "genres" strings
genres_list = df.genres.tolist()
genres = []
for genre in genres_list:
    genres.extend(genre.split('|'))
genres = set(genres)
print('unique genres: {}'.format(len(genres)))

# +
# to numerically encode features and create mapping for them
# NOTE(review): these maps enumerate Python sets, so index assignment is not
# reproducible across runs — the trained model and the maps must be saved
# together.
genres_map = {}
for i, genre in enumerate(genres):
    genres_map[genre] = i

year_map = {}
year_set = set(df.timestamp.tolist())
for i, year in enumerate(year_set):
    year_map[year] = i

userId_map = {}
userId_set = set(df.userId.tolist())
for i, userId in enumerate(userId_set):
    userId_map[userId] = i

movieId_map = {}
movieId_set = set(df.movieId.tolist())
for i, movieId in enumerate(movieId_set):
    movieId_map[movieId] = i

rating_map = {}
rating_set = set(df.rating.tolist())
for i, rating in enumerate(rating_set):
    rating_map[rating] = i

# +
# before defining our model, check the tensor dimensions:
# input = one-hot(user) ++ one-hot(movie) ++ multi-hot(genres) ++ one-hot(year)
input_d = len(df.userId.unique()) + len(df.movieId.unique()) + len(genres) + len(year_set)
hidden_layer1_d = 500
output_d = len(df.rating.unique())
print(input_d, output_d)

# +
# define model architecture in pytorch: a 2-layer MLP that treats rating
# prediction as classification over the distinct rating values
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(input_d, hidden_layer1_d)
        self.fc2 = nn.Linear(hidden_layer1_d, output_d)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        # normally, this is where softmax used for normalization,
        # however, in pytorch, CrossEntropyLoss will do for us
        return x


net = Net()

# +
# prepare train, validation, test sets
# columns: 0=userId, 1=movieId, 3=timestamp(year), 5=genres; target 2=rating
X = df.iloc[:, [0, 1, 3, 5]]
y = df.iloc[:, 2]

import numpy as np
from sklearn.model_selection import train_test_split

# 70% train, 21% validation, 9% test
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.3, random_state=42)
X_validation, X_test, y_validation, y_test = train_test_split(X_validation, y_validation, test_size=0.3, random_state=41)

# +
# define optimizer
import torch.optim as optim

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)

# +
import torch


# generate tensor for pytorch model's input and output for SGD
def batch_generator(X, y, batch_size):
    """Yield (input, target) tensor batches.

    Each row is encoded as the concatenation of one-hot user, one-hot movie,
    one-hot year and multi-hot genre vectors; targets are rating-class
    indices (int64, as required by CrossEntropyLoss). Rows beyond the last
    full batch are dropped.
    """
    for i in range(X.shape[0]//batch_size):
        batch_input = X.iloc[i*batch_size: i*batch_size + batch_size]
        batch_output = y.iloc[i*batch_size: i*batch_size + batch_size]
        tensor_input = []
        tensor_output = []
        for _, input_ in batch_input.iterrows():
            userId_vector = np.zeros(len(userId_map))
            userId_vector[userId_map[input_.userId]] = 1.
            movieId_vector = np.zeros(len(movieId_map))
            movieId_vector[movieId_map[input_.movieId]] = 1.
            year_vector = np.zeros(len(year_map))
            year_vector[year_map[input_.timestamp]] = 1.
            genre_vector = np.zeros(len(genres_map))
            for g in input_.genres.split('|'):
                genre_vector[genres_map[g]] = 1.
            concat_vector = np.concatenate((userId_vector, movieId_vector, year_vector, genre_vector))
            tensor_input.append(concat_vector)
        # NOTE(review): Series.iteritems() was removed in pandas 2.x — this
        # notebook assumes an older pandas; .items() is the modern equivalent.
        for _, output in batch_output.iteritems():
            tensor_output.append(rating_map[output])
        tensor_input = torch.FloatTensor(tensor_input)
        # type long (64bit integer) for CrossEntropyLoss implementation
        tensor_output = torch.tensor(tensor_output, dtype=torch.long)
        yield tensor_input, tensor_output


# +
# loop over the dataset multiple times
batch_size = 700
for epoch in range(3):
    running_loss = 0.0
    batch_no = 0
    for inputs, labels in batch_generator(X_train, y_train, batch_size):
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if batch_no % 10 == 9:
            print('[{}, {}] average_loss per batch: {}'.format(epoch+1, batch_no+1, running_loss/10))
            running_loss = 0.0
        batch_no += 1
print('Finished Training')


# +
def predict(model, X):
    """Return the argmax class index per row of the model's raw logits."""
    outputs = model(X)
    _, predicted = torch.max(outputs, 1)
    return predicted


# save our initial model
PATH = './drive/My Drive/focus/recommender-system/recommendation-system/2layer-lr_0.01.pth'
torch.save(net.state_dict(), PATH)

# +
# test model on unseen validation data
with torch.no_grad():
    X_vali, y_vali = list(batch_generator(X_validation, y_validation, len(X_validation)))[0]
    prediction = predict(net, X_vali)
    print('validation accuracy: {}'.format((prediction == y_vali).sum().item()/y_vali.size(0)))

# + [markdown]
# That's better than random guess already, since we have 10 classes, random
# guessing is just 10% accuracy, lets try to increase learning rate more and
# 50 more epochs or until validation accuracy start to fall, whichever comes
# first.

# +
# second training phase: higher learning rate + early stopping on a fixed
# validation sample
optimizer_1 = optim.SGD(net.parameters(), lr=0.03, momentum=0.9)
sample_size = 2000
X_validation_sample = X_validation.iloc[:sample_size].copy()
y_validation_sample = y_validation.iloc[:sample_size].copy()


def validation_accuracy(model, X_validation, y_validation):
    """Compute accuracy of `model` on one full batch of the given split."""
    with torch.no_grad():
        X_vali, y_vali = list(batch_generator(X_validation, y_validation, len(X_validation)))[0]
        prediction = predict(model, X_vali)
        return (prediction == y_vali).sum().item()/y_vali.size(0)


def second_training():
    """Train `net` for up to 50 epochs with optimizer_1; stop early when
    validation accuracy drops by more than 0.03 versus the previous epoch."""
    # loop over the dataset multiple times
    batch_size = 700
    last_vali_acc = 0.
    for epoch in range(50):
        running_loss = 0.0
        batch_no = 0
        for inputs, labels in batch_generator(X_train, y_train, batch_size):
            # zero the parameter gradients
            optimizer_1.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer_1.step()
            # print statistics
            running_loss += loss.item()
            if batch_no % 30 == 29:
                print('[{}, {}] average_loss per batch: {}'.format(epoch+1, batch_no+1, running_loss/30))
                running_loss = 0.0
            batch_no += 1
        # check if validation accuracy drops to prevent overfitting
        vali_acc = validation_accuracy(net, X_validation_sample, y_validation_sample)
        if last_vali_acc - vali_acc > 0.03:
            print('Early Stopping, validation_accuracy: {}'.format(vali_acc))
            break
        else:
            last_vali_acc = vali_acc
            print('validation_accuracy: {}'.format(vali_acc))
    print('Finished Training')


second_training()

# +
# save our second model
PATH = './drive/My Drive/focus/recommender-system/recommendation-system/50_epochs-earlyStopping-lr_0.03.pth'
torch.save(net.state_dict(), PATH)

# +
.ipynb_checkpoints/MovieLens dataset-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Nome: <NAME> # Orientações: # # 1 - Acesse a base pública disponibilizada no link: http://insideairbnb.com/get-the-data.html # # 2 - Utilize os dados do Airbnb do Rio de Janeiro (listings.csv.gz) # # 3 - Faça uma análise exploratória para avaliar a consistência dos dados (pré-processamentos necessários) # # 4 - Crie um pipeline para um modelo classificação do room type (feature ‘room_type’). # # 5 - Responda aos questionamentos que se segue: # - Qual o processo utilizado para definir a sua estratégia de modelagem? # - Qual o critério utilizado na seleção do modelo final? # - Você utilizou algum tipo de validação para o modelo? Se sim, qual o processo adotado? Por que escolheu utilizar este método? # - Quais métricas lhe garantiu que o modelo que você desenvolveu é bom? # ## Considerações: # - Por convenção chamarei os room types de quartos, mesmo sabendo que envolve quartos, lugares inteiros (casas/apt) e locais compartilhados. # - O modelo de classificação será voltado para classificar os novos anuncios na plataforma, então trabalharemos muito na parte de escolhas das features para evitar contaminação com dados que possivelmente um anuncio novo não teria. # ## Bibliotecas e leitura da base de dados: import pandas as pd pd.__version__ import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.model_selection import train_test_split plt.style.use('ggplot') from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score #from sklearn import metrics from sklearn.ensemble import RandomForestClassifier data = pd.read_csv('listings/listings.csv') data.info() # ## Pré-processamento: # ### Podemos inicialmente retirar algumas features que tem pouca relevância em relação ao tipo de 'room'. 
# #### *No site disponibilizado foi encontrado o dicionário dos atributos e que também será utilizado para feature enginner. data.head() df_json=data[['id','amenities']] #iremos trabalhar nessa feature após o base model # #### Alguns dados em relação ao anfitrião serão removido, como também alguns valores relacionados as métricas do Airbnb (como numero de anuncios por host, data de entrada, taxa de aceitação de reserva, tipo de propriedade...) visando evitar possiveis contaminações nos dados. # #### Primeiro drop: ColumnsDrop=['id','listing_url','scrape_id','last_scraped','name','description','neighborhood_overview','picture_url','host_id', 'host_url','host_name','host_since','host_location','host_about','host_response_time','host_response_rate', 'host_acceptance_rate','host_is_superhost','host_thumbnail_url','host_picture_url','host_neighbourhood', 'host_listings_count','host_total_listings_count','host_verifications','host_has_profile_pic', 'host_identity_verified','neighbourhood', 'neighbourhood_cleansed','neighbourhood_group_cleansed', 'latitude','longitude','property_type'] data.drop(ColumnsDrop, axis=1, inplace=True) data.head(3) data.isna().mean().reset_index(name="Dados Faltantes").sort_values(by="Dados Faltantes", ascending=False).head(17) # ### Observações importantes: # # - Algumas features que poderiam ser importantes apresentam muitos dados faltantes. # - A feature bathrooms apresenta muitos valores faltantes, mas podem ser recuperados pela feature de texto 'bathrooms_text'. # - Temos um JSON com as Facilidades ('amenities') do quarto que contêm uma série de informações valiosas, mas podemos ignorá-lo até fazer nosso baseline. # - O 'price' está em formato string e precisará de ajustes. 
# #### Second drop:

# Drop availability/review/host-count metrics: these describe how a listing
# performs on the platform over time and would not exist for a brand-new
# listing, so keeping them could leak information into the classifier.
ColumnsDrop2=['calendar_updated', 'minimum_nights','maximum_nights','minimum_minimum_nights','maximum_minimum_nights',
              'minimum_maximum_nights','maximum_maximum_nights','minimum_nights_avg_ntm','maximum_nights_avg_ntm',
              'has_availability','availability_30','availability_60','availability_90','availability_365',
              'calendar_last_scraped','number_of_reviews','number_of_reviews_ltm','number_of_reviews_l30d',
              'first_review','last_review','calculated_host_listings_count','calculated_host_listings_count_entire_homes',
              'calculated_host_listings_count_private_rooms','calculated_host_listings_count_shared_rooms',
              'license']

data.drop(ColumnsDrop2, axis=1, inplace=True)

data.head()

# ### Handling 'bathrooms_text':

data['bathrooms_text'].unique()

# - We need a new feature that tells whether the bathroom is private, shared or a regular one.

# +
# On private vs. shared bathrooms:
# https://www.ic32montijo.com/hotel/pergunta-o-que-e-banheiro-privativo-em-hotel.html
def ValueBathrooms(val):
    """Extract the bathroom count from a 'bathrooms_text' entry.

    Most entries start with the count (e.g. '2 baths', '1.5 shared baths').
    The purely categorical entries starting with 'Half-bath', 'Shared' or
    'Private' carry no count, so they are mapped to 1 (the author's choice;
    arguably a half-bath could be 0.5 instead — TODO confirm).
    """
    tokens = str(val).split()   # str() turns NaN into 'nan', parsed as float('nan') below
    if not tokens:              # blank/empty text: no count can be recovered
        return float('nan')
    if tokens[0] in ('Half-bath', 'Shared', 'Private'):
        return 1
    return float(tokens[0])
# -

# Recover the numeric 'bathrooms' column from its textual description.
# (No rows have been dropped yet, so the index is still the default
# RangeIndex and Series.map stays aligned with the original order.)
data['bathrooms'] = data['bathrooms_text'].map(ValueBathrooms)

# - We could also derive a feature for the bathroom *type* (shared, private or half-bath).
data.head(2)

def AjustPrice(val):
    """Convert a price string such as '$1,250.00' to a float.

    The thousands separator is removed and the leading currency symbol is
    stripped explicitly (the previous version dropped the first character
    blindly, which silently corrupts any value that does not start with '$').
    """
    return float(val.replace(',', '').lstrip('$'))

data['price'] = data['price'].map(AjustPrice)

# Drop the columns that were already converted ('bathrooms_text') or that a
# brand-new listing would not have yet (review scores, reviews per month).
ColumnsDrop3=['bathrooms_text','amenities','review_scores_rating','review_scores_accuracy','review_scores_cleanliness',
              'review_scores_checkin','review_scores_communication','review_scores_location', 'review_scores_value',
              'instant_bookable','reviews_per_month']

data.drop(ColumnsDrop3, axis=1, inplace=True)

data

# From the dictionary we have:
#
# All homes are grouped into the following three room types:
#
# Entire place,
# Private room,
# Shared room

# Class proportions — normalize=True replaces the hard-coded division by the
# dataset size (23635), so this stays correct if the data ever changes.
data["room_type"].value_counts(normalize=True)

# - Our dataset is imbalanced, so a Dummy classifier is a sensible baseline model.

# +
data1=data.dropna()

y=data1['room_type']
X=data1[['accommodates','bathrooms','bedrooms','beds','price']]

# NOTE(review): test_size=0.7 keeps only 30% of the rows for training —
# confirm this was intentional and not swapped with the train size.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.7, random_state=0)

# +
from sklearn.dummy import DummyClassifier

# Baseline: always predict the majority class.
dummy_majority = DummyClassifier(strategy = 'most_frequent').fit(X_train, y_train)
# -

dummy_majority.score(X_test,y_test)

# #### That gives us a 72.6% accuracy baseline.
# ### Pipeline: # + from sklearn.pipeline import Pipeline from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier #from sklearn.linear_model import Lasso, LassoCV, Ridge #from sklearn.model_selection import cross_val_score # - modelos_reg=[ GaussianNB(), KNeighborsClassifier(), DecisionTreeClassifier(random_state=0), RandomForestClassifier(random_state=0), AdaBoostClassifier(random_state=0), GradientBoostingClassifier(learning_rate = 0.01, max_depth = 3, random_state = 0), SVC(kernel='rbf',gamma='scale', C=1), SVC(kernel="linear",C=0.025,probability=True), #MLPClassifier(solver='lbfgs', activation = 'tanh',max_iter=1000, # alpha = 0.1, # hidden_layer_sizes = [100, 100], # random_state = 0) ] for modelo in modelos_reg: pipe= Pipeline([('scaler', StandardScaler()), ('model',modelo)]) pipe.fit(X_train, y_train) predicts=pipe.predict(X_test) print(modelo) print('accuracy: {}\n' .format(accuracy_score(y_test,predicts))) # ### Cuidando do Json: # - Esta coluna contém muitos dados importantes para nosso classificação, porém ao ler o dataframe obtemos uma lista em formato de string com todas as "facilidades" do quarto. # - Nossa meta é obter todos estes dados organizados em colunas numericas para implementação do modelo. df_json['amenities'] # - Primeiramente podemos transformar a lista que está em formato string em uma lista de verdade. # - Depois transformar cada lista em um pequeno dataframe e concaten=a-los, usando o atributo 'sort=False' para manter os dados de cada dataframe. # - Os dados faltantes podemos completar com 0 já que a falta do dado indica a ausência daquele atributo no quarto. 
# Turn the amenities string (a list serialized as text) into a real Python list
def HelpJson(list_json):
    """Parse an 'amenities' entry like '["Wifi", "Kitchen"]' into a list.

    Brackets, quotes and surrounding whitespace are stripped from every
    element — splitting on ',' leaves a leading space on all but the first
    element, which previously made 'Wifi' and ' Wifi' distinct columns —
    and repeated elements are removed via set().
    """
    items = [x.replace('[', '').replace('"', '').replace(']', '').strip()
             for x in list_json.split(',')]
    return list(set(items))

# +
# Apply the parser to the whole dataset
df_json['amenities']=list(map(HelpJson, df_json['amenities']))
# -

list_json3=df_json['amenities']

# - Now we can loop over the series of lists, build a one-row dataframe per
#   listing, and concatenate everything into one main dataframe.

# +
# Build one single-row dataframe per listing and concatenate ONCE at the end:
# calling pd.concat inside the loop is O(n^2), and the previous version also
# prepended each new row, which reversed the row order relative to df_json
# and therefore misaligned the 'id' column assigned below.
frames = []
for amenity_list in list_json3:
    # one dummy-encoded row: a 1.0 under each amenity this listing offers
    frames.append(pd.DataFrame(np.ones(len(amenity_list)).reshape(1, -1), columns = amenity_list))

data_json = pd.concat(frames, sort=False)   # sort=False keeps each frame's own columns
# -

# - Building this dataset takes some time to process, so save a .csv with the
#   result to reuse it later.

data_json.head()

data_json['id']=list(df_json['id'])

data_json.head()

data_json=data_json.reset_index(drop=True)

data_json.to_csv('data_json.csv',index=False)

# - What now?
# - We have ~1263 features with information about the room;
# - We can drop the features with the most NaNs, i.e. the ones that are very listing-specific.
# - We can fill the remaining NaNs with 0 and train the model to evaluate the results.
# + #Podemos pegar as features por ordem decrescente de dados faltantes columnsjson_ord=data_json.isna().mean().reset_index(name="Dados Faltantes").sort_values(by="Dados Faltantes", ascending=False)['index'] data_json.isna().mean().reset_index(name="Dados Faltantes").sort_values(by="Dados Faltantes", ascending=False).head(1100) # - list(columnsjson_ord[0:10]) #Ficando apenas com as 163 features mais importantes data_json2=data_json.drop(columns=columnsjson_ord[0:1250]) data_json2.head() #completando o dataset com 0 data_json2=data_json2.replace(np.nan,0) data_json2.head() data['id']=list(df_json['id']) data1=data.merge(data_json2, how='outer', on='id') # + data1=data1.dropna() y=data1['room_type'] X=data1.drop(columns=['room_type','id']) X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.7, random_state=0) # - for modelo in modelos_reg: pipe= Pipeline([('scaler', StandardScaler()), ('model',modelo)]) pipe.fit(X_train, y_train) predicts=pipe.predict(X_test) print(modelo) print('accuracy: {}\n' .format(accuracy_score(y_test,predicts))) # - A escolha do classificador se deu por alguns motivos, o erro deste modelo acaba sendo baixo, por trabalhar com diversas árvores, cada uma com um subespaço amostral um pouco difente, isso torna o metodo bem robusto em relação a uma arvore só. # - Além disso, apresenta uma maior segurança quanto a Overfit. # A acurácia(%) resultante foi de 72,80% pode ser melhorada de diversas maneiras. # # Na parte do tratamento dos dados: # - Aumentando a quantidade de colunas trazidas dos dados brutos. # - Não foi possivel trazer os dados de bathrooms e isso teve impacto nos resultados, também. # - Também seria interessante fazer uma analise melhor do data frame e validar as colunas que de fato trazem impacto para o 'room_type'.
Desafio_Neurotech/Airbnb classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Flame Temperature # # This example demonstrates calculation of the adiabatic flame temperature for a methane/air mixture. # %matplotlib notebook import cantera as ct import numpy as np import matplotlib.pyplot as plt # ## Chemical reaction considered (complete combustion) # # The equation of a lean methane/air mixture ($\phi < 1$ or $z > z_s$) is: # # $$\mathrm{C_3H_8 + z (0.21 O_2 + 0.79 N_2) \rightarrow CO_2 + 2 H_2O + 0.21 (z - z_s) O_2 + 0.79 z N_2}$$ # # For a rich mixture ($\phi > 1$ or $z < z_s$), this becomes: # # $$\mathrm{ C_3H_8 + z (0.21 O_2 + 0.79 N_2) \rightarrow (z/z_s) CO_2 + 2 (z/z_s) H_2O + (1-z/z_s) CH_4 + 0.79 z N_2}$$ # # To find the flame temperature resulting from these reactions using Cantera, we create a gas object containing only the species in the above equations, and then use the `equilibrate()` function to find the resulting mixture composition and temperature, taking advantage of the fact that equilibrium will strongly favor conversion of the fuel molecule. # + # Get all of the Species objects defined in the GRI 3.0 mechanism species = {S.name: S for S in ct.Species.listFromFile('gri30.cti')} # Create an IdealGas object with species rconsidered complete_species = [species[S] for S in ('C3H8','O2','N2','CO2','H2O')] gas1 = ct.Solution(thermo='IdealGas', species=complete_species) z = np.linspace(5, 40, 100) T_complete = np.zeros(z.shape) for i in range(len(z)): X = {'C3H8':1.0, 'O2': 0.21*z[i], 'N2': 0.79*z[i]} gas1.TPX = 300, ct.one_atm, X gas1.equilibrate('HP') T_complete[i] = gas1.T # - plt.plot(z, T_complete, label='complete combustion', lw=2) plt.grid(True) plt.xlabel('$z$ mol air / mol C3H8') plt.ylabel('Temperature [K]');
.ipynb_checkpoints/flame_temperature_vrobin1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: xpython # language: python # name: xpython # --- # + [markdown] deletable=false editable=false # Copyright 2021 <NAME> and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code. # - # # Descriptive statistics: length-based metrics # # Text affords different opportunities for descriptive statistics than typical variables. # # Why not analyze text like any other variable? # Suppose we consider the primary unit of analysis to be the word. # Then we could consider characterizing words as categorical (also known as nominal) variables. # As a categorical variable, we might expect a word variable to have millions of levels if we include dictionary words (about 250 thousand words in a college dictionary) as well as proper nouns (Wikipedia has over 4 million entries). # That's just for a single word. # Since words typically don't occur in isolation but rather in the company of other words, we are forced to consider using a sequence of words as a categorical variable. # However, we clearly run into sparsity issues very quickly (consider sequences of 2 words could have in the ballpark of 4 million-squared levels) - we will never observe many levels of our variable of interest. # # For these reasons, it is not useful or practical to try to directly calculate descriptive statistics as we would for typical variables. # Instead, we typically **transform the text into numeric form** using some kind of metric. # <!-- or we **transform the text into a distribution.** --> # ## What you will learn # # You will learn about text-oriented descriptive statistics based on text length. 
# # We will cover: # # - Length-based metrics # - text length # - text length in words # - text length in sentences # - average word/sentence length # - readability # ## When to use length-based metrics # # Descriptive statistics are helpful for exploring the data and considering other potential analyses. # The transformations on text that we discuss may also be useful as features in later modeling. # ## Length-based metrics # # We'll continue with the built-in `gutenberg` corpus from NLTK. # Let's import the `gutenberg` corpus: # # - from `nltk.corpus` import `gutenberg` # + from nltk.corpus import gutenberg #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="`$^y`^v4:G8DO1QCBCw8">gutenberg</variable></variables><block type="importFrom" id="XD9YVa/m9vX;ax-@^}(K" x="16" y="64"><field name="libraryName">nltk.corpus</field><field name="libraryAlias" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field></block></xml> # - # ### Text length in characters # # When we consider text length as a metric, we can clearly consider it on multiple scales. # At the coarsest level we can consider the length of the entire text in characters. # We've previously seen how to use NLTK to get a list of texts in a corpus and the raw form of the text (i.e. string, or sequence of characters). 
# Let's combine those operations and also get the length of the raw text: # # - Set rawLengths to a list with one element containing # - for each item `i` in list with `gutenberg` do `fileids` (see LOOPS) # - yield length of (see TEXT) with `gutenberg` do `raw` using `i` # - Display rawLengths # + rawLengths = [(len(gutenberg.raw(i))) for i in (gutenberg.fileids())] rawLengths #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="AO?GdNQ:*92|iDWB.)YR">rawLengths</variable><variable id="LZ#}.J~9XYczA[nu4?|Q">i</variable><variable id="`$^y`^v4:G8DO1QCBCw8">gutenberg</variable></variables><block type="variables_set" id="ZR=zxJ`,Px9$cDQ}u?!N" x="29" y="316"><field name="VAR" id="AO?GdNQ:*92|iDWB.)YR">rawLengths</field><value name="VALUE"><block type="lists_create_with" id="8eO0[B%0~mtEmLX:+=dw"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="qHXxT3|WBM:kMw~muDTN"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field><value name="LIST"><block type="varDoMethod" id="tLR@!_zful,@toy1e3E("><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">fileids</field><data>gutenberg:fileids</data></block></value><value name="YIELD"><block type="text_length" id="C[M$vurF8L;mQ|g`!((h"><value name="VALUE"><shadow type="text" id="OWYJxoN6q~fXVN0HS0QW"><field name="TEXT">abc</field></shadow><block type="varDoMethod" id="#IalxaHkKH5=q1de@8Ar"><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">raw</field><data>gutenberg:raw</data><value name="INPUT"><block type="variables_get" id="u[Lug07Y.B({Q]G-6du|"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field></block></value></block></value></block></value></block></value></block></value></block><block type="variables_get" id=")DU}BKs8923`-}.@eA)Q" x="8" y="402"><field name="VAR" id="AO?GdNQ:*92|iDWB.)YR">rawLengths</field></block></xml> # - # Each one of these is the length (in characters) of a book in the `gutenberg` 
corpus. # ### Text length in words # # Let's repeat this operation but retrieve words instead of text length. # Since we've already covered how to do word tokenization manually, we'll use the built in `gutenberg` tokenization to focus on the new concept: # # - Set wordLengths to a list with one element containing # - for each item `i` in list with `gutenberg` do `fileids` (see LOOPS) # - yield length of (see LISTS) with `gutenberg` do `words` using `i` # - Display wordLengths # # Note the only changes are `words` instead of `raw` and `length of` from LISTS instead of TEXT. # This is because while `raw` gives us one big string, `words` gives us a list of words. # However, the logic of the loop is the same (we sometimes call this a traversal, because we are traversing the data to calculate something). # + wordLengths = [(len(gutenberg.words(i))) for i in (gutenberg.fileids())] wordLengths #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="nb}L;_W{,H)*Jc!qq]@S">wordLengths</variable><variable id="LZ#}.J~9XYczA[nu4?|Q">i</variable><variable id="`$^y`^v4:G8DO1QCBCw8">gutenberg</variable></variables><block type="variables_set" id="SKZpRLP{wcl/g*{^W3WV" x="4" y="319"><field name="VAR" id="nb}L;_W{,H)*Jc!qq]@S">wordLengths</field><value name="VALUE"><block type="lists_create_with" id="8eO0[B%0~mtEmLX:+=dw"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="qHXxT3|WBM:kMw~muDTN"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field><value name="LIST"><block type="varDoMethod" id="tLR@!_zful,@toy1e3E("><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">fileids</field><data>gutenberg:fileids</data></block></value><value name="YIELD"><block type="lists_length" id="b5(0SiwR87=9]SU8IBvy"><value name="VALUE"><block type="varDoMethod" id="#IalxaHkKH5=q1de@8Ar"><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field 
name="MEMBER">words</field><data>gutenberg:words</data><value name="INPUT"><block type="variables_get" id="u[Lug07Y.B({Q]G-6du|"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field></block></value></block></value></block></value></block></value></block></value></block><block type="variables_get" id="aJWMl/VaGmo=-c|`$nxp" x="8" y="436"><field name="VAR" id="nb}L;_W{,H)*Jc!qq]@S">wordLengths</field></block></xml> # - # As expected, the number or words is much shorter than the number of characters. # We will return to this shortly. # ### Text length in sentences # # Let's look at the same text, but this time in sentences: # # - Set sentenceLengths to a list with one element containing # - for each item `i` in list with `gutenberg` do `fileids` (see LOOPS) # - yield length of (see LISTS) with `gutenberg` do `sents` using `i` # - Display sentenceLengths # + sentenceLengths = [(len(gutenberg.sents(i))) for i in (gutenberg.fileids())] sentenceLengths #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="$OD[:+1843Cn0O3j8JiE">sentenceLengths</variable><variable id="LZ#}.J~9XYczA[nu4?|Q">i</variable><variable id="`$^y`^v4:G8DO1QCBCw8">gutenberg</variable></variables><block type="variables_set" id="SKZpRLP{wcl/g*{^W3WV" x="4" y="319"><field name="VAR" id="$OD[:+1843Cn0O3j8JiE">sentenceLengths</field><value name="VALUE"><block type="lists_create_with" id="8eO0[B%0~mtEmLX:+=dw"><mutation items="1"></mutation><value name="ADD0"><block type="comprehensionForEach" id="qHXxT3|WBM:kMw~muDTN"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field><value name="LIST"><block type="varDoMethod" id="tLR@!_zful,@toy1e3E("><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">fileids</field><data>gutenberg:fileids</data></block></value><value name="YIELD"><block type="lists_length" id="b5(0SiwR87=9]SU8IBvy"><value name="VALUE"><block type="varDoMethod" id="#IalxaHkKH5=q1de@8Ar"><field name="VAR" 
id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">sents</field><data>gutenberg:sents</data><value name="INPUT"><block type="variables_get" id="u[Lug07Y.B({Q]G-6du|"><field name="VAR" id="LZ#}.J~9XYczA[nu4?|Q">i</field></block></value></block></value></block></value></block></value></block></value></block><block type="variables_get" id="aJWMl/VaGmo=-c|`$nxp" x="8" y="436"><field name="VAR" id="$OD[:+1843Cn0O3j8JiE">sentenceLengths</field></block></xml> # - # And again, the number of sentences is quite a bit lower than the number of words, as expected. # ### Average word/sentence length # # There are at least two ways we could calculate average word length using what we've covered so far. # We could to a traversal of words and calculate the average word length for each text. # Alternatively, we could use the values we've already compute and divide. # The same applies to sentence length. # # Conceptually what we want to do is perform operations with the first element of `wordLengths`, `sentenceLengths`, and `rawLengths`, the second element of these lists, and so on. # Rather than create our own data structures for facilitating these operations, let's put these lists into a dataframe. 
# Start by importing `pandas`: # # - import `pandas` as `pd` # + import pandas as pd #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="_V`RIwppcpbRKT:m6^qH">pd</variable></variables><block type="importAs" id="Gy5)p-`[BHUUWE}k1DeL" x="16" y="10"><field name="libraryName">pandas</field><field name="libraryAlias" id="_V`RIwppcpbRKT:m6^qH">pd</field></block></xml> # - # We're going to create a dataframe with these lists using the `zip` operator in LISTS: # # - Set `dataframe` to with `pd` create `DataFrame` using a list containing # - `zip` a list containing # - with `gutenberg` do `fileids`,`wordLengths`, `sentenceLengths`, `rawLengths` # - freestyle `columns=['corpus','words','sentences','characters']` # - Display `dataframe` # + dataframe = pd.DataFrame(zip(gutenberg.fileids(), wordLengths, sentenceLengths, rawLengths), columns=['corpus','words','sentences','characters']) dataframe #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="d*P53^Ni!VyA[RubgfYr">dataframe</variable><variable id="_V`RIwppcpbRKT:m6^qH">pd</variable><variable id="`$^y`^v4:G8DO1QCBCw8">gutenberg</variable><variable id="nb}L;_W{,H)*Jc!qq]@S">wordLengths</variable><variable id="$OD[:+1843Cn0O3j8JiE">sentenceLengths</variable><variable id="AO?GdNQ:*92|iDWB.)YR">rawLengths</variable></variables><block type="variables_set" id="Nnj3jwtJa6+~cV1=RDn_" x="-149" y="237"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="VALUE"><block type="varCreateObject" id="*C0V~q-fS]kDUMRq`O-N"><field name="VAR" id="_V`RIwppcpbRKT:m6^qH">pd</field><field name="MEMBER">DataFrame</field><data>pd:DataFrame</data><value name="INPUT"><block type="lists_create_with" id="DoM~-@qI)6TgbDc;vBMb"><mutation items="2"></mutation><value name="ADD0"><block type="zipBlock" id="nv7/65]-+;=,M.B)yU%U"><value name="x"><block type="lists_create_with" id="@PN$;KCRy[Jv;QJ+d#($"><mutation items="4"></mutation><value name="ADD0"><block type="varDoMethod" 
id="FJ#DraHg!(/g)_[-^Dn]"><field name="VAR" id="`$^y`^v4:G8DO1QCBCw8">gutenberg</field><field name="MEMBER">fileids</field><data>gutenberg:fileids</data></block></value><value name="ADD1"><block type="variables_get" id="1UbO_xg6I)qqO#p=]Trh"><field name="VAR" id="nb}L;_W{,H)*Jc!qq]@S">wordLengths</field></block></value><value name="ADD2"><block type="variables_get" id="%~eQRKbkLX{by+raZ$LQ"><field name="VAR" id="$OD[:+1843Cn0O3j8JiE">sentenceLengths</field></block></value><value name="ADD3"><block type="variables_get" id="}U=.2[w^C@TMLrYf)B-o"><field name="VAR" id="AO?GdNQ:*92|iDWB.)YR">rawLengths</field></block></value></block></value></block></value><value name="ADD1"><block type="dummyOutputCodeBlock" id="o35uqta54?^UVk|[,.|("><field name="CODE">columns=['corpus','words','sentences','characters']</field></block></value></block></value></block></value></block><block type="variables_get" id="(gZ^x=Q!@}~:|xjc+ZXy" x="-133" y="397"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field></block></xml> # - # This nicely brings together and displays everything we've done so far. # # To calculate average word length and sentence length, just add columns: # # - set `dataframe` to with `dataframe` to `assign` using # - freestyle `avg_wl =` `dataframe["characters"]` / `dataframe["words"]` # - set `dataframe` to with `dataframe` to `assign` using # - freestyle `avg_sl =` `dataframe["words"]` / `dataframe["sentences"]` # - Display dataframe # # Note that the standard unit for word length is characters but that the standard unit for sentence length is words. 
# + dataframe = dataframe.assign(avg_wl= (dataframe['characters'] / dataframe['words'])) dataframe = dataframe.assign(avg_sl= (dataframe['words'] / dataframe['sentences'])) dataframe #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="d*P53^Ni!VyA[RubgfYr">dataframe</variable></variables><block type="variables_set" id="{UO)w}M?tYx?A02OPAw9" x="-83" y="249"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="VALUE"><block type="varDoMethod" id="L%G*;r8*$i5SLn{,Cc$T"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><field name="MEMBER">assign</field><data>dataframe:assign</data><value name="INPUT"><block type="valueOutputCodeBlock" id="Ux2OR.~,)cCrIxgQW6VI"><field name="CODE">avg_wl=</field><value name="INPUT"><block type="math_arithmetic" id="^JEFtmhs2.cv#;80c/nT"><field name="OP">DIVIDE</field><value name="A"><shadow type="math_number" id="syWxT)`[^TBT:IsIQGMf"><field name="NUM">1</field></shadow><block type="indexer" id="5zNI-WP@PpW0doRlek8W"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="3@IMZDY0GOmgS:YQgx?C"><field name="TEXT">characters</field></block></value></block></value><value name="B"><shadow type="math_number" id="t1.s:%LN=uK/vv%zl:f:"><field name="NUM">1</field></shadow><block type="indexer" id="QeW+!Bpy|dosxiHiI(Vq"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="sg3+/)wO!$kLNdtDKOJN"><field name="TEXT">words</field></block></value></block></value></block></value></block></value></block></value><next><block type="variables_set" id=";EOx!PoNSJEXe3}Nc1AU"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="VALUE"><block type="varDoMethod" id="-{X4*i^Z:O4lkNW=F]rs"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><field name="MEMBER">assign</field><data>dataframe:assign</data><value name="INPUT"><block type="valueOutputCodeBlock" 
id="P+/y;r)NaM2k#OEIf08~"><field name="CODE">avg_sl=</field><value name="INPUT"><block type="math_arithmetic" id="Xl4rhP=?Xbe$O90]f;b1"><field name="OP">DIVIDE</field><value name="A"><shadow type="math_number"><field name="NUM">1</field></shadow><block type="indexer" id="fp`q$osSW7CCT3EC^-jh"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="M/QDQE2#j$}uzL!/L{ow"><field name="TEXT">words</field></block></value></block></value><value name="B"><shadow type="math_number"><field name="NUM">1</field></shadow><block type="indexer" id="u.cc%cZ]S6NC+|6@7MZc"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="FaX@#$GZ_F/S]$=oXR@?"><field name="TEXT">sentences</field></block></value></block></value></block></value></block></value></block></value></block></next></block><block type="variables_get" id="uzxkeHUiih]mUfldHAMe" x="-98" y="405"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field></block></xml> # - # ### Readability # # What we've calculated so far may seem simplistic and perhaps not that useful. # However, several of these metrics are components of perhaps the most well known readability formula, [Flesch Kincaid Grade Level (FKGL)](https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests): # # \begin{equation*} # 0.39 \left( \frac{\mbox{total words}}{\mbox{total sentences}} \right) +11.8 \left( \frac{\mbox{total syllables}}{\mbox{total words}} \right) - 15.59 # \end{equation*} # # FKGL gives us a sense of how difficult text is to read, which could be an important/useful predictor as well as an interesting descriptive statistic. # # We don't have syllable length, however. # Syllable length is a bit of a pain to calculate because English has a deep orthography, so the best way is to use a pronunciation dictionary like [this](https://github.com/steveash/jg2p). 
# For now, we will just assume that English has 1.5 syllables per word and estimate this component: # # - set `dataframe` to with `dataframe` to `assign` using # - freestyle `fkgl =` 0.39 * `dataframe["words"]` / `dataframe["sentences"]` + 11.8 * 1.5 - 15.59 # - Display dataframe # # *Note 1.5 * words/words = 1.5* # + dataframe = dataframe.assign(fkgl= ((0.39 * (dataframe['words'] / dataframe['sentences']) + 11.8 * 1.5) - 15.59)) dataframe #<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="d*P53^Ni!VyA[RubgfYr">dataframe</variable></variables><block type="variables_set" id="d3ELqhcW@UA^R%PLy3cV" x="-83" y="308"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="VALUE"><block type="varDoMethod" id="!?g`gMP):imN8F)T,@V|"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><field name="MEMBER">assign</field><data>dataframe:assign</data><value name="INPUT"><block type="valueOutputCodeBlock" id="k^_3ol/yS5*#Dha3rPVq"><field name="CODE">fkgl=</field><value name="INPUT"><block type="math_arithmetic" id="Md5u*{OHNtri0AjDj],6"><field name="OP">MINUS</field><value name="A"><shadow type="math_number" id="oOOHvHpa1xT0^Jls*1X+"><field name="NUM">0.39</field></shadow><block type="math_arithmetic" id="QGJQ@m3mx5!l=,g9-R+d"><field name="OP">ADD</field><value name="A"><shadow type="math_number" id="X]w:+`6NY)?+=r#3v?Aj"><field name="NUM">0.39</field></shadow><block type="math_arithmetic" id=",gsasO{}sRYc]60t@#ud"><field name="OP">MULTIPLY</field><value name="A"><shadow type="math_number" id="^KWa~qEgh3Q5R?==H)+9"><field name="NUM">0.39</field></shadow></value><value name="B"><shadow type="math_number" id="?4DQu[d7x;oy^4iY|Y3("><field name="NUM">1</field></shadow><block type="math_arithmetic" id="b1}M?uM#pV~iqEz`%Dh~"><field name="OP">DIVIDE</field><value name="A"><shadow type="math_number"><field name="NUM">1</field></shadow><block type="indexer" id="/t1A;EYsDfw+BYK-f{P8"><field name="VAR" 
id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="/`cSF)tsx%3]HefEnKlV"><field name="TEXT">words</field></block></value></block></value><value name="B"><shadow type="math_number"><field name="NUM">1</field></shadow><block type="indexer" id="9_HN7=?;UGbK`ZFpBD)n"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field><value name="INDEX"><block type="text" id="SDk,ItV,c;N.~+Z%vOH]"><field name="TEXT">sentences</field></block></value></block></value></block></value></block></value><value name="B"><shadow type="math_number" id="osy!WzPZ^Ac.Li?,M]Sq"><field name="NUM">1</field></shadow><block type="math_arithmetic" id="pns2jEFkEFV2potV{wzJ"><field name="OP">MULTIPLY</field><value name="A"><shadow type="math_number" id="~z95el]}Uct`QV~*3@LC"><field name="NUM">11.8</field></shadow></value><value name="B"><shadow type="math_number" id="L,wO%x|x/NAv$%-5KtZc"><field name="NUM">1.5</field></shadow></value></block></value></block></value><value name="B"><shadow type="math_number" id="N6x=21,Sww{nHR%@JF|@"><field name="NUM">15.59</field></shadow></value></block></value></block></value></block></value></block><block type="variables_get" id="F^RD,W:_|bYpb/e8Wq3b" x="-90" y="410"><field name="VAR" id="d*P53^Ni!VyA[RubgfYr">dataframe</field></block></xml> # - # The readability differences generally make sense - `Alice and Wonderland` lower than the `King James Bible` lower than `Paradise Lost`, but we also see Shakespeare is the least difficult. # This is perhaps expected given the formula, since Shakespeare has the lowest `avg_sl`, but seems intuitively incorrect for those who have experienced Shakespeare. # Note however, that FKGL does not take into account the frequency of the words themselves (how rare they are), which is a question of *distribution*. # # This are just a few common length-based text metrics. # There are a potentially infinite number - As FKGL shows, basic length-based text metrics can be combined in arbitrary ways. #
Descriptive-statistics-length.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Bootstrap Sampling # # Below is an array of the possible values you can obtain from a die. Let's consider different methods of sampling from these values. # + import numpy as np np.random.seed(42) die_vals = np.array([1,2,3,4,5,6]) # - # 1. Take a random sample of 20 values from **die_vals** using the code below, then answer the question in the first quiz below. np.random.choice(die_vals, size=20) # `2.` Use the code below to choose another sample from **die_vals**, then answer the question in the first quiz below. np.random.choice(die_vals, replace=False, size=20)
Practical_Statistics/Practical_Statistics/08_Sampling Distributions/Sampling Distribution - 24 - Bootstrapping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .sh
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Bash
#     language: bash
#     name: bash
# ---

# Show the current directory and its contents.
pwd
ls
# Move to the filesystem root and look around.
cd /
pwd
ls
cd home
ls
cd jupyter-nurlaura2
ls
cd test
# Create a sub-directory and a set of test files with assorted extensions.
mkdir test1
touch 11.txt
touch 12.mp3
touch 13.jpg
touch 14.mp4
touch 15.png
touch ab.jpg
touch bc.png
touch cd.mp3
touch de.jpg
touch ef.txt
ls
pwd
# Glob patterns: all .png files; .png whose last stem char is a letter;
# .jpg whose last stem char is 1 or 3.
ls *.png
ls *[a-z].png
ls *[13].jpg
# Listing options: -k (sizes in kilobytes), -al (long format incl. hidden files).
ls -k
ls -al
cd ..
pwd
ls
ls -l test
cd test1
cd ..
pwd
cd ..
cd test
ls -l test
pwd
ls -l test
cd test
# Permission experiments on 11.txt: toggle user/group/other rwx bits
# with chmod and inspect the result after each change.
ls -l 11.txt
chmod u-w 11.txt
ls -l 11.txt
chmod u+w 11.txt
chmod g-r 11.txt
chmod o-r 11.txt
ls -l 11.txt
chmod g+w 11.txt
chmod o+x 11.txt
ls -l 11.txt
chmod g+r 11.txt
chmod o-x 11.txt
chmod o+r 11.txt
ls -l 11.txt
# Consult the manual for chmod.
man chmod
homework/nurlaura23.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# File: crossValidation.ipynb
# Purpose: use cross validation to evaluate the model
# Author: <NAME>, <NAME>

import fasttext
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import KFold


def trainModel(trainSet, input_word_vector, lr=0.5, epoch=20):
    """Train a supervised fastText classifier.

    Parameters:
        trainSet: path to the training file (fastText label format).
        input_word_vector: path to the pretrained word-vector file.
        lr: learning rate (default 0.5, the value previously hardcoded).
        epoch: number of training epochs (default 20, previously hardcoded).

    Returns:
        The trained fastText model.
    """
    # 'ova' (one-vs-all) loss supports independent per-label probabilities.
    model = fasttext.train_supervised(input=trainSet,
                                      dim=300,
                                      lr=lr,
                                      epoch=epoch,
                                      loss='ova',
                                      pretrainedVectors=input_word_vector)
    return model


def cross_Validation(df, NFold, lr, epoch, filename):
    """Evaluate the model with K-fold cross validation.

    Parameters:
        df: input data, one labelled example per row.
        NFold: number of folds.
        lr, epoch: training hyper-parameters, forwarded to trainModel.
            (BUG FIX: these were previously accepted but silently ignored —
            trainModel always ran with lr=0.5, epoch=20.)
        filename: name recorded alongside each fold's scores.

    Returns:
        DataFrame with one row per fold: [filename, precision, recall].
    """
    kfold = KFold(n_splits=NFold, shuffle=True, random_state=1)
    resultPD = pd.DataFrame(columns=["filename", 'precision', 'recall'])
    for n, (train, test) in enumerate(kfold.split(df), start=1):
        df.iloc[train].to_csv('train.train', header=False, index=False)
        df.iloc[test].to_csv('test.valid', header=False, index=False)
        try:
            model = trainModel('train.train', 'wiki-news-300d-1M.vec',
                               lr=lr, epoch=epoch)
            # result = (number of samples, precision@1, recall@1)
            result = model.test('test.valid', k=1)
        finally:
            # Always clean up the temporary fold files, even if training fails.
            os.remove('test.valid')
            os.remove('train.train')
        print(f"{n} number of cross validation")
        print("Precision:", result[1])
        print("Recall:", result[2])
        resultPD.loc[n - 1] = [filename, result[1], result[2]]
    return resultPD


# +
# Run 5-fold cross validation for every dataset listed in new_title.txt
# (one file path per line; duplicates are dropped by the set()).
with open('new_title.txt', 'r') as p:
    line = list(set(p.readlines()))

result = []
for i in line:
    # rstrip('\n') instead of i[:-1]: slicing off the last character would
    # corrupt a final line that has no trailing newline.
    path = i.rstrip('\n')
    df = pd.read_csv(path, header=None)
    result.append(cross_Validation(df, 5, 0.5, 20, path))
# -

# get the precision and recall data
# put data on the test.csv (appended; one block of fold rows per input file)
for i in result:
    i.to_csv("test.csv", mode='a', index=True)
Attribute_rank/crossValidation.ipynb
# # Serializing # For ahlive to work, data must be serialized into a standard format. # ### serializing classes # ahlive incorporates these classes to achieve this: # # ahlive class | input data type | use cases # --- | --- | --- # `ah.Array` | `collections.Iterable` | for single, unlabeled 1D arrays # `ah.DataFrame` | `pd.DataFrame` | for multiple, labeled 1D arrays # `ah.Array2D` | `np.ndarray` | for single, unlabeled 2D arrays # `ah.Dataset` | `xr.Dataset` | for multiple, labeled 2D arrays # ### ahlive.Array # `ah.Array` is the basic building block for building an animation in ahlive. # # To instantiate, `xs` and `ys` are often specified as positional arguments; they are, respectively, the horizontal and vertical coordinates of a single data point. If `ys` is not specified, `ys` is set to `xs`, and `xs` is set as an incremental `range`. # # `label` is a common keyword argument for instantiation which yields a legend label in the output animation. Note, the legend will not show automatically if there is only a single item so `legend=True` must be specified. import ahlive as ah x = [1, 2, 3] y = [40, 50, 60] ah_arr = ah.Array(x, y, label='Array', legend=True) print(ah_arr) ah_arr.render() # <div class="alert alert-info"> # # The desired type of plot can be specified; e.g. `chart='bar'` # # </div> # ### ahlive.DataFrame # `ah.DataFrame` builds off `ah.Array`, but utilize column names. # # To instantiate, `df` is required on top of `xs` and `ys`. However, unlike `ah.Array`, column names from the `pd.DataFrame` are passed, i.e. not the actual data values-- likewise for `label`. # # An advantage of using `ah.DataFrame` instead of `ah.Array` is automatic labeling from column names. 
import pandas as pd import ahlive as ah x = [1, 2, 3] y = [40, 50, 60] df = pd.DataFrame({'the_x_name': x, 'y_axis': y}) ah_df = ah.DataFrame(df, 'the_x_name', 'y_axis') print(ah_df) ah_df.render() # Another advantage is if the `label` column contains multiple unique values; the unique values will be grouped and joined separately. import pandas as pd import ahlive as ah x = [1, 2, 3, 4] y = [40, 50, 60, 70] labels = ['a', 'a', 'b', 'b'] df = pd.DataFrame({'the_x_name': x, 'y_axis': y, 'label': labels}) display(df) ah_df = ah.DataFrame(df, 'the_x_name', 'y_axis', label='label') print(ah_df) ah_df.render() # <div class="alert alert-info"> # # `ah.DataFrame` expects a tidy format `df`: # # 1. Each variable forms a column. # 2. Each observation forms a row. # 3. Each type of observational unit forms a table. # # If the input `pd.DataFrame` is not tidy, try using `pd.melt`. An example can be found [here](../tutorials/covid_19_time_series). # # </div> # ### ahlive.Array2D # `ah.Array2D` is much like `ah.Array` but for gridded, 2D arrays. # # In addition to `xs` and `ys`, `ah.Array2D` requires `cs` which is a 2D array used for colormapping. import numpy as np import ahlive as ah x = np.arange(0, 3) y = np.arange(3, 6) c = np.random.random((2, len(y), len(x))) ah_arr2d = ah.Array2D(x, y, c) ah_arr2d.render() # ### ahlive.Dataset # `ah.Dataset` is the `ah.Array2D` version of `ah.DataFrame`. # # `ah.Dataset` requires a total of four positional arguments: `ds`, `xs`, `ys`, `cs`. Similar to `ah.DataFrame`, these arguments should be labels from the `xr.Dataset`, not actual arrays. 
import numpy as np import xarray as xr import ahlive as ah x = np.arange(0, 3) y = np.arange(3, 6) c = np.random.random((2, len(y), len(x))) ds = xr.Dataset() ds['color_data'] = xr.DataArray( c, coords={'the_x_name': x, 'y_axis': y}, dims=('time_dim', 'y_axis', 'the_x_name')) ah_ds = ah.Dataset(ds, 'the_x_name', 'y_axis', 'color_data') print(ah_ds) ah_ds.render() # <div class="alert alert-warning"> # # If you have an `xr.Dataset` with 1D arrays for data variables, do not use `ah.Dataset`! # # Instead, first convert to a `pd.DataFrame` using `ds.to_dataframe()` then use `ah.DataFrame` to serialize the data. # # </div> # ### ahlive.Data # # The aforementioned classes all inherit from the internal class, `ah.Data`. # # It's essentially a higher level `dict`. The `dict` is accessible through the `data` property. import ahlive as ah ah.Array([0, 1, 2], [3, 4, 5]).data # The keys of the `dict` store the subplots' positions as a (row, col) `tuple`. import ahlive as ah arr_dict = ah.Array([0, 1, 2], [3, 4, 5]).data print(arr_dict.keys()) # The values of the `dict` store the serialized input data as `xr.Dataset`s. import ahlive as ah arr_dict = ah.Array([0, 1, 2], [3, 4, 5]).data print(arr_dict.values()) # To access individual `xr.Dataset`s, simply specify the subplot's position. import ahlive as ah arr_dict = ah.Array([0, 1, 2], [3, 4, 5]).data print(arr_dict[1, 1]) # You can also manually edit the `xr.Dataset` after calling `finalize`. import ahlive as ah arr_dict = ah.Array([0, 1, 2], [3, 4, 5]).finalize().data for rowcol, ds in arr_dict.items(): ds['x'] += 10 print(arr_dict)
docs/source/essentials/serializing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Markov Chain Monte Carlo and Bayesian Inference # #### Problem Description # # My Garmin Vivosmart watch tracks the time I fall asleep and wake up each day using motion sensing and heart rate monitoring. To augment this data, I have estimated likelihoods that I am asleep based on the condition of my bedroom light (on/off) and if my phone is charging (yes/no). My objective is to use this data to create a model that returns the probability I am asleep at a specific time for a given status of my bedroom light and phone. For a specific time, the probability of sleep given information about my bedroom light and phone is expressed as: # # _P(sleepbedroom light, phone charging)_ # In probability theory terms, this is the posterior probability at a specific time I am asleep given the status of my bedroom light and condition of my phone. The time is a continuous variable and the two additional pieces of information are discrete variables each with two states. # ### Approach # # In order to solve this problem, I first need to express the final model in terms of Bayes Rule # The general method is as follows, with additional details provided in the respective sections. # # 1. Format the data (done in separate notebook) and visualize # 2. Choose function to represent probabilty of sleep given the time # 3. Use Markov Chain Monte Carlo and the data to find most likely parameters for the selected posterior distribution # 4. Use the posterior probability as the prior for applying Bayes Rule using additional data about light and phone status # 4. Build a model for Bayesian Inference to find the probabilty of sleep given the time, light condition, and phone charging info # Interpret and visualize model # 5. 
We can do this separately for both the sleep and waking data, although I will only build the complete model for the sleep data. # # I make extensive use of the PyMC3 library for Markov Chain Monte Carlo and Bayesian Inference methods. # #### Data Exploration # # The wake and sleep data contains more than two months of information. The watch records when I fall asleep and wake up based on motion and heart rate. It is not 100% accurate as it often will think I'm sleeping if I turn off notifications and am quietly reading in bed. Sometimes we have to deal with imperfect data, and, because there are more truthful than false observations, we can expect the correct data to have a larger effect on the model. # # First, we will import the required libraries, and visualize both the sleep data and the waking data. # + # pandas and numpy for data manipulation import pandas as pd import numpy as np # scipy for algorithms import scipy from scipy import stats # pymc3 for Bayesian Inference, pymc built on t import pymc3 as pm import theano.tensor as tt import scipy # matplotlib for plotting import matplotlib.pyplot as plt # %matplotlib inline from IPython.core.pylabtools import figsize import matplotlib import json s = json.load(open('bmh_matplotlibrc.json')) #matplotlib.rcParams.update(s) matplotlib.rcParams['figure.figsize'] = (10, 3) matplotlib.rcParams['font.size'] = 14 # Number of samples for Markov Chain Monte Carlo N_SAMPLES = 1000 # + # Data formatted in different notebook sleep_data = pd.read_csv('sleep_data.csv') wake_data = pd.read_csv('wake_data.csv') # Labels for plotting sleep_labels = ['9:00', '9:30', '10:00', '10:30', '11:00', '11:30', '12:00'] wake_labels = ['5:00', '5:30', '6:00', '6:30', '7:00', '7:30', '8:00'] # - #sleep data # + figsize(9, 3) # Sleep data plt.scatter(sleep_data['time_offset'], sleep_data['indicator'], s= 60, alpha=0.01, facecolor = 'b', edgecolors='b') plt.yticks([0, 1], ['Awake', 'Asleep']); plt.xlabel('PM Time'); plt.title('Falling Asleep 
Data') plt.xticks([-60, -30, 0, 30, 60, 90, 120], sleep_labels); # - #awake data # Wake data plt.scatter(wake_data['time_offset'], wake_data['indicator'], s= 50, alpha = 0.01, facecolor='r', edgecolors = 'r'); plt.yticks([0, 1], ['Awake', 'Asleep']); plt.xlabel('AM Time'); plt.title('Waking Up Data') plt.xticks([-60, -30, 0, 30, 60, 90, 120], wake_labels); # #### Logistic Function to Represent Transition # We need to decide on a function to represent the transition from being awake to sleeping. There are a number of acceptable models, and here we will assume this transition can be modeled as a logistic function. A logistic function (also called a sigmoid) is a non-linear function bounded between 0 and 1. # + figsize(9,3) # logistic function with beta def logistic(x,beta): return 1./(1.+np.exp(beta*x)) x=np.linspace(-5,5, 1000) for beta in [-5, -1, 0.5, 1, 5]: plt.plot(x, logistic(x, beta), label=r"$\beta$ = %.1f" %beta) plt.legend(); plt.title(r'Logistic Function with Different $\beta$ values'); # - # There is one problem with the basic logistic function as shown above: the transition is centered at 0. However, in my sleeping data, the transition is around 10:00 pm for sleeping and 6:00 am for waking. # We address this by adding an offset, called a bias, to adjust the location of the logistic function. # This introduces another unknown parameter, $\alpha$, which we will also find from Markov Chain Monte Carlo. # # The logistic function with various $\alpha$ and $\beta$ parameters is shown below. 
# +
# Logistic function with both beta and alpha parameters:
# beta controls direction/steepness, alpha shifts the transition location.
# (FIX: an exact duplicate of this cell — same function, same six plots,
# same title — followed immediately after; the accidental copy was removed.)
def logistic(x, beta, alpha=0):
    return 1.0 / (1.0 + np.exp(np.dot(beta, x) + alpha))


x = np.linspace(-5, 5, 1000)

plt.plot(x, logistic(x, beta=1), label=r"$\beta = 1$", ls="--", lw=2)
plt.plot(x, logistic(x, beta=-1), label=r"$\beta = -1$", ls="--", lw=2)
plt.plot(x, logistic(x, 1, 1), label=r"$\beta = 1, \alpha = 1$", color="darkblue")
plt.plot(x, logistic(x, 1, -1), label=r"$\beta = 1, \alpha = -1$", color="skyblue")
plt.plot(x, logistic(x, -1, 5), label=r"$\beta = -1, \alpha = 5$", color="orangered")
plt.plot(x, logistic(x, -1, -5), label=r"$\beta = -1, \alpha = -5$", color="darkred")
plt.legend();
plt.title(r'Logistic Function with Varying $\beta$ and $\alpha$');
# -

# $\beta$ shifts the direction and steepness of the curve, while $\alpha$ changes the location. We will use MCMC to find the most likely value of these parameters under the data.

# #### Prior Distribution for $\beta$ and $\alpha$

# We have no evidence to suggest what the prior distributions for the model parameters $\beta$ and $\alpha$ are ahead of time. Therefore, we can model them as if they came from a normal distribution. The normal, or Gaussian, distribution is defined by the mean, $\mu$, and the precision, $\tau$.
The precision is the reciprocal of the standard deviation, $\sigma$. The mean defines the location of the distribution and the precision shows the spread. A larger value of $\tau$ indicates the data is less spread out (it is more precise) and hence the variation is smaller. # Probability density functions for three normal distributions are shown below. # + # Set up the plotting normal=stats.norm x=np.linspace(-10,10,1000) mu=(-5,0,5) tau=(0.5,1,2.5) colors=("turquoise", "orchid", "darkred") #plot pdfs params=zip(mu,tau, colors) for param in params: y=normal.pdf(x, loc=param[0], scale=1/param[1]) plt.plot(x, y, label="$\mu = %d,\;\\tau = %.1f$" % (param[0], param[1]), color = param[2]) plt.fill_between(x, y, color = param[2], alpha = 0.3) plt.legend(); plt.xlabel("$x$") plt.ylabel("Probability Density") plt.title("Probability Density Functions for Normal Distributions"); # - # The expected value of a normal distribution is the mean. # Again, we have no assumptions about the value for either $\mu$ or $\tau$ in the prior distributions for $\alpha$ and $\beta$. When we initialize the model, we can use $\mu = 0$ and a relatively large variance such as $\tau = 0.05$. Markov Chain Monte Carlo will samples values of $\mu$ and $\tau$ that try to maximize the likelihood of $\alpha$ and $\beta$ under the data. # ### Markov Chain Monte Carlo # # Markov Chain Monte Carlo will sample both $\beta$ and $\alpha$ from two normal distributions to find the parameters. Each iteration (state), an estimate for both $\beta$ and $\alpha$ are drawn from the prior. If the parameters increase the probabilty of the data, the state is accepted, but if the parameters are not in agreement with the data, the state is rejected. Monte Carlo refers to the sampling part of the algorithm. 
Markov Chain means that the next state is only dependent on the current state in a first order process (second order depends on the current and 1 previous step, third order on the current and 2 previous steps and so on). MCMC will return every sample of the parameters for the number of specified steps. This is known as the model trace. To find the most likely parameters, we can take the average of the samples in the trace. MCMC does not give an exact answer, but rather tries to find the maximum likelihood states under the data.

# When modeling with MCMC up to 50% of the initial steps, referred to as the burn-in part of the trace, are discarded because the algorithm returns more likely parameters as the number of samples increases. The initial samples are less likely than the latter samples on average. There are a number of methods to test for convergence of MCMC, including visually inspecting the trace, and calculating the auto-correlation of the trace (a lower auto-correlation is an indicator of convergence). We will look at the trace in this example, but will not take rigorous steps to address convergence. There are also a number of methods to choose a smart starting value for the Markov Chain such as Maximum A Posteriori estimation. Choosing an intelligent initial value can speed up convergence.

# #### Posterior Probability of Sleep given Time
#
# We have all the pieces for the posterior probability and can now put them together. The logistic function describes the transition from awake to asleep, but we do not know the parameters $\beta$ and $\alpha$. The aim is to find the parameters of the logistic function which maximize the likelihood of the observed data. The parameters are assumed to come from a normal distribution defined by a mean, $\mu$ and a variance, $\tau$. The MCMC algorithm will sample values of $\mu$ and $\tau$ for both $\alpha$ and $\beta$ to try and maximize the parameters of the logistic function given the data.
# # The data is connected to the parameters through a Bernoulli Variable. # #### Bernoulli Variable # # A bernoulli variable is a discrete random variable that is either 0 or 1. In our example, we can model asleep or awake as a Bernoulli variable where awake is 0 and asleep is 1. The Bernoulli variable for sleep depends on the time, in a manner defined by the logistic function. # #### PyMC3 Model # We are using a powerful Bayesian Inference library in Python called PyMC3. This library has features for running Markov Chain Monte Carlo and other inference algorithms. # The following code creates the model and performs MCMC, drawing N_SAMPLES number of samples for $\beta$ and $\alpha$. The specific sampling algorithm is Metropolic Hastings. We feed in the data and tell the model it is observations of the Bernoulli variable. The model then tries to maximize the parameters under the data. #sort values by time offset sleep_data.sort_values('time_offset',inplace=True) time=np.array(sleep_data.loc[:,'time_offset']) #Observations are the indicator sleep_obs=np.array(sleep_data.loc[:,'indicator']) with pm.Model() as sleep_model: #alpha and beta parameters alpha=pm.Normal('alpha', mu=0.0, tau=0.05, testval=0.0) beta = pm.Normal('beta', mu=0.0, tau=0.05, testval=0.0) #probability from the logistic function p=pm.Deterministic('p',1./(1.+tt.exp(beta*time+alpha)) ) # Create the bernoulli parameter which uses the observed data observed=pm.Bernoulli('obs', p, observed=sleep_obs) # Using Metropolis Hastings Sampling step = pm.Metropolis() # Sample from the posterior using the sampling method sleep_trace = pm.sample(N_SAMPLES, step=step); # The trace variable contains all of the samples drawn from the posterior for $\beta$ and $\alpha$. We can graph these samples to explore how they change over the course of sampling. The idea of MCMC is that the samples get more likely given the data as the algorithm continues. 
In other words, the MCMC algorithm converges on the most likely values as the samples increase. We expect the latter values drawn from the posterior to be more accurate than the earlier values. In Markov Chain Monte Carlo, it is common practice to discard a portion of the samples, usually about 50%, which are known as the burn-in samples. For this report I am not discarding any samples, but in a real application, we would run the model for many more steps and discard the initial samples.
notebooks/MCMC-BayesianInference.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This code will be used to generate the table for PsyToolkit
# Author: <NAME> (http://shashikg.github.io/)
# -

import numpy as np
from numpy.random import choice as randC

# Just change these according to your experiment.
target_name = 'tar'  # file name of your target image
distractors_name = ['dist_1', 'dist_2']  # distractors can be of multiple types so list of names

# +
empty_name = 'empty'      # placeholder image name for unused grid slots
obj_size = (50, 50)       # width/height of one stimulus, in pixels
view_area = (250, 250)    # coordinate used to park empty slots off the grid
items = [6, 12, 18, 24]   # set sizes (number of objects shown per trial)
num_per_case = 15         # trials generated per set size


# +
def build_grid(obj_size):
    """Return a (25, 2) array of candidate positions: a 5x5 grid of
    obj_size-spaced offsets centred on the origin."""
    return np.array([(i * obj_size[0], j * obj_size[1])
                     for i in range(-2, 3)
                     for j in range(-2, 3)])


def build_table(items, num_per_case, positions, target_name,
                distractors_name, empty_name, view_area):
    """Build the PsyToolkit stimulus table.

    For each set size in `items`, generate `num_per_case` rows of the form
    " <set_size> <name> <x> <y> <name> <x> <y> ...": the target at one
    randomly chosen grid position, set_size-1 distractors of random type at
    the other chosen positions, and `empty_name` entries parked at
    `view_area` (outside the grid) for the remaining slots.

    Returns the rows as a list of strings.
    """
    table = []
    for item in items:
        for _ in range(num_per_case):
            # Pick `item` distinct grid positions; slot 0 is the target.
            rand_pos = positions[randC(len(positions), item, replace=False)]
            parts = [str(item), target_name,
                     str(rand_pos[0][0]), str(rand_pos[0][1])]
            for i in range(1, item):
                name = distractors_name[randC(len(distractors_name))]
                parts += [name, str(rand_pos[i][0]), str(rand_pos[i][1])]
            for _ in range(len(positions) - item):
                # BUG FIX: the off-grid coordinate was hardcoded as 250;
                # use the configured view_area so changing it takes effect.
                parts += [empty_name, str(view_area[0]), str(view_area[1])]
            table.append(" " + " ".join(parts))
    return table


idx = build_grid(obj_size)
table = build_table(items, num_per_case, idx, target_name,
                    distractors_name, empty_name, view_area)
np.savetxt("table.txt", table, fmt="%s")
# -
Psychophysics/Assignment/create_stimuli_table.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python3.7 (fastairoot) # language: python # name: fastairoot # --- # # Covid-19 analysis # > Analyzing coronavirus total cases, deaths and new cases by country. # # - author: <NAME> # - categories: [interactive] # - hide: false # - toc:true # - badges: true # - permalink: /covid-analysis/ # The visualizations presented here are based on data from the Johns Hopkins # University's GitHub [repository](https://github.com/CSSEGISandData/COVID-19). # # The source code of this app is published on GitHub # [here](https://github.com/erwulff/covid-19_data_exploration). # # I have also created a [Dash](https://plotly.com/dash/) app with more interactivity. For instance you're able to choose which countries to include in the plots. The app is hosted on [heroku](https://www.heroku.com) and can be acessed by clicking [here](https://covid19-data-exploration-app.herokuapp.com). 
#hide import plotly.io as pio pio.renderers #hide pio.renderers.default #hide pio.renderers.default = "colab" # + #hide import pandas as pd import plotly.graph_objects as go import numpy as np from datetime import datetime import plotly.graph_objects as go from ipywidgets import widgets from utils import datetimeify, process_df def get_frame(name): url = ( 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/' f'csse_covid_19_time_series/time_series_covid19_{name}_global.csv') return pd.read_csv(url, index_col='Country/Region') width = 2 # function definitions def total_vs_time(df, descr): date_list = datetimeify(df.index) traces = [] # Add traces, one for each slider step for ii, country in enumerate(df.keys()): if country == 'Sweden': width = 4 else: width = 2 traces.append( dict( line=dict(width=width), mode='lines+markers', name=str(country), x=date_list, y=df[country])) layout = dict( autosize=True, width=900, height=600, ) return traces, layout def new_vs_total(df, descr, window=1): # Create figure traces = [] # Add traces, one for each slider step for ii, country in enumerate(df.keys()): if country == 'Sweden': width = 4 else: width = 2 traces.append( dict( line=dict(width=width), name=str(country), mode='lines+markers', x=df[country].rolling(window=window).mean(), y=df.diff()[country].rolling(window=window).mean())) layout = dict(title='Covid-19 {} rolling mean of {} days'.format(descr, window), autosize=True, width=900, height=600, ) return traces, layout def new_vs_time(df, descr, window=1, countries=['Sweden', 'Norway', 'Denmark', 'Finland']): date_list = datetimeify(df.index) # Create figure traces = [] # Add traces, one for each slider step for ii, country in enumerate(countries): traces.append( dict( line=dict(width=width), mode='lines+markers', name=str(country), x=date_list[39:], y=df.iloc[39:].diff()[country].rolling(window).mean())) layout = dict( title='Covid-19 new {} rolling mean of {} days'.format(descr, 
window), autosize=True, width=900, height=600, ) return traces, layout # - #hide df_conf = process_df(get_frame('confirmed')) df_deaths = process_df(get_frame('deaths')) # ## Total confirmed cases/deaths # + #hide # Pick countries to plot countries = ['China', 'Sweden', 'Denmark', 'Norway', 'France', 'Spain', 'Germany', 'Switzerland', 'Finland', 'US', #'South Korea', #'Singapore', 'Italy', ] countries.sort() # Filter DataFrames df_conf = df_conf[countries] df_deaths = df_deaths[countries] # + #hide_input traces_conf, layout_conf = total_vs_time(df_conf, descr='confirmed cases') traces_deaths, layout_deaths = total_vs_time(df_deaths, descr='deaths') fig = go.Figure(layout=layout_conf) for trace in traces_deaths: trace.update({'visible': False}) fig.add_traces(traces_conf) fig.add_traces(traces_deaths) fig.update_layout(yaxis_type="log", xaxis_title='Date', yaxis_title='Covid-19 {}'.format('confirmed cases'), # title='Covid-19 {}'.format('confirmed cases'), ) visible_list = [True for ii in range(len(traces_conf))] + [False for ii in range(len(traces_deaths))] fig.update_layout( updatemenus=[ dict( x=0, xanchor='left', y=1.08, active=0, buttons=list([ dict(label="Confirmed cases", method="update", args=[{"visible": visible_list}, {'yaxis.title': 'Covid-19 {}'.format('confirmed cases')}, ]), dict(label="Confirmed deaths", method="update", args=[{"visible": np.invert(visible_list)}, {'yaxis.title': 'Covid-19 {}'.format('confirmed deaths')}, ]), ]), ), dict( x=0.22, xanchor='left', y=1.08, active=1, buttons=list([ dict(label="Linear", method="relayout", args=["yaxis.type", '']), dict(label="Logarithmic", method="relayout", args=["yaxis.type", 'log']), ]), ) ]) fig.layout.template = 'plotly_white+xgridoff' fig.show() # - #hide import plotly plotly.io.templates # ## New cases/deaths per day # + #hide_input window = 7 fig = go.Figure(layout=layout_conf) traces_conf, layout_conf = new_vs_time(df_conf, descr='confirmed cases', window=window, countries=countries) 
traces_deaths, layout_deaths = new_vs_time(df_deaths, descr='deaths', window=window, countries=countries) for trace in traces_deaths: trace.update({'visible': False}) fig.add_traces(traces_conf) fig.add_traces(traces_deaths) descr = 'confirmed cases' fig.update_layout( yaxis_title='New {} per day'.format(descr), xaxis_title='Total {}'.format(descr), title='Rolling mean of {} days'.format(window), yaxis_type="log", ) fig.update_layout( updatemenus=[ dict( x=0, xanchor='left', y=1.08, active=0, buttons=list([ dict(label="Confirmed cases", method="update", args=[{"visible": [True, True, True, True, False, False, False, False]}, {'yaxis.title': 'New {} per day'.format('confirmed cases')}, ]), dict(label="Confirmed deaths", method="update", args=[{"visible": [False, False , False , False, True, True, True, True]}, {'yaxis.title': 'New {} per day'.format('confirmed deaths')}, ]), ]), ), dict( x=0.22, xanchor='left', y=1.08, active=1, buttons=list([ dict(label="Linear", method="relayout", args=["yaxis.type", '']), dict(label="Logarithmic", method="relayout", args=["yaxis.type", 'log']), ]), ) ]) fig.layout.template = 'plotly_white+xgridoff' fig.show() # -
_notebooks/2020-04-05-covid19-dashboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.12 64-bit (''codeforecon'': conda)' # name: python3 # --- # # Geo-Spatial Visualisation # # ## Introduction # # In this chapter, you'll learn the basics of geospatial visualisation using code. If you're new to geospatial analysis, you should look at the introduction page first. # # You should be aware when following this chapter that installing geographic analysis packages isn't always the easiest and things can and do go wrong! (Some geospatial analysis courses recommend running everything in a Docker container.) # # ### Imports and packages # # We'll be using [**geopandas**](https://geopandas.org/index.html), the go-to package for vector spatial analysis in Python. The easiest way to install this package is using `conda install geopandas`; if you want to install it via pip then look at the [install instructions](https://geopandas.org/install.html). # # Let's import some of the packages we'll be using: import matplotlib.pyplot as plt import pandas as pd import os import numpy as np import geopandas as gpd from pathlib import Path # Set max rows displayed for readability pd.set_option("display.max_rows", 6) # Plot settings plt.style.use( "https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt" ) # For this page, use data limits and bigger default fig size plt.style.use( { "axes.autolimit_mode": "data", "figure.figsize": (10, 8), "figure.dpi": 125, "patch.linewidth": 0.2, } ) # ## Maps # # If you've looked at the introductory page on geospatial analysis, you'll know it's easy to make basic maps: you just need to load a shapefile and use the `.plot` method. 
# To run this example, you will need to download the files at this url: # https://github.com/aeturrell/coding-for-economists/tree/main/data/geo/uk_lad # and save them in the path 'data/geo/uk_lad' (path relative to where you're running the code) df = gpd.read_file( Path("data/geo/uk_lad/Local_Authority_Districts__May_2020__UK_BUC.shp") ) df.plot(); # As it goes, this is not very attractive, so let's see some options for customisation that will make it a little better. It's rare that you'll want to include the axes on maps, and these can be turned off by turning everything to do with the axes off. There are two ways to do further manipulations of the figure axis: calling plot returns an axis object or we can create one and then pass `ax=ax_name` to plot as a keyword argument. Colour can be changed using the `color=` keyword. ax = df.plot(color="#2ca25f") ax.axis("off"); # The lines that divide up the different local authority districts are faint. They can be controlled with the `edgecolor` and `linewidth` keywords. We can also change the background using the `fig.patch.set_facecolor` method, and add a scale using an extra package, [**matplotlib-scalebar**](https://github.com/ppinard/matplotlib-scalebar). # + from matplotlib_scalebar.scalebar import ScaleBar fig, ax = plt.subplots() df.plot(color="#2ca25f", edgecolor="k", linewidth=0.2, facecolor="blue", ax=ax) ax.axis("off") fig.patch.set_facecolor("#9ecae1") # Create scale bar scalebar = ScaleBar( 1, box_alpha=0, location="lower right", length_fraction=0.25, font_properties={"size": 12}, ) ax.add_artist(scalebar) plt.show() # - # ## Choropleths # A choropleth map shows different areas in colours according to a statistic that represents an aggregate summary of a geographic characteristic within each area. Population density or per-capita income are good examples of characteristics. The statistic shown might be unique values, equal intervals, quantiles, or the Fisher-Jenks natural breaks. 
# # First, though, let's create a basic choropleth. pay = pd.read_csv( "https://github.com/aeturrell/coding-for-economists/raw/main/data/geo/ashe_lad_median_pay_2020.csv" ) pay = pay.rename(columns={"lad": "LAD20CD"}) df = df.merge(pay, on=["LAD20CD"], how="inner") # + col = "Log median weekly pay (2020 GBP)" df[col] = np.log(df["Median weekly pay (2020 GBP)"]) fig, ax = plt.subplots() ax.set_title(col, loc="left") df.plot( ax=ax, column=col, legend=True, legend_kwds={"label": "", "shrink": 0.6}, vmin=round(df[col].min()), vmax=round(df[col].max()), ) ax.axis("off") plt.tight_layout() plt.show(); # - # This used **geopandas**. There's a dedicated plotting tool called [**geoplot**](https://residentmario.github.io/geoplot/index.html) as well. # + import geoplot as gplt import geoplot.crs as gcrs gplt.choropleth( df.to_crs("EPSG:4326"), hue=col, projection=gcrs.AlbersEqualArea(), cmap="viridis", legend=True, ); # - # Another way to create choropleths is to split the variable into a distinct number of ranges according to a scheme. In the below, we use `scheme='Quantiles'` with `k=4` to produce a choropleth with four distinct groups. fig, ax = plt.subplots(figsize=(8, 10)) ax.set_title(col, loc="left") ax.axis("off") df.plot(ax=ax, column=col, legend=True, scheme="Quantiles", k=4, legend_kwds={"loc": 2}); # A third kind of choropleth has distinct levels based on pre-existing categories. Our data doesn't have any of those, so let's generate some just to show how it works. df["cat_col"] = df["LAD20CD"].apply(lambda x: x[0]) df.iloc[:5, -3:] fig, ax = plt.subplots() df.plot( column="cat_col", categorical=True, ax=ax, legend=True, legend_kwds={"loc": 1, "frameon": True}, ) ax.set_axis_off() plt.show() # This is useful for, for example, plotting streets of different types. # ## Cartogram # # A cartogram is a thematic map in which the geographic size of regions is altered to be proportional to a variable. The shape of the region is warped or shrunk. 
They can be especially useful when trying to reflect the fact that regions with the largest area do not always have proportionate variable values. Actually, due to the tendency of the shape of political regions to reflect choices made 100s or 1000s of years previously, and for urban areas to be under separate political arrangements, quite often economic variables are anti-correlated with areas. # # A cartogram of pay in Wales demonstrates this. Some areas with higher median incomes, such as Monmouthshire and Conwy, almost completely fill their potential region areas. But others, including Blaenau Gwent and Powys, are shown much smaller than their actual areas. # # The important part of the plot below is the `gplt.cartogram` but, along with other bits to make the plot look better, we're adding `gplt.polyplot` to show what the true size of each region is when it is not proportional to another variable. # + df = df.to_crs("EPSG:4326") # Get a representative point for each region to annotate df["coords"] = df["geometry"].representative_point().apply(lambda x: x.coords[:][0]) df_wales = df[df["LAD20CD"].str.contains("W")].fillna(0.0) fig, ax = plt.subplots(figsize=(10, 5), dpi=125) gplt.cartogram(df_wales, scale="Median weekly pay (2020 GBP)", ax=ax) gplt.polyplot(df_wales, facecolor="lightgray", edgecolor="white", linewidth=0.5, ax=ax) # Add text annotation to the largest polygons for idx, row in df_wales.iterrows(): if row["geometry"].area > np.quantile(df.area, q=0.7): ax.annotate( text=row["LAD20NM"], xy=row["coords"], horizontalalignment="center", weight="bold", fontsize=8, color="black", ) plt.tight_layout(); # - # ## Quadtree # # A quadtree is a tree data structure that splits a space into increasingly small rectangular fractals. This plot takes a sequence of point or polygonal geometries as input and builds a choropleth out of their centroids.
Quadtrees are good at illustrating density, and are more flexible than a conventional choropleth: remember that choropleths can be the result of binning point occurrences into geographical regions or of data that are already aggregated to the region level. Quadtree is not a replacement for the latter, because the data are already aggregated. But, if you have point data, quadtree allows you to aggregate them *not* according to a pre-defined geography. Given pre-defined geographies such as Local Authority Districts may not be useful for the question you're thinking about (or worse could be downright misleading), this is a very helpful property. # # We'll use an example from the **geoplot** documentation to illustrate them. The most basic layer just turns a series of points into a quadtree. We'll use the lats and longs of collisions in New York: collisions = gpd.read_file(gplt.datasets.get_path("nyc_collision_factors")) gplt.quadtree(collisions, nmax=1); # Now let's project this onto a background of NY's boroughs. Because this is computationally expensive, we'll use the `simplify` method to reduce the complexity of the geometries we're using. boroughs = gpd.read_file(gplt.datasets.get_path("nyc_boroughs")) gplt.quadtree( collisions, nmax=1, projection=gcrs.AlbersEqualArea(), clip=boroughs.simplify(0.001), facecolor="lightgray", edgecolor="white", ); # We can enjoy the best of a choropleth's ability to show us magnitudes *alongside* the ability of quadtree to show us geographic density through smaller rectangles: gplt.quadtree( collisions, nmax=1, agg=np.mean, projection=gcrs.AlbersEqualArea(), clip=boroughs, hue="NUMBER OF PEDESTRIANS INJURED", cmap="plasma", edgecolor="k", legend=True, ); # ## KDE plot # # You probably know kernel density estimation from 1D distribution functions, but there's no reason not to have the same fun in 2D. Taking the collisions data again, below is an example for New York. 
The `thresh=0` keyword argument just tells the KDE estimation to leave no empty whitespace where the estimated values are at their lowest. ax = gplt.polyplot(boroughs, projection=gcrs.AlbersEqualArea(), linewidth=0.5, zorder=1) gplt.kdeplot( collisions, cmap="plasma", shade=True, thresh=0, clip=boroughs, ax=ax, zorder=0 ); # ## Spatio-Temporal Plots # # This is really going to combine two things we already have at our fingertips: space and time. There are various ways we could approach this. The first we'll see is to do a series of *small multiples*, also known as a *facet chart*, and advance time by one unit in each plot. The second is just a heat map in which the two dimensions are space and time. # # The data we'll be using tell a tragic story. They are counts of deaths that occurred within 28 days of (a known case of) coronavirus organised by the death date. Note that there are various issues with these data and they do not tell the whole story of coronavirus by any means. But this is just an illustrative example. # # We'll just bring in data for London. They don't come with their own geometry, so our first job is to merge them onto our existing UK local authority geodataframe, which does have a geometry. Fortunately, both data sets have the 'LAD20CD' and 'LAD20NM' columns, which makes this easier than it might have been. df = df[df["LAD20CD"].str.contains("E09")] cv_df = pd.read_parquet( "https://github.com/aeturrell/coding-for-economists/raw/main/data/geo/cv_ldn_deaths.parquet" ) df = df.merge(cv_df, on=["LAD20CD", "LAD20NM"], how="inner") df.head() # Now we will create the small multiple chart. There's quite a lot going on in the below so let's talk through the moving pieces. There's a perceptually uniform colormap from the **colorcet** package that ranges between a min of 0 and max of 250 as set by `vmin` and `vmax`. To plot every one of the 12 time periods, `plt.subplots` is called with `nrows=4` and `ncols=3` for 12 axes in total.
We `.flatten()` the axes to make it easier to iterate over them. We turn the legend off for all but one axis. With all of this set up, we iterate through the unique values of the date column, which is at monthly frequency, and plot each in turn on a separate axis object. For each, we also add a title that is the month and year. # # The overall effect is quite powerful: you can really see how deaths peaked around April 2020 and January 2021, having begun to pick up in November, but you can also see the long period of relatively few deaths 28 days after diagnosis during the summer months. # + from datetime import datetime import colorcet as cc col = "newDeaths28DaysByDeathDate" fig, axes = plt.subplots(nrows=4, ncols=3) axes = axes.flatten() legend_choice = [False] * len(axes) legend_choice[2] = True for i, date in enumerate(df["date"].unique()): df_cut = df[df["date"] == date] df_cut.plot( ax=axes[i], column=col, legend_kwds={"label": "", "shrink": 1.5}, vmin=0, vmax=250, legend=legend_choice[i], cmap=cc.cm.CET_L19, ) axes[i].axis("off") axes[i].set_title(pd.to_datetime(str(date)).strftime("%B %Y"), size=10) plt.suptitle("Coronavirus - deaths within 28 days of diagnosis", size=12); # - # Another way to show time and space is using a heatmap. The easiest way to plot a heatmap is to put the data into wide format first. To ensure we have nice labels though, we're going to cast the datetime variable in a month and year in the format 'Jan 20' using `strftime` first. df["Time"] = df.date.apply(lambda x: x.strftime("%b \n %y")) hmap_df = df.pivot("Time", "LAD20NM", "newDeaths28DaysByDeathDate").T # Puts the datetimes in the correct order hmap_df = hmap_df[list(df["Time"].unique())] hmap_df.head() # Now we have the data where we want it, we can call the `ax.imshow` method with a colourmap and set the labels to the index and columns in order to show how, for each London Borough, the number of deaths has changed over time.
Note that, unlike the geospatial map, this does not capture the linkages in space very well: but it arguably does a better job of capturing linkages in time. fig, ax = plt.subplots() im = ax.imshow(hmap_df.values, cmap=cc.cm.CET_L19) cbar = ax.figure.colorbar(im, ax=ax, aspect=50) ax.set_xticks(np.arange(len(hmap_df.columns))) ax.set_yticks(np.arange(len(hmap_df.index))) # Labels ax.set_xticklabels(hmap_df.columns, rotation=0, fontsize=8) ax.set_yticklabels(hmap_df.index, fontsize=8) plt.show() # ## Using basemaps # # All of the examples we've seen have just been lines around coloured polygons. This is, fortunately, not how most maps look. Instead, they have lots of detail or other features. Sometimes you want to incorporate other features into your map, or just give some context to where things are beyond the boundaries of your polygons. Enter different basemaps, ie different backgrounds for you to draw the geospatial data on top of. # # There are a few options to do this. Let's begin with **geoplot**, the package from the examples above. It has projection called `WebMercator` that is a 'real' map. It's easiest to illustrate with an example; let's use Wales again. But the whole point is that we can combine this map with other things, so we'll do a few points to show where some places are: LLandudno, Cardiff, Rhyll, Newport, and St Davids. St Davids is the smallest city in Britain, and we'll make the points proportional to population, so you may have to squint to see it! 
wales_places = { "name": ["Cardiff", "Llandudno", "Rhyl", "Newport", "St Davids"], "lat": [51.481583, 53.3241, 53.3191, 51.5842, 51.8812], "lon": [-3.179090, -3.8276, -3.4916, -2.9977, -5.2660], "pop": [335145, 20701, 25149, 145700, 1600], } wdf = pd.DataFrame(wales_places) gwdf = gpd.GeoDataFrame(wdf, geometry=gpd.points_from_xy(wdf.lon, wdf.lat)) gwdf.head() # Create basemap ax = gplt.webmap(df_wales, projection=gcrs.WebMercator()) # Add points for places gplt.pointplot(gwdf, ax=ax, hue="name", legend=True, sizes=gwdf["pop"] / 300); # ### Contextily # # **geoplot** has some basic functionality for basemaps but the [**contextlily**](https://contextily.readthedocs.io/en/latest/intro_guide.html) package provides a whole lot more flexibility and is solely focused on different 'context tiles' for your map. It's also designed to work with **geopandas** (it's built by the same people) and the syntax is quite similar to what you've seen already. # # Let's see an example of it in action. We'll ask for a bounding box around a place we're interested in, London's Canary Wharf. The option that brings in different basemaps is the `source=` keyword argument. There are a range of sources of basemaps but the real magic is that, given a location, **contextily** *downloads a background map for you*. Pretty cool. There is a full list of providers [here](https://contextily.readthedocs.io/en/latest/providers_deepdive.html). # + import contextily as cx west, south, east, north = (-0.030251, 51.499019, 0.002017, 51.509511) cw_img, cw_ext = cx.bounds2img( west, south, east, north, ll=True, source=cx.providers.Stamen.Toner ) # - # Because the map gets downloaded when you make this request, it takes a long time the first time it's run. But the map is locally cached so that, if you call the same function again, it will be much faster the second time. 
Let's plot the map out: fig, ax = plt.subplots() ax.axis("off") ax.imshow(cw_img, extent=cw_ext); # You can also get a map through a text search, though be warned you may get another place with the same name in a different region. You can do this type of map-grab using `cx.Place("...", source=...)`. # # Okay, it's great to download a map but what about combining it with some useful info? Well, we can do that too. Let's use **osmnx** to get some data on coffee shops in an area more tightly focused around Canary Wharf. We'll then pop these onto a map of the area. # + import osmnx as ox coffee_shops = ox.geometries_from_place( "Canary Wharf", tags={"amenity": "cafe"}, buffer_dist=300 ) coffee_shops = coffee_shops.to_crs("EPSG:3857") # - fig, ax = plt.subplots(dpi=150) coffee_shops.plot(ax=ax, markersize=80, color="darkviolet", edgecolor="k", marker="X") ax.axis("off") cx.add_basemap( ax, crs=coffee_shops.crs.to_string(), source=cx.providers.OpenStreetMap.Mapnik ); # You can also add boundary and polygon objects to **contextily** basemaps. # ## Interactive maps # # We'll use [**folium**](https://python-visualization.github.io/folium/index.html), a wrapper for the leaflet javascript library, to create interactive maps that we can layer information on top of. The library has a number of built-in tilesets from OpenStreetMap, Mapbox, and Stamen, and supports custom tilesets too. If it has a disadvantage, it's that it doesn't play that nicely with **geopandas**. It isn't easy to create an interactive choropleth using a **geopandas** dataframe, for example. However, choropleths can be added as a layer from a URL that points to a geoJSON file. # # In the simple example below, we'll do two things: create a basemap showing the City of London and add some markers to it that show new information on a mouse hover and when clicked. Let's put markers down for the places to get coffee in the serenity of a City church. 
# + import folium # create data for markers caff_df = pd.DataFrame( { "name": ["Host Coffee", "Cosy Coffee Corner", "The Wren Coffee"], "loc": [[51.5128, -0.0933], [51.5128, -0.0882], [51.512106, -0.096870]], } ) # create hover over marker msg tooltip = "Click here." # create map m = folium.Map(location=[51.5128, -0.0933], zoom_start=16) # add markers to map for index, row in caff_df.iterrows(): folium.Marker(row["loc"], popup=row["name"], tooltip=tooltip).add_to(m) # show map m # - # Folium can also be combined with a choropleth; see the [documentation](https://python-visualization.github.io/folium/quickstart.html#) for more information. # ## Static maps # # This is an honourable mention to [**py-staticmaps**](https://github.com/flopp/py-staticmaps), which provides the kind of tilesets we've seen already with **contextlily** and **folium** but includes easy methods to add: # # - markers # - image (PNG) markers # - geodesic lines, i.e. the shortest path between two points given the geometry. These are *very* important in general relativity, where they appear as solutions to the geodesic equation ${\displaystyle {d^{2}x^{\mu } \over ds^{2}}+\Gamma ^{\mu }{}_{\alpha \beta }{dx^{\alpha } \over ds}{dx^{\beta } \over ds}=0\ }$ # - geodesic circles # - polygons; and # - GPX Tracks, i.e. the paths traced out when you record the route of a run using GPS. # # # ## Review # # If you know how to : # # - ✅ plot geographic data on a map; # - ✅ plot choropleths of different kinds on maps; # - ✅ create cartographs; # - ✅ create quadtrees and when you might use one over a choropleth; # - ✅ create geographic kernel density estimate plots; # - ✅ use different basemaps in your geospatial visualisations; # - ✅ show time and space dimensions on plots; and # - ✅ produce interactive geospatial maps # # then you are well on your way to becoming a geospatial visualisation pro! #
geo-vis.ipynb
# + # https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html # http://www.pitt.edu/~naraehan/presentation/Movie+Reviews+sentiment+analysis+with+Scikit-Learn.html # https://medium.com/@cristhianboujon/how-to-list-the-most-common-words-from-text-corpus-using-scikit-learn-dad4d0cab41d import numpy as np from numpy.testing import assert_allclose corpus = [ "This is the first example.", "This example is the second example", "Do you want to see more examples, or is three examples enough?", ] from sklearn.feature_extraction.text import CountVectorizer # default tokenizer drops important words, NLTK tokenzier keeps everything, try: from nltk.tokenize import RegexpTokenizer except ModuleNotFoundError: # %pip install -qq nltk from nltk.tokenize import RegexpTokenizer tokenizer = lambda s: RegexpTokenizer(r"\w+").tokenize(s) # alphanumeric strings get tokenized vectorizer = CountVectorizer(tokenizer=tokenizer) B = vectorizer.fit_transform(corpus).todense() # bag of words, (N,T) print(vectorizer.get_feature_names()) [ "do", "enough", "example", "examples", "first", "is", "more", "or", "second", "see", "the", "this", "three", "to", "want", "you", ] print(B) """ [[0 0 1 0 1 1 0 0 0 0 1 1 0 0 0 0] [0 0 2 0 0 1 0 0 1 0 1 1 0 0 0 0] [1 1 0 2 0 1 1 1 0 1 0 0 1 1 1 1]] """ try: from tensorflow import keras except ModuleNotFoundError: # %pip install -qq tensorflow from tensorflow import keras t = keras.preprocessing.text.Tokenizer() t.fit_on_texts(corpus) print(t.document_count) print(t.word_counts) print(t.word_docs) print(t.word_index) """ 3 OrderedDict([('this', 2), ('is', 3), ('the', 2), ('first', 1), ('example', 3), ('second', 1), ('do', 1), ('you', 1), ('want', 1), ('to', 1), ('see', 1), ('more', 1), ('examples', 2), ('or', 1), ('three', 1), ('enough', 1)]) defaultdict(<class 'int'>, {'first': 1, 'the': 2, 'is': 3, 'this': 2, 'example': 2, 'second': 1, 'you': 1, 'see': 1, 'do': 1, 'or': 1, 'examples': 1, 'enough': 1, 'three': 1, 
'more': 1, 'want': 1, 'to': 1}) {'is': 1, 'example': 2, 'this': 3, 'the': 4, 'examples': 5, 'first': 6, 'second': 7, 'do': 8, 'you': 9, 'want': 10, 'to': 11, 'see': 12, 'more': 13, 'or': 14, 'three': 15, 'enough': 16} """ encoded_docs = t.texts_to_matrix(corpus, mode="count") print(encoded_docs) """ [[0. 1. 1. 1. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 2. 1. 1. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.] [0. 1. 0. 0. 0. 2. 0. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1.]] """ reverse_word_index = dict([(value, key) for (key, value) in t.word_index.items()]) """ {1: 'is', 2: 'example', 3: 'this', 4: 'the', 5: 'examples', 6: 'first', 7: 'second', 8: 'do', 9: 'you', 10: 'want', 11: 'to', 12: 'see', 13: 'more', 14: 'or', 15: 'three', 16: 'enough'} """ ## TF transform from sklearn.feature_extraction.text import TfidfTransformer tf_transformer = TfidfTransformer(use_idf=False).fit(B) Btf = tf_transformer.transform(B).todense() # Compute TF matrix "manually" # Btf[i,j] = L2-normalize(tf[i,:])_j from sklearn.preprocessing import normalize assert_allclose(Btf, normalize(B), atol=1e-2) assert_allclose(Btf, B / np.sqrt(np.sum(np.power(B, 2), axis=1)), atol=1e-2) ## TF-IDF transform tfidf_transformer = TfidfTransformer(use_idf=True, smooth_idf=True) Btfidf = tfidf_transformer.fit_transform(B).todense() # Compute idf "manually" Bbin = B > 0 # Bbin[i,j]=1 iff word j occurs at least once in doc i df = np.ravel(np.sum(Bbin, axis=0)) # convert from (1,T) to (T) n = np.shape(B)[0] idf = np.log((1 + n) / (1 + df)) + 1 assert_allclose(idf, tfidf_transformer.idf_, atol=1e-2) # Compute tf-idf "manually" tfidf = normalize(np.multiply(B, idf)) assert_allclose(tfidf, Btfidf, atol=1e-2) # Make a pipeline from sklearn.pipeline import Pipeline pipeline = Pipeline( [("bow", CountVectorizer(tokenizer=tokenizer)), ("tfidf", TfidfTransformer(use_idf=True, smooth_idf=True))] ) Btrain = pipeline.fit_transform(corpus).todense() assert_allclose(Btfidf, Btrain) corpus_test = ["This example is a new document.", "And 
this is the second test."] Btest = pipeline.transform(corpus_test) print(np.round(Btest.todense(), 3)) """ [[0. 0. 0.62 0. 0. 0.481 0. 0. 0. 0. 0. 0.62 0. 0. 0. 0. ] [0. 0. 0. 0. 0. 0.373 0. 0. 0.632 0. 0.48 0.48 0. 0. 0. 0. ]] """
notebooks/book1/01/tfidf_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Projection, Joining, and Sorting # ## Setup import ibis import os hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070) hdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port) con = ibis.impala.connect(host='quickstart.cloudera', database='ibis_testing', hdfs_client=hdfs) print('Hello!') # ## Projections: adding/selecting columns # # Projections are the general way for adding new columns to tables, or selecting or removing existing ones. table = con.table('functional_alltypes') table.limit(5) # First, the basics: selecting columns: # + proj = table['bool_col', 'int_col', 'double_col'] proj.limit(5) # - # You can make a list of columns you want, too, and pass that: to_select = ['bool_col', 'int_col'] table[to_select].limit(5) # You can also use the explicit `projection` or `select` functions table.select(['int_col', 'double_col']).limit(5) # We can add new columns by using named column expressions bigger_expr = (table.int_col * 2).name('bigger_ints') proj2 = table['int_col', bigger_expr] proj2.limit(5) # Adding columns is a shortcut for projection. In Ibis, adding columns always produces a new table reference table2 = table.add_column(bigger_expr) table2.limit(5) # In more complicated projections involving joins, we may need to refer to all of the columns in a table at once. This is how `add_column` works. We just pass the whole table in the projection: table.select([table, bigger_expr]).limit(5) # # To use constants in projections, we have to use a special `ibis.literal` function foo_constant = ibis.literal(5).name('foo') table.select([table.bigint_col, foo_constant]).limit(5) # ## Joins # # Ibis attempts to provide good support for all the standard relational joins supported by Impala, Hive, and other relational databases. 
# # - inner, outer, left, right joins # - semi and anti-joins # # To illustrate the joins we'll use the TPC-H tables for now region = con.table('tpch_region') nation = con.table('tpch_nation') customer = con.table('tpch_customer') lineitem = con.table('tpch_lineitem') # `region` and `nation` are connected by their respective `regionkey` columns join_expr = region.r_regionkey == nation.n_regionkey joined = region.inner_join(nation, join_expr) # If you have multiple join conditions, either compose them yourself (like filters) or pass a list to the join function # # join_exprs = [cond1, cond2, cond3] # joined = table1.inner_join(table2, join_exprs) # Once you've joined tables, you don't necessarily have anything yet. I'll put it in big letters # # ### Joins are declarations of intent # # After calling the join function (which validates the join condition, of course), you may perform any number of other operations: # # - Aggregation # - Projection # - Filtering # # and so forth. Most importantly, depending on your schemas, the joined tables may include overlapping column names that could create a conflict if not addressed directly. Some other systems, like pandas, handle this by applying suffixes to the overlapping column names and computing the fully joined tables immediately. We don't do this. # # So, with the above data, suppose we just want the region name and all the nation table data. We can then make a projection on the joined reference: table_ref = joined[nation, region.r_name.name('region')] table_ref.columns table_ref.limit(5) agged = table_ref.aggregate([table_ref.n_name.count().name('nrows')], by=['region']) agged # Things like `group_by` work with unmaterialized joins, too, as you would hope. 
joined.group_by(region.r_name).size() # ### Explicit join materialization # # If you're lucky enough to have two table schemas with no overlapping column names (lucky you!), the join can be *materialized* without having to perform some other relational algebra operation: # # joined = a.inner_join(b, join_expr).materialize() # # Note that this is equivalent to doing # # joined = a.join(b, join_expr)[a, b] # # i.e., joining and then selecting all columns from both joined tables. If there is a name overlap, just like with the equivalent projection, there will be an immediate error. # ### Writing down join keys # # In addition to having explicit comparison expressions as join keys, you can also write down column names, or use expressions referencing the joined tables, e.g.: # # joined = a.join(b, [('a_key1', 'b_key2')]) # # joined2 = a.join(b, [(left_expr, right_expr)]) # # joined3 = a.join(b, ['common_key']) # # These will be compared for equality when performing the join; if you want non-equality conditions in the join, you will have to form those yourself. # ### Join referential nuances # # There's nothing to stop you from doing many joins in succession, and, in fact, with complex schemas it will be to your advantage to build the joined table references for your analysis first, then reuse the objects as you go: # # joined_ref = (a.join(b, a.key1 == b.key2) # .join(c, [a.key3 == c.key4, b.key5 == c.key6])) # # Note that, at least right now, you need to provide explicit comparison expressions (or tuples of column references) referencing the joined tables. # ### Aggregating joined table with metrics involving more than one base reference # # Let's consider the case similar to the SQL query # # SELECT a.key, sum(a.foo - b.bar) AS metric # FROM a # JOIN b # ON a.key = b.key # GROUP BY 1 # # I'll use a somewhat contrived example using the data we already have to show you what this looks like. 
Take the `functional.alltypes` table, and suppose we want to compute the **mean absolute deviation (MAD) from the hourly mean of the double_col**. Silly, I know, but bear with me. # # First, the hourly mean: # + table = con.table('functional_alltypes') hour_dim = table.timestamp_col.hour().name('hour') hourly_mean = (table.group_by(hour_dim) .aggregate([table.double_col.mean().name('avg_double')])) hourly_mean # - # Okay, great, now how about the MAD? The only trick here is that we can form an aggregate metric from the two tables, and we then have to join it later. Ibis **will not** figure out how to join the tables automatically for us. mad = (table.double_col - hourly_mean.avg_double).abs().mean().name('MAD') # This metric is only valid if used in the context of `table` joined with `hourly_mean`, so let's do that. Writing down the join condition is simply a matter of writing: join_expr = hour_dim == hourly_mean.hour # Now let's compute the MAD grouped by `string_col` result = (table.inner_join(hourly_mean, join_expr) .group_by(table.string_col) .aggregate([mad])) result # ## Sorting # # Sorting tables works similarly to the SQL `ORDER BY` clause. 
We use the `sort_by` function and pass one of the following: # # - Column names # - Column expressions # - One of these, with a False (descending order) or True (ascending order) qualifier # # So, to sort by `total` in ascending order we write: # # table.sort_by('total') # # or by `key` then by `total` in descending order # # table.sort_by(['key', ('total', False)]) # # For descending sort order, there is a convenience function `desc` which can wrap sort keys # # from ibis import desc # table.sort_by(['key', desc(table.total)]) # Here's a concrete example involving filters, custom grouping dimension, and sorting # + table = con.table('functional_alltypes') keys = ['string_col', (table.bigint_col > 40).ifelse('high', 'low').name('bigint_tier')] metrics = [table.double_col.sum().name('total')] agged = (table .filter(table.int_col < 8) .group_by(keys) .aggregate(metrics)) sorted_agged = agged.sort_by(['bigint_tier', ('total', False)]) sorted_agged # - # For sorting in descending order, you can use the special `ibis.desc` function: agged.sort_by(ibis.desc('total'))
docs/source/notebooks/tutorial/3-Projection-Join-Sort.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # TP3 : Clustering et word2vec - <NAME>

# ## Imports

# +
import collections
import os
import string
import sys

import pandas as pd
from nltk import word_tokenize
from nltk.corpus import stopwords
from pprint import pprint
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cosine

# +
import nltk

# Download the tokenizer models needed by word_tokenize (no-op if cached).
nltk.download('punkt')
# -

data_path = "../data/txt/"

# ## Effectuer un clustering des bulletins de la décennie 1960 et adaptation du nombre de clusters désirés

# Choisir la décennie 1960 et nombre de clusters
DECADE = '1960'
N_CLUSTERS = 3

# + [markdown] tags=[]
# ## Charger tous les fichiers de la décennie et en créer une liste de textes
# -

# DECADE[:-1] turns '1960' into '196', matching any file of the 1960s decade.
files = [f for f in sorted(os.listdir(data_path)) if f"_{DECADE[:-1]}" in f]

# Exemple de fichiers
files[:10]

texts = [open(data_path + f).read() for f in files]

# Exemple de textes
texts[0][:400]

# ## Vectoriser les documents à l'aide de TF-IDF


def process_text(text, stem=True):
    """Tokenize ``text`` after removing punctuation.

    Parameters
    ----------
    text : str
        Raw document text.
    stem : bool, optional
        Kept for interface compatibility; currently unused (no stemming
        is applied).

    Returns
    -------
    list of str
        The word tokens of the punctuation-stripped text.
    """
    # BUG FIX: str.translate expects a translation table. The previous call
    # text.translate(string.punctuation) passed the punctuation string itself,
    # which does not remove punctuation. Build a deletion table instead.
    text = text.translate(str.maketrans('', '', string.punctuation))
    tokens = word_tokenize(text)
    return tokens


vectorizer = TfidfVectorizer(
    tokenizer=process_text,
    stop_words=stopwords.words('french'),
    max_df=0.5,
    min_df=0.1,
    lowercase=True)

# %time tfidf_vectors = vectorizer.fit_transform(texts)

tfidf_vectors

# NOTE(review): a dead duplicate cell creating `locavectorizer` with
# `tokenizer=preprocessing` was removed here — `preprocessing` was never
# defined, so the cell raised a NameError and its result was never used.

# ### Imprimer le vecteur tf-IDF du premier document

pd.Series(
    tfidf_vectors[0].toarray()[0],
    index=vectorizer.get_feature_names_out()
).sort_values(ascending=False)

# ## Comprendre les vecteurs et leurs
"distances" cosine([1, 2, 3], [1, 2, 3]) cosine([1, 2, 3], [1, 2, 2]) cosine([1, 2, 3], [2, 2, 2]) # ### Tests sur nos documents tfidf_array = tfidf_vectors.toarray() # Vecteur du document 0 tfidf_array[0] # Vecteur du document 1 tfidf_array[1] cosine(tfidf_array[0], tfidf_array[1]) # ## Appliquer un algorithme de clustering sur les vecteurs TF-IDF des documents # Pour en savoir plus sur le KMeans clustering : # - https://medium.com/dataseries/k-means-clustering-explained-visually-in-5-minutes-b900cc69d175 N_CLUSTERS = 3 km_model = KMeans(n_clusters=N_CLUSTERS) # ### Appliquer le clustering à l'aide de la fonction `fit_predict` clusters = km_model.fit_predict(tfidf_vectors) # + clustering = collections.defaultdict(list) for idx, label in enumerate(clusters): clustering[label].append(files[idx]) # - pprint(dict(clustering)) # ## Visualiser les clusters # ### Réduire les vecteurs à 2 dimensions à l'aide de l'algorithme PCA # Cette étape est nécessaire afin de visualiser les documents dans un espace 2D # # https://fr.wikipedia.org/wiki/Analyse_en_composantes_principales pca = PCA(n_components=2) reduced_vectors = pca.fit_transform(tfidf_vectors.toarray()) reduced_vectors[:10] # ### Générer le plot # + x_axis = reduced_vectors[:, 0] y_axis = reduced_vectors[:, 1] plt.figure(figsize=(10,10)) scatter = plt.scatter(x_axis, y_axis, s=100, c=clusters) # Ajouter les centroïdes centroids = pca.transform(km_model.cluster_centers_) plt.scatter(centroids[:, 0], centroids[:, 1], marker = "x", s=100, linewidths = 2, color='black') # Ajouter la légende plt.legend(handles=scatter.legend_elements()[0], labels=set(clusters), title="Clusters") # - # ### Le clustering ci-dessus regroupe les données en clusters les plus homogènes possibles ainsi que la technique k-means utilisé aide à regrouper itérativement les données autour de centres k (x). On peut dire que les résultats obtenus font sens car les trois couleurs sont assez séparés. 
# ## Entraînement d'un modèle word2vec (word embeddings) sur le fichier sents.txt # ### Imports # + import sys from gensim.models.phrases import Phrases, Phraser from gensim.models import Word2Vec import nltk from nltk.tokenize import wordpunct_tokenize from unidecode import unidecode # - # ### Chargement et traitement des phrases du corpus # ### Création d'un objet qui *streame* les lignes d'un fichier pour économiser de la RAM class MySentences(object): """Tokenize and Lemmatize sentences""" def __init__(self, filename): self.filename = filename def __iter__(self): for line in open(self.filename, encoding='utf-8', errors="backslashreplace"): yield [unidecode(w.lower()) for w in wordpunct_tokenize(line)] # + infile = f"../data/sents.txt" sentences = MySentences(infile) sentences = [sentence for sentence in sentences] # - # ### Détection des bigrams # # Article intéressant sur le sujet : https://towardsdatascience.com/word2vec-for-phrases-learning-embeddings-for-more-than-one-word-727b6cf723cf bigram_phrases = Phrases(sentences) # L'object `phrases` peut être vu comme un large dictionnaire d'expressions multi-mots associées à un score, le *PMI-like scoring*. Ce dictionnaire est construit par un apprentissage sur base d'exemples. # Voir les références ci-dessous : # - https://arxiv.org/abs/1310.4546 # - https://en.wikipedia.org/wiki/Pointwise_mutual_information type(bigram_phrases.vocab) # Il contient de nombreuses clés qui sont autant de termes observés dans le corpus len(bigram_phrases.vocab.keys()) # Prenons une clé au hasard : key_ = list(bigram_phrases.vocab.keys())[144] print(key_) # Le dictionnaire indique le score de cette coocurrence : bigram_phrases.vocab[key_] # Lorsque l'instance de `Phrases` a été entraînée, elle peut concaténer les bigrams dans les phrases lorsque c'est pertinent. 
# %time bigram_phrases[sentences[78]]

# ### Converting the `Phrases` into a `Phraser` object
#
# `Phraser` is an alias for `gensim.models.phrases.FrozenPhrases`, see
# https://radimrehurek.com/gensim/models/phrases.html.
#
# The `Phraser` is a *light* version of `Phrases`, more efficient for
# transforming sentences by concatenating bigrams.

bigram_phraser = Phraser(phrases_model=bigram_phrases)

# The `Phraser` converts selected unigrams of a list into bigrams when they
# have been identified as relevant.

# %time bigram_phraser[sentences[78]]

# ### Extracting trigrams
# We repeat the operation, this time feeding the bigram stream, to extract
# trigrams.

trigram_phrases = Phrases(bigram_phraser[sentences])

trigram_phraser = Phraser(phrases_model=trigram_phrases)

# ### Building a corpus of unigrams, bigrams and trigrams

corpus = list(trigram_phraser[bigram_phraser[sentences]])

print(corpus[:100])

# ## Training a Word2Vec model on this corpus

# %%time
model = Word2Vec(
    corpus, # The ngram corpus built above
    vector_size=32, # Dimensionality of the word vectors
    window=3, # Context size: 3 words before and 3 words after the focus word
    min_count=7, # Ignore words appearing fewer than 7 times in the corpus
    workers=4, # Parallelize training over 4 threads
    epochs=5 # Number of passes of the neural network over the data (gradient descent)
)

# #### Remark
#
# Training is parallelized over 4 workers: 4 "separate" models are each
# trained on roughly a quarter of the sentences, then the results are merged
# into a single model.
#
# Which worker receives which sentence is not deterministic (e.g. one worker
# may be slower), so values can vary slightly from one training run to the
# next, but overall the results remain consistent.

# ### Save the model to a file

outfile = f"../data/bulletins.model"
model.save(outfile)

# ## Exploring the model with `similarity` and `most_similar`

# ### Load the model into memory

model = Word2Vec.load("../data/bulletins.model")

# ### Compute the similarity between two terms with `similarity`

model.wv.similarity("honneur", "vous_soumettre")

model.wv.similarity("examiner", "terrains")

model.wv.similarity("communal", "conseil")

# ### Find the closest words to a given term with `most_similar`

model.wv.most_similar("bourgmestre", topn=5)

model.wv.most_similar("obligatoire", topn=5)

model.wv.most_similar("conclusions", topn=5)
# TP3/tp3.ipynb — source-notebook path marker (boundary between the two concatenated scripts)
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # #!/usr/bin/env python # coding: utf-8 # ######################################## # cluster Gaussian surrogate # ######################################## #Author: <NAME> #<EMAIL> #Last update: 2021-Jan-13 # ######################################## # System information # ######################################## #Print the python version and the name/input arguments # #%pylab inline import sys print('Clean everything.') sys.modules[__name__].__dict__.clear() import sys print("Python version: ", sys.version) print("This is the name of the script: ", sys.argv[0]) print(sys.argv) #Print the numpy version and set the random seed import numpy as np print('numpy version: ', np.__version__) RND_SEED=111 np.random.seed(RND_SEED) #Random string #Get a random string stamp for this specific run, used for the filename of image export. import random import string def get_random_string(length): return ''.join(random.choice(string.ascii_lowercase) for i in range(length)) rdstr=get_random_string(8) print('random stamp for this run:',rdstr) #Print the matplotlib version import matplotlib import matplotlib.pyplot as plt print('matplotlib version: ', matplotlib.__version__) #Print the GPy version import GPy print('GPy version: ', GPy.__version__) #Print the GPy version import sklearn print('sklearn version: ', sklearn.__version__) from sklearn.gaussian_process import GaussianProcessRegressor ####################################### # Model specification # ######################################## #How many pilot and sequential samples do we allow to get? #N_PILOT is the pilot samples we start with, usually a small number would do. #N_SEQUENTIAL is the number of sequential (noisy) samples we should draw from the black-box function. 
N_PILOT = 10 #int(sys.argv[1]) N_SEQUENTIAL = 90#int(sys.argv[2]) #Which method should we use for the Bayesian optimization scheme? #'FREQUENTIST' method means that the (hyper-)parameters are estimated by using some frequestist optimization like lbfgs. #'BAYESIAN' method means that the paramteres are estimated by putting a prior(Gamma)-posterior mechnism, the estimated value would be posterior mean. METHOD = 'FREQUENTIST' #Following 3 parameters are only for HMC Bayesian sampling, you have to choose METHOD = 'BAYESIAN' to use these parameters. N_BURNIN = 500 N_MCMCSAMPLES = 500 N_INFERENCE = 300 #Exploration rate is the probability (between 0 and 1) of following the next step produced by acquisition function. EXPLORATION_RATE = 0.5#1.0 #Do you want a cluster GP? If NO_CLUSTER = True, a simple GP will be used. NO_CLUSTER = False #Do you want to amplify the weight/role of response X when doing clustering? X_AMPLIFY = 1#/4096 #Do you want to subtract an amount from the response X when doing clustering? X_TRANSLATE = [] #Do you want to amplify the weight/role of response Y when doing clustering? Y_AMPLIFY = 1#/1000 #Do you want to subtract an amount from the response Y when doing clustering? Y_TRANSLATE = 0. #What is the maximal number of cluster by your guess? This option will be used only if NO_CLUSTER=False. N_COMPONENTS = 3 #When deciding cluster components, how many neighbors shall we look into and get their votes? This option will be used only if NO_CLUSTER=False. N_NEIGHBORS = 1 #Amount of NUGGET in the GP surrogate that stabilize the GP model, especially in FREQUENTIST approach. #NUGGET = 1e-4(Deprecated since ver 0.7, we can use a white kernel to estimate this) #How many time shall we jitter the diagonal of the covariance matrix when we encounter numerical non-positive definiteness in Gaussian process surrogate fitting. #This is a GPy parameter, default is 5 in GPy. 
N_JITTER = 5
# Overriding GPy default jitter — mutating a library function's defaults is
# global and affects every later jitchol call in this process.
GPy.util.linalg.jitchol.__defaults__ = (N_JITTER,)
print(GPy.util.linalg.jitchol.__defaults__)
# This is a GPy parameter, whether you want to normalize the response before/after fitting. Don't change unless necessary.
GPy_normalizer = True
# Whether we should sample repetitive locations in the sequential sampling procedure.
# If True, we would keep identical sequential samples no matter what. (Preferred if we believe a lot of noise)
# If False, we would re-sample when we run into identical sequential samples. (Default)
# In an acquisition maximization step, this is achieved by setting the acquisition function at repetitive samples to -Inf.
# In a random search step, this is achieved by repeating the random selection until we get a new location.
REPEAT_SAMPLE = False
# ver 0.7 new, we can use sklearn GP regression implementation.
USE_SKLEARN = True
ALPHA_SKLEARN = 1e-5  # Value added to the diagonal of the kernel matrix during fitting.
SKLEARN_normalizer = True

# +
##################################################
# Example 4: record dataset mapping, matmul      #
##################################################
EXAMPLE_NAME='matmul'
# This loads the dataset for building a black-box function.
# The format of the dataset should be a csv file, the first column being the response (Y);
# the rest of the columns are the d-dimensional inputs (X).
from numpy import genfromtxt
my_data = genfromtxt('Giulia_1000.csv', delimiter=',')
print(my_data.shape)
my_data = np.delete(my_data, (0), axis=0)  # drop the header row
Y_obs = my_data[:,2].astype(float).reshape(-1,1)
X_obs = my_data[:,1].astype(float).reshape(-1,1)
# Dimension of the input domain
#d = X_obs.shape[1]
print(X_obs.shape)
print(Y_obs.shape)

########################################
# Function wrapping                    #
########################################
# This wraps a real-world dataset into the format of a black-box function:
# given a point X, we return the observed response Y' of the dataset point
# closest to X, which makes the black-box function piece-wise constant.
from scipy.spatial.distance import cdist

def f_truth(X):
    """Piece-wise constant black-box objective backed by the recorded dataset.

    X is truncated to an integer and the response of the dataset row whose
    X_obs is nearest in absolute value is returned.

    NOTE: int(X) only succeeds for a single-element array, so this wrapper is
    one-dimensional by construction.
    """
    # (Cleanup: the original first computed a cdist/argmin euclidean lookup
    # whose results were immediately overwritten by the integer-based lookup
    # below; that dead code has been removed — behavior is unchanged.)
    ret_X = int(X)
    ret_Y = Y_obs[np.argmin(np.abs(ret_X-X_obs) ),:]
    print('Closest point in dataset is ',ret_X,' with observed value ',ret_Y[0])
    return ret_Y[0].astype(float)

# Sanity check of the wrapper at X = 128.
point1 = np.ones((1,1))*128.0
print(f_truth(point1))

# Search domain of the optimization (1-D: [1, 1000]).
bounds = np.array([[1,1000]]).astype(float)
#print(bounds)

# +
N_GRID = 1024
x_p = [None]*bounds.shape[0]
for i in range(bounds.shape[0]):
    x_p[i] = np.linspace(start=bounds[i,0], stop=bounds[i,1], num=N_GRID)
x0grid_ravel = np.vstack(np.meshgrid( *x_p )).reshape(bounds.shape[0],-1).T
# For this matmul example the generic meshgrid above is replaced by the
# multiples-of-8 grid 0, 8, ..., 4096 (kept as a template for other examples).
x0grid_ravel = np.arange(0,4096+1,8)
x0grid_ravel = x0grid_ravel.astype(float).reshape(-1,1)

# You must supply a parameter called 'bounds'.
inp_dim=bounds.shape[0]
# Which kernel do you want to use for your model? Such a kernel must be implemented as a GPy/sklearn kernel class.
if USE_SKLEARN==True:
    from sklearn.gaussian_process import *
    KERNEL_TEMPLATE = sklearn.gaussian_process.kernels.Matern(length_scale=np.ones(inp_dim,), length_scale_bounds=(1e-05, 100000.0), nu=3/2) + sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1.0, noise_level_bounds=(1e-03, 1000.0))
    #KERNEL_TEMPLATE = sklearn.gaussian_process.kernels.Matern(length_scale=np.ones(inp_dim,), length_scale_bounds=(1e-05, 100000.0), nu=1/2)
else:
    KERNEL_TEMPLATE = GPy.kern.Matern32(input_dim=inp_dim, variance=1., lengthscale=1.) + GPy.kern.White(input_dim=inp_dim)
    #KERNEL_TEMPLATE = GPy.kern.Exponential(input_dim=inp_dim, variance=1., lengthscale=1.)
# Do you want to penalize boundary sample points? If so, how?
def boundary_penalty(X, data_X=None):
    """Additive penalty on the acquisition value at location X.

    For the matmul example: locations whose integer truncation is a multiple
    of 8 get no penalty (0); every other location is penalized by -1e3.
    `data_X` is accepted for interface compatibility but unused.
    Return 0 (or an all-zero array) unconditionally to disable the penalty.
    """
    if X.astype(int) % 8 == 0:
        return 0
    else:
        return -1e3
    # (Cleanup: an unreachable duplicate `return -1e3` after the if/else was removed.)

def censor_function(Y):
    """Censoring applied to each raw response before it is stored.

    Currently the identity (no censoring); replace the body to clip or
    otherwise transform responses, e.g. -np.minimum(0.1, 10/np.asarray(Y)).
    """
    ret = Y
    return ret

# ver 0.6 new:
# if random_domain returns TRUE, the choice made by the random step is acceptable;
# if it returns FALSE, the choice is outside the search domain (or a repeat) and
# another random location should be drawn.
def random_domain(X, data_X=None):
    """Accept/reject a random candidate X against the sampled set data_X."""
    for i in range(data_X.shape[0]):
        # BUG FIX: the original tested `~REPEAT_SAMPLE` (bitwise NOT). Since
        # ~True == -2 is truthy, repeats were rejected even with
        # REPEAT_SAMPLE=True; `not` expresses the intended logic. Behavior is
        # unchanged for the default REPEAT_SAMPLE=False.
        if all(X.astype(int) == data_X[i,:].astype(int)) and not REPEAT_SAMPLE:
            return False
    # This is only for the matmul example: search only multiples of 8.
    return X.astype(int) % 8 == 0

# +
from datetime import datetime
# Record the wall-clock time at which sampling starts (formatted later).
samplestartingtime = datetime.now()

########################################
# Draw pilot samples                   #
########################################
# This cell only provides a pilot sample.
#Prepare pilot samples (X,Y) print('\n>>>>>>>>>>Sampling ',N_PILOT,' pilot samples...<<<<<<<<<<\n') print('Example : ',EXAMPLE_NAME) X_sample = np.zeros((N_PILOT,bounds.shape[0])) Y_sample = np.zeros((N_PILOT,1)) for j in range(bounds.shape[0]): X_sample[:,j] = np.random.uniform(bounds[j,0],bounds[j,1],size=(N_PILOT,1)).ravel() Y_sample = np.zeros((N_PILOT,1)) for k in range(N_PILOT): Y_sample[k,0] = f_truth(X_sample[k,:].reshape(1,-1)) Y_sample[k,0] = censor_function(Y_sample[k,0]) #print('Pilot X',X_sample) #print('Pilot Y',Y_sample) from scipy.stats import norm from scipy.optimize import minimize from sklearn.cluster import KMeans from sklearn.mixture import BayesianGaussianMixture from sklearn.neighbors import KNeighborsClassifier #The cGP procedure consists of following steps #Step 1. For observations, we can do a (unsupervised) (X,Y)-clustering and label them, different components are generated. #Step 2. For predictive locations, we can do a (supervised) k-nearest neighbor classification, and predict at each location based on which component it belongs to. #Step 3. We compute the acquisition function and then proceed to the next sample, after adding the new sample we repeat Step 1 and 2. #Prepare an up-to-date X_TRANSLATE, as the empirical mean of the X_sample if len(X_TRANSLATE)>0: X_TRANSLATE = np.mean(X_sample,axis=0) #Prepare an up-to-date Y_TRANSLATE, as the empirical mean of the Y_sample if Y_TRANSLATE != 0: Y_TRANSLATE = np.mean(Y_sample) #print(Y_sample - Y_TRANSLATE) #Prepare initial clusters, with XY-joint. 
XY_sample = np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE).reshape(-1,1)),axis=1) #dgm_XY = BayesianGaussianMixture( # #weight_concentration_prior_type="dirichlet_distribution", # weight_concentration_prior_type="dirichlet_process", # n_components=N_COMPONENTS,#pick a big number, DGM will automatically adjust # ) dgm_XY = KMeans(n_clusters=N_COMPONENTS, random_state=0) XY_label = dgm_XY.fit_predict(XY_sample) print('\n Initial labels for (X,Y)-joint clustering',XY_label) #Make copies of X_sample for X-only fitting and XY-joint fitting. X_sample_XY = np.copy(X_sample) Y_sample_XY = np.copy(Y_sample) #Prepare initial labels clf_XY = KNeighborsClassifier(n_neighbors=N_NEIGHBORS) clf_XY.fit(X_sample_XY, XY_label) #This is an artifact, we need to have at least d samples to fit a d-dimensional GP model (for its mean and variance) for c in np.unique(XY_label): if sum(XY_label==c)<=X_sample_XY.shape[1]: occ = np.bincount(XY_label) XY_label[np.where(XY_label==c)] = np.argmax(occ) print(X_sample,Y_sample) print(XY_sample) # + ######################################## # Draw sequential samples # ######################################## from scipy import stats from matplotlib import cm mycm = cm.Spectral VERBOSE = False GETPLOT = False #Prepare sequential samples (X,Y) print('\n>>>>>>>>>>Sampling ',N_SEQUENTIAL,' sequential samples...<<<<<<<<<<\n') X_sample = X_sample_XY Y_sample = Y_sample_XY cluster_label = XY_label def get_KER(): return KERNEL_TEMPLATE #This recode function will turn the labels into increasing order,e.g. [1, 1, 3, 3, 0] ==> [0, 0, 1, 1, 2]. def recode(label): level = np.unique(np.array(label)) ck = 0 for j in level: label[label==j]=ck ck=ck+1 return label #Main loop that guides us in sampling sequential samples comp_l = np.unique(np.array(cluster_label)) for it in range(N_SEQUENTIAL): print('\n>>>>>>>>>> ***** STEP ',it+1,'/',N_SEQUENTIAL,'***** <<<<<<<<<<') #Step 1. 
# Step 1 (inside the sequential loop). For observations, do an (unsupervised)
# (X,Y)-clustering and label them; different components are generated.
    # Create the (X,Y)-joint sample to conduct unsupervised clustering;
    # translations are refreshed from the current sample means when enabled.
    if len(X_TRANSLATE)>0:
        X_TRANSLATE = np.mean(X_sample,axis=0)
    if Y_TRANSLATE != 0:
        Y_TRANSLATE = np.mean(Y_sample)
    # The cluster must be based on the adjusted response value Y.
    XY_sample = np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE).reshape(-1,1)),axis=1)
    if NO_CLUSTER:
        print('>>NO CLUSTER, a GP surrogate.')
        cluster_label = np.zeros(XY_sample.shape[0])
    else:
        print('>>CLUSTERED, a cGP surrogate.',len(comp_l),' components in surrogate model.')
        cluster_label = dgm_XY.fit_predict(XY_sample)#cluster_label
    if VERBOSE:
        print('dgm label', cluster_label)
    # Again, ensure every cluster has at least d (covariate dimension) samples:
    # merge undersized clusters into the most frequent one.
    for c in np.unique(cluster_label):
        if sum(cluster_label==c)<=X_sample.shape[1]:
            occ = np.bincount(cluster_label)
            cluster_label[np.where(cluster_label==c)] = np.argmax(occ)
    if VERBOSE:
        print('merged label',cluster_label)
    cluster_label = recode(cluster_label)
    if VERBOSE:
        print('All labels are recoded: ',cluster_label)

    # Arrays storing the surrogate mean & variance at observed locations.
    n_component=len(np.unique(cluster_label))
    mean_fun = np.zeros((len(cluster_label),1))
    var_fun = np.copy(mean_fun)

    # Step 2. For predictive locations, do a (supervised) k-NN classification
    # and predict at each location based on which component it belongs to.
    clf_XY = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
    clf_XY.fit(X_sample,cluster_label)

    # Step 3. Either randomly search one location, or maximize the acquisition
    # function; after adding the new sample we repeat Steps 1 and 2.
    coin = np.random.uniform(0,1,1)
    if coin<EXPLORATION_RATE:
        print('>>>>Find next sample: acquisition proposal.')
        comp_l = np.unique(np.array(cluster_label))
        for c in comp_l:
            # Assign the corresponding X_sample and Y_sample values to the cluster coded by c.
            c_idx = np.where(cluster_label == int(c))
            if VERBOSE:
                print('>>>>Fitting component ',c,'/',len(comp_l)-1,' total components')
                print(c_idx)
            Xt = X_sample[c_idx].ravel().reshape(-1,X_sample.shape[1])
            Yt = Y_sample[c_idx].ravel().reshape(-1,1)
            # Fit the model with normalization.
            if USE_SKLEARN==True:
                mt = GaussianProcessRegressor(kernel=get_KER(), random_state=0, normalize_y=SKLEARN_normalizer,alpha=ALPHA_SKLEARN, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=int(10*bounds.shape[0]))
            else:
                mt = GPy.models.GPRegression(Xt, Yt, kernel=get_KER(), normalizer=GPy_normalizer)
            ###
            if METHOD == 'FREQUENTIST':
                ##############################
                # Frequentist MLE GP surrogate
                ##############################
                print('>>>>>>METHOD: frequentist MLE approach, component '+str(c)+'/'+str(len(comp_l)-1))
                print('>>>>>>SAMPLE: component sample size =',len(c_idx[0]) )
                if USE_SKLEARN==True:
                    mt.fit(Xt, Yt)
                    # No need to do more for sklearn GP.
                    print('>>>>>>MODULE: sklearn is used, l-bfgs optimization.')
                    if VERBOSE:
                        print(mt.kernel_, mt.log_marginal_likelihood(mt.kernel_.theta))
                else:
                    print('>>>>>>MODULE: GPy is used, l-bfgs optimization.')
                    mt.optimize(optimizer='bfgs', gtol = 1e-100, messages=VERBOSE, max_iters=int(10000*bounds.shape[0]))
                    mt.optimize_restarts(num_restarts=int(10*bounds.shape[0]),robust=True,verbose=VERBOSE)
            elif METHOD == 'BAYESIAN':
                if USE_SKLEARN:
                    sys.exit('FUTURE: Currently we cannot fit with Bayesian method using sklearn, we have GPy only.')
                ##############################
                # Fully Bayesian GP surrogate
                ##############################
                # Prior on the "hyper-parameters" of the GP surrogate model.
                print('>>>>>>METHOD: Fully Bayesian approach, component '+str(c)+'/'+str(len(comp_l)-1))
                print('>>>>>>SAMPLE: component sample size =',len(c_idx[0]) )
                mt.kern.lengthscale.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
                mt.kern.variance.set_prior(GPy.priors.Gamma.from_EV(1.,10.))
                # HMC sampling: fully Bayesian estimation of the kernel parameters.
                hmc = GPy.inference.mcmc.HMC(mt,stepsize=0.1)
                s = hmc.sample(num_samples=N_BURNIN) # Burnin
                s = hmc.sample(num_samples=N_MCMCSAMPLES)
                MCMC_samples = s[N_INFERENCE:] # cut out the burn-in period
                # Set the model parameters to the posterior mean.
                mt.kern.variance[:] = MCMC_samples[:,0].mean()
                mt.kern.lengthscale[:] = MCMC_samples[:,1].mean()

            #######################################
            # Optimization module (each component)
            #######################################
            # mt2 predicts on observed locations; needed regardless of search mode.
            if USE_SKLEARN:
                mt2 = mt.predict(Xt,return_std=True, return_cov=False)
                mean_fun[c_idx,0] = mean_fun[c_idx,0] + mt2[0].reshape(1,-1)
                var_fun[c_idx,0] = var_fun[c_idx,0] + mt2[1].reshape(1,-1)
            else:
                mt2 = mt.predict(Xt)
                mean_fun[c_idx,0] = mean_fun[c_idx,0] + mt2[0].reshape(1,-1)#*np.std(Yt) + np.mean(Yt)
                var_fun[c_idx,0] = var_fun[c_idx,0] + mt2[1].reshape(1,-1)#*np.std(Yt)*np.std(Yt)

            # Expected improvement (negated, per-component) as the objective to minimize.
            def my_obj(X):
                my_X = X.reshape(1, -1)
                my_X_label = clf_XY.predict(my_X)
                # If the location is not in this component, its EI is zero immediately.
                if my_X_label != int(c):
                    return -0
                my_xi = 0.0  # tuning parameter, set to zero for now
                if USE_SKLEARN:
                    my_gp = mt.predict(my_X, return_std=True, return_cov=False)
                    my_mu = my_gp[0]
                    my_sigma = my_gp[1]
                else:
                    my_gp = mt.predict(my_X)
                    my_mu = my_gp[0]
                    my_sigma = my_gp[1]
                my_sigma = np.sqrt(np.absolute(my_sigma)).reshape(-1, 1)
                my_mu = np.asarray(my_mu)
                my_sigma = np.asarray(my_sigma)
                with np.errstate(divide='warn'):
                    my_imp = my_mu - np.max(mt2[0].reshape(1,-1)) - my_xi
                    my_Z = np.divide(my_imp,my_sigma)
                    #norm = mvn(mean=np.zeros(X_sample[0,:].shape), cov=np.eye(X_sample.shape[1]))
                    my_ei = my_imp * norm.cdf(my_Z) + my_sigma * norm.pdf(my_Z)
                    my_ei[np.where(my_sigma <= 0.0)] = 0.0
                # Penalize the acquisition value via boundary_penalty; by default
                # this rejects non-multiples of 8 in the matmul example.
                my_ei = my_ei + boundary_penalty(my_X,X_sample)
                my_ei = float(my_ei.ravel())
                if VERBOSE:
                    print('EI=',my_ei,'\n')
                # Minimizers below expect a value to minimize; scale by component size.
                return - my_ei/Xt.shape[0]

            # Optimize my_obj with L-BFGS-B from a random start.
            from scipy.optimize import minimize
            #from scipy.optimize import dual_annealing
            func = my_obj#lambda x:my_obj(x,mt,clf_XY) # since the optimizer finds a minimum
            lw = bounds[:,0].tolist()
            up = bounds[:,1].tolist()
            #ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=123)  # dual annealing works for dim=1
            ret = minimize(fun=func, x0=np.random.uniform(bounds[:,0].T,bounds[:,1].T), bounds=list(zip(lw, up)), method='L-BFGS-B')
            print('>>>>Maximal acquisition function = ',-ret.fun,' attained at ',ret.x,' for component ',c)
            # NOTE(review): X_next is overwritten by each component iteration,
            # so the proposal of the *last* component wins — confirm intended.
            X_next = ret.x
    else:
        print('>>>>Find next sample: random search.')
        randomize_counter = 0
        X_rand = np.zeros((1,bounds.shape[0]))
        for j in range(bounds.shape[0]):
            X_rand[0,j] = np.random.uniform(bounds[j,0],bounds[j,1],1)
        X_next = X_rand
        # If we do not want repetitive samples, we sample until there are no points nearby.
# NOTE(review): `~random_domain(...)` relies on random_domain returning either
# Python False (~False == -1, truthy → keep resampling) or a numpy bool array
# (where ~ is elementwise logical not). `not` would be clearer — confirm
# before changing, since a Python True return would make ~True (-2) truthy.
        while ~random_domain(X_next,X_sample):
            if VERBOSE:
                print('Random search: ',X_next,'hits a repetitive sample OR does not hit the random_domain constraint, resampling...')
            X_rand = np.zeros((1,bounds.shape[0]))
            for j in range(bounds.shape[0]):
                X_rand[0,j] = np.random.uniform(bounds[j,0],bounds[j,1],1)
            X_next = X_rand
            randomize_counter = randomize_counter + 1
        print('>>>>Random search stops after ',randomize_counter,' steps.')

    # Optional: plotting features that track the optimization procedure.
    X_next = X_next.reshape(1,-1)
    Y_next = f_truth(X_next)
    print('----------')
    print('>>Next sample input is chosen to be: ',X_next)
    print('>>Next sample response is chosen to be: ',Y_next.ravel())
    if GETPLOT:
        # NOTE(review): mean_new/var_new/fine_grid/ei_grid/ei_next are not
        # defined in this loop — enabling GETPLOT would raise NameError here;
        # presumably carried over from another version of this script.
        X_new = x0grid_ravel
        if bounds.shape[0]==1:
            fig, axs = plt.subplots(2,figsize=(6,6))
            fig.suptitle('Fitted surrogate model, sample size = '+str(X_sample.shape[0]))
            axs[0].plot(X_new,mean_new,color='b')
            axs[0].scatter(X_sample,Y_sample,color='b')
            axs[0].set_title('observed samples and mean')
            ci = np.sqrt(var_new)#/mean_new
            axs[0].fill_between(X_new.ravel(), (mean_new-ci).ravel(), (mean_new+ci).ravel(), color='b', alpha=.1)
            axs[1].plot(fine_grid,ei_grid,color='k')
            axs[1].scatter(X_next,ei_next,marker='v',color='r',s=100)
            axs[1].text(s='x='+str(X_next),x=X_next,y=np.max(ei_grid),color='r',fontsize=12)
            axs[1].set_title('acquisition/expected improvement function')
            plt.show()
        if bounds.shape[0]==2:
            fig, axs = plt.subplots(2,figsize=(6,12))
            fig.suptitle('Fitted surrogate model, sample size = '+str(X_sample.shape[0]))
            axs[0].scatter(X_new[:,0],X_new[:,1],c=mean_new.ravel(),cmap=mycm)
            axs[0].scatter(X_sample[:,0],X_sample[:,1],c=Y_sample.ravel(),cmap=mycm,marker='v',s=200,edgecolors='k')
            axs[0].set_title('observed samples and mean')
            ci = np.sqrt(var_new)#/mean_new
            axs[1].scatter(fine_grid[:,0],fine_grid[:,1],c=ei_grid.ravel(),cmap=mycm)
            axs[1].scatter(X_next[0,0],X_next[0,1],marker='v',color=None,s=200,edgecolors='k')
            axs[1].text(s='x='+str(X_next),x=X_next[0,0],y=X_next[0,1],color='k',fontsize=12)
            axs[1].set_title('acquisition/expected improvement function')
            plt.show()
            #plt.savefig('cGP'+rdstr+'_step'+str(it)+'_'+str(n)+'_'+str(m)+'_'+str(l)+'.png')

    # Update X and Y from this step (censoring applied to the new response).
    X_sample = np.vstack((X_sample,X_next))
    Y_sample = np.vstack((Y_sample,censor_function(Y_next) ))

# +
sampleendingtime = datetime.now()
# Format both timestamps as strings: dd/mm/YY H:M:S
# (the datetime objects are replaced by their string representations).
samplestartingtime = samplestartingtime.strftime("%Y/%m/%d %H:%M:%S")
sampleendingtime = sampleendingtime.strftime("%Y/%m/%d %H:%M:%S")
print("Sample start date and time =", samplestartingtime)
print("Sample end date and time =", sampleendingtime)
#print(X_sample)
#print(Y_sample)
#print(np.hstack((Y_sample,X_sample)).shape)

# Persist the run: responses in column 0, inputs in the remaining columns.
if NO_CLUSTER==True:
    FILE_NAME = EXAMPLE_NAME+'_local_GP('+rdstr+')'
else:
    FILE_NAME = EXAMPLE_NAME+'_local_cGP_k='+str(N_COMPONENTS)+'('+rdstr+')'
np.savetxt(FILE_NAME+'.txt', np.hstack((Y_sample,X_sample)), delimiter =', ')

# Best/worst observed samples of the run.
sample_max_x = X_sample[np.argmax(Y_sample),:]
sample_max_f = np.round( Y_sample[np.argmax(Y_sample),:],3)
sample_min_x = X_sample[np.argmin(Y_sample),:]
sample_min_f = np.round( Y_sample[np.argmin(Y_sample),:],3)

# +
# Write a human-readable log of the whole configuration and results.
if True:
    original_stdout = sys.stdout # Save a reference to the original standard output
    with open(FILE_NAME+'.log', 'w') as f:
        sys.stdout = f # Change the standard output to the file we created.
#print('This message will be written to a file.')
        # All prints below go to the log file (stdout is redirected to f).
        print("Example: ",EXAMPLE_NAME,file=f)
        print("Sample start date and time = ", samplestartingtime)
        print("Sample end date and time = ", sampleendingtime)
        print("Python version: ", sys.version)
        #print("Filename of the script: ", sys.argv[0])
        print("Commandline arguments: ",sys.argv)
        print("Random seed: ",RND_SEED)
        print('Random stamp: ',rdstr)
        print('GPy version: ', GPy.__version__)
        print('sklearn version: ', sklearn.__version__)
        print('Number of pilot samples: ',N_PILOT)
        print('Number of sequential samples: ',N_SEQUENTIAL)
        print('Surrogate fitting method: ',METHOD)
        if METHOD=="BAYESIAN":
            print('MCMC>Burn-in steps: ',N_BURNIN)
            print('MCMC>Sampling steps: ',N_MCMCSAMPLES)
            print('MCMC>Inference sample length: ',N_INFERENCE)
        print('Surrogate> Are we using sklearn for GPR?: ',USE_SKLEARN)
        print('Surrogate> kernel type: ',get_KER())
        if USE_SKLEARN:
            print('Surrogate>sklearn>jittering: ',ALPHA_SKLEARN)
            print('Surrogate>sklearn>normalizer; ',SKLEARN_normalizer)
        else:
            #print('Surrogate>GPy>Nugget noise variance',NUGGET)
            print('Surrogate>GPy>jittering: ',N_JITTER)
            print('Surrogate>GPy>normalizer; ',GPy_normalizer)
        print('Surrogate> Fit a simple GP?(no cluster): ',NO_CLUSTER)
        print('Cluster> Response amplifier when clustering: ',Y_AMPLIFY)
        print('Cluster> Maximal number of components/clusters: ',N_COMPONENTS)
        print('Classify> k in k-nearest neighbor classifier',N_NEIGHBORS)
        print('Exploration rate: ',EXPLORATION_RATE)
        #print('Exploration> Do we perform grid-search in acquisition maximization?',GRID_SEARCH)
        print('Exploration> Do we allow repeat samples in random searching?',REPEAT_SAMPLE)
        print('domain bounds: ',bounds)
        #print('blur amount: ',blur_amount)
        print('sample minimum, f_min=',sample_min_f,' at ',sample_min_x)
        print('sample maximum, f_max=',sample_max_f,' at ',sample_max_x)
        print('>>Cluster X_AMPLIFY=',X_AMPLIFY)
        print('>>Cluster X_TRANSLATE=',X_TRANSLATE)
        print('>>Cluster Y_AMPLIFY=',Y_AMPLIFY)
        print('>>Cluster Y_TRANSLATE=',Y_TRANSLATE)
    sys.stdout = original_stdout # Reset the standard output to its original value

# #%debug
import os
print('Logs of run with stamp: ',rdstr,', is saved at',os.getcwd())
# -

# Final clustering pass over the full sample and its k-NN prediction.
cluster_label = dgm_XY.fit_predict(XY_sample)#cluster_label
prediction_label = clf_XY.predict(x0grid_ravel)#XY_predlabel
print('dgm label', cluster_label)
# Again, ensure every cluster has at least d (covariate dimension) samples.
for c in np.unique(cluster_label):
    if sum(cluster_label==c)<=X_sample.shape[1]:
        occ = np.bincount(cluster_label)
        cluster_label[np.where(cluster_label==c)] = np.argmax(occ)
print('merged label',cluster_label)

# +
########################################
# Plot the final model (1/2D)          #
########################################
mycm = cm.coolwarm
X_new = x0grid_ravel
fine_grid = x0grid_ravel
prediction_label = clf_XY.predict(x0grid_ravel)
new_label = clf_XY.predict(X_new)
# A color per component; usually there are no more than 5 components.
col=['r','k','y','b','g']
mean_new = np.zeros((len(prediction_label),1))
var_new = np.copy(mean_new)
fig = plt.figure(figsize=(12,12))
#from IPython.display import display

# Refresh translations and re-cluster the final joint sample.
if len(X_TRANSLATE)>0:
    X_TRANSLATE = np.mean(X_sample,axis=0)
if Y_TRANSLATE != 0:
    Y_TRANSLATE = np.mean(Y_sample)
XY_sample = np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE).reshape(-1,1)),axis=1)
#XY_sample = np.concatenate((X_sample,Y_AMPLIFY*Y_sample.reshape(-1,1)),axis=1)
if NO_CLUSTER:
    cluster_label = np.zeros(XY_sample.shape[0])
    prediction_label = x0grid_ravel*0.
else:
    cluster_label = dgm_XY.fit_predict(XY_sample)#cluster_label
    prediction_label = clf_XY.predict(x0grid_ravel)#XY_predlabel
if VERBOSE:
    print('dgm label', cluster_label)
# Again, ensure every cluster has at least d (covariate dimension) samples.
for c in np.unique(cluster_label):
    if sum(cluster_label==c)<=X_sample.shape[1]:
        occ = np.bincount(cluster_label)
        cluster_label[np.where(cluster_label==c)] = np.argmax(occ)
if VERBOSE:
    print('merged label',cluster_label)
cluster_label = recode(cluster_label)

# Refit the k-NN classifier on the final labels and re-predict the grid.
clf_XY = KNeighborsClassifier(n_neighbors=N_NEIGHBORS)
clf_XY.fit(X_sample,cluster_label)
#if GRID_SEARCH==True:
new_label = clf_XY.predict(X_new)

# Merge undersized clusters once more, then recode labels to 0..k-1.
for c in np.unique(cluster_label):
    if sum(cluster_label==c)<=X_sample.shape[1]:
        occ = np.bincount(cluster_label)
        cluster_label[np.where(cluster_label==c)] = np.argmax(occ)
cluster_label = recode(cluster_label)
print(cluster_label)
new_label = recode(new_label)
print(new_label)

# Fit one GP per final component and accumulate its predictions.
for c in np.unique(np.array(cluster_label)):
    print('Fitting component ',c)
    c = int(c)
    # Assign the corresponding X_sample and Y_sample values to the cluster coded by c.
    c_idx = np.where(cluster_label == int(c))
    if len(c_idx) <1:
        continue
    print(c_idx)
    Xt = X_sample[c_idx].ravel().reshape(-1,X_sample.shape[1])
    Yt = Y_sample[c_idx].ravel().reshape(-1,1)
    #print(Xt.shape,Yt.shape)
    #print(Xt,Yt)
    # Normalization / fit the model; drop any surrogate left from a previous iteration.
    if 'mt' in locals():
        del(mt) # mt exists.
    if USE_SKLEARN:
        mt = GaussianProcessRegressor(kernel=get_KER(), random_state=0, normalize_y=SKLEARN_normalizer, alpha=ALPHA_SKLEARN, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=int(10*bounds.shape[0]))
        mt.fit(Xt, Yt)
        print('Summary of component '+str(c)+' GP surrogate model.')
        print(mt.kernel_, mt.log_marginal_likelihood(mt.kernel_.theta))
    else:
        mt = GPy.models.GPRegression(Xt, Yt, kernel=get_KER(), normalizer=GPy_normalizer)
        mt.optimize(optimizer='bfgs', gtol = 10e-32, messages=False, max_iters=int(10000*bounds.shape[0]))
        mt.optimize_restarts(num_restarts=int(100*bounds.shape[0]),robust=True,verbose=False)
        #mt.plot()
        #plt.show()
        print('Summary of component '+str(c)+' GP surrogate model.')
        # NOTE(review): `display` is an IPython builtin (import commented out
        # above) — available in notebooks, NameError as a plain script.
        display(mt)
    # Grid locations classified into this component.
    c_idx_new = np.where(new_label == int(c))
    c_idx_new = c_idx_new[0]
    if len(c_idx_new) <1:
        continue
    print(c_idx_new)
    #print(mean_new.shape)
    if USE_SKLEARN:
        mt1 = mt.predict(X_new[c_idx_new],return_std=True, return_cov=False)
        mt2 = mt.predict(fine_grid,return_std=True, return_cov=False)
        mu_new = mt1[0]
        sigma2_new = np.power(mt1[1],2)  # sklearn returns std; square it into a variance
    else:
        mt1 = mt.predict(X_new[c_idx_new])
        mt2 = mt.predict(fine_grid)
        mu_new = mt1[0]
        sigma2_new = mt1[1]
    # Accumulate the per-component predictions at this component's grid points.
    mean_new[c_idx_new,0] = mean_new[c_idx_new,0] + mu_new.reshape(1,-1)
    var_new[c_idx_new,0] = var_new[c_idx_new,0] + sigma2_new.reshape(1,-1)
    if bounds.shape[0] == 1:
        # Component membership strip (at y=50) and this component's samples.
        plt.scatter(X_new[c_idx_new],np.ones(X_new[c_idx_new].shape)*0+50,c=col[c],alpha=1,marker='s',s=100)
        #plt.plot(fine_grid, mt2[0],color=col[c],linestyle='--',label='component '+str(c)+' mean')
        plt.scatter(X_sample[c_idx], Y_sample[c_idx],label='sequential samples',c=col[c],alpha=0.5)

# Overlay the ground-truth dataset and a reference line at x=112.
plt.plot(X_obs,Y_obs,c='m')
plt.vlines(x=112, ymin=0, ymax=35000,color='g',linewidth=10,alpha=0.5)

if bounds.shape[0] == 1:
    print('1d plot')
    plt.plot(X_new,mean_new,color='b',linewidth=4,alpha=0.5,label='overall mean')
    plt.fill_between(X_new.ravel(), (mean_new-np.sqrt(var_new)).ravel(), (mean_new+np.sqrt(var_new)).ravel(), color='b', alpha=.1, label='overall std. deviation')
    #plt.vlines(x=sample_max_x, ymin=0, ymax=sample_max_f,color='b',linestyle='-.')
    #plt.text(s='sample max:'+str(sample_max_f[0])+'\n @'+str(sample_max_x),x=sample_max_x,y=100,c='k',fontsize=12,rotation=45)
    #plt.text(s=str(sample_max_x[0]),x=sample_max_x,y=20,c='b',fontsize=12)
    ##plt.vlines(x=sample_min_x, ymin=0, ymax=sample_min_f,color='b',linestyle='-.')
    #plt.text(s='sample min:'+str(sample_min_f[0])+'\n @'+str(sample_min_x),x=sample_min_x,y=100,c='k',fontsize=12,rotation=45)
    #plt.text(s=str(sample_min_x[0]),x=sample_min_x,y=10,c='b',fontsize=12)
    plt.title('Sample size ='+str(N_PILOT)+'+'+str(N_SEQUENTIAL)+'='+str(X_sample.shape[0])+', '+str(len(np.unique(np.array(cluster_label))))+' components.'+\
        '\n f_max='+str(sample_max_f[0])+', x_max='+str(np.round(sample_max_x[0])),fontsize=32)
    plt.ylabel('Y', fontsize=24)
    plt.xlabel('X', fontsize=24)
    plt.xlim((0,1001))
    plt.ylim((0,2000))
    plt.xticks(np.linspace(0, 1000, 9), fontsize=24)
    plt.yticks(np.linspace(0, 2000, 6), fontsize=24)
    #plt.legend(fontsize=18,loc='lower center')

if bounds.shape[0] == 2:
    print('2d plot')
    plt.scatter(X_new[:,0], X_new[:,1], c=mean_new.ravel(),cmap=mycm,alpha=1.0,label='overall mean',marker='s',s=200)
    plt.scatter(X_sample[:,0], X_sample[:,1], c=Y_sample.ravel(),cmap=mycm,alpha=1.0,label='sequential samples',edgecolors='k')
    plt.scatter(X_sample_XY[:,0],X_sample_XY[:,1],c=Y_sample_XY.ravel(),cmap=mycm,alpha=1.0,label='pilot samples',marker='v',s=150,edgecolors='k')
    #plt.scatter(x=x_min[0], y=x_min[1], color='k')
    #plt.text(s='model min:'+str(f_min[0])+'\n @'+str(x_min),x=x_min[0],y=x_min[1],c='k',fontsize=12,rotation=45)
    #plt.scatter(x=x_max[0], y=x_max[1], color='k')
    #plt.text(s='model max:'+str(f_max[0])+'\n @'+str(x_max),x=x_max[0],y=x_max[1],c='k',fontsize=12,rotation=45)
    #plt.scatter(x=sample_max_x[0], y=sample_max_x[1], color='k')
    #plt.text(s='sample max:'+str(sample_max_f[0])+'\n @'+str(sample_max_x),x=sample_max_x[0],y=sample_max_x[1],c='k',fontsize=12,rotation=45)
#plt.text(s=str(sample_max_x[0]),x=sample_max_x,y=20,c='b',fontsize=12) #plt.scatter(x=sample_min_x[0], y=sample_min_x[1], color='k') #plt.text(s='sample min:'+str(sample_min_f[0])+'\n @'+str(sample_min_x),x=sample_min_x[0],y=sample_min_x[1],c='k',fontsize=12,rotation=45) #plt.text(s=str(sample_min_x[0]),x=sample_min_x,y=10,c='b',fontsize=12) #plt.title('Sample size ='+str(X_sample.shape[0]),fontsize=24) plt.xlabel('X1', fontsize=24) plt.ylabel('X2', fontsize=24) plt.xlim((-1,1)) plt.ylim((-1,1)) plt.xticks(np.linspace(-1, 1, 6), fontsize=24) plt.yticks(np.linspace(-1, 1, 6), fontsize=24) #plt.legend() plt.ylim((0,2100)) #plt.yticks(ticks=[21000,23000,25000,27000,29000,31000]) plt.show() fig.savefig(FILE_NAME+'.png', dpi=fig.dpi) print('sample minimum, f_min=',sample_min_f,' at ',sample_min_x) print('sample maximum, f_max=',sample_max_f,' at ',sample_max_x) print('>>Cluster X_AMPLIFY=',X_AMPLIFY) print('>>Cluster X_TRANSLATE=',X_TRANSLATE) print('>>Cluster Y_AMPLIFY=',Y_AMPLIFY) print('>>Cluster Y_TRANSLATE=',Y_TRANSLATE) # - print(np.concatenate((X_AMPLIFY*(X_sample-X_TRANSLATE),Y_AMPLIFY*(Y_sample-Y_TRANSLATE).reshape(-1,1)),axis=1)) print(np.concatenate((X_sample,Y_AMPLIFY*(Y_sample-0.).reshape(-1,1)),axis=1))
archive/Sec3_1/.ipynb_checkpoints/cGP_for_matmul_illustrative-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Fast Sign Adversary Generation Example # # This notebook demos find adversary example by using symbolic API and integration with Numpy # Reference: # # [1] Goodfellow, <NAME>., <NAME>, and <NAME>. "Explaining and harnessing adversarial examples." arXiv preprint arXiv:1412.6572 (2014). # https://arxiv.org/abs/1412.6572 # + # %matplotlib inline import mxnet as mx import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from data import mnist_iterator # - # Build Network # # note: in this network, we will calculate softmax, gradient in numpy dev = mx.gpu() batch_size = 100 train_iter, val_iter = mnist_iterator(batch_size=batch_size, input_shape = (1,28,28)) # input data = mx.symbol.Variable('data') # first conv conv1 = mx.symbol.Convolution(data=data, kernel=(5,5), num_filter=20) tanh1 = mx.symbol.Activation(data=conv1, act_type="tanh") pool1 = mx.symbol.Pooling(data=tanh1, pool_type="max", kernel=(2,2), stride=(2,2)) # second conv conv2 = mx.symbol.Convolution(data=pool1, kernel=(5,5), num_filter=50) tanh2 = mx.symbol.Activation(data=conv2, act_type="tanh") pool2 = mx.symbol.Pooling(data=tanh2, pool_type="max", kernel=(2,2), stride=(2,2)) # first fullc flatten = mx.symbol.Flatten(data=pool2) fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500) tanh3 = mx.symbol.Activation(data=fc1, act_type="tanh") # second fullc fc2 = mx.symbol.FullyConnected(data=tanh3, num_hidden=10) def Softmax(theta): max_val = np.max(theta, axis=1, keepdims=True) tmp = theta - max_val exp = np.exp(tmp) norm = np.sum(exp, axis=1, keepdims=True) return exp / norm def LogLossGrad(alpha, label): grad = np.copy(alpha) for i in range(alpha.shape[0]): grad[i, label[i]] -= 1. 
return grad # Prepare useful data for the network # + data_shape = (batch_size, 1, 28, 28) arg_names = fc2.list_arguments() # 'data' arg_shapes, output_shapes, aux_shapes = fc2.infer_shape(data=data_shape) arg_arrays = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes] grad_arrays = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes] reqs = ["write" for name in arg_names] model = fc2.bind(ctx=dev, args=arg_arrays, args_grad = grad_arrays, grad_req=reqs) arg_map = dict(zip(arg_names, arg_arrays)) grad_map = dict(zip(arg_names, grad_arrays)) data_grad = grad_map["data"] out_grad = mx.nd.zeros(model.outputs[0].shape, ctx=dev) # - # Init weight for name in arg_names: if "weight" in name: arr = arg_map[name] arr[:] = mx.rnd.uniform(-0.07, 0.07, arr.shape) # + def SGD(weight, grad, lr=0.1, grad_norm=batch_size): weight[:] -= lr * grad / batch_size def CalAcc(pred_prob, label): pred = np.argmax(pred_prob, axis=1) return np.sum(pred == label) * 1.0 def CalLoss(pred_prob, label): loss = 0. for i in range(pred_prob.shape[0]): loss += -np.log(max(pred_prob[i, label[i]], 1e-10)) return loss # - # Train a network num_round = 4 train_acc = 0. nbatch = 0 for i in range(num_round): train_loss = 0. train_acc = 0. nbatch = 0 train_iter.reset() for batch in train_iter: arg_map["data"][:] = batch.data[0] model.forward(is_train=True) theta = model.outputs[0].asnumpy() alpha = Softmax(theta) label = batch.label[0].asnumpy() train_acc += CalAcc(alpha, label) / batch_size train_loss += CalLoss(alpha, label) / batch_size losGrad_theta = LogLossGrad(alpha, label) out_grad[:] = losGrad_theta model.backward([out_grad]) # data_grad[:] = grad_map["data"] for name in arg_names: if name != "data": SGD(arg_map[name], grad_map[name]) nbatch += 1 #print(np.linalg.norm(data_grad.asnumpy(), 2)) train_acc /= nbatch train_loss /= nbatch print("Train Accuracy: %.2f\t Train Loss: %.5f" % (train_acc, train_loss)) # Get pertubation by using fast sign method, check validation change. 
# See that the validation set was almost entirely correct before the perturbations, but after the perturbations, it is much worse than random guessing. val_iter.reset() batch = val_iter.next() data = batch.data[0] label = batch.label[0] arg_map["data"][:] = data model.forward(is_train=True) theta = model.outputs[0].asnumpy() alpha = Softmax(theta) print("Val Batch Accuracy: ", CalAcc(alpha, label.asnumpy()) / batch_size) ######### grad = LogLossGrad(alpha, label.asnumpy()) out_grad[:] = grad model.backward([out_grad]) noise = np.sign(data_grad.asnumpy()) arg_map["data"][:] = data.asnumpy() + 0.15 * noise model.forward(is_train=True) raw_output = model.outputs[0].asnumpy() pred = Softmax(raw_output) print("Val Batch Accuracy after pertubation: ", CalAcc(pred, label.asnumpy()) / batch_size) # Visualize an example after pertubation. # Note that the prediction is consistently incorrect. import random as rnd idx = rnd.randint(0, 99) images = data.asnumpy() + 0.15 * noise plt.imshow(images[idx, :].reshape(28,28), cmap=cm.Greys_r) print("true: %d" % label.asnumpy()[idx]) print("pred: %d" % np.argmax(pred, axis=1)[idx])
example/adversary/adversary_generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Part 10: Federated Learning with Encrypted Gradient Aggregation # # In the last few sections, we've been learning about encrypted computation by building several simple programs. In this section, we're going to return to the [Federated Learning Demo of Part 4](https://github.com/OpenMined/PySyft/blob/dev/examples/tutorials/Part%204%20-%20Federated%20Learning%20via%20Trusted%20Aggregator.ipynb), where we had a "trusted aggregator" who was responsible for averaging the model updates from multiple workers. # # We will now use our new tools for encrypted computation to remove this trusted aggregator because it is less than ideal as it assumes that we can find someone trustworthy enough to have access to this sensitive information. This is not always the case. # # Thus, in this notebook, we will show how one can use SMPC to perform secure aggregation such that we don't need a "trusted aggregator". # # Authors: # - <NAME> - Twitter: [@theoryffel](https://twitter.com/theoryffel) # - <NAME> - Twitter: [@iamtrask](https://twitter.com/iamtrask) # # Section 1: Normal Federated Learning # # First, here is some code which performs classic federated learning on the Boston Housing Dataset. This section of code is broken into several sections. 
# # ### Setting Up # + import pickle import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import TensorDataset, DataLoader class Parser: """Parameters for training""" def __init__(self): self.epochs = 10 self.lr = 0.001 self.test_batch_size = 8 self.batch_size = 8 self.log_interval = 10 self.seed = 1 args = Parser() torch.manual_seed(args.seed) kwargs = {} # - # ## Loading the Dataset # + with open('../data/BostonHousing/boston_housing.pickle','rb') as f: ((X, y), (X_test, y_test)) = pickle.load(f) X = torch.from_numpy(X).float() y = torch.from_numpy(y).float() X_test = torch.from_numpy(X_test).float() y_test = torch.from_numpy(y_test).float() # preprocessing mean = X.mean(0, keepdim=True) dev = X.std(0, keepdim=True) mean[:, 3] = 0. # the feature at column 3 is binary, dev[:, 3] = 1. # so we don't standardize it X = (X - mean) / dev X_test = (X_test - mean) / dev train = TensorDataset(X, y) test = TensorDataset(X_test, y_test) train_loader = DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = DataLoader(test, batch_size=args.test_batch_size, shuffle=True, **kwargs) # - # ## Neural Network Structure # + class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.fc1 = nn.Linear(13, 32) self.fc2 = nn.Linear(32, 24) self.fc3 = nn.Linear(24, 1) def forward(self, x): x = x.view(-1, 13) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x model = Net() optimizer = optim.SGD(model.parameters(), lr=args.lr) # - # ## Hooking PyTorch # + import syft as sy hook = sy.TorchHook(torch) bob = sy.VirtualWorker(hook, id="bob") alice = sy.VirtualWorker(hook, id="alice") james = sy.VirtualWorker(hook, id="james") compute_nodes = [bob, alice] # - # **Send data to the workers** <br> # Usually they would already have it, this is just for demo purposes that we send it manually # + train_distributed_dataset = [] for batch_idx, (data,target) in 
enumerate(train_loader): data = data.send(compute_nodes[batch_idx % len(compute_nodes)]) target = target.send(compute_nodes[batch_idx % len(compute_nodes)]) train_distributed_dataset.append((data, target)) # - # ## Training Function # + def train(epoch): model.train() for batch_idx, (data,target) in enumerate(train_distributed_dataset): worker = data.location model.send(worker) optimizer.zero_grad() # update the model pred = model(data) loss = F.mse_loss(pred.view(-1), target) loss.backward() optimizer.step() model.get() if batch_idx % args.log_interval == 0: loss = loss.get() print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * data.shape[0], len(train_loader), 100. * batch_idx / len(train_loader), loss.item())) # - # ## Testing Function def test(): model.eval() test_loss = 0 for data, target in test_loader: output = model(data) test_loss += F.mse_loss(output.view(-1), target, reduction='sum').item() # sum up batch loss pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability test_loss /= len(test_loader.dataset) print('\nTest set: Average loss: {:.4f}\n'.format(test_loss)) # ## Training the Model import time # + t = time.time() for epoch in range(1, args.epochs + 1): train(epoch) total_time = time.time() - t print('Total', round(total_time, 2), 's') # - # ## Calculating Performance test() # # Section 2: Adding Encrypted Aggregation # # Now we're going to slightly modify this example to aggregate gradients using encryption. The main piece that's different is really 1 or 2 lines of code in the `train()` function, which we'll point out. For the moment, let's re-process our data and initialize a model for bob and alice. 
# + remote_dataset = (list(),list()) train_distributed_dataset = [] for batch_idx, (data,target) in enumerate(train_loader): data = data.send(compute_nodes[batch_idx % len(compute_nodes)]) target = target.send(compute_nodes[batch_idx % len(compute_nodes)]) remote_dataset[batch_idx % len(compute_nodes)].append((data, target)) def update(data, target, model, optimizer): model.send(data.location) optimizer.zero_grad() pred = model(data) loss = F.mse_loss(pred.view(-1), target) loss.backward() optimizer.step() return model bobs_model = Net() alices_model = Net() bobs_optimizer = optim.SGD(bobs_model.parameters(), lr=args.lr) alices_optimizer = optim.SGD(alices_model.parameters(), lr=args.lr) models = [bobs_model, alices_model] params = [list(bobs_model.parameters()), list(alices_model.parameters())] optimizers = [bobs_optimizer, alices_optimizer] # - # ## Building our Training Logic # # The only **real** difference is inside of this train method. Let's walk through it step-by-step. # # ### Part A: Train: # this is selecting which batch to train on data_index = 0 # update remote models # we could iterate this multiple times before proceeding, but we're only iterating once per worker here for remote_index in range(len(compute_nodes)): data, target = remote_dataset[remote_index][data_index] models[remote_index] = update(data, target, models[remote_index], optimizers[remote_index]) # ### Part B: Encrypted Aggregation # create a list where we'll deposit our encrypted model average new_params = list() # iterate through each parameter for param_i in range(len(params[0])): # for each worker spdz_params = list() for remote_index in range(len(compute_nodes)): # select the identical parameter from each worker and copy it copy_of_parameter = params[remote_index][param_i].copy() # since SMPC can only work with integers (not floats), we need # to use Integers to store decimal information. In other words, # we need to use "Fixed Precision" encoding. 
fixed_precision_param = copy_of_parameter.fix_precision() # now we encrypt it on the remote machine. Note that # fixed_precision_param is ALREADY a pointer. Thus, when # we call share, it actually encrypts the data that the # data is pointing TO. This returns a POINTER to the # MPC secret shared object, which we need to fetch. encrypted_param = fixed_precision_param.share(bob, alice, crypto_provider=james) # now we fetch the pointer to the MPC shared value param = encrypted_param.get() # save the parameter so we can average it with the same parameter # from the other workers spdz_params.append(param) # average params from multiple workers, fetch them to the local machine # decrypt and decode (from fixed precision) back into a floating point number new_param = (spdz_params[0] + spdz_params[1]).get().float_precision()/2 # save the new averaged parameter new_params.append(new_param) # ### Part C: Cleanup with torch.no_grad(): for model in params: for param in model: param *= 0 for model in models: model.get() for remote_index in range(len(compute_nodes)): for param_index in range(len(params[remote_index])): params[remote_index][param_index].set_(new_params[param_index]) # ## Let's put it all Together!! # # And now that we know each step, we can put it all together into one training loop! 
def train(epoch):
    """One epoch of federated training with encrypted (SMPC) gradient aggregation.

    For each batch index: (1) each remote worker takes one local optimizer step on
    its own model, (2) the updated parameters are secret-shared across bob/alice
    (with james as crypto provider), averaged while encrypted, and decoded, and
    (3) every worker's model is overwritten in place with the averaged parameters.
    Relies on module-level state: remote_dataset, compute_nodes, models,
    optimizers, params, bob, alice, james.
    """
    for data_index in range(len(remote_dataset[0])-1):
        # update remote models
        # Each worker trains its own model replica on its local batch.
        for remote_index in range(len(compute_nodes)):
            data, target = remote_dataset[remote_index][data_index]
            models[remote_index] = update(data, target, models[remote_index], optimizers[remote_index])

        # encrypted aggregation
        # For each parameter tensor: copy it from every worker, fixed-precision
        # encode (SMPC works on integers), secret-share, then average the shares
        # and decode back to floats. NOTE(review): the /2 assumes exactly two
        # compute nodes — confirm if compute_nodes ever grows.
        new_params = list()
        for param_i in range(len(params[0])):
            spdz_params = list()
            for remote_index in range(len(compute_nodes)):
                spdz_params.append(params[remote_index][param_i].copy().fix_precision().share(bob, alice, crypto_provider=james).get())

            new_param = (spdz_params[0] + spdz_params[1]).get().float_precision()/2
            new_params.append(new_param)

        # cleanup
        # Zero every worker's parameters in place, pull the models back locally,
        # then overwrite each parameter with the decrypted average.
        with torch.no_grad():
            for model in params:
                for param in model:
                    param *= 0

            for model in models:
                model.get()

            for remote_index in range(len(compute_nodes)):
                for param_index in range(len(params[remote_index])):
                    params[remote_index][param_index].set_(new_params[param_index])


def test():
    """Evaluate the first worker's model on the local test set and print the
    average (sum-reduced, then size-normalized) MSE loss."""
    models[0].eval()
    test_loss = 0
    for data, target in test_loader:
        output = models[0](data)
        test_loss += F.mse_loss(output.view(-1), target, reduction='sum').item() # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability

    test_loss /= len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}\n'.format(test_loss))


# +
# Run the full training loop and report the total wall-clock time.
t = time.time()

for epoch in range(args.epochs):
    print(f"Epoch {epoch + 1}")
    train(epoch)
    test()

total_time = time.time() - t
print('Total', round(total_time, 2), 's')
# -

# # Congratulations!!! - Time to Join the Community!
#
# Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
#
# ### Star PySyft on Github
#
# The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.
# # - [Star PySyft](https://github.com/OpenMined/PySyft) # # ### Join our Slack! # # The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org) # # ### Join a Code Project! # # The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft Github Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for github issues marked "good first issue". # # - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject) # - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) # # ### Donate # # If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups! # # [OpenMined's Open Collective Page](https://opencollective.com/openmined)
examples/tutorials/Part 10 - Federated Learning with Secure Aggregation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Fecha** : 2021-06-28 # # # Autograd: Diferenciación automática # # El paquete `autograd` proporciona una diferenciación automática para todas las operaciones # en tensores. Es un marco de definición por ejecución, lo que significa que su backprop es # definido por cómo se ejecuta su código, y que cada iteración puede ser # diferente. import torch # Creando a un tensor: # Cree un tensor 2x2 con capacidades de acumulación de gradientes x = torch.tensor([[1, 2], [3, 4]], requires_grad=True, dtype=torch.float32) print(x) # Ahora podemos hacer operaciónes sobre los tensores # Deduct 2 from all elements y = x - 2 print(y) # `y` fue creada como resultado de una operación, y por ello tiene ele atributo `grad_fn`: print(y.grad_fn) # ¿Qué pasa acá? print(x.grad_fn) # Let's dig further... y.grad_fn y.grad_fn.next_functions[0][0] y.grad_fn.next_functions[0][0].variable # + # Podemos aplicar más funciones sobre `y` z = y * y * 3 a = z.mean() # promedio print(z) print(a) # - # ## Gradients # # Ahora podemos retroceder con `out.backward()`, lo cual es equivalente a # `out.backward(torch.tensor([1.0]))`. # Backprop a.backward() # Print gradients $\frac{\text{d}a}{\text{d}x}$. # # # print(x.grad) # + x = torch.randn(3, requires_grad=True) y = x * 2 i = 0 while y.data.norm() < 1000: y = y * 2 i += 1 print(y) # + # Si no corremos hacia atrás en un escalar, necesitamos especificar el grad_output gradients = torch.FloatTensor([0.1, 1.0, 0.0001]) y.backward(gradients) print(x.grad) # - print(i) # ## Inferencia # Esta variable decide el rango del tensor por debajo de n = 3 # Tanto `x` como `w` que permiten la acumulación de gradientes. 
x = torch.arange(1., n + 1, requires_grad=True) w = torch.ones(n, requires_grad=True) z = w @ x z.backward() print(x.grad, w.grad, sep='\n') # Solo `w` que permite la acumulación de gradiente x = torch.arange(1., n + 1) w = torch.ones(n, requires_grad=True) z = w @ x z.backward() print(x.grad, w.grad, sep='\n') # + x = torch.arange(1., n + 1) w = torch.ones(n, requires_grad=True) # Independientemente de lo que haga en este contexto, todos los tensores de la # antorcha no tendrán acumulación de gradiente with torch.no_grad(): z = w @ x try: z.backward() # PyTorch arrojará un error aquí, ya que z no tiene acumulaciones graduales. except RuntimeError as e: print('RuntimeError!!! >:[') print(e) # - z # ## More stuff # # La documentación del paquete de diferenciación automática (`autograd`) se encuentra en # http://pytorch.org/docs/autograd.
notebooks/001-Autograd-Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Solution(object):
    def isPowerOfFour(self, num):
        """
        :type num: int
        :rtype: bool
        """
        # Powers of four are strictly positive, so reject everything else early.
        if num <= 0:
            return False
        # Strip factors of 4; a number is a power of four exactly when
        # nothing but a single factor of 1 remains afterwards.
        remaining = num
        while remaining % 4 == 0:
            remaining //= 4
        return remaining == 1
algorithms/342-power-of-four.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Analysis report I

# ## Importing the data set

import pandas as pd

# Semicolon-separated CSV of rental listings.
dados_aluguel = pd.read_csv('dados/aluguel.csv', sep=";")
dados_aluguel.sample(5)

# ## General information about the data set

# One-column table listing the dtype of each variable.
tabela_tipos = pd.DataFrame(dados_aluguel.dtypes, columns=["Tipos de dados"])
tabela_tipos.columns.name = "Variáveis"
tabela_tipos

shape = dados_aluguel.shape
print(f"A base de dados apresenta {shape[0]} registros (imóveis) e {shape[1]} variáveis")
Pandas/1 - Base de Dados.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # This file contains the common functions and definitions used in the Egeria Hands on # Lab Notebooks. # # These functions define the location and names of Coco Pharmaceutical's # Open Metadata and Governance (OMAG) Server Platforms where the metadata servers # and governance servers run. The `os.environ.get` function tests for the presence # of the environment variables that define the platform network addresses in the # Docker and Kubernetes runtime environments. If the environment variables are not # present then the localhost defaults are used. import os import csv import time # # Disable certificate checking for local development use with self-signed certificate # import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) import os os.environ['CURL_CA_BUNDLE'] = '' # # This is the location of the file system that is used in the lab. It defaults to a "data" subfolder in # the platform's home folder. Here you will find the runtime files of the platforms and the data files # used in various labs. The default value can be overridden using the "fileSystemRoot" environment variable. # fileSystemRoot = os.environ.get('fileSystemRoot', 'data') # # This is directory where the sample data from the Egeria distribution has been unpacked in. The default value # describes its location in an IntelliJ workspace. The default value can be overridden using the "egeriaDistributionRoot" # environment variable. # egeriaSampleDataRoot = os.environ.get('egeriaSampleDataRoot', '.') # # Flag to enable debug, this cases extra information to be printed including rest calls request and response details # Switching this flag to True produces a very large amount of output and is not recommended. 
# A targeted use of this flag is recommended, set this before and reset this after the code you would like to produce debug # isDebug = False # + # # Definitions of the Coco Pharmaceuticals Environment # # These are the three main platforms used to run Egeria's OMAG Servers corePlatformURL = os.environ.get('corePlatformURL', 'https://localhost:9443') corePlatformName = "Core Platform" dataLakePlatformURL = os.environ.get('dataLakePlatformURL', 'https://localhost:9444') dataLakePlatformName = "Data Lake Platform" devPlatformURL = os.environ.get('devPlatformURL', 'https://localhost:9445') devPlatformName = "Dev Platform" # The OMAG Server Platforms host one to many OMAG Servers. An OMAG Server could be # a metadata server or a specialized governance server. Its behavior is determined # by a configuration document that defines which OMAG services are activated. # All OMAG Server Platforms support the administration commands to define a server's # configuration document. It is also possible to create configuration documents # through admin calls to one OMAG Server Platform and then deploy them to the # OMAG Server Platform where they are to run. In the Egeria hands on lab, the # OMAG Server configuration is created on the dev platform and deployed to the # production platforms as needed. adminPlatformURL = devPlatformURL # <NAME> is the IT Administration Lead at Coco Pharmaceuticals. # He does all of the configuration for the OMAG Servers. Other users are introduced and make # calls to the server as required adminUserId = "garygeeke" petersUserId = "peterprofile" erinsUserId = "erinoverview" calliesUserId = 'calliequartile' faithsUserId = 'faithbroker' # These are the names of the metadata servers used by Coco Pharmaceuticals. 
Each metadata # server runs as an OMAG Server on one of the OMAG Server Platforms cocoMDS1PlatformURL = dataLakePlatformURL cocoMDS1PlatformName = dataLakePlatformName cocoMDS1Name = "cocoMDS1" cocoMDS2PlatformURL = corePlatformURL cocoMDS2PlatformName = corePlatformName cocoMDS2Name = "cocoMDS2" cocoMDS3PlatformURL = corePlatformURL cocoMDS3PlatformName = corePlatformName cocoMDS3Name = "cocoMDS3" cocoMDS4PlatformURL = dataLakePlatformURL cocoMDS4PlatformName = dataLakePlatformName cocoMDS4Name = "cocoMDS4" cocoMDS5PlatformURL = corePlatformURL cocoMDS5PlatformName = corePlatformName cocoMDS5Name = "cocoMDS5" cocoMDS6PlatformURL = corePlatformURL cocoMDS6PlatformName = corePlatformName cocoMDS6Name = "cocoMDS6" cocoMDSxPlatformURL = devPlatformURL cocoMDSxPlatformName = devPlatformName cocoMDSxName = "cocoMDSx" # The open metadata servers are linked together through the following open metadata cohorts. # The servers linked via a cohort can exchange open metadata either through federated # queries or metadata replication. cocoCohort = "cocoCohort" devCohort = "devCohort" iotCohort = "iotCohort" ctsCohort = "ctsCohort" # This is the view server that runs the services that support Egeria's user interface. cocoView1PlatformURL = dataLakePlatformURL cocoView1PlatformName = dataLakePlatformName cocoView1Name = "cocoView1" # These are the names of the governance servers used in Coco Pharmaceuticals' data lake. # Each governance server runs as an OMAG Server on the Data Lake OMAG Server Platform. # They also connect to a metadata server to retrieve their configuration and store their # results. 
governDL01PlatformURL = dataLakePlatformURL governDL01PlatformName = dataLakePlatformName governDL01Name = "governDL01" governDL01ServerType = "Engine Host" governDL01MDS = "cocoMDS1" exchangeDL01PlatformURL = dataLakePlatformURL exchangeDL01PlatformName = dataLakePlatformName exchangeDL01Name = "exchangeDL01" exchangeDL01ServerType = "Integration Daemon" exchangeDL01MDS = "cocoMDS1" # + # # Common processing of REST API errors. # import requests import pprint import json def printResponse(response): prettyResponse = json.dumps(response.json(), indent=4) print(" ") print("Response: ") print(prettyResponse) print(" ") def printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response): if response.status_code == 200: relatedHTTPCode = response.json().get('relatedHTTPCode') if relatedHTTPCode == 200: print("Unexpected response from server " + serverName) printResponse(response) else: exceptionErrorMessage = response.json().get('exceptionErrorMessage') exceptionSystemAction = response.json().get('exceptionSystemAction') exceptionUserAction = response.json().get('exceptionUserAction') if exceptionErrorMessage != None: print(exceptionErrorMessage) print(" * " + exceptionSystemAction) print(" * " + exceptionUserAction) else: print("Unexpected response from server " + serverName) printResponse(response) else: print("Unexpected response from server platform " + serverPlatformName + " at " + serverPlatformURL) printResponse(response) # # Rest calls, these functions issue rest calls and print debug if required. 
#
def issuePost(url, body):
    """Issue a POST with a JSON body and return the raw response.

    Debug tracing of the request/response is controlled by the module-level
    isDebug flag.
    """
    if (isDebug):
        printRestRequest("POST " + url)
        printRestRequestBody(body)
    jsonHeader = {'content-type':'application/json'}
    response = requests.post(url, json=body, headers=jsonHeader)
    if (isDebug):
        printRestResponse(response)
    return response


def issueDataPost(url, body):
    """Issue a POST with a plain-text body and return the raw response."""
    if (isDebug):
        printRestRequest("POST " + url)
        printRestRequestBody(body)
    jsonHeader = {'content-type':'text/plain'}
    # Fix: the content-type header was built but never passed to requests.post,
    # so the request previously went out without the intended text/plain header.
    response = requests.post(url, data=body, headers=jsonHeader)
    if (isDebug):
        printRestResponse(response)
    return response


def issuePut(url, body):
    """Issue a PUT with a JSON body and return the raw response."""
    if (isDebug):
        printRestRequest("PUT " + url)
        printRestRequestBody(body)
    jsonHeader = {'content-type':'application/json'}
    response = requests.put(url, json=body, headers=jsonHeader)
    if (isDebug):
        printRestResponse(response)
    return response


def issueGet(url):
    """Issue a GET and return the raw response."""
    if (isDebug):
        printRestRequest("GET " + url)
    jsonHeader = {'content-type':'application/json'}
    response = requests.get(url, headers=jsonHeader)
    if (isDebug):
        printRestResponse(response)
    return response


def printRestRequest(url):
    """Debug helper: print the request verb + URL."""
    print(" ")
    print(url)


def printRestRequestBody(body):
    """Debug helper: pretty-print the request body."""
    prettyBody = json.dumps(body, indent=4)
    print(prettyBody)
    print(" ")


def printRestResponse(response):
    """Debug helper: pretty-print the response payload."""
    print("Returns:")
    prettyResponse = json.dumps(response.json(), indent=4)
    print(prettyResponse)
    print(" ")


def processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response):
    """Print diagnostics when the response carries an error; always return [].

    The empty list return lets callers use the result directly as a fallback
    value for list-returning queries.
    """
    if response.status_code != 200:
        printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    else:
        relatedHTTPCode = response.json().get('relatedHTTPCode')
        if relatedHTTPCode != 200:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    return []


def postAndPrintResult(url, json=None, headers=None):
    """Issue a POST and echo the URL and JSON result.

    NOTE: the 'json' parameter intentionally mirrors requests.post's keyword
    and shadows the json module inside this function (the module is not used
    here, so this is harmless).
    """
    print(" ...... (POST", url, ")")
    response = requests.post(url, json=json, headers=headers)
    print(" ...... Response: ", response.json())
# -

# +
#
# Administration services
#
#
# OMAG Server configuration functions.
# These functions add definitions to an OMAG server's configuration document.
#
def configurePlatformURL(adminPlatformURL, adminUserId, serverName, serverPlatform):
    """Record which platform the server will run on."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the platform the server will run on...")
    url = adminCommandURLRoot + serverName + '/server-url-root?url=' + serverPlatform
    postAndPrintResult(url)


def configureMaxPageSize(adminPlatformURL, adminUserId, serverName, maxPageSize):
    """Set the server's maximum page size (maxPageSize is passed as a string)."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the maximum page size...")
    url = adminCommandURLRoot + serverName + '/max-page-size?limit=' + maxPageSize
    postAndPrintResult(url)


def configureServerType(adminPlatformURL, adminUserId, serverName, serverType):
    """Set the descriptive server type name."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the server's type...")
    url = adminCommandURLRoot + serverName + '/server-type?typeName=' + serverType
    postAndPrintResult(url)


def clearServerType(adminPlatformURL, adminUserId, serverName):
    """Clear the server type (empty typeName resets it to the default)."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... clearing the server's type...")
    url = adminCommandURLRoot + serverName + '/server-type?typeName='
    postAndPrintResult(url)


def configureOwningOrganization(adminPlatformURL, adminUserId, serverName, organizationName):
    """Record the organization that owns the server."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the server's owning organization...")
    url = adminCommandURLRoot + serverName + '/organization-name?name=' + organizationName
    postAndPrintResult(url)


def configureUserId(adminPlatformURL, adminUserId, serverName, userId):
    """Set the userId the server uses for its own outbound calls."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the server's userId...")
    url = adminCommandURLRoot + serverName + '/server-user-id?id=' + userId
    postAndPrintResult(url)


def configurePassword(adminPlatformURL, adminUserId, serverName, password):
    """Set the (optional) password that goes with the server's userId."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the server's password (optional)...")
    url = adminCommandURLRoot + serverName + '/server-user-password?password=' + password
    postAndPrintResult(url)


def configureSecurityConnection(adminPlatformURL, adminUserId, serverName, securityBody):
    """Install the server's security connector (securityBody is a Connection JSON object)."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the server's security connection...")
    url = adminCommandURLRoot + serverName + '/security/connection'
    jsonContentHeader = {'content-type':'application/json'}
    postAndPrintResult(url, json=securityBody, headers=jsonContentHeader)


def configureDefaultAuditLog(adminPlatformURL, adminUserId, serverName):
    """Add the default (console) audit log destination."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the default audit log...")
    url = adminCommandURLRoot + serverName + '/audit-log-destinations/default'
    postAndPrintResult(url)


#
# These commands are for metadata servers, metadata access points and repository proxies
def configureMetadataRepository(adminPlatformURL, adminUserId, serverName, repositoryType):
    """Select the local repository implementation (e.g. in-memory, graph)."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the metadata repository...")
    url = adminCommandURLRoot + serverName + '/local-repository/mode/' + repositoryType
    postAndPrintResult(url)


def configureRepositoryProxyDetails(adminPlatformURL, adminUserId, serverName, connectorProvider):
    """Configure a repository proxy with the connector provider class to use."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the repository proxy...")
    url = adminCommandURLRoot + serverName + '/local-repository/mode/repository-proxy/details?connectorProvider=' + connectorProvider
    postAndPrintResult(url)


def configureDescriptiveName(adminPlatformURL, adminUserId, serverName, collectionName):
    """Set the metadata collection name for the server's local repository."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the short descriptive name of the metadata stored in this server...")
    url = adminCommandURLRoot + serverName + '/local-repository/metadata-collection-name/' + collectionName
    postAndPrintResult(url)


def configureEventBus(adminPlatformURL, adminUserId, serverName, busBody):
    """Define the event bus (e.g. Kafka) configuration for the server."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the event bus for this server...")
    url = adminCommandURLRoot + serverName + '/event-bus'
    # Fix: jsonContentHeader was referenced here without being defined in this
    # function (its siblings each define it locally), which could raise a
    # NameError at call time.  Define it locally like the other functions do.
    jsonContentHeader = {'content-type':'application/json'}
    postAndPrintResult(url, json=busBody, headers=jsonContentHeader)


def configureCohortMembership(adminPlatformURL, adminUserId, serverName, cohortName):
    """Register the server as a member of the named cohort."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the membership of the cohort...")
    url = adminCommandURLRoot + serverName + '/cohorts/' + cohortName
    postAndPrintResult(url)


def configureAccessService(adminPlatformURL, adminUserId, serverName, accessService, accessServiceOptions):
    """Enable one Open Metadata Access Service (OMAS) with its options."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + accessService + " access service for this server...")
    url = adminCommandURLRoot + serverName + '/access-services/' + accessService
    jsonContentHeader = {'content-type':'application/json'}
    postAndPrintResult(url, json=accessServiceOptions, headers=jsonContentHeader)


#
# The commands below are for View Servers only
#
def configureGovernanceSolutionViewService(adminPlatformURL, adminUserId, viewServerName, viewService, remotePlatformURL, remoteServerName):
    """Enable a Governance Solution view service, pointing it at a remote metadata server."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + viewService + " Governance Solution View Service for this server...")
    url = adminCommandURLRoot + viewServerName + '/view-services/' + viewService
    jsonContentHeader = {'content-type':'application/json'}
    viewBody = {
        "class": "ViewServiceConfig",
        "omagserverPlatformRootURL": remotePlatformURL,
        "omagserverName": remoteServerName
    }
    postAndPrintResult(url, json=viewBody, headers=jsonContentHeader)


def configureIntegrationViewService(adminPlatformURL, adminUserId, viewServerName, viewService, configBody):
    """Enable an Integration view service using a caller-supplied config body."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + viewService + " Integration View Service for this server...")
    url = adminCommandURLRoot + viewServerName + '/view-services/' + viewService
    jsonContentHeader = {'content-type':'application/json'}
    postAndPrintResult(url, json=configBody, headers=jsonContentHeader)


#
# The commands below are for Integration Daemon OMAG Servers only
#
def configureIntegrationService(adminPlatformURL, adminUserId, daemonServerName, mdrServerName, mdrServerPlatform, integrationServiceURLMarker, integrationServiceOptions, connectorConfigs):
    """Enable one integration service in an integration daemon.

    The request body links the daemon to its partner metadata server and
    carries the service options plus the connector configurations to run.
    """
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + integrationServiceURLMarker + " integration service in " + daemonServerName + " integration daemon ...")
    jsonContentHeader = {'content-type':'application/json'}
    requestBody = {
        "class": "IntegrationServiceRequestBody",
        "omagserverPlatformRootURL": mdrServerPlatform,
        "omagserverName": mdrServerName,
        "integrationServiceOptions": integrationServiceOptions,
        "integrationConnectorConfigs": connectorConfigs
    }
    print(requestBody)
    url = adminCommandURLRoot + daemonServerName + '/integration-services/' + integrationServiceURLMarker
    postAndPrintResult(url, json=requestBody, headers=jsonContentHeader)


#
# The commands below are for Engine Host OMAG Servers only
#
def configureEngineDefinitionServices(adminPlatformURL, adminUserId, engineServerName, mdrServerName, mdrServerPlatform):
    """Point the engine host at the metadata server that supplies its engine definitions."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + engineServerName + " engine definition services...")
    jsonContentHeader = {'content-type':'application/json'}
    requestBody = {
        "class": "OMAGServerClientConfig",
        "omagserverPlatformRootURL": mdrServerPlatform,
        "omagserverName": mdrServerName
    }
    print(requestBody)
    url = adminCommandURLRoot + engineServerName + '/engine-definitions/client-config'
    postAndPrintResult(url, json=requestBody, headers=jsonContentHeader)


def configureGovernanceEngineService(adminPlatformURL, adminUserId, engineServerName, mdrServerName, mdrServerPlatform, engineServiceURLMarker, governanceEngines):
    """Enable one engine service in an engine host with its list of governance engines."""
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    print(" ... configuring the " + engineServiceURLMarker + " engine service in " + engineServerName + " engine host server ...")
    jsonContentHeader = {'content-type':'application/json'}
    requestBody = {
        "class": "EngineServiceRequestBody",
        "omagserverPlatformRootURL": mdrServerPlatform,
        "omagserverName": mdrServerName,
        "engines": governanceEngines
    }
    print(requestBody)
    url = adminCommandURLRoot + engineServerName + '/engine-services/' + engineServiceURLMarker
    postAndPrintResult(url, json=requestBody, headers=jsonContentHeader)
# +
# Server configuration can be deployed from one OMAG Server Platform to another.
# This enables a server configuration to be tested and then deployed in production.


def deployServerToPlatform(adminPlatformURL, adminUserId, serverName, platformURL):
    """Copy a server's configuration document to another platform."""
    print(" ... deploying", serverName, "to the", platformURL, "platform...")
    adminCommandURLRoot = adminPlatformURL + '/open-metadata/admin-services/users/' + adminUserId + '/servers/'
    url = adminCommandURLRoot + serverName + '/configuration/deploy'
    platformTarget = {
        "class": "URLRequestBody",
        "urlRoot": platformURL
    }
    jsonContentHeader = {'content-type':'application/json'}
    postAndPrintResult(url, json=platformTarget, headers=jsonContentHeader)
# +
# The OMAG Server Platform is a single executable (application) that can be started
# from the command line or a script or as part of a pre-built container environment
# such as Kubernetes. The function below checks that a specific
# server platform is running.
def checkServerPlatform(serverPlatformName, serverPlatformURL):
    """Return True when the platform answers its 'origin' probe, else False.

    A connection error (platform down) is caught and reported rather than
    propagated.
    """
    try:
        isPlatformActiveURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/origin"
        response = requests.get(isPlatformActiveURL)
        if response.status_code == 200:
            print(" ", serverPlatformName, "is active")
            return True
        else:
            # Something answered but it is not an OMAG Server Platform.
            print(" ", serverPlatformName, "is not an OMAG Platform")
            return False
    except Exception as error:
        print("Exception: %s" % error)
        print(" ", serverPlatformName, "is down - start it before proceeding")
        return False


# The OMAG Server Platform has the implementation of the open metadata and governance (OMAG) services. These are
# divided into three groups: Common Services, Access Services and Governance Services.
def printServiceDescriptions(serverPlatformName, serviceGroupName, services):
    """Print name/description/wiki link for each registered service in a group."""
    print(serviceGroupName, "for", serverPlatformName)
    for x in range(len(services)):
        serviceName = services[x].get('serviceName')
        serviceDescription = services[x].get('serviceDescription')
        serviceWiki = services[x].get('serviceWiki')
        print(" * ", serviceName)
        print("     ", serviceDescription)
        print("     ", serviceWiki)
        print(" ")


def getServerPlatformServices(serverPlatformName, serverPlatformURL):
    """List every category of registered service available on the platform."""
    getOMAGServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/registered-services"
    response = issueGet(getOMAGServicesURL + "/common-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Common services", response.json().get('services'))
    response = issueGet(getOMAGServicesURL + "/access-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Access services", response.json().get('services'))
    response = issueGet(getOMAGServicesURL + "/engine-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Engine services", response.json().get('services'))
    response = issueGet(getOMAGServicesURL + "/integration-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Integration services", response.json().get('services'))
    response = issueGet(getOMAGServicesURL + "/view-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "View services", response.json().get('services'))
    response = issueGet(getOMAGServicesURL + "/governance-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Governance services", response.json().get('services'))


def getAccessServices(serverPlatformName, serverPlatformURL):
    """List the access services (OMASs) registered with the platform."""
    getOMAGServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/registered-services"
    response = issueGet(getOMAGServicesURL + "/access-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Access services", response.json().get('services'))


def getEngineServices(serverPlatformName, serverPlatformURL):
    """List the engine services (OMESs) registered with the platform."""
    getOMAGServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/registered-services"
    response = issueGet(getOMAGServicesURL + "/engine-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Engine services", response.json().get('services'))


def getIntegrationServices(serverPlatformName, serverPlatformURL):
    """List the integration services (OMISs) registered with the platform."""
    getOMAGServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/registered-services"
    response = issueGet(getOMAGServicesURL + "/integration-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "Integration services", response.json().get('services'))


def getViewServices(serverPlatformName, serverPlatformURL):
    """List the view services (OMVSs) registered with the platform."""
    getOMAGServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/registered-services"
    response = issueGet(getOMAGServicesURL + "/view-services")
    if response.status_code == 200:
        printServiceDescriptions(serverPlatformName, "View services", response.json().get('services'))


def queryKnownServers(serverPlatformName, serverPlatformURL):
    """Print the full list of servers known to (configured on) the platform."""
    print(" ")
    print("Querying the known servers on platform: " + serverPlatformName + " ...")
    platformServicesURLRoot = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform"
    url = platformServicesURLRoot + '/servers'
    print("GET " + url)
    response = requests.get(url)
    prettyResponse = json.dumps(response.json(), indent=4)
    print("Response: ")
    print(prettyResponse)
    print(" ")


def queryActiveServers(serverPlatformName, serverPlatformURL):
    """Print the list of servers currently running on the platform."""
    print(" ")
    print("Querying the active servers on platform: " + serverPlatformName + " ...")
    platformServicesURLRoot = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform"
    url = platformServicesURLRoot + '/servers/active'
    print("GET " + url)
    response = requests.get(url)
    prettyResponse = json.dumps(response.json(), indent=4)
    print("Response: ")
    print(prettyResponse)
    print(" ")
# +
# Each server is configured to define which services should be activated. This configuration results in
# the creation of a configuration document. This document is read when the server is started and
# drives the initialization of the services.
def checkServerConfigured(serverName, serverPlatformName, serverPlatformURL):
    """Return True when the server has a configuration document on the platform.

    The presence of an 'auditTrail' in the configuration document is used as
    the signal that the server has actually been configured (a bare document
    may exist before any configuration calls are made).
    """
    isServerKnownOnPlatform = serverPlatformURL + "/open-metadata/admin-services/users/" + adminUserId + "/servers/" + serverName + "/configuration"
    response = issueGet(isServerKnownOnPlatform)
    if response.status_code == 200:
        serverConfig = response.json().get('omagserverConfig')
        auditTrail = serverConfig.get('auditTrail')
        if auditTrail is not None:
            print(" ...", serverName, "is configured")
            return True
        else:
            print(" ...", serverName, "needs configuring")
    else:
        print(" ...", serverPlatformName, "at", serverPlatformURL, "is down - unable to check server configuration")
    return False


# The OMAG Server Platform supports a call to return if a server is active.
def checkServerActive(serverName, serverPlatformName, serverPlatformURL):
    """Return True when the named server is running on the platform, else False."""
    isServerActiveOnPlatform = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/servers/" + serverName + "/status"
    response = issueGet(isServerActiveOnPlatform)
    if response.status_code == 200:
        relatedHTTPCode = response.json().get('relatedHTTPCode')
        if relatedHTTPCode == 200:
            serverStatus = response.json().get('active')
            if serverStatus == True:
                print(" ...", serverName, "is active - ready to begin")
            else:
                print(" ...", serverName, "is down - needs to be started")
            return serverStatus
        else:
            # The platform answered but does not recognise the server.
            print(" ...", serverName, "is not known - needs to be started")
            return False
    else:
        print(" ...", serverPlatformName, "at", serverPlatformURL, "is down - unable to check server configuration")
        return False


# This is the call to start a server on a specific platform. Once the server is running it is possible to
# make use of the open metadata and governance services that have been activated in the server.
def activateServerOnPlatform(serverName, serverPlatformName, serverPlatformURL):
    """Start the named server on the platform; return True on success."""
    print("    Starting server " + serverName + " ...")
    activateServerURL = serverPlatformURL + "/open-metadata/admin-services/users/" + adminUserId + '/servers/' + serverName + "/instance"
    response = issuePost(activateServerURL, {})
    if response.status_code == 200:
        relatedHTTPCode = response.json().get('relatedHTTPCode')
        if relatedHTTPCode == 200:
            return True
        else:
            # Platform answered but the server failed during initialization.
            errorMessage = response.json().get('exceptionErrorMessage')
            print("     ..." + serverName + " failed to start")
            print("        error message is: " + errorMessage)
            return False
    else:
        print("     ..." + serverName + " failed to start")
        return False


# Once a server is active, it is possible to query the services that are active.
def getServerServices(serverName, serverPlatformName, serverPlatformURL):
    """Print the list of services running inside an active server."""
    getServerActiveServicesURL = serverPlatformURL + "/open-metadata/platform-services/users/" + adminUserId + "/server-platform/servers/" + serverName + "/services"
    print("Services for server:", serverName)
    response = issueGet(getServerActiveServicesURL)
    if response.status_code == 200:
        serviceList = response.json().get('serverServicesList')
        for x in range(len(serviceList)):
            print(" * ", serviceList[x])
    else:
        print(response)


# This function checks whether a server is active and starts it if it is down.
def activateServerIfDown(serverName, serverPlatformName, serverPlatformURL):
    """Ensure the server is running: start it only when configured but inactive."""
    print("    Checking OMAG Server " + serverName)
    configured = checkServerConfigured(serverName, serverPlatformName, serverPlatformURL)
    if configured == True:
        active = checkServerActive(serverName, serverPlatformName, serverPlatformURL)
        if active == False:
            activateServerOnPlatform(serverName, serverPlatformName, serverPlatformURL)


# This function checks the platform is running and ensures the servers are started on it.
# It requests user action if either the platform is not running or the servers are not configured.
# Otherwise it should return with all of the servers running.
def activatePlatform(serverPlatformName, serverPlatformURL, hostedServerNames):
    """Check the platform is up, then start each named server that is down."""
    available = checkServerPlatform(serverPlatformName, serverPlatformURL)
    if available == True:
        for x in range(len(hostedServerNames)):
            activateServerIfDown(hostedServerNames[x], serverPlatformName, serverPlatformURL)
# +
#
# Repository Services
#
# Understanding Cohorts
#
# The metadata servers, metadata access points, repository proxies and the CTS are linked together through open metadata repository cohorts.
# The servers linked via a cohort can exchange open metadata either through federated queries or metadata replication.


def queryServerCohorts(serverName, serverPlatformName, serverPlatformURL):
    """Return the list of cohort names the server is connected to ([] on error)."""
    cohortNames = []
    try:
        metadataHighwayServicesURLcore = '/servers/' + serverName + '/open-metadata/repository-services/users/' + adminUserId + '/metadata-highway'
        url = serverPlatformURL + metadataHighwayServicesURLcore + '/cohort-descriptions'
        response = requests.get(url)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                cohorts = response.json().get('cohorts')
                if cohorts != None:
                    for x in range(len(cohorts)):
                        cohortName = cohorts[x].get('cohortName')
                        cohortNames.append(cohortName)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return cohortNames


def printServerCohortsStatus(serverName, serverPlatformName, serverPlatformURL):
    """Print each cohort the server belongs to with its connection status."""
    try:
        metadataHighwayServicesURLcore = '/servers/' + serverName + '/open-metadata/repository-services/users/' + adminUserId + '/metadata-highway'
        url = serverPlatformURL + metadataHighwayServicesURLcore + '/cohort-descriptions'
        response = issueGet(url)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                cohorts = response.json().get('cohorts')
                if cohorts == None:
                    print("Server " + serverName + " is not connected to any cohorts")
                else:
                    print("Server " + serverName + " is connected to the following cohorts:")
                    for x in range(len(cohorts)):
                        cohortName = cohorts[x].get('cohortName')
                        connectionStatus = cohorts[x].get('connectionStatus')
                        print(" * " + cohortName + " [" + connectionStatus + "]")
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def printCohortMember(cohortMember, localRegistration):
    """Print one cohort member's registration details.

    localRegistration selects the "local"/"remote" wording; fields that are
    absent from the registration record are skipped or labelled.
    """
    serverName = cohortMember.get('serverName')
    serverType = cohortMember.get('serverType')
    metadataCollectionId = cohortMember.get('metadataCollectionId')
    metadataCollectionName = cohortMember.get('metadataCollectionName')
    registrationTime = cohortMember.get('registrationTime')
    if localRegistration == True:
        print("Registration details for local " + serverType + " " + serverName)
    else:
        print("Registration details for remote " + serverType + " " + serverName)
    if (metadataCollectionId != None):
        print(" * Metadata collection id: " + metadataCollectionId)
    if (metadataCollectionName != None):
        print(" * Metadata collection name: " + metadataCollectionName)
    if (registrationTime != None):
        print(" * Registration time: " + registrationTime)
    repositoryConnection = cohortMember.get('repositoryConnection')
    if repositoryConnection != None:
        endpoint = repositoryConnection.get('endpoint')
        if endpoint != None:
            address = endpoint.get('address')
            if address != None:
                print(" * URL for metadata queries: " + address)
            else:
                print(" * URL for metadata queries: null")
        else:
            print(" * URL for metadata queries: no endpoint")
    else:
        print(" * URL for metadata queries: not supported")


def printLocalRegistration(serverName, serverPlatformName, serverPlatformURL):
    """Print the server's own registration record (as sent to all cohorts)."""
    try:
        metadataHighwayServicesURLcore = '/servers/' + serverName + '/open-metadata/repository-services/users/' + adminUserId + '/metadata-highway'
        url = serverPlatformURL + metadataHighwayServicesURLcore + '/local-registration'
        response = issueGet(url)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                cohortMember = response.json().get('cohortMember')
                printCohortMember(cohortMember, True)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def printLocalRegistrationForCohort(serverName, cohortName, serverPlatformName, serverPlatformURL):
    """Print the server's registration record for one specific cohort."""
    try:
        metadataHighwayServicesURLcore = '/servers/' + serverName + '/open-metadata/repository-services/users/' + adminUserId + '/metadata-highway'
        url = serverPlatformURL + metadataHighwayServicesURLcore + '/cohorts/' + cohortName + '/local-registration'
        response = issueGet(url)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                cohortMember = response.json().get('cohortMember')
                printCohortMember(cohortMember, True)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def printRemoteRegistrations(serverName, cohortName, serverPlatformName, serverPlatformURL):
    """Print the registration records received from the other cohort members."""
    try:
        metadataHighwayServicesURLcore = '/servers/' + serverName + '/open-metadata/repository-services/users/' + adminUserId + '/metadata-highway'
        url = serverPlatformURL + metadataHighwayServicesURLcore + '/cohorts/' + cohortName + '/remote-members'
        response = issueGet(url)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                cohortMembers = response.json().get('cohortMembers')
                if cohortMembers != None:
                    for x in range(len(cohortMembers)):
                        printCohortMember(cohortMembers[x], False)
                else:
                    print("No remote members")
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def printServerCohorts(serverName, serverPlatformName, serverPlatformURL):
    """Full cohort-registry report: local registration plus per-cohort members."""
    print("Reviewing the cohort registry for server: " + serverName)
    print(" ")
    printLocalRegistration(serverName, serverPlatformName, serverPlatformURL)
    print(" ")
    cohorts = queryServerCohorts(serverName, serverPlatformName, serverPlatformURL)
    for x in range(len(cohorts)):
        print("Cohort " + cohorts[x] + " member details")
        printLocalRegistrationForCohort(serverName, cohorts[x], serverPlatformName, serverPlatformURL)
        printRemoteRegistrations(serverName, cohorts[x], serverPlatformName, serverPlatformURL)
        print(" ")
# -

# +
#
# OCF Common services
# Working with assets - this set of functions displays assets returned from the open metadata repositories.
#
def printAssetSummary(asset):
    """Print a one-line summary (type | guid | qualified name) for an asset."""
    elementHeader = asset.get('elementHeader')
    elementType = elementHeader.get('type')
    assetTypeName = elementType.get('typeName')
    # NOTE(review): 'requestType' actually holds the asset's GUID here — the
    # name appears to be a leftover; confirm before renaming.
    requestType = elementHeader.get('guid')
    assetProperties = asset.get('assetProperties')
    assetQualifiedName = assetProperties.get('qualifiedName')
    # NOTE(review): the qualified name is printed twice (alone and in the
    # summary line) — possibly a debugging leftover; confirm before removing.
    print(assetQualifiedName)
    print(assetTypeName + " \t| " + requestType + " | " + assetQualifiedName)


def printAssetDetail(asset):
    """Print a multi-line detail report for an asset, skipping absent fields."""
    elementHeader = asset.get('elementHeader')
    elementType = elementHeader.get('type')
    assetTypeName = elementType.get('typeName')
    assetSuperTypes = elementType.get('superTypeNames')
    requestType = elementHeader.get('guid')   # the asset's GUID (see note above)
    assetProperties = asset.get('assetProperties')
    assetQualifiedName = assetProperties.get('qualifiedName')
    assetDisplayName = assetProperties.get('displayName')
    assetCatalogBean = assetProperties.get('description')
    assetOwner = assetProperties.get('owner')
    assetOrigin = assetProperties.get('otherOriginValues')
    assetOwnerType = assetProperties.get('ownerTypeName')
    assetZones = assetProperties.get('zoneMembership')
    assetLatestChange = assetProperties.get('latestChange')
    if not requestType:
        requestType = "<null>"
    if not assetDisplayName:
        assetDisplayName = "<none>"
    print(assetDisplayName + " [" + requestType + "]")
    if not assetQualifiedName:
        assetQualifiedName = "<null>"
    print("  qualifiedName: " + assetQualifiedName)
    if assetCatalogBean:
        print("  description:   " + assetCatalogBean)
    print("  type:          " + assetTypeName + ' [%s]' % ', '.join(map(str, assetSuperTypes)))
    if assetOwner:
        print("  owner:         " + assetOwner + " [" + assetOwnerType + "]")
    if assetOrigin:
        contact = assetOrigin.get('contact')
        dept = assetOrigin.get('originatingDept')
        org = assetOrigin.get('originatingOrganization')
        print("  origin:        contact=" + contact + ", dept=" + dept + ", org=" + org)
    if assetZones:
        print("  zones:         " + '%s' % ', '.join(map(str, assetZones)))
    if assetLatestChange:
        print("  latest change: " + assetLatestChange)


def printAssetListSummary(assets):
    """Print the one-line summary of each asset in the list."""
    print(" ")
    for x in range(len(assets)):
        printAssetSummary(assets[x])


def printAssetListDetail(assets):
    """Print the detail report of each asset, separated by rules."""
    print("\n--------------------------------------\n")
    for x in range(len(assets)):
        printAssetDetail(assets[x])
        print("\n--------------------------------------\n")


def printGUIDList(guids):
    """Pretty-print a list of GUIDs, or report that no assets were created."""
    if guids == None:
        print("No assets created")
    else:
        prettyGUIDs = json.dumps(guids, indent=4)
        print(prettyGUIDs)


def getLastGUID(guids):
    """Return the last GUID in the list, or "<unknown>" when the list is None."""
    if guids == None:
        return "<unknown>"
    else:
        # Iterating to the end leaves the final element bound.
        for guid in guids:
            returnGUID = guid
        return returnGUID


def getAssetUniverse(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, requestType):
    """Fetch an asset's full contents via the connected-asset API.

    Returns the whole response JSON when an asset is present, otherwise prints
    the error and returns None.  'requestType' is the asset GUID appended to
    the URL.
    """
    connectedAssetURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    getAsset = connectedAssetURL + '/assets/' + requestType
    response = issueGet(getAsset)
    asset = response.json().get('asset')
    if asset:
        return response.json()
    else:
        print("No Asset returned")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def printCommentList(responseObjects):
    """Print a count header then each comment in the list."""
    print(" ")
    if len(responseObjects) == 0:
        print("No comments found")
    else:
        if len(responseObjects) == 1:
            print("1 comment found:")
        else:
            print(str(len(responseObjects)) + " comments found:")
        for x in range(len(responseObjects)):
            printComment(responseObjects[x])


def printAssetComments(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, requestType):
    """Retrieve and print up to 50 comments attached to the asset."""
    connectedAssetURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    commentQuery = connectedAssetURL + '/assets/' + requestType + '/comments?elementStart=0&maxElements=50'
    response = issueGet(commentQuery)
    responseObjects = response.json().get('list')
    if (responseObjects):
        printCommentList(responseObjects)
    else:
        print("No comments returned")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def printAssetCommentReplies(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, requestType, commentGUID):
    """Retrieve and print up to 50 replies to one comment on the asset."""
    connectedAssetURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    commentReplyQuery = connectedAssetURL + '/assets/' + requestType + '/comments/' + commentGUID + '/replies?elementStart=0&maxElements=50'
    response = issueGet(commentReplyQuery)
    responseObjects = response.json().get('list')
    if (responseObjects):
        printCommentList(responseObjects)
    else:
        print("No comments returned")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def printAssetUniverse(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, requestType):
    """Dump the raw JSON of the connected-asset query for the asset."""
    connectedAssetURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    getAsset = connectedAssetURL + '/assets/' + requestType
    print(" ")
    print("GET " + getAsset)
    response = requests.get(getAsset)
    print("Returns:")
    prettyResponse = json.dumps(response.json(), indent=4)
    print(prettyResponse)
    print(" ")


def printRelatedAssets(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, requestType):
    """Dump the raw JSON of the first 50 assets related to the asset."""
    connectedAssetURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    getRelatedAsset = connectedAssetURL + '/assets/' + requestType + '/related-assets?elementStart=0&maxElements=50'
    print(" ")
    print("GET " + getRelatedAsset)
    response = requests.get(getRelatedAsset)
    print("Returns:")
    prettyResponse = json.dumps(response.json(), indent=4)
    print(prettyResponse)
    print(" ")


def printComment(commentObject):
    """Print the fields of one comment object (type, text, author, visibility)."""
    if commentObject:
        comment = commentObject.get('comment')
        # NOTE(review): 'relyCount' looks like a typo for replyCount and is
        # never used below — confirm before removing.
        relyCount = commentObject.get('replyCount')
        if comment:
            commentType = comment.get('commentType')
            commentText = comment.get('commentText')
            commentUser = comment.get('user')
            isPublic = comment.get('isPublic')
            if commentType:
                print("  comment type: " + commentType)
            if commentText:
                print("  comment text: " + commentText)
            if commentUser:
                print("  created by:   " + commentUser)
            if isPublic:
                print("  public:       " + str(isPublic))


def getSchemaAttributesFromSchemaType(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, schemaTypeGUID):
    """Return up to 100 schema attributes for a schema type, or None on error."""
    ocfURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/common-services/' + serviceURLMarker + '/connected-asset/users/' + userId
    getSchemaAttributesURL = ocfURL + '/assets/schemas/' + schemaTypeGUID + '/schema-attributes?elementStart=0&maxElements=100'
    response = issueGet(getSchemaAttributesURL)
    schemaAttributes = response.json().get('list')
    if schemaAttributes:
        return schemaAttributes
    else:
        print("No Schema attributes retrieved")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)
# +
#
# Working with assets - this set of functions are for authoring assets.
# Using the Asset Owner OMAS interface to create and query assets. Notice that the interface returns all of the asset contents.
#
def assetOwnerCreateAsset(assetTypeURL, serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath):
    """Create a file-based asset through the Asset Owner OMAS.

    assetTypeURL selects the endpoint (e.g. '/assets/data-files/csv').
    Returns the list of GUIDs of the created elements, or None after
    printing the error response.

    NOTE: the original file defined this function twice, byte-for-byte
    identical apart from an unused `jsonHeader` local in the second copy;
    the duplicate has been collapsed into this single definition.
    """
    assetOwnerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    createAssetURL = assetOwnerURL + assetTypeURL
    createAssetBody = {
        "class": "NewCSVFileAssetRequestBody",
        "displayName": displayName,
        "description": description,
        "fullPath": fullPath
    }
    response = issuePost(createAssetURL, createAssetBody)
    guids = response.json().get('guids')
    if guids:
        return guids
    else:
        print("No assets created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def assetOwnerCreateCSVAsset(serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath):
    """Create a CSV file asset."""
    return assetOwnerCreateAsset('/assets/data-files/csv', serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath)


def assetOwnerCreateAvroAsset(serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath):
    """Create an Avro file asset."""
    return assetOwnerCreateAsset('/assets/data-files/avro', serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath)


def assetOwnerCreateCSVAssetWithColumnHeaders(serverName, serverPlatformName, serverPlatformURL, userId, displayName, description, fullPath, columnHeaders):
    """Create a CSV file asset whose schema is seeded from the supplied column headers."""
    assetOwnerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    createAssetURL = assetOwnerURL + '/assets/data-files/csv'
    createAssetBody = {
        "class": "NewCSVFileAssetRequestBody",
        "displayName": displayName,
        "description": description,
        "fullPath": fullPath,
        "columnHeaders": columnHeaders
    }
    response = issuePost(createAssetURL, createAssetBody)
    guids = response.json().get('guids')
    if guids:
        return guids
    else:
        print("No CSV assets created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def assetOwnerGetSchemaAttributesFromSchemaType(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, schemaTypeGUID):
    """Asset Owner wrapper around getSchemaAttributesFromSchemaType."""
    return getSchemaAttributesFromSchemaType(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, schemaTypeGUID)

# Delete the asset with the supplied guid.
def assetOwnerDelete(serverName, serverPlatformName, serverPlatformURL, userId, assetGUID):
    """Delete the asset with the supplied GUID via the Asset Owner OMAS.

    Returns an empty list on success, or None after printing the error.

    FIX: the original built the URL as the single literal
    '/assets/"+ assetGUID + "/delete' (mismatched quotes), so the GUID was
    never substituted; it also tested an undefined `relatedHTTPCode` local.
    """
    assetOwnerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    deleteAssetURL = assetOwnerURL + '/assets/' + assetGUID + '/delete'
    response = issuePost(deleteAssetURL, {})
    relatedHTTPCode = response.json().get('relatedHTTPCode')
    if relatedHTTPCode == 200:
        print("deleted Asset")
        return []
    else:
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def assetOwnerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Search for assets matching searchString; return the asset list or None."""
    assetOwnerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    getAssetsURL = assetOwnerURL + '/assets/by-search-string?startFrom=0&pageSize=50'
    response = issueDataPost(getAssetsURL, searchString)
    if response:
        assets = response.json().get('assets')
        if assets:
            return assets
        else:
            print("No assets found")
            processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)
    else:
        print("Search Request Failed")


def assetOwnerPrintAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Search for assets and print a summary plus full detail of each match."""
    assets = assetOwnerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)
    if assets:
        if len(assets) == 1:
            print("1 asset found:")
        else:
            print(str(len(assets)) + " assets found:")
        printAssetListSummary(assets)
        printAssetListDetail(assets)


def assetOwnerFindAssetQualifiedName(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Return the qualifiedName of the single asset matching searchString, else None.

    Retries the search once after a one second pause to allow the metadata
    repository to catch up.
    """
    qualifiedName = None
    assets = assetOwnerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)
    if assets is None:
        time.sleep(1)
        assets = assetOwnerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)
    if assets:
        if len(assets) == 1:
            assetProperties = assets[0].get('assetProperties')
            qualifiedName = assetProperties.get('qualifiedName')
        else:
            print(str(len(assets)) + " assets found:")
    return qualifiedName


def assetOwnerDeleteAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Delete every asset matching searchString.

    FIX: the original called nonexistent `assetOwnerCreateAssetDelete`
    (NameError at runtime); the delete helper is `assetOwnerDelete`.
    """
    assets = assetOwnerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)
    if assets:
        if len(assets) == 1:
            print("1 asset to delete")
        else:
            print(str(len(assets)) + " assets to delete:")
        for asset in assets:
            elementHeader = asset.get('elementHeader')
            assetGUID = elementHeader.get('guid')
            assetProperties = asset.get('assetProperties')
            assetQualifiedName = assetProperties.get('qualifiedName')
            print("Deleting asset " + assetQualifiedName)
            assetOwnerDelete(serverName, serverPlatformName, serverPlatformURL, userId, assetGUID)


def addOwner(serverName, serverPlatformName, serverPlatformURL, userId, assetName, assetGUID, assetOwner, ownerType):
    """Set the owner (id and type) on the asset with the supplied GUID."""
    governanceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    print("Setting owner on " + assetName + " to " + assetOwner + " ...")
    body = {
        "class": "OwnerRequestBody",
        "ownerType": ownerType,
        "ownerId": assetOwner
    }
    addOwnerURL = governanceURL + "/assets/" + assetGUID + "/owner"
    response = issuePost(addOwnerURL, body)
    if response.status_code != 200:
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def addOrigin(serverName, serverPlatformName, serverPlatformURL, userId, assetName, assetGUID, contact, originatingDept, originatingOrganization):
    """Set the origin (contact, department, organization) on the asset."""
    governanceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    print("Setting origin on " + assetName + " ...")
    body = {
        "class": "OriginRequestBody",
        "otherOriginValues": {
            "originatingOrganization": originatingOrganization,
            "originatingDept": originatingDept,
            "contact": contact
        }
    }
    addOriginURL = governanceURL + "/assets/" + assetGUID + "/origin"
    response = issuePost(addOriginURL, body)
    if response.status_code != 200:
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def addZones(serverName, serverPlatformName, serverPlatformURL, userId, assetName, assetGUID, zones):
    """Set the governance zone membership on the asset."""
    print("Setting governance zones on " + assetName + " ...")
    governanceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    addZonesURL = governanceURL + "/assets/" + assetGUID + "/governance-zones"
    response = issuePost(addZonesURL, zones)
    if response.status_code != 200:
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# Create a semantic assignment between an Asset's attachment - like a Schema Type - and Glossary Term
def createSemanticAssignment(serverName, serverPlatformName, serverPlatformURL, userId, assetGUID, schemaTypeGUID, glossaryTermGUID):
    """Attach a glossary term meaning to an asset attachment (e.g. a schema type)."""
    assetOwnerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-owner/users/' + userId
    semanticAssignmentURL = assetOwnerURL + '/assets/' + assetGUID + '/attachments/' + schemaTypeGUID + '/meanings/' + glossaryTermGUID
    response = issuePost(semanticAssignmentURL, {})
    if response.status_code == 200:
        print("Semantic assignment relationship created")
    else:
        print("No semantic assignment Relationship created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)

# +
#
# Asset Consumer OMAS
#
def searchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Search for assets via the Asset Consumer OMAS; return the GUID list or None."""
    commonServicesURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-consumer/users/' + userId
    getAssetsURL = commonServicesURL + '/assets/by-search-string?startFrom=0&pageSize=50'
    response = issueDataPost(getAssetsURL, searchString)
    guids = response.json().get('guids')
    if guids:
        return guids
    else:
        print("No assets found")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def assetConsumerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Asset Consumer wrapper around searchForAssets."""
    return searchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)


def addCommentToAsset(serverName, serverPlatformName, serverPlatformURL, userId, requestType, commentText, commentType, isPublic):
    """Add a comment to an asset; return the new comment's GUID or None."""
    assetConsumerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-consumer/users/' + userId
    addCommentURL = assetConsumerURL + '/assets/' + requestType + '/comments'
    commentBody = {
        "class": "CommentProperties",
        "commentType": commentType,
        "commentText": commentText,
        "isPublic": isPublic
    }
    response = issuePost(addCommentURL, commentBody)
    commentGUID = response.json().get('guid')
    if commentGUID:
        return commentGUID
    else:
        print("No comment added")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def addReplyToAssetComment(serverName, serverPlatformName, serverPlatformURL, userId, requestType, commentGUID, commentText, commentType, isPublic):
    """Add a reply to an existing asset comment; return the reply's GUID or None."""
    assetConsumerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-consumer/users/' + userId
    commentReplyURL = assetConsumerURL + '/assets/' + requestType + '/comments/' + commentGUID + '/replies'
    print(" ")
    print("POST " + commentReplyURL)
    commentReplyBody = {
        "class": "CommentProperties",
        "commentType": commentType,
        "commentText": commentText,
        "isPublic": isPublic
    }
    response = issuePost(commentReplyURL, commentReplyBody)
    commentGUID = response.json().get('guid')
    if commentGUID:
        return commentGUID
    else:
        print("No comment added")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


def assetConsumerAddReplyToAssetComment(serverName, serverPlatformName, serverPlatformURL, userId, requestType, commentGUID, commentText, commentType, isPublic):
    """Asset Consumer wrapper around addReplyToAssetComment."""
    return addReplyToAssetComment(serverName, serverPlatformName, serverPlatformURL, userId, requestType, commentGUID, commentText, commentType, isPublic)


def assetConsumerPrintAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString):
    """Search for assets via the Asset Consumer OMAS and print the resulting GUIDs."""
    guids = assetConsumerSearchForAssets(serverName, serverPlatformName, serverPlatformURL, userId, searchString)
    if guids:
        printGUIDList(guids)


def assetConsumerGetAssetUniverse(serverName, serverPlatformName, serverPlatformURL, userId, requestType):
    """Asset Consumer wrapper around getAssetUniverse."""
    return getAssetUniverse(serverName, serverPlatformName, serverPlatformURL, "asset-consumer", userId, requestType)


def assetConsumerPrintAssetUniverse(serverName, serverPlatformName, serverPlatformURL, userId, requestType):
    """Asset Consumer wrapper around printAssetUniverse."""
    printAssetUniverse(serverName, serverPlatformName, serverPlatformURL, "asset-consumer", userId, requestType)


def assetConsumerPrintAssetCommentReplies(serverName, serverPlatformName, serverPlatformURL, userId, requestType, commentGUID):
    """Asset Consumer wrapper around printAssetCommentReplies."""
    printAssetCommentReplies(serverName, serverPlatformName, serverPlatformURL, "asset-consumer", userId, requestType, commentGUID)


def assetConsumerPrintRelatedAssets(serverName, serverPlatformName, serverPlatformURL, userId, requestType):
    """Asset Consumer wrapper around printRelatedAssets."""
    printRelatedAssets(serverName, serverPlatformName, serverPlatformURL, "asset-consumer", userId, requestType)


def assetConsumerPrintAssetComments(serverName, serverPlatformName, serverPlatformURL, userId, requestType):
    """Asset Consumer wrapper around printAssetComments."""
    printAssetComments(serverName, serverPlatformName, serverPlatformURL, "asset-consumer", userId, requestType)
# -

# # Asset Manager OMAS
# This function assumes the assets exist, and it creates the lineage between the source asset schema type, to the port to the process to the port to
# the target asset schematype
#
def createDirectedProcessBetweenAssets(serverName, serverPlatformName, serverPlatformURL, userId, sourceSchemaTypeGuid, targetSchemaTypeGuid, processName, processDescription, processFormula):
    """Create a process plus in/out ports linking the source schema type to the target schema type.

    Returns the new process GUID on success; returns [] (or None) after
    printing the failing response otherwise.
    """
    assetManagerURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/asset-manager/users/' + userId
    createProcessURL = assetManagerURL + '/processes?assetManagerIsHome=false&initialStatus=ACTIVE'
    elementProperties = {
        'class': 'ProcessProperties',
        'qualifiedName': processName,
        'technicalName': processName,
        'technicalDescription': processDescription,
        'formula': processFormula
    }
    metadataCorrelationProperties = {
        'class': 'MetadataCorrelationProperties'
    }
    # create the process
    processRequestBody = {
        'class': 'ProcessRequestBody',
        'elementProperties': elementProperties,
        'metadataCorrelationProperties': metadataCorrelationProperties
    }
    response = issuePost(createProcessURL, processRequestBody)
    processGuid = response.json().get('guid')
    if processGuid:
        # create in port
        createPortURL = assetManagerURL + '/processes/' + processGuid + '/ports?assetManagerIsHome=false'
        metadataCorrelationProperties = {
            'class': 'MetadataCorrelationProperties'
        }
        inPortProperties = {
            'class': 'PortProperties',
            'qualifiedName': processGuid + "-in-port",
            'portType': 'INPUT_PORT',
            'displayName': 'in port'
        }
        inPortRequestBody = {
            'class': 'PortRequestBody',
            'MetadataCorrelationProperties': metadataCorrelationProperties,
            'elementProperties': inPortProperties
        }
        response = issuePost(createPortURL, inPortRequestBody)
        inPortGuid = response.json().get('guid')
        if inPortGuid is None:
            print("No in port created")
            # Report whichever failure signal is present: HTTP status first,
            # then the embedded relatedHTTPCode.
            if response.status_code != 200:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
                return []
            else:
                relatedHTTPCode = response.json().get('relatedHTTPCode')
                if relatedHTTPCode != 200:
                    printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
                    return []
        # create in portschema
        createInPortSchemaURL = assetManagerURL + '/ports/' + inPortGuid + '/schema-type/' + sourceSchemaTypeGuid + '?assetManagerIsHome=false'
        assetManagerIdentifiersRequestBody = {
            'class': 'AssetManagerIdentifiersRequestBody'
        }
        response = issuePost(createInPortSchemaURL, assetManagerIdentifiersRequestBody)
        if response.status_code != 200:
            print("No in port schema created")
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
            return []
        # create out port
        outPortProperties = {
            'class': 'PortProperties',
            'qualifiedName': processGuid + "-out-port",
            'portType': 'OUTPUT_PORT',
            'displayName': 'out port'
        }
        outPortRequestBody = {
            'class': 'PortRequestBody',
            'MetadataCorrelationProperties': metadataCorrelationProperties,
            'elementProperties': outPortProperties
        }
        response = issuePost(createPortURL, outPortRequestBody)
        outPortGuid = response.json().get('guid')
        if outPortGuid is None:
            print("No out port created")
            processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)
        # create out portschema
        createOutPortSchemaURL = assetManagerURL + '/ports/' + outPortGuid + '/schema-type/' + targetSchemaTypeGuid + '?assetManagerIsHome=false'
        assetManagerIdentifiersRequestBody = {
            'class': 'AssetManagerIdentifiersRequestBody'
        }
        response = issuePost(createOutPortSchemaURL, assetManagerIdentifiersRequestBody)
        if response.status_code != 200:
            print("No out port schema created")
            processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)
    else:
        print("Process " + processName + " created.")
        return processGuid
# NOTE(review): the placement of the final success print/return relative to the
# out-port-schema branch was reconstructed from collapsed source; original intent
# appears to be: success path prints and returns processGuid, failure path above
# reports the error. Confirm against upstream notebook if available.

# +
#
# Subject Area OMAS
# Working with glossaries - this set of functions displays glossary content returned from the open metadata repositories.
#
def printGlossarySummary(glossary):
    """Print a one-line summary (nodeType | guid | qualifiedName) of a glossary node."""
    nodeTypeName = glossary.get('nodeType')
    nodeQualifiedName = glossary.get('qualifiedName')
    systemAttributes = glossary.get('systemAttributes')
    guid = systemAttributes.get('guid')
    print(nodeQualifiedName)
    print(nodeTypeName + " \t| " + guid + " | " + nodeQualifiedName)


def printTermSummary(term):
    """Print a one-line summary (nodeType | guid | qualifiedName) of a glossary term node."""
    nodeTypeName = term.get('nodeType')
    nodeQualifiedName = term.get('qualifiedName')
    systemAttributes = term.get('systemAttributes')
    guid = systemAttributes.get('guid')
    print(nodeQualifiedName)
    print(nodeTypeName + " \t| " + guid + " | " + nodeQualifiedName)


#
# Working with glossary content - this set of functions are for authoring glossary content.
#
# Create a glossary with a display name and description
def createGlossary(serverName, serverPlatformName, serverPlatformURL, userId, displayName, description):
    """Create a glossary via the Subject Area OMAS; return the created glossary node or None."""
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    createGlossaryURL = subjectAreaURL + '/glossaries'
    createGlossaryBody = {
        "class": "Glossary",
        "name": displayName,
        "description": description,
        "nodeType": "Glossary",
    }
    response = issuePost(createGlossaryURL, createGlossaryBody)
    result = response.json().get('result')
    createdGlossary = result[0]
    if createdGlossary:
        return createdGlossary
    else:
        print("No Glossary created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# Create a glossary term with a display name and description
def createTerm(serverName, serverPlatformName, serverPlatformURL, userId, glossaryGUID, displayName, description, example):
    """Create a term in the identified glossary; return the created term node or None."""
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    createTermURL = subjectAreaURL + '/terms'
    glossaryBody = {
        "guid": glossaryGUID
    }
    createTermBody = {
        "class": "Term",
        "name": displayName,
        "description": description,
        "example": example,
        "nodeType": "Term",
        "glossary": glossaryBody
    }
    response = issuePost(createTermURL, createTermBody)
    result = response.json().get('result')
    createdTerm = result[0]
    if createdTerm:
        return createdTerm
    else:
        print("No Term created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# Find a Term that matches the supplied search criteria and is within the glossary identified by glossaryGUID
def findTerm(serverName, serverPlatformName, serverPlatformURL, userId, searchCriteria, glossaryGUID):
    """Return the first matching term that belongs to the given glossary, or None."""
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    termURL = subjectAreaURL + '/terms?searchCriteria=' + searchCriteria
    response = issueGet(termURL)
    results = response.json().get('result')
    for resultTerm in results:
        resultTermGlossary = resultTerm.get('glossary')
        if resultTermGlossary:
            resultTermGlossaryGUID = resultTermGlossary.get('guid')
            if resultTermGlossaryGUID == glossaryGUID:
                return resultTerm
    print("No Term found")
    processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# update a Term with the supplied governance classifications
def updateTermGovernanceClassifications(serverName, serverPlatformName, serverPlatformURL, userId, termGuid, retention, criticality, confidence, confidentiality):
    """Apply the supplied governance classification dicts to a term; return the updated term or None.

    Each non-None classification dict gets its 'class' discriminator filled in
    before being sent.
    """
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    termURL = subjectAreaURL + '/terms/' + termGuid
    governanceClassifications = {
        "class": "GovernanceClassifications"
    }
    if retention:
        retention['class'] = "Retention"
        governanceClassifications['retention'] = retention
    if criticality:
        criticality['class'] = "Criticality"
        governanceClassifications['criticality'] = criticality
    if confidence:
        confidence['class'] = "Confidence"
        governanceClassifications['confidence'] = confidence
    if confidentiality:
        # NOTE(review): lower-case "confidentiality" is inconsistent with the
        # capitalised class names above — confirm against the Subject Area OMAS
        # API before changing; preserved as-is.
        confidentiality['class'] = "confidentiality"
        governanceClassifications['confidentiality'] = confidentiality
    termBody = {
        "class": "Term",
        "governanceClassifications": governanceClassifications
    }
    response = issuePut(termURL, termBody)
    result = response.json().get('result')
    updatedTerm = result[0]
    if updatedTerm:
        print("updatedTerm Governance action classifications")
        print(updatedTerm.get('governanceClassifications'))
        return updatedTerm
    else:
        print("Term not updated")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# create a synonym relationship between 2 glossary terms
def createSynonym(serverName, serverPlatformName, serverPlatformURL, userId, guid1, guid2):
    """Create a Synonym relationship between the two terms; return it or None."""
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    createSynonymURL = subjectAreaURL + '/relationships/synonyms'
    end1 = {
        "class": "LineEnd",
        "nodeGuid": guid1,
        "nodeType": "Term",
        "name": "synonyms"
    }
    end2 = {
        "class": "LineEnd",
        "nodeGuid": guid2,
        "nodeType": "Term",
        "name": "synonyms"
    }
    createSynonymBody = {
        "class": "Synonym",
        "typeDefGuid": "74f4094d-dba2-4ad9-874e-d422b69947e2",
        "lineType": "Synonym",
        "name": "Synonym",
        "end1": end1,
        "end2": end2
    }
    response = issuePost(createSynonymURL, createSynonymBody)
    result = response.json().get('result')
    createdSynonym = result[0]
    if createdSynonym:
        return createdSynonym
    else:
        print("No Synonym created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# Create a valid value relationship between the valid value for and the valid value glossary terms.
def createValidValue(serverName, serverPlatformName, serverPlatformURL, userId, validValueForGuid, validValuesGuid):
    """Create a ValidValue relationship between the two terms; return it or None."""
    subjectAreaURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/access-services/subject-area/users/' + userId
    createValidValueURL = subjectAreaURL + '/relationships/valid-values'
    end1 = {
        "class": "LineEnd",
        "nodeGuid": validValueForGuid,
        "nodeType": "Term",
        "name": "validValueFor"
    }
    end2 = {
        "class": "LineEnd",
        "nodeGuid": validValuesGuid,
        "nodeType": "Term",
        "name": "validValues"
    }
    createValidValuesBody = {
        "class": "ValidValue",
        "typeDefGuid": "707a156b-e579-4482-89a5-de5889da1971",
        "lineType": "ValidValue",
        "name": "ValidValue",
        "end1": end1,
        "end2": end2
    }
    response = issuePost(createValidValueURL, createValidValuesBody)
    result = response.json().get('result')
    createdValidValue = result[0]
    if createdValidValue:
        return createdValidValue
    else:
        print("No ValidValue relationship created")
        processErrorResponse(serverName, serverPlatformName, serverPlatformURL, response)


# This function takes a glossary guid, and a file name. The file name should be a csv file that contains columns with names nameColumnHeader, descriptionColumnHeader, exampleColumn.
# - the fileName on the Mac platform that works is of the form /.../.../zzz.csv. The file needs to exist. The Python read might be sensitive to different file encodings.
# - nameColumnHeader is the column that will be used for the Glossary Term Name,
# - descriptionColumnHeader is the column that will be used for the Glossary Term Description,
# - exampleColumn is the column that will be used for the Glossary Term Example
#
# Note that the qualifiedName for each term is not specified so it is generated by the Subject Area OMAS.
def createSimpleTermsFromCSVFile(serverName, serverPlatformName, serverPlatformURL, userId, fileName, glossaryGUID, nameColumnHeader, descriptionColumnHeader, exampleColumnHeader):
    """Create one glossary term per CSV row, using the named columns for name/description/example.

    Rows without a name are skipped. Prints the count of terms created.

    FIX: the original wrote `if (descriptionColumnHeader in headers):
    isDescriptionHeaderValid` — a bare expression with no assignment — so the
    description column was never picked up even when present.
    """
    term_count = 0
    headers = []
    # open the file containing the glossary term definitions to get the headers
    try:
        with open(fileName, newline='', encoding='utf-8-sig') as csvfile:
            reader = csv.reader(csvfile)
            headers = next(reader)
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        print("Error opening File to read headers" + fileName)
    # open the file containing the glossary term definitions to read the data
    try:
        with open(fileName, newline='', encoding='utf-8-sig') as csvfile:
            isNameHeaderValid = False
            isDescriptionHeaderValid = False
            isExampleHeaderValid = False
            if (nameColumnHeader in headers):
                isNameHeaderValid = True
            if (descriptionColumnHeader in headers):
                isDescriptionHeaderValid = True
            if (exampleColumnHeader in headers):
                isExampleHeaderValid = True
            if (isNameHeaderValid):
                dictionary = csv.DictReader(csvfile)
                for dictionary_element in dictionary:
                    if (isDebug):
                        print(dictionary_element)
                    name_dictionary_element = dictionary_element[nameColumnHeader]
                    if (name_dictionary_element is None or name_dictionary_element == ''):
                        print("Found a Row without a name - so ignoring.")
                    else:
                        # only look for a description and example - if those rows exist in the csv file
                        if (isDescriptionHeaderValid):
                            description_dictionary_element = dictionary_element[descriptionColumnHeader]
                        else:
                            description_dictionary_element = ""
                        if (isExampleHeaderValid):
                            example_dictionary_element = dictionary_element[exampleColumnHeader]
                        else:
                            example_dictionary_element = ""
                        createdTerm = createTerm(serverName, serverPlatformName, serverPlatformURL, userId, glossaryGUID, name_dictionary_element, description_dictionary_element, example_dictionary_element)
                        if (isDebug):
                            printTermSummary(createdTerm)
                        term_count = term_count + 1
            else:
                print("Expected Name column " + nameColumnHeader + " does not exist in the file, so we cannot process the file")
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        print("Error opening File " + fileName)
    if (term_count > 0):
        print("Created " + str(term_count))
    else:
        print("No terms created")

# +
# Open Governance and Discovery
# Working with the engine host server and the Governance Engine OMAS of the metadata server.
## Governance Action Open Metadata Engine Services (OMES) validation of a governance service connector

def validateGovernanceActionEngineConnector(serverName, serverPlatformName, serverPlatformURL, userId, connectorProvider):
    """
    Ask the Governance Action OMES on an engine host server to validate a governance service
    connector provider class, then print the resulting provider report.

    serverName         - name of the engine host server
    serverPlatformName - display name of the platform (kept for signature consistency with siblings)
    serverPlatformURL  - root URL of the OMAG server platform
    userId             - calling user id
    connectorProvider  - fully qualified class name of the connector provider to validate
    """
    try:
        engineServiceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-services/governance-action/users/' + userId
        getStatusURL = engineServiceURL + '/validate-connector/' + connectorProvider
        response = issueGet(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                providerReport = response.json().get('providerReport')
                if providerReport:
                    connectorType = providerReport.get('connectorType')
                    if connectorType:
                        # Print each connector-type attribute only when it is present in the report.
                        print(" connectorType: ")
                        guid = connectorType.get('guid')
                        if guid:
                            print(" guid: " + guid)
                        qualifiedName = connectorType.get('qualifiedName')
                        if qualifiedName:
                            print(" qualifiedName: " + qualifiedName)
                        displayName = connectorType.get('displayName')
                        if displayName:
                            print(" displayName: " + displayName)
                        description = connectorType.get('description')
                        if description:
                            print(" description: " + description)
                        connectorProviderClassName = connectorType.get('connectorProviderClassName')
                        if connectorProviderClassName:
                            print(" connectorProviderClassName: " + connectorProviderClassName)
                        recognizedConfigurationProperties = connectorType.get('recognizedConfigurationProperties')
                        if recognizedConfigurationProperties:
                            print(" recognizedConfigurationProperties: ")
                            for configurationProperty in recognizedConfigurationProperties:
                                print(" - " + configurationProperty)
                        # The remainder of the provider report lists the request types, parameters,
                        # sources, action targets and guards that the governance service supports.
                        supportedRequestTypes = providerReport.get('supportedRequestTypes')
                        if supportedRequestTypes:
                            print(" supportedRequestTypes: ")
                            for supportedRequestType in supportedRequestTypes:
                                print(" - " + supportedRequestType)
                        supportedRequestParameters = providerReport.get('supportedRequestParameters')
                        if supportedRequestParameters:
                            print(" supportedRequestParameters: ")
                            for supportedRequestParameter in supportedRequestParameters:
                                print(" - " + supportedRequestParameter)
                        supportedRequestSourceNames = providerReport.get('supportedRequestSourceNames')
                        if supportedRequestSourceNames:
                            print(" supportedRequestSourceNames: ")
                            for supportedRequestSourceName in supportedRequestSourceNames:
                                print(" - " + supportedRequestSourceName)
                        supportedActionTargetNames = providerReport.get('supportedActionTargetNames')
                        if supportedActionTargetNames:
                            print(" supportedActionTargetNames: ")
                            for supportedActionTargetName in supportedActionTargetNames:
                                print(" - " + supportedActionTargetName)
                        supportedGuards = providerReport.get('supportedGuards')
                        if supportedGuards:
                            print(" supportedGuards: ")
                            for supportedGuard in supportedGuards:
                                print(" - " + supportedGuard)
                    else:
                        printResponse(response)
                else:
                    printResponse(response)
            else:
                printResponse(response)
        else:
            printResponse(response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


## Asset Analysis Open Metadata Engine Services (OMES) validation of a discovery service connector

def validateAssetAnalysisEngineConnector(serverName, serverPlatformName, serverPlatformURL, userId, connectorProvider):
    """
    Ask the Asset Analysis OMES on an engine host server to validate a discovery service
    connector provider class, then print the resulting connector type details.

    Parameters mirror validateGovernanceActionEngineConnector.
    """
    try:
        engineServiceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-services/asset-analysis/users/' + userId
        getStatusURL = engineServiceURL + '/validate-connector/' + connectorProvider
        response = issueGet(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                connectorType = response.json().get('connectorType')
                if connectorType:
                    guid = connectorType.get('guid')
                    if guid:
                        print(" guid: " + guid)
                    qualifiedName = connectorType.get('qualifiedName')
                    if qualifiedName:
                        print(" qualifiedName: " + qualifiedName)
                    displayName = connectorType.get('displayName')
                    if displayName:
                        print(" displayName: " + displayName)
                    description = connectorType.get('description')
                    if description:
                        print(" description: " + description)
                    connectorProviderClassName = connectorType.get('connectorProviderClassName')
                    if connectorProviderClassName:
                        print(" connectorProviderClassName: " + connectorProviderClassName)
                    recognizedConfigurationProperties = connectorType.get('recognizedConfigurationProperties')
                    if recognizedConfigurationProperties:
                        print("")
                        print(" recognizedConfigurationProperties: ")
                        for configurationProperty in recognizedConfigurationProperties:
                            print(" - " + configurationProperty)
                else:
                    printResponse(response)
            else:
                printResponse(response)
        else:
            printResponse(response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


# The configuration of governance engines and governance services is managed in an open metadata server
# through the Governance Engine OMAS. The functions below support the definition of these capabilities.

def getGovernanceEngineProperties(serverName, serverPlatformName, serverPlatformURL, userId, qualifiedName):
    """
    Retrieve the properties of a governance engine by qualified name through the
    Governance Engine OMAS.  Returns the engine's element (dict) or None when the
    engine is not defined or the call fails.
    """
    configCommandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    getGovernanceEngineURL = configCommandURLRoot + '/governance-engines/by-name/' + qualifiedName
    governanceEngineProperties = None
    response = requests.get(getGovernanceEngineURL)
    if response.status_code == 200:
        relatedHTTPCode = response.json().get('relatedHTTPCode')
        if relatedHTTPCode == 200:
            governanceEngineProperties = response.json().get('element')
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    else:
        printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    return governanceEngineProperties


def createGovernanceEngine(serverName, serverPlatformName, serverPlatformURL, userId, typeName, qualifiedName, displayName, description):
    """
    Create a governance engine definition through the Governance Engine OMAS, or reuse an
    existing one with the same qualified name.  Returns the engine's GUID, or None on error.
    """
    governanceEngineProperties = getGovernanceEngineProperties(serverName, serverPlatformName, serverPlatformURL, userId, qualifiedName)
    if (governanceEngineProperties):
        # Idempotent path: the engine already exists, so report it and return its GUID.
        print("Governance Engine " + qualifiedName + " is already defined with the following properties:")
        existingDisplayName = governanceEngineProperties.get('displayName')
        if (existingDisplayName):
            print(" displayName: " + existingDisplayName)
        existingDescription = governanceEngineProperties.get('description')
        if (existingDescription):
            print(" description: " + existingDescription)
        elementHeader = governanceEngineProperties.get('elementHeader')
        # Guard against a malformed element that lacks its header rather than raising AttributeError.
        if elementHeader:
            return elementHeader.get('guid')
        return None
    else:
        configCommandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
        createGovernanceEngineURL = configCommandURLRoot + '/governance-engines/new/' + typeName
        governanceEngineGUID = None
        try:
            body = {
                "class" : "NewGovernanceEngineRequestBody",
                "qualifiedName" : qualifiedName,
                "displayName" : displayName,
                "description" : description
            }
            response = issuePost(createGovernanceEngineURL, body)
            if response.status_code == 200:
                relatedHTTPCode = response.json().get('relatedHTTPCode')
                if relatedHTTPCode == 200:
                    governanceEngineGUID = response.json().get('guid')
                else:
                    printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        except Exception as error:
            print("Exception: %s" % error)
            print("Platform " + serverPlatformURL + " is returning an error")
        return governanceEngineGUID


def createGovernanceService(serverName, serverPlatformName, serverPlatformURL, userId, typeName, qualifiedName, displayName, description, providerClassName, configurationProperties):
    """
    Create a governance service definition (including its implementation Connection and
    ConnectorType) through the Governance Engine OMAS.  Returns the new service's GUID,
    or None on error.
    """
    configCommandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    createGovernanceServiceURL = configCommandURLRoot + '/governance-services/new/' + typeName
    governanceServiceGUID = None
    try:
        body = {
            "class" : "NewGovernanceServiceRequestBody",
            "qualifiedName" : qualifiedName,
            "displayName" : displayName,
            "description" : description,
            "connection" : {
                "class": "Connection",
                "type": {
                    "class": "ElementType",
                    "elementTypeId": "114e9f8f-5ff3-4c32-bd37-a7eb42712253",
                    "elementTypeName": "Connection",
                    "elementTypeVersion": 1,
                    "elementTypeDescription": "A set of properties to identify and configure a connector instance.",
                    "elementOrigin": "CONFIGURATION"
                },
                "qualifiedName": qualifiedName + "-implementation",
                "displayName": displayName + " Implementation Connection",
                "description": "Connection for governance service " + qualifiedName,
                "connectorType": {
                    "class": "ConnectorType",
                    "type": {
                        "class": "ElementType",
                        "elementTypeId": "954421eb-33a6-462d-a8ca-b5709a1bd0d4",
                        "elementTypeName": "ConnectorType",
                        "elementTypeVersion": 1,
                        "elementTypeDescription": "A set of properties describing a type of connector.",
                        "elementOrigin": "LOCAL_COHORT"
                    },
                    "guid": "1111f73d-e343-abcd-82cb-3918fed81da6",
                    "qualifiedName": qualifiedName + "-GovernanceServiceProvider",
                    "displayName": displayName + " Governance Service Provider Implementation",
                    "description": description,
                    "connectorProviderClassName": providerClassName
                },
                "configurationProperties" : configurationProperties
            }
        }
        response = issuePost(createGovernanceServiceURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceServiceGUID = response.json().get('guid')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceServiceGUID


def registerGovernanceServiceWithEngine(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineGUID, governanceServiceGUID, requestType):
    """
    Register a governance service with a governance engine under the supplied request type.

    Fix over the original: the original initialized an unused variable and overwrote its own
    governanceServiceGUID parameter with the response GUID without ever returning it.  The
    registration GUID from the response is now captured in its own local and returned
    (None on error), matching the return convention of the other create/register helpers.
    """
    configCommandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    registerGovernanceServiceURL = configCommandURLRoot + '/governance-engines/' + governanceEngineGUID + '/governance-services'
    registrationGUID = None
    try:
        body = {
            "class" : "GovernanceServiceRegistrationRequestBody",
            "governanceServiceGUID" : governanceServiceGUID,
            "requestType" : requestType
        }
        response = issuePost(registerGovernanceServiceURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                registrationGUID = response.json().get('guid')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return registrationGUID


# The engine host periodically refreshes its configuration. This call causes it to request fresh
# configuration from the Metadata Server immediately. This is useful in demos or labs so that there is
# no need to wait for the asynchronous update if demonstrating governance engine configuration and you want
# any configuration changes to take effect immediately.
def refreshGovernanceEngineConfig(serverName, serverPlatformName, serverPlatformURL, userId, qualifiedName):
    """Ask the engine host services to immediately re-fetch the named governance engine's configuration."""
    governanceServerCommandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/engine-host-services/users/" + userId
    refreshConfigURL = governanceServerCommandURLRoot + '/governance-engines/' + qualifiedName + '/refresh-config'
    response = requests.get(refreshConfigURL)
    if response.status_code == 200:
        relatedHTTPCode = response.json().get('relatedHTTPCode')
        if relatedHTTPCode == 200:
            print("Done.")
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    else:
        printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)


# The engine can return the current status of the governance engines that it is hosting.
# This is useful to show how the Engine Host is configured and the different types of
# governance requests that are supported.

def printGovernanceEngineStatus(governanceEngineSummary):
    """Print the fields of one governance engine summary dict; each field is optional."""
    governanceEngineName = governanceEngineSummary.get('governanceEngineName')
    if governanceEngineName:
        print(" Governance Engine: " + governanceEngineName)
    governanceEngineTypeName = governanceEngineSummary.get('governanceEngineTypeName')
    if governanceEngineTypeName:
        print(" type: " + governanceEngineTypeName)
    governanceEngineService = governanceEngineSummary.get('governanceEngineService')
    if governanceEngineService:
        print(" supporting service: " + governanceEngineService)
    governanceEngineStatus = governanceEngineSummary.get('governanceEngineStatus')
    if governanceEngineStatus:
        print(" status: " + governanceEngineStatus)
    governanceEngineGUID = governanceEngineSummary.get('governanceEngineGUID')
    if governanceEngineGUID:
        print(" guid: " + governanceEngineGUID)
    governanceEngineDescription = governanceEngineSummary.get('governanceEngineDescription')
    if governanceEngineDescription:
        print(" description: " + governanceEngineDescription)
    requestTypes = governanceEngineSummary.get('governanceRequestTypes')
    if requestTypes:
        print(" requestTypes: ")
        for x in range(len(requestTypes)):
            print(" - " + requestTypes[x])


def checkGovernanceEngineStatus(governanceEngineSummary, desiredState):
    """Return True when the summary's governanceEngineStatus equals desiredState."""
    governanceEngineStatus = governanceEngineSummary.get('governanceEngineStatus')
    if governanceEngineStatus == desiredState:
        return True
    else:
        return False


def getGovernanceEngineSummary(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName):
    """Fetch one governance engine's summary from the engine host services; None on error."""
    governanceEngineSummary = None
    try:
        governanceServerRootURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-host-services/users/' + userId
        getStatusURL = governanceServerRootURL + '/governance-engines/' + governanceEngineName + '/summary'
        response = requests.get(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceEngineSummary = response.json().get('governanceEngineSummary')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceEngineSummary


def waitForConfiguringGovernanceEngine(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName):
    """Poll (1s interval) until the named governance engine reports CONFIGURING status."""
    governanceEngineSummary = getGovernanceEngineSummary(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName)
    if governanceEngineSummary != None:
        while not checkGovernanceEngineStatus(governanceEngineSummary, "CONFIGURING"):
            print(" ... Waiting for status change")
            time.sleep(1)
            governanceEngineSummary = getGovernanceEngineSummary(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName)
        print("")


def waitForRunningGovernanceEngine(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName):
    """Poll (1s interval) until the named governance engine reports RUNNING status."""
    governanceEngineSummary = getGovernanceEngineSummary(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName)
    if governanceEngineSummary != None:
        while not checkGovernanceEngineStatus(governanceEngineSummary, "RUNNING"):
            print(" ... Waiting for status change")
            time.sleep(1)
            governanceEngineSummary = getGovernanceEngineSummary(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName)
        print("")


def printGovernanceEngineStatuses(serverName, serverPlatformName, serverPlatformURL, userId):
    """Print the summaries of every governance engine hosted by the engine host server."""
    try:
        governanceServerRootURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-host-services/users/' + userId
        getStatusURL = governanceServerRootURL + '/governance-engines/summary'
        response = requests.get(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceEngineSummaries = response.json().get('governanceEngineSummaries')
                if governanceEngineSummaries:
                    if len(governanceEngineSummaries) == 1:
                        print("One governance engine defined for engine host server " + serverName)
                    else:
                        print(str(len(governanceEngineSummaries)) + " governance engines defined for engine host server " + serverName)
                    print(' ')
                    for x in range(len(governanceEngineSummaries)):
                        printGovernanceEngineStatus(governanceEngineSummaries[x])
                        print(" ")
                        time.sleep(1)
                else:
                    print("No governance engines defined for engine host server " + serverName)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def runDiscoveryService(serverName, serverPlatformName, serverPlatformURL, userId, discoveryEngineName, discoveryRequestType, requestType):
    """
    Ask the Asset Analysis OMES to run a discovery request; returns the analysis report GUID.
    NOTE(review): the final parameter is named requestType but is appended after '/assets/'
    in the URL, so it appears to be an asset GUID — confirm against callers before renaming.
    """
    try:
        discoveryServerRootURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-services/asset-analysis/users/' + userId
        requestDiscoveryURL = discoveryServerRootURL + '/discovery-engines/' + discoveryEngineName + '/discovery-request-types/' + discoveryRequestType + '/assets/' + requestType
        body = {
            "class" : "DiscoveryRequestRequestBody",
        }
        response = issuePost(requestDiscoveryURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                reportGUID = response.json().get('guid')
                if reportGUID:
                    return reportGUID
                else:
                    print("No discovery results returned from Asset Analysis OMES in server " + serverName)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def getDiscoveryReport(serverName, serverPlatformName, serverPlatformURL, userId, discoveryEngineName, reportGUID):
    """Retrieve and print a discovery analysis report from the Asset Analysis OMES."""
    try:
        discoveryServerRootURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-services/asset-analysis/users/' + userId
        requestReportURL = discoveryServerRootURL + '/discovery-engines/' + discoveryEngineName + '/discovery-analysis-reports/' + reportGUID
        response = issueGet(requestReportURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                printResponse(response)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def getDiscoveryReportAnnotations(serverName, serverPlatformName, serverPlatformURL, userId, discoveryEngineName, reportGUID):
    """Retrieve and print the first 100 annotations attached to a discovery analysis report."""
    try:
        discoveryServerRootURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/engine-services/asset-analysis/users/' + userId
        requestReportURL = discoveryServerRootURL + '/discovery-engines/' + discoveryEngineName + '/discovery-analysis-reports/' + reportGUID + '/annotations?startingFrom=0&maximumResults=100'
        response = requests.get(requestReportURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                printResponse(response)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


# +
#
# Working with Governance Actions, Governance Action Processes and Governance Action Types through the Governance Engine OMAS
#

def initiateGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineName, qualifiedName, requestType, requestParameters):
    """Kick off a governance action on the named engine; returns its GUID or None on error."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    initiateGovernanceActionURL = commandURLRoot + '/governance-engines/' + governanceEngineName + '/governance-actions/initiate'
    governanceActionGUID = None
    try:
        body = {
            "class" : "GovernanceActionRequestBody",
            "qualifiedName" : qualifiedName,
            "requestType" : requestType,
            "requestParameters" : requestParameters
        }
        response = issuePost(initiateGovernanceActionURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceActionGUID = response.json().get('guid')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceActionGUID


def printGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceActionGUID):
    """Fetch a governance action by GUID and pretty-print its properties as JSON."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    getGovernanceActionURL = commandURLRoot + '/governance-actions/' + governanceActionGUID
    try:
        response = issueGet(getGovernanceActionURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                element = response.json().get('element')
                if element:
                    properties = element.get('properties')
                    if properties:
                        prettyResponse = json.dumps(properties, indent=4)
                    else:
                        prettyResponse = json.dumps(element, indent=4)
                else:
                    # NOTE(review): json.dumps on the raw response object (not response.json())
                    # would raise TypeError; this fallback branch looks untested — confirm.
                    prettyResponse = json.dumps(response, indent=4)
                print(prettyResponse)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")


def checkGovernanceActionStatus(governanceActionElement, desiredState):
    """Return True when the governance action element's actionStatus equals desiredState."""
    governanceActionProperties = governanceActionElement.get('properties')
    governanceActionStatus = governanceActionProperties.get('actionStatus')
    if governanceActionStatus == desiredState:
        return True
    else:
        return False


def checkGovernanceActionCompletion(governanceActionElement):
    """Return True when the governance action element carries a completionTime."""
    governanceActionProperties = governanceActionElement.get('properties')
    governanceActionCompletionTime = None
    governanceActionCompletionTime = governanceActionProperties.get('completionTime')
    if governanceActionCompletionTime:
        return True
    else:
        return False


def getGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceActionGUID):
    """Fetch a governance action element by GUID; returns the element dict or None on error."""
    governanceActionElement = None
    try:
        commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
        getGovernanceActionURL = commandURLRoot + '/governance-actions/' + governanceActionGUID
        response = requests.get(getGovernanceActionURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceActionElement = response.json().get('element')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceActionElement


def waitForRunningGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceActionGUID):
    """Poll (1s interval) until the governance action records a completion time."""
    governanceActionElement = getGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceActionGUID)
    if governanceActionElement != None:
        while not checkGovernanceActionCompletion(governanceActionElement):
            print(" ... Waiting for completion status change")
            time.sleep(1)
            governanceActionElement = getGovernanceAction(serverName, serverPlatformName, serverPlatformURL, userId, governanceActionGUID)
        print("")


def getGovernanceActions(serverName, serverPlatformName, serverPlatformURL, userId):
    """Fetch all governance action elements known to the metadata server; None on error."""
    governanceActions = None
    try:
        commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
        getGovernanceActionURL = commandURLRoot + '/governance-actions?startFrom=0&pageSize=0'
        response = requests.get(getGovernanceActionURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceActions = response.json().get('elements')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceActions


def printGovernanceActionSummary(governanceAction):
    """Print one governance action as 'STATUS \t| qualifiedName'."""
    properties = governanceAction.get('properties')
    if properties:
        qualifiedName = properties.get('qualifiedName')
        actionStatus = properties.get('actionStatus')
        print(actionStatus + " \t| " + qualifiedName )


def monitorGovernanceActions(serverName, serverPlatformName, serverPlatformURL, userId):
    """Print a one-line summary for every governance action on the server."""
    governanceActions = getGovernanceActions(serverName, serverPlatformName, serverPlatformURL, userId)
    if governanceActions != None:
        for x in range(len(governanceActions)):
            printGovernanceActionSummary(governanceActions[x])


def createGovernanceActionProcess(serverName, serverPlatformName, serverPlatformURL, userId, qualifiedName, displayName, description, technicalName, technicalDescription):
    """Create an ACTIVE governance action process; returns its GUID or None on error."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    createGovernanceActionProcessURL = commandURLRoot + '/governance-action-processes/new'
    governanceActionProcessGUID = None
    try:
        body = {
            "class" : "NewGovernanceActionProcessRequestBody",
            "processStatus" : "ACTIVE",
            "properties" : {
                "class" : "GovernanceActionProcessProperties",
                "qualifiedName" : qualifiedName,
                "displayName" : displayName,
                "description" : description,
                "technicalName" : technicalName,
                "technicalDescription" : technicalDescription,
                "owner" : userId,
                "ownerCategory" : "USER_ID"
            }
        }
        response = issuePost(createGovernanceActionProcessURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceActionProcessGUID = response.json().get('guid')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceActionProcessGUID


def createGovernanceActionType(serverName, serverPlatformName, serverPlatformURL, userId, governanceEngineGUID, qualifiedName, supportedGuards, requestType, requestParameters):
    """Create a governance action type bound to a governance engine; returns its GUID or None."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    createGovernanceActionTypeURL = commandURLRoot + '/governance-action-types/new'
    governanceActionTypeGUID = None
    try:
        body = {
            "class" : "GovernanceActionTypeProperties",
            "qualifiedName" : qualifiedName,
            "supportedGuards" : supportedGuards,
            "governanceEngineGUID" : governanceEngineGUID,
            "requestType" : requestType,
            "requestParameters" : requestParameters
        }
        response = issuePost(createGovernanceActionTypeURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                governanceActionTypeGUID = response.json().get('guid')
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")
    return governanceActionTypeGUID


def setupFirstActionType(serverName, serverPlatformName, serverPlatformURL, userId, processGUID, actionTypeGUID, optionalGuard):
    """Link an action type as the first step of a governance action process (guard optional)."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    firstActionTypeURL = commandURLRoot + '/governance-action-processes/' + processGUID + '/first-action-type/' + actionTypeGUID + '/new'
    try:
        # The request body is just the raw guard string, hence issueDataPost rather than issuePost.
        body = optionalGuard
        response = issueDataPost(firstActionTypeURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode != 200:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")


def setupNextActionType(serverName, serverPlatformName, serverPlatformURL, userId, currentActionTypeGUID, nextActionTypeGUID, guard, mandatoryGuard, ignoreMultipleTriggers):
    """Chain nextActionTypeGUID after currentActionTypeGUID, guarded by the supplied guard."""
    commandURLRoot = serverPlatformURL + "/servers/" + serverName + "/open-metadata/access-services/governance-engine/users/" + userId
    nextActionTypeURL = commandURLRoot + '/governance-action-types/' + currentActionTypeGUID + '/next-action-types/' + nextActionTypeGUID + '/new'
    try:
        body = {
            "class" : "NewGovernanceActionTypeRequestBody",
            "guard" : guard,
            "mandatoryGuard" : mandatoryGuard,
            "ignoreMultipleTriggers" : ignoreMultipleTriggers
        }
        response = issuePost(nextActionTypeURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode != 200:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error )
        print("Platform " + serverPlatformURL + " is returning an error")


# +
#
# Interaction with the Integration Daemon OMAG Server
#

def printIntegrationConnectorStatus(integrationConnectorSummary):
    """Print one integration connector's name, status, and status-dependent details."""
    connectorName = integrationConnectorSummary.get('connectorName')
    if connectorName:
        print(" Integration Connector: " + connectorName)
    connectorStatus = integrationConnectorSummary.get('connectorStatus')
    if connectorStatus:
        print(" status: " + connectorStatus)
        if connectorStatus == "FAILED":
            failingExceptionMessage = integrationConnectorSummary.get('failingExceptionMessage')
            if failingExceptionMessage:
                print(" failingExceptionMessage: " + failingExceptionMessage)
        else:
            # Healthy connectors report their refresh timing instead of an exception.
            lastStatusChange = integrationConnectorSummary.get('lastStatusChange')
            if lastStatusChange:
                print(" lastStatusChange: " + lastStatusChange)
            lastRefreshTime = integrationConnectorSummary.get('lastRefreshTime')
            if lastRefreshTime:
                print(" lastRefreshTime: " + lastRefreshTime)
            minMinutesBetweenRefresh = integrationConnectorSummary.get('minMinutesBetweenRefresh')
            if minMinutesBetweenRefresh:
                print(" minMinutesBetweenRefresh: " + str(minMinutesBetweenRefresh))


def printIntegrationServiceSummary(integrationServiceSummary):
    """Print one integration service's name and the reports of its connectors."""
    integrationServiceName = integrationServiceSummary.get('integrationServiceFullName')
    if integrationServiceName:
        print("Integration Service: " + integrationServiceName)
    connectors = integrationServiceSummary.get('integrationConnectorReports')
    if connectors:
        print(" Integration Connector Reports: ")
        for x in range(len(connectors)):
            printIntegrationConnectorStatus(connectors[x])


def getIntegrationDaemonStatus(serverName, serverPlatformName, serverPlatformURL, userId):
    """Fetch and print the status of every integration service hosted by the integration daemon."""
    try:
        integrationDaemonURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/integration-daemon/users/' + userId
        getStatusURL = integrationDaemonURL + '/status'
        response = issueGet(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                integrationServiceSummaries = response.json().get('integrationServiceSummaries')
                if integrationServiceSummaries:
                    if len(integrationServiceSummaries) == 1:
                        print("One integration service defined for integration daemon " + serverName)
                    else:
                        print(str(len(integrationServiceSummaries)) + " integration services defined for integration daemon " + serverName)
                    print(' ')
                    for x in range(len(integrationServiceSummaries)):
                        printIntegrationServiceSummary(integrationServiceSummaries[x])
                        print(" ")
                else:
                    # NOTE(review): this message mentions governance engines / engine host but this
                    # function reports on an integration daemon — looks like a copy-paste slip.
                    print("No governance engines defined for engine host server " + serverName)
            else:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def restartIntegrationConnector(serverName, serverPlatformName, serverPlatformURL, userId, serviceURLMarker, connectorName):
    """Ask the integration daemon to restart the named connector of one integration service."""
    try:
        integrationDaemonURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/integration-daemon/users/' + userId
        restartURL = integrationDaemonURL + '/integration-services/' + serviceURLMarker + '/restart'
        body = {
            "class" : "NameRequestBody",
            "name" : connectorName
        }
        response = issuePost(restartURL, body)
        # Unlike the siblings, only the transport status is checked here (no relatedHTTPCode check).
        if response.status_code == 200:
            print("Done.")
        else:
            printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def updateConnectorConfigurationProperties(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, connectorName, configurationProperties):
    """Merge new configuration properties into a running integration connector."""
    try:
        # NOTE(review): '/integration-daemon/' + '/users/' yields a doubled slash in the URL —
        # apparently tolerated by the server, but worth confirming.
        integrationDaemonURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/integration-daemon/' + '/users/' + userId + '/integration-services/' + serviceURLMarker
        updateConnectorURL = integrationDaemonURL + '/connectors/configuration-properties'
        body = {
            "class" : "ConnectorConfigPropertiesRequestBody",
            "connectorName" : connectorName,
            "mergeUpdate" : True,
            "configurationProperties" : configurationProperties
        }
        response = issuePost(updateConnectorURL, body)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode != 200:
                printUnexpectedResponse(serverName, serverPlatformName, serverPlatformURL, response)
            else:
                print("Done.")
        else:
            printResponse(response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


def getIntegrationConnectorConfigProperties(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, connectorName):
    """Fetch and print the configuration properties of one integration connector."""
    try:
        # NOTE(review): same doubled slash as updateConnectorConfigurationProperties — confirm.
        integrationDaemonURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/integration-daemon/' + '/users/' + userId + '/integration-services/' + serviceURLMarker
        getConfigPropertiesURL = integrationDaemonURL + '/connectors/' + connectorName + '/configuration-properties'
        response = issueGet(getConfigPropertiesURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                printResponse(response)
            else:
                printResponse(response)
        else:
            printResponse(response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")


# +
## Open Metadata Integration Services

def validateIntegrationConnector(serverName, serverPlatformName, serverPlatformURL, serviceURLMarker, userId, connectorProvider):
    """
    Ask the named integration service (OMIS) to validate an integration connector provider
    class and print the resulting connector type details.
    """
    try:
        integrationServiceURL = serverPlatformURL + '/servers/' + serverName + '/open-metadata/integration-services/' + serviceURLMarker + '/users/' + userId
        getStatusURL = integrationServiceURL + '/validate-connector/' + connectorProvider
        response = issueGet(getStatusURL)
        if response.status_code == 200:
            relatedHTTPCode = response.json().get('relatedHTTPCode')
            if relatedHTTPCode == 200:
                connectorType = response.json().get('connectorType')
                if connectorType:
                    guid = connectorType.get('guid')
                    if guid:
                        print(" guid: " + guid)
                    qualifiedName = connectorType.get('qualifiedName')
                    if qualifiedName:
                        print(" qualifiedName: " + qualifiedName)
                    displayName = connectorType.get('displayName')
                    if displayName:
                        print(" displayName: " + displayName)
                    description = connectorType.get('description')
                    if description:
                        print(" description: " + description)
                    connectorProviderClassName = connectorType.get('connectorProviderClassName')
                    if connectorProviderClassName:
                        print(" connectorProviderClassName: " + connectorProviderClassName)
                    recognizedConfigurationProperties = connectorType.get('recognizedConfigurationProperties')
                    if recognizedConfigurationProperties:
                        print("")
                        print(" recognizedConfigurationProperties: ")
                        for x in range(len(recognizedConfigurationProperties)):
                            print(" - " + recognizedConfigurationProperties[x])
                else:
                    printResponse(response)
            else:
                printResponse(response)
        else:
            printResponse(response)
    except Exception as error:
        print("Exception: %s" % error)
        print("Platform " + serverPlatformURL + " is returning an error")
# -

# +
#
# Perform basic checks to ensure the calling notebook has a good environment to work against.
# These are the only calls to run functions defined above.
#

print("\nChecking OMAG Server Platform availability...")

# Bring up each platform with its expected set of metadata servers.
activatePlatform(corePlatformName, corePlatformURL, [cocoMDS2Name, cocoMDS3Name, cocoMDS5Name, cocoMDS6Name])
activatePlatform(dataLakePlatformName, dataLakePlatformURL, [cocoMDS1Name, cocoMDS4Name, cocoView1Name ])
activatePlatform(devPlatformName, devPlatformURL, [cocoMDSxName])

print ("Done.")
print (" ")
# -
open-metadata-resources/open-metadata-labs/common/environment-check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: CellStar
#     language: python
#     name: cellstar
# ---

# +
# #!/usr/bin/env python
# coding: utf-8

# # Stomach Cancer Diagnosis by Graph Neural Networks
# ## PyTorch Implementation for GCN + TopKPooling with LeNet5 Architecture
# ### <NAME>. @UNSW, Dec 2019
# ### <NAME> @Monash

# ## Main Program

# In[ ]:

import os
import time

from matplotlib import pyplot as plt
import numpy as np
import scipy.io as sio
from sklearn.metrics import confusion_matrix
import matplotlib
import argparse
from sklearn.metrics import f1_score, accuracy_score, auc, roc_curve

import torch
import torch.nn.functional as F
from torch.nn import Sequential as Seq, Linear as Lin, ReLU
from torch_geometric.data import Data, DataLoader
from torch_geometric.nn import GraphConv, GINConv, TopKPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp


def trim_axs(axs, N):
    """Trim a grid of matplotlib axes down to exactly N axes, removing the surplus."""
    axs = axs.flat
    for ax in axs[N:]:
        ax.remove()
    return axs[:N]


def plot_multi_label_confusion_matrix(_save_path, y_true, y_pred, labels,
                                      normalize=False, title=None, cmap=plt.cm.Blues):
    """Plot one 2x2 confusion matrix per label column and save the figure.

    y_true / y_pred are (n_samples, n_labels) indicator arrays; one subplot is
    drawn per label.  `normalize`, `title` and `cmap` are accepted for
    interface compatibility but are not used by the current implementation.
    """
    plt.close('all')
    plt.style.use("ggplot")
    plt.rcParams.update({'font.size': 8})
    plt.rcParams.update({'font.family': 'Arial'})
    conf_mat_dict = {}
    class_num = len(labels)
    plot_rows = int(class_num / 4) + 1
    plot_cols = 4 if class_num >= 4 else class_num
    for label_col in range(class_num):
        y_true_label = y_true[:, label_col]
        y_pred_label = y_pred[:, label_col]
        print(y_true_label)
        print(y_pred_label)
        conf_mat_dict[labels[label_col]] = confusion_matrix(y_pred=y_pred_label, y_true=y_true_label)
    fig, axes = plt.subplots(nrows=plot_rows, ncols=plot_cols, sharex=False, sharey=False,
                             gridspec_kw={'wspace': 0.5, 'hspace': 0.05}, figsize=(10, 10))
    axes = trim_axs(axes, class_num)
    for ii in range(len(labels)):
        _label = labels[ii]
        _matrix = conf_mat_dict[_label]
        axes[ii].imshow(_matrix, interpolation='nearest', cmap=plt.cm.Blues)
        axes[ii].set(xticks=np.arange(_matrix.shape[1]),
                     yticks=np.arange(_matrix.shape[0]),
                     # ... and label them with the respective list entries
                     xticklabels=["Neg", "Pos"], yticklabels=["Neg", "Pos"],
                     title=_label,
                     ylabel='True label',
                     xlabel='Predicted label')
        fmt = 'd'
        thresh = _matrix.max() / 2.
        for i in range(_matrix.shape[0]):
            for j in range(_matrix.shape[1]):
                axes[ii].text(j, i, format(_matrix[i, j], fmt),
                              ha="center", va="center", fontsize=8,
                              color="white" if _matrix[i, j] > thresh else "black")
    plt.savefig(_save_path, dpi=100, pad_inches=0.1, bbox_inches='tight')


# In[ ]:

def calculate_metrics(gts, ops, preds, class_num, labels, outputs, mode):
    """Accumulate ground truths/predictions and compute per-class accuracy and AUC.

    gts/ops/preds are running accumulators (stacked row-wise); `mode` truthy
    selects the binary path (outputs is a vector of positive-class scores),
    otherwise outputs is a (n, class_num) score matrix that is argmax-encoded.
    Returns (acc_list, auc_list, micro_f1, gts, ops, preds).
    """
    if mode:
        gts = np.vstack([gts, labels.cpu()]) if gts.size else labels.cpu()
        y_pred = outputs.unsqueeze(1)
        y_pred = torch.cat([1.0 - y_pred, y_pred], dim=1)
        y_pred = torch.max(y_pred, dim=1)[1]
        preds = np.vstack([preds, y_pred.cpu()]) if preds.size else y_pred.cpu()
    else:
        _labels = labels.cpu()
        tmp = torch.zeros(len(_labels), class_num)
        for idx, ele in enumerate(_labels):
            tmp[idx][ele] = 1
        gts = np.vstack([gts, tmp]) if gts.size else tmp
        # one-hot encode the row-wise argmax of the score matrix
        view = outputs.view(-1, class_num)
        y_pred = (view == view.max(dim=1, keepdim=True)[0]).view_as(outputs).type(torch.ByteTensor)
        preds = np.vstack([preds, y_pred.cpu()]) if preds.size else y_pred.cpu()
    acc_list = []
    auc_list = []
    f1 = f1_score(gts, preds, average="micro")
    for j in range(0, class_num):
        gts_i = gts[:, j]
        preds_i = preds[:, j]
        ops_i = ops[:, j]
        fpr, tpr, thresholds = roc_curve(gts_i, ops_i)
        acc_score = accuracy_score(gts_i, preds_i)
        auc_score = auc(fpr, tpr)
        acc_list.append(acc_score)
        auc_list.append(auc_score)
        print("class_num: %d, acc_score: %f, auc_score: %f" % (j, acc_score, auc_score))
    return acc_list, auc_list, f1, gts, ops, preds


def plot_confusion_matrix(_model, y_true, y_pred, classes,
                          normalize=False, title=None, cmap=plt.cm.Blues):
    """Convenience wrapper saving the multi-label confusion matrix under a fixed path."""
    # NOTE(review): output path is hard-coded to a user-specific directory.
    plot_multi_label_confusion_matrix('/home/yuguang/cellstar/figures/%s_Confusion_matrix.png' % _model,
                                      y_true, y_pred, classes)


def plot_roc_curve(pred_y, test_y, class_label, n_classes, fig_name="roc_auc.png"):
    """Plot one ROC curve per class (with AUC in the legend) and save to fig_name."""
    colors = ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2",
              "#D55E00", "#CC79A7", "#000000", "#66CC99", "#999999"]
    plt.close('all')
    plt.style.use("ggplot")
    matplotlib.rcParams['font.family'] = "Arial"
    plt.figure(figsize=(8, 8), dpi=400)
    for i in range(n_classes):
        _tmp_pred = pred_y
        _tmp_label = test_y
        _fpr, _tpr, _ = roc_curve(_tmp_label[:, i], _tmp_pred[:, i])
        _auc = auc(_fpr, _tpr)
        plt.plot(_fpr, _tpr, color=colors[i],
                 label=r'%s ROC (AUC = %0.3f)' % (class_label[i], _auc), lw=2, alpha=.9)
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.01])
    plt.ylim([0.0, 1.01])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.legend(loc="lower right")
    plt.savefig(fig_name, dpi=400)
    plt.close('all')


##Define Model Class
class GCNTopK(torch.nn.Module):
    """Four GraphConv + TopKPooling blocks with jumping-knowledge style summation.

    Each block contributes a [global_max_pool | global_mean_pool] readout; the
    four readouts are summed and passed through three linear layers.  forward()
    returns (log_softmax, sigmoid) over the final linear layer.
    """

    def __init__(self, num_feature, num_class, nhid=256, pooling_ratio=0.75):
        super(GCNTopK, self).__init__()
        self.nhid = nhid
        self.pooling_ratio = pooling_ratio
        self.conv1 = GraphConv(int(num_feature), self.nhid)
        self.pool1 = TopKPooling(self.nhid, ratio=self.pooling_ratio)
        self.conv2 = GraphConv(self.nhid, self.nhid)
        self.pool2 = TopKPooling(self.nhid, ratio=self.pooling_ratio)
        self.conv3 = GraphConv(self.nhid, self.nhid)
        self.pool3 = TopKPooling(self.nhid, ratio=self.pooling_ratio)
        # one more conv-pooling block, i.e., conv4 and pool4
        self.conv4 = GraphConv(self.nhid, self.nhid)
        self.pool4 = TopKPooling(self.nhid, ratio=self.pooling_ratio)
        self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
        self.lin2 = torch.nn.Linear(self.nhid, self.nhid // 2)
        self.lin3 = torch.nn.Linear(self.nhid // 2, num_class)

    def forward(self, data):
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, edge_attr, batch, _, _ = self.pool1(x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, edge_attr, batch, _, _ = self.pool2(x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, edge_attr, batch, _, _ = self.pool3(x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv4(x, edge_index))
        x, edge_index, edge_attr, batch, _, _ = self.pool4(x, edge_index, edge_attr, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3 + x4

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        y1 = F.log_softmax(self.lin3(x), dim=-1)
        y2 = torch.sigmoid(self.lin3(x))
        return y1, y2


##GINTopK
class GINTopK(torch.nn.Module):
    """Same architecture as GCNTopK but with GINConv blocks (fixed pooling ratio 0.8)."""

    def __init__(self, num_feature, num_class, nhid):
        super(GINTopK, self).__init__()
        self.conv1 = GINConv(Seq(Lin(num_feature, nhid), ReLU(), Lin(nhid, nhid)))
        self.pool1 = TopKPooling(nhid, ratio=0.8)
        self.conv2 = GINConv(Seq(Lin(nhid, nhid), ReLU(), Lin(nhid, nhid)))
        self.pool2 = TopKPooling(nhid, ratio=0.8)
        self.conv3 = GINConv(Seq(Lin(nhid, nhid), ReLU(), Lin(nhid, nhid)))
        self.pool3 = TopKPooling(nhid, ratio=0.8)
        self.conv4 = GINConv(Seq(Lin(nhid, nhid), ReLU(), Lin(nhid, nhid)))
        self.pool4 = TopKPooling(nhid, ratio=0.8)
        self.lin1 = torch.nn.Linear(2 * nhid, nhid)
        self.lin2 = torch.nn.Linear(nhid, nhid // 2)
        self.lin3 = torch.nn.Linear(nhid // 2, num_class)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv4(x, edge_index))
        x, edge_index, _, batch, _, _ = self.pool4(x, edge_index, None, batch)
        x4 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = x1 + x2 + x3 + x4
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(self.lin2(x))
        y1 = F.log_softmax(self.lin3(x), dim=-1)
        y2 = torch.sigmoid(self.lin3(x))
        return y1, y2


def train(model, train_loader, device):
    """One training epoch; returns the mean NLL loss per graph.

    NOTE(review): relies on a module-level `optimizer`; the main loop below
    inlines this logic instead of calling it.
    """
    model.train()
    loss_all = 0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        output, _ = model(data)
        loss = F.nll_loss(output, data.y)
        loss.backward()
        loss_all += data.num_graphs * loss.item()
        optimizer.step()
    return loss_all / len(train_loader.dataset)


def test(model, loader):
    """Evaluate `model` on `loader`.

    Returns (accuracy, mean_loss, per_batch_predictions, sigmoid_outputs,
    one-hot ground truth, flat labels, one-hot predictions).
    NOTE(review): the per-batch prediction list is later indexed per-sample by
    the callers, which is only correct because test loaders use batch_size=1.
    """
    model.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    correct = 0.
    loss = 0.
    pred_1 = list()
    out_1 = np.array([])
    gt_l = np.array([])
    pred_bi = np.array([])
    label = np.array([])
    for data in loader:
        data = data.to(device)
        out, out2 = model(data)
        pred = out.max(dim=1)[1]
        correct += pred.eq(data.y).sum().item()
        loss += F.nll_loss(out, data.y, reduction='sum').item()
        pred_1.append(pred.cpu().detach().numpy())
        out_1 = np.vstack([out_1, out2.cpu().detach().numpy()]) if out_1.size else out2.cpu().detach().numpy()
        _tmp_label = data.y.cpu().detach().numpy()
        for _label in _tmp_label:
            if _label == 0:
                _label_2d = np.array([1, 0])
            elif _label == 1:
                _label_2d = np.array([0, 1])
            gt_l = np.vstack([gt_l, _label_2d]) if gt_l.size else _label_2d
        for _pred in pred:
            if _pred == 0:
                _pred_bi = np.array([1, 0])
            if _pred == 1:
                _pred_bi = np.array([0, 1])
            pred_bi = np.vstack([pred_bi, _pred_bi]) if pred_bi.size else _pred_bi
        label = np.hstack([label, _tmp_label]) if label.size else _tmp_label
    return correct * 1.0 / len(loader.dataset), loss / len(loader.dataset), pred_1, out_1, gt_l, label, pred_bi


# +
## Parameter Setting
# (The original argparse-based configuration was commented out in favour of
# the literal values below; the dead commented code has been removed.)
batch_size = 256
learning_rate = 5e-4
weight_decay = 1e-4
nhid = 512
pooling_ratio = 0.5
epochs = 200
num_layers = 4
runs = 10
fold = 4

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device: {}'.format(device))

# +
import os


def load_dataset(dataset_path):
    """Load one split of the stomach-cancer graph dataset from .mat files.

    Walks `dataset_path` for the weighted_edge_index / edge_attr / feature /
    label / pid .mat files and builds one torch_geometric Data per graph
    (already moved to `device`).  Returns (graphs, num_feature, total_edges,
    total_nodes).
    """
    ld_edge_index = ""
    ld_edge_attr = ""
    ld_feature = ""
    ld_label = ""
    ld_pid = ""
    for _root, _dirs, _files in os.walk(dataset_path):
        for _file in _files:
            if "weighted_edge_index" in _file:
                ld_edge_index = os.path.join(_root, _file)
            elif "weighted_edge_attr" in _file:
                ld_edge_attr = os.path.join(_root, _file)
            elif "weighted_feature" in _file:
                ld_feature = os.path.join(_root, _file)
            elif "weighted_label" in _file:
                ld_label = os.path.join(_root, _file)
            elif "weighted_pid.mat" in _file:
                ld_pid = os.path.join(_root, _file)

    edge_index = sio.loadmat(ld_edge_index)
    edge_index = edge_index['edge_index'][0]
    # load edge_attr
    edge_attr = sio.loadmat(ld_edge_attr)
    edge_attr = edge_attr['edge_attr'][0]
    # load feature
    feature = sio.loadmat(ld_feature)
    feature = feature['feature']
    # load label
    label = sio.loadmat(ld_label)
    label = label['label'][0]
    # load label_pid
    pid = sio.loadmat(ld_pid)
    pid = pid['pid'][0]

    stomach = list()
    num_edge = 0
    num_node = 0
    num_class = 2
    num_graph = edge_index.shape[0]
    for i in range(num_graph):
        # BUG FIX: np.int / np.float were removed in NumPy 1.24 -- use the builtins.
        edge_index_1 = np.array(edge_index[i][:, 0:2], dtype=int)
        edge_index_1 = torch.tensor(edge_index_1, dtype=torch.long).to(device)
        num_edge = num_edge + edge_index_1.shape[0]
        # NOTE(review): edge attributes are truncated to int before the float
        # tensor conversion, dropping fractional weights -- confirm intended.
        edge_attr_1 = np.array(edge_attr[i][:, 0:1], dtype=int)
        edge_attr_1 = torch.tensor(edge_attr_1, dtype=torch.float).to(device)
        feature_1 = torch.tensor(feature[i], dtype=torch.float).to(device)
        num_node = num_node + feature_1.shape[0]
        if i == 0:
            num_feature = feature_1.shape[1]
        # labels are stored 1-based in the .mat file; shift to 0-based
        label_1 = torch.tensor([label[i] - 1], dtype=torch.long).to(device)
        pid_1 = torch.tensor([pid[i]], dtype=torch.long).to(device)
        data_1 = Data(x=feature_1, edge_index=edge_index_1.t().contiguous(),
                      edge_attr=edge_attr_1, y=label_1, pid=pid_1)
        stomach.append(data_1)
    return (stomach, num_feature, num_edge, num_node)


train_data_list, num_feature, num_edge, num_node = load_dataset("data/selected_new_data_file/train_data_fold_{}/".format(fold))
val_data_list, _, _, _ = load_dataset("data/selected_new_data_file/val_data_fold_{}/".format(fold))
test_data_list, _, _, _ = load_dataset("data/selected_new_data_file/test_data/")
test_data_sur_list, _, _, _ = load_dataset("data/selected_new_data_file/test_data_surv/")
train_val_list = train_data_list + val_data_list

# generate training, validation and test data sets
nv = np.random.permutation(len(train_val_list))
# BUG FIX: the original iterated `for i in nv` and then indexed `stomach_1[nv[i]]`,
# applying the permutation twice; index the permutation once.
stomach = [train_val_list[i] for i in nv]

num_train_val = len(stomach)
num_train = int(num_train_val * 0.8)
train_loader = DataLoader(stomach[0:num_train], batch_size=batch_size, shuffle=True)
# BUG FIX: stomach[num_train:-1] silently dropped the last validation sample.
val_loader = DataLoader(stomach[num_train:], batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data_list, batch_size=1, shuffle=False)
test_surv_loader = DataLoader(test_data_sur_list, batch_size=1, shuffle=False)

# +
# import EarlyStopping
from pytorchtools import EarlyStopping

sv_dat = 'gintopk/test_data.pt'
torch.save(test_data_list, sv_dat)

num_class = 2
print('**Data Set')
print('Ave.#Edge: {:.1f}, Ave.#Feature: {:.1f}, Ave.#Node: {:.1f}, #Classes: {:d}'.format(num_edge, num_feature, num_node, num_class))
print('Train-val-test ratio: 7:1:2, Shuffle: True')
print('- number of training data:', len(train_loader))
print('- number of validation data:', len(val_loader))
print('- number of test data:', len(test_loader))
print('**Network Parameter Setting')
print('- batch size: ', batch_size)
print('- learning rate: ', learning_rate)
print('- weight decay: ', weight_decay)
print('- hidden size: ', nhid)
print('- pooling_ratio: ', pooling_ratio)
print('- maximum number of epochs: ', epochs)
print('- graph convolution: ', 'GCNConv')
print('- number of graph convoluational layers: {}x{}'.format(1, num_layers))
print('- graph pooling: ', 'TopKPooling')
print('- number of pooling layers: ', num_layers)
print('- number of fully connected layers: ', num_layers)

###############################################################
# BUG FIX: np.float was removed in NumPy 1.24.
train_loss = np.zeros((runs, epochs), dtype=float)
val_acc = np.zeros((runs, epochs))
val_loss = np.zeros((runs, epochs))
test_acc_c = np.zeros(runs)
test_loss_c = np.zeros(runs)
test_pred_c = np.zeros(runs)
test_out_c = np.zeros((runs, num_class))
ground_truth_c = np.zeros((runs, num_class))  # renamed from misspelled 'groud_truth_c'
test_acc_p = np.zeros(runs)
min_loss = 1e10 * np.ones(runs)

for run in range(runs):
    model = GINTopK(num_feature=num_feature, num_class=num_class, nhid=nhid).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

    ## Training with early stopping on validation loss (best model checkpointed)
    patience = 20
    early_stopping = EarlyStopping(patience=patience, verbose=True,
                                   path="gintopk/model_gintopk_fold{}_run{}.pth".format(fold, run))
    for epoch in range(epochs):
        model.train()
        loss_all = 0
        for i, data in enumerate(train_loader):
            data = data.to(device)
            out, out2 = model(data)
            loss = F.nll_loss(out, data.y)
            loss.backward()
            loss_all += data.num_graphs * loss.item()
            optimizer.step()
            optimizer.zero_grad()
        loss = loss_all / len(train_loader.dataset)
        train_loss[run, epoch] = loss
        val_acc[run, epoch], val_loss[run, epoch], _, _, _, _, _ = test(model, val_loader)
        print("Run: {:03d}, Epoch: {:03d}, Val loss: {:.5f}, Val acc: {:.5f}".format(run + 1, epoch + 1, val_loss[run, epoch], val_acc[run, epoch]))
        # early_stopping checkpoints the model whenever validation loss improves
        early_stopping(val_loss[run, epoch], model)
        if early_stopping.early_stop:
            print("Early stopping")
            break

    # reload the best checkpoint of this run before testing
    model = GINTopK(num_feature=num_feature, num_class=num_class, nhid=nhid).to(device)
    model.load_state_dict(torch.load("gintopk/model_gintopk_fold{}_run{}.pth".format(fold, run)))
    test_acc_c[run], test_loss_c[run], test_pred_c, test_out_c, ground_truth_c, test_label_c, test_pred_bi_c = test(model, test_loader)
    print("** Run: {:03d}, test loss: {:.5f}, test acc: {:.5f}".format(run + 1, test_loss_c[run], test_acc_c[run]))

    # Patient-level evaluation: majority vote over the (batch_size=1) cell
    # predictions belonging to each patient id.
    pid_list = list()
    test_data = list([None] * len(test_loader))
    for i, data in enumerate(test_loader):
        pid_temp = data.pid.cpu().numpy()
        gt = data.y.cpu().numpy()
        test_data[i] = [pid_temp, gt, test_pred_c[i]]
        if not pid_temp in pid_list:
            pid_list.append(pid_temp)
    num_test_p = len(pid_list)
    test_pred_1 = np.zeros([num_class, num_test_p], dtype=int)
    pred_p = np.zeros(num_test_p, dtype=int)
    test_label_p = np.zeros(num_test_p, dtype=int)
    pid_test = np.array(pid_list)
    for j in range(num_test_p):
        pid_1 = pid_list[j]
        k = 0
        for i, data in enumerate(test_loader):
            if data.pid.cpu().numpy() == pid_1:
                if k == 0:
                    test_label_p[j] = data.y.cpu().numpy()
                    k = 1
                test_pred_i = int(test_pred_c[i])
                test_pred_1[test_pred_i, j] = test_pred_1[test_pred_i, j] + 1
        pred_p[j] = np.argmax(test_pred_1[:, j])
    test_acc_p[run] = (pred_p == test_label_p).sum() * 1.0 / num_test_p
    # typo fixed in message: 'accuarcy' -> 'accuracy'
    print("Test accuracy at patient level: {:.2f}".format(test_acc_p[run] * 100))

    ## save data
    t1 = time.time()
    print("** Model {}, mean test acc (cell): {:.5f}".format(t1, np.mean(test_acc_c)))
    sv = 'gintopk/scdiag_gintopk' + '_fold' + str(fold) + '_runs' + str(runs) + '_run' + str(run) + '_epochs' + str(epochs) + '.mat'
    sio.savemat(sv, mdict={'val_loss': val_loss, 'val_acc': val_acc, 'test_loss_c': test_loss_c, 'test_acc_c': test_acc_c, 'train_loss': train_loss, 'test_pred_c': test_pred_c, 'test_out_c': test_out_c, 'ground_truth_c': ground_truth_c, 'test_label_c': test_label_c, 'test_pred_bi_c': test_pred_bi_c, 'test_acc_p': test_acc_p, 'test_pred_p': pred_p, 'pid_test': pid_test, 'test_data': test_data})
# -

for run in range(runs):
    # visualize the loss as the network trained
    fig = plt.figure(figsize=(10, 8))
    t_loss = train_loss[run][np.where(train_loss[run] > 0)]
    v_loss = val_loss[run][np.where(val_loss[run] > 0)]
    plt.plot(range(1, len(t_loss) + 1), t_loss, label='Training Loss')
    plt.plot(range(1, len(v_loss) + 1), v_loss, label='Validation Loss')
    # mark the epoch with the lowest validation loss (early-stopping checkpoint)
    minposs = np.where(v_loss == np.min(v_loss))[0][0] + 1
    plt.axvline(minposs, linestyle='--', color='r', label='Early Stopping Checkpoint')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.ylim(0, 1)  # consistent scale
    plt.xlim(0, len(v_loss) + 1)  # consistent scale
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    plt.show()
    fig.savefig('gintopk/loss_plot_fold{}_run{}.png'.format(fold, run), bbox_inches='tight', dpi=400)

# +
# Evaluate the fold-0 / run-0 checkpoint on the survival test set.
fold = 0
run = 0
num_class = 2
runs = 10
epochs = 200
# BUG FIX: np.float was removed in NumPy 1.24.
train_loss = np.zeros((runs, epochs), dtype=float)
val_acc = np.zeros((runs, epochs))
val_loss = np.zeros((runs, epochs))
test_acc_c = np.zeros(runs)
test_loss_c = np.zeros(runs)
test_pred_c = np.zeros(runs)
test_out_c = np.zeros((runs, num_class))
ground_truth_c = np.zeros((runs, num_class))  # renamed from misspelled 'groud_truth_c'
test_acc_p = np.zeros(runs)

model = GINTopK(num_feature=num_feature, num_class=num_class, nhid=nhid).to(device)
model.load_state_dict(torch.load("gintopk/model_gintopk_fold{}_run{}.pth".format(fold, run)))
test_acc_c[run], test_loss_c[run], test_pred_c, test_out_c, ground_truth_c, test_label_c, test_pred_bi_c = test(model, test_surv_loader)
print("** Run: {:03d}, test loss: {:.5f}, test acc: {:.5f}".format(run + 1, test_loss_c[run], test_acc_c[run]))

pid_list = list()
test_data = list([None] * len(test_surv_loader))
print(len(test_data))
for i, data in enumerate(test_surv_loader):
    pid_temp = data.pid.cpu().numpy()
    gt = data.y.cpu().numpy()
    test_data[i] = [pid_temp, gt, test_pred_c[i]]
    if not pid_temp in pid_list:
        pid_list.append(pid_temp)
num_test_p = len(pid_list)
test_pred_1 = np.zeros([num_class, num_test_p], dtype=int)
pred_p = np.zeros(num_test_p, dtype=int)
test_label_p = np.zeros(num_test_p, dtype=int)
pid_test = np.array(pid_list)
for j in range(num_test_p):
    pid_1 = pid_list[j]
    k = 0
    for i, data in enumerate(test_surv_loader):
        if data.pid.cpu().numpy() == pid_1:
            if k == 0:
                test_label_p[j] = data.y.cpu().numpy()
                k = 1
            test_pred_i = int(test_pred_c[i])
            test_pred_1[test_pred_i, j] = test_pred_1[test_pred_i, j] + 1
    pred_p[j] = np.argmax(test_pred_1[:, j])
test_acc_p[run] = (pred_p == test_label_p).sum() * 1.0 / num_test_p
# typo fixed in message: 'accuarcy' -> 'accuracy'
print("Test accuracy at patient level: {:.2f}".format(test_acc_p[run] * 100))

## save data
t1 = time.time()
print("** Model {}, mean test acc (cell): {:.5f}".format(t1, np.mean(test_acc_c)))
sv = 'gintopk/survival_scdiag_gintopk' + '_fold' + str(fold) + '_runs' + str(runs) + '_run' + str(run) + '_epochs' + str(epochs) + '.mat'
sio.savemat(sv, mdict={'val_loss': val_loss, 'val_acc': val_acc, 'test_loss_c': test_loss_c, 'test_acc_c': test_acc_c, 'train_loss': train_loss, 'test_pred_c': test_pred_c, 'test_out_c': test_out_c, 'ground_truth_c': ground_truth_c, 'test_label_c': test_label_c, 'test_pred_bi_c': test_pred_bi_c, 'test_acc_p': test_acc_p, 'test_pred_p': pred_p, 'pid_test': pid_test, 'test_data': test_data})
# -
scdiag_gintopk_roc_tc-binary.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import networkx as nx import numpy as np import scipy.stats from tqdm.notebook import tqdm import collections import matplotlib.pyplot as plt import matplotlib.dates as md import pandas as pd import seaborn as sns import random import json import pickle import math import re import datetime import matplotlib.dates as mdates from scipy.special import comb # + #file1 = open('../datasets/CollegeMsg.txt', 'r') file2 = open('../datasets/email-Eu-core-temporal.txt','r') Lines = file2.readlines() Lines2 = file2.readlines() #G.clear() G = nx.MultiDiGraph() count = 0 for line in tqdm(Lines): count += 1 myLine = line.split(',') if myLine[0] not in G: G.add_node(myLine[0],falsePos=1) if myLine[1] not in G: G.add_node(myLine[1],falsePos=1) G.add_edge(myLine[0],myLine[1], weight=myLine[2]) print("There were",count,"lines in the file!")##in our datasets lines correspond to sent messages in the communication system #count = 0 #I = nx.MultiDiGraph() #for line in tqdm(Lines2): # count += 1 # myLine = line.split(',') # if myLine[0] not in I: # I.add_node(myLine[0],falsePos=1) # if myLine[1] not in G: # I.add_node(myLine[1],falsePos=1) # I.add_edge(myLine[0],myLine[1], weight=myLine[2]) #print("There were",count,"lines in the 2nd file!") # - print("Number of nodes : ", G.order()) ##number of communicating parties print("Number of edges: ", G.size()) ##number of sent messages in the system print("Density of the transaction graph: ",nx.classes.function.density(G)) # + noMsgsPerNodes = [] for u in G.nodes: for v in G.nodes: if u!=v and G.number_of_edges(u,v)!=0: noMsgsPerNodes.append(G.number_of_edges(u,v)) noMsgsPerNodes2 = [] for u in I.nodes: for v in I.nodes: if u!=v and I.number_of_edges(u,v)!=0: noMsgsPerNodes2.append(I.number_of_edges(u,v)) # - 
plt.hist(noMsgsPerNodes2,bins=len(set(noMsgsPerNodes2)),color = "blue", ec="blue", rwidth = 1, alpha=0.5, label='EU Mail') plt.hist(noMsgsPerNodes, bins=len(set(noMsgsPerNodes)), color = "red", ec="red",rwidth = 1, alpha=0.7, label='College IM') plt.xscale('log') plt.yscale('log') plt.xlabel('Count', fontweight='bold') plt.ylabel('Messages') #plt.title('Number of messages between pairs of nodes') plt.tight_layout() plt.legend(loc='upper right') plt.savefig("messagesBetweenPairs.pdf") plt.show() # + inDegrees = [] inDegrees2 = [] for k,v in G.in_degree(): inDegrees.append(v) for k,v in I.in_degree(): inDegrees2.append(v) degree_sequenceA = sorted(inDegrees,reverse=True) # degree sequence degree_sequence2A = sorted(inDegrees2,reverse=True) degreeCount = collections.Counter(degree_sequence) d = {} for k,v in degreeCount.items(): d[k]=v with open('degreeInDistCollegeMsgs.txt', 'w') as fp: json.dump(d, fp) deg, cnt = zip(*degreeCount.items()) plt.plot(degree_sequenceA,'b+',marker='x',label="EU Mail") plt.plot(degree_sequence2A,'r+',marker='+',label="College IM") #plt.bar(deg, cnt, color="b") #plt.title("InDegree Distributions") plt.ylabel("Count") plt.xlabel("Degree") plt.yscale('log') plt.xscale('log') plt.legend() plt.tight_layout() plt.savefig("inDegreeHistograms.pdf") plt.show() # + outDegrees = [] outDegrees2 = [] for k,v in G.out_degree(): outDegrees.append(v) for k,v in I.out_degree(): outDegrees2.append(v) degree_sequence = sorted(outDegrees,reverse=True) # degree sequence degree_sequence2 = sorted(outDegrees2,reverse=True) degreeCount = collections.Counter(degree_sequence) d = {} for k,v in degreeCount.items(): d[k]=v with open('degreeOutDistEUMail.txt', 'w') as fp: json.dump(d, fp) deg, cnt = zip(*degreeCount.items()) #plt.bar(deg, cnt, color="b") plt.plot(degree_sequence,'b+',marker='x',label="EU Mail") plt.plot(degree_sequence2,'r+',marker='+',label="College IM") #plt.title("OutDegree Distributions") plt.ylabel("Count") plt.xlabel("Degree") plt.yscale('log') 
plt.xscale('log') plt.legend() plt.tight_layout() plt.savefig("outDegreeHistograms.pdf") plt.show() # - ## Recipient unlinkability adversarial advantage noOfUsers=11 ##2^20 minFalsePositiveRate = 9 allProbs = np.zeros((minFalsePositiveRate,noOfUsers-1)) #advantage of the sender unlinkability adversary probability = np.zeros((noOfUsers-1)) for p in range(1,minFalsePositiveRate): falsePosRate = math.pow(2,-p) for l in tqdm(range(0,noOfUsers-1)): U = 2**l ##number of all users in the system sumprob = np.float64(0.0) for i in range(0,U+1): for j in range(0,U+1): #usersInAnonSet = math.floor(2**(-p+k)) #print(p,k,allUsers,usersInAnonSet) prob = np.float64(1.0) probij = np.float64(1.0) probij = comb(U, i, exact=True)*comb(U, j, exact=True)*math.pow(falsePosRate,i+j)*math.pow(1-falsePosRate,2*U-i-j) for k in range(1,j+1): if U-i-k<0: prob = 0 break prob *= (U-i-k) prob /= U sumprob += prob * probij #print(U,falsePosRate,i,j,prob,probij) probability[l]=sumprob*0.5 #print("Users: ",U," False Positive Rate: ",falsePosRate," Prob: ",sumprob*0.5) allProbs[p]=probability allProbs = allProbs[1:] print(allProbs) x_axis_labels = ["$2^1$","$2^2$","$2^3$","$2^4$","$2^5$","$2^6$","$2^7$","$2^8$","$2^9$","$2^{10}$"] ##"$2^{11}$","$2^{12}$","$2^{13}$","$2^{14}$","$2^{15}$","$2^{16}$","$2^{17}$","$2^{18}$","$2^{19}$"] ##x_axis_labels = [2**(i+1) for i in range(0,19)] #y_axis_labels = [math.pow(2,-(i)) for i in range(1,10)] y_axis_labels = ["$2^{-1}$","$2^{-2}$","$2^{-3}$","$2^{-4}$","$2^{-5}$","$2^{-6}$","$2^{-7}$","$2^{-8}$"]#,"$2^{-9}$"] ax = sns.heatmap(allProbs,xticklabels=x_axis_labels, yticklabels = y_axis_labels,cbar_kws={'label': 'Adversary\'s advantage in the RU game'}) plt.xticks(rotation = 45) plt.yticks(rotation = 0) #ax.set_title('Receiver Unlinkability Adversarial Advantage') plt.xlabel("Number of all users", fontweight='bold') plt.ylabel("Recipients' false positive rate", fontweight='bold') plt.tight_layout() plt.savefig("receiverUnlinkability.pdf") plt.show() ## 
# Relationship Anonymity
# For each (false-positive rate, #sent messages) pair, find the smallest number
# of messages from sender s to recipient v whose flag count becomes
# statistically distinguishable (two-sided Z-test, alpha = 0.01) from the
# cover traffic alone.
noOfSentMessages=[100,250,500,1000,2500,5000,10000] ##out(s) in the paper
incomingMsgsFromS = np.arange(1,1000) ##tag_s(v) in the paper
#falsePositives = [i*0.1 for i in range(1,10)]
falsePositives = [2**(-i) for i in range(9,0,-1)]
detectableMsgs = np.zeros((len(noOfSentMessages),len(falsePositives)))
for p in falsePositives:
    for allSent in noOfSentMessages:
        for incomingFromS in incomingMsgsFromS:
            if allSent < incomingFromS:
                continue  # cannot receive more from s than s sent in total
            mu0 = allSent*p ##the expected number of links in the "cover" graph
            X = (allSent-incomingFromS)*p + incomingFromS  # expected flags when v really gets incomingFromS msgs
            s = math.sqrt(p*(1-p)*allSent)
            z = (X-mu0)/(float)(s) ##Z-score
            p_values = scipy.stats.norm.sf(abs(z))*2 #twosided
            if p_values < 0.01:
                # first detectable message count for this (allSent, p) cell
                detectableMsgs[noOfSentMessages.index(allSent)][falsePositives.index(p)] = int(incomingFromS)
                #print("DETECTABLE",p,allSent,incomingFromS)
                break
print(detectableMsgs)
y_axis_labels = ["$10000$","$5000$","$2500$","$1000$","$500$","$250$","$100$"]
y_axis_labels.reverse()
x_axis_labels = ["$2^{-9}$", "$2^{-8}$", "$2^{-7}$", "$2^{-6}$", "$2^{-5}$", "$2^{-4}$", "$2^{-3}$", "$2^{-2}$", "$2^{-1}$"]
#y_axis_labels = [math.pow(2,-(i)) for i in range(1,10)]
#x_axis_labels = ["$0.1$", "$0.2$", "$0.3$", "$0.4$", "$0.5$", "$0.6$", "$0.7$", "$0.8$", "$0.9$"]
#Create the style of the font
font = {'family' : 'serif',
        'weight' : 'bold',
        'size'   : 10}
plt.rc('font', **font) #set the font style created
sns.set(font_scale=1.1)
ax = sns.heatmap(detectableMsgs, fmt='g',annot=True, mask=detectableMsgs < 1,xticklabels=x_axis_labels, yticklabels = y_axis_labels, cbar_kws={'label': 'Messages from sender to recipient'})
plt.xticks(rotation = 45)
plt.yticks(rotation = 0)
plt.rcParams["axes.labelsize"] = 12
# (commented-out title, continued on the next lines)
#ax.set_title('No.
# of messages from a sender to a recipient \n breaking relationship anonymity for $100 <= out(s)$')
# (end of the commented-out title started on the previous lines)
plt.xlabel("Recipient's false-positive rate", fontweight='bold')
plt.ylabel("Sender's sent messages", fontweight='bold')
plt.tight_layout()
plt.savefig("relationshipAnonymity.pdf")
plt.show()

## Relationship Anonymity vol. 2.
# Same analysis as above but for small senders (out(s) <= 30), therefore a
# two-sided Student t-test with allSent-1 degrees of freedom is used instead
# of the normal approximation.
noOfSentMessages=[10,15,20,25,30] ##out(s) in the paper
incomingMsgsFromS = np.arange(1,30) ##tag_s(v) in the paper
falsePositives = [2**(-i) for i in range(9,0,-1)]
#falsePositives = [i*0.1 for i in range(1,10)]
detectableMsgs = np.zeros((len(noOfSentMessages),len(falsePositives)))
#advantage of the sender unlinkability adversary
for p in falsePositives:
    for allSent in noOfSentMessages:
        for incomingFromS in incomingMsgsFromS:
            if allSent < incomingFromS:
                continue
            mu0 = allSent*p
            X = (allSent-incomingFromS)*p + incomingFromS
            s = math.sqrt(p*(1-p)*allSent)
            t = (X-mu0)/(float)(s) ##t-score
            p_values = scipy.stats.t.sf(np.abs(t), allSent-1)*2 #two-sided
            if p_values < 0.01:
                detectableMsgs[noOfSentMessages.index(allSent)][falsePositives.index(p)] = int(incomingFromS)
                #print(p,allSent,incomingFromS)
                # keep searching when only a single message was flagged;
                # otherwise stop at the first detectable count
                if incomingFromS!=1:
                    #print("DETECTABLE",p,allSent,incomingFromS)
                    break
print(detectableMsgs)
y_axis_labels = ["$30$","$25$","$20$","$15$","$10$"]
y_axis_labels.reverse()
##x_axis_labels = [2**(i+1) for i in range(0,19)]
#y_axis_labels = [math.pow(2,-(i)) for i in range(1,10)]
x_axis_labels = ["$2^{-9}$", "$2^{-8}$", "$2^{-7}$", "$2^{-6}$", "$2^{-5}$", "$2^{-4}$", "$2^{-3}$", "$2^{-2}$", "$2^{-1}$"]
#x_axis_labels = ["$0.1$", "$0.2$", "$0.3$", "$0.4$", "$0.5$", "$0.6$", "$0.7$", "$0.8$","$0.9$"]
font = {'family' : 'serif',
        'weight' : 'bold',
        'size'   : 10}
plt.rc('font', **font) #set the font style created
sns.set(font_scale=1.1)
ax = sns.heatmap(detectableMsgs,annot=True, mask=detectableMsgs < 1,xticklabels=x_axis_labels, yticklabels = y_axis_labels, cbar_kws={'label': 'Messages from sender to recipient'})
plt.xticks(rotation = 45)
plt.yticks(rotation = 0)
#ax.set_title('No. of messages from a sender to a recipient \n breaking relationship anonymity for $out(s)<=30$')
plt.xlabel("Recipient's false-positive rate", fontweight='bold')
plt.ylabel("Sender's sent messages", fontweight='bold')
plt.tight_layout()
plt.savefig("relationshipAnonymity2.pdf")
plt.show()

## Setting randomly false positive rates to FMD users!
# Each node gets a uniformly random FMD false-positive rate from {2^-1 .. 2^-7}.
falsePositiveRates = [pow(2,-k) for k in range(1,8)]
for u in tqdm(G.nodes):
    G.nodes[u]['falsePos'] = random.choice(falsePositiveRates)
#coverG.clear()
coverG = G.copy() ##this graph will contain all the "cover" edges thanks to FMD according to recipients' false positive rates
print(G.nodes['5'])
for msg in tqdm(G.edges):
    # FIX: `G.nodes() - msg[1]` performed a set difference between the NodeView
    # and a *string* node id, which iterates the id character by character --
    # it removed nodes named by the individual characters (e.g. '1','6','2','4'
    # for id '1624') rather than the recipient itself. Wrapping the id in a set
    # excludes exactly the original recipient.
    for recipient in G.nodes() - {msg[1]}: ##for the original recipient we already have an edge in the cover graph
        recFalsePos = G.nodes[recipient]['falsePos']
        #randNum = random.uniform(0, 1)
        randNum = np.random.uniform(0,1)
        # the recipient falsely flags this message with probability recFalsePos;
        # weight 1996 marks cover (fuzzy) edges in later cells
        if randNum <= recFalsePos:
            coverG.add_edge(msg[0], recipient, weight=1996)
print("Number of nodes : ", coverG.order())
print("Number of edges: ", coverG.size())
print("Density of the transaction graph: ",nx.classes.function.density(coverG))

##Let's uncover the social graph using statistical tests (Z- and t-tests)!
##Relationship anonymity analysis
##Hereby we assume the server knows the number of sent messages per users.
##This is the case when FMD is deployed without an additional layer of anonymous communication system (e.g.
## Tor) -- end of the comment continued from the previous lines

# Per-pair statistical attack on the cover graph: for every flagged pair (u,v)
# decide via a Z-/t-test whether u really messaged v, and bucket the outcome by
# v's false-positive rate (row x) and the true message count (column y).
relevants = 0
truePositives = 0
trueNegatives = 0
falseNegatives = 0
falsePositives = 0
w, h = 7, 13;
# detailed confusion counters: row index x = -log2(falsePos)-1,
# column index y = floor(log2(#real messages))+1 (column 0 = "no real edge")
tPDetailed = [[0 for x in range(h)] for x in range(w)]
tNDetailed = [[0 for x in range(h)] for y in range(w)]
fNDetailed = [[0 for x in range(h)] for y in range(w)]
fPDetailed = [[0 for x in range(h)] for y in range(w)]
for u in tqdm(G.nodes):
    allLinks = coverG.out_degree(u)
    neighbors = list(coverG.neighbors(u)) ##this is the neighbors the server sees
    for v in neighbors:
        flags = coverG.number_of_edges(u,v)   # flagged messages u->v (real + cover)
        p = G.nodes[v]['falsePos']            # v's FMD false-positive rate
        mu0 = G.out_degree(u)*p ##in the paper we also call this ideal rate
        s = math.sqrt(G.out_degree(u)*p*(1-p))
        z = (flags-mu0)/s
        relatedNodes = False
        # normal approximation for large senders, t-test otherwise (n = 30 rule)
        if 30 < G.out_degree(u):
            p_values = scipy.stats.norm.sf(abs(z))*2
        else:
            p_values = scipy.stats.t.sf(np.abs(z), G.out_degree(u)-1)*2 ##t-test
        x = -int(math.log2(p))-1
        if 0 < G.number_of_edges(u,v):
            y = math.floor(math.log2(G.number_of_edges(u,v)))+1
            #print(x,y,G.number_of_edges(u,v))
        # NOTE(review): when the test is significant but there is no real edge,
        # `y` keeps its value from a previous iteration and the outcome is
        # counted as a *false negative* (and non-significant + real edge as a
        # *false positive*) -- both look inverted w.r.t. the usual convention;
        # confirm against the paper before reusing these counters.
        if p_values < 0.01:
            relevants+=1
            if 0 < G.number_of_edges(u,v):
                relatedNodes = True
                truePositives+=1
                #print(x,y,G.number_of_edges(u,v))
                tPDetailed[x][y] += 1
            else:
                falseNegatives+=1
                fNDetailed[x][y] += 1
        else:
            if 0==G.number_of_edges(u,v):
                trueNegatives+=1
                tNDetailed[x][0] += 1
            else:
                falsePositives+=1
                fPDetailed[x][0] += 1
                relatedNodes = True
        #print("Node",u," and node ",v," are connected: ",relatedNodes, mu0,flags,p_values,p,G.number_of_edges(u,v))
print("Precision:",truePositives/(truePositives+falsePositives))
print("Recall:",truePositives/(truePositives+falseNegatives))
print(truePositives, falsePositives, trueNegatives, falseNegatives)
# NOTE(review): `relevants` counts significant tests; the label "irrelevants"
# in the message below looks inconsistent with the variable name.
print("Number of irrelevants:",relevants)

#TP = np.array(tPDetailed)
#FP = np.array(fPDetailed)
#FN = np.array(fNDetailed)
# Aggregate the per-run confusion matrices of three simulation runs
# (TP1..FN3 are the pasted result matrices below; in notebook execution order
# they must already be defined when this cell runs).
TP = TP1+TP2+TP3
FP = FP1+FP2+FP3
FN = FN1+FN2+FN3
#print(tPDetailed)
#print(fPDetailed)
#print(fNDetailed)
print(TP/(TP+FN))
TPFP = TP+FP
# drop the two trailing (always empty) message-count buckets
TPFP = np.delete(TPFP, -1, axis=1)
TPFP = np.delete(TPFP, -1, axis=1)
TP = np.delete(TP, -1, axis=1)
TP = np.delete(TP, -1, axis=1)
print(TP)
print(TPFP)
print(TP/TPFP)

##Results for RA simulations
# Pasted simulation outputs (rows = false-positive rate bucket, columns =
# message-count bucket). Data is reproduced verbatim.
Precision = np.array([[0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1],
                      [0, 1, 1, 1, 1, 1, 1, 1]])
TP0 = np.array([[   0,  965,  564,  271,  104,   30,    8,    1],
                [   0,  911,  508,  233,  117,   38,   11,    2],
                [   0,  810,  475,  238,   86,   25,    7,    5],
                [   0,  676,  453,  260,  126,   45,   12,    9],
                [   0,  631,  391,  214,   96,   37,    6,    1],
                [   0,  731,  617,  300,  130,   56,    8,    1],
                [   0, 1110,  832,  416,  153,   42,   13,    3]])
TPFP0 = np.array([[ 706,  965,  564,  271,  104,   30,    8,    1],
                  [1125,  911,  508,  233,  117,   38,   11,    2],
                  [1376,  810,  475,  238,   86,   25,    7,    5],
                  [1716,  676,  453,  260,  126,   45,   12,    9],
                  [1169,  631,  391,  214,   96,   37,    6,    1],
                  [ 794,  731,  617,  300,  130,   56,    8,    1],
                  [ 632, 1110,  832,  416,  153,   42,   13,    3]])
# NOTE(review): TPFN0 rows have 7 entries while the matrices above have 8 --
# this looks like a transcription slip in the pasted results; verify.
TPFN0 = np.array([[68165, 24383,  7836,  2259,   538,     8,     1],
                  [51837, 18141,  6288,  1863,   444,    11,     2],
                  [47262, 16374,  5911,  1651,   219,     7,     5],
                  [55712, 20369,  6956,  1746,   345,    12,     9],
                  [56478, 20648,  6978,  1637,   345,     6,     1],
                  [66252, 24655,  8275,  1982,   411,     8,     1],
                  [99878, 36749, 12126,  2749,   572,    13,     3]])
TP2 = np.array([[0, 17, 18, 46, 44, 31, 13, 3, 0, 0],
                [0, 31, 30, 62, 59, 36, 7, 4, 0, 0],
                [0, 16, 45, 70, 66, 33, 11, 3, 0, 0],
                [0, 30, 73, 134, 94, 40, 7, 4, 0, 0],
                [0, 60, 205, 207, 122, 31, 17, 3, 0, 0],
                [0, 109, 297, 228, 148, 45, 13, 3, 0, 0],
                [0, 241, 485, 325, 128, 40, 9, 2, 0, 0]])
FP2 = np.array([[2986, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2668, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2515, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2364, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2341, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1983, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1694, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
FN2 = np.array([[0, 1051, 400, 118, 35, 10, 0, 0, 0, 0],
                [0, 1170, 441, 113, 43, 12, 0, 0, 0, 0],
                [0, 1031, 400, 133, 35, 7, 0, 0, 0, 0],
                [0, 1515, 591, 203, 37, 14, 0, 0, 0, 0],
                [0, 2307, 833, 297, 59, 16, 0, 0, 0, 0],
                [0, 2592, 1056, 303, 88, 12, 0, 0, 0, 0],
                [0, 4068, 1433, 486, 89, 27, 0, 0, 0, 0]])
TP1 = np.array([[0, 21, 16, 41, 29, 27, 13, 2, 0, 0],
                [0, 19, 37, 63, 45, 38, 11, 4, 0, 0],
                [0, 22, 32, 57, 48, 21, 16, 4, 0, 0],
                [0, 38, 108, 144, 105, 43, 14, 4, 0, 0],
                [0, 62, 171, 235, 148, 63, 8, 1, 0, 0],
                [0, 113, 296, 236, 113, 45, 7, 3, 0, 0],
                [0, 226, 486, 358, 170, 34, 8, 4, 0, 0]])
FP1 = np.array([[2815, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [3090, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1947, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2651, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2274, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2008, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1702, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
FN1 = np.array([[0, 1176, 438, 139, 53, 10, 0, 0, 0, 0],
                [0, 973, 379, 110, 30, 8, 0, 0, 0, 0],
                [0, 1008, 377, 102, 27, 13, 0, 0, 0, 0],
                [0, 1614, 592, 207, 43, 12, 0, 0, 0, 0],
                [0, 2273, 820, 265, 59, 14, 0, 0, 0, 0],
                [0, 2970, 1112, 350, 94, 18, 0, 0, 0, 0],
                [0, 4091, 1527, 483, 110, 31, 0, 0, 0, 0]])
TP3 = np.array([[0, 13, 23, 31, 29, 19, 8, 4, 0, 0],
                [0, 15, 22, 42, 48, 41, 11, 2, 0, 0],
                [0, 23, 55, 95, 96, 55, 16, 5, 0, 0],
                [0, 35, 90, 126, 94, 33, 7, 4, 0, 0],
                [0, 50, 157, 192, 109, 31, 15, 4, 0, 0],
                [0, 151, 360, 283, 156, 36, 7, 3, 0, 0],
                [0, 226, 425, 315, 120, 47, 13, 0, 0, 0]])
FP3 = np.array([[2282, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2576, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [3201, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2397, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2199, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [2360, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1539, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
FN3 = np.array([[0, 973, 354, 109, 42, 8, 0, 0, 0, 0],
                [0, 975, 329, 110, 27, 8, 0, 0, 0, 0],
                [0, 1163, 429, 116, 30, 7, 0, 0, 0, 0],
                [0, 1531, 607, 194, 30, 6, 0, 0, 0, 0],
                [0, 2202, 851, 262, 71, 12, 0, 0, 0, 0],
                [0, 2969, 1180, 331, 109, 17, 0, 0, 0, 0],
                [0, 4207, 1563, 501, 113, 41, 0, 0, 0, 0]])

## EUmailResults
# (commented-out pasted recall matrix, continued on the next lines)
#[[0. 0. 0. 0. 0. 0. 0.
#] -- end of the first row of the commented-out EU Mail recall matrix
# [0.02405858 0.02466793 0.02371542 0.0212766  0.03448276 0.04429783 0.05956376]
# [0.03757225 0.04901961 0.11167513 0.06738544 0.16533333 0.29147982 0.24874791]
# [0.24603175 0.33823529 0.4125 0.53157895 0.55454545 0.72563177 0.62340967]
# [0.85245902 0.86666667 0.88636364 0.88050314 0.86752137 0.92361111 0.84868421]
# [0.96103896 0.98924731 0.98473282 0.98895028 0.98672566 0.99056604 0.97461929]
# [0.96774194 0.99186992 0.9858156 0.98170732 0.98823529 0.96226415 0.95348837]]

#FN = np.delete(FN, -1, axis=1)
#FN = np.delete(FN, -1, axis=1)
#FN = np.delete(FN, 0, axis=1)
# Per-bucket recall of the aggregated runs; trailing empty buckets are dropped
# and the matrix is transposed so rows = message-count buckets.
recall = TP/(TP+FN)
recall = np.delete(recall,-1,axis=1)
recall = np.delete(recall,-1,axis=1)
recall = np.delete(recall,-1,axis=1)
recall = np.transpose(recall)
recall[0] = np.array([0,0,0,0,0,0,0])  # zero out the "no real edge" row
print(recall)

# +
#recall = TP/(TP+FN)
# NOTE(review): despite the variable name, this cell plots the *Precision*
# matrix (filenames below also say "precision").
recall = Precision
recall = np.delete(recall,-1,axis=1)
#recall = np.delete(recall,-1,axis=1)
#recall = np.delete(recall,-1,axis=1)
recall = np.transpose(recall)
recall = np.round(recall,3)
print(recall)
recall[0] = np.zeros(7)
size = 7
# Limits for the extent
x_start = 0.0
x_end = 7
y_start = 0.0
y_end = 7
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
#ax.set_title('Recall')
ax.set_aspect('equal')
# Add the text (cell-centre coordinates; the actual ax.text call is disabled)
jump_x = (x_end - x_start) / (2.0 * size)-0.5
jump_y = (y_end - y_start) / (2.0 * size)-0.5
x_positions = np.linspace(start=x_start, stop=x_end, num=size, endpoint=False)
y_positions = np.linspace(start=y_start, stop=y_end, num=size, endpoint=False)
for y_index, y in enumerate(y_positions):
    for x_index, x in enumerate(x_positions):
        label = recall[y_index, x_index]
        text_x = x + jump_x
        text_y = y + jump_y
        #ax.text(text_x, text_y, label, color='black', ha='center', va='center')
y_axis_labels = ["$[0,2^{0}]$","$[2^{1},2^{2}]$", "$[2^{2},2^{3}]$","$[2^{3},2^{4}]$","$[2^{4},2^{5}]$","$[2^{5},2^{6}]$","$2^{6}$<"]
x_axis_labels = ["$2^{-1}$", "$2^{-2}$", "$2^{-3}$", "$2^{-4}$", "$2^{-5}$", "$2^{-6}$", "$2^{-7}$"]
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 10}
plt.rc('font', **font)
cmap = plt.cm.gist_gray
ax = sns.heatmap(recall)
ax.set_xticklabels(x_axis_labels)
ax.set_yticklabels(y_axis_labels)
plt.yticks(rotation = 0)
plt.xlabel("False-positive rate of the recipient", fontweight='bold')
plt.ylabel("#Messages between sender and recipient", fontweight='bold')
#plt.title("College IM")
plt.tight_layout()
plt.savefig("precisionGranularEvalCollege.pdf")
plt.show()

# +
# Pasted summary results from previous simulation runs (reproduced verbatim).
## College results
## Precision: 0.1692057942057942
## Recall: 0.19295122819508723
## Number of nodes :  1899
## Number of messages :  59835
## Density of the message graph:  0.016600995144136092
## Number of "cover" edges:  16095322
## Density of the cover message graph:  4.4655864020273555
## Number of edges:  16354849
## Density of the transaction graph:  4.537591189639492
## College results with t-tests
## Precision: 0.18141505715411904
## Recall: 0.1453382805715639
## TP: 3682 FP: 16614 TN: 1394619 FN: 21652
## Number of relevants: 25334
## EU results
## Precision: 0.22809780356402817
## Recall: 0.41361689336439467
## Number of nodes :  986
## Number of messages:  332334
## Number of cover edges:  47708341
## Density of the cover message graph:  49.12258008051812
## TP:5504 FP:18626 TN:537578 FN:7803
## Number of irrelevants: 369
## EU results with t-tests
## Precision: 0.2294115287416262
## Recall: 0.39184652278177456
## TP:5719 FP:19210 TN:619865 FN:8876
## Number of relevants: 14595

# Precision/recall samples of repeated runs, used for the mean/std bar chart.
collegeMsgRAPrecisions = np.array([0.18141505715411904,0.19087504927079227,0.18200630666141113,0.19570358691367756])
collegeMsgRARecalls = np.array([0.1453382805715639,0.15141684580809067,0.15223573047599423,0.15904540722351246])
collegeMsgDeniabilityPrecisions = np.array([0.9627473806752037,0.9616766467065868,0.9629629629629629,0.9636576787807737,
                                            0.9614485981308412,0.9638273045507585,0.9657210401891253,0.9626168224299065,
                                            0.9628339140534262,0.9637002341920374])
collegeMsgDeniabilityRecalls = np.array([0.7525022747952684,0.7306642402183804,0.7333939945404914,0.7479526842584168,
                                         0.7488626023657871,0.7515923566878981,0.7434030937215651,0.7497725204731575,
                                         0.7543221110100091,0.7488626023657871])
euMsgRAPrecisions = np.array([0.2294115287416262])
euMsgRARecalls = np.array([0.39184652278177456])
euMsgDeniabilityPrecisions = np.array([0.9394736842105263,0.9432432432432433,0.9335180055401662,0.9388297872340425,
                                       0.946524064171123,0.9436619718309859,0.9424657534246575,0.9394736842105263,
                                       0.9392265193370166,0.945054945054945])
euMsgDeniabilityRecalls = np.array([0.5063829787234042,0.4950354609929078,0.47801418439716314,0.500709219858156,
                                    0.502127659574468,0.475177304964539,0.4879432624113475,0.5063829787234042,
                                    0.48226950354609927,0.4879432624113475])
# Calculate the average
collegeMsgRAPrecisionsMean = np.mean(collegeMsgRAPrecisions)
collegeMsgRARecallsMean = np.mean(collegeMsgRARecalls)
collegeMsgDeniabilityPrecisionsMean = np.mean(collegeMsgDeniabilityPrecisions)
collegeMsgDeniabilityRecallsMean = np.mean(collegeMsgDeniabilityRecalls)
euMsgRAPrecisionsMean = np.mean(euMsgRAPrecisions)
euMsgRARecallsMean = np.mean(euMsgRARecalls)
euMsgDeniabilityPrecisionsMean = np.mean(euMsgDeniabilityPrecisions)
euMsgDeniabilityRecallsMean = np.mean(euMsgDeniabilityRecalls)
# Calculate the standard deviation
collegeMsgRAPrecisionsStd = np.std(collegeMsgRAPrecisions)
collegeMsgRARecallsStd = np.std(collegeMsgRARecalls)
collegeMsgDeniabilityPrecisionsStd = np.std(collegeMsgDeniabilityPrecisions)
collegeMsgDeniabilityRecallsStd = np.std(collegeMsgDeniabilityRecalls)
euMsgRAPrecisionsStd = np.std(euMsgRAPrecisions)
euMsgRARecallsStd = np.std(euMsgRARecalls)
euMsgDeniabilityPrecisionsStd = np.std(euMsgDeniabilityPrecisions)
euMsgDeniabilityRecallsStd = np.std(euMsgDeniabilityRecalls)
length = 4
x_labels = ['Rel.Anonymity\n Precision', 'Rel.Anonymity\n Recall', 'Temp. Det.\n Amb. Precision','Temp. Det.\n Amb. Recall']
# Set plot parameters
fig, ax = plt.subplots()
width = 0.45 # width of bar
x = np.arange(length)
collegeMeans = np.array([collegeMsgRAPrecisionsMean,collegeMsgRARecallsMean,collegeMsgDeniabilityPrecisionsMean,collegeMsgDeniabilityRecallsMean], dtype=float)
euMeans = np.array([euMsgRAPrecisionsMean, euMsgRARecallsMean, euMsgDeniabilityPrecisionsMean, euMsgDeniabilityRecallsMean], dtype=float)
# grouped bars with std-dev error bars for the two datasets
ax.bar(x, collegeMeans, width, color='red', label='College IM', yerr=np.array([collegeMsgRAPrecisionsStd,collegeMsgRARecallsStd,collegeMsgDeniabilityPrecisionsStd,collegeMsgDeniabilityRecallsStd], dtype=float))
ax.bar(x + width, euMeans, width, color='blue', label='EU Mail', yerr=np.array([euMsgRAPrecisionsStd,euMsgRARecallsStd,euMsgDeniabilityPrecisionsStd,euMsgDeniabilityRecallsStd], dtype=float))
ax.set_ylabel('')
ax.set_ylim(0,1)
ax.set_xticks(x + width/2)
ax.set_xticklabels(x_labels)
ax.yaxis.grid(True)
#ax.set_xlabel('Scenario')
ax.set_title('Performance Evaluation of the Statistical Attacks', fontweight='bold')
ax.legend()
plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.6)
fig.tight_layout()
plt.savefig('evaluation.pdf')
plt.show()
# -

H = nx.Graph(G)
print(len(list(H.edges)))

# +
## Temporal Uniformity of Detection
## We build the graph edge by edge adding also the cover (fuzzy edges) and try to predict
## whether a node has received a message given only the fuzzy edges
G.clear()
#file1 = open('../datasets/CollegeMsg.txt', 'r')
file1 = open('../datasets/email-Eu-core-temporal.txt','r')
Lines = file1.readlines()
G = nx.MultiDiGraph() ## this is the graph with also the "cover/fuzzy" edges
H = nx.MultiDiGraph() ## this is the graph that only contains the edges of the real messages
minFp = 8
falsePositiveRates = [pow(2,-k) for k in range(1,minFp)]
msgCount = 0
truePositivesDetectionRates = []
truePositivesDegrees = []
falsePositivesDetectionRates = []
falsePositivesDegrees = []
truePositives = 0
trueNegatives = 0
falseNegatives = 0
falsePositives = 0
receivedMsg = []
# Replay the first 25000 messages: each line is (sender, recipient, timestamp).
# NOTE(review): SNAP's raw email-Eu-core-temporal.txt is space-separated;
# `split(',')` assumes a comma-separated copy of the dataset -- confirm the
# local file format, otherwise myLine[1] raises IndexError.
for line in tqdm(Lines):
    msgCount += 1
    myLine = line.split(',')
    if myLine[0] not in G:
        randIndex = random.randint(0,len(falsePositiveRates)-1)
        G.add_node(myLine[0],falsePos=falsePositiveRates[randIndex])
        H.add_node(myLine[0])
    if myLine[1] not in G:
        randIndex = random.randint(0,len(falsePositiveRates)-1)
        G.add_node(myLine[1],falsePos=falsePositiveRates[randIndex])
        H.add_node(myLine[1])
    # real edge in both graphs; `weight` carries the timestamp *string*
    G.add_edge(myLine[0],myLine[1], weight=myLine[2])
    H.add_edge(myLine[0],myLine[1], weight=myLine[2])
    receivedMsg.append(myLine[1])
    ## add cover edges probabilistically for all the other nodes for this message
    for u in G:
        if u!=myLine[0] and u!=myLine[1]:
            recFalsePos =G.nodes[str(u)]['falsePos']
            randNum = np.random.uniform(0,1)
            if randNum <= recFalsePos:
                # cover edges are marked with the integer sentinel weight 1996
                G.add_edge(myLine[0],u,weight=1996)
    if msgCount % 1000 == 0:
        print(msgCount)
    if msgCount == 25000:
        break
# One-sided Z-test per node: does its in-degree (flags) exceed what its
# false-positive rate alone would explain?
for u in G:
    flags = G.in_degree(u)
    p = G.nodes[str(u)]['falsePos']
    mu0 = msgCount*p ##in the paper we also call this ideal rate
    s = math.sqrt(msgCount*p*(1-p))
    z = (flags-mu0)/s
    p_values = scipy.stats.norm.sf(abs(z))
    if p_values < 0.01:
        if u in receivedMsg:
            truePositives+=1
            truePositivesDetectionRates.append(p)
            truePositivesDegrees.append(H.degree(u))
        else:
            falsePositives+=1
            falsePositivesDetectionRates.append(p)
            falsePositivesDegrees.append(H.degree(u))
    else:
        if u in receivedMsg:
            falseNegatives+=1
        else:
            trueNegatives+=1
print(truePositives,falsePositives,trueNegatives,falseNegatives)
print("Precision:",truePositives/(truePositives+falsePositives))
print("Recall:",truePositives/(truePositives+falseNegatives))

# +
## Detection ambiguity up to 25000 messages
## Results for EU core e-mail: TP: 350 FP:22 TN:3 FN:355
## Results for the college IM: TP: 810 FP: 31 TN:6 FN:289
# -

##Detection ambiguity and temporal uniformity of detection
# For each (server message count N, user's incoming messages m) find the
# largest false-positive rate p whose 3-sigma band no longer hides m messages.
allMessages = [1000,10000,100000,1000000,10000000]
sentOutMsgs = [5,10,25,50,100,250,500,1000]
maxSentMessages = 100
detectableTrueMsgs = np.zeros((len(allMessages),len(sentOutMsgs)))
for N in allMessages:
    for m in sentOutMsgs:
        # scan p downwards from 0.5; record the first p where m sticks out
        for p in np.arange(0.5,0.001,-0.001):
            sigma=math.sqrt(p*(1-p)*N)
            if 3*sigma < m:
                detectableTrueMsgs[allMessages.index(N)][sentOutMsgs.index(m)]=p
                break
print(detectableTrueMsgs)
y_axis_labels = ["$10^3$","$10^4$","$10^5$","$10^6$","$10^7$"]
x_axis_labels = ["$5$", "$10$", "$25$", "$50$", "$100$", "$250$", "$500$","$1000$"]
font = {'family' : 'serif',
        'weight' : 'bold',
        'size'   : 10}
plt.rc('font', **font) #set the font style created
sns.set(font_scale=1)
ax = sns.heatmap(detectableTrueMsgs, annot=True, mask=detectableTrueMsgs < 0.00001,xticklabels=x_axis_labels, yticklabels = y_axis_labels, cbar_kws={'label': 'False positive rate'})
# use matplotlib.colorbar.Colorbar object
cbar = ax.collections[0].colorbar
# here set the labelsize by 20
cbar.ax.tick_params(labelsize=10)
#ax.set_facecolor('xkcd:black')
plt.xticks(rotation = 45)
plt.yticks(rotation = 0)
#ax.set_title('Smallest detection rates achieving detection ambiguity')
plt.ylabel("All messages stored on the server", fontweight='bold')
plt.xlabel("User's incoming messages", fontweight='bold')
plt.tight_layout()
plt.savefig("detectionAmbiguity.pdf")
plt.show()

# Timestamp distributions for one user: real incoming messages vs. the flagged
# set at two detection rates (2^-7, and 8x that = 2^-4) vs. all traffic.
userId = '1624' ##it has an in_degree 558
userFalsePositiveRate = math.pow(2,-7)
timestamps = [] ##contains the timestamp of all the messages
userTimestamps = []
userIncomingMessages = []
userTimestampsFrequent = []
for edge in tqdm(G.edges(data=True)):
    if edge[2]['weight'] == 1996: ##these are cover edges, we are not interested in them at this point
        continue
    else:
        # real edges store the timestamp as a string -> strip + int
        timestamps.append(int(edge[2]['weight'].rstrip()))
        if edge[1]==userId:
            userIncomingMessages.append(int(edge[2]['weight'].rstrip()))
            userTimestamps.append(int(edge[2]['weight'].rstrip()))
            userTimestampsFrequent.append(int(edge[2]['weight'].rstrip()))
        else:
            # one random draw decides both detection-rate scenarios, so the
            # p=2^-7 flag set is a subset of the p=2^-4 one
            randNum = np.random.uniform(0,1)
            if randNum <= userFalsePositiveRate:
                userTimestamps.append(int(edge[2]['weight'].rstrip()))
            if randNum <= 8*userFalsePositiveRate:
                userTimestampsFrequent.append(int(edge[2]['weight'].rstrip()))
print(len(timestamps),len(userTimestamps),len(userTimestampsFrequent))
plt.hist(timestamps, color = "red",density=True,bins=100,alpha=0.5,label="All messages")
plt.hist(userTimestampsFrequent,color = "blue",density=True, bins=100, alpha=0.5, label='User #1624, $p=2^{-4}$')
plt.hist(userTimestamps,color = "green",density=True, bins=100, alpha=0.5, label='User #1624, $p=2^{-7}$')
plt.hist(userIncomingMessages,color="orange",density=True,bins=100,alpha=0.5,label='User #1624, $p=0$')
plt.legend(loc='best')
plt.ylabel('Probability')
plt.xlabel('Time')
plt.title('Detection ambiguity in a time interval')
locs, labels = plt.xticks()
plt.xticks(locs[1:-1],[datetime.datetime.fromtimestamp(t).strftime("%Y/%m") for t in locs][1:-1], rotation='0')
plt.tight_layout()
plt.savefig("detectionAmbiguityinEpoch.pdf")
plt.show()
src/Analyser.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 6. Groupwise Image Registration # Groupwise registration methods try to mitigate uncertainties associated with any one image by simultaneously registering all images in a population. This incorporates all image information in registration process and eliminates bias towards a chosen reference frame. The method described here uses a 3D (2D+time) free-form B-spline deformation model and a similarity metric that minimizes variance of intensities under the constraint that the average deformation over images is zero. This constraint defines a true mean frame of reference that lie in the center of the population without having to calculate it explicitly. # # The method can take into account temporal smoothness of the deformations and a cyclic transform in the time dimension. This may be appropriate if it is known a priori that the anatomical motion has a cyclic nature e.g. in cases of cardiac or respiratory motion. # ### Registration import itk # Load folder containing images. images = itk.imread("data/00", itk.F) # Create Groupwise Parameter Object parameter_object = itk.ParameterObject.New() groupwise_parameter_map = parameter_object.GetDefaultParameterMap('groupwise') parameter_object.AddParameterMap(groupwise_parameter_map) # Registration can either be done in one line with the registration function... # + # Call registration function # both fixed and moving image should be set with the vector_itk to prevent elastix from throwing errors result_image, result_transform_parameters = itk.elastix_registration_method( images, images, parameter_object=parameter_object, log_to_console=True) # - # .. or by initiating an elastix image filter object. 
# + # Load Elastix Image Filter Object # Fixed and moving image should be given to the Elastix method to ensure that # the correct 3D class is initialized. # Both fixed and moving image should be set with the vector_itk to prevent elastix from throwing errors elastix_object = itk.ElastixRegistrationMethod.New(images, images) elastix_object.SetParameterObject(parameter_object) # Set additional options elastix_object.SetLogToConsole(False) # Update filter object (required) elastix_object.UpdateLargestPossibleRegion() # Results of Registration result_image = elastix_object.GetOutput() result_transform_parameters = elastix_object.GetTransformParameterObject()
examples/ITK_Example06_GroupwiseRegistration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# License Plate Recognition GUI: a tkinter window with a load button, a canvas
# for the image and labels for detection/OCR results.
import tkinter as tk
from tkinter import *
from cv2 import *
import PIL
from PIL import Image,ImageTk
from tkinter.filedialog import askopenfilename

window = tk.Tk()
window.title("License Plate Recognition")
canvas = tk.Canvas(window)
canvas.pack(side=RIGHT)
# NOTE(review): `selectfile` is defined in a later notebook cell; this cell
# only works when the later cells have been executed first (and
# window.mainloop() below blocks until the window is closed).
btn_blur=tk.Button(window, text="Load Image",width=50,height=2,command=selectfile)
btn_blur.pack(anchor=tk.CENTER, expand=True)
result=tk.Label(window,text="Result Here")   # OCR result text
result.config(font=("Courier", 44))
objects=tk.Label(window,text="")             # "N object(s) detected" status
objects.config(font=("Courier",30))
extract=tk.Label(window,text="")             # OCR progress status
objects.pack()
extract.pack()
result.pack()
window.mainloop()

# +
import numpy as np

class ObjectDetection(object):
    """Class for Custom Vision's exported object detection model.

    Decodes the raw YOLO-style network output into a list of bounding-box
    predictions; the actual network evaluation (predict) is supplied by a
    platform-specific subclass.
    """
    # anchor box (width, height) priors used by the exported model
    ANCHORS = np.array([[0.573, 0.677], [1.87, 2.06], [3.34, 5.47], [7.88, 3.53], [9.77, 9.17]])
    IOU_THRESHOLD = 0.45  # overlap above this suppresses the weaker box

    def __init__(self, labels, prob_threshold = 0.10, max_detections = 20):
        """Initialize the class

        Args:
            labels ([str]): list of labels for the exported model.
            prob_threshold (float): threshold for class probability.
            max_detections (int): the max number of output results.
        """
        assert len(labels) >= 1, "At least 1 label is required"

        self.labels = labels
        self.prob_threshold = prob_threshold
        self.max_detections = max_detections

    def _logistic(self, x):
        # numerically stable sigmoid (separate formulas for x>0 and x<=0)
        return np.where(x > 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x)))

    def _non_maximum_suppression(self, boxes, class_probs, max_detections):
        """Remove overlapping bounding boxes (greedy NMS on class probability)."""
        assert len(boxes) == len(class_probs)

        if len(boxes)<max_detections:
            max_detections=len(boxes)

        max_probs = np.amax(class_probs, axis=1)
        max_classes = np.argmax(class_probs, axis=1)
        areas = boxes[:,2] * boxes[:,3]

        selected_boxes = []
        selected_classes = []
        selected_probs = []

        while len(selected_boxes) < max_detections:
            # Select the prediction with the highest probability.
            i = np.argmax(max_probs)
            if max_probs[i] < self.prob_threshold:
                break

            # Save the selected prediction
            selected_boxes.append(boxes[i])
            selected_classes.append(max_classes[i])
            selected_probs.append(max_probs[i])

            box = boxes[i]
            other_indices = np.concatenate((np.arange(i), np.arange(i+1,len(boxes))))
            other_boxes = boxes[other_indices]

            # Get overlap between the 'box' and 'other_boxes'
            x1 = np.maximum(box[0], other_boxes[:,0])
            y1 = np.maximum(box[1], other_boxes[:,1])
            x2 = np.minimum(box[0]+box[2], other_boxes[:,0]+other_boxes[:,2])
            y2 = np.minimum(box[1]+box[3], other_boxes[:,1]+other_boxes[:,3])
            w = np.maximum(0, x2 - x1)
            h = np.maximum(0, y2 - y1)

            # Calculate Intersection Over Union (IOU)
            overlap_area = w * h
            iou = overlap_area / (areas[i] + areas[other_indices] - overlap_area)

            # Find the overlapping predictions
            overlapping_indices = other_indices[np.where(iou > self.IOU_THRESHOLD)[0]]
            overlapping_indices = np.append(overlapping_indices, i)

            # Set the probability of overlapping predictions to zero, and update max_probs and max_classes.
            class_probs[overlapping_indices,max_classes[i]] = 0
            max_probs[overlapping_indices] = np.amax(class_probs[overlapping_indices], axis=1)
            max_classes[overlapping_indices] = np.argmax(class_probs[overlapping_indices], axis=1)

        assert len(selected_boxes) == len(selected_classes) and len(selected_boxes) == len(selected_probs)
        return selected_boxes, selected_classes, selected_probs

    def _extract_bb(self, prediction_output, anchors):
        # Decode the (H x W x anchors*(5+num_class)) network output into
        # normalized boxes and per-box class probabilities.
        assert len(prediction_output.shape) == 3
        num_anchor = anchors.shape[0]
        height, width, channels = prediction_output.shape
        assert channels % num_anchor == 0

        num_class = int(channels / num_anchor) - 5
        assert num_class == len(self.labels)

        outputs = prediction_output.reshape((height, width, num_anchor, -1))

        # Extract bounding box information
        x = (self._logistic(outputs[...,0]) + np.arange(width)[np.newaxis, :, np.newaxis]) / width
        y = (self._logistic(outputs[...,1]) + np.arange(height)[:, np.newaxis, np.newaxis]) / height
        w = np.exp(outputs[...,2]) * anchors[:,0][np.newaxis, np.newaxis, :] / width
        h = np.exp(outputs[...,3]) * anchors[:,1][np.newaxis, np.newaxis, :] / height

        # (x,y) in the network outputs is the center of the bounding box. Convert them to top-left.
        x = x - w / 2
        y = y - h / 2
        boxes = np.stack((x,y,w,h), axis=-1).reshape(-1, 4)

        # Get confidence for the bounding boxes.
        objectness = self._logistic(outputs[...,4])

        # Get class probabilities for the bounding boxes (softmax scaled by objectness).
        class_probs = outputs[...,5:]
        class_probs = np.exp(class_probs - np.amax(class_probs, axis=3)[..., np.newaxis])
        class_probs = class_probs / np.sum(class_probs, axis=3)[..., np.newaxis] * objectness[..., np.newaxis]
        class_probs = class_probs.reshape(-1, num_class)

        assert len(boxes) == len(class_probs)
        return (boxes, class_probs)

    def predict_image(self, image):
        # full pipeline: PIL image -> 416x416 RGB -> raw output -> predictions
        inputs = self.preprocess(image)
        prediction_outputs = self.predict(inputs)
        return self.postprocess(prediction_outputs)

    def preprocess(self, image):
        image = image.convert("RGB") if image.mode != "RGB" else image
        image = image.resize((416, 416))
        return image

    def predict(self, preprocessed_inputs):
        """Evaluate the model and get the output

        Need to be implemented for each platform, i.e. TensorFlow, CoreML, etc.
        """
        raise NotImplementedError

    def postprocess(self, prediction_outputs):
        """ Extract bounding boxes from the model outputs.

        Args:
            prediction_outputs: Output from the object detection model. (H x W x C)

        Returns:
            List of Prediction objects (dicts with probability/tagId/tagName/boundingBox).
        """
        boxes, class_probs = self._extract_bb(prediction_outputs, self.ANCHORS)

        # Remove bounding boxes whose confidence is lower than the threshold.
        max_probs = np.amax(class_probs, axis=1)
        index, = np.where(max_probs > self.prob_threshold)
        index = index[(-max_probs[index]).argsort()]

        # Remove overlapping bounding boxes
        selected_boxes, selected_classes, selected_probs = self._non_maximum_suppression(boxes[index], class_probs[index], self.max_detections)

        return [{'probability': round(float(selected_probs[i]), 8),
                 'tagId': int(selected_classes[i]),
                 'tagName': self.labels[selected_classes[i]],
                 'boundingBox': {
                     'left': round(float(selected_boxes[i][0]), 8),
                     'top': round(float(selected_boxes[i][1]), 8),
                     'width': round(float(selected_boxes[i][2]), 8),
                     'height': round(float(selected_boxes[i][3]), 8)
                     }
                 } for i in range(len(selected_boxes))]

# +
# Suggested steps before inference:
# 1.
#    for an image of width and height being (w, h) pixels, resize image to (w', h'),
#    where w/h = w'/h' and w' x h' = 262144
# 2. resize network input size to (w', h')
# 3. pass the image to network and do inference
# (4. if inference speed is too slow for you, try to make w' x h' smaller)
import sys
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
from PIL import Image
import os

MODEL_FILENAME = 'model.pb'    # frozen TF graph exported from Custom Vision
LABELS_FILENAME = 'labels.txt'

class TFObjectDetection(ObjectDetection):
    """Object Detection class for TensorFlow: evaluates the frozen graph
    loaded from `graph_def` on preprocessed images."""

    def __init__(self, graph_def, labels):
        super(TFObjectDetection, self).__init__(labels)
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

    def predict(self, preprocessed_image):
        """Run the frozen graph on one preprocessed 416x416 RGB PIL image and
        return the raw (H x W x C) output tensor."""
        # FIX: `np.float` was a deprecated alias of the builtin float
        # (deprecated in NumPy 1.20, removed in 1.24); np.float64 is the exact
        # same dtype and keeps working on modern NumPy.
        inputs = np.array(preprocessed_image, dtype=np.float64)[:, :, (2, 1, 0)]  # RGB -> BGR
        with tf.Session(graph=self.graph) as sess:
            output_tensor = sess.graph.get_tensor_by_name('model_outputs:0')
            outputs = sess.run(output_tensor, {'Placeholder:0': inputs[np.newaxis, ...]})
            return outputs[0]

def main(image_filename):
    """Load model + labels, detect plates in `image_filename`, update the
    status label and hand each detection to extract_name_plate_bbox."""
    # Load a TensorFlow model
    graph_def = tf.GraphDef()
    with tf.gfile.FastGFile(MODEL_FILENAME, 'rb') as f:
        graph_def.ParseFromString(f.read())

    # Load labels
    with open(LABELS_FILENAME, 'r') as f:
        labels = [l.strip() for l in f.readlines()]

    od_model = TFObjectDetection(graph_def, labels)
    image = Image.open(image_filename)
    predictions = od_model.predict_image(image)
    total = len(predictions)
    if total > 0:
        objects["text"] = str(total) + " Object(s) detected..."
    else:
        objects["text"] = "No objects detected"
    # loop-invariant hoisted: the output stem only depends on the input file
    filename = os.path.splitext(image_filename)[0]
    for i in predictions:
        extract_name_plate_bbox(image_filename,
                                i["boundingBox"]["left"], i["boundingBox"]["top"],
                                i["boundingBox"]["width"], i["boundingBox"]["height"],
                                filename + "_plate.jpg", i["probability"])

def predict(filename):
    """Thin wrapper used as the GUI entry point."""
    main(filename)

# +
from cv2 import *

#OCR Utility Function.
def perform_OCR(plate, name):
    """Save the cropped plate image, OCR it via Azure, and show the text in the GUI."""
    cv2.imwrite(name, plate)
    text = azureOCR(name)
    result["text"] = text  # `result` is a Tk label defined in another cell
    window.mainloop()


# extract nameplate from the image.
def extract_name_plate_bbox(path, left, top, width, height, name, probability):
    """Crop the detected plate out of the image at `path`, display it, and run OCR.

    left/top/width/height are fractions of the image size (model output).
    """
    img = cv2.imread(path)
    # Convert normalized bounding box to pixel coordinates.
    x = int(left * img.shape[1])
    y = int(top * img.shape[0])
    x2 = x + int(width * img.shape[1])
    y2 = y + int(height * img.shape[0])

    # crop image at name plate
    plate = img[y:y2, x:x2]
    # resize the image (2x) so the OCR gets a larger crop.
    plate = cv2.resize(plate, (0, 0), fx=2, fy=2)

    # BUG FIX: draw the detection rectangle *before* saving; the original wrote
    # the image to disk first and drew afterwards, so the saved image never
    # showed the bounding box and the annotated `img` was discarded.
    img = cv2.rectangle(img, (x, y), (x2, y2), (0, 0, 255), 2)
    cv2.imwrite(path, img)

    # Display the cropped plate in the Tk canvas.
    background = cv2.cvtColor(plate, cv2.COLOR_BGR2RGB)
    photo = ImageTk.PhotoImage(image=PIL.Image.fromarray(background))
    canvas = Canvas(window, width=plate.shape[1], height=plate.shape[0])
    canvas.create_image(0, 0, image=photo, anchor=tk.NW)
    canvas.pack(side=LEFT, padx=30)
    perform_OCR(plate, name)

# +
import os
import requests
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from io import BytesIO

if 'COGNITIVE_SERVICE_KEY' in os.environ:
    subscription_key = os.environ['COGNITIVE_SERVICE_KEY']
else:
    print("Set the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.")
    sys.exit()

if 'COMPUTER_VISION_ENDPOINT' in os.environ:
    endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
# NOTE(review): `endpoint` is only bound when the env var exists; the next
# line raises NameError otherwise -- consider exiting like above.
ocr_url = endpoint + "vision/v2.0/ocr"
params = {'language': 'unk', 'detectOrientation': 'true'}


def azureOCR(filename):
    """POST the image file to the Azure OCR endpoint and return all recognized words concatenated."""
    extract["text"] = "extracting text"  # GUI status label defined in another cell
    image_data = open(filename, "rb").read()
    headers = {'Ocp-Apim-Subscription-Key': subscription_key,
               'Content-Type': 'application/octet-stream'}
    response = requests.post(ocr_url, headers=headers, params=params, data=image_data)
    analysis = response.json()

    # Flatten regions -> lines -> words.
    line_infos = [region["lines"] for region in analysis["regions"]]
    word_infos = []
    for line in line_infos:
        for word_metadata in line:
            for word_info in word_metadata["words"]:
                word_infos.append(word_info)

    # (Removed dead code: a bare no-op `word_infos` expression and an unused
    # `bbox` computation inside the loop below.)
    text = ""
    for word in word_infos:
        text += word["text"]
    return text
# -


def selectfile():
    """Ask the user for an image, display it, and run license-plate detection on it."""
    filename = askopenfilename(title="Select Image to perform LPR", filetypes=[("All pictures", "*.jpg *.png")])  # show an "Open" dialog box and return the path to the selected file
    background = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)
    # BUG FIX: cv2.resize returns a new array (it is not in-place); the
    # original discarded the result, so the 2x upscale never happened.
    background = cv2.resize(background, (0, 0), fx=2, fy=2)
    photo = ImageTk.PhotoImage(image=PIL.Image.fromarray(background))
    canvas["width"] = background.shape[1]
    canvas["height"] = background.shape[0]
    btn_blur.destroy()
    canvas.create_image(0, 0, image=photo, anchor=tk.NW)
    predict(filename)
LPR_GUI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] graffitiCellId="id_ykogmat" # ## Problem statement # # Given an array `arr` and a target element `target`, find the last index of occurence of `target` in `arr` using recursion. If `target` is not present in `arr`, return `-1`. # # For example: # # 1. For `arr = [1, 2, 5, 5, 4]` and `target = 5`, `output = 3` # # 2. For `arr = [1, 2, 5, 5, 4]` and `target = 7`, `output = -1` # + graffitiCellId="id_jy2464y" def last_index(arr, target): """ :param: arr - input array :param: target - integer element return: int - last index of target in arr TODO: complete this method to find the last index of target in arr """ pass # + [markdown] graffitiCellId="id_vwcsmcw" # <span class="graffiti-highlight graffiti-id_vwcsmcw-id_flmfhqn"><i></i><button>Show Solution</button></span> # + graffitiCellId="id_80igiok" def test_function(test_case): arr = test_case[0] target = test_case[1] solution = test_case[2] output = last_index(arr, target) if output == solution: print("Pass") else: print("False") # + graffitiCellId="id_ph6zw07" arr = [1, 2, 5, 5, 4] target = 5 solution = 3 test_case = [arr, target, solution] test_function(test_case) # + graffitiCellId="id_ikxk069" arr = [1, 2, 5, 5, 4] target = 7 solution = -1 test_case = [arr, target, solution] test_function(test_case) # + graffitiCellId="id_t1qqzpn" arr = [91, 19, 3, 8, 9] target = 91 solution = 0 test_case = [arr, target, solution] test_function(test_case) # + graffitiCellId="id_la123ly" arr = [1, 1, 1, 1, 1, 1] target = 1 solution = 5 test_case = [arr, target, solution] test_function(test_case)
recursion/.ipynb_checkpoints/Last-index-recursion-checkpoint.ipynb
# # Fine-tuning a model with the Trainer API

# Install the Transformers and Datasets libraries to run this notebook.

# !pip install datasets transformers[sentencepiece]

# +
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding

# GLUE/MRPC: sentence-pair classification with two labels.
raw_datasets = load_dataset("glue", "mrpc")
checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)


def tokenize_function(example):
    """Tokenize a sentence pair; truncation only -- padding is applied per
    batch by the data collator below (dynamic padding)."""
    return tokenizer(example["sentence1"], example["sentence2"], truncation=True)


tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# +
from transformers import TrainingArguments

# Only the output directory is set; everything else uses Trainer defaults.
training_args = TrainingArguments("test-trainer")

# +
from transformers import AutoModelForSequenceClassification

model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

# +
from transformers import Trainer

trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
)
# -

trainer.train()

# Run inference on the validation split; `predictions.predictions` are logits.
predictions = trainer.predict(tokenized_datasets["validation"])
print(predictions.predictions.shape, predictions.label_ids.shape)

import numpy as np

# Logits -> predicted class ids.
preds = np.argmax(predictions.predictions, axis=-1)

# +
from datasets import load_metric

metric = load_metric("glue", "mrpc")
metric.compute(predictions=preds, references=predictions.label_ids)
# -


def compute_metrics(eval_preds):
    """Compute the GLUE/MRPC metric from an (logits, labels) eval tuple."""
    metric = load_metric("glue", "mrpc")
    logits, labels = eval_preds
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)


# +
# Second training run: same setup, but evaluate at the end of every epoch
# using compute_metrics above.
training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch")
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)

trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
course/chapter3/section3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="Ng2mJtpgIVZu"
# # <NAME>
#
# Data Science & Business Analytics Intern at The Sparks Foundation
#
# Dataset : student dataset (http://bit.ly/w-data)
#
# Let's Begin...

# + [markdown] id="PVWZuInW_vXg"
# ### Linear Regression with Python Scikit Learn
# ### In this section we will see how the Python Scikit-Learn library for machine learning can be used to implement regression functions.
#
# ### Simple Linear Regression
# ### In this regression task we will predict the percentage of marks that a student is expected to score based upon the number of hours they studied. This is a simple linear regression task as it involves just two variables. That is their is one dependent variable (scores) and other is independent (hours).

# + id="KuLeWKcr_r9h"
# Importing all libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# + colab={"base_uri": "https://localhost:8080/"} id="1ybpyZy__8Lg" outputId="d3f50c23-c1e1-4920-ce0b-a18cb5e048fa"
# Reading data from remote link
url = "http://bit.ly/w-data"
dataset = pd.read_csv(url)
print("Data imported")

# + colab={"base_uri": "https://localhost:8080/", "height": 819} id="L9pbUWMMG3xd" outputId="9c19c6cc-2a3d-43cc-eb2f-827a1acf30cf"
dataset

# + id="bvtW4sAeBmJJ"
# Feature matrix (hours) / target vector (scores).
X = dataset.iloc[:, :-1].values  #get a copy of dataset exclude last column
y = dataset.iloc[:, 1].values  #get array of dataset in column 1st

# + colab={"base_uri": "https://localhost:8080/"} id="nmWf98rjBu2p" outputId="d387ddc5-8121-4ca7-de7c-4edb2bc5b8b1"
X

# + colab={"base_uri": "https://localhost:8080/"} id="uFSyWfZaBviG" outputId="b49b59cd-e1f7-410c-d753-207b8c6d001b"
y

# + id="7dT6HMMfCNn3"
# 80/20 train/test split with a fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# + colab={"base_uri": "https://localhost:8080/"} id="lLIdrMjZCfTW" outputId="0968f324-0f25-4c2b-c31b-ff154e8ad587"
# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="jXZW0GQqC-mm" outputId="3602d4cf-5e52-468d-dac1-1e7d2995bdac"
plt.scatter(X, y,color='red')

# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="m4BYUJN5DEX3" outputId="d745c42a-2ea9-4630-b890-e52cb9648539"
# Plotting the regression line (y = coef * x + intercept)
line = regressor.coef_*X+regressor.intercept_

# Plotting for the test data
plt.scatter(X, y,color='red')
plt.plot(X, line);
plt.show()

# + [markdown] id="j5fmr9vgDUfd"
# ## Doing Predictions
# Now that we have trained our algorithm, it's time to make some predictions

# + colab={"base_uri": "https://localhost:8080/"} id="41_-4knFFvM9" outputId="b6569566-c74e-4479-9e1f-7211a16ce2f2"
print(X_test)  # Testing data - In Hours
y_pred = regressor.predict(X_test)  # Predicting the scores

# + colab={"base_uri": "https://localhost:8080/"} id="0pNrWxrIFygd" outputId="e6a69b75-063a-4394-bf5c-9f2a99e32d3f"
y_pred

# + colab={"base_uri": "https://localhost:8080/"} id="7ebTyiSCDS8f" outputId="df3f5876-0100-4ce1-e73e-eaa5158d9616"
# Predict the score for a single custom input (9.25 study hours).
hours = [[9.25]]
own_pred = regressor.predict(hours)
print("No of Hours = {}".format(hours))
print("Predicted Score = {}".format(own_pred[0]))

# + colab={"base_uri": "https://localhost:8080/"} id="_ot0mPYCFDH4" outputId="c3743a09-26ed-4b2d-cdef-a640694b2146"
own_pred

# + [markdown] id="asxFVQ67Ga5F"
# ### Evaluating the model

# + colab={"base_uri": "https://localhost:8080/"} id="kafIIrLdGXn2" outputId="272abad8-bc64-498d-936b-aac980250bec"
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
Task_1_Simple_linear_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # # <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a> # ___ # <center><em>Copyright <NAME></em></center> # <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center> # # NumPy Exercises # # Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks and then you'll be asked some more complicated questions. # # <div class="alert alert-danger" style="margin: 10px"><strong>IMPORTANT NOTE!</strong> Make sure you don't run the cells directly above the example output shown, <br>otherwise you will end up writing over the example output!</div> # #### 1. Import NumPy as np # #### 2. Create an array of 10 zeros # CODE HERE # + # DON'T WRITE HERE # - # #### 3. Create an array of 10 ones # + # DON'T WRITE HERE # - # #### 4. Create an array of 10 fives # + # DON'T WRITE HERE # - # #### 5. Create an array of the integers from 10 to 50 # + # DON'T WRITE HERE # - # #### 6. Create an array of all the even integers from 10 to 50 # + # DON'T WRITE HERE # - # #### 7. Create a 3x3 matrix with values ranging from 0 to 8 # + # DON'T WRITE HERE # - # #### 8. Create a 3x3 identity matrix # + # DON'T WRITE HERE # - # #### 9. Use NumPy to generate a random number between 0 and 1<br><br>&emsp;NOTE: Your result's value should be different from the one shown below. # + # DON'T WRITE HERE # - # #### 10. Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution<br><br>&emsp;&ensp;NOTE: Your result's values should be different from the ones shown below. # + # DON'T WRITE HERE # - # #### 11. Create the following matrix: # + # DON'T WRITE HERE # - # #### 12. 
Create an array of 20 linearly spaced points between 0 and 1: # + # DON'T WRITE HERE # - # ## Numpy Indexing and Selection # # Now you will be given a starting matrix (be sure to run the cell below!), and be asked to replicate the resulting matrix outputs: # RUN THIS CELL - THIS IS OUR STARTING MATRIX mat = np.arange(1,26).reshape(5,5) mat # #### 13. Write code that reproduces the output shown below.<br><br>&emsp;&ensp;Be careful not to run the cell immediately above the output, otherwise you won't be able to see the output any more. # CODE HERE # + # DON'T WRITE HERE # - # #### 14. Write code that reproduces the output shown below. # + # DON'T WRITE HERE # - # #### 15. Write code that reproduces the output shown below. # + # DON'T WRITE HERE # - # #### 16. Write code that reproduces the output shown below. # + # DON'T WRITE HERE # - # #### 17. Write code that reproduces the output shown below. # + # DON'T WRITE HERE # - # ## NumPy Operations # #### 18. Get the sum of all the values in mat # + # DON'T WRITE HERE # - # #### 19. Get the standard deviation of the values in mat # + # DON'T WRITE HERE # - # #### 20. Get the sum of all the columns in mat # + # DON'T WRITE HERE # - # ## Bonus Question # # We worked a lot with random data with numpy, but is there a way we can insure that we always get the same random numbers? [Click Here for a Hint](https://www.google.com/search?q=numpy+random+seed&rlz=1C1CHBF_enUS747US747&oq=numpy+random+seed&aqs=chrome..69i57j69i60j0l4.2087j0j7&sourceid=chrome&ie=UTF-8) # # Great Job!
Data Science Resources/Jose portila - ML/02-Numpy/.ipynb_checkpoints/03-NumPy-Exercises-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: DS 5110 Spark 3.1
#     language: python
#     name: ds5110_spark3.1
# ---

from utils_nlp import Tools
tools = Tools('mhk9c')

# +
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp.pretrained import PretrainedPipeline
import sparknlp
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.clustering import LDA
from pyspark.sql import functions as F
# -

import sparknlp
spark = sparknlp.start()

# ### Data Load and Prep

# +
df = tools.load_data(spark, "russian-troll-tweets-enriched")

# Parse the publish-date string and derive date / hour columns.
df = df.withColumn("publish_date_timestamp", F.to_timestamp(F.col("publish_date"), "M/d/yyyy H:mm"))
df = df.withColumn("publish_date_date", F.to_date(F.col("publish_date_timestamp")))
df = df.withColumn("publish_hour", F.hour(F.col("publish_date_timestamp")))

# Restrict to the overall study window.
df = df.filter((df["publish_date_date"] >= F.lit("2014-10-14")) & (df["publish_date_date"] <= F.lit("2017-12-14")))

df_troll = df.filter(df['label'] == 1)
df_nontroll = df.filter(df['label'] == 0)

# Wikileaks release window (Oct 2016).
df_wikileaks = df.filter((df["publish_date_date"] >= F.lit("2016-10-05")) & (df["publish_date_date"] <= F.lit("2016-10-07")))
# BUG FIX: the original filtered the *full* dataframe here, making
# df_wikileaks_troll identical to df_troll; the troll subset must come from
# the date-filtered frame.
df_wikileaks_troll = df_wikileaks.filter(df_wikileaks['label'] == 1)

# Unite the Right rally window (Aug 2017).
df_utr = df.filter((df["publish_date_date"] >= F.lit("2017-08-01")) & (df["publish_date_date"] <= F.lit("2017-08-31")))
# BUG FIX: same issue -- subset df_utr, not df.
df_utr_troll = df_utr.filter(df_utr['label'] == 1)

# df.printSchema()
# -

# Spark NLP requires the input dataframe or column to be converted to document.
# Wrap the raw 'content' text column into Spark NLP's document annotation.
document_assembler = DocumentAssembler() \
    .setInputCol("content") \
    .setOutputCol("document") \
    .setCleanupMode("shrink")

# Split sentence to tokens(array)
tokenizer = Tokenizer() \
    .setInputCols(["document"]) \
    .setOutputCol("token")

# clean unwanted characters and garbage
normalizer = Normalizer() \
    .setInputCols(["token"]) \
    .setOutputCol("normalized")

# remove stopwords
stopwords_cleaner = StopWordsCleaner()\
    .setInputCols("normalized")\
    .setOutputCol("cleanTokens")\
    .setCaseSensitive(False)

# stem the words to bring them to the root form.
stemmer = Stemmer() \
    .setInputCols(["cleanTokens"]) \
    .setOutputCol("stem")

# Finisher converts annotations back into plain string arrays usable by Spark ML.
finisher = Finisher() \
    .setInputCols(["stem"]) \
    .setOutputCols(["tokens"]) \
    .setOutputAsArray(True) \
    .setCleanAnnotations(False)

# We build a ml pipeline so that each phase can be executed in sequence. This pipeline can also be used to test the model.
nlp_pipeline = Pipeline(
    stages=[document_assembler,
            tokenizer,
            normalizer,
            stopwords_cleaner,
            stemmer,
            finisher])


def run_pipeline(_df):
    """Preprocess `_df` with the NLP pipeline, fit a 3-topic LDA model on the
    tokenized 'content' column, and print diagnostics plus each topic's top words.

    :param _df: Spark dataframe with 'content' and 'publish_date' columns.
    """
    # train the pipeline
    nlp_model = nlp_pipeline.fit(_df)

    # apply the pipeline to transform dataframe.
    processed_df = nlp_model.transform(_df)

    # tokens_df = processed_df.select('publish_date','tokens').limit(10000)
    tokens_df = processed_df.select('publish_date', 'tokens')

    # Bag-of-words features: vocab capped at 500 terms, term must appear in >= 3 docs.
    cv = CountVectorizer(inputCol="tokens", outputCol="features", vocabSize=500, minDF=3.0)

    # train the model
    cv_model = cv.fit(tokens_df)

    # transform the data. Output column name will be features.
    vectorized_tokens = cv_model.transform(tokens_df)

    num_topics = 3
    lda = LDA(k=num_topics, maxIter=10)
    model = lda.fit(vectorized_tokens)

    # Model-fit diagnostics.
    ll = model.logLikelihood(vectorized_tokens)
    lp = model.logPerplexity(vectorized_tokens)
    print("The lower bound on the log likelihood of the entire corpus: " + str(ll))
    print("The upper bound on perplexity: " + str(lp))

    # extract vocabulary from CountVectorizer
    vocab = cv_model.vocabulary
    topics = model.describeTopics()
    topics_rdd = topics.rdd

    # Map each topic's term indices back to vocabulary words.
    topics_words = topics_rdd\
        .map(lambda row: row['termIndices'])\
        .map(lambda idx_list: [vocab[idx] for idx in idx_list])\
        .collect()

    for idx, topic in enumerate(topics_words):
        print("topic: {}".format(idx))
        print("*"*25)
        for word in topic:
            print(word)
        print("*"*25)


run_pipeline(df_wikileaks_troll)
NLP-Topic_Modelling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this chapter we will create an ontology and populate it with labels # # ## Preparing - Entities setup # import dtlpy as dl if dl.token_expired(): dl.login() project = dl.projects.get(project_name='project_name') dataset = project.datasets.get(dataset_name='dataset_name') # Get recipe from list recipe = dataset.recipes.list()[0] # Or get specific recipe: recipe = dataset.recipes.get(recipe_id='id') # Get ontology from list or create it using the "Create Ontology" script ontology = recipe.ontologies.list()[0] # Or get specific ontology: ontology = recipe.ontologies.get(ontology_id='id') # Print entities: recipe.print() ontology.print() # ## Create an Ontology # # project = dl.projects.get(project_name='project_name') ontology = project.ontologies.create(title="your_created_ontology_title", labels=[dl.Label(tag="Chameleon", color=(255, 0, 0))]) # ## Labels # # Ontology uses the ‘Labels’ entity, which is a python list object, and as such you can use python list methods such as sort(). Be sure to use ontology.update() after each python list action. # # ontology.add_labels(label_list=['Shark', 'Whale', 'Animal.Donkey'], update_ontology=True) # Labels can be added with branched hierarchy to facilitate sub-labels at up-to 5 levels. # Labels hierarchy is created by adding ‘.’ between parent and child labels. # In the above example, this script will get the Donkey Label: # child_label = ontology.labels[-1].children[0] print(child_label.tag, child_label.rgb) # ## Attributes # An attribute describes a label, without having to add more labels. For example “Car” is a label, but its color is an attribute. You can add multiple attributes to the ontology, and map it to labels. For example create the “color” attribute once, but have multiple labels use it. 
# Attributes can be multiple-selection (e.g checkbox), single selection (radio button), value over slider, a yes/no question and free-text. # An attribute can be set as a mandatory one, so annotators have to answer it before they can complete the item. # # ## Add attributes to the ontology # The following example adds 1 attribute of every type, all as a mandatory attribute: # * Multiple-choice attribute # * Single-choice attributes # * Slider attribute # * Yes/no question attribute # * Free text attribute # # This option is not available yet ... # ## Read Ontology Attributes # Read & print the all the ontology attributes: # # print(ontology.metadata['attributes']) keys = [att['key'] for att in ontology.metadata['attributes']] # ## Getting all labels is (including children): # # print(ontology.labels_flat_dict)
tutorials/recipe_and_ontology/ontology/chapter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tanmaych/Testing/blob/master/Stock_price_detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="237siS8KnJ2W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 74} outputId="b59d589b-fe11-405b-a5b8-639d19384f4c"
# Install the libraries
import numpy as np
import pandas as pd
import math
import pandas_datareader as web
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.style.use('bmh')

# + id="prvuLPDEqZV1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 238} outputId="e8d81e04-eda3-47e3-aa7e-e69070cb9006"
# Read data fom web: Tata Motors daily prices from Yahoo Finance.
df = web.DataReader('TTM', data_source='yahoo', start='2012-01-01', end='2019-12-17')
df.head(5)

# + id="KoYK7LiJwqZc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c2a81657-5b61-4b13-fab9-94542563a865"
df.shape

# + id="VNXKBFrRxHjZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 513} outputId="691b8236-8050-4390-b6d7-4d4a8549ddab"
plt.figure(figsize=(16, 8))
plt.title('Tata Motors')
plt.xlabel('Days')
plt.ylabel('Close Price')
plt.plot(df['Close'])
plt.show()

# + id="Qxdz3DqQx9gC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="86588be7-9c03-47d0-cac0-e092a752f141"
# Keep only the closing price.
df = df[['Close']]
df.head(4)

# + id="cbVeKij0yZ8H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="e1eba7c2-32e5-45ce-a4c6-e06c1120a0af"
# Create a variable to predict 'x' days into the future
future_days = 50
# create a new column shifted 'x' days up, so each row's target is the close
# price `future_days` later (last `future_days` rows become NaN).
df['Prediction'] = df['Close'].shift(-future_days)
df.tail(4)

# + id="_R1RqoTYzeXk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 146} outputId="9aaf0e54-0e55-465c-ecac-cceb6497c20a"
# Create the feature data set 'X' as a numpy array and drop the last 'x' rows/days.
# FIX: positional `axis` in DataFrame.drop was deprecated and removed in
# pandas 2.0 -- use the `columns` keyword instead.
X = np.array(df.drop(columns=['Prediction']))[:-future_days]
print(X)

# + id="_Q_ktHw54RvF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="e289fd6d-8ff2-4424-dcf3-9a08d04a31c6"
y = np.array(df['Prediction'])[:-future_days]
print(y)

# + id="6figvklH4exr" colab_type="code" colab={}
# NOTE(review): train_test_split shuffles by default, which leaks future data
# into training for a time series -- consider shuffle=False.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

# + id="zokTIvTY4nYZ" colab_type="code" colab={}
# Fit both models on the same split for comparison.
tree = DecisionTreeRegressor().fit(x_train, y_train)
lr = LinearRegression().fit(x_train, y_train)

# + id="robN8hp44r1s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 935} outputId="2ebccef7-3875-47fd-c6ba-5e7ec572fd22"
# Last `future_days` rows of the feature set (those whose targets we predict).
x_f = df.drop(columns=['Prediction'])[:-future_days]
x_f = x_f.tail(future_days)
x_f = np.array(x_f)
x_f

# + id="FrJq2UQK42Cl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 366} outputId="6ba385af-fce8-40b2-e172-966e92ca8d10"
tree_p = tree.predict(x_f)
print(tree_p)
print()
lr_p = lr.predict(x_f)
print(lr_p)

# + id="UFhS8XbH49Ag" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 606} outputId="7d69e6d8-6279-4e8b-d452-844f70a815c2"
# Plot decision-tree predictions against the actual prices.
pr = tree_p
# FIX: take an explicit copy before assigning a new column -- assigning into a
# slice of `df` triggers pandas' SettingWithCopyWarning.
valid = df[X.shape[0]:].copy()
valid['Predictions'] = pr
plt.figure(figsize=(16, 8))
plt.xlabel('Days')
plt.ylabel('Close Price')
plt.plot(df['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Orig', 'Val', 'Pred'])
plt.show()

# + id="HRym0tmy5M3y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 606} outputId="d250c678-c892-4ec4-c5f2-c9d6c669c8e9"
# Same plot for the linear-regression predictions.
pr = lr_p
valid = df[X.shape[0]:].copy()
valid['Predictions'] = pr
plt.figure(figsize=(16, 8))
plt.xlabel('Days')
plt.ylabel('Close Price')
plt.plot(df['Close'])
plt.plot(valid[['Close', 'Predictions']])
plt.legend(['Orig', 'Val', 'Pred'])
plt.show()
Stock_price_detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Intro # # The goal of this notebook is to present implementations of three important loss functions for this problem and compare their effects on a simplified implementation of U-Net. The loss functions in question are binary cross entropy (referred to as "nll" in the notebook because my initial version used the related NLLLoss instead of BCEWithLogitLoss), the soft Dice loss (introduced here: http://campar.in.tum.de/pub/milletari2016Vnet/milletari2016Vnet.pdf and considered to be useful for segmentation problems), and the focal loss, the investigation of which is the main focus of this notebook. # # The focal loss is described in "Focal Loss for Dense Object Detection" (https://arxiv.org/pdf/1708.02002.pdf) and is simply a modified version of binary cross entropy in which the loss for confidently correctly classified labels is scaled down, so that the network focuses more on incorrect and low confidence labels than on increasing its confidence in the already correct labels. # # In the image below, you can see the ordinary binary cross entropy loss function as the top line and different variants of focal loss (focal loss is parameterized by $\gamma$, $\gamma = 0$ is the same as binary cross entropy). We can clearly see that focal loss places much less importance on examples for which the confidence is high than ordinary BCE. # # ![image.png](attachment:image.png) # # In addition, this notebook also investigates a claim made in "Focal Loss for Dense Object Detection": # # > Binary classification models are by default initialized to # have equal probability of outputting either y = −1 or 1. 
Under such an initialization, in the presence of class imbal- # ance, the loss due to the frequent class can dominate total loss and cause instability in early training. To counter this, we introduce the concept of a ‘prior’ for the value of p estimated by the model for the rare class (foreground) at the start of training. We denote the prior by π and set it so that the model’s estimated p for examples of the rare class is low, e.g. 0.01. We note that this is a change in model initialization (see §4.1) and not of the loss function. We found this to improve training stability for both the cross entropy and focal loss in the case of heavy class imbalance. # # Basically the paper claims that in cases where the probability of each class isn't close to 50/50, it is useful to initialize the model so that on average it will output the actual probability of each class, so that the loss isn't very large at the beginning of training. # # Because the setting of the competition is different from that in the paper (eg we are looking for masks instead of bounding boxes, and the class imbalance is actually not that bad: 12% of pixels in the training set contain a cell), we definitely expect different results. The point of this notebook is to explore those differences. It would be interesting to implement something closer to what the paper describes to fully investigate the usefulness of focal loss for the data science bowl 2018. # %reload_ext autoreload # %autoreload 2 # %matplotlib inline # Note: not all the imports below are necessary, they are just copied from my main notebook. 
import glob
import cv2
import numpy as np
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
import matplotlib.pyplot as plt
import random
from tqdm import tqdm_notebook as tqdm
import os.path
import csv
import joblib
import PIL
import pandas as pd
import math
from sklearn.model_selection import train_test_split
from functools import reduce
from Augmentor.Pipeline import Pipeline
from Augmentor.ImageUtilities import AugmentorImage

# Some useful utilities:
# - cache is a decorator that will store the output of expensive functions on disk.
# - w is a wrapper function that sends (or does not send) PyTorch objects to gpu depending on whether a USE_CUDA variable is set.

# +
# !mkdir -p _cache_focal

# Disk-backed memoization for expensive functions (dataset loading, training runs).
# NOTE(review): `cachedir=` is the old joblib keyword (renamed to `location=` in
# newer joblib) — confirm the installed joblib version supports it.
cache = joblib.Memory(cachedir='ce_dice_cache/_cache_focal', verbose=0)

# Global switch: when True, `w` moves tensors/modules to the GPU.
USE_CUDA = True

def w(v):
    """Move *v* (a tensor, Variable or module) to the GPU when USE_CUDA is set.

    Returns *v* unchanged when running on CPU, so the rest of the code can be
    written device-agnostically.
    """
    if USE_CUDA:
        return v.cuda()
    return v
# -

# # Utils

# ## Evaluation
#
# These are the various evaluation functions I have been using, they allow us to compute iou and also to compress and generate the csv file.
#
# Note: these are just copied from my main notebook for this competition. They aren't necessarily all used in this notebook.
#
# +
def uncombine(mask):
    """Split a combined label image (0 = background, k = object k) into a
    list of boolean masks, one per object id 1..max."""
    max_val = np.max(mask) + 1
    results = []
    for i in range(1, max_val):
        results.append(mask == i)
    return results

def iou(mask1, mask2):
    """Intersection-over-union of two boolean masks."""
    return np.sum(mask1 & mask2) / np.sum(mask1 | mask2)

def evaluate_split(labels, y_pred):
    """Competition metric: mean average precision of matched objects over IoU
    thresholds 0.5..0.95 (step 0.05).

    `labels` and `y_pred` are integer label images (0 = background).
    Follows the standard DSB-2018 evaluation kernel.
    """
    true_objects = len(np.unique(labels))
    pred_objects = len(np.unique(y_pred))
    #print("Number of true objects:", true_objects)
    #print("Number of predicted objects:", pred_objects)

    # Compute intersection between all objects (pairwise pixel counts via a
    # 2-D histogram over (true label, predicted label) pairs).
    intersection = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))[0]

    # Compute areas (needed for finding the union between all objects)
    area_true = np.histogram(labels, bins = true_objects)[0]
    area_pred = np.histogram(y_pred, bins = pred_objects)[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)

    # Compute union (inclusion-exclusion)
    union = area_true + area_pred - intersection

    # Exclude background (label 0) from the analysis
    intersection = intersection[1:,1:]
    union = union[1:,1:]
    # Avoid division by zero below
    union[union == 0] = 1e-9

    # Compute the intersection over union for every (true, pred) pair
    iou = intersection / union

    # Precision helper function
    def precision_at(threshold, iou):
        matches = iou > threshold
        true_positives = np.sum(matches, axis=1) == 1   # Correctly matched true objects
        false_positives = np.sum(matches, axis=0) == 0  # Extra predicted objects (match nothing)
        false_negatives = np.sum(matches, axis=1) == 0  # Missed true objects (matched by nothing)
        tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
        return tp, fp, fn

    # Loop over IoU thresholds
    prec = []
    #print("Thresh\tTP\tFP\tFN\tPrec.")
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        if tp + fp + fn == 0:
            # No objects at all on either side: count as perfect.
            p = 1.0
        else:
            p = tp / (tp + fp + fn)
        #print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    #print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)

def evaluate_combined(combined_mask_true, combined_mask_pred):
    """Alias kept for readability at call sites; same as evaluate_split."""
    return evaluate_split(combined_mask_true, combined_mask_pred)

def evaluate_naive_tuple(tup):
    # Unpacking helper so the naive evaluator can be used with pool.map.
    return evaluate_naive(*tup)

def classify_naive(image, factor, kernel_sz):
    """Baseline (no-learning) segmentation: threshold at mean +/- factor*std
    (direction chosen from the median, so it handles both dark-on-light and
    light-on-dark images), clean up with open/close morphology, and label
    connected components."""
    if np.median(image) < 127:
        thresholded = (image > np.mean(image) + np.std(image) * factor).astype(np.uint8) * 255
    else:
        thresholded = (image < np.mean(image) - np.std(image) * factor).astype(np.uint8) * 255
    kernel = np.ones((kernel_sz, kernel_sz))
    thresholded = cv2.morphologyEx(thresholded, cv2.MORPH_OPEN, kernel)
    thresholded = cv2.morphologyEx(thresholded, cv2.MORPH_CLOSE, kernel)
    _, connected = cv2.connectedComponents(thresholded)
    return connected

def evaluate_naive(folder, factor, kernel_sz):
    """Score the naive classifier on one training folder.

    NOTE(review): `imread` is not defined/imported in this notebook (only
    `cv2.imread` is available) — presumably `skimage.io.imread` in the
    original notebook; confirm before using this function.
    """
    image = glob.glob(folder + '/images/*')[0]
    image = imread(image)
    masks = glob.glob(folder + '/masks/*')
    total_mask = None
    for i, m in enumerate(masks):
        m = (imread(m) // 255).astype(np.int32)
        if total_mask is None:
            total_mask = m
        else:
            total_mask += m * (i+1)
    connected = classify_naive(image, factor, kernel_sz)
    return evaluate_combined(total_mask, connected)

def rle(x):
    """Run-length encode a binary mask in the competition's column-major,
    1-indexed format; returns a flat [start, length, start, length, ...] list."""
    dots = np.where(x.T.flatten() == 1)[0]
    run_lengths = []
    prev = -2
    for b in dots:
        if (b>prev+1):
            run_lengths.extend((b + 1, 0))
        run_lengths[-1] += 1
        prev = b
    return run_lengths

def rle_combined(combined):
    """RLE-encode every object in a combined label image.

    If the image is empty, one pixel is forced on so the submission always
    contains at least one prediction per image (mutates `combined`).
    """
    all_rle = []
    if np.max(combined) == 0:
        combined[0, 0] = 1
    max_val = np.max(combined) + 1
    for i in range(1, max_val):
        all_rle.append(rle(combined == i))
    return all_rle

# TODO: test rle by encoding and decoding and figuring out if it matches
def rle_encoding(x):
    '''
    x: numpy array of shape (height, width), 1 - mask, 0 - background
    Returns run length as list

    NOTE(review): duplicate of `rle` above; keep one of the two.
    '''
    dots = np.where(x.T.flatten()==1)[0] # .T sets Fortran order down-then-right
    run_lengths = []
    prev = -2
    for b in dots:
        if (b>prev+1):
            run_lengths.extend((b+1, 0))
        run_lengths[-1] += 1
        prev = b
    return run_lengths

def prob_to_rles(lab_img, cut_off = 0.5):
    """Yield one RLE per labeled object in `lab_img`."""
    if lab_img.max()<1:
        lab_img[0,0] = 1 # ensure at least one prediction per image
    for i in range(1, lab_img.max()+1):
        yield rle_encoding(lab_img==i)

def open_res_csv(key=''):
    """Open a new submission csv, picking the first unused numbered filename
    of the form _<key>_submit_NNN.csv."""
    cur = 0
    while True:
        path = '_%s_submit_%03d.csv' % (key, cur)
        if not os.path.exists(path):
            return open(path, 'w')
        cur += 1

def find_clusters(img):
    """Threshold a probability map at 0.5 and label connected components."""
    return cv2.connectedComponents((img > 0.5).astype(np.uint8))[1]
# -

# ## Loading
#
# Just a few loading functions.
#
# Note that crucially I am **eroding** the different masks, so that no two masks can touch each other.
#
# Here is an example of a full mask image if erosion is disabled:
#
# ![image.png](attachment:image.png)
#
# As you can see for example on the upper-right corner, some masks completely touch each other. Because our approach for finding individual cells once the pixels are labeled will be based on finding connected components, we would merge these two nuclei into a single one in our submission and thus hurt our IoU.
#
# Perhaps the simplest way to fix this is to teach our network to label pixels which are *inside the nucleus* by at least 1 pixel. This way two distinct nuclei can't touch each other, and we can expand the masks by one pixels once the nuclei are found. For this we use the erosion operation when we load our training labels, and the dilation operation when we generate our submission.
#
# Here is the image above after erosion of the masks:
#
# ![image.png](attachment:image.png)
#
# As you can see, no two masks touch anymore!
# + def load_image_labels(folder, border_sz=1): image = glob.glob(folder + '/images/*')[0] image = cv2.imread(image)[:, :, ::-1] masks = glob.glob(folder + '/masks/*') all_masks = [] for i, mask in enumerate(masks): mask_img = np.sum(cv2.imread(mask), axis=-1) mask_img = cv2.erode(mask_img.astype(np.uint8),np.ones((3, 3), np.uint8),iterations = 1) all_masks.append((mask_img.astype(np.int16) * (i + 1))) if len(masks) == 0: return image return image, np.sum(all_masks, axis=0, dtype=np.int16) def convert_masks(masks, border_sz=2): return np.sum( [extract_border(mask, border_sz) for mask in masks], axis=0 ) folder = glob.glob('../data/stage1_train/*')[9] img, masks = load_image_labels(folder) plt.imshow(img) plt.show() plt.imshow(masks, cmap='tab20c') plt.show() # + @cache.cache def load_train_data(): x = [] y = [] for path in glob.glob('../data/stage1_train/*/'): image, mask = load_image_labels(path) x.append(PIL.Image.fromarray(image)) y.append(PIL.Image.fromarray(mask)) return x, y #load_train_data.clear() FULL_TRAIN_X, FULL_TRAIN_Y = load_train_data() # - TRAIN_X, VAL_X, TRAIN_Y, VAL_Y = train_test_split(FULL_TRAIN_X, FULL_TRAIN_Y, test_size=0.1, random_state=0) # # Implementation # ## Model # # This is a U-Net (https://arxiv.org/abs/1505.04597) inspired model. Note that the UNetClassify class implements the bias initialization described in the paper! 
# +
DROPOUT = 0.5

class UNetBlock(nn.Module):
    """Basic U-Net building block: two 3x3 convolutions, each followed by
    ReLU and batch norm. Spatial size is preserved (padding=1)."""

    def __init__(self, filters_in, filters_out):
        super().__init__()
        self.filters_in = filters_in
        self.filters_out = filters_out
        self.conv1 = nn.Conv2d(filters_in, filters_out, (3, 3), padding=1)
        self.norm1 = nn.BatchNorm2d(filters_out)
        self.conv2 = nn.Conv2d(filters_out, filters_out, (3, 3), padding=1)
        self.norm2 = nn.BatchNorm2d(filters_out)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.norm1(self.activation(self.conv1(x)))
        return self.norm2(self.activation(self.conv2(x)))

class UNetDownBlock(UNetBlock):
    """Encoder block: UNetBlock followed by optional 2x2 max-pooling."""

    def __init__(self, filters_in, filters_out, pool=True):
        super().__init__(filters_in, filters_out)
        if pool:
            self.pool = nn.MaxPool2d(2)
        else:
            # No-op stand-in so forward() is uniform.
            self.pool = lambda x: x

    def forward(self, x):
        return self.pool(super().forward(x))

class UNetUpBlock(UNetBlock):
    """Decoder block: upsample to the skip connection's size, reduce channels
    with a 3x3 conv, concatenate the skip tensor, then run the UNetBlock."""

    def __init__(self, filters_in, filters_out):
        super().__init__(filters_in, filters_out)
        self.upconv = nn.Conv2d(filters_in, filters_in // 2, (3, 3), padding=1)
        self.upnorm = nn.BatchNorm2d(filters_in // 2)

    def forward(self, x, cross_x):
        # F.interpolate replaces the deprecated F.upsample; align_corners=False
        # matches the current default behavior.
        x = F.interpolate(x, size=cross_x.size()[-2:], mode='bilinear', align_corners=False)
        x = self.upnorm(self.activation(self.upconv(x)))
        x = torch.cat((x, cross_x), 1)
        return super().forward(x)

class UNet(nn.Module):
    """Simplified U-Net: `layers` down blocks (the last one without pooling)
    and `layers` up blocks with skip connections, for 1-channel input."""

    def __init__(self, layers, init_filters):
        super().__init__()
        self.down_layers = nn.ModuleList()
        self.up_layers = nn.ModuleList()
        self.init_filters = init_filters
        filter_size = init_filters
        for _ in range(layers - 1):
            self.down_layers.append(
                UNetDownBlock(filter_size, filter_size*2)
            )
            filter_size *= 2
        # Bottleneck: doubles channels but keeps spatial size.
        self.down_layers.append(UNetDownBlock(filter_size, filter_size * 2, pool=False))
        for i in range(layers):
            self.up_layers.append(
                UNetUpBlock(filter_size * 2, filter_size)
            )
            filter_size //= 2
        self.data_norm = nn.BatchNorm2d(1)
        self.init_layer = nn.Conv2d(1, init_filters, (7, 7), padding=3)
        self.activation = nn.ReLU()
        self.init_norm = nn.BatchNorm2d(init_filters)
        self.dropout = nn.Dropout(DROPOUT)

    def forward(self, x):
        x = self.data_norm(x)
        x = self.init_norm(self.activation(self.init_layer(x)))
        # Save the input of every down block as a skip connection.
        skips = [x]
        for layer in self.down_layers:
            skips.append(x)
            x = self.dropout(layer(x))
        # BUG FIX: the original flag logic (`if not is_first: is_first = False;
        # x = self.dropout(x)`) never fired, so dropout was silently disabled
        # in the decoder. Apply dropout before every up block except the first.
        first = True
        for layer, skip in zip(self.up_layers, reversed(skips)):
            if first:
                first = False
            else:
                x = self.dropout(x)
            x = layer(x, skip)
        return x

class UNetClassify(UNet):
    """U-Net with a 1-channel logit output head.

    Implements the bias initialization ("prior") from the focal loss paper:
    the output bias is set so that the network's initial average prediction is
    `init_val` (after sigmoid). All other biases are zeroed.
    """

    def __init__(self, *args, **kwargs):
        init_val = kwargs.pop('init_val', 0.5)
        super().__init__(*args, **kwargs)
        self.output_layer = nn.Conv2d(self.init_filters, 1, (3, 3), padding=1)
        for name, param in self.named_parameters():
            typ = name.split('.')[-1]
            if typ == 'bias':
                if 'output_layer' in name:
                    # Init so that the average will end up being init_val:
                    # sigmoid(-log((1-p)/p)) == p.
                    param.data.fill_(-math.log((1-init_val)/init_val))
                else:
                    param.data.zero_()

    def forward(self, x):
        x = super().forward(x)
        # Note that we don't perform the sigmoid here — losses below expect
        # raw logits (BCEWithLogits / FocalLoss / dice_loss all apply it).
        return self.output_layer(x)
# -

# ## Loss functions
#
# Binary cross entropy is unsurprisingly part of pytorch, but we need to implement soft dice and focal loss. For numerical stability purposes, focal loss tries to work in log space as much as possible.

# +
# From: https://github.com/pytorch/pytorch/issues/1249
def dice_loss(input, target):
    """Soft Dice loss on logits.

    `input` are raw logits, `target` is a {0,1} tensor of the same shape.
    Returns 1 - soft Dice coefficient, smoothed by 1 to avoid 0/0.
    """
    input = torch.sigmoid(input)
    smooth = 1.

    iflat = input.view(-1)
    tflat = target.view(-1)
    intersection = (iflat * tflat).sum()

    return 1 - ((2. * intersection + smooth) /
                (iflat.sum() + tflat.sum() + smooth))

class FocalLoss(nn.Module):
    """Binary focal loss on logits (https://arxiv.org/abs/1708.02002).

    With gamma == 0 this reduces exactly to BCEWithLogitsLoss. Computed in
    log space for numerical stability.
    """

    def __init__(self, gamma):
        super().__init__()
        self.gamma = gamma

    def forward(self, input, target):
        # Inspired by the implementation of binary_cross_entropy_with_logits
        if not (target.size() == input.size()):
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))

        # Numerically stable BCE-with-logits (log-sum-exp trick).
        max_val = (-input).clamp(min=0)
        loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

        # This formula gives us the log sigmoid of 1-p if y is 0 and of p if y is 1
        invprobs = F.logsigmoid(-input * (target * 2 - 1))
        # (1 - p_t) ** gamma, computed as exp(gamma * log(1 - p_t)).
        loss = (invprobs * self.gamma).exp() * loss

        return loss.mean()

def get_loss(loss):
    """Map a loss spec tuple to a criterion.

    ('dice',) -> soft dice; ('focal', gamma) -> FocalLoss(gamma);
    anything else -> BCEWithLogitsLoss.
    """
    if loss[0] == 'dice':
        print('dice')
        return dice_loss
    elif loss[0] == 'focal':
        print('focal')
        return w(FocalLoss(loss[1]))
    else:
        print('bce')
        return w(nn.BCEWithLogitsLoss())
# -

# **Bonus**: an implementation of multi-label focal loss with support for class weights as well! It functions just like NLLLoss and takes its input as a log softmax and its target as a LongTensor of the classes.
#
# Sadly this version is *not* numerically stable, unlike the binary version above. In particular I have found that it worked well with $\gamma = 2$ but generated lots of NaNs with $\gamma = 0.5$.
#
# +
def make_one_hot(labels, C=2):
    """Convert a (N, 1, H, W) LongTensor of class indices into a one-hot
    (N, C, H, W) FloatTensor on the active device."""
    one_hot = w(torch.FloatTensor(labels.size(0), C, labels.size(2), labels.size(3)).zero_())
    target = one_hot.scatter_(1, labels.data, 1)
    target = w(Variable(target))
    return target

class FocalLossMultiLabel(nn.Module):
    """Multi-class focal loss with per-class weights.

    Takes log-softmax input and a LongTensor target, like NLLLoss.
    NOTE(review): not numerically stable (works via 1 - exp(log_p)); known to
    produce NaNs for small gamma — see the note above.
    """

    def __init__(self, gamma, weight):
        super().__init__()
        self.gamma = gamma
        self.nll = nn.NLLLoss(weight=weight, reduce=False)

    def forward(self, input, target):
        loss = self.nll(input, target)
        one_hot = make_one_hot(target.unsqueeze(dim=1), input.size()[1])
        # input is log p, so exp(input) is p and inv_probs is 1 - p.
        inv_probs = 1 - input.exp()
        # Select (1 - p_t) for the true class of each pixel.
        focal_weights = (inv_probs * one_hot).sum(dim=1) ** self.gamma
        loss = loss * focal_weights
        return loss.mean()
# -

# ## Datasets
#
# I use the Augmentor library to convert images to grayscale and make sure they are all cropped to a fixed size. RepeatablePipeline is my implementation of a Pipeline in Augmentor that has the ability to repeat itself so that we can apply the same transformation to the original image and the mask.

# +
class RepeatablePipeline(Pipeline):
    """Augmentor pipeline whose random transform can be replayed, so the same
    augmentation can be applied to an image and then to its mask."""

    def sample_with_image_repeatable(self, image, state=None, save_to_disk=False):
        # Augmentor operates on AugmentorImage objects; wrap the PIL image.
        a = AugmentorImage(image_path=None, output_directory=None)
        a.image_PIL = image
        # Snapshot both RNGs so the transform can be reproduced (or undone).
        pystate = random.getstate()
        npstate = np.random.get_state()
        if state is not None:
            random.setstate(state[0])
            np.random.set_state(state[1])
        res = self._execute(a, save_to_disk)
        if state is None:
            # We weren't given a state, don't reset the state at all
            # and simply return the current state as the way to reproduce
            # this.
            return res, (pystate, npstate)
        else:
            # If we were given a state, put things back to normal
            random.setstate(pystate)
            np.random.set_state(npstate)
            return res, state

def get_pipeline(train=True, mask=False):
    """Build the augmentation pipeline.

    Training pipelines random-crop to SIZE x SIZE; non-mask pipelines also
    convert to grayscale.
    NOTE(review): `centre=not train` is always False inside the `if train:`
    branch, and validation images are never cropped (they are evaluated at
    full size with batch_size=1) — confirm this is intentional.
    """
    pipeline = RepeatablePipeline()
    if train:
        pipeline.crop_by_size(1.0, SIZE, SIZE, centre=not train)
    if not mask:
        pipeline.greyscale(1.0)
    return pipeline

# Side length of the (square) training crops.
SIZE = 128

class CellDataset(torch.utils.data.Dataset):
    """Dataset pairing images with masks, applying the *same* random
    augmentation to both via RepeatablePipeline's replayable state."""

    def __init__(self, images, mask_arrays, transform_pipeline, mask_pipeline):
        super().__init__()
        self.images = images
        self.mask_arrays = mask_arrays
        self.trans_pipeline = transform_pipeline
        self.mask_pipeline = mask_pipeline

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # Augment the image, capturing the RNG state used.
        ret_img, state = self.trans_pipeline.sample_with_image_repeatable(self.images[idx])
        ret_img = np.expand_dims(np.array(ret_img), -1)
        # Clamp the augmented image back into [0, 255].
        if np.min(ret_img) < 0.0:
            ret_img -= np.min(ret_img)
        if np.max(ret_img) > 255.0:
            ret_img = ret_img / np.max(ret_img) * 255.0
        # Replay the identical transform on the mask.
        masks = np.array(self.mask_pipeline.sample_with_image_repeatable(self.mask_arrays[idx], state)[0])
        # Returns (H, W, 1) float32 image scaled to [0, 1] and (H, W, 1) mask.
        return (ret_img / 255.0).astype(np.float32), np.expand_dims(masks.astype(np.float32), -1)
# -

# # Experiment
# ## Fitting
#
# We implement a simple fit function that is parameterized on the loss and the init value.
# +
BATCH_SIZE = 5

@cache.cache(ignore=['verbose'])
def fit(epochs, verbose=False, layers=4, lr=0.001, init_filters=32, loss='nll', init_val=0.5):
    """Train a UNetClassify with the given loss spec and output-bias prior.

    Returns (best_iou, best_loss, best_epoch, best_net_dict) where "best" is
    selected on validation IoU (ties broken by validation loss). The whole
    run is memoized on disk keyed by the arguments (except `verbose`).

    NOTE(review): written against an old PyTorch (Variable wrappers,
    `loss.data.cpu().numpy()[0]`, `F.sigmoid`) — these idioms break on
    modern PyTorch; use `.item()` / `torch.sigmoid` there.
    """
    net = w(UNetClassify(layers=layers, init_filters=init_filters, init_val=init_val))
    criterion = get_loss(loss)
    optimizer = optim.Adam(net.parameters(), lr=lr)
    train = torch.utils.data.DataLoader(
        dataset=CellDataset(TRAIN_X, TRAIN_Y, get_pipeline(), get_pipeline(mask=True)),
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=6
    )
    # Validation runs on full-size images, hence batch_size=1.
    val = torch.utils.data.DataLoader(
        dataset=CellDataset(VAL_X, VAL_Y, get_pipeline(train=False), get_pipeline(train=False, mask=True)),
        batch_size=1,
        shuffle=False,
        num_workers=2
    )
    print(dir(train))
    best_iou = -1.0
    best_net_dict = None
    best_epoch = -1
    best_loss = 1000.0
    for epoch in tqdm(range(epochs), f'Full Run'):
        # --- Training pass ---
        net.train()
        train_losses = []
        for batch, labels in train:
            # (N, H, W, C) -> (N, C, H, W); labels binarized (any object id -> 1).
            batch = w(autograd.Variable(batch.permute(0, 3, 1, 2)))
            labels = w(autograd.Variable((labels >= 1).float().permute(0, 3, 1, 2)))
            optimizer.zero_grad()
            output = net(batch)
            loss = criterion(output, labels)
            loss.backward()
            train_losses.append(loss.data.cpu().numpy()[0])
            optimizer.step()
        print('train loss', np.mean(train_losses))
        # --- Validation pass ---
        net.eval()
        losses = []
        iou = []
        to_show = random.randint(0, len(val) - 1)  # NOTE(review): unused
        for batch, labels_true in val:
            assert len(batch) == 1
            labels = w(autograd.Variable((labels_true >= 1).float().permute(0, 3, 1, 2)))
            batch = w(autograd.Variable(batch.permute(0, 3, 1, 2)))
            output = net(batch)
            loss = criterion(output, labels)
            losses += [loss.data.cpu().numpy()[0]] * batch.size()[0]
            # Hard 0.5 threshold on the sigmoid output.
            result = (F.sigmoid(output).permute(0, 2, 3, 1).data.cpu().numpy() > 0.5).astype(np.uint8)
            for label, res in zip(labels_true, result):
                label = label.cpu().numpy()[:, :, 0]
                # plt.imshow(label, cmap='tab20c')
                # plt.show()
                # plt.imshow(find_clusters(res), cmap='tab20c')
                # plt.show()
                # Competition metric on connected components of the prediction.
                iou.append(evaluate_combined(label, find_clusters(res)))
        cur_iou = np.mean(iou)
        # Keep the best snapshot by IoU; break ties with validation loss.
        if cur_iou > best_iou or (cur_iou == best_iou and np.mean(losses) < best_loss):
            best_iou = cur_iou
            best_epoch = epoch
            import copy
            best_net_dict = copy.deepcopy(net.state_dict())
            best_loss = np.mean(losses)
        print(np.mean(losses), np.mean(iou), best_loss, best_iou)
    return best_iou, best_loss, best_epoch, best_net_dict
# -

# ## Final test
#
# Now we can test the various losses and initialization. For each loss/initialization combination, a kaggle submission file is generated. For focal loss we try 0.5, 1.0, 2.0 and 4.0 as the $\gamma$ parameter.

# +
@cache.cache
def get_test_imgs():
    """Load all stage-1 test images (sorted by folder name, cached)."""
    results = []
    for path in sorted(glob.glob('../data/stage1_test/*/')):
        folder = path.split('/')[-2]
        print(folder)
        img = load_image_labels(path, border_sz=1)
        results.append(img)
    return results

def test_set():
    """Build a CellDataset over the test images with all-zero dummy masks."""
    val_sets = []  # NOTE(review): unused
    pipeline_val = get_pipeline(False)
    pipeline_val_mask = get_pipeline(False, mask=True)
    imgs = [PIL.Image.fromarray(img) for img in get_test_imgs()]
    # We create fake masks here
    masks = [PIL.Image.fromarray(np.zeros(np.array(img).shape[:2], dtype=np.uint8)) for img in imgs]
    return CellDataset(imgs, masks, pipeline_val, pipeline_val_mask)

@cache.cache
def get_iou(*args, **kwargs):
    # Cached wrapper so printing IoUs doesn't re-run training.
    return fit(*args, **kwargs)[0]

# For every loss / prior combination: train (cached), reload the best weights
# on CPU, predict on the test set, dilate each object by one pixel (undoing
# the erosion applied at load time), and write a submission csv.
for loss in [('nll',), ('dice',), ('focal', 0.5), ('focal', 1.0), ('focal', 2.0), ('focal', 4.0)]:
    for init in [0.5, 0.12]:
        print(loss, init, get_iou(200, loss=loss, init_val=init))
        _, _, _, net_dict = fit(200, loss=loss, init_val=init)
        net = UNetClassify(layers=4, init_filters=32)
        net.load_state_dict(net_dict)
        with open_res_csv('_'.join(map(str, loss)) + f'_{init}') as f:
            out = csv.writer(f)
            out.writerow(['ImageId', 'EncodedPixels'])
            test = torch.utils.data.DataLoader(
                dataset=test_set(),
                batch_size=1,
                shuffle=False,
                num_workers=1
            )
            # Folders are iterated in the same sorted order used by get_test_imgs,
            # so predictions and image ids line up.
            for (batch, _), folder in zip(test, sorted(glob.glob('../data/stage1_test/*/'))):
                assert len(batch) == 1
                batch = autograd.Variable(batch.permute(0, 3, 1, 2))
                net.eval()
                output = F.sigmoid(net(batch)).permute(0, 2, 3, 1).data.cpu().numpy()[0, :, :, 0]
                output = find_clusters((output > 0.5).astype(np.uint8))
                real_output = np.zeros(output.shape, dtype=np.int32)
                for cluster in range(1, np.max(output) + 1):
                    cur = ((output == cluster) * 255).astype(np.uint8)
                    # Dilate to compensate for the 1-pixel erosion of training masks.
                    cur = cv2.dilate(cur,np.ones((3, 3), np.uint8),iterations = 1)
                    real_output[cur > 0.5] = cluster
                output = real_output
                img_id = folder.split('/')[-2]
                results = rle_combined(output)
                for rl in sorted([r for r in results if r], key=lambda x: x[0]):
                    out.writerow([
                        img_id,
                        ' '.join(map(str, rl))
                    ])
# -

# # Conclusion

# We can see above that the different loss functions have a relatively small effect on the validation IoU in this dataset. Out of all of them, dice and focal loss with $\gamma = 0.5$ seem to do the best, indicating that there might be some benefit to using these unorthodox loss functions.
#
# Initialization with the prior seems to have even less effect, presumably because 0.12 is close enough to 0.5 that the training is not strongly negatively affected.
#
# A legitimate question to ask is what the effect is on the actual test set in the competition on Kaggle. I submitted 5 of the generated solutions and got the following leaderboard scores:
# - Binary cross-entropy: 0.333
# - Dice: 0.348
# - Focal, $\gamma = 0.5$: 0.346
# - Focal, $\gamma = 1$: 0.359
# - Focal, $\gamma = 2$: 0.325
#
# So again we see that focal loss and dice do a fair amount better than simple binary cross entropy. This time the best result actually came from focal with $\gamma = 1$, which is not reflected in our validation results above but definitely not surprising given the margins of error.
#
# Thus clearly the use of a focal or dice loss is not a major factor in this competition, but it can legitimately be combined with whatever other tricks you come up with.
scratch_notebooks/.ipynb_checkpoints/ce_dice_focal-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Learning how to move a human arm
#
# In this tutorial we will show how to train a basic biomechanical model using `keras-rl`.
#
# ## Installation
#
# To make it work, follow the instructions in
# https://github.com/stanfordnmbl/osim-rl#getting-started
# i.e. run
#
#     conda create -n opensim-rl -c kidzik opensim git python=2.7
#     source activate opensim-rl
#     pip install git+https://github.com/stanfordnmbl/osim-rl.git

# Then run
#
#     git clone https://github.com/stanfordnmbl/osim-rl.git
#     conda install keras -c conda-forge
#     pip install git+https://github.com/matthiasplappert/keras-rl.git
#     pip install tensorflow
#     cd osim-rl
#     conda install jupyter

# follow the instructions and once jupyter is installed and type
#
#     jupyter notebook

# This should open the browser with jupyter. Navigate to this notebook, i.e. to the file `scripts/train.arm.ipynb`.
#
# ## Preparing the environment
#
# The following two blocks load necessary libraries and create a simulator environment.

# +
# Derived from keras-rl
# NOTE(review): numpy is imported twice and RMSprop/argparse/sys/math appear
# unused in this notebook — copied over from the original script.
import opensim as osim
import numpy as np
import sys

from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, concatenate
from keras.optimizers import Adam

import numpy as np

from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess

from osim.env.arm import ArmEnv

from keras.optimizers import RMSprop

import argparse
import math

# +
# Load walking environment
env = ArmEnv(True)
env.reset()

# Total number of steps in training
nallsteps = 10000

# Size of the (continuous) action space, i.e. number of muscles to control.
nb_actions = env.action_space.shape[0]
# -

# ## Creating the actor and the critic
#
# The actor serves as a brain for controlling muscles. The critic is our approximation of how good is the brain performing for achieving the goal

# Create networks for DDPG
# Next, we build a very simple model.
# Actor: observation -> muscle excitations in [0, 1] (sigmoid output).
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('sigmoid'))
print(actor.summary())

# Critic: (action, observation) -> scalar Q-value (linear output).
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = concatenate([action_input, flattened_observation])
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
print(critic.summary())

# ## Train the actor and the critic
#
# We will now run `keras-rl` implementation of the DDPG algorithm which trains both networks.

# Set up the agent for training
memory = SequentialMemory(limit=100000, window_length=1)
# Ornstein-Uhlenbeck process: temporally correlated exploration noise.
random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.2, size=env.noutput)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                  random_process=random_process, gamma=.99, target_model_update=1e-3,
                  delta_clip=1.)
agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])

# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
agent.fit(env, nb_steps=2000, visualize=False, verbose=0, nb_max_episode_steps=200, log_interval=10000)
# After training is done, we save the final weights.
# NOTE(review): `args` is not defined in this notebook (the original script
# used argparse), hence the line is commented out.
# agent.save_weights(args.model, overwrite=True)

# ## Evaluate the results
# Check how our trained 'brain' performs. Below we will also load a pretrained model (on the larger number of episodes), which should perform better. It was trained exactly the same way, just with a larger number of steps (parameter `nb_steps` in `agent.fit`.

# agent.load_weights(args.model)
# Finally, evaluate our algorithm for 1 episode.
agent.test(env, nb_episodes=2, visualize=False, nb_max_episode_steps=1000)

agent.load_weights("../models/example.h5f")
# Finally, evaluate our algorithm for 1 episode.
agent.test(env, nb_episodes=5, visualize=False, nb_max_episode_steps=1000)
osim-rl/examples/legacy/train.arm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

# !pip3 install torch

import torch

# Creating a 1-D tensor
v = torch.tensor([1,2,3,4,5,6])
print(v)

# Viewing the data type
print(v.dtype)

# indexing
print(v[1])

# Index slicing
print(v[1:])

f = torch.FloatTensor([1,2,3,4,5,6])
print(f)

# Size
print(f.size())

# Element type
print(f.dtype)

# Reshape to 6 rows in 1 column (Rows, Column)
v.view(6,1)

# Reshape with negative: value inferred based on length of tensor
# E.g. length of vector is 6, it has 3 rows, therefore it must have two columns
v.view(3,-1)

import numpy as np

# Convert Numpy array to tensor (shares memory with the array)
a = np.array([1,2,3,4,5])
tensor_cnv = torch.from_numpy(a)
print(tensor_cnv)
print(tensor_cnv.type())

# Convert Tensor back to Numpy
# BUG FIX: the original wrote `tensor_cnv.numpy` (no parentheses), which binds
# the method object instead of producing an ndarray — call it.
numpy_cnv = tensor_cnv.numpy()
print(numpy_cnv)
0.1-pytorch-tensors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Example: while loop with an else clause.
# The else branch runs only when the loop finishes without a break.
iteracion = 1
while iteracion <= 5:
    iteracion += 1
    print(f"Estamos Iterando {iteracion}")
    # Uncommenting a `break` here would run the body only once
    # and skip the else branch below.
else:
    print("Termino la Iteracion")
# -

# Example: an if (with break) inside the while loop.
iteracion = 0
while iteracion <= 5:
    iteracion += 1
    print(f"Estamos Iterando {iteracion}")
    if iteracion == 4:
        # Leaving via break skips the loop's else branch.
        break
else:
    print("Termino la Iteracion")
Bucles/.ipynb_checkpoints/While_Jupyter-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Multivariate Regression

# Let's grab a small little data set of Blue Book car values:

# +
import pandas as pd

df = pd.read_excel('http://cdn.sundog-soft.com/Udemy/DataScience/cars.xls')
# -

df.head()

# We can use pandas to split up this matrix into the feature vectors we're interested in, and the value we're trying to predict.
#
# Note how we are avoiding the make and model; regressions don't work well with ordinal values, unless you can convert them into some numerical order that makes sense somehow.
#
# Let's scale our feature data into the same range so we can easily compare the coefficients we end up with.

# +
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler

scale = StandardScaler()

# FIX: take an explicit copy so the scaled assignment below modifies our own
# frame instead of a view of `df` (avoids pandas' SettingWithCopy pitfall).
X = df[['Mileage', 'Cylinder', 'Doors']].copy()
y = df['Price']

# FIX: DataFrame.as_matrix() was deprecated and removed from pandas;
# to_numpy() is the supported equivalent.
X[['Mileage', 'Cylinder', 'Doors']] = scale.fit_transform(X[['Mileage', 'Cylinder', 'Doors']].to_numpy())
print (X)

# NOTE(review): sm.OLS does not add an intercept; the narrative below talks
# about a B0 term, which would require sm.add_constant(X). Left as-is to
# keep the original model (and its summary) unchanged.
est = sm.OLS(y, X).fit()

est.summary()
# -

# The table of coefficients above gives us the values to plug into an equation of form:
#     B0 + B1 * Mileage + B2 * model_ord + B3 * doors
#
# In this example, it's pretty clear that the number of cylinders is more important than anything based on the coefficients.
#
# Could we have figured that out earlier?

y.groupby(df.Doors).mean()

# Surprisingly, more doors does not mean a higher price! (Maybe it implies a sport car in some cases?) So it's not surprising that it's pretty useless as a predictor here. This is a very small data set however, so we can't really read much meaning into it.

# ## Activity

# Mess around with the fake input data, and see if you can create a measurable influence of number of doors on price. Have some fun with it - why stop at 4 doors?
MultivariateRegression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !pip install nltk

import nltk

# Download the corpora/models used below (tokenizer, stop words, sentiment data).
nltk.download([
    "names",
    "stopwords",
    "state_union",
    "twitter_samples",
    "movie_reviews",
    "averaged_perceptron_tagger",
    "vader_lexicon",
    "punkt"
])

# +
import pandas as pd

# Load the survey answers from "Members' Day" (Danish questionnaire data).
bilag6 = pd.read_excel (r'/Users/emilstephens/Desktop/DO2021/teaching_material/session_11/case/bilag/Bilag_6.xlsx', sheet_name ='Medlemmernes Dag besvarelser')
print (bilag6)
# -

print(type("Spørgsmål 1. Hvordan går det \nhos jer, og hvad optager jer lige nu?"))

# Keyword list used to detect mentions of the COVID-19 pandemic.
Corona_list=["corona", "Corona", "Covid-19", "covid", "Covid", "covid-19", "pandemien", "pandemi"]
print(Corona_list)

bilag6

# +
# Flag each answer (column 1) with 1 if it mentions any corona keyword, else 0.
# NOTE(review): the bare `except:` treats non-string cells (NaN) as "no
# mention" but would also hide real errors — consider `except TypeError:`.
nævn_corona = []
for i in bilag6[bilag6.columns[1]]:
    c19 = 0
    try:
        token = nltk.word_tokenize(i)
        for elm in Corona_list:
            if elm in token:
                c19 = 1
                break
        nævn_corona.append(c19)
        continue
    except:
        nævn_corona.append(0)
# -

df=pd.DataFrame(nævn_corona, columns=["c19_ja_nej"])

bilag6=pd.concat([bilag6, df], axis=1)

df_fjernet = bilag6[bilag6['Farvemarkering af hvordan det går'].notna()]
df_fjernet # drops every row that did not answer the color marking question

df_fjernet['c19_ja_nej'].sum() # number of respondents that mention corona

383/1506 ## share of companies that mention covid

iso = df_fjernet[['Farvemarkering af hvordan det går','c19_ja_nej']]

df_fjern0 = iso[iso.c19_ja_nej != 0]
df_fjern0 # checking whether mentioning corona correlates with the color given

df_fjern0.value_counts()

df_fjern1 = iso[iso.c19_ja_nej != 1]
df_fjern1

df_fjern1.value_counts() # does not suggest a link between mentioning covid and the color code given

# +
# !pip install Afinn
from afinn import Afinn
# Danish AFINN sentiment lexicon.
afinn = Afinn(language='da')
# -

afinn.score

# +
# Sentiment-score each answer; non-string cells fall back to 0.
afinn_score = []
for i in df_fjernet[df_fjernet.columns[1]]:
    score = 0
    try:
        score = afinn.score(i)
        afinn_score.append(score)
        continue
    except:
        afinn_score.append(0)
afinn_score
# -

afinn=pd.DataFrame(afinn_score, columns=["sent_score"])
afinn

nyeste_df=pd.concat([df_fjernet, afinn], axis=1)
nyeste_df

sent_c19 = nyeste_df[['sent_score','c19_ja_nej']]
sent_c19

# NOTE(review): `en_iso` is referenced here before it is assigned below, and
# `nul_iso` is never defined anywhere in this notebook (it presumably should
# be `sent_c19[iso.c19_ja_nej != 0]`, the corona-mentioning group) — these
# cells were likely run out of order in the original notebook.
en_iso

nul_iso["sent_score"].mean()

en_iso = sent_c19[iso.c19_ja_nej != 1]
en_iso["sent_score"].mean() # respondents who do not mention corona are on the whole more positive than those who do

# +
from matplotlib import pyplot as plt
import numpy as np

y = en_iso["sent_score"].to_numpy()
x = np.arange(len(y))
plt.scatter(x, y)
plt.grid()
plt.show()

# +
y = nul_iso["sent_score"].to_numpy()
x = np.arange(len(y))
plt.scatter(x, y)
plt.grid()
plt.show()
# -

# Concatenate all free-text answers to question 3 into one string.
joe = nyeste_df[['Spørgsmål 3: \nHvordan kan vi i DI gøre det endnu bedre for dig og din virksomhed?']]

stringg = ""
for i in joe[joe.columns[0]]:
    try:
        stringg = stringg + i
    except:
        pass
stringg

spg3_korpus=nltk.word_tokenize(stringg)
spg3_korpus

from pprint import pprint

finder = nltk.collocations.BigramCollocationFinder.from_words(spg3_korpus)
pprint(finder.ngram_fd.most_common(5))

stopwords = nltk.corpus.stopwords.words("danish")
stopwords

finder = nltk.collocations.BigramCollocationFinder.from_words(spg3_korpus)
pprint(finder.ngram_fd.most_common(5))

print(len(spg3_korpus))
# Remove Danish stop words before looking at collocations.
words = [w for w in spg3_korpus if w.lower() not in stopwords]
print(len(words)) # removes stop words

# Also strip basic punctuation tokens.
sletkomma = [",", ".","(",")"]
print(len(words))
words = [w for w in words if w.lower() not in sletkomma]
print(len(words))

finder = nltk.collocations.BigramCollocationFinder.from_words(words)
pprint(finder.ngram_fd.most_common(10))

finder = nltk.collocations.TrigramCollocationFinder.from_words(words)
pprint(finder.ngram_fd.most_common(10))

finder = nltk.collocations.QuadgramCollocationFinder.from_words(words)
pprint(finder.ngram_fd.most_common(10))

# +
# These collocations paint a picture of a DI that is doing well, but could also do better.
# <NAME> apparently features quite a bit.
# However, we judge that not much can be concluded from this.
# -

fd = nltk.FreqDist(words)
pprint(fd)

fd.most_common(100)

iso = df_fjernet.iloc[:, 4:16]

# Normalized answer distribution for each of the 12 rating columns.
for i in range(12):
    print(iso.iloc[:,i].value_counts(normalize= True))
teaching_material/session_12/gruppe_14/BILAG6, delanalyse 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Problem 2: Generative Adversarial Networks # # * **Learning Objective:** In this problem, you will implement a Generative Adversarial Network with the network structure proposed in [*Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks*](https://arxiv.org/abs/1511.06434), and learn a visualization technique, *activation maximization*. # * **Provided code:** The code for constructing the two parts of the GAN, the discriminator and the generator, is done for you, along with the skeleton code for the training. # * **TODOs:** You will need to figure out how to properly feed the data, compute the loss and update the parameters to complete the training and visualization. In addition, to test your understanding, you will answer some non-coding questions. # + # Import required libraries import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import math # %matplotlib inline # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # - # ## Introduction: The forger versus the police # # *Note: read the story even if you are already familiar with GANs, as one of the questions is related to this story.* # # Generative models try to model the distribution of the data in an explicit way, in the sense that we can easily sample new data points from this model. This is in contrast to discriminative models that try to infer the output from the input. In the class we have seen one classic deep generative model, the Variational Autoencoder (VAE). Here, we will learn another generative model that has risen to prominence in recent years, the Generative Adversarial Network (GAN). 
# # As the maths of Generative Adversarial Networks is somewhat tedious, a story is often told of a forger and a police to illustrate the idea. # # > Imagine a forger that makes fake bills, and a police that tries to find these forgeries. If the forger were a VAE, his goal would be to take some real bills, and try to replicate the real bills as precisely as possible. In GAN, he has a different idea in his mind: rather than trying to replicate the real bills, it suffices to make fake bills such that people *think* they are real. # > # > Now let's start. In the beginning, the police knows nothing about how to distinguish between real and fake bills. The forger knows nothing either and only produces white paper. # > # > In the first round, the police gets the fake bill and learns that the forgeries are white while the real bills are green. The forger then finds out that white papers can no longer fool the police and starts to produce green papers. # > # > In the second round, the police learns that real bills have denominations printed on them while the forgeries do not. The forger then finds out that plain papers can no longer fool the police and starts to print numbers on them. # > # > In the third round, the police learns that real bills have watermarks on them while the forgeries do not. The forger then has to reproduce the watermarks on his fake bills. # > # > ... # > # > Finally, the police is able to spot the tiniest difference between real and fake bills and the forger has to make perfect replicas of real bills to fool the police. # # Now in a GAN, the forger becomes the generator and the police becomes the discriminator. The discriminator is a binary classifier with the two classes being "taken from the real data" ("real") and "generated by the generator" ("fake"). Its objective is to minimize the classification loss. The generator's objective is to generate samples so that the discriminator misclassifies them as real. 
#
# Here we have some complications: the goal is not to find one perfect fake sample. Such a sample will not actually fool the discriminator: if the forger makes hundreds of the exact same fake bill, they will all have the same serial number and the police will soon find out that they are fake. Instead, we want the generator to be able to generate a variety of fake samples such that when presented as a distribution alongside the distribution of real samples, these two are indistinguishable by the discriminator.
#
# So how do we generate different samples with a deterministic generator? We provide it with random numbers as input.
#
# Typically, for the discriminator we use binary cross entropy loss with label 1 being real and 0 being fake. For the generator, the input is a random vector drawn from a standard normal distribution. Denote the generator by $G_{\phi}(z)$, discriminator by $D_{\theta}(x)$, the distribution of the real samples by $p(x)$ and the input distribution to the generator by $q(z)$. Recall that the binary cross entropy loss with classifier output $y$ and label $\hat{y}$ is
#
# $$L(y, \hat{y}) = -\hat{y} \log y - (1 - \hat{y}) \log (1 - y)$$
#
# For the discriminator, the objective is
#
# $$\min_{\theta} \mathrm{E}_{x \sim p(x)}[L(D_{\theta}(x), 1)] + \mathrm{E}_{z \sim q(z)}[L(D_{\theta}(G_{\phi}(z)), 0)]$$
#
# For the generator, the objective is
#
# $$\max_{\phi} \mathrm{E}_{z \sim q(z)}[L(D_{\theta}(G_{\phi}(z)), 0)]$$
#
# The generator's objective corresponds to maximizing the classification loss of the discriminator on the generated samples. Alternatively, we can **minimize** the classification loss of the discriminator on the generated samples **when labelled as real**:
#
# $$\min_{\phi} \mathrm{E}_{z \sim q(z)}[L(D_{\theta}(G_{\phi}(z)), 1)]$$
#
# And this is what we will use in our implementation.
The strength of the two networks should be balanced, so we train the two networks alternatingly, updating the parameters in both networks once in each interation. # ## Problem 2-1: Implementing the GAN # # We first load the data (CIFAR-10) and define some convenient functions. You should already have CIFAR-10 from assignment 1. Just copy the data from there or use ```data/get_datasets.sh``` if you don't have them. # + def unpickle(file): import sys if sys.version_info.major == 2: import cPickle with open(file, 'rb') as fo: dict = cPickle.load(fo) return dict['data'], dict['labels'] else: import pickle with open(file, 'rb') as fo: dict = pickle.load(fo, encoding='bytes') return dict[b'data'], dict[b'labels'] def load_train_data(): X = [] for i in range(5): X_, _ = unpickle('data/cifar-10-batches-py/data_batch_%d' % (i + 1)) X.append(X_) X = np.concatenate(X) X = X.reshape((X.shape[0], 3, 32, 32)).transpose(0, 2, 3, 1) return X def load_test_data(): X_, _ = unpickle('data/cifar-10-batches-py/test_batch') X = X_.reshape((X_.shape[0], 3, 32, 32)).transpose(0, 2, 3, 1) return X # Load cifar-10 data train_samples = load_train_data() / 255.0 test_samples = load_test_data() / 255.0 # + def viz_grid(Xs, padding): N, H, W, C = Xs.shape grid_size = int(math.ceil(math.sqrt(N))) grid_height = H * grid_size + padding * (grid_size + 1) grid_width = W * grid_size + padding * (grid_size + 1) grid = np.zeros((grid_height, grid_width, C)) next_idx = 0 y0, y1 = padding, H + padding for y in range(grid_size): x0, x1 = padding, W + padding for x in range(grid_size): if next_idx < N: img = Xs[next_idx] grid[y0:y1, x0:x1] = img next_idx += 1 x0 += W + padding x1 += W + padding y0 += H + padding y1 += H + padding return grid def set_seed(seed): np.random.seed(seed) tf.random.set_random_seed(seed) def conv2d(input, kernel_size, stride, num_filter, name = 'conv2d'): with tf.variable_scope(name): stride_shape = [1, stride, stride, 1] filter_shape = [kernel_size, kernel_size, 
input.get_shape()[3], num_filter] W = tf.get_variable('w', filter_shape, tf.float32, tf.random_normal_initializer(0.0, 0.02)) b = tf.get_variable('b', [1, 1, 1, num_filter], initializer = tf.constant_initializer(0.0)) return tf.nn.conv2d(input, W, stride_shape, padding = 'SAME') + b def conv2d_transpose(input, kernel_size, stride, num_filter, name = 'conv2d_transpose'): with tf.variable_scope(name): stride_shape = [1, stride, stride, 1] filter_shape = [kernel_size, kernel_size, num_filter, input.get_shape()[3]] output_shape = tf.stack([tf.shape(input)[0], tf.shape(input)[1] * 2, tf.shape(input)[2] * 2, num_filter]) W = tf.get_variable('w', filter_shape, tf.float32, tf.random_normal_initializer(0.0, 0.02)) b = tf.get_variable('b', [1, 1, 1, num_filter], initializer = tf.constant_initializer(0.0)) return tf.nn.conv2d_transpose(input, W, output_shape, stride_shape, padding = 'SAME') + b def fc(input, num_output, name = 'fc'): with tf.variable_scope(name): num_input = input.get_shape()[1] W = tf.get_variable('w', [num_input, num_output], tf.float32, tf.random_normal_initializer(0.0, 0.02)) b = tf.get_variable('b', [num_output], initializer = tf.constant_initializer(0.0)) return tf.matmul(input, W) + b def batch_norm(input, is_training): out = tf.contrib.layers.batch_norm(input, decay = 0.99, center = True, scale = True, is_training = is_training, updates_collections = None) return out def leaky_relu(input, alpha = 0.2): return tf.maximum(alpha * input, input) # - # To save you some mundane work, we have defined a discriminator and a generator for you, in ```_discriminator()``` and ```_generator()``` respectively. Look at the code to see what layers are there. 
#
# For this part, you need to complete code blocks marked with "Prob 2-1":
#
# * **Build the computation graph for the losses:** Complete the following definitions in ```_init_ops()```
#   * ```fake_samples_op```: generate fake samples from ```noise```
#   * ```dis_loss_op```: compute discriminator's loss, with real samples from ```real_input``` and fake
# samples generated by the generator
#   * ```gen_loss_op```: compute generator's loss
# * **Define the optimizer:** We use RMSprop for training. Adam is observed to perform poorly with an unstable objective as is the case in GANs. We've defined ```dis_train_op``` and ```gen_train_op``` for you but those are wrong: rather than updating all the parameters all the time, when training one network we want to keep the other one fixed. Modify the definition to reflect this. [Check here](https://stackoverflow.com/a/35304001) if you are not sure how this is possible.
# * **Feed the data:** Feed the proper samples and labels in ```train()``` for training and in ```generate_one_sample()``` for visualizing the generated samples.
#
# The batch normalization layers should operate in training mode. As per *[How to Train a GAN? Tips and tricks to make GANs work](https://github.com/soumith/ganhacks)*, we put real samples and fake samples in different batches when training the discriminator.
#
# *Note: use the advice on that page with caution if you are doing GAN for your team project. It is already more than 2 years old, which is a **really long time** in deep learning research.
It does not reflect the latest results.* class DCGAN(object): def __init__(self): self.num_epoch = 5 self.batch_size = 32 self.log_step = 50 self.visualize_step = 200 self.code_size = 64 self.learning_rate = 1e-4 self.vis_learning_rate = 1e-2 self.recon_steps = 100 self.actmax_steps = 100 self._dis_called = False self._gen_called = False self.tracked_noise = np.random.normal(0, 1, [64, self.code_size]) self.real_input = tf.placeholder(tf.float32, [None, 32, 32, 3]) self.real_label = tf.placeholder(tf.float32, [None, 1]) self.fake_label = tf.placeholder(tf.float32, [None, 1]) self.noise = tf.placeholder(tf.float32, [None, self.code_size]) self.is_train = tf.placeholder(tf.bool) self.recon_sample = tf.placeholder(tf.float32, [1, 32, 32, 3]) self.actmax_label = tf.placeholder(tf.float32, [1, 1]) with tf.variable_scope('actmax'): self.actmax_code = tf.get_variable('actmax_code', [1, self.code_size], initializer = tf.constant_initializer(0.0)) self._init_ops() def _discriminator(self, input): # We have multiple instances of the discriminator in the same computation graph, # so set variable sharing if this is not the first invocation of this function. 
with tf.variable_scope('dis', reuse = self._dis_called): self._dis_called = True dis_conv1 = conv2d(input, 4, 2, 32, 'conv1') dis_lrelu1 = leaky_relu(dis_conv1) dis_conv2 = conv2d(dis_lrelu1, 4, 2, 64, 'conv2') dis_batchnorm2 = batch_norm(dis_conv2, self.is_train) dis_lrelu2 = leaky_relu(dis_batchnorm2) dis_conv3 = conv2d(dis_lrelu2, 4, 2, 128, 'conv3') dis_batchnorm3 = batch_norm(dis_conv3, self.is_train) dis_lrelu3 = leaky_relu(dis_batchnorm3) dis_reshape3 = tf.reshape(dis_lrelu3, [-1, 4 * 4 * 128]) dis_fc4 = fc(dis_reshape3, 1, 'fc4') return dis_fc4 def _generator(self, input): with tf.variable_scope('gen', reuse = self._gen_called): self._gen_called = True gen_fc1 = fc(input, 4 * 4 * 128, 'fc1') gen_reshape1 = tf.reshape(gen_fc1, [-1, 4, 4, 128]) gen_batchnorm1 = batch_norm(gen_reshape1, self.is_train) gen_lrelu1 = leaky_relu(gen_batchnorm1) gen_conv2 = conv2d_transpose(gen_lrelu1, 4, 2, 64, 'conv2') gen_batchnorm2 = batch_norm(gen_conv2, self.is_train) gen_lrelu2 = leaky_relu(gen_batchnorm2) gen_conv3 = conv2d_transpose(gen_lrelu2, 4, 2, 32, 'conv3') gen_batchnorm3 = batch_norm(gen_conv3, self.is_train) gen_lrelu3 = leaky_relu(gen_batchnorm3) gen_conv4 = conv2d_transpose(gen_lrelu3, 4, 2, 3, 'conv4') gen_sigmoid4 = tf.sigmoid(gen_conv4) return gen_sigmoid4 def _loss(self, labels, logits): loss = tf.nn.sigmoid_cross_entropy_with_logits(labels = labels, logits = logits) return tf.reduce_mean(loss) def _reconstruction_loss(self, generated, target): loss = tf.nn.l2_loss(generated - target) return tf.reduce_mean(loss) # Define operations def _init_ops(self): ################################################################################ # Prob 2-1: complete the definition of these operations # ################################################################################ # self.fake_samples_op = None # self.dis_loss_op = None # self.gen_loss_op = None ################################################################################ # Prob 2-1: fix the definition 
of these operations # ################################################################################ # dis_optimizer = tf.train.RMSPropOptimizer(self.learning_rate) # self.dis_train_op = dis_optimizer.minimize(self.dis_loss_op) # gen_optimizer = tf.train.RMSPropOptimizer(self.learning_rate) # self.gen_train_op = gen_optimizer.minimize(self.gen_loss_op) ################################################################################ # Prob 2-4: check the definition of these operations # # skip this part when working on problem 2-1 and come back for problem 2-4 # ################################################################################ self.actmax_sample_op = self._generator(self.actmax_code) actmax_dis = self._discriminator(self.actmax_sample_op) self.actmax_loss_op = self._loss(self.actmax_label, actmax_dis) actmax_optimizer = tf.train.AdamOptimizer(self.vis_learning_rate) self.actmax_op = actmax_optimizer.minimize(self.actmax_loss_op, var_list = [self.actmax_code]) ################################################################################ # Prob 2-4: complete the definition of these operations # # skip this part when working on problem 2-1 and come back for problem 2-4 # ################################################################################ # self.recon_loss_op = None # recon_optimizer = tf.train.AdamOptimizer(self.vis_learning_rate) # self.reconstruct_op = recon_optimizer.minimize(self.recon_loss_op) ################################################################################ # END OF YOUR CODE # ################################################################################ # Training function def train(self, sess, train_samples): sess.run(tf.global_variables_initializer()) num_train = train_samples.shape[0] step = 0 # smooth the loss curve so that it does not fluctuate too much smooth_factor = 0.95 plot_dis_s = 0 plot_gen_s = 0 plot_ws = 0 dis_losses = [] gen_losses = [] max_steps = int(self.num_epoch * (num_train // 
self.batch_size)) print('Start training ...') for epoch in range(self.num_epoch): for i in range(num_train // self.batch_size): step += 1 batch_samples = train_samples[i * self.batch_size : (i + 1) * self.batch_size] noise = np.random.normal(0, 1, [self.batch_size, self.code_size]) zeros = np.zeros([self.batch_size, 1]) ones = np.ones([self.batch_size, 1]) ################################################################################ # Prob 2-1: complete the feed dictionary # ################################################################################ # dis_feed_dict = {} ################################################################################ # END OF YOUR CODE # ################################################################################ _, dis_loss = sess.run([self.dis_train_op, self.dis_loss_op], feed_dict = dis_feed_dict) ################################################################################ # Prob 2-1: complete the feed dictionary # ################################################################################ # gen_feed_dict = {} ################################################################################ # END OF YOUR CODE # ################################################################################ _, gen_loss = sess.run([self.gen_train_op, self.gen_loss_op], feed_dict = gen_feed_dict) plot_dis_s = plot_dis_s * smooth_factor + dis_loss * (1 - smooth_factor) plot_gen_s = plot_gen_s * smooth_factor + gen_loss * (1 - smooth_factor) plot_ws = plot_ws * smooth_factor + (1 - smooth_factor) dis_losses.append(plot_dis_s / plot_ws) gen_losses.append(plot_gen_s / plot_ws) if step % self.log_step == 0: print('Iteration {0}/{1}: dis loss = {2:.4f}, gen loss = {3:.4f}'.format(step, max_steps, dis_loss, gen_loss)) fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.imshow(viz_grid(self.generate(self.tracked_noise), 1)) plt.show() plt.plot(dis_losses) plt.title('discriminator loss') plt.xlabel('iterations') 
plt.ylabel('loss') plt.show() plt.plot(gen_losses) plt.title('generator loss') plt.xlabel('iterations') plt.ylabel('loss') plt.show() print('... Done!') # Find the reconstruction of one input sample def reconstruct_one_sample(self, sample): ################################################################################ # Prob 2-4: initialize self.actmax_code # # skip this part when working on problem 2-1 and come back for problem 2-4 # ################################################################################ # actmax_init_val = None ################################################################################ # END OF YOUR CODE # ################################################################################ sess.run(self.actmax_code.assign(actmax_init_val)) last_reconstruction = None last_loss = None for i in range(self.recon_steps): ################################################################################ # Prob 2-4: complete the feed dictionary # # skip this part when working on problem 2-1 and come back for problem 2-4 # ################################################################################ # recon_feed_dict = {} ################################################################################ # END OF YOUR CODE # ################################################################################ run_ops = [self.recon_loss_op, self.reconstruct_op, self.actmax_sample_op] last_loss, _, last_reconstruction = sess.run(run_ops, feed_dict = recon_feed_dict) return last_loss, last_reconstruction # Find the reconstruction of a batch of samples def reconstruct(self, samples): reconstructions = np.zeros(samples.shape) total_loss = 0 for i in range(samples.shape[0]): loss, reconstructions[i:i+1] = self.reconstruct_one_sample(samples[i:i+1]) total_loss += loss return total_loss / samples.shape[0], reconstructions # Generates a single sample from input code def generate_one_sample(self, code): 
################################################################################ # Prob 2-1: complete the feed dictionary # ################################################################################ # gen_vis_feed_dict = {} ################################################################################ # END OF YOUR CODE # ################################################################################ generated = sess.run(self.fake_samples_op, feed_dict = gen_vis_feed_dict) return generated # Generates samples from input batch of codes def generate(self, codes): generated = np.zeros((codes.shape[0], 32, 32, 3)) for i in range(codes.shape[0]): generated[i:i+1] = self.generate_one_sample(codes[i:i+1]) return generated # Perform activation maximization on one initial code def actmax_one_sample(self, initial_code): ################################################################################ # Prob 2-4: check this function # # skip this part when working on problem 2-1 and come back for problem 2-4 # ################################################################################ actmax_init_val = tf.convert_to_tensor(initial_code, dtype = tf.float32) sess.run(self.actmax_code.assign(actmax_init_val)) for i in range(self.actmax_steps): actmax_feed_dict = { self.actmax_label: np.ones([1, 1]), self.is_train: False } _, last_actmax = sess.run([self.actmax_op, self.actmax_sample_op], feed_dict = actmax_feed_dict) return last_actmax # Perform activation maximization on a batch of different initial codes def actmax(self, initial_codes): actmax_results = np.zeros((initial_codes.shape[0], 32, 32, 3)) for i in range(initial_codes.shape[0]): actmax_results[i:i+1] = self.actmax_one_sample(initial_codes[i:i+1]) return actmax_results.clip(0, 1) # Now let's do the training! # # Don't panic if the loss curve goes wild. The two networks are competing for the loss curve to go different directions, so virtually anything can happen. 
If your code is correct, the generated samples should have a high variety. # + tf.reset_default_graph() set_seed(21) with tf.Session() as sess: with tf.device('/cpu:0'): dcgan = DCGAN() sess.run(tf.global_variables_initializer()) dcgan.train(sess, train_samples) dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'dis') gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gen') saver = tf.train.Saver(dis_var_list + gen_var_list) saver.save(sess, 'model/dcgan') # - # ## Problem 2-2: The forger versus the police, revisited # # In the forger versus police story, we made part of it hand-wavy to hide a flaw that makes the story improbable to actually happen and makes it a bad analogy of how the training works in a GAN. Now that you have implemented a GAN, can you spot the flaw? # # Specifically, when we consider one of the two parties, the other is treated as a black box. They know their opponent's result but not how they works. What is wrong here? # **Your answer below:** # # --- # # *The Tao of GANs: They might be adversaries, yet they are also cooperative.* # # --- # ## Problem 2-3: The Batch Normalization dilemma # # Here are three questions related to the use of Batch Normalization in GANs. The first two will not be graded and their answers are provided. But you should attempt to solve them before looking at the answer. # # --- # # We made separate batches for real samples and fake samples when training the discriminator. Is this just an arbitrary design decision made by the inventor that later becomes the common practice, or is it critical to the correctness of the algorithm? # **Select text below to see answer:** # <p style="color:white;">When we are training the generator, the input batch to the discriminator will always consist of only fake samples. 
If we separate real and fake batches when training the discriminator, then the fake samples are normalized in the same way when we are training the discriminator and when we are training the generator. If we mix real and fake samples in the same batch when training the discriminator, then the fake samples are not normalized in the same way when we train the two networks, which causes the generator to fail to learn the correct distribution.</p> # --- # Look at the construction of the discriminator carefully. You will find that between ```dis_conv1``` and ```dis_lrelu1``` there is no batch normalization. This is not a mistake. What could go wrong if there were a batch normalization layer there? Why do you think that omitting this batch normalization layer solves the problem practically if not theoretically? # **Select text below to see answer:** # <p style="color:white;">Since we put real samples and fake samples in separate batches, if we add a batch normalization layer between dis_conv1 and dis_lrelu1, the discriminator would not be able to distinguish two distributions if one can be obtained by applying an isotropic scaling and a translation in color space to the other.</p> # # <p style="color:white;">By removing the first batch normalization layer, for two different distributions to get confused with each other they must produce two distributions after dis_lrelu1 such that one can be obtained by applying an isotropic scaling and a translation to the other. Such a case is still possible but extremely unlikely to happen.</p> # --- # Propose a different way of feeding the samples to solve the problem in the second question without omitting any batch normalization layers or changing their mode of operation. 
# **Your answer below:**
#
# ---
#
# *Take-aways from this problem: always exercise extreme caution when using batch normalization in your network!*
#
# *For further info (optional): you can read this paper to find out more about why Batch Normalization might be bad for your GANs: [On the Effects of Batch and Weight Normalization in Generative Adversarial Networks](https://arxiv.org/abs/1704.03971)*
#
# ---
# ## Problem 2-4: Activation Maximization
# Activation Maximization is a visualization technique to see what a particular neuron has learned, by finding the input that maximizes the activation of that neuron. Here we use methods similar to *[Synthesizing the preferred inputs for neurons in neural networks via deep generator networks](https://arxiv.org/abs/1605.09304)*.
#
# In short, what we want to do is to find the samples that the discriminator considers most real, among all possible outputs of the generator, which is to say, we want to find the codes (i.e. a point in the input space of the generator) from which the generated images, if labelled as real, would minimize the classification loss of the discriminator:
#
# $$\min_{z} L(D_{\theta}(G_{\phi}(z)), 1)$$
#
# Compare this to the objective when we were training the generator:
#
# $$\min_{\phi} \mathrm{E}_{z \sim q(z)}[L(D_{\theta}(G_{\phi}(z)), 1)]$$
#
# The function to minimize is the same, with the difference being that when training the network we fix a set of input data and find the optimal model parameters, while in activation maximization we fix the model parameters and find the optimal input.
#
# So, similar to the training, we use gradient descent to solve for the optimal input. Starting from a random code drawn from a standard normal distribution, we perform a fixed number of steps of the Adam optimization algorithm on the code.
#
# The batch normalization layers should work in evaluation mode.
#
# We provide the code for this part, as a reference for solving the next part.
You may want to go back to the code above and check the following: # # * **Build the computation graph for the loss:** Check the definition of these operations in ```_init_ops()``` # * ```actmax_sample_op```: generate samples from ```actmax_code``` # * ```actmax_loss_op```: compute discriminator's loss on samples generated from ```actmax_code``` # * **Define the optimizer:** Check the definition of ```actmax_op```, which updates ```actmax_code``` # * **Feed the data:** Check the function ```actmax_one_sample()``` # + tf.reset_default_graph() set_seed(241) with tf.Session() as sess: with tf.device('/cpu:0'): dcgan = DCGAN() sess.run(tf.global_variables_initializer()) dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'dis') gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gen') saver = tf.train.Saver(dis_var_list + gen_var_list) saver.restore(sess, 'model/dcgan') actmax_results = dcgan.actmax(np.random.random([64, dcgan.code_size])) fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.imshow(viz_grid(actmax_results, 1)) plt.show() # - # The output should have less variety than those generated from random code. While it is reasonable that the samples that are "most real" makes up only a small portion of the sample space, this also gives us a hint that the so-called "mode collapse", in which the GAN simply fails to model a majority part of the data distribution, is a real problem. # # A similar technique can be used to reconstruct a test sample, that is, to find the code that most closely approximates the test sample. To achieve this, we only need to change the loss function from discriminator's loss to the squared L2-distance between the generated image and the target image: # # $$\min_{z} \left|\left|G_{\phi}(z)-x\right|\right|_2^2$$ # # This time, we always start from a zero vector. 
# # For this part, you need to complete code blocks marked with "Prob 2-4": # # * **Build the computation graph for the loss:** Complete the definition of ```recon_loss_op``` in ```_init_ops()```, which computes the squared L2-distance between ```recon_sample``` and the sample generated from ```actmax_code```. # * **Define the optimizer:** Modify the definition of ```reconstruct_op``` so that it updates ```actmax_code``` rather than the parameters of the networks. # * **Feed the data:** Set the proper initial value and feed the proper data in ```reconstruct_one_sample()``` # + tf.reset_default_graph() with tf.Session() as sess: with tf.device('/cpu:0'): dcgan = DCGAN() sess.run(tf.global_variables_initializer()) dis_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'dis') gen_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'gen') saver = tf.train.Saver(dis_var_list + gen_var_list) saver.restore(sess, 'model/dcgan') avg_loss, reconstructions = dcgan.reconstruct(test_samples[0:64]) print('average reconstruction loss = {0:.4f}'.format(avg_loss)) fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.imshow(viz_grid(test_samples[0:64], 1)) plt.show() fig = plt.figure(figsize = (8, 8)) ax1 = plt.subplot(111) ax1.imshow(viz_grid(reconstructions, 1)) plt.show() # - # Now you can see the effect of increasing the training epochs. You should be able to achieve a reconstruction loss lower than 32.
references/CSCI599-Assignment2-master/CSCI599-Assignment2-master/Old_p_ipynb_checkpoints/Problem_2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from matplotlib import pyplot as plt from imblearn.ensemble import BalancedRandomForestClassifier import preprocessing.utils as utils import pandas as pd import numpy as np import seaborn as sns from sklearn.metrics import plot_confusion_matrix from sklearn.metrics import confusion_matrix # ### This notebook is for illustration purpose. Please visit https://github.com/hym97/CAM_final_project if you want to play with the model yourself # ## Classification # ### Introduction # # This problem is a fraud detection problem. Where the False label occupies nearly 99% of the dataset, we can simply achieve 99% accuracy by making negative predictions for all the data. But it will not help us to detect fraud. Therefore, we must do something to the dataset. # # To address the problem, we can use techniques like **Undersampling**, **Oversampling**, or **Ensemble Learning**. # # We used balanced random forest in this implementation. 
df = pd.read_csv('./data/TrainingData.csv')
X, Y = utils.pipeline(df)
# Column 0 of the label frame is the regression target (NMONTHS);
# column 1 is the binary classification target (FORCLOSED).
Y_regression = Y.values[:,0]
Y = Y.values[:,1]
# Shuffle once, then split 60/20/20 into train/validation/test.
data = np.c_[X,Y]
np.random.shuffle(data)
train, validate, test = np.split(data,[int(.6 * data.shape[0]), int(.8 * data.shape[0])])
train_X, train_Y = train[:,:-1], train[:,-1]
validate_X, validate_Y = validate[:,:-1], validate[:,-1]
test_X, test_Y = test[:,:-1], test[:,-1]

# Balanced random forest under-samples the majority class per tree,
# which is what makes the ~99%-negative dataset learnable.
brf = BalancedRandomForestClassifier(n_estimators=150, random_state=37)
brf.fit(train_X,train_Y)
predict_Y = brf.predict(validate_X)

# +
fig, axs = plt.subplots(ncols=2, figsize=(10, 5))
plot_confusion_matrix(brf, validate_X, validate_Y, ax=axs[0], colorbar=False)
axs[0].set_title("Balanced random forest (val)")
plot_confusion_matrix(brf, test_X, test_Y, ax=axs[1], colorbar=False)
axs[1].set_title("Balanced random forest (test)")
plt.show()
# -

# #### Remarks
#
# From the figures above, we can see many False Positive cases (24382, 24543), and the accuracy drops to 79%. However, in return, we can successfully detect fraud which is far more important than accuracy in reality.

def calculate_metrics(Y_predict, Y_labeled):
    """Return (TPR, FPR) for binary predictions.

    Note the argument order: the predictions are passed in sklearn's
    ``y_true`` slot, so the matrix is transposed relative to the usual
    convention — metrics[i, j] == count(pred == i and label == j).
    The index arithmetic below accounts for that:
    TPR = TP / (TP + FN) and FPR = FP / (FP + TN).
    """
    metrics = confusion_matrix(Y_predict, Y_labeled)
    TPR = metrics[1,1] / (metrics[0,1] + metrics[1,1])
    FPR = metrics[1,0] / (metrics[0,0] + metrics[1,0])
    return TPR, FPR

# Backward-compatible alias for the original (misspelled) name.
calcualte_metrics = calculate_metrics

# ### Calculate the metric

# +
predict_val, predict_test = brf.predict(validate_X), brf.predict(test_X)
TPR_val, FPR_val = calculate_metrics(predict_val, validate_Y)
TPR_test, FPR_test = calculate_metrics(predict_test, test_Y)
print("Performance on val set: TPR:{:.2f} FPR:{:.2f}".format(TPR_val,FPR_val))
print("Performance on test set: TPR:{:.2f} FPR:{:.2f}".format(TPR_test,FPR_test))
# -

# ## Regression

# ### Introduction
#
# This problem is simply a regression problem. But the labeled data are highly skewed. We'd better use the log transformation to make NMONTHS columns more normally distributed to get better performance.
#
# There are many regression methods.
# However, considering I do not need much interpretability, I choose FFNN to make the prediction.

# #### Architecture
#
# FFNN_classifer(<br>
#  (layer1): Linear(in_features=49, out_features=64, bias=True)<br>
#  (layer2): Linear(in_features=64, out_features=128, bias=True)<br>
#  (layer3): Linear(in_features=128, out_features=10, bias=True)<br>
#  (layer4): Linear(in_features=10, out_features=1, bias=True)<br>
#  (dropout): Dropout(p=0.2, inplace=False)<br>
# )

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math

# Rebuild the features and target: regression target is NMONTHS (column 0),
# trained in log space; same 60/20/20 split strategy as the classifier.
X, Y = utils.pipeline(df)
Y_regression = Y.values[:,0]
data = np.c_[X,Y_regression]
np.random.shuffle(data)
train, validate, test = np.split(data, [int(.6 * data.shape[0]), int(.8 * data.shape[0])])
train_X, train_Y = train[:,:-1], np.log(train[:,-1])
validate_X, validate_Y = validate[:,:-1], np.log(validate[:,-1])

class FFNN_classifer(nn.Module):
    # Feed-forward regressor: 49 -> 64 -> 128 -> 10 -> 1 with ReLU
    # activations and dropout before the output layer.
    def __init__(self, input_size):
        super(FFNN_classifer, self).__init__()
        self.layer1 = nn.Linear(input_size, 64)
        self.layer2 = nn.Linear(64, 128)
        self.layer3 = nn.Linear(128, 10)
        self.layer4 = nn.Linear(10, 1)
        self.dropout = nn.Dropout(.2)

    def forward(self, input_data):
        # Inputs arrive as float64 numpy-backed tensors; cast to float32.
        input_data =input_data.float()
        output = self.layer1(input_data)
        output = F.relu(output)
        output = self.layer2(output)
        output = F.relu(output)
        output = self.layer3(output)
        output = F.relu(output)
        output = self.dropout(output)
        output = self.layer4(output)
        return output

# +
def train_model(input_data, input_labels, optimizer, model,loss_func):
    # One optimization step on a single mini-batch; returns the batch loss.
    optimizer.zero_grad()
    output = model(input_data)
    loss = loss_func(output.squeeze(1), input_labels.float())
    loss.backward()
    optimizer.step()
    return loss.item()

def mini_batch(batch_size, input_data, label):
    # Yield consecutive (batch, label) slices; the last batch may be short.
    length = len(input_data)
    batch_num = math.ceil(length / batch_size)
    for i in range(batch_num):
        input_batch, input_label = input_data[batch_size*i:batch_size * (i + 1), :], \
                                   label[batch_size*i:batch_size * (i + 1)]
        yield input_batch, input_label

def eval_model(input_data, input_labels, model,loss_func):
    # Compute the loss over a whole numpy dataset; toggles eval/train mode
    # so dropout is disabled during evaluation.
    # NOTE(review): no torch.no_grad() guard, so the forward pass still
    # builds an autograd graph — wasteful but harmless; confirm before changing.
    model.eval()
    input_data, input_labels = torch.tensor(input_data), torch.tensor(input_labels)
    output = model(input_data)
    loss = loss_func(output.squeeze(1), input_labels.float())
    model.train()
    return loss.item()
# -

# Train 50 epochs of mini-batch Adam on L1 (MAD) loss. The `loss` printed
# every 10th epoch is the LAST batch's loss of that epoch, not an average.
epoch, N_epoch = 0, 50
batch_size = 128
model = FFNN_classifer(49)
optimizer = optim.Adam(model.parameters())
loss_func = nn.L1Loss()
while epoch < N_epoch:
    loss = 0
    for input_batch, input_label in mini_batch(batch_size, train_X, train_Y):
        input_batch, input_label = torch.tensor(input_batch), torch.tensor(input_label)
        loss = train_model(input_batch, input_label, optimizer, model, loss_func)
    if epoch % 10 == 0:
        print("epoch:{} Loss on training:{:.2f}".format(epoch, loss))
        loss_val = eval_model(validate_X, validate_Y,model,loss_func)
        print("\tLoss on dev:{:.2f}".format(loss_val))
    epoch += 1

# #### Remarks
#
# We can see the loss on training set is still less than the loss on dev set even if a dropout layer is included. That may indicate we include too many parameters in the model.

# ### Calculate the metric

# +
# Report test-set MAD both in log space (the training objective) and after
# exp() back-transform to the original NMONTHS scale.
test_X, test_Y = test[:,:-1], test[:,-1]
test_log_error = eval_model(test_X, np.log(test_Y),model, loss_func)
model.eval()
test_X = torch.tensor(test_X)
test_normal_error = np.abs(np.exp(model(test_X).detach().numpy()).squeeze(1) - test_Y).sum() / test_Y.shape[0]
print('On Log Scale: MAD: {:.2f}'.format(test_log_error))
print('On Normal Scale: MAD: {:.2f}'.format(test_normal_error))
# -

# ## Make Predictions

# Score the unlabeled test file with both models and write submission.csv.
# NOTE(review): assumes `brf` from the classification section is still in
# scope and pipeline_test yields the same 49 feature columns — confirm.
test_data = pd.read_csv('./data/TestDataYremoved.csv')
LID = test_data.LID.values
pp_df = utils.pipeline_test(test_data)
model.eval()
FORCLOSED = brf.predict(pp_df)
NMONTHS = np.exp(model(torch.tensor(pp_df)).detach().numpy())
prediction = np.c_[LID, FORCLOSED, NMONTHS]
df = pd.DataFrame(prediction, columns = ['LID', 'FORCLOSED', 'NMONTHS'])
df.FORCLOSED = df.FORCLOSED.map({0:False,1:True})
df.LID = df.LID.astype('int64')
df.head()
df.to_csv('submission.csv', index = False)
.ipynb_checkpoints/SubmissionNotebook-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # import module import numpy as np import numba import xarray as xr # + [markdown] slideshow={"slide_type": "slide"} # # Continuous Equation # # $$ # \begin{align} # \frac{\partial h}{\partial t}+\frac{\partial q_x}{\partial x} +\frac{\partial q_y}{\partial y} = 0 # \end{align} # $$ # + @numba.jit(nopython=True, parallel=False) def conEq(dep, qx, qy, zb, dt, dx, dy, hmin, hbuf): imax, jmax = len(dep), len(dep[0]) depn = np.zeros_like(dep, dtype=np.float64) fluxx = np.zeros((imax+1, jmax), dtype=np.float64) fluxy = np.zeros((imax, jmax+1), dtype=np.float64) gravity = float( 9.8 ) f = lambda Qp, Qm : Qm if Qp >= 0.0 and Qm >= 0.0 else (Qp if Qp <= 0.0 and Qm <= 0.0 else 0.5*Qp+0.5*Qm ) def flux(Qp, Qm, depp, depm, zbp, zbm) : r = f(Qp, Qm) if ( (depm + zbm) < zbp) and (depp <= hbuf) : r = 0.0 if ( (depp + zbp) < zbm) and (depm <= hbuf) : r = 0.0 return r for i in numba.prange( 1, imax ): for j in numba.prange( jmax ): c, xm = (i,j), (i-1,j) fluxx[c] = flux(qx[c], qx[xm], dep[c], dep[xm], zb[c], zb[xm]) for i in numba.prange( imax ): for j in numba.prange( 1, jmax ): c, ym = (i,j), (i,j-1) fluxy[c] = flux(qy[c], qy[ym], dep[c], dep[ym], zb[c], zb[ym]) # wall boundary fluxy[:,0] = 0.0 #fluxy[:,0] fluxy[:,-1] = 0.0 #fluxy[:,0] n = 0 for i in numba.prange(1, imax-1): for j in numba.prange(jmax): c, xp, yp = (i, j), (i+1, j), (i, j+1) depn[c] = dep[c] - dt*(fluxx[xp] - fluxx[c])/dx - dt*(fluxy[yp] - fluxy[c])/dy if depn[c] < hmin : n += 1 depn[c] = hmin # upstream boundary depn[0][:] = depn[1][:] # downstream boundary depn[-1][:] = depn[-2][:] return depn, n # + [markdown] slideshow={"slide_type": "slide"} # # Momentum Equation # # $$ # \begin{align} # \frac{\partial q_x}{\partial t}+\frac{\partial u q_x}{\partial x}+\frac{\partial v q_x}{\partial 
# y}+gh\frac{\partial H}{\partial x}+\frac{\tau_{0x}}{\rho}
# - \nu_t h \left(\frac{\partial^2 u}{\partial x^2}+\frac{\partial^2 u}{\partial y^2} \right)= 0 \\
# \frac{\partial q_y}{\partial t}+\frac{\partial u q_y}{\partial x}+\frac{\partial v q_y}{\partial y}+gh\frac{\partial H}{\partial y}+\frac{\tau_{0y}}{\rho}- \nu_t h \left(\frac{\partial^2 v}{\partial x^2}+\frac{\partial^2 v}{\partial y^2} \right)
# = 0
# \end{align}
# $$

# + slideshow={"slide_type": "slide"}
@numba.jit(nopython=True, parallel=False)
def momentEq(dep, qx, qy, depn, zb, dt, dx, dy, qup, cManning, hmin, hbuf, direction):
    """Advance one momentum component one explicit time step.

    dep, qx, qy : depth and unit discharges at time n
    depn        : depth already advanced to n+1 by conEq
    zb          : bed elevation
    qup         : upstream unit-discharge boundary value
    cManning    : Manning roughness; hmin/hbuf : dry/buffer depth thresholds
    direction   : 1 -> update qx, 2 -> update qy
    Returns the updated discharge component qn.
    """
    #direction = 1:x, 2:y
    gravity = float( 9.8 )
    q = qx if direction == 1 else qy
    u, v = qx/dep, qy/dep
    Vdir = q/dep  # velocity of the component being updated
    imax, jmax = len(q), len(q[0])
    qn = np.zeros_like(q, dtype=np.float64)
    fluxx = np.zeros((imax+1, jmax), dtype=np.float64)
    fluxy = np.zeros((imax, jmax+1), dtype=np.float64)

    # First-order upwind advective flux v*q at a face.
    f = lambda vp,vm,qp,qm : vm*qm if vp >= 0.0 and vm >= 0.0 else \
        (vp*qp if vp <= 0.0 and vm <= 0.0 else (0.5*vp+0.5*vm)*(0.5*qp+0.5*qm) )

    def flux1(vp, vm, qp, qm, depp, depm, zbp, zbm) :
        # BUGFIX: compute the upwind flux FIRST and then apply the wet/dry
        # guards, mirroring flux() in conEq. Previously the two guard
        # assignments preceded `r = f(...)` and were unconditionally
        # overwritten (dead stores), so momentum could leak across a
        # wet/dry step that the continuity step treated as closed.
        r = f(vp,vm,qp,qm)
        if ( (depm + zbm) < zbp) and (depp <= hbuf) : r = 0.0
        if ( (depp + zbp) < zbm) and (depm <= hbuf) : r = 0.0
        return r

    for i in numba.prange( 1, imax ):
        for j in numba.prange( jmax ):
            c, xm = (i,j), (i-1,j)
            fluxx[c] = flux1( u[c], u[xm], q[c], q[xm], dep[c], dep[xm], zb[c], zb[xm] )

    # boundary : not use (sentinel values; these faces never enter the update)
    fluxx[-1,:] = -9999
    fluxx[0,:] = -9999

    for i in numba.prange( imax ):
        for j in numba.prange( 1, jmax ):
            c, ym = (i,j), (i,j-1)
            fluxy[c] = flux1( v[c], v[ym], q[c], q[ym], dep[c], dep[ym], zb[c], zb[ym] )

    # wall boundary: mirror fluxes so the side walls are no-flux.
    fluxy[:,0] = -fluxy[:,1]
    fluxy[:,-1] = -fluxy[:,-2]

    for i in numba.prange(1, imax-1):
        for j in numba.prange(jmax):
            c = (i, j)
            if depn[c] <= hbuf :
                # Effectively dry cell: no momentum.
                qn[c] = 0.0
            else:
                # pressure & gravity term
                if direction == 2 and ((j == 0) or (j == jmax-1)) :
                    # Side-wall rows: one-sided water-surface slope.
                    if j == 0 :
                        c, yp = (i, j), (i, j+1)
                        Hc, Hp = depn[c] + zb[c], depn[yp] + zb[yp]
                        if Hc < zb[yp] and depn[yp] <= hbuf :
                            dHdx = 0.0
                        else :
                            dHdx = ( Hp - Hc )/dy
                    elif j == jmax-1 :
                        c, ym = (i, j), (i, j-1)
                        Hc, Hm = depn[c] + zb[c], depn[ym] + zb[ym]
                        if Hc < zb[ym] and depn[ym] <= hbuf :
                            dHdx = 0.0
                        else :
                            dHdx = ( Hc - Hm )/dy
                # elif direction == 1 and i == imax-1 :
                #     pass
                else :
                    c = (i, j)
                    if direction == 1 :
                        xp = (i+1, j) #if i == imax-1 else (i+1, j)
                        xm = (i-1, j)
                        dp, dm, delta = xp, xm, dx
                    else :
                        yp = (i, j+1)
                        ym = (i, j-1)
                        dp, dm, delta = yp, ym, dy
                    Vc, Vp, Vm = q[c]/dep[c], q[dp]/dep[dp], q[dm]/dep[dm]
                    Hc, Hp, Hm = depn[c]+zb[c], depn[dp]+zb[dp], depn[dm]+zb[dm]
                    if Hc < zb[dp] and depn[dp] <= hbuf :
                        if Hc < zb[dm] and depn[dm] <= hbuf :
                            # Dry on both sides: no surface-slope forcing.
                            dHdx = 0.0
                        else:
                            dHdx = (Hc-Hm)/delta
                    elif Hc < zb[dm] and depn[dm] <= hbuf :
                        dHdx = (Hp-Hc)/delta
                    else :
                        # Courant-number-weighted blend of the two one-sided
                        # slopes, upwinded by the local flow direction.
                        if Vc > 0.0 and Vp > 0.0 and Vm > 0.0:
                            Cr1, Cr2 = 0.5*(abs(Vc)+abs(Vp))*dt/delta, 0.5*(abs(Vc)+abs(Vm))*dt/delta
                            dHdx1, dHdx2 = (Hp-Hc)/delta, (Hc-Hm)/delta
                        elif Vc < 0.0 and Vp < 0.0 and Vm < 0.0:
                            Cr1, Cr2 = 0.5*(abs(Vc)+abs(Vm))*dt/delta, 0.5*(abs(Vc)+abs(Vp))*dt/delta
                            dHdx1, dHdx2 = (Hc-Hm)/delta, (Hp-Hc)/delta
                        else:
                            Cr1 = Cr2 = 0.5*(abs(0.5*(Vc+Vp))+abs(0.5*(Vc+Vm)))*dt/delta
                            dHdx1 = dHdx2 = (0.5*(Hc+Hp) - 0.5*(Hc+Hm)) / delta
                        w1, w2 = 1-Cr1**0.5, Cr2**0.5
                        dHdx = w1 * dHdx1 + w2 * dHdx2

                # viscous sublayer: Manning-based bed friction.
                Cf = gravity*cManning**2.0/dep[c]**(1.0/3.0)
                Vnorm = np.sqrt(u[c]**2.0+v[c]**2.0)
                Vis = Cf * Vnorm * u[c] if direction == 1 else Cf * Vnorm * v[c]

                # turbulence: depth-averaged eddy viscosity Laplacian.
                nut = 0.4/6.0*dep[c]*np.sqrt(Cf)*np.abs(Vnorm)
                c = (i, j)
                xp = (i+1, j)
                xm = (i-1, j)
                yp = (i, j+1)
                ym = (i, j-1)
                # side boundary : non-slip condition
                # if i == imax-1:
                #     turb = 0.0
                if j == 0 :
                    if direction == 1:
                        turb = nut * ( Vdir[xp] - 2.0*Vdir[c] + Vdir[xm] )/ dx**2 \
                             + nut * ( Vdir[yp] - 3.0*Vdir[c] )/ dy**2
                    elif direction == 2:
                        turb = nut * ( Vdir[xp] - 2.0*Vdir[c] + Vdir[xm] )/ dx**2 \
                             + nut * ( Vdir[yp] - 3.0*Vdir[c] )/ dy**2
                elif j == jmax-1 :
                    if direction == 1:
                        turb = nut * ( Vdir[xp] - 2.0*Vdir[c] + Vdir[xm] )/ dx**2 \
                             + nut * ( - 3.0*Vdir[c] + Vdir[ym] )/ dy**2
                    elif direction == 2:
                        turb = nut * ( Vdir[xp] - 2.0*Vdir[c] + Vdir[xm] )/ dx**2 \
                             + nut * ( - 3.0*Vdir[c] + Vdir[ym] )/ dy**2
                else :
                    turb = nut * ( Vdir[xp] - 2.0*Vdir[c] + Vdir[xm] )/ dx**2 \
                         + nut * ( Vdir[yp] - 2.0*Vdir[c] + Vdir[ym] )/ dy**2

                # Assemble: advection + pressure/gravity + friction - diffusion.
                c, xp, yp = (i,j), (i+1,j), (i,j+1)
                sourcet = Vis - dep[c] * turb
                qn[c] = q[c] - dt * ( fluxx[xp] - fluxx[c] ) / dx \
                             - dt * ( fluxy[yp] - fluxy[c] ) / dy \
                             - dt * gravity * depn[c] * dHdx \
                             - dt * sourcet

    # upstream boundary
    if direction == 2 :
        qn[0,:] = 0.0
    else :
        # updep = depn.copy()
        # for j in range(jmax):
        #     updep[0,j] = 0.0 if updep[0,j] <= hmin else updep[0,j]
        # print(np.sum( updep[0,:]**(5/3) ))
        # alpha = Qup / dy / np.sum( updep[0,:]**(5/3) )
        qn[0,:] = qup # alpha * updep[0,:]**(5/3)

    # downstream boundary: zero-gradient.
    qn[-1,:] = qn[-2,:]

    return qn


# + slideshow={"slide_type": "slide"}
@numba.jit(nopython=True, parallel=False)
def simulation(dep, qx, qy, zb, dt, dx, dy, qup, cManning, hmin, hbuf):
    # One full time step: continuity first, then both momentum components
    # using the updated depth; also reports the advective CFL number.
    depn, count = conEq(dep, qx, qy, zb, dt, dx, dy, hmin, hbuf)
    qxn = momentEq(dep, qx, qy, depn, zb, dt, dx, dy, qup, cManning, hmin, hbuf, 1)
    qyn = momentEq(dep, qx, qy, depn, zb, dt, dx, dy, qup, cManning, hmin, hbuf, 2)
    # CFL = ((np.abs(qxn/depn) + np.sqrt(9.8*depn))/dx + ( np.abs(qyn/depn) + np.sqrt(9.8*depn) )/dy)*dt
    CFL = ((np.abs(qxn/depn))/dx + ( np.abs(qyn/depn))/dy)*dt
    CFLmax = np.max(CFL)
    return depn, qxn, qyn, CFLmax, count
# -

# # read zb data

ds = xr.open_dataset('zb.nc')

# + [markdown] slideshow={"slide_type": "slide"}
# # Main routine

# +
# %%time
hmin = float(10.0**(-5))   # minimum (dry) depth
hbuf = float(10.0**(-2))   # wet/dry buffer depth
dtout= float(300.0)        # output interval [s]
trunup = float(0.0 * 3600.0)
tmax = float(2.01 * 3600.0)
nout = 0
CFL = float(0.01)
cManning = 0.03
dx, dy = float(2.5), float(2.5)
dtini = float(0.05)

nxmax, nymax = ds.dims['x'], ds.dims['y']
qx = np.zeros((nxmax,nymax), dtype=np.float64)
qy = np.zeros_like(qx, dtype=np.float64)
dep = np.full_like(qx, hmin, dtype=np.float64)
zb = np.zeros_like(qx, dtype=np.float64)
zb[:,:] = ds['elevation'].values[:,:]

# How the initial condition is set:
# give the upstream end a water level producing uniform flow of 10 m3/s
# (see the reference cell at the bottom), and
# assign the same values to rows i = 0 and i = 1.
ib = 1/60 # condition
zb0 = zb[0,:]
dep0 = - zb0 + 213.96
dep0 = np.where(dep0<0.0, hmin, dep0)
dep[0,:] = dep0
dep[1,:] = dep0

dep0 = - zb0 + 213.96
dep0 = np.where(dep0<0.0, 0.0, dep0)
qup = ib**0.5*dep0**(5.0/3.0)/cManning  # Manning uniform-flow discharge
qx[0,:] = qup
qx[1,:] = qup

t = trunup
dt = dtini
while tmax >= t :
    t += dt
    dep, qx, qy, CFLmax, count = simulation(dep, qx, qy, zb, dt, dx, dy, qup, cManning, hmin, hbuf)
    # update dt
    # dt = np.round( dt * CFL/CFLmax, 5)
    if t >= nout*dtout :
        print(t, dt, CFLmax, count)
        dss = xr.Dataset({'depth': (['x','y'], dep), 'u': (['x','y'], qx/dep), 'v': (['x','y'], qy/dep)
                          , 'elevation': (['x','y'], zb) }
                         , coords={'xc': (('x', 'y'), ds['xc']), 'yc': (('x', 'y'), ds['yc'])}
                         , attrs={'total_second' : round(t, 2)} )
        dss.to_netcdf('out' + str(nout).zfill(8) + '.nc')
        dss.close()
        nout += 1

print(t, dt, CFLmax, count)
# -

# # ref:setting Qup

# Reference: pick dh so that the uniform-flow discharge integrated across
# the upstream row equals the target inflow.
ib = 1/60 # condition
zb0 = zb[0,:]
dh = 0.44
H = zb0.min() + dh
dep = - zb0 + H
dep = np.where(dep<0.0,0.0,dep)
print(np.sum( ib**0.5*dep**(5.0/3.0)/cManning )*dy)
print(H)
D2Dmodel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd import numpy as np import random # ## Task 1 data = pd.read_csv('../data/loan/train.csv') print len(data) sampler = np.array(range(0,len(data), 10)) data.take(sampler) # # Task 2 # + # sampler = np.random.randint(0, len(data), # size = len(data) / 10) sampler = random.sample(xrange(len(data)), len(data) / 10) sampled_data = data.take(sampler) sampled_data.sort_index(inplace = True) sampled_data # - # # Task 3 # + data_Y = data.loc[data['Loan_Status'] == 'Y'] # sampler = np.random.randint(0, len(data_Y), # size = len(data_Y) / 2) sampler = random.sample(xrange(len(data_Y)), len(data_Y) / 2) sampled_Y = data_Y.take(sampler) sampled_Y # - data_N = data.loc[data['Loan_Status'] == 'N'] sampler = np.random.randint(0, len(data_N), size = len(data_N) / 10) # sampler = random.sample(xrange(len(data_N)), len(data_Y) / N) sampled_N = data_N.take(sampler) sampled_N sampled_data = pd.concat([sampled_Y, sampled_N]) sampled_data.sort_index(inplace = True) sampled_data # # Big File np.random.seed(12345) chunker = pd.read_csv('../data/loan/train.csv', chunksize = 1000) sampled_chunker = [] for piece in chunker: for index, row in piece.iterrows(): # print type(row) val = np.random.random() if row['Loan_Status'] == 'Y' and val < 0.5: sampled_chunker.append(row) elif row['Loan_Status'] == 'N' and val < 0.1: sampled_chunker.append(row) sampled_data = pd.DataFrame(sampled_chunker) sampled_data.sort_index(inplace = True) sampled_data
ml-workshop/src/Data-Wrangling-Workshop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''django-clite'': pipenv)'
#     name: python37664bitdjangoclitepipenvffd73afb54ae4cb6b9b8abe669a2360d
# ---

# # Generate 🗂

# ### The generator is accessible through the `generate` command (abbreviated `g`).

# ! D generate --help

# ## Generating Models

# ! D generate model --help

# In order to generate a model, specify the type identifier and then the name of the attribute field. Type identifiers are abbreviated to a more generic name that omits the word `Field`. The input here is case-insensitive, but the fields will be properly CamelCased in the corresponding Python file as in the example below:

# ! D generate model album text:title image:artwork bool:is_compilation

# This would add the following model `album.py` under the `models` directory within the music app:

# ! cat models/album.py

# Note the presence of the `--inherits` flag. You can specify a base model and the generated model will extend it. For example (from within the music directory):

# ! D generate model ep --inherits album

# ! cat models/ep.py

# **Defaults**
#
# As one can see, `class Meta` and `__str__` are added to a model by default along with `uuid`, `slug`, `created_at` and `updated_at` fields.

# The `db_table` name is inferred from the name of the app and the current model while the ordering attribute is defined based on the default `created_at` field.

# **Relationships**
#
# If a relationship identifier is passed, the attribute name will be used as the name of the model it relates to.

# Specifying a relationship also checks the current app scope for the specified related model. If such a model does not exist in scope, the CLI will prompt you to create the missing model. How to invoke the command:

# ! D generate --dry model track char:title
docs/cli/commands/generate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''covid-tools'': conda)'
#     name: python37664bitcovidtoolscondaed93c0ad89524ea5ad4f4946bde80980
# ---

# +
# %matplotlib inline
import os
import pandas as pd
import numpy as np
from ast import literal_eval
import matplotlib.pyplot as plt
from utils.visualization_utils import generic_plot, Curve, format_xtick, generic_sub_plot, Plot
from learning_models.torch_sir import SirEq
# -

# Experiment layout: one scores.csv per region under exp_dir.
exp_dir = os.path.join("regioni", "heun_test_2")
results_path = os.path.join(os.getcwd(), exp_dir, "results")
if not os.path.exists(results_path):
    os.mkdir(results_path)

regions = ["Lombardia"]

# +
def get_scores_path(region):
    # Path of the per-region experiment scores table.
    return os.path.join(exp_dir, region, "scores.csv")

def get_df(region):
    # Load a region's scores.csv (tab-separated).
    path = get_scores_path(region)
    df = pd.read_csv(path, sep="\t")
    return df

def get_best_exp(df):
    # Row(s) with the minimum validation risk.
    return df[df.val_risk == df.val_risk.min()]
# -

# +
def compute_r0(df, t):
    # R0 at time t = beta_t / gamma.
    # NOTE(review): the denominator is always gamma at t0 — presumably
    # gamma is constant over time in these experiments; confirm.
    df["r0_t" + str(t)] = df["final_beta_t" + str(t)] / df["final_gamma_t0"]

def get_list_from_column(df, raw_pos, column, max_size):
    # Parse a stringified Python list stored in a cell and right-pad it
    # with the sentinel -1.0 up to max_size elements.
    col_val = df[column].iloc[raw_pos]
    x = literal_eval(col_val)
    return x + [-1.0]*(max_size - len(x))

def add_integrator_column(df):
    # Derive the integrator type from the experiment name.
    df["integrator"] = df["name"].apply(lambda name: 'heun' if 'Heun' in name else 'euler' if 'euler' in name else 'rk4')

def add_id_column(df):
    # Compact experiment identifier used as the x-axis label in the plots.
    df["id"] = df["integrator"] + "_" \
               + df["first_derivative_reg"].apply(str) + "_" \
               + df["t_inc"].apply(str) \
               #+ "_m" + df["m"].apply(str) + "_a" + df["a"].apply(str) + "_b" + df["b"].apply(str)

def get_res_parameters(scores_df, params=("beta", "gamma", "delta")):
    """Explode the list-valued parameter columns into one column per step.

    scores_df : raw scores table whose beta/gamma/delta cells hold
                stringified lists of per-step values.
    Returns (params_df, p_columns): the widened frame (with r0_t*,
    integrator and id columns added) and the new parameter column names.
    """
    max_len = scores_df["train_size"].max()
    p_columns = ["final_" + p + "_t" + str(i) for p in params for i in range(max_len)]
    # all columns except beta/gamma/delta
    score_columns = list(set(scores_df.columns) - set(params))
    rows = []
    for column_idx in range(scores_df.shape[0]):
        # create rows for the params df
        values = []
        for param in params:
            # get the param as a list
            val = get_list_from_column(scores_df, column_idx, param, max_len)
            values.extend(val)
        # the row contains all scores + params (one column for each value of list)
        rows.append(list(scores_df.iloc[column_idx][score_columns]) + values)
    params_df = pd.DataFrame(rows, columns=score_columns + p_columns)
    for param in p_columns:
        #Set unavailable param as None (padding sentinel -1 means "missing")
        params_df.loc[params_df[param] == -1, param] = None
    for t in range(max_len):
        # add r0_t to the df
        compute_r0(params_df, t)
    add_integrator_column(params_df)
    add_id_column(params_df)
    return params_df, p_columns
# -

# + tags=["outputPrepend"]
# Bar-plot the risks and best epoch per experiment id, sorted by dataset risk.
for region in regions:
    df = get_df(region)
    params_df, _ = get_res_parameters(df)
    params_df = params_df.sort_values(by=['dataset_risk'])
    #params_df.plot.bar(x='id', y='val_risk')
    #params_df.plot.bar(x='id', y='train_risk')
    #params_df.plot.bar(x='id', y='dataset_risk')
    size = (15,9)
    params_df.plot.bar(x='id', y=['val_risk','dataset_risk', 'train_risk'], figsize=size)
    params_df.plot.bar(x='id', y='best_epoch', figsize=size)
analyze_tests.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # https://docs.python.org/3/library/webbrowser.html import tweepy import webbrowser import time # https://docs.tweepy.org/en/stable/index.html consumer_key = "" consumer_secret = "" callback_uri = 'oob' # https://cfe.sh/twitter/callback try: auth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_uri) redirect_url = auth.get_authorization_url() print(redirect_url) except tweepy.TweepError: print('Error! Failed to get request token.') # A NEW PAGE OPENS REQUESTING AUTHORIZATION FROM TWITTER TO GENERATE A CODE webbrowser.open(redirect_url) user_pin_input = input("What's the pin value? \n") print(user_pin_input) try: auth.get_access_token(user_pin_input) except tweepy.TweepError: print('Error! Failed to get access token.') print(auth.access_token, auth.access_token_secret) api = tweepy.API(auth) me = api.me() print(me.screen_name) # UPDATE STATUS TWITTER new_status = api.update_status("Testing #tweepy with #Python :-)") # DELETE SEND TWEET new_status.destroy() dir(new_status) dir(api) img_obj = api.media_upload("gorickyourself.png") dir(img_obj) img_obj.media_id_string img_obj.media_id img_obj.image # UPLOAD IMAGES WITH tweepy new_status_with_img_obj = api.update_status("Testing #tweepy with #Pytho. Me in Rick and Morty world", media_ids=[img_obj.media_id_string])
n_day_python/SandBoxPyTwitter/2- Update Status aka Tweet.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# !pyNTCIREVAL

# !cat ../data/eval/q1.rel  # relevance-assessment file

# !cat ../data/eval/method1.q1.res  # search-results file

# ---
# ## Required Task (1): Checking the behavior
# Verify that the MSnDCG@0003 and nERR@0003 values that pyNTCIREVAL
# reports for the search results used on this page agree with a
# computation following the definitions in the lecture materials. That is,
# write a program computing nDCG@3 and nERR@3 and confirm its results
# match pyNTCIREVAL's.
#
# ---

import math

# +
def rel_convert(rel):
    """
    rel: path to a relevance file; returns (rels, ress) where rels[i] is
    the relevance grade of document i and ress holds the document ids.
    """
    with open(rel, "r") as rr:
        rels = []
        ress = []
        for line in rr:
            rels.append(int(line.split(" ")[1].strip("L"))) # lines look like "d1 L1", "d2 L0", ...
            ress.append(int(line.split(" ")[0].strip("d")))
    return rels, ress

def res_convert(res):
    """
    res: path to a ranked result list; returns the relevance grades in
    ranked order, e.g. [2, 1, 0, 1].
    NOTE(review): reads the global `rels` built by rel_convert — the
    grades do not come from the parameter; rel_convert must run first.
    """
    with open(res, "r") as rs:
        ress = [rels[int(line.strip("d"))-1] for line in rs] # relevance grades in result order
    return ress
# -

def MSnDCG(k, rels, ress):
    """
    Compute nDCG@k: DCG of the ranking `ress` divided by the DCG of the
    ideal (descending-grade) ranking of `rels`.
    """
    dcg_k = 0
    ideal_dcg_k = 0
    ideal_rels = sorted(rels, reverse=True)
    for i in range(k):
        dcg_k += (2**ress[i] - 1) / math.log2(1 + (i+1))
        ideal_dcg_k += (2**ideal_rels[i] - 1) / math.log2(1 + (i+1))
    ndcg_k = dcg_k / ideal_dcg_k
    #print("DCG / DCG' = {0} / {1} = {2}".format(dcg_k, ideal_dcg_k, ndcg_k))
    return ndcg_k

def nERR(k, rels, ress):
    # Normalized Expected Reciprocal Rank at k: ERR of the ranking divided
    # by the ERR of the ideal ranking.
    ideal_rels = sorted(rels, reverse=True)

    def p_stop(rel_i, rel_max):
        # Probability the user is satisfied at a document of grade rel_i.
        return (2**rel_i - 1) / (2**rel_max)

    def permute(k, ress, rel_max):
        # Probability of reaching rank k (not stopping at any earlier rank).
        # NOTE(review): the rel_max parameter is unused — the closure's
        # max(rels) is used instead; behavior is the same here since the
        # callers always pass max(rels), but confirm before refactoring.
        p = 1
        if k == 0:
            return p
        for i in range(k):
            p = p * (1 - p_stop(ress[i], max(rels)))
        return p

    def ERR(docIDs, max_rel):
        p_err = 0
        for i in range(len(docIDs)):
            p_s = p_stop(docIDs[i], max_rel)
            prm = permute(i, docIDs, max(rels))
            p_err += p_s * prm / (i+1)
            #print("P_stop: {0} * ERR: {1} / Rank: {2} = P_ERR: {3}".format(p_s, prm, i+1, p_err))
        return p_err

    ideal_p_err = ERR(ideal_rels[:k], max(rels))
    p_err = ERR(ress[:k], max(rels))
    #print("{0}: ERR / ERR* = {1} / {2}".format(i+1, p_err, ideal_p_err))
    #print("ERR / ERR* = {0} / {1} = {2}".format(p_err, ideal_p_err, p_err/ideal_p_err))
    return p_err / ideal_p_err

rel = "../data/eval/q1.rel"
res = "../data/eval/method1.q1.res"
rels, ress = rel_convert(rel)
# ress = res_convert(res)
print(rels)
print(ress)

print("MSnDCG: {}".format(MSnDCG(3, rels, ress)))
print("nERR: {}".format(nERR(3, rels, ress)))

# !pyNTCIREVAL label -r ../data/eval/q1.rel < ../data/eval/method1.q1.res > method1.q1.rel

# !pyNTCIREVAL compute -r ../data/eval/q1.rel -g 1:3 --cutoffs=1,3 < method1.q1.rel

# Confirmed that the MSnDCG and nERR above match the values produced by
# the functions written here.

# ---
# ## Required Task (2): Computing metrics on my own data
# Build evaluation data for the topic set and search results handled in
# Exercise 1, compute the metrics with pyNTCIREVAL, report the means of
# MRR, nDCG@3 and nERR@3, and discuss which properties of each metric
# explain the differences between the values. A corpus other than the one
# from Exercise 1 may be used, but the evaluation data must rate a real
# ranking (no dummy data) and use at least three topics (queries).
#
# ---

for i in range(3):
    rl = "../data/eval/my_q{}.rel".format(i+1)
    rs = "../data/eval/my_q{}.res".format(i+1)
    rls, rss = rel_convert(rl)
    print(rls)
    print(rss)
    print("{0}-MSnDCG: {1}".format(i+1, MSnDCG(10, rls, rls[:10])))
    print("{0}-nERR: {1}".format(i+1, nERR(10, rls, rls[:10])))
codes/Exercise2_Daisuke_Kataoka.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9 # language: python # name: python3 # --- # <center> # <img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # <h1 align=center><font size = 5>Assignment: SQL Notebook for Peer Assignment</font></h1> # # Estimated time needed: **60** minutes. # # ## Introduction # # Using this Python notebook you will: # # 1. Understand the Spacex DataSet # 2. Load the dataset into the corresponding table in a Db2 database # 3. Execute SQL queries to answer assignment questions # # ## Overview of the DataSet # # SpaceX has gained worldwide attention for a series of historic milestones. # # It is the only private company ever to return a spacecraft from low-earth orbit, which it first accomplished in December 2010. # SpaceX advertises Falcon 9 rocket launches on its website with a cost of 62 million dollars wheras other providers cost upward of 165 million dollars each, much of the savings is because Space X can reuse the first stage. # # Therefore if we can determine if the first stage will land, we can determine the cost of a launch. # # This information can be used if an alternate company wants to bid against SpaceX for a rocket launch. # # This dataset includes a record for each payload carried during a SpaceX mission into outer space. # # ### Download the datasets # # This assignment requires you to load the spacex dataset. # # In many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. 
Click on the link below to download and save the dataset (.CSV file): # # <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/data/Spacex.csv?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01" target="_blank">Spacex DataSet</a> # # ### Store the dataset in database table # # **it is highly recommended to manually load the table using the database console LOAD tool in DB2**. # # <img src = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/images/spacexload.png"> # # Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the dataset, Next create a New Table, and then follow the steps on-screen instructions to load the data. Name the new table as follows: # # **SPACEXDATASET** # # **Follow these steps while using old DB2 UI which is having Open Console Screen** # # **Note:While loading Spacex dataset, ensure that detect datatypes is disabled. Later click on the pencil icon(edit option).** # # 1. Change the Date Format by manually typing DD-MM-YYYY and timestamp format as DD-MM-YYYY HH\:MM:SS. # # Here you should place the cursor at Date field and manually type as DD-MM-YYYY. # # 2. Change the PAYLOAD_MASS\_\_KG\_ datatype to INTEGER. 
#
# <img src = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/images/spacexload2.png">
#
# **Changes to be considered when having DB2 instance with the new UI having Go to UI screen**
#
# * Refer to the instructions at this <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Labs_Coursera_V5/labs/Lab%20-%20Sign%20up%20for%20IBM%20Cloud%20-%20Create%20Db2%20service%20instance%20-%20Get%20started%20with%20the%20Db2%20console/instructional-labs.md.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">link</a> for viewing the new Go to UI screen.
#
# * Later click on **Data link (below SQL)** in the Go to UI screen and click on the **Load Data** tab.
#
# * Later browse for the downloaded spacex file.
#
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/images/browsefile.png" width="800"/>
#
# * Once done, select the schema and load the file.
#
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/images/spacexload3.png" width="800"/>
#

# !pip install sqlalchemy==1.3.9
# !pip install ibm_db_sa
# !pip install ipython-sql

# ### Connect to the database
#
# Let us first load the SQL extension and establish a connection with the database
#

# %load_ext sql

# **DB2 magic in case of old UI service credentials.**
#
# In the next cell enter your db2 connection string. Recall you created Service Credentials for your Db2 instance before.
From the **uri** field of your Db2 service credentials copy everything after db2:// (except the double quote at the end) and paste it in the cell below after ibm_db_sa:// # # <img src ="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_edX/images/URI.jpg"> # # in the following format # # **%sql ibm_db_sa://my-username:my-password\@my-hostname:my-port/my-db-name** # # **DB2 magic in case of new UI service credentials.** # # <img src ="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DS0321EN-SkillsNetwork/labs/module_2/images/servicecredentials.png" width=600> # # * Use the following format. # # * Add security=SSL at the end # # **%sql ibm_db_sa://my-username:my-password\@my-hostname:my-port/my-db-name?security=SSL** # # %sql ibm_db_sa://xvs97022:RzWLrYw0atwHYrTx@ea286ace-86c7-4d5b-8580-3fbfa46b1c66.bs2io90l08kqb1od8lcg.databases.appdomain.cloud:31505/bludb?security=SSL # %sql SELECT * FROM SPACEXTBL # ## Tasks # # Now write and execute SQL queries to solve the assignment tasks. # # ### Task 1 # # ##### Display the names of the unique launch sites in the space mission # # %sql SELECT DISTINCT launch_site FROM SPACEXTBL; # ### Task 2 # # ##### Display 5 records where launch sites begin with the string 'CCA' # # %sql SELECT launch_site FROM SPACEXTBL WHERE launch_site LIKE 'CCA%' LIMIT 5; # ### Task 3 # # ##### Display the total payload mass carried by boosters launched by NASA (CRS) # # %sql SELECT SUM(payload_mass__kg_) FROM SPACEXTBL WHERE customer LIKE 'NASA (CRS)'; # ### Task 4 # # ##### Display average payload mass carried by booster version F9 v1.1 # # %sql SELECT AVG(payload_mass__kg_) FROM SPACEXTBL WHERE booster_version LIKE '%F9 v1.1%'; # ### Task 5 # # ##### List the date when the first successful landing outcome in ground pad was acheived. 
#
# *Hint: Use the MIN function*

# Match the exact outcome 'Success (ground pad)': the original 'Success%'
# pattern also matched successful drone-ship landings, so the MIN date could
# come from the wrong landing type.
# %sql SELECT MIN(DATE) FROM SPACEXTBL WHERE landing__outcome = 'Success (ground pad)';

# ### Task 6
#
# ##### List the names of the boosters which have success in drone ship and have payload mass greater than 4000 but less than 6000
#

# The landing itself must be the success, so filter on landing__outcome
# directly: the original (mission_outcome='Success' AND LIKE '%drone ship%')
# also returned boosters whose drone-ship landing FAILED on an otherwise
# successful mission. BETWEEN is inclusive, so use strict comparisons for
# "greater than 4000 but less than 6000".
# %sql SELECT booster_version FROM SPACEXTBL WHERE landing__outcome = 'Success (drone ship)' AND payload_mass__kg_ > 4000 AND payload_mass__kg_ < 6000;

# ### Task 7
#
# ##### List the total number of successful and failure mission outcomes
#

# %sql SELECT mission_outcome, COUNT(*) FROM SPACEXTBL GROUP BY mission_outcome;

# ### Task 8
#
# ##### List the names of the booster_versions which have carried the maximum payload mass. Use a subquery
#

# %sql SELECT booster_version, payload_mass__kg_ FROM SPACEXTBL WHERE payload_mass__kg_=(SELECT MAX(payload_mass__kg_) FROM SPACEXTBL);

# ### Task 9
#
# ##### List the failed landing_outcomes in drone ship, their booster versions, and launch site names for in year 2015
#

# %sql SELECT landing__outcome, booster_version, launch_site FROM SPACEXTBL WHERE YEAR(DATE)=2015 AND landing__outcome LIKE '%Fail%drone ship%';

# ### Task 10
#
# ##### Rank the count of landing outcomes (such as Failure (drone ship) or Success (ground pad)) between the date 2010-06-04 and 2017-03-20, in descending order
#

# ORDER BY 2 (the count), not column 1: the task asks for the counts ranked
# in descending order, whereas ORDER BY 1 sorted the outcome labels
# alphabetically.
# %sql SELECT landing__outcome, COUNT(*) FROM SPACEXTBL WHERE (DATE BETWEEN '2010-06-04' AND '2017-03-20') GROUP BY landing__outcome ORDER BY 2 DESC;

# ### Reference Links
#
# * <a href ="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Labs_Coursera_V5/labs/Lab%20-%20String%20Patterns%20-%20Sorting%20-%20Grouping/instructional-labs.md.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01&origin=www.coursera.org">Hands-on Lab : String Patterns, Sorting and Grouping</a>
#
# *
<a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Labs_Coursera_V5/labs/Lab%20-%20Built-in%20functions%20/Hands-on_Lab__Built-in_Functions.md.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01&origin=www.coursera.org">Hands-on Lab: Built-in functions</a> # # * <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Labs_Coursera_V5/labs/Lab%20-%20Sub-queries%20and%20Nested%20SELECTs%20/instructional-labs.md.html?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01&origin=www.coursera.org">Hands-on Lab : Sub-queries and Nested SELECT Statements</a> # # * <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Module%205/DB0201EN-Week3-1-3-SQLmagic.ipynb?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">Hands-on Tutorial: Accessing Databases with SQL magic</a> # # * <a href= "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/Module%205/DB0201EN-Week3-1-4-Analyzing.ipynb?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDS0321ENSkillsNetwork26802033-2021-01-01">Hands-on Lab: Analyzing a real World Data Set</a> # # ## Author(s) # # <h4> <NAME> </h4> # # ## Other Contributors # # <h4> <NAME> </h4> # # ## Change log # # | Date | Version | Changed by | Change Description | # | ---------- | 
------- | ------------- | ------------------------- |
# | 2021-10-12 | 0.4 | <NAME> | Changed markdown |
# | 2021-08-24 | 0.3 | <NAME> | Added library update |
# | 2021-07-09 | 0.2 | <NAME> | Changes made in magic SQL |
# | 2021-05-20 | 0.1 | <NAME> | Created Initial Version |
#
# ## <h3 align="center"> © IBM Corporation 2021. All rights reserved. </h3>
#
EDA with SQL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Baseline Experiment 1: Training Decision Tree Classifier # # Using Grid Search with 5-fold Cross-Validation, with a 70/30 train/test split. # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import joblib import graphviz import pydotplus import sklearn from sklearn.preprocessing import MinMaxScaler from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix from sklearn import tree # For reproducible results RANDOM_STATE_SEED = 420 # - df_dataset = pd.read_csv("processed_friday_dataset.csv") df_dataset df_dataset.info() # ### 1- Making an 70/30 train/test split train, test = train_test_split(df_dataset, test_size=0.3, random_state=RANDOM_STATE_SEED) train test # #### MinMax Scaling of numerical attributes based on train set numerical_cols = df_dataset.columns[:-3] numerical_cols min_max_scaler = MinMaxScaler().fit(train[numerical_cols]) train[numerical_cols] = min_max_scaler.transform(train[numerical_cols]) train test[numerical_cols] = min_max_scaler.transform(test[numerical_cols]) test # ### 2- Checking label distribution # + print("Full dataset:\n") print("Benign: " + str(df_dataset["Label"].value_counts()[[0]].sum())) print("Malicious: " + str(df_dataset["Label"].value_counts()[[1]].sum())) print("---------------") print("Training set:\n") print("Benign: " + str(train["Label"].value_counts()[[0]].sum())) print("Malicious: " + str(train["Label"].value_counts()[[1]].sum())) print("---------------") print("Test set:\n") print("Benign: " + str(test["Label"].value_counts()[[0]].sum())) print("Malicious: " + str(test["Label"].value_counts()[[1]].sum())) 
# - # ### 3- Splitting to X_train, y_train, X_test, y_test # + y_train = np.array(train.pop("Label")) # pop removes "Label" from the dataframe X_train = train.values print(type(X_train)) print(type(y_train)) print(X_train.shape) print(y_train.shape) # + y_test = np.array(test.pop("Label")) # pop removes "Label" from the dataframe X_test = test.values print(type(X_test)) print(type(y_test)) print(X_test.shape) print(y_test.shape) # - # ### 4- Fitting Decision Tree model # + model = DecisionTreeClassifier( criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, class_weight=None, ccp_alpha=0.0 ) hyperparameters = { 'max_depth': [i for i in range(1, 20)] } # - clf = GridSearchCV( estimator=model, param_grid=hyperparameters, cv=5, verbose=1, n_jobs=-1 # Use all available CPU cores ) clf.fit(X=X_train, y=y_train) # ### 5- Extracting best performing model in the 5-fold cross-validation Grid Search print("Accuracy score on Validation set: \n") print(clf.best_score_ ) print("---------------") print("Best performing hyperparameters on Validation set: ") print(clf.best_params_) print("---------------") print(clf.best_estimator_) model = clf.best_estimator_ model # ### 6- Evaluating on Test set predictions = model.predict(X_test) predictions # #### 6.1 Accuracy on Test set print(accuracy_score(y_test, predictions)) # #### 6.2 Confusion matrix cm = confusion_matrix(y_test, predictions) print(cm) plot_confusion_matrix(model, X_test, y_test, cmap="cividis") # #### 6.3 Classification report print(classification_report(y_test, predictions, digits=5)) # ### 7- Saving model joblib.dump(model, "trained_models/decision-tree-classifier.pkl") # ### 8- Testing loading model model = joblib.load("trained_models/decision-tree-classifier.pkl") model # ### 9- Decision Tree Plot # + dot_data = tree.export_graphviz( model, out_file=None, 
feature_names=train.columns ) graphviz.Source(dot_data, format="png") # - graph = pydotplus.graph_from_dot_data(dot_data) graph.write_png('trained_models_confusion_matrix_plots_on_test_set/decision_tree.png')
3-Training_Experiment_Decision_Tree.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3-azureml # kernelspec: # display_name: Python 3.6 - AzureML # language: python # name: python3-azureml # --- # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956015781} # Imports import base64 import json import os import requests import azureml.core from azureml.core import Workspace from azureml.core.model import Model from azureml.core.model import InferenceConfig from azureml.core import Environment from azureml.core.webservice import AciWebservice, Webservice from azureml.exceptions import WebserviceException # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956015958} # The name of the model as it will appear in AzureML aml_model_name = 'PneumoniaDetection-secure' # Updating this requires an update to score.py # The name of the model endpoint to be created in AzureML aci_service_name = 'pneumonia-detection-onnx' # The name of the model as it will appear in AI Builder aib_model_name = "pneumonia-detection-v1" # The local path of the parent of the model directory model_path = '.' 
is_secure = True # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956016602} ws = Workspace.from_config() # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956022224} # Register an AML MOdel model_root = os.path.join(model_path, './model') model = Model.register(workspace=ws, model_path=model_root, model_name=aml_model_name, tags={'area': "image", 'type': "classification"}, ) print(f"Registered model {model.name}, Version {model.version}") # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956022371} #Creating an inference configuration entry_script = os.path.join(model_root, "score.py") conda_file = os.path.join(model_root, "myenv.yml") inference_config = InferenceConfig(runtime="python", entry_script=entry_script, conda_file=conda_file) # + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1618956050147} service = None try: # get any existing service with the specified name service = Webservice(ws, name=aci_service_name) except WebserviceException as e: print(f"Webservice not found: {aci_service_name}") # Update the service with the new model if the service exists, otherwise deploy a new service if service: print (f"Updating service {aci_service_name}") model = Model(workspace=ws, name=aml_model_name) service.update(models=[model], inference_config=inference_config, auth_enabled=is_secure) else: print (f"Deploying new service {aci_service_name}") deployment_config = AciWebservice.deploy_configuration(cpu_cores = 2, memory_gb = 2, auth_enabled=is_secure) service = Model.deploy(ws, aci_service_name, [model], inference_config, deployment_config) service.wait_for_deployment(True) print(service.state) # + jupyter={"source_hidden": false, "outputs_hidden": false} 
nteract={"transient": {"deleting": false}} gather={"logged": 1618956051130} #validate service using response service = Webservice(ws, name=aci_service_name) uri = service.scoring_uri image_path = './TestData/test_1.jpeg' api_key = service.get_keys()[0] # Replace this with the API key for the web service headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)} with open(image_path, "rb") as image_file: encoded_string = base64.b64encode(image_file.read()).decode('utf-8') request_body = [{'image': encoded_string}] request_data = json.dumps({'request': request_body}) response = requests.post(uri, headers=headers, data=request_data) print(response.text)
ai-builder/BringYourOwnModelTutorial/PneumoniaDetection/SetupService.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pyml # language: python # name: pyml # --- from shared.utils import load_mnist import os import numpy as np import tensorflow as tf X_data, y_data = load_mnist('./mnist/', kind='train') print('Rows: {}, Columns: {}'.format(X_data.shape[0], X_data.shape[1])) X_test, y_test = load_mnist('./mnist/', kind='t10k') print('Rows: {}, Columns: {}'.format(X_test.shape[0], X_test.shape[1])) X_train, y_train = X_data[:50000,:], y_data[:50000] X_valid, y_valid = X_data[50000:,:], y_data[50000:] print('Training: ', X_train.shape, y_train.shape) print('Validation: ', X_valid.shape, y_valid.shape) print('Test Set: ', X_test.shape, y_test.shape) def batch_generator(X, y, batch_size=64, shuffle=False, random_seed=None): idx = np.arange(y.shape[0]) if shuffle: rng = np.random.RandomState(random_seed) rng.shuffle(idx) X = X[idx] y = y[idx] for i in range(0, X.shape[0], batch_size): yield (X[i:i+batch_size, :], y[i:i+batch_size]) mean_vals = np.mean(X_train, axis=0) std_val = np.std(X_train) X_train_centered = (X_train - mean_vals)/std_val X_valid_centered = (X_valid - mean_vals)/std_val X_test_centered = (X_test - mean_vals)/std_val import tensorflow as tf def conv_layer(input_tensor, name, kernel_size, n_output_channels, padding_mode='SAME', strides=(1, 1, 1, 1)): with tf.variable_scope(name): input_shape = input_tensor.get_shape().as_list() n_input_channels = input_shape[-1] weights_shape = list(kernel_size) + [n_input_channels, n_output_channels] weights = tf.get_variable(name='_weights', shape=weights_shape) print(weights) biases = tf.get_variable(name='_biases', initializer=tf.zeros(shape=[n_output_channels])) print(biases) conv = tf.nn.conv2d(input=input_tensor, filter=weights, strides=strides, padding=padding_mode) print(conv) conv = tf.nn.bias_add(conv, biases, name='net_pre-activation') print(conv) conv = 
tf.nn.relu(conv, name='activation') print(conv) return conv g = tf.Graph() with g.as_default(): x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) conv_layer(x, name='convtest', kernel_size=(3, 3), n_output_channels=32) del g, x def fc_layer(input_tensor, name, n_output_units, activation_fn=None): with tf.variable_scope(name): input_shape = input_tensor.get_shape().as_list()[1:] n_input_units = np.prod(input_shape) if len(input_shape) > 1: input_tensor = tf.reshape(input_tensor, shape=(-1, n_input_units)) weights_shape = [n_input_units, n_output_units] weights = tf.get_variable(name='_weights', shape=weights_shape) print(weights) biases = tf.get_variable(name='_biases', initializer=tf.zeros(shape=[n_output_units])) print(biases) layer = tf.matmul(input_tensor, weights) print(layer) layer = tf.nn.bias_add(layer, biases, name='net_pre-activaiton') print(layer) if activation_fn is None: return layer layer = activation_fn(layer, name='activation') print(layer) return layer g = tf.Graph() with g.as_default(): x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1]) fc_layer(x, name='fctest', n_output_units=32, activation_fn=tf.nn.relu) del g, x def build_cnn(learning_rate=1e-4): ## Placeholders for X and y: tf_x = tf.placeholder(tf.float32, shape=[None, 784], name='tf_x') tf_y = tf.placeholder(tf.int32, shape=[None], name='tf_y') # reshape x to a 4D tensor: # [batchsize, width, height, 1] tf_x_image = tf.reshape(tf_x, shape=[-1, 28, 28, 1], name='tf_x_reshaped') ## One-hot encoding: tf_y_onehot = tf.one_hot(indices=tf_y, depth=10, dtype=tf.float32, name='tf_y_onehot') ## 1st layer: Conv_1 print('Building 1st layer:') h1 = conv_layer(tf_x_image, name='conv_1', kernel_size=(5, 5), padding_mode='VALID', n_output_channels=32) ## MaxPooling h1_pool = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ## 2nd layer: Conv_2 print('Building 2nd layer:') h2 = conv_layer(h1_pool, name='conv_2', kernel_size=(5, 5), padding_mode='VALID', 
n_output_channels=64) ## MaxPooling h2_pool = tf.nn.max_pool(h2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') ## 3rd layer: Fully Connected print('Building 3rd layer:') h3 = fc_layer(h2_pool, name='fc_3', n_output_units=1024, activation_fn=tf.nn.relu) ## Dropout keep_prob = tf.placeholder(tf.float32, name='fc_keep_prob') h3_drop = tf.nn.dropout(h3, keep_prob=keep_prob, name='dropout_layer') ## 4th layer: Fully Connected (linear activation) print('Building 4th layer:') h4 = fc_layer(h3_drop, name='fc_4', n_output_units=10, activation_fn=None) ## Prediction predictions = { 'probabilities': tf.nn.softmax(h4, name='probabilities'), 'labels': tf.cast(tf.argmax(h4, axis=1), tf.int32, name='labels') } ## Loss Function and Optimization cross_entropy_loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=h4, labels=tf_y_onehot), name='cross_entropy_loss') ## Optimizer optimizer = tf.train.AdamOptimizer(learning_rate) optimizer = optimizer.minimize(cross_entropy_loss, name='train_op') ## Computing the prediction accuracy correct_predictions = tf.equal(predictions['labels'], tf_y, name='correct_preds') accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='accuracy') # + def save(saver, sess, epoch, path='./model/'): if not os.path.isdir(path): os.makedirs(path) print('Saving model in %s' % path) saver.save(sess, os.path.join(path, 'cnn-model.ckpt'), global_step=epoch) def load(saver, sess, path, epoch): print('Loading model from %s' % path) saver.restore(sess, os.path.join(path, 'cnn-model.ckpt-%d' % epoch)) def train(sess, training_set, validation_set=None, initialize=True, epochs=20, shuffle=True, dropout=0.5, random_seed=None): X_data = np.array(training_set[0]) y_data = np.array(training_set[1]) training_loss = [] # initialize variables if initialize: sess.run(tf.global_variables_initializer()) np.random.seed(random_seed) # for shuflling in batch_generator import time start = time.time() for epoch in range(1, epochs+1): 
batch_gen = batch_generator(X_data, y_data, shuffle=shuffle) avg_loss = 0.0 for i, (batch_x, batch_y) in enumerate(batch_gen): feed = {'tf_x:0': batch_x, 'tf_y:0': batch_y, 'fc_keep_prob:0': dropout} loss, _ = sess.run(['cross_entropy_loss:0', 'train_op'], feed_dict=feed) avg_loss += loss training_loss.append(avg_loss / (i+1)) print('Epoch %02d Training Avg. Loss: %7.3f' % (epoch, avg_loss), end=' ') if validation_set is not None: feed = { 'tf_x:0': validation_set[0], 'tf_y:0': validation_set[1], 'fc_keep_prob:0': 1.0} valid_acc = sess.run('accuracy:0', feed_dict=feed) print(' Validation Acc: %7.3f' % valid_acc) else: print() print('Time Elapsed: {:.3f}s'.format(time.time()-start)) def predict(sess, X_test, return_proba=False): feed = {'tf_x:0': X_test, 'fc_keep_prob:0': 1.0} if return_proba: return sess.run('probabilities:0', feed_dict=feed) else: return sess.run('labels:0', feed_dict=feed) # - random_seed = 123 g = tf.Graph() with g.as_default(): tf.set_random_seed(random_seed) ## build the graph build_cnn() ## saver: saver = tf.train.Saver() with tf.Session(graph=g) as sess: train(sess, training_set=(X_train_centered, y_train), validation_set=(X_valid_centered, y_valid), initialize=True, random_seed=123) save(saver, sess, epoch=20) del g g2 = tf.Graph() with g2.as_default(): tf.set_random_seed(random_seed) build_cnn() saver = tf.train.Saver() with tf.Session(graph=g2) as sess: load(saver, sess, epoch=20, path='./model/') preds = predict(sess, X_test_centered, return_proba=False) print('Test Accuracy: %.3f%%' % (100*np.sum(preds == y_test)/len(y_test))) np.set_printoptions(precision=2, suppress=True) with tf.Session(graph=g2) as sess: load(saver, sess, epoch=20, path='./model/') print(predict(sess, X_test_centered[:10], return_proba=False)) print(predict(sess, X_test_centered[:10], return_proba=True)) ## continue training for 20 more epochs ## without re-initializing :: initialize=False ## create a new session ## and restore the model with tf.Session(graph=g2) as 
sess: load(saver, sess, epoch=20, path='./model/') train(sess, training_set=(X_train_centered, y_train), validation_set=(X_valid_centered, y_valid), initialize=False, epochs=20, random_seed=123) save(saver, sess, epoch=40, path='./model/') preds = predict(sess, X_test_centered, return_proba=False) print('Test Accuracy: %.3f%%' % (100*np.sum(preds == y_test)/len(y_test)))
ch15/convolution_mnist_low_level_api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

data = pd.read_csv('People_Covered_in_the_News/people_2014.csv')

def custom_standardization(df):
    """Replace special / mis-encoded characters in df['article'] with spaces.

    Mutates *df* in place and returns it.
    """
    spec_chars = ["!",'"',"#","%","&","'","(",")",
                  "*","+",",", "-",".","/",":",";","<",
                  "=",">","?","@","[",
                  "\\","]","^","_",
                  "`","{","|","}","~","–",
                  "\xc2", "\xa0", "\x80", "\x9c", "\x99", "\x94", "\xad", "\xe2", "\x9d",
                  "\n",
                  # Literal leftovers of escape sequences printed without the backslash.
                  "x9d", "xc2", "xa0", "x80", "x9c", "x99", "x94", "xad", "xe2",
                  # NOTE(review): "n" replaces every letter 'n' in the text, which
                  # mangles words ("name" -> " ame"). Kept for output compatibility,
                  # but this looks like a leftover of "\n" — confirm and remove.
                  "n"]
    # Strip once up front; downstream code splits on whitespace anyway, so
    # edge whitespace introduced by the replacements below is harmless.
    df['article'] = df['article'].str.strip()
    for char in spec_chars:
        # regex=False: many of these characters ("(", "*", "+", "[", "\\", ...)
        # are regex metacharacters and would be misinterpreted (or raise) under
        # pandas' default regex replacement.
        df['article'] = df['article'].str.replace(char, ' ', regex=False)
    return df

data = custom_standardization(data)

data = data[data['race'] == 'black']

# +
#1103 in data.index
# -

# Assign black_df BEFORE using it: the original cell order called
# len(black_df.index) one cell before black_df was defined (NameError on a
# top-to-bottom run).
black_df = data[data['race'] == 'black']

len(black_df.index)

names_list = black_df['last_name'].tolist()
names_list = list(set(names_list))
names_list = [name.lower() for name in names_list]

# + tags=[]
names_list

# +
# Turn the DataFrame of articles into one big chunk of text.
# str.join avoids the quadratic behaviour of repeated string concatenation.
words = []
for ind in black_df.index:
    words.extend(black_df.loc[ind]['article'].lower().split())
documents = ' '.join(words)
#print(len(documents))

# +
# just using the sentence, which words surround black names?
# use: https://peekaboo-vision.blogspot.com/2012/11/a-wordcloud-in-python.html
# http://amueller.github.io/word_cloud/
# don't need to run Word2Vec
# perhaps, 'black' race predictions are low, but not as low as it seems since coverage
# might have to do with the general picture, not specific individuals

# +
#from wordcloud import WordCloud
# Generate a word cloud image
#wordcloud = WordCloud().generate(documents)

# +
from wordcloud import WordCloud
import matplotlib.pyplot as plt

# lower max_font_size
wordcloud = WordCloud(max_font_size=40, background_color='white').generate(documents)
plt.figure(figsize=(15, 6))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
#plt.savefig('Black_Ents_Clouds/cloud_2014.png')
wordcloud.to_file('Black_Ents_Clouds/cloud_2014.png')
plt.show()

# +
#plt.savefig('Black_Ents_Clouds/cloud_2014.png')

# +
# wiki last name model has the highest F-1 score for black people
# I focused on F-1 score since we don't want to label people of other races as black, but we also don't want to miss out on black people
# I asked the journalism team to look at the results and share their insights, but they have been busy with other work
# We can either look at the data now, or I can just proceed with the wiki last name model
# and update the name + sentence + race dataset, the word clouds, as well as the list of unique black people covered (not stored yet)
#
# change name in the sentence to some key/remove name from the sentence
# -

# take a look at the words surrounding people of different races for each year
# for example, group black people sentences, lemmatize sentences and create a word cloud
# parse the sentence using "dependency parser" (check out SpaCy dependency parser)
# separate words into which race's person the words depend on/on which a given race's person depends in the sentence
# person is subject vs person is object
NAACP/Entity_Recognition/Black_Names_Common_Words.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Useful for debugging
# %load_ext autoreload
# %autoreload 2

# # Impact-T fieldmap reconstruction

from impact import Impact, fieldmaps
import numpy as np

ifile = 'templates/lcls_injector/ImpactT.in'
I = Impact(input_file=ifile)

# Fieldmaps are stored here
I.input['fieldmaps'].keys()

# Look at a solrf element.
I.ele['SOL1']

# This is its fieldmap filename
I.ele['SOL1']['filename']

# +
# That data is here.
rfdata = I.input['fieldmaps']['rfdata102']

# Process the raw fieldmap data to extract the Ez and Bz components.
fmap = fieldmaps.process_fieldmap_solrf(rfdata['data'])
fmap
# -

# Reconstruction function (note: API name is spelled "reconsruction" upstream)
fieldmaps.fieldmap_reconsruction(fmap['Bz'], 0)

# # Basic plot

import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format='retina'

# Sample Bz on a uniform grid along the solenoid axis.
zlist = np.linspace(0, 0.49308, 1000)
fieldlist = [fieldmaps.fieldmap_reconsruction(fmap['Bz'], z) for z in zlist]

# z at max field
zlist[np.argmax(np.array(fieldlist))]

plt.plot(zlist, fieldlist);

# Integrated field (approximate, simple Riemann sum over the 1000 samples)
field_scale = 0.243  # from input file
BL = np.sum(fieldlist) * 0.49308 / 1000  # T*m
BL * field_scale * 10  # T*m -> kG*m

1/BL

# # Create Fieldmap

fmap2 = fmap.copy()
fmap2['Bz']['z0'] = min(zlist)
fmap2['Bz']['z1'] = max(zlist)
fmap2['Bz']['L'] = zlist.ptp()
fmap2['Bz']['fourier_coefficients'] = fieldmaps.create_fourier_coefficients(zlist, fieldlist, n=20)

fieldlist2 = [fieldmaps.fieldmap_reconsruction(fmap2['Bz'], z) for z in zlist]

# Overlay the reconstructed map on the original to verify the fit.
plt.plot(zlist, fieldlist, label='original')
plt.plot(zlist, fieldlist2, '--', label='created')
plt.legend()

fmap2
examples/fieldmap_reconstruction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
## Get dependencies ##
# (duplicate imports of scipy, requests and multiprocessing removed)
import string
import math
import sys
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sn
sys.path.append('..')
from GIR import *
import scipy as sp
import pickle
import time
from scipy import ndimage
from scipy import signal
import os
import statsmodels.api as sm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import glob
import requests
import ftplib
import PyPDF2
import io
import cmocean
import multiprocessing
import xarray as xr
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
import zarr
import gcsfs

# +
gs_stores = pd.read_csv('gs://cmip6/cmip6-zarr-consolidated-stores.csv')
gcs = gcsfs.GCSFileSystem(token='anon')
gs_stores.loc[:,'ism'] = gs_stores.loc[:,'institution_id'] + '_' + gs_stores.loc[:,'source_id'] + '_' + gs_stores.loc[:,'member_id']

def get_annual_CMIP6_data_info(activity, table, variable, experiment, institution, source, member):
    """Return a pandas Series of branch/parent metadata for one CMIP6 dataset.

    Looks the dataset up in the global ``gs_stores`` catalogue, opens its zarr
    store from GCS and extracts parent/branch attributes, falling back to NaN
    for any attribute the dataset does not carry.

    Returns None (with a message) if the catalogue query matches nothing.

    eg activity='CMIP', table='Amon', variable='tas', experiment='historical',
       institution="NCAR", source="CESM2", member="r10i1p1f1"
    """
    query = gs_stores.query(
        f"activity_id=='{activity}' & table_id=='{table}' & variable_id=='{variable}' "
        f"& experiment_id=='{experiment}' & institution_id=='{institution}' "
        f"& source_id=='{source}' & member_id=='{member}'"
    )
    if query.empty:
        print('No results for this request')
        return None

    # create a mutable-mapping-style interface to the store
    mapper = gcs.get_mapper(query.zstore.values[0])
    # open it using xarray and zarr
    ds = xr.open_zarr(mapper, consolidated=True)

    df = pd.Series(name=institution+'_'+source+'_'+member+'_'+experiment, dtype=object)

    # (label, getter) pairs replace the original run of copy-pasted try/excepts.
    # BUG FIX: the original 'calendar' fallback wrote into df.loc['comment']
    # instead of df.loc['calendar'], so a missing time axis clobbered the
    # comment field and left 'calendar' unset.
    attributes = [
        ('parent_branch_time', lambda: ds.branch_time_in_parent),
        ('parent_time_units', lambda: ds.parent_time_units),
        ('parent_variant', lambda: ds.parent_variant_label),
        ('parent_experiment', lambda: ds.parent_experiment_id),
        ('parent_source', lambda: ds.parent_source_id),
        ('comment', lambda: ds.comment),
        ('calendar', lambda: ds.time.values[0].__class__),
    ]
    for label, getter in attributes:
        try:
            df.loc[label] = getter()
        except Exception:
            # Best-effort extraction: missing attribute / empty time axis -> NaN.
            # (Exception, not bare except:, so KeyboardInterrupt still works.)
            df.loc[label] = np.nan
    return df

# +
all_info = gs_stores.loc[(gs_stores.experiment_id.isin(['1pctCO2','abrupt-4xCO2']))&(gs_stores.variable_id.isin(['tas']))&(gs_stores.table_id=='Amon')]

all_info_data = []
for index, row in all_info.iterrows():
    print('getting '+row.ism)
    info = get_annual_CMIP6_data_info(row.loc['activity_id'],
                                      row.loc['table_id'],
                                      row.loc['variable_id'],
                                      row.loc['experiment_id'],
                                      row.loc['institution_id'],
                                      row.loc['source_id'],
                                      row.loc['member_id'])
    # Skip misses: a None entry would make the final pd.concat raise.
    if info is not None:
        all_info_data.append(info)
# -

pd.concat(all_info_data,axis=1).to_csv('./cmip6_data/cmip6_branch_info.csv')
GIR/tools/get_cmip6_info.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #to do #import packages #upload txt file #use pandas to make newline characters into tabs #export as .csv file # - import pandas as pd from pathlib import Path import re # path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Index-text.txt') path = Path('../../Cleaned_Pentagon_Papers_text_files/Cleaned_Pentagon-Papers-I.txt') input_doc = open(path, encoding = 'utf-8').read() input_doc #attempting to clean ASCII format text_encode = input_doc.encode(encoding="ascii", errors = "ignore") text_decode = text_encode.decode() decoded_text = " ".join([word for word in text_decode.split()]) print(decoded_text) pp_index = decoded_text #attempting to create a function to clean ASCII messes for all documents def clean_ASCII(original_document): text_encode = input_doc.encode(encoding="ascii", errors = "ignore") text_decode = text_encode.decode() cleaned_text = " ".join([word for word in text_decode.split()]) return cleaned_text pp_part_1= clean_ASCII(input_doc) print(pp_part_1) # + pp_part_1= re.sub(r".(\.\.)",'', pp_part_1) #removes two periods, replaces with one space pp_part_1 = re.sub(r".(\.\.\.\.+)",'', pp_part_1) #removes any more than four periods, replaces with one space pp_part_1 = re.sub(r"( +)",' ', pp_part_1) #removes more than one space, replaces with one space pp_part_1 = re.sub(r"(\\\\)","", pp_part_1) #supposedly removes multiple slashes pp_part_1 = re.sub(r"\\^\\","", pp_part_1) #supposedly removes carret character (^) pp_part_1 = re.sub(r"\\*\\","", pp_part_1) #supposedly removes * astrick character pp_part_1 = re.sub("sone", "some", pp_part_1) pp_part_1 = re.sub('and.', 'and', pp_part_1) pp_part_1 = re.sub("Declassified per Executive Order 13526, Section 3.3", "", pp_part_1) pp_part_1 = re.sub('NND Project Number: NND 66316. 
By: NWD Date: 2011', '', pp_part_1) pp_part_1 = re.sub('TOP SECRET - Sensitive', '', pp_part_1) print(pp_part_1) # - path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-II-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_2= clean_ASCII(input_doc) # print(pp_part_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-III-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_3= clean_ASCII(input_doc) # print(pp_part_3) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-A-1-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_a_1= clean_ASCII(input_doc) # print(pp_part_4_a_1) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-A-2-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_a_2= clean_ASCII(input_doc) # print(pp_part_4_a_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-A-3-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_a_3= clean_ASCII(input_doc) # print(pp_part_4_a_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-A-4-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_a_4= clean_ASCII(input_doc) # print(pp_part_4_a_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-A-5-text.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_a_5= clean_ASCII(input_doc) # print(pp_part_4_a_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-B-1.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) pp_part_4_b_1= clean_ASCII(input_doc) # print(pp_part_4_a_2) path = Path('../../Pentagon_Papers_Text_Files/Pentagon-Papers-Part-IV-B-2.txt') input_doc = open(path, encoding = 'utf-8').read() clean_ASCII(input_doc) 
# Part IV-B-2 was read into ``input_doc`` by the previous cell; normalise it the
# same way as every other part (drop non-ASCII bytes, collapse whitespace).
# The normalisation is inlined here so this cell does not depend on the buggy
# module-level ``clean_ASCII`` (which ignored its argument).
pp_part_4_b_2 = " ".join(input_doc.encode(encoding="ascii", errors="ignore").decode().split())

# Directory holding the raw OCR'd text of every Pentagon Papers part.
_RAW_SRC = Path('../../Pentagon_Papers_Text_Files')


def _read_clean(filename):
    """Read one raw OCR file; return its ASCII-normalised, whitespace-collapsed text.

    Self-contained replacement for the 30+ copy-pasted path/read/clean stanzas
    (each of which also made a discarded, no-op ``clean_ASCII`` call).
    """
    raw = (_RAW_SRC / filename).read_text(encoding='utf-8')
    return " ".join(raw.encode(encoding="ascii", errors="ignore").decode().split())


pp_part_4_b_3 = _read_clean('Pentagon-Papers-Part-IV-B-3.txt')
pp_part_4_b_4 = _read_clean('Pentagon-Papers-Part-IV-B-4.txt')
pp_part_4_b_5 = _read_clean('Pentagon-Papers-Part-IV-B-5.txt')
pp_part_4_c_1 = _read_clean('Pentagon-Papers-Part-IV-C-1.txt')
pp_part_4_c_2a = _read_clean('Pentagon-Papers-Part-IV-C-2a.txt')
pp_part_4_c_2b = _read_clean('Pentagon-Papers-Part-IV-C-2b.txt')
pp_part_4_c_2c = _read_clean('Pentagon-Papers-Part-IV-C-2c.txt')
pp_part_4_c_3 = _read_clean('Pentagon-Papers-Part-IV-C-3.txt')
pp_part_4_c_4 = _read_clean('Pentagon-Papers-Part-IV-C-4.txt')
pp_part_4_c_5 = _read_clean('Pentagon-Papers-Part-IV-C-5.txt')
pp_part_4_c_6_a = _read_clean('Pentagon-Papers-Part-IV-C-6-a.txt')
pp_part_4_c_6_b = _read_clean('Pentagon-Papers-Part-IV-C-6-b.txt')
pp_part_4_c_6_c = _read_clean('Pentagon-Papers-Part-IV-C-6-c.txt')
pp_part_4_c_7_a = _read_clean('Pentagon-Papers-Part-IV-C-7-a.txt')
pp_part_4_c_7_b = _read_clean('Pentagon-Papers-Part-IV-C-7-b.txt')
pp_part_4_c_8 = _read_clean('Pentagon-Papers-Part-IV-C-8.txt')
pp_part_4_c_9a = _read_clean('Pentagon-Papers-Part-IV-C-9a.txt')
pp_part_4_c_9b = _read_clean('Pentagon-Papers-Part-IV-C-9b.txt')
pp_part_5_a_vol_ia = _read_clean('Pentagon-Papers-Part-V-A-Vol-IA.txt')
pp_part_5_a_vol_ib = _read_clean('Pentagon-Papers-Part-V-A-Vol-IB.txt')
pp_part_5_a_vol_ic = _read_clean('Pentagon-Papers-Part-V-A-Vol-IC.txt')
pp_part_5_a_vol_iid = _read_clean('Pentagon-Papers-Part-V-A-Vol-IID.txt')
pp_part_5_b_1 = _read_clean('Pentagon-Papers-Part-V-B1.txt')  # NB: no hyphen between 'B' and '1' in this source file name
pp_part_5_b_2a = _read_clean('Pentagon-Papers-Part-V-B-2a.txt')
pp_part_5_b_2b = _read_clean('Pentagon-Papers-Part-V-B-2b.txt')
pp_part_5_b_3a = _read_clean('Pentagon-Papers-Part-V-B-3a.txt')
pp_part_5_b_3b = _read_clean('Pentagon-Papers-Part-V-B-3b.txt')
pp_part_5_b_3c = _read_clean('Pentagon-Papers-Part-V-B-3c.txt')
pp_part_5_b_3d = _read_clean('Pentagon-Papers-Part-V-B-3d.txt')
pp_part_5_b_4_book_I = _read_clean('Pentagon-Papers-Part-V-B-4-Book-I.txt')
pp_part_5_b_4_book_II = _read_clean('Pentagon-Papers-Part-V-B-4-Book-II.txt')
pp_part_6_a = _read_clean('Pentagon-Papers-Part-VI-A.txt')
pp_part_6_b = _read_clean('Pentagon-Papers-Part-VI-B.txt')
pp_part_6_c_1 = _read_clean('Pentagon-Papers-Part-VI-C-1.txt')
pp_part_6_c_2 = _read_clean('Pentagon-Papers-Part-VI-C-2.txt')
pp_part_6_c_3 = _read_clean('Pentagon-Papers-Part-VI-C-3.txt')
pp_part_6_c_4 = _read_clean('Pentagon-Papers-Part-VI-C-4.txt')

# <H2> Part 2: Regex Cleaning </H2>
# OCR is a nightmare. This makes it a bit less of a nightmare.
# Code and understanding of the code courtesy of <NAME> and her lesson <a href="https://programminghistorian.org/en/lessons/cleaning-ocrd-text-with-regular-expressions">"Cleaning OCR'd text with Regular Expressions"</a> on the Programming Historian.

import re  # redundant (imported at the top of the file) but kept for cell independence

# <H2> Exporting Cleaned Txt Files </H2>

# (The original cell had a stray ``list [pp_index, ...]`` expression here: it
# subscripted the ``list`` builtin, built nothing, and raised a TypeError on
# Python < 3.9. Removed; the texts are collected once as ``text_list`` below.)

_CLEAN_DIR = Path('../../Cleaned_Pentagon_Papers_text_files')


def _export(filename, text):
    # Persist one cleaned text (platform-default encoding, as before).
    with open(_CLEAN_DIR / filename, 'w') as f:
        f.write(text)


# Only the parts below were exported by the original notebook (its duplicate
# write of the IV-C-1 file is removed). TODO: export the remaining parts too.
_export('Cleaned_Pentagon-Papers-Index.txt', pp_index)
_export('Cleaned_Pentagon-Papers-I.txt', pp_part_1)
_export('Cleaned_Pentagon-Papers-II.txt', pp_part_2)
_export('Cleaned_Pentagon-Papers-III.txt', pp_part_3)
_export('Cleaned_Pentagon-Papers-IV-A-1.txt', pp_part_4_a_1)
_export('Cleaned_Pentagon-Papers-IV-A-2.txt', pp_part_4_a_2)
_export('Cleaned_Pentagon-Papers-IV-A-3.txt', pp_part_4_a_3)
_export('Cleaned_Pentagon-Papers-IV-A-4.txt', pp_part_4_a_4)
_export('Cleaned_Pentagon-Papers-IV-A-5.txt', pp_part_4_a_5)
_export('Cleaned_Pentagon-Papers-IV-B-1.txt', pp_part_4_b_1)
_export('Cleaned_Pentagon-Papers-IV-B-2.txt', pp_part_4_b_2)
_export('Cleaned_Pentagon-Papers-IV-B-3.txt', pp_part_4_b_3)
_export('Cleaned_Pentagon-Papers-IV-B-4.txt', pp_part_4_b_4)
_export('Cleaned_Pentagon-Papers-IV-B-5.txt', pp_part_4_b_5)
_export('Cleaned_Pentagon-Papers-IV-C-1.txt', pp_part_4_c_1)
_export('Cleaned_Pentagon-Papers-Part-VI-C-4.txt', pp_part_6_c_4)

# <H2> DataFrame Creation <H2>

# +
# Renamed from ``list`` (which shadowed the builtin): coarse year coverage per part.
overall_years = ["1945-1967", "1940-1950", "1950-1954", "1954", "1954-1960", "1950-1954",
                 "1954-1956", "1954-1959", "1954-1960", "1961", "1961-1963", "1961-1967",
                 "1962-1964", "1963", "1963-1965", "1964", "1964", "1964", "1965", "1965",
                 "1965", "1965-1967", "1965-1967", "1965-1967", "1965-1968", "1965-1968",
                 "1965-1967", "1963-1965", "1965-1967", "1950-1952", "1953-1960",
                 "1960-1963", "1964-1967", "1940-1945", "1945-1949", "1950-1952", "1953",
                 "1954", "1956", "1960", "1961", "1962-1963", "1965-1967", "1965-1967",
                 "1965-1966", "1966-1967", "1967-1967", "1967-1968"]
print(len(overall_years))

# Finer-grained date ranges (MM/YYYY or MM/DD/YYYY where known).
specific = ["1945-1967", "1940-1950", "1950-1954", "1954", "1954-1960", "1950-1954",
            "1954-1956", "1954-1959", "1954-1960", "1961", "1961-1963", "1961-1967",
            "1962-1964", "05/1963-11/1963", "11/1963-04/1965", "02/1964-06/1964",
            "07/1964-10/1964", "11/1964-12/1964", "01/1965-06/1965", "03/1965",
            "03/1965-07/1965", "1965-1967", "1965-1967", "1965-1967", "1965-1968",
            "1965-1968", "1965-1967", "12/1963-06/1965", "07/1965-12/1967",
            "05/08/1950-12/07/1952", "08/04/1953-08/25/1960", "02/29/1960-11/22/1963",
            "02/15/1964-11/17/1967", "1940-1945", "1945-1949", "1950-1952", "1953",
            "1954", "03/15/1956", "1960", "01/1961-12/1961", "01/1962-10/1963",
            "1965-1967", "1965-1967", "1965-1966", "06/1966-12/1967",
            "01/1967-09/1967", "1967-1968"]
print(len(specific))

# Human-readable part labels, parallel to the two date lists.
name_list = ["Index", "Part-I", "Part-2", "Part-3", "Part-IV-A-1", "Part-IV-A-2",
             "Part-IV-A-3", "Part-IV-A-4", "Part-IV-A-5", "Part-IV-B-1", "Part-IV-B-2",
             "Part-IV-B-3", "Part-IV-B-4", "Part-IV-B-5", "Part-IV-C-1", "Part-IV-C-2a",
             "Part-IV-C-2b", "Part-IV-C-2c", "Part-IV-C-3", "Part-IV-C-4", "Part-IV-C-5",
             "Part-IV-C-6a", "Part-IV-C-6b", "Part-IV-C-6c", "Part-IV-C-7a",
             "Part-IV-C-7b", "Part-IV-C-8", "Part-IV-C-9a", "Part-IV-C-9b",
             "Part-V-A-Vol-IA", "Part-V-A-Vol-IB", "Part-V-A-Vol-IC", "Part-V-A-Vol-IID",
             "Part-V-B-1", "Part-V-B-2a", "Part-V-B-2b", "Part-V-B-3a", "Part-V-B-3b",
             "Part-V-B-3c", "Part-V-B-3d", "Part-V-B-4-Book-I", "Part-V-B-4-Book-II",
             "Part-VI-A", "Part-VI-B", "Part-VI-C-1", "Part-VI-C-2", "Part-VI-C-3",
             "Part-VI-C-4"]
print(len(name_list))

# Cleaned full texts, parallel to the lists above.
text_list = [pp_index, pp_part_1, pp_part_2, pp_part_3, pp_part_4_a_1, pp_part_4_a_2,
             pp_part_4_a_3, pp_part_4_a_4, pp_part_4_a_5, pp_part_4_b_1, pp_part_4_b_2,
             pp_part_4_b_3, pp_part_4_b_4, pp_part_4_b_5, pp_part_4_c_1, pp_part_4_c_2a,
             pp_part_4_c_2b, pp_part_4_c_2c, pp_part_4_c_3, pp_part_4_c_4, pp_part_4_c_5,
             pp_part_4_c_6_a, pp_part_4_c_6_b, pp_part_4_c_6_c, pp_part_4_c_7_a,
             pp_part_4_c_7_b, pp_part_4_c_8, pp_part_4_c_9a, pp_part_4_c_9b,
             pp_part_5_a_vol_ia, pp_part_5_a_vol_ib, pp_part_5_a_vol_ic,
             pp_part_5_a_vol_iid, pp_part_5_b_1, pp_part_5_b_2a, pp_part_5_b_2b,
             pp_part_5_b_3a, pp_part_5_b_3b, pp_part_5_b_3c, pp_part_5_b_3d,
             pp_part_5_b_4_book_I, pp_part_5_b_4_book_II, pp_part_6_a, pp_part_6_b,
             pp_part_6_c_1, pp_part_6_c_2, pp_part_6_c_3, pp_part_6_c_4]
print(len(text_list))
# -

# Build the corpus table from the lists above (the original repeated all four
# 48-element literals inside the constructor).
df = pd.DataFrame({
    "name": name_list,
    "overall_years": overall_years,
    "specific_dates": specific,
    "text": text_list,
})
df

# NOTE(review): ``path`` is assigned but never used -- ``to_csv`` writes to the
# absolute Windows path below, exactly as before; consider using the relative path.
path = Path('../../Entity_Extraction/full_text_document.csv')
df.to_csv(r"C:\Users\bob\IS417_DSH\New_IS_417_DSH_V02\is417\Entity_Extraction\full_txt_document.csv")
# "C:\Users\bob\IS417_DSH\New_IS_417_DSH_V02\is417\Entity_Extraction"
#
Pentagon_Paper_Data_Cleaning/.ipynb_checkpoints/Txt_File_Cleaning_V02-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # *Make sure to have the right kernel selected!* # + import qiskit_metal as metal from qiskit_metal import designs, draw from qiskit_metal import MetalGUI, Dict, open_docs # %metal_heading Welcome to Qiskit Metal! # - # Welcome to Qiskit Metal! # # For this example tutorial, we will attempt to create a multi qubit chip with a variety of components. We will want to generate the layout, simulate/analyze and tune the chip to hit the parameters we are wanting, finally rendering to a GDS file. # # One could generate subsections of the layout and tune individual components first, but in this case we will create all of the layout. We will be using both transmon pockets and crossmons, meandered and simple transmission lines, capacitive couplers, and launchers for wirebond connections. So we will import these, and also create a design instance and launch the GUI. 
# # Layout # + from qiskit_metal.qlibrary.qubits.transmon_pocket_6 import TransmonPocket6 from qiskit_metal.qlibrary.qubits.transmon_cross_fl import TransmonCrossFL from qiskit_metal.qlibrary.couplers.tunable_coupler_01 import TunableCoupler01 from qiskit_metal.qlibrary.tlines.meandered import RouteMeander from qiskit_metal.qlibrary.tlines.pathfinder import RoutePathfinder from qiskit_metal.qlibrary.tlines.anchored_path import RouteAnchors from qiskit_metal.qlibrary.lumped.cap_n_interdigital import CapNInterdigital from qiskit_metal.qlibrary.couplers.cap_n_interdigital_tee import CapNInterdigitalTee from qiskit_metal.qlibrary.couplers.coupled_line_tee import CoupledLineTee from qiskit_metal.qlibrary.terminations.launchpad_wb import LaunchpadWirebond from qiskit_metal.qlibrary.terminations.launchpad_wb_coupled import LaunchpadWirebondCoupled # + design = metal.designs.DesignPlanar() gui = metal.MetalGUI(design) # - # Since we are likely to be making many changes while tuning and modifying our design, we will enable overwriting. We can also check all of the chip properties to see if we want to change the size or any other parameter. design.overwrite_enabled = True design.chips.main design.chips.main.size.size_x = '11mm' design.chips.main.size.size_y = '9mm' # #### The Qubits # We will add a collection of qubits. First we will place a transmon pocket with six connection pads. We can see any options the qubit qcomponent has to figure out what we might want to modify when creating the component. This will include the components default options (which the component designer included) as well as renderer options (which are added based on what renderers are present in Metal). 
TransmonPocket6.get_template_options(design) # + options = dict( pad_width = '425 um', pocket_height = '650um', connection_pads=dict( readout = dict(loc_W=0, loc_H=-1, pad_width = '80um', pad_gap = '50um'), bus_01 = dict(loc_W=-1, loc_H=-1, pad_width = '60um', pad_gap = '10um'), bus_02 = dict(loc_W=-1, loc_H=+1, pad_width = '60um', pad_gap = '10um'), bus_03 = dict(loc_W=0, loc_H=+1, pad_width = '90um', pad_gap = '30um'), bus_04 = dict(loc_W=+1, loc_H=+1, pad_width = '60um', pad_gap = '10um'), bus_05 = dict(loc_W=+1, loc_H=-1, pad_width = '60um', pad_gap = '10um') )) q_main = TransmonPocket6(design,'Q_Main', options = dict( pos_x='0mm', pos_y='-1mm', gds_cell_name ='FakeJunction_01', hfss_inductance ='14nH', **options)) gui.rebuild() gui.autoscale() # - # We then will add a mixture of additional qubits. This is not (though do not let me stop any experimental investigation) a design one would normally create for any experiment of computational purpose, but allows for having a mixture of different components on one chip. TransmonCrossFL.get_template_options(design) # We will add two crossmons with flux lines to the west side of the chip, which we will couple to each other using a tunable coupler. To make sure the various readout and control lines will have space to connect to launchers at the chip edge, we have to be mindful of where we place them, and making sure we have enough space for routing while avoiding cross talk. 
# +
# Two flux-tunable crossmons facing each other on the west side of the chip
# (Q2 is rotated 180 degrees so its claws face Q1).
Q1 = TransmonCrossFL(design, 'Q1', options = dict(pos_x = '-2.75mm', pos_y='-1.8mm',
                     connection_pads = dict(
                         bus_01 = dict(connector_location = '180', claw_length ='95um'),
                         readout = dict(connector_location = '0')),
                     fl_options = dict()))

Q2 = TransmonCrossFL(design, 'Q2', options = dict(pos_x = '-2.75mm', pos_y='-1.2mm',
                     orientation = '180',
                     connection_pads = dict(
                         bus_02 = dict(connector_location = '0', claw_length ='95um'),
                         readout = dict(connector_location = '180')),
                     fl_options = dict()))

# Tunable coupler placed between the two crossmons.
tune_c_Q12 = TunableCoupler01(design, 'Tune_C_Q12', options = dict(pos_x = '-2.81mm',
                              pos_y = '-1.5mm', orientation=90, c_width='500um'))

gui.rebuild()
gui.autoscale()
# -

# We then will add three transmon pockets to the north side of the chip, with
# the intention of having them in a linear series of coupling to each other,
# as well as the 'main' qubit to the south.

# +
# North-side pockets Q3 -- Q4 -- Q5; the bus_q3_q4 / bus_q4_q5 pads chain them
# together, bus_03/04/05 couple each down to Q_Main, readout pads face north.
Q3 = TransmonPocket6(design, 'Q3', options = dict(
    pos_x='-3mm', pos_y='0.5mm', gds_cell_name ='FakeJunction_01',
    hfss_inductance ='14nH',
    connection_pads = dict(
        bus_03 = dict(loc_W=0, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        bus_q3_q4 = dict(loc_W=1, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        readout = dict(loc_W=0, loc_H=1, pad_width = '80um', pad_gap = '50um'))))

Q4 = TransmonPocket6(design, 'Q4', options = dict(
    pos_x='0mm', pos_y='1mm', gds_cell_name ='FakeJunction_01',
    hfss_inductance ='14nH',
    connection_pads = dict(
        bus_04 = dict(loc_W=0, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        bus_q3_q4 = dict(loc_W=-1, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        bus_q4_q5 = dict(loc_W=1, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        readout = dict(loc_W=0, loc_H=1, pad_width = '80um', pad_gap = '50um'))))

Q5 = TransmonPocket6(design, 'Q5', options = dict(
    pos_x='3mm', pos_y='0.5mm', gds_cell_name ='FakeJunction_01',
    hfss_inductance ='14nH',
    connection_pads = dict(
        bus_05 = dict(loc_W=0, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        bus_q4_q5 = dict(loc_W=-1, loc_H=-1, pad_width = '80um', pad_gap = '15um'),
        readout = dict(loc_W=0, loc_H=1, pad_width = '80um', pad_gap = '50um'))))
# -

# #### The Busses
# We now couple the qubits to each other, primarily using RouteMeander.
# Although one needs to run simulations to properly tune the line lengths for
# target frequencies, an initial estimate could be determined from the below
# method;

# +
from qiskit_metal.analyses.em.cpw_calculations import guided_wavelength

def find_resonator_length(frequency, line_width, line_gap, N):
    """Estimate a CPW resonator length (returned as a string in mm).

    frequency in GHz; line_width/line_gap in um;
    N -> 2 for lambda/2, 4 for lambda/4.
    Substrate (750 um) and film (200 nm) thicknesses are fixed here.
    """
    [lambdaG, etfSqrt, q] = guided_wavelength(frequency*10**9, line_width*10**-6,
                                              line_gap*10**-6, 750*10**-6, 200*10**-9)
    return str(lambdaG/N*10**3)+" mm"
# -

# As we are not worried about a creating a functional chip in this tutorial,
# we will give the resonators somewhat arbitrary lengths. First coupling the
# two crossmons to Q_Main.

# +
bus_01 = RouteMeander(design, 'Bus_01', options = dict(
    hfss_wire_bonds = True,
    pin_inputs=Dict(
        start_pin=Dict(component='Q_Main', pin='bus_01'),
        end_pin=Dict(component='Q1', pin='bus_01')),
    lead=Dict(start_straight='125um', end_straight = '225um'),
    meander=Dict(asymmetry = '1305um'),
    fillet = "99um",
    total_length = '6mm'))

bus_02 = RouteMeander(design, 'Bus_02', options = dict(
    hfss_wire_bonds = True,
    pin_inputs=Dict(
        start_pin=Dict(component='Q_Main', pin='bus_02'),
        end_pin=Dict(component='Q2', pin='bus_02')),
    lead=Dict(start_straight='325um', end_straight = '125um'),
    meander=Dict(asymmetry = '450um'),
    fillet = "99um",
    total_length = '6.4mm'))

gui.rebuild()
# -

# Then the three transmon pockets on the north side to Q_Main.
# + bus_03 = RouteMeander(design,'Bus_03', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q_Main', pin='bus_03'), end_pin=Dict( component='Q3', pin='bus_03') ), lead=Dict( start_straight='225um', end_straight = '25um' ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '6.8mm')) #To help set the right spacing, jogs can be used to set some initially controlled routing paths from collections import OrderedDict jogs_start = OrderedDict() jogs_start[0] = ["L", '250um'] jogs_start[1] = ["R", '200um'] #jogs_start[2] = ["L", '200um'] jogs_end = OrderedDict() jogs_end[0] = ["L", '600um'] #jogs_end[1] = ["L", '800um'] bus_04 = RouteMeander(design,'Bus_04', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q_Main', pin='bus_04'), end_pin=Dict( component='Q4', pin='bus_04') ), lead=Dict( start_straight='225um', #end_straight = '25um', start_jogged_extension=jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '150um'), fillet = "99um", total_length = '7.2mm')) bus_05 = RouteMeander(design,'Bus_05', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q_Main', pin='bus_05'), end_pin=Dict( component='Q5', pin='bus_05') ), lead=Dict( start_straight='225um', end_straight = '25um' ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '7.6mm')) gui.rebuild() # - # Finally the three transmon pockets on the north side to each other. This concludes the interconnectivity between the qubits. 
# + bus_q3_q4 = RouteMeander(design,'Bus_Q3_Q4', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q3', pin='bus_q3_q4'), end_pin=Dict( component='Q4', pin='bus_q3_q4') ), lead=Dict( start_straight='125um', end_straight = '125um' ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '6.4mm')) bus_q4_q5 = RouteMeander(design,'Bus_Q4_Q5', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q4', pin='bus_q4_q5'), end_pin=Dict( component='Q5', pin='bus_q4_q5') ), lead=Dict( start_straight='125um', end_straight = '25um' ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '6.8mm')) gui.rebuild() # - # #### The Readouts and Control Lines # # The intention for this design is to have the three north transmon pockets be multiplexed to one readout line. The crossmons to their own readouts, as well as Q_Main. The tunable coupler, and the two crossmons also have flux lines which need to be connected to launchers. # First we will place the wirebond launchers at the edges of the chip. 
# + launch_qmain_read = LaunchpadWirebond(design, 'Launch_QMain_Read', options = dict(pos_x = '2mm', pos_y ='-4mm', orientation = '90')) launch_q1_fl = LaunchpadWirebond(design, 'Launch_Q1_FL', options = dict(pos_x = '0mm', pos_y ='-4mm', orientation = '90', trace_width = '5um', trace_gap = '3um',)) launch_q1_read = LaunchpadWirebondCoupled(design, 'Launch_Q1_Read', options = dict(pos_x = '-2mm', pos_y ='-4mm', orientation = '90')) launch_tcoup_fl = LaunchpadWirebond(design, 'Launch_TuneC_FL', options = dict(pos_x = '-4mm', pos_y ='-4mm', orientation = '90', trace_width = '5um', trace_gap = '3um',)) launch_tcoup_read = LaunchpadWirebondCoupled(design, 'Launch_TuneC_Read', options = dict(pos_x = '-5mm', pos_y ='-3mm', orientation = '0')) launch_q2_read = LaunchpadWirebondCoupled(design, 'Launch_Q2_Read', options = dict(pos_x = '-5mm', pos_y ='-1mm', orientation = '0')) launch_q2_fl = LaunchpadWirebond(design, 'Launch_Q2_FL', options = dict(pos_x = '-5mm', pos_y ='1mm', orientation = '0', trace_width = '5um', trace_gap = '3um',)) launch_nw = LaunchpadWirebond(design, 'Launch_NW',options = dict(pos_x = '-5mm', pos_y='3mm', orientation=0)) launch_ne = LaunchpadWirebond(design, 'Launch_NE',options = dict(pos_x = '5mm', pos_y='3mm', orientation=180)) gui.rebuild() # - # We then will add in the readout resonators for Q_Main, Q1, Q2 and the tuneable coupler. # We will add a finger capacitor for the Q_Main readout, instead of just using the LaunchpadWirebondCoupled. 
# + #Main Readout read_q_main_cap = CapNInterdigital(design,'Read_Q_Main_Cap', options = dict(pos_x = '2mm', pos_y ='-3.5mm', orientation = '0')) jogs_end = OrderedDict() jogs_end[0] = ["L", '600um'] jogs_start = OrderedDict() jogs_start[0] = ["L", '250um'] read_q_main = RouteMeander(design,'Read_Q_Main', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q_Main', pin='readout'), end_pin=Dict( component='Read_Q_Main_Cap', pin='north_end') ), lead=Dict( start_straight='725um', end_straight = '625um', start_jogged_extension = jogs_start, end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '5.6mm')) read_q_main_cap_launch = RoutePathfinder(design, 'Read_Q_Main_Cap_Launch', options = dict(hfss_wire_bonds = True, pin_inputs = dict( start_pin=Dict( component='Read_Q_Main_Cap', pin='south_end'), end_pin=Dict( component='Launch_QMain_Read', pin='tie')), lead=Dict( start_straight='0um', end_straight = '0um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ))) gui.rebuild() # + #Crossmon's Readouts jogs_end = OrderedDict() jogs_end[0] = ["L", '600um'] jogs_start = OrderedDict() jogs_start[0] = ["L", '250um'] read_q1 = RouteMeander(design,'Read_Q1', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q1', pin='readout'), end_pin=Dict( component='Launch_Q1_Read', pin='tie') ), lead=Dict( start_straight='250um', end_straight = '25um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '6.8mm')) jogs_end = OrderedDict() jogs_end[0] = ["L", '600um'] jogs_start = OrderedDict() jogs_start[0] = ["L", '250um'] read_tunec = RouteMeander(design,'Read_TuneC', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Tune_C_Q12', pin='Control'), end_pin=Dict( component='Launch_TuneC_Read', pin='tie') ), lead=Dict( start_straight='1525um', 
end_straight = '125um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '50um'), fillet = "99um", total_length = '5.8mm')) jogs_end = OrderedDict() jogs_end[0] = ["L", '600um'] jogs_start = OrderedDict() jogs_start[0] = ["L", '250um'] read_q2 = RouteMeander(design,'Read_Q2', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q2', pin='readout'), end_pin=Dict( component='Launch_Q2_Read', pin='tie') ), lead=Dict( start_straight='350um', end_straight = '0um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '-450um'), fillet = "99um", total_length = '5.4mm')) gui.rebuild() # - # Finishing off this section of the chip by connecting the flux lines to appropraite wirebond launch pads. # + #Crossmon flux lines flux_line_Q1 = RoutePathfinder(design,'Flux_Line_Q1', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q1', pin='flux_line'), end_pin=Dict( component='Launch_Q1_FL', pin='tie')), fillet = '99um', trace_width = '5um', trace_gap = '3um', #anchors = anchors )) jogs_start = OrderedDict() jogs_start[0] = ["L", '750um'] flux_line_tunec = RoutePathfinder(design,'Flux_Line_TuneC', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Tune_C_Q12', pin='Flux'), end_pin=Dict( component='Launch_TuneC_FL', pin='tie')), lead=Dict( start_straight='875um', end_straight = '350um', start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), fillet = '99um', trace_width = '5um', trace_gap = '3um', #anchors = anchors )) jogs_start = OrderedDict() jogs_start[0] = ["L", '525um'] jogs_start[1] = ["R", '625um'] flux_line_Q2 = RoutePathfinder(design,'Flux_Line_Q2', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q2', pin='flux_line'), end_pin=Dict( component='Launch_Q2_FL', pin='tie')), lead=Dict( start_straight='175um', end_straight = '150um', 
start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), fillet = '99um', trace_width = '5um', trace_gap = '3um', #anchors = anchors )) gui.rebuild() # - # Shifting our focus now to the three transmon pockets in the north. As we want these to be multiplexed to a single readout line, we will add in a few three port components, such as the CoupledLineTee and CapNInterdigitalTee. # Q3 will have an inductive coupling to the readout line (as we want a lambda/4 resonator), Q4 will have a simple gap capacitor, and Q5 will have an interdigitated capacitor. # + q3_read_T = CoupledLineTee(design,'Q3_Read_T', options=dict(pos_x = '-3mm', pos_y = '3mm', orientation = '0', coupling_length = '200um', open_termination = False)) #We use finger count to set the width of the gap capacitance, -> N*cap_width + (N-1)*cap_gap q4_read_T = CapNInterdigitalTee(design,'Q4_Read_T', options=dict(pos_x = '0mm', pos_y = '3mm', orientation = '0', finger_length = '0um', finger_count = '8')) q5_read_T = CapNInterdigitalTee(design,'Q5_Read_T', options=dict(pos_x = '3mm', pos_y = '3mm', orientation = '0', finger_length = '50um', finger_count = '11')) gui.rebuild() # - # We add in the readout resonators to each respective qubit. 
# + read_q3 = RouteMeander(design,'Read_Q3', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q3', pin='readout'), end_pin=Dict( component='Q3_Read_T', pin='second_end') ), lead=Dict( start_straight='150um', end_straight = '150um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '0um'), fillet = "99um", total_length = '5mm')) read_q4 = RouteMeander(design,'Read_Q4', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q4', pin='readout'), end_pin=Dict( component='Q4_Read_T', pin='second_end') ), lead=Dict( start_straight='125um', end_straight = '125um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '0um'), fillet = "99um", total_length = '5.8mm')) read_q5 = RouteMeander(design,'Read_Q5', options = dict(hfss_wire_bonds = True, pin_inputs=Dict( start_pin=Dict( component='Q5', pin='readout'), end_pin=Dict( component='Q5_Read_T', pin='second_end') ), lead=Dict( start_straight='125um', end_straight = '125um', #start_jogged_extension = jogs_start, #end_jogged_extension = jogs_end ), meander=Dict( asymmetry = '0um'), fillet = "99um", total_length = '5.4mm')) gui.rebuild() # - # We complete the layout by connecting the multiplexed readout line to the launchpads on either side of the chip. 
# + mp_tl_01 = RoutePathfinder(design, 'ML_TL_01', options = dict(hfss_wire_bonds = True, pin_inputs = dict( start_pin=Dict( component='Launch_NW', pin='tie'), end_pin=Dict( component='Q3_Read_T', pin='prime_start')) )) mp_tl_02 = RoutePathfinder(design, 'ML_TL_02', options = dict(hfss_wire_bonds = True, pin_inputs = dict( start_pin=Dict( component='Q3_Read_T', pin='prime_end'), end_pin=Dict( component='Q4_Read_T', pin='prime_start')) )) mp_tl_03 = RoutePathfinder(design, 'ML_TL_03', options = dict(hfss_wire_bonds = True, pin_inputs = dict( start_pin=Dict( component='Q4_Read_T', pin='prime_end'), end_pin=Dict( component='Q5_Read_T', pin='prime_start')) )) mp_tl_04 = RoutePathfinder(design, 'ML_TL_04', options = dict(hfss_wire_bonds = True, pin_inputs = dict( start_pin=Dict( component='Q5_Read_T', pin='prime_end'), end_pin=Dict( component='Launch_NE', pin='tie')) )) gui.rebuild() # - # With this, we have completed the construction of our layout. # # Now, anyone familiar with chip design might find some of the location choices to be sub-optimal, with large sections of your chip left unused, or perhaps some CPW transmission lines running a bit closer to each other than would be ideal for avoiding cross talk concerns. These could be address by shifting the origin of your chip, or modifying component options to better compact your layout and alleviate crosstalk concerns. # # For this tutorial, we aren't too concerned how much space we may use up on our fictional chip, so we will instead continue on to analysis and tuning. # # Analyze # ## Capacitance Extraction and LOM # First we want to quickly look at the qubit parameters. Initial simulation and analysis is to use a lumped element approximation, by extracting the capacitance matrix of the qubit. We first render the qubit (we will focus on Q_Main for all of these simulations), by rendering it into Ansys Q3D and then using LOM analysis on the resulting capacitance matrix. 
q_main_q3d = design.renderers.q3d # We can check if we wish to change any of the default options for the renderer instance. For now we will leave them as is. q_main_q3d.options # + # If Ansys is not already open, uncomment the line below to open Ansys or you can open it manually #q_main_q3d.open_ansys() # #! Important! Make sure Ansys is fully opened and you have, if necessary, clicked ‘close’ #in the Ansys pop up box before running further cells! # + #If you open Ansys manually, uncomment the code below to add a project. #q_main_q3d.new_ansys_project() # - q_main_q3d.connect_ansys() # We next add a design and a setup. The setup options are important to modify dependent on the simulation you are running. You should modify the number of passes and convergence based on the accuracy you require for your simulation. q_main_q3d.activate_q3d_design("Q_Main") q_main_q3d.add_q3d_setup(name = 'Tune_Q_Main', max_passes = 21, min_converged_passes = 2, percent_error = 0.05) q_main_q3d.activate_q3d_setup('Tune_Q_Main') # Next we will want to render Q_Main. As we are wanting the complete capacitance matrix from this simulation, we will want to be sure to terminate the unconnected pins of Q_Main with opens, so that they are separate charge islands in the simulation. If not recalling all of the pin names, one can look at the GUI, or check the pin dictionary attached to Q_Main. q_main.pins.keys() q_main_q3d.render_design(['Q_Main'], [('Q_Main', 'readout'), ('Q_Main', 'bus_01'),('Q_Main', 'bus_02'),('Q_Main', 'bus_03'), ('Q_Main', 'bus_04'), ('Q_Main', 'bus_05')]) # With it having been rendered, we now start the simulation. Depending on the complexity of the simulation, it could take a minute, or multiple hours. It is generally best to start with a small number of `max_passes` if you are unsure, so can get a sense on the timing. 
As each adaptive pass adds additional tetrahedrons, the simulation time per pass will increase significantly (as well as the amount of system memory necessary). q_main_q3d.analyze_setup('Tune_Q_Main') # With the simulation completed, we can look at the capacitance matrix; q_main_q3d.get_capacitance_matrix() # But more importantly, we can use that matrix to run LOM analysis. The method, `lumped_oscillator_vs_passes` takes input as follows; # * Lj - the Josephson inductance of your Josephson junction (we will pick a value such that the qubit frequency is 5 GHz) # * Cj - the capacitance of your Josephson junction # * N - the total number of connection pads (in our case 6) # * fr - the frequency of the readout resonator (for simplicity of this tutorial, we pick 7 GHz) # * [fb1, fb2, fb3...., fbN-1] - list of the frequencies of the busses (for simplicity of this tutorial, we will pick 5.6, 5.7, 5.8, 5.9 and 6 GHz) # * passes - the number of passes your simulation ran for (in our case 19) # + #q_main_lom = q_main_q3d.lumped_oscillator_vs_passes(Lj, Cj ~ 2 fF, N- total number of connectionPads, fr (readout frequency), # [fb1,fb2,.... fbN-1] - list of the bus frequencies, maxPass - how many passes did Ansys Q3D take) q_main_lom = q_main_q3d.lumped_oscillator_vs_passes(14, 2, 6, 7, [5.6, 5.7,5.8,5.9,6.0], 19) # - # Which gives us the qubits frequency, anharmonicity, and coupling strength to the different connection pads. We can further check if these parameters converged well, as if they have not we may want to modify our simulation in order to get a more accurate result. We then will want to make modifications to our qubit options, such as `pad_gap` of the qubit, or modifying the size of the connection pads, in order to hit the desired qubit anharmonicity or readout chi values respectively. q_main_q3d.plot_convergence_main(q_main_lom); q_main_q3d.plot_convergence_chi(q_main_lom) # Once the analysis and tuning is complete, we can disconnect from Ansys. 
q_main_q3d.disconnect_ansys() # ## Eigenmode and EPR # # Once each of our qubits have been run through LOM, we can begin to look at the resonant busses and readouts, and larger coupled sections of the chip. One such case could be looking at Q_Main, Q5, and Bus_05. This allows us not only to look at some of the parameters of the individual qubits, but also the bus frequency and if the qubits are coupled (via the bus) to the degree we wish. # # We will setup the design and simulation in the same manner as we did previously, but with the methods needed for an eigenmode simulation. q_main_q5_eigen = design.renderers.hfss q_main_q5_eigen.options['wb_size'] = 5 q_main_q5_eigen.options # + # If Ansys is not already open, uncomment the line below to open Ansys or you can open it manually #q_main_q5_eigen.open_ansys() # #! Important! Make sure Ansys is fully opened and you have, if necessary, clicked ‘close’ #in the Ansys pop up box before running further cells! # + #If you open Ansys manually, uncomment the code below to add a project. #q_main_q5_eigen.new_ansys_project() # - q_main_q5_eigen.connect_ansys() q_main_q5_eigen.add_eigenmode_design("QMain_Q5_Bus05") q_main_q5_eigen.add_eigenmode_setup(name='3Modes', min_freq_ghz = 4, n_modes=3, max_delta_f = 0.1, max_passes = 10, min_converged = 2) q_main_q5_eigen.activate_eigenmode_setup('3Modes') # With the simulation setup, we next render the desired components. All unconnected pins are left as shorts, as we are only concerned about simulating the resonant mode of the three components listed. We also may want to modify the junction inductance of the two qubits based on the previous LOM analysis, so they are near the desired frequency. Further, one may want to change the length of the bus after initial simulations to get it to the target frequency. 
# + q_main.options.hfss_inductance = '13nH' Q5.options.hfss_inductance = '15nH' bus_05.options.total_length = '7.5mm' gui.rebuild() # - q_main_q5_eigen.render_design(['Q_Main', 'Q5','Bus_05'], []) e_design = q_main_q5_eigen.pinfo.design e_design.set_variable('Lj1', '13 nH') e_design.set_variable('Cj1', '0 fF') e_design.set_variable('Lj2', '15 nH') e_design.set_variable('Cj2', '0 fF') q_main_q5_eigen.analyze_setup('3Modes') #Note - simulation can take a while depending on your computer # Once the simulation is complete, we can check to see if the convergence was good. q_main_q5_eigen.plot_convergences() # With the eigenmode simulation complete (and nicely converged) we can run some EPR analysis on the result. import pyEPR as epr # + pinfo = q_main_q5_eigen.pinfo pinfo.junctions['jj1'] = {'Lj_variable': 'Lj1', 'rect': 'JJ_rect_Lj_Q_Main_rect_jj', 'line': 'JJ_Lj_Q_Main_rect_jj_', 'Cj_variable': 'Cj1'} pinfo.junctions['jj2'] = {'Lj_variable': 'Lj2', 'rect': 'JJ_rect_Lj_Q5_rect_jj', 'line': 'JJ_Lj_Q5_rect_jj_', 'Cj_variable': 'Cj2'} pinfo.validate_junction_info() # Checks that valid names of variables and objects have been supplied #Specifying the dissipative elements pinfo.dissipative['dielectrics_bulk'] = ['main'] eprd = epr.DistributedAnalysis(q_main_q5_eigen.pinfo) # - # We can first look at the electric field and subtrate participation. # + eprd.set_mode(1) ℰ_elec = eprd.calc_energy_electric() ℰ_elec_substrate = eprd.calc_energy_electric(None, 'main') ℰ_mag = eprd.calc_energy_magnetic() print(f""" ℰ_elec_all = {ℰ_elec} ℰ_elec_substrate = {ℰ_elec_substrate} EPR of substrate = {ℰ_elec_substrate / ℰ_elec * 100 :.1f}% ℰ_mag_all = {ℰ_mag} ℰ_mag % of ℰ_elec_all = {ℰ_mag / ℰ_elec * 100 :.1f}% """) # - # Then run the EPR analysis to find the kerr matrix. # + eprd.do_EPR_analysis() epra = epr.QuantumAnalysis(eprd.data_filename) epra.analyze_all_variations(cos_trunc = 7, fock_trunc = 6) swp_variable = 'Lj1' # suppose we swept an optimetric analysis vs. 
inductance Lj epra.plot_hamiltonian_results(swp_variable=swp_variable) epra.report_results(swp_variable=swp_variable, numeric=True) # - # From the analysis results we can determine the qubits anharmonicities and coupling strength. # Once the analysis and tuning is complete, we can close the connection to Ansys. q_main_q5_eigen.disconnect_ansys() # ### Rendering to a GDS File # Once all of the tuning is complete, we will want to prepare a GDS file so we can create a mask and fabricate our chip. We first create a gds render instance. full_chip_gds = design.renderers.gds # The various options for the gds renderer can also be checked and changed as necessary. A key option is the gds file which holds the cells for your junction ebeam design. Make sure this is pointing at the correct file so they are placed in your final mask at the appropriate locations. full_chip_gds.options full_chip_gds.options['path_filename'] ='../resources/Fake_Junctions.GDS' full_chip_gds.options['no_cheese']['buffer']='50um' full_chip_gds.export_to_gds('Full_Chip_01.gds') # With the design complete, we can close the GUI. gui.main_window.close()
tutorials/8 Full Design Flow Examples/Full_Chip_Design_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + active="" # <a href=\"https://colab.research.google.com/github/warwickdatascience/beginners-python/blob/master/session_one/session_one_subject_questions/session_one_mathstat_questions.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a> # - # # Beginner's Python—Session One Mathematics/Statisics Questions # ## Annular Areas # Create a variable, `pi` storing the value of $\pi$ to 5 decimal places. # The equation for the area of an annulus is given by # # $$A = \pi(R^2-r^2)$$ # # Create variables `R` and `r` with values $4.2$ and $1.7$ respectively. Create a variable `A` storing the corresponding annulus area. # Print the type of the variable `A`? # Print the value of `A` as an integer (hint: use the `int()` function) # ## Biased Coins # Define variables `n` and `p` with values of your choice to represent a game in which `n` coins are tossed, each having a bias towards heads of `p`. # Most of Python's mathematical functionality is not loaded by default. Instead, we can _import_ functions from the maths _module_. In the example below, we import the `factorial()` function (you don't need to do this again) and print the value of $4!$. from math import factorial print(factorial(4)) # What is the value of $10!$? # The probability of tossing `k` heads in the scenario above is given by the binomial formula, # # $$\mathbb{P}(k\textrm{ heads}) = \frac{n!}{k!(n-k)!}p^k \left(1-p\right)^{n-k}$$ # # Create a variable `k` of your choosing and `p_k` giving the probability of tossing `k` heads. # Print the following sentence, replacing the '???'s with values: # # _"When tossing a coin ??? times, that is biased towards heads with probability ???, the probability of obtaining ??? 
 heads is ???."_ # # **NOTE:** By default, `print` will place spaces between each input. If we wish to have the numbers output next to the full stops/commas without a space, we'll need to use string concatenation. For example `str(42) + "."` will give `42.`. Notice that we have to convert the number to a string first using `str` else we will receive a type error. # ## It's All Downhill From Here # The Newton-Raphson method is a numerical method used for finding the root of a function by moving down the tangent line of the curve. We start with an initial guess `x` and then update this using the map, # # $$ # x \rightarrow x - \frac{f(x)}{f'(x)} # $$ # # where $f$ is the function in question and $f'$ its derivative. # Create a variable `x` with initial value $2.3$. # Write code to run one step of the Newton-Raphson method for $f(x) = (x-2)(x+1)$, overwriting `x` and printing the new value. Run this cell repeatedly until the solution converges.
session-one/session_one_subject_questions/session_one_mathstat_questions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from scipy.stats import norm
from scipy import special
import pandas as pd


# +
def linear_regressor(vec_x, vec_y):
    """Fit the simple linear regression y = a + b*x by ordinary least squares.

    Prints the fitted coefficients with their standard errors and an ANOVA
    summary (SST, SSR, SSE, R^2, Pearson r, F ratio, t ratios).

    Parameters
    ----------
    vec_x : np.ndarray
        1-D array of predictor values.
    vec_y : np.ndarray
        1-D array of predictand values, same length as ``vec_x``.

    Returns
    -------
    tuple of float
        ``(b, a)`` — the fitted slope and intercept.

    Raises
    ------
    ValueError
        If the two vectors differ in length.  (The original code only printed
        a warning and then crashed later on the mismatched arithmetic.)
    """
    nx = vec_x.shape[0]
    ny = vec_y.shape[0]
    if nx != ny:
        raise ValueError(f'Vector X and Y have different size! ({nx} vs {ny})')
    print('X & Y have same size :)')

    sum_xy = np.sum(vec_x * vec_y)
    sum_x = np.sum(vec_x)
    sum_y = np.sum(vec_y)
    sum_x_sq = np.sum(vec_x ** 2)
    sum_dx_sq = np.sum((vec_x - np.mean(vec_x)) ** 2)

    # Normal-equation solution for the slope and intercept.
    b = (nx * sum_xy - sum_x * sum_y) / (nx * sum_x_sq - sum_x ** 2)
    a = np.mean(vec_y) - b * np.mean(vec_x)

    y_fit = a + b * vec_x
    sum_y_yfit_sq = np.sum((vec_y - y_fit) ** 2)
    # Residual standard error: nx - 2 degrees of freedom for a 2-parameter fit.
    s_e = np.sqrt(1 / (nx - 2) * sum_y_yfit_sq)
    sigma_a = s_e * np.sqrt(sum_x_sq / (nx * sum_dx_sq))
    sigma_b = s_e / np.sqrt(sum_dx_sq)

    # ANOVA decomposition: SST = SSR + SSE.
    SST = np.sum((vec_y - np.mean(vec_y)) ** 2)
    SSR = np.sum((y_fit - np.mean(vec_y)) ** 2)
    SSE = sum_y_yfit_sq
    R_sq = SSR / SST
    R_pearson = np.sqrt(R_sq)
    F_test = SSR / s_e ** 2
    t_a = a / sigma_a
    t_b = b / sigma_b

    print('=======================')
    print('y = a + bx')
    print('Slope b:', f"{b:.3}")
    print('Sigma b:', f"{sigma_b:.3}")
    print('Intercept a:', f"{a:.4}")
    print('Sigma Intercept a:', f"{sigma_a:.4}")
    print('MSE: s_e**2:', f"{s_e**2:.4}")
    print('=======================')
    print('ANOVA Table')
    print('Total sum of squares - SST:', f"{SST:.3}")
    print('SSR:', f"{SSR:.3}")
    print('SSE:', f"{SSE:.3}")
    print('Coefficient of determination - R^2:', f"{R_sq:.3}")
    print('Pearson correlation - R:', f"{R_pearson:.3}")
    print('F ratio SSR/SSE - F:', f"{F_test:.3}")
    print('t-Student ratio - a:', f"{t_a:.3}")
    print('t-Student ratio - b:', f"{t_b:.3}")
    print('=======================')
    return b, a


# +
# Example 6.1 - A Simple Linear Regression
# Table A.1 Tmin at Itacta vs.
# Canadaiguga in °F

# Tmin Canadaigua — the predictand.
y = np.array([28, 28, 26, 19, 16, 24, 26, 24, 24, 29, 29, 27, 31, 26, 38, 23,
              13, 14, 28, 19, 19, 17, 22, 2, 4, 5, 7, 8, 14, 14, 23])

# Date (day of month, 1..31)
x1 = np.arange(1, 32)

# Itaca T Max
x2 = np.array([33, 32, 30, 29, 25, 30, 37, 37, 29, 30, 36, 32, 33, 34, 53, 45,
               25, 28, 32, 27, 26, 28, 24, 26, 9, 22, 17, 26, 27, 30, 34])

# Itaca T Min
# BUG FIX: the original reassigned this same name (x3) to the precipitation
# series two lines below, silently discarding the T Min predictor.  The six
# predictors are now x1..x6 so all of them survive.
x3 = np.array([19, 25, 22, -1, 4, 14, 21, 22, 23, 27, 29, 25, 29, 15, 29, 24,
               0, 2, 26, 17, 19, 9, 20, -6, -13, -13, -11, -4, -4, 11, 23])

# Itaca ppt + 0.01 (log-transformed, as is conventional for precipitation)
x4 = np.log(np.array([0.01, 0.07, 1.11, 0.01, 0.01, 0.01, 0.01, 0.04, 0.02,
                      0.05, 0.34, 0.06, 0.18, 0.02, 0.02, 0.01, 0.01, 0.01,
                      0.01, 0.45, 0.01, 0.01, 0.70, 0.01, 0.01, 0.01, 0.01,
                      0.01, 0.01, 0.02, 0.05]))

# Can. T Max
# BUG FIX: the original wrapped these temperatures in np.log() although the
# log transform is meant for the precipitation series only (cf. the K=1 table
# below, which lists Can Max alongside the untransformed Ith Max).
x5 = np.array([34, 36, 30, 29, 30, 35, 44, 38, 31, 33, 39, 33, 34, 39, 51, 44,
               25, 34, 36, 29, 27, 29, 27, 24, 11, 21, 19, 26, 28, 31, 38])

# Can. ppt + 0.01 (log-transformed)
x6 = np.log(np.array([0.01, 0.04, 0.84, 0.01, 0.01, 0.01, 0.02, 0.05, 0.01,
                      0.09, 0.18, 0.04, 0.04, 0.01, 0.06, 0.03, 0.04, 0.01,
                      0.01, 0.35, 0.02, 0.01, 0.35, 0.08, 0.01, 0.01, 0.01,
                      0.01, 0.01, 0.01, 0.13]))

# Regress Canadaigua Tmin on the Canadaigua precipitation predictor.
# (Same series the original, mislabelled x5 held, so the result is unchanged.)
linear_regressor(x6, y)

# +
# K=1 Linear Function
#  X        MSE    R2     F
# Date      51.1   0.363  16.5
# Ith Max   33.8   0.579  39.9
# Ith Min   11.8   0.85   169
# Ith Ppt   66.4   0.17   6.6
# Can Max   29.6   0.63   49.5
# Can Ppt   71.85  0.10   3.4

# K=2 Bilinear Function with x1 included
Chapter_6/Wilks - Example 6.6 - Equation Development Using Forward Selection - Table A.1 - 14-JUN-2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This chapter was added by me (<NAME>). My goal was to present a couple of graphical features of the matplotlib package and to explore some set up possibilities of the graphs. # <!--NAVIGATION--> # < [Further Resources](17-Further-Resources.ipynb) | [Contents](Index.ipynb) | [Appendix: Figure Code](19-Figures.ipynb) > # ## Line Plots import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np plt.style.use('classic') plt.style.available # + x = np.linspace(0,10,100) fig = plt.figure() plt.plot(x, np.sin(x),"-"); # solid line plt.plot(x, np.cos(x),"--"); # dashed line # other linetypes: ":" dotted line,"-." dashdot, "---" # + # plt.plot? # + # plt.style.use? # + # plt.style.available? # - # %matplotlib inline fig.savefig("SinCosFig.png") fig.canvas.get_supported_filetypes() # + plt.figure() plt.subplot(2,1,1) plt.plot(x, np.sin(x)) plt.subplot(2,1,2) plt.plot(x, np.cos(x)) # - plt.gcf() plt.gca() # + fig, ax = plt.subplots(2) ax[0].plot(x, np.sin(x)) ax[1].plot(x, np.cos(x)); # - plt.style.use("seaborn-whitegrid") # To create a fig and axes we can do the following. # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x)) ax.plot(x, np.cos(x)) # + # ax.plot? # - # Set the range of visibility for axis. # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":") ax.plot(x, np.sin(x + np.pi/3), "--c") plt.ylim(-1.5,1.5) plt.xlim(-1,11) # or equivalently plt.axis([-1, 11, -1.5, 1.5]) # - # Assure the thigtest axis in which one can represent all data. 
# + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":") ax.plot(x, np.sin(x + np.pi/3), "--c") plt.axis("tight") # - # Equal aspect ration on the two axis. # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":") ax.plot(x, np.sin(x + np.pi/3), "--c") plt.axis("equal") # or # ax.axis("equal") # + # plt.axes? # - # Labelling axis and title. # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":") plt.axis("equal") plt.title("Sin and cos function") plt.xlabel("x values") plt.ylabel("y values") # - # Generating legend. # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10, 1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid", label = "sin(x)") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":", label = "cos(x)") plt.axis("equal") plt.title("Sin and cos function") plt.xlabel("x values") plt.ylabel("y values") plt.legend() # - # The following correspondence exists between the plt and corresponding ax functions: # - plt.xlabel( ) $\Rightarrow$ ax.set_xlabel( ) # - plt.ylabel( ) $\Rightarrow$ ax.set_ylabel( ) # - plt.xlim( ) $\Rightarrow$ ax.set_xlim( ) # - plt.ylim( ) $\Rightarrow$ ax.set_ylim( ) # - plt.title( ) $\Rightarrow$ ax.title( ) # + fig = plt.figure() ax = plt.axes() x = np.linspace(0,10,1000) ax.plot(x, np.sin(x), color = "blue", linestyle = "solid") ax.plot(x, np.cos(x), color = "0.75", linestyle = ":") ax.axis("equal") # ax.set_title("Sin and cos function") # ax.set_xlabel("x values") # ax.set_ylabel("y values") # or ax.set(xlabel = "x values", ylabel = "y values") # - # ## Scatter Plots rng = np.random.RandomState(0) for marker in ["o", ".", 
",","x","+","v","^","<",">","p","s","d"]: plt.plot(rng.rand(5), rng.rand(5), marker, label = "marker = ‘{0}‘".format(marker)) plt.legend() plt.xlim(0,1.8) # By the following command you can see what other parameters can be set for a plot. Like markersize, linewidth, markerfacecolor, markeredgecolor, markeredgewidth. # + # plt.plot? # + x = np.linspace(0,10, 30) plt.plot(x, np.sin(x), "-p", color = "blue", markersize = 15, markerfacecolor = "white", linewidth = 4, label = "sin(x)"); # - # Alternatively to plt.plot one can use the plt.scatter function to create scatterplot. Here the properties of all points can be set individually. # + rng = np.random.RandomState(0) x = rng.randn(100) y = rng.randn(100) colors = rng.rand(100) sizes = 1000 * rng.rand(100) plt.scatter(x,y, c = colors, s = sizes, alpha = 0.3, cmap = "viridis") plt.colorbar() # + # plt.scatter? # + from sklearn.datasets import load_iris iris = load_iris() features = iris.data.T plt.scatter(features[0], features[1], alpha = 0.2, s = 100*features[3], c = iris.target, cmap = "viridis") plt.xlabel(iris.feature_names[0]) plt.ylabel(iris.feature_names[1]) # - # ## 3-Dimensional Function # + def f(x,y): return np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) x = np.linspace(0, 5, 50) y = np.linspace(0, 5, 50) X, Y = np.meshgrid(x,y) Z = f(X,Y) # - plt.contour(X,Y,Z, colors = "black"); # Show 20 equally distanced hight lines. plt.contour(X, Y, Z, 20, colors = "black"); # + plt.contour(X, Y, Z, 20, cmap = "RdGy") plt.colorbar() # + plt.contourf(X, Y, Z, 20, cmap = "RdGy") plt.colorbar() # - # Heightlines. # + contours = plt.contour(X, Y, Z, 3, colors = "black") plt.clabel(contours, inline = True, fontsize = 8) plt.imshow(Z, extent = [0, 5, 0, 5], origin = "lower", cmap = "RdGy", alpha = 0.5) plt.colorbar() # - # ## Histograms # + data = np.random.randn(1000) plt.hist(data); # - # To save the frequencies and the bins we can do the following. 
count, bin_edges = np.histogram(data, bins = 5) print(count) print(bin_edges) # <!--NAVIGATION--> # < [Further Resources](17-Further-Resources.ipynb) | [Contents](Index.ipynb) | [Appendix: Figure Code](19-Figures.ipynb) >
Course/18-Plotting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

from rdflib import Graph

# Source vocabulary (Turtle) and the output file for the RDF/XML rendering.
document = "http://vocab.gtfs.org/gtfs.ttl"
name = "gtfs"

g = Graph()
g.parse(document)

# Write the RDF/XML serialization to the destination file.
g.serialize(format="xml", destination=name)

# BUG FIX: Graph.serialize() only returns the serialized text when no
# destination is given; with a destination it returns the graph object, so the
# original `print(v)` printed a graph repr instead of the XML.  Re-serialize
# to a string for display.
v = g.serialize(format="xml")
print(v)
src/Notebooks/ParseToXML.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from lenskit.metrics import dataGenerator
from lenskit import batch, topn, util, topnFair
from lenskit import crossfold as xf
from lenskit.algorithms import Recommender, als, user_knn as knn
from lenskit import topn, topnFair
import numpy as np
import pandas as pd
# %matplotlib inline
import math

# +
# Alternative local paths kept for reference:
#ratings = pd.read_csv('/Users/denisehansen/Desktop/ITU/Thesis/Fair-Recommendations/ml-latest-small/ratings.1.csv', sep=',',
#                      names=['user', 'item', 'rating', 'timestamp'], header=0)
#items = pd.read_csv('/Users/denisehansen/Desktop/ITU/Thesis/Fair-Recommendations/ml-latest-small/movies.csv', sep=',',
#                    names=['movieId','title','genres'], header=0)

ratings = pd.read_csv('/Users/josse/Git-kode projects/Speciale/Fair-Recommendations/ml-latest-small/ratings.csv', sep=',',
                      names=['user', 'item', 'rating', 'timestamp'], header=0)
items = pd.read_csv('/Users/josse/Git-kode projects/Speciale/Fair-Recommendations/ml-latest-small/movies.csv', sep=',',
                    names=['item', 'title', 'genres'], header=0)
#C:\Users\josse\Git-kode projects\Speciale\Fair-Recommendations\ml-latest-small\ratings.csv
# -

items.info()

# One-hot encode the pipe-separated genre list so each genre becomes a column.
items_dummy = pd.concat([items, items['genres'].str.get_dummies(sep='|')], axis=1)
items_dummy.head()

algo_ii = knn.UserUser(20)
algo_als = als.BiasedMF(50)


# NOTE(review): the name shadows the builtin eval(); kept for compatibility.
def eval(aname, algo, train, test):
    """Fit a clone of `algo` on `train` and return top-100 recommendations
    for every user appearing in `test`, tagged with the algorithm name."""
    fittable = util.clone(algo)
    fittable = Recommender.adapt(fittable)
    fittable.fit(train)
    users = test.user.unique()
    # now we run the recommender
    recs = batch.recommend(fittable, users, 100)
    # add the algorithm name for analyzability
    recs['Algorithm'] = aname
    return recs


# +
all_recs = []
test_data = []
for train, test in xf.partition_users(ratings[['user', 'item', 'rating']], 1, xf.SampleFrac(0.2)):
    test_data.append(test)
    all_recs.append(eval('ItemItem', algo_ii, train, test))
    all_recs.append(eval('ALS', algo_als, train, test))
# -

test_data = pd.concat(test_data, ignore_index=True)
all_recs = pd.concat(all_recs, ignore_index=True)

all_recs_joined = all_recs.join(items_dummy.set_index('item'), on='item')
all_recs_joined.head()

# BUG FIX: the original called pd.concat(test_data, ...) a second time here,
# but test_data is already a single DataFrame at this point, which makes
# pd.concat raise a TypeError.
test_data.head()

# Protected group: all recommendations for movies tagged with the Drama genre.
_protected_group2 = all_recs_joined.loc[all_recs_joined['Drama'] == 1]
_protected_group2.head()

_protected_group2['item'].values

recs_temp = all_recs_joined.iloc[100:110, :]
recs_temp


# (Original Danish note, translated: decide what is easiest — should this take
# a "protected group" frame and use containment, or take a protected variable?)
def calculate_demParity(recs, protected_group):
    """Absolute difference in rank-discounted exposure between protected and
    unprotected items of a recommendation list (demographic-parity gap).

    The exposure of an item shown at rank r is 1 / log2(1 + r).

    BUG FIX: the original ignored the `protected_group` parameter and read the
    module-level `_protected_group2` instead (and printed debug output for
    every row); the parameter is now actually used.  Membership is tested
    against a set for O(1) lookups.
    """
    exposure_pro = 0
    exposure_unpro = 0
    protected_items = set(protected_group['item'].values)
    for _, row in recs.iterrows():
        discount = 1 / math.log2(1 + row["rank"])
        if row["item"] in protected_items:
            exposure_pro += discount
        else:
            exposure_unpro += discount
    return abs(exposure_pro - exposure_unpro)


x = calculate_demParity(recs_temp, _protected_group2)
x

# +
# res:  categories of the protected variable (the genre dummy columns)
# res1: number of distinct items overall
# res2: the recommendations belonging to the protected (Drama) group
res = list(all_recs_joined.iloc[:, 8:])
res1 = all_recs_joined.item.nunique()
res2 = all_recs_joined.loc[all_recs_joined['Drama'] == 1]
print(res)

rla = topnFair.FairRecListAnalysis(['user', "Algorithm"])
rla.add_metric("rND")
rla.add_metric("rKL")
rla.add_metric("rRD")
rla.add_metric("div")
rla.add_metric("dem_parity")
results = rla.compute(all_recs_joined, test_data, "Drama", res, res1, res2.item.nunique())
results.head()
# -

results.groupby('Algorithm').mean()

results.groupby('Algorithm').rND.mean()

results.groupby('Algorithm').rND.mean().plot.bar()

results.groupby('Algorithm').div.mean().plot.bar()

# + active=""
#
# -
Read_dummy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 ('base') # language: python # name: python3 # --- # # Univariate Bijections # ## Background # [Normalizing Flows](https://flowtorch.ai/dev/bibliography#surveys) are a family of methods for constructing flexible distributions. As mentioned in [the introduction](https://flowtorch.ai/users), Normalizing Flows can be seen as a modern take on the [change of variables method for random distributions](https://en.wikipedia.org/wiki/Probability_density_function#Function_of_random_variables_and_change_of_variables_in_the_probability_density_function), and this is most apparent for univariate bijections. Thus, in this first section we restrict our attention to representing univariate distributions with bijections. # # The basic idea is that a simple source of noise, for example a variable with a standard normal distribution, $X\sim\mathcal{N}(0,1)$, is passed through a bijective (i.e. invertible) function, $g(\cdot)$ to produce a more complex transformed variable $Y=g(X)$. For such a random variable, we typically want to perform two operations: sampling and scoring. Sampling $Y$ is trivial. First, we sample $X=x$, then calculate $y=g(x)$. Scoring $Y$, or rather, evaluating the log-density $\log(p_Y(y))$, is more involved. How does the density of $Y$ relate to the density of $X$? We can use the substitution rule of integral calculus to answer this. Suppose we want to evaluate the expectation of some function of $X$. Then, # # $$ # \begin{aligned} # \mathbb{E}_{p_X(\cdot)}\left[f(X)\right] &= \int_{\text{supp}(X)}f(x)p_X(x)dx\\ # &= \int_{\text{supp}(Y)}f(g^{-1}(y))p_X(g^{-1}(y))\left|\frac{dx}{dy}\right|dy \\ # &= \mathbb{E}_{p_Y(\cdot)}\left[f(g^{-1}(Y))\right], # \end{aligned} # $$ # # where $\text{supp}(X)$ denotes the support of $X$, which in this case is $(-\infty,\infty)$. 
Crucially, we used the fact that $g$ is bijective to apply the substitution rule in going from the first to the second line. Equating the last two lines we get, # # $$ # \begin{aligned} # \log(p_Y(y)) &= \log(p_X(g^{-1}(y)))+\log\left(\left|\frac{dx}{dy}\right|\right)\\ # &= \log(p_X(g^{-1}(y)))-\log\left(\left|\frac{dy}{dx}\right|\right). # \end{aligned} # $$ # # Inituitively, this equation says that the density of $Y$ is equal to the density at the corresponding point in $X$ plus a term that corrects for the warp in volume around an infinitesimally small length around $Y$ caused by the transformation. # # If $g$ is cleverly constructed (and we will see several examples shortly), we can produce distributions that are more complex than standard normal noise and yet have easy sampling and computationally tractable scoring. Moreover, we can compose such bijective transformations to produce even more complex distributions. By an inductive argument, if we have $L$ transforms $g_{(0)}, g_{(1)},\ldots,g_{(L-1)}$, then the log-density of the transformed variable $Y=(g_{(0)}\circ g_{(1)}\circ\cdots\circ g_{(L-1)})(X)$ is # # $$ # \begin{aligned} # \log(p_Y(y)) &= \log\left(p_X\left(\left(g_{(L-1)}^{-1}\circ\cdots\circ g_{(0)}^{-1}\right)\left(y\right)\right)\right)+\sum^{L-1}_{l=0}\log\left(\left|\frac{dg^{-1}_{(l)}(y_{(l)})}{dy'}\right|\right), # \end{aligned} # $$ # # where we've defined $y_{(0)}=x$, $y_{(L-1)}=y$ for convenience of notation. In the following tutorial, we will see how to generalize this method to multivariate $X$. # # ## Fixed Univariate `Bijector`s # [FlowTorch](https://flowtorch.ai) contains classes for representing *fixed* univariate bijective transformations. These are particularly useful for restricting the range of transformed distributions, for example to lie on the unit hypercube. (In the following sections, we will explore how to represent learnable bijectors.) 
# # Let us begin by showing how to represent and manipulate a simple transformed distribution, # # $$ # \begin{aligned} # X &\sim \mathcal{N}(0,1)\\ # Y &= \text{exp}(X). # \end{aligned} # $$ # # You may have recognized that this is by definition, $Y\sim\text{LogNormal}(0,1)$. # # We begin by importing the relevant libraries: import torch import flowtorch.bijectors as B import flowtorch.distributions as D import matplotlib.pyplot as plt import seaborn as sns # A variety of bijective transformations live in the [`flowtorch.bijectors`](https://flowtorch.ai/api/flowtorch.bijectors) module, and the classes to define transformed distributions live in [`flowtorch.distributions`](https://flowtorch.ai/api/flowtorch.distributions). We first create the base distribution of $X$ and the class encapsulating the transform $\text{exp}(\cdot)$: dist_x = torch.distributions.Independent( torch.distributions.Normal(torch.zeros(1), torch.ones(1)), 1 ) bijector = B.Exp() # The class [`B.Exp`](https://flowtorch.ai/api/flowtorch.bijectors.exp) derives from [`B.Fixed`](https://flowtorch.ai/api/flowtorch.bijectors.fixed) and defines the forward, inverse, and log-absolute-derivative operations for this transform, # $$ # \begin{aligned} # g(x) &= \text{exp}(x)\\ # g^{-1}(y) &= \log(y)\\ # \log\left(\left|\frac{dg}{dx}\right|\right) &= x. # \end{aligned} # $$ # In general, a bijector class defines these three operations, from which it is sufficient to perform sampling and scoring. *We should think of a bijector as a plan to construct a normalizing flow rather than the normalizing flow itself* - it requires being instantiated with a concrete base distribution supplying the relevant shape information, dist_y = D.Flow(dist_x, bijector) # This statement returns the object `dist_y` of type [`flowtorch.distributions.Flow`](https://flowtorch.ai/api/flowtorch.distributions.flow) representing an object that has an interface compatible with `torch.distributions.Distribution`.
We are able to sample and score from `dist_y` object using its methods `.sample`, `.rsample`, and `.log_prob`. # # Now, plotting samples from both the base and transformed distributions to verify that we that have produced the log-normal distribution: plt.subplot(1, 2, 1) plt.hist(dist_x.sample([1000]).numpy(), bins=50) plt.title('Standard Normal') plt.subplot(1, 2, 2) plt.hist(dist_y.sample([1000]).numpy(), bins=50) plt.title('Standard Log-Normal') plt.show() # Our example uses a single transform. However, we can compose transforms to produce more expressive distributions. For instance, if we apply an affine transformation we can produce the general log-normal distribution, # # $$ # \begin{aligned} # X &\sim \mathcal{N}(0,1)\\ # Y &= \text{exp}(\mu+\sigma X). # \end{aligned} # $$ # # or rather, $Y\sim\text{LogNormal}(\mu,\sigma^2)$. In FlowTorch this is accomplished, e.g. for $\mu=3, \sigma=0.5$, as follows: # + bijector = B.Compose([ B.AffineFixed(loc=3, scale=0.5), B.Exp()]) dist_y = D.Flow(dist_x, bijector) plt.subplot(1, 2, 1) plt.hist(dist_x.sample([1000]).numpy(), bins=50) plt.title('Standard Normal') plt.subplot(1, 2, 2) plt.hist(dist_y.sample([1000]).numpy(), bins=50) plt.title('Log-Normal') plt.show() # - # The class [`B.Compose`](https://flowtorch.ai/api/flowtorch.bijectors.compose) combines multiple [`B.Bijector`](https://flowtorch.ai/api/flowtorch.bijectors.bijector)'s with [function composition](https://en.wikipedia.org/wiki/Function_composition) to produce a single *plan* for a Normalizing Flow, which is then intiated in the regular way. For the forward operation, transformations are applied in the order of the list. In this case, first [`B.AffineFixed`](https://flowtorch.ai/api/flowtorch.bijectors.affinefixed) is applied to the base distribution and then [`B.Exp`](https://flowtorch.ai/api/flowtorch.bijectors.exp). 
# ## Learnable Univariate `Bijector`s # Having introduced the interface for bijections and transformed distributions, we now show how to represent *learnable* transforms and use them for density estimation. Our dataset in this section and the next will comprise samples along two concentric circles. Examining the joint distribution: # + import numpy as np from sklearn import datasets from sklearn.preprocessing import StandardScaler n_samples = 1000 X, y = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05) X = torch.Tensor(StandardScaler().fit_transform(X)) plt.title(r'Samples from $p(x_1,x_2)$') plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.scatter(X[:,0], X[:,1], alpha=0.5) plt.show() # - # And the marginals: plt.subplot(1, 2, 1) sns.distplot(X[:,0], hist=False, kde=True, bins=None, hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}) plt.title(r'$p(x_1)$') plt.subplot(1, 2, 2) sns.distplot(X[:,1], hist=False, kde=True, bins=None, hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}) plt.title(r'$p(x_2)$') plt.show() # We will learn the marginals of the above distribution using a learnable transform, [`B.Spline`](https://flowtorch.ai/api/flowtorch.bijectors.spline), defined on a two-dimensional input: dist_x = torch.distributions.Independent( torch.distributions.Normal(torch.zeros(2), torch.ones(2)), 1 ) bijector = B.Spline() dist_y = D.Flow(dist_x, bijector) # [`B.Spline`](https://flowtorch.ai/api/flowtorch.bijectors.spline) passes each dimension of its input through a separate monotonically increasing function known as a spline. From a high-level, a spline is a complex parametrizable curve for which we can define specific points known as knots that it passes through and the derivatives at the knots. 
The knots and their derivatives are parameters that can be learnt, e.g., through stochastic gradient descent on a maximum likelihood objective, as we now demonstrate: optimizer = torch.optim.Adam(dist_y.parameters(), lr=1e-2) for step in range(1001): optimizer.zero_grad() loss = -dist_y.log_prob(X).mean() loss.backward() optimizer.step() if step % 200 == 0: print('step: {}, loss: {}'.format(step, loss.item())) # Plotting samples drawn from the transformed distribution after learning: X_flow = dist_y.sample(torch.Size([1000,])).detach().numpy() plt.title(r'Joint Distribution') plt.xlabel(r'$x_1$') plt.ylabel(r'$x_2$') plt.scatter(X[:,0], X[:,1], label='data', alpha=0.5) plt.scatter(X_flow[:,0], X_flow[:,1], color='firebrick', label='flow', alpha=0.5) plt.legend() plt.show() plt.subplot(1, 2, 1) sns.distplot(X[:,0], hist=False, kde=True, bins=None, hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}, label='data') sns.distplot(X_flow[:,0], hist=False, kde=True, bins=None, color='firebrick', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}, label='flow') plt.title(r'$p(x_1)$') plt.subplot(1, 2, 2) sns.distplot(X[:,1], hist=False, kde=True, bins=None, hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}, label='data') sns.distplot(X_flow[:,1], hist=False, kde=True, bins=None, color='firebrick', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}, label='flow') plt.title(r'$p(x_2)$') plt.show() # As we can see, we have learnt close approximations to the marginal distributions, $p(x_1),p(x_2)$. *It would have been challenging to fit the irregularly shaped marginals with standard methods, for example, a mixture of normal distributions*. As expected, since there is a dependency between the two dimensions, we do not learn a good representation of the joint, $p(x_1,x_2)$. In the next section, we explain how to learn multivariate distributions whose dimensions are not independent.
tutorials/univariate_bijections.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda-env-mp-py # language: python # name: conda-env-mp-py # --- # # Website notes and exercises # # A basic website walkthrough (40min) was pre-recorded and is # [online](https://www.youtube.com/watch?v=Mg9AgpwoArQ) on MP's YouTube channel. # # The following notes and exercises are to be referenced by an instructor for walking through a few topics interactively in a workshop setting. Each of the four sections should take ~15 minutes: walkthrough (5min), exercise (5min), and walkthrough of exercise solution (5min). Thus, an interactive walkthrough of this notebook should take ~1 hour. # ## 1. Analyze: thermodynamic (meta)stability # - Interface reaction between LiCoO2 and Li3PS4 # - Open to lithium elemental reservoir ($\mu$ = 0, -1.6 eV, -3.2 eV) # - Li-S phase diagram # - See unstable LiS detail. Show help guides. # - See stable Li2S. Go back to phase diagram with "Generate Phase Diagram". Note that Pourbaix diagram is phase diagram with oxygen and hydrogen included, and helps evaluate, among other things, suitability for photocatalysis / artificial photosynthesis by indicating oxygen and hydrogen evolution curves. # - Li-P-S phase diagram # - Open to Li # - At what lithium chemical potential is lithium phosphorus sulfide (Li<sub>3</sub>PS<sub>4</sub>) thermodynamically stable in a Li-P-S system open to a lithium reservoir? # ### Exercise: # # At what lithium chemical potentials is Li<sub>2</sub>O not thermodynamically stable in a Li-Co-O system open to a lithium reservoir? # # Extra: Go to the Battery Explorer and search for a cathode material that references stable LiCoO2 (mp-24850). How is the chemical potential formalism connected to battery safety? # ## 2. Explore: computed properties # - Pb # - Pb, >=1 elements. most stable 500. 
# - click logo to clear all state # - mpquery: `{'elements': {'$in': ['Se','Te'], '$all': ['Pb']}, 'nelements': {'$lte': 3}, 'has': {'$all': ['bandstructure', 'phonons', 'xas']}}` # - Walk through PbTe (mp-2201). Note user-contributed photovoltaic metrics. # # ### Exercise: # # What other computed properties are available for materials? Be sure to check out elemental materials. # ## 3. Design: (data-driven) structural substitutions # - material with piezoelectric properties: Use disambiguation page: `mp.org/PbTiO3`. # - Click 'Piezoelectricity' header. Click browser back button. # - Pb-free piezoelectric material? Click 'Edit Crystal' # - xtal toolkit. substitute Pb->Sr. Download file. MPComplete? # - xtal toolkit. detect likely oxidation states. go to structure predictor. # # ### Exercise: # # alpha cristobalite (SiO2, mp-6945) has negative Poisson ratio. Edit xtal, substitute Si with Ge. Submit to MPComplete. See also: [Computational prediction of new auxetic materials](https://doi.org/10.1038/s41467-017-00399-6). # ## 4. Characterize: Match on computed spectra # # # - Explore Si-N Si K-edge spectra. Upload experimental spectrum for silicon nitride alpha. Source: [https://eelsdb.eu/](https://eelsdb.eu/), a database with 17 K-edge X-ray absorption near-edge spectra (XANES). MP has ~50k computed K-edge XANES spectra. # - Explore Al-O Al K-edge spectra. Upload experimental spectrum for alumina alpha. # # ### Exercise: # # Try to match silicon nitride beta spectrum. Where does Si3N4 beta phase appear in the rankings?
lessons/website/website_notes_and_exercises.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Conda Configuration # + [markdown] slideshow={"slide_type": "subslide"} # ## Overview: # - **Teaching:** 10 min # - **Exercises:** 10 min # # **Questions** # - Why would I want to customise conda? # - How can I customise my conda configuration? # # **Objectives** # - Understand why you would want to customise conda configuration on Balena. # - Know how to customise conda configuration. # + [markdown] slideshow={"slide_type": "slide"} # ## Python module # # First we need to load the python module. There is a system version of Python but this does not have conda available by default. The recommended module has conda installed: # # ```bash # module load python3/2019.x # ``` # # Verify that you have loaded the module: # # ```bash # which python # /apps/intel/python3/2019.x/bin/python # ``` # + [markdown] slideshow={"slide_type": "slide"} # Now we can customise our `conda` configuration file, open it with `nano ~/.condarc` or your preferred editor and enter the following: # # ```bash # # To disable changing the terminal prompt # changeps1: false # # # Define location for my environments # # this should be change to a directory in your scratch space # # Run `pwd` in your home directory to get your root '/home/X/username' # envs_dirs: # - /home/q/rjg20/scratch/conda-env # # # # Location for my packages and cache # # as for env this should be change to a directory in your scratch space # pkgs_dirs: # - /apps/intel/python3/2019.x/pkgs # - /home/q/rjg20/scratch/pkgs # # # # Channels in priority to pull/search packages from # channels: # - intel # - defaults # ``` # + [markdown] slideshow={"slide_type": "slide"} # What are each of these settings doing? 
# # - `changeps1` modifies the prompt; this is a matter of preference, and for the demonstration your instructor may set this to `true` so that we can more easily follow what the commands are doing. # # - `envs_dirs` and `pkgs_dirs` set the location that conda will use to store environment details and packages. Conda allows us to specify specific versions of all the libraries we will need, even the Python version, and these can get large very quickly. Therefore we will use our `scratch` storage for our conda environments. While this storage is not backed up we will see later how we can create files which can be used to recreate our environment. This allows us to recreate our environment if required and share reproducible workflows by recording the specific version of libraries we used for our computation. # # - `channels` conda uses channels to provide packages, specifying `intel` first means that conda will check these first for intel optimised versions of libraries. # # Full documentation about how to use `.condarc` is available at: https://conda.io/projects/conda/en/latest/user-guide/configuration/use-condarc.html. # + [markdown] slideshow={"slide_type": "slide"} # ## Information: Why should you use the Intel Distribution of Python? # # **Close-to-Native Code Performance** and potential to use whole nodes without having to program in parallel.
# # The Intel Distribution for Python incorporates multiple libraries and techniques to bridge the performance gap between Python and equivalent functions written in C and C++ languages, including: # # Intel Math Kernel Library (Intel MKL) for BLAS and LAPACK # Intel MKL vector math library for universal functions (uMath) # Intel Data Analytics Acceleration Library (Intel DAAL) for machine learning and data analytics # Integration with Intel Advanced Vector Extensions (Intel AVX), a feature of Intel Xeon processors # Example: TensorFlow framework has been optimized using Intel Math Kernel Library for Deep Neural Networks (Intel MKL-DNN) primitives # + [markdown] slideshow={"slide_type": "slide"} # ## Key Points: # - Conda configuration is customised in the `.condarc` file # - You can customise features such as where conda environments are stored # - On Balena use the `intel` distribution as this will install libraries compiled to optimise performance
nbplain/01_conda_config.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.2 64-bit
#     language: python
#     name: python392jvsc74a57bd0aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49
# ---

# +
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Load the CCS table from the project SQLite database into a pandas DataFrame
and perform initial cleanup (drop all-NA rows, drop an unused column).

Created on Tue Apr 20 18:50:34 2021

@author: arpanganguli
"""
# import packages
import pandas as pd
from sqlalchemy import create_engine
import os

# import files: the database is expected one directory above the notebook's cwd
HOME = os.path.dirname(os.getcwd())
URL = 'sqlite:///' + os.path.join(HOME, 'Database/CCS.db')
engine = create_engine(URL, echo=True)  # sqlite:////absolute/path/to/file.db
# ALITER: engine = create_engine('sqlite:////Users/arpanganguli/Documents/Projects/SINE/Database/CCS.db', echo = True) # sqlite:////absolute/path/to/file.db

# initial query: pull the whole CCS table
query_string = "SELECT * FROM CCS"
df = pd.read_sql(sql=query_string, con=engine)
print(df.head())

df_working = df.copy(deep=True)  # deep copy to preserve original dataframe

# BUG FIX: DataFrame.dropna returns a NEW frame unless inplace=True; the
# original call discarded the result, so all-NA rows were never removed.
# Reassign to keep the cleaned frame.
df_working = df_working.dropna(how="all")  # delete rows whose values are all null/na
df_working.drop('Assembly Constituency Name', inplace=True, axis=1)
Code/.ipynb_checkpoints/Rough_Work-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: aas (ipykernel) # language: python # name: python3 # --- # # Visual Inspection of Motorcycle Connecting Rods # ### Abstract # Blob analysis is the set of processes that aims to extract specific features from scene objects, usually referred to as *blobs*. # \ # This project aims the extraction of many features from a set of connecting rods images. In particular, for each blob the following features are required: # # * <a href=#Classification>Classification/type of the rod (there are only 2 types)</a> # * <a href=#Orientation>Position and Orientation (modulo $\pi$)</a> # * <a href=#Length-and-Width>Length ($L$), Width ($W$).</a> # * <a href=#Width-at-Barycentre> Width at the barycenter ($W_B$)</a> # * <a href=#Centre-Position-and-Radius>For each hole, position of the centre and diameter size.</a> # # Also, many changes may affect the images: # * Images may contain other objects (i.e. screws and washers) that need not to be analysed by the system (such objects are often referred to as “distractors”) # * Rods can have contact points but do not overlap one to another. # * The inspection area may be dirty due to the presence of scattered <a href=#Iron-powder>iron powder</a>. # # ### This work has been done as a project for the *Computer Vision and Image Processing* course, University of Bologna (UNIBO) # ## Blob Analysis workflow # 1) Read image. # 2) Crop ROI (regions of interests: obtain the portions of image containing the objects we want to inspect. # 3) Binarization: isolate *FOREGROUND* and *BACKGROUND* pixels. # 4) Connected component labeling: separate and enumerate the objects to be analysed. # 5) Feature extraction img_path = 'img/TESI98.BMP' OUT_DIR = 'out/' # + # retrieve local name of the image (i.e. 
without path) img_name = img_path.split('/')[-1] # remove file extension src_name = "".join(img_name.split('.')[:-1]) # - # # 1. Read Image # Not much to say about it, isn't it? # + import cv2 import matplotlib.pyplot as plt import numpy as np gray = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) rgb = cv2.cvtColor(gray.copy(), cv2.COLOR_GRAY2RGB) # uncomment below lines to test the program on custom scales # SIZE_FACTOR = 4 # MUST BE INT #new_size = np.array(gray.shape)[::-1]*SIZE_FACTOR #gray = cv2.resize(gray, new_size) # - # # 2. Crop ROI # Not needed, those images are already a ROI :) # # 3. Binarization # Binarization according to Otsu's threshold. Otsu's algorithm returns the optimal threshold that maximizes the *inter-class variance*, an indicator that tells how well two classes (FOREGROUND and BACKGROUND) are separated by thresholding with a specific value. # \ # For further details, take a look at https://en.wikipedia.org/wiki/Otsu%27s_method # + tags=[] # Otsu's threshold is computed by OpenCV by passing the cv2.THRESH_OTSU flag th, binary_image = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) binary_image = np.asarray(binary_image/255, 'uint8') # normalize plt.subplot(1,2,1) plt.title("Original image") plt.imshow(gray, cmap='gray') plt.subplot(1,2,2) plt.title("Thresholded by Otsu's algorithm") plt.imshow(binary_image, cmap='gray') plt.show() # - # # Iron powder # The powder acts as salt-and-pepper noise, which can be dealt with using a *median filter*. High-density areas are filtered out by performing many iterations. # Median filter takes a set of pixel intensities as input and return the *median* value of the SORTED input. 
# \ # \ # So, assuming we have $x = [20, 8, 253, 90]$ as input and a sliding window of size $k = 3$, we will have # \ # $y = median\_filter(x)$, where \ # $y[1] = median[sort(20,20,8)] = median[8,20,20] = 20 # \\ # y[2] = median[sort(20,8,253)] = median[8,20,253] = 20 # \\ # y[3] = median[sort(8, 253, 90)] = median[8,90,253] = 90 # \\ # y[4] = median[sort(253, 90, 90)] = median[90,90,253] = 90$ # \ # So, accordin to that: $median\_filter(sort([20, 8, 253, 90]))$ (having kernel-size = 3) $= [20, 20, 90, 90]$ from scipy.ndimage import median_filter signal = np.array([[20, 8, 253, 90]]) print(f"median(sort({signal}))\t =\t {median_filter(signal, size=3)}") # A median filter is applied to an image by applying the ($k$ x $k$) sliding window on each pixel. # # Median filtering actually denoises without introducing significant blur. High-density areas are filtered out by performing many iterations. # # After several experiments i've seen that using a 3x3 kernel yields to a good filtering, since bigger kernels would delete some pixels among the border of the holes, if not wide enough. # + def remove_powder(src, ksize=3, max_iter=5): iterations = 0 median = src.copy() last_iteration = np.zeros(median.shape) # filtering stops as soon as the last # two filtered images are equal while not np.array_equal(median,last_iteration) and iterations < max_iter: iterations += 1 last_iteration = median.copy() median = median_filter(median, ksize) # median filtering # logical-AND between the filtered and the source binary image to # prevent from the median filter to fill regions aroung points # featuring high convexity defection median = src & median return median median = remove_powder(binary_image, 3) plt.title(f'Before-After filtering') plt.imshow(np.hstack((binary_image, median)), cmap='gray') plt.show() # - # # 4. 
Connected component labeling # Labeling consists on scanning the input binary image and check for the intensity of any foreground pixel's neighbourhood, according to either 4-connectivity or 8-connectivity. BACKGROUND = 0 # White FOREGROUND = 1 # Black def crop_object(img, label=FOREGROUND): # coords y, x = np.where(img == label)[:2] if label is not -1 else np.nonzero(img)[:2] # max and min coordinates x_min, y_min = np.min(x), np.min(y) x_max, y_max = np.max(x), np.max(y) # crop out = img[y_min:y_max+1, x_min:x_max+1].copy() return out def label_components(img): # lookup table has dynamic size and # will grow as new labels are found lookup_table = [0] # pad to prevent bad indexing errors pad = 5 labeled_image = np.pad(img, pad) # new_label counter current_label = 0 H, W = labeled_image.shape for i in range(H): for j in range(W): lx = BACKGROUND # check if foreground if labeled_image[i, j] == FOREGROUND: # take labels of the upper and left-sided px of [i, j] lp = labeled_image[i-1, j] lq = labeled_image[i, j-1] if lp == BACKGROUND and lq == BACKGROUND: # new connected component found current_label += 1 lx = current_label lookup_table.append(current_label) elif lp != lq and lp != BACKGROUND and lq != BACKGROUND: # both left and upper pixels are foreground, but # with different labels. 
# equivalence between lp and lq has to be handled lx = lq lp_label = lookup_table[lp] # handling equivalence for k in range(len(lookup_table)): if lookup_table[k] == lp_label: lookup_table[k] = lookup_table[lq] elif lq != BACKGROUND: lx = lq elif lp != BACKGROUND: lx = lp labeled_image[i, j] = lx # mapping labels labeled_image = labeled_image[pad:-pad, pad:-pad] labeled_image = np.reshape([lookup_table[k] for k in labeled_image.flatten()], img.shape) unique_labels = np.unique(labeled_image) unique_labels = unique_labels[unique_labels != BACKGROUND] # cutting off the background label return labeled_image, unique_labels labeled_components, labels = label_components(median) # Since labels(e.g. Intensities of each blob) could have low contrast, intensities are reverted and gray-scale labels are turned into RGB ones, just to enhance different blobs (different blobs have different labels/intensities), this won't affect feature detections. # + import random as rng def enhance_contrast(labeled_components, labels, revert=True): src = labeled_components.copy() src = np.stack((src,)*3, axis=-1) # gray-scale to RGB # assign a random RGB label to a grey-scaled one for l in labels: rgb_label = [rng.randint(0,256), rng.randint(0,256),rng.randint(0,256)] src = np.where(src == l, rgb_label, src) if revert: src = [255, 255, 255] - src return src after_enhance = enhance_contrast(labeled_components, labels) before_enhance = np.stack((median*255,)*3, axis=-1) res = np.hstack((before_enhance, after_enhance)) # stack images side-by-side plt.imshow(res) plt.show() # - # ### Blob separation # just cropping the original image into several windows, one for each blob. 
# + def show_blob(src, crop=True): img = crop_object(src) if crop else src.copy() plt.imshow(img, cmap='gray') plt.show() def show_many_blobs(lst, crop=True, inline=True): if inline: for i, blob in enumerate(lst): img = crop_object(blob) if crop else blob plt.subplot(1, len(lst), i+1) plt.title("Blob "+str(i+1)) plt.imshow(img, cmap='gray') plt.show() else: [show_blob(b, crop) for b in lst] def separate_blob(img, label=FOREGROUND, crop=True): if crop: out = crop_object(img, label) else: out = img.copy() # any px that does not belong to the blob is set to zero out[out != label] = 0 # if we don't do so, a portion of another blob may occur inside the crop return (out/label).astype('uint8') # - blobs = [separate_blob(labeled_components, l, crop=False) for l in labels] show_many_blobs(blobs, crop=True) # # 5. Feature Extraction # Before extracting any feature, we need to: # * <a href=#Remove-Contact-Points>Remove contact points</a> # * <a href=#Screw-detection>Remove screws</a> # * <a href=#Remove-washers>Remove washers</a> # ### Obtain contours # Contours will be used as a basis to extract any feature from the images, since they encodes every relevant information about them. # # findContours() -> contours, hierarchy. See https://docs.opencv.org/4.5.3/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e0 # # For each i-th contour "contours[i]", the elements hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices in contours of the next and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. If for the contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative. 
# + def get_contours(blobs): # findContours returns (nested_contours, hierarchy) contour_list = [cv2.findContours(blob, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) for blob in blobs] cnts = [c[0] for c in contour_list] # i-th contour is external if the 4-th column of hierarchies is "-1" ext_cnts = [cnt[0] for cnt in cnts] # i-th contour is an hole if the 3-th column of hierarchies is "-1" hole_cnts = [cnt[1:] for cnt in cnts] return ext_cnts, hole_cnts ext_cnts, hole_cnts = get_contours(blobs) # + for i in range(len(blobs)): plt.subplot(1, len(blobs), i+1) img = cv2.cvtColor(blobs[i].copy()*255, cv2.COLOR_GRAY2RGB) cv2.drawContours(img, ext_cnts[i], -1, [255, 0, 0], thickness=2) cv2.drawContours(img, hole_cnts[i], -1, [255, 0, 0], thickness=2) plt.imshow(crop_object(img, label=255), cmap='gray') plt.show() # - # # Remove Contact Points # Contacts between rods prevents us from analyzing rods, since the labeling process would mark many connecting rods as just one, therefore they must be removed. # # Let us define the convex hull of a shape as the smallest convex object that contains it and is entirely described by a set of vertexes. # # Then, for any side of the convex hull(described by a couple of vertexes A and B), a possible contact point is found as the one featuring maximum distance between the side of the hull and the n-point belonging to the contour of the object and delimited by A and B. # # - ##### $Possible Contact Points = \{ \underset{p} \argmax\ distance(p, L),\ p\in [A, B] \}_{L=line(A, B)\in ConvexHull}$ # # Pruning weak possible contact points is mandatory, since we'll end up having an occurrence for EACH side of the convex hull. Therefore, thresholding by considering nonzero-pizels within the neighbourhood(5x5) of such points provides to eliminate unwanted points. 
# - ##### $p_i$, i-th Possible contact point # - ##### Number of foreground px within a k-Neighbourhood: $N_k(p_i)$ # - ##### $L$, length of $Possible Contact Points$ # - ##### Threshold:$\ T = 0.7 \cdot k^2$, with $k$ being the kernel size # # Optimal contact points are defined as follow: # - ##### $ContactPoints$ : $\{p_i \in Possible Contact Points\ |\ N_5(p_i) > T\}$ def find_defect_points(cnt): pts = cnt.copy() epsilon = 0.01*cv2.arcLength(pts,True) pts = cv2.approxPolyDP(pts, epsilon, True) # convex hull object hull_defects = cv2.convexHull(pts, returnPoints=False) defect_vec = cv2.convexityDefects(pts, hull_defects) # returns (start_index, end_index, def_point_index, defpoint_hull_distance) if defect_vec is None: # convex objects have no defect_points return np.array([]) def_points = np.take(pts.squeeze(1), defect_vec.squeeze(1)[:, 2], axis=0) return def_points # + def threshold_fn(tolerance, k=5): threshold = k**2 * tolerance return threshold def get_neighbour(src, pt, size=5): window_size = size//2 # pad to prevent bounding errors template = np.pad(src, window_size) # centre coordinates x, y = pt + window_size # shift by the padding factor to refine centre neighbourhood = template[y-window_size:y+window_size+1, x-window_size:x+window_size+1].copy() return neighbourhood DEF_POINTS_NONZERO_PX_TOLERANCE = .7 def prune_defect_points(src, pts, k=5): # Get k x k neighbourhoods of such points neighbourhoods = np.asarray([get_neighbour(src, p, k) for p in pts]) # count nonzero pixels nonzero_pixels = neighbourhoods.sum(axis=1).sum(axis=1) threshold = threshold_fn(DEF_POINTS_NONZERO_PX_TOLERANCE, k) good_points_mask = nonzero_pixels > threshold good_ones = pts[good_points_mask] bad_ones = pts[~good_points_mask] return good_ones, bad_ones # + from sklearn.neighbors import KDTree def find_closest_points(points): if (len(points) < 2): return np.array([]) kdt = KDTree(points, leaf_size=30, metric='euclidean') distances, ind = kdt.query(points, k=2) result = [] dist = 
distances[:, 1].copy() # at each iteration, the two closest points # are paired and removed from the data structure. while dist.min() < float('inf') and len(dist)>=2: p1_idx = np.argmin(dist) p2_idx = ind[p1_idx, 1] p1 = points[p1_idx] p2 = points[p2_idx] result.append([p1, p2]) dist[p1_idx] = float('inf') dist[p2_idx] = float('inf') return np.array(result) # - def remove_contact_points(src, cont_points): checking = src.copy() # draw background lines between each pair of contact points [cv2.line(checking, couple[0], couple[1], BACKGROUND, lineType=cv2.LINE_8, thickness=1) for couple in cont_points] # labeling components, labs = label_components(checking.astype(np.int32)) separated_blobs = [separate_blob(components, l, crop=False) for l in labs] return separated_blobs def show_defect_points(src, good_ones, bad_ones): template = src.copy() BAD_INTENSITY = [255,0,0] GOOD_INTENSITY = [0,0,255] test_img = np.dstack((template.copy()*255,)*3).astype(np.int32) # Gray(1 channel) to RGB (3 channels) [cv2.circle(test_img, c, 3, BAD_INTENSITY, -1) for c in bad_ones] # red circles enhance unwanted contact points [cv2.circle(test_img, c, 3, GOOD_INTENSITY, -1) for c in good_ones] # blue circles enhance strong contact points # printing show_blob(test_img, crop=False) # + bad_blobs_indices = [] OUTLIER_THRESHOLD = 0.01 for i, b in enumerate(blobs): # Obtain defect points defect_points = find_defect_points(ext_cnts[i]) # Prune weak defect points if len(defect_points) > 0: good_def_points, bad_def_points = prune_defect_points(b.astype(np.int32), defect_points) # bad points are also keeped to be printed # Blob is analysed if at least one good defect point is found condition = len(defect_points) > 0 and good_def_points.shape[0] >= 2 if condition: show_defect_points(b, good_def_points, bad_def_points) bad_blobs_indices.append(i) # pair contacts points contact_points = find_closest_points(good_def_points) # Remove contacs new_blobs = np.asarray(remove_contact_points(b, contact_points)) 
# Image reconstruction. # If a newly found blob has a very low number of points compared # the others, it means that it's a part of another blob which was cut # away during the detach process and it's marked as "outlier" nonzero_pixels = new_blobs.sum(1).sum(1) mask = nonzero_pixels/nonzero_pixels.mean() > OUTLIER_THRESHOLD outliers = new_blobs[~mask] new_blobs = new_blobs[mask] # Draw a FOREGROUND line between contacts on each SEPARATED blob. [[cv2.line(sb, cp[0], cp[1], FOREGROUND) for i,cp in enumerate(contact_points)] for sb in new_blobs] # If any outlier(a small appendix of a blob) is found, it means that a portion of a blob # was cut during the separation (BACKGROUND line drawing) and it has to be rejoined to its parent. # So, every of those are drawn (logical OR) into EVERY other new-blob, # but this creates an outlie region if the new-blob had not that portion of pixels. # To solve this, a median filter is applied to every new-blob. if np.any(~mask): # Draw outliers into new blobs for o in outliers: new_blobs = new_blobs | o # median filtering new_blobs = np.array([remove_powder(sb) for sb in new_blobs]) # Get new contours new_ext_cnts, new_hole_cnts = get_contours(new_blobs) # Append new features relative to the new(separated) blobs blobs = np.vstack((blobs, new_blobs)) #ext_cnts = ext_cnts.tolist() [ext_cnts.append(nb) for nb in new_ext_cnts] ext_cnts = np.array(ext_cnts, dtype=object) #hole_cnts = hole_cnts.tolist() [hole_cnts.append(nb) for nb in new_hole_cnts] hole_cnts = np.array(hole_cnts, dtype=object) for i in bad_blobs_indices: # Delete features relative to the deleting blob blobs = np.delete(blobs, i, axis=0) ext_cnts = np.delete(ext_cnts, i, axis=0) hole_cnts = np.delete(hole_cnts, i, axis=0) show_many_blobs(blobs) # - # ### Screw detection # A screw is an object with no (holes) inner contours, so by exploiting the hierarchy given by findCountours() we find and remove any object that does not present at least an hole (child contour). 
# Keep only blobs that own at least one hole contour: screws have none,
# so everything without a child contour is dropped here.
nonscrew_indices = np.where([len(hole_cnt) > 0 for hole_cnt in hole_cnts])[0]
hole_cnts = np.take(hole_cnts, nonscrew_indices, axis=0)
ext_cnts = np.take(ext_cnts, nonscrew_indices, axis=0)
blobs = np.take(blobs, nonscrew_indices, axis=0)

# ## Remove washers
# To identify a washer we determine how much an object's shape is *circular* and then cut any of them having high circularity factor. \
# Let's introduce the Barycentre first, which is needed to compute the Circularity (we'll also need it later).\
# The position of the barycentre is the sum of pixel's coordinates belonging to the same component, divided by its area

# - ##### $B =\begin{bmatrix} i_b\\j_b \end{bmatrix} = \frac{1}{A} \begin{bmatrix} \sum_{p\in Component}^{}i_p \\ \sum_{p\in Component}^{}j_p \end{bmatrix}$
#
# The above formula can be applied to contours instead of the whole component, thus we treat the number of (contour)points as the area

# +
def compute_centroid(points):
    """Return the centroid of an (N, 2) point array, rounded to integer coordinates.

    The point count stands in for the area in the barycentre formula above.
    """
    area = len(points)
    sum_coords = points.sum(0)  # column-wise sum of coordinates
    return np.round(sum_coords/area).astype(int)

# np.argwhere yields (row, col) pairs; np.flip swaps them to (x, y) order
# before computing one centroid per blob.
barycentres = np.array([compute_centroid(np.flip(np.argwhere(blobs[i]), axis=1)) for i in range(len(blobs))])
# -

# #### Haralick Circularity
# Let us define:
# - ##### $\mu = \frac{1}{N}\sum_{k=1}^{N}d_k$
# - ##### $\sigma^2 = \frac{1}{N}\sum_{k=1}^{N}(d_k - \mu)^2$
# - ##### $d_k = \left \| p_k - B \right \|, p_k = \begin{bmatrix} i_k\\ j_k\end{bmatrix}$
#
# $\mu$ is the mean distance between contour points and the barycentre\
# $\sigma$ is the standard deviation of such distances\
# $N$ is the number of pixels belonging to the contour\
# $d_k$ is the distance between the k-th contour point and the barycentre\
#
# Haralick's Circularity is defined as follows:
# - ##### $C = \frac{\mu}{\sigma}$
#
# So the feature gets bigger as the standard deviation gets smaller.
# + def haralick_circularity(cnt, barycentre): n = len(cnt) distances = np.array([np.linalg.norm(p - barycentre) for p in cnt]) mu = distances.sum()/n sigma_square = np.sum((distances - mu) ** 2)/n return mu/np.sqrt(sigma_square) circularities = np.array([haralick_circularity(cnt, barycentres[i]) for (i, cnt) in enumerate(ext_cnts)]).round(2) # - # #### Blobs featuring high circularity(i.e. washers) are filtered out # + CIRCULARITY_THRESHOLD = 3.0 # indices of blobs featuring low circularity (i.e. rods) indices = np.where(circularities <= CIRCULARITY_THRESHOLD)[0] blobs = np.take(blobs, indices, axis=0) ext_cnts = np.take(ext_cnts, indices, axis=0) hole_cnts = np.take(hole_cnts, indices, axis=0) barycentres = np.take(barycentres, indices, axis=0) # - # ### Printing remaining blobs show_many_blobs(blobs) # # Orientation # The orientation, i.e. the angle between major-axis and horizontal-axis, has been computed using the covariance matrix and seeking for eigenvector associated with the highest eigenvalue, since it is aligned with the major axis. 
# +
def compute_orientation(points):
    """Return the angle (radians) between a point cloud's major axis and the horizontal axis.

    The eigenvector of the covariance matrix associated with the largest
    eigenvalue is aligned with the major axis; its argument gives the angle.
    """
    coords = points.copy()
    # Obtain covariance matrix
    cov = np.cov(coords.T)
    # Look for the eigenvectors and eigenvalues
    evals, evecs = np.linalg.eig(cov)
    sort_indices = np.argsort(evals)[::-1]
    e1_i, e1_j = evecs[:, sort_indices[0]]  # Eigenvector associated with the largest eigenvalue
    # argument of the eigenvector (minus sign: image rows grow downwards)
    theta = -np.arctan((e1_i)/(e1_j))  # angle
    return theta

orientations_rad = np.expand_dims([compute_orientation(np.flip(pts.squeeze(1), axis=1)) for i, pts in enumerate(ext_cnts)], -1)
orientations_rad = np.where(orientations_rad > 0, orientations_rad, np.pi + orientations_rad)  # let the orientation be modulo pi

# along major axis (degrees)
orientations = orientations_rad*180/np.pi
# along minor axis (add 90 degrees)
orientations_min_rad = orientations_rad + np.pi/2
orientations_min = orientations_min_rad * 180/np.pi
# -

# # Length and Width
# To find out the size of the MER(minimum oriented rectangle) we take the contour points of each blob and rotate it, according to the orientation found above, to let contours assume a *vertical* position

# + tags=[]
def rotate(points, angle, centre):
    """Rotate an (N, 2) point set by `angle` radians around `centre`.

    After rotation the contour is re-rasterised with cv2.drawContours and the
    set of points is re-extracted, because integer rounding may leave small
    gaps ("opened" areas) in the rotated contour.
    """
    pts = points.copy()
    # build rotation matrix
    s, c = np.sin(angle), np.cos(angle)
    rot_matrix = np.array([[c, -s], [s, c]])
    # rotate points around the given centre
    new_coords = (pts - centre) @ rot_matrix + centre
    new_coords = new_coords.round().astype(np.int32)
    # Due to the rounding operation, the figure described by the new set of points
    # may be "opened" in some areas, therefore we exploit the drawContours function
    # to "close" those regions.
    before_fill = new_coords.shape[0]
    template = np.zeros((new_coords[:, 1].max()+1, new_coords[:, 0].max()+1))  # blank canvas sized to fit the contour
    img = cv2.drawContours(template.copy(), [new_coords], -1, 1, lineType=cv2.LINE_8)
    # re-extract the (x, y) points of the rasterised contour
    new_coords = np.flip(np.argwhere(img), axis=1)
    after_fill = new_coords.shape[0]
    filled_points = after_fill - before_fill
    print(f"Filled with {filled_points} point{'s' if filled_points > 1 else ''}")
    return new_coords

rotation_angles = np.pi / 2 - orientations_rad  # angle between major axis and vertical axis
rotation_angles = np.squeeze(rotation_angles, axis=1)
rotated_ext_cnts = [rotate(pts.squeeze(1), rotation_angles[i], barycentres[i]) for i,pts in enumerate(ext_cnts)]
# -

# ### Show rotated contours

# +
rotated_contours_img = rgb.copy()
for i, cnt in enumerate(rotated_ext_cnts):
    # draw each vertically-aligned contour in red over the original image
    cnt = np.expand_dims(cnt,1)
    cv2.drawContours(rotated_contours_img, cnt, -1, [255, 0, 0], lineType=cv2.LINE_8)

plt.imshow(rotated_contours_img)
plt.show()
# -

# Then, the length($L$) is computed as the difference between the maximum and minimum value along vertical-axis, as is for the width($W$) along horizontal axis.
# $L = \max{i} - \min{i}$ \
# $W = \max{j} - \min{j}$

measures = np.array([[cnt[:, 1].max() - cnt[:, 1].min(), cnt[:, 0].max() - cnt[:, 0].min()] for cnt in rotated_ext_cnts]).astype('int32')
lengths = measures[:, 0]
widths = measures[:, 1]

# # <span id="Classification">Classification</span>
# Rods are classified by their number of holes, which can be found by counting the number of internal contours of each blob.

# +
rod_classes = ['A', 'B']  # A: one hole, B: two holes
classes = np.array([rod_classes[len(int_cnt)-1] for int_cnt in hole_cnts])
# -

# # Centre Position and Radius of Holes
# Radius has been computed as the mean distance between (inner)contour points and their centre(e.g.
barycentre)

# #### $r = \frac{1}{N}\sum_{k=1}^{N}\left \| p_k - B \right \|$

# +
def radius(points):
    """Return (radius, centre) of a hole given its (N, 2) contour points.

    The radius is the mean distance between the contour points and their
    centroid, rounded to an integer.
    """
    bary = compute_centroid(points)
    distances = np.linalg.norm(points - bary, axis=1)
    radius = distances.mean().round().astype(np.int32)
    # hole barycentres will be used to draw the diameter later
    return radius, bary.round().astype(int)

data = np.array([[radius(pts.squeeze(1)) for pts in holes] for holes in hole_cnts], dtype=object)
radiuses = [[x[0] for x in blob] for blob in data]
hole_centres = np.array([[x[1] for x in blob] for blob in data])
# -

# # Width at Barycentre
#
# We exploit the rotated contours computed a few steps ago. In fact, the $W_B$ can be obtained by just measuring the distance between the points in the same *row* of the barycentre of the vertically-aligned contour.

# +
def barycentre_width(points):
    """Return the blob width measured at the barycentre of a vertically-aligned contour."""
    pts = points.copy()
    # centroid has to be computed for the rotated blob
    bary = compute_centroid(pts)
    _, y_bary = bary
    # second column of the points that
    # represents vertical components
    q = pts[:, 1]
    # indices of points lying in the same
    # horizontal line of the barycentre
    indices = np.nonzero(q == y_bary)[0]
    coords = np.take(pts, indices, axis=0)  # points coordinates
    # depending on the connectivity (4 or 8),
    # more than 2 points may be found, so we take the
    # WB as the mean doubled-distance between points
    # found above and the barycentre
    distances = np.linalg.norm(coords - bary, axis=1)
    wb = (distances*2).mean().round().astype(int)
    return wb

wbs = [barycentre_width(rotated_ext_cnts[i]) for i in range(len(blobs))]
# -

# # Drawing
# In this section, images are prepared to be printed out: the minimum oriented rectangle(MER) and the diameter of each inner-hole will be drawn within the blob to improve readability, other features will be simply written in.
# +
def compute_axes(angle, bary):
    """Return the (a, b, c) implicit-line coefficients of the major and minor axes.

    Both axes pass through the barycentre `bary`; `angle` is the blob
    orientation in radians.
    """
    alpha = np.sin(angle)
    beta = np.cos(angle)
    j, i = bary
    major_axis = (alpha, -beta, beta*i - alpha*j)
    minor_axis = (beta, alpha, -beta*j - alpha*i)
    return major_axis, minor_axis

axes = np.array([np.vstack(compute_axes(orientations_rad[i][0], barycentres[i])) for i in range(len(blobs))])

# +
def box_points(points, major_axis, minor_axis):
    """Return (extreme_points, MER_vertexes) for a contour given its two axes.

    The four contour points with extreme signed distance from the major and
    minor axes define four lines parallel to the axes; intersecting those
    lines yields the vertexes of the minimum oriented rectangle (MER).
    """
    def signed_distance(point, line):
        # signed point-to-line distance for a line in implicit form a*x + b*y + c = 0
        a,b,c = line
        i, j = point
        d = (a*j +b*i + c)/np.sqrt(a**2 + b**2)
        return d

    def max_distance_points(points, major, minor):
        # scan the contour for the four points with extreme signed distances
        pts = points.copy()
        dMAmin = float('inf')
        dMAmax = float('-inf')
        dMImin = float('inf')
        dMImax = float('-inf')
        for i, p in enumerate(pts):
            dMA = signed_distance(p, major)
            dMI = signed_distance(p, minor)
            if dMA < dMAmin: dMAmin = dMA; c1 = p.copy()  # point having minimum (signed) from major-axis
            if dMA > dMAmax: dMAmax = dMA; c2 = p.copy()  # point having maximum (signed) from major-axis
            if dMI < dMImin: dMImin = dMI; c3 = p.copy()  # point having minimum (signed) from minor-axis
            if dMI > dMImax: dMImax = dMI; c4 = p.copy()  # point having maximum (signed) from minor-axis
        return np.vstack((c1, c2, c3, c4))

    max_dist_pts = max_distance_points(points, major_axis, minor_axis)
    # define axes parameters
    a, b, _ = major_axis
    a2, b2, _ = minor_axis
    b = -b
    b2 = -b2
    p1, p2, p3, p4 = max_dist_pts
    # define points components
    j1, i1 = p1; j2, i2 = p2; j3, i3 = p3; j4, i4 = p4
    # define 3rd parameter of each of the two lines parallel to the
    # major and minor axis. Starting from axis equation, they differ
    # by the constant value (c) only and it is calculated according
    # to the coordinates of the points computed above (p1, p2, p3, p4)
    cw1 = -(a2*j1 + b2*i1)  # line w1: parallel to minor axis through p1
    cw2 = -(a2*j2 + b2*i2)  # line w2: parallel to minor axis through p2
    cl1 = -(a*j3 + b*i3)  # line l1: parallel to major axis through p3
    cl2 = -(a*j4 + b*i4)  # line l2: parallel to major axis through p4
    # intersections yield the vertexes of the oriented MER
    i_v1 = (a2*cl1 - a*cw1); j_v1 = (b*cw1 - b2*cl1)  # w1 V l1
    i_v2 = (a2*cl1 - a*cw2); j_v2 = (b*cw2 - b2*cl1)  # w2 V l1
    i_v3 = (a2*cl2 - a*cw1); j_v3 = (b*cw1 - b2*cl2)  # w1 V l2
    i_v4 = (a2*cl2 - a*cw2); j_v4 = (b*cw2 - b2*cl2)  # w2 V l2
    vertexes = np.vstack([
        [j_v2, i_v2],
        [j_v1, i_v1],
        [j_v3, i_v3],
        [j_v4, i_v4]]) / (a*b2 - b*a2)
    vertexes = vertexes.round().astype(np.int32)
    return max_dist_pts, vertexes

data = [box_points(ext_cnts[i].squeeze(1), axes[i][0], axes[i][1]) for i in range(len(blobs))]
max_dist_points = np.array([d[0] for d in data])
box_vertexes = np.array([d[1] for d in data])
# -

# ### Minimum Oriented Rectangle
# Minimum oriented rectangle is drawn according to the vertexes found above.
# Every blob is wrapped inside a bigger empty image to improve the visibility of the MER itself. \
# Barycentres (red dots) and points having max/min signed distance between axes (blue dots) are also drawn in this section.
def draw_rect(src, vertexes, centre, angle, mer_index=1):
    """Draw a blob's MER, orientation arrows, index label and centroid on `src` (in place)."""
    box = src
    # draw MER vertexes
    cv2.drawContours(box, [vertexes], -1, [255, 0, 0], thickness=1)
    # direction arrows
    distance = 30
    p1 = centre + distance*np.array([np.cos(angle), -np.sin(angle)])  # arrow along major axis
    p2 = centre + distance/2*np.array([np.cos(angle+np.pi/2), -np.sin(angle+np.pi/2)])  # arrow along minor axis
    p1 = p1.round().astype(np.int32)
    p2 = p2.round().astype(np.int32)
    cv2.arrowedLine(box, centre, p1, [0,255,0], tipLength=.25)
    cv2.arrowedLine(box, centre, p2, [0,255,0], tipLength=.25)
    # blob index, written just above the MER's second vertex
    cv2.putText(src, f"{mer_index}", vertexes[1] + [0, -5], cv2.FONT_HERSHEY_SIMPLEX, 0.80, [255, 0, 0])
    # centroid
    cv2.circle(box, centre, 1, [255, 0, 0], thickness=2)
    return box

outcome = rgb.copy()
[draw_rect(outcome, box_vertexes[i], barycentres[i], angle[0], mer_index=i+1) for i,angle in enumerate(orientations_rad)];

plt.imshow(outcome)
plt.show()

# Utility function: given a source image and a set of points, representing the contour of a hole, draws a red diameter arrowed line along the diameter and a text that underlines its length

def draw_diameter(src, points, draw_text=True, padding=0, hole_index=1):
    """Draw a hole's diameter (double-headed red arrow), centre dot and labels on `src` (in place)."""
    rad, centre = radius(points)
    cx, _ = centre
    min_x = points[:, 0].min()
    max_x = points[:, 0].max()
    p1 = centre - [cx - min_x - 1, 0] + padding  # starting point
    p2 = centre + [max_x - cx - 1, 0] + padding  # ending point
    text_point = centre + [-4, 10] + padding
    index_text_point = centre + [-2, -6] + padding
    # two opposite arrows give a double-headed diameter line
    cv2.arrowedLine(src, p1, p2, [0, 0, 255], tipLength=0.15)  # tipLength = arrow length
    cv2.arrowedLine(src, p2, p1, [0, 0, 255], tipLength=0.15)
    cv2.circle(src, centre, 2, [0, 255, 255], thickness=-1)
    if draw_text:
        cv2.putText(src, f"{2*rad}",text_point, cv2.FONT_HERSHEY_SIMPLEX, 0.20, [0, 0, 255])
        cv2.putText(src, f"{hole_index}",index_text_point, cv2.FONT_HERSHEY_SIMPLEX, 0.20, [0, 0, 255])
    return src

# Drawing every diameter
[[draw_diameter(outcome, holes.squeeze(1), hole_index=i+1) for i, holes in enumerate(pts)] for pts in hole_cnts];

plt.imshow(outcome)
plt.show()

# Collect per-blob features into a dict (keyed by 1-based blob index) and print a summary.
outstruct = dict()
for i, b in enumerate(blobs):
    # datastruct
    tmp = dict()
    tmp['class'] = classes[i]
    tmp['angle'] = orientations_rad[i][0].round(3)
    tmp['centre'] = barycentres[i].tolist()
    tmp['length'] = int(lengths[i])
    tmp['width'] = int(widths[i])
    tmp['widthB'] = int(wbs[i])
    tmp['holes'] = [{'centre':hole_centres[i][x].tolist(), 'diameter':int(r*2)} for x,r in enumerate(radiuses[i])]
    outstruct[i+1] = tmp
    # printing
    print("-"*25,i+1,"-"*25)
    print(f"Class: {classes[i]}, Angle: {orientations_rad[i][0].round(3)} rad, Centre: {barycentres[i]}")
    print(f"Length: {lengths[i]}, Width: {widths[i]}, Width at barycentre: {wbs[i]}")
    [print(f"Hole {x+1}: Centre = {hole_centres[i][x]}, Diameter = {r*2}") for x,r in enumerate(radiuses[i])]

# +
import json

# save output json
with open(OUT_DIR+src_name+'_OUT.json', "w") as outjs:
    json.dump(outstruct, outjs, indent=4)

# save output image (RGB -> BGR flip for cv2.imwrite)
cv2.imwrite(OUT_DIR+src_name+'_OUT.BMP', np.flip(outcome, axis=2));
# -
Rods.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## About
#
# In this competition, participants are required to predict `FVC` and its **_`Confidence`_**.
# Here, I trained Lightgbm to predict them at the same time by utilizing custom metric.
#
# Most of codes in this notebook are forked from @yasufuminakama 's [lgbm baseline](https://www.kaggle.com/yasufuminakama/osic-lgb-baseline). Thanks!

# ## Library

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import os
import operator
import typing as tp
from logging import getLogger, INFO, StreamHandler, FileHandler, Formatter
from functools import partial

import numpy as np
import pandas as pd
import random
import math

from tqdm.notebook import tqdm

import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold
from sklearn.metrics import mean_squared_error
import category_encoders as ce

from PIL import Image
import cv2
import pydicom

import torch
import lightgbm as lgb
from sklearn.linear_model import Ridge

import warnings
warnings.filterwarnings("ignore")
# -

# ## Utils

# + _kg_hide-input=true
def get_logger(filename='log'):
    """Return a logger that writes to both stdout and `<filename>.log`.

    NOTE: repeated calls keep adding handlers to the same module logger;
    this notebook calls it exactly once.
    """
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    # Fix: the `filename` argument was previously ignored (the f-string had
    # no placeholder), so the log always went to a hard-coded file.
    handler2 = FileHandler(filename=f"{filename}.log")
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger

logger = get_logger()

def seed_everything(seed=777):
    """Seed python, numpy and torch RNGs for reproducibility."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
# -

# ## Config

# +
OUTPUT_DICT = './'

ID = 'Patient_Week'
TARGET = 'FVC'
SEED = 42
seed_everything(seed=SEED)

N_FOLD = 4
# -

# # Data Loading

# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
train = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/train.csv')
# unique row id: one record per patient per week
train[ID] = train['Patient'].astype(str) + '_' + train['Weeks'].astype(str)
print(train.shape)
train.head()

# +
# construct train input: every (base week, predict week) pair per patient,
# with the base-week measurements renamed to base_* columns
output = pd.DataFrame()
gb = train.groupby('Patient')
tk0 = tqdm(gb, total=len(gb))
for _, usr_df in tk0:
    usr_output = pd.DataFrame()
    for week, tmp in usr_df.groupby('Weeks'):
        rename_cols = {'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', 'Age': 'base_Age'}
        tmp = tmp.drop(columns='Patient_Week').rename(columns=rename_cols)
        drop_cols = ['Age', 'Sex', 'SmokingStatus', 'Percent']
        _usr_output = usr_df.drop(columns=drop_cols).rename(columns={'Weeks': 'predict_Week'}).merge(tmp, on='Patient')
        _usr_output['Week_passed'] = _usr_output['predict_Week'] - _usr_output['base_Week']
        usr_output = pd.concat([usr_output, _usr_output])
    output = pd.concat([output, usr_output])

# drop pairs predicting the base week itself
train = output[output['Week_passed']!=0].reset_index(drop=True)
print(train.shape)
train.head()

# +
# construct test input
test = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/test.csv')\
    .rename(columns={'Weeks': 'base_Week', 'FVC': 'base_FVC', 'Percent': 'base_Percent', 'Age': 'base_Age'})

submission = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/sample_submission.csv')
submission['Patient'] = submission['Patient_Week'].apply(lambda x: x.split('_')[0])
submission['predict_Week'] = submission['Patient_Week'].apply(lambda x: x.split('_')[1]).astype(int)
test = submission.drop(columns=['FVC', 'Confidence']).merge(test, on='Patient')
test['Week_passed'] = test['predict_Week'] - test['base_Week']
print(test.shape)
test.head()
# -

submission = pd.read_csv('../input/osic-pulmonary-fibrosis-progression/sample_submission.csv')
print(submission.shape)
submission.head()

# # Prepare folds

folds = train[[ID, 'Patient', TARGET]].copy()
#Fold = KFold(n_splits=N_FOLD, shuffle=True, random_state=SEED)
# group by patient so no patient leaks across folds
Fold = GroupKFold(n_splits=N_FOLD)
groups = folds['Patient'].values
for n, (train_index, val_index) in enumerate(Fold.split(folds, folds[TARGET], groups)):
    folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
folds.head()

# ## Custom Objective / Metric
#
# The competition evaluation metric is:
#
# $
# \displaystyle \sigma_{clipped} = \max \left ( \sigma, 70 \right ) \\
# \displaystyle \Delta = \min \left ( \|FVC_{true} - FVC_{predicted}\|, 1000 \right ) \\
# \displaystyle f_{metric} = - \frac{\sqrt{2} \Delta}{\sigma_{clipped}} - \ln \left( \sqrt{2} \sigma_{clipped} \right) .
# $
#
# This is too complex to directly optimize by custom metric.
# Here I use negative log-likelihood loss (_NLL_) of gaussian.
#
# Let $FVC_{true}$ be $t$ and $FVC_{predicted}$ be $\mu$, then the _NLL_ $l$ is formulated by:
#
# $
# \displaystyle l\left( t, \mu, \sigma \right) =
# -\ln \left [ \frac{1}{\sqrt{2 \pi} \sigma} \exp \left \{ - \frac{\left(t - \mu \right)^2}{2 \sigma^2} \right \} \right ]
# = \frac{\left(t - \mu \right)^2}{2 \sigma^2} + \ln \left( \sqrt{2 \pi} \sigma \right).
# $
#
# `grad` and `hess` are calculated as follows:
#
# $
# \displaystyle \frac{\partial l}{\partial \mu } = -\frac{t - \mu}{\sigma^2} \ , \ \frac{\partial^2 l}{\partial \mu^2 } = \frac{1}{\sigma^2}
# $
#
# $
# \displaystyle \frac{\partial l}{\partial \sigma}
# =-\frac{\left(t - \mu \right)^2}{\sigma^3} + \frac{1}{\sigma} = \frac{1}{\sigma} \left\{ 1 - \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
# \\
# \displaystyle \frac{\partial^2 l}{\partial \sigma^2}
# = -\frac{1}{\sigma^2} \left\{ 1 - \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
# +\frac{1}{\sigma} \frac{2 \left(t - \mu \right)^2 }{\sigma^3}
# = -\frac{1}{\sigma^2} \left\{ 1 - 3 \left ( \frac{t - \mu}{\sigma} \right)^2 \right \}
# $

# For numerical stability, I replace $\sigma$ with $\displaystyle \tilde{\sigma} := \log\left(1 + \mathrm{e}^{\sigma} \right).$
#
# $
# \displaystyle l'\left( t, \mu, \sigma \right)
# = \frac{\left(t - \mu \right)^2}{2 \tilde{\sigma}^2} + \ln \left( \sqrt{2 \pi} \tilde{\sigma} \right).
# $
#
# $
# \displaystyle \frac{\partial l'}{\partial \mu } = -\frac{t - \mu}{\tilde{\sigma}^2} \ , \ \frac{\partial^2 l}{\partial \mu^2 } = \frac{1}{\tilde{\sigma}^2}
# $
# <br>
#
# $
# \displaystyle \frac{\partial l'}{\partial \sigma}
# = \frac{1}{\tilde{\sigma}} \left\{ 1 - \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \} \frac{\partial \tilde{\sigma}}{\partial \sigma}
# \\
# \displaystyle \frac{\partial^2 l'}{\partial \sigma^2}
# = -\frac{1}{\tilde{\sigma}^2} \left\{ 1 - 3 \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \}
# \left( \frac{\partial \tilde{\sigma}}{\partial \sigma} \right) ^2
# +\frac{1}{\tilde{\sigma}} \left\{ 1 - \left ( \frac{t - \mu}{\tilde{\sigma}} \right)^2 \right \} \frac{\partial^2 \tilde{\sigma}}{\partial \sigma^2}
# $
#
# , where
#
# $
# \displaystyle
# \frac{\partial \tilde{\sigma}}{\partial \sigma} = \frac{1}{1 + \mathrm{e}^{-\sigma}} \\
# \displaystyle
# \frac{\partial^2 \tilde{\sigma}}{\partial^2 \sigma} = 
\frac{\mathrm{e}^{-\sigma}}{\left( 1 + \mathrm{e}^{-\sigma} \right)^2} # = \frac{\partial \tilde{\sigma}}{\partial \sigma} \left( 1 - \frac{\partial \tilde{\sigma}}{\partial \sigma} \right) # $ # + _kg_hide-input=false class OSICLossForLGBM: """ Custom Loss for LightGBM. * Objective: return grad & hess of NLL of gaussian * Evaluation: return competition metric """ def __init__(self, epsilon: float=1) -> None: """Initialize.""" self.name = "osic_loss" self.n_class = 2 # FVC & Confidence self.epsilon = epsilon def __call__(self, preds: np.ndarray, labels: np.ndarray, weight: tp.Optional[np.ndarray]=None) -> float: """Calc loss.""" sigma_clip = np.maximum(preds[:, 1], 70) Delta = np.minimum(np.abs(preds[:, 0] - labels), 1000) loss_by_sample = - np.sqrt(2) * Delta / sigma_clip - np.log(np.sqrt(2) * sigma_clip) loss = np.average(loss_by_sample, weight) return loss def _calc_grad_and_hess( self, preds: np.ndarray, labels: np.ndarray, weight: tp.Optional[np.ndarray]=None ) -> tp.Tuple[np.ndarray]: """Calc Grad and Hess""" mu = preds[:, 0] sigma = preds[:, 1] sigma_t = np.log(1 + np.exp(sigma)) grad_sigma_t = 1 / (1 + np.exp(- sigma)) hess_sigma_t = grad_sigma_t * (1 - grad_sigma_t) grad = np.zeros_like(preds) hess = np.zeros_like(preds) grad[:, 0] = - (labels - mu) / sigma_t ** 2 hess[:, 0] = 1 / sigma_t ** 2 tmp = ((labels - mu) / sigma_t) ** 2 grad[:, 1] = 1 / sigma_t * (1 - tmp) * grad_sigma_t hess[:, 1] = ( - 1 / sigma_t ** 2 * (1 - 3 * tmp) * grad_sigma_t ** 2 + 1 / sigma_t * (1 - tmp) * hess_sigma_t ) if weight is not None: grad = grad * weight[:, None] hess = hess * weight[:, None] return grad, hess def return_loss(self, preds: np.ndarray, data: lgb.Dataset) -> tp.Tuple[str, float, bool]: """Return Loss for lightgbm""" labels = data.get_label() weight = data.get_weight() n_example = len(labels) # # reshape preds: (n_class * n_example,) => (n_class, n_example) => (n_example, n_class) preds = preds.reshape(self.n_class, n_example).T # # calc loss loss = self(preds, 
labels, weight) return self.name, loss, True def return_grad_and_hess(self, preds: np.ndarray, data: lgb.Dataset) -> tp.Tuple[np.ndarray]: """Return Grad and Hess for lightgbm""" labels = data.get_label() weight = data.get_weight() n_example = len(labels) # # reshape preds: (n_class * n_example,) => (n_class, n_example) => (n_example, n_class) preds = preds.reshape(self.n_class, n_example).T # # calc grad and hess. grad, hess = self._calc_grad_and_hess(preds, labels, weight) # # reshape grad, hess: (n_example, n_class) => (n_class, n_example) => (n_class * n_example,) grad = grad.T.reshape(n_example * self.n_class) hess = hess.T.reshape(n_example * self.n_class) return grad, hess # - # ## Training Utils # + #=========================================================== # model #=========================================================== def run_single_lightgbm( model_param, fit_param, train_df, test_df, folds, features, target, fold_num=0, categorical=[], my_loss=None, ): trn_idx = folds[folds.fold != fold_num].index val_idx = folds[folds.fold == fold_num].index logger.info(f'len(trn_idx) : {len(trn_idx)}') logger.info(f'len(val_idx) : {len(val_idx)}') if categorical == []: trn_data = lgb.Dataset( train_df.iloc[trn_idx][features], label=target.iloc[trn_idx]) val_data = lgb.Dataset( train_df.iloc[val_idx][features], label=target.iloc[val_idx]) else: trn_data = lgb.Dataset( train_df.iloc[trn_idx][features], label=target.iloc[trn_idx], categorical_feature=categorical) val_data = lgb.Dataset( train_df.iloc[val_idx][features], label=target.iloc[val_idx], categorical_feature=categorical) oof = np.zeros((len(train_df), 2)) predictions = np.zeros((len(test_df), 2)) best_model_str = [""] clf = lgb.train( model_param, trn_data, **fit_param, valid_sets=[trn_data, val_data], fobj=my_loss.return_grad_and_hess, feval=my_loss.return_loss, ) oof[val_idx] = clf.predict(train_df.iloc[val_idx][features], num_iteration=clf.best_iteration) fold_importance_df = pd.DataFrame() 
fold_importance_df["Feature"] = features fold_importance_df["importance"] = clf.feature_importance(importance_type='gain') fold_importance_df["fold"] = fold_num predictions += clf.predict(test_df[features], num_iteration=clf.best_iteration) # RMSE logger.info("fold{} RMSE score: {:<8.5f}".format( fold_num, np.sqrt(mean_squared_error(target[val_idx], oof[val_idx, 0])))) # Competition Metric logger.info("fold{} Metric: {:<8.5f}".format( fold_num, my_loss(oof[val_idx], target[val_idx]))) return oof, predictions, fold_importance_df def run_kfold_lightgbm( model_param, fit_param, train, test, folds, features, target, n_fold=5, categorical=[], my_loss=None, ): logger.info(f"================================= {n_fold}fold lightgbm =================================") oof = np.zeros((len(train), 2)) predictions = np.zeros((len(test), 2)) feature_importance_df = pd.DataFrame() for fold_ in range(n_fold): print("Fold {}".format(fold_)) _oof, _predictions, fold_importance_df =\ run_single_lightgbm( model_param, fit_param, train, test, folds, features, target, fold_num=fold_, categorical=categorical, my_loss=my_loss ) feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) oof += _oof predictions += _predictions / n_fold # RMSE logger.info("CV RMSE score: {:<8.5f}".format(np.sqrt(mean_squared_error(target, oof[:, 0])))) # Metric logger.info("CV Metric: {:<8.5f}".format(my_loss(oof, target))) logger.info(f"=========================================================================================") return feature_importance_df, predictions, oof def show_feature_importance(feature_importance_df, name): cols = (feature_importance_df[["Feature", "importance"]] .groupby("Feature") .mean() .sort_values(by="importance", ascending=False)[:50].index) best_features = feature_importance_df.loc[feature_importance_df.Feature.isin(cols)] #plt.figure(figsize=(8, 16)) plt.figure(figsize=(6, 4)) sns.barplot(x="importance", y="Feature", 
data=best_features.sort_values(by="importance", ascending=False)) plt.title('Features importance (averaged/folds)') plt.tight_layout() plt.savefig(OUTPUT_DICT+f'feature_importance_{name}.png') # - # ## predict FVC & Confidence(signa) # + target = train[TARGET] test[TARGET] = np.nan # features cat_features = ['Sex', 'SmokingStatus'] num_features = [c for c in test.columns if (test.dtypes[c] != 'object') & (c not in cat_features)] features = num_features + cat_features drop_features = [ID, TARGET, 'predict_Week', 'base_Week'] features = [c for c in features if c not in drop_features] if cat_features: ce_oe = ce.OrdinalEncoder(cols=cat_features, handle_unknown='impute') ce_oe.fit(train) train = ce_oe.transform(train) test = ce_oe.transform(test) lgb_model_param = { 'num_class': 2, # 'objective': 'regression', 'metric': 'None', 'boosting_type': 'gbdt', 'learning_rate': 5e-02, 'seed': SEED, "subsample": 0.4, "subsample_freq": 1, 'max_depth': 1, 'verbosity': -1, } lgb_fit_param = { "num_boost_round": 10000, "verbose_eval":100, "early_stopping_rounds": 500, } feature_importance_df, predictions, oof = run_kfold_lightgbm( lgb_model_param, lgb_fit_param, train, test, folds, features, target, n_fold=N_FOLD, categorical=cat_features, my_loss=OSICLossForLGBM()) show_feature_importance(feature_importance_df, TARGET) # - oof[:5, :] predictions[:5] train["FVC_pred"] = oof[:, 0] train["Confidence"] = oof[:, 1] test["FVC_pred"] = predictions[:, 0] test["Confidence"] = predictions[:, 1] # # Submission submission.head() sub = submission.drop(columns=['FVC', 'Confidence']).merge(test[['Patient_Week', 'FVC_pred', 'Confidence']], on='Patient_Week') sub.columns = submission.columns sub.to_csv('submission.csv', index=False) sub.head()
kaggle_notebooks/osic-baseline-lgbm-with-custom-metric.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Dependencies

# + _kg_hide-input=true _kg_hide-output=true
import os
import sys
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score, classification_report
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler


def seed_everything(seed=0):
    """Seed every RNG (Python, hash, NumPy, TensorFlow) for reproducibility."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    # FIX: was set_random_seed(0), which ignored the `seed` argument and made
    # the TensorFlow graph seed non-configurable.
    set_random_seed(seed)


seed = 0
seed_everything(seed)

# %matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
# Make the bundled EfficientNet implementation importable (Kaggle dataset).
sys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))
from efficientnet import *

# ## Load data

# + _kg_hide-input=true
# Pre-computed train/validation hold-out split; `set` column marks membership.
hold_out_set = pd.read_csv('../input/aptos-data-split/hold-out.csv')
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])

# Preprocecss data: append the image-file extension so the ids match filenames.
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
display(X_train.head())
# -

# # Model parameters

BATCH_SIZE = 8
EPOCHS = 30
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 456
WIDTH = 456
CHANNELS = 3
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
LR_WARMUP_EPOCHS_1st = 2
LR_WARMUP_EPOCHS_2nd = 5
# Half-cycle length for the cyclic LR schedule: 2.5 epochs worth of batches.
STEP_SIZE = (5 * (len(X_train) // BATCH_SIZE)) / 2

# # Pre-procecss images

# + _kg_hide-input=true
train_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'

# Making sure directories don't exist before re-creating them fresh.
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)

# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)


def crop_image(img, tol=7):
    """Crop away near-black borders (pixels <= `tol`) from a fundus image.

    Works on both grayscale (2-D) and RGB (3-D) arrays. If the brightness
    mask would crop out everything (image too dark), the original image is
    returned unchanged.
    """
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    elif img.ndim == 3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img > tol
        check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
        if check_shape == 0:
            # image is too dark so that we crop out everything
            return img  # return original image
        else:
            img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
            img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
            img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
            img = np.stack([img1, img2, img3], axis=-1)
        return img


def circle_crop(img):
    """Mask the image to the inscribed circle (the retina disc), then re-crop."""
    img = crop_image(img)
    height, width, depth = img.shape
    largest_side = np.max((height, width))
    img = cv2.resize(img, (largest_side, largest_side))
    height, width, depth = img.shape
    x = width // 2
    y = height // 2
    r = np.amin((x, y))
    circle_img = np.zeros((height, width), np.uint8)
    cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
    img = cv2.bitwise_and(img, img, mask=circle_img)
    img = crop_image(img)
    return img


def preprocess_image(base_path, save_path, image_id, HEIGHT, WIDTH, sigmaX=10):
    """Read, circle-crop, resize, and Ben-Graham-enhance one image, then save it.

    The addWeighted step (4*img - 4*blur + 128) sharpens vessel contrast.
    NOTE: cv2.resize takes (width, height); HEIGHT==WIDTH here so the order
    is harmless, but keep that in mind if the two ever differ.
    """
    image = cv2.imread(base_path + image_id)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = circle_crop(image)
    image = cv2.resize(image, (HEIGHT, WIDTH))
    image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128)
    cv2.imwrite(save_path + image_id, image)


# Pre-procecss train set
for i, image_id in enumerate(X_train['id_code']):
    preprocess_image(train_base_path, train_dest_path, image_id, HEIGHT, WIDTH)
# Pre-procecss validation set
for i, image_id in enumerate(X_val['id_code']):
    preprocess_image(train_base_path, validation_dest_path, image_id, HEIGHT, WIDTH)
# Pre-procecss test set
for i, image_id in enumerate(test['id_code']):
    preprocess_image(test_base_path, test_dest_path, image_id, HEIGHT, WIDTH)
# -

# # Data generator

# + _kg_hide-input=true
# One shared augmenting generator; the same instance feeds all three flows.
datagen = ImageDataGenerator(rescale=1./255,
                             rotation_range=360,
                             horizontal_flip=True,
                             vertical_flip=True,
                             zoom_range=[0.75, 1],
                             fill_mode='constant',
                             cval=0)

train_generator = datagen.flow_from_dataframe(
    dataframe=X_train,
    directory=train_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",  # regression target: keep the integer grade as-is
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)

valid_generator = datagen.flow_from_dataframe(
    dataframe=X_val,
    directory=validation_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)

test_generator = datagen.flow_from_dataframe(
    dataframe=test,
    directory=test_dest_path,
    x_col="id_code",
    batch_size=1,
    class_mode=None,  # no labels at inference time
    shuffle=False,
    target_size=(HEIGHT, WIDTH),
    seed=seed)
# -

# + _kg_hide-input=true
class CyclicLR(Callback):
    """Cyclical learning rate policy (CLR) Keras callback.

    Cycles the learning rate between two boundaries with constant frequency.

    # Arguments
        base_lr: initial learning rate, the lower boundary in the cycle.
        max_lr: upper boundary in the cycle; defines the cycle amplitude
            (max_lr - base_lr). Depending on the scaling function, max_lr
            may not actually be reached.
        step_size: number of training iterations per half cycle. Authors
            suggest 2-8x the training iterations per epoch.
        mode: one of {'triangular', 'triangular2', 'exp_range'}. Ignored
            when scale_fn is given.
        gamma: constant in the 'exp_range' scaling: gamma**(cycle iterations).
        scale_fn: custom scaling policy, a single-argument lambda with
            0 <= scale_fn(x) <= 1 for all x >= 0; overrides `mode`.
        scale_mode: {'cycle', 'iterations'} — whether scale_fn is evaluated
            on the cycle number or on iterations since cycle start.

    Built-in policies (per the paper):
        "triangular": basic triangular cycle, no amplitude scaling.
        "triangular2": triangular cycle, amplitude halved each cycle.
        "exp_range": amplitude scaled by gamma**(cycle iterations).

    # Example
    ```python
    clr = CyclicLR(base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular')
    model.fit(X_train, Y_train, callbacks=[clr])
    ```

    # References
        - [Cyclical Learning Rates for Training Neural Networks](
          https://arxiv.org/abs/1506.01186)
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000.,
                 mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
        super(CyclicLR, self).__init__()

        if mode not in ['triangular', 'triangular2', 'exp_range']:
            raise KeyError("mode must be one of 'triangular', 'triangular2', or 'exp_range'")
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            # Built-in policies and the domain their scale function runs over.
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1 / (2.**(x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma ** x
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}
        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Reset the cycle counter, optionally overriding the schedule bounds."""
        if new_base_lr is not None:
            self.base_lr = new_base_lr
        if new_max_lr is not None:
            self.max_lr = new_max_lr
        if new_step_size is not None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        """Compute the learning rate for the current iteration."""
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr - self.base_lr) * \
                np.maximum(0, (1 - x)) * self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr - self.base_lr) * \
                np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        K.set_value(self.model.optimizer.lr, self.clr())
        # Record the LR and any batch metrics for later plotting.
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
# -

# # Model

def create_model(input_shape):
    """EfficientNetB5 backbone + GAP + dropout/dense head, single linear output.

    The linear final unit treats DR grade (0-4) as a regression target;
    ImageNet (no-top) weights are loaded from a local dataset.
    """
    input_tensor = Input(shape=input_shape)
    base_model = EfficientNetB5(weights=None, include_top=False, input_tensor=input_tensor)
    base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b5_imagenet_1000_notop.h5')
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation='relu')(x)
    x = Dropout(0.5)(x)
    final_output = Dense(1, activation='linear', name='final_output')(x)
    model = Model(input_tensor, final_output)
    return model


# # Train top layers

# + _kg_hide-output=true
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))

# Warm-up: freeze the backbone, train only the last 5 layers (the new head).
for layer in model.layers:
    layer.trainable = False
for i in range(-5, 0):
    model.layers[i].trainable = True

metric_list = ["accuracy"]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()

# + _kg_hide-input=true _kg_hide-output=true
STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size

history_warmup = model.fit_generator(generator=train_generator,
                                     steps_per_epoch=STEP_SIZE_TRAIN,
                                     validation_data=valid_generator,
                                     validation_steps=STEP_SIZE_VALID,
                                     epochs=WARMUP_EPOCHS,
                                     verbose=2).history
# -

# # Fine-tune the complete model

# + _kg_hide-input=false _kg_hide-output=true
# Unfreeze everything and fine-tune with early stopping + cyclic LR.
for layer in model.layers:
    layer.trainable = True

es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE,
                   restore_best_weights=True, verbose=1)
cyclic_lr = CyclicLR(base_lr=(LEARNING_RATE / 20), max_lr=(LEARNING_RATE * 2),
                     step_size=STEP_SIZE, mode='triangular2')
callback_list = [es, cyclic_lr]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()

# + _kg_hide-input=true _kg_hide-output=true
history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=valid_generator,
                              validation_steps=STEP_SIZE_VALID,
                              epochs=EPOCHS,
                              callbacks=callback_list,
                              verbose=2).history

# + _kg_hide-input=true
fig, ax = plt.subplots(figsize=(20, 4))
ax.plot(cyclic_lr.history['lr'])
ax.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show()
# -

# # Model loss graph

# + _kg_hide-input=true
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=(20, 14))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()

# + _kg_hide-input=true
# Create empty arays to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()

# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN + 1):
    im, lbl = next(train_generator)
    preds = model.predict(im, batch_size=train_generator.batch_size)
    for index in range(len(preds)):
        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']

# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
    im, lbl = next(valid_generator)
    preds = model.predict(im, batch_size=valid_generator.batch_size)
    for index in range(len(preds)):
        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']

df_preds['label'] = df_preds['label'].astype('int')

# + _kg_hide-input=true
def classify(x):
    """Round a continuous regression output to the nearest DR grade (0-4)."""
    if x < 0.5:
        return 0
    elif x < 1.5:
        return 1
    elif x < 2.5:
        return 2
    elif x < 3.5:
        return 3
    return 4


# Classify predictions
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation']
# -

# # Model Evaluation

# + _kg_hide-input=true
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']


def evaluate_model(train, validation):
    """Print quadratic-weighted kappa and classification reports.

    `train` and `validation` are (labels, predictions) pairs.
    """
    train_labels, train_preds = train
    validation_labels, validation_preds = validation
    print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
    print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
    print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
    print(' \t\t\t\t TRAIN')
    print(classification_report(train_labels, train_preds, target_names=labels))
    print(' \t\t\t\t VALIDATION')
    print(classification_report(validation_labels, validation_preds, target_names=labels))


evaluate_model((train_preds['label'], train_preds['predictions']),
               (validation_preds['label'], validation_preds['predictions']))
# -

# ## Confusion Matrix

# + _kg_hide-input=true
def plot_confusion_matrix(train, validation, labels=labels):
    """Plot row-normalized train/validation confusion matrices side by side."""
    train_labels, train_preds = train
    validation_labels, validation_preds = validation
    fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
    train_cnf_matrix = confusion_matrix(train_labels, train_preds)
    validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)

    # Normalize each row so cells show per-class recall fractions.
    train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
    validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]

    train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
    validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
    sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues", ax=ax1).set_title('Train')
    sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8), ax=ax2).set_title('Validation')
    plt.show()


plot_confusion_matrix((train_preds['label'], train_preds['predictions']),
                      (validation_preds['label'], validation_preds['predictions']))
# -

# ## Apply model to test set and output predictions

# + _kg_hide-input=true
def apply_tta(model, generator, steps=10):
    """Average predictions over `steps` augmented passes (test-time augmentation)."""
    step_size = generator.n // generator.batch_size
    preds_tta = []
    for i in range(steps):
        generator.reset()
        preds = model.predict_generator(generator, steps=step_size)
        preds_tta.append(preds)
    return np.mean(preds_tta, axis=0)


preds = apply_tta(model, test_generator)
predictions = [classify(x) for x in preds]

results = pd.DataFrame({'id_code': test['id_code'], 'diagnosis': predictions})
# Strip the ".png" extension appended during preprocessing.
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])

# + _kg_hide-input=true _kg_hide-output=false
# Cleaning created directories
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)
# -

# # Predictions class distribution

# + _kg_hide-input=true
fig = plt.subplots(sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()

# + _kg_hide-input=true
results.to_csv('submission.csv', index=False)
display(results.head())
Model backlog/EfficientNet/EfficientNetB5/153 - EfficientNetB5 -Reg- Cyc LR triangular2 Adam.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
import os
# Make the project-local `pudl` package importable from four directories up.
sys.path.append(os.path.abspath(os.path.join('..','..','..','..')))
from pudl import pudl, ferc1, eia923, settings, constants
from pudl import models, models_ferc1, models_eia923
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# %matplotlib inline

# +
# Pull EIA-923 fuel receipt records (delivery date, contract expiration, fuel
# group) from the PUDL database and compute how long each delivery's supply
# contract still has to run.
pudl_engine = pudl.connect_db()
frc_contracts = pd.read_sql('''SELECT report_date, contract_expiration_date, fuel_group FROM fuel_receipts_costs_eia923;''', pudl_engine)
frc_contracts['report_date'] = pd.to_datetime(frc_contracts['report_date'])
frc_contracts['contract_expiration_date'] = pd.to_datetime(frc_contracts['contract_expiration_date'])
# Remaining contract time at the moment of delivery (a Timedelta column).
frc_contracts['remaining_contract_time'] = frc_contracts.contract_expiration_date - frc_contracts.report_date
# Coerce missing values to NaN so dropna can remove them.
frc_contracts['remaining_contract_time'] = frc_contracts['remaining_contract_time'].apply(lambda x: np.nan if x is None else x)
frc_contracts.dropna(subset=['remaining_contract_time'], inplace=True)
# Keep only contracts that had not already expired at delivery time.
frc_contracts = frc_contracts[frc_contracts['remaining_contract_time']>pd.to_timedelta('0 days')]
# Convert to months using a 30-day month (30 days * 86400 seconds/day).
frc_contracts['tot_months'] = frc_contracts.remaining_contract_time.dt.total_seconds()/(30*60*60*24)

# Split the deliveries by calendar year and by fuel group (coal vs gas).
coal_by_year = {}
gas_by_year = {}
for yr in range(2009,2017):
    start = pd.to_datetime('{}-01-01'.format(yr))
    end = pd.to_datetime('{}-12-31'.format(yr))
    mask = (frc_contracts['report_date'] >= start) & (frc_contracts['report_date'] <= end)
    contracts_by_year = frc_contracts.loc[mask]
    coal_by_year[yr] = contracts_by_year[contracts_by_year['fuel_group']=='Coal']
    gas_by_year[yr] = contracts_by_year[contracts_by_year['fuel_group']=='Natural Gas']

# +
# Shared style settings for the figure.
font = 'Libre Franklin'
font_weight = 'heavy'
font_color = 'black'
title_size = 16
label_size = 14
label_color = 'black'

from scipy import stats

# 8 rows (one per year 2009-2016) x 2 columns (coal left, gas right) of
# histograms showing months remaining on contracts at time of delivery.
fig, axarr = plt.subplots(8,2)
fig.set_figwidth(16)
fig.set_figheight(24)
years = range(2009,2017)

# Left column: coal deliveries.
# NOTE(review): statement grouping inside these loops is reconstructed from a
# collapsed source; titles are assumed to apply only to the first (top) row.
for (yr,coal_ax) in zip(years, axarr[:,0]):
    if yr == min(years):
        coal_ax.set_title("Months remaining on coal contracts",size= title_size, fontname = font, weight = font_weight)
    coal_ax.grid(b=True)
    coal_ax.hist(coal_by_year[yr]['tot_months'], bins=108, range=(0,108), label='Coal', color='#2C2C2C')
    coal_ax.set_xticks(np.arange(0,120,12))
    coal_ax.tick_params(axis='both',labelsize=label_size, labelcolor = label_color)
    coal_ax.set_ylim(0,850)
    # Annotate each 12-month bucket with the cumulative percentile of
    # contracts expiring within that many months.
    for n in range(0,9):
        pct = '{:.0%}'.format(stats.percentileofscore(coal_by_year[yr]['tot_months'],(n+1)*12)/100)
        coal_ax.text(n*12+6, 780, pct, fontsize=13, ha='center', fontname = font)
    coal_ax.set_ylabel("Number of Deliveries", fontname = font, fontsize=label_size, color = label_color)
    coal_ax.text(76, 700, 'of all coal contracts',fontsize=13)
    coal_ax.set_xlabel("Months Remaining on Contract at Time of Delivery", fontname = font, fontsize=label_size, color = label_color)

# Right column: natural gas deliveries (same layout, different scale/colors).
for (yr,gas_ax) in zip(years,axarr[:,1]):
    if yr == min(years):
        gas_ax.set_title("Months remaining on natural gas contracts", size= title_size, fontname = font, weight = font_weight)
    gas_ax.grid(b=True)
    gas_ax.hist(gas_by_year[yr]['tot_months'], bins=108, range=(0,108), label='Natural Gas', color='#0083CC')
    gas_ax.set_xticks(np.arange(0,120,12))
    gas_ax.tick_params(axis='both',labelsize=label_size, labelcolor = label_color)
    gas_ax.set_ylim(0,250)
    for n in range(0,9):
        pct = '{:.0%}'.format(stats.percentileofscore(gas_by_year[yr]['tot_months'],(n+1)*12)/100)
        gas_ax.text(n*12+6, 230, pct, fontsize=13, ha='center', fontname = font)
    # Year label on the right-hand plot identifies the whole row.
    gas_ax.text(96, 110, str(yr), fontsize=30, ha='center')
    gas_ax.text(65, 205, 'of all natural gas contracts',fontsize=13)
    gas_ax.set_xlabel("Months Remaining on Contract at Time of Delivery", fontname = font, fontsize=label_size, color = label_color)

plt.tight_layout()
plt.show()
# -
notebooks/examples/eia923-fuel-contracts-vs-time.ipynb