code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## MixedNB with digits dataset<br> # ### using categorical naive bayes # Load the required modules import numpy as np from sklearn.datasets import load_digits from sklearn.naive_bayes import GaussianNB from mixed_naive_bayes import MixedNB # Load the digits dataset digits = load_digits() X = digits['data'] y = digits['target'] # Fit to `sklearn`'s GaussianNB gaussian_nb = GaussianNB() gaussian_nb.fit(X, y) gaussian_nb_score = gaussian_nb.score(X, y) # Fit to our classifier mixed_nb = MixedNB(categorical_features='all', max_categories=np.repeat(17,64)) mixed_nb.fit(X, y) mixed_nb_score = mixed_nb.score(X, y) print(gaussian_nb_score) print(mixed_nb_score)
examples/dataset_digits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/RCML2016/SendEmiltoEmail/blob/main/SendEMail.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="gKgLcRA3CzZu" outputId="c7859922-0f2e-4c82-f51a-ea2f9abdf2b8" colab={"base_uri": "https://localhost:8080/", "height": 398} import smtplib s=smtplib.SMTP('smtp.gmail.com',587) s.starttls() s.login("<EMAIL>","Cmegroup2351") message="A Text Message from Python" s.sendmail("<EMAIL>","<EMAIL>",message) S.quit()
SendEMail.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 1、随机生成1万个整数,范围在0-10万之间,分别进行简单选择排序、快速排序(自行递归实现的)以及内置sort函数3种排序,打印出3种排序的运行时间。 # 假设有快速排序算法quick_sort(seq),可以实现快速排序。 # # 令left_seq = [], right_seq = [] # 令待排序序列区间的第一个元素为p,即p=seq[0] # 对seq的[start+1,end]区间中的每一个元素: # 如果元素 < p: # 将该元素加入到left_seq中 # 否则: # 将该元素加入到right_seq中 # # 如left_seq非空,利用快速排序算法quick_sort,对left_seq进行快速排序 # 如right_seq非空,利用快速排序算法quick_sort,对right_seq进行快速排序 # # 返回:left_seq + p + right_seq # + import random import time def simple_sort(numbers): for i in range(len(numbers)): for j in range(i+1,len(numbers)): min=i if numbers[min]>numbers[j]: min=j numbers[i],numbers[min]=numbers[min],numbers[i] def quick_sort(seq): left_seq=[] right_seq=[] p=seq[0] start=0 end=len(seq) for i in range(start+1,end): if seq[i]<=p: left_seq.append(seq[i]) else : right_seq.append(seq[i]) if len(left_seq)!=0: quick_sort(left_seq) elif len(right_seq)!=0: quick_sort(right_seq) else : left_seq.append(p) left_seq.extend(right_seq) return (left_seq) n=[] for i in range(100000): n.append(random.randint(1,100000)) nums1=[] nums2=[] nums1.extend(n) nums2.extend(n) start_time=time.time() quick_sort(nums2) end_time=time.time() print("time for quick-sort:",end_time-start_time,"-"*30) start_time=time.time() num=sorted(n) end_time=time.time() print("time for sort-function:",end_time-start_time,"-"*30) #start_time=time.time() #simple_sort(nums1) #end_time=time.time() #print("简单排序所用时间:",end_time-start_time,"-"*30) print("too much time for the simple-sort! 
I have no patience for the outcome(facepalm) though I've coded it above.") # - # 2、随机生成1万个整数,范围在0-10万之间,求其中每个整数出现的次数。并按照整数大小排序输出整数及出现次数。 # + import random from collections import Counter def numbers_and_freq(numbers): list_needed=Counter() for i in range(len(numbers)): list_needed += Counter(i for i in numbers) return list_needed n=[random.randint(0,100000) for i in range(100)]#test for list of lenth 100 the_list=numbers_and_freq(n) print(the_list) # - # 3、对本任务中的语料.txt文件,随机抽取其5001-10000行存为test1.txt文件,写函数,可得到其与本任务中test.txt文件的共用字以及独用字(相关概念自行百度)。 # + from collections import defaultdict import random #制造test1.txt def chose_lines(yuliao.txt): num_of_line=0 randomline=[] with open(file) as fh: fhh=[line for line in fh.split("/")] num_of_line+=1 for i in range(5001): randomline=[fhh[i] for i in random.randint(num_of_line)] return randomline def practice3(file,file2):
chapter2/homework/computer/end/201611680575.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Vera in the city project mapping # # We create tags on a map, where feedback forms are located. # For this we do the following: # # get locations (X,Y) # get tag information adjusted to timeline, e.g. t1 tag1: feedback form is attached, t2 tag1: feedback form is checked etc. # update information about tags and display them on a map. # # # + x1, y1 = 48.862337, 2.366640 # Rogier x2, y2 = 48.871529, 2.382836 # 79 Rue <NAME>, 75020 Paris Lezarts Paris 20 x3, y3 = 48.870254, 2.383656 # Momo shop x4, y4 = 48.851111, 2.350759 #65 Quai de la Tournelle, 75005 Paris x5, y5 = 48.51038, 2.21026 # 48.878524, 2.384093 # Rue Buttes Ch. x6, y6 = 48.849265, 2.348795 # Paris museum of Prefecture police x,y = 48.8566, 2.3522 # - # # Plotting on a map # + import numpy as np from matplotlib import pyplot as plt import numpy as np import matplotlib.cm as cm import folium #first we get datapoints from the file latitude = [x1,x2,x3,x4,x5,x] # later we will read the data file from openstreetmaps longitude = [x1,x2,x3,x4,x5,y] #df.longitudestart.values print(type(latitude)) #then we zip two arrays of lat, lon of datapoints latlon = list(zip(latitude, longitude)) #We put map to show first location of Paris 48.8566° North, 2.3522° est # 40.7808 et la longitude de la ville de New York City est -73.9772. mapit = folium.Map( location=[48.75 , 2.35], zoom_start=6 ) for coord in latlon: folium.Marker( location=[ coord[0], coord[1] ], fill_color='#43d9de', radius=80 ).add_to( mapit ) mapit SAF=folium.PolyLine(locations=latlon,weight=5,color = 'r')#color_array[ind]) mapit.add_child(SAF) # -
vera_notebooks/Vera_in_the_city.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:icesat2020] # language: python # name: conda-env-icesat2020-py # --- # ## ICESat-2 Data Download # This notebook orders and downloads ICESat-2 ATL03 data in the Beaufort, Chukchi, and Laptev Sea regions for the ICESat-2 hackweek project *Overcast*. # # It also has capability to gather data from any ICESat-2 data product in any region and timeframe of interest, based on user-defined input. # # The notebook makes use of *icepyx* functionality, and writes data to respective folders in shared-data on the hackweek's JupyterHub (at least for now); it borrows heavily from the tutorial material for the same hackweek, compiled by Jessica # + import os import icepyx as ipx # %matplotlib inline # - # #### Set up order project_name = input("Project name: ") roi = input("name of region of interest: ") dataset= input("ICESat-2 product short name: ") # + short_name = dataset if 'overcast' == project_name: if 'beaufort' == roi: spatial_extent = [-140,68,-120,80] elif 'chukchi' == roi: spatial_extent = [170,70,190,75] elif 'laptev' == roi: spatial_extent = [110,73,140,80] else: spatial_extent = [float(input("west: ")), float(input("low lat: ")) , float(input("east: ")), float(input("high lat: "))] date_range = ['2019-06-10','2019-06-16'] else: spatial_extent = [float(input("west: ")), float(input("low lat: ")) , float(input("east: ")), float(input("high lat: "))] date_range = [input("start date: "), input("end date: ")] # - region = ipx.Query(short_name, spatial_extent, date_range) region.avail_granules(ids=True) # + earthdata_uid = 'mwiering' email = '<EMAIL>' region.earthdata_login(earthdata_uid, email) # - region.order_granules(subset=False, format = 'HDF5', email=False) print("orders: ", region.granules.orderIDs) # + if 'overcast' == project_name: path = '../jovyan/shared/data-overcast/'+ 
roi.upper() + '/' + short_name else: path = './download/' + roi.upper() + '/' + short_name region.download_granules(path) # -
notebooks/is2_download_general.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **[Feature Engineering Home Page](https://www.kaggle.com/learn/feature-engineering)** # # --- # # # Introduction # # In this set of exercises, you'll create new features from the existing data. Again you'll compare the score lift for each new feature compared to a baseline model. First off, run the cells below to set up a baseline dataset and model. # + # set up code checking from learntools.core import binder binder.bind(globals()) from learntools.feature_engineering.ex3 import * # create features from timestamps import pandas as pd click_data = pd.read_csv( '../input/feature-engineering-data/train_sample.csv', parse_dates=['click_time'] ) click_times = click_data['click_time'] clicks = click_data.assign( day=click_times.dt.day.astype('uint8'), hour=click_times.dt.hour.astype('uint8'), minute=click_times.dt.minute.astype('uint8'), second=click_times.dt.second.astype('uint8') ) # label encoding for categorical features cat_features = ['ip', 'app', 'device', 'os', 'channel'] from sklearn.preprocessing import LabelEncoder for feature in cat_features: label_encoder = LabelEncoder() clicks[feature] = label_encoder.fit_transform(clicks[feature]) def get_data_splits(dataframe, valid_fraction=0.1): # sort data dataframe = dataframe.sort_values('click_time') # split data valid_rows = int(len(dataframe) * valid_fraction) train = dataframe[:-valid_rows * 2] valid = dataframe[-valid_rows * 2:-valid_rows] test = dataframe[-valid_rows:] return train, valid, test def train_model(train, valid, test=None, feature_cols=None): # choose features if feature_cols is None: feature_cols = train.columns.drop( ['click_time', 'attributed_time', 'is_attributed'] ) # define train & valid dataset import lightgbm as lgb dtrain = lgb.Dataset(train[feature_cols], 
label=train['is_attributed']) dvalid = lgb.Dataset(valid[feature_cols], label=valid['is_attributed']) # fit model param = {'num_leaves': 64, 'objective': 'binary', 'metric': 'auc', 'seed': 7} num_round = 1000 print("Training model. Hold on a minute to see the validation score") bst = lgb.train( param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=20, verbose_eval=False ) # make predictions valid_pred = bst.predict(valid[feature_cols]) # evaluate the model from sklearn.metrics import roc_auc_score valid_score = roc_auc_score(valid['is_attributed'], valid_pred) print(f"Validation AUC score: {valid_score}") # test if test is not None: test_pred = bst.predict(test[feature_cols]) test_score = roc_auc_score(test['is_attributed'], test_pred) return bst, valid_score, test_score else: return bst, valid_score print("Baseline model score") train, valid, test = get_data_splits(clicks) _ = train_model(train, valid, test) # - # # 1. Add Interaction Features # # Here you'll add interaction features for each pair of categorical features (ip, app, device, os, channel). The easiest way to iterate through the pairs of features is with `itertools.combinations`. For each new column, join the values as strings with an underscore, so 13 and 47 would become `"13_47"`. As you add the new columns to the dataset, be sure to label encode the values. 
# + cat_features = ['ip', 'app', 'device', 'os', 'channel'] interactions = pd.DataFrame(index=clicks.index) # Iterate through each pair of features, combine them into interaction features from itertools import combinations for col1, col2 in combinations(cat_features, 2): new_col_name = '_'.join([col1, col2]) # convert to strings and combine new_values = clicks[col1].map(str) + "_" + clicks[col2].map(str) # encode encoder = LabelEncoder() interactions[new_col_name] = encoder.fit_transform(new_values) # check your answer q_1.check() # + # uncomment if you need some guidance # q_1.hint() # q_1.solution() # - interactions.head() clicks = clicks.join(interactions) print("Score with interactions") train, valid, test = get_data_splits(clicks) _ = train_model(train, valid) # we removed ip in past exercise. are ip-combined-features have benefits now? clicks_wo_ip = clicks.drop(['ip_app', 'ip_device', 'ip_os', 'ip_channel'], axis=1) print("Score with droped-`ip`-interactions") train, valid, test = get_data_splits(clicks_wo_ip) _ = train_model(train, valid) # <hr/> # # # Generating Numerical Features # # Adding interactions is a quick way to create more categorical features from the data. It's also effective to create new numerical features, you'll typically get a lot of improvement in the model. This takes a bit of brainstorming and experimentation to find features that work well. # # For these exercises I'm going to have you implement functions that operate on Pandas Series. It can take multiple minutes to run these functions on the entire data set so instead I'll provide feedback by running your function on a smaller dataset. # <hr/> # # ## 2. Number of Events in the past Six Hours # # The first feature you'll be creating is the number of events from the same IP in the last six hours. It's likely that someone who is visiting often will download the app. 
# # Implement a function `count_past_events` that takes a Series of click times (timestamps) and returns another Series with the number of events in the last hour. **Tip:** The `rolling` method is useful for this. # + def count_past_events(series, time_window='6H'): series = pd.Series(series.index, index=series).sort_index() # subtract 1 so the current event isn't counted past_events = series.rolling(time_window).count() - 1 return past_events # check your answer q_2.check() # + # uncomment if you need some guidance # q_2.hint() # q_2.solution() # - # Because this can take a while to calculate on the full data, we'll load pre-calculated versions in the cell below to test model performance. # + # loading in from saved Parquet file past_events = pd.read_parquet('../input/feature-engineering-data/past_6hr_events.pqt') clicks['ip_past_6hr_counts'] = past_events train, valid, test = get_data_splits(clicks) _ = train_model(train, valid, test) # - # <hr/> # # ## 3. Features from Future Information # # In the last exercise you created a feature that looked at past events. You could also make features that use information from events in the future. Should you use future events or not? # # Run the following line after you've decided your answer. # check your answer (Run this code cell to receive credit!) q_3.solution() # <hr/> # # ## 4. Time since Last Event # # Implement a function `time_diff` that calculates the time since the last event in seconds from a Series of timestamps. 
This will be ran like so: # # ```python # timedeltas = clicks.groupby('ip')['click_time'].transform(time_diff) # ``` # + def time_diff(series): return series.diff().dt.total_seconds() # check your answer q_4.check() # + # uncomment if you need some guidance # q_4.hint() # q_4.solution() # - # We'll again load pre-computed versions of the data, which match what your function would return # + # loading in from saved Parquet file past_events = pd.read_parquet('../input/feature-engineering-data/time_deltas.pqt') clicks['past_events_6hr'] = past_events train, valid, test = get_data_splits(clicks.join(past_events)) _ = train_model(train, valid, test) # - # <hr/> # # ## 5. Number of Previous App Downloads # # It's likely that if a visitor downloaded an app previously, it'll affect the likelihood they'll download one again. Implement a function `previous_attributions` that returns a Series with the number of times an app has been download (`'is_attributed' == 1`) before the current event. # + def previous_attributions(series): return series.expanding(min_periods=2).sum() - series # Check your answer q_5.check() # + # uncomment if you need some guidance # q_5.hint() # q_5.solution() # - # Again loading pre-computed data. # + # loading in from saved Parquet file past_events = pd.read_parquet('../input/feature-engineering-data/downloads.pqt') clicks['ip_past_6hr_counts'] = past_events train, valid, test = get_data_splits(clicks) _ = train_model(train, valid, test) # - # <hr/> # # # 6. Tree-based vs Neural Network Models # # So far we've been using LightGBM, a tree-based model. Would these features we've generated work well for neural networks as well as tree-based models? # # Run the following line after you've decided your answer. # check your answer (Run this code cell to receive credit!) 
q_6.solution() # Now that you've generated a bunch of different features, you'll typically want to remove some of them to reduce the size of the model and potentially improve the performance. Next, I'll show you how to do feature selection using a few different methods such as L1 regression and Boruta. # # Keep Going # # You know how to generate a lot of features. In practice, you'll frequently want to pare them down for modeling. Learn to do that in the **[Feature Selection lesson](https://www.kaggle.com/matleonard/feature-selection)**. # --- # **[Feature Engineering Home Page](https://www.kaggle.com/learn/feature-engineering)** # # # # # # *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
feature-engineering/exercise/feature-generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Welcome and Introduction # # | Time | Description | # | ------------- |:-------------:| # | 16:30 - 16:50 | [Introduction Presentation](https://github.com/kcraath/2018INFINITI_Workshop/blob/master/Workshop_Presentation_New.pdf) | # | 16:50 - 17:00 | Questions | # | 17:00 - 18:00 | Example Code (Unit 1-3) | # # [Unit 1 - Example from Presentation](https://github.com/kcraath/2018INFINITI_Workshop/blob/master/Unit%201%20-%20Example%20from%20Presentation.ipynb) # # [Unit 2 - CWT Power Spectrum and Wavelet Coherence of Water and Energy](https://github.com/kcraath/2018INFINITI_Workshop/blob/master/Unit%202%20-%20CWT%20Power%20Spectrum%20and%20Wavelet%20Coherence%20of%20Water%20and%20Energy.ipynb) # # [Unit 3 - Windowed Scalogram Difference of Water and Energy](https://github.com/kcraath/2018INFINITI_Workshop/blob/master/Unit%203%20-%20Windowed%20Scalogram%20Difference%20of%20Water%20and%20Energy.ipynb) # # Who am I: [<NAME>](https://www.linkedin.com/in/kim-raath-364646a6/) # # - Originally from South Africa # - B.S. in Mathematics (NCAA Div I. Track ad Field Scholarship) # - Ph.D. Statistics and M.A. Economics Graduate Student # - NSF Graduate Research Fellow # - [Center for Computational Finance and Economic Systems](http://www.cofes-rice.org/) (CoFES) Ph.D. Student Affiliate # - Co-founder and Head of Financial Engineering at [Topl](https://topl.co/) # # A tailor-made blockchain technological infrastructure enabling socially-beneficial investments into any region in the world, and in particular into emerging markets. 
# # - Contributor to several R packages # - I primarily study time-varying applications with focus on spectral analysis # # ## R Background # # - Follow this [link](https://github.com/kcraath/2018INFINITI_Workshop) for instructions on downloading R and RStudio and the overview or introductory paragraph describing the overall aim of this workshop. # # ## Presentation Focus # # - Paper I wrote called "Time-varying wavelet-based applications for evaluating the Water-Energy Nexus", submitted to Energy Economics Special Issue: Energy Finance Frontiers and Future Development. SSRN [link](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3123858). #
Welcome and Introduction .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="GMQMS5ek8aB5" outputId="9d11e9de-bc7d-4976-b74e-f3fb0a883cc8" # !pip install torch==1.8.0+cu111 torchvision==0.9.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html # + id="Cizjk9AQ4F1r" # # !pip install torch==1.8.0 # + colab={"base_uri": "https://localhost:8080/"} id="T69X3oo5SGiH" outputId="4c6b5b3c-06fb-4c09-e89b-d73039552e2f" # !gdown --id 1Y8EOFLIRCcKpe_e0pO03yCAosTRjRMtC # + id="WCB4dhcAS0FY" # !unzip -q /content/UTKFace.zip -d data # + id="kWaagvdOS6A1" import os # + colab={"base_uri": "https://localhost:8080/"} id="ri_iti5uTA5f" outputId="46723037-2107-43c8-b8ee-e23825ada274" n = len(os.listdir('/content/data/UTKFace')) n # + colab={"base_uri": "https://localhost:8080/"} id="LqiQ9pVz5Hmd" outputId="332b59cb-8fcb-4652-d799-a29715e75a77" # !ls # + id="-YozGLLB0Svr" import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import numpy as np import torchvision from torchvision import datasets, models, transforms from torch.utils.data import Dataset, DataLoader from PIL import Image import matplotlib.pyplot as plt import time import os import copy import random # + [markdown] id="MQbF-aVYqfRc" # # Gender # + id="JEdPHLSsqhnZ" class AgeDataset(Dataset): def __init__(self, img_files, img_dir): self.img_dir = img_dir self.img_files = img_files self.image_transforms = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) def __len__(self): return len(self.img_files) def __getitem__(self, index): img_name = self.img_files[index] gender = int(img_name.split("_")[1]) image = Image.open(os.path.join(self.img_dir, img_name)) image = self.image_transforms(image) gender 
= torch.tensor(gender, dtype=torch.long) return image, gender # + colab={"base_uri": "https://localhost:8080/"} id="J7CoAx9EnRtf" outputId="c177b3e3-c517-4f6e-e6be-0ff2440d33eb" img_dir = "data/UTKFace/" train_val_ratio = 0.3 img_filenames = os.listdir(img_dir) random.shuffle(img_filenames) split = int(train_val_ratio * len(img_filenames)) train_files = img_filenames[split:] val_files = img_filenames[:split] print(len(train_files), len(val_files)) train_set = AgeDataset(train_files, img_dir) val_set = AgeDataset(val_files, img_dir) train_dataloader = DataLoader(train_set, batch_size=128, shuffle=True) val_dataloader = DataLoader(val_set, batch_size=128, shuffle=False) # + id="-34RJ5Qrpq6-" colab={"base_uri": "https://localhost:8080/"} outputId="15c71276-d245-444b-bd3a-6011c8e91aaa" for img, label in train_dataloader: print(img, label) break # + id="-RS6-dGCxvZ2" def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model # + colab={"base_uri": "https://localhost:8080/"} id="htLgyoMA5VN0" outputId="779c378f-91a1-48b7-93ce-d66974d822d0" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device # + colab={"base_uri": "https://localhost:8080/"} id="pzfC_tkwzjWt" outputId="7c7cc724-56a5-4218-d46d-6f4eb15eb4a5" model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) for param in model.parameters(): param.requires_grad = False num_ftrs = model.fc.in_features model.fc = nn.Linear(num_ftrs, 2) model = model.to(device) dataloaders = {'train': train_dataloader, 'val': val_dataloader} dataset_sizes = {'train': len(train_files), 'val': len(val_files)} # + colab={"base_uri": "https://localhost:8080/"} id="wYGjItzt5ZLq" outputId="f6490d07-0b1a-4bca-cab4-e5a897098bae" # warm up criterion = 
nn.CrossEntropyLoss() # Observe that only parameters of final layer are being optimized as # opposed to before. # optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9) optimizer = optim.Adam(model.parameters(), lr=0.001) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=1) # + colab={"base_uri": "https://localhost:8080/"} id="xRfQ4xet9Tyy" outputId="4738c7d3-715f-4ac3-a56f-baa218dfc519" # fine-tuning for param in model.parameters(): param.requires_grad = True optimizer = optim.Adam(model.parameters(), lr=0.0003) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=10) # + [markdown] id="Rs7BL-UVVREy" # ## save model # + id="GtmxTqm5L6Hg" os.mkdir("./gender_shufflenetv2") model_path = "./gender_shufflenetv2/model.pt" torch.save(model.state_dict(), model_path) # + [markdown] id="tmSM2FL2VV-s" # ## load and inference # + id="OxcGydpcNKve" def eval_model(model, criterion): since = time.time() for epoch in range(1): # Each epoch has a training and validation phase for phase in ['val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) print() time_elapsed = time.time() - since print('Val complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) # + colab={"base_uri": "https://localhost:8080/"} id="IgUwic2QMV12" outputId="4b0bb9ad-f23c-4b71-f87f-7e08bd62caac" new_model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) num_ftrs = new_model.fc.in_features new_model.fc = nn.Linear(num_ftrs, 2) new_model.load_state_dict(torch.load(model_path)) new_model = new_model.to(device) criterion = nn.CrossEntropyLoss() eval_model(new_model, criterion) # + [markdown] id="j3-kq3YJY6u-" # # Age # + id="o-EKrMy1ZGUf" class AgeDataset(Dataset): def __init__(self, img_files, img_dir): self.img_dir = img_dir self.img_files = img_files self.image_transforms = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) def __len__(self): return len(self.img_files) def __getitem__(self, index): img_name = self.img_files[index] age = float(img_name.split("_")[0]) / 116 image = Image.open(os.path.join(self.img_dir, img_name)) image = self.image_transforms(image) return image, age # + colab={"base_uri": "https://localhost:8080/"} id="ZhB3UV6QZGUk" outputId="880cc22b-37f0-4bb1-be07-a2d9605664dc" img_dir = "data/UTKFace/" train_val_ratio = 0.3 img_filenames = os.listdir(img_dir) random.shuffle(img_filenames) split = 
int(train_val_ratio * len(img_filenames)) train_files = img_filenames[split:] val_files = img_filenames[:split] print(len(train_files), len(val_files)) train_set = AgeDataset(train_files, img_dir) val_set = AgeDataset(val_files, img_dir) train_dataloader = DataLoader(train_set, batch_size=128, shuffle=True) val_dataloader = DataLoader(val_set, batch_size=128, shuffle=False) # + colab={"base_uri": "https://localhost:8080/"} id="n5JAi7HAZGUp" outputId="6eaee2c5-0ac2-412b-ce12-7cf97d9d899a" for img, label in train_dataloader: print(img, label) break # + id="HuWEajZMuBxy" # import torchvision # def imshow(inp, title=None): # """Imshow for Tensor.""" # inp = inp.numpy().transpose((1, 2, 0)) # mean = np.array([0.485, 0.456, 0.406]) # std = np.array([0.229, 0.224, 0.225]) # inp = std * inp + mean # inp = np.clip(inp, 0, 1) # plt.imshow(inp) # if title is not None: # plt.title(title) # plt.pause(0.001) # pause a bit so that plots are updated # # Get a batch of training data # inputs, classes = next(iter(train_dataloader)) # # Make a grid from batch # out = torchvision.utils.make_grid(inputs) # imshow(out, title=[x.item() for x in classes]) # + id="CT8-OuXNZGUv" def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_mae = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] # epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Mae: {:.4f}'.format( phase, epoch_loss, epoch_loss)) # deep copy the model if phase == 'val' and epoch_loss < best_mae: best_mae = epoch_loss best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Mae: {:4f}'.format(best_mae)) # load best model weights model.load_state_dict(best_model_wts) return model # + colab={"base_uri": "https://localhost:8080/"} id="b1-R1YXhZGUw" outputId="a39daf39-a2b0-4e13-c691-1b5d15242aa2" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device # + colab={"base_uri": "https://localhost:8080/"} id="30oW5WL5ZGUy" outputId="072032fe-e365-4c0d-bde0-df634630f436" model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) for param in model.parameters(): param.requires_grad = False num_ftrs = model.fc.in_features model.fc = nn.Linear(num_ftrs, 1) model = model.to(device) dataloaders = {'train': train_dataloader, 'val': val_dataloader} dataset_sizes = {'train': len(train_files), 'val': len(val_files)} # + colab={"base_uri": "https://localhost:8080/"} id="1PhIGkdkZGUy" outputId="a32ab6a8-ea94-4c8b-d9a8-0b587b1ccbf7" # warm up criterion = nn.L1Loss() 
# Observe that only parameters of final layer are being optimized as # opposed to before. # optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9) optimizer = optim.Adam(model.parameters(), lr=0.001) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=1) # + colab={"base_uri": "https://localhost:8080/"} id="qqeAErfdZGU0" outputId="64b01fb8-75ad-48df-f843-64810dfb9653" # fine-tuning for param in model.parameters(): param.requires_grad = True # optimizer = optim.SGD(model.parameters(), lr=0.0003, momentum=0.9) optimizer = optim.Adam(model.parameters(), lr=0.0003) exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=10) # + [markdown] id="WdWogqqWZGU2" # ## save model # + id="Ca-1WCXCZGU3" os.mkdir("./age_shufflenetv2") model_path = "./age_shufflenetv2/model.pt" torch.save(model.state_dict(), model_path) # + [markdown] id="OH85_UBfZGU4" # ## load and inference # + id="2FZa92cTZGU4" def eval_model(model, criterion): since = time.time() # best_model_wts = copy.deepcopy(model.state_dict()) # best_mae = 0.0 for epoch in range(1): # print('Epoch {}/{}'.format(epoch, num_epochs - 1)) # print('-' * 10) # Each epoch has a training and validation phase for phase in ['val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. 
for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # statistics running_loss += loss.item() * inputs.size(0) epoch_loss = running_loss / dataset_sizes[phase] print('{} Loss: {:.4f} Mae: {:.4f}'.format( phase, epoch_loss, epoch_loss)) print() time_elapsed = time.time() - since print('Val complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) # + colab={"base_uri": "https://localhost:8080/"} id="BPxdNlyvZGU5" outputId="510fd9be-7d37-4b51-b2d8-29e14c1714a6" new_model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) num_ftrs = new_model.fc.in_features new_model.fc = nn.Linear(num_ftrs, 1) new_model.load_state_dict(torch.load(model_path)) new_model = new_model.to(device) criterion = nn.L1Loss() eval_model(new_model, criterion) # + id="A_x0iMiceyjE" # + [markdown] id="KYGPpCD2juQD" # # download the model file # + colab={"base_uri": "https://localhost:8080/"} id="gPYmFEM_jyb5" outputId="9ff5560b-31ad-4618-bd40-7b205270cda8" # !pwd # + colab={"base_uri": "https://localhost:8080/"} id="ZTGnQEzUkSPF" outputId="03ac84d9-dca0-431f-d865-bd3da4177546" # !ls # + id="pcwR1E-HkUXs" # !cp -R "./age_shufflenetv2" "/content/drive/MyDrive/Colab Notebooks/" # + id="CDxCHLwzkf5Y" # !cp -R "./gender_shufflenetv2" "/content/drive/MyDrive/Colab Notebooks/" # + colab={"base_uri": "https://localhost:8080/"} id="IT1c-wyOkl7u" outputId="139992fb-c632-42d8-b08f-0bfa2e286347" # !ls # + colab={"base_uri": "https://localhost:8080/"} id="2URXVAgQpg-v" outputId="ff30acfa-c910-410a-f5ca-53d8717e808d" # !gdown --id "1nXHFCYHm9bQ3shB_4INU5c_VuMPckPZh" # + colab={"base_uri": "https://localhost:8080/"} id="U99y-oAfp-Oh" outputId="06853a80-c92a-48a6-94a0-07830559fd3a" # %cd /content/drive/MyDrive/Colab Notebooks/data # + 
colab={"base_uri": "https://localhost:8080/"} id="YdOvd7xbrTQd" outputId="5ade9702-b667-4477-e74c-7131b011b035" # %cd .. # + colab={"base_uri": "https://localhost:8080/"} id="AQHy33yNrXzn" outputId="f3f459a9-117a-400a-d717-cf6ee84c76b6" # %cd /content # + colab={"base_uri": "https://localhost:8080/"} id="ml-SnvCNraWt" outputId="71ea47df-4f8b-4699-d203-5d07ce912c5d" # !ls # + id="A_UNvg9asGhM" TEST_DIR = "/content/drive/.shortcut-targets-by-id/1nXHFCYHm9bQ3shB_4INU5c_VuMPckPZh/data/" # + [markdown] id="B4TCzE_5rjy_" # ## test age # + id="WFKnlQQkrusV" class AgeDataset(Dataset): def __init__(self, img_files, img_dir): self.img_dir = img_dir self.img_files = img_files self.image_transforms = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) def __len__(self): return len(self.img_files) def __getitem__(self, index): img_name = self.img_files[index] age = float(img_name.split("_")[0]) / 116 image = Image.open(os.path.join(self.img_dir, img_name)) image = self.image_transforms(image) return image, age # + colab={"base_uri": "https://localhost:8080/"} id="wXhQvHxKrusc" outputId="613dda1a-c8b6-47ab-f6ef-7235b2428fe0" img_dir = TEST_DIR # train_val_ratio = 0.3 img_filenames = os.listdir(img_dir) # random.shuffle(img_filenames) # split = int(train_val_ratio * len(img_filenames)) val_files = img_filenames print(len(val_files)) val_set = AgeDataset(val_files, img_dir) val_dataloader = DataLoader(val_set, batch_size=64, shuffle=False) # + colab={"base_uri": "https://localhost:8080/"} id="7jF0MqWQsjUf" outputId="84453eb0-28fd-44a4-d6f7-8577d633a237" dataloaders = {'val': val_dataloader} dataset_sizes = {'val': len(val_files)} def eval_model(model, criterion): since = time.time() # best_model_wts = copy.deepcopy(model.state_dict()) # best_mae = 0.0 for epoch in range(1): # print('Epoch {}/{}'.format(epoch, num_epochs - 1)) # print('-' * 10) # Each epoch has a training and 
validation phase for phase in ['val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # statistics running_loss += loss.item() * inputs.size(0) epoch_loss = running_loss / dataset_sizes[phase] print('{} Loss: {:.4f} Mae: {:.4f}'.format( phase, epoch_loss, epoch_loss)) print() time_elapsed = time.time() - since print('Val complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) new_model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) num_ftrs = new_model.fc.in_features new_model.fc = nn.Linear(num_ftrs, 1) model_path = "./age_shufflenetv2/model.pt" new_model.load_state_dict(torch.load(model_path)) new_model = new_model.to(device) criterion = nn.L1Loss() eval_model(new_model, criterion) # + id="Ho7W9kZCtVuo" # + [markdown] id="O9V1Ep7QtZNk" # ## test gender # + id="nfr1JqRcta8n" class AgeDataset(Dataset): def __init__(self, img_files, img_dir): self.img_dir = img_dir self.img_files = img_files self.image_transforms = transforms.Compose([ transforms.Resize((224,224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) def __len__(self): return len(self.img_files) def __getitem__(self, index): img_name = self.img_files[index] gender = int(img_name.split("_")[1]) image = Image.open(os.path.join(self.img_dir, img_name)) image = self.image_transforms(image) gender = torch.tensor(gender, dtype=torch.long) return image, gender # + colab={"base_uri": "https://localhost:8080/"} id="-KR6sXv3tmUP" outputId="c984e084-0eca-4061-f773-738a532dd80c" img_dir = TEST_DIR img_filenames = 
os.listdir(img_dir) val_files = img_filenames print(len(val_files)) val_set = AgeDataset(val_files, img_dir) val_dataloader = DataLoader(val_set, batch_size=64, shuffle=False) # + colab={"base_uri": "https://localhost:8080/"} id="MKK8TiBVtyEQ" outputId="3870ba78-0655-4ae4-ed25-3f2eafdd10e4" dataloaders = {'val': val_dataloader} dataset_sizes = {'val': len(val_files)} def eval_model(model, criterion): since = time.time() for epoch in range(1): # Each epoch has a training and validation phase for phase in ['val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) print() time_elapsed = time.time() - since print('Val complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) new_model = torch.hub.load('pytorch/vision:v0.9.0', 'shufflenet_v2_x1_0', pretrained=True) num_ftrs = new_model.fc.in_features new_model.fc = nn.Linear(num_ftrs, 2) model_path = "./gender_shufflenetv2/model.pt" new_model.load_state_dict(torch.load(model_path)) new_model = new_model.to(device) criterion = nn.CrossEntropyLoss() eval_model(new_model, criterion) # + id="QHLzYjgkuMAp"
notebook/Age_and_Gender_shufflenetv2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # WeakAlign demo notebook
#
# This notebook shows how to run a trained model on a given image pair

# ## Imports

# +
from __future__ import print_function, division
import os
from os.path import exists
import argparse
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric
from data.pf_dataset import PFDataset
from data.download_datasets import download_PF_pascal
from image.normalization import NormalizeImageDict, normalize_image
from util.torch_util import BatchTensorToVars, str_to_bool
from geotnf.transformation import GeometricTnf
# NOTE(review): the star import below is presumably what brings ``np`` and
# ``Variable`` into scope for the cells further down — confirm against
# geotnf/point_tnf.py.
from geotnf.point_tnf import *
import matplotlib.pyplot as plt
from skimage import io
import warnings
from torchvision.transforms import Normalize
from collections import OrderedDict
import torch.nn.functional as F
warnings.filterwarnings('ignore')
# -

from model.loss import TransformedGridLoss, WeakInlierCount, TwoStageWeakInlierCount

# ## Parameters

# +
# Select one of the following models:
# cnngeo_vgg16, cnngeo_resnet101, proposed_resnet101
model_selection = 'proposed_resnet101'

# Empty path == "not used"; the checkpoint-loading cell below branches on
# whether the combined affine+TPS checkpoint path is set.
model_aff_path = ''
model_tps_path = ''
model_aff_tps_path = ''

if model_selection=='cnngeo_vgg16':
    model_aff_path = 'trained_models/trained_models/cnngeo_vgg16_affine.pth.tar'
    model_tps_path = 'trained_models/trained_models/cnngeo_vgg16_tps.pth.tar'
    feature_extraction_cnn = 'vgg'
elif model_selection=='cnngeo_resnet101':
    model_aff_path = 'trained_models/trained_models/cnngeo_resnet101_affine.pth.tar'
    model_tps_path = 'trained_models/trained_models/cnngeo_resnet101_tps.pth.tar'
    feature_extraction_cnn = 'resnet101'
elif model_selection=='proposed_resnet101':
    model_aff_tps_path = 'trained_models/weakalign_resnet101_affine_tps.pth.tar'
    feature_extraction_cnn = 'resnet101'

# source_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2008_006325.jpg'
# target_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2010_004954.jpg'
# source_image_path='datasets/1.jpg'
# target_image_path='datasets/2.jpg'
# source_image_path='datasets/3.JPEG'
# target_image_path='datasets/4.JPEG'
# -

# ## Load models

# +
use_cuda = torch.cuda.is_available()

model = TwoStageCNNGeometric(use_cuda=use_cuda,
                             return_correlation=True,
                             feature_extraction_cnn=feature_extraction_cnn)

# load pre-trained model
# The 'vgg' -> 'model' key rename below rewrites checkpoint parameter names
# before copying — presumably for compatibility with checkpoints saved under
# an older attribute name; confirm against the training code.
if model_aff_tps_path!='':
    # Single checkpoint containing both stages (affine + TPS regressors).
    checkpoint = torch.load(model_aff_tps_path, map_location=lambda storage, loc: storage)
    checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
    # Copy parameters tensor-by-tensor into the live model's state dict.
    for name, param in model.FeatureExtraction.state_dict().items():
        model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name])
    for name, param in model.FeatureRegression.state_dict().items():
        model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name])
    for name, param in model.FeatureRegression2.state_dict().items():
        model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name])
else:
    # Two separate checkpoints: affine stage ...
    checkpoint_aff = torch.load(model_aff_path, map_location=lambda storage, loc: storage)
    checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()])
    for name, param in model.FeatureExtraction.state_dict().items():
        model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name])
    for name, param in model.FeatureRegression.state_dict().items():
        model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name])

    # ... and TPS stage (its 'FeatureRegression' weights go into the model's
    # second-stage regressor, FeatureRegression2).
    checkpoint_tps = torch.load(model_tps_path, map_location=lambda storage, loc: storage)
    checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()])
    for name, param in model.FeatureRegression2.state_dict().items():
        model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' + name])
# -

# ## Create image transformers

tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda)
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)

# ## Load and preprocess images

flowers = ['datasets/1.jpg', 'datasets/2.jpg']
dogs = ['datasets/3.JPEG', 'datasets/4.JPEG']
armours = ['datasets/5.JPEG', 'datasets/6.JPEG']

# if not exists(source_image_path):
#     download_PF_pascal('datasets/proposal-flow-pascal/')
source_image_path = dogs[0]
target_image_path = flowers[1]

# +
# Identity-affine resampler used only to resize inputs to the CNN resolution.
resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False)
normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

def preprocess_image(image):
    """Turn an H x W x C image array into a normalized 1 x C x 240 x 240 Variable.

    Scales pixel values to [0, 1], resizes with ``resizeCNN`` and applies
    ``normalize_image``; module-level globals ``resizeCNN``/``normalize_image``
    are used directly.
    """
    # convert to torch Variable (HWC uint8 -> 1 x C x H x W float in [0, 1])
    image = np.expand_dims(image.transpose((2,0,1)),0)
    image = torch.Tensor(image.astype(np.float32)/255.0)
    image_var = Variable(image,requires_grad=False)

    # Resize image using bilinear sampling with identity affine tnf
    image_var = resizeCNN(image_var)

    # Normalize image
    image_var = normalize_image(image_var)

    return image_var
# -

# +
source_image = io.imread(source_image_path)
target_image = io.imread(target_image_path)

source_image_var = preprocess_image(source_image)
target_image_var = preprocess_image(target_image)

if use_cuda:
    source_image_var = source_image_var.cuda()
    target_image_var = target_image_var.cuda()

batch = {'source_image': source_image_var, 'target_image':target_image_var}

# Resampler used later to bring warped outputs back to the target image size.
resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda)
# -

type(batch['source_image'])

# ## Evaluate model

# +
model.eval()

# Evaluate model: the two-stage model returns affine and affine+TPS parameters
# plus the affine-stage correlation maps (return_correlation=True above).
#theta_aff,theta_aff_tps=model(batch)
theta_aff,theta_aff_tps,corr_aff,corr_aff_tps=model(batch)
# -

print("theta_aff : {}, theta_aff_tps : {}".format(theta_aff.data, theta_aff_tps.data))

arg_groups = {'tps_grid_size': 3, 'tps_reg_factor': 0.2, 'normalize_inlier_count': True, 'dilation_filter': 0, 'use_conv_filter': False}

inliersAffine = WeakInlierCount(geometric_model='affine',**arg_groups)
#inliersTps = WeakInlierCount(geometric_model='tps',**arg_groups['weak_loss'])
inliersComposed = TwoStageWeakInlierCount(use_cuda=use_cuda,**arg_groups)

# Inlier scores for the composed (affine+TPS) and affine-only alignments.
inliers_comp = inliersComposed(matches=corr_aff, theta_aff=theta_aff, theta_aff_tps=theta_aff_tps)
inliers_aff = inliersAffine(matches=corr_aff, theta=theta_aff)

# ## Compute warped images

# +
def affTpsTnf(source_image, theta_aff, theta_aff_tps, use_cuda=use_cuda):
    """Warp ``source_image`` by the composition of the TPS and affine transforms.

    Builds the TPS sampling grid, applies the 2x3 affine parameters
    ``theta_aff`` to the grid's (X, Y) coordinates, then samples the source
    image at the transformed grid with ``F.grid_sample``.
    """
    tpstnf = GeometricTnf(geometric_model = 'tps',use_cuda=use_cuda)
    sampling_grid = tpstnf(image_batch=source_image, theta_batch=theta_aff_tps, return_sampling_grid=True)[1]
    X = sampling_grid[:,:,:,0].unsqueeze(3)
    Y = sampling_grid[:,:,:,1].unsqueeze(3)
    # Apply the affine map [a b c; d e f] to every grid coordinate.
    Xp = X*theta_aff[:,0].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,1].unsqueeze(1).unsqueeze(2)+theta_aff[:,2].unsqueeze(1).unsqueeze(2)
    Yp = X*theta_aff[:,3].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,4].unsqueeze(1).unsqueeze(2)+theta_aff[:,5].unsqueeze(1).unsqueeze(2)
    sg = torch.cat((Xp,Yp),3)
    warped_image_batch = F.grid_sample(source_image, sg)
    return warped_image_batch

warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3))
warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff,theta_aff_tps)
# -

# ## Display

# +
# Un-normalize images and convert to numpy (1xCxHxW tensor -> HxWxC array)
warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()

N_subplots = 4
fig, axs = plt.subplots(1,N_subplots)
axs[0].imshow(source_image)
axs[0].set_title('src')
axs[1].imshow(target_image)
axs[1].set_title('tgt')
axs[2].imshow(warped_image_aff_np)
axs[2].set_title('aff')
axs[3].imshow(warped_image_aff_tps_np)
axs[3].set_title('aff+tps')
for i in range(N_subplots):
    axs[i].axis('off')

fig.set_dpi(150)
plt.show()
# -

print("inliers_aff : {} \n inliers_comp : {} \n total {}: ".format(inliers_aff.data, inliers_comp.data, inliers_aff.data+inliers_comp.data))
demo_notebook-counter_check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Wrangling Challenge
# ### Pull and manipulate the API data
#
# The point of this exercise is to try data enrichment with data from external APIs. We are going to take data about car crashes in Monroe County, Indiana from 2003 to 2015 and try to figure out the weather during the accident and how many bars there are in the area. We will work with two different APIs during this challenge:
#
# - Foursquare API
# - Visual Crossing API
#
# We will try to find correlations between the severity of crash and weather/number of bars in the area. To indicate the severity of a crash, we will use column `Injury Type`.

# ## Data
#
# The data for this exercise can be found [here](https://drive.google.com/file/d/1_KF9oIJV8cB8i3ngA4JPOLWIE_ETE6CJ/view?usp=sharing).
#
# Just run the cells below to get your data ready. Little help from us.
#

import pandas as pd
from pandas.io.json import json_normalize
import os
import foursquare
import requests

data = pd.read_csv("data/monroe-county-crash-data2003-to-2015.csv", encoding="unicode_escape")

# ========================
# preparing data: keep only crashes with usable coordinates and build a
# single 'lat,lon' string column that both APIs accept.
data.dropna(subset=['Latitude', 'Longitude'], inplace=True)
data['ll'] = data['Latitude'].astype(str) + ',' + data['Longitude'].astype(str)
data = data[data['ll'] != '0.0,0.0']

print(data.shape)
data.head(20)

data['Injury Type'].value_counts()

# # Foursquare API
#
# Foursquare API documentation is [here](https://developer.foursquare.com/)
#
# 1. Start a foursquare application and get your keys.
# 2. For each crash, create the function **get_venues** that will pull bars in the radius of 5km around the crash
#
# #### example
# `get_venues('48.146394, 17.107969')`
#
# 3. Find a relationship (if there is any) between number of bars in the area and severity of the crash.
#
# HINTs:
# - check out python package "foursquare" (no need to send HTTP requests directly with library `requests`)
# - **categoryId** for bars and nightlife needs to be found in the [foursquare API documentation](https://developer.foursquare.com/docs/api-reference/venues/search/)

# set the keys
# FIX: the original read os.environ[" "] — a placeholder key that always
# raises KeyError. Export these two variables before running the notebook.
foursquare_id = os.environ["FOURSQUARE_CLIENT_ID"]
foursquare_secret = os.environ["FOURSQUARE_CLIENT_SECRET"]
VERSION = '20200130'

# +
# Construct the client object
client = foursquare.Foursquare(client_id=foursquare_id, client_secret=foursquare_secret, redirect_uri='http://fondu.com/oauth/authorize')

# 'Bar' CatID is 4bf58dd8d48988d116941735
# Sanity check: search for bars around one crash location.
bars = client.venues.search(params={'query': 'Bar', 'll': '39.15920668,-86.52587356', 'radius': 5000, 'limit': 50})
bar_list = [bar['name'] for bar in bars['venues']]
# bar_list
# -

def get_venues(coords):
    """Return the number of bars within 5 km of ``coords`` ('lat,lon' string).

    Queries the Foursquare venue-search endpoint through the module-level
    ``client``; capped at 50 results per the API's ``limit`` parameter.
    """
    bars = client.venues.search(params={'query': 'Bar', 'll': coords, 'radius': 5000, 'limit': 50})
    return len(bars['venues'])

incapacitating = '39.19927216,-86.63702393'
get_venues(incapacitating)

# BUG FIX: the original loop did ``data['count_bars'] = get_venues(element)``
# for every element, which assigns a *scalar* to the whole column on each
# iteration — every row ended up with the count of the last crash only.
# Compute one count per row instead (note: still one API call per crash).
data['count_bars'] = data['ll'].apply(get_venues)

df = data[['Injury Type', 'count_bars']]
df.groupby('Injury Type').agg('mean')

# # Visual Crossing API
#
# Visual Crossing API documentation is [here](https://www.visualcrossing.com/resources/documentation/)
#
# 1. Sign up for FREE api key if you haven't done that before.
# 2. For each crash, get the weather for the location and date.
# 3. Find a relationship between the weather and severity of the crash.
#
# Hints:
#
# * randomly sample only 250 or so (due to API limits), or pull weather only for smaller sample of crashes
# * for sending HTTP requests check out "requests" library [here](http://docs.python-requests.org/en/master/)

# +
import requests
import time
import numpy as np

# FIX: the original read os.environ["<>"] — a placeholder key that always
# raises KeyError. Export VISUAL_CROSSING_API_KEY before running.
api_key = os.environ["VISUAL_CROSSING_API_KEY"]
# -

data[:1]

# +
df = data.copy()
# Build an ISO-style YYYY-MM-DD date string the Visual Crossing timeline API expects.
df['Date'] = df['Year'].astype(str) + '-' + df['Month'].astype(str).str.zfill(2) + '-' + df['Day'].astype(str).str.zfill(2)

date1 = df['Date'][0]
location = df['ll'][0]
url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{location}/{date1}?unitGroup=metric&key={api_key}&include=obs%2Calerts'
res = requests.get(url)
weather_snapshot = res.json()
weather_snapshot
# -

# BUG FIX: the rows kept after dropna/filtering do not have a contiguous
# 0..n-1 index, so the positional loop below would mis-address (or miss) rows
# via .loc — reset the index so .loc[row, ...] matches range(len(...)).
weatherdf = df[['Injury Type', 'll', 'Date']].copy().reset_index(drop=True)
weatherdf['temp'] = np.nan
weatherdf['precip'] = np.nan
weatherdf['snow'] = np.nan
weatherdf['visibility'] = np.nan
weatherdf

for row in range(len(weatherdf)):
    try:
        # grab location and date from the crash information
        location = weatherdf.loc[row, 'll']
        date1 = weatherdf.loc[row, 'Date']

        # run get request
        url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{location}/{date1}?unitGroup=metric&key={api_key}&include=obs%2Calerts'
        res = requests.get(url)
        weather_snapshot = res.json()

        # index the weather api's json for information within date set
        weather_snapshot = weather_snapshot['days'][0]

        # apply weather info into df
        weatherdf.loc[row, 'temp'] = weather_snapshot['temp']
        weatherdf.loc[row, 'precip'] = weather_snapshot['precip']
        weatherdf.loc[row, 'snow'] = weather_snapshot['snow']
        weatherdf.loc[row, 'visibility'] = weather_snapshot['visibility']
    # FIX: was a bare ``except: continue`` which also swallowed
    # KeyboardInterrupt; skip only the failures the API can realistically
    # produce (bad/over-quota response, missing fields).
    except (KeyError, IndexError, ValueError, requests.RequestException):
        continue

# (A leftover scratch cell that looped over undefined names ``dfloc`` /
# ``dftest`` — an immediate NameError — was removed here.)

# +
# Example: single-day weather lookup for the first crash.
# NOTE(review): ``df['Date'][0]`` indexes by label 0, which assumes that row
# survived the coordinate filtering above — same assumption as the original.
df = data.copy()
df['Date'] = df['Year'].astype(str) + '-' + df['Month'].astype(str).str.zfill(2) + '-' + df['Day'].astype(str).str.zfill(2)
date1 = df['Date'][0]
location = df['ll'][0]
url = f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{location}/{date1}?unitGroup=metric&key={api_key}&include=obs%2Calerts'
res = requests.get(url)
weather_snapshot = res.json()
weather_snapshot = weather_snapshot['days'][0]
weather_snapshot['temp']
APIs and Other Data Types/Data Wrangling Challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit
#     name: python3
# ---

# +
#ADGSTUDIOS 2021 - ADGGOOGLEMAPS
# FIX: a ``from __future__`` import must be the first statement in the module;
# the original placed it after the other imports, which is a SyntaxError when
# this notebook is exported/run as a plain .py script.
from __future__ import print_function

import googlemaps
import requests
import json
import folium


class ADGGoogleMaps:
    """Geocode a street address via the Google Maps API and render it with folium.

    Parameters:
        google_api_secret: Google Maps API key string.
        address: free-form street address to geocode.

    The address is geocoded eagerly in ``__init__`` so every method can work
    from the stored result.
    """

    def __init__(self, google_api_secret, address):
        self.google_api_secret = google_api_secret
        self.address = address
        self.gmaps = googlemaps.Client(key=self.google_api_secret)
        # Raw geocoding response, kept both as Python objects and as a JSON
        # string (findvalues walks the JSON form).
        self.geocode_result = self.gmaps.geocode(self.address)
        self.json_format = json.dumps(self.geocode_result)
        # NOTE: kept for backward compatibility; nothing in this class uses it.
        self.results = []

    def GetCordsFromAddress(self):
        """Return the geocoded coordinates of ``self.address`` as a [lat, lng] list."""
        cords = self.findvalues('location', self.json_format)
        # First 'location' match is the primary geocoding result.
        locationdict = cords[0]
        return list(locationdict.values())

    def findvalues(self, id, json_repr):
        """Collect every value stored under key ``id`` anywhere in ``json_repr``.

        ``json_repr`` is a JSON string; an ``object_hook`` fires for every
        decoded object, so the whole document is walked without explicit
        recursion. (The parameter name ``id`` shadows the built-in but is kept
        to preserve the public signature.)
        """
        results = []

        def _decode_dict(a_dict):
            try:
                results.append(a_dict[id])
            except KeyError:
                pass
            return a_dict

        json.loads(json_repr, object_hook=_decode_dict)
        return results

    def ReturnMap(self, size):
        """Return a folium map centred on the address; ``size`` is the initial zoom level."""
        mapit = folium.Map(location=self.GetCordsFromAddress(), zoom_start=size)
        return mapit
# -
ADGGoogleClass.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:indoor-recognition] # language: python # name: conda-env-indoor-recognition-py # --- import argparse import os import cv2 import numpy as np from tqdm import tqdm from preprocessing import parse_csv_annotations from utils import draw_boxes from frontend import YOLO import json import matplotlib.pyplot as plt # %matplotlib inline config_path = 'config.json' with open(config_path) as config_buffer: config = json.load(config_buffer) yolo = YOLO(backend = config['model']['backend'], input_size = config['model']['input_size'], labels = config['model']['labels'], max_box_per_image = config['model']['max_box_per_image'], anchors = config['model']['anchors']) weights_path = 'tiny_yolo_indoors.h5' yolo.load_weights(weights_path) # + image = cv2.imread('/media/rodsnjr/Files/Datasets/indoor_datasets/Images/6-10.jpg') boxes = yolo.predict(image) image = draw_boxes(image, boxes, config['model']['labels']) print(boxes) plt.imshow(image) # -
Generate Results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Neptune.ai # # > Integration with [neptune.ai](https://www.neptune.ai). # # > [Track fastai experiments](https://docs.neptune.ai/integrations/fast_ai.html) like in this example project. # ## Install # `pip install fastai_neptune` # ## Registration # 1. Create **free** account: [neptune.ai/register](https://neptune.ai/register). # 2. Export API token to the environment variable (more help [here](https://docs.neptune.ai/python-api/tutorials/get-started.html#copy-api-token)). In your terminal run: # # ``` # export NEPTUNE_API_TOKEN='YOUR_LONG_API_TOKEN' # ``` # # or append the command above to your `~/.bashrc` or `~/.bash_profile` files (**recommended**). More help is [here](https://docs.neptune.ai/python-api/tutorials/get-started.html#copy-api-token). # # ## Installation # 1. You need to install neptune-client. In your terminal run: # # ``` # pip install neptune-client # ``` # # or (alternative installation using conda). In your terminal run: # # ``` # conda install neptune-client -c conda-forge # ``` # 2. Install [psutil](https://psutil.readthedocs.io/en/latest/) to see hardware monitoring charts: # # ``` # pip install psutil # ``` # ## How to use # # Key is to call `neptune.init()` before you create `cnn_learner()`. A new experiment will be created before model fitting. # # Use `NeptuneCallback` in your `Learner`, like this: # # ``` # from fastai.callback.neptune import NeptuneCallback # # neptune.init('USERNAME/PROJECT_NAME') # specify your project here # # learn = cnn_learner(dls, resnet18, metrics=error_rate, cbs=NeptuneCallback()) # learn.fit_one_cycle(1) # ``` #
nbs/index.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from utils.preprocessing import * from utils.config import * from utils.model import * # loading dataset corpus = load_dataset() # spliting X and y X, y = splitXY(corpus) # cleaning X clean_X = cleanX(X) # building vocabulary vocab = buildVocab(clean_X) vocab_size = len(vocab) # tokenizing and padding X_final = tokenizeAndPadd(clean_X, vocab, vocab_size) embedding_matrix = gloveEmbedding(vocab) x = SentimentAnalysis(vocab, vocab_size, embedding_matrix) model = x.build_model() while True: input_seq = input("Enter String ") prediction = x.predict(input_seq, model) print('Input sentence:', input_seq) print('Score', prediction) ans = input("Continue? [Y/n]") if ans and ans.lower().startswith('n'): break
Inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Models with parallel faults and tthree layers # This example tries to visualize how simply _GeoMeshPy_ can export results from _Gempy_. <br> # This notebook include a complicated model with tthree layers and four fault. <br> # To see how _Gempy_ works, please see https://github.com/cgre-aachen/gempy # + import matplotlib.pyplot as plt import copy import math import gempy as gp import numpy as np from numpy import savetxt geo_model = gp.create_model('3_F') gp.init_data(geo_model, [0., 1000., 0., 1000., -1600., -1000.], [60, 60, 60], path_i = 'Para_F_interfaces.csv', path_o = 'Para_F_orientations.csv'); gp.map_series_to_surfaces(geo_model, {"Fault1_series":'fault1', "Fault2_series":'fault2', "Fault3_series":'fault3', "Fault4_series":'fault4', "Strati_series":('Cap_rock', 'Reservoir')}) geo_model.set_is_fault(['Fault1_series', 'Fault2_series', 'Fault3_series', 'Fault4_series'], change_color=False) gp.set_interpolator(geo_model, compile_theano=True, theano_optimizer='fast_compile', verbose=[]) sol = gp.compute_model(geo_model) extent = geo_model.grid.regular_grid.extent resolution = geo_model.grid.regular_grid.resolution.reshape(-1,1) df=geo_model.series.df if len (np.unique (sol.fault_block))>1: no_of_faults=df.groupby(by='BottomRelation').count().iloc[1,0] else: no_of_faults=0 surfaces=geo_model.surface_points.df['surface'] if no_of_faults==0: surfaces_layer=[i for i in surfaces.unique()] else: surfaces_layer=[i for i in surfaces.unique()[no_of_faults:]] fault_name=[i for i in surfaces.unique()[:no_of_faults]] grid=geo_model.grid.values z_resolution = abs (grid[0,-1] - grid[1,-1]) res_x=abs(extent[1]-extent[0])/resolution[0,0] surfaces_layer.append('Basement') lith_blocks = np.array([]) ver = [] fault_ind = [] n_iter = 10 for i in range(n_iter): 
# INITIALIZER df_int_X = copy.copy(geo_model.surface_points.df['X']) df_int_Y = copy.copy(geo_model.surface_points.df['Y']) df_int_Z = copy.copy(geo_model.surface_points.df['Z']) df_or_X = copy.copy(geo_model.orientations.df['X']) df_or_Y = copy.copy(geo_model.orientations.df['Y']) df_or_Z = copy.copy(geo_model.orientations.df['Z']) df_or_dip = copy.copy(geo_model.orientations.df['dip']) df_or_azimuth = copy.copy(geo_model.orientations.df['azimuth']) surfindexes = list(geo_model.surface_points.df.index) orindexes = list(geo_model.orientations.df.index) geo_model.modify_surface_points(surfindexes, X=df_int_X, Y=df_int_Y, Z=df_int_Z) geo_model.modify_orientations(orindexes, X=df_or_X, Y=df_or_Y, Z=df_or_Z,dip = df_or_dip, azimuth = df_or_azimuth) fault_3_surfpoints = geo_model.surface_points.df.surface.isin(['fault3']) indexes_Fa_3_sp = geo_model.surface_points.df[fault_3_surfpoints].index fault_3_orient = geo_model.orientations.df.surface.isin(['fault3']) index_Fa_3_o = geo_model.orientations.df[fault_3_orient].index # Randomization_Method if i == 0: std1=std2=0 else: std1=5 rand1 = np.random.uniform(-std1, std1, size=1) # Randomized_input a= geo_model.surface_points.df['Z'].values[fault_3_surfpoints][0] + rand1 b= geo_model.surface_points.df['Z'].values[fault_3_surfpoints][1] + rand1 new_Z_fa_3=np.array([a,b]) new_Z_fa_3=new_Z_fa_3.flatten() new_Y_fa_3 = geo_model.surface_points.df['Y'].values[fault_3_surfpoints] new_X_fa_3 = geo_model.surface_points.df['X'].values[fault_3_surfpoints] # Modifier geo_model.modify_surface_points(indexes_Fa_3_sp, Z=new_Z_fa_3) # this block updates the model geo_model.update_to_interpolator() sol=gp.compute_model(geo_model) # Export Block ver.append(geo_model.solutions.vertices) lith_blocks = np.append(lith_blocks, geo_model.solutions.lith_block) fault_ind.append (np.hstack([grid,np.round(sol.fault_block.T[0:sol.grid.values.shape[0]])])) lith_blocks = lith_blocks.reshape(n_iter, -1) lays_fault_name=geo_model.surface_points.df.loc[:, 
'surface'].unique() all_vers=[list(column) for column in zip(*ver)] df=geo_model.series.df no_of_faults=df.groupby(by='BottomRelation').count().iloc[1,0] name_of_faults=lays_fault_name[0:no_of_faults].tolist() name_of_layers=lays_fault_name[no_of_faults:].tolist() savetxt('z_resolution.csv', np.array([z_resolution]), delimiter=',') # - gp.plot_2d(geo_model) from GeoMeshPy import vmod fr = np.array([[True], [True], [True], [True]]) mesh_resolution = 30. name_of_layers = np.array(['Cap_rock', 'Reservoir', 'basement']) model_faulted = vmod.vertice_modifier(n_iter, no_of_faults, all_vers, name_of_layers, mesh_resolution, fr, extent, resolution) sub_fourc_list = model_faulted.faults_corners()[0] len_fal = model_faulted.faults_corners()[1] new_result_list = model_faulted.contact_generator()[0] length_layers = model_faulted.contact_generator()[1] repre_pts = model_faulted.contact_generator()[2] # The visialization in the next block reveals some facts: <br> # 1. There are extra redundant point in verticed coming out of Gempy # 2. Gempy does not cut layers when while they are relocated by fault. in the other words, # layers just strech along the fault surfaces. # 3. Almost caused by 2, contact of the layer is uniform while there ae two faults cutting it. To solve # this issue, surfaces are deivided based on the existing fault. # For example, in this case the cotact should be split into three patches which are # shown by different color in the visualization cell. If you zoom in the contact of # layer and two cutting fault, you will see some vertices of Gempy there. 
# +
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
# %matplotlib qt5

# Compare the raw GemPy vertices against the cleaned, separated GeoMeshPy
# output (fault rectangles, per-patch layer contacts, representative points).
fig = plt.figure()
ax = fig.add_subplot (111, projection="3d")
# Gempy outputs for the only layers and third fault
Gempy_out_layer_1 = all_vers[-1][0]
Gempy_out_layer_2 = all_vers[-2][0]
x2 = Gempy_out_layer_1[:,0]; y2 = Gempy_out_layer_1[:,1]; z2 = Gempy_out_layer_1[:,2]
ax.scatter3D(x2,y2,z2, color='k', s=1, label='Raw output')
x2 = Gempy_out_layer_2[:,0]; y2 = Gempy_out_layer_2[:,1]; z2 = Gempy_out_layer_2[:,2]
ax.scatter3D(x2,y2,z2, color='k', s=1, label='Raw output')
# cleaned and separated data coming from GeoMeshPy:
# four corner points per fault -> four rectangular fault surfaces
faults = np.array(sub_fourc_list[0])
f1 = faults[:4,:]
f2 = faults[4:8,:]
f3 = faults[8:12,:]
f4 = faults[12:,:]
x2=faults[:,0]; y2=faults[:,1]; z2=faults[:,2]
ax.scatter3D(x2,y2,z2, color='r', s=10, marker= '*', label='Faults corners')
ax.plot_surface(np.array([[f1[0,0], f1[1,0]], [f1[3,0], f1[2,0]]]),
                np.array([[f1[0,1], f1[1,1]], [f1[3,1], f1[2,1]]]),
                np.array([[f1[0,2], f1[1,2]], [f1[3,2], f1[2,2]]]), color='b', alpha = 0.5)
ax.plot_surface(np.array([[f2[0,0], f2[1,0]], [f2[3,0], f2[2,0]]]),
                np.array([[f2[0,1], f2[1,1]], [f2[3,1], f2[2,1]]]),
                np.array([[f2[0,2], f2[1,2]], [f2[3,2], f2[2,2]]]), color='b', alpha = 0.5)
ax.plot_surface(np.array([[f3[0,0], f3[1,0]], [f3[3,0], f3[2,0]]]),
                np.array([[f3[0,1], f3[1,1]], [f3[3,1], f3[2,1]]]),
                np.array([[f3[0,2], f3[1,2]], [f3[3,2], f3[2,2]]]), color='b', alpha = 0.8)
ax.plot_surface(np.array([[f4[0,0], f4[1,0]], [f4[3,0], f4[2,0]]]),
                np.array([[f4[0,1], f4[1,1]], [f4[3,1], f4[2,1]]]),
                np.array([[f4[0,2], f4[1,2]], [f4[3,2], f4[2,2]]]), color='b', alpha = 0.8)
# Layer-contact point clouds: `le` gives the cumulative patch lengths, so each
# slice po[le[k]:le[k+1]] is one contact patch (orange = first contact,
# magenta = second contact).
po = np.array(new_result_list[0])
le = np.array(length_layers[0]).astype('int')
le = np.cumsum (le)
ax.scatter3D(po[:le[0],0],po[:le[0],1],po[:le[0],2], facecolor='None', color='orange', s=5, linewidths=0.5)
ax.scatter3D(po[le[0]:le[1],0],po[le[0]:le[1],1],po[le[0]:le[1],2],facecolor='None',linewidths=0.5, color='orange', s=5)
ax.scatter3D(po[le[1]:le[2],0],po[le[1]:le[2],1],po[le[1]:le[2],2],facecolor='None',linewidths=0.5, color='orange', s=5)
ax.scatter3D(po[le[2]:le[3],0],po[le[2]:le[3],1],po[le[2]:le[3],2],facecolor='None',linewidths=0.5, color='orange', s=5)
ax.scatter3D(po[le[3]:le[4],0],po[le[3]:le[4],1],po[le[3]:le[4],2],facecolor='None',linewidths=0.5, color='orange', s=5)
ax.scatter3D(po[le[4]:le[5],0],po[le[4]:le[5],1],po[le[4]:le[5],2],facecolor='None',linewidths=0.5, color='m', s=5)
ax.scatter3D(po[le[5]:le[6],0],po[le[5]:le[6],1],po[le[5]:le[6],2],facecolor='None',linewidths=0.5, color='m', s=5)
ax.scatter3D(po[le[6]:le[7],0],po[le[6]:le[7],1],po[le[6]:le[7],2],facecolor='None',linewidths=0.5, color='m', s=5)
ax.scatter3D(po[le[7]:le[8],0],po[le[7]:le[8],1],po[le[7]:le[8],2],facecolor='None',linewidths=0.5, color='m', s=5)
ax.scatter3D(po[le[8]:le[9],0],po[le[8]:le[9],1],po[le[8]:le[9],2],facecolor='None',linewidths=0.5, color='m', s=5)
# representative point proposed by GeoMeshPy
reps = np.array(repre_pts[0])[:,:-1].astype('float')
ax.scatter3D(reps[:5,0],reps[:5,1],reps[:5,2], marker= '*', color='k', s=50)
ax.scatter3D(reps[5:10,0],reps[5:10,1],reps[5:10,2], marker= '*', color='c', s=50)
ax.scatter3D(reps[10:,0],reps[10:,1],reps[10:,2], marker= '*', color='b', s=50)
ax.set_yticks([0, 500, 1000])
ax.set_xticks([0, 700, 1400])
ax.set_zticks([-1000, -1400, -1800])
ax.set_ylim(0, 1000)
ax.set_xlim([0, 1000])
ax.set_zlim([-1700, -1000])
ax.tick_params(axis='both', which='major', labelsize=10)
ax._facecolors2d = ax._facecolor  # workaround so legend() works with plot_surface
ax.grid(None)
plt.show()
ax.view_init(5, 270)
# -

# Export every realisation to CSV so the meshing part can run stand-alone.
from numpy import savetxt
sets = zip(sub_fourc_list, new_result_list, repre_pts, len_fal)
for ind, (crn_fal, vertices, rep_pnt, len_fals) in enumerate(sets):
    savetxt(f'fal_crn_{ind}.csv', np.array(crn_fal), delimiter=',')
    savetxt(f'vertices_{ind}.csv', np.array(vertices), delimiter=',')
    savetxt(f'rep_pnt_{ind}.csv', np.array(rep_pnt), delimiter=',', fmt="%s")
    savetxt(f'len_fals_{ind}.csv', np.array(len_fals), delimiter=',')
savetxt('len_layer.csv', length_layers, delimiter=',')

# Re-load everything that was exported above, so the meshing part of the
# notebook can be run independently of the GemPy computation.
import numpy as np
import copy
from GeoMeshPy import vmod
n_iter = 10
name_of_faults=['fault1', 'fault2', 'fault3' , 'fault4']
no_of_faults= len (name_of_faults)
sub_fourc_list=[]
new_result_list=[]
repre_pts=[]
len_fal=[]
from numpy import genfromtxt
length_layers=genfromtxt('len_layer.csv', delimiter=',').tolist()
import glob
# Sort each family of CSV files by the iteration number embedded in its name.
files_fal_cr = glob.glob("fal_crn_*.csv")
files_fal_crn= sorted(files_fal_cr, key=lambda name: int(name[8:-4]))
files_ve = glob.glob("vertices_*.csv")
files_ver = sorted(files_ve, key=lambda name: int(name[9:-4]))
files_repr= glob.glob("rep_pnt_*.csv")
files_repre= sorted(files_repr, key=lambda name: int(name[8:-4]))
files_le= glob.glob("len_fals_*.csv")
files_len= sorted(files_le, key=lambda name: int(name[9:-4]))
set_names = zip(files_fal_crn, files_ver, files_repre, files_len)
for name_fal, name_ver, name_rep, name_len in set_names:
    fal_crn=np.around(genfromtxt(name_fal, delimiter=','), decimals=6)
    sub_fourc_list.append(fal_crn.tolist())
    new_result_list.append(np.around(genfromtxt(name_ver, delimiter=','), decimals=6).tolist())
    repre_pts.append(genfromtxt(name_rep, delimiter=',', dtype=str).tolist())
    len_fal.append([genfromtxt(name_len, delimiter=',').tolist()])

# +
from numpy import savetxt
import gmsh
import itertools
from itertools import chain
gmsh.initialize()
if no_of_faults>0:
    def cleanup_and_mesh():
        # Purpose: after the geometry of one realisation has been fragmented,
        # drop unused entities, build size fields (finer near faults), mesh in
        # 3D, tag physical volumes/surfaces and write the .msh file.
        entities = gmsh.model.getEntities()
        gmsh.model.mesh.removeEmbedded(entities)
        # remove all surfaces, curves and points that are not connected to any
        # higher-dimensional entities
        gmsh.model.removeEntities(gmsh.model.getEntities(2), True)
        gmsh.model.removeEntities(gmsh.model.getEntities(1), True)
        gmsh.model.removeEntities(gmsh.model.getEntities(0))
        # get all surfaces that are not of type "Plane", i.e. all surfaces
        # except the box
        surfaces = [s[1] for s in gmsh.model.getEntities(2)
                    if gmsh.model.getType(s[0], s[1]) != 'Plane']
        # also refine close to the wells
        surface_after = gmsh.model.getEntities(2)
        points=copy.deepcopy(surface_new_tag)
        check_values=[row[-1] for row in surface_after]
        extracted = []
        for sublist in points:
            second_vals = [sec for fir, sec in sublist]
            if all(val in check_values for val in second_vals):
                extracted.append(second_vals)
        # extracted[:6] are the six box sides; anything after them is a fault.
        fl = [item for sublist in extracted[6:] for item in sublist]
        layer_surface = list(set(surfaces) - set(fl))
        # create a distance + threshold mesh size field w.r.t. these surfaces
        gmsh.model.mesh.field.add("Distance", 1)
        gmsh.model.mesh.field.setNumbers(1, "SurfacesList", fault_surfaces)
        gmsh.model.mesh.field.setNumber(1, "Sampling", 100)
        gmsh.model.mesh.field.add("Threshold", 2)
        gmsh.model.mesh.field.setNumber(2, "InField", 1)
        gmsh.model.mesh.field.setNumber(2, "SizeMin", 30)
        gmsh.model.mesh.field.setNumber(2, "SizeMax", 100)
        gmsh.model.mesh.field.setNumber(2, "DistMin", 35)
        gmsh.model.mesh.field.setNumber(2, "DistMax", 100)
        gmsh.model.mesh.field.add("Distance", 3)
        gmsh.model.mesh.field.setNumbers(3, "SurfacesList", layer_surface)
        gmsh.model.mesh.field.setNumber(3, "Sampling", 100)
        gmsh.model.mesh.field.add("Threshold", 4)
        gmsh.model.mesh.field.setNumber(4, "InField", 3)
        gmsh.model.mesh.field.setNumber(4, "SizeMin", 20)
        gmsh.model.mesh.field.setNumber(4, "SizeMax", 100)
        gmsh.model.mesh.field.setNumber(4, "DistMin", 25)
        gmsh.model.mesh.field.setNumber(4, "DistMax", 200)
        gmsh.model.mesh.field.add("Min", 5)
        gmsh.model.mesh.field.setNumbers(5, "FieldsList", [2,4])
        gmsh.model.mesh.field.setAsBackgroundMesh(5)
        gmsh.option.setNumber("Mesh.MeshSizeMax", 100)
        # don't extend mesh sizes from boundaries and use new 3D algo
        gmsh.option.setNumber("Mesh.MeshSizeExtendFromBoundary", 0)
        gmsh.option.setNumber("Mesh.Algorithm3D", 10)
        gmsh.model.mesh.generate(3)
        # using representative points to create physical volumes
        rep=[list(x) for _,x in itertools.groupby(rep_pnt,lambda x:x[3])]
        vol_num=np.arange(1,1+len(rep))
        for ind, surfaces in enumerate (rep):
            tags=[]
            for sects in surfaces:
                # find the 3D element containing the representative point and
                # record the tag of the volume it belongs to
                eleTag = gmsh.model.mesh.getElementByCoordinates(float (sects[0]), float (sects[1]), float (sects[2]))[0]
                eleType, eleNodes, entDim, entTag = gmsh.model.mesh.getElement(eleTag)
                tags.append(entTag)
            gmsh.model.addPhysicalGroup(3, tags, vol_num[ind])
            gmsh.model.setPhysicalName(3, vol_num[ind], sects[-1])
        for tag_nu, name in zip (sp_fls, name_of_faults):
            ps1 = gmsh.model.addPhysicalGroup(2, tag_nu)
            gmsh.model.setPhysicalName(2, ps1, name)
        #adding boundaries as physical lines
        around_box=['in', 'out', 'front', 'back', 'bottom', 'top']
        for tag_nu, name in zip (extracted[:6], around_box):
            ps1 = gmsh.model.addPhysicalGroup(2, tag_nu)
            gmsh.model.setPhysicalName(2, ps1, name)
        gmsh.write("Para_F_" + str(kk) + ".msh")
        gmsh.fltk.run()
    # NOTE(review): gmsh.initialize() was already called above this if-block;
    # calling it twice looks redundant -- confirm against the gmsh API.
    gmsh.initialize()
    # Parameters forwarded to occ.addSurfaceFilling for the layer contacts.
    degree = 3
    numPointsOnCurves = 10
    numIter = 10
    anisotropic = False
    tol2d = 0.00001
    tol3d = .1
    tolAng = 1
    tolCurv = 1
    maxDegree = 3
    maxSegments = 100
    sets = zip(sub_fourc_list, new_result_list, repre_pts, len_fal)
    for kk, (crn_fal, vertices, rep_pnt, len_fals) in enumerate(sets):
        ar=np.array(vertices)
        l_tags=[]
        # Split the stacked fault-corner array into one 4-corner group per fault.
        sp_fal_num=np.cumsum(np.array(len_fals).astype('int'))
        sp_fal=np.split (np.array(crn_fal), sp_fal_num[:-1])
        # Bounding box of the model, sized from the vertices and fault corners.
        gmsh.model.occ.addBox(min(ar[:,0]),crn_fal[0][1],crn_fal[0][2],max(ar[:,0])-min(ar[:,0]),
                              crn_fal[1][1]-crn_fal[0][1],crn_fal[2][2]-crn_fal[0][2])
        for i in range (len(sp_fal)):
            for [x, y, z] in sp_fal[i]:
                gmsh.model.occ.addPoint(x, y, z)
        # Point tags 1-8 belong to the box, so fault points start at tag 9.
        tag_p_fal=np.arange(9, len (crn_fal)+9)
        tag_sp_fal=np.split (tag_p_fal, sp_fal_num[:-1])
        for i in tag_sp_fal:
            for j in range (len(i)):
                # close each fault outline by joining the last point to the first
                if j==len(i)-1:
                    gmsh.model.occ.addLine (i[j], i[0])
                else:
                    gmsh.model.occ.addLine (i[j], i[j+1])
        # Line tags 1-12 belong to the box, so fault lines start at tag 13.
        tag_l_fal=np.arange(13, len (crn_fal)+13)
        tag_sl_fal=np.split (tag_l_fal, sp_fal_num[:-1])
        for i in tag_sl_fal:
            lop=i.tolist()
            gmsh.model.occ.addCurveLoop(lop, lop[0]*10)
            gmsh.model.occ.addSurfaceFilling(lop[0]*10, lop[0]*10)
        spl_num=np.cumsum(length_layers[kk]).tolist()[:-1]
        # each cloud of points is separated
        spl_num=[int (i) for i in spl_num]
        sep_ar=np.split(ar,spl_num)
        for ind, point_clouds in enumerate (sep_ar):
            i_l=point_clouds.tolist()
            for [x, y, z] in i_l:
                gmsh.model.occ.addPoint(x, y, z)
            if len (point_clouds)>3:
                # Find the boundary points of the cloud: first/last point of
                # every x-column and of every y-row.
                y_sub=np.unique(point_clouds[:,1].round(5),return_counts=True)[1]
                x_sub=np.unique(point_clouds[:,0].round(5),return_counts=True)[1]
                pts=[]
                for j in np.split (point_clouds, np.cumsum(x_sub)[:-1]):
                    if (j[0]!=j[-1]).any():
                        pts.append([j[0], j[-1]])
                for m in np.split (point_clouds[np.lexsort((point_clouds[:,0],point_clouds[:,1]))],
                                   np.cumsum(y_sub)[:-1]):
                    if (m[0]!=m[-1]).any():
                        pts.append([m[0], m[-1]])
                a=[[j.tolist() for j in i] for i in pts]
                b = list(chain.from_iterable(a))
                c=list(set(tuple(x) for x in b))
                d=[list(i) for i in c]
                f= [sublist for sublist in d]
                g=np.array(f)
                h=g[np.lexsort((g[:,1],g[:,0]))]
                # it includes all the exterior points of the cloud
                pnt=h[:,0:-1].tolist()
                arround_pts=vmod.vertice_modifier.rotational_sort(pnt,
                    (np.mean(np.array(pnt)[:,0]),np.mean(np.array(pnt)[:,1])),True)
                tags=np.where((point_clouds[:,:-1]==np.array(arround_pts)[:,None]).all(-1))[1] + 1
                l_tags.append(len(tags))
                # Offsets that map local point/line indices to global gmsh tags.
                start_point=int (8+len(crn_fal)+np.sum(length_layers[kk][0:ind]))
                start_line=int (12+len(crn_fal)+1+np.sum(l_tags[0:ind]))
                for i in range (len(tags)):
                    # this for loop creates the exterior lines of each cloud
                    if i!=len(tags)-1:
                        gmsh.model.occ.addSpline([tags[i]+start_point,tags[i+1]+start_point])
                    else:
                        gmsh.model.occ.addSpline([tags[i]+start_point,tags[0]+start_point])
                gmsh.model.occ.addCurveLoop([i for i in range (start_line, start_line+len(tags))], start_line*10)
                gmsh.model.occ.addSurfaceFilling(start_line*10, start_line*10,
                                                 [m for m in range (start_point+1, start_point+np.max(tags))
                                                  if m not in tags+start_point],
                                                 degree, numPointsOnCurves, numIter, anisotropic,
                                                 tol2d, tol3d, tolAng, tolCurv, maxDegree, maxSegments)
                # create surface by connecting exterior lines
                # and including interior ones
        gmsh.model.occ.synchronize()
        gmsh.option.setNumber('Geometry.ToleranceBoolean', 0.01)
        # Fragment all surfaces against the box volume so faults/contacts cut it.
        in_surf = gmsh.model.occ.getEntities(2)
        out_all=gmsh.model.occ.fragment(in_surf, gmsh.model.occ.getEntities(3))#[1]
        out=out_all[1]
        surface_new_tag = out[0:len(in_surf)]
        gmsh.model.occ.synchronize()
        # extracting fault indices
        fault_tag_num = [i[0]*10 for i in tag_sl_fal]
        ind_fault_surface = [x for x, y in enumerate(in_surf) if y[1] in fault_tag_num]
        sp_f = [i for ind, i in enumerate (surface_new_tag) if ind in ind_fault_surface]
        sp_fls = [[i[1] for i in j] for j in sp_f]
        fault_surfaces = np.concatenate (np.array(sp_fls)).tolist()
        cleanup_and_mesh()
        gmsh.clear()
    gmsh.finalize()
# -
Examples/Faulted/Parallel_Faults/Para_F.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.2 # language: julia # name: julia-1.6 # --- # + # Change this to where you have MyPoly # cd("/home/jpy_class/mit/12.010/cn<EMAIL>/poly-testing/fall-2021-12.010/Julia/MyPoly/MyPoly") # - using Pkg Pkg.activate(".") using MyPoly p1=MyPoint(0.,0.) p2=MyPoint(1.,0.) p3=MyPoint(1.,1.) p4=MyPoint(0.,1.) p5=MyPoint(0.,0.) poly1=MyPolyVar([p1,p2,p3,p4,p5],10) a=area(poly1)
Julia/mypoly-julia.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python Tutorial, 4th edition

# ### Why Python programs are small and readable
# - High-level data types allow complex operations to be written as single statements
# - Statement grouping is done by indentation rather than by brackets
# - No declarations of variables or arguments are needed

# ## Chapter 3: An informal introduction
# ### Using Python as a calculator
# #### 3.1.1 Numbers

2 + 2

50 - 5 * 6

(50 - 5 * 6) / 4

8 / 5  # division always returns a floating point number

17 / 3  # standard division returns a float

17 // 3  # floor division

17 % 3  # remainder only

5 * 3 + 2

5 ** 2  # powers

2 ** 7

width = 20
height = 5 * 9
width * height

# Using an undefined variable raises an error (intentional demo: NameError)
n

# When types are mixed, integers are converted to floating point
4 * 3.75 - 1

# Decimals (Decimal), rationals (Fraction) and complex numbers are also
# supported; use the suffix 'j' or 'J' to indicate the imaginary part (3+5j)

# #### 3.1.2 Strings

# Either single quotes ('...') or double quotes ("...") can be used with the
# same result.  A backslash (\) escapes quote characters.
# Unlike other languages, special characters such as \n behave the same in
# single- and double-quoted strings.  The only difference is that inside
# single quotes you do not need to escape " (but must escape ' as \'), and
# inside double quotes you do not need to escape the single quote.
'spam eggs'

'doesn\'t'

# echoed using double quotes
"doesn't"

'"Yes," they said.'

"\"Yes,\" they said."

'"Isn\'t," they said.'

# The echoed string is wrapped in double quotes only when it contains a
# single quote and no double quotes; otherwise single quotes are used.
# print() produces more readable output: it removes the enclosing quotes and
# prints escaped and special characters.
print('"Ins\'t," they said.')

s = 'First line.\nSecond line.'
s
print(s)

# If you do not want characters prefixed by \ to be interpreted as special
# characters, use raw strings: put an r before the first quote
print('C:\some\name')

print(r'C:\some\name')

# String literals can span multiple lines by using triple quotes.
# A trailing \ prevents the newline from being included automatically.
print("""\
Usage: thingy [OPTIONS]
     -h                        Display this usage message
     -H hostname               Hostname to connect to
""")

# without the \ the first newline is included
print("""
Usage: thingy [OPTIONS]
     -h                        Display this usage message
     -H hostname               Hostname to connect to
""")

print("""Usage: thingy [OPTIONS]
     -h                        Display this usage message
     -H hostname               Hostname to connect to
""")

# Adjacent string literals (ones enclosed in quotes) are automatically concatenated
'Py' 'thon'

text = ('カッコの中にながい文字列を' '入れておいて繋げてやろう')
text

# A variable and a string literal cannot be concatenated this way
# (intentional demo: SyntaxError)
prefix = 'Py'
prefix 'thon'

# Use + to concatenate a variable with a literal, or a variable with a variable
prefix + 'thon'

# Strings can be indexed; the first character has index 0.  There is no
# separate character type: a character is simply a string of length one.
word = 'Python'
word[0]
word[5]
word[-1]
word[-0]
word[-2]
word[-6]
word[7]     # intentional demo: IndexError (out of range)
word[-100]  # intentional demo: IndexError
word[6]     # intentional demo: IndexError
word[5]

# In addition to indexing, slicing is also supported: it obtains substrings.
word[0:2]
word[2:5]

# The start is always included and the end always excluded, so
# s[:i] + s[i:] is always equal to s
word[:2] + word[2:]
word[:4] + word[4:]

# Slice indices have useful defaults: an omitted first index defaults to 0,
# an omitted second index defaults to the size of the string
word[:2]
word[:4]
word[-2:]

# Out-of-range slice indexes are handled gracefully
word[4:42]

# Python strings cannot be changed -- they are "immutable" -- so assigning to
# an indexed position raises an error (intentional demo: TypeError)
word[0] = 'J'

'J' + word[1:]

# The built-in function len() returns the length of a string
len('kl;jfd;lakj;flkdjsa;flksj')

# #### 3.1.3 Lists
# Python knows several compound data types used to group together other
# values.  The most versatile is the list, written as comma-separated values
# (items) between square brackets.  Lists may contain items of different
# types, but usually the items all have the same type.
squares = [1, 4, 9, 16, 25]
squares

# Like strings (and all other sequence types), lists can be indexed and sliced
squares[0]
squares[2:4]

# Slicing creates and returns a new list.
# All slice operations return a new list containing the requested elements,
# which means the slice below returns a new shallow copy of the list above.
squares[:]

# Strings are immutable, but lists are mutable: their content can be replaced
cubes = [1, 8, 27, 65, 125]
cubes[3] = 64
cubes

# The append() method adds items at the end of the list
cubes.append(216)
cubes
cubes.append(7 ** 3)
cubes

# Assignment to slices is also possible; this can change the length of the
# list or clear its entire content
cubes[:4] = []
cubes
cubes[:] = []
cubes

# Lists can be nested (a list can contain other lists)
a = ['a', 'b', 'c']
n = [1, 2, 3]
x = [a, n]
x
x[0]
x[0][1]
x[1:]
x[2:]
x[0:]
x[1][2]

# ### 3.2 First steps toward programming
# Fibonacci series
a, b = 0, 1
while a < 10:
    print(a)
    a, b = b, a+b

# print() displays strings without quotes, and a space is inserted between items
i = 256 * 256
print('The value of i is', i)

# The keyword argument end can be used to suppress the trailing newline or to
# end the output with a different string
a, b = 0, 1
while a < 10:
    print(a, end=',')
    a, b = b, a+b
Chapter 3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # FUNCTIONS

import os
import sys  # BUGFIX: used by bloques()/eliminacion_bloques() but was never imported
import numpy as np
import pprint
import copy
from math import sqrt
from scipy.linalg import solve_triangular

# ### Matrix creation

def crea_matriz(renglones,columnas,maximo_valor,minimo_valor,entero=False):
    """
    Helper that generates a random matrix.

    params:
        renglones     number of rows of the matrix
        columnas      number of columns of the matrix
        maximo_valor  maximum value of the matrix entries
        minimo_valor  minimum value of the matrix entries
        entero        whether the entries are integer-valued (True) or not

    return:
        M             matrix of random numbers
    """
    M=np.zeros((renglones, columnas))
    for i in range(renglones):
        for j in range(columnas):
            if entero:
                # floor of a uniform draw on [minimo_valor, maximo_valor + 1)
                M[i][j]=(np.random.rand(1)*(maximo_valor+1-minimo_valor)+minimo_valor)//1
            else:
                M[i][j]=np.random.rand(1)*(maximo_valor-minimo_valor)+minimo_valor
    return M

# ### QR factorisation

def house(x):
    """
    Compute the Householder reflection of a vector.

    params:
        x     vector to be reflected

    return:
        Beta  scalar used to rebuild the reflector H = I - Beta*v*v^T
        v     Householder vector, normalised so that v[0] == 1
    """
    m=len(x)
    norm_2_m=x[1:m].dot(np.transpose(x[1:m]))  # squared norm of x[1:]
    v=np.concatenate((1,x[1:m]), axis=None)
    Beta=0
    if (norm_2_m==0 and x[0]>=0):
        Beta=0          # x is already a non-negative multiple of e1: no reflection
    elif (norm_2_m==0 and x[0]<0):
        Beta=2
    else:
        norm_x=np.sqrt(pow(x[0],2)+norm_2_m)
        # sign choice that avoids cancellation when computing v[0]
        if (x[0]<=0):
            v[0]=x[0]-norm_x
        else:
            v[0]=-norm_2_m/(x[0]+norm_x)
        Beta=2*pow(v[0],2)/(norm_2_m+pow(v[0],2))
        v=v/v[0]
    return Beta, v

def factorizacion_QR(A):
    """
    Householder QR factorisation in compact (essential) form.

    The returned matrix holds the entries of the upper-triangular factor R
    on and above the diagonal, and the essential parts of the Householder
    vectors v below it:

        [r11     r12     r13     r14    ]
        [v_2_(1) r22     r23     r24    ]
        [v_3_(1) v_3_(2) r33     r34    ]
        [v_4_(1) v_4_(2) v_4_(3) r44    ]
        [v_5_(1) v_5_(2) v_5_(3) v_5_(4)]

    params:
        A      (m x n) matrix (float dtype) to factorise

    return:
        A_r_v  (m x n) matrix with the essential QR information, useful e.g.
               to rebuild the orthogonal factor Q
    """
    m=A.shape[0]
    n=A.shape[1]
    A_r_v=copy.copy(A)
    for j in range(n):
        beta, v=house(A_r_v[j:m,j])
        # apply the reflector to the trailing submatrix ...
        A_r_v[j:m,j:n]=A_r_v[j:m,j:n]-beta*(np.outer(v,v)@A_r_v[j:m,j:n])
        # ... and store the essential part of v below the diagonal
        A_r_v[(j+1):m,j]=v[1:(m-j)]
    return A_r_v

def QT_C(A_r_v,C):
    """
    Multiply Q^T (rebuilt from the compact factorisation) by a matrix C.

    params:
        A_r_v  (m x n) matrix with the essential QR information
        C      (m x p) matrix (passing C = I_m should recover Q^T)

    return:
        QT_por_C  the product Q^T @ C

    NOTE(review): each iteration overwrites QT_por_C[j:m, j:m] from the
    ORIGINAL C instead of accumulating previous reflections; this helper is
    unused by the solver below -- verify before relying on it.
    """
    m=A_r_v.shape[0]
    n=A_r_v.shape[1]
    QT_por_C=np.eye(m)
    for j in range(n-1,-1,-1):
        # rebuild v (v[0] == 1) and beta from the stored essential part
        v=np.concatenate((1,A_r_v[(j+1):m,j]), axis=None)
        beta=2/(1+A_r_v[(j+1):m,j].dot(A_r_v[(j+1):m,j]))
        QT_por_C[j:m,j:m]=C[j:m,j:m]-beta*np.outer(v,v)@C[j:m,j:m]
    return QT_por_C

def Q_j(A_r_v,j):
    """
    Build the j-th Householder reflector Q_j of the factorisation
    (Q = Q_1 * Q_2 * ... * Q_n) as a full (m x m) matrix.

    params:
        A_r_v  (m x n) matrix with the essential QR information
        j      index of the reflector to rebuild

    return:
        Qj     the j-th orthogonal (and symmetric) Householder matrix
    """
    m=A_r_v.shape[0]
    n=A_r_v.shape[1]
    Qj=np.eye(m)
    v=np.concatenate((1,A_r_v[(j+1):m,j]), axis=None)
    beta=2/(1+A_r_v[(j+1):m,j].dot(A_r_v[(j+1):m,j]))
    Qj[j:m,j:m]=np.eye(m-j)-beta*np.outer(v,v)
    return Qj

# ### Functions to solve systems of linear equations

def Solucion_SEL_QR_nxn(A,b):
    """
    Solve the linear system Ax = b (n equations, n unknowns) via Householder
    QR: with A = QR, x solves the triangular system R x = Q^T b.

    params:
        A   (n x n) coefficient matrix (float dtype)
        b   right-hand side: a vector (n,) or a matrix of RHS columns

    return:
        x   vector (or matrix) satisfying Ax = b
    """
    A_r_v=factorizacion_QR(A)
    m=A_r_v.shape[0]
    # Rebuild Q and R = Q^T A from the compact factorisation.
    Q=np.eye(m)
    R=copy.copy(A)
    for j in range(m):
        Qj=Q_j(A_r_v,j)   # reuse one reflector per step (was computed twice)
        Q=Q@Qj
        R=Qj@R
    x = solve_triangular(R, np.transpose(Q)@b)
    return x

# #### Block elimination

def bloques(A, b=False, n1=False, n2=False):
    """
    Split a matrix (and optionally a right-hand side) into 2 x 2 blocks.

    params:
        A    (m x n) coefficient matrix
        b    optional RHS vector of length m
        n1   number of rows of the first block (optional)
        n2   number of rows of the second block (optional)

    return:
        A11, A12, A21, A22   the four sub-blocks of A
        b1, b2               the two sub-vectors of b (None when b is absent)
    """
    m,n = A.shape
    # If the block sizes are not given, make the two blocks (almost) equal.
    # BUGFIX: the original tested `not (n1 & n2)` -- a bitwise AND that
    # wrongly discards valid sizes (e.g. 4 & 2 == 0) -- and validated with
    # `n1 + n1 != n` instead of `n1 + n2 != n`.
    if not (n1 and n2):
        n1 = n//2
        n2 = n - n1
    elif n1+n2 != n:
        sys.exit('n1 + n2 debe ser igual a n')
    # Split b only when it was supplied and matches the row count of A.
    if b is False:
        b1 = None
        b2 = None
    elif len(b) == m:
        b1 = b[:n1]
        b2 = b[n1:m]
    else:
        sys.exit('los renglones de A y b deben ser del mismo tamaño')
    A11 = A[:n1,:n1]
    A12 = A[:n1,n1:n]
    A21 = A[n1:m,:n1]
    A22 = A[n1:m,n1:n]
    return A11,A12,A21,A22,b1,b2

def eliminacion_bloques(A,b):
    """
    Solve Ax = b by 2 x 2 block elimination (Schur complement), using the QR
    solver above for every sub-system.

    params:
        A   (n x n) nonsingular coefficient matrix (float dtype)
        b   (n,) right-hand side

    return:
        the concatenation of x1 and x2, the solution of Ax = b
    """
    if np.linalg.det(A)==0:
        sys.exit('A debe ser no singular')
    A11,A12,A21,A22,b1,b2 = bloques(A,b)
    if np.linalg.det(A11)==0:
        # BUGFIX: this call was misspelled 'ys.exit' in the original
        sys.exit('A11 debe ser no singular')
    ## 1. Compute A11^{-1}A12 and A11^{-1}b1 carefully: via linear solves
    ##    (the QR method above), never an explicit inverse.
    ## y = A11^{-1} b1, i.e. solve A11 y = b1:
    y = Solucion_SEL_QR_nxn(A11,b1)
    ## Y = A11^{-1} A12
    Y = Solucion_SEL_QR_nxn(A11,A12)
    ## 2. Schur complement of the A11 block in A, and the reduced RHS b_hat
    S = A22 - A21@Y
    b_h = b2 - A21@y
    ## 3. Solve S x2 = b_hat
    x2 = Solucion_SEL_QR_nxn(S,b_h)
    ## 4. Solve A11 x1 = b1 - A12 x2
    x1 = Solucion_SEL_QR_nxn(A11,b1-A12@x2)
    return np.concatenate((x1,x2), axis=0)

# # Unit test
# ## Block elimination with QR on systems with infinitely many solutions
# ### Example 1 - 2 x 2 matrix (linearly dependent equations)
# The following system has 2 equations and 2 unknowns.  The second equation
# is a linear combination of the first: multiplying it by -2 yields the
# first, so the system is singular.

# +
# Left-hand side A and right-hand side b of a 2x2 system of equations
A = np.array([[-6, 4], [3, -2]], dtype='d')
b = np.array([[2], [-1]], dtype='d')
print("A:")
pprint.pprint(A)
print("b:")
pprint.pprint(b)
# -

# **Numpy**
# Since the system has infinitely many solutions, numpy's linalg.solve(A, b)
# is used to show which error it reports for such a system.  The call is
# wrapped in try/except so the .py rendering of this notebook keeps running
# past the intentional error while still displaying it.
try:
    np.linalg.solve(A,b)
except np.linalg.LinAlgError as err:
    print('np.linalg.solve:', err)

# The error raised by linalg.solve states that the matrix A is singular,
# which means no inverse exists and therefore the system cannot be solved.
# **Implementación Programadores - Eliminación por bloques con QR** # La función *eliminacion_bloques*, implementada por los programadores permite resolver un sistema de ecuaciones lineales. Se revisará el error que despliega al intentar resovler el sistema de ecuaciones propuesto. eliminacion_bloques(A,b) # Lo anterior muestra que la función *eliminacion_bloques* validó si el determinante de la matriz A era igual cero, lo cual es equivalente a evaluar si la matriz es *no singular*. Como lo condición no fue cumplida, es decir, la matriz es singular, entonces la función ya no es completada. # # El error que marca esta función es consistente con el desplegado por la función *linalg.solve*. # ### Ejemplo 2 - Matriz $10^{2}$ x $10^{2}$ (Matriz de unos) # Se genera un sistema de ecuaciones lineales de $10^{2} x 10^{2}$, cuyo lado izquierdo es una matriz de 1's y el lado derecho es un vector que contiene al número 100 en cada una de sus entradas. Este sistema representa la misma ecuación, repetida 100 veces. # + # Generación lado izquierdo A (matriz de 1's) y lado derecho b (vector con entradas igual a 100) m = 10**2 n = 10**2 A = crea_matriz(m, n, 1, 1,True) b = crea_matriz(m, 1, 100, 100,True) print("A:") pprint.pprint(A) print("b:") pprint.pprint(b[0:10,:]) # - # **Numpy** # Nuevamente se hará uso de la función *linalg.solve(A,b)* de la librería numpy para conocer qué error despliega al intentar resolver el sistema propuesto. np.linalg.solve(A,b) # De igual forma que en el Ejemplo 1, el error indica que la matriz A es singular. # **Implementación Programadores - Eliminación por bloques con QR** # Se implementa la función *eliminacion_bloques* creada por los programadores para verificar que se obtenga el mismo error que marca al función *linalg.solve(A,b)*. eliminacion_bloques(A,b) # Efectivamente, la función *eliminacion_bloques* indica que la matriz A es singular. 
# ### Ejemplo 3 - Matriz $10^{2}$ x $10^{2}$ (Matriz de unos, sistema homogéneo) # De forma análoga al Ejemplo 2, se genera un sistema de ecuaciones lineales de $10^{2} x 10^{2}$, cuyo lado izquierdo es una matriz de 1's; sin embargo el lado derecho es un vector que contiene al número 0 en cada una de sus entradas, es decir, se trata de un sistema homogéneo. De igual forma, este sistema representa la misma ecuación, repetida 100 veces. # + # Generación lado izquierdo A (matriz de 1's) y lado derecho b (vector con entradas igual a 0) m = 10**2 n = 10**2 A = crea_matriz(m, n, 1, 1,True) b = crea_matriz(m, 1, 0, 0,True) print("A:") pprint.pprint(A) print("b:") # se muestran los primeros 10 elementos del lado derecho b pprint.pprint(b[0:10,:]) # - # **Numpy** # Se intenta resolver el sistema con *linalg.solve(A,b)*: np.linalg.solve(A,b) # *linalg.solve(A,b)* despliega el mismo error que en los Ejemplos 1 y 2. # **Implementación Programadores - Eliminación por bloques con QR** # Finalmente se ejecuta la función *eliminacion_bloques* implementada por los programadores. eliminacion_bloques(A,b) # Tal como se esperaba, *eliminacion_bloques* indica que A debe ser una matriz no singular. # # Los Ejemplos 2 y 3 demuestran que la solución de un sistema de ecuaciones es independiente del lado derecho $b$. En estos ejemplos se mantuvo el mismo lado izquierdo $A$ y se varió el lado derecho $b$, de forma que el Ejemplo 3 es un sistema homogéneo y el Ejemplo 2 no lo es. En ambos casos el error asociado indica que la matriz $A$ es singular. Por lo tanto la solución del sistema de ecuaciones depende en su totalidad del lado izquierdo $A$.
Pruebas/Revision1/pu_epbQR_sistema_soluciones_infinitas.ipynb
(* -*- coding: utf-8 -*- *)
(* --- *)
(* jupyter: *)
(*   jupytext: *)
(*     text_representation: *)
(*       extension: .ml *)
(*       format_name: light *)
(*       format_version: '1.5' *)
(*     jupytext_version: 1.14.4 *)
(*   kernelspec: *)
(*     display_name: OCaml *)
(*     language: ocaml *)
(*     name: iocaml *)
(* --- *)

(* <h1>Sum of random numbers</h1> *)
(*  *)
(* <h2>Overview</h2> *)
(*  *)
(* We draw random numbers between 0 and 1. *)
(*  *)
(* On average, how many numbers must be drawn for their sum to exceed 1? *)

(* + *)
open Random;;

(* BUGFIX: the original wrote [Random.self_init;;], which only evaluates the
   function value without applying it, so the generator was never actually
   seeded.  The unit argument is required to perform the call. *)
Random.self_init ();;

(* One experiment: count how many uniform draws on [0,1) are needed for
   their running sum to reach 1. *)
let tirage () =
  let rec loop s count =
    if s < 1. then loop (s+.(Random.float 1.)) (count+1)
    else count
  in loop 0. 0;;

(* Average of [tirage] over [nbre_simul] independent experiments.
   The expected value is e = 2.718... (proof below). *)
let simulation nbre_simul =
  let rec loop num_simul res =
    if num_simul < nbre_simul then loop (num_simul+1) (res+tirage())
    else float_of_int res /. (float_of_int nbre_simul)
  in loop 0 0;;

simulation 100000
(* - *)

(* <h2>Solution</h2> *)
(*  *)
(* <h3>Theorem</h3> *)
(* Let $a$ be a real number between 0 and 1, and let $\{X_i\}$ be a set of $n$ *)
(* <b>independent</b> random variables, each uniform on $[0,1]$.  Then *)
(* $\forall n\in\mathbb{N}^*\quad P(X_1+X_2+\dots+X_n\leq a)=\frac{a^n}{n!}$. *)
(*  *)
(* <h3>Proof</h3> The statement holds for $n=1$.<br> *)
(* For $n=2$, $P(X_1+X_2\leq a)$ is the ratio of the area of the corner *)
(* triangle to the area of the unit square: *)
(* $P(X_1+X_2\leq a) *)
(* =\frac{\displaystyle\int_{X_1=0}^a\int_{X_2=0}^{a-X_1} \textrm{d} X_2\,\textrm{d}X_1} *)
(* {\displaystyle\int_{X_1=0}^1\int_{X_2=0}^{1} \textrm{d} X_2\,\textrm{d}X_1} *)
(* =\frac{\int_{X_1=0}^a (a-X_1)\,\textrm{d}X_1}{1}=\frac{a^2}{2}$ *)
(*  *)
(* <img src="./somme_nombres_aleatoires_fig1.svg" /> *)
(*  *)
(* For $n=3$, it is the ratio of the volume of the corner tetrahedron to the *)
(* volume of the unit cube: *)
(* $P(X_1+X_2+X_3\leq a) *)
(* =\frac{\displaystyle\int_{X_1=0}^a\int_{X_2=0}^{a-X_1}\int_{X_3=0}^{a-X_1-X_2} \textrm{d} X_3\,\textrm{d} X_2\,\textrm{d}X_1}{1} *)
(* =\int_{X_1=0}^a \frac{(a-X_1)^2}{2}\,\textrm{d}X_1=\frac{a^3}{3!}$ *)
(*  *)
(* <img src="./somme_nombres_aleatoires_fig2.svg" /> *)
(*  *)
(* By induction, $\displaystyle P(X_1+X_2+\dots+X_n\leq a) *)
(* =\frac{\textrm{volume of the hyper-prism}}{\textrm{volume of the hyper-cube}}=\frac{a^n}{n!}$. *)
(*  *)
(* <h3>Answer to the original question</h3> *)
(*  *)
(* Let $Y$ be the random variable giving the smallest number of variables *)
(* $X_i$ such that $\displaystyle\sum_{i=1}^{Y}X_i\geq 1$. *)
(*  *)
(* We have $Y=n$ when $(X_1+\dots+X_{n-1})<1$ and $(X_1+\dots+X_{n-1}+X_n)\geq1$, *)
(* so $P(Y=n)=P(X_1+\dots+X_{n-1}<1)-P(X_1+\dots+X_{n}<1)=\frac1{(n-1)!}-\frac1{n!}=\frac{n-1}{n!}$. *)
(*  *)
(* The expectation of $Y$ is therefore *)
(*  *)
(* $\displaystyle E(Y)=\sum_{n\geq1} n\,P(Y=n)=\sum_{n\geq1}\frac{n(n-1)}{n!}=\sum_{n\geq1}\frac{n-1}{(n-1)!}=\sum_{n\geq2}\frac{1}{(n-2)!}=e$. *)
(*  *)
(* On average, the sum of $e$ draws is needed to obtain a number greater than 1. *)
Somme_nombres_aleatoires/Somme_nombres_aleatoires_OCaml_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:notebook] * # language: python # name: conda-env-notebook-py # --- # # Best-practices for Cloud-Optimized Geotiffs # # # **Part 3. Dask LocalCluster** # # As the number of COGs starts to grow you can quickly excede the amount of RAM on your system. This is where a Dask Cluster can be extremely useful. A LocalCluster is able to utlize all your CPUs and will manage your RAM such that you shouldn't get 'out of memory' errors when running computations. Often this amount of parallelism is all you need for efficient data exploration and analysis. # # In this notebook we'll focus on computing the temporal mean for a stack of COGS that excede our notebook memory (8GB) # + import xarray as xr import s3fs import pandas as pd import os import dask from dask.distributed import Client, LocalCluster, progress # - env = dict(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR', AWS_NO_SIGN_REQUEST='YES', GDAL_MAX_RAW_BLOCK_CACHE_SIZE='200000000', GDAL_SWATH_SIZE='200000000', VSI_CURL_CACHE_SIZE='200000000') os.environ.update(env) # + # %%time s3 = s3fs.S3FileSystem(anon=True) objects = s3.glob('sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/**Gamma0_VV.tif') images = ['s3://' + obj for obj in objects] print(len(images)) images.sort(key=lambda x: x[-32:-24]) #sort list in place by date in filename images[:6] #january 2020 scenes # - # Let's use first 100 images for simplicity images = images[:100] dates = [pd.to_datetime(x[-32:-24]) for x in images] # ### Load in series (no dask) # # skip these cells and go to Dask if you want to avoid local caching from previous steps # + # %%time # ~8.5s # 100 images, 12 GB uncompressed # All the image metadata = ~275 images, 33GB uncompressed dataArrays = [xr.open_rasterio(url, chunks={}) for url in images] # note use of join='override' b/c we know these COGS have the 
same coordinates da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop').rename(band='time') da['time'] = dates da # + # as a benchmark, let's say we want to calculate the mean of each of these COGs. # we can just loop over all 100 images if each calculation is ~1.5s (based on single-cog notebook) that should take ~2.5 min: 100 * 1.5 / 60 # we should be able to do better than this though since each COG can be operated on independently... # - # ### Load in parallel (dask w/ threads) cluster = LocalCluster(processes=False, local_directory='/tmp') client = Client(cluster) client # NOTES: # dask workers write to SSD (/tmp) rather than home directory NFS mount # 1 worker, 4 cores --> 1 process w/ 4 threads # Open 'Dask 'Graph', 'Task Steam', and 'Workers' from labextension to see computation progress @dask.delayed def lazy_open(href): chunks=dict(band=1, x=2745, y=2745) return xr.open_rasterio(href, chunks=chunks) # + # %%time # ~10s ... basically loading in series (file locks?) # picks up cache if run again (300ms) dataArrays = dask.compute(*[lazy_open(href) for href in images]) da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop').rename(band='time') da['time'] = dates da # + # lets say we want the spatial mean of each COG. We can operate on each of these 278 files simultaneously ("embarrassingly parallel") # workers should be able to operate on each COG in isolation and just return a single result # It can be helpful to look at the task graph for a single COG like so: da.isel(time=0).mean(dim=['x','y']).data.visualize(optimize_graph=True, rankdir='LR') # + # %%time # 2min 25s # spatial mean of each COG (output = 278x1 vector) # task stream shows that this actually goes in series (due to xr.open_rasterio file lock?) 
da.mean(dim=['x','y']).compute() # + # %%time # ~ 2 min 32s # temporal mean of all COGs (output = 5490x5490 array) # this workflow requires pulling (nCOGS x chunk size) into worker RAM to get mean through time for each chunk (3GB) da.mean(dim='time').compute() # + # GOTCHAS: The following is not a good idea because the output is the full uncompressed DataArray in local memory, # so we eventually hit RAM limits and start writing bytes to disk instead of RAM or the computation fails #scaled = da + 100 #scaled.compute() # - # #### recap # # * The initial load of this dataset is slow b/c each thread is reading metadata sequentially # * subsequent calls to da are an order of magnitude faster b/c the file handles and metadata are cached locally # * computations can be slow (maybe due to file read locks preventing simultaneous operations) # * threads are good for computations where memory needs to be shared by tasks (e.g. temporal mean for many COGs) # * might want to experiment with chunk sizes (30--> 100MB), there should be less network requests that way... # ### Load in parallel (dask w/ processes) # # Restart the kernel before running this section to avoid cache in timing #processes=True allows us to open COGs in parallel, circumventing locks. 
should be faster by a factor of 'nCores' # we have 4 by default on this machine cluster = LocalCluster(local_directory='/tmp') #processes=True by default client = Client(cluster) client # + # %%time # ~3.7s first run, 1.6s subsequent run (caching but maybe cache is separate per process?), dataArrays = dask.compute(*[lazy_open(href) for href in images]) da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop').rename(band='time') da['time'] = dates da # + # %%time # 42.8s da.mean(dim=['x','y']).compute() # + # %%time # Kernel dies :( # temporal mean of all COGs (output = 5490x5490 array) # this workflow requires pulling (nCOGS x chunk size) into worker RAM to get mean through time for each chunk (3GB) # because each processe uses it's own RAM with a max of 2GB, we are forced to do some writing to disk and this is super slow. da.mean(dim='time').compute() # task stream is very inefficient here with high memory use! # - # ### recap # # * processes=True is great for dask delayed opening a bunch of datasets # * it's also great for tasks where workers don't need to communicate information # * it is really bad if tasks need to store a lot of intermediate results in memory (workers start writing to disk instead of RAM) # ### Best of both worlds? # # Turns out you can mix and match dask cluster operations in a workflow. 
For example: # + # %%time with LocalCluster(local_directory='/tmp') as cluster, Client(cluster) as client: dataArrays = dask.compute(*[lazy_open(href) for href in images]) da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop').rename(band='time') da['time'] = dates spatial_means = da.mean(dim=['x','y']).compute() with LocalCluster(processes=False, local_directory='/tmp') as cluster, Client(cluster) as client: temporal_mean = da.mean(dim='time').compute() # - # ### recap # # * if performance is what you're going for, you might need to mix and match dask settings with processes and threads # * while COGs are loaded as Dask Arrays via xarray, references to files and file locks can complicated parallelization
2-dask-localcluster.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import time import json import random import requests import platform import grpc import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow_serving.apis import predict_pb2 from tensorflow_serving.apis import prediction_service_pb2_grpc # %matplotlib inline # + # TF-Serving gRPC informations: https://github.com/tensorflow/serving/blob/master/tensorflow_serving # Docker informations: https://www.docker.com # + # Check OS informations, docker version # resp = !docker --version docker_version = ' '.join(resp[0].split(' ')[2:]) print("Current OS: \n" " {}\n".format(platform.platform())) print("Docker version: \n" " {}".format(docker_version)) # + # Prepare testing data mnist = tf.keras.datasets.mnist (_, _), (x_test, y_test) = mnist.load_data() x_test = x_test.astype(np.float32) x_test /= 255.0 # + # Print MNIST dataset informations # 打印 MNIST 資料集相關資訊 print("Images numbers:\n" " Test : {}".format(x_test.shape[0])) print("Image size: {} * {}".format(x_test.shape[1:][0], x_test.shape[1:][1])) # + # Pull tf-serving latest official image from docker hub # Or use !docker pull tensorflow/serving:1.12.0 to pull specific version # Use gpu with tensorflow/serving:latest-gpu # 從 docker hub 拉取最新的 tf-serving 映像檔 # 或是使用 !docker pull tensorflow/serving:1.12.0 拉取特定版本 # Gpu 版本請用 tensorflow/serving:latest-gpu # !docker pull tensorflow/serving:latest # + # Run serving docker, better not run this shit in IPython notebook # Port 8501 exposed for RESTful API, Port 8500 exposed for gRPC # 請打開一個終端已架設 serving docker,別使用 IPython notebook # !$MODELPATH = "Your models folder path ... must be absolute path!" 
# !docker run -t --rm -p 8500:8500 \ # --mount type=bind,source=$MODELPATH,target=/models \ # -e MODEL_NAME=mnist \ # --name serving_test \ # tensorflow/serving # + # You can get informations of served model with API after you successfully ran a serving docker # 如果成功跑起一個 serving docker,可以用 API 取得模型資訊 # !curl http://localhost:8501/v1/models/mnist # + # One of the key advantages of REST APIs is that they provide a great deal of flexibility # Data is not tied to resources or methods, so REST can handle multiple types of calls # Use python request lib resp = requests.get('http://localhost:8501/v1/models/mnist') print(resp.text) # + # Get model metadata that we can easily know the signatures in model resp = requests.get('http://localhost:8501/v1/models/mnist/metadata') print("Status code: \n" " {}".format(resp)) print("Respones: \n" " {}".format(resp.text)) # + # Predict using gRPC, show results format hostport ='localhost:8500' channel = grpc.insecure_channel(hostport) stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) results = list() for i in range(len(x_test)): request = predict_pb2.PredictRequest() request.model_spec.name ='mnist' request.model_spec.signature_name ='serving_default' request.inputs['flatten_input'].CopyFrom(tf.make_tensor_proto(x_test[i], shape=[1,1,28, 28])) results.append(stub.Predict(request)) print(results[0]) # + print(results[0].outputs['dense_1'].float_val) example_num = 10 plt.figure(figsize=(15, 15)) for i in range(0, example_num): plt.subplot(9,10,i + 1) pred = results[i].outputs['dense_1'].float_val plt.title("Inference: {}".format(np.argmax(pred))) plt.imshow(x_test[i], cmap ='gray', ) plt.xticks([]) plt.yticks([]) plt.tight_layout()
TF-Serving-gRPC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="../../img/ods_stickers.jpg" /> # # ## [mlcourse.ai](mlcourse.ai) – Open Machine Learning Course # Author: [<NAME>](https://yorko.github.io) (@yorko). Edited by <NAME> (@feuerengel). This material is subject to the terms and conditions of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. Free use is permitted for any non-commercial purpose. # # <center>Assignment #3. Fall 2018 # ## <center> Decision trees for classification and regression # **In this assignment, we will find out how a decision tree works in a regression task, then will build and tune classification decision trees for identifying heart diseases. # Fill in the missing code in the cells marked "You code here" and answer the questions in the [web form](https://docs.google.com/forms/d/1hsrNFSiRsvgB27gMbXfQWpq8yzNhLZxuh_VSzRz7XhI).** import numpy as np import pandas as pd from matplotlib import pyplot as plt from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import accuracy_score, make_scorer from sklearn.tree import DecisionTreeClassifier, export_graphviz import seaborn as sns # ## 1. A simple example of regression using decision trees # Let's consider the following one-dimensional regression problem. It is needed to build the function $a(x)$ to approximate original dependency $y = f(x)$ using mean-squared error $min \sum_i {(a(x_i) - f(x_i))}^2$. # + X = np.linspace(-2, 2, 7) y = X ** 3 plt.scatter(X, y) plt.xlabel(r'$x$') plt.ylabel(r'$y$'); # - # Let's make several steps to build the decision tree. Let's choose the symmetric thresholds equal to 0, 1.5 and -1.5 for partitioning. In the case of a regression task, the leaf outputs mean answer for all observations in this leaf. 
# Let's start from tree of depth 0 that contains all train observations. How will predictions of this tree look like for $x \in [-2, 2]$? Create the appropriate plot using a pen, paper and Python if it is needed (without using `sklearn`). # + # You code here # - # Let's split the data according to the following condition $[x < 0]$. It gives us the tree of depth 1 with two leaves. Let's create a similar plot for predictions of this tree. # + # You code here # - # In the decision tree algorithm, the feature and the threshold for splitting are chosen according to some criterion. The commonly used criterion for regression is based on variance: $$\large Q(X, y, j, t) = D(X, y) - \dfrac{|X_l|}{|X|} D(X_l, y_l) - \dfrac{|X_r|}{|X|} D(X_r, y_r),$$ # where $\large X$ and $\large y$ are a feature matrix and a target vector (correspondingly) for training instances in a current node, $\large X_l, y_l$ and $\large X_r, y_r$ are splits of samples $\large X, y$ into two parts w.r.t. $\large [x_j < t]$ (by $\large j$-th feature and threshold $\large t$), $\large |X|$, $\large |X_l|$, $\large |X_r|$ (or, the same, $\large |y|$, $\large |y_l|$, $\large |y_r|$) are sizes of appropriate samples, and $\large D(X, y)$ is variance of answers $\large y$ for all instances in $\large X$: # $$\large D(X) = \dfrac{1}{|X|} \sum_{j=1}^{|X|}(y_j – \dfrac{1}{|X|}\sum_{i = 1}^{|X|}y_i)^2$$ # Here $\large y_i = y(x_i)$ is the answer for the $\large x_i$ instance. Feature index $\large j$ and threshold $\large t$ are chosen to maximize the value of criterion $\large Q(X, y, j, t)$ for each split. # # In our 1D case, there's only one feature so $\large Q$ depends only on threshold $\large t$ and training data $\large X$ and $\large y$. Let's designate it $\large Q_{1d}(X, y, t)$ meaning that the criterion no longer depends on feature index $\large j$, i.e. in 1D case $\large j = 0$. 
# # Create the plot of criterion $\large Q_{1d}(X, y, t)$ as a function of threshold value $t$ on the interval $[-1.9, 1.9]$. def regression_var_criterion(X, y, t): pass # You code here # + # You code here # - # **<font color='red'>Question 1.</font> Is the threshold value $t = 0$ optimal according to the variance criterion?** # - Yes # - No # Then let's make splitting in each of the leaves' nodes. In the left branch (where previous split was $x < 0$) using the criterion $[x < -1.5]$, in the right branch (where previous split was $x \geqslant 0$) with the following criterion $[x < 1.5]$. It gives us the tree of depth 2 with 7 nodes and 4 leaves. Create the plot of these tree predictions for $x \in [-2, 2]$. # + # You code here # - # **<font color='red'>Question 2.</font> How many segments are there on the plot of tree predictions in the interval [-2, 2] (it is necessary to count only horizontal lines)?** # - 2 # - 3 # - 4 # - 5 # ## 2. Building a decision tree for predicting heart diseases # Let's read the data on heart diseases. The dataset can be downloaded from the course repo from [here](https://github.com/Yorko/mlcourse.ai/blob/master/data/mlbootcamp5_train.csv) by clicking on `Download` and then selecting `Save As` option. # # **Problem** # # Predict presence or absence of cardiovascular disease (CVD) using the patient examination results. # # **Data description** # # There are 3 types of input features: # # - *Objective*: factual information; # - *Examination*: results of medical examination; # - *Subjective*: information given by the patient. 
# # | Feature | Variable Type | Variable | Value Type | # |---------|--------------|---------------|------------| # | Age | Objective Feature | age | int (days) | # | Height | Objective Feature | height | int (cm) | # | Weight | Objective Feature | weight | float (kg) | # | Gender | Objective Feature | gender | categorical code | # | Systolic blood pressure | Examination Feature | ap_hi | int | # | Diastolic blood pressure | Examination Feature | ap_lo | int | # | Cholesterol | Examination Feature | cholesterol | 1: normal, 2: above normal, 3: well above normal | # | Glucose | Examination Feature | gluc | 1: normal, 2: above normal, 3: well above normal | # | Smoking | Subjective Feature | smoke | binary | # | Alcohol intake | Subjective Feature | alco | binary | # | Physical activity | Subjective Feature | active | binary | # | Presence or absence of cardiovascular disease | Target Variable | cardio | binary | # # All of the dataset values were collected at the moment of medical examination. df = pd.read_csv('mlbootcamp5_train.csv', index_col='id', sep=';') df.head() # Transform the features: create "age in years" (full age) and also create 3 binary features based on `cholesterol` and 3 more on `gluc`, where they are equal to 1, 2 or 3. This method is called dummy-encoding or One Hot Encoding (OHE). It is more convenient to use `pandas.get_dummmies.`. There is no need to use the original features `cholesterol` and `gluc` after encoding. # + df = pd.concat([df, pd.get_dummies(df.cholesterol, prefix='chol', prefix_sep='_'), pd.get_dummies(df.gluc, 'gluc', '_')], axis=1) df.drop(['cholesterol', 'gluc'], axis = 1, inplace = True) df.head() # - # Split data into train and holdout parts in the proportion of 7/3 using `sklearn.model_selection.train_test_split` with `random_state=17`. 
# +
X = df.drop(['cardio'], axis = 1)
y = df['cardio']

# 70/30 train/holdout split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=17)
# -

# Train the decision tree on the dataset `(X_train, y_train)` with max depth equals to 3 and `random_state=17`. Plot this tree with `sklearn.tree.export_graphviz`, `dot` and `pydot`. You don't need to use quotes in the file names in order to make it work in a jupyter notebook. The commands starting from the exclamation mark are terminal commands that are usually run in terminal/command line.

# +
clf = DecisionTreeClassifier(max_depth=3, random_state=17)
heart_fit = clf.fit(X_train, y_train)

# +
import graphviz

# NOTE: `class_names` must follow the ascending order of the class labels
# (clf.classes_ == [0, 1]), and cardio == 0 means "no disease", so
# 'Healthy' has to come first — the previous ['Disease', 'Healthy'] order
# swapped the labels in the rendered tree.
dot_data = export_graphviz(heart_fit, out_file=None,
                           feature_names=X.columns,
                           class_names=['Healthy', 'Disease'],
                           filled=True, rounded=True,
                           special_characters=True)
graph = graphviz.Source(dot_data)
graph
# -

# **<font color='red'>Question 3.</font> What 3 features are used to make predictions in the created decision tree?**
# - weight, height, gluc=3
# - smoke, age, gluc=3
# - age, weight, chol=3
# - **age, ap_hi, chol=3**

# Make predictions for holdout data `(X_valid, y_valid)` with the trained decision tree. Calculate accuracy.

# +
pred = heart_fit.predict(X_test)

## How accurate were our predictions?
accuracy_score(y_true=y_test, y_pred=pred)
# -

# Set up the depth of the tree using cross-validation on the dataset `(X_train, y_train)` in order to increase quality of the model. Use `GridSearchCV` with 5 folds. Fix `random_state=17` and change `max_depth` from 2 to 10.

# +
tree_params = {'max_depth': list(range(2, 11))}

# Score each candidate depth by plain accuracy with 5-fold CV on the training part only.
accurate = make_scorer(accuracy_score)
tree_grid = GridSearchCV(clf, param_grid=tree_params, cv = 5, scoring=accurate)
grid = tree_grid.fit(X_train, y_train)
# -

# Draw the plot to show how mean accuracy is changing in regards to `max_depth` value on cross-validation. 
# + params = tree_grid.cv_results_['params'] mean_acc = tree_grid.cv_results_['mean_test_score'] sns.scatterplot(x=np.arange(2, 11), y= mean_acc); # - # Print the best value of `max_depth` where the mean value of cross-validation quality metric reaches maximum. Also compute accuracy on holdout data. All these computations are possible to make using the trained instance of the class `GridSearchCV`. tree_grid.best_params_ tree_grid.best_score_ # **<font color='red'>Question 4.</font> Is there a local maximum of accuracy on the built validation curve? Did `GridSearchCV` help to tune `max_depth` so that there's been at least 1% change in holdout accuracy?** # (check out the expression (acc2 - acc1) / acc1 * 100%, where acc1 and acc2 are accuracies on holdout data before and after tuning `max_depth` with `GridSearchCV` respectively)? # - yes, yes # - yes, no # - no, yes # - no, no # Take a look at the SCORE table to estimate ten-year risk of fatal cardiovascular disease in Europe. [Source paper](https://academic.oup.com/eurheartj/article/24/11/987/427645). # # <img src='../../img/SCORE2007-eng.png' width=70%> # # Create binary features according to this picture: # - $age \in [40,50), \ldots age \in [60,65) $ (4 features) # - systolic blood pressure: $ap\_hi \in [120,140), ap\_hi \in [140,160), ap\_hi \in [160,180),$ (3 features) # # If the values of age or blood pressure don't fall into any of the intervals then all binary features will be equal to zero. Then we create decision tree with these features and additional ``smoke``, ``cholesterol`` and ``gender`` features. Transform the ``cholesterol`` to 3 binary features according to it's 3 unique values ( ``cholesterol``=1, ``cholesterol``=2 and ``cholesterol``=3). This method is called dummy-encoding or One Hot Encoding (OHE). Transform the ``gender`` from 1 and 2 into 0 and 1. It is better to rename it to ``male`` (0 – woman, 1 – man). 
In general, this is typically done with ``sklearn.preprocessing.LabelEncoder`` but here in case of only 2 unique values it's not necessary. # # Finally the decision tree is built using 12 binary features (without original features). # # Create a decision tree with the limitation `max_depth=3` and train it on the whole train data. Use the `DecisionTreeClassifier` class with fixed `random_state=17`, but all other arguments (except for `max_depth` and `random_state`) should be set by default. # # **<font color='red'>Question 5.</font> What binary feature is the most important for heart disease detection (it is placed in the root of the tree)?** # - Systolic blood pressure from 160 to 180 (mmHg) # - Gender male / female # - Systolic blood pressure from 140 to 160 (mmHg) # - Age from 50 to 55 (years) # - Smokes / doesn't smoke # - Age from 60 to 65 (years) # + # You code here
ML_course/Decision_Trees/assignment3_decision_trees.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={"grade": false, "grade_id": "q2_prompt", "locked": true, "solution": false}
# # Q2
#
# In this question, we'll look at using conditionals to change the behavior of code.

# + [markdown] nbgrader={"grade": false, "grade_id": "q2a_prompt", "locked": true, "solution": false}
# ### A
#
# In this question, you'll write a method that takes a list of numbers [0-9] and returns a corresponding list with the "ordinal" versions. That is, if you see a 1 in the list, you'll create a string "1st". If you see a 3, you'll create a string "3rd", and so on.
#
# For example, if you receive `[2, 1, 4, 3, 4]` as input, you should create a list of strings that looks like this: `["2nd", "1st", "4th", "3rd", "4th"]`.

# + nbgrader={"grade": false, "grade_id": "q2a", "locked": false, "solution": true}
def return_ordinals(numbers):
    """Return the ordinal string ("1st", "2nd", ...) for each digit in `numbers`."""
    out_list = []

    ### BEGIN SOLUTION
    # Digits 1, 2 and 3 take irregular suffixes; every other digit 0-9 takes "th".
    suffixes = {1: "st", 2: "nd", 3: "rd"}
    for number in numbers:
        out_list.append("{}{}".format(number, suffixes.get(number, "th")))
    ### END SOLUTION

    return out_list


# + nbgrader={"grade": true, "grade_id": "q2a_test1", "locked": true, "points": 5, "solution": false}
inlist = [5, 6, 1, 9, 5, 5, 3, 3, 9, 4]
outlist = ["5th", "6th", "1st", "9th", "5th", "5th", "3rd", "3rd", "9th", "4th"]
for y_true, y_pred in zip(outlist, return_ordinals(inlist)):
    assert y_true == y_pred.lower()

inlist = [7, 5, 6, 6, 3, 5, 1, 0, 5, 2]
outlist = ["7th", "5th", "6th", "6th", "3rd", "5th", "1st", "0th", "5th", "2nd"]
for y_true, y_pred in zip(outlist, return_ordinals(inlist)):
    assert y_true == y_pred.lower()

# + [markdown] nbgrader={"grade": false, "grade_id": "q2b_prompt", "locked": true, "solution": false}
# ---
# ### B
#
# In this question, you'll write code that computes the median of a sorted list of numbers. You can assume the list you receive is already sorted in ascending order (least to greatest). 
#
# If your input list is `[1, 1, 2, 4, 7, 7, 8]`, then your output should be `4`. Recall the rule about median: if you get a list with an *even* number of elements, you should return the average of the two middle ones.
#
# Store your answer in the variable `med_num`.

# + nbgrader={"grade": false, "grade_id": "q2b", "locked": false, "solution": true}
def median(numbers):
    """Return the median of `numbers`, an already-sorted (ascending) list."""
    med_num = 0

    ### BEGIN SOLUTION
    count = len(numbers)
    middle = count // 2
    if count % 2 == 1:
        # Odd length: the single middle element is the median.
        med_num = numbers[middle]
    else:
        # Even length: average the two middle elements.
        med_num = (numbers[middle - 1] + numbers[middle]) / 2
    ### END SOLUTION

    return med_num


# + nbgrader={"grade": true, "grade_id": "q2b_test", "locked": true, "points": 5, "solution": false}
inlist = [ 35.20575598,  45.05634995,  45.42573818,  55.07275661,  66.42501038,
        66.48337884,  73.59004688,  81.09609177,  87.67779046,  93.90508029]
outmed = 66
assert outmed == int(median(inlist))

inlist = [ 12899.59248764,  19792.31177415,  31156.00415682,  31764.93625914,
        41443.07238461,  50669.10268086,  55408.34012113,  61352.47232585,
        72682.91992934,  86883.37175784]
outmed = 46056
assert outmed == int(median(inlist))

# + [markdown] nbgrader={"grade": false, "grade_id": "q2c_prompt", "locked": true, "solution": false}
# ---
# ### C
#
# In this question, you'll write code to find the mode of a list of numbers. Recall that the mode is the number that occurs most frequently. Ties can be broken arbitrarily (meaning you can pick whichever number among those tied for the most frequent).
#
# If your input list is `[5, 1, 3, 1, 2, 5, 1]`, you should return `1`.
#
# Store your answer in the variable `mode_num`.

# + nbgrader={"grade": false, "grade_id": "q2c", "locked": false, "solution": true}
def mode(numbers):
    """Return the most frequent value in `numbers` (ties broken arbitrarily)."""
    mode_num = 0

    ### BEGIN SOLUTION
    # Count occurrences, then pick the key with the largest count.
    counts = {}
    for value in numbers:
        counts[value] = counts.get(value, 0) + 1
    if counts:
        mode_num = max(counts, key=counts.get)
    ### END SOLUTION

    return mode_num


# + nbgrader={"grade": true, "grade_id": "q2c_test", "locked": true, "points": 5, "solution": false}
l1 = [5, 1, 3, 1, 2, 5, 1]
a1 = 1
assert mode(l1) == a1

l2 = [1, 2, 3, 1, 2, 3, 1, 2, 3]
a2 = mode(l2)
assert a2 == 1 or a2 == 2 or a2 == 3
assignments/A2/A2_Q2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import pandas as pd #Check pandas version to make sure 0.25.1 and the worksheet is working pd.__version__ # %matplotlib inline # **How to use this worksheet** # # **1.Point Misty at something interesting** # # There needs to be different levels of depth or something interesting in the frame. # # # **2.Get Depth Data** # # The data is pulled in using either Misty API Explorer or Postman. # GET http://<your Misty robots IP address>/api/cameras/depth # # The data should be in json format: # { # "result": { # "height": 240, # "image": [ "NaN", ...], # "width": 320 # }, # "status": "Success" # } # # 3. Save the data as a <yourfilename>.json file # # 4. Update the script below to reference your file # # 4. Run each of the cells # # later: Take a fisheye picture and compare # # + # Read in a json file #depth = pd.read_json('../coderepo/<your file name here>.json') data = pd.read_json('../coderepo/BB8_HW.json') #Transpose the DataFrame to get the column labels to contain height width, and image dataT = data.T print(" The height and width should be (240, 320):", dataT['height'].result, dataT['width'].result); dataT # + #Next Extract the Image sf = dataT['image'].result; #Turn the result into a DataFrame depth = pd.DataFrame(sf); #Rename the volumn values depth.rename(columns={0:'Values'}, inplace=True); #Check to see the count of NaN values in the Image - Count all NaN's NaN_cnt = (depth.Values == 'NaN').sum(); coverage = round(((76800-NaN_cnt)/76800)*100,2) #percent of non-NaN "good" values print("Depth point coverage is: ", coverage, "%"); if coverage < 50: print("Data not so good - you may not have valid data in all cells") print("You may have problems running the rest of the cells") # - depth.describe() 
# + #Replace all of the NaN's with 0 # "fo" is just an intermediate holding variable fo = depth.replace('NaN',0) #Check to make sure there are no NaNs numberNaN = (fo.Values == 'NaN').sum() #Reshape the array data = np.array(fo).reshape((240,320)) data.shape #Check to make sure all NaN's were replaced print(" There are " + str(numberNaN) + " NaN's, and the array is " +str(data.shape) + " of type " + str(type(data)) ) # + #Plot the figure with a couple of overlays plt.rcParams['figure.figsize'] = [12, 7] #plt.imshow(data, cmap=plt.get_cmap('gray')); #plt.imshow(data, cmap=plt.get_cmap('gray')); plt.imshow(data, cmap=plt.get_cmap('Greens')); #Some other plot methods #plt.rcParams['figure.figsize'] = [9, 7] #plt.contour(data) #plt.gca().invert_yaxis() #need to rotate or flip it so that 0 is at the top #plt.rcParams['figure.figsize'] = [9, 7] #plt.pcolormesh(data); # + #Create the column arrays for the sum results to be stored in #There are 320 columns so create zero arrays of length 320 #Create Column Arrays for the entire picture colDepthSum = np.empty(320) colDepthCount = np.empty(320) columnDepth = np.empty(320) #Create Slice Arrays to only cover a middle portion of the picture Slice_sum = np.empty(320) Slice_cnt = np.empty(320) sumSliceDistance = np.empty(320) #Array will contain averaged slice distance values for i in range(320): colDepthSum[i]=0 colDepthCount[i]=0 columnDepth[i]=0 Slice_sum[i] = 0 Slice_cnt[i] = 0 sumSliceDistance[i] = 10000; #Set each element out of way 10000=10meters - larger than max expected measured distance # + #Populate the array of 320 columns with values # - the total depth coverage (average of 240 elements in the 320 columns) # - the depth of just a slice of the image, defined by two points that indicate the element line depthSum = 0; #sum total of all depth points depthCount = 0; #count of all non-zero points tempDepth = 0; #temporary variable for holding value #Lines are rows between 1 and 240 #topline is less than bottom line -- 
since first pixel is top left topline = 150; #change these for lines - 120 is the middle row botline = 160; toprow = 320*topline; #convert the row line to overall depth array position botrow = 320*botline; for i in range(76800): #76800 is the number of array points returned from the depth picture if fo.Values[i] != 0: tempDepth = fo.Values[i] #don't really need this step depthCount = depthCount +1 depthSum = depthSum + tempDepth indes=i%320 #There are 320 colums - find correct column by remainder of position index if i >= toprow and i<= botrow: #Check column for the Far Left FL variables Slice_sum[indes] = Slice_sum[indes] + fo.Values[i] Slice_cnt[indes] = Slice_cnt[indes] + 1 colDepthSum[indes] = colDepthSum[indes] + fo.Values[i] colDepthCount[indes] = colDepthCount[indes] + 1 print("Number of non-zero values:", depthCount, 'which is', round(depthCount/76800*100,1) ,'% coverage') print("Average overall depth:", round(depthSum/depthCount,4)) # + #Calculate the average for the slice for i in range(320): if Slice_cnt[i] > 0: sumSliceDistance[i] = Slice_sum[i]/Slice_cnt[i] #Calculate the average for each column over the ENTIRE depth picture for i in range(320): if colDepthCount[i] !=0: columnDepth[i] = colDepthSum[i]/colDepthCount[i] #Plot both the average and slice arrays fig = plt.figure() ax = plt.subplot(111) plt.plot(sumSliceDistance, 'orange') plt.plot(columnDepth, 'r') ax.set_ylim(1,2000); #two vertical lines at 0 and 320 - the limits of the sensor plt.plot([0, 0], [0, 2100], 'b'); #plt.plot([320, 320], [0, 2100], 'b'); #320 is the max - but the sensor does not appear to go over that far plt.plot([285, 285], [0, 2100], 'b'); #285 is where the sensor appears to stop #place "Misty" where is placed and looking from plt.annotate('Misty', xy=(150, 120), c='b', fontsize = 'large', fontweight = 'bold'); #Change the axis to not show the 10000 out of the way points. 
# + # Re-plot the entire DepthPicture (with points) and inlay lines # that show the positions of consolidated points, and the depth # level of each of the 12 points plt.rcParams['figure.figsize'] = [12, 7] plt.imshow(data, cmap=plt.get_cmap('Oranges')); #Oranges instead of Greys for BB8 - plus it makes blue lines show up better c_width = botline-topline; c_aveline = (botline+topline)/2; #plot horizontal lines plt.plot([0, 319], [botline, botline], 'b', linewidth='1'); # plot([x1,x2], [y1,y2], color='b') plt.plot([0, 319], [topline, topline], 'b', linewidth='1'); plt.plot([0, 319], [c_aveline, c_aveline], 'g-', alpha=0.4, linewidth=c_width*1.5); #plot average thick line print("The Green line shows where the depth data is constrained to for sumSliceDistance. Columns in this range are averaged.") # - #Print out the array to see it sumSliceDistance # + #Now that have the array - check to see if there is anything within 1meter of Misty, and depthThresholdtoWarn = 1000; #Set to 1000mm or 1m if (min(sumSliceDistance) < depthThresholdtoWarn): print("Watch out!") else: print("All Good! Keep Driving") # -
takeDepth/SliceDepth.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo: multinomial and Bernoulli naive Bayes on a toy integer matrix.
import numpy as np
from sklearn.naive_bayes import MultinomialNB, BernoulliNB

import warnings

# Suppress warnings (e.g. the column-vector target warning from sklearn).
warnings.filterwarnings(action='ignore')

# 50 samples with 20 integer features in [0, 5); each sample is its own class.
x = np.random.randint(5, size=(50, 20))
y = np.arange(50).reshape(50, 1)

multinomial = MultinomialNB()
multinomial.fit(x, y)

# predict() expects a 2-D array, so feed row 22 as a one-sample batch.
multinomial.predict(x[22].reshape(1, -1))

# Note: after fitting one dataset, the multinomial model can continue training
# on further datasets without pooling them together. In sklearn this is done
# with MultinomialNB.partial_fit(), which suits training sets too large to fit
# in memory at once. The first call to partial_fit() must be given the
# complete list of class labels.

bernoulli = BernoulliNB()
bernoulli.fit(x, y)
bernoulli.predict(x[22].reshape(1, -1))
NLP/2-NaiveBayes_N-gram/MultinomialNB-BernoulliNB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Natural language inference using spaCy and Keras # ## Introduction # This notebook details an implementation of the natural language inference model presented in [(Parikh et al, 2016)](https://arxiv.org/abs/1606.01933). The model is notable for the small number of paramaters *and hyperparameters* it specifices, while still yielding good performance. # ## Constructing the dataset import spacy import numpy as np # We only need the GloVe vectors from spaCy, not a full NLP pipeline. nlp = spacy.load('en_vectors_web_lg') # Function to load the SNLI dataset. The categories are converted to one-shot representation. The function comes from an example in spaCy. # + import ujson as json from keras.utils import to_categorical LABELS = {'entailment': 0, 'contradiction': 1, 'neutral': 2} def read_snli(path): texts1 = [] texts2 = [] labels = [] with open(path, 'r') as file_: for line in file_: eg = json.loads(line) label = eg['gold_label'] if label == '-': # per Parikh, ignore - SNLI entries continue texts1.append(eg['sentence1']) texts2.append(eg['sentence2']) labels.append(LABELS[label]) return texts1, texts2, to_categorical(np.asarray(labels, dtype='int32')) # - # Because Keras can do the train/test split for us, we'll load *all* SNLI triples from one file. 
texts,hypotheses,labels = read_snli('snli/snli_1.0_train.jsonl')

def create_dataset(nlp, texts, hypotheses, num_oov, max_length, norm_vectors = True):
    """Convert sentence pairs into padded word-id matrices plus an embedding table.

    Returns (vectors, text_ids, hypothesis_ids): `vectors` is a float32
    embedding matrix indexed by the ids, and each ids array has shape
    (num_sentences, max_length), zero-padded on the right.
    """
    sents = texts + hypotheses
    # the extra +1 is for a zero vector representing NULL for padding
    num_vectors = max(lex.rank for lex in nlp.vocab) + 2
    # create random vectors for OOV tokens
    oov = np.random.normal(size=(num_oov, nlp.vocab.vectors_length))
    oov = oov / oov.sum(axis=1, keepdims=True)
    # row 0 stays all-zero (padding); rows 1..num_vectors-1 hold the GloVe
    # vectors shifted up by one; the last num_oov rows hold the OOV vectors.
    vectors = np.zeros((num_vectors + num_oov, nlp.vocab.vectors_length), dtype='float32')
    vectors[num_vectors:, ] = oov
    for lex in nlp.vocab:
        if lex.has_vector and lex.vector_norm > 0:
            vectors[lex.rank + 1] = lex.vector / lex.vector_norm if norm_vectors == True else lex.vector
    sents_as_ids = []
    for sent in sents:
        doc = nlp(sent)
        word_ids = []
        for i, token in enumerate(doc):
            # skip odd spaces from tokenizer
            if token.has_vector and token.vector_norm == 0:
                continue
            if i > max_length:
                break
            if token.has_vector:
                word_ids.append(token.rank + 1)
            else:
                # if we don't have a vector, pick an OOV entry (deterministic by rank)
                word_ids.append(token.rank % num_oov + num_vectors)
        # there must be a simpler way of generating padded arrays from lists...
        word_id_vec = np.zeros((max_length), dtype='int')
        clipped_len = min(max_length, len(word_ids))
        word_id_vec[:clipped_len] = word_ids[:clipped_len]
        sents_as_ids.append(word_id_vec)
    # the first len(texts) rows are the premises, the remainder the hypotheses
    return vectors, np.array(sents_as_ids[:len(texts)]), np.array(sents_as_ids[len(texts):])

sem_vectors, text_vectors, hypothesis_vectors = create_dataset(nlp, texts, hypotheses, 100, 50, True)

texts_test,hypotheses_test,labels_test = read_snli('snli/snli_1.0_test.jsonl')
_, text_vectors_test, hypothesis_vectors_test = create_dataset(nlp, texts_test, hypotheses_test, 100, 50, True)

# We use spaCy to tokenize the sentences and return, when available, a semantic vector for each token.
#
# OOV terms (tokens for which no semantic vector is available) are assigned to one of a set of randomly-generated OOV vectors, per (Parikh et al, 2016).
# # Note that we will clip sentences to 50 words maximum.

from keras import layers, Model, models
from keras import backend as K

# ## Building the model

# The embedding layer copies the 300-dimensional GloVe vectors into GPU memory. Per (Parikh et al, 2016), the vectors, which are not adapted during training, are projected down to lower-dimensional vectors using a trained projection matrix.

def create_embedding(vectors, max_length, projected_dim):
    """Frozen GloVe embedding followed by a learned linear projection."""
    glove = layers.Embedding(
        vectors.shape[0],
        vectors.shape[1],
        input_length=max_length,
        weights=[vectors],
        trainable=False)
    # Bias-free linear map applied independently at every time step.
    project = layers.TimeDistributed(
        layers.Dense(projected_dim, activation=None, use_bias=False))
    return models.Sequential([glove, project])

# The Parikh model makes use of three feedforward blocks that construct nonlinear combinations of their input. Each block contains two ReLU layers and two dropout layers.

def create_feedforward(num_units=200, activation='relu', dropout_rate=0.2):
    """Two (Dense -> Dropout) stages, as used for F, G and H in the paper."""
    block = models.Sequential()
    for _ in range(2):
        block.add(layers.Dense(num_units, activation=activation))
        block.add(layers.Dropout(dropout_rate))
    return block

# The basic idea of the (Parikh et al, 2016) model is to:
#
# 1. *Align*: Construct an alignment of subphrases in the text and hypothesis using an attention-like mechanism, called "decompositional" because the layer is applied to each of the two sentences individually rather than to their product. The dot product of the nonlinear transformations of the inputs is then normalized vertically and horizontally to yield a pair of "soft" alignment structures, from text->hypothesis and hypothesis->text. Concretely, for each word in one sentence, a multinomial distribution is computed over the words of the other sentence, by learning a multinomial logistic with softmax target.
# 2. *Compare*: Each word is now compared to its aligned phrase using a function modeled as a two-layer feedforward ReLU network.
# The output is a high-dimensional representation of the strength of association between word and aligned phrase.
# 3. *Aggregate*: The comparison vectors are summed, separately, for the text and the hypothesis. The result is two vectors: one that describes the degree of association of the text to the hypothesis, and the second, of the hypothesis to the text.
# 4. Finally, these two vectors are processed by a dense layer followed by a softmax classifier, as usual.
#
# Note that because in entailment the truth conditions of the consequent must be a subset of those of the antecedent, it is not obvious that we need both vectors in step (3). Entailment is not symmetric. It may be enough to just use the hypothesis->text vector. We will explore this possibility later.

# We need a couple of little functions for Lambda layers to normalize and aggregate weights:

# +
def normalizer(axis):
    """Return a function computing a softmax over *axis*, for use in a Lambda layer."""
    def _normalize(att_weights):
        exp_weights = K.exp(att_weights)
        sum_weights = K.sum(exp_weights, axis=axis, keepdims=True)
        return exp_weights/sum_weights
    return _normalize

def sum_word(x):
    """Sum the comparison vectors over the word (time) axis."""
    return K.sum(x, axis=1)
# -

# +
def build_model(vectors, max_length, num_hidden, num_classes, projected_dim, entail_dir='both'):
    """Assemble the decomposable-attention model.

    `entail_dir` selects which alignment direction(s) feed the classifier:
    'both' (the paper's default), 'left' (hypothesis->text only), or any
    other value for text->hypothesis only.
    """
    input1 = layers.Input(shape=(max_length,), dtype='int32', name='words1')
    input2 = layers.Input(shape=(max_length,), dtype='int32', name='words2')

    # embeddings (projected)
    embed = create_embedding(vectors, max_length, projected_dim)
    a = embed(input1)
    b = embed(input2)

    # step 1: attend — raw attention scores between all word pairs
    F = create_feedforward(num_hidden)
    att_weights = layers.dot([F(a), F(b)], axes=-1)

    G = create_feedforward(num_hidden)

    if entail_dir == 'both':
        # softmax over each axis yields the two "soft" alignments
        norm_weights_a = layers.Lambda(normalizer(1))(att_weights)
        norm_weights_b = layers.Lambda(normalizer(2))(att_weights)
        alpha = layers.dot([norm_weights_a, a], axes=1)
        beta = layers.dot([norm_weights_b, b], axes=1)

        # step 2: compare each word with its aligned phrase
        comp1 = layers.concatenate([a, beta])
        comp2 = layers.concatenate([b, alpha])
        v1 = layers.TimeDistributed(G)(comp1)
        v2 = layers.TimeDistributed(G)(comp2)

        # step 3: aggregate over words, then join both directions
        v1_sum = layers.Lambda(sum_word)(v1)
        v2_sum = layers.Lambda(sum_word)(v2)
        concat = layers.concatenate([v1_sum, v2_sum])
    elif entail_dir == 'left':
        # hypothesis->text direction only
        norm_weights_a = layers.Lambda(normalizer(1))(att_weights)
        alpha = layers.dot([norm_weights_a, a], axes=1)
        comp2 = layers.concatenate([b, alpha])
        v2 = layers.TimeDistributed(G)(comp2)
        v2_sum = layers.Lambda(sum_word)(v2)
        concat = v2_sum
    else:
        # text->hypothesis direction only
        norm_weights_b = layers.Lambda(normalizer(2))(att_weights)
        beta = layers.dot([norm_weights_b, b], axes=1)
        comp1 = layers.concatenate([a, beta])
        v1 = layers.TimeDistributed(G)(comp1)
        v1_sum = layers.Lambda(sum_word)(v1)
        concat = v1_sum

    # final classifier: feedforward H then softmax over the classes
    H = create_feedforward(num_hidden)
    out = H(concat)
    out = layers.Dense(num_classes, activation='softmax')(out)

    model = Model([input1, input2], out)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
# -

K.clear_session()
m = build_model(sem_vectors, 50, 200, 3, 200)
m.summary()

# The number of trainable parameters, ~381k, is the number given by Parikh et al, so we're on the right track.

# ## Training the model

# Parikh et al use tiny batches of 4, training for 50MM batches, which amounts to around 500 epochs. Here we'll use large batches to better use the GPU, and train for fewer epochs -- for purposes of this experiment.

m.fit([text_vectors, hypothesis_vectors], labels, batch_size=1024, epochs=50,validation_data=([text_vectors_test, hypothesis_vectors_test], labels_test))

# The result is broadly in the region reported by Parikh et al: ~86 vs 86.3%. The small difference might be accounted by differences in `max_length` (here set at 50), in the training regime, and that here we use Keras' built-in validation splitting rather than the SNLI test set.
# ## Experiment: the asymmetric model # It was suggested earlier that, based on the semantics of entailment, the vector representing the strength of association between the hypothesis to the text is all that is needed for classifying the entailment. # # The following model removes consideration of the complementary vector (text to hypothesis) from the computation. This will decrease the paramater count slightly, because the final dense layers will be smaller, and speed up the forward pass when predicting, because fewer calculations will be needed. m1 = build_model(sem_vectors, 50, 200, 3, 200, 'left') m1.summary() # The parameter count has indeed decreased by 40,000, corresponding to the 200x200 smaller H function. m1.fit([text_vectors, hypothesis_vectors], labels, batch_size=1024, epochs=50,validation_data=([text_vectors_test, hypothesis_vectors_test], labels_test)) # This model performs the same as the slightly more complex model that evaluates alignments in both directions. Note also that processing time is improved, from 64 down to 48 microseconds per step. # # Let's now look at an asymmetric model that evaluates text to hypothesis comparisons. The prediction is that such a model will correctly classify a decent proportion of the exemplars, but not as accurately as the previous two. # # We'll just use 10 epochs for expediency. m2 = build_model(sem_vectors, 50, 200, 3, 200, 'right') m2.summary() m2.fit([text_vectors, hypothesis_vectors], labels, batch_size=1024, epochs=10,validation_split=.2) # Comparing this fit to the validation accuracy of the previous two models after 10 epochs, we observe that its accuracy is roughly 10% lower. # # It is reassuring that the neural modeling here reproduces what we know from the semantics of natural language!
examples/notebooks/Decompositional Attention.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ai # language: python # name: ai # --- # ### IF문 # IF문 이전 3항 연산자 # 3항 연산자 : 참 if 조건 else 거짓 year = 2019 product = 'RTX 3080'if year > 2020 else 'RTX 2080' # 3항 연산자 print(product) # RTX 2080 # + # if 조건의 print 개행 문제 tot = 80 if tot >= 80 : print('tot은 : ', 80, '으로 합격') # print('노트북 지급') # 한칸 들여쓰기 만으로 오류 print("노트북 지급") # 4칸 들여쓰기로 정상 출력 print('수고하셨습니다~') # 들여쓰기 안할시 if 조건에 들어가지않음. # + # if~elif~else 문 su =10 if(su%2==0): print('짝수') else: print('홀수') if su == 1: print('January') elif su == 2: print('February') else: print("September") # - # 2중 if 문 test = 80 if test >=60: print('채용 합격') if test >=85: print('-> 프로젝트 투입') # 85점이상 else: print('-> 추가 교육 1개월') # 60~84 else: print("다음 채용에 지원해주세요") # < 60 # + # AND 연산자 + if~elif # JAVA와 다르게ㅔ &연산자대신 and 사용 score = 70 str1='' if score >= 90: str1 = 'A' elif score >= 80 and score <= 89: # 범위 str1 = 'B' elif score >= 70 and score <= 79: str1 = 'C' elif score >= 60 and score <= 69: str1 = 'D' else: str1 = 'F' print('등급: ' + str1) # OR연산자 or사용 month = 12 if (month == 12 or month == 1 or month == 2): print('겨울입니다.') # + [실습 2] 등수별 상품을 출력하는 프로그램을 제작하세요. - Juper에서 바로 입력: rank = int(input("데이터:")) # 문자열임으로 정수로 변환 - Hint: if ~ else if ~ - 기준 1등: 노트북 2등: 무한 복합 프린터 3등: 512 SSD 4등: 다음 기회에... 5등: 다음 기회에... 6등: 다음 기회에... [실행 화면] 등수: 1 상품: 노트북 # + # 실습 2 해답 # rank = input('데이터:') # 입력텍스트 란 출력됨. str 형태이기에 -> int형변환 필요 rank = int(input("데이터:")) # 문자열임으로 정수로 변환 product = '' # 입력을 if rank == 1: product='노트북' elif rank == 2: product='무한 복합 프린터' elif rank == 3: product='512 SSD' else: product='다음 기회에...' 
print('등수: ', rank, '상품: ', product) # + [실습 3] 논리 연산자의 사용 하나의 수를 입력받고 '1의자리', '10의 자리', '100의 자리', '1000의 자리이상'인지 력하는 프로그램을 작성하세요.(음수 제외) - Hint: if문 사용, 1의 자리 범위: 0 ~ 9까지, and 논리 연산자 사용 if 0 <= su and su <= 9: 또는 if su >=0 and su <= 9: [실행 화면] 정수: 5 1의 자리 or 정수: 50 10의 자리 or 정수: 1000 1000의 자리 이상 or 정수: -1000 음수는 처리하지 않습니다. # + # 실습 3 해답 su = int(input("정수:")) # 문자열임으로 정수로 변환 str = '' if su >= 0 and su<=9: str='1의자리' elif su >=10 and su <=99: str='10의자리' elif su >=100 and su <=999: str='100의자리' elif su >=1000 and su <=9999: str='1000의자리' elif su < 0: str='음수는 처리하지않습니다!' print('입력한 값은 :', str) # + [실습 4] python, MariaDB, html5 성적중에 한 과목이라도 80점이 넘으면 합격처리하는 프로그램을 작성하세요. - or: OR 연산자, 조건중 하나라도 참이면 참처리 예) if su == 1 or su == 2: [실행 화면] 결과: 통과 ← python: 50, mysql: 80, html5: 60 일 경우 or 결과: 재심사 ← python: 50, mysql: 70, html5: 60 일 경우 # + # 실습4 해석 su1 = int(input("python 점수 :")) su2 = int(input("MariaDB 점수 :")) su3 = int(input("html5 점수 : ")) str = '' if su1 >= 80 or su2 >= 80 or su3 >= 80: str='합격' else: str='재심사' print(str, '← python 점수 : ', su1, 'MariaDB 점수 : ', su2, 'html5 점수 : ', su3) # + [Upgrade 1] 한 과목이 80점이 넘어도 전체 평균이 60점이 안넘으면 불합격 처리하세요. - Hint: if문안에 if문을 계속 사용 할 수 있습니다. [실행 화면] 결과: 평균 점수 미달입니다. ← python: 40, mysql: 80, html5: 40 일 경우 or 결과: 통과 ← python: 50, mysql: 80, html5: 60 일 경우 or 결과: 재심사 ← python: 50, mysql: 70, html5: 60 일 경우 # + # upgrade 1 해석 su1 = int(input("python 점수 :")) su2 = int(input("MariaDB 점수 :")) su3 = int(input("html5 점수 : ")) str = '' avg = (su1+su2+su3)/3 if avg >= 60: if su1 >= 80 or su2 >= 80 or su3 >= 80: str='합격' else: str ='불합격' print(str, "<- 평균 : ", avg) # + [Upgrade 2] 한 과목이라도 40점 미만(40점은 포함 안됨)이 발생하면 과락 처리하세요. - Hint: if문안에 if문안에 if문을 계속 사용 할 수 있습니다. [실행 화면] 결과: 과락이 발생했습니다. ← python: 30, mysql: 80, html5: 30 일 경우 or 결과: 평균 점수 미달입니다. 
← python: 40, mysql: 80, html5: 40 일 경우 or 결과: 통과 ← python: 50, mysql: 80, html5: 60 일 경우 or 결과: 재심사 ← python: 50, mysql: 70, html5: 60 일 경우 # + # upgrade 2 해석 su1 = int(input("python 점수 :")) su2 = int(input("MariaDB 점수 :")) su3 = int(input("html5 점수 : ")) str = '' avg = (su1+su2+su3)/3 if su1 < 40 or su2 < 40 or su3 < 40: # 한과목이라도(AND) str='과락 발생' else: if avg >= 60: if su1 >= 80 or su2 >= 80 or su3 >= 80: # 한과목이(OR) str='통과' else: str='재심사' else: str ='평균 점수 미달입니다' print(str, "<- 평균 : ", avg)
notebook/core/basic/T4_.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Categorical data, advanced indexing and dealing with Not-a-Number data in Numpy.

# ### Before we start, let's quickly see a Python data structure called dictionary, which will help understand some of the materials and is also useful to understand how XArray works later on.

# #### A dictionary represents a map between values that can be of different types. Using curly braces we specify the key followed by a colon and the value for each element in the dictionary. For example:

# +
d = {1: 'one', 2: 'two', 3: 'tree'}

print(d[1], " + ", d[2], " = ", d[3])
# -

# #### Elements in a dictionary can be modified or new elements added by doing:

# +
d[3] = 'three'
d[4] = 'four'

d
# -

# ### Categorical data

# #### First let's import some libraries:

# +
# %matplotlib inline

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
# -

# #### Sometimes remote sensing data contains categorical data as a way of representing classes of individual pixels.

# #### As an example, the following cell simulates a very simple satellite image with three different land cover types. Value `1` represents area covered with grass, `2` croplands and `3` city.

# +
# grass = 1
area = np.ones((100,100))

# crops = 2
area[10:60,20:50] = 2

# city = 3
area[70:90,60:80] = 3

# We map the values to colours
index = {1: 'green', 2: 'yellow', 3: 'grey'}

# Create a discrete colour map
cmap = colors.ListedColormap(index.values())

# Plot
plt.imshow(area, cmap=cmap)
# -

# #### The harvesting season has arrived and our cropping lands have changed colour to brown.

# #### Can you add a new entry to the `index` dictionary mapping number `4` to the value `brown` and regenerate the colourmap? Then using your new indexing skills can you modify the yellow area to contain the new value `4`. Finally plot again using the same function to show the change in the season.

# +
### Your code goes here

# -

# #### Boolean indexing of numpy arrays:

# +
arr = np.arange(10)
print("Before:", arr)

# one boolean per element; True selects that position
indexes = [True,False,True,False,True,False,True,False,True,False]
arr[indexes]
# -

# #### Boolean arrays also allow to assign new values to an array for the `True` indexes leaving the `False` indexes intact:

# +
arr[indexes] = -1

arr
# -

# #### Can you use the previous examples to update the following array so that all the values greater or equal to `10` are made equal to `10`. _This operation is called `clipping` in Numpy and there is a specific function to do it called `np.clip`_.

# +
arr = np.random.randint(20, size=10)
print("Before:", arr)

### Your code goes here

print("After clipping:", arr)
# -

# #### Masking out regions is a very common practice in remote sensing analysis. For example the following image represents a satellite image of a region in which there are clouds, shadows and some water.

# <img src="data/land_mask.png" alt="drawing" width="220" align="left"/>

# #### We have three files, containing numpy arrays `.npy` which represent the mask for each category:

# #### These masks are stored as `dtype=uint8` using `1` to indicate presence and `0` for absence of each feature.

# +
import matplotlib.gridspec as gridspec

plt.figure(figsize=(12,8))
gs = gridspec.GridSpec(1,3) # set up a 1 x 3 grid of images

ax1=plt.subplot(gs[0,0])
water_mask = np.load("data/water_mask.npy")
plt.imshow(water_mask)
ax1.set_title('Water Mask')

ax2=plt.subplot(gs[0,1])
cloud_mask = np.load("data/cloud_mask.npy")
plt.imshow(cloud_mask)
ax2.set_title('Cloud Mask')

ax3=plt.subplot(gs[0,2])
shadow_mask = np.load("data/shadow_mask.npy")
plt.imshow(shadow_mask)
ax3.set_title('Shadow Mask')

plt.show()
# -

# #### Let's load the RGB image, which is a 3-dimensional array of type `uint8` with the 3rd dimension corresponding to the colour bands.

# #### Can you use the previous masks to filter clouds, shadows and water out of the image? _Hint: Use your new boolean indexing skills and assign pixels in these regions the value 0_

# +
import imageio

im = imageio.imread('data/land_mask.png')

## Your code here

plt.imshow(im)
# -

# ## Bit Flags

# #### Some remote sensing products contain ancillary data describing the quality of each pixel. This quality information is normally encoded using bit flags, in which each bit in a number acts as an 'on/off' switch for a particular feature.

# #### For example, a uint8 number is represented in binary format using 8 bits, so it can be used to encode up to 8 different features.

# * Bit 0: Cloud
# `00000001` -> 1
# * Bit 1: Terrain occlusion
# `00000010` -> 2
# * Bit 3: Saturation
# `00001000` -> 8

# #### So, if one pixel is both classified as cloud and is saturated, the pixel quality mask would be:
# `00001001` -> 9

# #### This is the description of the pixel quality mask of Landsat 8

# <img src="data/ls8_pq.png" alt="drawing" width="220" align="left"/>

# #### And this is a real example of a pixel quality Landsat 8 image over Margaret River encoded using the previous bit flags.

# +
import rasterio

# Band-quality (BQA) raster: band 0 of the GeoTIFF, one uint16 flag word per pixel
pq = rasterio.open('data/LC08_L1TP_112084_20190820_20190902_01_T1_BQA.tiff').read()[0,:,:]
print(pq.shape, pq.dtype)
plt.imshow(pq)

# This function displays unique values in the PQ mask
np.unique(pq)
# -

# #### For the value `2720` we can see the binary representation doing:

"{:016b}".format(2720)

# #### Can you work out what is the interpretation of the `2800` value in the PQ mask?

# ## Analysing data with NaNs.

# #### NaN is a special value of `float32` and `float64` arrays used to designate Not-a-Number values.

# +
arr = np.array([1,2,3,4,5,np.nan,7,8,9], dtype=np.float32)

arr
# -

# #### To compute statistics on arrays containing NaN values, Numpy has special versions of common functions such as `mean` or `std` that ignore the NaN values:

# +
print(np.mean(arr))

print(np.nanmean(arr))
# -

# #### Consider the following uint16 array in which the value `0` designates no data. If we want to compute the mean of all the valid values, we can do it by converting the array to float type and then assigning the value `0` to NaN.

# +
arr = np.array([234,243,0,231,219,0,228,220,237], dtype=np.uint16)

arr = arr.astype(np.float32)
arr[arr==0]=np.nan

np.nanmean(arr)
# -

# #### **Exercise**: Can you calculate the mean value of each of the RGB channels for just the area covered with grass in the following image?

# <img src="data/land_mask.png" alt="drawing" width="220" align="left"/>

# +
import imageio

# drop the alpha channel, keeping only R, G, B
im = imageio.imread('data/land_mask.png')[:,:,:3]
print(im.shape)

### Red is im[:,:,0], Green is im[:,:,1], Blue is im[:,:,2]

### Your code here
# -
session1/3_advanced_indexing_numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline import pandas as pd import numpy as np from __future__ import division import itertools import matplotlib.pyplot as plt import seaborn as sns plt.rcParams['axes.grid'] = False import logging logger = logging.getLogger() # - # 6 Frequent Itemsets # =========== # ### 6.1 The Market-Basket Model # Each **Basket** consists of a set of **items**(an itemset) # # + The number of items in a basket is small. # # + The number of baskets is usually very large. # # + Basket are sets, and in priciple items can appear only once. # # # ##### Definition of Frequent Itemses # a set of items that appears in many baskets is said to be "frequent". # # **support**: if ${I}$ is a set of items, the support of ${I}$ is the number of baskets for which I is a subset. # # Assume $s$ is the support threshold, then we say ${I}$ is frequent if its support is $s$ or more. 
# +
# Quieten the example's logging output.
logging.getLogger().setLevel('WARN')

# Eight tiny "baskets": each document becomes a set of lower-cased words.
data_raw = [
    ['Cat', 'and', 'dog', 'bites'],
    ['Yahoo', 'news', 'claims', 'a', 'cat', 'mated', 'with', 'a', 'dog', 'and', 'produced', 'viable', 'offspring'],
    ['Cat', 'killer', 'likely', 'is', 'a', 'big', 'dog'],
    ['Professional', 'free', 'advice', 'on', 'dog', 'training', 'puppy', 'training'],
    ['Cat', 'and', 'kitten', 'training', 'and', 'behavior'],
    ['Dog', '&', 'Cat', 'provides', 'dog', 'training', 'in', 'Eugene', 'Oregon'],
    ['Dog', 'and', 'cat', 'is', 'a', 'slang', 'term', 'used', 'by', 'police', 'officers', 'for', 'a', 'male-female', 'relationship'],
    ['Shop', 'for', 'your', 'show', 'dog', 'grooming', 'and', 'pet', 'supplies']
]
data = pd.Series([{word.lower() for word in doc} for doc in data_raw])

def calc_occurrence_of_doubletons(df, data):
    """Fill *df* in place: every non-NaN cell (row, col) becomes a
    comma-separated string of the 1-based indices of the baskets in *data*
    containing both the row word and the column word. NaN cells are left
    untouched. Returns the mutated *df*.
    """
    logging.getLogger().info('df: \n{}\n'.format(df))
    for row_word in df.index:
        for col_word in df.columns:
            if np.isnan(df.loc[row_word, col_word]):
                continue  # masked-out cell
            pair = {row_word, col_word}
            hits = [str(pos) for pos, basket in enumerate(data, start=1)
                    if pair <= basket]
            df.loc[row_word, col_word] = ','.join(hits)
    return df

# Lower-triangular mask of the word pairs we care about (NaN = skip).
mask = [
    [1, 1, 1, 1],
    [1, 1, 1, np.nan],
    [1, 1, np.nan, np.nan],
    [1, np.nan, np.nan, np.nan]
]
df = pd.DataFrame(mask, index=['dog', 'cat', 'and', 'a'], columns=['training', 'a', 'and', 'cat'])
calc_occurrence_of_doubletons(df, data)
# -

# ##### applications
# 1. Related concepts
#
# 2. Plagiarism
#
# 3. Biomarkers

# #### 6.1.3 Association Rules
# an association rule $I \to j$:
# if all of the items in $I$ appear in some basket, then $j$ is "likely" to appear in that basket as well.
#
# **confidence**: $$\text{confidence}(I \to j) = \frac{\text{support}(I \cup \{j\})}{\text{support}(I)}$$
#
# **interest**: $$\text{interest}(I \to j) = \text{confidence}(I \to j) - \frac{\text{support}(j)}{m}$$ where $m$ is the number of all baskets.
#
#
# ##### Finding Association Rules with High Confidence
# if $J$ is a set of $n$ items that is found to be frequent, then we have $n$ candidates: $J - \{j\} \to j$ for each $j$ in $J$.
Then their confidence can be calculated. # # cons: assumed that there are not too many frequent itemsets, since each one found must be acted upon. # solution: adjust the support threshold $s$ so that we do not get too many frequent itemsets. # ##### 6.1.5 Exercise for Section 6.1 # 略 # ### 6.2 Market Baskets and the A-Priori Algorithm # #### 6.2.1 Representation of Market-Basket Data # We assume that: # + market-basket data is stored in a file basket-by-basket. # + the size of the file of baskets is sufficiently large that it doesn't fit in main memory. # + a major cost of any algorithm is the time it takes to read the baskets from disk. # Why we miss the time it takes to generate all the subsets of size $k$? # 1. $k$ is usually small, never grows beyond 2 or 3. # 2. It's possible to eliminate many of items in procession. # # Measure: only the number of passes taken by the algorithm matters. # #### 6.2.2 Use of Main Memory for Itemset Counting # Each algorithm has a limit on how many items it can deal with. # # ##### Coding items as integers # In general, we need a hash table that translates items as they appear in the file to integers. # # ##### Count a pair $\{i,j\}$ # 1. The Triangular-Matrix Method # + Use the entry $a[i,j]$ in a two-dimensional array. # make half the array __useless__. # + Use a one-dimensional triangular array. # We store in $a[k]$ the count for the pair $\{i,j\}$, where $k = (i-1)(n-\frac{i}{2}) + (j-i), \, 1 \leq i < j \leq n$. # # 2. The Triples Method # We can store counts as triples $[i,j,c]$. # eg. a hash table with $i$ and $j$ as the search key. # # pros: don't store anything if a pair counts 0. # cons: store 3 integers for every pair. # # ###### comparison # use the triangular matrix if at least 1/3 of the $C_n^2$ possible pairs actually appear in some basket. # use the triples method if significatly fewer than 1/3 of the possible pairs occur. 
# # We might be better off using the triples method, because it would be normal to be a sufficiently uneven distribution of items even if the were ten or a hundred times as many baskets. # #### 6.2.3 Monotonicity of Itemsets # __monotonicity__ for itemsets: # If a set $I$ of items is frequent, then so it every subsets of $I$. # # If we are given a support threshold $s$, then we say an itemset is __maximal__ if no superset is frequent. # #### 6.2.4 Tyranny of Counting Pairs # The number of items is rarely so large we cannot count all the singleton sets in main memory at the same time, while it would be impossible to count the larger sets - triples, quadruples, since $C_n^k$ - the number of them - is too large. # #### 6.2.5 The A-Priori Algorithm # to avoid counting many triples or larger sets. # # 1. The First Pass # two tables: one is used to translate item names to integers, and another one is used to count. # # 2. Between the Passes # we get frequent sets after we set the threshold $s$. # # 3. The Second Pass # We count all pairs of the frequent sets as follows: # 1. For each basket, identify its frequent items in frequent sets. # 2. Generate all pairs. of its frequent items. # 3. Add one for each pairs above. # #### 6.2.6 A-Priori for All Frequent Itemsets plt.imshow(plt.imread('./res/fig6_4.png')) # #### 6.2.7 Exercises for Section 6.2 # 略 # ### 6.3 Handling Larger Datasets in Main Memory # A-Priori Algorithm: the greatest requirement for main memory when counting of $C_2$. $\to$ **idea**: cut down on the size of $C_2$. # #### 6.3.1 The Algorithm of Park, Chen and Yu (PCY) # 1. 1st pass: # + count single item $C_1$. # + hash each pairs in the basket to the bucket, and add 1. # # 2. during pass: # + filter frequent items $L_1$. # + filter frequent buckets. $to$ summaried as a *bitmap*. # # 3. $C_2$, pairs ${i,j}$: # 1. $i$ and $j$ are frequent items. # 2. ${i,j}$ hashes to a frequent bucket. # # pros: $C_2 \downarrow$. 
cons: cannot renumber $1,\dotsc,m$ $\to$ cannot use triangular matrix $\to$ **ONLY use the triple method**.

plt.imshow(plt.imread('./res/fig6_5.png'))

# #### 6.3.2 The Multistage Algorithm
# It improves upon PCY by using several *successive* hash tables to reduce further the number of candidate pairs.
#
# 1. 1st pass is the same as that of PCY.
#
# 2. 2nd pass:
#     We hash ${i,j}$ if and only if:
#     + $i$ and $j$ are both frequent.
#     + ${i,j}$ hashed to a frequent bucket of $B_1$ on the 1st pass.
#
#     Then summarized as a bitmap $B_2$.
#
# 3. $C_2$ pairs ${i,j}$:
#     + $i$ and $j$ are both frequent items.
#     + ${i,j}$ hashed to a frequent bucket in $B_1$.
#     + ${i,j}$ hashed to a frequent bucket in $B_2$ as well.
#
# Attention:
# Each pass must store the bitmaps; eventually, there is not enough space left to count if too many stages are used.

plt.imshow(plt.imread('./res/fig6_6.png'))

# #### 6.3.3 The Multihash Algorithm
# use two hash functions and two separate hash tables on the 1st pass.
#
# The danger of using two hash tables on one pass is that each hash table has half as many buckets as the one large hash table of PCY. $\implies$ the average count of a bucket for PCY is much lower than the support threshold.
#
# $C_2$: $i$ and $j$ must both be frequent, and the pair must have hashed to a frequent bucket according to both hash tables.
#
# The **risk** is that should we use too many hash tables, the average count for a bucket will exceed the support threshold.
# $\implies$ the probability an infrequent pair will be a candidate rises, rather than falls, if we add another hash table.

plt.imshow(plt.imread('./res/fig6_7.png'))

# #### 6.3.4 Exercises for Section 6.3
# `#maybe`

# ### 6.4 Limited-Pass Algorithms
# Main memory is too small. $\to$ $k$ passes to compute.
# solution: it's not essential to discover every frequent itemset.

# #### 6.4.1 The Simple, Randomized Algorithm
# to pick a random subset of the baskets and adjust the support threshold.
# The *safe* way:
# read the entire dataset one by one,
# and for each basket, select that basket for the sample with some fixed probability $p$.
# + if no member of $N$ is frequent in the whole datasets. $to$ output the $F$. # + otherwise, give no answer and resample again. # # ##### Why it works. # 1. eliminate FP $gets$ check in the full datasets. # # 2. eliminate FN(namely, find all *real* frequent itemset in the *sample*): # Proof: # When no member of the $N$ is frequent in the whole, # there can be no itemset $S$ whatsoever that is: # 1. Frequent in the whole, but # 2. In neither $N$ or $F$. # # Proof by Contradiction: # Suppose $S$ exist, but the algorithm gives OUTPUT when no member of the $N$ is frequent in the whole. # # Let $T$ be a subset of $S$ that is of the smallest possible size among all subsets of $S$ that are not frequent in the sample. # + smallest $to$ all of its immediate subsets are frequent. # + $T$ is not frequent. # So, $T \in N$. # While $T$ is frequent in the whold datasets, $\to$ fail to answer. CONTRATY with output. # ##### 6.4.7 Exercises for Section 6.4 # 略 # ### 6.5 Counting Frequent Items in a Stream # For stream, we must think of the support threshold $s$ as a fraction of the baskets in which an itemset must appear in order to be considered frequent. # #### 6.5.1 Sampling Methods for Streams # When the frquent-itemsets algorithm finishes, we have an estimate of the frequent itemsets in the stream. # Then we have several options: # # 1. Use the collection at handy, but start running another iteration immediately. # # 2. Continue to count the frequent itemsets, and # + drop someone when they reach below $s$, # + add new frequent itemsets. # eg: # + Periodically gather. # + Add negative border.(most potential itemsets) # #### 6.5.2 Frequent Itemsets in Decaying Windows # a decaying window on a stream: # 1. picking a small constant $c$. # 2. giving the $i$th element the weight $(1-c)^i \approx e^{-ci}$. # # record all items whose score was at least $1/2$. # # ##### baskets $\to$ items # 1. unfold directly. 
$\{a,b\}, \{c,d\} \to a, b, c, d$ # cons: We want to find all frequent itemsets, not just singleton itemsets. # # 2. Start scoring certain itemsets as soon as we see one instance, but be conservative about which itemsets we start. $gets$ too many counts. # eg: Only start an itemset $I$ if all its immediate subsets are already being scored. # #### 6.5.3 Hybird Methods # The big disadvantage of decaying window is: # It requires us to maintain scores for each itemset with a score of at least $1/2$, # while limiting by $c$ will force us to accept information that tracks the local fluctuations in frequency too closely. # # Solution: # 1. Use sampling method to find candidates and give them initial scores. # 2. When the score of an candidate reach upper $s$, then it's collected as frequent-itemsets. # #### 6.5.4 Exercises for Section 6.5 # 略
Mining_of_Massive_Datasets/Frequent_Itemsets/note.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jonra1993/Data-Science-with-Python/blob/master/Module_1_Introduction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="nlBDEr95yOOc" colab_type="text" # <h1 id="data_acquisition">Data Acquisition</h1> # <ul> # <li>data source: <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data" target="_blank">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a></li> # <li>data type: csv</li> # </ul> # + id="1QdwbEp_Smoz" colab_type="code" colab={} # import pandas library import pandas as pd # + [markdown] id="2BBfeab6yYE9" colab_type="text" # <h2>Read Data</h2> # <p> # We use <code>pandas.read_csv()</code> function to read the csv file. 
# </p> # + id="Hn46STQmyT8d" colab_type="code" colab={} # Import pandas library import pandas as pd # Read the online file other_path = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv" df = pd.read_csv(other_path, header=None) #It does not have header # + id="3KFA-BNmy1DL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="14b68e9a-e20c-49ed-92f7-4fde02aa2df5" # show the first 5 rows using dataframe.head() method print("The first 5 rows of the dataframe") df.head(5) # + id="hRje3KB-y-1w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="b1312c86-59a0-4b71-fe00-12bead1a9aed" # show the last 5 rows using dataframe.tail() method print("The last 5 rows of the dataframe") df.tail(5) # + [markdown] id="iiQZhlrLzb1l" colab_type="text" # <h3>Add Headers</h3> # <p> # Take a look at our dataset; pandas automatically set the header by an integer from 0. # </p> # <p> # To better describe our data we can introduce a header, this information is available at: <a href="https://archive.ics.uci.edu/ml/datasets/Automobile" target="_blank">https://archive.ics.uci.edu/ml/datasets/Automobile</a> # </p> # + id="RCiYaiz_zF5r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="8500862d-0a39-4392-e30d-0846cb2dee0a" # create headers list headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style", "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type", "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower", "peak-rpm","city-mpg","highway-mpg","price"] print("headers\n", headers) # + id="34vBnXoEzkru" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 199} outputId="f41796f3-641b-49df-9e24-053eca0f1b1e" df.columns = headers df.head(2) # + [markdown] id="pTfok6kcz2Z0" 
colab_type="text"
# we can drop missing values along the column "price" as follows

# + id="OHK2FBUeznyk" colab_type="code" colab={}
# dropna returns a NEW frame; the original cell discarded the result, so df
# was never actually cleaned. Assigning it back makes the cell do what the
# markdown above says. (At this point the raw file encodes missing values as
# "?", not NaN, so this is a no-op until "?" is converted -- TODO confirm
# against the later cleaning module.)
df = df.dropna(subset=["price"], axis=0)

# + id="8xsmlDbvz48x" colab_type="code" colab={}
# Prints header or names of columns
print(df.columns)

# + [markdown] id="rFl6qbUh0KUu" colab_type="text"
# <h2>Save Dataset</h2>

# + id="57RyIEM50CzX" colab_type="code" colab={}
# Persist the header-labelled data; index=False keeps the row index out of the CSV.
df.to_csv("automobile.csv", index=False)

# + [markdown] id="qJl0s5lw0Z_E" colab_type="text"
# <h1 id="basic_insight">Basic Insight of Dataset</h1>
#

# + [markdown] id="yNITL3la0flu" colab_type="text"
# <h2>Data Types</h2>
#

# + id="HLsfbFOS0S_c" colab_type="code" colab={}
# dtype of every column.
df.dtypes

# + [markdown] id="zClvpNqW0y3G" colab_type="text"
# <h2>Describe</h2>
# If we would like to get a statistical summary of each column, such as count, column mean value, column standard deviation, etc.

# + id="JDuqc0LM0xj4" colab_type="code" colab={}
# This method will provide various summary statistics, excluding NaN (Not a Number) values.
df.describe()

# + id="VRRDZUUw0mX4" colab_type="code" colab={}
# Describe all the columns in "df"
df.describe(include = "all")

# + id="tPgR-QHG1CVd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="03634487-762c-4cc0-ffd7-30c20392a6d7"
# Describe some specific columns in "df"
df[['length', 'compression-ratio']].describe()

# + [markdown] id="6cDcR4WZ1pRA" colab_type="text"
# <h2>Info</h2>
#

# + id="WPLHkRRl1j_u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="326fc280-a8c2-4e76-824a-4eb8614877b4"
# look at the info of "df". info is a method: the original `df.info` only
# displayed the bound-method repr instead of the column/memory summary.
df.info()

# + id="0LZkLvUX1tkd" colab_type="code" colab={}
Data Analysis with python/Module_1_Introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Questions for die Post Dataset # ## Setup SQL # %load_ext sql # %config SqlMagic.autocommit=False # avoiding the error: FAILED: IllegalStateException COMMIT is not supported yet. # %sql hive://hadoop@localhost:10000/post # ## Basic Checks # %sql SELECT * FROM plz limit 1 # %sql SELECT * FROM streets limit 1 # %sql SELECT * FROM bevoelkerung limit 1 # %sql SELECT * FROM nachnamen limit 1 # ## Count all zip codes (PLZ) per canton and order them by size. # How many cantons do we have and what tells us the output? Why is there a difference? # + language="sql" # select kanton, count(*) as cnt from plz group by kanton order by cnt DESC # - # ## Find `suurstoffi` in the `streets` table and find the corresponding record in `plz` # %sql select * from streets where lower(STRBEZ2L) like '%suurstoffi%' # %sql select * from plz where onrp = 3431 # ## Find the top 5 newest `gilt_ab_dat` entries in `plz` # %sql select * from plz order by gilt_ab_dat DESC limit 5 # ## Check the `bevoelkerung` table for "your" `plz`. # Do you know what `typ` means? # Check out the [schema](https://swisspost.opendatasoft.com/explore/dataset/bevoelkerung_proplz/information/?disjunctive.plz&disjunctive.typ&disjunctive.ortbez18&sort=stichdatum) from the "post" if you don't. # %sql select * from bevoelkerung where plz = 8640 # ## Find the top three `PLZ` for `typ='f'` # %sql select * from bevoelkerung where typ='f' order by anzahl DESC limit 3 # ## Is your `nachname` in the `nachnamen` table? # Order it by anzahl # + language="sql" # # select * from nachnamen where nachname = 'Egli' and geschlecht = 'm' order by anzahl # - # ## What are the top 10 lastnames in the table `nachnamen`? 
# + language="sql" # # select nachname, sum(anzahl) as cnt from nachnamen group by nachname order by cnt DESC limit 10
V6/v6_exercises_material/solutions/3_SQL_Solution.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.3 # language: julia # name: julia-1.5 # --- # # MOwNiT # ## Laboratorium # ### Całkowanie using Pkg Pkg.add("QuadGK") using QuadGK using Polynomials # <a href="https://www.icsr.agh.edu.pl/~mownit/pdf/06_kwadratury_v2.pdf"> Wykład </a> # # Kwadratury Gaussa bazują na tzw. odciętych punktów Gaussa $x_{i}$ oraz współczynnikach $a_{i}$ # # funkcja obliczająca odcięte punktów Gaussa oraz ich współczynniki (xp,a)=gauss(Float64,7) # Podstawowe użycie w/w danych: # aby policzyć $\int_{-1}^{1} f dx$ używamy odciętych punktów wraz z wagami wg wzoru # $\sum_{i=1}^{n}{a_{i}f(x_{i})}$ # f(x)=x^2 sum(a .* f.(xp)) # ### Zadanie 1 # - Korzytając z pakietu <a href="https://github.com/JuliaMath/Polynomials.jl">Polynomials</a> zaimplementuj wielomiany Legendre'a zdefiniowane w taki sposób: # # $P_{0}(x)=1$ # # $P_{1}(x)=x$ # # $P_{k+1}(x)=\frac{2k+1}{k+1} x P_{k}(x)-\frac{k}{k+1}P_{k-1}(x)$ # # - narysuj ich wykresy w przedziale (-1,1). # - sprawdź(np. za pomocą fukcji <i>roots</i> z pakietu Polynomials), że ich zera sa odciętymi punktów Gaussa. 
Test wykonaj dla wielomianow od 2 do 4 stopnia # - podaj związek tego faktu z podstawowym twierdzeniem kwadratur Gaussa (z wykładu) # ### Zadanie 2 # - Napisz funkcję, która dla podanej liczby punktów Gaussa $k$ oraz funkcji $f$ policzy $\int_{-1}^{1} f dx$ # metodą używającą funkcji: # ```julia # gauss(k) # ``` # oraz sumy # ```julia # sum(a .* f.(xp)) # ``` # - przetestuj dla wielomianów coraz większych stopni # - sprawdz kiedy przestaje być dokładna, # - podaj związek z twierdzeniem o stopniu dokładności kwadratury Gaussa # ### Zadanie 3 # # Skorzystaj z rozwiązania zadania 2 do napisania funkcji # liczącej całki w dowolnym przedziale $\int_{a}^{b} f(x) dx$ # # dokonując normalizacji do $\int_{-1}^{1} F(z) dz$ # # podstawiając: # # $x=\frac{b+a}{2}+ \frac{b-a}{2} z $ oraz # # $dx =\frac{b-a}{2} dz $ # # Przetestuj działanie na kilku przykładach i sprawdź z wynikami otrzymanymi analitycznie. # # ### Zadanie 4 # Głowną funkcją pakietu QuadGK jest adaptacyjna funkcja <a href="https://juliamath.github.io/QuadGK.jl/stable/#QuadGK.quadgk"> guadgk</a> używająca całkowania Gauss-Kronroda # # - użyj tej funkcji do policzenia całki dla przykładowego wielomianu. # - funkcja ta ma możliwość liczenia również całek do nieskończoności. # # # Policz całkę od minus do plus nieskonczonosci # standardowego rozkładu normalnego Gaussa # $ \frac{1}{\sqrt{2\pi}}exp(\frac{-x^2}{2})$ # ### Zadanie 5 # Napisz własną funkcję całkującą metodą prostokątów albo trapezów. Narysuj wykres funkcji błędu # w stosunku do wyniku otrzymanego analitycznie, # w zaleznosci od ilosci potrzebnych przedziałów dla przykładowych funkcji: # 1. wykładniczej # 2. trygonometrycznej (np. sin (x) w przedziale $[0,2\pi]$)
Mownit_Lab_Calkowanie.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Visual History of Nobel Prize Winners # # Explore a dataset from Kaggle containing a century's worth of Nobel Laureates. Who won? Who got snubbed? # # ## Project Description # # The Nobel Prize is perhaps the world's most well known scientific award. Every year it is given to scientists and scholars in chemistry, literature, physics, medicine, economics, and peace. The first Nobel Prize was handed out in 1901, and at that time the prize was Eurocentric and male-focused, but nowadays it's not biased in any way. Surely, right? # # Well, let's find out! What characteristics do the prize winners have? Which country gets it most often? And has anybody gotten it twice? It's up to you to figure this out. # # The [dataset](https://www.kaggle.com/nobelfoundation/nobel-laureates) used in this project is from The Nobel Foundation on Kaggle. # # ## Guided Project # # In this project, you will use your data manipulation and visualization skills to explore patterns and trends over 100 years worth of Nobel Prize winners. # # ### Project Tasks # # 1. The most Nobel of Prizes # 2. So, who gets the Nobel Prize? # 3. USA dominance # 4. USA dominance, visualized # 5. What is the gender of a typical Nobel Prize winner? # 6. The first woman to win the Nobel Prize # 7. Repeat laureates # 8. How old are you when you get the prize? # 9. Age differences between prize categories # 10. Oldest and youngest winners # 11. You get a prize!
a_visual_history_of_nobel_prize_winners/summary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="VROEldczoFWf"
# # CatBoost Algorithm

# + [markdown] id="hUslzbR-oVOz"
# CatBoost is a gradient boosting framework which attempts to solve for Categorical features using a permutation driven alternative compared to the classical algorithm.

# + [markdown] id="Em_xJDAFpYfx"
# ## Advantages

# + [markdown] id="rMF8Vru0plJ7"
# 1. Little preprocessing required.
# 2. Faster in comparison with other algorithms (reason being the use of symmetric trees).
# 3. Faster GPU implementation.
#

# + [markdown] id="BdFR7ndSrPDJ"
# Installation of CatBoost

# + colab={"base_uri": "https://localhost:8080/"} id="EyuGfgbxrgqL" outputId="26744a97-be17-4700-dfa2-601aaf0abb8e"
# !pip install catboost #using pip to install catboost

# + [markdown] id="v6Srzw86rw0p"
# Importing the necessary Libraries

# + id="gjPgKo2Huu31"
import pandas as pd #For data manipulation and analysis
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split # For splitting into test and train set
from sklearn.metrics import accuracy_score, f1_score # For checking of the accuracy of model

# + [markdown] id="fCkOtVr-dBjH"
# Loading Data

# + id="LYZNBA68vYrb"
# Loan dataset; assumes data.csv sits next to the notebook -- TODO confirm path.
data=pd.read_csv("data.csv")

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="D8NM0KRXyCPw" outputId="d8a42a86-16c1-41f7-f722-b0b422fc5cde"
data.head()

# + [markdown] id="SqSnQ2ezdKzK"
# Separating the X and Y from the data

# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="n1jFBBQ4yHce" outputId="e1eea277-6cc3-4da2-8a23-5aecc9c3a343"
# Features: every column except the target; target: the "Loan_Status" column.
X=data.loc[:,data.columns!="Loan_Status"]
Y=data.loc[:,data.columns=="Loan_Status"]
X

# + [markdown] id="XRYuPYEudge4"
# Test and train split

# + id="2iAf-dMs8AdZ"
# 80/20 split; random_state=0 makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, Y,test_size=0.2,random_state=0)

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="gOKmKeno8LtG" outputId="7b7980bd-4706-4f60-e54b-548c7485d66a"
X_train.head()

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="bldxw9SE8Q7R" outputId="431f761c-9dc4-4dfa-8ba7-894ddda2e33d"
y_train.head()

# + [markdown] id="MO2W9j-4dpf2"
# Creating a CatBoostClassifier

# + id="tPyBACxn8k7B"
# task_type='GPU' requires a CUDA-capable runtime (as provided by Colab);
# eval_metric="F1" is the metric reported for the eval_set during training.
model = CatBoostClassifier(task_type='GPU', iterations=150, random_state = 2000, eval_metric="F1")

# + [markdown] id="Wwst9Mu6dvsb"
# Model Fitting

# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["70a138e97cd345948b6784eef80f51e6"]} id="613B9yUK8mpn" outputId="64953234-7ea9-4108-eb5a-5699d3017b69"
# cat_features names the columns CatBoost must treat as categorical;
# plot=True draws the interactive training widget; eval_set monitors test F1.
model.fit(X_train, y_train, cat_features= ["Credit_History"], plot=True,eval_set=(X_test, y_test))

# + [markdown] id="8Jo_rfR6d1O3"
# Predicting the values for X_test

# + id="yKRhzLbeSVV8"
y_pred=model.predict(X_test)

# + [markdown] id="dpoPLmFOd7bs"
# Finding the accuracy of model using f1_score and accuracy_score

# + colab={"base_uri": "https://localhost:8080/"} id="7LxuUHttSjRk" outputId="c379daab-eafa-46ed-df92-7bd78ac3857e"
f1_score(y_test, y_pred)

# + colab={"base_uri": "https://localhost:8080/"} id="GjRTOczsSjkY" outputId="6b0fb9fa-36a8-49d9-d9ca-e42137ac036c"
accuracy_score(y_test,y_pred)
Datascience_With_Python/Machine Learning/Algorithms/CatBoost/CatBoost_Algorithm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:devel]
#     language: python
#     name: conda-env-unidata-python-workshop-py
# ---

# <a name="top"></a>
# <div style="width:1000 px">
#
# <div style="float:right; width:98 px; height:98px;">
# <img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
# </div>
#
# <h1>Working with Surface Observations in Siphon and MetPy</h1>
# <h3>Unidata Python Workshop</h3>
#
# <div style="clear:both"></div>
# </div>
#
# <hr style="height:2px;">
#
# <div style="float:right; width:250 px"><img src="http://weather-geek.net/images/metar_what.png" alt="METAR" style="height: 200px;"></div>
#
# ## Overview:
#
# * **Teaching:** 20 minutes
# * **Exercises:** 20 minutes
#
# ### Questions
# 1. What's the best way to get surface station data from a THREDDS data server?
# 1. What's the best way to make a station plot of data?
# 1. How can I request a time series of data for a single station?
#
# ### Objectives
# 1. <a href="#ncss">Use the netCDF Subset Service (NCSS) to request a portion of the data</a>
# 2. <a href="#stationplot">Download data for a single time across stations and create a station plot</a>
# 3. <a href="#timeseries">Request a time series of data and plot</a>

# <a name="ncss"></a>
# ## 1. Using NCSS to get point data

# +
from siphon.catalog import TDSCatalog

# copied from the browser url box
metar_cat_url = ('http://thredds.ucar.edu/thredds/catalog/'
                 'irma/metar/catalog.xml?dataset=irma/metar/Metar_Station_Data_-_Irma_fc.cdmr')

# Parse the xml
catalog = TDSCatalog(metar_cat_url)

# what datasets are here?
print(list(catalog.datasets))
# -

metar_dataset = catalog.datasets['Feature Collection']

# Once we've grabbed the "Feature Collection" dataset, we can request a subset of the data:

# Can safely ignore the warnings
ncss = metar_dataset.subset()

# What variables do we have available?
ncss.variables

# <a href="#top">Top</a>
# <hr style="height:2px;">

# <a name="stationplot"></a>
# ## 2. Making a station plot

# * Make new NCSS query
# * Request data closest to a time

# +
from datetime import datetime

query = ncss.query()
query.lonlat_box(north=34, south=24, east=-80, west=-90)
query.time(datetime(2017, 9, 10, 12))
query.variables('temperature', 'dewpoint', 'altimeter_setting',
                'wind_speed', 'wind_direction', 'sky_coverage')
query.accept('csv')
# -

# Get the data
data = ncss.get_data(query)
data

# Now we need to pull apart the data and perform some modifications, like converting winds to components and convert sky coverage percent to codes (octets) suitable for plotting.

# +
import numpy as np
import metpy.calc as mpcalc
from metpy.units import units

# Since we used the CSV data, this is just a dictionary of arrays
lats = data['latitude']
lons = data['longitude']
tair = data['temperature']
dewp = data['dewpoint']
alt = data['altimeter_setting']

# Convert wind to components
u, v = mpcalc.wind_components(data['wind_speed'] * units.knots, data['wind_direction'] * units.degree)

# Need to handle missing (NaN) and convert to proper code.
# FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# int gives the same default integer dtype.
cloud_cover = 8 * data['sky_coverage'] / 100.
cloud_cover[np.isnan(cloud_cover)] = 10
cloud_cover = cloud_cover.astype(int)

# For some reason these come back as bytes instead of strings.
# FIX: ndarray.tostring() is a long-deprecated alias that newer NumPy
# removed; tobytes() is the drop-in replacement with identical output.
stid = np.array([s.tobytes().decode() for s in data['station']])
# -

# ### Create the map using cartopy and MetPy!
# One way to create station plots with MetPy is to create an instance of `StationPlot` and call various plot methods, like `plot_parameter`, to plot arrays of data at locations relative to the center point.
# # In addition to plotting values, `StationPlot` has support for plotting text strings, symbols, and plotting values using custom formatting. # # Plotting symbols involves mapping integer values to various custom font glyphs in our custom weather symbols font. MetPy provides mappings for converting WMO codes to their appropriate symbol. The `sky_cover` function below is one such mapping. # + # %matplotlib inline import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt from metpy.plots import StationPlot, sky_cover # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) proj = ccrs.Stereographic(central_longitude=-95, central_latitude=35) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points stationplot = StationPlot(ax, lons, lats, transform=ccrs.PlateCarree(), fontsize=12) stationplot.plot_parameter('NW', tair, color='red') # Add wind barbs stationplot.plot_barb(u, v) # Plot the sky cover symbols in the center. We give it the integer code values that # should be plotted, as well as a mapping class that can convert the integer values # to the appropriate font glyph. stationplot.plot_symbol('C', cloud_cover, sky_cover) # - # Notice how there are so many overlapping stations? There's a utility in MetPy to help with that: `reduce_point_density`. This returns a mask we can apply to data to filter the points. # + # Project points so that we're filtering based on the way the stations are laid out on the map proj = ccrs.Stereographic(central_longitude=-95, central_latitude=35) xy = proj.transform_points(ccrs.PlateCarree(), lons, lats) # Reduce point density so that there's only one point within a 200km circle mask = mpcalc.reduce_point_density(xy, 200000) # - # Now we just plot with `arr[mask]` for every `arr` of data we use in plotting. 
# + # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points stationplot = StationPlot(ax, lons[mask], lats[mask], transform=ccrs.PlateCarree(), fontsize=12) stationplot.plot_parameter('NW', tair[mask], color='red') stationplot.plot_barb(u[mask], v[mask]) stationplot.plot_symbol('C', cloud_cover[mask], sky_cover) # - # More examples for MetPy Station Plots: # - [MetPy Examples](https://unidata.github.io/MetPy/latest/examples/index.html) # - [MetPy Symbol list](https://unidata.github.io/MetPy/latest/api/generated/metpy.plots.StationPlot.html#metpy.plots.StationPlot.plot_symbol) # <div class="alert alert-success"> # <b>EXERCISE</b>: # <ul> # <li>Modify the station plot (reproduced below) to include dewpoint, altimeter setting, as well as the station id. The station id can be added using the `plot_text` method on `StationPlot`.</li> # <li>Re-mask the data to be a bit more finely spaced, say: 75km</li> # <li>Bonus Points: Use the `formatter` argument to `plot_parameter` to only plot the 3 significant digits of altimeter setting. (Tens, ones, tenths)</li> # </ul> # </div> # + # Use reduce_point_density # Set up a plot with map features fig = plt.figure(figsize=(12, 12)) ax = fig.add_subplot(1, 1, 1, projection=proj) ax.add_feature(cfeature.STATES, edgecolor='black') ax.coastlines(resolution='50m') ax.gridlines() # Create a station plot pointing to an Axes to draw on as well as the location of points # Plot dewpoint # Plot altimeter setting--formatter can take a function that formats values # Plot station id # - # # %load solutions/reduce_density.py # <a href="#top">Top</a> # <hr style="height:2px;"> # <a name="timeseries"></a> # ## 3. Time Series request and plot # * Let's say we want the past days worth of data... 
# * ...for Miami (i.e. the lat/lon below -- the station returned is KMIA;
#   the original text said "Boulder", which does not match the queried point)
# * ...for the variables mean sea level pressure, air temperature, wind direction, and wind_speed

# +
from datetime import timedelta

# define the time range we are interested in
end_time = datetime(2017, 9, 12, 0)
start_time = end_time - timedelta(days=2)

# build the query
query = ncss.query()
query.lonlat_point(-80.25, 25.8)
query.time_range(start_time, end_time)
query.variables('altimeter_setting', 'temperature', 'dewpoint',
                'wind_direction', 'wind_speed')
query.accept('csv')
# -

# ### Let's get the data!

data = ncss.get_data(query)
print(list(data.keys()))

# ### What station did we get?

# FIX: tostring() is a deprecated alias removed from newer NumPy;
# tobytes() returns the identical bytes object.
station_id = data['station'][0].tobytes()
print(station_id)

# That indicates that we have a Python `bytes` object, containing the 0-255 values corresponding to `'K', 'M', 'I', 'A'`. We can `decode` those bytes into a string:

station_id = station_id.decode('ascii')
print(station_id)

# Let's get the time into datetime objects. We see we have an array with byte strings in it, like station id above.

data['time']

# So we can use a list comprehension to turn this into a list of date time objects:

time = [datetime.strptime(s.decode('ascii'), '%Y-%m-%dT%H:%M:%SZ') for s in data['time']]

# ### Now for the obligatory time series plot...
# + from matplotlib.dates import DateFormatter, AutoDateLocator fig, ax = plt.subplots(figsize=(10, 6)) ax.plot(time, data['wind_speed'], color='tab:blue') ax.set_title(f'Site: {station_id} Date: {time[0]:%Y/%m/%d}') ax.set_xlabel('Hour of day') ax.set_ylabel('Wind Speed') ax.grid(True) # Improve on the default ticking locator = AutoDateLocator() hoursFmt = DateFormatter('%H') ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(hoursFmt) # - # <div class="alert alert-success"> # <b>EXERCISE</b>: # <ul> # <li>Pick a different location</li> # <li>Plot temperature and dewpoint together on the same plot</li> # </ul> # </div> # Your code goes here # + # # %load solutions/time_series.py # - # <a href="#top">Top</a> # <hr style="height:2px;">
notebooks/Surface_Data/Surface Data with Siphon and MetPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ## RCS Graph Theory Glossary # - # * Node / Vertex # * Vertex set # * Edge set # * Loop # * Multiple edge # * Simple graph # * Adjacent/neighbors # * Finite graph # * Null graph # * Complement # * Clique # * Independent set # * Bipartite graph (union of disjoint sets) # * K-regular # * Chromatic number # * Planar graph # * Connected/disconnecedted graph # * Subgraph # * Path # * Cycle # * Walk # * Trail # * u,v walk, u,v trail # * Adjacency matrix # * Incident (vertex and edge) # * Incidence matrix # * Isomorphism (bijection) # * Finding isomorphism using compliments # * Equivalence relation (an isomorphic transform is one) # * Complete graph # * Number of edges of a complete graph (formula) # * Complete bipartite graph # * Self-complementary graph # * Decomposition # * Petersen graph # * Girth # * Automorphism # * Complete graph # * Handshaking lemma # * Dimensional hypercube # * Minimum degree # * Maximal path # * Degree sequence # * Graphic sequence # * Havel-Hakimi algorithm # * Acyclic graph # * Tree # * Leaf # * Spanning subgraph # * Distance # * Diameter # * Eccentricity # * Radius # * Center # * Forward Prufer algorithm # * Backwards Prufer algorithm # * Deletion-contraction reccurrance for $\tau(G)$ (the number of spanning trees of a graph) # * Matrix-tree theorem # * Kruskal algorithm
NoSQL/Graph Theory Glossary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt

import numpy as np
import pandas as pd
import datetime as dt

# # Reflect Tables into SQLAlchemy ORM

# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func

# Create the engine for the local SQLite database of Hawaii climate data.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")

# Reflect the existing database into a new automap model.
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)

# View all of the classes that automap found.
Base.classes.keys()

# Save references to each reflected table.
Measurement = Base.classes.measurement
Station = Base.classes.station

# Create our session (link) from Python to the DB.
session = Session(engine)

# # Exploratory Precipitation Analysis

# Find the most recent date in the data set.
date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
date

# +
# Design a query to retrieve the last 12 months of precipitation data and plot the results,
# starting from the most recent data point in the database.

# Calculate the date one year before the last date in the data set.
# NOTE(review): 2017-08-23 is hard-coded; it matches `date` above for this
# dataset, but should be derived from that query result if the data changes.
one_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
one_year

# Query to retrieve the last 12 months of precipitation data (date, prcp).
prcp_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= one_year).order_by(Measurement.date).all()
prcp_data

# Perform a query to retrieve all the date and precipitation scores.
scores = session.query(Measurement.date, Measurement.prcp).order_by(Measurement.date.desc()).all()
scores

# Save the query results as a Pandas DataFrame.
df = pd.DataFrame(prcp_data, columns=['Date', 'Precipitation'])
df.head()

# Use Pandas plotting with Matplotlib to plot the data.
x = "Date"
y = "Precipitation"
df.plot(x, y, title="Precipitation Analysis", figsize=(14, 7))
plt.show()
# -

# Use Pandas to calculate the summary statistics for the precipitation data.
df.describe()

# # Exploratory Station Analysis

# Design a query to calculate the total number of stations in the dataset.
station_count = session.query(Measurement.station).distinct().count()
station_count

# Design a query to find the most active stations (i.e. which stations have the most rows?).
# List the stations and the counts in descending order.
active_stations = session.query(Measurement.station, func.count(Measurement.station)).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).all()
active_stations

# +
# Using the most active station id from the previous query,
# calculate the lowest, highest, and average temperature.
calculate = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
most_active_id = session.query(*calculate).filter(Measurement.station == 'USC00519281').all()
most_active_id

# +
# Using the most active station id, query the last 12 months of temperature
# observation data for this station and plot the results as a histogram.
# BUG FIX: this chained query was previously broken across two lines with a
# bare trailing '.' and no continuation marker — a SyntaxError. Wrapping the
# whole chain in parentheses makes the multi-line chain valid.
tobs_data = (
    session.query(Measurement.tobs)
    .filter(Measurement.date >= one_year)
    .filter(Measurement.station == 'USC00519281')
    .order_by(Measurement.date)
    .all()
)
tobs_data

tobs_df = pd.DataFrame(tobs_data, columns=['TOBS'])
tobs_df.plot.hist(bins=12, figsize=(14, 7))
plt.xlabel('Temperature')
plt.show()
# -

# # Close session

# Close Session
session.close()
.ipynb_checkpoints/climate_starter-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + jupyter={"outputs_hidden": true} import matplotlib.pyplot as plt import netCDF4 as nc import numpy as np import pandas as pd import xarray as xr from salishsea_tools import gsw_calls # %matplotlib inline # + jupyter={"outputs_hidden": true} mesh = nc.Dataset('/home/sallen/MEOPAR/grid/mesh_mask201702.nc') # + jupyter={"outputs_hidden": true} tracers = xr.open_dataset('https://salishsea.eos.ubc.ca/erddap/griddap/ubcSSg3DTracerFields1hV17-02') iY = 258; iX = 178; iZ = 28 depths = slice(0, iZ) iY2 = 388; iX2 = 271 # + jupyter={"outputs_hidden": true} e3t = mesh.variables['e3t_0'][0, :iZ, iY, iX] # - salinity2015 = tracers.salinity.sel(time ='2015', gridX=iX, gridY=iY, method='nearest').isel(depth=depths) salinity2015.plot() temperature2015 = tracers.temperature.sel(time ='2015', gridX=iX, gridY=iY, method='nearest').isel(depth=depths) temperature2015.plot() # + jupyter={"outputs_hidden": true} sigma2015 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2015.values, temperature2015.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2015.shape[0] south2015 = np.sum(sigma2015[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} # now make pandas arrays of these so I can day bin them etc den2015 = pd.DataFrame(data=south2015, index=salinity2015.time, columns=['sigma']) #den2015_2 = pd.DataFrame(data=north2015, index=salinity2015.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} day_avg_south2015 = den2015.resample('1D').mean() #day_avg_south2015_2 = den2015_2.resample('1D').mean() # + jupyter={"outputs_hidden": true} day_avg_south2015.to_csv('day_avg_south2015.csv') #day_avg_south2015_2.to_csv('day_avg_south2015.csv') # + jupyter={"outputs_hidden": true} salinity2016 = tracers.salinity.sel(time ='2016', gridX=iX, 
gridY=iY, method='nearest').isel(depth=depths) temperature2016 = tracers.temperature.sel(time ='2016', gridX=iX, gridY=iY, method='nearest').isel(depth=depths) # + jupyter={"outputs_hidden": true} salinity2016.plot() # + jupyter={"outputs_hidden": true} temperature2016.plot() # + jupyter={"outputs_hidden": true} sigma2016 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2016.values, temperature2016.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2016.shape[0] south2016 = np.sum(sigma2016[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} den2016 = pd.DataFrame(data=south2016, index=salinity2016.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} day_avg_south2016 = den2016.resample('1D').mean() # + jupyter={"outputs_hidden": true} day_avg_south2016.to_csv('day_avg_south2016.csv') # + jupyter={"outputs_hidden": true} salinity2015_2 = tracers.salinity.sel(time ='2015', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) temperature2015_2 = tracers.temperature.sel(time ='2015', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) # - salinity2015_2.plot() temperature2015_2.plot() # + jupyter={"outputs_hidden": true} sigma2015_2 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2015_2.values, temperature2015_2.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2015_2.shape[0] south2015_2 = np.sum(sigma2015_2[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} # now make pandas arrays of these so I can day bin them etc den2015_2 = pd.DataFrame(data=south2015_2, index=salinity2015_2.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} #day_avg_south2015 = den2015.resample('1D').mean() day_avg_south2015_2 = den2015_2.resample('1D').mean() # + jupyter={"outputs_hidden": true} #day_avg_south2015.to_csv('day_avg_south2015.csv') day_avg_south2015_2.to_csv('day_avg_south2015.csv') # - salinity2016_2 = tracers.salinity.sel(time ='2016', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) 
salinity2016_2.plot() temperature2016_2 = tracers.temperature.sel(time ='2016', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) temperature2016_2.plot() # + jupyter={"outputs_hidden": true} sigma2016_2 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2016_2.values, temperature2016_2.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2016_2.shape[0] south2016_2 = np.sum(sigma2016_2[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} # now make pandas arrays of these so I can day bin them etc den2016_2 = pd.DataFrame(data=south2016_2, index=salinity2016_2.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} #day_avg_south2015 = den2015.resample('1D').mean() day_avg_south2016_2 = den2016_2.resample('1D').mean() # + jupyter={"outputs_hidden": true} #day_avg_south2015.to_csv('day_avg_south2015.csv') day_avg_south2016_2.to_csv('day_avg_north2016.csv') # - salinity2017n = tracers.salinity.sel(time ='2017-05', gridX=iX, gridY=iY, method='nearest').isel(depth=depths) salinity2017n.plot() salinity2017 = xr.concat([salinity2017, salinity2017n], dim='time') salinity2017.plot() salinity2017.time.plot(marker='o') # + jupyter={"outputs_hidden": true} salinity2017.to_netcdf('salinity2017_ss.nc') # - temperature2017n = tracers.temperature.sel(time ='2017-12', gridX=iX, gridY=iY, method='nearest').isel(depth=depths) temperature2017n.plot() temperature2017 = xr.concat([temperature2017, temperature2017n], dim='time') temperature2017.plot() # + jupyter={"outputs_hidden": true} temperature2017.to_netcdf('temperature2017_ss.nc') # + jupyter={"outputs_hidden": true} sigma2017 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2017.values, temperature2017.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2017.shape[0] south2017 = np.sum(sigma2017[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} den2017 = pd.DataFrame(data=south2017, index=salinity2017.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} 
day_avg_south2017 = den2017.resample('1D').mean() # + jupyter={"outputs_hidden": true} day_avg_south2017.to_csv('day_avg_south2017.csv') # + jupyter={"outputs_hidden": true} sigma2016_2 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2016_2.values, temperature2016_2.values]) # + jupyter={"outputs_hidden": true} salinity2017_2 = tracers.salinity.sel(time ='2017', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) temperature2017_2 = tracers.temperature.sel(time ='2017', gridX=iX2, gridY=iY2, method='nearest').isel(depth=depths) # + jupyter={"outputs_hidden": true} sigma2017_2 = gsw_calls.generic_gsw_caller( 'gsw_sigma0.m', [salinity2017_2.values, temperature2017_2.values]) # + jupyter={"outputs_hidden": true} tlen = sigma2017.shape[0] south2017 = np.sum(sigma2017[0:tlen] * e3t, axis=1) north2017 = np.sum(sigma2017_2[0:tlen] * e3t, axis=1) # + jupyter={"outputs_hidden": true} den2016 = pd.DataFrame(data=south2016, index=salinity2016.time, columns=['sigma']) den2016_2 = pd.DataFrame(data=north2016, index=salinity2016.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} den2017 = pd.DataFrame(data=south2017, index=salinity2017.time, columns=['sigma']) den2017_2 = pd.DataFrame(data=north2017, index=salinity2017.time, columns=['sigma']) # + jupyter={"outputs_hidden": true} catsouth = pd.concat([den2015, den2016, den2017]) catnorth = pd.concat([den2015_2, den2016_2, den2017_2]) # + jupyter={"outputs_hidden": true} day_avg_south = catsouth.resample('1D').mean() day_avg_north = catnorth.resample('1D').mean() # + jupyter={"outputs_hidden": true} msouth = day_avg_south.rolling(window=4, center=True).mean() mnorth = day_avg_north.rolling(window=4, center=True).mean() # + jupyter={"outputs_hidden": true} fig, ax = plt.subplots(1, 1) day_avg_south.plot(ax=ax) day_avg_north.plot(ax=ax) msouth.plot(ax=ax) mnorth.plot(ax=ax) # + jupyter={"outputs_hidden": true} plt.plfig, ax = plt.subplots(1, 1) (day_avg_south-day_avg_north).plot(ax=ax) 
(msouth-mnorth).plot(ax=ax) # + jupyter={"outputs_hidden": true} msouth.to_csv('south_sigma_57.csv') mnorth.to_csv('north_sigma_57.csv') # + jupyter={"outputs_hidden": true}
notebooks/Ariane/CalculateDensity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Importing the python libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf import sklearn as sk import os import cv2 import albumentations as A from PIL import Image from tensorflow import keras from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix #Setting the training and testing paths to extract the files training_path = "/kaggle/input/sp-society-camera-model-identification/train/train/" testing_path = "/kaggle/input/sp-society-camera-model-identification/test/test/" _,training_classes,_=next(os.walk(training_path)) #print(training_classes) # - #Setting the hyperparameters for the deep learning model batch_size = 8 epochs = 40 learning_rate = 1e-4 #testing_files _,_,testing_files = next(os.walk(testing_path)) #Creating the training dataset by iterating over the directory #Appending the training paths to the list and extracting the classes from the directory training_paths = [] labels = [] for i in training_classes: _,_,filenames = next(os.walk(training_path+i)) for j in filenames: training_paths.append(training_path+i+'/'+j) labels.append(i) #Ensure that the training size and the class labels are the same assert len(training_paths)==len(labels) #Setting the random seed to 33 for reproducible results seed = 33 #Moving the training paths to an pandas DataFrame for easy indexing and one-hot encoding the class labels training_data = pd.DataFrame(training_paths,columns=['Training Image Path']) classes = pd.DataFrame(labels) classes = pd.get_dummies(classes) classes.columns=np.unique(labels)# One Hot Encode the class variables #print the training data training_data #printing the one-hot encoded class variable classes.head() #Creating the class 
dictionary for converting the one-hot encoded variables classes_dict = {'0':'HTC-1-M7', '1':'LG-Nexus-5x', '2':'Motorola-Droid-Maxx', '3':'Motorola-Nexus-6', '4':'Motorola-X', '5':'Samsung-Galaxy-Note3', '6':'Samsung-Galaxy-S4', '7':'Sony-NEX-7', '8':'iPhone-4s', '9':'iPhone-6'} #Creating the testing DataFrame for easy indexing testing_data = pd.DataFrame(testing_files,columns=['Testing Image Path']) testing_data #Reading images form the disk to memory and converting from BGR to RGB def read_img(path): temp = cv2.imread(path) temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB) return np.array(temp) # + #Defining the training and testing augmentations length = 512 #Defining the length used for each image training_transforms = A.Compose([A.RandomCrop(height=length,width=length), A.RandomGamma(gamma_limit=(80,120),p=0.9), A.JpegCompression(quality_lower=70,quality_upper=90,p=0.9), A.GridDistortion(interpolation=cv2.INTER_CUBIC)]) testing_augmentation = A.Compose([ A.CenterCrop(height=length,width=length)]) # + #Creating a custom dataset to read the images in a mini-batch format class Dataset(keras.utils.Sequence): def __init__(self,x,y,batch_size,augmentations,test): self.x = x self.y = y self.batch_size = batch_size self.augmentations = augmentations self.test = test def __len__(self): return int(len(self.x)/self.batch_size) def __getitem__(self,index): batched_x = self.x.iloc[index*self.batch_size:(index+1)*self.batch_size].to_numpy() if(self.test): images = [(self.augmentations(image=read_img(i))['image']) for i in batched_x] else: batched_y = self.y.iloc[index*self.batch_size : (index+1)*self.batch_size,:] images = [(self.augmentations(image=read_img(i[0]))['image']) for i in batched_x] return np.array(images)if self.test else (np.array(images),np.array(batched_y.values)) # - #Creating a dummy datast to test the functionality dummy_dataset = Dataset(training_data,classes,batch_size=batch_size,augmentations=training_transforms,test=False) # + #Calling the dummy dataset to 
check the functionality (a,b)=dummy_dataset.__getitem__(5) print(a[0]) print(a.shape) print(b) print(b.shape) print(type(b)) plt.imshow(a[0]) # - #Creating the Keras model for training with the EfficientNet Model being instantiated with imagenet weights def keras_model(length,input_size,output_classes): p=0.3 base_model = tf.keras.applications.EfficientNetB4(weights='imagenet', include_top=False, input_shape=[length, length, 3]) base_model.trainable = False inputs =keras.layers.Input(shape=(length,length,3)) x = keras.applications.efficientnet.preprocess_input(inputs) x = base_model(x, training=False) x = keras.layers.GlobalAveragePooling2D()(x) x = keras.layers.Dense(32,activation='relu')(x) outputs = keras.layers.Dense(10, activation='softmax')(x) model =keras.Model(inputs=inputs, outputs=outputs) model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() return model #Instantiating the keras model training_model = keras_model(length,length*length*3,10) #Printing the summary of training model training_model.summary() # Splitting the training dataset into a training and validation dataset with the validation split = 0.1 and setting the Shuffle=True xtrain,xval,ytrain,yval = train_test_split(training_data,classes,test_size=0.1,shuffle=True,random_state=seed) #Printing the size of each array to check that the sizes match print(len(xtrain),len(xval)) print(len(ytrain),len(yval)) # + #Pushing the arrays into a custom keras dataset to feed to the neural networ training_dataset = Dataset(xtrain,ytrain,batch_size=batch_size,augmentations=training_transforms,test=False) validation_dataset = Dataset(xval,yval,batch_size=1,augmentations=training_transforms,test=False) print(len(training_dataset)) print(len(validation_dataset)) # + #Creating a list of callbacks to add to the training model model_file = "output/base_model_weights.hd5" # defining the output path to save the model file checkpoint = 
tf.keras.callbacks.ModelCheckpoint(model_file, monitor="val_accuracy", save_best_only=True, mode='max') # Save the model which has the best validation acc reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_accuracy", factor=0.9, patience=2, min_lr=1e-6, mode="max", verbose=True) # reduce Lr if the validation acc does not increase early_stopping = tf.keras.callbacks.EarlyStopping(monitor="val_accuracy", patience=5, mode="max", verbose=True) # Stop the training if the val acc does not improve callbacks_list = [checkpoint, reduce_lr, early_stopping] # Call all of the instantiated callbacks into one variable # + #Training the model for 10 epochs multiple times, each time the best model is read back for training inner_epochs = 10 training_model.fit(training_dataset,validation_data=validation_dataset,epochs=inner_epochs,batch_size=batch_size,callbacks=callbacks_list,verbose=1) loops = (epochs-inner_epochs)//inner_epochs for i in range(0,3): training_model= keras.models.load_model(model_file) training_model.fit(training_dataset,validation_data=validation_dataset,epochs=inner_epochs,batch_size=batch_size,callbacks=callbacks_list,verbose=1) # - #Reading the submission file from the directory sample_submission = pd.read_csv('/kaggle/input/sp-society-camera-model-identification/sample_submission.csv') sample_submission.head() #Printing the top few rows from the submission file # + #Creating a testing dataset with the filepaths from the submission file x_test = testing_path+sample_submission['fname'] #x_test = x_test.to_numpy() print(x_test[0]) #training_model.load_model(model_file) x_test = Dataset(x_test,x_test,test=True,batch_size=1,augmentations=testing_augmentation) # - #generating predictions for the validation dataset val_predicted = training_model.predict(validation_dataset) # + #reading the best file from memory and generating predictions for the testing dataset training_model = tf.keras.models.load_model(model_file) predicted = 
training_model.predict(x_test) print(predicted) # - #Creating a function to convert the one-hot encoded item into class labels def convert_predictions_to_labels(labels): temp = [] for item in labels.argmax(axis=1): temp.append(classes_dict[str(item)]) return temp # + #converting the test and validation predictions to their class labels test_labels = convert_predictions_to_labels(predicted) validation_labels = convert_predictions_to_labels(val_predicted) # - #filling up the submission file with the test predictions sample_submission['camera'] = test_labels sample_submission.head() #Submitting the test predictions sample_submission.to_csv("submission.csv", index=False) #Converting the validation one-hot encoded labels yval=yval.idxmax(axis=1) #printing the confusion matrix con_matrix = confusion_matrix(yval,validation_labels) print(con_matrix)
models/model-3-without-dropout.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Problem
#
# * Input
# * Transform
# * Output

# ## Input
#
# The input is a string, any string, all characters.
#
# let's assume that input is 'City of Madrid'

s = 'City of Madrid'

# ## Transform
#
# ???

x = [1, 5, 34, 2, 6, 7, 89, 9, 0]

for value in x:
    print(value)

len(x)

len(s)

17 % 2

s.lower()

# Walk the string with enumerate so each character comes paired with its
# position; even positions become upper case, odd positions lower case.
# `c` keeps a running count of characters processed (inspected below).
output = ''
c = 0
for position, character in enumerate(s):
    output += character.upper() if position % 2 == 0 else character.lower()
    c += 1

output

# What's the value of c?
c

# ## Output
#
# * Every even position is upper case
# * Every odd position is lower case
#
# Examples of even numbers: 0,2,4,6,8,....
#
# Examples of odd numbers: 1,3,5,7,9
#
# The result should be 'CiTy oF MaDrId'
week 2/lower_upper_string_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <img src="../img/logo_white_bkg_small.png" align="right" />
#
# # Worksheet 1.2: Exploring One Dimensional Data
# This worksheet covers concepts covered in the first half of Module 1 - Exploratory Data Analysis in One Dimension. It should take no more than 20-30 minutes to complete. Please raise your hand if you get stuck.
#
# There are many ways to accomplish the tasks that you are presented with, however you will find that by using the techniques covered in class, the exercises should be relatively simple.
#
# ## Import the Libraries
# For this exercise, we will be using:
# * Pandas (http://pandas.pydata.org/pandas-docs/stable/)
# * Numpy (https://docs.scipy.org/doc/numpy/reference/)
# * Matplotlib (http://matplotlib.org/api/pyplot_api.html)
#

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# NOTE(review): %pylab is deprecated in favour of %matplotlib inline plus the
# explicit imports above — confirm before changing.
# %pylab inline

# ## Exercise 1: Summarize the Data
# For this exercise, you are given a Series of random numbers creatively names `random_numbers`. For the first exercise please do the following:
#
# 1. Remove all the numbers less than 10
# 2. Sort the series
# 3. Calculate the Tukey 5 number summary for this dataset
# 4. Count the number of even and odd numbers
# 5. Find the five largest and 5 smallest numbers in the series

#Generate a series of random numbers between 1 and 100.
# NOTE(review): np.random.randint's upper bound is exclusive, so the values
# actually fall in 1..99 — confirm whether 1..100 inclusive was intended.
random_numbers = pd.Series( np.random.randint(1, 100, 50) )

# +
# Your code here...

#Filter the Series

#Sort the Series

#Calculate the Tukey 5 Number Summary

#Count the number of even and odd numbers

#Find the five largest and smallest numbers
# -

# ## Exercise 2:
# Using the random number Series create a histogram with 8 bins.

# ## Exercise 3:
# You have been given a list of US phone numbers. The area code is the first three digits. Your task is to produce a summary of how many times each area code appears in the list. To do this you will need to:
# 1. Extract the area code from each phone number
# 2. Count the unique occurances.

# Sample data: each entry is formatted '(NNN) NNN-NNNN', so the area code is
# the three digits at string positions 1-3.
phone_numbers = [
 '(833) 759-6854',
 '(811) 268-9951',
 '(855) 449-4648',
 '(833) 212-2929',
 '(833) 893-7475',
 '(822) 346-3086',
 '(844) 259-9074',
 '(855) 975-8945',
 '(811) 385-8515',
 '(811) 523-5090',
 '(844) 593-5677',
 '(833) 534-5793',
 '(899) 898-3043',
 '(833) 662-7621',
 '(899) 146-8244',
 '(822) 793-4965',
 '(822) 641-7853',
 '(833) 153-7848',
 '(811) 958-2930',
 '(822) 332-3070',
 '(833) 223-1776',
 '(811) 397-1451',
 '(844) 096-0377',
 '(822) 000-0717',
 '(899) 311-1880']

# Your code here...
notebooks/Worksheet 1.2 - Exploring One Dimensional Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Variational Fashion Encoder # # In this experiment I want to try out [Variational Auto Encoders](https://arxiv.org/pdf/1606.05908.pdf) # on a new fashion classification dataset from [Zalando](https://github.com/zalandoresearch/fashion-mnist). # # First copy the /data/fashion/ folder from the zalando repository next to this notebook. # # I will build a generic Variational Auto Encoder and then learn it on the new fashion-mnist dataset. # + import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data from scipy.misc import imsave from scipy.misc import imresize # - labels_dict = { 0: "T-shirt-top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat", 5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle boot" } data = input_data.read_data_sets('data/fashion/') n_samples = data.train.num_examples np.random.seed(0) tf.set_random_seed(0) # # Variational Auto Encoder # # ![VAE](images/vae_small.png) # # + The bottom part of the model is embedding the input X into a mean and variance vector # + The mean and variance represent the parameters of a gaussian that is trained to be close to a standard normal distribution N(0, I) # + The decoder network is trying to reconstruct the input from a sample from said distribution # + Implementation is inspired by hwalsuklee [github](https://github.com/hwalsuklee/tensorflow-mnist-VAE/blob/master/vae.py) # # First lets define some standard utils to make construction of the neural network easier. 
We define an # initialization method called Xavier that samples uniformly: # $$(-\sqrt{\frac{6}{in + out}}, \sqrt{\frac{6}{in + out}})$$ # We then define a layer by the weights(Xavier) and biases(zeros) and it's result as: # $$z = x * W +b$$ # + def xavier(nin, nout): hi = np.sqrt( 6 / (nin + nout)) lo = -hi w = tf.random_uniform((nin, nout), minval=lo, maxval=hi, dtype= tf.float32) return w def bias(nout): return tf.zeros([nout], dtype=tf.float32) def layer(x, l, nin, nout): w = tf.Variable(xavier(nin, nout), name="W" + str(l)) b = tf.Variable(bias(nout), name= "b" + str(l)) z = tf.matmul(x, w) + b return z # - # In the encoder part the input is fed through feed forward layers multiple layers. In this case I chose to # use ReLu activations except for the output. The output layer is special since it's activation is used # as the parameters of a multivariate normal disribution with a diagonal covariance matrix or in other words a variance vector.The mean vector and the variance vector are concatenated. # $$output = [\mu, \sigma]$$ # That means that the encoders output needs to be twice as large. # Furthermore, the output's activation is a tanh function. A ReLu function's # output is between 0 and 1 but a normal distribution has real valued parameters. # A tanh gives values between -1 and 1 which is more appropriate. 
def encoder(x, shapes):
    """Map input batch `x` to the (mean, sigma) parameters of a diagonal Gaussian.

    shapes -- list of (nin, nout) pairs, one per feed-forward layer.
    Hidden layers use ReLu; the last layer is doubled in width (mean and
    variance concatenated) and squashed with tanh before being split.
    """
    a = x
    l = 0
    for nin, nout in shapes:
        if l == len(shapes) - 1:
            z = layer(a, l, nin, nout * 2) # In the last layer, the embedding represents the mean and variance concat
            a = tf.nn.tanh(z)
        else:
            z = layer(a,l, nin, nout)
            a = tf.nn.relu(z)
        l += 1
    n_out = int(int(a.shape[1]) / 2)
    # NOTE(review): the mean is taken from the SECOND half and sigma from the
    # first — the opposite of the [mu, sigma] ordering described above. Both
    # halves are learned, so training is unaffected, but confirm intent.
    mean = a[:, n_out:]
    # softplus keeps sigma positive; the 1e-6 floor avoids zero variance.
    sigm = 1e-6 + tf.nn.softplus(a[:, :n_out])
    return (mean, sigm)

# The decoder network is a simple feed forward net with ReLu activations and
# a sigmoid output
def decoder(x, shapes):
    """Reconstruct an input from a latent sample.

    First layer uses tanh (matching the encoder's output range), the last
    layer sigmoid (pixel values in [0, 1]), and ReLu in between.
    """
    a = x
    l = 0
    for nin, nout in shapes:
        z = layer(a, l, nin, nout)
        if l == 0:
            a = tf.nn.tanh(z)
        elif l == len(shapes) - 1:
            a = tf.nn.sigmoid(z)
        else:
            a = tf.nn.relu(z)
        l += 1
    return a

# The actual variational autoencoder then passes the input through the encoder
# receiving the mean and variance of the normal distribution.
# A sample is drawn from said distribution and passed into the decoder.
#
# The loss for the decoder is defined using the difference between the input and the output.
# The encoder loss uses the Kullback-Leibler divergence to a standard normal.
# The ELBO is the expectation lower bound.
def vae(x, enc_shapes, dec_shapes):
    """Build the VAE graph.

    Returns (reconstruction, latent sample, loss, -log-likelihood, KL term).
    """
    mu, sigm = encoder(x, enc_shapes)
    # Reparameterization trick: sample = mu + sigma * eps with eps ~ N(0, I).
    sample = mu + sigm * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)
    y = decoder(sample, dec_shapes)
    # Bernoulli log-likelihood of the input under the reconstruction.
    marginal_likelihood = tf.reduce_sum(x * tf.log(y) + (1 - x) * tf.log(1 - y), 1)
    # Closed-form KL(N(mu, sigma^2) || N(0, I)); 1e-8 guards the log.
    KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigm) - tf.log(1e-8 + tf.square(sigm)) - 1, 1)
    marginal_likelihood = tf.reduce_mean(marginal_likelihood)
    KL_divergence = tf.reduce_mean(KL_divergence)
    ELBO = marginal_likelihood - KL_divergence
    # Minimizing -ELBO maximizes the evidence lower bound.
    loss = -ELBO
    return y, sample, loss, -marginal_likelihood, KL_divergence
The learning is set up below using the
# adam optimizer

# +
def reverse(shapes):
    """Mirror the encoder architecture for the decoder: reverse the layer
    order and flip each (in, out) pair."""
    x = [(o, i) for i, o in shapes]
    x.reverse()
    return x

input_layer = tf.placeholder(tf.float32, shape=(None, 784))  # flattened 28x28 images

enc_shapes = [
    (784, 512),
    (512, 256),
    (256, 128)
]

dec_shapes = reverse(enc_shapes)

# Hyperparameters for Adam / batching.
rate = 0.001
batch_size = 100
total_batch = int(n_samples / batch_size)
epochs = 15

y, z, loss, neg_marginal_likelihood, KL_divergence = vae(input_layer, enc_shapes, dec_shapes)
train_op = tf.train.AdamOptimizer(rate).minimize(loss)
# -

# Last but not least learning the model using batch gradient descent and then plotting the reconstruction.
# I also run a clustering experiment using k-means
# NOTE(review): despite the line above, no k-means/clustering code follows in
# this cell — confirm whether it was removed or lives elsewhere.

# +
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # training: accumulate per-epoch totals of the three loss components for logging
    print("Learning: ")
    for epoch in range(epochs):
        total_loss = 0.0
        loss_lik = 0.0
        loss_div = 0.0
        for i in range(0, total_batch):
            (batch, _) = data.train.next_batch(batch_size)
            _, tot_loss, loss_likelihood, loss_divergence = sess.run(
                (train_op, loss, neg_marginal_likelihood, KL_divergence),
                feed_dict={ input_layer: batch }
            )
            total_loss += tot_loss
            loss_lik += loss_likelihood
            loss_div += loss_divergence
        print(" - epoch: ", epoch, total_loss, loss_lik, loss_div)

    # creating reconstruction from test images
    print("Reconstruction: ")
    (images, labels) = data.test.next_batch(100)
    a = 0
    b = 0
    latent_img = []
    for i in range(0, 100):
        print(" - reconstructing: " + labels_dict[labels[i]], a, b, i)
        # Run one image through the full VAE; y is the reconstruction, z the latent sample.
        y_out, u = sess.run((y, z), feed_dict={input_layer: images[i].reshape(1, 784)})
        y_img = y_out.reshape(28, 28)
        latent_img += [(y_img, labels_dict[labels[i]])]
    # Sort reconstructions by class label so the plot groups classes together.
    latent_img = sorted(latent_img, key = lambda x : x[1])

    # plotting: fill a 10x10 grid column by column (a is the row, b the column)
    print("Plotting")
    f, axarr = plt.subplots(10, 10)
    for i in range(0, 100):
        if a == 10:
            a = 0
            b += 1
        axarr[a, b].set_title(latent_img[i][1])
        axarr[a, b].imshow(latent_img[i][0], cmap=plt.get_cmap('gray'))
        a += 1
    f.subplots_adjust(hspace = 0.7)
    f.set_size_inches(25.0, 25.0, forward=True)
    # NOTE(review): assumes the 'result/' directory already exists — savefig
    # will fail otherwise; confirm.
    plt.savefig("result/prediction.png")
    print("Done")
# -
Variational Fashion Encoder.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Time Series Analysis PUC Data

# +
import pandas as pd
import numpy as np
import datetime
from collections import Counter

#ML Libraries
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn import preprocessing
from sklearn.model_selection import cross_validate
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

#Feature extraction
from tsfresh import extract_relevant_features
# -

# ## Functions

# +
def cleannanvalues(df, datos_series, method='zeros'):
    """Fill NaN values and min-max normalize the selected columns, in place.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame whose columns are cleaned (modified in place).
    datos_series : iterable of str
        Column names in ``df`` that will be cleaned and normalized.
    method : str
        NaN strategy. Only 'zeros' (replace NaN with 0) is implemented.

    Returns
    -------
    pandas.DataFrame
        The same ``df``, for convenience.
    """
    # BUG FIX: the loop previously iterated the module-level global
    # `datos_serie` instead of the `datos_series` parameter, so the
    # argument passed by the caller was silently ignored.
    for label in datos_series:
        # NaN strategy
        if method == 'zeros':
            df[label].fillna(0, inplace=True)
        # Min-max normalization to the [0, 1] range.
        df[label] = (df[label] - df[label].min()) / (df[label].max() - df[label].min())
    return df


def crea_disciplinas_series_index(df_curso, tipo=1, min_samples=20):
    """Create the dictionary of disciplines used as time-series columns.

    Parameters
    ----------
    df_curso : pandas.DataFrame
        Data of the selected degree program.
    tipo : int
        1: every discipline with at least ``min_samples`` observations in
           the selected semester.
        2: only the recommended courses (TODO in the original; currently
           the same semester slice without the minimum-sample filter).
    min_samples : int
        Minimum number of samples of each course per semester to be
        included. Use -1 to include all possible courses.

    Returns
    -------
    dict
        Maps semester -> sorted list of selected discipline codes.
    """
    # NOTE(review): relies on the module-level globals
    # `semestre_do_aluno_model` and `df_to_createseries` being defined
    # before this is called -- confirm whether `df_curso` was intended here.
    disciplinas_series_index = {}
    for semestre in semestre_do_aluno_model:
        temp_semes = df_to_createseries.loc[df_curso['semestre_do_aluno'] == semestre]
        if tipo == 1:
            tem_min = temp_semes.groupby("grupos").filter(lambda x: len(x) > min_samples)
            disciplinas_series_index[semestre] = sorted(tem_min['grupos'].unique().tolist())
        elif tipo == 2:
            # BUG FIX: this branch referenced the undefined name `temp`
            # (NameError at runtime); it now uses the semester slice
            # computed above.
            disciplinas_series_index[semestre] = sorted(temp_semes['grupos'].unique().tolist())
    return disciplinas_series_index


def crea_dict_por_matricula(disciplinas_series_index, matricula_serie, matricula, missing_discipline=0):
    """Build one dictionary per semester for a single student (matricula).

    Parameters
    ----------
    disciplinas_series_index : dict
        Set of disciplines per semester to be considered.
    matricula_serie : dict
        The student's data organized per semester.
    matricula : str
        Student id.
    missing_discipline : int (default 0)
        Value written for every series variable of a selected course the
        student was not enrolled in.

    Returns
    -------
    list of dict or None
        One dict per considered semester, or None when the student has no
        data for every considered semester.
    """
    # NOTE(review): uses the module-level globals `semestre_do_aluno_model`
    # and `datos_serie`.
    if len(semestre_do_aluno_model) != len(matricula_serie):
        # The student does not have data in every considered semester.
        return None
    series_total = []
    len_total = 0
    for semestre in semestre_do_aluno_model:
        len_total = len(disciplinas_series_index[semestre])
        serie = {}
        serie['semestre'] = semestre
        serie['id'] = matricula
        for disciplina in disciplinas_series_index[semestre]:
            if disciplina in matricula_serie[semestre]:
                # The student took this course: copy the selected variables.
                for dato in datos_serie:
                    if matricula_serie[semestre][disciplina][dato] is not None and not np.isnan(matricula_serie[semestre][disciplina][dato]):
                        key = disciplina + "_" + dato
                        serie[key] = matricula_serie[semestre][disciplina][dato]
                    else:
                        # Single-argument print(...) is valid on both
                        # Python 2 and 3 (was a py2-only print statement).
                        print(matricula_serie[semestre][disciplina])
                        raise ValueError('Valor de un dato de serie errado. Revisar matricula: ' + matricula_serie[semestre][disciplina]['matricula'] + ' Semestre: '+ str(semestre) + ' Disciplina: '+ str(disciplina))
            else:
                # Missing data: the student did not enroll in this course.
                for dato in datos_serie:
                    key = disciplina + "_" + dato
                    serie[key] = missing_discipline
        # Sanity check: +2 accounts for the 'id' and 'semestre' keys.
        if len(serie) != ((len_total*len(datos_serie))+2):
            raise ValueError('La longitud esperada de la serie no se obtuvo!!!')
        series_total.append(serie)
    return series_total


def create_series_and_labels_course_variable(df_to_createseries, disciplinas_series_index):
    """Create the time series and their dropout labels; each course is a
    time-series variable.

    Parameters
    ----------
    df_to_createseries : pandas.DataFrame
        Grouped by 'matricula' (student id) internally.
    disciplinas_series_index : dict
        Disciplines to be considered, per semester.

    Returns
    -------
    series_model : list
        Series rows for the selected students.
    label_model : dict
        matricula -> 1 (dropout) / 0 (no dropout).
    """
    # NOTE(review): uses the module-level globals `drop_out_labels` and
    # `missing_disc`.
    series_model = []  # Series of the selected students
    label_model = {}   # Label: Dropout / No_Dropout
    for matricula, df_aluno in df_to_createseries.groupby('matricula'):
        gp_sems = df_aluno.groupby('semestre_do_aluno')
        matricula_serie = {}
        for semestre, df_sem_al in gp_sems:
            matricula_serie[semestre] = {}
            for index, row in df_sem_al.iterrows():
                matricula_serie[semestre][row['grupos']] = row
        # First status value of this student (idiom: .iloc[0] instead of
        # list(...tolist())[0]; same value, no intermediate copies).
        label_sit = df_aluno.sit_vinculo_atual.iloc[0]
        serie_matricula = crea_dict_por_matricula(disciplinas_series_index, matricula_serie, matricula, missing_discipline=missing_disc)
        if serie_matricula:
            # It can happen that none of the disciplines in
            # disciplinas_series_index was taken -- then skip the student.
            series_model.extend(serie_matricula)
            if label_sit in drop_out_labels:
                label_model[matricula] = 1  # DROPOUT class label
            else:
                # COMMENT FIX: this branch is the NO-dropout label.
                label_model[matricula] = 0  # NO-DROPOUT class label
        else:
            continue
    return series_model, label_model
# -

# ## CSV read and Dataset main fields explanation

# - tentativas : attempts
# - diff : difference between the recommended semester for a discipline and the actual student semester
# - puntos_enem : admission score?
# - matricula : student id
# - semestre_do_aluno : current student semester
# - sit_final : pass (AP), not pass (RP)
# - sit_vinculo_actual : [JUBILADO, DESLIGADO, MATRICULA EM ABANDONO,...]
# +
# Column dtypes for the CSV read below.
# FIX: the original dict literal listed 'cod_curriculo' twice
# (np.string_ then np.int8); in a dict literal the later key wins, so
# keeping only the effective np.int8 entry is behavior-identical.
dtype = {'ano_curriculo' : np.string_ , 'mat_ano' : np.int8, 'mat_sem' : np.int8,
         'periodo' : np.string_ , 'ano' : np.string_ , 'semestre' : np.int8, 'semestre_recomendado' : np.int8,
         'semestre_do_aluno' : np.int8, 'no_creditos' : np.int8, 'cep' : np.string_ , 'puntos_enem' : np.float32 ,
         'diff' : np.int8 , 'tentativas' : np.int8, 'cant' : np.int8, 'identificador': np.string_,
         'cod_curriculo': np.int8, 'cod_enfase' : np.string_}

dfh = pd.read_csv('historicosFinal.csv', sep=';', dtype = dtype, converters={'grau': lambda x: x.replace(',','.')} )
# Problem reading floats => the dataset uses ',' instead of '.'
dfh = dfh.applymap(lambda x: x.strip() if type(x) is str else x)
dfh['grau'] = dfh['grau'].apply(pd.to_numeric)
# -

# ## Data grouping and filtering
#
# This section selects the degree program, the dropout states, the series
# variables, and the semesters considered when building the series.

# +
# Arquitectura: ARQ-BAQ-2002-0
# Dereito CDD/CDD-BDD-CON-2008-0
# Computacion CSI-BID-2010-0 (39 dropout, 79 no dropout)
# Administracion: ADM-BAN-2001-0

# Filtering parameters
cod_curso = 'ADM'
identificador = 'ADM-BAN-2001-0'
cod_curriculo = 0

# Series-construction parameters
semestre_do_aluno_model = [1,2,3,4]  # Semesters used for modeling
drop_out_labels = ['DESLIGADO','JUBILADO','MATRICULA EM ABANDONO']  # States counted as dropout
#datos_serie = ['grau','diff','tentativas']  # Which per-course variables go into the series?
datos_serie = ['grau']
missing_disc = -1  # Series value used when a course was not taken

name_file = identificador + "_Sem" + '_'.join(str(e) for e in semestre_do_aluno_model) + "_Var_" + '_'.join(datos_serie)
# FIX: single-argument print(...) works on Python 2 and 3 alike; the
# original `print "..."` statements were Python-2-only syntax.
print("File: " + name_file)

cleannanvalues(dfh,datos_serie)
df_curso = dfh.groupby(['cod_curso','cod_curriculo','identificador'])
df_curso= df_curso.get_group((cod_curso,cod_curriculo,identificador))
df_to_createseries = df_curso.loc[df_curso['semestre_do_aluno'].isin(semestre_do_aluno_model)]
df_to_createseries.describe()
# -

# ## Series creation

# +
disciplinas_series_index = crea_disciplinas_series_index(df_curso, tipo = 1, min_samples = 20)
print("Disciplinas used to build the series:")
print(disciplinas_series_index)

series_model, label_model = create_series_and_labels_course_variable(df_to_createseries,disciplinas_series_index)
print("Number of rows in final series (Matriculas * Num Semestres): ")
print(len(series_model))  # Length of the series
print("Ejemplo de Serie Indice 1: ")
print(series_model[0])
print("Number of Matriculas: ")
print(len(label_model))  # Number of students

# +
timeseries = pd.DataFrame(series_model)
timeseries_label = pd.Series(label_model)

# NaN here means: when converted to a DataFrame, there is no series data for
# courses nobody took in semester x but somebody took in semester y (x < y).
# Interpretation => the course was not taken in x: replace with missing_disc.
timeseries = timeseries.fillna(missing_disc)
timeseries.isnull().any()

timeseries.to_csv(name_file+".csv", sep=';')
features_filtered_direct = extract_relevant_features(timeseries, timeseries_label, column_id='id', column_sort='semestre')
features_filtered_direct.to_csv(name_file+"_features.csv", sep=';')
timeseries_label.to_csv(name_file+"_labels.csv", sep=';')
# -

# ## Machine Learning Training Models

# +
# Comment `name_file` back in below to rerun from previously saved features.
#name_file = "TimeSeriesProgramsToRunServer/Res_Feature_Extraction/ADM-BAN-2001-0_Sem1_2_3_4_Var_grau"

X = pd.read_csv(name_file + '_features.csv', sep=';' )
y_read = pd.read_csv(name_file + '_labels.csv', sep=';', header = None , names = ['id','class'], index_col = 'id' ).to_dict('index')

y = []
for alumno in X['id']:
    y.append(y_read[alumno]['class'])

X_selected = X.drop(['id'], axis=1)

print("Dimensiones antes del sampling: ")
print(Counter(y).items())  # Before sampling

#X_resampled, y_resampled = RandomOverSampler(random_state=42).fit_sample(X_selected, y)
X_resampled, y_resampled = SMOTE(random_state=42).fit_sample(X_selected, y)
print("Dimensiones despues del sampling: ")
print(Counter(y_resampled).items())  # After sampling
#X_selected.head()

scoring = {'accuracy': 'accuracy', 'precision': 'precision', 'recall': 'recall', 'f1': 'f1'}
clasificadores_score = {}

#dt = DecisionTreeClassifier()
#scores = cross_validate(dt, X_resampled, y_resampled, scoring=scoring, cv=10)
#res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
#           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
#clasificadores_score['DecisionTreeClassifier'] = res_tem

gnb = GaussianNB()
scores = cross_validate(gnb, X_resampled, y_resampled, scoring=scoring, cv=10)
res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
clasificadores_score['GaussianNB'] = res_tem

svc = SVC(C=1)
scores = cross_validate(svc, X_resampled, y_resampled, scoring=scoring, cv=10)
res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
clasificadores_score['SVC'] = res_tem

rf = RandomForestClassifier(n_estimators = 200)
scores = cross_validate(rf, X_resampled, y_resampled, scoring=scoring, cv=10)
res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
clasificadores_score['RandomForestClassifier'] = res_tem

gbc = GradientBoostingClassifier(learning_rate=0.1,n_estimators=200,max_depth=10)
scores = cross_validate(gbc, X_resampled, y_resampled, scoring=scoring, cv=10)
res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
clasificadores_score['GradientBoostingClassifier'] = res_tem

#lr = LogisticRegression()
#scores = cross_validate(lr, X_resampled, y_resampled, scoring=scoring, cv=10)
#res_tem = {"Acc" : np.average(scores['test_accuracy']), "Recall": np.average(scores['test_recall']),
#           "Precision": np.average(scores['test_precision']), "F1": np.average(scores['test_f1'])}
#clasificadores_score['lr'] = res_tem

# #xgboost?

print("RESULTADOS DE LOS MODELOS DE CLASIFICACION")
print(clasificadores_score)
# -

# +
lista_features = list(X_selected.columns.values)  # Each course is a feature

# Mean decrease impurity
rf.fit(X_resampled, y_resampled)
feature_importances = pd.DataFrame(rf.feature_importances_, index = lista_features,
                                   columns=['importance']).sort_values('importance',ascending=False)
print(feature_importances.head(10))
2-Time Series Classification - Feature Extraction Approach.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Regression: underfitting and overfitting
#
# We will look at a simple example of fitting a polynomial function to a set of data points. A polynomial is defined by its degree $n$ and can be written as: $y = \sum_{k=0}^n a_k x^k$.
#
# The simplest polynomial, with a degree of $n=1$, is the linear function: $y = a_1x + a_0$. For example a third degree ($n=3$) polynomial would have the form $y = a_3x^3 + a_2x^2 + a_1x + a_0$.
#
# But first, let's start with the necessary Python imports. Here we will be using the popular [scikit-learn](https://scikit-learn.org/stable/index.html) framework for machine learning.

# +
import numpy as np
import matplotlib
import matplotlib.pyplot as plt

from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.pipeline import make_pipeline

# %matplotlib inline
# -

# ## Generate data points
#
# First, we will create a set of points on a sine curve. We will add a bit of randomness, just to make it more interesting.

# +
np.random.seed(42)

N=20
x = np.sort(np.random.rand(N))
y = np.sin(1.2 * x * np.pi) + 0.1 * np.random.randn(len(x))
# -

# We create a helper function to plot the points.

def plot_curve(x, y):
    """Scatter-plot the data points as black dots on a fixed [0,1]x[-1,1.5] frame and show the figure."""
    plt.plot(x, y, 'ko')
    plt.ylim(-1, 1.5)
    plt.xlim(0, 1)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()

plot_curve(x, y)

# ## Fitting a polynomial model
#
# First, we will create another helper function that takes a given model and generates its predictions for the whole range of $x$ values, drawn on top of the data points.

def plot_curve_and_model(x, y, model):
    """Fit `model` to (x, y) and draw its predictions over [0, 1] on top of the data points.

    Note: `model.fit` is called here, so the passed model is mutated (trained) as a side effect.
    """
    model.fit(x.reshape(-1, 1), y)
    x_plot = np.linspace(0, 1, 100)
    y_pred = model.predict(x_plot.reshape(-1, 1))
    plt.plot(x_plot, y_pred)
    plot_curve(x, y)

# ### Underfitting
#
# First, we'll start with the simples linear model, where the degree is 1. What can you say about the result?

# +
model = make_pipeline(PolynomialFeatures(degree=1), LinearRegression())
plot_curve_and_model(x, y, model)
# -

# ### Overfitting
#
# Next, we will try with a very complex 13-degree model. What happens?

# +
model = make_pipeline(PolynomialFeatures(degree=13), LinearRegression())
plot_curve_and_model(x, y, model)
# -

# ### Balanced model
#
# Try to find a balanced model. You can try:
#
# - finding the right degree for the model
# - using regularization such as in [Ridge regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html)

# +
model = make_pipeline(PolynomialFeatures(degree=5), LinearRegression())
#model = make_pipeline(PolynomialFeatures(degree=13), Ridge(alpha=0.001))
plot_curve_and_model(x, y, model)
notebooks/sklearn-under-and-overfitting.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     language: python
#     name: python3
# ---

# # Static Typing
#
# Python is a dynamic language; that means any error will show up during execution, and that can create a problem.
#
# The way to add type hints in order to make Python look static is the following:

# +
a: int = 5
b: str = "World"
c: float = 3.6
d: bool = False

print(type(a), a,)
print(type(b), b,)
print(type(c), c,)
print(type(d), d,)
# -

# The same can be applied to functions with the next syntax

# +
# Inside the parentheses of the function we annotate the arguments,
# and after `->` we can add what type the result of the function will be.
def add(x: int, y: int) -> int:
    return x + y

print(add(5,5))

# But what happens when we do not pass arguments of the annotated type?
print(add("2", "1"))
# It concatenates the two strings, even though we specified that they are
# supposed to be integers: annotations are hints only, not enforced at runtime.
print(add("l","y"))
# -

# To do this in list and dictionaries we type the next syntax

# +
from typing import Dict, List

population: Dict[str,int] = {
    "canada": 38000000,
    "brazil": 212000000,
    "japan":125000000 ,
}
# -

# And with tuple

# +
from typing import Tuple

answer: Tuple[int,float,str,bool] = (6, 4.8, "lol", False)
# -

# A list containing a dictionary, where each value of the dictionary is a tuple

# +
from typing import Tuple, Dict, List

# BUG FIX: the original wrote `age = List[...]` followed by a plain
# assignment -- that binds the *type object* to `age` and then immediately
# discards it. A real variable annotation uses a colon (PEP 526):
age: List[Dict[str, Tuple[int, int]]] = [
    {
        "couple1": (34,33),
        "couple2": (76,70),
        "couple3": (55,70),
    }
]

print(age)
python_projects/advanced_python/static_typing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 特徴量エンジニアリング(カテゴリ特徴量) # !pip install -U pip # !pip install scikit-learn==0.20.0 # !pip install category_encoders # !git clone https://github.com/nejumi/fe_workshop.git import numpy as np import pandas as pd from pandas import DataFrame, Series pd.set_option('display.max_columns', 100) from sklearn.preprocessing import OneHotEncoder, LabelEncoder import category_encoders as ce from google.colab import files # + # collaboratoryにLendingClub50000.csvをアップロードする。 #uploaded = files.upload() # ローカルからアップロードできるが今回は使用しない。 # - # ファイルを読み込む df = pd.read_csv('fe_workshop/dataset/LendingClub50000.csv') df.head() # ## One-hoeエンコーディング # pandasのget_dummiesでOne-hot Encodingする pd.get_dummies(df.grade).head() # sklearnのOneHotEncoderでOne-hot Encodingする ohe = OneHotEncoder() ohe.fit_transform(df[['grade']]).A[:5, :] # np.arrayで返ってくる # category_encodersのOneHotEncoderでOne-hot Encodingする ohe = ce.OneHotEncoder() ohe.fit_transform(df[['grade']]).head() # unknown labelに相当する"grade_-1"が加えられているのがpd.get_dummiesと異なる # では、'addr_state'も同様に処理できるだろうか? 
df[['addr_state']].head() # ユニーク数の多い'addr_state'に対してOne-hotを行うとカラム数が膨大になってしまう。 ohe = ce.OneHotEncoder() ohe.fit_transform(df[['addr_state']]).head() # ## Labelエンコーディング # sklearnのLabelEncoderを用いる le = LabelEncoder() le.fit_transform(df[['addr_state']])[:5] # category_encodersのOrdinalEncoderを用いる。 # 割り振られるラベルは異なるが基本的には同じ。 # ただし、DataRobotのOrdinalEncodingとは大きく異なる点に注意。 oe = ce.OrdinalEncoder() oe.fit_transform(df[['addr_state']]).head() # ## Frequency/Countエンコーディング # 州ごとの観測数をカウントする。 summary = df.groupby(['addr_state'])[['addr_state']].count() summary.head() # 集計結果をapplyする。 df.addr_state.apply(lambda x: summary.loc[x]).head() # ## Targetエンコーディング # 'addr_state'について観測数の少ないカテゴリがどの程度か確認する。 df['addr_state'].value_counts() # category_encodersのTargetEncoderを用いる。 # min_samples_leafはvalue_countsを参考に100にしてみた。 # 州記号を州ごとの平均貸し倒れ率に置換したことになる。 te = ce.TargetEncoder(min_samples_leaf=100) te.fit_transform(df[['addr_state']], df[['bad_loan']]).head() # + # TargetEncoderの対象は予測ターゲットに限定されない。 # ここでは'annual_inc'を選択したため、州記号を州ごとの平均年収に置換したことになる。 # 特にインパクトの大きい特徴量がある場合に有効で、予測ターゲットを用いておらず、ターゲットリーケージの懸念がほぼない点で有用。 te = ce.TargetEncoder(min_samples_leaf=50) result = te.fit_transform(df[['addr_state']], df[['annual_inc']]) result.head() # - # 州記号を州ごとの平均年収を採用してみる。 df.addr_state = result.addr_state # 結果を保存する。 df.to_csv('LendingClub50000_ave_income_state.csv', index=False) # ダウンロードしたら、DataRobotでもう一度予測してみよう files.download('LendingClub50000_ave_income_state.csv')
Feature_Engineering_Categorical.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Import all the modules.
import numpy as np
import pandas as pd
import read_amr

# Instantiate the class using the fortq file
my_fortq = read_amr.ReadAmr("fort.q0001")

# Extract the pandas dataframe.
# FIX: single-argument print(...) calls below work identically on Python 2
# and Python 3 (the original `print x` statements are Python-2-only syntax).
my_fortq_pd = my_fortq.pandas_dataframe
print(my_fortq_pd)

# Now we can play around with this dataframe
# Get information using queries/conditions

# Example 1: All the points within some x,y coordinates. For this, use & (and) conditions
print(my_fortq_pd[(my_fortq_pd.ycoord > -29.7) & (my_fortq_pd.xcoord > -89.7)])

# To check if some value is exactly or close to the value in a particular row and a column, use np.isclose
# Example 2: Get me the data where x coordinate is -64.5
print(my_fortq_pd[np.isclose(my_fortq_pd.xcoord, -64.5)])

# Example 3: Get me all regions with y coordinates < -20 and total height > 300
# (COMMENT FIX: the original comment said "> -20", but the code filters ycoord < -20)
print(my_fortq_pd[(my_fortq_pd.ycoord < -20) & (my_fortq_pd.height > 300.0)])

# Example 4: Say we are interested only in a specific column(s) in this subset.
my_fortq_pd[['eta','xvel']][(my_fortq_pd.ycoord < -20) & (my_fortq_pd.height > 300.0)]
read_amr_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Solution Notebook # ## Problem: Compress a string such that 'AAABCCDDDD' becomes 'A3BC2D4'. Only compress the string if it saves space. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # ## Constraints # # * Can we assume the string is ASCII? # * Yes # * Note: Unicode strings could require special handling depending on your language # * Is this case sensitive? # * Yes # * Can we use additional data structures? # * Yes # * Can we assume this fits in memory? # * Yes # ## Test Cases # # * None -> None # * '' -> '' # * 'AABBCC' -> 'AABBCC' # * 'AAABCCDDDD' -> 'A3BC2D4' # ## Algorithm # # * For each char in string # * If char is the same as last_char, increment count # * Else # * Append last_char and count to compressed_string # * last_char = char # * count = 1 # * Append last_char and count to compressed_string # * If the compressed string size is < string size # * Return compressed string # * Else # * Return string # # Complexity: # * Time: O(n) # * Space: O(n) # # Complexity Note: # * Although strings are immutable in Python, appending to strings is optimized in CPython so that it now runs in O(n) and extends the string in-place. Refer to this [Stack Overflow post](http://stackoverflow.com/a/4435752). 
# ## Code class CompressString(object): def compress(self, string): if string is None or not string: return string result = '' prev_char = string[0] count = 0 for char in string: if char == prev_char: count += 1 else: result += self._calc_partial_result(prev_char, count) prev_char = char count = 1 result += self._calc_partial_result(prev_char, count) return result if len(result) < len(string) else string def _calc_partial_result(self, prev_char, count): return prev_char + (str(count) if count > 1 else '') # ## Unit Test # + # %%writefile test_compress.py from nose.tools import assert_equal class TestCompress(object): def test_compress(self, func): assert_equal(func(None), None) assert_equal(func(''), '') assert_equal(func('AABBCC'), 'AABBCC') assert_equal(func('AAABCCDDDDE'), 'A3BC2D4E') print('Success: test_compress') def main(): test = TestCompress() compress_string = CompressString() test.test_compress(compress_string.compress) if __name__ == '__main__': main() # - # %run -i test_compress.py
arrays_strings/compress/compress_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# <img src="http://i67.tinypic.com/2jcbwcw.png" align="left"></img><br><br><br><br>
#
#
# ## NOTEBOOK: Minimizing Risk of False Positives in Classification
#
# **Example notebook:** Manipulating the binary classification threshold in Logistic Regression (probability between classes) to minimize the number of False Positives.
#
# **Author List**: <NAME> (Fall 2017)
#
# **License**: Feel free to do whatever you want to with this code

# + deletable=true editable=true
# Load packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# %matplotlib inline

# + deletable=true editable=true
# Load iris data set
df = pd.DataFrame(load_iris()['data'],columns=load_iris()['feature_names'])
df = df.iloc[:,0:2] # only look at two features, for visualization
y = load_iris().target # target variable
df['y_target'] = y

# + deletable=true editable=true
# Cast it as a BINARY CLASSIFICATION problem (only include classes 1 and 2)
idx = np.where(y==1)[0][0]
df = df.iloc[idx:,:]

# + deletable=true editable=true
# Label targets as 0, 1 instead of 1, 2
df['y_target'] = df['y_target'] - 1
df['y_target'].unique()

# + deletable=true editable=true
df.head()

# + deletable=true editable=true
# Drop duplicates so that we don't plot data on top of each other
df = df.drop_duplicates(subset=df.columns[0:2])

# + deletable=true editable=true
# Assign X and Y
df_x = df.iloc[:,:2]
y = df['y_target']

# + deletable=true editable=true
# Fit logistic regression model to the data that is not regularized.
# BUG FIX: the original used C=10^6, but `^` is bitwise XOR in Python
# (10^6 == 12), so the model was in fact strongly regularized. `10**6`
# gives the intended huge inverse-regularization strength.
logreg = LogisticRegression(C=10**6)
logreg.fit(df_x,y);

# + deletable=true editable=true
print('Accuracy on the training set:')
print(str(np.round(logreg.score(df_x,y),4)*100)+'%')

# + deletable=true editable=true
# Manual accuracy test
sum(logreg.predict(df_x)==y)/len(y)

# + deletable=true editable=true
# Plot decision boundary, calculate Confusion Matrix, False Positives, and ROC statistics
def plot_boundary(X,Y,prob_class1=None):
    """Plot the fitted `logreg` decision boundary over `X`, print accuracy /
    false-positive / ROC statistics against `Y`, and return the confusion
    matrix as a labeled DataFrame.

    Parameters
    ----------
    X : pandas.DataFrame
        Two predictor columns (the same data `logreg` was fitted on).
    Y : array-like
        True binary labels (0/1).
    prob_class1 : float or None
        Custom probability threshold for predicting class 1. When None,
        the model's default 0.5 threshold (logreg.predict) is used.

    NOTE: reads the module-level `logreg`; must be called after fitting.
    FIX: statistics now consistently use the `Y` parameter instead of the
    module-level global `y` (identical for all existing call sites, which
    pass `y`).
    """
    X = X.values
    h = .01  # step size in the mesh

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    if prob_class1:
        Z = logreg.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        Z = Z[:,1]>prob_class1
        Z = Z.astype(int)
    else:
        Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(6, 4),dpi=150)
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y,alpha=.8, edgecolors='k', cmap=plt.cm.Paired)
    plt.xlabel('Predictor A')
    plt.ylabel('Predictor B')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title('Blue = 0 (No, Negative), Orange = 1 (Yes, Positive)')
    plt.suptitle('Can I eat this Flower\n')
    plt.show()

    # False-positive ratio
    if prob_class1:
        pred_prob = logreg.predict_proba(X)
        pred = pred_prob[:,1]>prob_class1
        pred = pred.astype(int)
    else:
        pred_prob = logreg.predict_proba(X)
        pred = logreg.predict(X)

    conf_mat = confusion_matrix(Y, pred)
    fpr, tpr, thresholds = roc_curve(Y, pred_prob[:,1], pos_label=1)

    ## ---- This can be left out ---
    if prob_class1:
        def evaluate_threshold(threshold):
            print('Sensitivity:', tpr[thresholds > threshold][-1])
            print('Specificity:', 1 - fpr[thresholds > threshold][-1])
            print('ROC AUC: ' + str(roc_auc_score(Y, pred_prob[:,1])))
            print()
        evaluate_threshold(prob_class1)
    ## ---- This can be left out ----

    print('Prediction Accuracy = ' + str(sum(pred==Y)/len(Y)))
    print('Number of False Positives (misclassified blue points): ' +str(conf_mat[0,1]))
    print()
    print('Confusion Matrix:')
    return pd.DataFrame(conf_mat,columns=\
                        ['Pred Negative','Pred Positive'],index=['Actual Negative','Actual Positive'])

# + deletable=true editable=true
# Plot Decision Boundary
# (The color of the circles indicate the true class of the data samples)
# The background color indicates the prediction regions for the two classes
#
# A blue circle on the orange area indicates a False Positive prediction
plot_boundary(df_x,y) # threshold = 0.5 for the logit / sigmoid function

# + deletable=true editable=true
# Same as above, only showing that prob_class1 works
plot_boundary(df_x,y,prob_class1=0.5)

# + [markdown] deletable=true editable=true
# # Changing the Threshold to 0.66
#
# ### Now we have to be over 66% certain that we can eat the flower (instead of 50%)
#
# ##### (Note that the prediction accuracy increases! However, that might not be optimal for out-of-sample data)
#
# .
#
# <div class='alert alert-warning'>We would like to <b>minimize the risk of eating a toxic flower</b> (no False Positives, i.e. `correctly classify all blue dots`) </div>

# + deletable=true editable=true
# Minimize number of False Positives
plot_boundary(df_x,y,prob_class1=0.66)

# + deletable=true editable=true
x-archive-temp/m240-theory-loss-vs-risk/minimize-false-positives.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="Gwo9bpaVgxXF" colab_type="text" # ##Setup # # You will need to make a copy of this notebook in your Google Drive before you can edit the homework files. You can do so with **File &rarr; Save a copy in Drive**. # + id="6CAdiyTKi4Se" colab_type="code" cellView="form" colab={} #@title mount your Google Drive #@markdown Your work will be stored in a folder called `cs285_f2020` by default to prevent Colab instance timeouts from deleting your edits. import os from google.colab import drive drive.mount('/content/gdrive') # + id="BKE5nA1Fgwwy" colab_type="code" cellView="form" colab={} #@title set up mount symlink DRIVE_PATH = '/content/gdrive/My\ Drive/cs285_f2020' DRIVE_PYTHON_PATH = DRIVE_PATH.replace('\\', '') if not os.path.exists(DRIVE_PYTHON_PATH): # %mkdir $DRIVE_PATH ## the space in `My Drive` causes some issues, ## make a symlink to avoid this SYM_PATH = '/content/cs285_f2020' if not os.path.exists(SYM_PATH): # !ln -s $DRIVE_PATH $SYM_PATH # + id="9FGK4kbpg3iP" colab_type="code" cellView="form" colab={} #@title apt install requirements #@markdown Run each section with Shift+Enter #@markdown Double-click on section headers to show code. 
# !apt update # !apt install -y --no-install-recommends \ # build-essential \ # curl \ # git \ # gnupg2 \ # make \ # cmake \ # ffmpeg \ # swig \ # libz-dev \ # unzip \ # zlib1g-dev \ # libglfw3 \ # libglfw3-dev \ # libxrandr2 \ # libxinerama-dev \ # libxi6 \ # libxcursor-dev \ # libgl1-mesa-dev \ # libgl1-mesa-glx \ # libglew-dev \ # libosmesa6-dev \ # lsb-release \ # ack-grep \ # patchelf \ # wget \ # xpra \ # xserver-xorg-dev \ # xvfb \ # python-opengl \ # ffmpeg > /dev/null 2>&1 # + id="YNGuuABeg99q" colab_type="code" cellView="form" colab={} #@title download mujoco MJC_PATH = '{}/mujoco'.format(SYM_PATH) if not os.path.exists(MJC_PATH): # %mkdir $MJC_PATH # %cd $MJC_PATH if not os.path.exists(os.path.join(MJC_PATH, 'mujoco200')): # !wget -q https://www.roboti.us/download/mujoco200_linux.zip # !unzip -q mujoco200_linux.zip # %mv mujoco200_linux mujoco200 # %rm mujoco200_linux.zip # + id="y0MiuTJ4hT5z" colab_type="code" cellView="form" colab={} #@title update mujoco paths import os os.environ['LD_LIBRARY_PATH'] += ':{}/mujoco200/bin'.format(MJC_PATH) os.environ['MUJOCO_PY_MUJOCO_PATH'] = '{}/mujoco200'.format(MJC_PATH) os.environ['MUJOCO_PY_MJKEY_PATH'] = '{}/mjkey.txt'.format(MJC_PATH) ## installation on colab does not find *.so files ## in LD_LIBRARY_PATH, copy over manually instead # !cp $MJC_PATH/mujoco200/bin/*.so /usr/lib/x86_64-linux-gnu/ # + [markdown] id="Xd-g5Z7xhWVt" colab_type="text" # Ensure your `mjkey.txt` is in /content/cs285_f2020/mujoco before this step # + id="-p6i5TqAhW4a" colab_type="code" cellView="form" colab={} #@title clone and install mujoco-py # %cd $MJC_PATH if not os.path.exists('mujoco-py'): # !git clone https://github.com/openai/mujoco-py.git # %cd mujoco-py # %pip install -e . 
## cythonize at the first import import mujoco_py # + id="GQvbeuV1hi5I" colab_type="code" cellView="form" colab={} #@title clone homework repo #@markdown Note that this is the same codebase from homework 1, #@markdown so you may need to move your old `homework_fall2020` #@markdown folder in order to clone the repo again. #@markdown **Don't delete your old work though!** #@markdown You will need it for this assignment. # %cd $SYM_PATH # !git clone https://github.com/berkeleydeeprlcourse/homework_fall2020.git # %cd homework_fall2020/hw2 # %pip install -r requirements_colab.txt -f https://download.pytorch.org/whl/torch_stable.html # %pip install -e . # + id="noinfUbHiHW2" colab_type="code" cellView="form" colab={} #@title set up virtual display from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() # + id="COqsZLeliU9Y" colab_type="code" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 438} outputId="55f7feb0-e730-4789-c73d-3ec695b48757" #@title test virtual display #@markdown If you see a video of a four-legged ant fumbling about, setup is complete! import gym import matplotlib matplotlib.use('Agg') from cs285.infrastructure.colab_utils import ( wrap_env, show_video ) env = wrap_env(gym.make("Ant-v2")) observation = env.reset() for i in range(100): env.render(mode='rgb_array') obs, rew, term, _ = env.step(env.action_space.sample() ) if term: break; env.close() print('Loading video...') show_video() # + [markdown] id="ygs968BbiYHr" colab_type="text" # ## Editing Code # # To edit code, click the folder icon on the left menu. Navigate to the corresponding file (`cs285_f2020/...`). Double click a file to open an editor. There is a timeout of about ~12 hours with Colab while it is active (and less if you close your browser window). 
We sync your edits to Google Drive so that you won't lose your work in the event of an instance timeout, but you will need to re-mount your Google Drive and re-install packages with every new instance. # + [markdown] id="9qUmV93fif6S" colab_type="text" # ## Run Policy Gradients # + id="lN-gZkqiijnR" colab_type="code" cellView="form" colab={} #@title imports import os import time from cs285.infrastructure.rl_trainer import RL_Trainer from cs285.agents.pg_agent import PGAgent # %load_ext autoreload # %autoreload 2 # + id="Q6NaOWhOinnU" colab_type="code" cellView="both" colab={} #@title runtime arguments class Args: def __getitem__(self, key): return getattr(self, key) def __setitem__(self, key, val): setattr(self, key, val) def __contains__(self, key): return hasattr(self, key) env_name = 'CartPole-v0' #@param exp_name = 'q1_sb_rtg_na' #@param #@markdown main parameters of interest n_iter = 100 #@param {type: "integer"} ## PDF will tell you how to set ep_len ## and discount for each environment ep_len = 200 #@param {type: "integer"} discount = 0.95 #@param {type: "number"} reward_to_go = True #@param {type: "boolean"} nn_baseline = False #@param {type: "boolean"} dont_standardize_advantages = False #@param {type: "boolean"} #@markdown batches and steps batch_size = 1000 #@param {type: "integer"} eval_batch_size = 400 #@param {type: "integer"} num_agent_train_steps_per_iter = 1 #@param {type: "integer"} learning_rate = 5e-3 #@param {type: "number"} #@markdown MLP parameters n_layers = 2 #@param {type: "integer"} size = 64 #@param {type: "integer"} #@markdown system save_params = False #@param {type: "boolean"} no_gpu = False #@param {type: "boolean"} which_gpu = 0 #@param {type: "integer"} seed = 1 #@param {type: "integer"} #@markdown logging ## default is to not log video so ## that logs are small enough to be ## uploaded to gradscope video_log_freq = -1#@param {type: "integer"} scalar_log_freq = 1#@param {type: "integer"} args = Args() ## ensure compatibility with 
hw1 code args['train_batch_size'] = args['batch_size'] if args['video_log_freq'] > 0: import warnings warnings.warn( '''\nLogging videos will make eventfiles too''' '''\nlarge for the autograder. Set video_log_freq = -1''' '''\nfor the runs you intend to submit.''') # + id="eScWwHhnsYkd" colab_type="code" cellView="form" colab={} #@title create directory for logging data_path = '''/content/cs285_f2020/''' \ '''homework_fall2020/hw2/data''' if not (os.path.exists(data_path)): os.makedirs(data_path) logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S") logdir = os.path.join(data_path, logdir) args['logdir'] = logdir if not(os.path.exists(logdir)): os.makedirs(logdir) # + id="aljzrLdAsvNu" colab_type="code" colab={} ## define policy gradient trainer class PG_Trainer(object): def __init__(self, params): ##################### ## SET AGENT PARAMS ##################### computation_graph_args = { 'n_layers': params['n_layers'], 'size': params['size'], 'learning_rate': params['learning_rate'], } estimate_advantage_args = { 'gamma': params['discount'], 'standardize_advantages': not(params['dont_standardize_advantages']), 'reward_to_go': params['reward_to_go'], 'nn_baseline': params['nn_baseline'], } train_args = { 'num_agent_train_steps_per_iter': params['num_agent_train_steps_per_iter'], } agent_params = {**computation_graph_args, **estimate_advantage_args, **train_args} self.params = params self.params['agent_class'] = PGAgent self.params['agent_params'] = agent_params self.params['batch_size_initial'] = self.params['batch_size'] ################ ## RL TRAINER ################ self.rl_trainer = RL_Trainer(self.params) def run_training_loop(self): self.rl_trainer.run_training_loop( self.params['n_iter'], collect_policy = self.rl_trainer.agent.actor, eval_policy = self.rl_trainer.agent.actor, ) # + id="j2rCuQsRsd3N" colab_type="code" colab={} ## run training print(args.logdir) trainer = PG_Trainer(args) trainer.run_training_loop() # + 
id="km7LlYvhqKTl" colab_type="code" colab={} #@markdown You can visualize your runs with tensorboard from within the notebook ## requires tensorflow==2.3.0 # %load_ext tensorboard # %tensorboard --logdir /content/cs285_f2020/homework_fall2020/hw2/data
hw2/cs285/scripts/run_hw2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day1_of_10-Days_Challenge_Islam_Ali

# # Data Loading and Manipulation

# ## Kindly load the las file of F02-1_logs.las well from the data folder
# ## Answer the following questions:

# >1. How many well logs in the file
# 2. How many data points (observations) in the well logs.
# 3. Is there any null values? how much (%)/ well-log
# 4. Is there a relationship between AI and PHIE?

# ### You can use the following libraries for your assignment:
# > Numpy, Pandas, Matplotlib, Seaborn, LASIO

# +
# Importing libraries
import pandas as pd
import numpy as np
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sb
import lasio
# -

# Read the LAS file.
# FIX: the original path used a single backslash ("Dutch_F3_Logs\F02-1_logs.las").
# That only worked because "\F" happens not to be a recognised escape sequence;
# a file starting with e.g. "f" or "t" would be silently corrupted, and the
# path is not portable off Windows.  Forward slashes work on every platform.
F02 = lasio.read("Dutch_F3_Logs/F02-1_logs.las")

# 1. How many well logs in the file?
df = F02.df()
# shape[1] is the number of curve columns (clearer than the original shape[-1],
# which is equivalent for a 2-D DataFrame)
print('The number of the logs: ', df.shape[1])

# 2. How many data points (observations) in the well-logs?
print('The data points in the well-logs: ', df.shape[0])

# 3. Is there any null values? how much (%)/ well-log
df.info()
print('The percentages of null values: ')
# last expression in the cell -> displayed by the notebook
df.isnull().sum() / df.shape[0] * 100

# 4. Is there a relationship between AI and PHIE?
# A correlation heat map gives a quick overview across all curves.
fig, ax = plt.subplots(figsize=(14, 7))
sb.heatmap(df.corr(), annot=True, linewidths=0.1);

# 4. Is there a relationship between AI and PHIE?
# Crossplot the AI vs PHIE (points coloured by GR)
plt.figure(figsize=(10, 5))
sb.regplot(x=df.AI, y=df.PHIE)
plt.scatter(df.AI, df.PHIE, c=df.GR, cmap='rainbow_r', alpha=0.5)
plt.colorbar()
plt.title('AI vs PHIE plot ')
plt.xlabel('AI')
plt.ylabel('PHIE')
plt.grid();
Day1_of_10_IA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import pingouin as pg import seaborn as sns import os def split_data_in_groups(df, data_col, x_split, x_order, hue_split, hue_order): splitted_groups = {} for x_group_id in x_order: if x_group_id not in splitted_groups.keys(): splitted_groups[x_group_id] = {} for hue_group_id in hue_order: if df.loc[(df[x_split] == x_group_id) & (df[hue_split] == hue_group_id), data_col].shape[0] > 0: if hue_group_id not in splitted_groups[x_group_id].keys(): splitted_groups[x_group_id][hue_group_id] = {'data': np.array([]), 'stats': None, 'p-val': None, 'stars': None} data = df.loc[(df[x_split] == x_group_id) & (df[hue_split] == hue_group_id), data_col].values splitted_groups[x_group_id][hue_group_id]['data'] = np.append(splitted_groups[x_group_id][hue_group_id]['data'], data) return splitted_groups def one_sample_tests_per_group(splitted_groups, fixed_value): for first_lvl_key in splitted_groups.keys(): for second_lvl_key in splitted_groups[first_lvl_key].keys(): df_test_results = pg.wilcoxon(splitted_groups[first_lvl_key][second_lvl_key]['data'] - fixed_value, correction = 'auto') pval = df_test_results.loc['Wilcoxon', 'p-val'] if pval <= 0.001: stars = '***' elif pval <= 0.01: stars = '**' elif pval <= 0.05: stars = '*' else: stars = 'n.s.' 
splitted_groups[first_lvl_key][second_lvl_key]['stats'] = df_test_results.copy() splitted_groups[first_lvl_key][second_lvl_key]['p-val'] = pval splitted_groups[first_lvl_key][second_lvl_key]['stars'] = stars return splitted_groups def annotations(stat_results, hue_order): annotations = {} group_id = 0 l_first_lvl_keys = list(stat_results.keys()) for first_lvl_key in l_first_lvl_keys: group_x_coord = l_first_lvl_keys.index(first_lvl_key) for second_lvl_key in stat_results[first_lvl_key].keys(): annotations[group_id] = {'p-val': stat_results[first_lvl_key][second_lvl_key]['p-val'], 'stars': stat_results[first_lvl_key][second_lvl_key]['stars']} if len(hue_order) == 2: if hue_order.index(second_lvl_key) == 0: annotations[group_id]['x'] = group_x_coord - 0.2 elif hue_order.index(second_lvl_key) == 1: annotations[group_id]['x'] = group_x_coord + 0.2 if len(hue_order) == 3: if hue_order.index(second_lvl_key) == 0: annotations[group_id]['x'] = group_x_coord - 0.25 elif hue_order.index(second_lvl_key) == 1: annotations[group_id]['x'] = group_x_coord elif hue_order.index(second_lvl_key) == 2: annotations[group_id]['x'] = group_x_coord + 0.25 if len(hue_order) == 4: if hue_order.index(second_lvl_key) == 0: annotations[group_id]['x'] = group_x_coord - 0.3 elif hue_order.index(second_lvl_key) == 1: annotations[group_id]['x'] = group_x_coord - 0.1 elif hue_order.index(second_lvl_key) == 2: annotations[group_id]['x'] = group_x_coord + 0.1 elif hue_order.index(second_lvl_key) == 3: annotations[group_id]['x'] = group_x_coord + 0.3 group_id = group_id + 1 return annotations # + def plot_annotated_data(df, data_col, x_split, x_order, hue_split, hue_order, fixed_value, d_annotations, show): fig = plt.figure(figsize=(8, 5), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data = df, x = x_split, y = data_col, order = x_order, hue = hue_split, hue_order = hue_order, dodge = True, s=7) ylim_lower, ylim_upper = 
ax.get_ylim() y_axis_span = ylim_upper - ylim_lower y_stars = df[data_col].max() + y_axis_span*0.15 y_pval = df[data_col].max() + y_axis_span*0.1 ylim_upper_new = ylim_upper + y_axis_span*0.2 for key in d_annotations.keys(): plt.text(d_annotations[key]['x'], y_stars, d_annotations[key]['stars'], ha='center', va='bottom', color='k') plt.text(d_annotations[key]['x'], y_pval, '({})'.format(str(round(d_annotations[key]['p-val'], 2))), ha='center', va='bottom', color='k') xlim_lower, xlim_upper = ax.get_xlim() plt.hlines(fixed_value, xlim_lower, xlim_upper, colors='k') plt.vlines(0.5, ylim_lower, ylim_upper_new, colors='grey', linestyle='dashed') plt.vlines(1.5, ylim_lower, ylim_upper_new, colors='grey', linestyle='dashed') plt.ylim(ylim_lower, ylim_upper_new) plt.legend(loc='center left', title=hue_split, bbox_to_anchor=(1, 0.5), frameon=False) if fixed_value == 1: norm = 'normalized' plt.ylabel('{} HR [mean stim HR / mean baseline HR]'.format(norm)) else: norm = 'delta' plt.ylabel('{} HR [mean stim HR - mean baseline HR]'.format(norm)) group_id = df['group_id'].unique()[0] plt.title('{} - {}'.format(group_id, norm), pad=20) plt.tight_layout() filename = os.getcwd() + '/Plots/{}_{}_per_{}_{}.png'.format(group_id, hue_split, x_split, norm) plt.savefig(filename, dpi=300) if show: plt.show() else: plt.close() # + tags=[] l_filenames = ['ResultsECG_Vglut1#910.xlsx', 'ResultsECG_Vglut2#1213.xlsx', 'ResultsECG_Chat#60-63.xlsx','Summary_median HCN4#123132.xlsx'] #l_filenames = ['Summary_median HCN4#123132.xlsx'] show = True for filename in l_filenames: if filename == 'ResultsECG_Vglut2#1213.xlsx': stim_order = ['10Hz_10ms_30s', '20Hz_10ms_30s', 'Cst_30s'] elif filename == 'ResultsECG_Vglut1#910.xlsx': stim_order = ['10Hz_10ms_30s', '20Hz_10ms_30s', 'cst_30s'] elif filename == 'ResultsECG_Chat#60-63.xlsx': stim_order = ['10Hz_10ms_30s', '50Hz_15ms_30s', 'cst_30s'] elif filename == 'Summary_median HCN4#123132.xlsx': stim_order = ['15Hz_15ms_30s', '15Hz_30ms_30s', 'Cst_30s'] 
stim_split = 'stimulus_id' session_split = 'session_id' session_order = [1, 2, 3] position_split = 'position_id' position_order = [1, 2, 3] subject_split = 'subject_id' subject_order = None # x_split, x_order, hue_split, hue_order l_params = [(stim_split, stim_order, position_split, position_order), (stim_split, stim_order, session_split, session_order), (position_split, position_order, stim_split, stim_order), (position_split, position_order, session_split, session_order), # Subject hue_splits: (stim_split, stim_order, subject_split, subject_order), (position_split, position_order, subject_split, subject_order), (session_split, session_order, subject_split, subject_order)] filedir = '/home/ds/DCL/Cardiac_opto/Compare_experimental_conditions_data/' d_dfs = pd.read_excel(filedir + filename, sheet_name=None, index_col=0) key_overview, key_norm, key_delta = d_dfs.keys() for df_key in [key_norm, key_delta]: if df_key == key_norm: fixed_value = 1 else: fixed_value = 0 df = d_dfs[df_key] data_col = list(df.columns)[0] for params in l_params: x_split, x_order, hue_split, hue_order = params if hue_split == 'subject_id': hue_order = list(df[hue_split].unique()) splitted_groups = split_data_in_groups(df, data_col, x_split, x_order, hue_split, hue_order) stat_results = one_sample_tests_per_group(splitted_groups, fixed_value) d_annotations = annotations(stat_results, hue_order) plot_annotated_data(df, data_col, x_split, x_order, hue_split, hue_order, fixed_value, d_annotations, show) # - # # More detailed look at responders # + l_filenames = ['ResultsECG_Vglut1#910.xlsx', 'ResultsECG_Chat#60-63.xlsx', 'ResultsECG_Vglut2#1213.xlsx'] for filename in l_filenames: if filename == 'ResultsECG_Vglut1#910.xlsx': subject_id = '#9' elif filename == 'ResultsECG_Chat#60-63.xlsx': subject_id = '#62' elif filename == 'ResultsECG_Vglut2#1213.xlsx': subject_id = '#13' filedir = '/home/ds/DCL/Cardiac_opto/Compare_experimental_conditions_data/' d_dfs = pd.read_excel(filedir + filename, 
sheet_name=None, index_col=0) key_overview, key_norm, key_delta = d_dfs.keys() for key in [key_norm, key_delta]: df_temp = d_dfs[key].loc[d_dfs[key]['subject_id'] == subject_id] data_col = df_temp.columns[0] group_id = df_temp.group_id.unique()[0] fig = plt.figure(figsize=(8, 5), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data=df_temp, x='stimulus_id', y=data_col, hue='position_id', dodge=True, s=7) if key == key_norm: fixed_val = 1 norm = 'normalized' plt.ylabel('{} HR [mean stim HR / mean baseline HR]'.format(norm)) elif key == key_delta: fixed_val = 0 norm = 'delta' plt.ylabel('{} HR [mean stim HR - mean baseline HR]'.format(norm)) x0, x1 = ax.get_xlim() y0, y1 = ax.get_ylim() plt.ylim(y0, y1) plt.hlines(fixed_val, x0, x1, color='k') plt.vlines(0.5, y0, y1, color='gray', linestyle='dashed') plt.vlines(1.5, y0, y1, color='gray', linestyle='dashed') plt.legend(loc='center left', title='position_id', bbox_to_anchor=(1, 0.5), frameon=False) plt.title('{}: mouse {} - {}'.format(group_id, subject_id, norm)) plt.tight_layout() img_filename = os.getcwd() + '/Plots/Responders/{}_mouse{}_positionID_per_stimulusID_{}.png'.format(group_id, subject_id, norm) plt.savefig(img_filename, dpi=300) plt.show() print('\n \n') # - # # Varianzanalyse # + l_filenames = ['ResultsECG_Vglut1#910.xlsx', 'ResultsECG_Vglut2#1213.xlsx', 'ResultsECG_Chat#60-63.xlsx','Summary_median HCN4#123132.xlsx'] l_norm_dfs = [] l_delta_dfs = [] for filename in l_filenames: filedir = '/home/ds/DCL/Cardiac_opto/Compare_experimental_conditions_data/' d_dfs = pd.read_excel(filedir + filename, sheet_name=None, index_col=0) key_overview, key_norm, key_delta = d_dfs.keys() l_cols = list(d_dfs[key_norm].columns) l_cols[0] = 'data' d_dfs[key_norm].columns = l_cols d_dfs[key_delta].columns = l_cols l_norm_dfs.append(d_dfs[key_norm]) l_delta_dfs.append(d_dfs[key_delta]) df_norm = pd.concat(l_norm_dfs, axis=0) df_delta = 
pd.concat(l_delta_dfs, axis=0) # + fig = plt.figure(figsize=(8, 6), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data=df_delta, x='position_id', y='data', s=7) x0, x1 = ax.get_xlim() y0, y1 = ax.get_ylim() plt.ylim(y0, y1) plt.ylabel('delta HR [mean stim HR - mean baseline HR]') plt.hlines(0, x0, x1, color='k') plt.vlines(0.5, y0, y1, color='gray', linestyle='dashed') plt.vlines(1.5, y0, y1, color='gray', linestyle='dashed') plt.title('all data - delta') plt.tight_layout() img_filename = os.getcwd() + '/Plots/Variance/all_data_per_positionID_delta_stripplot.png' plt.savefig(img_filename, dpi=300) plt.show() # + fig = plt.figure(figsize=(8, 6), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data=df_delta, x='position_id', y='data', color='k', alpha=0.5, s=7) sns.violinplot(data=df_delta, x='position_id', y='data') x0, x1 = ax.get_xlim() y0, y1 = ax.get_ylim() plt.ylim(y0, y1) plt.ylabel('delta HR [mean stim HR - mean baseline HR]') plt.hlines(0, x0, x1, color='k') plt.vlines(0.5, y0, y1, color='gray', linestyle='dashed') plt.vlines(1.5, y0, y1, color='gray', linestyle='dashed') plt.title('all data - delta') plt.tight_layout() img_filename = os.getcwd() + '/Plots/Variance/all_data_per_positionID_delta_violinplot.png' plt.savefig(img_filename, dpi=300) plt.show() # + fig = plt.figure(figsize=(8, 6), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data=df_delta, x='position_id', y='data', hue='group_id', dodge=True, s=7) x0, x1 = ax.get_xlim() y0, y1 = ax.get_ylim() plt.ylim(y0, y1) plt.ylabel('delta HR [mean stim HR - mean baseline HR]') plt.hlines(0, x0, x1, color='k') plt.vlines(0.5, y0, y1, color='gray', linestyle='dashed') plt.vlines(1.5, y0, y1, color='gray', linestyle='dashed') plt.legend(loc='center left', title='group_id', bbox_to_anchor=(1, 
0.5), frameon=False) plt.title('all data - delta') plt.tight_layout() img_filename = os.getcwd() + '/Plots/Variance/all_data_groupID_per_positionID_delta_stripplot.png' plt.savefig(img_filename, dpi=300) plt.show() # + fig = plt.figure(figsize=(8, 6), facecolor='white') ax = fig.add_subplot() for axis in ['top', 'right']: ax.spines[axis].set_visible(False) sns.stripplot(data=df_delta, x='position_id', y='data', hue='subject_id', dodge=True, s=7) x0, x1 = ax.get_xlim() y0, y1 = ax.get_ylim() plt.ylim(y0, y1) plt.ylabel('delta HR [mean stim HR - mean baseline HR]') plt.hlines(0, x0, x1, color='k') plt.vlines(0.5, y0, y1, color='gray', linestyle='dashed') plt.vlines(1.5, y0, y1, color='gray', linestyle='dashed') plt.legend(loc='center left', title='subject_id', bbox_to_anchor=(1, 0.5), frameon=False) plt.title('all data - delta') plt.tight_layout() img_filename = os.getcwd() + '/Plots/Variance/all_data_subjectID_per_positionID_delta_stripplot.png' plt.savefig(img_filename, dpi=300) plt.show() # - # ## Daten von #60 Chat-Cre für Stim 2 fehlen # ## Teildaten von #62 Chat-Cre von Session 2 fehlen annotations = {} group_id = 0 l_first_lvl_keys = list(stat_results.keys()) l_second_lvl_keys = [] for first_lvl_key in l_first_lvl_keys: l_temp = [elem for elem in stat_results[first_lvl_key].keys() if elem not in l_second_lvl_keys] if len(l_temp) > 0: for key in l_temp: l_second_lvl_keys.append(key) l_second_lvl_keys # + splitted_groups = split_data_in_groups(data_norm, 'data (nomalized)', 'stimulus_id', ['10Hz_10ms_30s', '20Hz_10ms_30s', 'Cst_30s'], 'session_id', [1, 2]) # - # + fig = plt.figure(figsize=(10, 7), facecolor='white') ax = fig.add_subplot() data_norm = d_dfs['delta'].copy() sns.stripplot(data=data_norm, x='stimulus_id', y='data (nomalized)', hue='position_id', dodge=True, ax = ax) y = data_norm['data (nomalized)'].max() * 1.05 y_lim_upper = round(data_norm['data (nomalized)'].max() * 1.1, 0) y_lim_lower = round(data_norm['data (nomalized)'].min() - data_norm['data 
(nomalized)'].min() * 0.05, 0) for key in d_annotations.keys(): plt.text(d_annotations[key]['x'], y, d_annotations[key]['stars'], ha='center', va='bottom', color='k') plt.ylim(y_lim_lower,y_lim_upper) plt.legend(loc='center left', title='position_id', bbox_to_anchor=(1, 0.5), frameon=False) plt.show() # - splitted_groups stat_results = one_sample_tests_per_group(splitted_groups, 0) stat_results d_annotations = annotations(stat_results) d_annotations
cardio/Compare_experimental_conditions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Setup
#
# To run the example, a file named `secret.py` must be created in the notebooks
# folder with the following content:
#
# ```python
# # Configuration settings
#
# # SLIPO workbench installation
# BASE_URL = 'https://app.dev.slipo.eu'
#
# # SLIPO API key
# API_KEY = ''
# ```
#
# The `API_KEY` value must be set to a valid SLIPO Application Key. The file must be imported before creating a new context:
#
# ```python
# from secret import BASE_URL, API_KEY
# ```
#
#
# # Requirements
#
# The following modules must be installed:
#
# - descartes
# - geopandas
# - urbanaccess
# - pandana
# - slipo-loci
#
# For Ubuntu 18.04, python3-rtree package must be installed e.g.
#
# `sudo apt install python3-rtree`

# +
# Create new context
from slipoframes.context import SlipoContext

from secret import BASE_URL, API_KEY

ctx = SlipoContext(
    base_url = BASE_URL,
    requires_ssl = False,
    api_key = API_KEY
)
# -

# # Data loading
#
# In this section, we will query the SLIPO service for the most recent workflow executions. Then we will copy the output file of the last API call from the example `Demo_02_Workflow`.

ctx.process_query(pageIndex=0, pageSize=5)

# Replace process id value from the most recent export operation
export1 = ctx.process_status(461, 1)

export1.output()

ctx.process_file_download(export1.output(), target='./output/exported-data.zip', overwrite=True)

# # Data Processing
#
# Once data have been saved locally, analysis can be performed using
# tools like pandas DataFrames, geopandas GeoDataFrames or other libraries
# like https://github.com/paulhoule/gastrodon for RDF.

# +
# Unzip output file
import os
import zipfile

with zipfile.ZipFile('./output/exported-data.zip', 'r') as zip_ref:
    zip_ref.extractall("./output/")

os.rename('./output/points.csv', './output/Fuel_Berlin.csv')

# +
# Load CSV data in a DataFrame
import pandas as pd

# FIX: the error_bad_lines keyword was deprecated in pandas 1.3 and removed
# in pandas 2.0; on_bad_lines='skip' is the documented replacement with the
# same behaviour (malformed rows are dropped).
pois = pd.read_csv('./output/Fuel_Berlin.csv', delimiter='|', on_bad_lines='skip')

pois.head()
# -

# Since shapely does not support Extended Well-Known Text, we update the geometry and remove the SRID value

# each value looks like "SRID=4326;POINT (...)" -- keep only the WKT part
pois['the_geom'] = pois['the_geom'].apply(lambda x: x.split(';')[1])
pois.head()

# +
# Create a GeoDataFrame
import geopandas
from shapely import wkt

pois['the_geom'] = pois['the_geom'].apply(wkt.loads)
gdf = geopandas.GeoDataFrame(pois, geometry='the_geom')
print(gdf.head())

# +
# %matplotlib inline
import matplotlib.pyplot as plt

world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'))

# We restrict to Germany.
# (FIX: the original comment said "Romania", but the code plots Germany.)
ax = world[world.name == 'Germany'].plot(
    color='white', edgecolor='black')

# We can now plot our GeoDataFrame.
gdf.plot(ax=ax, color='blue')

plt.show()
# -

import loci as lc
from loci import io
from loci import analytics
from loci import plots

pois = lc.io.read_poi_csv(input_file='./output/Fuel_Berlin.csv',
                          col_id='id',
                          col_name='name',
                          col_lon='lon',
                          col_lat='lat',
                          col_kwds='category',
                          col_sep='|',
                          kwds_sep=',',
                          source_crs='EPSG:4326',
                          target_crs='EPSG:4326',
                          keep_other_cols=False)

pois.head(10)

# Rename category column
pois.rename(columns={'category': 'kwds'}, inplace=True)

m = lc.plots.map_points(pois.sample(100), show_bbox=True)
m

kf = lc.analytics.kwds_freq(pois)
kf

lc.plots.barchart(kf, plot_title='Top Keywords', x_axis_label='Keywords', y_axis_label='Frequency')

lc.plots.plot_wordcloud(pois)

pois_filtered = lc.analytics.filter_by_kwd(pois, 'FUEL')

lc.plots.heatmap(pois_filtered, radius=12)
notebooks/Demo_03_Data_analysis_using_loci.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline import numpy as np import matplotlib.pyplot as plt # # SciPy # # SciPy is a collection of numerical algorithms with python interfaces. In many cases, these interfaces are wrappers around standard numerical libraries that have been developed in the community and are used with other languages. Usually detailed references are available to explain the implementation. # # There are many subpackages. Generally, you load the subpackages separately, e.g. # # ``` # from scipy import linalg, optimize # ``` # then you have access to the methods in those namespaces # # # Numerical Methods # # One thing to keep in mind -- all numerical methods have strengths and weaknesses, and make assumptions. You should always do some research into the method to understand what it is doing. # # It is also always a good idea to run a new method on some test where you know the answer, to make sure it is behaving as expected. # # Integration # we'll do some integrals of the form # # $$I = \int_a^b f(x) dx$$ # # We can imagine two situations: # * our function $f(x)$ is given by an analytic expression. This gives us the freedom to pick our integration points, and in general can allow us to optimize our result and get high accuracy # * our function $f(x)$ is defined on at a set of (possibly regular spaced) points. # # In numerical analysis, the term _quadrature_ is used to describe any integration method that represents the integral as the weighted sum of a discrete number of points. from scipy import integrate help(integrate) # quad is the basic integrator for a general (not sampled) function. It uses a general-interface from the Fortran package QUADPACK (QAGS or QAGI). 
It will return the integral in an interval and an estimate of the error in the approximation def f(x): return np.sin(x)**2 I, err = integrate.quad(f, 0.0, 2.0*np.pi, epsabs=1.e-14) print(I) print(err) help(integrate.quad) # sometimes our integrand function takes optional arguments def g(x, A, sigma): return A*np.exp(-x**2/sigma**2) I, err = integrate.quad(g, -1.0, 1.0, args=(1.0, 2.0)) print(I, err) # numpy defines the inf quantity which can be used in the integration limits. We can integrate a Gaussian (we know the answer is sqrt(pi) # # Note: behind the scenes, what the integration function does is do a variable transform like: $t = 1/x$. This works when one limit is $\infty$, giving # # $$\int_a^b f(x) dx = \int_{1/b}^{1/a} \frac{1}{t^2} f\left (\frac{1}{t}\right) dt$$ I, err = integrate.quad(g, -np.inf, np.inf, args=(1.0, 1.0)) print(I, err) # ### integration of a sampled function # here we integrate a function that is defined only at a sequece of points. Recall that Simpson's rule will use piecewise parabola data. Let's compute # # $$I = \int_0^{2\pi} f(x_i) dx$$ # # with $x_i = 0, \ldots, 2\pi$ defined at $N$ points # + N = 17 x = np.linspace(0.0, 2.0*np.pi, N, endpoint=True) y = np.sin(x)**2 I = integrate.simps(y, x) print(I) # - # Romberg integration is specific to equally-spaced samples, where $N = 2^k + 1$ and can be more converge faster (it uses extrapolation of coarser integration results to achieve higher accuracy) # + N = 17 x = np.linspace(0.0, 2.0*np.pi, N, endpoint=True) y = np.sin(x)**2 I = integrate.romb(y, dx=x[1]-x[0]) print(I) # - # # Root Finding # Often we need to find a value of a variable that zeros a function -- this is _root finding_. Sometimes, this is a multidimensional problem. # The `brentq()` routine offers a very robust method for find roots from a scalar function. You do need to provide an interval that bounds the root. 
# $f(x) = \frac{x e^x}{e^x - 1} - 5$

# +
import scipy.optimize as optimize

def f(x):
    # this is the non-linear equation that comes up in deriving Wien's
    # displacement law for blackbody radiation
    return (x*np.exp(x)/(np.exp(x) - 1.0) - 5.0)

# brentq() needs a bracketing interval [a, b] with a sign change in f
root, r = optimize.brentq(f, 0.1, 10.0, full_output=True)
print(root)
print(r.converged)
# -

x = np.linspace(0.1, 10.0, 1000)
plt.plot(x, f(x))
plt.plot(np.array([root]), np.array([f(root)]), 'ro')

# # ODEs

# Many methods exist for integrating ordinary differential equations. Most will want you to write your ODEs as a system of first order equations.

# This system of ODEs is the Lorenz system:
#
# $$\frac{dx}{dt} = \sigma (y - x)$$
# $$\frac{dy}{dt} = rx - y - xz$$
# $$\frac{dz}{dt} = xy - bz$$
#
# the steady states of this system correspond to:
#
# $${\bf f}({\bf x}) =
# \left (
# \sigma (y -x),
# rx - y -xz,
# xy - bz
# \right )^\intercal
# = 0$$
#

# +
# system parameters (the classic chaotic regime of the Lorenz system)
sigma = 10.0
b = 8./3.
r = 28.0

def rhs(t, x):
    """Right-hand side of the Lorenz system; x holds (x, y, z)."""
    xdot = sigma*(x[1] - x[0])
    ydot = r*x[0] - x[1] - x[0]*x[2]
    zdot = x[0]*x[1] - b*x[2]

    return np.array([xdot, ydot, zdot])

def jac(t, x):
    """Analytic Jacobian d(rhs)/dx -- used by the implicit integrator."""
    return np.array(
        [ [-sigma, sigma, 0.0],
          [r - x[2], -1.0, -x[0]],
          [x[1], x[0], -b] ])

def f(x):
    # (rhs, jac) pair in the form scipy.optimize.root(..., jac=True) expects;
    # the system is autonomous, so the time argument is arbitrary (use 0)
    return rhs(0., x), jac(0., x)
# -

# This class stores the integrated solution in a simple datatype
class IntHistory(object):
    """ a simple container to store the integrated history """

    def __init__(self, t=None, x=None, y=None, z=None):
        self.t = np.array(t)
        self.x = np.array(x)
        self.y = np.array(y)
        self.z = np.array(z)

def ode_integrate(X0, dt, tmax):
    """ integrate using the VODE method, storing the solution each dt

    X0   : initial condition (x, y, z)
    dt   : interval at which the solution is sampled and stored
    tmax : integrate from t = 0 up to this time
    returns an IntHistory with the sampled trajectory
    """

    # note: the local name r shadows the module-level r = 28.0 inside
    # this function only
    r = integrate.ode(rhs, jac).set_integrator("vode", method="adams",
                                               with_jacobian=True,
                                               atol=1.e-10, rtol=1.e-10,
                                               nsteps=15000, order=12)

    t = 0.0
    r.set_initial_value(X0, t)

    tout = [t]
    x1out = [X0[0]]
    x2out = [X0[1]]
    x3out = [X0[2]]

    while r.successful() and r.t < tmax:
        r.integrate(r.t+dt)
        tout.append(r.t)
        x1out.append(r.y[0])
        x2out.append(r.y[1])
        x3out.append(r.y[2])

    return IntHistory(np.array(tout), np.array(x1out),
                      np.array(x2out), np.array(x3out))

# +
H1 = ode_integrate([1.0, 1.0, 20.0], 0.02, 30)

from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers '3d' on old Matplotlib)

fig = plt.figure()
# FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
# removed in 3.6 -- request the projection through add_subplot() instead.
ax = fig.add_subplot(projection='3d')
ax.plot(H1.x, H1.y, H1.z)
fig.set_size_inches(8.0, 6.0)
# -

# ### Multi-variate root find

# we can find the steady points in this system by doing a multi-variate root find on the RHS vector

# +
# the three steady states are the origin and the two "wings" at
# (+-sqrt(b(r-1)), +-sqrt(b(r-1)), r-1); the initial guesses below
# converge to one of each
sol1 = optimize.root(f, [1., 1., 1.], jac=True)
print(sol1.x)

sol2 = optimize.root(f, [10., 10., 10.], jac=True)
print(sol2.x)

sol3 = optimize.root(f, [-10., -10., -10.], jac=True)
print(sol3.x)

# +
fig = plt.figure()
# FIX: same gca(projection=...) -> add_subplot(projection=...) change as above
ax = fig.add_subplot(projection='3d')
ax.plot(H1.x, H1.y, H1.z)
ax.scatter(sol1.x[0], sol1.x[1], sol1.x[2], marker="x", color="r")
ax.scatter(sol2.x[0], sol2.x[1], sol2.x[2], marker="x", color="r")
ax.scatter(sol3.x[0], sol3.x[1], sol3.x[2], marker="x", color="r")

ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
# -

# # Fitting

# Fitting is used to match a model to experimental data. E.g. we have N points of $(x_i, y_i)$ with associated errors, $\sigma_i$, and we want to find a simple function that best represents the data.
#
# Usually this means that we will need to define a metric, often called the residual, for how well our function matches the data, and then minimize this residual. Least-squares fitting is a popular formulation.
#
# We want to fit our data to a function $Y(x, \{a_j\})$, where $a_j$ are model parameters we can adjust. We want to find the optimal $a_j$ to minimize the distance of $Y$ from our data:
# $$\Delta_i = Y(x_i, \{a_j\}) - y_i$$
#
# Least-squares minimizes $\chi^2$:
# $$\chi^2(\{a_j\}) = \sum_{i=1}^N \left ( \frac{\Delta_i}{\sigma_i} \right )^2$$

# ### general linear least squares

# First we'll make some experimental data (a quadratic with random noise). We use the randn() function to provide Gaussian normalized errors.
# +
def y_experiment2(a1, a2, a3, sigma, x):
    """Return synthetic experimental data: a quadratic plus Gaussian noise.

    a1, a2, a3 are the coefficients of the quadratic and sigma is the
    1-sigma error on each point.  This will be poorly matched to a
    linear fit for a3 != 0.
    """
    N = len(x)
    # randn gives samples from the "standard normal" distribution
    r = np.random.randn(N)
    y = a1 + a2*x + a3*x*x + sigma*r
    return y

N = 40
sigma = 5.0*np.ones(N)
x = np.linspace(0, 100.0, N)
y = y_experiment2(2.0, 1.50, -0.02, sigma, x)

plt.scatter(x,y)
# fmt="none" (the string) draws error bars with no connecting line;
# fmt=None is rejected by modern matplotlib (fmt must be a format string)
plt.errorbar(x, y, yerr=sigma, fmt="none")

# +
def resid(avec, x, y, sigma):
    """ the residual function -- this is what will be minimized by the
        scipy.optimize.leastsq() routine.  avec is the parameters we
        are optimizing -- they are packed in here, so we unpack to
        begin.  (x, y) are the data points 

        scipy.optimize.leastsq() minimizes:

           x = arg min(sum(func(y)**2,axis=0))
                    y

        so this should just be the distance from a point to the curve,
        and it will square it and sum over the points
        """
    a0, a1, a2 = avec
    # weight each residual by the error bar on its point
    return (y - (a0 + a1*x + a2*x**2))/sigma

# initial guesses
a0, a1, a2 = 1, 1, 1

afit, flag = optimize.leastsq(resid, [a0, a1, a2], args=(x, y, sigma))

print(afit)

plt.plot(x, afit[0] + afit[1]*x + afit[2]*x*x )
plt.scatter(x,y)
plt.errorbar(x, y, yerr=sigma, fmt="none")
# -

# $\chi^2$

# reduced chi-square: chi^2 per degree of freedom (N points - N fit parameters)
chisq = sum(np.power(resid(afit, x, y, sigma),2))
normalization = len(x)-len(afit)
print(chisq/normalization)

# ### a nonlinear example

# our experimental data -- an exponential

# +
a0 = 2.5
a1 = 2./3.
sigma = 5.0

a0_orig, a1_orig = a0, a1

x = np.linspace(0.0, 4.0, 25)
y = a0*np.exp(a1*x) + sigma*np.random.randn(len(x))

plt.scatter(x,y)
plt.errorbar(x, y, yerr=sigma, fmt="none", label="_nolegend_")
# -

# our function to minimize

def resid(avec, x, y):
    """ the residual function -- this is what will be minimized by the
        scipy.optimize.leastsq() routine.  avec is the parameters we
        are optimizing -- they are packed in here, so we unpack to
        begin.  (x, y) are the data points 

        scipy.optimize.leastsq() minimizes:

           x = arg min(sum(func(y)**2,axis=0))
                    y

        so this should just be the distance from a point to the curve,
        and it will square it and sum over the points
        """
    a0, a1 = avec

    # note: if we wanted to deal with error bars, we would weight each
    # residual accordingly
    return y - a0*np.exp(a1*x)

# +
a0, a1 = 1, 1
afit, flag = optimize.leastsq(resid, [a0, a1], args=(x, y))

print(flag)
print(afit)
# -

plt.plot(x, afit[0]*np.exp(afit[1]*x),
         label=r"$a_0 = $ %f; $a_1 = $ %f" % (afit[0], afit[1]))
plt.plot(x, a0_orig*np.exp(a1_orig*x), ":", label="original function")
plt.legend(numpoints=1, frameon=False)
plt.scatter(x,y, c="k")
plt.errorbar(x, y, yerr=sigma, fmt="none", label="_nolegend_")

# # FFTs

# Fourier transforms convert a physical-space (or time series) representation of a function into frequency space.  This provides an equivalent representation of the data with a new view.

# The FFT and its inverse in NumPy use:
# $$F_k = \sum_{n=0}^{N-1} f_n e^{-2\pi i nk/N}$$

# $$f_n = \frac{1}{N} \sum_{k=0}^{N-1} F_k e^{2\pi i n k/N}$$

# Both NumPy and SciPy have FFT routines that are similar.  However, the NumPy version returns the data in a more convenient form.

# It's always best to start with something you understand -- let's do a simple sine wave.  Since our function is real, we can use the rfft routines in NumPy -- they understand that we are working with real data and they don't return the negative frequency components.

# One important caveat -- FFTs assume that you are periodic.  If you include both endpoints of the domain in the points that comprise your sample then you will not match this assumption.
Here we use endpoint=False with linspace() def single_freq_sine(npts): # a pure sine with no phase shift will result in pure imaginary # signal f_0 = 0.2 xmax = 10.0/f_0 xx = np.linspace(0.0, xmax, npts, endpoint=False) f = np.sin(2.0*np.pi*f_0*xx) return xx, f # To make our life easier, we'll define a function that plots all the stages of the FFT process def plot_FFT(xx, f): npts = len(xx) # Forward transform: f(x) -> F(k) fk = np.fft.rfft(f) # Normalization -- the '2' here comes from the fact that we are # neglecting the negative portion of the frequency space, since # the FFT of a real function contains redundant information, so # we are only dealing with 1/2 of the frequency space. # # technically, we should only scale the 0 bin by N, since k=0 is # not duplicated -- we won't worry about that for these plots norm = 2.0/npts fk = fk*norm fk_r = fk.real fk_i = fk.imag # the fftfreq returns the postive and negative (and 0) frequencies # the newer versions of numpy (>=1.8) have an rfftfreq() function # that really does what we want -- we'll use that here. k = np.fft.rfftfreq(npts) # to make these dimensional, we need to divide by dx. Note that # max(xx) is not the true length, since we didn't have a point # at the endpoint of the domain. 
kfreq = k*npts/(max(xx) + xx[1]) # Inverse transform: F(k) -> f(x) -- without the normalization fkinv = np.fft.irfft(fk/norm) # plots plt.subplot(411) plt.plot(xx, f) plt.xlabel("x") plt.ylabel("f(x)") plt.subplot(412) plt.plot(kfreq, fk_r, label=r"Re($\mathcal{F}$)") plt.plot(kfreq, fk_i, ls=":", label=r"Im($\mathcal{F}$)") plt.xlabel(r"$\nu_k$") plt.ylabel("F(k)") plt.legend(fontsize="small", frameon=False) plt.subplot(413) plt.plot(kfreq, np.abs(fk)) plt.xlabel(r"$\nu_k$") plt.ylabel(r"|F(k)|") plt.subplot(414) plt.plot(xx, fkinv.real) plt.xlabel(r"$\nu_k$") plt.ylabel(r"inverse F(k)") f = plt.gcf() f.set_size_inches(10,8) plt.tight_layout() npts = 128 xx, f = single_freq_sine(npts) plot_FFT(xx, f) # A cosine is shifted in phase by pi/2 def single_freq_cosine(npts): # a pure cosine with no phase shift will result in pure real # signal f_0 = 0.2 xmax = 10.0/f_0 xx = np.linspace(0.0, xmax, npts, endpoint=False) f = np.cos(2.0*np.pi*f_0*xx) return xx, f xx, f = single_freq_cosine(npts) plot_FFT(xx, f) # Now let's look at a sine with a pi/4 phase shift def single_freq_sine_plus_shift(npts): # a pure sine with no phase shift will result in pure imaginary # signal f_0 = 0.2 xmax = 10.0/f_0 xx = np.linspace(0.0, xmax, npts, endpoint=False) f = np.sin(2.0*np.pi*f_0*xx + np.pi/4) return xx, f xx, f = single_freq_sine_plus_shift(npts) plot_FFT(xx, f) # ### A frequency filter # we'll setup a simple two-frequency sine wave and filter a component def two_freq_sine(npts): # a pure sine with no phase shift will result in pure imaginary # signal f_0 = 0.2 f_1 = 0.5 xmax = 10.0/f_0 # we call with endpoint=False -- if we include the endpoint, then for # a periodic function, the first and last point are identical -- this # shows up as a signal in the FFT. 
xx = np.linspace(0.0, xmax, npts, endpoint=False) f = 0.5*(np.sin(2.0*np.pi*f_0*xx) + np.sin(2.0*np.pi*f_1*xx)) return xx, f # + npts = 256 xx, f = two_freq_sine(npts) plt.plot(xx, f) # - # we'll take the transform: f(x) -> F(k) # + # normalization factor: the 2 here comes from the fact that we neglect # the negative portion of frequency space because our input function # is real norm = 2.0/npts fk = norm*np.fft.rfft(f) ofk_r = fk.real.copy() ofk_i = fk.imag.copy() # get the frequencies k = np.fft.rfftfreq(len(xx)) # since we don't include the endpoint in xx, to normalize things, we need # max(xx) + dx to get the true length of the domain # # This makes the frequencies essentially multiples of 1/dx kfreq = k*npts/(max(xx) + xx[1]) plt.plot(kfreq, fk.real, label="real") plt.plot(kfreq, fk.imag, ":", label="imaginary") plt.legend(frameon=False) # - # Filter out the higher frequencies # + fk[kfreq > 0.4] = 0.0 # element 0 of fk is the DC component fk_r = fk.real fk_i = fk.imag # Inverse transform: F(k) -> f(x) fkinv = np.fft.irfft(fk/norm) plt.plot(xx, fkinv.real) # - # # Linear Algebra # ### general manipulations of matrices # you can use regular NumPy arrays or you can use a special matrix class that offers some short cuts a = np.array([[1.0, 2.0], [3.0, 4.0]]) print(a) print(a.transpose()) print(a.T) ainv = np.linalg.inv(a) print(ainv) print(np.dot(a, ainv)) # the eye() function will generate an identity matrix (as will the identity()) print(np.eye(2)) print(np.identity(2)) # we can solve Ax = b b = np.array([5, 7]) x = np.linalg.solve(a, b) print(x) # ### The matrix class A = np.matrix('1.0 2.0; 3.0 4.0') print(A) print(A.T) # + X = np.matrix('5.0 7.0') Y = X.T print(A*Y) # - print(A.I*Y)
day-5/scipy-basics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Federated Monai MedNIST Example # # This demo uses to demonstrate federated learning training and validation in the case of 2D medical image registration. # # Based on MONAI [registration_mednist.ipynb](https://github.com/Project-MONAI/tutorials/blob/master/2d_registration/registration_mednist.ipynb) notebook and [OpenFL](https://github.com/intel/openfl) - federated learning framework. # install workspace requirements # ! pip install -r workspace_requirements.txt import numpy as np import torch import tqdm from monai.config import USE_COMPILED from openfl.interface.interactive_api.experiment import ( DataInterface, FLExperiment, ModelInterface, TaskInterface, ) from openfl.interface.interactive_api.federation import Federation # ## Connect to the Federation # + # Create a federation # please use the same identificator that was used in signed certificate client_id = "api" cert_dir = "cert" director_node_fqdn = "localhost" director_port = 50051 # 1) Run with API layer - Director mTLS # If the user wants to enable mTLS their must provide CA root chain, and signed key pair to the federation interface # cert_chain = f'{cert_dir}/root_ca.crt' # api_certificate = f'{cert_dir}/{client_id}.crt' # api_private_key = f'{cert_dir}/{client_id}.key' # federation = Federation(client_id=client_id, director_node_fqdn=director_node_fqdn, director_port=director_port, # cert_chain=cert_chain, api_cert=api_certificate, api_private_key=api_private_key) # -------------------------------------------------------------------------------------------------------------------- # 2) Run with TLS disabled (trusted environment) # Federation can also determine local fqdn automatically federation = Federation( client_id=client_id, director_node_fqdn=director_node_fqdn, director_port=director_port, tls=False, ) # - federation.target_shape shard_registry 
= federation.get_shard_registry() shard_registry # First, request a dummy_shard_desc that holds information about the federated dataset dummy_shard_desc = federation.get_dummy_shard_descriptor(size=10) dummy_shard_dataset = dummy_shard_desc.get_dataset("train") sample, target = dummy_shard_dataset[0] print(sample.shape) print(target.shape) # ## Creating a FL experiment using Interactive API # ### Register dataset from monai.data import CacheDataset, DataLoader, Dataset from monai.transforms import ( Compose, EnsureChannelFirstD, EnsureTypeD, LoadImageD, RandRotateD, RandZoomD, ScaleIntensityRanged, ) image_transforms = Compose( [ LoadImageD(keys=["fixed_hand", "moving_hand"]), EnsureChannelFirstD(keys=["fixed_hand", "moving_hand"]), ScaleIntensityRanged( keys=["fixed_hand", "moving_hand"], a_min=0.0, a_max=255.0, b_min=0.0, b_max=1.0, clip=True, ), RandRotateD( keys=["moving_hand"], range_x=np.pi / 4, prob=1.0, keep_size=True, mode="bicubic", ), RandZoomD( keys=["moving_hand"], min_zoom=0.9, max_zoom=1.1, prob=1.0, mode="bicubic", align_corners=False, ), EnsureTypeD(keys=["fixed_hand", "moving_hand"]), ] ) class MedNISTDataset(DataInterface): def __init__(self, **kwargs): self.kwargs = kwargs @property def shard_descriptor(self): return self._shard_descriptor @shard_descriptor.setter def shard_descriptor(self, shard_descriptor): """ Describe per-collaborator procedures or sharding. This method will be called during a collaborator initialization. Local shard_descriptor will be set by Envoy. 
""" self._shard_descriptor = shard_descriptor self.train_set = Dataset( data=self._shard_descriptor.get_dataset("train").data_items, transform=image_transforms, ) self.valid_set = Dataset( data=self._shard_descriptor.get_dataset("validation").data_items, transform=image_transforms, ) def get_train_loader(self, **kwargs): """ Output of this method will be provided to tasks with optimizer in contract """ return DataLoader( self.train_set, batch_size=self.kwargs["train_bs"], shuffle=True ) def get_valid_loader(self, **kwargs): """ Output of this method will be provided to tasks without optimizer in contract """ return DataLoader(self.valid_set, batch_size=self.kwargs["valid_bs"]) def get_train_data_size(self): """ Information for aggregation """ return len(self.train_set) def get_valid_data_size(self): """ Information for aggregation """ return len(self.valid_set) fed_dataset = MedNISTDataset(train_bs=16, valid_bs=16) # ### Describe the model and optimizer from monai.networks.blocks import Warp from monai.networks.nets import GlobalNet from torch.nn import MSELoss # + model_net = GlobalNet( image_size=(64, 64), spatial_dims=2, in_channels=2, # moving and fixed num_channel_initial=16, depth=3, ) image_loss = MSELoss() if USE_COMPILED: warp_layer = Warp(3, "border") else: warp_layer = Warp("bilinear", "border") optimizer_adam = torch.optim.Adam(model_net.parameters(), 1e-5) # - # ### Register model framework_adapter = ( "openfl.plugins.frameworks_adapters.pytorch_adapter.FrameworkAdapterPlugin" ) model_interface = ModelInterface( model=model_net, optimizer=optimizer_adam, framework_plugin=framework_adapter ) # ## Define and register FL tasks # + task_interface = TaskInterface() @task_interface.register_fl_task( model="net_model", data_loader="train_loader", device="device", optimizer="optimizer", ) def train( net_model, train_loader, optimizer, device, loss_fn=image_loss, affine_transform=warp_layer, ): train_loader = tqdm.tqdm(train_loader, desc="train") 
net_model.train() net_model.to(device) warp_layer.to(device) epoch_loss = 0.0 step = 0 for batch_data in train_loader: step += 1 optimizer.zero_grad() moving = batch_data["moving_hand"].to(device) fixed = batch_data["fixed_hand"].to(device) ddf = net_model(torch.cat((moving, fixed), dim=1)) pred_image = affine_transform(moving, ddf) loss = loss_fn(pred_image, fixed) loss.backward() optimizer.step() epoch_loss += loss.item() epoch_loss /= step return { "train_loss": epoch_loss, } @task_interface.register_fl_task( model="net_model", data_loader="val_loader", device="device" ) def validate( net_model, val_loader, device, loss_fn=image_loss, affine_transform=warp_layer ): net_model.eval() net_model.to(device) warp_layer.to(device) epoch_loss = 0.0 step = 0 val_loader = tqdm.tqdm(val_loader, desc="validate") with torch.no_grad(): for batch_data in val_loader: step += 1 moving = batch_data["moving_hand"].to(device) fixed = batch_data["fixed_hand"].to(device) ddf = net_model(torch.cat((moving, fixed), dim=1)) pred_image = affine_transform(moving, ddf) loss = loss_fn(pred_image, fixed) epoch_loss += loss.item() epoch_loss /= step return { "validation_loss": epoch_loss, } # - # ## Time to start a federated learning experiment # Create an experimnet in federation experiment_name = "mednist_experiment" fl_experiment = FLExperiment(federation=federation, experiment_name=experiment_name) # The following command zips the workspace and python requirements to be transfered to collaborator nodes fl_experiment.start( model_provider=model_interface, task_keeper=task_interface, data_loader=fed_dataset, rounds_to_train=10, opt_treatment="CONTINUE_GLOBAL", device_assignment_policy="CUDA_PREFERRED", ) # To check how experiment is going fl_experiment.stream_metrics(tensorboard_logs=False)
federated_learning/openfl/openfl_mednist_2d_registration/workspace/Monai_MedNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from wilds.datasets.camelyon17_dataset import Camelyon17Dataset from wilds.common.data_loaders import get_train_loader, get_eval_loader from wilds.common.grouper import CombinatorialGrouper from utils import load import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms from models.layers import Identity # + config = { 'split_scheme': 'official', 'model': 'densenet121', 'model_kwargs': {'pretrained': False}, 'train_transform': 'image_base', 'eval_transform': 'image_base', 'target_resolution': (96, 96), 'loss_function': 'cross_entropy', 'groupby_fields': ['hospital'], 'val_metric': 'acc_avg', 'val_metric_decreasing': False, 'optimizer': 'SGD', 'optimizer_kwargs': {'momentum': 0.9}, 'scheduler': None, 'batch_size': 32, 'lr': 0.001, 'weight_decay': 0.01, 'n_epochs': 5, 'n_groups_per_batch': 2, 'irm_lambda': 1.0, 'coral_penalty_weight': 0.1, 'algo_log_metric': 'accuracy', 'process_outputs_function': 'multiclass_logits_to_pred', } def initialize_torchvision_model(name, d_out, **kwargs): # get constructor and last layer names if name == 'wideresnet50': constructor_name = 'wide_resnet50_2' last_layer_name = 'fc' elif name == 'densenet121': constructor_name = name last_layer_name = 'classifier' elif name in ('resnet50', 'resnet34'): constructor_name = name last_layer_name = 'fc' else: raise ValueError(f'Torchvision model {name} not recognized') # construct the default model, which has the default last layer constructor = getattr(torchvision.models, constructor_name) model = constructor(**kwargs) # adjust the last layer d_features = getattr(model, last_layer_name).in_features if d_out is None: # want to initialize a featurizer model last_layer = Identity(d_features) model.d_out = d_features else: # want to initialize 
a classifier for a particular num_classes last_layer = nn.Linear(d_features, d_out) model.d_out = d_out setattr(model, last_layer_name, last_layer) return model def initialize_image_base_transform(config, dataset): transform_steps = [] if dataset.original_resolution is not None and min(dataset.original_resolution)!=max(dataset.original_resolution): crop_size = min(dataset.original_resolution) transform_steps.append(transforms.CenterCrop(crop_size)) if config['target_resolution'] is not None: transform_steps.append(transforms.Resize(config['target_resolution'])) transform_steps += [ transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ] transform = transforms.Compose(transform_steps) return transform class Camelyon_model(nn.Module): def __init__(self, featurizer, classifier): super().__init__() self.model = torch.nn.Sequential(featurizer, classifier) self.featurizer = featurizer self.classifier = classifier class Camelyon_model_old(nn.Module): def __init__(self, featurizer): super().__init__() # self.model = torch.nn.Sequential(featurizer, classifier) self.model = featurizer # self.classifier = classifier # - dataset = Camelyon17Dataset(root_dir="/data/wilds/data") train_grouper = CombinatorialGrouper( dataset=dataset, groupby_fields=['hospital']) transform = initialize_image_base_transform(config, dataset) train_data = dataset.get_subset('train', transform=transform, frac=0.1) val_data = dataset.get_subset('val', transform=transform, frac=0.1) test_data = dataset.get_subset('test', transform=transform, frac=0.1) train_loader = get_train_loader('group', train_data, grouper=train_grouper, batch_size=15, n_groups_per_batch=3) val_loader = get_eval_loader('standard', val_data, grouper=train_grouper, batch_size=5) test_loader = get_eval_loader('standard', test_data, grouper=train_grouper, batch_size=5) # + # get_eval_loader('group', val_data, grouper=train_grouper, batch_size=16) # - ckpt = 
torch.load('/data/wilds/log/camelyon17_CORALaDRO/camelyon17_seed:0_epoch:best_model.pth') model_states = {} for k in ckpt['algorithm'].keys(): model_states[k.replace("model.0.","")] = ckpt['algorithm'][k] ckpt['algorithm'].keys() featurizer # + featurizer = initialize_torchvision_model('densenet121', d_out=None) classifier = nn.Linear(featurizer.d_out, 2) model = (featurizer, classifier) model = Camelyon_model(featurizer, classifier) # featurizer = initialize_torchvision_model('densenet121', d_out=2) # model = Camelyon_model_old(featurizer) # - model.load_state_dict(ckpt['algorithm'], strict=True) last = torch.nn.Sequential(*(list(model.model.children())[-1:])) model = torch.nn.Sequential(*(list(model.model.children())[:-1])) # + model.to('cuda:1') model.eval() group_list = [] hidden_list = [] count = 0 with torch.no_grad(): for b in train_loader: inputs, y, group = b inputs = inputs.to('cuda:1') # outputs = model(inputs) groups = train_grouper.metadata_to_group(group) features = model.featurizer(inputs) # features = model(inputs) group_list.extend(groups) hidden_list.extend(features.cpu().numpy()) count += 1 # break if count == 20: break count = 0 with torch.no_grad(): for b in val_loader: inputs, y, group = b inputs = inputs.to('cuda:1') # outputs = model(inputs) groups = train_grouper.metadata_to_group(group) features = model.featurizer(inputs) # features = model(inputs) group_list.extend(groups) hidden_list.extend(features.cpu().numpy()) count += 1 # break if count == 20: break count = 0 with torch.no_grad(): for b in test_loader: inputs, y, group = b inputs = inputs.to('cuda:1') # outputs = model(inputs) groups = train_grouper.metadata_to_group(group) features = model.featurizer(inputs) # features = model(inputs) group_list.extend(groups) hidden_list.extend(features.cpu().numpy()) count += 1 # break if count == 20: break # - X = np.array(hidden_list) y = np.array(group_list) IND = (y==0) | (y==3) | (y==4) OOD = (y==2) | (y==1) INDOOD = (y!=1) #(y==2) | (y==0) 
# # OOD = (y==2) # + from yellowbrick.text import TSNEVisualizer import numpy as np X = np.array(hidden_list) y = np.array(group_list) # Create the visualizer and draw the vectors tsne = TSNEVisualizer() tsne.fit(X[IND], y[IND]) tsne.show() # - tsne = TSNEVisualizer() tsne.fit(X[OOD], y[OOD]) tsne.show() tsne = TSNEVisualizer() tsne.fit(X[INDOOD], y[INDOOD]) tsne.show() tsne = TSNEVisualizer() tsne.fit(X, y) tsne.show() import umap from matplotlib import pyplot as plt import numpy as np # + import pandas as pd import seaborn as sns standard_embedding = umap.UMAP(random_state=42).fit_transform(X[IND]) data = pd.DataFrame({"x":standard_embedding[:, 0], "y":standard_embedding[:, 1], "g":y[IND]}) sns.scatterplot(data=data, x="x", y="y", hue="g") # + import pandas as pd import seaborn as sns standard_embedding = umap.UMAP(random_state=42).fit_transform(X[INDOOD]) data = pd.DataFrame({"x":standard_embedding[:, 0], "y":standard_embedding[:, 1], "g":y[INDOOD]}) sns.scatterplot(data=data, x="x", y="y", hue="g") # + import pandas as pd import seaborn as sns standard_embedding = umap.UMAP(random_state=42).fit_transform(X[OOD]) data = pd.DataFrame({"x":standard_embedding[:, 0], "y":standard_embedding[:, 1], "g":y[OOD]}) sns.scatterplot(data=data, x="x", y="y", hue="g") # - standard_embedding = umap.UMAP(random_state=42).fit_transform(X[OOD]) plt.scatter(standard_embedding[:, 0], standard_embedding[:, 1], c=y[OOD].astype(int), s=20)
examples/.ipynb_checkpoints/visulization_embedding-Copy1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + import os import re import sys import pandas as pd import muscope_loader # + print(sys.modules['muscope_loader'].__file__) muscope_loader_dp = os.path.dirname(sys.modules['muscope_loader'].__file__) downloads_dp = os.path.join(muscope_loader_dp, 'downloads') hl2a_chisholm_vesicle_xls_fp = os.path.join(downloads_dp, 'Chisholm_HOT263.283_Vesicle_seq_attrib.xls') print(hl2a_chisholm_vesicle_xls_fp) os.path.exists(hl2a_chisholm_vesicle_xls_fp) # - core_attr_plus_data_df = pd.read_excel( hl2a_chisholm_vesicle_xls_fp, sheet_name='core attributes + data', skiprows=(0,2) ) # derive a new sample_name column from seq_name like # 161013Chi_D16-10856_1_sequence.fastq.gz # where 10856 is the sample name sample_name_re = re.compile(r'^\d+Chi_D\d+-(?P<sample_name>\d+)_\d_sequence.fastq.gz$') core_attr_plus_data_df.rename(columns={'sample_name': 'sample_desc'}, inplace=True) new_sample_name = [] for r, row in core_attr_plus_data_df.iterrows(): if str(row.sample_desc) == 'nan': new_sample_name.append(float('NaN')) else: new_sample_name.append(sample_name_re.search(row.seq_name).group('sample_name')) core_attr_plus_data_df['sample_name'] = new_sample_name core_attr_plus_data_df
muscope/cruise/notebooks/fix_up_chisholm_vesicle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Harris Corner Detection # ### Import resources and display image # + import matplotlib.pyplot as plt import numpy as np import cv2 # %matplotlib inline # Read in the image image = cv2.imread('images/waffle.jpg') # Make a copy of the image image_copy = np.copy(image) # Change color to RGB (from BGR) image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB) plt.imshow(image_copy) # - # ### Detect corners # + # Convert to grayscale gray = cv2.cvtColor(image_copy, cv2.COLOR_RGB2GRAY) gray = np.float32(gray) # Detect corners dst = cv2.cornerHarris(gray, 2, 3, 0.04) # Dilate corner image to enhance corner points dst = cv2.dilate(dst,None) plt.imshow(dst, cmap='gray') # - # ### Extract and display strong corners # + ## TODO: Define a threshold for extracting strong corners # This value vary depending on the image and how many corners you want to detect # Try changing this free parameter, 0.1, to be larger or smaller ans see what happens thresh = 0.05*dst.max() # Create an image copy to draw corners on corner_image = np.copy(image_copy) # Iterate through all the corners and draw them on the image (if they pass the threshold) for j in range(0, dst.shape[0]): for i in range(0, dst.shape[1]): if(dst[j,i] > thresh): # image, center pt, radius, color, thickness cv2.circle( corner_image, (i, j), 1, (0,255,0), 1) plt.imshow(corner_image)
1_3_Types_of_Features_Image_Segmentation/1. Harris Corner Detection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Quaternion Triple Products and Distance # by <NAME>, <EMAIL> - please feel free to email # In this IPython notebook, efforts will be made to understand quaternion triple products and how they are related to distances in space and intervals in space-time as seen in special relativity. Rather than follow a historical story, I will try a more abstract approach. Initialize a few tools. # + # %%capture # %matplotlib inline import numpy as np import sympy as sp import matplotlib.pyplot as plt # To get equations the look like, well, equations, use the following. from sympy.interactive import printing printing.init_printing(use_latex=True) from IPython.display import display # Tools for manipulating quaternions. import Q_tools as qt; # - # ## Spatial Rotations # Define a triple product function modeled on what it takes to do a spatial rotation, $P R P^*$, where $R$ is a quaternion to be spatially rotated and $P$ is a quaternion parameter to do said rotation. def triple_sandwich(r, p=qt.QH([1, 0, 0, 0])): """A function that takes 2 quaternions but does a triple product. The default value for P leaves R unchanged.""" return p.product(r.product(p.conj())) # + t, x, y, z = sp.symbols("t x y z") s, u, v, w = sp.symbols("s u v w") R = qt.QH([t, x, y, z]) P = qt.QH([s, u, v, w]) RP_sandwich = triple_sandwich(R, P) sp.simplify(RP_sandwich.t) # - # The first term is just the norm of the parameter $P$ times the scalar value of $R$, how simple! Rotating a value is complicated. sp.simplify(RP_sandwich.x) # Show the interval of $R$ is unchanged up to the norm of the parameter $P$: sp.simplify(sp.factor(RP_sandwich.square().t)) # The interval will be invariant so long as the norm of the parameter $P$ is equal to one. 
A common way to do this is to use sine and cosine functions due to the trig identity $\sin^2(\theta) + \cos^2(\theta) = 1$. # + def triple_trig_z(r, a): """A rotation around the z axis only by the double angle of a.""" return triple_sandwich(r, qt.QH([sp.cos(a), 0, 0, sp.sin(a)])) def is_quadratic(r): """Tests if the the first term of the square of a quaternion is equal to t^2 - x^2 - y^2 - z^2.""" r2 = r.square() simple_r2 = sp.simplify(r2.t) it_is = ((simple_r2 == 1.0*t**2 - 1.0*x**2 - 1.0*y**2 - 1.0*z**2) or (simple_r2 == t**2 - x**2 - y**2 - z**2)) if it_is: display(t**2 - x**2 - y**2 - z**2) else: display(simple_r2) return it_is # - a = sp.Symbol('a') display(sp.simplify(triple_trig_z(R, a).t)) display(sp.simplify(triple_trig_z(R, a).x)) display(sp.simplify(triple_trig_z(R, a).y)) display(sp.simplify(triple_trig_z(R, a).z)) is_quadratic(triple_trig_z(R, a)) # An important thing to notice is that rotations work for arbitrarily small values of an angle. display(sp.simplify(triple_trig_z(R, 0.01).t)) display(sp.simplify(triple_trig_z(R, 0.01).x)) display(sp.simplify(triple_trig_z(R, 0.01).y)) display(sp.simplify(triple_trig_z(R, 0.01).z)) is_quadratic(triple_trig_z(R, 0.01)) # This is relevant to the fact that the group $SO(3)$ is a compact group. It is easy to visualize the example above: it is a circle in the $xy$ plane with $t$ and $z$ unaltered. Circles are sets of points where the "next" point is an arbitrarily short distance away. # Can we create a function that can take _any_ quaternion parameter $P$ yet still always generate another member of the group $SO(3)$? This can be done using the inverse of a quaternion which is the conjugate of a quaternion divided by the norm squared. Groups are about binary operations on a set. The binary operation can be a composite function, where the results of one rotation are fed into another. 
# + def next_rotation(r, p=qt.QH([1, 0, 0, 0])): """Generates another member of the rotation group given a quaternion parameter P.""" return p.product(r.product(p.invert())) def composite_rotation(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0])): """A composite function of next_rotation.""" return next_rotation(next_rotation(r, p1), p2) # - display(sp.simplify(composite_rotation(R, qt.QH([s, u, v, w])).t)) display(sp.simplify(composite_rotation(R, qt.QH([s, u, v, w])).x)) is_quadratic(composite_rotation(R, qt.QH([s, u, v, w]))) # The next_rotation function can use any quaternion parameter $P$ as input and create another member of the group. This does not mean that rotations have four degrees of freedom. There is an equivalence relation involved since the product of a quaternion with its inverse has a norm of one. This algebraic constraint means the composite_rotation function has $4-1=3$ degrees of freedom. # The composite_rotation function could be used to show that there is a real-valued quaternion representation of the compact Lie group $SO(3)$. Since it is well known quaternions can do this, such an effort will be skipped. # ## Other Triple Products Lead to More Than Just Rotations # Other triple products are possible. For example, the two quaternions could be on the same side. A number of years ago, a search for a real-valued quaternion function that could do a Lorentz boost turned up this difference between two one-sided triples, $ \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$: def triple_2_on_1(r, p=qt.QH([1, 0, 0, 0])): """The two are on one side, minus a different two on one side.""" ppr = p.product(p.product(r)).conj() pcpcr = p.conj().product(p.conj().product(r)).conj() pd = ppr.dif(pcpcr) pd_ave = pd.product(qt.QH([1/2, 0, 0, 0])) return pd_ave rq_321 = triple_2_on_1(R, P) display(sp.simplify(rq_321.t)) display(sp.simplify(rq_321.x)) display(sp.simplify(rq_321.y)) display(sp.simplify(rq_321.z)) # If $s=0$, then triple_2_on_1 would contribute nothing. 
# Explore the hyperbolic sine and cosines: phx = qt.QH([sp.cosh(a), sp.sinh(a), 0, 0]) ppr = triple_2_on_1(R, phx) display(sp.simplify(ppr.t)) # This is promising for doing a Lorentz boost. There is a direct link between hyperbolic trig functions and the relativistic velocity $\beta$ and stretch factor $\gamma$ of special relativity. # $$\gamma = \cosh(\alpha)$$ # $$\beta \gamma = \sinh(\alpha)$$ # The trig functions are based on a circle in the plane, while the hyperbolic trig functions start with hyperbolas. The definitions are remarkably similar: # $$\sin(\alpha) = \frac{e^{i \alpha} - e^{-i \alpha}}{2 i}$$ # $$\cos(\alpha) = \frac{e^{i \alpha} + e^{-i \alpha}}{2 i}$$ # $$\sinh(\alpha) = \frac{e^{\alpha} - e^{-\alpha}}{2}$$ # $$\cosh(\alpha) = \frac{e^{\alpha} + e^{-\alpha}}{2}$$ # The hyperbolic trig functions oddly are "more real", never needing an imaginary factor. The hyperbola of the hyperbolic cosine does touch the unit circle at its minimum, suggesting a solitary link to the trig functions. # Combine the three triples and test if they do all the work of a Lorentz boost: # $$\rm{triple-triple}(R, P) \equiv P R P^* + \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$$ def triple_triple(r, p=qt.QH([1, 0, 0, 0])): """Use three triple products for rotations and boosts.""" # Note: 'qtype' provides a record of what algrabric operations were done to create a quaternion. return triple_sandwich(r, p).add(triple_2_on_1(r, p), qtype="triple_triple") # Can this function do a rotation? If the first value of $P$ is equal to zero, then the two one-sided triple terms, $PPR$, will make no contribution, leaving the triple sandwich $PRP^*$. So long as the norm is equal to unity, then spatial rotations result. 
Do a rotation: jk = qt.QH([0, 0, 3/5, 4/5]) display(sp.simplify(triple_triple(R, jk).t)) display(sp.simplify(triple_triple(R, jk).x)) display(sp.simplify(triple_triple(R, jk).y)) display(sp.simplify(triple_triple(R, jk).z)) is_quadratic(triple_triple(R, jk)) # Something important has changed going from the regular trig functions to these hyperbolic functions for rotations. The requirements that the first term must be zero while the other three terms are normalized to unity means that one cannot go an arbitrarily small distance away and find another transformation. If one wants a product of rotations, those rotations must be at right angles to each other. Qi, Qj, Qk = qt.QH([0, 1, 0, 0]), qt.QH([0, 0, 1, 0]), qt.QH([0, 0, 0, 1]) print(triple_triple(triple_triple(R, Qi), Qj)) print(triple_triple(R, Qi.product(Qj))) # The fact that one cannot find a super close neighbor is a big technical change. # What is so special about setting the first term equal to zero? Is there a more general form? Perhaps all that is needed is for the first term of the square to be equal to negative one. Test this out: minus_1 = qt.QH([2, 2, 1, 0]) print(minus_1.square().t) display((triple_triple(R, minus_1).t, triple_triple(R, minus_1).x, triple_triple(R, minus_1).y, triple_triple(R, minus_1).z)) is_quadratic(triple_triple(R, minus_1)) # To be honest, this came as a surprise to me. Notice that the value for time changes, so a rotation is getting mixed in with a boost. This sort of mixing of rotations and boosts is known to happen when one does two boosts, one say along $x$, the other along $y$. Now we can say a similar thing is possible for rotations. If there scalar is zero then one gets a pure spatial rotation. When that is not the case, there is a mixture of rotations and boosts. # Demonstrate that a boost along the $x$ axis works. 
bx = qt.QH([sp.cosh(a), sp.sinh(a), 0, 0])

display(sp.simplify(bx.square().t))
display(sp.simplify(triple_triple(R, bx).t))
display(sp.simplify(triple_triple(R, bx).x))
display(sp.simplify(triple_triple(R, bx).y))
display(sp.simplify(triple_triple(R, bx).z))

is_quadratic(triple_triple(R, bx))

# Perfect. It was this result that began my investigation of triple_triple quaternion products. This is what the boost looks like using gammas and betas: $$(\gamma t - \gamma \beta x, \gamma x - \gamma \beta t, y, z)$$

# The first term of the square of the hyperbolic parameter $P=bx$ is equal to positive one. So long as the triple_triple function is fed a quaternion parameter $P$ whose first term of the square has an absolute value of one, the interval is invariant. That is surprisingly simple.

# Note the double angle in the hyperbolic trig function that appeared earlier for rotations.

# ## Spatial Reflection and Time Reversal

# For a spatial reflection, just one spatial term flips signs. The first term of the square will not be altered. Yet the triple_triple function cannot flip only one sign. It can flip two terms. Thus, using just the triple_triple function one can go from all positive, to two positive-two negative, to all negative terms, but never one or three negative terms starting from an all positive quaternion $R$. The conjugate operator can do odd sign changes. Do a spatial reflection on $x$ only by rotating using $i$ and using the conjugate operator like so:

# Rotate with Qi, then conjugate: the net effect is a sign flip on x alone.
x_reflection = triple_triple(R, Qi).conj()
print(x_reflection)
is_quadratic(x_reflection)

# Time reversal also cannot be done using triple_triple. The parameter $P$ is used twice, so its sign is of no consequence for the scalar in $R$. The entire quaternion $R$ must be multiplied by $-1$ then take a conjugate like so:

# Multiply by -1, then conjugate: the net effect is a sign flip on t alone.
t_reversal = triple_triple(R).conj().product(qt.QH([-1, 0, 0, 0], qtype="sign_flip"))
print(t_reversal)
is_quadratic(t_reversal)

# Rotations and boosts do not do the work of time reversal. Time reversal requires different algebraic tricks.

# ## Fixing the Limitations of the Triple_Triple Function

# The triple_triple function must be fed quaternions whose square is either exactly equal to plus or minus one. Create a function that can take in _any_ quaternion as a parameter and generate the next quadratic. The function must be scaled to the square root of the first term of the quaternion parameter $P$ squared. Expand the parameters so both spatial reflections and time reversals can be done.

# If the parameter $P$ is light-like, it cannot be used to do a boost. Feed the triple_triple function a light-like quaternion and it will always return zero. Light-like quaternions can do rotations. The next_rotation function is up to the task.

def next_quadratic(r, p=qt.QH([1, 0, 0, 0]), conj=False, sign_flip=False):
    """Generates another quadratic using a quaternion parameter p,
    if given any quaternion and whether a conjugate or sign flip is needed.

    p is normalized by the square root of the first term of its square, so
    callers do not need to pre-normalize. conj applies a final conjugation
    and sign_flip multiplies by -1; together they cover spatial reflection
    and time reversal as discussed above.
    NOTE(review): the default for p is a mutable qt.QH instance shared
    across calls — confirm nothing downstream mutates it.
    """
    pt_squared = p.square().t

    # Avoid using sp.Abs() so equations can be simplified.
    if isinstance(pt_squared, (int, float)):
        if pt_squared < 0:
            pt_squared *= -1
    else:
        # Symbolic (SymPy) value: rely on its assumption system rather than abs().
        if pt_squared.is_negative:
            pt_squared *= -1

    sqrt_pt_squared = sp.sqrt(pt_squared)

    # A light-like parameter P can rotate but not boost R.
    if sqrt_pt_squared == 0:
        rot_calc = next_rotation(r, p)
    else:
        # Scale p so the first term of its square has absolute value one,
        # then apply the three-triple-product transformation.
        p_normalized = p.product(qt.QH([1/sqrt_pt_squared, 0, 0, 0]))
        rot_calc = triple_triple(r, p_normalized)

    if conj:
        conj_calc = rot_calc.conj()
    else:
        conj_calc = rot_calc

    if sign_flip:
        sign_calc = conj_calc.product(qt.QH([-1, 0, 0, 0]))
    else:
        sign_calc = conj_calc

    # Simplify each component so the returned symbolic expressions stay readable.
    calc_t = sp.simplify(sp.expand(sign_calc.t))
    calc_x = sp.simplify(sp.expand(sign_calc.x))
    calc_y = sp.simplify(sp.expand(sign_calc.y))
    calc_z = sp.simplify(sp.expand(sign_calc.z))

    return qt.QH([calc_t, calc_x, calc_y, calc_z], qtype="L")

display(sp.simplify(next_quadratic(R, P, True, True).t))
display(sp.simplify(next_quadratic(R, P, True, True).x))

is_quadratic(next_quadratic(R, P, True, True))

# No matter what values are used for the parameter $P$, the next_quadratic function will preserve the interval of $R$. Even a light-like interval works:

print(next_quadratic(R, qt.QH([s, s, 0, 0])))
is_quadratic(next_quadratic(R, qt.QH([s, s, 0, 0])))

# Notice how the $y$ and $z$ terms flip positions, but the squaring process will put both into their proper spots in the first term of the square.

# ## The Lorentz Group and Functional Composition with the next_quadratic Function

# The Lorentz group is all possible ways to transform an event in space-time yet preserve the quadratic form:

# $$(t, x, y, z) \rightarrow t^2 - x^2 - y^2 - z^2$$

# The elements of the group are the tuples (t, x, y, z) but not the rotation angles, boost velocities, conjugation and sign flips.
#
# A group is defined as a binary operation on a set of elements that has 4 qualities:
# 1. Closure
# 1. An inverse exists
# 1. There is an identity element
# 1. Associative
#
# The next_quadratic function acts on one element of the group. The binary operation is a composite function built from two next_quadratic functions. Take the result of one action of the next_quadratic function, and have that result go into another round of the next_quadratic function.
def composite_quadratic(r, p1=qt.QH([1, 0, 0, 0]), p2=qt.QH([1, 0, 0, 0]), conj1=False, conj2=False, sign_flip1=False, sign_flip2=False): """A composite function for the next_quadratic function.""" return next_quadratic(next_quadratic(r, p1, conj1, sign_flip1), p2, conj2, sign_flip2) print(composite_quadratic(R)) is_quadratic(composite_quadratic(R)) print(composite_quadratic(R, Qi, Qj, True, True, True, False)) is_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False)) print(composite_quadratic(R, minus_1, Qj, False, True, False, True)) is_quadratic(composite_quadratic(R, minus_1, Qj, False, True, False, True)) print(composite_quadratic(R, bx, P, True, False, True, False)) is_quadratic(composite_quadratic(R, bx, P, True, False, True, False)) print(composite_quadratic(composite_quadratic(R, bx, bx))) is_quadratic(composite_quadratic(composite_quadratic(R, bx, bx))) # Each of these composite functions generates exactly the same quadratic as required to be part of the Lorentz group. These five examples argue for closure: every possible choice for what one puts in the composite_quadratic function will have the same quadratic. I don't have the math skills to prove closure (unless one thinks the earlier general case is enough). # Quaternions are a division algebra. As such, it is reasonable to expect an inverse to exist. Look for one for the $Qi$, $Qk$ parameter case: print(composite_quadratic(R, Qi, Qj, True, True, True, False)) print(composite_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False), Qk)) # Close, but not quite. Add a sign_flip. print(composite_quadratic(composite_quadratic(R, Qi, Qj, True, True, True, False), Qk, sign_flip1=True)) # The is back where we started with the quaternion $R$. Again, this is just an example and not a proof. Some inverses are easier to find than others like pure rotations or pure boosts with a rotation or opposite velocity. 
# The identity composition was shown to do its fine work in the first composite_quadratic(R) example.

# Composite functions are associative, at least according to Wikipedia.

# ## The Difference Between composite_rotation and composite_quadratic

# Both of these composite functions call another function twice, next_rotation and next_quadratic respectively. Both functions do a normalization. The next_rotation normalizes to the norm squared which can be zero if the parameter $P$ is zero, otherwise it is positive. The next_rotation function always does one thing, $P R P^{-1}$. The next_quadratic normalizes to the first term of the square of parameter $P$. That value can be positive, negative, or zero. When the first term of the square is positive or negative, the next_quadratic function treats both cases identically. Three triple quaternion products are used, $P R P^* + \frac{1}{2}((P P R)^* - (P^* P^* R)^*)$. The first term is identical to a rotation so long as the norm is equal to one. Otherwise, it is off just by a scaling factor. The difference happens when it is zero, which indicates the properties of light come into play. It is the lightcone that separates time-like events from space-like events. For a light-like value of the parameter $P$, the triple-triple returns zero, which is not a member of the group. If one uses the first triple, no matter what the norm of the light-like parameter $P$ happens to be, the resulting $R \rightarrow R'$ remains in the group. The rotation group $SO(3)$ is compact, while the Lorentz group $O(1, 3)$ is not. The change in algebra needed for a light-like parameter $P$ may be another way to view this difference.

# ## Degrees of Freedom

# The typical representation of the Lorentz group $O(1, 3)$ says there are six independent variables needed to represent the Lorentz group: three for rotations and three for boosts. Yet when one does two boosts in different directions, it is a mix between a boost and a rotation. This suggests there is no such thing as a completely separate notion of rotations and boosts, that they have a capacity to mix. If true, that decreases the degrees of freedom.
#
# Two spatial rotations will result in a spatial rotation:

print(composite_quadratic(R, qt.QH([0, 1,0,1]), qt.QH([0, 1,1,0])))
is_quadratic(composite_quadratic(R, qt.QH([0, 1,0,1]), qt.QH([0, 1,1,0])))

# Notice that the value of the first squared term is negative. That value gets normalized to negative one in the composite_quadratic function (via the next_quadratic function that gets called twice). What makes these rotations be only spatial is the zero in the first position of the parameter $P$. It is easy enough to look at situations where the first term of the square is negative, and the first term of the parameter is not equal to zero:

print(composite_quadratic(R, qt.QH([4, 5,0,0])))
is_quadratic(composite_quadratic(R, qt.QH([4, 5,0,0])))

# This is both a boost and a rotation. The boost effect can be seen in the first and second terms where there is a positive and negative term (the negative being the term that "doesn't belong", seeing the $x$ in the first term and $t$ in the second). The rotation appears in the sign flips for $y$ and $z$. If the 4 and 5 are switched, there is no rotation of these terms:

print(composite_quadratic(R, qt.QH([5, 4,0,0])))

# The first two terms are exactly the same. Now the last two terms don't flip signs because there is no rotation. Both the (4, 5) and (5, 4) parameter composites will have the same first term for the square. This real-valued quaternion representation makes it possible to see.

# At first blush, one looks into the next_quadratic function and sees six degrees of freedom: four for the quaternion parameter $P$, one for the conjugate operator and one for the sign_flip. These last two are needed to generate spatial reflection and time reversal. The quaternion parameter $P$ normalizes to the first term of the square of the quaternion parameter $P$. This means that once three of the values are chosen, then the value of the fourth one is set by this algebraic constraint. The same thing happens with the composite_rotation function defined earlier: a 4D quaternion may go in, but the way it gets normalized means there is an equivalence class to those quaternions that have a norm of one, and thus only 3 degrees of freedom. Representing the Lorentz group with only five degrees of freedom with this real-valued quaternion representation would be an interesting result if it can be rigorously proved.
Notebooks/triple_products_and_distance.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="4iTGR9dBErZl" colab_type="code" colab={}
import numpy as np
import numpy.linalg as linalg
import scipy as scipy
from scipy.io import wavfile
from scipy.io import savemat
from scipy.fftpack import dct
import matplotlib.pyplot as plt


# + id="4Fal1_uREy_q" colab_type="code" colab={}
def Specgram(X, W, H):
    """A function to compute the spectrogram of a signal
    :param X: N x 1 Audio Signal
    :param W: Window Size
    :param H: Hop Size
    :returns: S, an NWin x W/2 magnitude spectrogram array (one row per frame)
    """
    Q = W/H
    # Warn when the hop size does not evenly divide the window; frames won't tile cleanly.
    if Q - np.floor(Q) > 0:
        print('Warning: Window size is not integer multiple of hop size\n')
    win = np.hamming(W)
    NWin = int(np.floor((len(X) - W)/float(H)) + 1)
    S = np.zeros((NWin, W))
    for i in range(NWin):
        # Window the i-th frame and keep the FFT magnitude only (phase discarded).
        x = X[i*H:i*H+W]
        S[i, :] = np.abs(np.fft.fft(win*x))
    #Second half of the spectrum is redundant for real signals
    if W % 2 == 0:
        #Even Case
        S = S[:, 0:int(W/2)]
    else:
        #Odd Case
        S = S[:, 0:int((W-1)/2)+1]
    return S


# + id="b4jnCO9FE4PH" colab_type="code" colab={}
def getMelFilterbank( Fs, winSize, nbands, minfreq, maxfreq ):
    #Purpose: Return a mel-spaced triangle filterbank
    # :param Fs: sample rate in Hz
    # :param winSize: FFT window size (filters are defined over winSize bins)
    # :param nbands: number of triangular mel bands
    # :param minfreq/maxfreq: frequency range covered by the bank, in Hz
    # :returns: nbands x winSize array; row i is the i-th triangular filter
    #Step 1: Warp to the mel-frequency scale
    melbounds = np.array([minfreq, maxfreq])
    melbounds = 1125*np.log(1 + melbounds/700.0)
    mel = np.linspace(melbounds[0], melbounds[1], nbands)
    # Map the mel-spaced center frequencies back to Hz, then to FFT bin indices.
    binfreqs = 700*(np.exp(mel/1125.0) - 1)
    binbins = np.ceil(((winSize-1)/float(Fs))*binfreqs) #Ceil to the nearest bin
    binbins = np.array(binbins, dtype = np.int64)

    #Step 2: Create mel triangular filterbank
    melfbank = np.zeros((nbands, winSize))
    for i in range(nbands):
        thisbin = binbins[i]
        lbin = thisbin
        if i > 0:
            lbin = binbins[i-1]
        # Edge bands mirror their single neighbor to keep the triangle symmetric.
        rbin = thisbin + (thisbin - lbin)
        if i < nbands-1:
            rbin = binbins[i+1]
        # Rising edge up to the center bin, falling edge after it.
        melfbank[i, lbin:thisbin+1] = np.linspace(0, 1, 1 + (thisbin - lbin))
        melfbank[i, thisbin:rbin+1] = np.linspace(1, 0, 1 + (rbin - thisbin))
    return melfbank


# + id="POuZezqzE8Bv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="07ab2c5a-5997-49c4-8f96-f025f68b0251"
def getAudioNoveltyFn(x, Fs, winSize, hopSize):
    """
    Using techniques from <NAME>. "Beat tracking by dynamic programming."
    Journal of New Music Research 36.1 (2007): 51-60.

    :param x: 1-D audio signal (assumed mono — TODO confirm callers downmix)
    :param Fs: sample rate in Hz
    :param winSize: STFT window size
    :param hopSize: STFT hop size
    :returns: (S, novFn) — the magnitude spectrogram and the audio novelty function
    """
    #First compute mel-spaced STFT
    S = Specgram(x, winSize, hopSize)
    S = np.abs(S)
    # 40 mel bands spanning 30 Hz - 8 kHz; truncate the bank to S's bin count.
    M = getMelFilterbank(Fs, winSize, 40, 30, 8000)
    M = M[:, 0:S.shape[1]]
    X = M.dot(S.T)
    # Spectral flux: half-wave-rectified frame-to-frame increase, summed over bands.
    novFn = X[:, 1::] - X[:, 0:-1]
    novFn[novFn < 0] = 0
    novFn = np.sum(novFn, 0)
    return (S, novFn)

if __name__ == '__main__':
    Fs, X = scipy.io.wavfile.read("journey.wav")
    X = X/(2.0**15) #Audio is loaded in as 16 bit shorts. Convert to float
    winSize = 512
    hopSize = 256
    (S, novFn) = getAudioNoveltyFn(X, Fs, winSize, hopSize)
    # Plot only the first 500 frames so the novelty curve is legible.
    nsamples = 500
    novFn = novFn[0:nsamples]
    t = np.arange(nsamples)*hopSize/float(Fs)
    plt.subplot(211)
    plt.imshow(np.log(S.T), cmap = 'afmhot', aspect = 'auto')
    plt.title("Spectrogram")
    plt.axis('off')
    plt.subplot(212)
    plt.plot(t, novFn)
    plt.title("Audio Novelty Function")
    plt.xlabel("Time (Sec)")
    plt.xlim([0, np.max(t)])
    plt.show()

# + id="zUNJHwd9FBUI" colab_type="code" colab={}


# + id="-OeZx_AUFXdA" colab_type="code" colab={}
MusicFeatures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Upgraded Racing Game

# +
import random


class Game:
    # Board length: first bot whose position reaches n_squares wins.
    n_squares = 10
    winner = None

    def __init__(self, bots, verbose=False):
        # NOTE(review): the bots list is stored by reference and shuffled
        # in place each round — callers sharing the list see the reordering.
        self.bots = bots
        self.verbose = verbose
        self._set_starting_positions()  # calling the method that will reset all positions

    def _set_starting_positions(self):
        """Reset every bot to square 0 so each Game starts a fresh race."""
        for b in self.bots:  # go through every bot in the competition and set the position to 0
            b.position = 0

    def show_board(self):
        """Print one line per square listing the bots currently on it."""
        print("=" * 30)
        board = {i: [] for i in range(self.n_squares + 1)}
        for bot in self.bots:
            board[bot.position].append(bot)
        for square, bots_in_square in board.items():
            print(f"{square}: {bots_in_square}")

    def play_round(self):
        """Let every bot act once (in random order); stop early when someone wins."""
        if self.winner is None:
            # Shuffle so no bot gets a fixed turn-order advantage.
            random.shuffle(self.bots)
            if self.verbose:
                print(self.bots)
            for bot in self.bots:
                self._play_bot(bot)
                if self.winner:
                    break
            # for bot in bots:
            #     bot.direction = 1
        if self.verbose:
            if self.winner:
                print(f"========== Race Over, WINNER: {self.winner} ========== ")
            self.show_board()

    def _play_bot(self, bot):
        """Ask one bot for its action, apply it, and check the win condition."""
        # Bots only see a name -> position snapshot, never other Bot objects.
        bot_position_dictionary = {b.name: b.position for b in self.bots}
        action_str = bot.play(bot_position_dictionary)
        if action_str == "walk":
            pos_from, pos_to = bot.walk()
            if self.verbose:
                print(f"{str(bot):<15} walked from {pos_from} to {pos_to}")
        elif action_str == "sabotage":
            sabotaged_bots = bot.sabotage(self.bots)
            if self.verbose:
                print(f"{str(bot):<15} sabotaged {sabotaged_bots}")
        elif action_str == "faceforward":
            bot.direction = 1
            if self.verbose:
                print(f"{str(bot):<15} faced forward")
        if bot.position >= self.n_squares:
            self.winner = bot


class Bot:
    # Class-level defaults; Game._set_starting_positions assigns per-instance values.
    position = 0
    direction = 1

    def __init__(self, name, strategy):
        # strategy is a callable(bot, bot_positions) -> action string.
        self.name = name
        self.strategy = strategy

    def __repr__(self):
        return f"{self.name}"

    def walk(self):
        """Move one step in the current direction (never below square 0)."""
        from_position = self.position
        self.position = max(0, self.position+self.direction)
        to_position = self.position
        return from_position, to_position

    def sabotage(self, bots):
        """Reverse the direction of every other bot sharing this bot's square."""
        sabotaged_bots = []
        for bot in bots:
            if bot.position == self.position and bot != self:
                bot.direction *= -1
                sabotaged_bots.append(bot)
        return sabotaged_bots

    def play(self, bot_positions):
        """Delegate the action choice to this bot's strategy callable."""
        return self.strategy(self, bot_positions)
# -

# ### strategies

def random_strategy(self, bot_positions):
    """Pick walk or sabotage uniformly at random."""
    return random.choice(["walk", "sabotage"])


# +
# NOTE(review): list_strategy keeps its plan in module-level globals, so all
# bots using it (and all games in the same session) share one cycling list.
original_list = ['walk', 'walk', 'sabotage']
current_list = []

def list_strategy(self, bot_positions):
    """Cycle through a fixed action plan: walk, walk, sabotage, repeat."""
    global current_list  # to allow "write-access" to out-of-function variables
    if current_list == []:
        current_list = original_list.copy()
    return current_list.pop(0)
# -

def always_walk(self, bot_positions):
    """Ignore the board and walk every turn."""
    return "walk"


def underdog(self, bot_positions):
    """Sabotage crowded squares late in the race; otherwise keep moving forward."""
    my_pos = self.position
    bots_at_my_pos = sum([1 for pos in bot_positions.values() if pos == my_pos])
    if bots_at_my_pos > 2 and my_pos > 3:
        return "sabotage"
    else:
        # Recover from being sabotaged before wasting turns walking backwards.
        if self.direction == 1:
            return "walk"
        else:
            return "faceforward"


bots = [
    Bot("Random", random_strategy),
    Bot("List", list_strategy),
    Bot("Walker", always_walk),
    Bot("UnderDog", underdog),
]

# +
from tqdm.auto import tqdm
import pandas as pd

def grand_prix(n):
    """Run n full races with the shared bots list and tally wins per bot."""
    winnings = {b: 0 for b in bots}
    for _ in tqdm(range(n)):
        game = Game(bots, verbose=False)
        while game.winner is None:
            game.play_round()
        winnings[game.winner] += 1
    return winnings

winnings = grand_prix(n=1000)

podium = pd.Series(winnings).sort_values(ascending=False)
podium.plot.bar(grid=True, figsize=(25,6), rot=0)
# -
2020-21_semester2/11_BotRacingUpgrade.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.9.7 64-bit (''.venv'': venv)' # language: python # name: python3 # --- # # 2021, Day 9: Smoke Basin from advent import get from advent.y2021 import d9 # ## Tests d9.solve(get.sample(2021, 9)) # ## Solution d9.solve(get.input(2021, 9)) # ## Benchmarking # %timeit d9.solve(get.input(2021, 9))
main/2021/d9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # 챗봇 튜토리얼 # ================ # **Author:** `<NAME> <https://github.com/MatthewInkawhich>`_ # **번역**: `김진현 <https://github.com/lewha0>`_ # # 이 튜토리얼에서는 순환(recurrent) 시퀀스 투 시퀀스(sequence-to-sequence) # 모델의 재미있고 흥미로운 사용 예를 살펴보려 합니다. 간단한 챗봇을 학습해 # 볼 텐데, 사용할 데이터는 영화 대본으로 구성된 `Cornell Movie-Dialogs(코넬 # 대학교의 영화 속 대화 말뭉치 데이터 # <https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html>`__ # 입니다. # # 대화형 모델은 많은 사람들이 관심을 갖는 인공지능 분야의 연구 주제입니다. # 고객 서비스와 관련된 활용, 온라인 헬프데스크 등 여러 상황에서 챗봇을 # 활용할 수 있습니다. 많은 챗봇이 검색 기반(retrieval-based) 모델을 # 사용하는데, 이는 특정한 형식을 갖춘 질문에 대해 미리 정해진 반응을 # 출력하는 방식입니다. 분야를 특정 회사의 IT 헬프데스크처럼 한정짓는다면 # 이러한 모델을 사용해도 충분합니다. 그러나 이런 모델은 좀 더 일반적인 # 상황에 활용할 수 있을만큼 강력하진 않습니다. 기계를 학습시켜서 사람과 # 여러 주제에 대해 의미 있는 대화를 하게끔 하는 것은 아직 해결되지 않은 # 연구 주제입니다. 그러나 최근에 딥러닝이 유행하면서 여러 가지의 강력한 # 생성 모델이 등장했습니다. 그러한 모델의 한 예인 구글의 `신경 대화 # 모델(Neural Conversational Model) <https://arxiv.org/abs/1506.05869>`__ 은 # 다중 도메인 대화 생성 모델(multi-domain generative conversational models) # 분야에 있어서 큰 진전을 이루었습니다. 우리는 이 튜토리얼을 통해 이러한 # 모델을 PyTorch로 구현해보려 합니다. # # .. figure:: /_static/img/chatbot/bot.png # :align: center # :alt: bot # # .. code:: python # # > hello? (안녕하세요?) # Bot: hello . (안녕하세요.) # > where am I? (여긴 어디죠?) # Bot: you re in a hospital . (병원입니다.) # > who are you? (당신은 누구시죠?) # Bot: i m a lawyer . (변호사입니다.) # > how are you doing? (어떻게 지내세요?) # Bot: i m fine . (잘 지냅니다.) # > are you my friend? (당신은 제 친구인가요?) # Bot: no . (아뇨.) # > you're under arrest (당신을 체포하겠습니다) # Bot: i m trying to help you ! (난 당신을 도우려 하는 겁니다!) # > i'm just kidding (농담이었어요) # Bot: i m sorry . (미안하네요.) # > where are you from? (어디서 오셨어요?) # Bot: san francisco . (샌프란시스코요.) # > it's time for me to leave (전 이제 가봐야겠네요) # Bot: i know . (알겠습니다.) # > goodbye (안녕히 계세요) # Bot: goodbye . 
(안녕히 가세요.) # # **이 튜토리얼의 핵심 내용** # # - `코넬 대학교의 영화 속 대화 말뭉치 데이터셋 # <https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html>`__ 을 # 읽어오고 전처리합니다 # - `Luong의 어텐션(attention) 메커니즘 <https://arxiv.org/abs/1508.04025>`__ 을 # 이용하여 sequence-to-sequence 모델을 구현합니다 # - 미니배치를 이용하여 인코더와 디코더를 함께 학습합니다 # - 탐욕적 탐색 기법(greedy-search)을 사용하는 디코더 모듈을 구현합니다 # - 학습한 챗봇과 대화를 나눠 봅니다 # # **감사의 글** # # 이 튜토리얼은 다음 자료의 도움을 받아 작성하였습니다. # # 1) <NAME>의 pytorch-chatbot 구현체: # https://github.com/ywk991112/pytorch-chatbot # # 2) <NAME>의 practical-pytorch seq2seq-translation 예제: # https://github.com/spro/practical-pytorch/tree/master/seq2seq-translation # # 3) FloydHub의 코넬 대학교의 영화 말뭉치 데이터 전처리 코드: # https://github.com/floydhub/textutil-preprocess-cornell-movie-corpus # # # # 준비 단계 # --------- # # 시작에 앞서, `여기 <https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html>`__ 에서 # ZIP 파일 형태의 데이터를 내려받고, 현재 디렉토리 아래에 ``data/`` 라는 # 디렉토리를 만들어서 내려받은 데이터를 옮겨두시기 바랍니다. # # 그 다음에는, 몇 가지 필요한 도구들을 import 하겠습니다. # # # # + from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import torch from torch.jit import script, trace import torch.nn as nn from torch import optim import torch.nn.functional as F import csv import random import re import os import unicodedata import codecs from io import open import itertools import math USE_CUDA = torch.cuda.is_available() device = torch.device("cuda" if USE_CUDA else "cpu") # - # 데이터 읽기 & 전처리하기 # ------------------------ # # 다음 단계는 데이터 파일의 형식을 재조정한 후, 우리가 작업하기 편한 # 구조로 읽어들이는 것입니다. # # `코넬 대학교의 영화 속 대화 말뭉치 데이터셋 # <https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html>`__ 은 # 영화 속 등장 인물의 대화가 풍부하게 포함된 데이터셋입니다. # # - 영화 속 등장 인물 10,292 쌍이 대화를 220,579번 주고받습니다 # - 영화 617개의 등장 인물 9,035명이 나옵니다 # - 총 발화(utterance) 수는 304,713번입니다 # # 이 데이터셋은 규모도 크고 내용도 다양하며, 격식체와 비격식체, 여러 # 시간대, 여러 감정 상태 등이 두루 포함되어 있습니다. 
우리의 바람은 # 이러한 다양성으로 인해 모델이 견고해지는, 즉 모델이 여러 종류의 입력 # 및 질의에 잘 대응할 수 있게 되는 것입니다. # # 우선은 원본 데이터 파일을 몇 줄 살펴보면서 형식이 어떻게 되어있는지 # 살펴 보겠습니다. # # # # + corpus_name = "cornell movie-dialogs corpus" corpus = os.path.join("data", corpus_name) def printLines(file, n=10): with open(file, 'rb') as datafile: lines = datafile.readlines() for line in lines[:n]: print(line) printLines(os.path.join(corpus, "movie_lines.txt")) # - # 원하는 형식의 데이터 파일로 만들기 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # 편의를 위해 데이터의 형식을 원하는 형태로 만들려고 합니다. 각 줄에 # *질의 문장* 과 *응답 문장* 의 쌍이 탭으로 구분되어 있게끔 하는 것입니다. # # 다음의 함수를 통해 *movie_lines.txt* 원본 데이터 파일을 파싱하려 # 합니다. # # - ``loadLines`` 는 파일에 포함된 대사를 변환하여 항목(대사 ID ``lineID``, # 인물 ID ``characterID``, 영화 ID ``movieID``, 인물 ``character``, 대사 # 내용 ``text``)에 대한 사전 형태로 변환합니다 # - ``loadConversations`` 는 ``loadLines`` 를 통해 읽어들인 # 대사(``lines``)의 항목(``fields``)를 *movie_conversations.txt* 에 나와 # 있는 내용에 맞춰 대화 형태로 묶습니다 # - ``extractSentencePairs`` 는 대화(``conversations``)에서 문장 쌍을 # 추출합니다 # # # # + # 파일에 포함된 대사를 쪼개서 항목에 대한 사전(``dict``) 형태로 변환합니다 def loadLines(fileName, fields): lines = {} with open(fileName, 'r', encoding='iso-8859-1') as f: for line in f: values = line.split(" +++$+++ ") # 항목을 추출합니다 lineObj = {} for i, field in enumerate(fields): lineObj[field] = values[i] lines[lineObj['lineID']] = lineObj return lines # 대사의 항목을 *movie_conversations.txt* 를 참고하여 대화 형태로 묶습니다 def loadConversations(fileName, lines, fields): conversations = [] with open(fileName, 'r', encoding='iso-8859-1') as f: for line in f: values = line.split(" +++$+++ ") # 항목을 추출합니다 convObj = {} for i, field in enumerate(fields): convObj[field] = values[i] # 문자열을 리스트로 변환합니다(convObj["utteranceIDs"] == "['L598485', 'L598486', ...]") utterance_id_pattern = re.compile('L[0-9]+') lineIds = utterance_id_pattern.findall(convObj["utteranceIDs"]) # 대사를 재구성합니다 convObj["lines"] = [] for lineId in lineIds: convObj["lines"].append(lines[lineId]) conversations.append(convObj) return conversations # conversations에서 문장 쌍을 추출합니다 
def extractSentencePairs(conversations): qa_pairs = [] for conversation in conversations: # 대화를 이루는 각 대사에 대해 반복문을 수행합니다 # 대화의 마지막 대사는 (그에 대한 응답이 없으므로) 무시합니다 for i in range(len(conversation["lines"]) - 1): inputLine = conversation["lines"][i]["text"].strip() targetLine = conversation["lines"][i+1]["text"].strip() # 잘못된 샘플은 제거합니다(리스트가 하나라도 비어 있는 경우) if inputLine and targetLine: qa_pairs.append([inputLine, targetLine]) return qa_pairs # - # 이제 이 함수들을 호출하여 새로운 파일인 *formatted_movie_lines.txt* 를 # 만듭니다. # # # # + # 새 파일에 대한 경로를 정의합니다 datafile = os.path.join(corpus, "formatted_movie_lines.txt") delimiter = '\t' # 구분자에 대해 unescape 함수를 호출합니다 delimiter = str(codecs.decode(delimiter, "unicode_escape")) # 대사 사전(dict), 대화 리스트(list), 그리고 각 항목의 이름을 초기화합니다 lines = {} conversations = [] MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"] MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"] # 대사(lines)를 읽어들여 대화(conversations)로 재구성합니다 print("\nProcessing corpus...") lines = loadLines(os.path.join(corpus, "movie_lines.txt"), MOVIE_LINES_FIELDS) print("\nLoading conversations...") conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"), lines, MOVIE_CONVERSATIONS_FIELDS) # 결과를 새로운 csv 파일로 저장합니다 print("\nWriting newly formatted file...") with open(datafile, 'w', encoding='utf-8') as outputfile: writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n') for pair in extractSentencePairs(conversations): writer.writerow(pair) # 몇 줄을 예제 삼아 출력해 봅니다 print("\nSample lines from file:") printLines(datafile) # - # 데이터 읽고 정리하기 # ~~~~~~~~~~~~~~~~~~~~ # # 다음에 해야 할 일은 어휘집을 만들고, 질의/응답 문장 쌍을 메모리로 # 읽어들이는 것입니다. # # 우리가 다루는 대상은 일련의 **단어** 들이며, 따라서 이들을 이산 공간 상의 # 수치(discrete numerical space)로 자연스럽게 대응시키기 어렵다는 점에 # 유의하시기 바랍니다. 따라서 우리는 데이터셋 안에 들어 있는 단어를 인덱스 # 값으로 변환하는 매핑을 따로 만들어야 합니다. # # 이를 위해 우리는 ``Voc`` 라는 클래스를 만들어 단어에서 인덱스로의 # 매핑, 인덱스에서 단어로의 역 매핑, 각 단어의 등장 횟수, 전체 단어 수 # 등을 관리하려 합니다. 
이 클래스는 어휘집에 새로운 단어를 추가하는 # 메서드(``addWord``), 문장에 등장하는 모든 단어를 추가하는 # 메서드(``addSentence``), 그리고 자주 등장하지 않는 단어를 정리하는 # 메서드(``trim``)를 제공합니다. 단어를 정리하는 내용에 대해서는 뒤에서 # 좀 더 자세히 살펴보겠습니다. # # # # + # 기본 단어 토큰 값 PAD_token = 0 # 짧은 문장을 채울(패딩, PADding) 때 사용할 제로 토큰 SOS_token = 1 # 문장의 시작(SOS, Start Of Sentence)을 나타내는 토큰 EOS_token = 2 # 문장의 끝(EOS, End Of Sentence)을 나태는 토큰 class Voc: def __init__(self, name): self.name = name self.trimmed = False self.word2index = {} self.word2count = {} self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"} self.num_words = 3 # SOS, EOS, PAD를 센 것 def addSentence(self, sentence): for word in sentence.split(' '): self.addWord(word) def addWord(self, word): if word not in self.word2index: self.word2index[word] = self.num_words self.word2count[word] = 1 self.index2word[self.num_words] = word self.num_words += 1 else: self.word2count[word] += 1 # 등장 횟수가 기준 이하인 단어를 정리합니다 def trim(self, min_count): if self.trimmed: return self.trimmed = True keep_words = [] for k, v in self.word2count.items(): if v >= min_count: keep_words.append(k) print('keep_words {} / {} = {:.4f}'.format( len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index) )) # 사전을 다시 초기화힙니다 self.word2index = {} self.word2count = {} self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"} self.num_words = 3 # 기본 토큰을 센 것 for word in keep_words: self.addWord(word) # - # 이제 어휘집과 질의/응답 문장 쌍을 재구성하려 합니다. 그러한 데이터를 # 사용하려면 그 전에 약간의 전처리 작업을 수행해야 합니다. # # 우선, ``unicodeToAscii`` 를 이용하여 유니코드 문자열을 아스키로 변환해야 # 합니다. 다음에는 모든 글자를 소문자로 변환하고, 알파벳도 아니고 기본적인 # 문장 부호도 아닌 글자는 제거합니다(정규화, ``normalizeString``). # 마지막으로는 학습할 때의 편의성을 위해서, 길이가 일정 기준을 초과하는, # 즉 ``MAX_LENGTH`` 보다 긴 문장을 제거합니다(``filterPairs``). 
# # # # + MAX_LENGTH = 10 # 고려할 문장의 최대 길이 # 유니코드 문자열을 아스키로 변환합니다 # https://stackoverflow.com/a/518232/2809427 참고 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) # 소문자로 만들고, 공백을 넣고, 알파벳 외의 글자를 제거합니다 def normalizeString(s): s = unicodeToAscii(s.lower().strip()) s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^a-zA-Z.!?]+", r" ", s) s = re.sub(r"\s+", r" ", s).strip() return s # 질의/응답 쌍을 읽어서 voc 객체를 반환합니다 def readVocs(datafile, corpus_name): print("Reading lines...") # 파일을 읽고, 쪼개어 lines에 저장합니다 lines = open(datafile, encoding='utf-8').\ read().strip().split('\n') # 각 줄을 쪼개어 pairs에 저장하고 정규화합니다 pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines] voc = Voc(corpus_name) return voc, pairs # 문장의 쌍 'p'에 포함된 두 문장이 모두 MAX_LENGTH라는 기준보다 짧은지를 반환합니다 def filterPair(p): # EOS 토큰을 위해 입력 시퀀스의 마지막 단어를 보존해야 합니다 return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH # 조건식 filterPair에 따라 pairs를 필터링합니다 def filterPairs(pairs): return [pair for pair in pairs if filterPair(pair)] # 앞에서 정의한 함수를 이용하여 만든 voc 객체와 리스트 pairs를 반환합니다 def loadPrepareData(corpus, corpus_name, datafile, save_dir): print("Start preparing training data ...") voc, pairs = readVocs(datafile, corpus_name) print("Read {!s} sentence pairs".format(len(pairs))) pairs = filterPairs(pairs) print("Trimmed to {!s} sentence pairs".format(len(pairs))) print("Counting words...") for pair in pairs: voc.addSentence(pair[0]) voc.addSentence(pair[1]) print("Counted words:", voc.num_words) return voc, pairs # voc와 pairs를 읽고 재구성합니다 save_dir = os.path.join("data", "save") voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir) # 검증을 위해 pairs의 일부 내용을 출력해 봅니다 print("\npairs:") for pair in pairs[:10]: print(pair) # - # 학습 단계가 빨리 수렴하도록 하는 또 다른 전략은 자주 쓰이지 않는 단어를 # 어휘집에서 제거하는 것입니다. 피처 공간의 크기를 줄이면 모델이 # 학습을 통해 근사하려는 함수의 난이도를 낮추는 효과도 있습니다. 우리는 # 이를 두 단계로 나눠 진행하려 합니다. 
# # 1) ``voc.trim`` 함수를 이용하여 ``MIN_COUNT`` 라는 기준 이하의 단어를 # 제거합니다. # # 2) 제거하기로 한 단어를 포함하는 경우를 pairs에서 제외합니다 # # # # + MIN_COUNT = 3 # 제외할 단어의 기준이 되는 등장 횟수 def trimRareWords(voc, pairs, MIN_COUNT): # MIN_COUNT 미만으로 사용된 단어는 voc에서 제외합니다 voc.trim(MIN_COUNT) # 제외할 단어가 포함된 경우를 pairs에서도 제외합니다 keep_pairs = [] for pair in pairs: input_sentence = pair[0] output_sentence = pair[1] keep_input = True keep_output = True # 입력 문장을 검사합니다 for word in input_sentence.split(' '): if word not in voc.word2index: keep_input = False break # 출력 문장을 검사합니다 for word in output_sentence.split(' '): if word not in voc.word2index: keep_output = False break # 입출력 문장에 제외하기로 한 단어를 포함하지 않는 경우만을 남겨둡니다 if keep_input and keep_output: keep_pairs.append(pair) print("Trimmed from {} pairs to {}, {:.4f} of total".format(len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs))) return keep_pairs # voc와 pairs를 정돈합니다 pairs = trimRareWords(voc, pairs, MIN_COUNT) # - # 모델을 위한 데이터 준비하기 # --------------------------- # # 상당한 노력을 기울여 데이터를 전처리하고, 잘 정리하여 어휘집 객체와 # 문장 쌍의 리스트 형태로 만들어두긴 했지만, 결국 우리가 만들 모델에서 # 사용하는 입력은 수치 값으로 이루어진 torch 텐서입니다. 처리한 데이터를 # 모델에 맞는 형태로 준비하는 방법의 하나가 `seq2seq 변환 튜토리얼 # <https://tutorials.pytorch.kr/intermediate/seq2seq_translation_tutorial.html>`__ # 에 나와 있습니다. 이 튜토리얼에서는 배치 크기로 1을 사용하며, 이는 즉 # 문장에 등장하는 단어를 어휘집에서의 인덱스로 변환하여 모델에 제공하기만 # 하면 된다는 의미입니다. # # 그래도 여러분이 학습 속도나 GPU 병렬 처리 용량을 향상하고 싶다면 # 미니배치를 이용하여 학습해야 할 것입니다. # # 미니배치를 사용한다는 것은 배치에 포함된 문장 길이가 달라질 수 있다는 # 점에 유의해야 한다는 것을 뜻합니다. 같은 배치 안에서 크기가 다른 # 문장을 처리하기 위해서는 배치용 입력 텐서의 모양을 *(max_length, # batch_size)* 로 맞춰야 합니다. 이때 *max_length* 보다 짧은 문장에 # 대해서는 *EOS 토큰* 뒤에 제로 토큰을 덧붙이면 됩니다. # # 영어로 된 문장을 텐서로 변환하기 위해 단순히 그에 대응하는 인덱스를 # 사용하고(``indexesFromSentence``) 제로 토큰을 패딩한다고 해봅시다. # 그러면 텐서의 모양이 *(batch_size, max_length)* 이 되고, 첫 번째 차원에 # 대해 인덱싱을 수행하면 모든 시간대별 문장이 전부 반환될 것입니다. # 그러나 우리는 배치를 시간에 따라, 그리고 배치에 포함된 모든 문장에 # 대해 인덱싱할 수도 있어야 합니다. 따라서 우리는 입력 배치의 모양을 # 뒤집어서 *(max_length, batch_size)* 형태로 만들 것입니다. 
그러고 난 # 후에 첫 번째 차원에 대해 인덱싱하면 배치에 포함된 모든 문장을 시간에 # 대해 인덱싱한 결과를 반환하게 됩니다. 우리는 이 뒤집기 작업을 # ``zeroPadding`` 함수를 이용하여 묵시적으로 수행할 것입니다. # # .. figure:: /_static/img/chatbot/seq2seq_batches.png # :align: center # :alt: batches # # ``inputVar`` 함수는 문장을 텐서로 변환하는, 그리고 궁극적으로는 제로 # 패딩하여 올바른 모양으로 맞춘 텐서를 만드는 작업을 수행합니다. 이 # 함수는 각 배치에 포함된 시퀀스의 길이(``lengths``)로 구성된 텐서도 같이 # 반환합니다. 그리고 우리는 이를 나중에 디코더로 넘겨줄 것입니다. # # ``outputVar`` 함수는 ``inputVar`` 와 비슷한 작업을 수행하지만, ``lengths`` # 텐서를 반환하는 대신에 이진 마스크로 구성된 텐서와 목표 문장의 최대 # 길이를 같이 반환합니다. 이진 마스크 텐서는 출력에 해당하는 목표 텐서와 # 그 모양이 같지만, 패딩 토큰(*PAD_token*)에 해당하는 경우에는 값이 0이며 # 나머지 경우의 값은 1입니다. # # ``batch2TrainData`` 는 단순히 여러 쌍을 입력으로 받아서, 앞서 설명한 # 함수를 이용하여 입력 및 목표 텐서를 구하여 반환합니다. # # # # + def indexesFromSentence(voc, sentence): return [voc.word2index[word] for word in sentence.split(' ')] + [EOS_token] def zeroPadding(l, fillvalue=PAD_token): return list(itertools.zip_longest(*l, fillvalue=fillvalue)) def binaryMatrix(l, value=PAD_token): m = [] for i, seq in enumerate(l): m.append([]) for token in seq: if token == PAD_token: m[i].append(0) else: m[i].append(1) return m # 입력 시퀀스 텐서에 패딩한 결과와 lengths를 반환합니다 def inputVar(l, voc): indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l] lengths = torch.tensor([len(indexes) for indexes in indexes_batch]) padList = zeroPadding(indexes_batch) padVar = torch.LongTensor(padList) return padVar, lengths # 패딩한 목표 시퀀스 텐서, 패딩 마스크, 그리고 최대 목표 길이를 반환합니다 def outputVar(l, voc): indexes_batch = [indexesFromSentence(voc, sentence) for sentence in l] max_target_len = max([len(indexes) for indexes in indexes_batch]) padList = zeroPadding(indexes_batch) mask = binaryMatrix(padList) mask = torch.ByteTensor(mask) padVar = torch.LongTensor(padList) return padVar, mask, max_target_len # 입력 배치를 이루는 쌍에 대한 모든 아이템을 반환합니다 def batch2TrainData(voc, pair_batch): pair_batch.sort(key=lambda x: len(x[0].split(" ")), reverse=True) input_batch, output_batch = [], [] for pair in pair_batch: input_batch.append(pair[0]) 
output_batch.append(pair[1]) inp, lengths = inputVar(input_batch, voc) output, mask, max_target_len = outputVar(output_batch, voc) return inp, lengths, output, mask, max_target_len # 검증용 예시 small_batch_size = 5 batches = batch2TrainData(voc, [random.choice(pairs) for _ in range(small_batch_size)]) input_variable, lengths, target_variable, mask, max_target_len = batches print("input_variable:", input_variable) print("lengths:", lengths) print("target_variable:", target_variable) print("mask:", mask) print("max_target_len:", max_target_len) # - # 모델 정의하기 # ------------- # # Seq2Seq 모델 # ~~~~~~~~~~~~ # # 우리 챗봇의 두뇌에 해당하는 모델은 sequence-to-sequence (seq2seq) # 모델입니다. seq2seq 모델의 목표는 가변 길이 시퀀스를 입력으로 받고, # 크기가 고정된 모델을 이용하여, 가변 길이 시퀀스를 출력으로 반환하는 # 것입니다. # # `Sutskever 등 <https://arxiv.org/abs/1409.3215>`__ 은 두 개의 독립된 # 순환 신경망을 같이 이용하여 이러한 목적을 달성할 수 있음을 발견했습니다. # RNN 하나는 **인코더** 로, 가변 길이 입력 시퀀스를 고정된 길이의 문맥 # 벡터(context vector)로 인코딩합니다. 이론상 문맥 벡터(RNN의 마지막 # 은닉 레이어)는 봇에게 입력으로 주어지는 질의 문장에 대한 의미론적 정보를 # 담고 있을 것입니다. 두 번째 RNN은 **디코더** 입니다. 디코더는 단어 하나와 # 문맥 벡터를 입력으로 받고, 시퀀스의 다음 단어가 무엇일지를 추론하여 # 반환하며, 다음 단계에서 사용할 은닉 상태도 같이 반환합니다. # # .. figure:: /_static/img/chatbot/seq2seq_ts.png # :align: center # :alt: model # # 그림 출처: # https://jeddy92.github.io/JEddy92.github.io/ts_seq2seq_intro/ # # # # 인코더 # ~~~~~~ # # 인코더 RNN은 입력 시퀀스를 토큰 단위로(예를 들어, 단어 단위로) 한번에 # 하나씩 살펴보며 진행합니다. 그리고 각 단계마다 "출력" 벡터와 "은닉 # 상태" 벡터를 반환합니다. 은닉 상태 벡터는 다음 단계를 진행할 때 같이 # 사용되며, 출력 벡터는 차례대로 기록됩니다. 인코더는 시퀀스의 각 지점에 # 대해 파악한 문맥을 고차원 공간에 있는 점들의 집합으로 변환합니다. # 나중에 디코더는 이를 이용하여 주어진 문제에 대해 의미 있는 출력을 # 구할 것입니다. # # 인코더의 핵심 부분에는 다중 레이어 게이트 순환 유닛(multi-layered Gated # Recurrent Unit)이 있습니다. 이는 `Cho 등 <https://arxiv.org/pdf/1406.1078v3.pdf>`__ # 이 2014년에 고안한 것입니다. 우리는 GRU를 양방향으로 변환한 형태를 # 사용할 것이며, 이는 본질적으로 두 개의 독립된 RNN이 존재한다는 # 의미입니다. 하나는 입력 시퀀스를 원래 시퀀스에서의 순서로 처리하며, # 다른 하나는 입력 시퀀스를 역순으로 처리합니다. 단계마다 각 네트워크의 # 출력을 합산합니다. 양방향 GRU를 사용하면 과거와 미래의 문맥을 함께 # 인코딩할 수 있다는 장점이 있습니다. # # 양방향 RNN: # # .. 
figure:: /_static/img/chatbot/RNN-bidirectional.png # :width: 70% # :align: center # :alt: rnn_bidir # # 그림 출처: https://colah.github.io/posts/2015-09-NN-Types-FP/ # # ``embedding`` 레이어가 단어 인덱스를 임의 크기의 피처 공간으로 # 인코딩하는 데 사용되었음에 유의하기 바랍니다. 우리의 모델에서는 이 # 레이어가 각 단어를 크기가 *hidden_size* 인 피처 공간으로 매핑할 # 것입니다. 학습을 거치면 서로 뜻이 유사한 단어는 의미적으로 유사하게 # 인코딩될 것입니다. # # 마지막으로, RNN 모듈에 패딩된 배치를 보내려면 RNN과 연결된 부분에서 # 패킹 및 언패킹하는 작업을 수행해야 합니다. 각각은 # ``nn.utils.rnn.pack_padded_sequence`` 와 # ``nn.utils.rnn.pad_packed_sequence`` 를 통해 수행할 수 있습니다. # # **계산 그래프:** # # 1) 단어 인덱스를 임베딩으로 변환합니다. # 2) RNN 모듈을 위한 패딩된 배치 시퀀스를 패킹합니다. # 3) GRU로 포워드 패스를 수행합니다. # 4) 패딩을 언패킹합니다. # 5) 양방향 GRU의 출력을 합산합니다. # 6) 출력과 마지막 은닉 상태를 반환합니다. # # **입력:** # # - ``input_seq``: 입력 시퀀스 배치. shape=\ *(max_length, # batch_size)* # - ``input_lengths``: 배치에 포함된 각 문장의 길이로 구성된 리스트. # shape=\ *(batch_size)* # - ``hidden``: 은닉 상태. shape=\ *(n_layers x num_directions, # batch_size, hidden_size)* # # **출력:** # # - ``outputs``: GRU의 마지막 은닉 레이어에 대한 출력 피처 값(양방향 # (출력을 합산한 것). shape=\ *(max_length, batch_size, hidden_size)* # - ``hidden``: GRU의 최종 은닉 상태. shape=\ *(n_layers x # num_directions, batch_size, hidden_size)* # # # # class EncoderRNN(nn.Module): def __init__(self, hidden_size, embedding, n_layers=1, dropout=0): super(EncoderRNN, self).__init__() self.n_layers = n_layers self.hidden_size = hidden_size self.embedding = embedding # GRU를 초기화합니다. input_size와 hidden_size 매개변수는 둘 다 'hidden_size'로 # 둡니다. 이는 우리 입력의 크기가 hideen_size 만큼의 피처를 갖는 단어 임베딩이기 # 때문입니다. 
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout), bidirectional=True) def forward(self, input_seq, input_lengths, hidden=None): # 단어 인덱스를 임베딩으로 변환합니다 embedded = self.embedding(input_seq) # RNN 모듈을 위한 패딩된 배치 시퀀스를 패킹합니다 packed = nn.utils.rnn.pack_padded_sequence(embedded, input_lengths) # GRU로 포워드 패스를 수행합니다 outputs, hidden = self.gru(packed, hidden) # 패딩을 언패킹합니다 outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs) # 양방향 GRU의 출력을 합산합니다 outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:] # 출력과 마지막 은닉 상태를 반환합니다 return outputs, hidden # 디코더 # ~~~~~~ # # 디코더 RNN은 토큰 단위로 응답 문장을 생성하는 역할을 수행합니다. 이때 # 인코더의 문맥 벡터를 사용하며, 내부 은닉 상태에 따라 시퀀스의 다음 # 단어를 생성하게 됩니다. 디코더는 *EOS_token*, 즉 문장의 끝을 나타내는 # 토큰을 출력할 때까지 계속 단어를 생성합니다. 원래의 seq2seq 디코더에는 # 알려진 문제점이 있습니다. 만약 우리가 입력 시퀀스의 의미를 인코딩할 # 때 문맥 벡터에만 전적으로 의존한다면, 그 과정 중에 정보 손실이 일어날 # 가능성이 높다는 것입니다. 이는 특히 입력 시퀀스의 길이가 길 때 그러하며, # 이 때문에 디코더의 기능이 크게 제한될 수 있습니다. # # 이를 해결하기 위한 방편으로, `Bahdanau 등 # <https://arxiv.org/abs/1409.0473>`__ 은 '어텐션 메커니즘'을 # 고안했습니다. 이는 디코더가 매 단계에 대해 고정된 문맥을 계속 사용하는 # 것이 아니라, 입력 시퀀스의 특정 부분에 집중하게 하는 방식입니다. # # 높은 차원에서 이야기 하자면, 어텐션은 디코더의 현재 은닉 상태와 인코더의 # 출력을 바탕으로 계산됩니다. 출력되는 어텐션 가중치는 입력 시퀀스와 # 동일한 모양을 가집니다. 따라서 이를 인코더의 출력과 곱할 수 있고, 그 # 결과로 얻게 되는 가중치 합은 인코더의 출력에서 어느 부분에 집중해야 # 할지를 알려줍니다. `<NAME> <https://github.com/spro>`__ # 의 그림에 이러한 내용이 잘 설명되어 있습니다. # # .. figure:: /_static/img/chatbot/attn2.png # :align: center # :alt: attn2 # # `Luong 등 <https://arxiv.org/abs/1508.04025>`__ 은 Bahdanau의 기초 연구를 # 더욱 발전시킨 '전역(global) 어텐션'을 제안했습니다. '전역 어텐션'의 # 핵심적인 차이점은 인코더의 은닉 상태를 모두 고려한다는 점입니다. 이는 # Bahdanau 등의 '지역(local) 어텐션' 방식이 현재 시점에 대한 인코더의 # 은닉 상태만을 고려한다는 점과 다른 부분입니다. '전역 어텐션'의 또 다른 # 차이점은 어텐션에 대한 가중치, 혹은 에너지를 계산할 때 현재 시점에 대한 # 디코더의 은닉 상태만을 사용한다는 점입니다. Bahdanau 등은 어텐션을 # 계산할 때 디코더의 이전 단계 상태에 대한 정보를 활용합니다. 또한 Luong 등의 # 방법에서는 인코더의 출력과 디코더의 출력에 대한 어텐션 에너지를 계산하는 # 방법을 제공하며, 이를 '점수 함수(score function)'라 부릅니다. # # .. 
figure:: /_static/img/chatbot/scores.png # :width: 60% # :align: center # :alt: scores # # 이때 $h_t$ 는 목표 디코더의 현재 상태를, $\bar{h}_s$ 는 인코더의 # 모든 상태를 뜻합니다. # # 종합해 보면, 전역 어텐션 메커니즘을 다음 그림과 같이 요약할 수 있을 # 것입니다. 우리가 '어텐션 레이어'를 ``Attn`` 라는 독립적인 ``nn.Module`` 로 # 구현할 것임에 유의하기 바랍니다. 이 모듈의 출력은 모양이 *(batch_size, 1, # max_length)* 인 정규화된 softmax 가중치 텐서입니다. # # .. figure:: /_static/img/chatbot/global_attn.png # :align: center # :width: 60% # :alt: global_attn # # # # Luong 어텐션 레이어 class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method if self.method not in ['dot', 'general', 'concat']: raise ValueError(self.method, "is not an appropriate attention method.") self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(hidden_size)) def dot_score(self, hidden, encoder_output): return torch.sum(hidden * encoder_output, dim=2) def general_score(self, hidden, encoder_output): energy = self.attn(encoder_output) return torch.sum(hidden * energy, dim=2) def concat_score(self, hidden, encoder_output): energy = self.attn(torch.cat((hidden.expand(encoder_output.size(0), -1, -1), encoder_output), 2)).tanh() return torch.sum(self.v * energy, dim=2) def forward(self, hidden, encoder_outputs): # Attention 가중치(에너지)를 제안된 방법에 따라 계산합니다 if self.method == 'general': attn_energies = self.general_score(hidden, encoder_outputs) elif self.method == 'concat': attn_energies = self.concat_score(hidden, encoder_outputs) elif self.method == 'dot': attn_energies = self.dot_score(hidden, encoder_outputs) # max_length와 batch_size의 차원을 뒤집습니다 attn_energies = attn_energies.t() # 정규화된 softmax 확률 점수를 반환합니다 (차원을 늘려서) return F.softmax(attn_energies, dim=1).unsqueeze(1) # 이처럼 어텐션 서브모듈을 정의하고 나면 실제 디코더 모델을 구현할 수 # 있게 됩니다. 디코더에 대해서는 매 시간마다 배치를 하나씩 수동으로 # 제공하려 합니다. 
이는 임베딩된 단어 텐서와 GRU 출력의 모양이 둘 다 # *(1, batch_size, hidden_size)* 라는 의미입니다. # # **계산 그래프:** # # 1) 현재의 입력 단어에 대한 임베딩을 구합니다. # 2) 무방향 GRU로 포워드 패스를 수행합니다. # 3) (2)에서 구한 현재의 GRU 출력을 바탕으로 어텐션 가중치를 계산합니다. # 4) 인코더 출력에 어텐션을 곱하여 새로운 "가중치 합" 문맥 벡터를 구합니다. # 5) Luong의 논문에 나온 식 5를 이용하여 가중치 문맥 벡터와 GRU 출력을 결합합니다. # 6) Luong의 논문에 나온 식 6을 이용하여(softmax 없이) 다음 단어를 예측합니다. # 7) 출력과 마지막 은닉 상태를 반환합니다. # # **입력:** # # - ``input_step``: 입력 시퀀스 배치에 대한 한 단위 시간(한 단어). # shape=\ *(1, batch_size)* # - ``last_hidden``: GRU의 마지막 은닉 레이어. shape=\ *(n_layers x # num_directions, batch_size, hidden_size)* # - ``encoder_outputs``: 인코더 모델의 출력. shape=\ *(max_length, # batch_size, hidden_size)* # # **출력:** # # - ``output``: 각 단어가 디코딩된 시퀀스에서 다음 단어로 사용되었을 # 때 적합할 확률을 나타내는 정규화된 softmax 텐서. # shape=\ *(batch_size, voc.num_words)* # - ``hidden``: GRU의 마지막 은닉 상태. shape=\ *(n_layers x # num_directions, batch_size, hidden_size)* # # # class LuongAttnDecoderRNN(nn.Module): def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1): super(LuongAttnDecoderRNN, self).__init__() # 참조를 보존해 둡니다 self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout = dropout # 레이어를 정의합니다 self.embedding = embedding self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout)) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size, output_size) self.attn = Attn(attn_model, hidden_size) def forward(self, input_step, last_hidden, encoder_outputs): # 주의: 한 단위 시간에 대해 한 단계(단어)만을 수행합니다 # 현재의 입력 단어에 대한 임베딩을 구합니다 embedded = self.embedding(input_step) embedded = self.embedding_dropout(embedded) # 무방향 GRU로 포워드 패스를 수행합니다 rnn_output, hidden = self.gru(embedded, last_hidden) # 현재의 GRU 출력을 바탕으로 어텐션 가중치를 계산합니다 attn_weights = self.attn(rnn_output, encoder_outputs) # 인코더 출력에 어텐션을 곱하여 새로운 "가중치 합" 문맥 벡터를 구합니다 context = 
attn_weights.bmm(encoder_outputs.transpose(0, 1)) # Luong의 논문에 나온 식 5를 이용하여 가중치 문맥 벡터와 GRU 출력을 결합합니다 rnn_output = rnn_output.squeeze(0) context = context.squeeze(1) concat_input = torch.cat((rnn_output, context), 1) concat_output = torch.tanh(self.concat(concat_input)) # Luong의 논문에 나온 식 6을 이용하여 다음 단어를 예측합니다 output = self.out(concat_output) output = F.softmax(output, dim=1) # 출력과 마지막 은닉 상태를 반환합니다 return output, hidden # 학습 프로시저 정의하기 # ---------------------- # # Masked loss # ~~~~~~~~~~~ # # 우리는 패딩된 시퀀스 배치를 다루기 때문에 손실을 계산할 때 단순히 텐서의 # 모든 원소를 고려할 수는 없습니다. 우리는 ``maskNLLLoss`` 를 정의하여 # 디코더의 출력 텐서, 목표 텐서, 이진 마스크 텐서를 바탕으로 손실을 계산하려 # 합니다. 이 손실 함수에서는 마스크 텐서의 *1* 에 대응하는 원소에 대한 음의 # 로그 우도 값의 평균을 계산합니다. # # # def maskNLLLoss(inp, target, mask): nTotal = mask.sum() crossEntropy = -torch.log(torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)) loss = crossEntropy.masked_select(mask).mean() loss = loss.to(device) return loss, nTotal.item() # 한 번의 학습 단계 # ~~~~~~~~~~~~~~~~~ # # ``train`` 함수에 학습을 한 단계(입력 배치 한 개에 대한) 진행하는 알고리즘이 # 나와 있습니다. # # 우리는 수렴이 잘 되도록 몇 가지 영리한 전략을 사용해보려 합니다. # # - 첫 번째 전략은 **teacher forcing** 을 사용하는 것입니다. 이는 # ``teacher_forcing_ratio`` 로 정의된 확률에 따라, 디코더의 이번 단계 # 예측값 대신에 현재의 목표 단어를 디코더의 다음 입력 값으로 활용하는 # 것입니다. 이 기법은 디코더의 보조 바퀴처럼 작용하여 효율적으로 학습될 수 # 있게 도와 줍니다. 하지만 teacher forcing 기법은 추론 과정에서 모델이 # 불안정 해지도록 할 수도 있는데, 이는 디코더가 학습 과정에서 자신의 출력 # 시퀀스를 직접 만들어 볼 기회를 충분히 제공받지 못할 수 있기 때문입니다. # 따라서 우리는 ``teacher_forcing_ratio`` 를 어떻게 설정해 두었는지에 # 주의를 기울여야 하며, 수렴이 빨리 되었다고 속아 넘어가서는 안 됩니다. # # - 우리가 구현한 두 번째 전략은 **gradient clipping** 입니다. 이는 소위 # '그라디언트 폭발' 문제를 해결하기 위해 널리 사용되는 기법입니다. 핵심은 # 그라디언트를 클리핑 하거나 임계값을 둠으로써, 그라디언트가 지수 # 함수적으로 증가하거나 오버플로를 일으키는(NaN) 경우를 막고, 비용 함수의 # 급격한 경사를 피하겠다는 것입니다. # # .. figure:: /_static/img/chatbot/grad_clip.png # :align: center # :width: 60% # :alt: grad_clip # # 그림 출처: Goodfellow 등 저. *Deep Learning*. 2016. https://www.deeplearningbook.org/ # # **작업 절차:** # # 1) 전체 입력 배치에 대하여 인코더로 포워드 패스를 수행합니다. 
# 2) 디코더의 입력을 SOS_token로, 은닉 상태를 인코더의 마지막 은닉 상태로 초기화합니다. # 3) 입력 배치 시퀀스를 한 번에 하나씩 디코더로 포워드 패스합니다. # 4) Teacher forcing을 사용하는 경우, 디코더의 다음 입력을 현재의 목표로 둡니다. 그렇지 않으면 디코더의 다음 입력을 현재 디코더의 출력으로 둡니다. # 5) 손실을 계산하고 누적합니다. # 6) 역전파를 수행합니다. # 7) 그라디언트를 클리핑 합니다. # 8) 인코더 및 디코더 모델의 매개변수를 갱신합니다. # # # <div class="alert alert-danger"><h4>Warning</h4><p>PyTorch의 RNN 모듈(``RNN``, ``LSTM``, ``GRU``)은 전체 입력 시퀀스(또는 # 시퀀스의 배치)를 단순히 넣어주기만 하면 다른 비순환 레이어처럼 사용할 수 # 있습니다. 우리는 ``encoder`` 에서 ``GRU`` 레이어를 이런 식으로 사용합니다. # 그 안이 실제로 어떻게 되어 있는지를 살펴보면, 매 시간 단계마다 은닉 상태를 # 계산하는 반복 프로세스가 존재합니다. 또 다른 방법은, 이 모듈을 매번 한 단위 # 시간만큼 수행할 수도 있습니다. 그 경우에는 우리가 ``decoder`` 모델을 다룰 # 때처럼, 학습 과정에서 수동으로 시퀀스에 대해 반복 작업을 수행해 주어야 # 합니다. 이 모듈에 대해 모델의 개념을 확실히 갖고만 있다면, 순차 모델을 # 구현하는 것도 매우 단순할 것입니다.</p></div> # # # # def train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip, max_length=MAX_LENGTH): # 제로 그라디언트 encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() # device 옵션을 설정합니다 input_variable = input_variable.to(device) target_variable = target_variable.to(device) mask = mask.to(device) # Lengths for rnn packing should always be on the cpu lengths = lengths.to("cpu") # 변수를 초기화합니다 loss = 0 print_losses = [] n_totals = 0 # 인코더로 포워드 패스를 수행합니다 encoder_outputs, encoder_hidden = encoder(input_variable, lengths) # 초기 디코더 입력을 생성합니다(각 문장을 SOS 토큰으로 시작합니다) decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]]) decoder_input = decoder_input.to(device) # 디코더의 초기 은닉 상태를 인코더의 마지막 은닉 상태로 둡니다 decoder_hidden = encoder_hidden[:decoder.n_layers] # 이번 반복에서 teacher forcing을 사용할지를 결정합니다 use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False # 배치 시퀀스를 한 번에 하나씩 디코더로 포워드 패스합니다 if use_teacher_forcing: for t in range(max_target_len): decoder_output, decoder_hidden = decoder( decoder_input, decoder_hidden, encoder_outputs ) # Teacher forcing 사용: 다음 입력을 현재의 목표로 둡니다 decoder_input = 
target_variable[t].view(1, -1) # 손실을 계산하고 누적합니다 mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t]) loss += mask_loss print_losses.append(mask_loss.item() * nTotal) n_totals += nTotal else: for t in range(max_target_len): decoder_output, decoder_hidden = decoder( decoder_input, decoder_hidden, encoder_outputs ) # Teacher forcing 미사용: 다음 입력을 디코더의 출력으로 둡니다 _, topi = decoder_output.topk(1) decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]]) decoder_input = decoder_input.to(device) # 손실을 계산하고 누적합니다 mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t]) loss += mask_loss print_losses.append(mask_loss.item() * nTotal) n_totals += nTotal # 역전파를 수행합니다 loss.backward() # 그라디언트 클리핑: 그라디언트를 제자리에서 수정합니다 _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip) _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip) # 모델의 가중치를 수정합니다 encoder_optimizer.step() decoder_optimizer.step() return sum(print_losses) / n_totals # 학습 단계 # ~~~~~~~~~ # # 이제 마지막으로 전체 학습 프로시저와 데이터를 하나로 엮을 때가 # 되었습니다. ``trainIters`` 함수는 주어진 모델, optimizer, 데이터 등을 # 토대로 학습을 ``n_iterations`` 번의 단계만큼 진행하는 역할을 담당합니다. # 이 함수는 자기 자신을 살 설명하고 있는 편인데, 무거운 작업을 ``train`` # 함수에 옮겨 놓았기 때문입니다. # # 한 가지 주의할 점은 우리가 모델을 저장하려 할 때, 인코더와 디코더의 # state_dicts (매개변수), optimizer의 state_dicts, 손실, 진행 단계 수 # 등을 tarball로 만들어 저장한다는 점입니다. 모델을 이러한 방식으로 # 저장하면 checkpoint에 대해 아주 높은 수준의 유연성을 확보할 수 있게 # 됩니다. Checkpoint를 불러오고 나면, 우리는 모델 매개변수를 이용하여 # 예측을 진행할 수도 있고, 이전에 멈췄던 부분부터 학습을 계속 진행할 # 수도 있게 됩니다. 
# # # def trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip, corpus_name, loadFilename): # 각 단계에 대한 배치를 읽어옵니다 training_batches = [batch2TrainData(voc, [random.choice(pairs) for _ in range(batch_size)]) for _ in range(n_iteration)] # 초기화 print('Initializing ...') start_iteration = 1 print_loss = 0 if loadFilename: start_iteration = checkpoint['iteration'] + 1 # 학습 루프 print("Training...") for iteration in range(start_iteration, n_iteration + 1): training_batch = training_batches[iteration - 1] # 배치에서 각 필드를 읽어옵니다 input_variable, lengths, target_variable, mask, max_target_len = training_batch # 배치에 대해 학습을 한 단계 진행합니다 loss = train(input_variable, lengths, target_variable, mask, max_target_len, encoder, decoder, embedding, encoder_optimizer, decoder_optimizer, batch_size, clip) print_loss += loss # 경과를 출력합니다 if iteration % print_every == 0: print_loss_avg = print_loss / print_every print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(iteration, iteration / n_iteration * 100, print_loss_avg)) print_loss = 0 # Checkpoint를 저장합니다 if (iteration % save_every == 0): directory = os.path.join(save_dir, model_name, corpus_name, '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size)) if not os.path.exists(directory): os.makedirs(directory) torch.save({ 'iteration': iteration, 'en': encoder.state_dict(), 'de': decoder.state_dict(), 'en_opt': encoder_optimizer.state_dict(), 'de_opt': decoder_optimizer.state_dict(), 'loss': loss, 'voc_dict': voc.__dict__, 'embedding': embedding.state_dict() }, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint'))) # 평가 정의하기 # ------------- # # 모델을 학습시키고 나면 직접 봇과 대화를 나눠보고 싶어질 것입니다. 그러려면 # 먼저 모델이 인코딩된 입력을 어떻게 디코딩할지를 정의해줘야 합니다. # # 탐욕적 디코딩 # ~~~~~~~~~~~~~ # # 탐욕적 디코딩(Greedy decoding)은 우리가 학습 단계에서 teacher forcing을 # 적용하지 않았을 때 사용한 디코딩 방법입니다. 
달리 말하면, 각 단계에 대해 # 단순히 ``decoder_output`` 에서 가장 높은 softmax값을 갖는 단어를 선택하는 # 방식입니다. 이 디코딩 방법은 한 번의 단계에 대해서는 최적입니다. # # 우리는 탐욕적 디코딩 연산을 수행할 수 있도록 ``GreedySearchDecoder`` # 클래스를 만들었습니다. 수행 과정에서 이 클래스의 인스턴스는 모양이 # *(input_seq length, 1)* 인 입력 시퀀스(``input_seq``), 조종할 입력 # 길이(``input_length``) 텐서, 그리고 응답 문장 길이의 제한을 나타내는 # ``max_length`` 를 입력으로 받습니다. 입력 시퀀서는 다음과 같은 계산 그래프에 # 의해 평가됩니다. # # **계산 그래프:** # # 1) 인코더 모델로 입력을 포워드 패스합니다. # 2) 인코더의 마지막 은닉 레이어가 디코더의 첫 번째 은닉 레이어의 입력이 되도록 준비합니다. # 3) 디코더의 첫 번째 입력을 SOS_token으로 초기화합니다. # 4) 디코더가 단어를 덧붙여 나갈 텐서를 초기화합니다. # 5) 반복적으로 각 단계마다 하나의 단어 토큰을 디코딩합니다. # a) 디코더로의 포워드 패스를 수행합니다. # b) 가장 가능성 높은 단어 토큰과 그 softmax 점수를 구합니다. # c) 토큰과 점수를 기록합니다. # d) 현재의 토큰을 디코더의 다음 입력으로 준비시킵니다. # 6) 단어 토큰과 점수를 모아서 반환합니다. # # # class GreedySearchDecoder(nn.Module): def __init__(self, encoder, decoder): super(GreedySearchDecoder, self).__init__() self.encoder = encoder self.decoder = decoder def forward(self, input_seq, input_length, max_length): # 인코더 모델로 입력을 포워드 패스합니다 encoder_outputs, encoder_hidden = self.encoder(input_seq, input_length) # 인코더의 마지막 은닉 레이어가 디코더의 첫 번째 은닉 레이어의 입력이 되도록 준비합니다 decoder_hidden = encoder_hidden[:decoder.n_layers] # 디코더의 첫 번째 입력을 SOS_token으로 초기화합니다 decoder_input = torch.ones(1, 1, device=device, dtype=torch.long) * SOS_token # 디코더가 단어를 덧붙여 나갈 텐서를 초기화합니다 all_tokens = torch.zeros([0], device=device, dtype=torch.long) all_scores = torch.zeros([0], device=device) # 반복적으로 각 단계마다 하나의 단어 토큰을 디코딩합니다 for _ in range(max_length): # 디코더로의 포워드 패스를 수행합니다 decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, encoder_outputs) # 가장 가능성 높은 단어 토큰과 그 softmax 점수를 구합니다 decoder_scores, decoder_input = torch.max(decoder_output, dim=1) # 토큰과 점수를 기록합니다 all_tokens = torch.cat((all_tokens, decoder_input), dim=0) all_scores = torch.cat((all_scores, decoder_scores), dim=0) # 현재의 토큰을 디코더의 다음 입력으로 준비시킵니다(차원을 증가시켜서) decoder_input = torch.unsqueeze(decoder_input, 0) # 단어 토큰과 점수를 모아서 반환합니다 return all_tokens, all_scores # 내 텍스트 평가하기 # 
~~~~~~~~~~~~~~~~~~ # # 이제 디코딩 모델을 정의했으니, 문자열로 된 입력 시퀀스를 평가하는 함수를 # 작성해볼 수 있을 것입니다. ``evaluate`` 함수에 입력 시퀀스를 낮은 # 레벨에서 어떻게 처리할지가 나와 있습니다. 우리는 먼저 문장을 # *batch_size==1* 이고 단어 인덱스로 구성된 입력 배치 형태로 만듭니다. # 이를 위해 문장의 각 단어를 그에 대응하는 인덱스로 변환하고, 차원을 # 뒤집어서 모델에 맞는 입력 형태로 변환합니다. 우리는 입력 시퀀스의 길이를 # 저장하고 있는 ``lengths`` 텐서도 만듭니다. 이 경우에는 ``lengths`` 가 # 스칼라 값이 되는데, 우리는 한 번에 한 문장만 평가하기 때문입니다(batch_size==1). # 다음으로는 ``GreedySearchDecoder`` 의 객체(``searcher``)를 이용하여 # 응답 문장 텐서를 디코딩합니다. 마지막으로, 응답 인덱스를 단어로 변환하고 # 디코딩된 단어의 리스트를 반환합니다. # # ``evaluateInput`` 은 우리의 챗봇에 대한 인터페이스 역할을 수행합니다. # 이를 호출하면 입력 텍스트 필드가 생성되는데, 거기에 우리의 질의 문장을 # 입력해볼 수 있습니다. 입력 문장을 타이핑하고 *엔터* 를 누르면, 입력한 # 텍스트가 학습 데이터와 같은 방식으로 정규화되고, 최종적으로는 ``evaluate`` # 함수에 입력으로 제공되어 디코딩된 출력 문장을 구하게 됩니다. 우리는 # 이러한 과정을 계속 반복하며, 이를 통해 'q'나 'quit'를 입력하기 전까지는 # 계속 채팅할 수 있습니다. # # 마지막으로, 만약 어휘집에 포함되어 있지 않은 단어를 포함하고 있는 문장이 # 입력되더라도 이를 예의 바르게 처리합니다. 즉 에러 메시지를 출력하고 # 사용자에게 새로운 문장을 입력해달라고 요청합니다. # # # # + def evaluate(encoder, decoder, searcher, voc, sentence, max_length=MAX_LENGTH): ### 입력 시퀀스를 배치 형태로 만듭니다 # 단어 -> 인덱스 indexes_batch = [indexesFromSentence(voc, sentence)] # lengths 텐서를 만듭니다 lengths = torch.tensor([len(indexes) for indexes in indexes_batch]) # 배치의 차원을 뒤집어서 모델이 사용하는 형태로 만듭니다 input_batch = torch.LongTensor(indexes_batch).transpose(0, 1) # 적절한 디바이스를 사용합니다 input_batch = input_batch.to(device) lengths = lengths.to("cpu") # searcher를 이용하여 문장을 디코딩합니다 tokens, scores = searcher(input_batch, lengths, max_length) # 인덱스 -> 단어 decoded_words = [voc.index2word[token.item()] for token in tokens] return decoded_words def evaluateInput(encoder, decoder, searcher, voc): input_sentence = '' while(1): try: # 입력 문장을 받아옵니다 input_sentence = input('> ') # 종료 조건인지 검사합니다 if input_sentence == 'q' or input_sentence == 'quit': break # 문장을 정규화합니다 input_sentence = normalizeString(input_sentence) # 문장을 평가합니다 output_words = evaluate(encoder, decoder, searcher, voc, input_sentence) # 응답 문장을 형식에 맞춰 출력합니다 output_words[:] = [x for x in output_words if not (x == 'EOS' or 
x == 'PAD')] print('Bot:', ' '.join(output_words)) except KeyError: print("Error: Encountered unknown word.") # - # 모델 수행하기 # ------------- # # 마지막으로, 우리의 모델을 수행해 볼 시간입니다! # # 우리가 챗봇 모델을 학습할 때든 테스트할 때든, 우리는 각각의 인코더 및 # 디코더 모델을 초기화해줘야 합니다. 다음 블록에서는 우리가 원하는대로 # 설정을 맞추고, 처음부터 시작할지, 아니면 checkpoint를 불러올지 정하고, # 모델을 빌드하고 초기화합니다. 성능을 최적화하기 위해서는 모델 설정을 # 여러가지로 바꿔 보면서 테스트해보기 바랍니다. # # # # + # 모델을 설정합니다 model_name = 'cb_model' attn_model = 'dot' #attn_model = 'general' #attn_model = 'concat' hidden_size = 500 encoder_n_layers = 2 decoder_n_layers = 2 dropout = 0.1 batch_size = 64 # 불러올 checkpoint를 설정합니다. 처음부터 시작할 때는 None으로 둡니다. loadFilename = None checkpoint_iter = 4000 #loadFilename = os.path.join(save_dir, model_name, corpus_name, # '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size), # '{}_checkpoint.tar'.format(checkpoint_iter)) # loadFilename이 제공되는 경우에는 모델을 불러옵니다 if loadFilename: # 모델을 학습할 때와 같은 기기에서 불러오는 경우 checkpoint = torch.load(loadFilename) # GPU에서 학습한 모델을 CPU로 불러오는 경우 #checkpoint = torch.load(loadFilename, map_location=torch.device('cpu')) encoder_sd = checkpoint['en'] decoder_sd = checkpoint['de'] encoder_optimizer_sd = checkpoint['en_opt'] decoder_optimizer_sd = checkpoint['de_opt'] embedding_sd = checkpoint['embedding'] voc.__dict__ = checkpoint['voc_dict'] print('Building encoder and decoder ...') # 단어 임베딩을 초기화합니다 embedding = nn.Embedding(voc.num_words, hidden_size) if loadFilename: embedding.load_state_dict(embedding_sd) # 인코더 및 디코더 모델을 초기화합니다 encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout) decoder = LuongAttnDecoderRNN(attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout) if loadFilename: encoder.load_state_dict(encoder_sd) decoder.load_state_dict(decoder_sd) # 적절한 디바이스를 사용합니다 encoder = encoder.to(device) decoder = decoder.to(device) print('Models built and ready to go!') # - # 학습 수행하기 # ~~~~~~~~~~~~~ # # 모델을 학습해보고 싶다면 다음 블록을 수행하면 됩니다. 
# # 먼저 학습 매개변수를 설정하고, optimizer를 초기화한 뒤, 마지막으로 ``trainIters`` # 함수를 호출하여 학습 단계를 진행합니다. # # # # + # 학습 및 최적화 설정 clip = 50.0 teacher_forcing_ratio = 1.0 learning_rate = 0.0001 decoder_learning_ratio = 5.0 n_iteration = 4000 print_every = 1 save_every = 500 # Dropout 레이어를 학습 모드로 둡니다 encoder.train() decoder.train() # Optimizer를 초기화합니다 print('Building optimizers ...') encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate) decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio) if loadFilename: encoder_optimizer.load_state_dict(encoder_optimizer_sd) decoder_optimizer.load_state_dict(decoder_optimizer_sd) # cuda가 있다면 cuda를 설정합니다 for state in encoder_optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.cuda() for state in decoder_optimizer.state.values(): for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.cuda() # 학습 단계를 수행합니다 print("Starting Training!") trainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer, decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers, save_dir, n_iteration, batch_size, print_every, save_every, clip, corpus_name, loadFilename) # - # 평가 수행하기 # ~~~~~~~~~~~~~ # # 여러분의 모델과 채팅을 해보고 싶다면 다음 블록을 수행하면 됩니다. # # # # + # Dropout 레이어를 평가 모드로 설정합니다 encoder.eval() decoder.eval() # 탐색 모듈을 초기화합니다 searcher = GreedySearchDecoder(encoder, decoder) # 채팅을 시작합니다 (다음 줄의 주석을 제거하면 시작해볼 수 있습니다) # evaluateInput(encoder, decoder, searcher, voc) # - # 맺음말 # ------ # # 이번 튜토리얼을 이것으로 마무리하겠습니다. 축하합니다! 여러분은 이제 생성 # 챗봇 모델을 만들기 위한 기초 지식을 습득했습니다. 만약 좀 더 관심이 있다면 # 모델이나 학습 매개변수를 수정해 보면서, 혹은 모델을 학습할 데이터를 바꿔 # 보면서 챗봇의 행동을 수정해볼 수 있을 것입니다. # # 그 외에도 딥러닝의 멋진 활용 예에 대한 PyTorch 튜토리얼이 있으니 한 번 # 확인해 보기 바랍니다! # # #
docs/_downloads/699173333c50cd5bfdee056e459170e2/chatbot_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="Jb3YM3OyGtXa" colab_type="code"
# Notebook-only dependencies (Colab environment).
# !pip install --upgrade tables
# !pip install eli5
# !pip install xgboost
# !pip install hyperopt

# + id="xTj3lBCCHBFy" colab_type="code"
import pandas as pd
import numpy as np

import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score, KFold
from hyperopt import hp, fmin, tpe, STATUS_OK

import eli5
from eli5.sklearn import PermutationImportance

# + id="oc_lCRVSII9O" colab_type="code"
# cd /content/drive/My Drive/Colab Notebooks/Matrix2/data

# + id="t6Ziq6xQIQKv" colab_type="code"
# Load the car-offers dataset from Google Drive.
df = pd.read_hdf('/content/drive/My Drive/Colab Notebooks/Matrix2/data/car.h5')
print(df.shape)
df.sample(3)

# + id="364oeY5AIW3Y" colab_type="code"
# Add an integer-coded "__cat" twin for every column so the tree models can
# consume categorical/text features directly.
SUFFIX_CAT = '__cat'
for feat in df.columns:
    if isinstance(df[feat][0], list):
        continue  # list-valued cells cannot be factorized

    factorized_values = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        df[feat] = factorized_values
    else:
        df[feat + SUFFIX_CAT] = factorized_values

# Candidate features: every factorized column except anything price-related
# (price is the target -- keeping it would leak the answer into the model).
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
print(len(cat_feats))

# + id="fp2ugPy9IvJY" colab_type="code"
# Parse the numeric parameters out of their raw string form; -1 marks "missing".
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(
    lambda x: -1 if str(x) == 'None' else int(x))
df['param_moc'] = df['param_moc'].map(
    lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]))
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(
    lambda x: -1 if str(x) == 'None' else int(x.split('cm')[0].replace(' ', '')))

# + id="3jbSXsG-Juev" colab_type="code"
# Feature subset selected in the earlier feature-importance analysis.
feats = [
    'feature_kamera-cofania__cat',
    'feature_łopatki-zmiany-biegów__cat',
    'param_napęd__cat',
    'param_skrzynia-biegów__cat',
    'param_faktura-vat__cat',
    'feature_asystent-pasa-ruchu__cat',
    'param_stan__cat',
    'feature_światła-led__cat',
    'feature_bluetooth__cat',
    'feature_regulowane-zawieszenie__cat',
    'feature_wspomaganie-kierownicy__cat',
    'feature_system-start-stop__cat',
    'feature_światła-do-jazdy-dziennej__cat',
    'feature_światła-xenonowe__cat',
    'feature_czujniki-parkowania-przednie__cat',
    'param_moc',
    'param_rok-produkcji',
    'param_pojemność-skokowa',
    'feature_asystent-parkowania__cat',
    'seller_name__cat',
]

xgb_params = {
    'max_depth': 5,
    'n_estimators': 50,
    'learning_rate': 0.1,
    'seed': 0,
}

# Baseline model with fixed hyperparameters (run_model is defined in an
# earlier cell of this notebook).
run_model(xgb.XGBRegressor(**xgb_params), feats)

# + [markdown] id="Ovlglb0YLYJC" colab_type="text"
# ## HYPEROPT

# + id="naDGeB-4VCj3" colab_type="code"


# + id="kWPpmqbaJwFX" colab_type="code"
def obj_func(params):
    """Hyperopt objective: train XGBoost with the sampled `params` and
    return the (positive) mean absolute error to minimize."""
    print("training with params: ")
    print(params)
    # BUG FIX: the original passed the fixed `xgb_params` here, so every
    # hyperopt trial evaluated the exact same model and the search was a
    # no-op. The sampled `params` must be used.
    mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats)

    return {'loss': np.abs(mean_mae), 'status': STATUS_OK}


# Search space
xgb_reg_params = {
    'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
    'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype=int)),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.05),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),
    'objective': 'reg:squarederror',
    'n_estimators': 100,
    'seed': 0,
}

# RUN the hyperparameter search.
best = fmin(obj_func, xgb_reg_params, algo=tpe.suggest, max_evals=25)
best

# + id="P5ynEdWQLWr5" colab_type="code" colab={}
matrix2_day5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import numpy as np


def unique(list1):
    """Print and return the sorted unique elements of `list1` as a numpy array."""
    uniques = np.unique(np.array(list1))
    print(uniques)
    # Returning the result (instead of print-only) lets callers reuse it;
    # existing callers that ignored the old None return are unaffected.
    return uniques


arr = [12, 14, 15, 15, 15, 16, 16, 19, 21, 22, 30]
# BUG FIX: the original computed `n = len(l1)` where `l1` was never defined,
# raising NameError before the answer could be printed (and the value was
# immediately overwritten anyway). The dead `count = 0` was removed too.
final = np.unique(arr)
n = len(final)
print("Total number of unique elements are: ", str(n))
# -
LAB-EXPERIMENT-2/9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Model Predictive Control # # An entire marathon! Trying to make model predictive control (slightly more advanced than LQG) work. Main reference: https://ieeexplore-ieee-org.libproxy.berkeley.edu/stamp/stamp.jsp?tp=&arnumber=8511551 # # Steps: # # 1. Keck mirror finite-element analysis: find its $\omega_D, \omega_T, d$. Will just skip over this. # 2. Convert TF to SS and augment to make $C_i, B_i$. # 3. Augment with turbulence and vibrations to make $A_i$ and $C_0$. # 4. Pick $R_y, R_u, u_{max}$. # 5. Solve the quadratic system! # + import numpy as np from matplotlib import pyplot as plt from scipy import signal, io, linalg from cvxopt import solvers, matrix from aberrations import * from observer import * from controller import * from fractal_deriv import design_filt from utils import * f_sampling = 1000 rms = lambda data: np.sqrt(np.mean(data ** 2)) eps = 1e-8 process_noise = 0.01 measurement_noise = 0.06 N_delay = 2 # + # step 1: Keck mirror FEA results. # These are completely random numbers. 
w_TT = 100  # tip-tilt natural frequency -- placeholder value (see note above)
z_TT = 1    # tip-tilt damping ratio -- placeholder value


def make_impulse(w, z, T=None):
    """Return (times, normalized impulse response) of a second-order system
    with natural frequency `w` and damping ratio `z`, evaluated at times `T`
    (default: 0..1 s in 1 ms steps)."""
    if T is None:
        # Build the default per call instead of a mutable module-level
        # default argument shared across invocations.
        T = np.arange(0, 1, 0.001)
    num = [w**2]
    den = [1, 2 * w * z, w**2]
    tf = signal.TransferFunction(num, den)
    impulse = signal.impulse(tf, T=T)
    # Normalize so the response sums to 1.
    return impulse[0], impulse[1] / sum(impulse[1])


# Discretize the continuous second-order tip-tilt model at the loop rate and
# convert the transfer function to state space.
num, den = signal.cont2discrete(([w_TT ** 2], [1, 2 * w_TT * z_TT, w_TT ** 2]), dt=1/f_sampling)[:2]
A_TT, B_TT, C_TT, D_TT = signal.tf2ss(num, den)
# -

# Generate a synthetic vibration time series and run the vibration Kalman
# filter on noisy measurements of it.
size = 5000
N_vibe = 1
params = make_vibe_params(N=N_vibe)
vibe = make_1D_vibe_data(steps=size, N=N_vibe) + np.random.normal(0, process_noise * N_vibe, (size,))
pol = vibe  # pseudo open-loop signal
variances = np.array([process_noise] * N_vibe)
kfilter_vibe = make_kfilter_vibe(np.vstack((params[1:3])).T, variances)
res, phy = kfilter_vibe.run(vibe + np.random.normal(0, measurement_noise, vibe.size), save_physics=True)
plt.plot(res)
plt.plot(vibe)

# Augmented state dimension: vibration states + control channel + delay states.
N = 2 * N_vibe + N_delay + 1

# +
# Build the augmented transition matrix: copy the vibration dynamics, then
# wire the control/accumulator row and the delay chain.
A = np.zeros((N, N))
for i in range(2 * N_vibe):
    for j in range(2 * N_vibe):
        A[i][j] = kfilter_vibe.A[i][j]

for i in range(N_vibe):
    A[2 * N_vibe][2 * i] = 1
    A[2 * N_vibe][2 * i + 1] = -1

A[2 * N_vibe][2 * N_vibe] = 1

for i in range(N_delay):
    A[2 * N_vibe + i + 1][2 * N_vibe + i] = 1
# -

A

# +
# Input, output, process-noise and measurement-noise matrices.
B = np.zeros((N, 1))
B[2 * N_vibe] = 1
C = np.zeros((1, N))
C[0][0] = 1
C[0][1] = -1
# NOTE(review): hardcoded index 3 corresponds to the last delay state only
# for N_vibe=1, N_delay=2 -- confirm before changing those constants.
C[0][3] = 1
Q = np.zeros((N, N))
for i in range(N_vibe):
    Q[2 * i][2 * i] = variances[i]
R = np.array([measurement_noise])
# -

kfilter = KFilter(np.zeros(N,), A, B, Q, C, R, has_input=True)

# MPC horizon and weights (R_y penalizes output error, R_u penalizes effort).
N_frames = 5
y, u = 1, 25
R_y = np.identity(N_frames) * y
R_u = np.identity(N_frames) * u

# Prediction matrices for the MPC quadratic program.
# BUG FIX: the original used `kfilter.A ** i`, which is an ELEMENTWISE power
# on an ndarray; the i-step state prediction requires the matrix power A^i.
C_0 = np.vstack([kfilter.H] +
                [kfilter.H.dot(np.linalg.matrix_power(kfilter.A, i))
                 for i in range(1, N_frames)])

D_0 = np.zeros((N_frames, N_frames))
for i in range(N_frames):
    for j in range(i):
        # matrix_power(A, 0) is the identity, so this single expression also
        # covers the original's `i - j - 1 == 0` special case.
        D_0[i][j] = kfilter.H.dot(np.linalg.matrix_power(kfilter.A, i - j - 1)).dot(kfilter.B)

kfilter.state = np.zeros(kfilter.A.shape[0],)
H2 = 2 * kfilter.state.T.dot(C_0.T.dot(R_y).dot(D_0))
H3 = D_0.dot(R_y).dot(D_0.T) + R_u

# Actuator limit, expressed as the inequality constraint G u <= h.
# (Dropped the dead `G = np.identity(N_frames)` that was immediately
# overwritten in the original.)
u_max = 5 * float(max(np.diff(pol[:5000])))
G = np.vstack((np.identity(N_frames), -np.identity(N_frames)))
h = np.vstack([u_max] * (2 * N_frames))

# +
# Closed-loop simulation: every N_frames steps, solve the QP for the next
# N_frames control actions.
kfilter.state = np.zeros(kfilter.A.shape[0],)
delay = 2  # frames of loop latency
shifts = np.diff(pol)
res_control = np.zeros(pol.shape)
res_control[0] = pol[0]
actions = np.zeros(pol.shape)  # dropped the unused `residual` alias
for i in range(size):
    try:
        if i > 0:
            res_control[i] = res_control[i - 1] + shifts[i - 1] + actions[i - 1]
        # NOTE(review): for i < delay this indexes from the END of
        # res_control (negative index); presumably a warm-up artifact --
        # confirm this is intended.
        measurement = res_control[i - delay] + np.random.normal(0, 0.06)
        kfilter.update(measurement)
        if i < pol.size - N_frames - 1 and i % N_frames == 0:
            # Refresh the linear QP term from the current state estimate.
            H2 = 2 * kfilter.state.T.dot(C_0.T.dot(R_y).dot(D_0))
            print(kfilter.state)
            solvers.options['show_progress'] = False
            solution = solvers.qp(matrix(H3), matrix(H2), matrix(G), matrix(h, tc='d'), solver='cvxopt')['x']
            print(solution)
            for j in range(i, i + N_frames):
                actions[j] = solution[j - i]
        kfilter.predict(actions[i - 1])
    except Exception:  # narrowed from a bare `except:`; still re-raised
        print(i)
        raise
# -

rms(pol[kfilter.iters:])

rms(res_control[kfilter.iters:])

# Compare open-loop and controlled spectra.
plt.semilogy(*signal.periodogram(pol, fs=f_sampling))
plt.semilogy(*signal.periodogram(res_control, fs=f_sampling))
# plt.semilogy(*signal.periodogram(actions, fs=f_sampling))
plt.ylim(1e-10)

plt.figure(figsize=(10, 10))
plt.plot(np.arange(kfilter.iters / f_sampling, size / f_sampling, 1 / f_sampling),
         pol[kfilter.iters:], label='Pseudo open-loops')
plt.plot(np.arange(kfilter.iters / f_sampling, size / f_sampling, 1 / f_sampling),
         res_control[kfilter.iters:] - np.mean(res_control[kfilter.iters:]),
         label='Control residual')
plt.xlabel("Time (ms)")
plt.ylabel("Tip position (mas)")
plt.legend()
plt.show()
dev/mpc_vibe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python Cheat Sheet # # Basic cheatsheet for Python mostly based on the book written by <NAME>, [Automate the Boring Stuff with Python](https://automatetheboringstuff.com/) under the [Creative Commons license](https://creativecommons.org/licenses/by-nc-sa/3.0/) and many other sources. # # ## Read It # # - [Website](https://www.pythoncheatsheet.org) # - [Github](https://github.com/wilfredinni/python-cheatsheet) # - [PDF](https://github.com/wilfredinni/Python-cheatsheet/raw/master/python_cheat_sheet.pdf) # - [Jupyter Notebook](https://mybinder.org/v2/gh/wilfredinni/python-cheatsheet/master?filepath=jupyter_notebooks) # # ## itertools Module # # The _itertools_ module is a collection of tools intented to be fast and use memory efficiently when handling iterators (like [lists](#lists) or [dictionaries](#dictionaries-and-structuring-data)). # # From the official [Python 3.x documentation](https://docs.python.org/3/library/itertools.html): # # > The module standardizes a core set of fast, memory efficient tools that are useful by themselves or in combination. Together, they form an “iterator algebra” making it possible to construct specialized tools succinctly and efficiently in pure Python. # # The _itertools_ module comes in the standard library and must be imported. # # The [operator](https://docs.python.org/3/library/operator.html) module will also be used. This module is not necessary when using itertools, but needed for some of the examples below. import itertools import operator # ### accumulate # # Makes an iterator that returns the results of a function. 
itertools.accumulate(iterable[, func]) # Example: data = [1, 2, 3, 4, 5] result = itertools.accumulate(data, operator.mul) for each in result: print(each) # The operator.mul takes two numbers and multiplies them: operator.mul(1, 2) operator.mul(2, 3) operator.mul(6, 4) operator.mul(24, 5) # Passing a function is optional: # + data = [5, 2, 6, 4, 5, 9, 1] result = itertools.accumulate(data) for each in result: print(each) # - # If no function is designated the items will be summed: 5 5 + 2 = 7 7 + 6 = 13 13 + 4 = 17 17 + 5 = 22 22 + 9 = 31 31 + 1 = 32 # ### combinations # # Takes an iterable and a integer. This will create all the unique combination that have r members. itertools.combinations(iterable, r) # Example: # + shapes = ['circle', 'triangle', 'square',] result = itertools.combinations(shapes, 2) for each in result: print(each) # - # ### combinations_with_replacement # # Just like combinations(), but allows individual elements to be repeated more than once. itertools.combinations_with_replacement(iterable, r) # Example: # + shapes = ['circle', 'triangle', 'square'] result = itertools.combinations_with_replacement(shapes, 2) for each in result: print(each) # - # ### count # # Makes an iterator that returns evenly spaced values starting with number start. itertools.count(start=0, step=1) # Example: for i in itertools.count(10,3): print(i) if i > 20: break # ### cycle # # This function cycles through an iterator endlessly. itertools.cycle(iterable) # Example: colors = ['red', 'orange', 'yellow', 'green', 'blue', 'violet'] for color in itertools.cycle(colors): print(color) # When reached the end of the iterable it start over again from the beginning. # # ### chain # # Take a series of iterables and return them as one long iterable. 
itertools.chain(*iterables) # Example: # + colors = ['red', 'orange', 'yellow', 'green', 'blue'] shapes = ['circle', 'triangle', 'square', 'pentagon'] result = itertools.chain(colors, shapes) for each in result: print(each) # - # ### compress # # Filters one iterable with another. itertools.compress(data, selectors) # Example: # + shapes = ['circle', 'triangle', 'square', 'pentagon'] selections = [True, False, True, False] result = itertools.compress(shapes, selections) for each in result: print(each) # - # ### dropwhile # # Make an iterator that drops elements from the iterable as long as the predicate is true; afterwards, returns every element. itertools.dropwhile(predicate, iterable) # Example: # + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1] result = itertools.dropwhile(lambda x: x<5, data) for each in result: print(each) # - # ### filterfalse # # Makes an iterator that filters elements from iterable returning only those for which the predicate is False. itertools.filterfalse(predicate, iterable) # Example: # + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] result = itertools.filterfalse(lambda x: x<5, data) for each in result: print(each) # - # ### groupby # # Simply put, this function groups things together. itertools.groupby(iterable, key=None) # Example: # + robots = [{ 'name': 'blaster', 'faction': 'autobot' }, { 'name': 'galvatron', 'faction': 'decepticon' }, { 'name': 'jazz', 'faction': 'autobot' }, { 'name': 'metroplex', 'faction': 'autobot' }, { 'name': 'megatron', 'faction': 'decepticon' }, { 'name': 'starcream', 'faction': 'decepticon' }] for key, group in itertools.groupby(robots, key=lambda x: x['faction']): print(key) print(list(group)) # - # ### islice # # This function is very much like slices. This allows you to cut out a piece of an iterable. 
itertools.islice(iterable, start, stop[, step]) # Example: # + colors = ['red', 'orange', 'yellow', 'green', 'blue',] few_colors = itertools.islice(colors, 2) for each in few_colors: print(each) # - # ### permutations itertools.permutations(iterable, r=None) # Example: # + alpha_data = ['a', 'b', 'c'] result = itertools.permutations(alpha_data) for each in result: print(each) # - # ### product # # Creates the cartesian products from a series of iterables. # + num_data = [1, 2, 3] alpha_data = ['a', 'b', 'c'] result = itertools.product(num_data, alpha_data) for each in result: print(each) # - # ### repeat # # This function will repeat an object over and over again. Unless, there is a times argument. itertools.repeat(object[, times]) # Example: for i in itertools.repeat("spam", 3): print(i) # ### starmap # # Makes an iterator that computes the function using arguments obtained from the iterable. itertools.starmap(function, iterable) # Example: # + data = [(2, 6), (8, 4), (7, 3)] result = itertools.starmap(operator.mul, data) for each in result: print(each) # - # ### takewhile # # The opposite of dropwhile(). Makes an iterator and returns elements from the iterable as long as the predicate is true. itertools.takewhile(predicate, iterable) # Example: # + data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1] result = itertools.takewhile(lambda x: x<5, data) for each in result: print(each) # - # ### tee # # Return n independent iterators from a single iterable. itertools.tee(iterable, n=2) # Example: # + colors = ['red', 'orange', 'yellow', 'green', 'blue'] alpha_colors, beta_colors = itertools.tee(colors) for each in alpha_colors: print(each) # + colors = ['red', 'orange', 'yellow', 'green', 'blue'] alpha_colors, beta_colors = itertools.tee(colors) for each in beta_colors: print(each) # - # ### zip_longest # # Makes an iterator that aggregates elements from each of the iterables. If the iterables are of uneven length, missing values are filled-in with fillvalue. 
Iteration continues until the longest iterable is exhausted. itertools.zip_longest(*iterables, fillvalue=None) # Example: # + colors = ['red', 'orange', 'yellow', 'green', 'blue',] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,] for each in itertools.zip_longest(colors, data, fillvalue=None): print(each)
jupyter_notebooks/08_itertools_Module.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Nature of signals # # In the context of this class, a signal is the data acquired by the measurement system. It contains much information that we need to be able to identify to extract knowledge about the system being tested and how to optimize the measurements. A signal caries also messages and information. We will use the content of this module for the other modules seen in the rest of the class. # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.formula.api import ols from astsadata import * fig, axes = plt.subplots(nrows=2, figsize=(7, 5)) #EQ5.plot(ax=axes[0], ylabel="EQ5", title="Earthquake", legend=False) EQ5.plot(ax=axes[0], title="Earthquake", legend=False) #EXP6.plot(ax=axes[1], ylabel="EXP6", title="Explosion", legend=False) EXP6.plot(ax=axes[1], title="Explosion", legend=False) fig.tight_layout() plt.show() # - # ## Signal classification # # A signal can be characterized by its amplitude and frequency. __Amplitude__ is related to the strength of the signal and __frequency__ to the extent or duration of the signal. The time series of a signal is called a __waveform__. Multipe collection of the waveform is called an __ensemble__. # # Signals can be either __deterministic__ or __random__. # # Deterministic signals can be either __static__ (do not change in time) or __dynamic__. Dynamic signals can be decomposed into __periodic__ or __aperiodic__. A periodic signal repeats itself at regular interval. The smallest value over whih it repeats itself is the __fundamental period__, with an associated __fundamental frequency__. A __simple__ periodic signal has one period; it is a sine wave. 
A __complex__ has multiple periods and can be thought as the sum of several sinusoids (more on this in the next section). Aperiodic signals are typically __transient__ (such as step, ramp, or pulse responses). # # Nondeterministic signals are an important class of signals that are often encountered in nature (think of turbulence, stock market, etc). They must be analyzed with satistical tools. They are classified as __nonstationary__ and __stationary__. This classification enables to select the proper statistical theory to analyze them. The properties of nondeterministic signals are computed with ensemble statistics of instantaneous properties. In particular, one computes the ensemble average, $\mu(t_1)$, and ensemble autocorrelation function (more on the physical meaning of this function later), $R(t_1,t_1+\tau)$. # # \begin{align*} # \mu(t_1) & = \frac{1}{N} \sum_{i=0}^{N-1} x_i(t_1) \\ # R(t_1,t_1+\tau) & = \frac{1}{N} \sum_{i=0}^{N-1} x_i(t_1)x_i(t_1+\tau) # \end{align*} # # The term ensemble means that we take N time series and perform statistics with the ensemble of the values at recorded time $t_1$. # # If $\mu(t_1) = \mu$ and $R(t_1,t_1+\tau) = R(\tau)$, then the signal is considered (weakly) __stationary__ and nonstationary, otherwise. Stationarity introdcues a lot of simplification in the statistical analysis of the data (by using a lot of tools developed for time series analysis) and one should always start by checking for signal stationarity. Stationarity implies that signal ensemble-averaged statistical properties are independent of $t_1$. # # For most stationary signals, the temporal and ensemble statistical properties are identical. The signal is then __ergodic__. 
Thus, from a _single_ time history of length $T_r$ one can calculate $\mu$ and $R(\tau)$ (which saves time in the acquisition and analysis): # # \begin{align*} # \mu & = \frac{1}{T_r} \int_{0}^{T_r} x(t) dt \\ # R(\tau) & = \frac{1}{T_r} \int_{0}^{T_r} x(t)x(t+\tau) dt # \end{align*} # # Thanks to statistical tools for ergodic processes, from a finite recording length of the signal, one can estimate population mean with confidence level. # ## Signal variables # # Most signals can be decomposed as a sum of sines and cosines (more on this in the next module). Let's start with a simple periodic signal: # # \begin{align*} # y(t) = C \sin (\omega t + \phi) = C \sin (2\pi f t + \phi) # \end{align*} # # When several sine and cosine waves are added, complext waveforms result. For example for second order dynamic system, the system response could take the form: # # \begin{align*} # y(t) = A \cos (\omega t) + B \sin (\omega t) # \end{align*} # # This sum of a cosine and sine of same frequency can be rearranged as: # # \begin{align*} # y(t) = C \cos (\omega t - \phi) = C \cos (\omega t - \phi + \pi/2) = C \sin (\omega t + \phi') # \end{align*} # # with: # \begin{align*} # C & = \sqrt{A^2 + B^2}\\ # \phi & = \tan^{-1} (B/A)\\ # \phi' & = \pi/2 - \phi = \tan^{-1} (A/B) # \end{align*} # # Let's look at some examples of simple and complex periodic signals. # First a simple function: # # \begin{align*} # y (t) = 2 \sin (2\pi t) # \end{align*} # # + #import numpy from matplotlib import pyplot # %matplotlib inline t=np.linspace(0.0,2.0,num=1000) # (s) y = 2 * np.sin(2*np.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); # - # Now a complex function made of two frequencies (harmonics): # # \begin{align*} # y (t) = 2 \sin (2\pi t) + 1.2 \sin (6 \pi t) # \end{align*} # # The signal has two frequencies: 1 and 3 Hz. 1 Hz is the lowest frequency and is the fundamental frequency with period 1 s. So the signal will repeat itself every second. 
# + y = 2 * np.sin(2*np.pi*t) + 1.2 * np.sin(6*np.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); # - # Let's now look at two sinusoidal with very close frequencies $\Delta f$. # # \begin{align*} # y (t) = 2 \sin (2\pi t) + 1.2 \sin ((2+0.2) \pi t) # \end{align*} # t=np.linspace(0.0,100.0,num=1000) # (s) y = 2 * np.sin(2*np.pi*t) + 1.2 * np.sin((2+0.2)*np.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); # Here the frequency difference is $\Delta f = \frac{f_1-f_2}{f_1} = 0.2/2 = 0.1 Hz$. The resulting signal has a slow beat with __beat__ frequency $\Delta f)$ or beat period $1/\Delta f = 10$ s, i.e. the signal repepats itself every 10 s. Analytically (using trigonometric relations), one can show that the sum of two sine waves with close frequencies results in a signal modulated by $\cos(\Delta f/2)$. # # ## Detection schemes # # The mixing of two signals to produce a signal (wave) with a new frequency is called heterodyning and is commonly used in instrumentation to obtain very accurate measurements. __Heterodyne detection__ shifts the frequency content of a signal into a new range where it is easier to detected; in communucation it is called _frequency conversion_. Heterodyning is used in laser Doppler velocimetry, tuning of musical instruments, radio receivers, etc. # # In contrast, __homodyne detection__ uses a single (homo) frequency and compares the signal with a standard oscillation that would be identical to the signal if it carried null information. and measures the amplitude and phase of a signal to gain information. It enables to extract information encoded as modulation of the phase and/or frequency of the signal. In optics, this results in interferometry. It is also the fundation behind lock-in amplifier to extract information for very weak or noisy signals. # # Finally in __magnitude detection__ one only records the amplitude of signals. This is the most common detection scheme used. # ## Statistical description of signals # #
Lectures/02_NatureSignals.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Here is the master pipeline
#
# Camera Calibration

# +
import numpy as np
import cv2
import glob
import pickle
import matplotlib.pyplot as plt
# #%matplotlib notebook


def undistort(test_img):
    """Calibrate the camera from the chessboard images in `camera_cal/` and
    return `test_img` with lens distortion removed.

    Side effects: writes annotated corner images and the undistorted test
    image to `output_files/`, and saves the calibration matrices (mtx, dist)
    to a pickle for reuse by the main pipeline.
    """
    # Prepare object points (our ideal reference), like (0,0,0), (1,0,0),
    # (2,0,0) ..., (6,5,0) -- the chessboard corners in its own plane.
    nx = 9  # number of inner corners along x (columns)
    ny = 6  # number of inner corners along y (rows)
    objp = np.zeros((ny * nx, 3), np.float32)
    # Coordinate pairs for all 54 corners; shape is (54, 2).
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space (ideal reference board)
    imgpoints = []  # 2d corner locations detected in each calibration image

    # Make a list of calibration images.
    images = glob.glob('camera_cal/calibration*.jpg')

    # Step through the list and search for chessboard corners.
    for idx, fname in enumerate(images):
        calib_img = cv2.imread(fname)
        # Grayscale conversion guarantees the single-channel 8-bit input
        # that findChessboardCorners expects.
        gray = cv2.cvtColor(calib_img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

        # If found, record the corresponding object/image point pair.
        if ret:
            objpoints.append(objp)  # same ideal points for every image
            imgpoints.append(corners)

            # Save an annotated copy for visual inspection.
            cv2.drawChessboardCorners(calib_img, (nx, ny), corners, ret)
            write_name = 'corners_found' + str(idx) + '.jpg'
            cv2.imwrite('output_files/corners_found_for_calib/' + write_name, calib_img)
            # BUG FIX: the display calls below ran on every calibration image
            # although the original comments said they were "commented out";
            # they are now actually disabled. Un-comment to inspect visually.
            # cv2.imshow(write_name, calib_img)
            # cv2.waitKey(500)
            # cv2.destroyAllWindows()

    # Calibrate using the collected point pairs at the test image's size
    # (x_axis_max) X (y_axis_max).
    test_img_size = (test_img.shape[1], test_img.shape[0])
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, test_img_size, None, None)

    # Use the calibration results to undistort the test image.
    undist_img = cv2.undistort(test_img, mtx, dist, None, mtx)
    cv2.imwrite('output_files/test_undist.jpg', undist_img)

    # Save the camera calibration result for later use (rvecs/tvecs are not
    # needed downstream). BUG FIX: the file handle is now closed via `with`
    # instead of being leaked by an inline open().
    dist_pickle = {"mtx": mtx, "dist": dist}
    with open("output_files/calib_pickle_files/dist_pickle.p", "wb") as pf:
        pickle.dump(dist_pickle, pf)
    # NOTE: when reusing mtx and dist later, the images must have the same
    # dimensions as the calibration images, otherwise the code needs changes.

    return undist_img


test_img = cv2.imread('camera_cal/calibration1.jpg')  # NOTE: cv2 loads BGR
output = undistort(test_img)

# Show the original next to the undistorted result.
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(test_img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(output)
ax2.set_title('Undistorted Image', fontsize=30)
cv2.waitKey(500)
cv2.destroyAllWindows()
# -

# # Main Pipeline:

# +
import cv2
import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg import math # Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(frame): def cal_undistort(img): # Reads mtx and dist matrices, peforms image distortion correction and returns the undistorted image import pickle # Read in the saved matrices my_dist_pickle = pickle.load( open( "output_files/calib_pickle_files/dist_pickle.p", "rb" ) ) mtx = my_dist_pickle["mtx"] dist = my_dist_pickle["dist"] undistorted_img = cv2.undistort(img, mtx, dist, None, mtx) #undistorted_img = cv2.cvtColor(undistorted_img, cv2.COLOR_BGR2RGB) #Use if you use cv2 to import image. ax.imshow() needs RGB image return undistorted_img def yellow_threshold(img, sxbinary): # Convert to HLS color space and separate the S channel & H channel # Note: img is the undistorted image hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS) s_channel = hls[:,:,2] h_channel = hls[:,:,0] # Threshold color channel s_thresh_min = 100 s_thresh_max = 255 #for 360 degree, my desired values for yellow ranged between 35 and 50. 
Diving this range by 2: h_thresh_min = 10 # Taking a bit lower than required to esnure that yellow is captured h_thresh_max = 25 s_binary = np.zeros_like(s_channel) s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1 h_binary = np.zeros_like(h_channel) h_binary[(h_channel >= h_thresh_min) & (h_channel <= h_thresh_max)] = 1 # Combine the two binary thresholds yellow_binary = np.zeros_like(s_binary) yellow_binary[(((s_binary == 1) | (sxbinary == 1) ) & (h_binary ==1))] = 1 return yellow_binary def xgrad_binary(img, thresh_min=30, thresh_max=100): # Grayscale image gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Sobel x sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx)) # Threshold x gradient #thresh_min = 30 # Given as default values to the parameters. These are good starting points #thresh_max = 100 # The tweaked values are given as arguments to the function while calling it sxbinary = np.zeros_like(scaled_sobel) sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1 return sxbinary def white_threshold(img, sxbinary, lower_white_thresh = 170): # Isolating RGB channel (as we've used matplotlib to read the image) # The order would have been BGR if we had used cv2 to read the image r_channel = img[:,:,0] g_channel = img[:,:,1] b_channel = img[:,:,2] # Threshold color channel r_thresh_min = lower_white_thresh r_thresh_max = 255 r_binary = np.zeros_like(r_channel) r_binary[(r_channel >= r_thresh_min) & (r_channel <= r_thresh_max)] = 1 g_thresh_min = lower_white_thresh g_thresh_max = 255 g_binary = np.zeros_like(g_channel) g_binary[(g_channel >= g_thresh_min) & (g_channel <= g_thresh_max)] = 1 b_thresh_min = lower_white_thresh b_thresh_max = 255 b_binary = np.zeros_like(b_channel) b_binary[(b_channel >= b_thresh_min) & (b_channel <= b_thresh_max)] = 1 
white_binary = np.zeros_like(r_channel) white_binary[((r_binary ==1) & (g_binary ==1) & (b_binary ==1) & (sxbinary==1))] = 1 return white_binary def thresh_img(img): sxbinary = xgrad_binary(img, thresh_min=25, thresh_max=130) yellow_binary = yellow_threshold(img, sxbinary) #(((s) | (sx)) & (h)) white_binary = white_threshold(img, sxbinary, lower_white_thresh = 150) # Combine the two binary thresholds combined_binary = np.zeros_like(sxbinary) combined_binary[((yellow_binary == 1) | (white_binary == 1))] = 1 # We close by sending out a 3D image just as we took as input # Because, to process the image, we were using binary images out_img = np.dstack((combined_binary, combined_binary, combined_binary))*255 return out_img def perspective_transform(img): # Define calibration box in source (original) and destination (desired or warped) coordinates img_size = (img.shape[1], img.shape[0]) """Notice the format used for img_size. Yaha bhi ulta hai. x axis aur fir y axis chahiye. Apne format mein rows(y axis) and columns (x axis) hain""" # Four source coordinates # Order of points: top left, top right, bottom right, bottom left src = np.array( [[435*img.shape[1]/960, 350*img.shape[0]/540], [530*img.shape[1]/960, 350*img.shape[0]/540], [885*img.shape[1]/960, img.shape[0]], [220*img.shape[1]/960, img.shape[0]]], dtype='f') # Next, we'll define a desired rectangle plane for the warped image. 
# We'll choose 4 points where we want source points to end up # This time we'll choose our points by eyeballing a rectangle dst = np.array( [[290*img.shape[1]/960, 0], [740*img.shape[1]/960, 0], [740*img.shape[1]/960, img.shape[0]], [290*img.shape[1]/960, img.shape[0]]], dtype='f') #Compute the perspective transform, M, given source and destination points: M = cv2.getPerspectiveTransform(src, dst) #Warp an image using the perspective transform, M; using linear interpolation #Interpolating points is just filling in missing points as it warps an image # The input image for this function can be a colored image too warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR) return warped, src, dst def rev_perspective_transform(img, src, dst): img_size = (img.shape[1], img.shape[0]) #Compute the perspective transform, M, given source and destination points: Minv = cv2.getPerspectiveTransform(dst, src) #Warp an image using the perspective transform, M; using linear interpolation #Interpolating points is just filling in missing points as it warps an image # The input image for this function can be a colored image too un_warped = cv2.warpPerspective(img, Minv, img_size, flags=cv2.INTER_LINEAR) return un_warped, Minv def draw_polygon(img1, img2, src, dst): src = src.astype(int) #Very important step (Pixels cannot be in decimals) dst = dst.astype(int) cv2.polylines(img1, [src], True, (255,0,0), 3) cv2.polylines(img2, [dst], True, (255,0,0), 3) def histogram_bottom_peaks (warped_img): # This will detect the bottom point of our lane lines # Take a histogram of the bottom half of the image bottom_half = warped_img[((2*warped_img.shape[0])//5):,:,0] # Collecting all pixels in the bottom half histogram = np.sum(bottom_half, axis=0) # Summing them along y axis (or along columns) # Find the peak of the left and right halves of the histogram # These will be the starting point for the left and right lines midpoint = np.int(histogram.shape[0]//2) # 1D array hai histogram 
toh uska bas 0th index filled hoga #print(np.shape(histogram)) #OUTPUT:(1280,) leftx_base = np.argmax(histogram[:midpoint]) rightx_base = np.argmax(histogram[midpoint:]) + midpoint return leftx_base, rightx_base def find_lane_pixels(warped_img): leftx_base, rightx_base = histogram_bottom_peaks(warped_img) # HYPERPARAMETERS # Choose the number of sliding windows nwindows = 9 # Set the width of the windows +/- margin. So width = 2*margin margin = 90 # Set minimum number of pixels found to recenter window minpix = 1000 #I've changed this from 50 as given in lectures # Set height of windows - based on nwindows above and image shape window_height = np.int(warped_img.shape[0]//nwindows) # Identify the x and y positions of all nonzero pixels in the image nonzero = warped_img.nonzero() #pixel ke coordinates dega 2 seperate arrays mein nonzeroy = np.array(nonzero[0]) # Y coordinates milenge 1D array mein. They will we arranged in the order of pixels nonzerox = np.array(nonzero[1]) # Current positions to be updated later for each window in nwindows leftx_current = leftx_base #initially set kar diya hai. For loop ke end mein change karenge rightx_current = rightx_base # Create empty lists to receive left and right lane pixel indices left_lane_inds = [] # Ismein lane-pixels ke indices collect karenge. 
# 'nonzerox' array mein index daalke coordinate mil jaayega right_lane_inds = [] # Step through the windows one by one for window in range(nwindows): # Identify window boundaries in x and y (and right and left) win_y_low = warped_img.shape[0] - (window+1)*window_height win_y_high = warped_img.shape[0] - window*window_height """### TO-DO: Find the four below boundaries of the window ###""" win_xleft_low = leftx_current - margin win_xleft_high = leftx_current + margin win_xright_low = rightx_current - margin win_xright_high = rightx_current + margin """ # Create an output image to draw on and visualize the result out_img = np.copy(warped_img) # Draw the windows on the visualization image cv2.rectangle(out_img,(win_xleft_low,win_y_low), (win_xleft_high,win_y_high),(0,255,0), 2) cv2.rectangle(out_img,(win_xright_low,win_y_low), (win_xright_high,win_y_high),(0,255,0), 2) """ ### TO-DO: Identify the nonzero pixels in x and y within the window ### #Iska poora explanation seperate page mein likha hai good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0] good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] # Append these indices to the lists left_lane_inds.append(good_left_inds) right_lane_inds.append(good_right_inds) # If you found > minpix pixels, recenter next window on the mean position of the pixels in your current window (re-centre) if len(good_left_inds) > minpix: leftx_current = np.int(np.mean(nonzerox[good_left_inds])) if len(good_right_inds) > minpix: rightx_current = np.int(np.mean(nonzerox[good_right_inds])) # Concatenate the arrays of indices (previously was a list of lists of pixels) try: left_lane_inds = np.concatenate(left_lane_inds) right_lane_inds = np.concatenate(right_lane_inds) except ValueError: # Avoids an error if the above is not implemented fully pass # Extract left and 
right line pixel positions leftx = nonzerox[left_lane_inds] lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] """return leftx, lefty, rightx, righty, out_img""" #agar rectangles bana rahe ho toh out_image rakhna return leftx, lefty, rightx, righty def fit_polynomial(warped_img, leftx, lefty, rightx, righty, fit_history, variance_history, rad_curv_history): """This will fit a parabola on each lane line, give back lane curve coordinates, radius of curvature """ #Fit a second order polynomial to each using `np.polyfit` ### left_fit = np.polyfit(lefty,leftx,2) right_fit = np.polyfit(righty,rightx,2) # We'll plot x as a function of y ploty = np.linspace(0, warped_img.shape[0]-1, warped_img.shape[0]) """Primary coefficient detection: 1st level of curve fitting where frame naturally detects poins and fit's curve""" #Steps: find a,b,c for our parabola: x=a(y^2)+b(y)+c """ 1.a) If lane pixels found, fit a curve and get the coefficients for the left and right lane 1.b) If #pixels insuffient and curve couldn't be fit, use the curve from the previous frame if you have that data (In case of lack of points in 1st frame, fit an arbitrary parabola with all coeff=1: Expected to improve later on) 2) Using coefficient we'll fit a parabola. We'll improve it with following techiniques later on: - Variance of lane pixels from parabola (to account for distance of curve points from the original pixels and attempt to minimise it) - Shape and position of parabolas in the previous frame, - Trends in radius of curvature, - Frame mirroring (fine tuning one lane in the frame wrt to the other) """ try: a1_new= left_fit[0] b1_new= left_fit[1] c1_new= left_fit[2] a2_new= right_fit[0] b2_new= right_fit[1] c2_new= right_fit[2] #Calculate the x-coordinates of the parabola. 
Here x is the dependendent variable and y is independent left_fitx = a1_new*ploty**2 + b1_new*ploty + c1_new right_fitx = a2_new*ploty**2 + b2_new*ploty + c2_new status = True except TypeError: # Avoids an error if `left` and `right_fit` are still none or incorrect print('The function failed to fit a line!') if(len(lane.curve_fit)!=5): #If you dont have any values in the history left_fitx = 1*ploty**2 + 1*ploty #This is a senseless curve. If it was the 1st frame, we need to do something right_fitx = 1*ploty**2 + 1*ploty else: #replicate lane from previous frame if you have history left_fitx = fit_history[0][4][0]*ploty**2 + fit_history[0][4][1]*ploty + fit_history[0][4][2] right_fitx = fit_history[1][4][0]*ploty**2 + fit_history[1][4][1]*ploty + fit_history[1][4][2] lane.count=-1 #Restart your search in next frame. At the end of this frame, 1 gets added. Hence we'll get net 0. status = False """VARIANCE: Average distance of lane pixels from our curve which we have fit""" # Calculating variance for both lanes in the current frame. 
# Even if current frame is the 1st frame, we still need the data for the further frames # Hence it is calculated before the immediate next 'if' statement left_sum = 0 for index in range(len(leftx)): left_sum+= abs(leftx[index]-(a1_new*lefty[index]**2 + b1_new*lefty[index] + c1_new)) left_variance_new=left_sum/len(leftx) right_sum=0 for index in range(len(rightx)): right_sum+= abs(rightx[index]-(a2_new*righty[index]**2 + b2_new*righty[index] + c2_new)) right_variance_new=right_sum/len(rightx) #If you have history for variance and curve coefficients if((len(lane.curve_fit)==5)&(len(lane.variance)==5)): left_variance_old = sum([(0.2*((5-index)**3)*element) for index,element in enumerate(variance_history[0])])/sum([0.2*((5-index)**3) for index in range(0,5)]) right_variance_old = sum([(0.2*((5-index)**3)*element) for index,element in enumerate(variance_history[1])])/sum([0.2*((5-index)**3) for index in range(0,5)]) # Finding weighted average for the previous elements data within fit_history a1_old= sum([(0.2*(index+1)*element[0]) for index,element in enumerate(fit_history[0])])/sum([0.2*(index+1) for index in range(0,5)]) b1_old= sum([(0.2*(index+1)*element[1]) for index,element in enumerate(fit_history[0])])/sum([0.2*(index+1) for index in range(0,5)]) c1_old= sum([(0.2*(index+1)*element[2]) for index,element in enumerate(fit_history[0])])/sum([0.2*(index+1) for index in range(0,5)]) a2_old= sum([(0.2*(index+1)*element[0]) for index,element in enumerate(fit_history[1])])/sum([0.2*(index+1) for index in range(0,5)]) b2_old= sum([(0.2*(index+1)*element[1]) for index,element in enumerate(fit_history[1])])/sum([0.2*(index+1) for index in range(0,5)]) c2_old= sum([(0.2*(index+1)*element[2]) for index,element in enumerate(fit_history[1])])/sum([0.2*(index+1) for index in range(0,5)]) a1_new = (a1_new*((left_variance_old)**2) + a1_old*((left_variance_new)**2))/(((left_variance_old)**2) + ((left_variance_new)**2)) b1_new = (b1_new*((left_variance_old)**2) + 
b1_old*((left_variance_new)**2))/(((left_variance_old)**2) + ((left_variance_new)**2)) c1_new = (c1_new*((left_variance_old)**2) + c1_old*((left_variance_new)**2))/(((left_variance_old)**2) + ((left_variance_new))**2) a2_new = (a2_new*((right_variance_old)**2) + a2_old*((right_variance_new)**2))/(((right_variance_old)**2) + ((right_variance_new))**2) b2_new = (b2_new*((right_variance_old)**2) + b2_old*((right_variance_new)**2))/(((right_variance_old)**2) + ((right_variance_new))**2) c2_new = (c2_new*((right_variance_old)**2) + c2_old*((right_variance_new)**2))/(((right_variance_old)**2) + ((right_variance_new))**2) ### Tracking the difference in curve fit coefficients over the frame # from last to last frame -> last frame del_a1_old = lane.coeff_diff[0][0] del_b1_old = lane.coeff_diff[0][1] del_c1_old = lane.coeff_diff[0][2] del_a2_old = lane.coeff_diff[1][0] del_b2_old = lane.coeff_diff[1][1] del_c2_old = lane.coeff_diff[1][2] # from last frame -> current frame del_a1 = abs(a1_new - fit_history[0][4][0]) del_b1 = abs(b1_new - fit_history[0][4][1]) del_c1 = abs(c1_new - fit_history[0][4][2]) del_a2 = abs(a2_new - fit_history[1][4][0]) del_b2 = abs(b2_new - fit_history[1][4][1]) del_c2 = abs(c2_new - fit_history[1][4][2]) # Storing the new values so that the values can be used in the next frame # As we are overwriting, the old values were called earlier & then the new values were found lane.coeff_diff = [[del_a1, del_b1, del_c1], [del_a2, del_b2, del_c2]] # bas ab delta coefficient for each coefficient nikalna hai aur vo formula likh dena har element ke liye a1_new = (a1_new*(del_a1_old) + a1_old*(del_a1))/((del_a1_old) + (del_a1)) b1_new = (b1_new*(del_b1_old) + b1_old*(del_b1))/((del_b1_old) + (del_b1)) c1_new = (c1_new*(del_c1_old) + c1_old*(del_c1))/((del_c1_old) + (del_c1)) """ #print("a2_new",a2_new) #print("b2_new",b2_new) #print("c2_new",c2_new) """ a2_new = (a2_new*(del_a2_old) + a2_old*(del_a2))/((del_a2_old) + (del_a2)) b2_new = (b2_new*(del_b2_old) + 
b2_old*(del_b2))/((del_b2_old) + (del_b2)) c2_new = (c2_new*(del_c2_old) + c2_old*(del_c2))/((del_c2_old) + (del_c2)) """ #print("") #print("a2_old",a2_old) #print("b2_old",b2_old) #print("c2_old",c2_old) #print("") #print("a2_new",a2_new) #print("b2_new",b2_new) #print("c2_new",c2_new) """ y_eval = np.max(ploty) """ # Here we try to avoid the sudden switch from a convex to concave parabola (and vice versa) # If the parabola looks like "(", its radius of curvature is +ve. # If the parabola looks like ")", its radius of curvature is -ve. # Switch from "(" => ")" .When curve is "(" Radius of curvature starts from a small +ve value # and increases till a point such that it becomes large. Then it suddenly flips sign # and becomes negatve as it becomes of the shape: ")" # In this case, before flipping, the average slope of rad_curv is +ve. And suddenly the new value comes out to be -ve # Using this logic, we'll scrap the frames in which this is violated. # Calculation of R_curve (radius of curvature) left_curverad = (((1 + (2*a1_new*y_eval + b1_new)**2)**1.5) / (2*a1_new)) right_curverad = (((1 + (2*a2_new*y_eval + b2_new)**2)**1.5) / (2*a2_new)) if(len(lane.rad_curv)==5): # How to check series is decreasing or increasing slope_avg=0 for i in range(0,4): slope_avg += ((slope_avg*i) + (rad_curv_history[0][i+1] - rad_curv_history[0][i]))/(i+1) # Left if (((rad_curv_history[0][4]>0) & (left_curverad<0) & (slope_avg<=0)) | ((rad_curv_history[0][4]<0) & (left_curverad>0) & (slope_avg>=0))): a1_new = fit_history[0][4][0] b1_new = fit_history[0][4][1] c1_new = fit_history[0][4][2] # Right if (((rad_curv_history[1][4]>0) & (right_curverad<0) & (slope_avg<=0)) | ((rad_curv_history[1][4]<0) & (right_curverad>0) & (slope_avg>=0))): a2_new = fit_history[1][4][0] b2_new = fit_history[1][4][1] c2_new = fit_history[1][4][2] """ """FRAME MIRRORING: Fine tuning one lane wrt to the other same as they'll have similar curvature""" #Steps: """ 1) Weighted average of the coefficients related 
to curve shape (a,b) to make both parabola a bit similar 2) Adjusting the 'c' coefficient using the lane centre of previous frame and lane width acc to current frame """ # We'll use lane centre for the previous frame to fine tune c of the parabola. First frame won't have a history so # Just for the 1st frame, we'll define it according to itself and use. Won't make any impact but will set a base for the next frames if (lane.count==0): lane.lane_bottom_centre = (((a2_new*(warped_img.shape[0]-1)**2 + b2_new*(warped_img.shape[0]-1) + c2_new) + (a1_new*(warped_img.shape[0]-1)**2 + b1_new*(warped_img.shape[0]-1) + c1_new))/2) # We'll find lane width according to the latest curve coefficients till now lane.lane_width = (((lane.lane_width*lane.count)+(a2_new*(warped_img.shape[0]-1)**2 + b2_new*(warped_img.shape[0]-1) + c2_new) - (a1_new*(warped_img.shape[0]-1)**2 + b1_new*(warped_img.shape[0]-1) + c1_new))/(lane.count+1)) a1 = 0.8*a1_new + 0.2*a2_new b1 = 0.8*b1_new + 0.2*b2_new a2 = 0.2*a1_new + 0.8*a2_new b2 = 0.2*b1_new + 0.8*b2_new #c1 = 0.8*c1_new + 0.2*c2_new #c2 = 0.2*c1_new + 0.8*c2_new #T Taking the lane centre fromt the previous frame and finding "c" such that both lanes are equidistant from it. 
c1_mirror = ((lane.lane_bottom_centre - (lane.lane_width/2))-(a1*(warped_img.shape[0]-1)**2 + b1*(warped_img.shape[0]-1))) c2_mirror = ((lane.lane_bottom_centre + (lane.lane_width/2))-(a2*(warped_img.shape[0]-1)**2 + b2*(warped_img.shape[0]-1))) c1= 0.7*c1_new + 0.3*c1_mirror c2 = 0.7*c2_new + 0.3*c2_mirror # Now we'll find the lane centre of this frame and overwrite the global variable s that the next frame can use this value lane.lane_bottom_centre = (((a2*(warped_img.shape[0]-1)**2 + b2*(warped_img.shape[0]-1) + c2) + (a1*(warped_img.shape[0]-1)**2 + b1*(warped_img.shape[0]-1) + c1))/2) #print("lane.lane_width",lane.lane_width) #print("lane.lane_bottom_centre",lane.lane_bottom_centre) left_curverad = (((1 + (2*a1*y_eval + b1)**2)**1.5) / (2*a1)) right_curverad = (((1 + (2*a2*y_eval + b2)**2)**1.5) / (2*a2)) left_fitx = a1*ploty**2 + b1*ploty + c1 right_fitx = a2*ploty**2 + b2*ploty + c2 return [[a1,b1,c1], [a2,b2,c2]], left_fitx, right_fitx, status, [left_variance_new, right_variance_new], [left_curverad,right_curverad], ploty # out_img here has boxes drawn and the pixels are colored #return [[a1_new,b1_new,c1_new], [a2_new,b2_new,c2_new]], left_fitx, right_fitx, status, [left_variance_new, right_variance_new], ploty def color_pixels_and_curve(out_img, leftx, lefty, rightx, righty, left_fitx, right_fitx): ploty = np.linspace(0, warped_img.shape[0]-1, warped_img.shape[0]) ## Visualization ## # Colors in the left and right lane regions out_img[lefty, leftx] = [255, 0, 0] out_img[righty, rightx] = [0, 0, 255] # Converting the coordinates of our line into integer values as index of the image can't take decimals left_fitx_int = left_fitx.astype(np.int32) right_fitx_int = right_fitx.astype(np.int32) ploty_int = ploty.astype(np.int32) # Coloring the curve as yellow out_img[ploty_int,left_fitx_int] = [255,255,0] out_img[ploty_int,right_fitx_int] = [255,255,0] # To thicken the curve, drawing more yellow lines out_img[ploty_int,left_fitx_int+1] = [255,255,0] 
out_img[ploty_int,right_fitx_int+1] = [255,255,0] out_img[ploty_int,left_fitx_int-1] = [255,255,0] out_img[ploty_int,right_fitx_int-1] = [255,255,0] out_img[ploty_int,left_fitx_int+2] = [255,255,0] out_img[ploty_int,right_fitx_int+2] = [255,255,0] out_img[ploty_int,left_fitx_int-2] = [255,255,0] out_img[ploty_int,right_fitx_int-2] = [255,255,0] def search_around_poly(warped_img, left_fit, right_fit): # HYPERPARAMETER # Choosing the width of the margin around the previous polynomial to search margin = 100 # Grab activated pixels nonzero = warped_img.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) ### Setting the area of search based on activated x-values ### ### within the +/- margin of our polynomial function ### left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin))) right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin))) # Again, extract left and right line pixel positions leftx = nonzerox[left_lane_inds] lefty = nonzeroy[left_lane_inds] rightx = nonzerox[right_lane_inds] righty = nonzeroy[right_lane_inds] return leftx, lefty, rightx, righty def modify_array(array, new_value): if len(array)!=5: for i in range(0,5): array.append(new_value) else: dump_var=array[0] array[0]=array[1] array[1]=array[2] array[2]=array[3] array[3]=array[4] array[4]=new_value return array def truncate(number, digits) -> float: stepper = 10.0 ** digits return math.trunc(stepper * number) / stepper """Main code begins here:""" undist_img = cal_undistort(frame) thresh_img = thresh_img(undist_img) # Note: Output here is not a binary image. 
It has been stacked already within the function warped_img, src, dst = perspective_transform(thresh_img) #draw_polygon(frame, warped_img, src, dst) #the first image is the original image that you import into the system #print("starting count",lane.count) # Making the curve coefficient, variance, radius of curvature history ready for our new frame. left_fit_previous = [i[0] for i in lane.curve_fit] right_fit_previous = [i[1] for i in lane.curve_fit] fit_history=[left_fit_previous, right_fit_previous] left_variance_previous = [i[0] for i in lane.variance] right_variance_previous = [i[1] for i in lane.variance] variance_history=[left_variance_previous, right_variance_previous] left_rad_curv_prev = [i[0] for i in lane.rad_curv] right_rad_curv_prev = [i[1] for i in lane.rad_curv] rad_curv_history = [left_rad_curv_prev, right_rad_curv_prev] #print(rad_curv_history) # These variables realted to history could have been defined in condition lane.count>0 below # Reason for defining above: We will want to get back to finding lane pixels from scratch # if our frame is a bad frame or the lane pixels deviate too much from the previous frame. 
# In that case, we set lane.count=0 and start searching from scratch # but we DO have history data at that point which will be used in fit_polnomial() function if (lane.count == 0): leftx, lefty, rightx, righty = find_lane_pixels(warped_img) # Find our lane pixels first elif (lane.count > 0): leftx, lefty, rightx, righty = search_around_poly(warped_img, left_fit_previous[4], right_fit_previous[4]) curve_fit_new, left_fitx, right_fitx, status, variance_new, rad_curv_new,ploty = fit_polynomial(warped_img, leftx, lefty, rightx, righty, fit_history, variance_history,rad_curv_history) # Define conversions in x and y from pixels space to meters ym_per_pix = 30/720 # meters per pixel in y dimension xm_per_pix = 3.7/650 # meters per pixel in x dimension #Finding the fit for the curve fit who's constituent points: x and y have been caliberated left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2) right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2) #Finding the correct radius of curvature in the real world frame (in metric system istead of pixel space) # We'll choose the maximum y-value, corresponding to the bottom of the image (this is where we find roc) y_eval = np.max(ploty) left_curverad = (((1 + (2*left_fit_cr[0]*y_eval + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])) right_curverad = (((1 + (2*right_fit_cr[0]*y_eval + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])) avg_rad_curv = truncate(((left_curverad + right_curverad)/2),3) offset = truncate((((lane.lane_bottom_centre - frame.shape[1]/2))*xm_per_pix),3) #print("avg_rad_curv",avg_rad_curv) #print("offset",offset) lane.rad_curv = modify_array(lane.rad_curv, rad_curv_new) lane.detected = status lane.curve_fit = modify_array(lane.curve_fit, curve_fit_new) lane.variance = modify_array(lane.variance, variance_new) #print(lane.variance) # Now we'll color the lane pixels and plot the identified curve over the image #color_pixels_and_curve(warped_img, leftx, lefty, 
rightx, righty, left_fitx, right_fitx) unwarped_img, Minv = rev_perspective_transform(warped_img, src, dst) # Create an image to draw the lines on color_warp = np.zeros_like(warped_img).astype(np.uint8) # Recast the x and y points into usable format for cv2.fillPoly() pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))]) pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))]) pts = np.hstack((pts_left, pts_right)) # Draw the lane onto the warped blank image cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0)) # Warp the blank back to original image space using inverse perspective matrix (Minv) newwarp = cv2.warpPerspective(color_warp, Minv, (frame.shape[1], frame.shape[0])) # Combine the result with the original image result = cv2.addWeighted(undist_img, 1, newwarp, 0.3, 0) text1 = "Curvature radius: "+str(avg_rad_curv)+"m" text2 = "Offset: "+str(offset)+"m" cv2.putText(result, text1, (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2) cv2.putText(result, text2, (40, 110), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2) lane.count = lane.count+1 #return warped_img #return color_warp return result #return unwarped_img #return undist_img #return thresh_img #return warped_img """#color pixel funcction ko un-comment kardena""" # - # Class has been created below # # + # Define a class to receive the characteristics of each line detection class Line(): def __init__(self): #Let's count the number of consecutive frames self.count = 0 # was the line detected in the last iteration? self.detected = False #polynomial coefficients for the most recent fit self.curve_fit = [] # Traking variance for the right lane self.variance = [] #difference in fit coefficients between last and new fits. 
Just store the difference in coefficients for the last frame self.coeff_diff = [[0,0,0],[0,0,0]] #Lane width measured at the start of reset self.lane_width = 0 #Let's track the midpoint of the previous frame self.lane_bottom_centre = 0 #radius of curvature of the line in some units self.rad_curv = [] lane=Line() import glob test_images = glob.glob('test_images/*.jpg') # Step through the list and search for chessboard corners for idx, fname in enumerate(test_images): img = cv2.imread(fname) #print ("success"+str(idx)) write_name = 'output_files/img_results/undist_result '+str(idx+1)+'.jpg' color_corrected_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) lane.count = 0 # Necessary otherwise the images will start fixing the curve according to the history output = process_image(color_corrected_img) output_mod = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) cv2.imwrite(write_name,output_mod) cv2.imshow(write_name, output_mod) cv2.waitKey(500) cv2.destroyAllWindows() frame1= mpimg.imread("test_images/test (4).jpg") """ frame2= mpimg.imread("my_test_images/Highway_snaps/image (2).jpg") frame3= mpimg.imread("my_test_images/Highway_snaps/image (3).jpg") frame4= mpimg.imread("my_test_images/Highway_snaps/image (4).jpg") frame5= mpimg.imread("my_test_images/Highway_snaps/image (5).jpg") frame6= mpimg.imread("my_test_images/Highway_snaps/image (6).jpg") frame7= mpimg.imread("my_test_images/Highway_snaps/image (7).jpg") frame8= mpimg.imread("my_test_images/Highway_snaps/image (8).jpg") frame9= mpimg.imread("my_test_images/Highway_snaps/image (9).jpg") %matplotlib notebook (process_image(frame1)) (process_image(frame2)) (process_image(frame3)) (process_image(frame4)) (process_image(frame5)) (process_image(frame6)) (process_image(frame7)) (process_image(frame8)) """ plt.imshow(process_image(frame1)) # - # Video test # + # Define a class to receive the characteristics of each line detection class Line(): def __init__(self): #Let's count the number of consecutive frames self.count = 0 # was 
the line detected in the last iteration? self.detected = False #polynomial coefficients for the most recent fit self.curve_fit = [] # Traking variance for the right lane self.variance = [] #difference in fit coefficients between last and new fits. Just store the difference in coefficients for the last frame self.coeff_diff = [[0,0,0],[0,0,0]] #Lane width measured at the start of reset self.lane_width = 0 #Let's track the midpoint of the previous frame self.lane_bottom_centre = 0 #radius of curvature of the line in some units self.rad_curv = [] lane=Line() project_output = 'Project_Result_till_yesterday.mp4' clip1 = VideoFileClip("test_videos/project_video.mp4").subclip(19,20) project_clip = clip1.fl_image(process_image) #NOTE: this function expects color images! # %time project_clip.write_videofile(project_output, audio=False) # - HTML(""" <video width="960" height="540" controls> <source src="{0}"> </video> """.format(project_output)) # .
Pipeline progression/Works fine - with Rad_of_curv.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 2: Breakout Strategy # ## Instructions # Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity. # # ## Packages # When you implement the functions, you'll only need to use the [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/) packages. Don't import any other packages, otherwise the grader will not be able to run your code. # # The other packages that we're importing is `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems. # ### Install Packages import sys # !{sys.executable} -m pip install -r requirements.txt # ### Load Packages import pandas as pd import numpy as np import helper import project_helper import project_tests # ## Market Data # The data source we'll be using is the [Wiki End of Day data](https://www.quandl.com/databases/WIKIP) hosted at [Quandl](https://www.quandl.com). This contains data for many stocks, but we'll just be looking at the S&P 500 stocks. We'll also make things a little easier to solve by narrowing our range of time from 2007-06-30 to 2017-09-30. 
# ### Set API Key # Set the `quandl_api_key` variable to your Quandl api key. You can find your Quandl api key [here](https://www.quandl.com/account/api). # TODO: Add your Quandl API Key quandl_api_key = '' # ### Download Data # + import os snp500_file_path = 'data/tickers_SnP500.txt' wiki_file_path = 'data/WIKI_PRICES.csv' start_date, end_date = '2013-07-01', '2017-06-30' use_columns = ['date', 'ticker', 'adj_close', 'adj_high', 'adj_low'] if not os.path.exists(wiki_file_path): with open(snp500_file_path) as f: tickers = f.read().split() helper.download_quandl_dataset(quandl_api_key, 'WIKI', 'PRICES', wiki_file_path, use_columns, tickers, start_date, end_date) else: print('Data already downloaded') # - # ### Load Data # While using real data will give you hands on experience, it's doesn't cover all the topics we try to condense in one project. We'll solve this by creating new stocks. We've create a scenario where companies mining [Terbium](https://en.wikipedia.org/wiki/Terbium) are making huge profits. All the companies in this sector of the market are made up. They represent a sector with large growth that will be used for demonstration latter in this project. # + df_original = pd.read_csv(wiki_file_path, parse_dates=['date'], index_col=False) # Add TB sector to the market df = df_original df = pd.concat([df] + project_helper.generate_tb_sector(df[df['ticker'] == 'AAPL']['date']), ignore_index=True) print('Loaded Dataframe') # - # ### 2-D Matrices # Here we convert df into multiple DataFrames for each OHLC. We could use a multiindex, but that just stacks the columns for each ticker. We want to be able to apply calculations without using groupby each time. 
close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close') high = df.reset_index().pivot(index='date', columns='ticker', values='adj_high') low = df.reset_index().pivot(index='date', columns='ticker', values='adj_low') # ### View Data # To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix. close # ### Stock Example # Let's see what a single stock looks like from the closing prices. For this example and future display examples, we'll use Apple's stock, "AAPL", to graph the data. If we tried to graph all the stocks, it would be too much information. # Run the code below to view a chart of Apple stock. apple_ticker = 'AAPL' project_helper.plot_stock(close[apple_ticker], '{} Stock'.format(apple_ticker)) # ## The Alpha Research Process # # In this project you will code and evaluate a "breakout" signal. It is important to understand where these steps fit in the alpha research workflow. The signal-to-noise ratio in trading signals is very low and, as such, it is very easy to fall into the trap of _overfitting_ to noise. It is therefore inadvisable to jump right into signal coding. To help mitigate overfitting, it is best to start with a general observation and hypothesis; i.e., you should be able to answer the following question _before_ you touch any data: # # > What feature of markets or investor behaviour would lead to a persistent anomaly that my signal will try to use? # # Ideally the assumptions behind the hypothesis will be testable _before_ you actually code and evaluate the signal itself. The workflow therefore is as follows: # # ![image](images/alpha_steps.png) # # In this project, we assume that the first three steps area done ("observe & research", "form hypothesis", "validate hypothesis"). The hypothesis you'll be using for this project is the following: # - In the absence of news or significant investor trading interest, stocks oscillate in a range. 
# - Traders seek to capitalize on this range-bound behaviour periodically by selling/shorting at the top of the range and buying/covering at the bottom of the range. This behaviour reinforces the existence of the range. # - When stocks break out of the range, due to, e.g., a significant news release or from market pressure from a large investor: # - the liquidity traders who have been providing liquidity at the bounds of the range seek to cover their positions to mitigate losses, thus magnifying the move out of the range, _and_ # - the move out of the range attracts other investor interest; these investors, due to the behavioural bias of _herding_ (e.g., [Herd Behavior](https://www.investopedia.com/university/behavioral_finance/behavioral8.asp)) build positions which favor continuation of the trend. # # # Using this hypothesis, let start coding.. # ## Compute the Highs and Lows in a Window # You'll use the price highs and lows as an indicator for the breakout strategy. In this section, implement `get_high_lows_lookback` to get the maximum high price and minimum low price over a window of days. The variable `lookback_days` contains the number of days to look in the past. Make sure this doesn't include the current day. # + def get_high_lows_lookback(high, low, lookback_days): """ Get the highs and lows in a lookback window. 
Parameters ---------- high : DataFrame High price for each ticker and date low : DataFrame Low price for each ticker and date lookback_days : int The number of days to look back Returns ------- lookback_high : DataFrame Lookback high price for each ticker and date lookback_low : DataFrame Lookback low price for each ticker and date """ #TODO: Implement function lookback_high = high.shift(1).rolling(lookback_days, lookback_days).max() lookback_low = low.shift(1).rolling(lookback_days, lookback_days).min() return lookback_high, lookback_low project_tests.test_get_high_lows_lookback(get_high_lows_lookback) # - # ### View Data # Let's use your implementation of `get_high_lows_lookback` to get the highs and lows for the past 50 days and compare it to it their respective stock. Just like last time, we'll use Apple's stock as the example to look at. lookback_days = 50 lookback_high, lookback_low = get_high_lows_lookback(high, low, lookback_days) project_helper.plot_high_low( close[apple_ticker], lookback_high[apple_ticker], lookback_low[apple_ticker], 'High and Low of {} Stock'.format(apple_ticker)) # ## Compute Long and Short Signals # Using the generated indicator of highs and lows, create long and short signals using a breakout strategy. Implement `get_long_short` to generate the following signals: # # | Signal | Condition | # |----|------| # | -1 | Low > Close Price | # | 1 | High < Close Price | # | 0 | Otherwise | # # In this chart, **Close Price** is the `close` parameter. **Low** and **High** are the values generated from `get_high_lows_lookback`, the `lookback_high` and `lookback_low` parameters. # + def get_long_short(close, lookback_high, lookback_low): """ Generate the signals long, short, and do nothing. 
Parameters ---------- close : DataFrame Close price for each ticker and date lookback_high : DataFrame Lookback high price for each ticker and date lookback_low : DataFrame Lookback low price for each ticker and date Returns ------- long_short : DataFrame The long, short, and do nothing signals for each ticker and date """ #TODO: Implement function return ((close < lookback_low).astype(int) * -1) + (close > lookback_high).astype(int) project_tests.test_get_long_short(get_long_short) # - # ### View Data # Let's compare the signals you generated against the close prices. This chart will show a lot of signals. Too many in fact. We'll talk about filtering the redundant signals in the next problem. signal = get_long_short(close, lookback_high, lookback_low) project_helper.plot_signal( close[apple_ticker], signal[apple_ticker], 'Long and Short of {} Stock'.format(apple_ticker)) # ## Filter Signals # That was a lot of repeated signals! If we're already shorting a stock, having an additional signal to short a stock isn't helpful for this strategy. This also applies to additional long signals when the last signal was long. # # Implement `filter_signals` to filter out repeated long or short signals within the `lookahead_days`. If the previous signal was the same, change the signal to `0` (do nothing signal). For example, say you have a single stock time series that is # # `[1, 0, 1, 0, 1, 0, -1, -1]` # # Running `filter_signals` with a lookahead of 3 days should turn those signals into # # `[1, 0, 0, 0, 1, 0, -1, 0]` # # To help you implement the function, we have provided you with the `clear_signals` function. This will remove all signals within a window after the last signal. For example, say you're using a windows size of 3 with `clear_signals`. It would turn the Series of long signals # # `[0, 1, 0, 0, 1, 1, 0, 1, 0]` # # into # # `[0, 1, 0, 0, 0, 1, 0, 0, 0]` # # Note: it only takes a Series of the same type of signals, where `1` is the signal and `0` is no signal. 
It can't take a mix of long and short signals. Using this function, implement `filter_signals`. # + def clear_signals(signals, window_size): """ Clear out signals in a Series of just long or short signals. Remove the number of signals down to 1 within the window size time period. Parameters ---------- signals : Pandas Series The long, short, or do nothing signals window_size : int The number of days to have a single signal Returns ------- signals : Pandas Series Signals with the signals removed from the window size """ # Start with buffer of window size # This handles the edge case of calculating past_signal in the beginning clean_signals = [0]*window_size for signal_i, current_signal in enumerate(signals): # Check if there was a signal in the past window_size of days has_past_signal = bool(sum(clean_signals[signal_i:signal_i+window_size])) # Use the current signal if there's no past signal, else 0/False clean_signals.append(not has_past_signal and current_signal) # Remove buffer clean_signals = clean_signals[window_size:] # Return the signals as a Series of Ints return pd.Series(np.array(clean_signals).astype(np.int), signals.index) def filter_signals(signal, lookahead_days): """ Filter out signals in a DataFrame. Parameters ---------- signal : DataFrame The long, short, and do nothing signals for each ticker and date lookahead_days : int The number of days to look ahead Returns ------- filtered_signal : DataFrame The filtered long, short, and do nothing signals for each ticker and date """ #TODO: Implement function pos_signal = signal[signal == 1].fillna(0) neg_signal = signal[signal == -1].fillna(0) * -1 pos_signal = pos_signal.apply(lambda signals: clear_signals(signals, lookahead_days)) neg_signal = neg_signal.apply(lambda signals: clear_signals(signals, lookahead_days)) return pos_signal + neg_signal*-1 project_tests.test_filter_signals(filter_signals) # - # ### View Data # Let's view the same chart as before, but with the redundant signals removed. 
signal_5 = filter_signals(signal, 5) signal_10 = filter_signals(signal, 10) signal_20 = filter_signals(signal, 20) for signal_data, signal_days in [(signal_5, 5), (signal_10, 10), (signal_20, 20)]: project_helper.plot_signal( close[apple_ticker], signal_data[apple_ticker], 'Long and Short of {} Stock with {} day signal window'.format(apple_ticker, signal_days)) # ## Lookahead Close Prices # With the trading signal done, we can start working on evaluating how many days to short or long the stocks. In this problem, implement `get_lookahead_prices` to get the close price days ahead in time. You can get the number of days from the variable `lookahead_days`. We'll use the lookahead prices to calculate future returns in another problem. # + def get_lookahead_prices(close, lookahead_days): """ Get the lookahead prices for `lookahead_days` number of days. Parameters ---------- close : DataFrame Close price for each ticker and date lookahead_days : int The number of days to look ahead Returns ------- lookahead_prices : DataFrame The lookahead prices for each ticker and date """ #TODO: Implement function return close.shift(-lookahead_days) project_tests.test_get_lookahead_prices(get_lookahead_prices) # - # ### View Data # Using the `get_lookahead_prices` function, let's generate lookahead closing prices for 5, 10, and 20 days. # # Let's also chart a subsection of a few months of the Apple stock instead of years. This will allow you to view the differences between the 5, 10, and 20 day lookaheads. Otherwise, they will mesh together when looking at a chart that is zoomed out. 
lookahead_5 = get_lookahead_prices(close, 5) lookahead_10 = get_lookahead_prices(close, 10) lookahead_20 = get_lookahead_prices(close, 20) project_helper.plot_lookahead_prices( close[apple_ticker].iloc[150:250], [ (lookahead_5[apple_ticker].iloc[150:250], 5), (lookahead_10[apple_ticker].iloc[150:250], 10), (lookahead_20[apple_ticker].iloc[150:250], 20)], '5, 10, and 20 day Lookahead Prices for Slice of {} Stock'.format(apple_ticker)) # ## Lookahead Price Returns # Implement `get_return_lookahead` to generate the log price return between the closing price and the lookahead price. # + def get_return_lookahead(close, lookahead_prices): """ Calculate the log returns from the lookahead days to the signal day. Parameters ---------- close : DataFrame Close price for each ticker and date lookahead_prices : DataFrame The lookahead prices for each ticker and date Returns ------- lookahead_returns : DataFrame The lookahead log returns for each ticker and date """ #TODO: Implement function return np.log(lookahead_prices) - np.log(close) project_tests.test_get_return_lookahead(get_return_lookahead) # - # ### View Data # Using the same lookahead prices and same subsection of the Apple stock from the previous problem, we'll view the lookahead returns. # # In order to view price returns on the same chart as the stock, a second y-axis will be added. When viewing this chart, the axis for the price of the stock will be on the left side, like previous charts. The axis for price returns will be located on the right side. 
price_return_5 = get_return_lookahead(close, lookahead_5)
price_return_10 = get_return_lookahead(close, lookahead_10)
price_return_20 = get_return_lookahead(close, lookahead_20)
project_helper.plot_price_returns(
    close[apple_ticker].iloc[150:250],
    [
        (price_return_5[apple_ticker].iloc[150:250], 5),
        (price_return_10[apple_ticker].iloc[150:250], 10),
        (price_return_20[apple_ticker].iloc[150:250], 20)],
    '5, 10, and 20 day Lookahead Returns for Slice {} Stock'.format(apple_ticker))

# ## Compute the Signal Return
# Using the price returns generate the signal returns.

# +
def get_signal_return(signal, lookahead_returns):
    """
    Compute the signal returns.

    Multiplies each signal (-1 short, 1 long, 0 do nothing) by the lookahead
    log return, so short positions earn the negated return and do-nothing
    entries contribute zero.

    Parameters
    ----------
    signal : DataFrame
        The long, short, and do nothing signals for each ticker and date
    lookahead_returns : DataFrame
        The lookahead log returns for each ticker and date

    Returns
    -------
    signal_return : DataFrame
        Signal returns for each ticker and date
    """
    return signal * lookahead_returns

project_tests.test_get_signal_return(get_signal_return)
# -

# ### View Data
# Let's continue using the previous lookahead prices to view the signal returns. Just like before, the axis for the signal returns is on the right side of the chart.

# Fixed typo in the chart title: "LookaheadSignal" was missing a space.
title_string = '{} day Lookahead Signal Returns for {} Stock'
signal_return_5 = get_signal_return(signal_5, price_return_5)
signal_return_10 = get_signal_return(signal_10, price_return_10)
signal_return_20 = get_signal_return(signal_20, price_return_20)
project_helper.plot_signal_returns(
    close[apple_ticker],
    [
        (signal_return_5[apple_ticker], signal_5[apple_ticker], 5),
        (signal_return_10[apple_ticker], signal_10[apple_ticker], 10),
        (signal_return_20[apple_ticker], signal_20[apple_ticker], 20)],
    [title_string.format(5, apple_ticker), title_string.format(10, apple_ticker), title_string.format(20, apple_ticker)])

# ## Test for Significance
# ### Histogram
# Let's plot a histogram of the signal return values.
project_helper.plot_signal_histograms( [signal_return_5, signal_return_10, signal_return_20], 'Signal Return', ('5 Days', '10 Days', '20 Days')) # ### Question: What do the histograms tell you about the signal? # *#TODO: Put Answer In this Cell* # ### P-Value # Let's calculate the P-Value from the signal return. pval_5 = project_helper.get_signal_return_pval(signal_return_5) print('5 Day P-value: {}'.format(pval_5)) pval_10 = project_helper.get_signal_return_pval(signal_return_10) print('10 Day P-value: {}'.format(pval_10)) pval_20 = project_helper.get_signal_return_pval(signal_return_20) print('20 Day P-value: {}'.format(pval_20)) # ### Question: What do the p-values tell you about the signal? # *#TODO: Put Answer In this Cell* # ## Outliers # You might have noticed the outliers in the 10 and 20 day histograms. To better visualize the outliers, let's compare the 5, 10, and 20 day signals returns to normal distributions with the same mean and deviation for each signal return distributions. project_helper.plot_signal_to_normal_histograms( [signal_return_5, signal_return_10, signal_return_20], 'Signal Return', ('5 Days', '10 Days', '20 Days')) # ## Find Outliers # While you can see the outliers in the histogram, we need to find the stocks that are cause these outlying returns. # # Implement the function `find_outliers` to use Kolmogorov-Smirnov test (KS test) between a normal distribution and each stock's signal returns in the following order: # - Ignore returns without a signal in `signal`. This will better fit the normal distribution and remove false positives. # - Run KS test on a normal distribution that with the same std and mean of all the signal returns against each stock's signal returns. Use `kstest` to perform the KS test. # - Ignore any items that don't pass the null hypothesis with a threshold of `pvalue_threshold`. You can consider them not outliers. # - Return all stock tickers with a KS value above `ks_threshold`. 
# +
from scipy.stats import kstest


def find_outliers(signal, signal_return, ks_threshold, pvalue_threshold=0.05):
    """
    Find stock outliers in `df` using Kolmogorov-Smirnov test against a normal distribution.

    Ignore stock with a p-value from Kolmogorov-Smirnov test greater than `pvalue_threshold`.
    Ignore stocks with KS statistic value lower than `ks_threshold`.

    Parameters
    ----------
    signal : DataFrame
        The long, short, and do nothing signals for each ticker and date
    signal_return : DataFrame
        The signal return for each ticker and date
    ks_threshold : float
        The threshold for the KS statistic
    pvalue_threshold : float
        The threshold for the p-value

    Returns
    -------
    outliers : list of str
        Symbols that are outliers
    """
    # Keep only returns that were generated by an actual signal.
    non_zero_signal_returns = signal_return[signal != 0].stack().dropna().T
    # BUG FIX: the reference normal distribution must use the mean AND the
    # standard deviation of all signal returns; the second tuple element was
    # previously `.mean()` again, which compared against N(mean, mean).
    normal_args = (
        non_zero_signal_returns.mean(),
        non_zero_signal_returns.std())
    non_zero_signal_returns.index = non_zero_signal_returns.index.set_names(['date', 'ticker'])
    # Run the KS test per ticker against the pooled normal distribution.
    outliers = non_zero_signal_returns.groupby('ticker') \
        .apply(lambda x: kstest(x, 'norm', normal_args)) \
        .apply(pd.Series) \
        .rename(index=str, columns={0: 'ks_value', 1: 'p_value'})

    # Remove items that don't pass the null hypothesis
    outliers = outliers[outliers['p_value'] < pvalue_threshold]

    return outliers[outliers['ks_value'] > ks_threshold].index.tolist()

project_tests.test_find_outliers(find_outliers)
# -

# ### View Data
# Using the `find_outliers` function you implemented, let's see what we found.
# + outlier_tickers = [] ks_threshold = 0.8 outlier_tickers.extend(find_outliers(signal_5, signal_return_5, ks_threshold)) outlier_tickers.extend(find_outliers(signal_10, signal_return_10, ks_threshold)) outlier_tickers.extend(find_outliers(signal_20, signal_return_20, ks_threshold)) outlier_tickers = set(outlier_tickers) print('{} Outliers Found:\n{}'.format(len(outlier_tickers), ', '.join(list(outlier_tickers)))) # - # ### Show Significance without Outliers # Let's compare the 5, 10, and 20 day signals returns without outliers to normal distributions. Also, let's see how the P-Value has changed with the outliers removed. # + good_tickers = list(set(close.columns) - outlier_tickers) project_helper.plot_signal_to_normal_histograms( [signal_return_5[good_tickers], signal_return_10[good_tickers], signal_return_20[good_tickers]], 'Signal Return Without Outliers', ('5 Days', '10 Days', '20 Days')) outliers_removed_pval_5 = project_helper.get_signal_return_pval(signal_return_5[good_tickers]) outliers_removed_pval_10 = project_helper.get_signal_return_pval(signal_return_10[good_tickers]) outliers_removed_pval_20 = project_helper.get_signal_return_pval(signal_return_20[good_tickers]) print('5 Day P-value (with outliers): {}'.format(pval_5)) print('5 Day P-value (without outliers): {}'.format(outliers_removed_pval_5)) print('') print('10 Day P-value (with outliers): {}'.format(pval_10)) print('10 Day P-value (without outliers): {}'.format(outliers_removed_pval_10)) print('') print('20 Day P-value (with outliers): {}'.format(pval_20)) print('20 Day P-value (without outliers): {}'.format(outliers_removed_pval_20)) # - # That's more like it! The returns are closer to a normal distribution. You have finished the research phase of a Breakout Strategy. You can now submit your project. # ## Submission # Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. 
One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
project2/.Trash-0/files/project_2_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Recurrent Neural Networks with ``gluon`` # # # With gluon, now we can train the recurrent neural networks (RNNs) more neatly, such as the long short-term memory (LSTM) and the gated recurrent unit (GRU). To demonstrate the end-to-end RNN training and prediction pipeline, we take a classic problem in language modeling as a case study. Specifically, we will show how to predict the distribution of the next word given a sequence of previous words. # ## Import packages # # To begin with, we need to make the following necessary imports. import math import os import time import numpy as np import mxnet as mx from mxnet import gluon, autograd from mxnet.gluon import nn, rnn # ## Define classes for indexing words of the input document # # In a language modeling problem, we define the following classes to facilitate the routine procedures for loading document data. In the following, the ``Dictionary`` class is for word indexing: words in the documents can be converted from the string format to the integer format. # # In this example, we use consecutive integers to index words of the input document. class Dictionary(object): def __init__(self): self.word2idx = {} self.idx2word = [] def add_word(self, word): if word not in self.word2idx: self.idx2word.append(word) self.word2idx[word] = len(self.idx2word) - 1 return self.word2idx[word] def __len__(self): return len(self.idx2word) # The ``Dictionary`` class is used by the ``Corpus`` class to index the words of the input document. 
class Corpus(object):
    """Index the train/valid/test splits of a document set with one shared Dictionary."""

    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(path + 'train.txt')
        self.valid = self.tokenize(path + 'valid.txt')
        self.test = self.tokenize(path + 'test.txt')

    def tokenize(self, path):
        """Convert a text file into a flat NDArray of word indices (one <eos> per line)."""
        assert os.path.exists(path)
        # Pass 1: register every word in the dictionary and count total tokens.
        num_tokens = 0
        with open(path, 'r') as f:
            for line in f:
                line_words = line.split() + ['<eos>']
                num_tokens += len(line_words)
                for w in line_words:
                    self.dictionary.add_word(w)
        # Pass 2: re-read the file and map each word to its integer index.
        ids = np.zeros((num_tokens,), dtype='int32')
        pos = 0
        with open(path, 'r') as f:
            for line in f:
                for w in line.split() + ['<eos>']:
                    ids[pos] = self.dictionary.word2idx[w]
                    pos += 1
        return mx.nd.array(ids, dtype='int32')

# ## Provide an exposition of different RNN models with ``gluon``
#
# Based on the ``gluon.Block`` class, we can make different RNN models available with the following single ``RNNModel`` class.
#
# Users can select their preferred RNN model or compare different RNN models by configuring the argument of the constructor of ``RNNModel``. We will show an example following the definition of the ``RNNModel`` class.
class RNNModel(gluon.Block):
    """A model with an encoder, recurrent layer, and a decoder.

    The encoder is a word embedding, the recurrent layer is selected by
    ``mode`` ('rnn_relu', 'rnn_tanh', 'lstm', or 'gru'), and the decoder is a
    dense layer projecting hidden states back to vocabulary logits.
    """
    def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            # Dropout is shared: applied to the embedding output and to the
            # RNN output in forward().
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer = mx.init.Uniform(0.1))
            # Select the recurrent cell type from the `mode` string.
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, activation='relu', dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru"%mode)

            if tie_weights:
                # Weight tying: decoder shares the encoder's parameters.
                # NOTE(review): this presumably requires num_embed == num_hidden
                # for the shared weight shapes to match -- confirm.
                self.decoder = nn.Dense(vocab_size, in_units = num_hidden,
                                        params = self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units = num_hidden)

            self.num_hidden = num_hidden

    def forward(self, inputs, hidden):
        # Embed tokens, run through the RNN, then project every time step's
        # hidden state (flattened to 2-D) to vocabulary logits.
        emb = self.drop(self.encoder(inputs))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(output.reshape((-1, self.num_hidden)))
        return decoded, hidden

    def begin_state(self, *args, **kwargs):
        # Delegate initial-state creation to the underlying recurrent layer.
        return self.rnn.begin_state(*args, **kwargs)

# ## Select an RNN model and configure parameters
#
# For demonstration purposes, we provide an arbitrary selection of the parameter values. In practice, some parameters should be more fine tuned based on the validation data set.
#
# For instance, to obtain a better performance, as reflected in a lower loss or perplexity, one can set ``args_epochs`` to a larger value.
#
# In this demonstration, LSTM is the chosen type of RNN.
For other RNN options, one can replace the ``'lstm'`` string to ``'rnn_relu'``, ``'rnn_tanh'``, or ``'gru'`` as provided by the aforementioned ``gluon.Block`` class.

args_data = '../data/nlp/ptb.'
# NOTE(review): the markdown above says LSTM is the chosen RNN type, but
# args_model is set to 'rnn_relu' -- confirm which configuration is intended.
args_model = 'rnn_relu'
args_emsize = 100
args_nhid = 100
args_nlayers = 2
args_lr = 1.0
args_clip = 0.2
args_epochs = 1
args_batch_size = 32
args_bptt = 5
args_dropout = 0.2
args_tied = True
# NOTE(review): 'store_true' looks like a leftover argparse default; this
# value is never read as a boolean in the visible code -- confirm.
args_cuda = 'store_true'
args_log_interval = 500
args_save = 'model.param'

# ## Load data as batches
#
# We load the document data by leveraging the aforementioned ``Corpus`` class.
#
# To speed up the subsequent data flow in the RNN model, we pre-process the loaded data as batches. This procedure is defined in the following ``batchify`` function.

# +
context = mx.cpu(0)
corpus = Corpus(args_data)

# Reshape the flat token stream to (num_example, batch_size), truncating any
# trailing tokens that do not fill a whole batch.
def batchify(data, batch_size):
    """Reshape data into (num_example, batch_size)"""
    nbatch = data.shape[0] // batch_size
    data = data[:nbatch * batch_size]
    data = data.reshape((batch_size, nbatch)).T
    return data

train_data = batchify(corpus.train, args_batch_size).as_in_context(context)
val_data = batchify(corpus.valid, args_batch_size).as_in_context(context)
test_data = batchify(corpus.test, args_batch_size).as_in_context(context)
# -

# ## Build the model
#
# We go on to build the model, initialize model parameters, and configure the optimization algorithms for training the RNN model.

# +
ntokens = len(corpus.dictionary)

model = RNNModel(args_model, ntokens, args_emsize, args_nhid,
                 args_nlayers, args_dropout, args_tied)
model.collect_params().initialize(mx.init.Xavier(), ctx=context)
trainer = gluon.Trainer(model.collect_params(), 'sgd',
                        {'learning_rate': args_lr,
                         'momentum': 0,
                         'wd': 0})
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# -

# ## Train the model and evaluate on validation and testing data sets
#
# Now we can define functions for training and evaluating the model. The following are two helper functions that will be used during model training and evaluation.
# +
def get_batch(source, i):
    """Slice a (data, target) BPTT window starting at row `i` of `source`.

    The target is the input shifted by one time step, flattened for the loss.
    """
    seq_len = min(args_bptt, source.shape[0] - 1 - i)
    data = source[i : i + seq_len]
    target = source[i + 1 : i + 1 + seq_len]
    return data, target.reshape((-1,))

def detach(hidden):
    """Detach hidden state(s) from the graph to truncate backpropagation."""
    if isinstance(hidden, (tuple, list)):
        hidden = [i.detach() for i in hidden]
    else:
        hidden = hidden.detach()
    return hidden
# -

# The following is the function for model evaluation. It returns the loss of the model prediction. We will discuss the details of the loss measure shortly.

def eval(data_source):
    """Return the average per-token loss of `model` over `data_source`."""
    total_L = 0.0
    ntotal = 0
    hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx=context)
    for i in range(0, data_source.shape[0] - 1, args_bptt):
        data, target = get_batch(data_source, i)
        output, hidden = model(data, hidden)
        L = loss(output, target)
        total_L += mx.nd.sum(L).asscalar()
        ntotal += L.size
    return total_L / ntotal

# Now we are ready to define the function for training the model. We can monitor the model performance on the training, validation, and testing data sets over iterations.

def train():
    """Train `model` for `args_epochs` epochs, checkpointing on validation loss.

    When validation loss does not improve, the learning rate is decayed by 4x
    and the best checkpoint is reloaded.
    """
    # BUG FIX: `args_lr` is reassigned in the learning-rate-decay branch below.
    # Without this `global` declaration, Python treats `args_lr` as a local,
    # and `args_lr = args_lr * 0.25` raises UnboundLocalError whenever the
    # validation loss fails to improve.
    global args_lr
    best_val = float("Inf")
    for epoch in range(args_epochs):
        total_L = 0.0
        start_time = time.time()
        hidden = model.begin_state(func = mx.nd.zeros, batch_size = args_batch_size, ctx = context)
        for ibatch, i in enumerate(range(0, train_data.shape[0] - 1, args_bptt)):
            data, target = get_batch(train_data, i)
            # Truncated BPTT: stop gradients from flowing into previous windows.
            hidden = detach(hidden)
            with autograd.record():
                output, hidden = model(data, hidden)
                L = loss(output, target)
                L.backward()
            grads = [i.grad(context) for i in model.collect_params().values()]
            # Here gradient is for the whole batch.
            # So we multiply max_norm by batch_size and bptt size to balance it.
            gluon.utils.clip_global_norm(grads, args_clip * args_bptt * args_batch_size)
            trainer.step(args_batch_size)
            total_L += mx.nd.sum(L).asscalar()

            if ibatch % args_log_interval == 0 and ibatch > 0:
                cur_L = total_L / args_bptt / args_batch_size / args_log_interval
                print('[Epoch %d Batch %d] loss %.2f, perplexity %.2f' % (
                    epoch + 1, ibatch, cur_L, math.exp(cur_L)))
                total_L = 0.0

        val_L = eval(val_data)

        print('[Epoch %d] time cost %.2fs, validation loss %.2f, validation perplexity %.2f' % (
            epoch + 1, time.time() - start_time, val_L, math.exp(val_L)))

        if val_L < best_val:
            # New best validation loss: evaluate on test data and checkpoint.
            best_val = val_L
            test_L = eval(test_data)
            model.save_params(args_save)
            print('test loss %.2f, test perplexity %.2f' % (test_L, math.exp(test_L)))
        else:
            # No improvement: decay the learning rate and restore the best model.
            args_lr = args_lr * 0.25
            # NOTE(review): `_init_optimizer` is a private Trainer API -- it
            # works here but may break across MXNet versions; confirm.
            trainer._init_optimizer('sgd',
                                    {'learning_rate': args_lr,
                                     'momentum': 0,
                                     'wd': 0})
            model.load_params(args_save, context)

# Recall that the RNN model training is based on maximization likelihood of observations. For evaluation purposes, we have used the following two measures:
#
# * Loss: the loss function is defined as the average negative log likelihood of the target words (ground truth) under prediction: $$\text{loss} = -\frac{1}{N} \sum_{i = 1}^N \text{log} \ p_{\text{target}_i}, $$ where $N$ is the number of predictions and $p_{\text{target}_i}$ the predicted likelihood of the $i$-th target word.
#
# * Perplexity: the average per-word perplexity is $\text{exp}(\text{loss})$.
#
# To orient the reader using concrete examples, let us illustrate the idea of the perplexity measure as follows.
#
# * Consider the perfect scenario where the model always predicts the likelihood of the target word as 1. In this case, for every $i$ we have $p_{\text{target}_i} = 1$. As a result, the perplexity of the perfect model is 1.
#
# * Consider a baseline scenario where the model always predicts the likelihood of the target word randomly at uniform among the given word set $W$. In this case, for every $i$ we have $p_{\text{target}_i} = 1 / |W|$.
As a result, the perplexity of a uniformly random prediction model is always $|W|$. # # * Consider the worst-case scenario where the model always predicts the likelihood of the target word as 0. In this case, for every $i$ we have $p_{\text{target}_i} = 0$. As a result, the perplexity of the worst model is positive infinity. # # # Therefore, a model with a lower perplexity that is closer to 1 is generally more effective. Any effective model has to achieve a perplexity lower than the cardinality of the target set. # # Now we are ready to train the model and evaluate the model performance on validation and testing data sets. train() model.load_params(args_save, context) test_L = eval(test_data) print('Best test loss %.2f, test perplexity %.2f'%(test_L, math.exp(test_L))) # ## Next # [Introduction to optimization](../chapter06_optimization/optimization-intro.ipynb) # For whinges or inquiries, [open an issue on GitHub.](https://github.com/zackchase/mxnet-the-straight-dope)
chapter05_recurrent-neural-networks/rnns-gluon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="X640MHUZDsUw" # # Non-Natural Aptamer Array (N2A2) Data Processing # # Soh Lab, Stanford University # # *Last updated August 2019* # + [markdown] id="HIw8Vv03DsUz" # ## 1. Introduction # # Data from the N2A2 can be distinguished as three primary components: # 1. FASTQ (sequence, x_f, y_f, quality score) for each sequencing index # 2. locs (x_l, y_l) for each cluster on each tile # 3. cifs (intensities [integer]) for each cluster on each tile and cycle # # The locs and cifs both correspond to the same clusters on each tile, so the primary goal is to associate the clusters to the appropriate fastq files via the shared x,y locations. # # *Note: The fastq (x_f,y_f) are different from the locs (x_,y_l) by a rounding scheme, so they have to be converted and matched using the appropriate formula* # # ## 2. Processing Overview # # The data is processed in this order: # 1. Data is first separated into three folders (fastq, locs, cifs) in a primary run folder from the initial run folder (<run_path>) # 2. FASTQ data (seq, x_f, y_f) is extracted from the zipped (.gz) fastq files and broken into tiles under a sub-directory (directory name: <fastq_name>_tile_data) as .csv files (seq,x,y) # 3. Sequence-intensity data is generated for each fastq and exported as csv files in a new child directory (<run_path>/intensities) for each fastq and channel (A,T,C,G). 
# * The names of the files are <fastq_name>_<processing_tag>_<channel_tag>.csv # * Data is formatted to have seq,x,y,int_1,int_2,...,int_n for the n cycles # * Filtering by sequence can be performed in this step to remove non-compliant sequences under that same sequencing index (if so, the processing tag will be 'filt') # # Subsequent processing can be performed to remove faulty tiles or otherwise # # ## 3. Usage Instructions # # Cells in the notebook should be run sequentially unless specified otherwise. Support functions are included as an 'n2a2_utils.py' file in the same directory, so please check the code resources or contact the author if you need the supporting functions file. # # Make sure to have Python 3.x installed plus common libraries (numpy, matplotlib) # # ## 4. Google Colaboratory (Optional) # # If processing data using Google Colaboratory, make a copy of this notebook and support file ('n2a2_utils.py') first and then mount your Google Drive (run the appropriate cells below) # # + [markdown] id="JbW_cj9_DsU0" # ## 5. Running the Notebook! 
# + [markdown] id="zedmXB1kDsU0" # ### Connect to Google Drive # + id="yqyiUQQMDsU1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641242510076, "user_tz": 480, "elapsed": 892, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} outputId="341f8375-1a25-4d4f-df48-5f205053534f" # Mount Google Drive and access via your credentials from google.colab import drive drive.mount('/content/drive') # + [markdown] id="C4Pjzn0HDsU4" # ### Setup (run these first) # + id="SdFO7rW_DsU5" executionInfo={"status": "ok", "timestamp": 1641242510256, "user_tz": 480, "elapsed": 182, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} # Import the libraries to be used import numpy as np import matplotlib.pyplot as plt import pandas as pd import time import os import sys import scipy.optimize # + id="T5BRP3iJDsU7" executionInfo={"status": "ok", "timestamp": 1641242510256, "user_tz": 480, "elapsed": 3, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} # Import the functions used to process sys.path.insert(1, 'drive/Shared drives/imager_v1/04_processing_code/20181108_NNinsulinF') from n2a2_utils import * # + [markdown] id="phIkLnqcDsU9" # ### Edit the run specific details # + id="vdIUs-7VDsU-" executionInfo={"status": "ok", "timestamp": 1641242510257, "user_tz": 480, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} # Use the full path to the top directory containing the three subfolders (fastq,locs,cifs) run_path='drive/Shared drives/imager_v1/03_imager_runs/20181108_NNinsulinF' # Default starting cycle is 93 cycle_start=93 # Custom names for the cifs (usually descriptive of what each cycle 
contains) # These names will also be used to name the cycles in the exported files cycle_names=['FM', 'ins_1_uM_ser_0', 'ins_10_uM_ser_0', 'ins_25_uM_ser_0', 'FM', 'ins_1_uM_ser_1', 'ins_10_uM_ser_1', 'ins_25_uM_ser_1', 'FM'] # Define the fastq names up to the S# mark in a list fastq_list=['FM_S1','insS1_S2','insR2_S3','tyroapt_S4','ksl2b_S5'] # + id="1VHngz2CDsVA" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641242510444, "user_tz": 480, "elapsed": 190, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} outputId="4d231c97-f9ef-4c98-b3e1-6e9f48306c6b" # Rename sequences rename_cycle_directories(run_path,cycle_start,cycle_names) # + id="IvOQclDHT8Ui" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641242587658, "user_tz": 480, "elapsed": 77218, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} outputId="238e64f4-daeb-4b1e-da95-5e3533d7383a" # Parse data for fastq_name in fastq_list: fastq_separate_extract(run_path,fastq_name) # + [markdown] id="wiLBADzoDsVG" # ### Filtering by sequence (optional) # # To pre-filter the sequences for QC and reduce the final file sizes, use the appropriate notations as defined here: # * `regex_format` (list of options) # * `'single'` : For a single sequence (i.e. for a control sequence or fiducial mark) # * `'primers'` : For a variable region flanked by constant regions (i.e. FP, RP) # * `'none'` : Skip filtering for this fastq # * `regex_seqs` (list of regex formats) # * `'single'` : Use the sequence to be filtered (e.g. 
`'TCGATGCAGTACTGCGTAGCTA'`) # * `'primers'` : `['<FP>','<RP>']` for the flanking constant regions (*Note: depending on the read length, parts of the sequence may be cutoff*) # * `'none'` : `'none'` # * `seq_lengths` (list of lengths) : Use the tolerated lengths for the variable regions (the `'primers'` option) # + id="q0TBIiuyDsVH" executionInfo={"status": "ok", "timestamp": 1641242587881, "user_tz": 480, "elapsed": 236, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} # If filtering, make sure this is 'True', otherwise leave as false and ignore the rest filter_sequences=True # Edit the first contents as necessary if filter_seqs: # Need a format for each fastq (see instructions above) # Note: These lists have to be same lengths as the number of fastq files regex_formats=['single','primers','primers','single','single'] FM_seq='ACCGACGGAACGCCAAAGAAACGCAAGG' ksl2b_seq='AGCAGCACAGAGGTCAGATGCAATTGGGCCCGTCCGTATGGTGGGTCCTATGCGTGCTACCGTGAA' tyroapt_seq='TGGAGCTTGGATTGATGTGGTGTGTGAGTGCGGTGCCC' FP,RP='GCGCATACCAGCTTATTCAATT','GCCGAGATTGCACTTACTATCT' RP_short='ACTTACTATCT' regex_seqs=[FM_seq,[FP,RP_short],[FP,RP_short],tyroapt_seq,ksl2b_seq] # Example of sequence lengths for random region of 30 bases with tolerated two base difference rand_region_len=40 seq_len_tol=2 seq_lengths_rand=[rand_region_len-seq_len_tol,rand_region_len+seq_len_tol] # Definition of lengths seq_lengths=[[],seq_lengths_rand,seq_lengths_rand,[],[]] # Package into one variable regex_input=(regex_formats,regex_seqs,seq_lengths) else: regex_input=None # + [markdown] id="xxWIYTx7DsVJ" # ### Connect and write out the sequence-intensity data! 
# + id="vUuNpk-lDsVK" executionInfo={"status": "ok", "timestamp": 1641242587881, "user_tz": 480, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} tile_list=np.concatenate((np.arange(1101,1119+1),np.arange(2101,2119+1))) cycle_nums=np.arange(cycle_start,cycle_start+len(cycle_names)) cycle_list=retrieve_cif_names(run_path,cycle_nums) # + id="W3SgdUZbDsVM" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1641243588921, "user_tz": 480, "elapsed": 1001043, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "01323063848847375634"}} outputId="eb347bfa-3e94-4026-90a0-39ab4ac0f2db" write_fastq_intensities(cycle_list,tile_list,run_path,fastq_list,regex_input,filter_output=filter_sequences)
code/imager_process_data_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `mesh_mask_downbyone2.nc` Metadata # # Add metadata to the NEMO-generated mesh mask file for the downbyone2 bathymetry # so that a well-defined ERDDAP dataset can be produced from it. import xarray as xr mm = xr.open_dataset('../../NEMO-forcing/grid/mesh_mask_downbyone2.nc') # + # Copy/pasted from Google Drive MeshMask Variables spreadsheet # NEMO Variable Name Description -> long_name Attribute Units nemo_vars = ''' e1t grid spacing on T-grid in u direction m e2t grid spacing on T-grid in v direction m e1u grid spacing on U-grid in u direction m e2u grid spacing on U-grid in v direction m e1v grid spacing on V-grid in u direction m e2v grid spacing on V-grid in v direction m e1f grid spacing on vorticity-grid in u direction m e2f grid spacing on vorticity-grid in v direction m glamt longitude of T-grid points degrees east gphit latitude of T-grid points degrees north glamu longitude of U-grid points degrees east gphiu latitude of U-grid points degrees north glamv longitude of V-grid points degrees east gphiv latitude of V-grid points degrees north glamf longitude of vorticity-grid points degrees east gphif latitude of vorticity-grid points degrees north tmaskutil dry land mask for T-grid and W-grid boolean umaskutil dry land mask for U-grid boolean vmaskutil dry land mask for V-grid boolean fmaskutil dry land mask for vorticity-grid boolean ff Coriolis parameter on vorticity-grid s-1 mbathy fortran index of deepest water cell, T-grid index e3t_0 grid spacing on T-grid in w direction m e3u_0 grid spacing on U-grid in w direction m e3v_0 grid spacing on V-grid in w direction m e3w_0 grid spacing on W-grid in w direction m gdept_0 depth of T-grid points m gdepu depth of U-grid points m gdepv depth of V-grid points m gdepw_0 depth of W-grid points 
m tmask mask for T-grid and W-grid boolean umask mask for U-grid boolean vmask mask for V-grid boolean fmask mask for vorticity-grid boolean ''' # + def interesting_lines(nemo_vars): for line in nemo_vars.splitlines(): if line: yield line units_subs = { 's-1': '1/s', 'index': 'count', 'degrees east': 'degrees_east', 'degrees north': 'degrees_north', } for line in interesting_lines(nemo_vars): var_name, long_name, units = map(str.strip, line.split('\t')) mm[var_name].attrs['standard_name'] = var_name mm[var_name].attrs['long_name'] = long_name if units == 'boolean': mm[var_name].attrs['flag_values'] = '0, 1' mm[var_name].attrs['flag_meaings'] = 'land, water' else: try: mm[var_name].attrs['units'] = units_subs[units] except KeyError: mm[var_name].attrs['units'] = units if 'depth' in long_name: mm[var_name].attrs['positive'] = 'down' # - # Spot check results: mm.e1t mm.glamt mm.gphit mm.gdept_0 mm.tmask mm.ff mm.mbathy # Update dataset attributes: mm.attrs['file_name'] = 'NEMO-forcing/grid/mesh_mask_downbyone2.nc' mm.attrs['Conventions'] = 'CF-1.6' mm.attrs['title'] = 'Salish Sea NEMO downonegrid2 Bathymetry Mesh Mask' mm.attrs['institution'] = 'Dept of Earth, Ocean & Atmospheric Sciences, University of British Columbia' mm.attrs['source'] = 'NEMO-3.6 Salish Sea configuration' mm.attrs['references'] = '''https://salishsea.eos.ubc.ca/erddap/info/ https://bitbucket.org/salishsea/nemo-forcing/src/tip/grid/bathy_meter_downbyone2.nc ''' mm.attrs['history'] = '''Sun Jun 19 14:06:59 2016: ncks -4 -L4 -O mesh_mask.nc mesh_mask.nc [2016-11-14 16:25] Added metadata to variable in preparation for creation of ERDDAP datasets.''' mm.attrs encoding = {var: {'zlib': True} for var in mm.data_vars} mm.to_netcdf('foo.nc', engine='netcdf4', format='netcdf4', encoding=encoding)
bathymetry/mesh_mask_downbyone2_metadata.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.2 # language: julia # name: julia-0.5 # --- # We've been trying to use the analytical (Buckley-Leverett) solution of the two-phase flow in porous media to fit the Corey-type relative permeability model to the experimental oil recovery data. In this post, I'm going to compare the numerical solution of the same model with the analytical results. You can find the codes that I have written in [this github repository](https://github.com/simulkade/peteng). Here, I only call the codes and compare the results. # load the input parameters and the functions using Roots, PyPlot, Dierckx, JFVM import JSON, JLD include("rel_perms.jl") include("forced_imbibition_corey.jl") include("frac_flow_funcs.jl") IJulia.clear_output() # The input parameters are stored in the `input_params_BL.jld` file, that can be loaded by JLD.@load "input_params_BL.jld" # Now we can run the functions for the analytical and numerical solutions: # + # call the functions # numerical solution (finite volume) (t_num, R_num, sw_prf)=forced_imb_impes(mu_water, mu_oil, u_inj, poros, perm_ave, swc, sor, kro0, no, krw0,nw, swi, 1.0, L_core, pv_inj, Nx=50) # Analytical solution (BL) (xt_shock, sw_shock, xt_prf, sw_prf, t_anal, p_inj, R_anal) = frac_flow_wf( muw=mu_water, muo=mu_oil, ut=u_inj, phi=poros, k=perm_ave, swc=swc, sor=sor, kro0=kro0, no=no, krw0=krw0, nw=nw, sw0=swi, sw_inj=1.0, L=L_core, pv_inj=pv_inj) IJulia.clear_output() # only to clear the output from the previous function # - # Now, we can plot the results and compare the solutions: plot(t_anal, R_anal, "o", t_num, R_num) xlabel("time [s]") ylabel("Recovery factor [-]") IJulia.clear_output() # It seems that the match very well. 
But if we zoom on the recovery plot close to the water breakthrough time, plot(t_anal, R_anal, "o", t_num, R_num) xlabel("time [s]") ylabel("Recovery factor [-]") axis([25000, 50000, 0.40, 0.5]) IJulia.clear_output() # we can see that there is roughly 1 percent underestimation of the recovery factor by the numerical method. One reason is the numerical diffusion in the upwind scheme that I have used in my numerical solution. With this diffusion, the front is not sharp anymore and the water breakthrough (decrease in the slope of the recovery curve from the linear trend) happens a bit earlier in time. Let's test it by plotting the saturation profiles: # + pv_inj2 = 0.3 (t_num, R_num, sw_prf_num)=forced_imb_impes(mu_water, mu_oil, u_inj, poros, perm_ave, swc, sor, kro0, no, krw0,nw, swi, 1.0, L_core, pv_inj2, Nx=50) # Analytical solution (BL) (xt_shock, sw_shock, xt_prf, sw_prf, t_anal, p_inj, R_anal) = frac_flow_wf( muw=mu_water, muo=mu_oil, ut=u_inj, phi=poros, k=perm_ave, swc=swc, sor=sor, kro0=kro0, no=no, krw0=krw0, nw=nw, sw0=swi, sw_inj=1.0, L=L_core, pv_inj=pv_inj2) visualizeCells(sw_prf_num) plot(xt_prf*t_anal[end], sw_prf) axis([0, L_core, 0, 1.0]) legend(["Numerical", "Analytical"]) xlabel("Core length [m]") ylabel("Water saturation [-]") IJulia.clear_output() # only to clear the output from the previous function # - # We can clearly see that the extra numerical diffusion causes the water front to move faster resulting in an earlier water breakthrough. We can decrease this diffusion by refining the grid: (t_num, R_num, sw_prf_num)=forced_imb_impes(mu_water, mu_oil, u_inj, poros, perm_ave, swc, sor, kro0, no, krw0,nw, swi, 1.0, L_core, pv_inj2, Nx=500) visualizeCells(sw_prf_num) plot(xt_prf*t_anal[end], sw_prf) axis([0, L_core, 0, 1.0]) legend(["Numerical", "Analytical"]) xlabel("Core length [m]") ylabel("Water saturation [-]") IJulia.clear_output() # Now, we can see that the numerical solution is very close to the analytical solution. 
This must give a better match for the recovery curves as well: # + # numerical solution (finite volume) (t_num, R_num, sw_prf)=forced_imb_impes(mu_water, mu_oil, u_inj, poros, perm_ave, swc, sor, kro0, no, krw0,nw, swi, 1.0, L_core, pv_inj, Nx=500) # Analytical solution (BL) (xt_shock, sw_shock, xt_prf, sw_prf, t_anal, p_inj, R_anal) = frac_flow_wf( muw=mu_water, muo=mu_oil, ut=u_inj, phi=poros, k=perm_ave, swc=swc, sor=sor, kro0=kro0, no=no, krw0=krw0, nw=nw, sw0=swi, sw_inj=1.0, L=L_core, pv_inj=pv_inj) plot(t_anal, R_anal, "o", t_num, R_num) xlabel("time [s]") ylabel("Recovery factor [-]") axis([25000, 50000, 0.40, 0.5]) IJulia.clear_output()
.ipynb_checkpoints/compare-analytic-numeric-sensitivity-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from sklearn.datasets import load_iris iris=load_iris() print(iris.target_names) #flower names print(iris.feature_names) # 4 different features print(iris.data) # data present in iris print (iris.data.shape) # size of data 4 columns and 150 rows print(iris.target) #printing 50 zero ,one and two # + from sklearn.datasets import load_iris import numpy iris=load_iris() #loading all data print("iris target:", iris.target) #printing the target in form of 0,1,2 print("iris features name: ",iris.feature_names) setosa=iris.target[1:50] # sentosa target 1 to 49 print("printing setosa target from 1 to 49 :",setosa) print("size of setosa : ",setosa.shape) x=[0,50,100] #deleting one target from each flower only_target_training=numpy.delete(iris.target,x) print("printing all targets except 3 :",only_target_training) print("size of training target:",only_target_training.shape) only_data_training=numpy.delete(iris.data,x,axis=0) print("printing all data except 3 :",only_data_training) print("size of training data :",only_data_training.shape) #testing remaining target test_target=iris.target[x] print("remaining target :",test_target) test_data=iris.data[x] print("remaining data :",test_data) from sklearn import tree #decision tree algo clf= tree.DecisionTreeClassifier() trained=clf.fit(only_data_training,only_target_training) output=trained.predict([[5.1 ,3.5 ,1.4 ,0.2]]) #testing the data print(output) # -
suprevised learning.ipynb
# ---
# jupyter:
#   jupytext:
#     cell_metadata_filter: -all
#     formats: ipynb,.md//md
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# # 1 Introduction
#
# Welcome to this introduction to the practical activities associated with the Robotics block of TM129.
#
# The aim of the practical sessions is to introduce you to some of the ways in which we can programme a simple simulated robot running inside a two-dimensional world simulator — RoboLab — in your web browser. You will also have the opportunity to try out some simple artificial intelligence and machine learning programmes.
#
# The main environment provided for working through the activities is a customised Jupyter notebook environment provided by a virtual computing environment (VCE) that you will run on your own computer. Jupyter notebooks were originally designed to support reproducible computational data analysis and research. They are now commonly used across a wide range of disciplines, and are increasingly used to support interactive code-based tutorials as well as distributing teaching materials.
#
# The programming language you will be using in this block is the Python programming language. If you have not used the Python programming language before, or even if you haven't done *any* programming before, *DON'T PANIC*. We have tried to take a pragmatic approach to demonstrating how to use Python code to *get things done*, and the provided examples show you all you need to know to programme the simulated robot yourself.
#
# You will also learn how to use simple command line style commands in the form of IPython magics (yes, they really are called that!) that allow us to perform various housekeeping acts. In the way they are formed and use "switches" to modify and extend the behaviour of the magic command, they are reminiscent of many Linux command line commands.
# # Although the interface to the programming activities is provided through a web browser (most reliably, the Chrome web-browser), the software supporting the activities run inside a virtual environment (you'll learn more about virtual machines later in the module). # # To run the virtual environment locally, you will need to install the Docker application (we'll provide full guidance around this). It is also possible to run the application using free or paid for services on the web, and we provide some guidance on how to do this. However, you should be aware that free services may not always be available or reliable, and some are "free" only in exchange for someone else, such as personal registration data. # ## 1.1 Workload # # The RoboLab practical sessions have been written so that if you are an absolute beginner with no experience of robotics or programming you can complete the work each week in four to five hours. The pace is deliberately unhurried and we recommend you take your time. If you already have some programming experience RoboLab should take you less time. # # Each week’s practical session combines two aspects of robotics. We will teach you the basics of programming in RoboLab, introducing features gradually during the sessions. You will also carry out some investigations using the simulated robot which will give you a taste of some of the more practical aspects of robotics. # # Throughout the RoboLab sessions you may encounter a number of activities that are labelled as ‘challenges’. We’ve defined these tasks as challenges because: # # - there may be several ways of achieving a solution to the task, rather than a single best approach # # - it may be possible to refine any particular solution and improve its performance. # # Challenges implicitly incorporate some performance measure that you can use to rate the effectiveness of your solution, such as the time taken for the robot to complete a task, or the accuracy with which it does so. 
There is thus a mildly competitive element to the activity. You may not get the chance to compare your solution with those of other students, but you can think about trying to improve the quality of your own solution – though you should not spend too long trying for perfection! # ### Assessment # # We hope that by the end of the RoboLab practical sessions you understand some of the basic ideas and can program the simulated robot to do some basic tasks. This should ensure that you can do the assignment and complete the block satisfactorily. The assessment will not include any material covered in optional practical activities. # ## 1.2 Accessibility # # Most of the Jupyter notebook features are keyboard accessible. Several optional extensions provide further support in terms of visual styling and limited audio feedback support. # # If you struggle to use the simulator for any reason, including but not limited to incompatibility with any tools you may use to improve software access or usability, please raise an issue in the Technical help forum or contact <EMAIL> directly. # ### Keyboard interface # # The Jupyter notebook interface supports a wide range of pre-defined keyboard shortcuts to menu and toolbar options. The shortcuts can be displayed using the `Keyboard shortcuts` item from the notebook `Help` menu or via the `ESC-h` keyboard shortcut. # # ![Screenshot of the Jupyter keyboard shortcuts help page previewing the Command Mode cell options.](../images/00_01_jupyter_nb_shortcuts.png) # # You can also add additional shortcuts and/or edit exist shortcuts via the `Edit Keyboard shortcuts` menu item. # # ![Screenshot of the Jupyter keyboard shortcuts help page previewing the Edit Command Mode cell options.](../images/00_01_jupyter_nb_edit_shortcuts.png) # # The RoboLab simulator provides a range of keyboard shortcuts to customise the environment display and control certain simulator behaviours. (More details can be found in the actual activity notebooks.) 
# ### Visual appearance # If required, you can use the [jupyter-themes](https://github.com/dunovank/jupyter-themes) extension to modify the visual appearance of the notebooks. The extension has been pre-installed in the virtual environment. See the [`jupyterthemes` documentation](https://github.com/dunovank/jupyter-themes) for more information. If you encounter any issues trying to run the extension, post a question to the *Technical help* forum. # #### Magnification # # The apparent size of the notebook contents in general can be zoomed using standard browser magnification tools. # # Alternatively, use operating systems tools such as *Windows Magnify* or the MacOS *Zoom Window*, or other assistive software. # ### Audio support features # # Some RoboLab programs 'speak'. Where the speech is generated as a part of a program flow, a visual display of the spoken phrase will also typically be displayed at the time the phrase is spoken. # # An experimental extension to provide screen reading support to the notebooks is available. If you would be interested in helping us further develop and test this extension, or raise accessibility issues or concerns either in general or with particular reference to specific extensions, please email `<EMAIL>`. # ### Accessibility toolbar (experimental) # # The Jupyter environment includes an [accessibility toolbar extension](https://github.com/uclixnjupyternbaccessibility/accessibility_toolbar) that allows you to control the presentation style of the Jupyter notebook; for example, you can change the font style, size and spacing, the notebook background colour, and so on. # #### Enabling the Accessibility Extension # # The accessibility is __disabled__ in the RoboLab environment by default. To use the accessibility extension, you need to enable it first. You can do this from the `nbextensions` tab on the notebook homepage: check the `Accessibility toolbar` extension to enable the toolbar. 
When you open a new notebook, the toolbar should be displayed.
#
# ![Screenshot of the nbextensions configurator showing the location of the Accessibility Toolbar.](../images/00_01_nb_extensions_accessibility.png)
#
# Check the [accessibility toolbar documentation](https://github.com/uclixnjupyternbaccessibility/accessibility_toolbar#toolbar-summary) for more information.
#
# All of the styles are saved into local storage when refreshing the page. This means that if you use notebooks on different servers with the same browser, the same accessibility settings will be applied to notebooks on all servers within which you have enabled the accessibility extension.

# #### Controlling colours and fonts using the Accessibility Toolbar
#
# If you wish to change the font and interface colours used in RoboLab to improve readability, the accessibility toolbar allows you to select the font style, size and colour. You can also modify the line spacing and spacing between individual characters.
#
# ![Screenshot of the colours and fonts menu dropped down from the text / A button in the group of Accessibility Toolbar buttons.](../images/00_01_accessibility_display.png)
#
# The font style applies to *all* text elements within the notebook itself. This includes the contents of markdown (text) cells, code cells and code cell outputs.
#
# The toolbar can also be used to control the notebook's background colour and the cell background colour.
#
# You can also save a style you have defined from the `Add new style...` option in the `Predefined styles` menu. Once saved, it will be added to the menu list so you can apply it as required.
# ### Other assistive software # # Please contact the Module Team if you discover that the material does not work with a particular screen reader or dictation system that you would typically expect to be able to use. # ## 1.3 Spell Checking # A spell checker is also available to check spelling in notebook markdown cells. Enable the `spellchecker` notebook extension [[direct link](/nbextensions/?nbextension=spellchecker/main])] or the spell checker via the *Accessibility Toolbar Extension*. # ## 1.4 Installing the Software # # To install the virtual computing environment (VCE) software, please refer to the Robotics Software Guide. # # You will also need to use the Chrome browser to run the RoboLab environment. If you do not already have it installed, you can download and install the Chrome browser from [https://www.google.com/chrome/](https://www.google.com/chrome/).
content/00_FOR_VLE/Section_00_01_Introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="uDPDN1lSMl5q" colab_type="text" # # GLTF 格式教學 Accessor 篇 # + [markdown] id="wB5dVCbFXVdh" colab_type="text" # <a href="https://colab.research.google.com/github/CSP-GD/notes/blob/master/practice/file_format/gltf%E6%A0%BC%E5%BC%8F%E8%A7%A3%E6%9E%90/accessor/accessor.ipynb"> # <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> # </a> # # [`Open in observablehq`](https://observablehq.com/@toonnyy8/gltf-accessor) # + [markdown] id="GrSb0MlrFVpc" colab_type="text" # ![圖 1. buffers, bufferViews, accessors \[1\]](https://github.com/CSP-GD/notes/raw/master/practice/file_format/gltf%E6%A0%BC%E5%BC%8F%E8%A7%A3%E6%9E%90/accessor/gltfOverview-2.0.0b-accessor.png) # # 圖 1. buffers, bufferViews, accessors \[1\] # + [markdown] id="tRnc8CmU8vRl" colab_type="text" # ## 簡介 # + [markdown] id="ipYKedrrF1Xv" colab_type="text" # 在 glTF,模型的網格、權重、動畫等等數據實際上是儲存在 Buffer 中, # 當要使用到這些數據時,就會用到 Accessor 去解讀數據, # 而 Accessor 解讀的數據則是透過 BufferView 去從 Buffer 中提取出來的。 # + [markdown] id="LEkq_qNUISIE" colab_type="text" # 運作流程如下 # > **Buffer** ==> **BufferView** 提取數據 ==> **Accessor** 解讀數據 ==> 數據 # + [markdown] id="225PstYr71tl" colab_type="text" # ### Accessor 屬性 # + [markdown] id="jYmCIK8yk63H" colab_type="text" # - bufferView : \<`number`\> # > 此 Accessor 是從哪個 BufferView 取得數據。 # # - byteOffset :\<`number`\> # > 從 BufferView 偏移多少個 byteOffset 的位置開始取數據。 # # - type : <`string`> # > 表示一筆數據的類型(count 的單位) # > `SCALAR` : $1$ 個 componentType 構成 # > `VEC2` : $2$ 個 componentType 構成 # > `VEC3` : $3$ 個 componentType 構成 # > `VEC4` : $4$ 個 componentType 構成 # > `MAT2` : $2*2$ 個 componentType 構成 # > `MAT3` : $3*3$ 個 componentType 構成 # > `MAT4` : $4*4$ 個 componentType 構成 # # - componentType : \<`GL Constant of Data Type`\> # > 表示數據的型別,以下幾種為部分 componentType 代表的型別 # > 
`5120` : `BYTE` # > `5121` : `UNSIGNED_BYTE` # > `5122` : `SHORT` # > `5123` : `UNSIGNED_SHORT` # > `5124` : `INT` # > `5125` : `UNSIGNED_INT` # > `5126` : `FLOAT` # # - count : \<`number`\> # > 有幾筆數據 # # - min : \<`type<componentType>`\> # > 數據的最大值 # # - max : \<`type<componentType>`\> # > 數據的最小值 # # + [markdown] id="7lLXSFy275TY" colab_type="text" # ### BufferView 屬性 # + [markdown] id="c38LwWt8lfrB" colab_type="text" # - buffer : \<`number`\> # > 此 BufferView 是從哪個 Buffer 取得數據。 # # - byteOffset : \<`number`\> # > 從 Buffer 偏移多少個 byteOffset 的位置開始取數據。 # # - byteLength : \<`number`\> # > 要取下多少個 byte。 # # - byteStride : \<`number`\> # > 數據交錯擺放時,讓 Accessor 知道取數據的步伐要多少。 # # - target : \<`34962`|`34963`\> # > 用來分辨數據的性質為 vertex (target 等於 `34962`,代表 `ARRAY_BUFFER`) 還是 vertex indices (target 等於 `34963`,代表 `ELEMENT_ARRAY_BUFFER`)。 # + [markdown] id="6s5BTeMz7_Tm" colab_type="text" # ### Buffer 屬性 # + [markdown] id="WLeSY5QVmXXm" colab_type="text" # - byteLength : \<`number`\> # > 此 Buffer 的大小。 # # - uri : \<`string`\> # > bufferData 的位置,也可能用 base64 直接儲存 bufferData。 # + [markdown] id="iJZE-jGf-JF4" colab_type="text" # ## 正式開始 # + [markdown] id="_Y8c5F6m-Kbp" colab_type="text" # ### 載入 glTF_tools # + id="xNujnE83-xB0" colab_type="code" outputId="c1cb66d9-f160-4303-fc9f-fad9aeda415f" colab={"base_uri": "https://localhost:8080/", "height": 336} # !wget https://github.com/CSP-GD/notes/raw/master/practice/file_format/gltf%E6%A0%BC%E5%BC%8F%E8%A7%A3%E6%9E%90/gltf-tools.ipynb -O gltf-tools.ipynb # %run ./gltf-tools.ipynb # + [markdown] id="Iaw0gWIIQass" colab_type="text" # ## 載入檔案 # + id="blTAX0xFF4_V" colab_type="code" outputId="7da9299f-67f1-44af-db21-a086225663c1" colab={"base_uri": "https://localhost:8080/", "height": 318} # !wget https://github.com/CSP-GD/notes/raw/master/practice/file_format/gltf%E6%A0%BC%E5%BC%8F%E8%A7%A3%E6%9E%90/accessor/cube.glb -O cube.glb glb_file = open('./cube.glb', 'rb') glb_bytes = glb_file.read() model, buffers = glTF_tools.glb_loader(glb_bytes) # + 
id="4O-LvxEVVRb-" colab_type="code" outputId="831d82af-96e3-49d6-bff8-5380f742ec3c" colab={"base_uri": "https://localhost:8080/", "height": 34} glTF_tools.render_JSON(model) # + id="U64rCd7ZcVVH" colab_type="code" outputId="1de40480-5f2d-4ffe-d03a-3976fd903e4f" colab={"base_uri": "https://localhost:8080/", "height": 235} glTF_tools.render_JSON(model['accessors']) # + id="s7FwsJOocx6E" colab_type="code" outputId="46e0ebb3-98ca-4295-f6c4-bf541451cb26" colab={"base_uri": "https://localhost:8080/", "height": 185} glTF_tools.render_JSON(model['bufferViews']) # + id="uYdTkx7tJwLz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 160} outputId="eee8dcf8-72e1-487f-b841-a344078d9fc2" def accessor(idx, model, buffers): _accessor = model['accessors'][idx] _buffer_view = model['bufferViews'][_accessor['bufferView']] _buffer = buffers[_buffer_view['buffer']] byteLength = _buffer_view['byteLength'] byteOffset = _buffer_view['byteOffset'] ret return _accessor, _buffer[byteOffset:byteOffset + byteLength] accessor(0, model, buffers) # + [markdown] id="GCjgwXhWMCI-" colab_type="text" # ## 參考 # + [markdown] id="MYLZSKtEMO9S" colab_type="text" # 1. https://github.com/KhronosGroup/glTF
accessor/accessor.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Cross Validation # Before we select a machine learning model we need to make sure that it performs well. Cross validation is a technique used to test the performance of a model. It is very useful particularly for small datasets. All cross validation methods follow the same procedure: # 1. Divide dataset into two sets -- training set and testing set # 2. Train the model using the training set # 3. Evaluate the performance of the model on the testing set # 4. Optionally, repeat 1-3 for different set of data points # # ## KFold Cross Validation # KFold cross validation is a validation method that divides the dataset in `k` equal parts. The model is trained and tested `k` times and each time a different part is used as the testing set and the rest as the training set. We store the results of each train-test iteration and find the average to see how the model performs. For example let's say our `k` is 3 so we partition our dataset into 3 equal parts `P1`, `P2` and `P3`. This is how we will perform KFold cross validation: # # | Iteration | Training Set | Testing Set | # |-----------|:------------:|------------:| # | 1 | P1, P2 | P3 | # | 2 | P1, P3 | P2 | # | 3 | P2, P3 | P1 | # # Let's get started. # # In this notebook we are going to work with wine dataset from [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wine). 
# + # import libs import pandas as pd import seaborn as sns from matplotlib import pyplot as plt # %matplotlib inline # - # load data columns = ['name', 'alcohol', 'malicAcid', 'ash', 'ashalcalinity', 'magnesium', 'totalPhenols', 'flavanoids', 'nonFlavanoidPhenols', 'proanthocyanins', 'colorIntensity', 'hue', 'od280_od315', 'proline' ] df = pd.read_csv('wine.csv', header=None) df.columns = columns df.head(5) df.describe() # correlation corr = df.corr() sns.heatmap(corr, annot=True) # Ash has the smallest correlation (0.05) so we can drop it without affecting our models. df = df.drop(['ash'], axis=1) df.head(2) # Let's train some models # import from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.svm import SVC # split data y = df['name'] X = df.drop(['name'], axis=1) accuracy_dict = dict() def get_model_accuracy(model, name): result = cross_val_score(model,X, y, cv=10, scoring='accuracy') # cv=10 -- that's the k in KFold validation accuracy_dict[name] = result.mean() # save the mean accuracy for the model. 
# + # logistic regression get_model_accuracy(LogisticRegression(), 'Logistic Regression') # Naive Bayes get_model_accuracy(GaussianNB(), 'Naive Bayes') # K-Nearest Neighbors get_model_accuracy(KNeighborsClassifier(), 'K-Nearest Neighbors') # Decision Tree get_model_accuracy(DecisionTreeClassifier(), 'Decision Tree') # Support Vector Machine - linear get_model_accuracy(SVC(kernel='linear'), 'Support Vector Machine (Linear)') # Support Vector Machine - RBF get_model_accuracy(SVC(kernel='rbf'), 'Support Vector Machine (RBF)') # - accuracy_df = pd.DataFrame(accuracy_dict.items(), columns=['Model', 'Accuracy']) accuracy_df # We have used k-fold cross validation on 6 models and we can now make a decision as to which one to use on our data.
cross-validation/cross-validation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/datascience-1.0 # --- # # Lab1: Finetuning HuggingFace models with Amazon SageMaker # ### Multi-class Classification with `Trainer` and `amazon_us_reviews` dataset # # Introduction # # Welcome to the first Lab and our end-to-end multi-class Text-Classification example. In this Lab, we will use the Hugging Faces `transformers` and `datasets` library together with Amazon SageMaker to fine-tune a pre-trained transformer on text classification. In particular, the pre-trained model will be fine-tuned using the `amazon_us_reviews` dataset. To get started, we need to set up the environment with a few prerequisite steps, for permissions, configurations, and so on. # # Development Environment and Permissions # ## Installation # # _*Note:* we install the required libraries from Hugging Face and AWS. Additionally, we make sure we have a compatible PyTorch version installed_ # !pip install "sagemaker>=2.48.0" --upgrade # !pip install datasets=='1.8.0' # *Note: Restart the kernel after installing the above packages.* from IPython.display import display_html def restartkernel() : display_html("<script>Jupyter.notebook.kernel.restart()</script>",raw=True) restartkernel() # ## Permissions # _If you are going to use Sagemaker in a local environment. You need access to an IAM Role with the required permissions for Sagemaker. 
You can find [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html) more about it._ # + import sagemaker sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() role = sagemaker.get_execution_role() sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}") # - # # Preparing the dataset # # As dataset are we going to use the [amazon_us_reviews](https://huggingface.co/datasets/amazon_us_reviews). # # the dataset contains the customer review text with accompanying metadata, consisting of three major components: # # 1. A collection of reviews written in the Amazon.com marketplace and associated metadata from 1995 until 2015. This is intended to facilitate study into the properties (and the evolution) of customer reviews potentially including how people evaluate and express their experiences with respect to products at scale. (130M+ customer reviews) # 2. A collection of reviews about products in multiple languages from different Amazon marketplaces, intended to facilitate analysis of customers’ perception of the same products and wider consumer preferences across languages and countries. (200K+ customer reviews in 5 countries) # 3. A collection of reviews that have been identified as non-compliant with respect to Amazon policies. This is intended to provide a reference dataset for research on detecting promotional or biased reviews. (several thousand customer reviews). 
This part of the dataset is distributed separately and is available upon request – please contact the email address below if you are interested in obtaining this dataset. # # _https://s3.amazonaws.com/amazon-reviews-pds/readme.html_ # # We will use `create_dataset.py` script to downsample the dataset and split it into train `data/amazon_us_reviews_apparel_v1_00_train.json` and test dataset `data/amazon_us_reviews_apparel_v1_00_test.json`. The train dataset contains 29750 rows and the test dataset 5250. # For the dataset files the `Apparel_v1_00` split was used. # # You can find the script at `data/create_dataset.py`. You can use this to change for example the size or category split of the reviews. # !pygmentize ../data/create_dataset.py # ### Download data and process it to have only reviews and label, then split it into train and test. # * This step will take approximately `7mins - 10mins`. # %%time # !python ../data/create_dataset.py # ## Uploading data to `sagemaker_session_bucket` # # Upload the `dataset` files to the default bucket in Amazon S3 # + import os from sagemaker.s3 import S3Uploader local_train_dataset = "amazon_us_reviews_apparel_v1_00_train.json" local_test_dataset = "amazon_us_reviews_apparel_v1_00_test.json" # s3 uris for datasets remote_train_dataset = f"s3://{sess.default_bucket()}/lab1/data" remote_test_dataset = f"s3://{sess.default_bucket()}/lab1/data" # upload datasets S3Uploader.upload(os.path.join('../data', local_train_dataset),remote_train_dataset) S3Uploader.upload(os.path.join('../data',local_test_dataset),remote_test_dataset) print(f"train dataset uploaded to: {remote_train_dataset}/{local_train_dataset}") print(f"test dataset uploaded to: {remote_test_dataset}/{local_test_dataset}") # - # # Fine-tuning & starting Sagemaker Training Job # # In order to create our sagemaker training job we need an `HuggingFace` Estimator. The Estimator handles all end-to-end Amazon SageMaker training and deployment tasks. 
In the Estimator we define, which fine-tuning script (`entry_point`) should be used, which `instance_type` should be used, which `hyperparameters` are passed in ..... # # # # ```python # huggingface_estimator = HuggingFace(entry_point='train.py', # source_dir='./scripts', # base_job_name='huggingface-sdk-extension', # instance_type='ml.p3.2xlarge', # instance_count=1, # transformers_version='4.4', # pytorch_version='1.6', # py_version='py36', # role=role, # hyperparameters = {'epochs': 1, # 'train_batch_size': 32, # 'model_name':'distilbert-base-uncased' # }) # ``` # # When we create a SageMaker training job, SageMaker takes care of starting and managing the required ec2 instances for us, providing the fine-tuning script `train.py` and downloading the data from our `sagemaker_session_bucket` into the container at `/opt/ml/input/data`. When starting the training SageMaer executes the following command: # # ```python # /opt/conda/bin/python train.py --epochs 1 --model_name distilbert-base-uncased --train_batch_size 32 # ``` # # The CLI arguments you see are passed in as `hyperparameters`, when creating the `HuggingFace` estimator. # # Sagemaker is also providing useful properties about the training environment through various environment variables, including the following: # # * `SM_MODEL_DIR`: A string that represents the path where the training job writes the model artifacts to. After training, artifacts in this directory are uploaded to S3 for model hosting. # # * `SM_NUM_GPUS`: An integer representing the number of GPUs available to the host. # # * `SM_CHANNEL_XXXX:` A string that represents the path to the directory that contains the input data for the specified channel. For example, if you specify two input channels in the HuggingFace estimator’s fit call, named `train` and `test`, the environment variables `SM_CHANNEL_TRAIN` and `SM_CHANNEL_TEST` are set. 
# # # To run your training job locally you can define `instance_type='local'` or `instance_type='local_gpu'` for gpu usage. _Note: this does not working within SageMaker Studio_ # # ## Creating an Estimator and start a training job # The training script that performs fine tuning is located here: `training/scripts/train.py`. Navigate to the source code location and open the `train.py` file. You can also go through it's contents by executing the cell below. # !pygmentize ../scripts/train.py # + from sagemaker.huggingface import HuggingFace import time # hyperparameters, which are passed into the training job hyperparameters={'epochs': 1, # number of training epochs 'train_batch_size': 32, # batch size for training 'eval_batch_size': 64, # batch size for evaluation 'learning_rate': 3e-5, # learning rate used during training 'model_id':'distilbert-base-uncased', # pre-trained model 'fp16': True, # Whether to use 16-bit (mixed) precision training 'train_file': local_train_dataset, # training dataset 'test_file': local_test_dataset, # test dataset } # - # List of supported models: https://huggingface.co/models?library=pytorch,transformers&sort=downloads # We create a `metric_definition` dictionary that contains regex-based definitions that will be used to parse the job logs and extract metrics. You can read more about parsing the cloudwatch logs [here](https://docs.aws.amazon.com/sagemaker/latest/dg/training-metrics.html). 
metric_definitions=[ {'Name': 'eval_loss', 'Regex': "'eval_loss': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_accuracy', 'Regex': "'eval_accuracy': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_f1', 'Regex': "'eval_f1': ([0-9]+(.|e\-)[0-9]+),?"}, {'Name': 'eval_precision', 'Regex': "'eval_precision': ([0-9]+(.|e\-)[0-9]+),?"}] # + # define Training Job Name job_name = f'huggingface-workshop-{time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())}' # create the Estimator huggingface_estimator = HuggingFace( entry_point = 'train.py', # fine-tuning script used in training jon source_dir = '../scripts', # directory where fine-tuning script is stored instance_type = 'ml.p3.2xlarge', # instances type used for the training job instance_count = 1, # the number of instances used for training base_job_name = job_name, # the name of the training job role = role, # Iam role used in training job to access AWS ressources, e.g. S3 transformers_version = '4.6', # the transformers version used in the training job pytorch_version = '1.7', # the pytorch_version version used in the training job py_version = 'py36', # the python version used in the training job hyperparameters = hyperparameters, # the hyperparameter used for running the training job metric_definitions = metric_definitions # the metrics regex definitions to extract logs ) # + # define a data input dictonary with our uploaded s3 uris training_data = { 'train': remote_train_dataset, 'test': remote_test_dataset } # starting the train job with our uploaded datasets as input huggingface_estimator.fit(training_data, wait=True) # - # # Accessing Logs # access the logs of the training job huggingface_estimator.sagemaker_session.logs_for_job(huggingface_estimator.latest_training_job.name) # # Accessing Training Metrics # + from sagemaker import TrainingJobAnalytics # Captured metrics can be accessed as a Pandas dataframe training_job_name = huggingface_estimator.latest_training_job.name print(f"Training jobname: {training_job_name}") df 
= TrainingJobAnalytics(training_job_name=training_job_name).dataframe() df # - # ## Bonus: Deploying the endpoint # # To deploy our endpoint, we call `deploy()` on our HuggingFace estimator object, passing in our desired number of instances and instance type. predictor = huggingface_estimator.deploy(1,"ml.g4dn.xlarge") # Then, we use the returned predictor object to call the endpoint. # + sentiment_input= {"inputs":"I love using the new Inference DLC."} predictor.predict(sentiment_input) # - # Finally, we delete the endpoint again. predictor.delete_endpoint()
training/lab1_default_training/train_transformer_model.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.3 # language: julia # name: julia-1.6 # --- # # Quadrotor planning w/ input rate constraint and obstacle avoidance # + # problem definition include("ex2_problem_data.jl") # solver functions and variable containers include("solver_suite.jl") # JuMP jump.solver!(:mosek,false); # PIPG+ w/ restart pipg.solver_proxy!(pipg.z,pipg.v,pipg.w,pipg.κK,pipg.κu,pipg.κx, pipg.rd2o_restrt,pipg.rd2K_restrt,pipg.rfval_restrt,prb.restrt_idx) # PIPG+ pipg.reset_var!(:plus) pipg.solver!(); # PIPGeq pipg.solver_eq!(); # pipg.plot_solstat(); # ADMM rival.solver_admm!(); # Chambolle & Pock (variable step) rival.solver_cp!(); # - rival.plot_solstat(); x1,u1 = asm.construct_xu(pipg.z) x2,u2 = asm.construct_xu(jump.z); plotter.trajectory2D(x1,u1,x2,u2,:mosek)
PIPG/ex2_problem_solve.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notes Module
# We are going to implement an attention mechanism similar to RETAIN.
#
# Alpha will be a scalar attention which will measure the importance of the notes of a particular day in the final output.
# Beta will be a vector of attention weights which will measure the importance of each notes embedding value (feature) in the final output.

# # Alpha Attention
#
class NotesAlphaAttention(torch.nn.Module):
    """Scalar (per-timestep) attention over note embeddings, as in RETAIN."""

    def __init__(self, hidden_dim):
        """
        Arguments:
            hidden_dim: the hidden dimension of the RNN output being attended.
        """
        super().__init__()
        # Linear layer projecting each hidden state to a single attention logit.
        self.a_att = nn.Linear(hidden_dim, 1)

    def forward(self, g):
        """
        Arguments:
            g: the output tensor from RNN-alpha of shape (batch_size, seq_length, hidden_dim)

        Outputs:
            alpha: the corresponding attention weights of shape (batch_size, seq_length, 1)
        """
        weights = self.a_att(g)
        # Softmax over the sequence dimension so each sequence's weights sum to 1.
        alpha = torch.softmax(weights, 1)
        return alpha

# # Beta Attention
class NotesBetaAttention(torch.nn.Module):
    """Feature-wise (vector) attention over note embeddings, as in RETAIN."""

    def __init__(self, hidden_dim):
        """
        Arguments:
            hidden_dim: the hidden dimension of the RNN output being attended.
        """
        super().__init__()
        # Linear layer producing one attention weight per embedding feature.
        self.b_att = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, h):
        """
        Arguments:
            h: the output tensor from RNN-beta of shape (batch_size, seq_length, hidden_dim)

        Outputs:
            beta: the corresponding attention weights of shape (batch_size, seq_length, hidden_dim)
        """
        weights = self.b_att(h)
        # tanh keeps each feature weight in (-1, 1).
        beta = torch.tanh(weights)
        return beta

# # NotesRnn
class NotesRNN(nn.Module):
    """RETAIN-style model over note embeddings: two GRUs drive alpha (scalar)
    and beta (feature) attention, whose weighted sum feeds a linear + sigmoid
    head."""

    def attention_sum(self, alpha, beta, x, masks):
        """Compute the attention-weighted context vector.

        Arguments:
            alpha: the alpha attention weights of shape (batch_size, seq_length, 1)
            beta: the beta attention weights of shape (batch_size, seq_length, hidden_dim)
            x: the note embeddings of shape (batch_size, seq_length, embedding_dim)
            masks: padding masks broadcastable to x
                   (e.g. shape (batch_size, seq_length, 1)) — TODO confirm
                   exact shape against the caller.

        Outputs:
            c: the context vector of shape (batch_size, embedding_dim)
        """
        # Fix: the docstring previously described nonexistent parameters
        # (rev_v / rev_masks); the computation itself is unchanged.
        return torch.sum(x * alpha * beta * masks, dim=1)

    def __init__(self, hidden_dim=128, notes_emb_size=200):
        super().__init__()
        # Two parallel GRUs: one feeds alpha attention, the other beta.
        self.rnn_a = nn.GRU(notes_emb_size, notes_emb_size, batch_first=True)
        self.rnn_b = nn.GRU(notes_emb_size, notes_emb_size, batch_first=True)
        self.att_a = NotesAlphaAttention(notes_emb_size)
        self.att_b = NotesBetaAttention(notes_emb_size)
        # Final projection from the context vector to the output dimension.
        self.fc = nn.Linear(notes_emb_size, hidden_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, masks):
        """
        Arguments:
            x: note embeddings of shape (batch_size, seq_length, notes_emb_size)
            masks: padding masks broadcastable to x

        Outputs:
            probabilities of shape (batch_size, hidden_dim), squeezed.
        """
        g, _ = self.rnn_a(x)
        h, _ = self.rnn_b(x)
        alpha = self.att_a(g)
        beta = self.att_b(h)
        c = self.attention_sum(alpha, beta, x, masks)
        logits = self.fc(c)
        probs = self.sigmoid(logits)
        return probs.squeeze()
notes_module.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ATSC 500 Assignment I (Reynolds averaging) import numpy as np import netCDF4 as nc from glob import glob import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec % matplotlib inline filename = glob('_data/ATSC-500/case_60_10.nc')[0] nc_obj = nc.Dataset(filename) # Get vertical coord z = nc_obj.variables['z'][...] time = nc_obj.variables['time'][...] # Get group names nc_gps = list(nc_obj.groups.keys()) # The key and dim of vars are the same in all the gps shape_grid = nc_obj[nc_gps[0]].variables['TABS'].shape # Add the number of gps into the shape shape_all = (len(nc_gps),) + shape_grid W = np.zeros(shape_all)*np.nan TABS = np.zeros(shape_all)*np.nan # loop over all the groups for i in range(len(nc_gps)): W[i, ...] = nc_obj[nc_gps[i]].variables['W'][...] TABS[i, ...] = nc_obj[nc_gps[i]].variables['TABS'][...] # ... nc_obj.close() # Calculating $\left\langle w'\theta ' \right\rangle$ at index location $\left(20, 20\right)$ and the last time record for all vertical levels: # # $$ # \left\langle w'\theta ' \right\rangle = \left(w - \overline w\right)\cdot\left(\theta - \overline\theta\right) # $$ # # Where the overline is Reynolds averaging on time. 
W_ave = np.mean(W[:, :, :, 19, 19], 1) TABS_ave = np.mean(TABS[:, :, :, 19, 19], 1) H_flux = (W[:, -1, :, 19, 19] - W_ave)*(TABS[:, -1, :, 19, 19] - TABS_ave) # + fig = plt.figure(figsize=(8, 12)) gs = gridspec.GridSpec(2, 2, width_ratios=[2, 1], height_ratios=[1, 1]) ax1 = fig.add_subplot(gs[0]); ax2 = fig.add_subplot(gs[1]) AX = [ax1, ax2] for ax in AX: ax.grid(linestyle=':') ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.xaxis.set_tick_params(labelsize=14) ax.yaxis.set_tick_params(labelsize=14) [j.set_linewidth(2.5) for j in ax.spines.values()] ax.tick_params(axis="both", which="both", bottom="off", top="off", \ labelbottom="on", left="off", right="off", labelleft="on") ax2.spines["left"].set_visible(False) ax2.tick_params(labelleft="off") for i, gp in enumerate(nc_gps): ax1.plot(H_flux[i, :], z, label=gp) ax2.plot(np.mean(H_flux, 0), z, lw=3) LG = ax1.legend(bbox_to_anchor=(1.035, 1), prop={'size':14}); LG.draw_frame(False) ax1.text(0.85, -0.1, r"Heat flux $\left\langle w'\theta ' \right\rangle$", transform=ax1.transAxes, fontsize=14) ax1.set_ylabel(r"Height [km]", fontsize=14) ax1.set_title(r"(a) x=20, y=20, time={:.2f}s".format(time[-1]), fontsize=14); ax2.set_title("(b) Ensemable average", fontsize=14); plt.tight_layout() # -
ATSC_500/ATSC_500_Assignment_I_Reynolds_averaging.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import json

# +
from finnhub import client as Finnhub

client = Finnhub.Client(api_key="<KEY>")

# +
# Get candlestick data for one stock and persist it as JSON.
data = client.stock_candle(symbol="SIRI", resolution="D", count=600)
app_json = json.dumps(data)

print(json.dumps(data, sort_keys=True, indent=4))

# Fix: the original opened the file with mode "r+", which raises
# FileNotFoundError when the JSON file does not exist yet. Mode "w"
# creates/truncates the file, so the manual seek()/truncate()/close()
# workaround is no longer needed (the `with` block closes the file).
with open("SIRI" + ".json", "w") as outfile:
    outfile.write(app_json)

# +
# Read ticker names from csv
data_df = pd.read_csv("Nasdaq-100.csv")
data_df
# -

data_df[["Symbol"]].head()

# +
# Drop the unnamed index column pandas picked up from the CSV.
data = data_df.loc[:, ~data_df.columns.str.contains('^Unnamed')]
data
# -

# Convert the Symbol column into a plain list of tickers.
data_list = data['Symbol'].values.tolist()
data_list

# +
# Small subset handy for quick manual testing (swap for data_list below).
new_list = ['SIRI', 'ZNGA', 'WMT', 'VZ', 'VEEV']

# Download daily candles for every ticker and write each to "<ticker>.json".
for i in data_list:
    # Printing which stock running currently
    print(i)
    # Getting the stock data
    stock_data = client.stock_candle(symbol=i, resolution="D", count=1000)
    # Converting to json
    app_json = json.dumps(stock_data)
    # Writing it to the file (mode "w": see fix above)
    with open(i + ".json", "w") as outfile:
        outfile.write(app_json)
# -

# NOTE(review): at this point ``data`` is a DataFrame, while json_normalize
# expects dict / list-of-dicts input — confirm what this cell was meant to do.
df = pd.io.json.json_normalize(data)
df

# +

# -
raw_json/Calls.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# #Lab 11. Download Census Data into Python

# +
from urllib import request
import json
from pprint import pprint

census_api_key = '<KEY>' #get your key from https://api.census.gov/data/key_signup.html

url_str = 'https://api.census.gov/data/2019/acs/acs5?get=B01001_001E,NAME&for=county:*&in=state:51&key='+census_api_key # create the url of your census data
response = request.urlopen(url_str) # read the response into computer
html_str = response.read().decode("utf-8") # convert the response into string

if (html_str):
    json_data = json.loads(html_str) # convert the string into json
    print (json_data[0])
    for v1,name,state,county in json_data[1:]:
        print (v1,name,state,county )
# -

# ##3.1 county with most total population

# +
url_str = 'https://api.census.gov/data/2019/acs/acs5?get=B01001_001E,NAME&for=county:*&in=state:51&key='+census_api_key # create the url of your census data
response = request.urlopen(url_str) # read the response into computer
html_str = response.read().decode("utf-8") # convert the response into string

max_p = 0
max_county=''
if (html_str):
    json_data = json.loads(html_str) # convert the string into json
#     print (json_data[0])
    for v1,name,state,county in json_data[1:]:
        if max_p < int(v1):
            # Fix: the original assigned to ``max_P`` (capital P), so
            # ``max_p`` stayed 0 and the print below was always wrong.
            max_p = int(v1)
            max_county = name
#         print (v1,name,state,county )
print("The {} has the most population {}.".format(max_county,max_p))
# -

# ##3.2 county with most male population

# +
url_str = 'https://api.census.gov/data/2019/acs/acs5?get=B01001_002E,NAME&for=county:*&in=state:51&key='+census_api_key # create the url of your census data
response = request.urlopen(url_str) # read the response into computer
html_str = response.read().decode("utf-8") # convert the response into string

max_p = 0
max_county=''
if (html_str):
    json_data = json.loads(html_str) # convert the string into json
#     print (json_data[0])
    for v1,name,state,county in json_data[1:]:
        if max_p < int(v1):
            # Fix: same ``max_P`` / ``max_p`` case typo as in 3.1.
            max_p = int(v1)
            max_county = name
#         print (v1,name,state,county )
print("The {} has the most male population {}.".format(max_county,max_p))
# -

# ##3.3 county with highest male/total population ratio

# +
url_str = 'https://api.census.gov/data/2019/acs/acs5?get=B01001_001E,B01001_002E,NAME&for=county:*&in=state:51&key='+census_api_key # create the url of your census data
response = request.urlopen(url_str) # read the response into computer
html_str = response.read().decode("utf-8") # convert the response into string

max_p = 0
max_county=''
if (html_str):
    json_data = json.loads(html_str) # convert the string into json
#     print (json_data[0])
    for v1,v2,name,state,county in json_data[1:]:
        # Guard against a zero total population to avoid ZeroDivisionError.
        if int(v1) == 0:
            continue
        if max_p < int(v2)/int(v1):
            # Fix: same ``max_P`` / ``max_p`` case typo as in 3.1/3.2.
            max_p = int(v2)/int(v1)
            max_county = name
#         print (v1,v2,name,state,county )
print("The {} has the highest male/total ratio {}.".format(max_county,max_p))
# -
lab11.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import diffprivlib.tools as dp from sklearn.preprocessing import LabelEncoder from tqdm.notebook import trange from tqdm import tqdm tqdm.pandas() import matplotlib.pyplot as plt # %matplotlib inline # + na_values={ 'capital-gain': 99999, 'capital-loss': 99999, 'hours-per-week': 99, 'workclass': '?', 'native-country': '?', 'occupation': '?'} private_df = pd.read_csv('./data/adult_with_no_native_country.csv', skipinitialspace=True, na_values=na_values) private_df.set_index('index', inplace=True) synthetic_df = pd.read_csv('./out/correlated_attribute_mode/sythetic_data.csv', skipinitialspace=True) synthetic_df.set_index('index', inplace=True) # - categorical_attributes = private_df.dtypes.loc[private_df.dtypes=='O'].index.values numerical_attributes = [col for col in private_df.columns if col not in categorical_attributes] categorical_attributes (pd.merge(private_df, synthetic_df, on=list(private_df.columns.values), how='left', indicator='Exists').Exists=='both').sum() for col in categorical_attributes: le = LabelEncoder() private_df[col] = le.fit_transform(private_df[col].fillna('unk')) synthetic_df[col] = le.transform(synthetic_df[col].fillna('unk')) # + def get_overlap(row, other, ncols): rst = row == other rst = rst.sum(axis=1) rst = rst * 100 / ncols return min(rst), np.mean(rst), max(rst) priv_np = private_df.values ncols = priv_np.shape[1] synth_np = synthetic_df.values overlap = [] for n in trange(priv_np.shape[0]): row = priv_np[n,:] overlap.append(get_overlap(row, synth_np, ncols)) # - pd.DataFrame(np.vstack(overlap), columns=['min', 'mean', 'max']).plot() p_cols = [] for name, target in private_df.loc[:2000,:].iterrows(): p_col = [] for col in target.index: # print(col) cnt = 
private_df[col].loc[private_df[col]==target[col]].shape[0] if cnt == 0: p = 0 else: p = 1 / cnt p_col.append(p) p_cols.append(p_col) pd.Series(np.vstack(p_cols).sum(axis=1) / target.shape[0]).plot() p_cols = [] for name, target in private_df.loc[:50,:].iterrows(): p_col = [] for col in target.index: # print(col) cnt = synthetic_df[col].loc[synthetic_df[col]==target[col]].shape[0] if cnt == 0: p = 0 else: p = 1 / cnt p_col.append(p) p_cols.append(p_col) pd.Series(np.vstack(p_cols).sum(axis=1) / target.shape[0]).plot()
notebooks/Check-census-overlap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Session 5 Quiz and Solution # In Session 5, you looked at the framework of a case study. Building on elements from previous sessions &mdash; such as loading data, calculating an index, and plotting results &mdash; you used a new index, the Enhanced Vegetation Index (EVI), to show differences over time. The first half of EVI data was compared to the second half, and mapped to show relative increase or decrease. # ## Quiz # If you would like to be awarded a certificate of achievement at the end of the course, we ask that you [complete the quiz](https://docs.google.com/forms/d/e/1FAIpQLSfAck3EJgxeYDfTyseB0VwVmcyDMNq_PiFCnm3-JIoSRae3xw/viewform?usp=sf_link). You will need to supply your email address to progress towards the certificate. After you complete the quiz, you can check if your answers were correct by pressing the **View Accuracy** button. # # This quiz does not require a notebook to solve. However, you may find the EVI vegetation change detection notebook useful as a reference. If you would like to confirm that your vegetation change notebook works as expected, you can check it against the solution notebook provided below. # + raw_mimetype="text/restructuredtext" active="" # .. note:: # The solution notebook below does not contain the answer to the quiz. Use it to check that you implemented the exercise correctly, then use your exercise notebook to help with the quiz. Accessing the solution notebook will not affect your progression towards the certificate. # - # ## Solution notebook # + raw_mimetype="text/restructuredtext" active="" # .. note:: # We strongly encourage you to attempt the exercise on the previous page before downloading the solution below. This will help you learn how to use the Sandbox independently for your own analyses. 
# - # [Download the solution notebook for the Session 5 exercise](../_static/session_5/Vegetation_exercise.ipynb) # # To view a solution notebook on the Sandbox, you will need to first download # it to your computer, then upload it to the Sandbox. Follow these instructions: # # 1. Download the notebook by clicking the link above. # 2. On the Sandbox, open the **Training** folder. # 3. Click the **Upload Files** button as shown below. # # <img align="middle" src="../_static/session_2/05_solution_uploadbutton.png" alt="Upload button." width=400> # # 4. Select the downloaded notebook using the file browser. Click **OK**. # 5. The solution notebook will appear in the **Training** folder. Double-click to open it.
docs/session_5/03_s5_solution.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Layer-wise Relevance Propagation (LRP) heatmaps for a pretrained MNIST/SVHN
# digit classifier: folds BatchNorm into the preceding convolutions, runs an
# LRP backward pass (`expl`), and visualises signed per-channel relevance maps
# for test images and artificial digits.

from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))

# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {
#     return false;
# }
# -

import os
os.chdir('..')  # run from the repository root so the project-local imports below resolve

# +
from data import get_dataset
from model import Model
from utils import score_clf
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import cv2
import torch.nn.functional as F
import matplotlib.cm as cm
from tqdm import tqdm
import copy
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial.distance import cdist
from PIL import Image
# -

# Slightly darkened "seismic" colormap used for the signed relevance maps.
my_cmap = plt.cm.seismic(np.arange(plt.cm.seismic.N))
my_cmap[:, 0:3] *= 0.85
my_cmap = ListedColormap(my_cmap)

dataset1_train = get_dataset(f"mnist", test=True, known=-1)
dataset2_train = get_dataset(f"svhn", test=True, known=-1)

dataloader1 = torch.utils.data.DataLoader(
    dataset1_train, batch_size=50, shuffle=True, drop_last=False
)
dataloader2 = torch.utils.data.DataLoader(
    dataset2_train, batch_size=50, shuffle=True, drop_last=False
)

net = Model(mode="digits", instance_norm=False, spectral_norm=True, backbone="vat", disc_type="conditional").cuda()
#net.forward = net.forward_log_classifier

path_ours = "ours_umap"
path_semi = "confused-frost-616"

dics = torch.load(f"weights/{path_ours}.pth", map_location=torch.device('cuda'))
net.load_state_dict(dics)
net.eval();

# # LRP

# Update the network architecture before computing the LRP: we remove Batch
# normalization layers by applying them directly to the weights of the
# previous layer.

# Fold each BatchNorm2d into the Conv2d directly before it, then replace the
# BN by Identity so the LRP pass only sees conv/pool/linear layers.
# NOTE(review): assumes a strict conv -> bn layout in net.embedder.main and
# that the convs were created without their own bias — TODO confirm.
for i in range(len(net.embedder.main)):
    if isinstance(net.embedder.main[i], nn.Conv2d):
        net.embedder.main[i].weight = torch.nn.parameter.Parameter(net.embedder.main[i].weight / torch.sqrt(net.embedder.main[i+1].running_var[:, None, None, None])).cuda()
        net.embedder.main[i].bias = torch.nn.parameter.Parameter((-net.embedder.main[i+1].running_mean[:]) / torch.sqrt(net.embedder.main[i+1].running_var[:])).cuda()
        net.embedder.main[i].bias = torch.nn.parameter.Parameter(net.embedder.main[i].bias * net.embedder.main[i+1].weight)
        net.embedder.main[i].bias = torch.nn.parameter.Parameter(net.embedder.main[i].bias + net.embedder.main[i+1].bias)
        net.embedder.main[i].weight = torch.nn.parameter.Parameter(net.embedder.main[i].weight * net.embedder.main[i+1].weight[:, None, None, None])
    elif isinstance(net.embedder.main[i], nn.BatchNorm2d):
        net.embedder.main[i] = nn.Identity()

# We implement the LRP


# Reference LRP rho/incr rules per layer group (`expl` below defines its own
# lambdas, so these are kept only for reference).
# FIX: the original called `numpy.maximum`, but only `numpy as np` is
# imported, so invoking rho() raised NameError.
def rho(w, l):
    return w + [None, 0.1, 0.0, 0.0][l] * np.maximum(0, w)


def incr(z, l):
    return z + [None, 0.0, 0.1, 0.0][l] * (z**2).mean()**.5 + 1e-9


def newlayer(layer, g, coeff_bias=1):
    """Return a deep copy of `layer` with its weight transformed by `g` and
    its bias scaled by `coeff_bias`; layers without weight/bias pass through."""
    layer = copy.deepcopy(layer)
    try: layer.weight = nn.Parameter(g(layer.weight))
    except AttributeError: pass
    #if nobiasrect is False:
    #    try: layer.bias = nn.Parameter(g(layer.bias))
    #    except AttributeError: pass
    try: layer.bias = nn.Parameter(layer.bias * coeff_bias)
    except AttributeError: pass
    return layer


def toconv(layers):
    """Replace Linear layers by equivalent Conv2d layers.

    The first Linear is assumed to act on a 512x7x7 feature map (VGG-style);
    later Linears become 1x1 convolutions. TODO confirm the 512x7x7 shape
    against the embedder architecture.
    """
    newlayers = []
    for i, layer in enumerate(layers):
        if isinstance(layer, nn.Linear):
            #print(i)
            newlayer = None
            if i == 0:
                m, n = 512, layer.weight.shape[0]
                newlayer = nn.Conv2d(m, n, 7)
                newlayer.weight = nn.Parameter(layer.weight.reshape(n, m, 7, 7))
            else:
                m, n = layer.weight.shape[1], layer.weight.shape[0]
                newlayer = nn.Conv2d(m, n, 1)
                newlayer.weight = nn.Parameter(layer.weight.reshape(n, m, 1, 1))
            newlayer.bias = nn.Parameter(layer.bias)
            newlayers += [newlayer]
        else:
            newlayers += [layer]
    return newlayers


def toconv2(layers):
    """Like `toconv`, but every Linear becomes a 1x1 convolution."""
    newlayers = []
    for i, layer in enumerate(layers):
        if isinstance(layer, nn.Linear):
            #print(i)
            newlayer = None
            #if i == 0:
            #    m,n = 512,layer.weight.shape[0]
            #    newlayer = nn.Conv2d(m,n,7)
            #    newlayer.weight = nn.Parameter(layer.weight.reshape(n,m,7,7))
            #else:
            m, n = layer.weight.shape[1], layer.weight.shape[0]
            newlayer = nn.Conv2d(m, n, 1)
            newlayer.weight = nn.Parameter(layer.weight.reshape(n, m, 1, 1))
            newlayer.bias = nn.Parameter(layer.bias)
            newlayers += [newlayer]
        else:
            newlayers += [layer]
    return newlayers


def zero_grad(p):
    # Detach and clear a tensor's accumulated gradient in place.
    if p.grad is not None:
        p.grad.detach_()
        p.grad.zero_()


def expl(model, X, Y, eps=1e-3):
    """LRP backward pass.

    Args:
        model: network exposing `embedder` (conv stack + classifier) and
            `classifier` submodules; BatchNorms must already be folded.
        X: input batch; assumed normalised with per-channel mean 0.5 /
            std 0.5 — TODO confirm against the data pipeline.
        Y: one-hot scores selecting which output to explain.
        eps: stabiliser added to the redistribution denominators.

    Returns:
        (A, R, c): forward activations, per-layer relevances (R[0] is the
        input-space heatmap) and the input gradient of the last step.
    """
    mean = torch.Tensor([0.5, 0.5, 0.5]).reshape(1, -1, 1, 1).cuda()
    std = torch.Tensor([0.5, 0.5, 0.5]).reshape(1, -1, 1, 1).cuda()
    #print("expl")
    # Flatten the model into one list: embedder conv stack, embedder
    # classifier, then the model-level classifier head.
    layers = list(model._modules["embedder"]._modules['main'])
    layers += [model._modules["embedder"]._modules['classifier']] + list(model._modules['classifier'])
    L = len(layers)
    A = [X] + [None] * L
    L2 = 17  # above this index relevance is gradient*activation; below it LRP rules apply
    # Forward pass, recording every activation.
    for l in range(L):
        if l == 32:
            A[l] = A[l].reshape((len(X), -1))  # flatten before the dense head
        if l == L2:
            #print("req grad", l)
            A[l] = A[l].detach()
            A[l].requires_grad_(True)
        A[l+1] = layers[l].forward(A[l])
    T = Y  #.reshape([len(X),10,1,1])
    R = [None] * L + [(A[-1] * T).data]
    # Relevance at L2 via gradient * activation of the selected logits.
    (A[-1] * T).sum().backward()
    #print(A[L2].grad)
    R[L2] = (A[L2].grad * A[L2]).detach()
    zero_grad(A[L2])
    # LRP redistribution from layer L2-1 down to layer 1.
    for l in range(0, L2)[::-1]:
        if l == 31:
            A[l] = A[l].reshape((len(X), 128, 11, 11))
        A[l] = (A[l].data).requires_grad_(True)
        #if isinstance(layers[l],torch.nn.MaxPool2d): layers[l] = torch.nn.AvgPool2d(2)
        if isinstance(layers[l], torch.nn.Conv2d):
            gamma = 0.25  # gamma-rule: boost same-sign contributions
            if True:  #l <= 14:
                rho = lambda p: p + gamma * p.clamp(min=0)
                incr = lambda z: z + eps
                rho_n = lambda p: p + gamma * p.clamp(max=0)
                incr_n = lambda z: z - eps
            # Alternative rules for deeper layers, kept for reference:
            #if 15 <= l <= 26:
            #    rho = lambda p: p;
            #    incr = lambda z: z+1e-9+0.25*((z**2).mean()**.5).data
            #    rho_n = rho
            #    incr_n = incr
            #if l >= 27:
            #    rho = lambda p: p;
            #    incr = lambda z: z+1e-9
            #    rho_n = rho
            #    incr_n = incr
            zpp = incr(newlayer(layers[l], rho, coeff_bias=1+gamma).forward(A[l].clamp(min=0)))   # step 1
            znn = incr(newlayer(layers[l], rho_n, coeff_bias=0).forward(A[l].clamp(max=0)))       # step 1
            z = layers[l].forward(A[l])
            zpn = incr_n(newlayer(layers[l], rho, coeff_bias=0).forward(A[l].clamp(max=0)))
            znp = incr_n(newlayer(layers[l], rho_n, coeff_bias=1+gamma).forward(A[l].clamp(min=0)))
            s_p = ((z > eps) * R[l+1] / (zpp + znn)).data    # step 2
            s_n = ((z < -eps) * R[l+1] / (znp + zpn)).data   # step 2
            ((zpp) * s_p).sum().backward(); c_pp = A[l].grad.data * 1   # step 3
            zero_grad(A[l])
            ((znn) * s_p).sum().backward(); c_nn = A[l].grad.data * 1   # step 3
            zero_grad(A[l])
            ((znp) * s_n).sum().backward(); c_np = A[l].grad.data * 0   # step 3 (negative branch zeroed)
            zero_grad(A[l])
            ((zpn) * s_n).sum().backward(); c_pn = A[l].grad.data * 0   # step 3 (negative branch zeroed)
            R[l] = (A[l].clamp(min=0) * (c_pp + c_np) + A[l].clamp(max=0) * (c_nn + c_pn)).data   # step 4
        elif (isinstance(layers[l], torch.nn.MaxPool2d) or isinstance(layers[l], torch.nn.AvgPool2d)
              or isinstance(layers[l], torch.nn.Linear) or isinstance(layers[l], torch.nn.AdaptiveAvgPool2d)):
            # Epsilon-rule for pooling / dense layers.
            incr = lambda z: (z + eps) * (z > 0) + (z - eps) * (z <= 0)
            z = incr(layers[l].forward(A[l]))   # step 1
            s = (R[l+1] / z).data               # step 2
            (z * s).sum().backward(); c = A[l].grad.data * 1   # step 3
            zero_grad(A[l])
            #print(A[l] is None, c is None)
            R[l] = (A[l] * c).data              # step 4
        else:
            R[l] = R[l+1]  # activation/identity layers pass relevance through
    # Input layer: epsilon-stabilised rule (the zB bound terms are disabled).
    A[0] = (A[0].data).requires_grad_(True)
    lb = (A[0].data * 0 + (0 - mean) / std).requires_grad_(True)
    hb = (A[0].data * 0 + (1 - mean) / std).requires_grad_(True)
    z = layers[0].forward(A[0])
    z = z + eps * (z >= 0) - eps * (z < 0)                               # step 1 (a)
    #z -= newlayer(layers[0],lambda p: p.clamp(min=0)).forward(lb)       # step 1 (b)
    #z -= newlayer(layers[0],lambda p: p.clamp(max=0)).forward(hb)       # step 1 (c)
    s = (R[1] / z).data                                                  # step 2
    (z * s).sum().backward(); c, cp, cm = A[0].grad, lb.grad, hb.grad    # step 3 (cp/cm stay None: zB disabled)
    R[0] = (A[0] * c).data  #(A[0]*c+lb*cp+hb*cm).data                   # step 4
    return A, R, c.data

# We now compute the average standard deviation over channels for each image,
# as a measure of how colorful it is, and reuse to look at heatmaps of the
# most colorful images

import cv2

dataloader1 = torch.utils.data.DataLoader(
    dataset1_train, batch_size=50, shuffle=False, drop_last=False
)
dataloader2 = torch.utils.data.DataLoader(
    dataset2_train, batch_size=50, shuffle=False, drop_last=False
)

it1 = iter(dataloader1)
it2 = iter(dataloader2)
stds1 = []
stds2 = []
for i in tqdm(range(min(len(it1), len(it2)))):
    x1, y1, _ = next(it1)
    x2, y2, _ = next(it2)
    #_, R1, _ = expl(net, x1, y1, eps=1e-9)
    _, R2, _ = expl(net, x2, y2, eps=1e-9)
    #R1 = R1[0]
    R2 = R2[0]
    # Colourfulness proxy: std over the 3 channels, averaged over pixels.
    #std1 = np.std(np.moveaxis(x1.detach().cpu().numpy(), 1, 3),-1).mean((1,2))
    std2 = np.std(np.moveaxis(x2.detach().cpu().numpy(), 1, 3), -1).mean((1, 2))
    #stds1.append(std1)
    stds2.append(std2)
#stds1 = np.concatenate(stds1)
stds2 = np.concatenate(stds2)

plt.plot(stds2)

# For one of the specific images (change i to try another), shows the
# explanation for its coloured and black and white version

# +
indices = [6965, 5791]
i = indices[-1]
coeff = 0.6

# Grayscale version: average the channels, replicate to 3 channels, dim by coeff.
xt = dataloader2.dataset[i][0]
xt = torch.mean(xt, dim=0, keepdim=True)
xt = torch.cat([xt, xt, xt], dim=0)
xt = xt * coeff
x = np.moveaxis(xt.cpu().numpy(), 0, 2)
x = x - np.min(x)
x = x / np.max(x)
A, R, C = expl(net, xt[None, :, :, :], dataloader2.dataset[i][1][None])
grad = np.moveaxis(R[0][0].detach().cpu().numpy(), 0, 2)  #.mean(-1, keepdims=True)
C = np.moveaxis(C[0].detach().cpu().numpy(), 0, 2)
C = C - np.min(C)
C = C / np.max(C)
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6, figsize=(16, 4))
ax1.imshow(x)
ax2.imshow(C, interpolation='nearest')  #, cmap=my_cmap)
ax3.imshow(grad[:, :, 0], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
ax4.imshow(grad[:, :, 1], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
ax5.imshow(grad[:, :, 2], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
vis = ax6.imshow(np.std(grad, -1, keepdims=True))
plt.colorbar(vis, ax=ax6, shrink=0.7, orientation='vertical', location="right")
plt.show()

# Coloured version of the same image.
xt = dataloader2.dataset[i][0] * coeff
x = np.moveaxis(xt.cpu().numpy(), 0, 2)
x = x - np.min(x)
x = x / np.max(x)
A, R, C = expl(net, xt[None, :, :, :], dataloader2.dataset[i][1][None])
grad = np.moveaxis(R[0][0].detach().cpu().numpy(), 0, 2)  #.mean(-1, keepdims=True)
C = np.moveaxis(C[0].detach().cpu().numpy(), 0, 2)
C = C - np.min(C)
C = C / np.max(C)
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6, figsize=(16, 4))
ax1.imshow(x)
ax2.imshow(C, interpolation='nearest')  #, cmap=my_cmap)
ax3.imshow(grad[:, :, 0], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
ax4.imshow(grad[:, :, 1], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
ax5.imshow(grad[:, :, 2], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
vis = ax6.imshow(np.std(grad, -1, keepdims=True))
plt.colorbar(vis, ax=ax6, shrink=0.7, orientation='vertical', location="right")
plt.show()
# -

# Now display the 20 most colourful images and their explanations

a1 = np.argsort(stds2)
for i in a1[::-1][:20]:
    x = np.moveaxis(dataloader2.dataset[i][0].cpu().numpy(), 0, 2)
    x = x - np.min(x)
    x = x / np.max(x)
    A, R, C = expl(net, dataloader2.dataset[i][0][None, :, :, :], dataloader2.dataset[i][1][None])
    grad = np.moveaxis(R[0][0].detach().cpu().numpy(), 0, 2)  #.mean(-1, keepdims=True))
    C = np.moveaxis(C[0].detach().cpu().numpy(), 0, 2)
    C = C - np.min(C)
    C = C / np.max(C)
    #grad = grad - np.min(grad)
    #grad = grad / np.max(grad)
    fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6, figsize=(16, 4))
    ax1.imshow(x)
    ax2.imshow(C, interpolation='nearest')  #, cmap=my_cmap)
    ax3.imshow(grad[:, :, 0], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
    ax4.imshow(grad[:, :, 1], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
    ax5.imshow(grad[:, :, 2], vmin=-0.03, vmax=0.03, interpolation='nearest', cmap=my_cmap)
    vis = ax6.imshow(np.std(grad, -1, keepdims=True))
    plt.colorbar(vis, ax=ax6, shrink=0.7, orientation='vertical', location="right")
    plt.title(f"Image {i}: class {dataloader2.dataset[i][1][None]}")
    #plt.legend()
    plt.show()

# # Process individual image

# We now load and look at artificial digits as presented in the paper, and
# their colored heatmaps

imgs = []
labels = []
paths = [("datasets/artificial_digits/b2.png", 2),
         ("datasets/artificial_digits/c2.png", 2),
         ("datasets/artificial_digits/b8.png", 8),
         ("datasets/artificial_digits/c8.png", 8)]
# NOTE(review): these are ImageNet normalisation stats, unlike the 0.5/0.5
# used inside `expl` — confirm this mismatch is intentional.
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda()
for path, clss in paths:
    with Image.open(path) as im:
        img = np.asarray(im.convert('RGB'))
        img = np.moveaxis(img, 2, 0)[None, :, :, :]
        img = torch.Tensor(img.copy()).cuda()
        img -= mean.view((1, -1, 1, 1))
        img /= std.view((1, -1, 1, 1))
        label = torch.eye(10)[None, clss].cuda()
        #print(img.shape, label.shape)
        imgs.append(img)
        labels.append(label)
imgs = torch.cat(imgs)
labels = torch.cat(labels)

_, Ri, C = expl(net, imgs, labels, eps=1e-9)

my_cmap = plt.cm.seismic(np.arange(plt.cm.seismic.N))
my_cmap[:, 0:3] *= 0.85
my_cmap = ListedColormap(my_cmap)

# 10, 12
for i in range(len(imgs)):
    img = imgs[i]
    grad = C[i]  #Ri[0][i]
    img = np.moveaxis(img.detach().cpu().numpy(), 0, 2)
    grad = np.moveaxis(grad.detach().cpu().numpy(), 0, 2)  #.mean(-1, keepdims=True)
    grad = grad - np.min(grad)
    grad = grad / np.max(grad)
    b2 = 10 * ((np.abs(grad)**3.0).mean()**(1.0/3))
    img = img - np.min(img)
    img = img / np.max(img)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
    ax1.imshow(img)
    ax1.axis("off")
    ax2.imshow(grad, vmin=0, vmax=1, interpolation='nearest', cmap=my_cmap)
    ax2.axis("off")
    #vis = ax3.imshow(np.std(grad, -1, keepdims=True))
    #plt.colorbar(vis, ax=ax3, shrink=0.7)#, orientation = 'vertical', location="right")
    plt.show()

# Then, we look at the distance between these images, in feature space, and
# show our method aligned quite well the distributions

# NOTE(review): `imgs` were already normalised in the loading loop above, so
# this normalises a second time — confirm intended.
X = (imgs - mean[None, :, None, None]) / std[None, :, None, None]
embs1, pred, domain, mixed = net.forward(X, torch.rand((len(X), 10)).cuda(), 1)
embs = embs1.detach().cpu().numpy()

print("Predicted classes", torch.argmax(pred, -1))

dists = cdist(embs, embs)
plt.imshow(dists, cmap="gray")
plt.xticks([])
plt.yticks([])
plt.colorbar()
notebooks/SVHN Coloured Gradient.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <h1><center>CS 455/595a: Support Vector Machines Demos</center></h1>
# <center><NAME></center>
#
# This notebook applies the support vector machine concepts covered in [1] with the [Titanic](https://www.kaggle.com/c/titanic/) and [Boston Housing](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) data sets for SVM-based classification and regression, respectively.
#
#
#
# Reference:
#
# [1] <NAME>. *Hands on Machine Learning with Scikit-Learn & TensorFlow* O'Reilley Media Inc, 2017.
#
# [2] <NAME>. "ageron/handson-ml: A series of Jupyter notebooks that walk you through the fundamentals of Machine Learning and Deep Learning in python using Scikit-Learn and TensorFlow." Github.com, online at: https://github.com/ageron/handson-ml [last accessed 2019-03-01]

# **Table of Contents**
# 1. [Titanic Survivor Classifier w/ SVM](#Titanic-Survivor-Classifier)
#     * [Linear SVC Demonstration](#Linear-SVC-Demonstration)
#     * [SVC with Linear Kernel Demo](#SVC-with-Linear-Kernel-Demo)
#     * [LinearSVC with Polynomial Features](#LinearSVC-with-Polynomial-Features)
#     * [SVC Classifier with Polynomial Kernel](SVC-Classifier-with-Polynomial-Kernel)
#     * [SVC with RBF Kernel](#SVC-with-RBF-Kernel)
#
# 2. Boston Demo - Coming Soon
#
#
#
# # Titanic Survivor Classifier
#
# ## Set up

# +
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder, StandardScaler, PolynomialFeatures
from sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVC, LinearSVC, SVR, LinearSVR
from sklearn import datasets
from matplotlib import pyplot as plt
# %matplotlib inline
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score, f1_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
import numpy as np
import pandas as pd
import os

# Read data from input files into Pandas data frames.
# Expects the Kaggle Titanic CSVs under datasets/titanic/.
data_path = os.path.join("datasets", "titanic")
train_filename = "train.csv"
test_filename = "test.csv"


def read_csv(data_path, filename):
    """Read `filename` from `data_path` and return it as a DataFrame."""
    joined_path = os.path.join(data_path, filename)
    return pd.read_csv(joined_path)


# Read CSV file into Pandas Dataframes
train_df = read_csv(data_path, train_filename)


# Defining Data Pre-Processing Pipelines

class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Transformer that selects a fixed list of columns from a DataFrame,
    so DataFrame inputs can be fed into a scikit-learn Pipeline."""

    def __init__(self, attributes):
        # attributes: list of column names to keep
        self.attributes = attributes

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X):
        # Return only the configured columns.
        return X[self.attributes]


class MostFrequentImputer(BaseEstimator, TransformerMixin):
    """Imputer that fills NaN in each column with that column's mode
    (most frequent value), learned at fit time."""

    def fit(self, X, y=None):
        # value_counts() sorts descending, so index[0] is the mode.
        self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
                                       index=X.columns)
        return self

    def transform(self, X):
        return X.fillna(self.most_frequent)


# Numeric branch: select numeric columns, then median-impute missing values.
numeric_pipe = Pipeline([
    ("Select", DataFrameSelector(["Age", "Fare", "SibSp", "Parch"])),  # Selects Fields from dataframe
    ("Imputer", SimpleImputer(strategy="median")),  # Fills in NaN w/ median value for its column
])
# Categorical branch: select columns, mode-impute, then one-hot encode.
categories_pipe = Pipeline([
    ("Select", DataFrameSelector(["Pclass", "Sex"])),  # Selects Fields from dataframe
    ("MostFreqImp", MostFrequentImputer()),            # Fill in NaN with most frequent
    ("OneHot", OneHotEncoder(sparse=False)),           # Onehot encode
])

preprocessing_pipe = FeatureUnion(transformer_list=[
    ("numeric pipeline", numeric_pipe),
    ("categories pipeline", categories_pipe)
])

# Process input data using the pipelines
train_X_data = preprocessing_pipe.fit_transform(train_df)

# Scale input data
s = StandardScaler()
train_X_data = s.fit_transform(train_X_data)

train_y_data = train_df["Survived"]


def print_classification_scores(y_true, y_pred):
    """Print the confusion matrix plus accuracy/precision/recall/F1.

    Shared by every classifier cell below (replaces the repeated print
    boilerplate; also fixes the original 'Pecision' label typo).
    """
    print("Confusion Matrix:")
    print(confusion_matrix(y_true, y_pred))
    print("Accuracy Score = " + str(accuracy_score(y_true, y_pred)))
    print("Precision Score = " + str(precision_score(y_true, y_pred)))
    print("Recall Score = " + str(recall_score(y_true, y_pred)))
    print("F1 Score = " + str(f1_score(y_true, y_pred)))
# -

# ## KNN Classifier (for comparison)

# +
# KNN Classifier with 5-fold cross-validated predictions
k = 10
clf = KNeighborsClassifier(n_neighbors=k)
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)
# -

# ## Linear SVC Demonstration

# +
# LinearSVC Classifier - Hard Margin (large C penalises margin violations)
clf = LinearSVC(C=10, loss="hinge")
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)

# +
# LinearSVC Classifier - Soft Margin
clf = LinearSVC(C=1, loss="hinge")
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)

# +
# Grid Search for Best HyperParameters
param_grid = [{'loss': ['hinge'], 'C': [0.1, 1, 10, 100, 1000]}]
clf = LinearSVC(loss="hinge")
grid_search = GridSearchCV(clf, param_grid, cv=5, scoring='roc_auc')
grid_search.fit(train_X_data, train_y_data)
grid_search.best_params_
# -

# ## SVC with Linear Kernel Demo

# +
param_grid = [{'C': [0.1, 1, 10]}]
clf = SVC(cache_size=1000, kernel='linear')
grid_search = GridSearchCV(clf, param_grid, cv=5, scoring='roc_auc')
grid_search.fit(train_X_data, train_y_data)
grid_search.best_params_

# +
# SVC Classifier
c = 10
clf = SVC(kernel="linear", C=c)
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)
# -

# ## LinearSVC with Polynomial Features

# +
# SVC Classifier with Polynomial Features Added
clf_pipe = Pipeline([
    ("Polynomial", PolynomialFeatures()),
    ("Scaler", StandardScaler()),
    ("Classifier", LinearSVC(loss="hinge")),
])
param_grid = [{'Polynomial__degree': [1, 2, 3], 'Classifier__C': [0.01, 0.1, 1, 10, 100]}]
grid_search = GridSearchCV(clf_pipe, param_grid, cv=5, scoring='roc_auc')
grid_search.fit(train_X_data, train_y_data)
grid_search.best_params_

# +
# SVC Classifier with Polynomial Features Added
c = 0.01
deg = 3
clf_pipe = Pipeline([
    ("Polynomial", PolynomialFeatures(degree=deg)),
    ("Scaler", StandardScaler()),
    ("classifier", LinearSVC(loss="hinge", max_iter=10000, C=c)),
])
y_pred = cross_val_predict(clf_pipe, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)
# -

# ## SVC Classifier with Polynomial Kernel

# +
# Examine parameters using Grid Search
param_grid = [{'C': [0.1, 1, 10], 'coef0': [0, 0.1, 10, 100]}]
clf = SVC(kernel='poly', degree=2, gamma='scale')
grid_search = GridSearchCV(clf, param_grid, cv=5, scoring='roc_auc')
grid_search.fit(train_X_data, train_y_data)
grid_search.best_params_

# +
# SVC Classifier with Polynomial Kernel
C = 10
deg = 2
r = 100
clf = SVC(kernel="poly", gamma='scale', degree=deg, coef0=r, C=C)
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)
# -

# ## SVC with RBF Kernel

# +
import scipy as sp

# Examine parameters using Randomized Search over continuous distributions
param_grid = {'C': sp.stats.uniform(0.01, 10), 'gamma': sp.stats.uniform(0.001, 10)}
clf = SVC(kernel='rbf')
rand_search = RandomizedSearchCV(clf, param_distributions=param_grid, n_iter=100, cv=5, scoring='roc_auc')
rand_search.fit(train_X_data, train_y_data)
print(rand_search.best_params_)
results = rand_search.cv_results_
for mean_score, params in zip(results["mean_test_score"], results["params"]):
    print(mean_score, params)

# +
# SVC Classifier with Gaussian Radial Basis Function Kernel
C = 2.0
gamma = 0.2
clf = SVC(kernel="rbf", C=C, gamma=gamma)
y_pred = cross_val_predict(clf, train_X_data, train_y_data, cv=5)
print_classification_scores(train_y_data, y_pred)
# -

# # Boston Housing Regression (SVR)

# +
# Load Data Set (NOTE: load_boston was removed in scikit-learn >= 1.2)
boston_housing_data = datasets.load_boston()

# Build data frame for visualization
boston_df = pd.DataFrame(np.c_[boston_housing_data.data, boston_housing_data.target],
                         columns=["CRIM", "ZN", "INDUS", "CHAS",
                                  "NOX", "RM", "AGE",
                                  "DIS", "RAD", "TAX", "PTRatio", "BK",
                                  "LSTAT", "MEDV"])

scaler = StandardScaler()
boston_data_set = scaler.fit_transform(boston_housing_data.data)

train_X, test_X, train_y, test_y = train_test_split(boston_data_set,
                                                    boston_housing_data.target,
                                                    test_size=0.33)


def plot_learning_curves(model, X, y):
    """
    Plots performance on the training set and testing (validation) set.
    X-axis - number of training samples used
    Y-axis - RMSE
    """
    # Local split (renamed so it no longer shadows the module-level
    # train_X/test_X/train_y/test_y variables).
    lc_train_X, lc_test_X, lc_train_y, lc_test_y = train_test_split(X, y, test_size=0.20)
    training_errors, validation_errors = [], []
    for m in range(1, len(lc_train_X)):
        model.fit(lc_train_X[:m], lc_train_y[:m])
        train_pred = model.predict(lc_train_X)
        test_pred = model.predict(lc_test_X)
        training_errors.append(np.sqrt(mean_squared_error(lc_train_y, train_pred)))
        validation_errors.append(np.sqrt(mean_squared_error(lc_test_y, test_pred)))
    plt.plot(training_errors, "r-+", label="train")
    plt.plot(validation_errors, "b-", label="test")
    plt.legend()
    plt.axis([0, 80, 0, 3])


# +
# Examine parameters using Grid Search
param_grid = [{'LinearSVR__epsilon': [0.05, 0.1, 0.5, 1.5, 10]}]
deg = 3
clf = Pipeline([
    ("poly_features", PolynomialFeatures(degree=deg, include_bias=False)),
    ("LinearSVR", LinearSVR(max_iter=10000)),
])
grid_search = GridSearchCV(clf, param_grid, cv=5, scoring='neg_mean_squared_error')
# FIX: the original fit this *regression* grid search on the Titanic
# classification arrays (train_X_data/train_y_data); use the Boston split.
grid_search.fit(train_X, train_y)
grid_search.best_params_

# +
deg = 3
clf = Pipeline([
    ("poly_features", PolynomialFeatures(degree=deg, include_bias=False)),
    ("LinearSVR", LinearSVR(epsilon=0.1, max_iter=10000)),
])
clf.fit(train_X, train_y)
pred_y = clf.predict(test_X)
# Visualise the LinearSVR pipeline's test error distribution and learning curve.
plt.figure("a")
plt.hist(abs(test_y - pred_y), bins=100)  # distribution of absolute errors
plt.xlabel("Error ($k)")
plt.figure("b")
plot_learning_curves(clf, train_X, train_y)
plt.axis([0, 300, 0, 10])
mean_absolute_error(test_y, pred_y)

# +
# Examine parameters using Grid Search
param_grid = [{'epsilon': [0.05, 0.1, 0.5, 1.5], 'C': [0.1, 1, 10, 100]}]
deg = 2
# NOTE: `degree` is ignored by the RBF kernel; it only matters for 'poly'.
clf = SVR(degree=deg, kernel='rbf', gamma='scale')
grid_search = GridSearchCV(clf, param_grid, cv=5, scoring='neg_mean_squared_error')
# FIX: the original fit this Boston housing grid search on the Titanic
# classification arrays (train_X_data/train_y_data); use the Boston split.
grid_search.fit(train_X, train_y)
grid_search.best_params_

# +
deg = 2
clf = SVR(kernel='rbf', epsilon=0.1, C=1, max_iter=10000, gamma='scale')
clf.fit(train_X, train_y)
pred_y = clf.predict(test_X)

plt.figure("a")
plt.hist(abs(test_y - pred_y), bins=100)
plt.xlabel("Error ($k)")
plt.figure("b")
plot_learning_curves(clf, train_X, train_y)
plt.axis([0, 300, 0, 10])
mean_absolute_error(test_y, pred_y)
cs455-chapter5-demos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Benchmark notebook: compare pysradb download speed single- vs multi-threaded,
# with and without IBM Aspera (ascp) as the transfer backend. Runs on Google
# Colab; all work happens through shell escapes (`!...`), which jupytext
# serialises as comment lines.

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/saketkc/pysradb/blob/master/notebooks/08.pysradb_ascp_multithreaded.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="d58KlFuYPSbN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3b683f20-e95c-45d6-8bd0-abf27cb9cd91"
# Install pysradb from source (latest master).
# !git clone https://github.com/saketkc/pysradb.git
# !cd pysradb && git log | head && pip install -r requirements.txt && make install

# + id="2T2mGqZiPeZS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 190} outputId="d8a049d8-3a3d-497c-d657-52535fd7a80c"
# Colab automagic cell: installs the extension that times every cell.
pip install ipython-autotime

# + id="CwwkSD9SPoxv" colab_type="code" colab={}
# %load_ext autotime

# + id="JCjvoowbQcgI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="1e144cef-7882-4d2a-dc91-3b5fbd23584a"
# Download and unpack the IBM Aspera Connect client (provides `ascp`).
# !wget -c https://download.asperasoft.com/download/sw/connect/3.9.9/ibm-aspera-connect-3.9.9.177872-linux-g2.12-64.tar.gz
# !tar -zxvf ibm-aspera-connect-3.9.9.177872-linux-g2.12-64.tar.gz && ls

# + id="FZAqmoUsQjDl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="edeb70ca-4bf1-46d5-f9b7-f7db61f6dde7"
# Run the Aspera Connect installer script.
# !bash ibm-aspera-connect-3.9.9.177872-linux-g2.12-64.sh

# + [markdown] id="EYX9WloY6ktO" colab_type="text"
# # Multithreaded

# + id="yKDX_0RF6nnf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 615} outputId="9a0d63c6-2631-4d48-b0e5-c3d7476e1a5f"
# 8 download threads (-t 8); cleans up afterwards so the next run starts fresh.
# !pysradb download -y -t 8 -p SRP002605 && rm -rf pysradb_downloads

# + [markdown] id="2hpofzci6vNX" colab_type="text"
# #Single thread

# + id="Cmb0efgx6wqr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 615} outputId="5f62be28-aac2-43bf-ab59-a3ef8089ae6e"
# !pysradb download -y -p SRP002605 && rm -rf pysradb_downloads

# + [markdown] id="rIFpI9kOQqjE" colab_type="text"
# # Using ASPERA (fastq) - Multithreaded

# + id="UmuzaFoGQvU7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="aa750927-3bfc-42bf-f939-667160d4ef40"
# !pysradb download -t 8 --use_ascp -p SRP002605 && rm -rf pysradb_downloads

# + [markdown] id="GPfF_XldQ5tC" colab_type="text"
# # Using ASPERA (fastq) - Singlethreaded

# + id="CjXk6Tl0Q7Pv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 63} outputId="10c0a48a-0767-4563-f550-719c957dfe6d"
# !pysradb download --use_ascp -y -p SRP002605 && rm -rf pysradb_downloads
notebooks/08.pysradb_ascp_multithreaded.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.6 64-bit
#     language: python
#     name: python3
# ---

# Ensemble-based Simplified GCN (SGC) node classification with StellarGraph:
# trains an Ensemble/BaggingEnsemble of SGC models on a citation dataset and
# inspects per-node prediction uncertainty.

# +
import networkx as nx
import pandas as pd
import numpy as np
import itertools
import os

from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

import stellargraph as sg
from stellargraph.mapper import GraphSAGENodeGenerator, FullBatchNodeGenerator
from stellargraph.layer import GraphSAGE, GCN, GAT
from stellargraph import globalvar
from stellargraph.ensemble import Ensemble, BaggingEnsemble

from tensorflow.keras import layers, optimizers, losses, metrics, Model, models, regularizers
from sklearn import preprocessing, feature_extraction, model_selection

import matplotlib.pyplot as plt
import seaborn as sns

from stellargraph import datasets
from IPython.display import display, HTML

# %matplotlib inline
# -

# Experiment configuration.
use_bagging = 1      # 1: BaggingEnsemble (bootstrap samples), 0: plain Ensemble
n_estimators = 5     # number of models in the ensemble
n_predictions = 10   # predictions per model (for uncertainty estimates)
epochs = 50
dataset = 'Cora'

# +
if dataset == 'Cora':
    dataset = datasets.Cora()
elif dataset == 'PubMed':
    dataset = datasets.PubMedDiabetes()
elif dataset == 'CiteSeer':
    dataset = datasets.CiteSeer()
# -

display(HTML(dataset.description))

# FIX: the original called `print(G.info())` before `G` was created by
# `dataset.load()`, which raised NameError; load first, then inspect.
G, labels = dataset.load()

print(G.info())

print(set(labels))

# Stratified splits: 20% train, then 20% of the remainder for validation.
train_labels, test_labels = model_selection.train_test_split(
    labels,
    train_size=0.2,
    test_size=None,
    stratify=labels,
    random_state=42,  # 140
)
val_labels, test_labels = model_selection.train_test_split(
    test_labels,
    train_size=0.2,  # 500,
    test_size=None,
    stratify=test_labels,
    random_state=100,
)

# +
# One-hot encode the class labels for training.
target_encoding = preprocessing.LabelBinarizer()

train_targets = target_encoding.fit_transform(train_labels)
val_targets = target_encoding.transform(val_labels)
test_targets = target_encoding.transform(test_labels)
# -

# SGC generator: k=4 pre-computes the 4-hop smoothed adjacency.
generator = FullBatchNodeGenerator(G, method="sgc", k=4)

train_gen = generator.flow(train_labels.index, train_targets)

# One-layer GCN with softmax output == Simplified Graph Convolution model.
sgc = GCN(
    layer_sizes=[train_targets.shape[1]],
    generator=generator,
    bias=True,
    dropout=0.5,
    activations=["softmax"],
    kernel_regularizer=regularizers.l2(5e-4),
)

# Expose the input and output tensors of the SGC model for node prediction,
# via GCN.in_out_tensors() method:
x_inp, predictions = sgc.in_out_tensors()

model = Model(inputs=x_inp, outputs=predictions)

# +
if use_bagging:
    model = BaggingEnsemble(model, n_estimators=n_estimators, n_predictions=n_predictions)
else:
    model = Ensemble(model, n_estimators=n_estimators, n_predictions=n_predictions)

model.compile(
    optimizer=optimizers.Adam(lr=0.005),
    loss=losses.categorical_crossentropy,
    metrics=["acc"],
)
model
# -

# NOTE(review): Ensemble.layers is callable with a model index here — confirm
# against the installed stellargraph version's Ensemble API.
model.layers(0)

val_gen = generator.flow(val_labels.index, val_targets)
test_gen = generator.flow(test_labels.index, test_targets)

if use_bagging:
    # When using bootstrap samples to train each model in the ensemble, we must specify
    # the IDs of the training nodes (train_data) and their corresponding target values
    # (train_targets)
    history = model.fit(
        generator,
        train_data=train_labels.index,
        train_targets=train_targets,
        epochs=epochs,
        validation_data=val_gen,
        verbose=0,
        shuffle=False,
        bag_size=None,
        use_early_stopping=True,  # Enable early stopping
        early_stopping_monitor="val_acc",
    )
else:
    history = model.fit(
        train_gen,
        epochs=epochs,
        validation_data=val_gen,
        verbose=0,
        shuffle=False,
        use_early_stopping=True,  # Enable early stopping
        early_stopping_monitor="val_acc",
    )

sg.utils.plot_history(history)

# +
# Ensemble evaluation returns mean and std of each metric across members.
test_metrics_mean, test_metrics_std = model.evaluate(test_gen)
model.metrics_names = ['loss', 'accuracy']

print("\nTest Set Metrics of the trained models:")
for name, m, s in zip(model.metrics_names, test_metrics_mean, test_metrics_std):
    print("\t{}: {:0.4f}±{:0.4f}".format(name, m, s))

# +
# Predict for every node; shape is (n_estimators, n_predictions, n_nodes, n_classes).
all_nodes = labels.index
all_gen = generator.flow(all_nodes)
all_predictions = model.predict(generator=all_gen)
print(all_predictions.shape)
all_predictions = np.squeeze(all_predictions)
print(all_predictions.shape)

node_predictions = target_encoding.inverse_transform(all_predictions)

selected_query_point = -1

# Select the predictions for the point specified by selected_query_point
qp_predictions = all_predictions[:, :, selected_query_point, :]
# The shape should be n_estimators x n_predictions x size_output_layer
qp_predictions.shape

# Flatten estimator/prediction axes into one sample axis.
# (np.product is a deprecated alias removed in NumPy 2.0; np.prod is identical.)
qp_predictions = qp_predictions.reshape(
    np.prod(qp_predictions.shape[0:-1]), qp_predictions.shape[-1]
)
qp_predictions.shape
# -

inv_subject_mapper = {k: v for k, v in enumerate(target_encoding.classes_)}
inv_subject_mapper

# Box plot of the predicted class-probability distribution for the query node.
fig, ax = plt.subplots(figsize=(12, 6))
ax.boxplot(x=qp_predictions)
ax.set_xticklabels(target_encoding.classes_)
ax.tick_params(axis="x", rotation=45)
y = np.argmax(target_encoding.transform(labels.reindex(G.nodes())), axis=1)
plt.title(f"Correct {target_encoding.classes_[y[selected_query_point]]}")
plt.ylabel("Predicted Probability")
plt.xlabel("Subject")

# +
# Ensemble-mean prediction per node vs. ground truth.
a = np.mean(all_predictions, axis=(0, 1))
predicted_labels = np.argmax(a, axis=1)
true_labels = np.argmax(target_encoding.transform(labels), axis=1)

node_predictions = target_encoding.inverse_transform(all_predictions)
# -

df = pd.DataFrame({"Predicted": predicted_labels, "True": true_labels})

df[df['Predicted'] == df['True']]

df
Ensemble Based Simplified GCN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.1.1
#     language: julia
#     name: julia-1.1
# ---

using Pkg
using CSV, DataFrames

# Load the raw A-Z animals table from disk.
animals_df = CSV.read("a-z-animal_name.csv")

animals_df |> DataFrame

# Inspect the available column names.
names(animals_df)

# Keep only the columns of interest; multi-word column names must be
# referenced via Symbol("...") because they contain spaces.
animals_new_df = animals_df[[:Index,:Name,:Group,:Location,:Habitat, Symbol("Conservation Status"), Symbol("Estimated Population Size"), Symbol("Biggest Threat")]]
# select the analysis columns (column names are kept in their original format)

# Drop rows whose "Conservation Status" carries no usable information
# (missing, catch-all, or ambiguous multi-status values).
# NOTE(review): these comparisons broadcast against a one-element vector
# (e.g. .!=["NA"]) while the later ones compare against a plain string —
# both work, but the style is inconsistent; verify intent before unifying.
animals_1 = animals_new_df[animals_new_df[Symbol("Conservation Status")].!=["NA"],:]
animals_2 = animals_1[animals_1[Symbol("Conservation Status")].!=["Common"],:]
animals_3 = animals_2[animals_2[Symbol("Conservation Status")].!=["Not Listed"],:]
animals_4 = animals_3[animals_3[Symbol("Conservation Status")].!=["Data Deficient"],:]
animals_5 = animals_4[animals_4[Symbol("Conservation Status")].!=["Least Concern, Vulnerable, Critically Endangered"],:]
# delete useless data in the "Conservation Status" column

# Count the remaining rows per conservation status.
by(animals_5, Symbol("Conservation Status") , nrow)
# Group the animals by conservation status

# Further restrict to at-risk categories by removing the lower-risk statuses.
animals_6 = animals_5[animals_5[Symbol("Conservation Status")].!="Least Concern",:]
animals_7 = animals_6[animals_6[Symbol("Conservation Status")].!="Near Threatened",:]
animals_8 = animals_7[animals_7[Symbol("Conservation Status")].!="Threatened",:]
# delete some status in the "Conservation Status" column

# Re-count per remaining conservation status.
by(animals_8, Symbol("Conservation Status") , nrow)
# Group the animals by conservation status

# Summary statistics of the filtered table.
animals_8|> DataFrame |> describe
src/Julia/a-z_animals_julia.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ML Pipeline Preparation # Follow the instructions below to help you create your ML pipeline. # ### 1. Import libraries and load data from database. # - Import Python libraries # - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html) # - Define feature and target variables X and Y # + # import libraries from sqlalchemy import create_engine import pandas as pd import numpy as np import string import re # nlp libraries import nltk nltk.download(['punkt', 'stopwords', 'wordnet']) from nltk.corpus import stopwords from nltk.tokenize import word_tokenize from nltk.stem import WordNetLemmatizer # ml libraries import sklearn from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.pipeline import Pipeline, FeatureUnion from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score, recall_score, precision_score from sklearn.multioutput import MultiOutputClassifier # - # # !pip install scikit-learn --upgrade print(sklearn.__version__) # load data from database engine = create_engine('sqlite:///DisasterResponse.db') df = pd.read_sql('DisasterResponse.db', engine) X = df['message'].values Y = df.drop(['id', 'message', 'original', 'genre'], axis=1).values # df.head() df[df.aid_related==2] # ### 2. 
Write a tokenization function to process your text data # + from contractions import contractions_dict def expand_contractions(text, contractions_dict): contractions_pattern = re.compile('({})'.format('|'.join(contractions_dict.keys())), flags=re.IGNORECASE | re.DOTALL) expanded_text = contractions_pattern.sub(expand_match, text) expanded_text = re.sub("'", "", expanded_text) return expanded_text def expand_match(contraction): match = contraction.group(0) first_char = match[0] expanded_contraction = contractions_dict.get(match) \ if contractions_dict.get(match) \ else contractions_dict.get(match.lower()) expanded_contraction = expanded_contraction return expanded_contraction # - def tokenize(text): ''' Args: text(string): a string containing the message Return: tokenized_message(list): a list of words containing the processed message ''' tokenized_message = [] try: # for unbalanced parenthesis problem text = text.replace(')','') text = text.replace('(','') url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+' # get list of all urls using regex detected_urls = re.findall(url_regex, text) # replace each url in text string with placeholder for url in detected_urls: text = re.sub(url, "urlplaceholder", text) # remove whitespaces text = re.sub(r" +", " ", text) # expand contractions text = expand_contractions(text, contractions_dict) # tokenize text tokens = word_tokenize(text) # initiate lemmatizer lemmatizer = WordNetLemmatizer() # get stopwords stopwords_english = stopwords.words('english') stopwords_english += 'u' for word in tokens: # normalize word word = word.lower() if (word not in stopwords_english and # remove stopwords word not in string.punctuation): # remove punctuation word = lemmatizer.lemmatize(word) # lemmatizing word tokenized_message.append(word) except Exception as e: print(e) # print(text) return tokenized_message text = "The first time you see The Second Renaissance it may look boring. 
Look at it at least twice and definitely watch part 2. It will change your view of the matrix. Are the human people the ones https://bachda.com) who started the war ? Is AI a bad thing ?" print(tokenize(text)) # ### 3. Build a machine learning pipeline # This machine pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables. # multi output classifier pipeline_multi = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier(n_jobs=10))) ]) # ### 4. Train pipeline # - Split data into train and test sets # - Train pipeline # + from time import time start = time() X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4) pipeline_multi.fit(X_train, y_train) end = time() print("Training time:{}".format(end-start)) # - # ### 5. Test your model # Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each. y_pred = pipeline_multi.predict(X_test) report = [] for idx, col in enumerate(y_pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) full_report = [] for idx, col in enumerate(y_pred.T): full_report.append(classification_report(y_test.T[idx], col)) print(report) print(np.mean(report)) print(full_report[0]) # ### 6. Improve your model # Use grid search to find better parameters. 
# + parameters = { 'vect__ngram_range': ((1,1), (1,2)), 'vect__max_df': (0.5, 0.75, 1.0), 'vect__max_features': (None, 5000, 10000), 'tfidf__use_idf': (True, False), 'clf__n_estimators': [100, 200, 300], 'clf__min_samples_split': [2, 3, 4], } cv = GridSearchCV(pipeline_multi, param_grid=parameters, n_jobs=10, verbose=10) # - cv.fit(X_train, y_train) # + import joblib joblib.dump(cv, "best_params.pkl") # - cv.best_params_ # train with best params # multi output classifier pipeline_multi_best = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize, max_df=0.5, max_features=5000, ngram_range=(1,2))), ('tfidf', TfidfTransformer(use_idf=False)), ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators=100, min_samples_split=2, n_jobs=10))) ]) # + from time import time start = time() pipeline_multi_best.fit(X_train, y_train) end = time() print("Training time:{}".format(end-start)) # - y_pred = pipeline_multi_best.predict(X_test) report = [] for idx, col in enumerate(y_pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) print(report) print(np.mean(report)) # ### 7. Test your model # Show the accuracy, precision, and recall of the tuned model. # # Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio! # ### 8. Try improving your model further. 
Here are a few ideas: # * try other machine learning algorithms # * add other features besides the TF-IDF # + # add new tranformers for features from sklearn.base import BaseEstimator, TransformerMixin class StartingVerbExtractor(BaseEstimator, TransformerMixin): def starting_verb(self, text): sentence_list = nltk.sent_tokenize(text) for sentence in sentence_list: pos_tags = nltk.pos_tag(tokenize(sentence)) if pos_tags: first_word, first_tag = pos_tags[0][0], pos_tags[0][1] if first_tag in ['VB', 'VBP'] or first_word == 'RT': return True return False def fit(self, X, y=None): return self def transform(self, X): x_tagged = pd.Series(X).apply(self.starting_verb) return pd.DataFrame(x_tagged) # - pipeline_improved = Pipeline([ ('features', FeatureUnion([ ('nlp_pipeline', Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()) ])), ('starting_verb', StartingVerbExtractor()) ])), ('clf', MultiOutputClassifier(RandomForestClassifier(n_jobs=10))) ]) # %timeit pipeline_improved.fit(X_train, y_train) # %timeit pred = pipeline_improved.predict(X_test) report = [] for idx, col in enumerate(pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) print(np.mean(report)) # XGBoost for better perfromance # + # try using xgboost import xgboost as xgb pipeline_xgb = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(xgb.sklearn.XGBClassifier())) ]) # - start = time() # X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4) pipeline_xgb.fit(X_train, y_train) end = time() print("Training Time: {}".format(end-start)) pred = pipeline_xgb.predict(X_test) report = [] for idx, col in enumerate(pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) print(np.mean(report)) # + c_report = [] # for idx, col in enumerate(pred): # c_report.append(classification_report(y_test[:idx], pred[:idx], labels=df.columns[4:].tolist())) cols 
= df.columns[4:].tolist() for idx in range(pred.shape[1]): c_report.append(classification_report(y_test[:, idx],pred[:, idx], output_dict=True)) # - f1= [] for i in range(len(c_report)): f1.append(c_report[i]['weighted avg']['f1-score']) print(np.mean(f1)) # Oprimize xgboost parameters # + parameters = { # 'vect__ngram_range': ((1,1), (1,2)), # 'vect__max_df': (0.5, 0.75, 1.0), # 'vect__max_features': (None, 5000, 10000), # 'tfidf__use_idf': (True, False), 'clf__estimator__learning_rate': [0.05, 0.15, 0.25], # shrinks feature values for better boosting 'clf__estimator__max_depth': [4, 6, 8, 10], 'clf__estimator__min_child_weight': [1, 3, 5, 7], # sum of child weights for further partitioning 'clf__estimator__gamma': [0.0, 0.1, 0.2, 0.3, 0.4], # prevents overfitting, split leaf node if min. gamma loss 'clf__estimator__colsample_bytree': [0.3, 0.4, 0.5, 0.7] # subsample ratio of columns when tree is constructed } xgb_cv = GridSearchCV(pipeline_xgb, param_grid=parameters, n_jobs=10, verbose=10) # - xgb_cv.fit(X_train, y_train) joblib.dump(xgb_cv, 'xgb_params.pkl') xgb_cv.best_params_ xgb_cv.best_score_ # + pipeline_xgb = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(xgb.sklearn.XGBClassifier(colsample_bytree=0.7, gamma=0.4, learning_rate=0.25, max_depth=10, min_child_weight=7))) ]) start = time() # X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4) pipeline_xgb.fit(X_train, y_train) end = time() print("Training Time: {}".format(end-start)) pred = pipeline_xgb.predict(X_test) report = [] for idx, col in enumerate(pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) print("Mean f1-score: {}".format(np.mean(report))) # + parameters = { 'vect__ngram_range': ((1,1), (1,2)), 'vect__max_df': (0.5, 0.75, 1.0), 'vect__max_features': (None, 5000, 10000), 'tfidf__use_idf': (True, False) # 'clf__estimator__learning_rate': [0.05, 0.15, 0.25], # shrinks 
feature values for better boosting # 'clf__estimator__max_depth': [4, 6, 8, 10], # 'clf__estimator__min_child_weight': [1, 3, 5, 7], # sum of child weights for further partitioning # 'clf__estimator__gamma': [0.0, 0.1, 0.2, 0.3, 0.4], # prevents overfitting, split leaf node if min. gamma loss # 'clf__estimator__colsample_bytree': [0.3, 0.4, 0.5, 0.7] # subsample ratio of columns when tree is constructed } vect_cv = GridSearchCV(pipeline_xgb, param_grid=parameters, n_jobs=10, verbose=10) vect_cv.fit(X_train, y_train) joblib.dump(vect_cv, 'vect_params.pkl') # - vect_cv.best_params_ vect_cv.best_score_ # + pipeline_xgb = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize, max_df=0.5, max_features=None, ngram_range=(1,2))), ('tfidf', TfidfTransformer(use_idf=False)), ('clf', MultiOutputClassifier(xgb.sklearn.XGBClassifier(colsample_bytree=0.7, gamma=0.4, learning_rate=0.25, max_depth=10, min_child_weight=7))) ]) start = time() # X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=4) pipeline_xgb.fit(X_train, y_train) end = time() print("Training Time: {}".format(end-start)) pred = pipeline_xgb.predict(X_test) report = [] for idx, col in enumerate(pred.T): report.append(f1_score(y_test.T[idx], col, average='weighted')) print("Mean f1-score: {}".format(np.mean(report))) # - type(pipeline_xgb) # ### 9. Export your model as a pickle file joblib.dump(pipeline_xgb, 'models/xgboost_model.pkl') # ### 10. Use this notebook to complete `train.py` # Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
ML Pipeline Preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.datasets import make_classification from sklearn.decomposition import PCA import pandas as pd from imblearn.over_sampling import SMOTE from collections import Counter from itertools import combinations_with_replacement print(__doc__) from __future__ import print_function import numpy as np # %matplotlib inline import pandas as pd from mpl_toolkits.axes_grid1 import make_axes_locatable from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Dropout, Activation from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold , StratifiedKFold from sklearn.metrics import confusion_matrix, f1_score from sklearn import preprocessing from keras.optimizers import SGD, Adam from keras.layers import BatchNormalization import matplotlib.pyplot as plt import matplotlib.colors as colors sns.set() from sklearn.tree import DecisionTreeClassifier from sklearn.cross_validation import train_test_split from sklearn.metrics import classification_report from sklearn.ensemble import RandomForestClassifier almost_black = '#262626' palette = sns.color_palette() from mpl_toolkits.axes_grid1 import make_axes_locatable from sklearn.ensemble import ExtraTreesClassifier from sklearn.model_selection import LeaveOneGroupOut np.random.seed(7) # + def compute_rolling_covariance(df, properties, window_size, center=True): combs = combinations_with_replacement(properties, 2) wells = set(df["Well Name"]) well_frames = [] for well_n in wells: well_frame = df[df["Well Name"] == well_n].copy() for first_prop, second_prop 
in combs: prop_str = first_prop+"_"+second_prop+"_cov_"+str(window_size) well_frame[prop_str] = well_frame[first_prop].rolling(window_size, min_periods=1, center=center).cov(well_frame[second_prop]).bfill().ffill() well_frame[prop_str].astype(np.float64).fillna(0) well_frames.append(well_frame) out = pd.concat(well_frames).fillna(0) print(out.shape) return out def compute_rolling_min_max_mean_quantile(df, properties, window_size, center=True): wells = set(df["Well Name"]) well_frames = [] for well_n in wells: well_frame = df[df["Well Name"] == well_n].copy() for prop in properties: well_frame[prop+"_mean"] = well_frame[prop].rolling(window_size, min_periods=1, center=center).mean().bfill().ffill() well_frame[prop+"_min"] = well_frame[prop].rolling(window_size, min_periods=1, center=center).min().bfill().ffill() well_frame[prop+"_max"] = well_frame[prop].rolling(window_size, min_periods=1, center=center).max().bfill().ffill() well_frame[prop+"_10q"] = well_frame[prop].rolling(window_size, min_periods=1, center=center).quantile(0.1).bfill().ffill() well_frame[prop+"_90q"] = well_frame[prop].rolling(window_size, min_periods=1, center=center).quantile(0.9).bfill().ffill() well_frame[prop+"_mean"] = well_frame[prop+"_mean"].astype(np.float64).fillna(0) well_frame[prop+"_min"] = well_frame[prop+"_min"].astype(np.float64).fillna(0) well_frame[prop+"_max"] = well_frame[prop+"_max"].astype(np.float64).fillna(0) well_frame[prop+"_10q"] = well_frame[prop+"_10q"].astype(np.float64).fillna(0) well_frame[prop+"_90q"] = well_frame[prop+"_90q"].astype(np.float64).fillna(0) well_frames.append(well_frame) out = pd.concat(well_frames).fillna(0) print(out.shape) return out def tsfresh_well_by_well(df, extraction_settings): wells = set(df["Well Name"]) well_frames = [] for well_n in wells: well_frame = df[df["Well Name"] == well_n].copy() well_frame["MD"] = well_frame["Depth"]-np.min(well_frame["Depth"]) well_frame_data = well_frame[well_frame.columns.difference(["Well Name", 
"Formation", "EOD_Classifier", "Facies"])] X = extract_features(well_frame_data, column_id="MD", feature_extraction_settings=extraction_settings) well_frames.append(X) out = pd.concat(well_frames).fillna(0) print(out.shape) return out # - validation_data = pd.read_csv("../training_data.csv") #validation_data = validation_data[(validation_data["Well Name"] != "NOLAN")] validation_data["EOD_Classifier"] = validation_data["Facies"] < 4 # + validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 100) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 50) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 30) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 20) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 15) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 9) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 5) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 3) validation_data = compute_rolling_min_max_mean_quantile(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 100) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 50) validation_data = compute_rolling_min_max_mean_quantile(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 30) validation_data = compute_rolling_min_max_mean_quantile(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 20) validation_data = compute_rolling_covariance(validation_data, 
["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 15) validation_data = compute_rolling_min_max_mean_quantile(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 9) validation_data = compute_rolling_min_max_mean_quantile(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 5) validation_data = compute_rolling_covariance(validation_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 3) validation_data_marine = validation_data[validation_data["EOD_Classifier"] == False] validation_data_non_marine = validation_data[validation_data["EOD_Classifier"] == True] print(set(validation_data[validation_data["EOD_Classifier"]==False]["Facies"])) print(set(validation_data_marine["Facies"])) print(set(validation_data[validation_data["EOD_Classifier"]==True]["Facies"])) print(set(validation_data_non_marine["Facies"])) # + validation_data_marine_numerics = validation_data_marine[validation_data_marine.columns.difference(["Well Name", "Formation", "Facies", "Depth", "EOD_Classifier"])] validation_data_non_marine_numerics = validation_data_non_marine[validation_data_non_marine.columns.difference(["Well Name", "Formation", "Facies", "Depth", "EOD_Classifier"])] validation_data_marine_facies = validation_data_marine["Facies"]-np.min(validation_data_marine["Facies"]) validation_data_non_marine_facies = validation_data_non_marine["Facies"]-np.min(validation_data_non_marine["Facies"]) #Define Scalers marine_scaler = preprocessing.StandardScaler().fit(validation_data_marine_numerics) non_marine_scaler = preprocessing.StandardScaler().fit(validation_data_non_marine_numerics) X_marine_train = marine_scaler.transform(validation_data_marine_numerics) y_marine_train = np_utils.to_categorical((validation_data_marine_facies).values, nb_classes=len(set(validation_data_marine_facies))) X_non_marine_train = non_marine_scaler.transform(validation_data_non_marine_numerics) y_non_marine_train = 
np_utils.to_categorical((validation_data_non_marine_facies).values, nb_classes=len(set(validation_data_non_marine_facies))) print(X_marine_train.shape) print(y_marine_train.shape) print(X_non_marine_train.shape) print(y_non_marine_train.shape) # + def get_dnn_model(in_size, out_size): def facies_dnn_model(input_dim=in_size, output_dim=out_size): # Define the model model = Sequential() model.add(Dense(32, input_dim=input_dim, init='normal', activation='relu')) model.add(Dense(64, input_dim=32, init='normal', activation='relu')) model.add(Dense(32, input_dim=32, init='normal', activation='relu')) model.add(Dense(output_dim, init='normal', activation='softmax')) if output_dim > 2: model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) else: model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model return facies_dnn_model input_dim = validation_data_marine_numerics.shape[1] marine_model = get_dnn_model(input_dim, len(set(validation_data_marine_facies))) non_marine_model = get_dnn_model(input_dim, len(set(validation_data_non_marine_facies))) # + from sklearn.ensemble import ExtraTreesClassifier from sklearn.model_selection import LeaveOneGroupOut logo = LeaveOneGroupOut() epochs=1 scores = [] for train, test in logo.split(X_non_marine_train, y_non_marine_train, groups=validation_data_non_marine["Well Name"]): print(train.shape[0]+test.shape[0]) X_train, X_test = X_non_marine_train[train], X_non_marine_train[test] y_train, y_test = y_non_marine_train[train], y_non_marine_train[test] dnn = non_marine_model() dnn.fit(X_train, y_train, nb_epoch=epochs, batch_size=1, verbose=1, validation_data=(X_test, y_test)) y_marine_predicted = dnn.predict(X_test, batch_size=1, verbose=1) conf_pred_marine = confusion_matrix(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1)) scores.append(f1_score(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1), average="weighted")) 
print(f1_score(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1), average="weighted")) print(conf_pred_marine) print(scores) print(np.min(scores), np.max(scores), np.mean(scores), np.std(scores)) # + logo = LeaveOneGroupOut() epochs=1 scores = [] for train, test in logo.split(X_marine_train, y_marine_train, groups=validation_data_marine["Well Name"]): print(train.shape[0]+test.shape[0]) X_train, X_test = X_marine_train[train], X_marine_train[test] y_train, y_test = y_marine_train[train], y_marine_train[test] dnn = marine_model() dnn.fit(X_train, y_train, nb_epoch=epochs, batch_size=1, verbose=1, validation_data=(X_test, y_test)) y_marine_predicted = dnn.predict(X_test, batch_size=1, verbose=1) conf_pred_marine = confusion_matrix(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1)) scores.append(f1_score(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1), average="weighted")) print(f1_score(np.argmax(y_marine_predicted, axis=1), np.argmax(y_test, axis=1), average="weighted")) print(conf_pred_marine) print(scores) print(np.min(scores), np.max(scores), np.mean(scores), np.std(scores)) # - # Once the set of parameters are fixed, the training stage of our model begins. We perform a Cross Validation routine to evaluate the performance of the model. 
# Train one final model per depositional environment on the full training split.
# FIX: the original cell assigned `epoch = 1` (never read) while the fit()
# calls below use the module-level `epochs`; the dead name is corrected so the
# cell actually sets the value it uses.  `epochs` was already 1 from the
# previous cells, so behaviour is unchanged.
epochs = 1

marinednn = marine_model()
nonmarinednn = non_marine_model()

# NOTE(review): validation_data is the training set itself here, so the
# reported "validation" metrics mirror training metrics — in-sample only.
marinednn.fit(X_marine_train, y_marine_train, nb_epoch=epochs, batch_size=1, verbose=1,
              validation_data=(X_marine_train, y_marine_train))
nonmarinednn.fit(X_non_marine_train, y_non_marine_train, nb_epoch=epochs, batch_size=1, verbose=1,
                 validation_data=(X_non_marine_train, y_non_marine_train))

# Class predictions (argmax over the softmax outputs) on the training data.
y_marine_predicted = np.argmax(marinednn.predict(X_marine_train, batch_size=1, verbose=1), axis=1)
y_non_marine_predicted = np.argmax(nonmarinednn.predict(X_non_marine_train, batch_size=1, verbose=1), axis=1)

# +
print(validation_data_marine_facies.shape)

# Confusion matrices and weighted F1 for both models (computed in-sample).
conf_marine = confusion_matrix(y_marine_predicted, validation_data_marine_facies)
conf_non_marine = confusion_matrix(y_non_marine_predicted, validation_data_non_marine_facies)

f1_marine = f1_score(y_marine_predicted, validation_data_marine_facies, average='weighted')
f1_non_marine = f1_score(y_non_marine_predicted, validation_data_non_marine_facies, average='weighted')

print(conf_marine, f1_marine)
print(conf_non_marine, f1_non_marine)
# -

# ## Prediction
# ---
# We obtain the predictions for test data.
# + test_data = pd.read_csv("../validation_data_nofacies.csv") print(set(test_data["Well Name"])) test_data = test_data.bfill() test_data["EOD_Classifier"] = test_data["NM_M"] == 1 test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 100) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 50) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 30) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 20) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 15) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 9) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 5) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 3) test_data = compute_rolling_min_max_mean_quantile(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 100) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 50) test_data = compute_rolling_min_max_mean_quantile(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 30) test_data = compute_rolling_min_max_mean_quantile(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 20) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 15) test_data = compute_rolling_min_max_mean_quantile(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 9) test_data = compute_rolling_min_max_mean_quantile(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 5) test_data = compute_rolling_covariance(test_data, ["GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M"], 3) 
# --- Test-set preparation -------------------------------------------------
# EOD_Classifier splits wells by environment of deposition:
# False/0 -> marine, True/1 -> non-marine (cast to int64 first).
test_data["EOD_Classifier"] = test_data["EOD_Classifier"].astype(np.int64)
test_data_marine = test_data[test_data["EOD_Classifier"] == False]
test_data_non_marine = test_data[test_data["EOD_Classifier"] == True]
# Keep only the numeric log curves (drop identifier/label columns) for scaling.
test_data_eod_numerics = test_data[test_data.columns.difference(["Well Name", "Formation","Depth", "EOD_Classifier"])]
test_data_marine_numerics = test_data_marine[test_data_marine.columns.difference(["Well Name", "Formation","Depth", "EOD_Classifier"])]
test_data_non_marine_numerics = test_data_non_marine[test_data_non_marine.columns.difference(["Well Name", "Formation","Depth", "EOD_Classifier"])]

# +
# Scale with the scalers fitted earlier in the notebook on the training data.
X_marine_test= marine_scaler.transform(test_data_marine_numerics)
X_non_marine_test = non_marine_scaler.transform(test_data_non_marine_numerics)
print(X_marine_test.shape)
print(X_non_marine_test.shape)

# +
# Predict facies with the two environment-specific networks. argmax yields a
# 0-based class index, so shift by the minimum facies code seen in validation
# to recover the original 1-based facies labels.
facies_non_marine_predicted_test = np.argmax(nonmarinednn.predict(X_non_marine_test, batch_size=1, verbose=1), axis=1)+np.min(validation_data_non_marine["Facies"])
facies_marine_predicted_test = np.argmax(marinednn.predict(X_marine_test, batch_size=1, verbose=1), axis=1)+np.min(validation_data_marine["Facies"])

test_data_non_marine["Facies"] = facies_non_marine_predicted_test
test_data_marine["Facies"] = facies_marine_predicted_test

total_facies_predicted_test = pd.concat([test_data_marine, test_data_non_marine])
# -

# Restore the original row order and export the submission file.
final_prediction = total_facies_predicted_test[["Depth", "GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "Well Name", "Facies", "NM_M"]].sort_index()
print(final_prediction)

final_prediction.to_csv("prediction.csv")

gcc_test = pd.read_csv("../GCC_FaciesClassification/02 - Well Facies Prediction - Test Data Set.csv")
print(gcc_test.head())

# +
# 1=sandstone  2=c_siltstone   3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00',
       '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']

facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS',
                 'D','PS', 'BS']
# facies_color_map is a dictionary that maps facies labels
# to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
    facies_color_map[label] = facies_colors[ind]

def label_facies(row, labels):
    """Map the 1-based facies code in row['Facies'] to its short label."""
    return labels[ row['Facies'] -1]

final_prediction.loc[:,'FaciesLabels'] = final_prediction.apply(lambda row: label_facies(row, facies_labels), axis=1)
gcc_test.loc[:,'FaciesLabels'] = gcc_test.apply(lambda row: label_facies(row, facies_labels), axis=1)
# -

# NOTE(review): these two calls rely on make_facies_log_plot already being in
# the kernel; for a clean top-to-bottom run, execute the cell defining it first.
make_facies_log_plot(
    final_prediction[final_prediction['Well Name'] == 'STUART'],
    facies_colors, gcc_test)

make_facies_log_plot(
    final_prediction[final_prediction['Well Name'] == 'CRAWFORD'],
    facies_colors, gcc_test)

def make_facies_log_plot(logs, facies_colors, comparison):
    """Plot well-log curves plus two facies strips (ours vs. `comparison`).

    Parameters
    ----------
    logs : DataFrame for a single well with Depth, GR, NM_M, ILD_log10,
        Facies and PE columns.
    facies_colors : list of 9 colour codes, one per facies class (codes 1-9).
    comparison : DataFrame whose 'Facies' column is rendered as a second
        facies strip for visual comparison.
    """
    # make sure logs are sorted by depth
    logs = logs.sort_values(by='Depth')
    cmap_facies = colors.ListedColormap(
            facies_colors[0:len(facies_colors)], 'indexed')

    ztop=logs.Depth.min(); zbot=logs.Depth.max()

    # Repeat the 1-D facies vectors 100x horizontally so they render as strips.
    cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
    cluster1=np.repeat(np.expand_dims(comparison['Facies'].values,1), 100, 1)

    f, ax = plt.subplots(nrows=1, ncols=7, figsize=(8, 14))
    ax[0].plot(logs.GR, logs.Depth, '-g')
    ax[1].plot(logs.NM_M, logs.Depth, '-')
    ax[2].plot(logs.ILD_log10, logs.Depth, '-', color='0.5')
    ax[3].plot(logs.Facies , logs.Depth, '-', color='r')
    ax[4].plot(logs.PE, logs.Depth, '-', color='black')
    im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
                    cmap=cmap_facies,vmin=1,vmax=9)
    im=ax[6].imshow(cluster1, interpolation='none', aspect='auto',
                    cmap=cmap_facies,vmin=1,vmax=9)

    divider = make_axes_locatable(ax[5])
    cax = divider.append_axes("right", size="20%", pad=0.05)
    cbar=plt.colorbar(im, cax=cax)
    cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
                                'SiSh', ' MS ', ' WS ', ' D ',
                                ' PS ', ' BS ']))
    cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')

    for i in range(len(ax)-2):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)

    # BUGFIX: the x-axis labels on tracks 1-3 were stale copy-paste text
    # ("ILD_log10"/"DeltaPHI"/"PHIND") that did not match the curves actually
    # plotted above (NM_M/ILD_log10/Facies); the set_xlim calls already used
    # the correct series.
    ax[0].set_xlabel("GR")
    ax[0].set_xlim(logs.GR.min(),logs.GR.max())
    ax[1].set_xlabel("NM_M")
    ax[1].set_xlim(logs.NM_M.min(), logs.NM_M.max())
    ax[2].set_xlabel("ILD_log10")
    ax[2].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
    ax[3].set_xlabel("Facies")
    ax[3].set_xlim(logs.Facies.min(),logs.Facies.max())
    ax[4].set_xlabel("PE")
    ax[4].set_xlim(logs.PE.min(),logs.PE.max())
    ax[5].set_xlabel('Facies')
    ax[6].set_xlabel('Facies')

    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])
    ax[6].set_xticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
    plt.show()
LA_TEAM_FRESH/LA_TEAM A fresh start.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import geopandas as gp from plotnine import * # %matplotlib inline # - # ### The Political Territories of Westeros # # *Layering different features on a Map* # Read data and select features in Westeros only. # + continents = gp.read_file('data/lands-of-ice-and-fire/continents.shp') islands = gp.read_file('data/lands-of-ice-and-fire/islands.shp') lakes = gp.read_file('data/lands-of-ice-and-fire/lakes.shp') rivers = gp.read_file('data/lands-of-ice-and-fire/rivers.shp') political = gp.read_file('data/lands-of-ice-and-fire/political.shp') wall = gp.read_file('data/lands-of-ice-and-fire/wall.shp') roads = gp.read_file('data/lands-of-ice-and-fire/roads.shp') locations = gp.read_file('data/lands-of-ice-and-fire/locations.shp') westeros = continents.query('name=="Westeros"') islands = islands.query('continent=="Westeros" and name!="Summer Islands"') lakes = lakes.query('continent=="Westeros"') rivers = rivers.query('continent=="Westeros"') roads = roads.query('continent=="Westeros"') wg = westeros.geometry[0] bool_idx = [wg.contains(g) for g in locations.geometry] westeros_locations = locations[bool_idx] cities = westeros_locations[westeros_locations['type'] == 'City'].copy() # - # Create map by placing the features in layers in an order that limits obstraction. # # The `GeoDataFrame.geometry.centroid` property has the center coordinates of polygons, # we use these to place the labels of the political regions. 
# + # colors water_color = '#a3ccff' wall_color = 'white' road_color = 'brown' # Create label text by merging the territory name and # the claimant to the territory def fmt_labels(names, claimants): labels = [] for name, claimant in zip(names, claimants): if name: labels.append('{} ({})'.format(name, claimant)) else: labels.append('({})'.format(claimant)) return labels def calculate_center(df): """ Calculate the centre of a geometry This method first converts to a planar crs, gets the centroid then converts back to the original crs. This gives a more accurate """ original_crs = df.crs planar_crs = 'EPSG:3857' return df['geometry'].to_crs(planar_crs).centroid.to_crs(original_crs) political['center'] = calculate_center(political) cities['center'] = calculate_center(cities) # Gallery Plot (ggplot() + geom_map(westeros, fill=None) + geom_map(islands, fill=None) + geom_map(political, aes(fill='ClaimedBy'), color=None, show_legend=False) + geom_map(wall, color=wall_color, size=2) + geom_map(lakes, fill=water_color, color=None) + geom_map(rivers, aes(size='size'), color=water_color, show_legend=False) + geom_map(roads, aes(size='size'), color=road_color, alpha=0.5, show_legend=False) + geom_map(cities, size=1) + geom_text( political, aes('center.x', 'center.y', label='fmt_labels(name, ClaimedBy)'), size=8, fontweight='bold' ) + geom_text( cities, aes('center.x', 'center.y', label='name'), size=8, ha='left', nudge_x=.20 ) + labs(title="The Political Territories of Westeros") + scale_fill_brewer(type='qual', palette=8) + scale_x_continuous(expand=(0, 0, 0, 1)) + scale_y_continuous(expand=(0, 1, 0, 0)) + scale_size_continuous(range=(0.4, 1)) + coord_cartesian() + theme_void() + theme(figure_size=(8, 12), panel_background=element_rect(fill=water_color)) ) # - # **Credit**: [cadaei](https://www.cartographersguild.com/member.php?u=95244) of the [cartographersguild](https://www.cartographersguild.com/) website forum.
demo_plot/plotnine-examples/examples/geom_map.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder, LabelEncoder from sklearn.model_selection import GridSearchCV, cross_validate from sklearn.compose import ColumnTransformer from sklearn.preprocessing import FunctionTransformer from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score, log_loss, make_scorer, brier_score_loss from sklearn.preprocessing import StandardScaler from lightgbm import LGBMClassifier from joblib import dump, load from sklearn.calibration import calibration_curve from sklearn.calibration import CalibratedClassifierCV from sklearn.inspection import permutation_importance import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import spearmanr from scipy.cluster import hierarchy # monkey patch for bayesseachcv (https://github.com/scikit-optimize/scikit-optimize/issues/902) from numpy.ma import MaskedArray import sklearn.utils.fixes sklearn.utils.fixes.MaskedArray = MaskedArray from skopt import BayesSearchCV from skopt.space import Real, Integer import warnings warnings.filterwarnings('ignore') # Random state seed = 42 # Setup metrics (see: http://business-analytic.co.uk/blog/evaluating-expected-goals-models/) # define Mcfadden's pseduo r-squared def mcfadden_r2(y, y_pred): ll = log_loss(y, y_pred) ll_null = log_loss(y, np.full(len(y), y.mean())) return 1 - (ll/ll_null) pseudo_r2_scorer = make_scorer(mcfadden_r2, needs_proba=True, greater_is_better=True) scoring = {'roc_aug': 'roc_auc', 'mcfaddens_r2': pseudo_r2_scorer} # Setup folder for storing models # Load the data df = 
pd.read_parquet(os.path.join('..', 'data', 'shots.parquet')) df.drop(['match_id', 'statsbomb_id', 'statsbomb_team_id', 'player_id_statsbomb', 'competition_gender', 'team_name', 'player_id', 'firstName', 'middleName', 'lastName', 'Name', 'dataset', 'wyscout_id', 'wyscout_team_id', 'team_id', 'player_id_wyscout'], axis=1, inplace=True) X = df.drop('goal', axis=1) y = df.goal # Split into train, calibration and test datasets X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=seed, stratify=y) print('Shots train', len(y_train), ';Number goals', y_train.sum(), ';Goals %: ', round(y_train.mean()*100, 1)) print('Shots test', len(y_test), ';Number goals', y_test.sum(), ';Goals %: ', round(y_test.mean()*100, 1)) # Load and split fake data df_fake = pd.read_parquet(os.path.join('..', 'data', 'fake_shots.parquet')) df_fake.index = ['a'+str(idx) for idx in df_fake.index] y_fake = df_fake.goal X_fake = df_fake.drop('goal', axis=1) print('Shots fake', len(y_fake), ';Goals %: ', round(y_fake.mean()*100, 1)) # # Logistic regression # Subset dataset for logistic regression # drop colum logistic_drop_cols = ['x', 'y', # logistic regression does not deal well with dependent features # The model will use the distance/ angle features capture these location features instead # lots of missings for the below features as they come from StatsBomb data only. 
# It's not fair to impute these as they are not missing at random # while logistic regression does not allow missings so I removed them 'pass_end_y', 'pass_end_x', # <- note these were in Wyscout, but often were just the shot location 'eventSec', 'period', 'player_id_goalkeeper', 'goalkeeper_x', 'goalkeeper_y', 'carry_length', 'shot_one_on_one', 'shot_open_goal', 'under_pressure', 'area_shot', 'area_goal', 'n_angle', 'smart_pass'] X_train_logistic = X_train.drop(logistic_drop_cols, axis=1).copy() X_test_logistic = X_test.drop(logistic_drop_cols, axis=1).copy() # Split dataset for logistic regession into passes / other assists def split(X, y): mask = X.assist_type == 'pass' X_pass = X[mask].drop('assist_type', axis=1).copy() y_pass = y[mask] X_other = X[~mask].dropna(axis=1, how='all').copy() y_other = y[~mask] return X_pass, y_pass, X_other, y_other X_train_pass, y_train_pass, X_train_other, y_train_other = split(X_train_logistic, y_train) X_test_pass, y_test_pass, X_test_other, y_test_other = split(X_test_logistic, y_test) # Pipeline for cleaning pass assists cols = ['shot_type_name', 'body_part_name', 'pass_technique_name', 'pass_height_name'] cats = [['open_play', 'free_kick', 'corner', 'throw_in'], ['Right Foot', 'Left Foot', 'Other'], ['other', 'Through Ball', 'Straight', 'Inswinging', 'Outswinging'], ['Ground/ Low Pass', 'High Pass']] pass_one_hot = ColumnTransformer([('encoder', OneHotEncoder(drop='first', categories=cats), cols)], remainder='passthrough') pipe_pass = Pipeline([('one_hot', pass_one_hot), ('impute', SimpleImputer()), ('scale', StandardScaler()), ('lr', LogisticRegression(random_state=seed))]) # Column names of transformed pass data original_cols_remain = [col for col in X_train_pass.columns if col not in cols] new_cols_pass = [item for sublist in cats for i, item in enumerate(sublist) if (i>0)] new_cols_pass.extend(original_cols_remain) print(new_cols_pass) # Pipeline for cleaning other assists # + # setting direct to recovery so does not not 
encoded twice ( also covered by shot_type_name == 'direct_set_piece') X_train_other.loc[X_train_other.assist_type == 'direct', 'assist_type'] = 'recovery' X_test_other.loc[X_test_other.assist_type == 'direct', 'assist_type'] = 'recovery' cols = ['shot_type_name', 'body_part_name', 'assist_type'] cats = [['open_play', 'free_kick', 'corner', 'throw_in', 'direct_set_piece'], ['Right Foot', 'Left Foot', 'Other'], ['recovery', 'clearance', 'rebound']] other_one_hot = ColumnTransformer([('encoder', OneHotEncoder(drop='first', categories=cats), cols)], remainder='passthrough') pipe_other = Pipeline([('one_hot', other_one_hot), ('impute', SimpleImputer()), ('scale', StandardScaler()), ('lr', LogisticRegression(random_state=seed))]) # - # Column names of transformed passes original_cols_remain = [col for col in X_train_other.columns if col not in cols] new_cols_other = [item for sublist in cats for i, item in enumerate(sublist) if (i>0)] new_cols_other.extend(original_cols_remain) print(new_cols_other) # Search parameters for gridsearchcv param_grid = {'lr__C': np.logspace(-3, 0.1, 100)} # Fit the inner grid search for shots assisted by passes clf_pass = GridSearchCV(estimator=pipe_pass, param_grid=param_grid, scoring='neg_log_loss', n_jobs=-1) clf_pass.fit(X_train_pass, y_train_pass) print('C:', clf_pass.best_estimator_.named_steps.lr.C) # Fit the inner grid search for shots assisted other than passes clf_other = GridSearchCV(estimator=pipe_other, param_grid=param_grid, scoring='neg_log_loss', n_jobs=-1) clf_other.fit(X_train_other, y_train_other) print('C:', clf_other.best_estimator_.named_steps.lr.C) # Outer loops for unbiased estimates of the model accuracy nested_score_pass = cross_validate(clf_pass, X=X_train_pass, y=y_train_pass, scoring=scoring, n_jobs=-1) print('ROC AUC for shots assisted by passes:', nested_score_pass['test_roc_aug'].mean()) print("McFadden's Pseudo R-squared shots assisted by passes:", nested_score_pass['test_mcfaddens_r2'].mean()) 
nested_score_other = cross_validate(clf_other, X=X_train_other, y=y_train_other, scoring=scoring, n_jobs=-1) print('ROC AUC for other model:', nested_score_other['test_roc_aug'].mean()) print("McFadden's Pseudo R-squared for other model:", nested_score_other['test_mcfaddens_r2'].mean()) # # LightGBM model # Add fake training data. I am not adding this to the test data as want this to be realistic of real data. X_train = pd.concat([X_train, X_fake]) y_train = pd.concat([y_train, y_fake]) # Clean data. Categories to numbers. Drop distance and angle measures as just want raw locations for my models. # + def clean_lightgbm(df): df = df.copy() # replace categorical columns shot_type_cat = {'free_kick': 0, 'corner': 1, 'throw_in': 2, 'direct_set_piece': 3, 'open_play': 4} body_type_cat = {'Right Foot': 0, 'Left Foot': 1, 'Other': 2} assist_type_cat = {'pass': 0, 'recovery': 1, 'clearance': 2, 'direct': 3, 'rebound': 4} pass_height_cat = {'High Pass': 0, 'Ground/ Low Pass': 1} pass_technique_cat = {'Through Ball': 0, 'Straight': 1, 'Inswinging': 2, 'Outswinging': 3, 'other': 4} df.shot_type_name.replace(shot_type_cat, inplace=True) df.body_part_name.replace(body_type_cat, inplace=True) df.assist_type.replace(assist_type_cat, inplace=True) df.pass_height_name.replace(pass_height_cat, inplace=True) df.pass_technique_name.replace(pass_technique_cat, inplace=True) # replace boolean type columns (not really as have nans) for col in ['pass_switch', 'pass_cross', 'pass_cut_back', 'shot_one_on_one', 'shot_open_goal', 'under_pressure', 'smart_pass']: df[col] = df[col].astype(np.float32) # drop some distance/ angle columns drop_cols = ['visible_angle', 'middle_angle', 'distance_to_goal', 'distance_visible_angle', 'log_distance_to_goal', 'eventSec', 'period', 'player_id_goalkeeper'] df.drop(drop_cols, axis=1, inplace=True) return df X_train = clean_lightgbm(X_train) X_test = clean_lightgbm(X_test) # - print(X_train.columns) # Fit the nested 5-fold cross validation using Bayesian 
optimisation. #lgbm = LGBMClassifier(random_state=42) lgbm = CalibratedClassifierCV(LGBMClassifier(random_state=42), method='isotonic', cv=3) lgbm_param_grid = {'base_estimator__min_child_samples': Integer(0, 200), 'base_estimator__num_leaves': Integer(2, 500), 'base_estimator__reg_lambda': Real(0, 1), 'base_estimator__reg_alpha': Real(0, 1), 'base_estimator__max_depth': Integer(0, 500)} # Nested resampling using skopt. see: https://github.com/scikit-optimize/scikit-optimize/issues/725 searchcv = BayesSearchCV(estimator=lgbm, n_iter=100, search_spaces=lgbm_param_grid, cv=5, n_jobs=-1) searchcv.fit(X_train, y_train) # Permutation importance # note not using fake data for permutation importance perm_result = permutation_importance(searchcv.best_estimator_, X_train, y_train, n_repeats=10, random_state=seed) df_perm_importance = pd.DataFrame({'Feature':X_train.columns, 'importance': perm_result.importances.mean(axis=1), 'std_dev': perm_result.importances.std(axis=1)}) df_perm_importance.sort_values('importance', ascending=False, inplace=True) df_perm_importance.reset_index(drop=True, inplace=True) df_perm_importance fig, ax = plt.subplots(figsize=(16, 9)) sorted_idx = perm_result.importances_mean.argsort() bar_plot = ax.boxplot(perm_result.importances[sorted_idx].T, vert=False, labels=X_train.columns[sorted_idx]) # # Test # Calculate calibration curve on test data y_pred_lgbm_calibrated = searchcv.best_estimator_.predict_proba(X_test)[:, 1] fraction_of_positives_lgbm, mean_predicted_value_lgbm = calibration_curve(y_test, y_pred_lgbm_calibrated, n_bins=10) # logistic regression y_pred_lr_pass = clf_pass.predict_proba(X_test_pass)[:, 1] y_pred_lr_other = clf_other.predict_proba(X_test_other)[:, 1] y_pred_lr = np.concatenate([y_pred_lr_pass, y_pred_lr_other]) y_true_test = np.concatenate([y_test_pass, y_test_other]) fraction_of_positives_lr, mean_predicted_value_lr = calibration_curve(y_true_test, y_pred_lr, n_bins=10) # Plot calibration curve on test data 
plt.style.use('dark_background') fig = plt.figure(constrained_layout=True, figsize=(10, 15)) gs = fig.add_gridspec(ncols=1, nrows=2, height_ratios=(2/3, 1/3)) ax1 = fig.add_subplot(gs[0]) ax1.plot(mean_predicted_value_lgbm, fraction_of_positives_lgbm, "-o", color='#aabced', label='Calibrated Light GBM') ax1.plot(mean_predicted_value_lr, fraction_of_positives_lr, "-o", color='#dbdf4a', label='Logistic regression') ax1.plot([0, 1], [0, 1], "--", color='#e7aeca', label="Perfectly calibrated") ax1.set_xlabel('Mean predicted value', fontsize=15) ax1.set_ylabel('Fraction of positives', fontsize=15) ax1.set_title('Calibration curve', fontsize=20, pad=10) ax1.legend(fontsize=15) ax1.tick_params(labelsize=15) ax2 = fig.add_subplot(gs[1]) sns.distplot(y_pred_lr, color='#4fe4e4', label='Logistic regression', kde=False, ax=ax2) sns.distplot(y_pred_lgbm_calibrated, color='#aabced', label='Calibrated Light GBM', kde=False, ax=ax2) ax2.set_xlabel('Predicted value', fontsize=15) ax2.set_ylabel('Count', fontsize=15) ax2.tick_params(labelsize=15) ax2.legend(fontsize=15) ax2.set_title('Distribution of predictions', fontsize=20, pad=10); fig.savefig(os.path.join('..', 'figures', '22_calibration_curve.png'), bbox_inches = 'tight', pad_inches = 0.2) # From scikit-learn docs: "The smaller the Brier score, the better, hence the naming with “loss”. Across all items in a set N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome." 
print('Brier score, Light GBM:', brier_score_loss(y_test, y_pred_lgbm_calibrated, pos_label=y_test.max())) print('ROC AUC, Light GBM:', roc_auc_score(y_test, y_pred_lgbm_calibrated)) print('Pseudo R-squared, Light GBM:', mcfadden_r2(y_test, y_pred_lgbm_calibrated)) print('Brier score, logistic regression:',brier_score_loss(y_true_test, y_pred_lr, pos_label=y_true_test.max())) print('ROC AUC, logistic regression:', roc_auc_score(y_true_test, y_pred_lr)) print('Pseudo R-squared, logistic regression:', mcfadden_r2(y_true_test, y_pred_lr)) # # Save models dump(searchcv.best_estimator_, os.path.join('..', 'models', 'lgbm_model.joblib')) dump(clf_pass.best_estimator_, os.path.join('..', 'models', 'lr_pass.joblib')) dump(clf_other.best_estimator_, os.path.join('..', 'models', 'lr_other.joblib')) # # Save data # reload shot dataset for ids df = pd.read_parquet(os.path.join('..', 'data', 'shots.parquet')) df = df[['match_id', 'wyscout_id', 'statsbomb_id']].copy() X_train_other['goal'] = y_train_other X_train_other['split'] = 'train' X_test_other['goal'] = y_test_other X_test_other['split'] = 'test' df_other = pd.concat([X_train_other, X_test_other]) df_other = df_other.merge(df, left_index=True, right_index=True, validate='1:1', how='left') df_other.reset_index(drop=True, inplace=True) df_other.to_parquet(os.path.join('..', 'data', 'modelling', 'lr_other.parquet')) X_train_pass['goal'] = y_train_pass X_train_pass['split'] = 'train' X_test_pass['goal'] = y_test_pass X_test_pass['split'] = 'test' df_pass = pd.concat([X_train_pass, X_test_pass]) df_pass = df_pass.merge(df, left_index=True, right_index=True, validate='1:1', how='left') df_pass.reset_index(drop=True, inplace=True) df_pass.to_parquet(os.path.join('..', 'data', 'modelling', 'lr_pass.parquet')) X_train['goal'] = y_train X_train['split'] = 'train' X_test['goal'] = y_test X_test['split'] = 'test' df_lgbm = pd.concat([X_train, X_test]) # exlcude fake shots df_lgbm = df_lgbm[df_lgbm.index.isin(df.index)].copy() df_lgbm 
= df_lgbm.merge(df, how='left', left_index=True, right_index=True, validate='1:1') df_lgbm.to_parquet(os.path.join('..', 'data', 'modelling', 'lgbm.parquet'))
notebooks/01-expected-goals-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm % matplotlib inline # + colab={"height": 279} colab_type="code" executionInfo={"elapsed": 3625, "status": "ok", "timestamp": 1561052150050, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-xab957NWDw0/AAAAAAAAAAI/AAAAAAAAAAc/oxkcBYiGSGw/s64/photo.jpg", "userId": "07881995209550579845"}, "user_tz": 240} id="DJKt6Li-dqsp" outputId="5e3268fd-42f1-4d9c-f184-0b4d8646c333" p_loc = 0. p_scale = 1. p = tf.distributions.Normal(loc=p_loc, scale=p_scale) q_loc = 0. q_scale = 1. q = tf.distributions.Normal(loc=q_loc, scale=q_scale) plot_points = np.linspace(-3., 8., 200) # plt.plot(plot_points, sess.run(p.prob(plot_points)), 'b') # plt.plot(plot_points, sess.run(q.prob(plot_points)), 'r') # plt.show() # + colab={"height": 279} colab_type="code" executionInfo={"elapsed": 86022, "status": "ok", "timestamp": 1561052306216, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-xab957NWDw0/AAAAAAAAAAI/AAAAAAAAAAc/oxkcBYiGSGw/s64/photo.jpg", "userId": "07881995209550579845"}, "user_tz": 240} id="u1whNDmCecTu" outputId="23130942-f6ec-4b30-a454-40ea59252c6e" tf.reset_default_graph() tf.random.set_random_seed(42) with tf.Session() as sess: p_loc = 0. p_scale = 1. p = tf.distributions.Normal(loc=p_loc, scale=p_scale) q_loc = 2.5 q_scale = 0.5 q = tf.distributions.Normal(loc=q_loc, scale=q_scale) n_points = 20 p_mass = tf.concat(([0.], [1. 
/ (n_points-2) for _ in range(n_points-2)], [0.]), axis=0) quantiles = np.linspace(0., 1., n_points+1) open_sections = q.quantile(quantiles[1:-1]) p_i = tf.Variable(tf.zeros((n_points))) sess.run(tf.initialize_all_variables()) p_star = tf.reduce_sum(p_i) open_cdf = p.cdf(open_sections) cdfs = tf.concat(([0.], open_cdf, [1.]), axis=0) probs = cdfs[1:] - cdfs[:-1] alpha_i = tf.where(p_mass - p_i < (1 - p_star) * probs, p_mass - p_i, (1 - p_star) * probs) sample = p.sample() bucket = tf.concat((tf.reshape(tf.where(sample < open_sections),[-1]), [n_points-1]), axis=0)[0] beta = (alpha_i[bucket]) / ((1 - p_star) * probs[bucket]) accept = (tf.random.uniform(()) < beta) update_op = [p_i.assign(p_i + alpha_i)] rejection_samples = [] for i in tqdm(range(100)): sess.run(tf.initialize_all_variables()) accepted = False s = 0. j = 0 while not accepted: accepted, s = sess.run([accept, sample]) sess.run(update_op) j += 1 rejection_samples.append(s) # - with tf.Session() as sess: plt.hist(rejection_samples, range=(-3., 8.), normed=True, bins=100) plt.plot(plot_points, sess.run(q.prob(plot_points)), 'r') plt.show()
code/compression/Marton_Rejection_sampling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# Bank-customer churn classification with an H2O deep-learning ANN.
# Target column is 'Exited' (column 11 after subsetting below).

# Importing the dataset
# Columns 4:14 drop the row id, customer id and surname identifiers.
dataset = read.csv('Churn_Modelling.csv')
dataset = dataset[4:14]
head(dataset)

# Encoding the categorical variables as factors
# (converted to numeric codes so they can be fed to the network)
dataset$Geography = as.numeric(factor(dataset$Geography,
                                      levels = c('France', 'Spain', 'Germany'),
                                      labels = c(1, 2, 3)))
dataset$Gender = as.numeric(factor(dataset$Gender,
                                   levels = c('Female', 'Male'),
                                   labels = c(1, 2)))

# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
# Stratified 80/20 split on the target variable.
split = sample.split(dataset$Exited, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)

# Feature Scaling
# Column 11 is the target 'Exited' and is excluded from scaling.
# NOTE(review): test_set is scaled with its own mean/sd rather than the
# training-set statistics — confirm this is intended.
training_set[-11] = scale(training_set[-11])
test_set[-11] = scale(test_set[-11])

# Fitting ANN to the Training set
#install.packages('h2o')
library(h2o)
# nthreads = -1 uses all available cores of the local H2O instance.
h2o.init(nthreads = -1)
model = h2o.deeplearning(y = 'Exited',
                         training_frame = as.h2o(training_set),
                         activation = 'Rectifier',
                         hidden = c(5,5),
                         epochs = 100,
                         train_samples_per_iteration = -2)

# Predicting the Test set results
# Threshold the predicted probability at 0.5 to get class labels.
y_pred = h2o.predict(model, newdata = as.h2o(test_set[-11]))
y_pred = (y_pred > 0.5)
y_pred = as.vector(y_pred)

# Making the Confusion Matrix
table(test_set[, 11], y_pred)

# Shut down the local H2O cluster.
h2o.shutdown()
Tensorflow_Keras/Bank Customer Exit/ann_r.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Identity Matrix
#
# Write a function called identity_matrix that outputs an identity matrix of size n.
#
# INPUT
# * n - size of the identity matrix
#
# OUTPUT
# * identity matrix as a list of lists
#
#
# HINTS
# * nested for loops will be helpful
# * the one values are always on the diagonal. Diagonal entries of a list of lists are those where i = j
# * whenever i does not equal j, the value in the matrix should be 0

def identity_matrix(n):
    """Return the n x n identity matrix as a list of lists.

    Entry (i, j) is 1 on the diagonal (i == j) and 0 everywhere else.
    """
    # A nested comprehension replaces the explicit row-building loops:
    # the outer clause walks the rows, the inner clause fills each row.
    return [[1 if row == col else 0 for col in range(n)]
            for row in range(n)]

# +
# Run this cell to see if your answers are as expected

assert identity_matrix(1) == [[1]]
assert identity_matrix(2) == [[1, 0], [0, 1]]
assert identity_matrix(3) == [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
assert identity_matrix(4) == [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
# -

# # Multiplication with the Identity Matrix
#
# Copy your matrix multiplication function in the code cell below. Try multiplying a matrix with an identity matrix to prove to yourself that the identity matrix is analogous to multiplying a scalar by one.
# +
# Copy your matrix multiplication function and any other helper
# functions here from the previous exercises

def get_row(matrix, row):
    """Return row `row` of `matrix` (a list of lists)."""
    return matrix[row]

def get_column(matrix, column_number):
    """Collect entry `column_number` from every row of `matrix`."""
    return [entries[column_number] for entries in matrix]

def dot_product(vector_one, vector_two):
    """Return the dot product of two equal-length vectors."""
    return sum(a * b for a, b in zip(vector_one, vector_two))

def matrix_multiplication(matrixA, matrixB):
    """Return the matrix product A x B as a list of lists.

    Element (r, c) of the product is the dot product of row r of A
    with column c of B.
    """
    return [
        [dot_product(get_row(matrixA, r), get_column(matrixB, c))
         for c in range(len(matrixB[0]))]
        for r in range(len(matrixA))
    ]

# +
# TODO: Run this cell to see if your results are as expected.

m = [[5, 9, 2, 4], [3, 8, 5, 6], [1, 0, 0, 15]]

assert matrix_multiplication(m, identity_matrix(4)) == m
assert matrix_multiplication(identity_matrix(3), m) == m
4_6_Matrices_and_Transformation_of_State/7_identity_matrix.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Adaptive Resonance Theory
# Feedback mechanism + Competitive-learning scheme

# #### ART2

# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras
from keras.datasets import mnist
import numpy as np
import numpy.linalg as LA
import math
from tqdm import tqdm

# #### Load MNIST
# see https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py

# +
# input image dimensions
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
_, img_rows, img_cols = x_train.shape
print ('原始 shape = (資料數目, #row, #col)')
print ('#train = ', x_train.shape[0], ', #test = ', x_test.shape[0])
print ('image rows = ', img_rows, ', image_cols = ', img_cols)

# flatten each 28x28 image into a 784-dim vector
x_train = np.reshape(x_train, (x_train.shape[0], img_rows*img_cols))
x_test = np.reshape(x_test, (x_test.shape[0], img_rows*img_cols))
print ('image map 轉 vector')
print ('new x_train.shape = ', x_train.shape)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# -

# ### ART2
# * $dim(I) = dim(F_1)= M$
# * $dim(F_2) = N$
# * $0<d<1$
# * $\mathbf{z}^I=(z_{1,I}, z_{2,I},...,z_{N,I})$ : Top-Down weights
# * $\mathbf{z}^J$ : Bottom-Up weights

class ART2(object):
    """ART2 network with M-dimensional input (F1) and N category nodes (F2).

    Parameters a, b, c, d, e, theta and the vigilance rho follow the
    Carpenter & Grossberg ART 2 formulation (presumably; confirm against
    the reference implementation this notebook was based on).
    """

    def __init__(self, M=5, N=6, a=1.0, b=1.0, c=0.1, d=0.9, e=0.001, theta=0.1, rho=0.97):
        self.M = M  # input dim = dim(F1) = M
        self.N = N  # dim(F2) = N
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.e = e  # small constant to avoid division by zero in norms
        self.rho = rho  # vigilance threshold
        self.theta = theta  # noise-suppression threshold used by F()
        self.Zi = np.zeros((N, M))  # Top-down weights (F2 -> F1)
        self.Zj = np.random.uniform(low=0.0, high=1.0/((1-d)*math.sqrt(M)),
                                    size=(M, N))  # Bottom-up weights (F1 -> F2)
        self.commited = 0  # number of F2 nodes committed so far
        self.counter = 0  # total number of weight updates performed
        self.time_stamps = np.zeros(N)  # counter value of each node's last update

    def F(self, x):
        """Noise-suppression nonlinearity: attenuate components <= theta."""
        v = np.array(x)
        idx = x[:] <= self.theta
        v[idx] = np.divide(2.0 * self.theta * np.power(x[idx], 2),
                           np.power(x[idx], 2) + (self.theta ** 2))
        return v

    def Gyj(self, T):
        """One-hot F2 activation: d at the argmax of T, 0 elsewhere.

        NOTE: not called anywhere else in this notebook.
        """
        result = np.zeros((self.commited))
        result[np.argmax(T)] = self.d
        return result

    def Processing(self, I):
        """Present one input pattern I and return the winning F2 node index.

        Runs the F1 settling loop, searches committed F2 nodes in descending
        order of bottom-up activation, and either resonates with (and updates)
        a node that passes the vigilance test, or commits/recycles a node for
        the new pattern.
        """
        u = np.zeros_like(I)
        p = np.zeros_like(I)
        q = np.zeros_like(I)
        # F1 settling loop: iterate the short-term-memory equations until p
        # stops changing (or give up after 100 iterations).
        count = 0
        while True:
            count += 1
            last_p = np.array(p)
            w = I + self.a * u
            x = (1.0/(self.e + LA.norm(w))) * w
            v = self.F(x) + (self.b * self.F(q))
            u = (1.0/(self.e + LA.norm(v))) * v
            p = np.array(u)
            q = (1.0/(self.e + LA.norm(p))) * p
            if (LA.norm(last_p - p) == 0.0) or (count >= 100):
                break
        # Calculate the input to F2
        if self.commited > 0:
            T = np.dot(self.Zj[:, :self.commited].transpose(), p)
            # Try committed nodes from highest to lowest activation.
            J = np.flipud(np.argsort(T))
            u_init = np.array(u)
            for winner in J:
                # Feedback F2 -> p
                dzj = self.d * self.Zi[winner, :]
                p = u_init + dzj
                q = (1.0/(self.e + LA.norm(p))) * p
                # Re-settle F1 with the top-down feedback included.
                count = 0
                while True:
                    count += 1
                    last_q = np.array(q)
                    w = I + self.a * u
                    x = (1.0/(self.e + LA.norm(w))) * w
                    v = self.F(x) + (self.b * self.F(q))
                    u = (1.0/(self.e + LA.norm(v))) * v
                    p = u + dzj
                    q = (1.0/(self.e + LA.norm(p))) * p
                    if (last_q == q).all() or (count >= 100):
                        break
                # Calculate r output (reset/vigilance signal)
                r = np.zeros_like(p)
                r = (u + self.c*p) / (self.e + LA.norm(u) + LA.norm(self.c*p))
                if (self.rho/(self.e + LA.norm(r))) > 1:
                    # reset: vigilance failed, try the next candidate node
                    continue
                else:
                    # update: resonance — modify the winner's weights
                    self.Zj[:, winner] = (u / (1.0-self.d)).transpose()
                    self.Zi[winner, :] = u / (1.0-self.d)
                    self.counter += 1
                    self.time_stamps[winner] = self.counter
                    return winner
        # no match among committed nodes: commit a new node, or recycle one
        winner = -1
        if self.commited < self.N:
            winner = self.commited
            self.commited += 1
        else:
            # BUGFIX: was np.argmin(self.counter); self.counter is a scalar,
            # so argmin always returned 0 and node 0 was clobbered every time.
            # Recycle the least-recently-updated node instead, which is what
            # the otherwise-unused time_stamps bookkeeping tracks.
            winner = int(np.argmin(self.time_stamps))
        self.Zj[:, winner] = u / (1.0-self.d)
        self.Zi[winner, :] = u / (1.0-self.d)
        self.counter += 1
        self.time_stamps[winner] = self.counter
        return winner

model = ART2(M = x_train.shape[1], N=100)

# Train on one shuffled pass over the training set.
idx = np.arange(x_train.shape[0])
np.random.shuffle(idx)
for i in tqdm(range(x_train.shape[0] * 1)):
#for i in tqdm(range(3000)):
    winner = model.Processing(x_train[idx[i%x_train.shape[0]], :])
    if winner == -1:
        print ('early stop')
        break

# Visualise the learned top-down prototype of each committed node.
plt.figure(figsize=(32, 32))
for i in range(model.commited):
    plt.subplot(10, 10, 1 + i)
    plt.axis('off')
    plt.imshow(np.reshape(model.Zi[i, :], (28, 28)), cmap='gray')
ART2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [DeepSphere]: a spherical convolutional neural network # [DeepSphere]: https://github.com/SwissDataScienceCenter/DeepSphere # # [<NAME>](https://perraudin.info), [<NAME>](http://deff.ch), <NAME>, <NAME> # # # Demo: whole sphere classification # %load_ext autoreload # %autoreload 2 # %matplotlib inline # + import os import shutil # Run on CPU. os.environ["CUDA_VISIBLE_DEVICES"] = "" import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.svm import SVC import healpy as hp import tensorflow as tf from deepsphere import models, experiment_helper, plot from deepsphere.data import LabeledDataset # - plt.rcParams['figure.figsize'] = (17, 5) EXP_NAME = 'whole_sphere' # ## 1 Data loading # # The data consists of a toy dataset that is sufficiently small to have fun with. It is made of 200 maps of size `NSIDE=64` splitted into 2 classes. # # The maps contain a Gaussian random field realisations produced with Synfast function from Healpy package. # The input power spectra were taken from LambdaCDM model with two sets of parameters. # These maps are not realistic cosmological mass maps, just a toy dataset. # We downsampled them to `Nside=64` in order to make the processing faster. data = np.load('data/maps_downsampled_64.npz') assert(len(data['class1']) == len(data['class2'])) nclass = len(data['class1']) # Let us plot a map of each class. It is not simple to visually catch the differences. 
# Shared color scale so the two example maps are directly comparable.
cmin = min(np.min(data['class1']), np.min(data['class2']))
cmax = max(np.max(data['class1']), np.max(data['class2']))
cm = plt.cm.RdBu_r
cm.set_under('w')  # values below cmin render white
hp.mollview(data['class1'][0], title='class 1', nest=True, cmap=cm, min=cmin, max=cmax)
hp.mollview(data['class2'][0], title='class 2', nest=True, cmap=cm, min=cmin, max=cmax)

# However, those maps have different Power Spectral Densities PSD.

# +
# Empirical PSD of every map, one row per map.
# FIX: the number of multipoles is now taken from experiment_helper.psd's
# actual output instead of the hard-coded np.empty((nclass, 192)) buffers,
# which would silently break (or corrupt rows) if the helper returned a
# different length for another Nside.
sample_psd_class1 = np.array([experiment_helper.psd(m) for m in data['class1']])
sample_psd_class2 = np.array([experiment_helper.psd(m) for m in data['class2']])
# -

# Multipole indices matching the PSD columns.
ell = np.arange(sample_psd_class1.shape[1])

# Plot C_ell * ell * (ell+1), the conventional scaling for angular power spectra.
plot.plot_with_std(ell, sample_psd_class1*ell*(ell+1), label='class 1, Omega_matter=0.3, mean', color='b')
plot.plot_with_std(ell,sample_psd_class2*ell*(ell+1), label='class 2, Omega_matter=0.5, mean', color='r')
plt.legend(fontsize=16);
plt.xlim([10, np.max(ell)])
plt.ylim([1e-6, 1e-3])
# plt.yscale('log')
plt.xscale('log')
plt.xlabel('$\ell$: spherical harmonic index', fontsize=18)
plt.ylabel('$C_\ell \cdot \ell \cdot (\ell+1)$', fontsize=18)
# NOTE(review): the title says Nside=1024 while the maps loaded above are
# downsampled to Nside=64 -- presumably it describes the original simulations
# before downsampling; confirm and update if misleading.
plt.title('Power Spectrum Density, 3-arcmin smoothing, noiseless, Nside=1024', fontsize=18);

# ## 2 Data preparation
#
# Let us split the data into training and testing sets. The raw data is stored into `x_raw` and the power spectrum densities into `x_psd`.

# +
# Normalize and transform the data, i.e. extract features.
x_raw = np.vstack((data['class1'], data['class2']))
# Rescale the overall power of the maps; the relative mean is unaffected.
x_raw = x_raw / np.mean(x_raw**2)

# Standardized PSD features for both classes stacked together.
x_psd = preprocessing.scale(np.vstack((sample_psd_class1, sample_psd_class2)))

# Label vector: the first nclass rows are class 0, the remaining rows class 1.
labels = np.zeros(x_raw.shape[0], dtype=int)
labels[nclass:] = 1

# Random train / test split (same split applied to raw maps, PSDs and labels).
ntrain = 150
(x_raw_train, x_raw_test,
 x_psd_train, x_psd_test,
 labels_train, labels_test) = train_test_split(
    x_raw, x_psd, labels, test_size=2*nclass-ntrain, shuffle=True)

print('Class 1 VS class 2')
print(' Training set: {} / {}'.format((labels_train == 0).sum(), (labels_train == 1).sum()))
print(' Test set: {} / {}'.format((labels_test == 0).sum(), (labels_test == 1).sum()))
# -

# ## 3 Classification using SVM
#
# As a baseline, let us classify our data using an SVM classifier.
#
# * An SVM based on the raw feature cannot discriminate the data because the dimensionality of the data is too large.
# * We however observe that the PSD features are linearly separable.

# +
# RBF kernel on the raw pixel values: too high-dimensional to separate.
svm_rbf = SVC(kernel='rbf')
svm_rbf.fit(x_raw_train, labels_train)

e_train = experiment_helper.model_error(svm_rbf, x_raw_train, labels_train)
e_test = experiment_helper.model_error(svm_rbf, x_raw_test, labels_test)
print('The training error is: {}%'.format(e_train*100))
print('The testing error is: {}%'.format(e_test*100))

# +
# Linear kernel on the PSD features: these are linearly separable.
svm_linear = SVC(kernel='linear')
svm_linear.fit(x_psd_train, labels_train)

e_train = experiment_helper.model_error(svm_linear, x_psd_train, labels_train)
e_test = experiment_helper.model_error(svm_linear, x_psd_test, labels_test)
print('The training error is: {}%'.format(e_train*100))
print('The testing error is: {}%'.format(e_test*100))
# -

# ## 4 Classification using DeepSphere
#
# Let us now classify our data using a spherical convolutional neural network.
#
# Three types of architectures are suitable for this task:
# 1. Classic CNN: the classic ConvNet composed of some convolutional layers followed by some fully connected layers.
# 2.
#    Stat layer: a statistical layer, which computes some statistics over the pixels, is inserted between the convolutional and fully connected layers. The role of this added layer is to make the prediction invariant to the position of the pixels on the sphere.
# 3. Fully convolutional: the fully connected layers are removed and the network outputs many predictions at various spatial locations that are then averaged.
#
# On this simple task, all architectures can reach 100% test accuracy. Nevertheless, the number of parameters to learn decreases and training converges faster. A fully convolutional network is much faster and efficient in terms of parameters. It does however assume that all pixels have the same importance and that their location does not matter. While that is true for cosmological applications, it may not for others.

# +
# Hyper-parameters forwarded verbatim to models.deepsphere below.
params = dict()
params['dir_name'] = EXP_NAME

# Types of layers.
params['conv'] = 'chebyshev5'  # Graph convolution: chebyshev5 or monomials.
params['pool'] = 'max'  # Pooling: max or average.
params['activation'] = 'relu'  # Non-linearity: relu, elu, leaky_relu, softmax, tanh, etc.
params['statistics'] = None  # Statistics (for invariance): None, mean, var, meanvar, hist.
# NOTE: the default above is intentionally overridden by every architecture branch below.

# Architecture.
architecture = 'fully_convolutional'

if architecture == 'classic_cnn':
    params['statistics'] = None
    params['nsides'] = [64, 32, 16, 16]  # Pooling: number of pixels per layer.
    params['F'] = [5, 5, 5]  # Graph convolutional layers: number of feature maps.
    params['M'] = [50, 2]  # Fully connected layers: output dimensionalities.

elif architecture == 'stat_layer':
    params['statistics'] = 'meanvar'
    params['nsides'] = [64, 32, 16, 16]  # Pooling: number of pixels per layer.
    params['F'] = [5, 5, 5]  # Graph convolutional layers: number of feature maps.
    params['M'] = [50, 2]  # Fully connected layers: output dimensionalities.

elif architecture == 'fully_convolutional':
    params['statistics'] = 'mean'
    # Last two nsides equal (8, 8): the final "pooling" step keeps the resolution.
    params['nsides'] = [64, 32, 16, 8, 8]
    # Last layer has 2 feature maps = number of classes (predictions are averaged spatially).
    params['F'] = [5, 5, 5, 2]
    params['M'] = []  # No fully connected layers.

params['K'] = [10] * len(params['F'])  # Polynomial orders.
params['batch_norm'] = [True] * len(params['F'])  # Batch normalization.

# Regularization.
params['regularization'] = 0  # Amount of L2 regularization over the weights (will be divided by the number of weights).
params['dropout'] = 0.5  # Percentage of neurons to keep.

# Training.
params['num_epochs'] = 12  # Number of passes through the training data.
params['batch_size'] = 16  # Number of samples per training batch. Should be a power of 2 for greater speed.
params['eval_frequency'] = 15  # Frequency of model evaluations during training (influence training time).
params['scheduler'] = lambda step: 1e-1  # Constant learning rate.
params['optimizer'] = lambda lr: tf.train.GradientDescentOptimizer(lr)
#params['optimizer'] = lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5)
#params['optimizer'] = lambda lr: tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.999, epsilon=1e-8)
# -

model = models.deepsphere(**params)

# Cleanup before running again.
# Remove stale TensorFlow summaries/checkpoints so this run starts from scratch;
# ignore_errors covers the first run when the folders do not exist yet.
shutil.rmtree('summaries/{}/'.format(EXP_NAME), ignore_errors=True)
shutil.rmtree('checkpoints/{}/'.format(EXP_NAME), ignore_errors=True)

training = LabeledDataset(x_raw_train, labels_train)
testing = LabeledDataset(x_raw_test, labels_test)

# Train the network; the test set doubles as the validation set during training.
accuracy_validation, loss_validation, loss_training, t_step = model.fit(training, testing)

plot.plot_loss(loss_training, loss_validation, t_step, params['eval_frequency'])

# Final train/test error rates of the trained DeepSphere model.
error_train = experiment_helper.model_error(model, x_raw_train, labels_train)
error_test = experiment_helper.model_error(model, x_raw_test, labels_test)
print('The training error is: {:.2%}'.format(error_train))
print('The testing error is: {:.2%}'.format(error_test))

# ## 5 Filters visualization
#
# The package offers a few different visualizations for the learned filters.
# First we can simply look at the Chebyshev coefficients. This visualization is not very interpretable for humans, but can help for debugging problems related to optimization.

# Index of the convolutional layer whose learned filters are visualized below.
layer=2
model.plot_chebyshev_coeffs(layer)

# We observe the Chebyshev polynomials, i.e. the filters in the graph spectral domain. This visualization can help to understand which graph frequencies are picked by the filtering operation. It is mostly interpretable by people from the graph signal processing community.

model.plot_filters_spectral(layer);

# Here comes one of the most human friendly representations of the filters. It consists of the section of the filters "projected" on the sphere. Because of the irregularity of the healpix sampling, this representation of the filters may not look very smooth.

mpl.rcParams.update({'font.size': 16})
model.plot_filters_section(layer, title='');

# Eventually, we can simply look at the filters on the sphere. This representation clearly displays the sampling artifacts.

plt.rcParams['figure.figsize'] = (10, 10)
model.plot_filters_gnomonic(layer, title='')
demo_whole_sphere.ipynb