code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np

# Sample [-6, 6) at a fine step so the plotted curve looks smooth.
x = np.arange(-6, 6, 0.001)


def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x).

    Accepts a scalar or a NumPy array (np.exp broadcasts), which lets the
    whole curve be evaluated in one vectorized call instead of the original
    per-element ``math.exp`` list comprehension.
    """
    return 1 / (1 + np.exp(-x))


# Vectorized evaluation — replaces np.array([sigmoid(xx) for xx in x]).
y = sigmoid(x)

# %matplotlib inline
from matplotlib import pyplot as plt

plt.figure(figsize=(50, 35))
plt.plot(x, y)
# Horizontal reference line at y = 0.5, the sigmoid's midpoint.
plt.plot(x, np.zeros(x.shape) + 0.5, color='r')
Chapter 8/Sigmoid.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/derzhavin3016/CompMath/blob/master/Lab2/Lab2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="d6ndWJPxwsva"
# # Computational mathematics home lab no. 2
# <NAME>, group B01-909

# + [markdown] id="oVfq3kEZw6mP"
# Problem II.10.6 (k): solve the linear system
#
# $$
# \left\lbrace
# \begin{matrix}
# a_{11} x_1 + a_{12} x_2 + \dots + a_{1n} x_n &= &f_1 \\
# \dots &\dots &\dots \\
# a_{n1} x_1 + a_{n2} x_2 + \dots + a_{nn} x_n &= &f_n
# \end{matrix}
# \right. ,
# $$
# where
# $$
# n = 10, \:
# a_{ii} = 1, \:
# a_{ij} = \frac{1}{i + j} (i \neq j), \:
# f_i = \frac{1}{i}
# $$

# + id="qmD0SUmowMxu"
import numpy as np
from numpy import linalg as ling

# + [markdown] id="c2dCoEaiS7ho"
# ## Constants: matrix size and the tolerance used when comparing floats to 0

# + id="t_UGXUwqwdcB"
SIZE = 10
ACCURACY = 1e-10
# -

# # Gaussian elimination

# ## Compare a float to zero up to the tolerance

# + id="cpZRcGdLDsqK"
def is_zero(num: float) -> bool:
    """Return True when *num* is indistinguishable from 0 up to ACCURACY."""
    return abs(num) < ACCURACY


# -

# ## Find a row whose entry in column `col_idx` is non-zero

# + id="vKHfN6shEdmt"
def find_non_zero(arr, col_idx):
    """Return the index of the first row below *col_idx* with a non-zero
    entry in column *col_idx*, or -1 when no such row exists.

    BUG FIX: the original returned ``col_idx`` instead of the row index
    ``i`` it had just found, so the caller swapped the wrong rows.
    Generalized: iterates to ``arr.shape[0]`` instead of the global SIZE.
    """
    for i in range(col_idx + 1, arr.shape[0]):
        if not is_zero(arr[i][col_idx]):
            return i
    return -1


# -

# ## Forward Gaussian elimination

# + id="l60ufHLnDGWc"
def to_diag(matr):
    """Reduce *matr* (in place) to upper-triangular form, swapping in a
    non-zero pivot row when the diagonal entry is (numerically) zero.

    Raises ValueError when no non-zero pivot exists (singular matrix).
    """
    n_rows = matr.shape[0]  # generalized from the global SIZE
    for i in range(n_rows - 1):
        if is_zero(matr[i][i]):
            non_z_line = find_non_zero(matr, i)
            if non_z_line == -1:
                raise ValueError("Invalid Matrix")
            # swap rows i and non_z_line
            matr[[i, non_z_line]] = matr[[non_z_line, i]]
        div = matr[i][i]
        for j in range(i + 1, n_rows):
            # eliminate column i from every row below the pivot
            matr[j] += (-matr[j][i] / div) * matr[i]


# -

# ## Helpers:
# 1. `zero_small` - replace every entry below the tolerance by 0
# 1. `arr_print` - pretty-print a matrix

# + id="eL-m345cGo4Y"
def zero_small(matr):
    """Zero out every entry smaller (in magnitude) than ACCURACY."""
    for i in range(matr.shape[0]):
        for j in range(matr.shape[1]):
            if is_zero(matr[i][j]):
                matr[i][j] = 0


def arr_print(matr):
    """Print the matrix row by row, entries separated by spaces."""
    for row in matr:
        for elem in row:
            print(f"{elem} ", end="")
        print()


# -

# ## Back substitution — returns the solution column vector

# + id="YLIqs8JBJ2HL"
def rev_hod(matr):
    """Back-substitution over an upper-triangular augmented matrix
    (last column is the right-hand side); returns an (n, 1) solution.
    """
    sz = matr.shape[0]
    sol = np.zeros((sz, 1))
    sol[sz - 1][0] = matr[sz - 1][sz] / matr[sz - 1][sz - 1]
    for i in range(sz - 2, -1, -1):
        acc = 0  # renamed from `sum`, which shadowed the builtin
        for j in range(i + 1, sz):
            acc += matr[i][j] * sol[j][0]
        sol[i][0] = (matr[i][sz] - acc) / matr[i][i]
    return sol


# -

# ## Main program:
# 1. Fill matrix $A$ and column $f$
# 1. Append the column to the matrix
# 1. Run the forward and backward passes
# 1. Print the solution vector

# + colab={"base_uri": "https://localhost:8080/"} id="sbGC8emPHebA"
A = np.ones((SIZE, SIZE))
for i in range(SIZE):
    for j in range(SIZE):
        if i != j:
            # loops are 0-based: a_ij = 1 / (i + j) for the 1-based indices
            A[i][j] = 1 / (i + j + 2)
f = np.array([[1 / i for i in range(1, SIZE + 1)]]).transpose()

Asys = np.hstack((A, f))

to_diag(Asys)
#zero_small(Asys) # set all small numbers to zero (< 1e-10)
#arr_print(Asys)
sol = rev_hod(Asys)
print(f"Решение x = \n{sol}")


# +
def vec_n1(vec):
    """Chebyshev (max) norm of a column vector."""
    return max(abs(vec))


def vec_n2(vec):
    """Manhattan (sum) norm of a column vector."""
    return sum(abs(vec))


def vec_n3(vec):
    """Euclidean norm of a column vector."""
    return np.sqrt(sum(vec * vec))


# + [markdown] id="tuP836uyUUtB"
# ## Residual output.
# + colab={"base_uri": "https://localhost:8080/"} id="VhItk4YjJTuA"
# Residual of the computed solution in the three vector norms.
# (removed: an unused `diff = np.linalg.solve(A, f) - sol` assignment)
err = f - np.dot(A, sol)
print(f'Невязка 1: {vec_n1(err)[0]}')
print(f'Невязка 2: {vec_n2(err)[0]}')
print(f'Невязка 3: {vec_n3(err)[0]}')

# + [markdown] id="7IZ5aK0sGlBT"
# ## Matrix norm helpers

# + id="iKGst_9b-DEP"
def norm_1(matr):
    """Maximum column sum (the induced 1-norm)."""
    max_s = 0
    rows, cols = matr.shape
    for j in range(cols):
        col_sum = 0  # renamed from `sum`, which shadowed the builtin
        for i in range(rows):
            col_sum += abs(matr[i][j])
        max_s = max(col_sum, max_s)
    return max_s


def norm_2(matr):
    """Maximum row sum (the induced infinity-norm)."""
    rows, cols = matr.shape
    max_s = 0
    for i in range(rows):
        max_s = max(sum(abs(matr[i])), max_s)
    return max_s


def norm_3(matr):
    """Spectral norm: sqrt of the largest eigenvalue of M M^T."""
    return np.sqrt(max(abs(np.linalg.eigvals(np.dot(matr, matr.transpose())))))


# -

# ## Condition number of A computed with the three norms

# + colab={"base_uri": "https://localhost:8080/"} id="giZHAS5M8wlF"
Ainv = np.linalg.inv(A)
mus = [norm_1(A) * norm_1(Ainv),
       norm_2(A) * norm_2(Ainv),
       norm_3(A) * norm_3(Ainv)]
for i, elem in enumerate(mus):
    print(f"mu_{i} = {elem}")
# -

# # Seidel (Gauss–Seidel) method


# +
def get_LUD(matr):
    """Split *matr* into strictly-lower L, strictly-upper U and diagonal D.

    Vectorized with numpy's tril/triu/diag instead of the original
    element-by-element double loop; the returned arrays are identical.
    """
    L = np.tril(matr, -1)
    U = np.triu(matr, 1)
    D = np.diag(np.diag(matr))
    return L, U, D


# -

def zeidel(matr, b, eps, norm):
    """Gauss–Seidel iteration x_{k+1} = -(L+D)^-1 U x_k + (L+D)^-1 b.

    Stops when ||x_{k+1} - x_k|| < *eps* in the supplied vector *norm*;
    prints the iteration count, residual, stop condition and solution.
    """
    print(f"Норма {norm.__name__}")
    it = 0
    converged = False
    # generalized: size taken from the matrix instead of the global SIZE
    x = np.zeros((matr.shape[0], 1))
    L, U, D = get_LUD(matr)
    LDinv = np.linalg.inv(L + D)
    LDinvU = np.dot(LDinv, U)
    while not converged:
        it += 1
        x_new = -np.dot(LDinvU, x) + np.dot(LDinv, b)
        converged = norm(x_new - x) < eps
        x = x_new
    print(f'Количество итераций {it}')
    print(f'Невязка: {norm(b - np.dot(matr,x))[0]}')
    print(f'Условие останова ||x_{{k+1}} - x_k||_{norm.__name__[-1]} < {eps}')
    print(f'Решение x = \n{x}')


eps = 1e-16

zeidel(A, f, eps, vec_n1)
zeidel(A, f, eps, vec_n2)
zeidel(A, f, eps, vec_n3)
Lab2/Lab2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## AI for Medicine Course 3 Week 2 lecture notebook - Cleaning Text # For this notebook you'll be using the `re` module, which is part of Python's Standard Library and provides support for regular expressions (which you may know as `regexp`). # - If you aren't familiar with `regexp`, we recommend checking the [documentation](https://docs.python.org/3/library/re.html). # # Regular expressions allow you to perform searches and replacements in strings based on patterns. Let's start by looking at some examples. # # You'll be using the search method, which looks like this: # # ```python # search(pattern, text) # ``` # It will output a match if one is found, or None otherwise. # # For the following three examples, you'll try to match the pattern to the string "Pleural Effusion." Take note of these special characters: # # - ^ denotes "starts with" followed by the pattern # - $ denotes "ends with" preceded by the pattern # - | denotes "or" followed by another pattern # # Go ahead and import the `re` library, then run the following three cells. Can you see why the first two examples output a match, while the third one does not? 
# ### Import Library

import re

# ### Examples of Search Patterns

# +
# Does the string start with 'Pl' or end with 'ion'?
match = re.search(pattern="^Pl|ion$", string="Pleural Effusion")

# Show the matched text (or None when nothing matched).
if match:
    print(match.group(0))
else:
    print(None)

# +
# Does the string start with 'Sa' or end in 'ion'?
match = re.search(pattern="^Sa|ion$", string="Pleural Effusion")

if match:
    print(match.group(0))
else:
    print(None)

# +
# Does the string start with 'Eff'?
match = re.search(pattern="^Eff", string="Pleural Effusion")

if match:
    print(match.group(0))
else:
    print(None)
# -

# Even though 'Eff' exists in the string, the string does not BEGIN with
# 'Eff', so there is no match.

# A more advanced example: match "a single letter of the alphabet"
# followed by a number.
#
# ### Characters in a set [a-zA-Z]
#
# - [a-z] matches lowercase letters
# - [A-Z] matches uppercase letters
# - [a-zA-Z] matches lowercase and uppercase letters.

# Match any single letter followed by '123':
match = re.search(pattern='[a-zA-Z]123', string="99C123")
print(f"{match.group(0)}")

# Note how the match includes the letter 'C' in C123.

# ### 'Lookbehind' assertion
#
# (?<=...) matches only when the current position is preceded by a match
# for ... — the preceding text is checked but NOT returned as part of the
# match. The contained pattern must match strings of a fixed length.
#
# `?<=[a-zA-Z]` has to be wrapped in parentheses: `(?<=[a-zA-Z])`;
# otherwise the regex engine raises an error.

# Match a letter followed by 123, excluding the letter from the match.
match = re.search(pattern='(?<=[a-zA-Z])123', string="99C12399")
print(f"{match.group(0)}")

# '123' matches because it is preceded by the letter 'C', but 'C' itself
# is not part of the returned substring — we 'look back' without
# consuming.

# ### Match 123 followed by a letter
#
# Similarly, a letter can follow '123' and be included in the match.

match = re.search(pattern='123[a-zA-Z]', string="99123C99")
print(f"{match.group(0)}")

# Here the letter 'C' IS included in the returned match.

# ### 'Lookahead' assertion
#
# (?=...) matches when ... matches next, without consuming any of the
# string. As with the lookbehind, wrap it in parentheses: `(?=[a-zA-Z])`.

# Match 123 followed by a letter, excluding the letter from the match.
match = re.search(pattern='123(?=[a-zA-Z])', string="99123C99")
print(f"{match.group(0)}")

# The returned match does not include the trailing letter.

# ### String Cleaning
# Next we implement a `clean()` function: it receives a sentence as
# input, cleans it up and returns the clean version of it.
# "Cleaning" in this case refers to:
#
# 1. Convert to lowercase only
# 2. Change "and/or" to "or"
# 3. Change "/" to "or" when used to indicate equality between two words
#    such as tomatos/tomatoes
# 4. Replace double periods ".." with single period "."
# 5. Insert the appropriate space after periods or commas
# 6. Convert multiple whitespaces to a single whitespace
#
# One step at a time — watch how the sentence changes along the way.

# `display` is injected by IPython inside a notebook; fall back to print()
# so this code also runs as a plain script (robustness fix, no behavior
# change inside Jupyter).
try:
    display
except NameError:
    display = print

# Choose a sentence to be cleaned
sentence = " BIBASILAR OPACITIES,likely representing bilateral pleural effusions with ATELECTASIS and/or PNEUMONIA/bronchopneumonia.."

# #### Step 1: lowercase
# The built-in lower() changes all characters of a string to lowercase.
sentence = sentence.lower()
sentence

# #### Step 2: and/or -> or
# re.sub() substitutes a pattern in a string with another string.
sentence = re.sub('and/or', 'or', sentence)
sentence

# #### Step 3: / -> or
# Only replace a slash that sits between two letters (word/word).
sentence = re.sub('(?<=[a-zA-Z])/(?=[a-zA-Z])', ' or ', sentence)
sentence

# #### Step 4: .. -> .
#
# A literal period must be escaped as \. in a regex; a raw string keeps
# the backslash intact for the regex engine (fixes the non-raw "\.\.").

# +
# Replace .. with . using re.sub (option 1)
tmp1 = re.sub(r"\.\.", ".", sentence)
print(tmp1)

# Replace .. with . using string.replace (option 2)
tmp2 = sentence.replace('..', '.')
print(tmp2)
# -

# Replace .. with . using string.replace
sentence = sentence.replace("..", ".")
sentence

# #### Step 5: add whitespace after punctuation
#
# translate() returns a copy of the string mapped through a translation
# table built with str.maketrans().

# Define a dictionary to specify that ! is replaced by !!!
# and 'z' is replaced by 's' (the original comment said "'s' by ''",
# which did not match the dictionary below).
translation_dict = {'!': '!!!', 'z': 's'}
print(translation_dict)

# Create the translation table
translation_tbl = str.maketrans(translation_dict)
print(translation_tbl)

# Each key in the dictionary must be a single character (length 1).

# +
# Choose a string to be translated
tmp_str = "colonization, realization, organization!"
print(tmp_str)

# Translate the string using the translation table
tmp_str2 = tmp_str.translate(translation_tbl)
print(tmp_str2)
# -

# Notice how z is replaced by s, and ! is replaced by !!!

# Add whitespace after punctuation:
# '.' -> '. ' and ',' -> ', '.

# +
# Create translation table using a dictionary comprehension
translation_dict = {key: f"{key} " for key in ".,"}

# View the translation dictionary
display(translation_dict)

# View the translation dictionary with some formatting for easier
# reading — vertical bars make the trailing whitespace visible.
for key, val in translation_dict.items():
    print(f"key: |{key}| \tval:|{val}|")

# +
# Create the translation table using the translation dictionary
punctuation_spacer = str.maketrans(translation_dict)

# Apply the translation table to add whitespace after punctuation.
# BUG FIX: the original read `sentence.tra nslate(...)` — a stray space
# inside the method name, which is a SyntaxError.
sentence = sentence.translate(punctuation_spacer)
sentence
# -

# #### Step 6: trim whitespace
#
# Multiple whitespaces can be trimmed with Python's join() method.
# Sidenote: this can also be done using regexp.
# Split the string using whitespace as the delimiter.
# This removes all whitespace between words.
tokens = sentence.split()
tokens

# Join the tokens with a single whitespace.
# This ensures that there is a single whitespace between words.
sentence = ' '.join(tokens)
sentence

# The sentence is now cleaner and easier to work with!

# ### Putting it all together
#
# Every step wrapped into one function, tried on a few more sentences.

# +
def clean(sentence):
    """Return a cleaned copy of *sentence*.

    Pipeline: lowercase -> "and/or" becomes "or" -> "word/word" becomes
    "word or word" -> ".." collapses to "." -> a space is inserted after
    '.' and ',' -> runs of whitespace collapse to a single space.
    """
    result = sentence.lower()
    result = re.sub('and/or', 'or', result)
    result = re.sub('(?<=[a-zA-Z])/(?=[a-zA-Z])', ' or ', result)
    result = result.replace("..", ".")
    spacer = str.maketrans({char: f"{char} " for char in ".,"})
    result = result.translate(spacer)
    return ' '.join(result.split())


sentences = [" BIBASILAR OPACITIES,likely representing bilateral pleural effusions with ATELECTASIS and/or PNEUMONIA..",
             "Small left pleural effusion/decreased lung volumes bilaterally.left RetroCardiac Atelectasis.",
             "PA and lateral views of the chest demonstrate clear lungs,with NO focal air space opacity and/or pleural effusion.",
             "worrisome nodule in the Right Upper lobe.CANNOT exclude neoplasm.."]

for n, sentence in enumerate(sentences):
    print("\n##########################\n")
    print(f"Sentence number: {n+1}")
    print(f"Raw sentence: \n{sentence}")
    print(f"Cleaned sentence: \n{clean(sentence)}")
# -

# ### Congratulations
# Congratulations on finishing this lecture notebook! By now, you should
# have a better grasp of regexp along with some built-in Python methods
# for cleaning text. You'll be seeing the clean() function again in the
# upcoming graded assignment. Good luck and have fun!
AI for Medical Treatment/Week 2/lecture_notebooks/AI4M_C3_M2_lecture_notebook_clean.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Libsvm GUI # # # A simple graphical frontend for Libsvm mainly intended for didactic # purposes. You can create data points by point and click and visualize # the decision region induced by different kernels and parameter settings. # # To create positive examples click the left mouse button; to create # negative examples click the right button. # # If all examples are from the same class, it uses a one-class SVM. # # # # + from __future__ import division, print_function print(__doc__) # Author: <NAME> <<EMAIL>> # # License: BSD 3 clause import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg from matplotlib.figure import Figure from matplotlib.contour import ContourSet try: import tkinter as Tk except ImportError: # Backward compat for Python 2 import Tkinter as Tk import sys import numpy as np from sklearn import svm from sklearn.datasets import dump_svmlight_file from sklearn.externals.six.moves import xrange y_min, y_max = -50, 50 x_min, x_max = -50, 50 class Model(object): """The Model which hold the data. It implements the observable in the observer pattern and notifies the registered observers on change event. """ def __init__(self): self.observers = [] self.surface = None self.data = [] self.cls = None self.surface_type = 0 def changed(self, event): """Notify the observers. """ for observer in self.observers: observer.update(event, self) def add_observer(self, observer): """Register an observer. 
""" self.observers.append(observer) def set_surface(self, surface): self.surface = surface def dump_svmlight_file(self, file): data = np.array(self.data) X = data[:, 0:2] y = data[:, 2] dump_svmlight_file(X, y, file) class Controller(object): def __init__(self, model): self.model = model self.kernel = Tk.IntVar() self.surface_type = Tk.IntVar() # Whether or not a model has been fitted self.fitted = False def fit(self): print("fit the model") train = np.array(self.model.data) X = train[:, 0:2] y = train[:, 2] C = float(self.complexity.get()) gamma = float(self.gamma.get()) coef0 = float(self.coef0.get()) degree = int(self.degree.get()) kernel_map = {0: "linear", 1: "rbf", 2: "poly"} if len(np.unique(y)) == 1: clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()], gamma=gamma, coef0=coef0, degree=degree) clf.fit(X) else: clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C, gamma=gamma, coef0=coef0, degree=degree) clf.fit(X, y) if hasattr(clf, 'score'): print("Accuracy:", clf.score(X, y) * 100) X1, X2, Z = self.decision_surface(clf) self.model.clf = clf self.model.set_surface((X1, X2, Z)) self.model.surface_type = self.surface_type.get() self.fitted = True self.model.changed("surface") def decision_surface(self, cls): delta = 1 x = np.arange(x_min, x_max + delta, delta) y = np.arange(y_min, y_max + delta, delta) X1, X2 = np.meshgrid(x, y) Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()]) Z = Z.reshape(X1.shape) return X1, X2, Z def clear_data(self): self.model.data = [] self.fitted = False self.model.changed("clear") def add_example(self, x, y, label): self.model.data.append((x, y, label)) self.model.changed("example_added") # update decision surface if already fitted. self.refit() def refit(self): """Refit the model if already fitted. """ if self.fitted: self.fit() class View(object): """Test docstring. 
""" def __init__(self, root, controller): f = Figure() ax = f.add_subplot(111) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim((x_min, x_max)) ax.set_ylim((y_min, y_max)) canvas = FigureCanvasTkAgg(f, master=root) canvas.show() canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas.mpl_connect('button_press_event', self.onclick) toolbar = NavigationToolbar2TkAgg(canvas, root) toolbar.update() self.controllbar = ControllBar(root, controller) self.f = f self.ax = ax self.canvas = canvas self.controller = controller self.contours = [] self.c_labels = None self.plot_kernels() def plot_kernels(self): self.ax.text(-50, -60, "Linear: $u^T v$") self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$") self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$") def onclick(self, event): if event.xdata and event.ydata: if event.button == 1: self.controller.add_example(event.xdata, event.ydata, 1) elif event.button == 3: self.controller.add_example(event.xdata, event.ydata, -1) def update_example(self, model, idx): x, y, l = model.data[idx] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0) def update(self, event, model): if event == "examples_loaded": for i in xrange(len(model.data)): self.update_example(model, i) if event == "example_added": self.update_example(model, -1) if event == "clear": self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None self.plot_kernels() if event == "surface": self.remove_surface() self.plot_support_vectors(model.clf.support_vectors_) self.plot_decision_surface(model.surface, model.surface_type) self.canvas.draw() def remove_surface(self): """Remove old decision surface.""" if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] def 
plot_support_vectors(self, support_vectors): """Plot the support vectors by placing circles over the corresponding data points and adds the circle collection to the contours list.""" cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1], s=80, edgecolors="k", facecolors="none") self.contours.append(cs) def plot_decision_surface(self, surface, type): X1, X2, Z = surface if type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError("surface type unknown") class ControllBar(object): def __init__(self, root, controller): fm = Tk.Frame(root) kernel_group = Tk.Frame(fm) Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel, value=1, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel, value=2, command=controller.refit).pack(anchor=Tk.W) kernel_group.pack(side=Tk.LEFT) valbox = Tk.Frame(fm) controller.complexity = Tk.StringVar() controller.complexity.set("1.0") c = Tk.Frame(valbox) Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(c, width=6, textvariable=controller.complexity).pack( side=Tk.LEFT) c.pack() controller.gamma = Tk.StringVar() controller.gamma.set("0.01") g = Tk.Frame(valbox) Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT) g.pack() controller.degree = Tk.StringVar() controller.degree.set("3") d = Tk.Frame(valbox) Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT) 
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT) d.pack() controller.coef0 = Tk.StringVar() controller.coef0.set("0") r = Tk.Frame(valbox) Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT) r.pack() valbox.pack(side=Tk.LEFT) cmap_group = Tk.Frame(fm) Tk.Radiobutton(cmap_group, text="Hyperplanes", variable=controller.surface_type, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(cmap_group, text="Surface", variable=controller.surface_type, value=1, command=controller.refit).pack(anchor=Tk.W) cmap_group.pack(side=Tk.LEFT) train_button = Tk.Button(fm, text='Fit', width=5, command=controller.fit) train_button.pack() fm.pack(side=Tk.LEFT) Tk.Button(fm, text='Clear', width=5, command=controller.clear_data).pack(side=Tk.LEFT) def get_parser(): from optparse import OptionParser op = OptionParser() op.add_option("--output", action="store", type="str", dest="output", help="Path where to dump data.") return op def main(argv): op = get_parser() opts, args = op.parse_args(argv[1:]) root = Tk.Tk() model = Model() controller = Controller(model) root.wm_title("Scikit-learn Libsvm GUI") view = View(root, controller) model.add_observer(view) Tk.mainloop() if opts.output: model.dump_svmlight_file(opts.output) if __name__ == "__main__": main(sys.argv) # -
Python-sklearn/svm/svm_gui.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lecture 12 – More Iteration
#
# ## Data 94, Spring 2021

# ## Review

for n in [2, 4, 6, 8]:
    print(n * 5)

# ### Quick Check 1

# ### While vs. for

# +
def sum_squares_for(values):
    """Sum of squares of *values*, using a for-each loop."""
    total = 0
    for v in values:
        total += v**2
    return total

# 3^2 + 4^2 + 5^2
sum_squares_for([3, 4, 5])


# +
def sum_squares_while(values):
    """Sum of squares of *values*, using an index-based while loop."""
    total = 0
    j = 0
    while j < len(values):
        total += values[j]**2
        j += 1
    return total

# 3^2 + 4^2 + 5^2
sum_squares_while([3, 4, 5])
# -

# ## Example: missing number

# ### Quick Check 2

def missing_number(nums):
    """Return the single value missing from *nums*, a list of distinct
    integers 1..k with exactly one value left out.

    FIX: the original cell still held the fill-in-the-blank placeholder
    ``for ...:`` (a SyntaxError); this is the completed quick-check
    answer. ``len(nums) + 2`` makes the range cover the case where the
    largest value is the missing one.
    """
    for n in range(1, len(nums) + 2):
        if n not in nums:
            return n

# Should be 3
missing_number([1, 2, 6, 4, 5])

# Should be 6
missing_number([7, 2, 3, 5, 9, 8, 4, 1])

# ## Example: Luhn's algorithm

# Ignore this code
def int_to_list(n):
    """Split a non-negative integer into the list of its decimal digits."""
    return [int(i) for i in str(n)]

int_to_list(5457623898234113)

def luhns_algorithm(cc):
    """Luhn checksum validation of a card number given as a digit list;
    returns True when the number passes the check."""
    # Step 1: the last digit is the check digit.
    check_digit = cc[-1]
    even_sum = 0
    for i in range(0, len(cc), 2):
        # Step 2: double each digit at an even index; a two-digit result
        # has 9 subtracted (equivalent to summing its digits).
        even_element = cc[i] * 2
        if even_element > 9:
            even_element = even_element - 9
        # Step 3
        even_sum += even_element
    # Step 4: sum the odd-index digits, excluding the check digit.
    odd_sum = 0
    for i in range(1, len(cc) - 2, 2):
        odd_sum += cc[i]
    # Step 5
    total_sum = even_sum + odd_sum
    # Step 6: valid iff the grand total is a multiple of 10.
    return (total_sum + check_digit) % 10 == 0

luhns_algorithm(int_to_list(5457623898234113))

# What if I accidentally swap two digits?
luhns_algorithm(int_to_list(5475623898234113))

# Now Luhn's algorithm can tell me the credit card number is invalid,
# which likely means I made a typo.
# ## Nested loops and lists

# ### Example: times tables

# Idiom fix: an f-string instead of four chained str() concatenations;
# the printed text is identical.
for x in range(1, 5):
    for y in range(1, 5):
        print(f"{x} x {y} = {x * y}")

# ### Example: movies

movies = [['21 Jump Street', 'Grown Ups', 'Mall Cop'],
          ['Paranormal Activity', 'Nightmare on Elm Street'],
          ['Crazy Rich Asians', 'Trainwreck', 'Crazy, Stupid, Love']]

# Outer loop walks the genres, inner loop the movies within a genre.
# NOTE(review): the '---' separator is assumed to print once per genre —
# the flattened source's indentation is ambiguous; confirm against the
# original notebook output.
for genre in movies:
    for movie in genre:
        print(movie)
    print('---')

movies[0][2]
lecture/lec12/lec12.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
import numpy as np

songs = pd.read_csv("songs.csv")
songs.head()

def clean_track(track):
    """Normalize a track title into the slug form Genius uses in its
    lyric URLs: strip parentheses/brackets/quotes, dashes for spaces,
    lowercase."""
    s = track
    s = re.sub(r'\(([^\)]+)\)', '', s)   # drop "(feat. ...)"-style suffixes
    s = re.sub(r'\"', '', s)
    s = re.sub(r'\[.*?\']*', '', s)
    s = re.sub(r'\'', '', s)
    s = re.sub(r'--', '', s)
    s = s.strip()
    s = s.replace(" ", "-")
    s = s.lower()
    return s

songs["genius_track"] = songs["track"].apply(clean_track)
songs

def genius_artist(name):
    """Slug form of an artist name: first character kept as-is, the rest
    lowercased, spaces replaced by dashes."""
    name = name[0] + name[1:].lower()
    name = name.replace(" ", "-")
    return name

genius_artist("2 Chainz")

songs["genius_artist"] = songs["artist"].apply(genius_artist)

def get_lyrics(genius_artist, genius_track):
    """Fetch the lyrics page for an artist/track slug pair and return the
    lyric lines as a list of strings, or the string "error" when the page
    cannot be parsed (page layout change, 404, missing <p>)."""
    base = 'https://genius.com/'
    url = base + genius_artist + "-" + genius_track + "-lyrics"
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    try:
        p = soup.find_all("p")
        s = p[0].text
        s = re.sub(r'\[.*?\']', '', s)   # drop [Verse]/[Chorus] annotations
        s = re.sub(r'--', '', s)
        s = s.split("\n")
        s = [x for x in s if x != ""]
        return s
    except Exception:
        # was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed; best-effort "error" marker kept.
        return "error"

soup = get_lyrics(songs["genius_artist"][971], songs["genius_track"][971])
soup

songs.head()
songs.shape

# .copy() added: assigning into a bare slice triggers pandas'
# SettingWithCopyWarning and may silently not write through.
songs2 = songs[20001:27050].copy()
songs2["lyrics"] = songs2.apply(lambda x: get_lyrics(x["genius_artist"], x["genius_track"]), axis=1)

# +
import pickle

with open('songs7_lyrics', 'wb') as fp:
    pickle.dump(songs2, fp)
# -
wikipedia_crawling/wikipedia_get lyrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Chapter 17. Time series analysis
#
# # 17.1
# ## The Wikipedia pageview API endpoint used here has expired, so this
# ## code no longer runs end to end.

# +
import urllib, json
import pandas as pd
import numpy as np
import sklearn.linear_model, statsmodels.api as sm
import matplotlib.pyplot as plt

START_DATE = "20131010"
END_DATE = "20161012"
WINDOW_SIZE = 7
TOPIC = "Cat"
URL_TEMPLATE = ("https://wikimedia.org/api/rest_v1"
                "/metrics/pageviews/per-article"
                "/en.wikipedia/all-access/"
                "allagents/%s/daily/%s/%s")


# Function that downloads the daily view counts
def get_time_series(topic, start, end):
    """Download daily page-view counts for *topic* between *start* and
    *end* (YYYYMMDD strings) and return a DataFrame indexed by date with
    a single 'views' column."""
    url = URL_TEMPLATE % (topic, start, end)
    print(url)
    json_data = urllib.request.urlopen(url).read().decode('utf-8')
    data = json.loads(json_data)
    times = [rec['timestamp'] for rec in data['items']]
    values = [rec['views'] for rec in data['items']]
    # Timestamps arrive as YYYYMMDDHH; reformat to YYYY-MM-DD.
    times_formatted = pd.Series(times).map(
        lambda x: x[:4] + '-' + x[4:6] + '-' + x[6:8])
    time_index = times_formatted.astype('datetime64')
    return pd.DataFrame({'views': values}, index=time_index)


# Function that fits a linear regression and reports its slope
def line_slope(ss):
    """Slope of a least-squares line through the window *ss*.

    FIX: returns a scalar float — ``linear.coef_`` is a length-1 array,
    which Series.rolling().apply() rejects.
    """
    X = np.arange(len(ss)).reshape((len(ss), 1))
    linear.fit(X, ss)
    return float(linear.coef_[0])


# Create one linear regression model and keep refitting it below.
linear = sklearn.linear_model.LinearRegression()
df = get_time_series(TOPIC, START_DATE, END_DATE)

# Visualize the raw time series.
df['views'].plot()
plt.title("날짜별 조회수")
plt.show()

# Clip outliers at the 95th percentile.
# FIX: .loc assignment instead of chained `df.views[...] = ...`, which
# raises SettingWithCopyWarning and may not write through.
max_views = df['views'].quantile(0.95)
df.loc[df.views > max_views, 'views'] = max_views

# Seasonal decomposition with a 7-day period.
# NOTE(review): newer statsmodels renamed `freq=` to `period=` — adjust
# to the installed version.
decomp = sm.tsa.seasonal_decompose(df['views'].values, freq=7)
decomp.plot()
plt.suptitle("조회수 분석 결과")
plt.show()

# Per-date features over the trailing week: mean, max, min, slope, total.
# FIX: pd.rolling_mean / rolling_max / rolling_min / rolling_apply /
# rolling_sum were removed from pandas long ago; the modern equivalent is
# the Series.rolling(...) accessor.
rolling = df['views'].rolling(WINDOW_SIZE)
df['mean_1week'] = rolling.mean()
df['max_1week'] = rolling.max()
df['min_1week'] = rolling.min()
df['slope'] = rolling.apply(line_slope)
df['total_views_week'] = rolling.sum()

# A cyclic day-of-week-style feature. NOTE(review): this takes the
# nanosecond epoch value mod 7, which happens to advance by 1 per day
# (86400e9 % 7 == 1) — it cycles correctly but is not the calendar
# weekday; confirm intent.
df['day_of_week'] = df.index.astype(int) % 7
day_of_week_cols = pd.get_dummies(df['day_of_week'])
df = pd.concat([df, day_of_week_cols], axis=1)

# Prediction target: next week's total views, aligned by shifting.
df['total_views_next_week'] = \
    list(df['total_views_week'][WINDOW_SIZE:]) + \
    [np.nan for _ in range(WINDOW_SIZE)]

# FIX: `+ range(6)` is a Python-3 TypeError; wrap in list().
# (Only 6 of the 7 dummy columns are used, avoiding perfect collinearity.)
INDEP_VARS = ['mean_1week', 'max_1week', 'min_1week', 'slope'] + list(range(6))
DEP_VAR = 'total_views_next_week'

# FIX: integer division for slicing — `n_records / 2` is a float in
# Python 3 and cannot index.
n_records = df.dropna().shape[0]
test_data = df.dropna()[:n_records // 2]
train_data = df.dropna()[n_records // 2:]

linear.fit(train_data[INDEP_VARS], train_data[DEP_VAR])
test_preds_array = linear.predict(test_data[INDEP_VARS])
test_preds = pd.Series(test_preds_array, index=test_data.index)

print("예측값과 정답의 상관 계수: ", \
    test_data[DEP_VAR].corr(test_preds))
# -
# Chapter 17.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Simple Linear Regression # #### - Peprocessing # importing libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import axes3d # %matplotlib inline # import the dataset dataset = pd.read_csv('../datasets/Salary_Data.csv') print(dataset.head()) x = dataset[['YearsExperience']] y = dataset['Salary'] # note : no need for missisg ,cat date # splitting from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size =1/3, random_state = 0) # note : no need for scalling data # #### Model and Prediction from sklearn.linear_model import LinearRegression sim_lin_RegModel = LinearRegression() # creating the model sim_lin_RegModel.fit(x_train, y_train) # train and fit the model y_predict = sim_lin_RegModel.predict(x_test) # predicted y # #### Comparison plt.plot(x_test, y_test,'x', x_test, y_predict, '-') plt.title('our first simple model on test set') plt.xlabel('years of experince') plt.ylabel('salary') plt.show() plt.plot(x_train, y_train,'.', x_train, sim_lin_RegModel.predict(x_train), '-') plt.title('our first simple model on trainning set') plt.xlabel('years of experince') plt.ylabel('salary') plt.show() # ### Optional # * Cost function # we will only work with 1d # importing new data data = np.loadtxt('../datasets/ex1data1.txt', delimiter=',') X = np.c_[np.ones(data.shape[0]),data[:,0]] y = np.c_[data[:,1]] def compute_cost(x, y, theta=[[0],[0]] ): m = y.size J = 0 h = x.dot(theta) # note that first column should be 1 as it multiplied by theta[1] = theta_node J = 1/(2*m)*np.sum(np.square(h-y)) return(J) # * Gradient Descent def gradientDescent(X, y, theta=[[0],[0]], alpha=0.5, num_iters=200): m = y.size J_history = 
np.zeros(num_iters) for iter in np.arange(num_iters): h = X.dot(theta) theta = theta - alpha*(1/m)*(X.T.dot(h-y)) J_history[iter] = compute_cost(X, y, theta) return(theta, J_history) # theta for minimized cost J theta , Cost_J = gradientDescent(X, y_train.iloc[:].values) print('theta: ',theta.ravel()) plt.plot(Cost_J) plt.ylabel('Cost J') plt.xlabel('Iterations'); # + # Create grid coordinates for plotting B0 = np.linspace(-10, 10, 50) B1 = np.linspace(-1, 4, 50) xx, yy = np.meshgrid(B0, B1, indexing='xy') Z = np.zeros((B0.size,B1.size)) # Calculate Z-values (Cost) based on grid of coefficients for (i,j),v in np.ndenumerate(Z): Z[i,j] = compute_cost(X,y, theta=[[xx[i,j]], [yy[i,j]]]) fig = plt.figure(figsize=(15,6)) ax1 = fig.add_subplot(121) ax2 = fig.add_subplot(122, projection='3d') # Left plot CS = ax1.contour(xx, yy, Z, np.logspace(-2, 3, 20), cmap=plt.cm.jet) ax1.scatter(theta[0],theta[1], c='r') # Right plot ax2.plot_surface(xx, yy, Z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet) ax2.set_zlabel('Cost') ax2.set_zlim(Z.min(),Z.max()) ax2.view_init(elev=15, azim=230) # settings common to both plots for ax in fig.axes: ax.set_xlabel(r'$\theta_0$', fontsize=17) ax.set_ylabel(r'$\theta_1$', fontsize=17) # - compute_cost(X, y) # at the default value with theta_0 = 0 , theta_1 = 0 # * Gradient Descent # # Multiple Linear Regression # #### - Preprocessing # importing Libraries --> louded # importing data dataset = pd.read_csv('../datasets/50_Startups.csv') x = dataset.iloc[:, :-1].values # all except the last one y = dataset.iloc[:, -1].values # the last one # no missing data exist # categorical data # note : as using iloc[:,:].values : it will be arrays not dataframe from sklearn.preprocessing import LabelEncoder, OneHotEncoder x[:,3] = LabelEncoder().fit_transform(x[:,3]) x = OneHotEncoder(categorical_features=[3]).fit_transform(x).toarray() # no scalling needed as data nearly at the same scale # splitting from sklearn.cross_validation import train_test_split 
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state = 0) # #### - Model and Prediction from sklearn.linear_model import LinearRegression mul_lin_RegModel = LinearRegression() mul_lin_RegModel.fit(x_train, y_train) y_predict = mul_lin_RegModel.predict(x_test) # #### - Comparison plt.plot(y_test, 'x', y_predict, '-') plt.show() # # Polynomial Regression # #### - preprocessing # importing Libraries --> louded # importing data dataset = pd.read_csv('../datasets/Position_Salaries.csv') dataset = dataset.drop(labels='Position',axis=1) x = dataset.iloc[:,1:2].values y = dataset.iloc[:,-1].values dataset.head() # + # no missing data # categorical data #if dataset['Position'].nunique() == len(dataset['Position']) : # print("no label codes needed") # from this code we delete the column Position # - # splitting from sklearn.cross_validation import train_test_split x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0) # scalling from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # #### - Model and Prediction # + from sklearn.preprocessing import PolynomialFeatures x_train_poly = PolynomialFeatures(degree = 2).fit_transform(x_train) # degree is the degree of polynomial x_test_poly = PolynomialFeatures(degree = 2).fit_transform(x_test) # degree is the degree of polynomial from sklearn.linear_model import LinearRegression pol_RegModel = LinearRegression() pol_RegModel.fit(x_train_poly, y_train) y_predict = pol_RegModel.predict(x_test_poly) # - # #### - Comparison # on the trainning data plt.scatter(x_train, y_train,marker='x') plt.plot(x_train, pol_RegModel.predict(x_train_poly), color = 'r')
# Regression/Linear_Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

print('Meu nome é: <NAME> ')

print('Meu RA é: 192801')

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.datasets import load_iris
import torch
import torch.optim as optim

# +
iris = load_iris()
X = iris.data[:, ::2]  # sepal and petal length, columns 0 and 2
# Min-max scale each feature into [0, 1].
Xc = X - X.min(axis=0)
Xc /= Xc.max(axis=0)
Y = iris.target

# X[:,0] = Sepal lenght
# X[:,1] = Sepal width
# define colors to be red, yellow and blue (one per class)
colors = np.array(['r', 'y', 'b'])
plt.scatter(Xc[:, 0], Xc[:, 1], c=colors[Y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
# -

print(X.shape)
print(Y.shape)

# One-hot encoding shown for illustration only; CrossEntropyLoss below
# takes integer class labels, not one-hot vectors.
# BUGFIX: keras.utils.np_utils was removed from modern Keras and was the
# only use of keras here — plain NumPy yields the same one-hot matrix.
Y_oh = np.eye(3)[Y]
print(Y[0:5])
print(Y_oh[0:5])

print(X.shape[0])

# ## Modelo do Torch
n_classes = 3
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 10, X.shape[1], 10, n_classes

# BUGFIX: the original network ended with torch.nn.Softmax() while also
# training with CrossEntropyLoss, which applies log-softmax internally.
# Feeding it softmax outputs double-normalizes the scores and degrades
# training; the network must output raw logits.
model_torch = torch.nn.Sequential(
    torch.nn.Linear(X.shape[1], 2 * H),
    torch.nn.ReLU(),
    torch.nn.Linear(2 * H, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, n_classes),
)
criterion = torch.nn.CrossEntropyLoss()
learning_rate = 1e-2
optimizer = torch.optim.Adam(model_torch.parameters(), lr=learning_rate)
# opt = optim.SGD(model_torch.parameters(), lr=0.2)

# +
# BUGFIX: torch.autograd.Variable is deprecated — plain tensors carry
# autograd information since PyTorch 0.4.
X_tensor = torch.from_numpy(Xc).type(torch.FloatTensor)
print(X_tensor.size())
Y_tensor = torch.from_numpy(Y).type(torch.LongTensor)
print(Y_tensor.size())
# -

for t in range(700):
    # Forward pass: Compute predicted y by passing x to the model
    y_pred = model_torch(X_tensor)

    # Compute and print loss
    # BUGFIX: loss.data[0] raises "invalid index of a 0-dim tensor" on
    # PyTorch >= 0.5; loss.item() is the supported scalar accessor.
    loss = criterion(y_pred, Y_tensor)
    print('Iteration , Loss', t, loss.item())

    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# +
loss = criterion(model_torch(X_tensor), Y_tensor)
print('Final loss:', loss)

for i in model_torch.parameters():
    print(i.data.shape)
# -

# Training accuracy. Note argmax over logits equals argmax over softmax
# probabilities, so removing the Softmax layer does not change this metric.
y_pred_max = np.argmax(model_torch(X_tensor).data.numpy(), axis=1)
acc = np.sum(y_pred_max == Y) / y_pred_max.shape[0]
print('Accuracy = %.3f' % acc, '%')

print('Matriz de confusão:')
import pandas as pd
pd.crosstab(y_pred_max, Y)
# CIFAR_10/Other_pytorch/my_logistic_regression_iris.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import sys, os, glob import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import seaborn as sns from scipy import stats from sw_plotting import change_bar_width, plotCountBar from sw_utilities import tukeyTest # Make a folder if it is not already there to store exported figures # !mkdir ../jupyter_figures # + # Read in the raw data sets to plot # These are the E-cadherin and integrin blocking antibody treatment that was done on 9/1-9/3/2020 df = pd.read_csv('../data/SMG-bud-count-Ecad-integrin-blocking-antibody/20200901-03-bud-count-summary.txt', sep='\t') df.head() # + # df_to_plot = df[(df.Measurement == 'bud_count_ratio_24h_to_2h') & (df.Treatment.isin(['alpha6-beta1', 'IgG-IgM']))] # outputFigPath = '../jupyter_figures/' + '20200901-03-bud-count-summary-integrin-set-24-to-2h' + '.svg' # plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, # yLabel='Bud ratio (24h/2h)', # yMax=None, yTicks=None, fig_width=0.3, fig_height=0.9) # df_to_plot = df[(df.Measurement == 'bud_count_ratio_48h_to_2h') & (df.Treatment.isin(['alpha6-beta1', 'IgG-IgM']))] # outputFigPath = '../jupyter_figures/' + '20200901-03-bud-count-summary-integrin-set-48-to-2h' + '.svg' # plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, # yMax=None, yTicks=None, fig_width=0.3, fig_height=0.9) # + # by_treatment = df_to_plot.groupby('Treatment') # by_treatment.describe() # + # stats.ttest_rel(df_to_plot[df_to_plot.Treatment=='alpha6-beta1']['Value'], # df_to_plot[df_to_plot.Treatment=='IgG-IgM']['Value']) # + # Read in the raw data sets to plot # These are the E-cadherin and integrin blocking antibody treatment that was done on 9/4-9/6/2020 # This experiment has only integrin set but not the E-cadherin set df = 
pd.read_csv('../data/SMG-bud-count-Ecad-integrin-blocking-antibody/20200904-06-bud-count-summary.txt', sep='\t') df # + df_to_plot = df[df.Measurement == 'bud_count_ratio_24h_to_2h'] outputFigPath = '../jupyter_figures/' + '20200904-06-bud-count-summary-all-24-to-2h' + '.svg' plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, yLabel='Bud ratio (24h/2h)', xLabel_off=True, xticklabels_angle=45, yMax=None, yTicks=None, fig_width=0.45, fig_height=0.9) tukeyTest(df_to_plot.Value, df_to_plot.Treatment, alpha=0.05) df_to_plot = df[df.Measurement == 'bud_count_ratio_48h_to_2h'] outputFigPath = '../jupyter_figures/' + '20200904-06-bud-count-summary-all-48-to-2h' + '.svg' plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, yLabel='Bud ratio (48h/2h)', xLabel_off=True, xticklabels_angle=45, yMax=None, yTicks=None, fig_width=0.45, fig_height=0.9) tukeyTest(df_to_plot.Value, df_to_plot.Treatment, alpha=0.05) # - tukeyTest(df_to_plot.Value, df_to_plot.Treatment, alpha=0.05) # + df_to_plot = df[(df.Measurement == 'bud_count_ratio_24h_to_2h') & (df.Treatment.isin(['alpha6-beta1', 'IgG-IgM']))] outputFigPath = '../jupyter_figures/' + '20200904-06-bud-count-summary-integrin-set-24-to-2h' + '.svg' plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, yLabel='Bud ratio (24h/2h)', xLabel_off=True, xticklabels_angle=45, yMax=None, yTicks=None, fig_width=0.3, fig_height=0.9) df_to_plot = df[(df.Measurement == 'bud_count_ratio_48h_to_2h') & (df.Treatment.isin(['alpha6-beta1', 'IgG-IgM']))] outputFigPath = '../jupyter_figures/' + '20200904-06-bud-count-summary-integrin-set-48-to-2h' + '.svg' plotCountBar(df_to_plot.Treatment, df_to_plot.Value, outputFigPath, yLabel='Bud ratio (48h/2h)', xLabel_off=True, xticklabels_angle=45, yMax=None, yTicks=None, fig_width=0.3, fig_height=0.9) # - # Not using the paired t-test because for this set I did not keep track of paired glands from the same embryo, # although surely the two glands of the same embryo 
were separated to different groups stats.ttest_ind(df_to_plot[df_to_plot.Treatment=='alpha6-beta1']['Value'], df_to_plot[df_to_plot.Treatment=='IgG-IgM']['Value']) # + # Swarm and bar plot of different collagenase concentrations outputPrefix = '20200904-06-bud-count-summary-integrin-set-48-to-2h' outputFigPath = "../jupyter_figures/" + outputPrefix + ".svg" fig_width=0.4 fig_height=1.6 fig = plt.figure(figsize=(fig_width,fig_height), dpi=300) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax = sns.swarmplot(x='Treatment', y='Value', data = df_to_plot, order=['IgG-IgM', 'alpha6-beta1'], color="blue", size=2.0, alpha=.4) ax = sns.barplot(x='Treatment', y='Value', data = df_to_plot, order=['IgG-IgM', 'alpha6-beta1'], color=".7", alpha=1.0, errwidth=.7, errcolor="k", capsize=.2, ci=95) plt.ylim(0, 18) # plt.yticks([0, 10, 20]) plt.xlabel(None) plt.ylabel("Bud ratio 48h / 2h") # rotate x tick labels if necessary x_labels = ['IgG-IgM', 'a6-b1'] ax.set_xticklabels(labels=x_labels, rotation=60, ha="right") # make the bar width narrower change_bar_width(ax, .6) for o in fig.findobj(): o.set_clip_on(False) for o in ax.findobj(): o.set_clip_on(False) if outputFigPath is not None: plt.savefig(outputFigPath) # -
# SMG-bud-count-Ecad-integrin-blocking-antibody.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] heading_collapsed=true # #### Libraries # + hidden=true # %%javascript utils.load_extension('collapsible_headings/main') utils.load_extension('hide_input/main') utils.load_extension('autosavetime/main') utils.load_extension('execute_time/ExecuteTime') utils.load_extension('code_prettify/code_prettify') utils.load_extension('scroll_down/main') utils.load_extension('jupyter-js-widgets/extension') # + hidden=true import pandas as pd import numpy as np import matplotlib.pyplot as plt np.random.seed(0) from sklearn.metrics import roc_auc_score # + hidden=true def plot_feature_importance( columnas, model_features, columns_ploted=10, model_name="Catboost" ): """ This method is yet non-tested This function receives a set of columns feeded to a model, and the importance of each of feature. 
Returns a graphical visualization Call it fot catboost pipe example: plot_feature_importance(pipe_best_estimator[:-1].transform(X_tr).columns,pipe_best_estimator.named_steps['cb'].get_feature_importance(),20) Call it for lasso pipe example: plot_feature_importance(pipe_best_estimator[:-1].transform(X_tr).columns,np.array(pipe_best_estimator.named_steps['clf'].coef_.squeeze()),20) """ feature_importance = pd.Series(index=columnas, data=np.abs(model_features)) n_selected_features = (feature_importance > 0).sum() print( "{0:d} features, reduction of {1:2.2f}%".format( n_selected_features, (1 - n_selected_features / len(feature_importance)) * 100, ) ) plt.figure() feature_importance.sort_values().tail(columns_ploted).plot( kind="bar", figsize=(18, 6) ) plt.title("Feature Importance for {}".format(model_name)) plt.show() # + hidden=true # !ls # - # ## Joins # ### Generic generic = pd.read_csv('gx_num_generics.csv').drop(columns='Unnamed: 0') generic.head(1) # ### Package package = pd.read_csv('gx_package.csv').drop(columns='Unnamed: 0') package.head() package.presentation.unique() package.country.nunique() package.brand.nunique() package.brand.value_counts() # ### Panel # + panel = pd.read_csv('gx_panel.csv').drop(columns='Unnamed: 0') panel.head(2) # - panel.brand.nunique() panel.channel.unique() # + [markdown] heading_collapsed=true # ### Therapeutic # + hidden=true therapeutic_area = pd.read_csv('gx_therapeutic_area.csv').drop(columns='Unnamed: 0') therapeutic_area.head(1) # + hidden=true therapeutic_area.therapeutic_area.nunique() # + [markdown] heading_collapsed=true # ### Volume # + hidden=true volume = pd.read_csv('gx_volume.csv').drop(columns='Unnamed: 0') volume.head(1) # + hidden=true volume[(volume.country=='country_1') & (volume.brand=='brand_3')] # + [markdown] heading_collapsed=true # ### Subm # + hidden=true subm = pd.read_csv('submission_template.csv') subm # + hidden=true pd.merge(volume, subm,left_on=['country','brand','month_num'], right_on = 
['country','brand','month_num']) # + hidden=true 594/4584 # + [markdown] heading_collapsed=true # ## Full # + hidden=true volume # + hidden=true generic # + hidden=true a = pd.merge(volume, generic,how='left',left_on=['country','brand'], right_on = ['country','brand']) # + hidden=true full = pd.merge(volume, generic,how='left',left_on=['country','brand'], right_on = ['country','brand']) # package full = pd.merge(full, package,how='left',left_on=['country','brand'], right_on = ['country','brand']) full # + hidden=true panel # + hidden=true panel.groupby(['country', 'brand','channel'], as_index=False).agg(['min', 'max','sum','mean','median']) # + hidden=true full # + hidden=true # generic full = pd.merge(volume, generic,how='left',left_on=['country','brand'], right_on = ['country','brand']) # package full = pd.merge(full, package,how='left',left_on=['country','brand'], right_on = ['country','brand']) # panel full = pd.merge(full, panel, how='left',left_on=['country','brand'], right_on = ['country','brand']) full.shape # + hidden=true # + hidden=true # generic full = pd.merge(volume, generic,how='left',left_on=['country','brand'], right_on = ['country','brand']) # package full = pd.merge(full, package,how='left',left_on=['country','brand'], right_on = ['country','brand']) # panel full = pd.merge(full, panel, how='left',left_on=['country','brand'], right_on = ['country','brand']) # therapeutic full = pd.merge(full, therapeutic_area,how='left',left_on=['brand'], right_on = ['brand']) full.head(1) # + hidden=true full.shape # - # ## Adversarial Trainning from catboost import CatBoostClassifier from sklearn.model_selection import train_test_split from category_encoders.m_estimate import MEstimateEncoder adv = pd.read_csv('data/gx_merged.csv') adv = adv.drop(columns=['month_name','volume', #'brand','B','C','D','num_generics' ]) adv['random'] = np.random.random(adv.shape[0]) me = MEstimateEncoder() X = adv.drop(columns=['test']) y = adv.test X = me.fit_transform(X,y) 
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.33, random_state=42) cb = CatBoostClassifier(iterations=100,verbose=0) cb.fit(X_train,y_train) plot_feature_importance(X.columns,cb.get_feature_importance()) roc_auc_score(y_test,cb.predict(X_test)) X.columns # + adv # - # ## Splitting # + df = pd.read_csv('data/gx_merged.csv') # Take out test df = df[df.test==0] # Create our unique index variable df['count_brand'] = df["country"].astype(str) + '-'+ df["brand"] # Unique index lista = df['count_brand'].unique() df['count_brand'].nunique() # - # Get the ones that have not 24months a = pd.DataFrame(df.groupby(["country", "brand"]).month_num.max()).reset_index() a = a[a.month_num < 23] a["count_brand"] = a["country"].astype(str) + "-" + a["brand"] deformed = a.count_brand.unique() buenos = list(set(lista) - set(list(deformed))) split = int(len(buenos)*0.75) split_train_list = buenos[:split] split_valid_list = buenos[split:] len(split_train_list) len(split_valid_list) # + train_split = df[df['count_brand'].isin(split_train_list)] valid_split = df[df['count_brand'].isin(split_valid_list)] train_split = train_split[['country','brand']] valid_split = valid_split[['country','brand']] train_split.shape train_split.drop_duplicates().to_csv('data/train_split_noerror.csv',index=False) valid_split.drop_duplicates().to_csv('data/valid_split.csv',index=False) # - split_train_split_deformed = list(set((split_train_list + list(deformed)))) # + train_split = df[df['count_brand'].isin(split_train_split_deformed)] train_split = train_split[['country','brand']] train_split.drop_duplicates().to_csv('data/train_split.csv',index=False) # - 576/768 len(buenos) pd.read_csv('data/train_split.csv').shape pd.read_csv('data/valid_split.csv').shape pd.read_csv('data/train_split_noerror.csv').shape # ### Split test # + df = pd.read_csv('data/gx_merged.csv') # Take out test df = df[df.test==1] # Create our unique index variable df['count_brand'] = df["country"].astype(str) + '-'+ 
df["brand"] # Unique index lista = df['count_brand'].unique() df['count_brand'].nunique() # - split_test_list = lista # + test_split = df[df['count_brand'].isin(split_test_list)] test_split = test_split[['country','brand']] # - test_split.drop_duplicates().to_csv('data/test_split.csv',index=False)
# Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ''' Numbers: Integers You can use an integer represent numeric data, and more specifically, whole numbers from negative infinity to infinity, like 4, 5, or -1. Float "Float" stands for 'floating point number'. You can use it for rational numbers, usually ending with a decimal figure, such as 1.11 or 3.14. ''' # Integers a = 21 b = 10 c = 0 print('Value of a is ', a) print('Value of b is ', b) print('Value of c is ', c) # Floats x = 4.0 y = 2.0 print('Value of x is ', x) print('Value of y is ', y) # Addition print(x + y) # Subtraction print(x - y) # Multiplication print(x * y) # Returns the quotient print(x / y) # Returns the rounded away from zero (towards negative infinity) print(x // y) # Returns the remainder print(x % y) # Absolute value print(abs(x)) # x to the power y print(x ** y) # + ''' Numbers: Integers You can use an integer represent numeric data, and more specifically, whole numbers from negative infinity to infinity, like 4, 5, or -1. Float ')Float') stands for 'floating point number'. You can use it for rational numbers, usually ending with a decimal figure, such as 1.11 or 3.14. 
''' # Integers a = 21 b = 10 c = 0 print('Value of a is ', a) print('Value of b is ', b) print('Value of c is ', c) # Comparison Operators print('\nComparison Operators\n') if ( a == b ): print('a is equal to b') else: print('a is not equal to b') if ( a != b ): print('a is not equal to b') else: print('a is equal to b') ''' Does not compatible with Python 3 if ( a <> b ): print('a is not equal to b') else: print('a is equal to b') ''' if ( a < b ): print('a is less than b') else: print('a is not less than b') if ( a > b ): print('a is greater than b') else: print('a is not greater than b') a = 5; b = 20; print('Value of a is ', a) print('Value of b is ', b) if ( a <= b ): print(' is either less than or equal to b') else: print('a is neither less than nor equal to b') if ( b >= a ): print('b is either greater than or equal to b') else: print('b is neither greater than nor equal to b') # + ''' Arithmetic Operators ''' # Values a = 21 b = 10 c = 0 print('Value of a is ', a) print('Value of b is ', b) print('Value of c is ', c) print('\n Arithmetic Operators \n') c = a + b print('Value of a + b is ', c ) c = a - b print('Value of a - b is ', c ) c = a * b print('Value of a * b is ', c ) c = a / b print('Value of a / b is ', c ) c = a % b print('Value of a % b is ', c ) a = 2 b = 3 c = a**b print('Value of a**b is ', c ) a = 10 b = 5 c = a//b print('Value of a//b is ', c ) # + # Assignment Operators print('Value of a is ', a) print('Value of b is ', b) print('Value of c is ', c) print('\n Assignment Operators \n') c = a + b print('Value of a + b is ', c) c += a print('Value of c += a is ', c) c *= a print('Value of c *= a is ', c) c /= a print('Value of c /= a is ', c) c = 2 print('Value of c is ', c) c %= a print('Value of c %= a is ', c) c **= a print('Value of c **= a is ', c) c //= a print('Value of c //= a is ', c) # + ''' Logical Operators ''' x = False if not x : print("condition satisfied") else: print("condition not satisfied") if 1 < 2 and 4 > 2: 
print("condition satisfied") if 1 > 2 and 4 < 10: print("condition not satisfied") if 4 < 10 or 4 < 2: print("condition satisfied") if not (1 > 2 and 4 < 10): print("condition satisfied") # + ''' Bitwise Operators ''' # Values a = 60 # 60 = 0011 1100 b = 13 # 13 = 0000 1101 c = 0 c = a & b; # 12 = 0000 1100 print('Value of a & b is ', c ) c = a | b; # 61 = 0011 1101 print('Value of a | b is ', c ) c = a ^ b; # 49 = 0011 0001 print('Value of a ^ b is ', c ) c = ~a; # -61 = 1100 0011 print('Value of ~a is ', c ) c = a << 2; # 240 = 1111 0000 print('Value of a << 2 is ', c ) c = a >> 2; # 15 = 0000 1111 print('Value of a >> 2 is ', c ) # + ''' Membership Operators ''' # Values a = 10 b = 20 list = [1, 2, 3, 4, 5 ]; # Discuss more in detain further tutorials if ( a in list ): print('a is available in the given list') else: print('a is not available in the given list') if ( b not in list ): print('b is not available in the given list') else: print('b is available in the given list') a = 2 if ( a in list ): print('a is available in the given list') else: print('a is not available in the given list') # + ''' Identity Operators ''' # Values a = 20 b = 20 if ( a is b ): print('a and b have same identity') else: print('a and b do not have same identity') if ( id(a) == id(b) ): print('a and b have same identity') else: print('a and b do not have same identity') b = 30 if ( a is b ): print('a and b have same identity') else: print('a and b do not have same identity') if ( a is not b ): print('a and b do not have same identity') else: print('a and b have same identity') # + ''' Operators Precedence Operator Description ** Exponentiation (raise to the power) ~ + - Complement, unary plus and minus (method names for the last two are +@ and -@) * / % // Multiply, divide, modulo and floor division + - Addition and subtraction >> << Right and left bitwise shift & Bitwise 'AND' ^ | Bitwise exclusive `OR' and regular `OR' <= < > >= Comparison operators <> == != Equality operators = %= 
/= //= -= += *= **= Assignment operators is is not Identity operators in not in Membership operators not or and Logical operators ''' # Values a = 20 b = 10 c = 15 d = 5 e = 0 e = (a + b) * c / d #( 30 * 15 ) / 5 print('Value of (a + b) * c / d is ', e) e = ((a + b) * c) / d # (30 * 15 ) / 5 print('Value of ((a + b) * c) / d is ', e) e = (a + b) * (c / d); # (30) * (15/5) print('Value of (a + b) * (c / d) is ', e) e = a + (b * c) / d; # 20 + (150/5) print('Value of a + (b * c) / d is ', e)
# 01 Introduction to Python/02_number_ops.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Machine learning to predict age from rs-fmri # # Goals: # * Extract data from several rs-fmri images # * Use that data as features in a machine learning model to predict age. # * Integrate what we've learned in the previous lecture to build a minimally biased model and test it on a left out sample. # # Link to slides: https://github.com/neurodatascience/course-materials-2020/blob/master/lectures/14-may/03-intro-to-machine-learning/IntroML_BrainHackSchool.pdf # + slideshow={"slide_type": "fragment"} # %matplotlib inline # + [markdown] slideshow={"slide_type": "slide"} # ## Load the data # # <img src="Imgs/SampFeat.png" alt="terms" width="300"/> # # + slideshow={"slide_type": "slide"} # Lets fetch the data! from nilearn import datasets from nilearn import image from nilearn import plotting import pylab as plt import os import numpy as np import nibabel as nib from nibabel.testing import data_path #data = datasets.fetch_development_fmri() # - data_path = "E:/Analyse_fmri/Analyse/" subjects = os.listdir(data_path) subfile = ['Ana','Nback','Stroop'] #aller sur os.path.join #f' path_mfp = 'E:/Analyse_fmri/Results_2019/Nback/Con_T1_mfp/Old_SCAN_Nback' # + x = [] con4 = [] y = [] for file in os.listdir(path_mfp): if file.endswith("T1_con_0004.nii"): x.append(file) for subjects in x: y = path_mfp+'/'+subjects con4.append(y) # - con4 #def convert(list): # return tuple(list) #Driver function #print(convert(con4)) # + [markdown] slideshow={"slide_type": "fragment"} # How many individual subjects do we have? 
# + slideshow={"slide_type": "fragment"} len(con4) # + [markdown] slideshow={"slide_type": "slide"} # ## Extract features # # ![feat_xtrct](https://ars.els-cdn.com/content/image/1-s2.0-S1053811919301594-gr1.jpg) # + [markdown] slideshow={"slide_type": "subslide"} # In order to do our machine learning, we will need to extract feature from our rs-fmri images. Specifically: # * Extract signals from a brain parcellation # * Compute a correlation matrix, representing regional coactivation between regions. # # We will practice on one subject first, then we'll extract data for all subjects # + [markdown] slideshow={"slide_type": "slide"} # #### Retrieve the atlas for extracting features and an example subject # + [markdown] slideshow={"slide_type": "subslide"} # Since we're using rs-fmri data, it makes sense to use an atlas defined using rs-fmri data # # + [markdown] slideshow={"slide_type": "skip"} # This paper has many excellent insights about what kind of atlas to use for an rs-fmri machine learning task. See in particular Figure 5. # https://www.sciencedirect.com/science/article/pii/S1053811919301594?via%3Dihub # + [markdown] slideshow={"slide_type": "fragment"} # Let's use the MIST atlas (Urchs et al. 2019) # * Created here in Montreal using the BASC method (Bellec et al., 2015). # * Has multiple resolutions, for larger networks or finer-grained ROIs. 
# # Let's use a 64-ROI atlas to allow some detail, but to ultimately keep our connectivity matrices manageable # # Here is a link to the MIST paper: https://mniopenresearch.org/articles/1-3 # + slideshow={"slide_type": "slide"} parcellations = datasets.fetch_atlas_basc_multiscale_2015(version='sym') atlas_filename = parcellations.scale064 print('Atlas ROIs are located in nifti image (4D) at: %s' % atlas_filename) # + [markdown] slideshow={"slide_type": "fragment"} # Let's have a look at that atlas # + slideshow={"slide_type": "subslide"} plotting.plot_roi(atlas_filename, draw_cross=False) # + [markdown] slideshow={"slide_type": "slide"} # Great, let's load an example 4D fmri time-series for one subject # + slideshow={"slide_type": "fragment"} fmri_filenames = con4[0] print(fmri_filenames) # + [markdown] slideshow={"slide_type": "subslide"} # Let's have a look at the image! Because it is a 4D image, we can only look at one slice at a time. Or, better yet, let's look at an average image! # + slideshow={"slide_type": "subslide"} from nilearn import image averaged_Img = image.mean_img(fmri_filenames) plotting.plot_stat_map(averaged_Img) # - plotting.view_img(averaged_Img) example_file = os.path.join(data_path, 'E:/Analyse_fmri/Results_2019/Nback/Con_T1_mfp/Old_SCAN_Nback/CA281_T1_con_0004.nii') example_file img = nib.load(example_file) print(img) data = img.get_fdata() data.shape affine = img.affine plt.imshow(data[:,:, data.shape[2] // 2].T, cmap='Greys_r') print(data.shape) img.dataobj img.affine # + [markdown] slideshow={"slide_type": "slide"} # #### Extract signals on a parcellation defined by labels # Using the NiftiLabelsMasker # # So we've loaded our atlas and 4D data for a single subject. Let's practice extracting features! 
#

# + slideshow={"slide_type": "subslide"}
from nilearn.input_data import NiftiLabelsMasker

# NiftiLabelsMasker turns a 4D image into one time series per atlas label.
masker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True,
                           memory='nilearn_cache', verbose=1)

# Here we go from nifti files to the signal time series in a
# numpy array.
# FIX(review): the original passed `confounds=img` (a loaded Nifti1Image).
# The `confounds` argument must be a file name, DataFrame, or numpy array of
# confound regressors (e.g. a *_confounds.tsv), not an image, so the call
# raised an error. Extract without confound regression until a proper
# confounds table for this subject is available.
time_series = masker.fit_transform(fmri_filenames)
# -

# + [markdown] slideshow={"slide_type": "subslide"}
# So what did we just create here?

# + slideshow={"slide_type": "fragment"}
type(time_series)

# + slideshow={"slide_type": "fragment"}
time_series.shape

# + [markdown] slideshow={"slide_type": "subslide"}
# What are these "confounds" and how are they used?

# + slideshow={"slide_type": "subslide"}
import pandas
# FIX(review): `con4` is the list of functional NIfTI paths, so
# `pandas.read_csv(con4, sep='\t')` raised an error. To inspect a confounds
# table, point read_csv at a real confounds TSV for one subject, e.g.:
# conf_df = pandas.read_csv('path/to/subject_confounds.tsv', sep='\t')
# conf_df.head()
# conf_df.shape

# + [markdown] slideshow={"slide_type": "slide"}
# #### Compute and display a correlation matrix
#

# + slideshow={"slide_type": "fragment"}
from nilearn.connectome import ConnectivityMeasure

# region x region Pearson correlation of the extracted time series
correlation_measure = ConnectivityMeasure(kind='correlation')
correlation_matrix = correlation_measure.fit_transform([time_series])[0]
correlation_matrix.shape

# + [markdown] slideshow={"slide_type": "subslide"}
# Plot the correlation matrix

# + rise={"scroll": true} slideshow={"slide_type": "subslide"}
import numpy as np

# Mask the main diagonal for visualization:
np.fill_diagonal(correlation_matrix, 0)

# The labels we have start with the background (0), hence we skip the
# first label
plotting.plot_matrix(correlation_matrix, figure=(10, 8),
                     labels=range(time_series.shape[-1]),
                     vmax=0.8, vmin=-0.8, reorder=False)

# matrices are ordered for block-like representation

# + [markdown] slideshow={"slide_type": "slide"}
# #### Extract features from the whole dataset
#
# Here, we are going to use a for loop to iterate through each image and use the same techniques we learned above to extract rs-fmri connectivity features from every subject.
#
#

# + slideshow={"slide_type": "slide"}
# Here is a really simple for loop

for i in range(10):
    print('the number is', i)

# + slideshow={"slide_type": "subslide"}
container = []

for i in range(10):
    container.append(i)

container

# + [markdown] slideshow={"slide_type": "subslide"}
# Now lets construct a more complicated loop to do what we want

# + [markdown] slideshow={"slide_type": "subslide"}
# First we do some things we don't need to do in the loop. Let's reload our atlas, and re-initiate our masker and correlation_measure

# + slideshow={"slide_type": "fragment"}
from nilearn.input_data import NiftiLabelsMasker
from nilearn.connectome import ConnectivityMeasure
from nilearn import datasets

# load atlas
multiscale = datasets.fetch_atlas_basc_multiscale_2015()
atlas_filename = multiscale.scale064

# initialize masker (change verbosity)
masker = NiftiLabelsMasker(labels_img=atlas_filename, standardize=True,
                           memory='nilearn_cache', verbose=0)

# initialize correlation measure, set to vectorize so each subject yields a
# flat feature vector (upper triangle, diagonal discarded)
correlation_measure = ConnectivityMeasure(kind='correlation', vectorize=True,
                                          discard_diagonal=True)

# + [markdown] slideshow={"slide_type": "subslide"}
# Okay -- now that we have that taken care of, let's run our big loop!

# + [markdown] slideshow={"slide_type": "fragment"}
# **NOTE**: On a laptop, this might take a few minutes.
# + slideshow={"slide_type": "subslide"}
all_features = [] # here is where we will put the data (a container)

# NOTE(review): `data` here must be the fetched dataset handle exposing
# .func (functional images) and .confounds (one confounds table per
# subject) — confirm it has not been shadowed by an array elsewhere in the
# notebook.
for i,sub in enumerate(data.func):
    # extract the timeseries from the ROIs in the atlas
    time_series = masker.fit_transform(sub, confounds=data.confounds[i])
    # create a region x region correlation matrix (vectorized to 1D)
    correlation_matrix = correlation_measure.fit_transform([time_series])[0]
    # add to our container
    all_features.append(correlation_matrix)
    # keep track of status
    print('finished %s of %s'%(i+1,len(data.func)))

# + slideshow={"slide_type": "subslide"}
# Let's save the data to disk
import numpy as np

#np.savez_compressed('MAIN_BASC064_subsamp_features',a = all_features)

# + [markdown] slideshow={"slide_type": "subslide"}
# In case you do not want to run the full loop on your computer, you can load the output of the loop here!

# + slideshow={"slide_type": "fragment"}
feat_file = 'MAIN_BASC064_subsamp_features.npz'
X_features = np.load(feat_file)['a']

# + slideshow={"slide_type": "fragment"}
X_features.shape

# + [markdown] slideshow={"slide_type": "subslide"}
# Okay so we've got our features.

# + [markdown] slideshow={"slide_type": "fragment"}
# We can visualize our feature matrix

# + slideshow={"slide_type": "subslide"}
import matplotlib.pyplot as plt

# one row per subject, one column per connectivity feature
plt.imshow(X_features, aspect='auto')
plt.colorbar()
plt.title('feature matrix')
plt.xlabel('features')
plt.ylabel('subjects')

# + [markdown] slideshow={"slide_type": "slide"}
# <img src="Imgs/SampFeat.png" alt="terms" width="300"/>

# + [markdown] slideshow={"slide_type": "slide"}
# ## Get Y (our target) and assess its distribution

# + slideshow={"slide_type": "subslide"}
# Let's load the phenotype data
import pandas

pheno = pandas.DataFrame(data.phenotypic)
pheno.head()

# + [markdown] slideshow={"slide_type": "fragment"}
# Looks like there is a column labeling age. Let's capture it in a variable

# + slideshow={"slide_type": "subslide"}
y_age = pheno['Age']  # prediction target: chronological age

# + [markdown] slideshow={"slide_type": "fragment"}
# Maybe we should have a look at the distribution of our target variable

# + slideshow={"slide_type": "subslide"}
import matplotlib.pyplot as plt
import seaborn as sns

sns.distplot(y_age)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Prepare data for machine learning
#
# Here, we will define a "training sample" where we can play around with our models.
#
# We will also set aside a "validation" sample that we will not touch until the end

# + [markdown] slideshow={"slide_type": "subslide"}
# We want to be sure that our training and test sample are matched! We can do that with a "stratified split".
#
# This dataset has a variable indicating AgeGroup. We can use that to make sure our training and testing sets are balanced!

# + slideshow={"slide_type": "fragment"}
age_class = pheno['AgeGroup']
age_class.value_counts()

# + slideshow={"slide_type": "slide"}
from sklearn.model_selection import train_test_split

# Split the sample to training/validation with a 60/40 ratio, and
# stratify by age class, and also shuffle the data.

X_train, X_val, y_train, y_val = train_test_split(
                                                  X_features, # x
                                                  y_age, # y
                                                  test_size = 0.4, # 60%/40% split
                                                  shuffle = True, # shuffle dataset
                                                                  # before splitting
                                                  stratify = age_class, # keep
                                                                        # distribution
                                                                        # of ageclass
                                                                        # consistent
                                                                        # betw. train
                                                                        # & test sets.
                                                  random_state = 123 # same shuffle each
                                                                     # time
                                                  )

# print the size of our training and test groups
print('training:', len(X_train), 'testing:', len(X_val))

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's visualize the distributions to be sure they are matched

# + slideshow={"slide_type": "subslide"}
sns.distplot(y_train,label='train')
sns.distplot(y_val,label='test')
plt.legend()

# + [markdown] slideshow={"slide_type": "slide"}
# ## Run your first model!
#
# Machine learning can get pretty fancy pretty quickly.
We'll start with a fairly standard regression model called a Support Vector Regressor (SVR).
#
# While this may seem unambitious, simple models can be very robust. And we probably don't have enough data to create more complex models (but we can try later).

# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="Imgs/regr.jpg" alt="terms" width="700"/>

# + [markdown] slideshow={"slide_type": "skip"}
# For more information, see this excellent resource: https://hal.inria.fr/hal-01824205

# + [markdown] slideshow={"slide_type": "slide"}
# Let's fit our first model!

# + slideshow={"slide_type": "fragment"}
from sklearn.svm import SVR

l_svr = SVR(kernel='linear') # define the model

l_svr.fit(X_train, y_train) # fit the model

# + [markdown] slideshow={"slide_type": "fragment"}
# Well... that was easy. Let's see how well the model learned the data!

# + [markdown] slideshow={"slide_type": "slide"}
#
#
#
# <img src="Imgs/modval.png" alt="terms" width="800"/>

# + slideshow={"slide_type": "subslide"}
# predict the training data based on the model
y_pred = l_svr.predict(X_train)

# calculate the model accuracy (R^2 on the same data the model was fit to)
acc = l_svr.score(X_train, y_train)

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's view our results and plot them all at once!

# + slideshow={"slide_type": "fragment"}
# print results
print('accuracy (R2)', acc)

sns.regplot(y_pred,y_train)
plt.xlabel('Predicted Age')

# + [markdown] slideshow={"slide_type": "subslide"}
# HOLY COW! Machine learning is amazing!!! Almost a perfect fit!

# + [markdown] slideshow={"slide_type": "fragment"}
# ...which means there's something wrong. What's the problem here?

# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="Imgs/regr.jpg" alt="terms" width="700"/>

# + slideshow={"slide_type": "slide"}
from sklearn.model_selection import train_test_split

# Split the sample to training/test with a 75/25 ratio, and
# stratify by age class, and also shuffle the data.

age_class2 = pheno.loc[y_train.index,'AgeGroup']

X_train2, X_test, y_train2, y_test = train_test_split(
                                                      X_train, # x
                                                      y_train, # y
                                                      test_size = 0.25, # 75%/25% split
                                                      shuffle = True, # shuffle dataset
                                                                      # before splitting
                                                      stratify = age_class2, # keep
                                                                             # distribution
                                                                             # of ageclass
                                                                             # consistent
                                                                             # betw. train
                                                                             # & test sets.
                                                      random_state = 123 # same shuffle each
                                                                         # time
                                                      )

# print the size of our training and test groups
print('training:', len(X_train2), 'testing:', len(X_test))

# + slideshow={"slide_type": "subslide"}
from sklearn.metrics import mean_absolute_error

# fit model just to training data
l_svr.fit(X_train2,y_train2)

# predict the *test* data based on the model trained on X_train2
y_pred = l_svr.predict(X_test)

# calculate the model accuracy (R^2) and mean absolute error on held-out data
acc = l_svr.score(X_test, y_test)
mae = mean_absolute_error(y_true=y_test,y_pred=y_pred)

# + slideshow={"slide_type": "subslide"}
# print results
print('accuracy (R2) = ', acc)
print('MAE = ',mae)

sns.regplot(y_pred,y_test)
plt.xlabel('Predicted Age')

# + [markdown] slideshow={"slide_type": "fragment"}
# Not perfect, but as predicting with unseen data goes, not too bad! Especially with a training sample of "only" 69 subjects. But we can do better!
# + [markdown] slideshow={"slide_type": "slide"} # For example, we can increase the size our training set while simultaneously reducing bias by instead using 10-fold cross-validation # + [markdown] slideshow={"slide_type": "subslide"} # # <img src="Imgs/KCV2.png" alt="terms" width="500"/> # + slideshow={"slide_type": "subslide"} from sklearn.model_selection import cross_val_predict, cross_val_score # predict y_pred = cross_val_predict(l_svr, X_train, y_train, cv=10) # scores acc = cross_val_score(l_svr, X_train, y_train, cv=10) mae = cross_val_score(l_svr, X_train, y_train, cv=10, scoring='neg_mean_absolute_error') # + [markdown] slideshow={"slide_type": "subslide"} # We can look at the accuracy of the predictions for each fold of the cross-validation # + slideshow={"slide_type": "fragment"} for i in range(10): print('Fold {} -- Acc = {}, MAE = {}'.format(i, acc[i],-mae[i])) # + [markdown] slideshow={"slide_type": "subslide"} # We can also look at the overall accuracy of the model # + slideshow={"slide_type": "subslide"} from sklearn.metrics import r2_score overall_acc = r2_score(y_train, y_pred) overall_mae = mean_absolute_error(y_train,y_pred) print('R2:',overall_acc) print('MAE:',overall_mae) sns.regplot(y_pred, y_train) plt.xlabel('Predicted Age') # + [markdown] slideshow={"slide_type": "subslide"} # Not too bad at all! # # But more importantly, this is a more accurate estimation of our model's predictive efficacy. # # Our sample size is larger and this is based on several rounds of prediction of unseen data. # + [markdown] slideshow={"slide_type": "fragment"} # For example, we can now see that the effect is being driven by the model's successful parsing of adults vs. children, but is not performing so well within the adult or children group. 
This was not evident during our previous iteration of the model # + [markdown] slideshow={"slide_type": "slide"} # ## Tweak your model # + [markdown] slideshow={"slide_type": "fragment"} # It's very important to learn when and where its appropriate to "tweak" your model. # + [markdown] slideshow={"slide_type": "subslide"} # Since we have done all of the previous analysis in our training data, it's fine to try out different models. # # But we **absolutely cannot** "test" it on our left out data. If we do, we are in great danger of overfitting. # + [markdown] slideshow={"slide_type": "fragment"} # It is not uncommon to try other models, or tweak hyperparameters. In this case, due to our relatively small sample size, we are probably not powered sufficiently to do so, and we would once again risk overfitting. However, for the sake of demonstration, we will do some tweaking. # + [markdown] slideshow={"slide_type": "subslide"} # # <img src="Imgs/KCV2.png" alt="terms" width="500"/> # # + [markdown] slideshow={"slide_type": "subslide"} # We will try a few different examples: # * Normalizing our target data # * Tweaking our hyperparameters # * Trying a more complicated model # * Feature selection # + [markdown] slideshow={"slide_type": "slide"} # #### Normalize the target data # + slideshow={"slide_type": "subslide"} # Create a log transformer function and log transform Y (age) from sklearn.preprocessing import FunctionTransformer log_transformer = FunctionTransformer(func = np.log, validate=True) log_transformer.fit(y_train.values.reshape(-1,1)) y_train_log = log_transformer.transform(y_train.values.reshape(-1,1))[:,0] sns.distplot(y_train_log) plt.title("Log-Transformed Age") # + [markdown] slideshow={"slide_type": "subslide"} # Now let's go ahead and cross-validate our model once again with this new log-transformed target # + slideshow={"slide_type": "subslide"} # re-intialize the model l_svr = SVR(kernel='linear') # predict y_pred = cross_val_predict(l_svr, X_train, 
y_train_log, cv=10) # scores acc = r2_score(y_train_log, y_pred) mae = mean_absolute_error(y_train_log,y_pred) print('R2:',acc) print('MAE:',mae) sns.regplot(y_pred, y_train_log) plt.xlabel('Predicted Log Age') plt.ylabel('Log Age') # + [markdown] slideshow={"slide_type": "subslide"} # Seems like a definite improvement, right? I think we can agree on that. # # But we can't forget about interpretability? The MAE is much less interpretable now. # + [markdown] slideshow={"slide_type": "slide"} # #### Tweak the hyperparameters # + [markdown] slideshow={"slide_type": "fragment"} # Many machine learning algorithms have hyperparameters that can be "tuned" to optimize model fitting. # # Careful parameter tuning can really improve a model, but haphazard tuning will often lead to overfitting. # + [markdown] slideshow={"slide_type": "subslide"} # Our SVR model has multiple hyperparameters. Let's explore some approaches for tuning them # + slideshow={"slide_type": "fragment"} # SVR? # + [markdown] slideshow={"slide_type": "subslide"} # One way is to plot a "Validation Curve" -- this will let us view changes in training and validation accuracy of a model as we shift its hyperparameters. # # We can do this easily with sklearn. # + slideshow={"slide_type": "subslide"} from sklearn.model_selection import validation_curve C_range = 10. 
** np.arange(-3, 8) # A range of different values for C train_scores, valid_scores = validation_curve(l_svr, X_train, y_train_log, param_name= "C", param_range = C_range, cv=10, scoring='neg_mean_squared_error') # + slideshow={"slide_type": "subslide"} # A bit of pandas magic to prepare the data for a seaborn plot tScores = pandas.DataFrame(train_scores).stack().reset_index() tScores.columns = ['C','Fold','Score'] tScores.loc[:,'Type'] = ['Train' for x in range(len(tScores))] vScores = pandas.DataFrame(valid_scores).stack().reset_index() vScores.columns = ['C','Fold','Score'] vScores.loc[:,'Type'] = ['Validate' for x in range(len(vScores))] ValCurves = pandas.concat([tScores,vScores]).reset_index(drop=True) ValCurves.head() # + slideshow={"slide_type": "subslide"} # And plot! g = sns.catplot(x='C',y='Score',hue='Type',data=ValCurves,kind='point') plt.xticks(range(10)) g.set_xticklabels(C_range, rotation=90) # + [markdown] slideshow={"slide_type": "subslide"} # It looks like accuracy is better for higher values of C, and plateaus somewhere between 0.1 and 1. # # The default setting is C=1, so it looks like we can't really improve much by changing C. # + [markdown] slideshow={"slide_type": "fragment"} # But our SVR model actually has two hyperparameters, C and epsilon. Perhaps there is an optimal combination of settings for these two parameters. # + [markdown] slideshow={"slide_type": "subslide"} # We can explore that somewhat quickly with a grid search, which is once again easily achieved with sklearn. # # Because we are fitting the model multiple times witih cross-validation, this will take some time # + slideshow={"slide_type": "subslide"} from sklearn.model_selection import GridSearchCV C_range = 10. ** np.arange(-3, 8) epsilon_range = 10. 
** np.arange(-3, 8) param_grid = dict(epsilon=epsilon_range, C=C_range) grid = GridSearchCV(l_svr, param_grid=param_grid, cv=10) grid.fit(X_train, y_train_log) # + [markdown] slideshow={"slide_type": "subslide"} # Now that the grid search has completed, let's find out what was the "best" parameter combination # + slideshow={"slide_type": "fragment"} print(grid.best_params_) # + [markdown] slideshow={"slide_type": "fragment"} # And if redo our cross-validation with this parameter set? # + slideshow={"slide_type": "subslide"} y_pred = cross_val_predict(SVR(kernel='linear', C=grid.best_params_['C'], epsilon=grid.best_params_['epsilon'], gamma='auto'), X_train, y_train_log, cv=10) # scores acc = r2_score(y_train_log, y_pred) mae = mean_absolute_error(y_train_log,y_pred) print('R2:',acc) print('MAE:',mae) sns.regplot(y_pred, y_train_log) plt.xlabel('Predicted Log Age') plt.ylabel('Log Age') # + [markdown] slideshow={"slide_type": "subslide"} # Perhaps unsurprisingly, the model fit is only very slightly improved from what we had with our defaults. There's a reason they are defaults ;-) # # Grid search can be a powerful and useful tool. But can you think of a way that, if not properly utilized, it could lead to overfitting? Could it be happening here? # + [markdown] slideshow={"slide_type": "skip"} # You can find a nice set of tutorials with links to very helpful content regarding how to tune hyperparameters while being aware of over- and under-fitting here: # # https://scikit-learn.org/stable/modules/learning_curve.html # + [markdown] slideshow={"slide_type": "slide"} # #### Trying a more complicated model # + [markdown] slideshow={"slide_type": "fragment"} # In principle, there is no real reason to do this. Perhaps one could make an argument for quadratic relationship with age, but we probably don't have enough subjects to learn a complicated non-linear model. # # But for the sake of demonstration, we can give it a shot. 
# + [markdown] slideshow={"slide_type": "subslide"}
#
#
# We'll use a validation curve to see the result of our model if, instead of fitting a linear model, we instead try to fit a 2nd, 3rd, ... 8th order polynomial.

# + slideshow={"slide_type": "fragment"}
# validation_curve?

# + slideshow={"slide_type": "subslide"}
from sklearn.model_selection import validation_curve

degree_range = list(range(1,8)) # the polynomial degrees to try

train_scores, valid_scores = validation_curve(SVR(kernel='poly',
                                                  gamma='scale'
                                                 ),
                                              X=X_train, y=y_train_log,
                                              param_name= "degree",
                                              param_range = degree_range,
                                              cv=10,
                                              scoring='neg_mean_squared_error')

# + slideshow={"slide_type": "subslide"}
# A bit of pandas magic to prepare the data for a seaborn plot
tScores = pandas.DataFrame(train_scores).stack().reset_index()
tScores.columns = ['Degree','Fold','Score']
tScores.loc[:,'Type'] = ['Train' for x in range(len(tScores))]

vScores = pandas.DataFrame(valid_scores).stack().reset_index()
vScores.columns = ['Degree','Fold','Score']
vScores.loc[:,'Type'] = ['Validate' for x in range(len(vScores))]

ValCurves = pandas.concat([tScores,vScores]).reset_index(drop=True)
ValCurves.head()

# + slideshow={"slide_type": "subslide"}
# And plot!
g = sns.catplot(x='Degree',y='Score',hue='Type',data=ValCurves,kind='point')
plt.xticks(range(7))
g.set_xticklabels(degree_range, rotation=90)

# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="Imgs/BV.jpg" alt="terms" width="800"/>

# + [markdown] slideshow={"slide_type": "subslide"}
# It appears that we cannot improve our model by increasing the complexity of the fit. If one looked only at the training data, one might surmise that a higher order fit could be a slightly better model.
#
# But that improvement does not generalize to the validation data.

# + [markdown] slideshow={"slide_type": "slide"}
# #### Feature selection

# + [markdown] slideshow={"slide_type": "fragment"}
# Right now, we have 2016 features. Are all of those really going to contribute to the model stably?

# + [markdown] slideshow={"slide_type": "subslide"}
# Intuitively, models tend to perform better when there are fewer, more important features than when there are many, less important features. The tough part is figuring out which features are useful or important.

# + [markdown] slideshow={"slide_type": "fragment"}
# Here we will quickly try a basic feature selection strategy

# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="Imgs/FeatSel.png" alt="terms" width="400"/>

# + [markdown] slideshow={"slide_type": "subslide"}
# The SelectPercentile() function will select the top X% of features based on univariate tests. This is a way of identifying theoretically more useful features.

# + [markdown] slideshow={"slide_type": "fragment"}
# But remember, significance != prediction, as demonstrated in this figure from Bzdok et al., 2018 *bioRxiv*
#
# ![Bzdok2018](https://www.biorxiv.org/content/biorxiv/early/2018/05/21/327437/F1.large.jpg?width=800&height=600&carousel=1)

# + [markdown] slideshow={"slide_type": "fragment"}
# We are also in danger of overfitting here. For starters, if we want to test this with 10-fold cross-validation, we will need to do a separate feature selection within each fold!
#
# That means we'll need to do the cross-validation manually instead of using cross_val_predict().

# + slideshow={"slide_type": "subslide"}
from sklearn.feature_selection import SelectPercentile, f_regression
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline

# Build a tiny pipeline that does feature selection (top 20% of features),
# and then prediction with our linear svr model.
model = Pipeline([
    ('feature_selection',SelectPercentile(f_regression,percentile=20)),
    ('prediction', l_svr)
                  ])

y_pred = [] # a container to catch the predictions from each fold
y_index = [] # just in case, the index for each prediction

# First we create 10 splits of the data
skf = KFold(n_splits=10, shuffle=True, random_state=123)

# For each split, assemble the train and test samples.
# Feature selection happens inside the pipeline, so it is re-fit on the
# training portion of every fold — no information leaks from the test fold.
for tr_ind, te_ind in skf.split(X_train):
    X_tr = X_train[tr_ind]
    y_tr = y_train_log[tr_ind]
    X_te = X_train[te_ind]
    y_index += list(te_ind) # store the index of samples to predict

    # and run our pipeline
    model.fit(X_tr, y_tr) # fit the data to the model using our mini pipeline
    predictions = model.predict(X_te).tolist() # get the predictions for this fold
    y_pred += predictions # add them to the list of predictions

# + [markdown] slideshow={"slide_type": "subslide"}
# Alrighty, let's see if only using the top 20% of features improves the model at all...

# + slideshow={"slide_type": "subslide"}
# score the out-of-fold predictions against the matching targets
acc = r2_score(y_train_log[y_index], y_pred)
mae = mean_absolute_error(y_train_log[y_index],y_pred)

print('R2:',acc)
print('MAE:',mae)

sns.regplot(np.array(y_pred), y_train_log[y_index])
plt.xlabel('Predicted Log Age')
plt.ylabel('Log Age')

# + [markdown] slideshow={"slide_type": "subslide"}
# In this case, the transformation *did* result in a slight improvement. Will this generalize to left out data?

# + [markdown] slideshow={"slide_type": "skip"}
# See here for an explanation of different feature selection options and how to implement them in sklearn: https://scikit-learn.org/stable/modules/feature_selection.html
#
# And here is a thoughtful tutorial covering feature selection for novel machine learners: https://www.datacamp.com/community/tutorials/feature-selection-python

# + [markdown] slideshow={"slide_type": "slide"}
# So there you have it. We've tried many different strategies, but most of our "tweaks" haven't really led to improvements in the model. This is not always the case, but it is not uncommon.
#
# Can you think of some reasons why?

# + [markdown] slideshow={"slide_type": "slide"}
# Moving on to our validation data, we probably should just stick to a basic model, though predicting log age might be a good idea!

# + [markdown] slideshow={"slide_type": "slide"}
# ## Can our model predict age in completely un-seen data?

# + [markdown] slideshow={"slide_type": "fragment"}
# Now that we've fit a model we think has possibly learned how to decode age based on rs-fmri signal, let's put it to the test.
#
# We will train our model on all of the training data, and try to predict the age of the subjects we left out at the beginning of this section.

# + [markdown] slideshow={"slide_type": "subslide"}
# Because we performed a log transformation on our training data, we will need to transform our testing data using the *same information!* But that's easy because we stored our transformation in an object!
#

# + slideshow={"slide_type": "fragment"}
# Notice how we reuse the log transformer that was fit to y_train and apply
# it to y_val, rather than fitting a new transformer to the validation data
# (the original comment mentioned a "Scaler" fit to X_train, but the object
# used here is the FunctionTransformer fit to the training targets)

y_val_log = log_transformer.transform(y_val.values.reshape(-1,1))[:,0]

# + [markdown] slideshow={"slide_type": "subslide"}
# And now for the moment of truth!
#
# No cross-validation needed here. We simply fit the model with the training data and use it to predict the testing data
#
# I'm so nervous.
Let's just do it all in one cell # + slideshow={"slide_type": "subslide"} l_svr = SVR(kernel='linear') # define the model l_svr.fit(X_train, y_train_log) # fit to training data y_pred = l_svr.predict(X_val) # classify age class using testing data acc = l_svr.score(X_val, y_val_log) # get accuracy (r2) mae = mean_absolute_error(y_val_log, y_pred) # get mae # print results print('accuracy (r2) =', acc) print('mae = ',mae) # plot results sns.regplot(y_pred, y_val_log) plt.xlabel('Predicted Log Age') plt.ylabel('Log Age') # + [markdown] slideshow={"slide_type": "subslide"} # ***Wow!!*** Congratulations. You just trained a machine learning model that used real rs-fmri data to predict the age of real humans. # + [markdown] slideshow={"slide_type": "fragment"} # The proper thing to do at this point would be to repeat the train-validation split multiple times. This will ensure the results are not specific to this validation set, and will give you some confidence intervals around your results. # + [markdown] slideshow={"slide_type": "slide"} # As an assignment, you can give that a try below. Create 10 different splits of the entire dataset, fit the model and get your predictions. Then, plot the range of predictions. # # + slideshow={"slide_type": "skip"} # SPACE FOR YOUR ASSIGNMENT # + [markdown] slideshow={"slide_type": "slide"} # So, it seems like something in this data does seem to be systematically related to age ... but you might be curious what what those features are? # + [markdown] slideshow={"slide_type": "fragment"} # #### Interpreting model feature importances # + [markdown] slideshow={"slide_type": "subslide"} # Interpreting the feature importances of a machine learning model is a real can of worms. This is an area of active research. Unfortunately, it's hard to trust the feature importance of some models. 
# + [markdown] slideshow={"slide_type": "fragment"} # <img src="Imgs/SvML.jpg" alt="terms" width="800"/> # + [markdown] slideshow={"slide_type": "skip"} # # You can find a whole tutorial on this subject here: # http://gael-varoquaux.info/interpreting_ml_tuto/index.html # + [markdown] slideshow={"slide_type": "subslide"} # For now, we'll just eschew better judgement and take a look at our feature importances. # # While we can't ascribe any biological relevance to the features, it can still be helpful to know what the model is using to make its predictions. This is a good way to, for example, establish whether your model is actually learning based on a confound! Could you think of some examples? # + [markdown] slideshow={"slide_type": "subslide"} # <img src="Imgs/reg.jpg" alt="terms" width="700"/> # + [markdown] slideshow={"slide_type": "subslide"} # We can access the feature importances (weights) used by the model # + slideshow={"slide_type": "fragment"} l_svr.coef_ # + [markdown] slideshow={"slide_type": "fragment"} # lets plot these weights to see their distribution better # + slideshow={"slide_type": "subslide"} plt.bar(range(l_svr.coef_.shape[-1]),l_svr.coef_[0]) plt.title('feature importances') plt.xlabel('feature') plt.ylabel('weight') # + [markdown] slideshow={"slide_type": "subslide"} # Or perhaps it will be easier to visualize this information as a matrix similar to the one we started with # # We can use the correlation measure from before to perform an inverse transform # + slideshow={"slide_type": "fragment"} correlation_measure.inverse_transform(l_svr.coef_).shape # + slideshow={"slide_type": "subslide"} from nilearn import plotting feat_exp_matrix = correlation_measure.inverse_transform(l_svr.coef_)[0] plotting.plot_matrix(feat_exp_matrix, figure=(10, 8), labels=range(feat_exp_matrix.shape[0]), reorder=False, tri='lower') # + [markdown] slideshow={"slide_type": "subslide"} # Let's see if we can throw those features onto an actual brain. 
# # First, we'll need to gather the coordinates of each ROI of our atlas # + slideshow={"slide_type": "fragment"} coords = plotting.find_parcellation_cut_coords(atlas_filename) # + [markdown] slideshow={"slide_type": "subslide"} # And now we can use our feature matrix and the wonders of nilearn to create a connectome map where each node is an ROI, and each connection is weighted by the importance of the feature to the model # + slideshow={"slide_type": "subslide"} plotting.plot_connectome(feat_exp_matrix, coords, colorbar=True) # + [markdown] slideshow={"slide_type": "fragment"} # Whoa!! That's...a lot to process. Maybe let's threshold the edges so that only the most important connections are visualized # + slideshow={"slide_type": "subslide"} plotting.plot_connectome(feat_exp_matrix, coords, colorbar=True, edge_threshold=0.035) # + [markdown] slideshow={"slide_type": "fragment"} # That's definitely an improvement, but it's still a bit hard to see what's going on. # Nilearn has a new feature that let's use view this data interactively! # + slideshow={"slide_type": "subslide"} plotting.view_connectome(feat_exp_matrix, coords, edge_threshold='98%', edge_cmap='viridis')
ML_Regression_Tutorial_Lynn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # %matplotlib inline # %config InlineBackend.figure_format = "retina" from matplotlib import rcParams rcParams["savefig.dpi"] = 200 rcParams["font.size"] = 8 import warnings warnings.filterwarnings("ignore") # - # # Create 2D integer masks # # In this tutorial we will show how to create 2D integer mask for arbitrary latitude and longitude grids. # + active="" # .. note:: # 2D masks are good for plotting. However, to calculate weighted regional averages 3D boolean masks are more convenient. See the :doc:`tutorial on 3D masks<mask_3D>`. # - # Import regionmask and check the version: # + import regionmask regionmask.__version__ # - # Load xarray and the tutorial data: import xarray as xr import numpy as np # ## Creating a mask # # Define a lon/ lat grid with a 1° grid spacing, where the points define the center of the grid. lon = np.arange(-179.5, 180) lat = np.arange(-89.5, 90) # We will create a mask with the SREX regions (Seneviratne et al., 2012). regionmask.defined_regions.srex # The function `mask` determines which gridpoints lie within the polygon making up the each region: mask = regionmask.defined_regions.srex.mask(lon, lat) mask # `mask` is now a `xarray.Dataset` with shape `lat x lon` (if you need a numpy array use `mask.values`). Gridpoints that do not fall in a region are `NaN`, the gridpoints that fall in a region are encoded with the number of the region (here 1 to 26). 
# # We can now plot the `mask`: # + import cartopy.crs as ccrs import matplotlib.pyplot as plt f, ax = plt.subplots(subplot_kw=dict(projection=ccrs.PlateCarree())) ax.coastlines() mask.plot(ax=ax, transform=ccrs.PlateCarree(), add_colorbar=False); # - # ## Working with a mask # # masks can be used to select data in a certain region and to calculate regional averages - let's illustrate this with a 'real' dataset: airtemps = xr.tutorial.load_dataset("air_temperature") # The example data is a temperature field over North America. Let's plot the first time step: # + # choose a good projection for regional maps proj = ccrs.LambertConformal(central_longitude=-100) ax = plt.subplot(111, projection=proj) airtemps.isel(time=1).air.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree()) ax.coastlines(); # - # Conviniently we can directly pass an xarray object to the `mask` function. It gets the longitude and latitude from the `DataArray`/ `Dataset` and creates the `mask`. If the longitude and latitude in the xarray object are not called `"lon"` and `"lat"`, respectively; you can pass their name via the `lon_name` and `lat_name` keyword. mask = regionmask.defined_regions.srex.mask(airtemps) # + active="" # .. note:: From version 0.5 ``regionmask`` automatically detects wether the longitude needs to be wrapped around, i.e. 
if the regions extend from -180° E to 180° W, while the grid goes from 0° to 360° W as in our example: # + lon = airtemps.lon.values print("Grid extent: {:3.0f}°E to {:3.0f}°E".format(lon.min(), lon.max())) bounds = regionmask.defined_regions.srex.bounds_global print("Region extent: {:3.0f}°E to {:3.0f}°E".format(bounds[0], bounds[2])) # - # Let's plot the mask of the regions: # + proj = ccrs.LambertConformal(central_longitude=-100) ax = plt.subplot(111, projection=proj) low = mask.min() high = mask.max() levels = np.arange(low - 0.5, high + 1) h = mask.plot.pcolormesh( ax=ax, transform=ccrs.PlateCarree(), levels=levels, add_colorbar=False ) # for colorbar: find abbreviations of all regions that were selected reg = np.unique(mask.values) reg = reg[~np.isnan(reg)] abbrevs = regionmask.defined_regions.srex[reg].abbrevs cbar = plt.colorbar(h, orientation="horizontal", fraction=0.075, pad=0.05) cbar.set_ticks(reg) cbar.set_ticklabels(abbrevs) cbar.set_label("Region") ax.coastlines() # fine tune the extent ax.set_extent([200, 330, 10, 75], crs=ccrs.PlateCarree()) # - # We want to select the region 'Central North America'. Thus we first need to find out which number this is: CNA_index = regionmask.defined_regions.srex.map_keys("C. North America") CNA_index # ### Mask out a region # `xarray` provides the handy `where` function: airtemps_CNA = airtemps.where(mask == CNA_index) # Check everything went well by repeating the first plot with the selected region: # + # choose a good projection for regional maps proj = ccrs.LambertConformal(central_longitude=-100) ax = plt.subplot(111, projection=proj) regionmask.defined_regions.srex[["CNA"]].plot(ax=ax, add_label=False) airtemps_CNA.isel(time=1).air.plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree()) ax.coastlines(); # - # Looks good - with this we can calculate the region average. 
# # ### Calculate weighted regional average # # From version 0.15.1 xarray includes a function to calculate the weighted mean - we use `cos(lat)` as proxy of the grid cell area # + active="" # .. note:: # It is better to use a model's original grid cell area (e.g. areacella). ``cos(lat)`` works reasonably well for regular lat/ lon grids. For irregular grids (regional models, ocean models, ...) it is not appropriate. # + weights = np.cos(np.deg2rad(airtemps.lat)) ts_airtemps_CNA = airtemps_CNA.weighted(weights).mean(dim=("lat", "lon")) - 273.15 # - # We plot the resulting time series: # + f, ax = plt.subplots() ts_airtemps_CNA.air.plot.line(ax=ax, label="Central North America") ax.axhline(0, color="0.1", lw=0.5) plt.legend(); # - # To get the regional average for each region you would need to loop over them. However, it's easier to use a 3D mask. # # ### Calculate regional statistics using `groupby` # + active="" # .. warning:: # Using ``groupby`` offers some convenience and is faster than using ``where`` and a loop. However, it cannot # currently be combinded with ``weighted`` (xarray `GH3937 <https://github.com/pydata/xarray/issues/3937>`_). # Therefore, I recommend working with a :doc:`3D mask<mask_3D>`. # - # you can group over all integer values of the mask airtemps_all = airtemps.groupby(mask).mean() airtemps_all # However, `groupby` is the way to go when calculating a regional median: # you can group over all integer values of the mask airtemps_reg_median = airtemps.groupby(mask).median() airtemps_reg_median.isel(time=0) # ## Multidimensional coordinates # # Regionmask can also handle mutltidimensional longitude/ latitude grids (e.g. from a regional climate model). As xarray provides such an example dataset, we will use it to illustrate it. See also in the [xarray documentation](http://xarray.pydata.org/en/stable/examples/multidimensional-coords.html/). 
# # Load the tutorial data: rasm = xr.tutorial.load_dataset("rasm") # The example data is a temperature field over the Northern Hemisphere. Let's plot the first time step: # + # choose a projection proj = ccrs.NorthPolarStereo() ax = plt.subplot(111, projection=proj) ax.set_global() # `shading="flat"` is a workaround for matplotlib 3.3 and 3.4 # until SciTools/cartopy#1646 is merged rasm.isel(time=1).Tair.plot.pcolormesh( ax=ax, x="xc", y="yc", transform=ccrs.PlateCarree(), shading="flat" ) # add the abbreviation of the regions regionmask.defined_regions.srex[[1, 2, 11, 12, 18]].plot( ax=ax, add_coastlines=False, label="abbrev" ) ax.set_extent([-180, 180, 43, 90], ccrs.PlateCarree()) ax.coastlines(); # - # Again we pass the xarray object to regionmask. We have to specify `"xc"` and `"yc"` as the longitude and latitude coordinates of the array: mask = regionmask.defined_regions.srex.mask(rasm, lon_name="xc", lat_name="yc") mask # We want to select the region 'NAS' (Northern Asia). # # ### Select using `where` # # We have to select by index (the number of the region), we thus map from the abbreviation to the index. rasm_NAS = rasm.where(mask == regionmask.defined_regions.srex.map_keys("NAS")) # Check everything went well by repeating the first plot with the selected region: # + # choose a projection proj = ccrs.NorthPolarStereo() ax = plt.subplot(111, projection=proj) ax.set_global() rasm_NAS.isel(time=1).Tair.plot.pcolormesh( ax=ax, x="xc", y="yc", transform=ccrs.PlateCarree() ) # add the abbreviation of the regions regionmask.defined_regions.srex[["NAS"]].plot( ax=ax, add_coastlines=False, label="abbrev" ) ax.set_extent([-180, 180, 45, 90], ccrs.PlateCarree()) ax.coastlines(); # - # ## References # # * Special Report on Managing the Risks of Extreme Events and Disasters to Advance Climate Change Adaptation (SREX, Seneviratne et al., 2012: https://www.ipcc.ch/site/assets/uploads/2018/03/SREX-Ch3-Supplement_FINAL-1.pdf)
docs/notebooks/mask_2D.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Bag-of-words baseline for the "NLP with Disaster Tweets" task:
# count-vectorize the tweet text, cross-validate a ridge classifier,
# then refit on all training data and write a submission file.
import numpy as np
import pandas as pd
from sklearn import feature_extraction, linear_model, model_selection, preprocessing

# !wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py

# Labelled training tweets and unlabelled test tweets.
train_df = pd.read_csv("./DataSets/nlp-disaster/train.csv")
test_df = pd.read_csv("./DataSets/nlp-disaster/test.csv")

# Token-count features; the vocabulary is learned from the training split only.
count_vectorizer = feature_extraction.text.CountVectorizer()
train_vectors = count_vectorizer.fit_transform(train_df["text"])
test_vectors = count_vectorizer.transform(test_df["text"])

# 3-fold cross-validated F1 score for a ridge-regression classifier.
clf = linear_model.RidgeClassifier()
scores = model_selection.cross_val_score(clf, train_vectors, train_df["target"], cv=3, scoring="f1")

scores

# Refit on the full training set before predicting the test set.
clf.fit(train_vectors, train_df["target"])

# Fill the provided sample submission with our predictions and save it.
sample_submission = pd.read_csv("./DataSets/nlp-disaster/sample_submission.csv")
sample_submission["target"] = clf.predict(test_vectors)
sample_submission.to_csv("./Submissions/submission-nlp2", index=False)
Kaggle/Tweet NLP 2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Advent of Code 2021, day 3: binary diagnostic.
import collections

# ## part 1 ##

# Small worked example from the puzzle description.
testlines = '''00100
11110
10110
10111
10101
01111
00111
11100
10000
11001
00010
01010'''.split('\n')

# Real puzzle input; the trailing empty line is dropped.
with open('day3.txt') as fp:
    puzzlelines = fp.read().split('\n')
puzzlelines = puzzlelines[:-1]


def most_least_common_bit(bits):
    """Return the (most common, least common) value in *bits*."""
    # most_common() orders by frequency (ties keep first-seen order),
    # so the two ends of the ranking are exactly what we need.
    ranking = collections.Counter(bits).most_common()
    return ranking[0][0], ranking[-1][0]


most_least_common_bit([0, 1, 1, 0, 1])


def bitpop(lines):
    """Strip the leading character off every line.

    Returns (leading_bits, shortened_lines), or (None, None) once the
    lines have been consumed down to empty strings.
    """
    if lines[0] == '':
        return None, None
    leading = [line[0] for line in lines]
    shortened = [line[1:] for line in lines]
    return leading, shortened


bitpop(testlines)


def process(lines):
    """Compute the gamma/epsilon rates column by column.

    Returns (gamma, epsilon, gamma * epsilon); the product is the part-1
    answer (power consumption).
    """
    gamma_bits = []
    epsilon_bits = []
    remainder = lines.copy()
    while True:
        column, remainder = bitpop(remainder)
        if column is None:
            break
        most, least = most_least_common_bit(column)
        gamma_bits.append(most)
        epsilon_bits.append(least)
    gamma_str = ''.join(gamma_bits)
    epsilon_str = ''.join(epsilon_bits)
    print(gamma_str, epsilon_str)
    gamma = int(gamma_str, 2)
    epsilon = int(epsilon_str, 2)
    return gamma, epsilon, gamma * epsilon


process(testlines)

process(puzzlelines)

# ## part 2 ##


def filt(lines, common='most'):
    """Repeatedly keep only lines matching the most/least common bit.

    Scans column by column until a single line survives and returns it
    as an integer (oxygen-generator / CO2-scrubber rating).
    """
    remainder = lines.copy()
    cur_bit = 0
    while len(remainder) > 1:
        column = [line[cur_bit] for line in remainder]
        counts = collections.Counter(column)
        num0, num1 = counts['0'], counts['1']
        if common == 'most':
            keep = '0' if num0 > num1 else '1'   # tie keeps '1'
        elif common == 'least':
            keep = '0' if num0 <= num1 else '1'  # tie keeps '0'
        else:
            raise ValueError(f'bad value of common: {common}')
        remainder = [line for line in remainder if line[cur_bit] == keep]
        cur_bit += 1
    return int(remainder[0], 2)


filt(testlines)

filt(testlines, 'least')

filt(puzzlelines, 'most')*filt(puzzlelines, 'least')
day3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.10 64-bit (''ml_course'': conda)' # name: python3 # --- # + [markdown] tags=["remove_cell"] # <a id='logbook'></a> # # Report # This is the notebook that created the report. # + tags=["remove_cell"] # %matplotlib inline # %load_ext autoreload # %autoreload 2 import warnings warnings.filterwarnings('ignore') # + tags=["remove_cell"] import os import matplotlib.pyplot as plt import pandas as pd pd.options.display.max_rows = 999 pd.options.display.max_columns = 999 pd.set_option("display.max_columns", None) import sympy as sp import pandas as pd import numpy as np if os.name == 'nt': plt.style.use('paper.mplstyle') # Windows import statsmodels.api as sm from statsmodels.sandbox.regression.predstd import wls_prediction_std from sklearn.metrics import r2_score from scipy.integrate import solve_ivp import seaborn as sns from copy import deepcopy import sys sys.path.append('../../') from src.equation import Equation,equation_dict from src import equations,symbols import reports.paper_writing from src.df_to_latex import LateXTable ## Examples: # + tags=["remove_cell"] from IPython.display import set_matplotlib_formats set_matplotlib_formats('pdf','png') from IPython.display import HTML, Markdown, Image from src.df_to_latex import LateXTable # - # # Abstract # Short abstract of report # # Introduction # <a id='fig_rolldecay_example>'></a> # + caption="Roll decay time series" name="rolldecay_example" tags=[] fig,ax=plt.subplots() df = pd.DataFrame(data=np.random.normal(size=(1000,2))) df.plot(ax=ax); # - # The oscillating motion can be described by a spring-mass-damper system as seen in Fig.[(below)](#fig_spring_mass_damper). 
# <a id="fig_spring_mass_damper"></a> # + caption="Spring-mass-damper system" name="spring_mass_damper" tags=[] Image('../figures/220px-Mass_spring_damper.svg.png') # - # This system can me described as the following equation [(below)](#eq_equation1): # <a id="eq_equation1"></a> eq = r'$E=m \dot c^2 $' Equation(eq, label='equation1') A,r = sp.symbols('A r') eq = sp.Eq(A, r**2*sp.pi) Equation(eq, label='equation2') # # Data # The data used in this study is described in Tab.[(below)](#tab_data_files). There is one result with a pure FNPF simulation at 0 knots. For model test results, two tests are available at 0 knots and one test at 15.5 knots. There is also a result at 15.5 with a hybrid method, where semi empirical viscosity has been injected into the FNPF calculations. # <a id="tab_data_files"></a> # + content = pd.read_csv('../../data/external/content.csv', sep=';') table_parameters = content.round(decimals=3) content.set_index('file', inplace=True) rename = {} units = { 'Ship speed' : r'$[kts]$', } table_parameters['data file'] = table_parameters['data file'].apply(lambda x : x.replace('_',r' ')) lt = LateXTable(table_parameters, units=units, rename=rename, caption='Data files', label='data_files') lt # - # # Analysis # # Conclusions # # # # # + [markdown] tags=["remove_cell"] # # References # <div class="cite2c-biblio"></div>
reports/report/01.1.report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Solution Notebook # ## Problem: Implement a binary search tree with an insert method. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # ## Constraints # # * Can we insert None values? # * No # * Can we assume we are working with valid integers? # * Yes # * Can we assume all left descendents <= n < all right descendents? # * Yes # * Do we have to keep track of the parent nodes? # * This is optional # * Can we assume this fits in memory? # * Yes # ## Test Cases # # ### Insert # # Insert will be tested through the following traversal: # # ### In-Order Traversal # # * 5, 2, 8, 1, 3 -> 1, 2, 3, 5, 8 # * 1, 2, 3, 4, 5 -> 1, 2, 3, 4, 5 # # If the `root` input is `None`, return a tree with the only element being the new root node. # # You do not have to code the in-order traversal, it is part of the unit test. 
# ## Algorithm
#
# ### Insert
#
# * If the root is None, return Node(data)
# * If the data is <= the current node's data
#     * If the current node's left child is None, set it to Node(data)
#     * Else, recursively call insert on the left child
# * Else
#     * If the current node's right child is None, set it to Node(data)
#     * Else, recursively call insert on the right child
#
# Complexity:
#
# * Time: O(h), where h is the height of the tree
#     * In a balanced tree, the height is O(log(n))
#     * In the worst case we have a linked list structure with O(n)
# * Space: O(m), where m is the recursion depth, or O(1) if using an iterative approach

# ## Code

# +
# %%writefile bst.py


class Node(object):
    """A single BST node holding *data* plus left/right/parent links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.parent = None

    def __repr__(self):
        return str(self.data)


class Bst(object):
    """Binary search tree; duplicates (data == node.data) go to the left."""

    def __init__(self, root=None):
        self.root = root

    def insert(self, data):
        """Insert *data* and return the newly created node."""
        if data is None:
            raise TypeError('data cannot be None')
        if self.root is not None:
            return self._insert(self.root, data)
        self.root = Node(data)
        return self.root

    def _insert(self, node, data):
        # Recursive descent: pick the side, create a leaf when the slot
        # is free, otherwise keep walking down that subtree.
        if node is None:
            return Node(data)
        side = 'left' if data <= node.data else 'right'
        child = getattr(node, side)
        if child is None:
            child = Node(data)
            child.parent = node
            setattr(node, side, child)
            return child
        return self._insert(child, data)
# -

# %run bst.py

# ## Unit Test

# %run dfs.py
# %run ../utils/results.py

# +
# %%writefile test_bst.py

from nose.tools import assert_equal


class TestTree(object):
    """In-order traversal of the built tree must come out sorted."""

    def __init__(self):
        self.results = Results()

    def test_tree_one(self):
        bst = Bst()
        for value in (5, 2, 8, 1, 3):
            bst.insert(value)
        in_order_traversal(bst.root, self.results.add_result)
        assert_equal(str(self.results), '[1, 2, 3, 5, 8]')
        self.results.clear_results()

    def test_tree_two(self):
        bst = Bst()
        for value in (1, 2, 3, 4, 5):
            bst.insert(value)
        in_order_traversal(bst.root, self.results.add_result)
        assert_equal(str(self.results), '[1, 2, 3, 4, 5]')
        print('Success: test_tree')


def main():
    test = TestTree()
    test.test_tree_one()
    test.test_tree_two()


if __name__ == '__main__':
    main()
# -

# %run -i test_bst.py
graphs_trees/bst/bst_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table><tr> # <td style="background-color:#ffffff;text-align:left;"><a href="http://qworld.lu.lv" target="_blank"><img src="../images\qworld.jpg" width="30%" align="left"></a></td> # <td style="background-color:#ffffff;">&nbsp;</td> # <td style="background-color:#ffffff;vertical-align:text-middle;text-align:right;"> # <table><tr style="background-color:white;"> # <td> Visit</td> # <td><a href="http://qworld.lu.lv" target="_blank"><img src="../images/web-logo.png" width="35px"></a></td> # <td width="10pt"></td> # <td> Join</td> # <td><a href="https://qworldworkspace.slack.com/" target="_blank"><img src="../images/slack-icon.png" width="80px"></a></td> # <td width="10pt"></td> # <td>Follow</td> # <td><a href="https://www.facebook.com/qworld19/" target="_blank"><img src="../images/facebook-icon.png" width="40px"></a></td> # <td><a href="https://twitter.com/QWorld19" target="_blank"><img src="../images/twitter-icon.png" width="40px"></a></td> # </tr></table> # </td> # </tr></table> # # #### References # # - First image of complex numbers in notebook S02_Complex_Number_Basics.ipynb is taken from https://brilliant.org/wiki/complex-numbers/ # # - First image of Bloch sphere in notebook S26_Bloch_Sphere.ipynb is taken from https://quantum-computing.ibm.com/docs/guide/wwwq/the-bloch-sphere # # The following references are used for the sections on Quantum Fourier Transform and Shor's algorithm. # # - Nielsen, <NAME>., and <NAME>. "Quantum computation and quantum information." (2002): 558-559. # # - <NAME>, <NAME>, and <NAME>. An introduction to quantum computing. Oxford university press, 2007. 
# # - [Shor’s Algorithm for Period Finding on a Quantum Computer](https://young.physics.ucsc.edu/150/shor.pdf) by # <NAME> # # - [Shor's Algorithm](https://www.ryanlarose.com/uploads/1/1/5/8/115879647/shor3.pdf) by <NAME>
silver/S01_References.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn import preprocessing # %pylab inline # %matplotlib inline train_in = pd.read_csv('data/train_input.csv', sep=';') train_out = pd.read_csv('data/train_output.csv', sep=';') #test_in = pd.read_csv('data/test_input.csv', sep=';') train_in.head() # + train_id = train_in['ID'] # test_id = test_in['ID'] train_device = train_in['DEVICE'] # test_device = test_in['DEVICE'] train_hypno = train_in['HYPNOGRAM'] # test_hypno = test_in['HYPNOGRAM'] train_records = train_in.iloc[:,2:-1].transpose() # test_records = test_in.iloc[:,2:-1].transpose() # - # Exploratory data analysis on train records train_records.describe() fulldf = pd.merge(train_in, train_out, on='ID') train_out fulldf fulldf['HYPNOGRAM'] device1 = fulldf[fulldf.DEVICE==1.0] device1_to = train_out[fulldf.DEVICE==1.0] EEG = device1[device1.columns[2:-2]] viz = EEG.as_matrix() y = viz[2,:] plt.plot(y) plt.show() # + from __future__ import division from scipy.signal import butter, lfilter import statsmodels.api as sm from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess fs = 1E9 # 1 ns -> 1 GHz cutoff = 2.5E5# 10 MHz B, A = butter(5, cutoff / (fs / 2), btype='low') # 1st order Butterworth low-pass filtered_signal = lfilter(B, A, y, axis=0) # - plt.plot(filtered_signal) plt.show() # + from sklearn import preprocessing filtered_signal = filtered_signal.reshape(1, -1); nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T plt.plot(nf_s) print(np.std(nf_s)) # - from statsmodels import tsa # + #arma = tsa.arima_model.ARIMA(nf_s, (9, 2, 2)) #results = arma.fit(transparams=False) # + import scipy data = np.reshape(nf_s, (-1, )) 
def stft(x, fftsize=1024, overlap=256):
    """Short-time Fourier transform: Hann-windowed rFFT frames of the 1-D signal *x*."""
    # BUG FIX: with true division (this file does `from __future__ import
    # division`, and Python 3 divides the same way) fftsize / overlap is a
    # float, and range() raises TypeError on a float step.  Integer
    # division yields the intended hop size.
    hop = fftsize // overlap
    w = scipy.hanning(fftsize+1)[:-1]  # better reconstruction with this trick: hanning(fftsize+1)[:-1]
    # NOTE(review): scipy.hanning was removed in modern SciPy; np.hanning
    # is a drop-in replacement -- confirm the installed SciPy version.
    return np.array([np.fft.rfft(w*x[i:i+fftsize]) for i in range(0, len(x)-fftsize, hop)])


# +
import pywt
# pywt.dwt?
# -

# Haar wavelet decomposition of the current (filtered, normalised) signal.
cA, cD = pywt.dwt(data, 'haar')

plt.stem(cA)

y = viz[3,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T
data = np.reshape(nf_s, (-1, ))
cA, cD = pywt.dwt(data, 'haar')
plt.stem(cA)

# Same low-pass -> normalise -> DWT pipeline for records with TARGET == 29.
viz29 = viz[train_out.TARGET == 29]
y = viz29[1,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T
data = np.reshape(nf_s, (-1, ))
cA, cD = pywt.dwt(data, 'haar')
plt.stem(cA)

y = viz29[2,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T
data = np.reshape(nf_s, (-1, ))
cA, cD = pywt.dwt(data, 'haar')
plt.stem(cA)

# ... and for TARGET == 35.
viz35 = viz[train_out.TARGET == 35]
y = viz35[1,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T
data = np.reshape(nf_s, (-1, ))
cA, cD = pywt.dwt(data, 'haar')
plt.stem(cA)

y = viz35[2,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False).T
data = np.reshape(nf_s, (-1, ))
cA, cD = pywt.dwt(data, 'haar')
plt.stem(cA)

from scipy.stats.stats import pearsonr

# Overlay two normalised signals for visual comparison.
y = viz35[1,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s_1 = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False)
nf_s_1 = nf_s_1[0]
y = viz29[9,:]
filtered_signal = lfilter(B, A, y, axis=0)
filtered_signal = filtered_signal.reshape(1, -1);
nf_s = preprocessing.normalize(filtered_signal, norm='l2', axis=1, copy=True, return_norm=False)
nf_s = nf_s[0]
plt.plot(nf_s)
plt.plot(nf_s_1)

viz35

viz29

EEG

np.linalg.norm(1000*viz[device1_to.TARGET == 35][2,:]-1000*viz[device1_to.TARGET == 18][1,:])

viz[device0_to.TARGET == 35][2,:]

viz[device0_to.TARGET == 18][1,:]

device1_to.TARGET

# Restrict to recordings made with device 0.
device0 = fulldf[fulldf.DEVICE==0.0]
device0_to = train_out[fulldf.DEVICE==0.0]
EEG0 = device0[device0.columns[2:-2]]
viz0= EEG0.as_matrix()

viz[[i for i, x in enumerate(device0_to.TARGET == 35) if x]]

np.linalg.norm(viz0[8,:]-viz0[1,:])

viz0.shape

np.corrcoef(viz0[550,:],viz0[13,:])

device0_to.TARGET


def column(matrix, i):
    """Extract column *i* of a 2-D sequence as a plain list."""
    return [row[i] for row in matrix]


device0_to = train_out[fulldf.DEVICE==0.0]
device0_to = device0_to.as_matrix()
device0_to = column(device0_to, 1)

device0_to

device0_to = train_out[fulldf.DEVICE==0.0]
mask = np.array(device0_to.TARGET == 32, dtype = bool)
fviz0 = viz0[mask]
np.corrcoef(fviz0[1,:],fviz0[2,:])

# Group device-0 records by target class (18..59).
fviz0 = np.array([viz0[np.array(device0_to.TARGET == i, dtype = bool)] for i in range(18, 60)])

fviz0

fviz0[3]

# Per-class mean signal.
mviz0 = np.array([np.mean(viz0[np.array(device0_to.TARGET == i, dtype = bool)], axis=0) for i in range(18, 60)])

np.corrcoef(fviz0[2][0],mviz0[2])[0,0]


def corr_score(a, b):
    """Map the Pearson correlation r of *a* and *b* to r / (1 - r)."""
    x = np.corrcoef(a,b)[0,1];
    return x/(1-x)


corr_score(fviz0[2][0], mviz0[2])

np.corrcoef(fviz0[0][0],mviz0[10])[1,0]

fviz0[0][0]

# Drop per-class means that contain NaNs (empty classes).
mviz0_c = mviz0[~isnan(mviz0).any(axis=1)]

mviz0_c

# BUG FIX: a bare IPython magic is not valid Python -- in jupytext light
# format, magics must be commented out.
# %run MAPE.py

EEG = fulldf[fulldf.columns[2:-2]]
viz= EEG.as_matrix()

viz
challenge_rythm_Belkhayat_Feltin.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build GraphModel node definitions for the AFOLU food-waste and PM2.5 models.
import os

# Run from the repository root so the data/ paths below resolve.
path_parent = os.path.dirname(os.getcwd())
os.chdir(path_parent)

# +
from ggmodel_dev.graphmodel import GraphModel, converte_to_format
import pandas as pd
# -

# + code_folding=[]
xls = pd.ExcelFile('data/AFOLU_tables.xlsx')

# BUG FIX: this print originally ran before `xls` was assigned (stale
# notebook cell order -> NameError when executed top to bottom); it must
# come after the ExcelFile is opened.
print(xls.sheet_names)

import json


def var_df_to_dict(df):
    """Turn a variable-description sheet into {variable: {name, unit, type}}."""
    return (
        df.rename(columns={"Description": 'name', 'Unit': 'unit'})
        .drop(columns=['Source'])
        .assign(type='')
        .set_index('Variable').to_dict('index')
    )


var_dict = var_df_to_dict(xls.parse(sheet_name='2waste').drop_duplicates(subset=['Variable']))
print(json.dumps(var_dict, sort_keys=True, indent=4))

# +
# Static node table for the food-waste sub-model (no computations yet).
FOODWASTE_nodes = {
    "FLOi": {
        "name": "Food losses per food group",
        "type": "",
        "unit": "tonnes"
    },
    "FLOtot": {
        "name": "Food losses total",
        "type": "",
        "unit": "tonnes"
    },
    "FL_reduction": {
        "name": "Food losses reduction policy",
        "type": "",
        "unit": "tonnes"
    },
    "FLossIndex": {
        "name": "Food loss index",
        "type": "",
        "unit": "-"
    },
    "FPi": {
        "name": "Food production",
        "type": "",
        "unit": "1000 tonnes"
    },
    "FWCt": {
        "name": "Food waste",
        "type": "",
        "unit": "tonnes"
    },
    "FW_reduction": {
        "name": "Food waste reduction policy",
        "type": "",
        "unit": "tonnes"
    },
    "FWasteIndex": {
        "name": "Food waste index",
        "type": "",
        "unit": "-"
    },
    "FWasteKGi": {
        "name": "Food waste per capita",
        "type": "",
        "unit": "kg"
    },
    "Pop": {
        "name": "Population",
        "type": "",
        "unit": "Head"
    }
}

FOODWASTE_nodes

# +
# Agricultural PM2.5 emissions sub-model: inputs plus derived totals.
PM25_nodes = {
    "AEUi": {
        "name": "Total agricultural energy use per type",
        "type": "input",
        "unit": "TWH"
    },
    "BMB": {
        "name": "Total biomass burned",
        "type": "input",
        "unit": "kg dm"
    },
    "ECR_PM25eq": {
        "name": "PM25 emissions from burning crop residues",
        "type": "variable",
        "unit": "tonnes",
        "computation": lambda BMB, EFCRBI_pm25, **kwargs: BMB * EFCRBI_pm25
    },
    "EFCRBI_pm25": {
        "name": "Emission factors burning crop residues",
        "type": "input",
        "unit": "kg/mg waste"
    },
    "EFPM25Ai": {
        "name": "Emission factor PM2.5 from live animals",
        "type": "input",
        "unit": "kg/heads"
    },
    "EFPM25Ci": {
        "name": "Emission factors PM2.5 from crops",
        "type": "input",
        "unit": "kg/ha"
    },
    "EFPM25Ei": {
        "name": "Emission factors PM2.5 agricultural fuel consumption",
        "type": "input",
        "unit": "g/tonne fuel"
    },
    "PM25": {
        "name": "Total agricultural PM25 emissions",
        "type": "variable",
        "unit": "tonnes",
        "computation": lambda PM25A, PM25C, PM25E, ECR_PM25eq, **kwargs: PM25A + PM25C + PM25E + ECR_PM25eq
    },
    "PM25A": {
        "name": "PM25 emissions from live animals",
        "type": "variable",
        "unit": "tonnes",
        # BUG FIX: the lambda previously declared the parameter EFPM25Ei
        # while its body uses EFPM25Ai, which would raise NameError as
        # soon as the node is evaluated.
        "computation": lambda TAi, EFPM25Ai, **kwargs: (TAi * EFPM25Ai).sum()
    },
    "PM25C": {
        "name": "PM25 emissions from crops",
        "type": "variable",
        "unit": "tonnes",
        "computation": lambda TCLDi, EFPM25Ci, **kwargs: (TCLDi * EFPM25Ci).sum()
    },
    "PM25E": {
        "name": "PM25 emissions from agricultural energy use",
        "type": "variable",
        "unit": "tonnes",
        "computation": lambda AEUi, EFPM25Ei, **kwargs: (AEUi * EFPM25Ei).sum()
    },
    "TAi": {
        "name": "Total live animals",
        "type": "input",
        "unit": "head"
    },
    "TCLDi": {
        "name": "Total cropland demand",
        "type": "input",
        "unit": "ha"
    }
}

GraphModel(PM25_nodes).draw()
notebooks/biogas_developement.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={}
# # Project Euler: Problem 2

# + [markdown] nbgrader={}
# Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 0 and 1, the first 12 terms will be:
#
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
#
# By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.

# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# Grow the sequence until the last term exceeds four million, then total
# the even-valued terms.
Fib = [0, 1]
while Fib[-1] <= 4000000:
    Fib.append(Fib[-1] + Fib[-2])
Fibsum = sum(term for term in Fib if term % 2 == 0)
print(Fibsum)

# + deletable=false nbgrader={"checksum": "e8afe8a5735f0fff949b706895f8583d", "grade": true, "grade_id": "projecteuler2", "points": 10}
# This cell will be used for grading, leave it at the end of the notebook.
assignments/assignment02/ProjectEuler2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fully-connected variational autoencoder trained on MNIST.
# NOTE(review): the code uses the pre-0.4 PyTorch API throughout
# (`Variable`, `volatile=True`, `loss.data[0]`, `F.sigmoid`) -- it will
# not run unmodified on modern PyTorch; confirm the pinned torch version.
import os
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.autograd import Variable


class VAE(nn.Module):
    """VAE with two-hidden-layer MLP encoder and decoder.

    Architecture: inp_size -> hid_size -> hid_size -> (mu, logvar) of
    size z_size, then the mirror image back to inp_size with a sigmoid
    output (pixels in [0, 1]).
    """

    def __init__(self, inp_size, hid_size, z_size, USE_CUDA=False):
        super(VAE, self).__init__()
        # Encoder
        self.fc1 = nn.Linear(inp_size, hid_size)
        self.drop1 = nn.Dropout(0.2)
        self.fc2 = nn.Linear(hid_size, hid_size)
        self.drop2 = nn.Dropout(0.2)
        self.fc31 = nn.Linear(hid_size, z_size)   # mu head
        self.fc32 = nn.Linear(hid_size, z_size)   # logvar head
        # Decoder
        self.fc4 = nn.Linear(z_size, hid_size)
        self.drop4 = nn.Dropout(0.2)
        self.fc5 = nn.Linear(hid_size, hid_size)
        self.drop5 = nn.Dropout(0.2)
        self.fc6 = nn.Linear(hid_size, inp_size)
        self.USE_CUDA = USE_CUDA

    def encode(self, x):
        """Map input *x* to the Gaussian posterior parameters (mu, logvar)."""
        h = self.drop1(F.relu(self.fc1(x)))
        h = self.drop2(F.relu(self.fc2(h)))
        return self.fc31(h), self.fc32(h)

    def reparametrize(self, mu, logvar):
        """Sample z = mu + sigma * eps while training; return mu at eval time."""
        if self.training:
            # logvar = log(sigma**2)
            # logvar = 2 * log(sigma)
            # sigma = exp(logvar/2)
            std = logvar.mul(0.5).exp()
            # eps drawn from N(0, 1) with the same shape/dtype as std.
            x = Variable(std.data.new(std.size()).normal_())
            if self.USE_CUDA:
                x = x.cuda()
            return x.mul(std).add(mu)
        else:
            return mu

    def decode(self, z):
        """Map latent code *z* back to a Bernoulli mean per pixel."""
        h = self.drop4(F.relu(self.fc4(z)))
        h = self.drop5(F.relu(self.fc5(h)))
        # NOTE(review): F.sigmoid is deprecated in newer torch (use torch.sigmoid).
        return F.sigmoid(self.fc6(h))

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for input batch *x*."""
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        return self.decode(z), mu, logvar


def loss_function(x_, x, mu, logvar):
    """Reconstruction (BCE) plus KL divergence to the unit Gaussian prior.

    NOTE(review): BCE here uses the default (averaging) reduction while
    KLD is a full sum scaled by 1 / (batch * z_size) -- confirm this
    relative weighting of the two terms is intended.
    """
    BCE = F.binary_cross_entropy(x_, x)
    # KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD / (len(x) * len(mu))


# +
import sys


def train(epoch, model, optimizer, train_loader, feat_size, USE_CUDA):
    """Run one training epoch; logs every 10 batches and the epoch average.

    NOTE(review): `loss.data[0]` is the pre-0.4 scalar accessor; modern
    torch requires `loss.item()`.
    """
    model.train()
    train_loss = 0
    for batch_idx, (x, _) in enumerate(train_loader):
        # Flatten images to (batch, feat_size) vectors.
        x = Variable(x.view(-1, feat_size))
        if USE_CUDA:
            x = x.cuda()
        optimizer.zero_grad()
        x_, mu, logvar = model(x)
        loss = loss_function(x_, x, mu, logvar)
        loss.backward()
        train_loss += loss.data[0]
        optimizer.step()
        if batch_idx % 10 == 0:
            sys.stdout.write(f'\rTrain Epoch: {epoch + 1} '
                             f'[{(batch_idx) * len(x)}/{len(train_loader.dataset)} '
                             f'({100. * (batch_idx) / len(train_loader):.0f}%)]\t'
                             f'Loss: {loss.data[0] / len(x):.6f}')
            sys.stdout.flush()
    print()
    print(f'=====> Epoch: {epoch + 1} '
          f'Average loss: {train_loss / len(train_loader.dataset):.4f}')
# -


def test(epoch, model, test_loader, feat_size, USE_CUDA):
    """Evaluate the model on the test set and print the average loss.

    NOTE(review): `volatile=True` (no-grad inference) was removed in
    torch 0.4+; the modern equivalent is `with torch.no_grad():`.
    """
    model.eval()
    test_loss = 0
    for batch_idx, (x, _) in enumerate(test_loader):
        x = Variable(x.view(-1, feat_size), volatile=True)
        if USE_CUDA:
            x = x.cuda()
        x_, mu, logvar = model(x)
        loss = loss_function(x_, x, mu, logvar)
        test_loss += loss.data[0]
    print(f'====> Test set loss: {test_loss / len(test_loader.dataset):.4f}')


from torchvision.utils import save_image
from torchvision import datasets, transforms

BATCH_SIZE = 128
USE_CUDA = True

# +
# MNIST loaders; the training set is downloaded on first use.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=BATCH_SIZE, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.ToTensor()),
    batch_size=BATCH_SIZE, shuffle=True)
# -

# 784 = 28x28 flattened MNIST pixels; 20-dimensional latent space.
model = VAE(inp_size=784, hid_size=800, z_size=20, USE_CUDA=USE_CUDA)
if USE_CUDA:
    model.cuda()

optimizer = optim.Adam(model.parameters(), lr=3e-4)

for epoch in range(100):
    train(epoch, model, optimizer, train_loader, 784, USE_CUDA)
    test(epoch, model, test_loader, 784, USE_CUDA)
    # Every 10th epoch, decode random latent samples and save an image grid.
    if epoch in [0, 9, 19, 29, 39, 49, 59, 69, 79, 89, 99]:
        sample = Variable(torch.randn(BATCH_SIZE, 20))
        if USE_CUDA:
            sample = sample.cuda()
        sample = model.decode(sample)
        save_image(sample.data.view(BATCH_SIZE, 1, 28, 28),
                   f'../results/sample_linear_mnist_{epoch + 1}.png')
scripts/Linear-VAE-baseline-MNIST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.9.9 64-bit (''virt'': venv)'
#     name: python3
# ---

# # Day 6

# ## Part 1

from copy import deepcopy

# +
# Parse the puzzle input: one comma-separated line of lanternfish timers.
fish_list = []
with open("inputs/day6.txt") as input_file:
    fish_list = list(map(int, (input_file.read().split(","))))
print(fish_list)
# -


# Advance every fish by one day, in place, and return the same list.
def new_day_for_fishes(fish_list):
    """One day of simulation: timers tick down; a fish at 0 restarts at 6
    and spawns a newborn at 8.  Newborns are appended past the snapshot
    length, so they do not age on the day they are born."""
    todays_population = len(fish_list)
    for idx in range(todays_population):
        if fish_list[idx] > 0:
            fish_list[idx] -= 1
        else:
            fish_list[idx] = 6
            fish_list.append(8)
    return fish_list


# Brute-force: simulate 80 days on an explicit list of individual timers.
fish_list_calc = deepcopy(fish_list)
for _ in range(80):
    fish_list_calc = new_day_for_fishes(fish_list_calc)
print(len(fish_list_calc))

# ## Part 2

# Bucket the fish by timer value (0..8) instead of tracking individuals.
fishes_classes_list = [0] * 9
for fish in fish_list:
    fishes_classes_list[fish] += 1
print(fishes_classes_list)

# One day = rotate the buckets left: the fish that were at 0 re-enter at 6
# and also produce the same number of newborns at 8.
for _ in range(256):
    new_fishes_count = fishes_classes_list.pop(0)
    fishes_classes_list[6] += new_fishes_count
    fishes_classes_list.append(new_fishes_count)
print(sum(fishes_classes_list))
Day 6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import wandb
wandb.init(project="pytorch-ignite-example")

# +
import torch
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torchvision.transforms import Compose, ToTensor, Normalize
from torchvision.datasets import MNIST

# FIX: create_supervised_trainer was imported twice and
# create_supervised_evaluator (used in run() below) was never imported,
# which raised a NameError at runtime.
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss

from tqdm import tqdm
# -


class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 MNIST digits (10 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)  # flatten the 20 x 4 x 4 feature maps
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1)


# +
def get_data_loaders(train_batch_size, val_batch_size):
    """Build normalized MNIST train/validation DataLoaders."""
    data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])

    train_loader = DataLoader(
        MNIST(download=True, root=".", transform=data_transform, train=True),
        batch_size=train_batch_size, shuffle=True)

    val_loader = DataLoader(
        MNIST(download=False, root=".", transform=data_transform, train=False),
        batch_size=val_batch_size, shuffle=False)
    return train_loader, val_loader


def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval):
    """Train Net on MNIST with ignite, logging progress to tqdm and wandb.

    Parameters mirror the wandb hyperparameter config: batch sizes, epoch
    count, SGD learning rate / momentum, and how often (in iterations) the
    running training loss is logged.
    """
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    model = Net()
    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
    trainer = create_supervised_trainer(model, optimizer, F.nll_loss,
                                        device=device)
    evaluator = create_supervised_evaluator(model,
                                            metrics={'accuracy': Accuracy(),
                                                     'nll': Loss(F.nll_loss)},
                                            device=device)

    desc = "ITERATION - loss: {:.2f}"
    pbar = tqdm(
        initial=0, leave=False, total=len(train_loader),
        desc=desc.format(0)
    )

    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        # engine.state.output is the per-iteration training loss.
        pbar.desc = desc.format(engine.state.output)
        pbar.update(log_interval)
        wandb.log({"train loss": engine.state.output})

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        pbar.refresh()
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll)
        )

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        tqdm.write(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))
        pbar.n = pbar.last_print_n = 0  # restart the bar for the next epoch
        wandb.log({"validation loss": engine.state.metrics['nll']})
        wandb.log({"validation accuracy": engine.state.metrics['accuracy']})

    trainer.run(train_loader, max_epochs=epochs)
    pbar.close()


# +
# Train Model
hyperparameter_defaults = dict(
    batch_size = 256,
    val_batch_size = 100,
    epochs = 10,
    lr = 0.001,
    momentum = 0.3,
    log_interval = 10,
)

# Get metrics in Weights & Biases
wandb.init(config=hyperparameter_defaults, project="pytorch-ignite-example")
config = wandb.config

run(config.batch_size, config.val_batch_size, config.epochs,
    config.lr, config.momentum, config.log_interval)
# -
notebooks/wandb/run-20200131_192656-b1wh3rz5/code/notebooks/1.1_First_neuralnet_trying wandb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Script for training a denoiser on KappaTNG
import os
os.environ['XLA_FLAGS']='--xla_gpu_cuda_data_dir=/gpfslocalsys/cuda/11.1.0'

from absl import app
from absl import flags

import tensorflow as tf
import tensorflow_datasets as tfds

import jax
import jax.numpy as jnp

import optax

from jax_lensing.inversion import ks93, ks93inv
from jax_lensing.utils import load_dataset
from astropy.io import fits
# -

# %pylab inline

# Notebook-wide configuration.
dataset = "kappatng"
batch_size = 2
map_size = 360
train_split = "80%"
noise_dist_std = 0.2  # std of the distribution the per-map noise level is drawn from


# Let's prepare some test data
# NOTE(review): this local definition shadows the load_dataset imported from
# jax_lensing.utils above.
def load_dataset():
    """Return a numpy iterator over augmented, noise-corrupted KappaTNG maps.

    Each element is a dict with the clean crop 'x', the noisy version 'y',
    the unit-variance noise realization 'u' and the sampled noise level 's'.
    """
    # test set
    def pre_process(im):
        # Random (map_size, map_size, 1) crop with random flips.
        x = tf.image.random_crop(tf.expand_dims(im['map'],-1), [map_size,map_size,1])
        x = tf.image.random_flip_left_right(x)
        x = tf.image.random_flip_up_down(x)
        # Sample random Gaussian noise
        u = tf.random.normal(tf.shape(x))
        # Sample standard deviation of noise corruption
        s = noise_dist_std * tf.random.normal((1, 1, 1))
        # Create noisy image
        y = x + s * u
        return {'x':x, 'y':y, 'u':u,'s':s}
    ds = tfds.load('kappatng', split='train[-20%:]', shuffle_files=False)
    ds = ds.map(pre_process)
    return iter(tfds.as_numpy(ds))

test_set = load_dataset()

# Instantiate dataset and sample map
ref_map = next(test_set)['x']

ref_map.shape

imshow(ref_map[...,0])

# COSMOS survey mask on the same 0.29 arcmin / 360-pixel grid.
mask = fits.getdata('../data/COSMOS/cosmos_full_mask_0.29arcmin360copy.fits').astype('float32')

# +
# We turn it into shear
e1, e2 = ks93inv(ref_map[...,0], jnp.zeros_like(ref_map[...,0]))

# And we extract a patch that put the big cluster right at the edge
k = ref_map[...,0]

# +
figure(figsize=[15,5])
subplot(131)
imshow(e1*mask)
axis('off')
subplot(132)
imshow(e2*mask)
axis('off')
subplot(133)
imshow(k*mask)
axis('off')
# -

# **Compare with True COSMOS shear noise**

# +
cosmos_e1 = fits.getdata('../data/COSMOS/cosmos_full_e1_0.29arcmin360.fits').astype('float32')

subplot(121)
imshow(cosmos_e1)
subplot(122)
m = cosmos_e1 == 0
hist(cosmos_e1[~m], 100);
print(jnp.std(cosmos_e1[~m]))
# -

# import ellipticities
# NOTE(review): e1/e2 are rebound here from the simulated shear maps to the
# measured COSMOS ellipticities.
e1 = fits.getdata('../data/COSMOS/e1.fits').astype('float32')
e2 = fits.getdata('../data/COSMOS/e2.fits').astype('float32')

import jax.numpy as jnp

# Empirical normalized histogram of e1, used as the fitting target below.
a, b = jnp.histogram(e1, 256, density=True)

hist(e1, 256, density=True);
plot(b[:-1], a)

# ### Fit a Gaussian

import tensorflow_probability as tfp; tfp = tfp.substrates.jax
tfd = tfp.distributions

# +
N = 256
coords = np.linspace(-1,1,N)

def model(var):
    """Zero-mean Gaussian pdf with scale *var*, evaluated on coords."""
    dist = tfp.distributions.Normal(loc=0., scale=var)
    return dist.prob(coords)
# -

params = jnp.ones(1)*.1

x = model(params)

hist(e1, 256, density=True);
plot(coords, x, label=r'Gaussian $\sigma=0.148$')
legend()

# +
from jax.scipy.optimize import minimize

# Least-squares distance between the model pdf and the e1 histogram.
loss = lambda p: jnp.sum((a - model(p))**2)
# -

results = minimize(loss, params, method='BFGS')

results.x, results.nit

z = model(results.x)

hist(e1, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

# Averaging N ellipticities should shrink the scale by sqrt(N); the blocks
# below check the fitted model against 10-, 6-, 4- and 2-galaxy averages.

# +
e1_10 = e1[:-7].reshape((-1,10)).mean(axis=-1)
e2_10 = e2[:-7].reshape((-1,10)).mean(axis=-1)

figure(figsize=[12,4])
subplot(121)
z = model(results.x/jnp.sqrt(10))
hist(e1_10, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
subplot(122)
z = model(results.x/jnp.sqrt(10))
hist(e2_10, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

# +
e1_6 = e1[:-3].reshape((-1,6)).mean(axis=-1)
e2_6 = e2[:-3].reshape((-1,6)).mean(axis=-1)

figure(figsize=[12,4])
subplot(121)
z = model(results.x/jnp.sqrt(6))
hist(e1_6, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
subplot(122)
z = model(results.x/jnp.sqrt(6))
hist(e2_6, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

# +
e1_4 = e1[:-1].reshape((-1,4)).mean(axis=-1)
e2_4 = e2[:-1].reshape((-1,4)).mean(axis=-1)

figure(figsize=[12,4])
subplot(121)
z = model(results.x/jnp.sqrt(4))
hist(e1_4, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
subplot(122)
z = model(results.x/jnp.sqrt(4))
hist(e2_4, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

# +
e1_2 = e1[:-1].reshape((-1,2)).mean(axis=-1)
e2_2 = e2[:-1].reshape((-1,2)).mean(axis=-1)

figure(figsize=[12,4])
subplot(121)
z = model(results.x/jnp.sqrt(2))
hist(e1_2, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
subplot(122)
z = model(results.x/jnp.sqrt(2))
hist(e2_2, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
# -

# ### Fit a Mixture of 2 Gaussians

def model(params):
    """Two-component zero-mean Gaussian mixture pdf on coords.

    params = [weight1, weight2, scale1, scale2]; the component means are
    fixed at zero (the commented-out indices show the original free-mean
    parameterization).
    """
    mix1 = params[0]
    mix2 = params[1]
    loc1 = 0.#params[2]
    scale1 = params[2]
    loc2 = 0.#params[4]
    scale2 = params[3]
    bimix_gauss = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(probs=[mix1, mix2]),
        components_distribution=tfd.Normal(
            loc=[loc1, loc2],          # One for each component.
            scale=[scale1, scale2]))   # And same here.
    return bimix_gauss.prob(coords)

params = jnp.array([0.5, 0.5, .4, .3])

y = model(params)

hist(e1, 256, density=True);
plot(coords, y, label=r'Gaussian Mixture')
legend()

loss = lambda p: jnp.sum((a - model(p))**2)

# +
"""
learning_rate = 1e-4
optimizer = optax.adam(learning_rate)
opt_state = optimizer.init(params)
grads = jax.grad(loss)(params)
for i in range(1000):
    updates, opt_state = optimizer.update(grads, opt_state)
    params = optax.apply_updates(params, updates)
"""
results2 = minimize(loss, params, method='BFGS')
# -

results2.x #params

z = model(results2.x)
#z = model(params)

hist(e1, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

# +
e1_10 = e1[:-7].reshape((-1,10)).mean(axis=-1)
e2_10 = e2[:-7].reshape((-1,10)).mean(axis=-1)

figure(figsize=[12,4])
subplot(121)
p = np.array(results2.x)
# NOTE(review): p[:-2] rescales the first two entries, i.e. the mixture
# weights, not the scales — verify this is the intended sqrt(N) reduction.
p[:-2] /= jnp.sqrt(10)
z = model(p)
hist(e1_10, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
subplot(122)
p = np.array(results2.x)
p[:-2] /= jnp.sqrt(10)
z = model(p)
hist(e2_10, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()
# -

# ### Fit a Mixture of 3 Gaussians

def model(params):
    """Three-component zero-mean Gaussian mixture pdf on coords.

    params = [weight1, weight2, weight3, scale1, scale2, scale3].
    """
    mix1 = params[0]
    mix2 = params[1]
    mix3 = params[2]
    loc1 = 0.#params[3]
    scale1 = params[3]
    loc2 = 0.#params[5]
    scale2 = params[4]
    loc3 = 0.#params[7]
    scale3 = params[5]
    bimix_gauss = tfd.MixtureSameFamily(
        mixture_distribution=tfd.Categorical(probs=[mix1, mix2, mix3]),
        components_distribution=tfd.Normal(
            loc=[loc1, loc2, loc3],            # One for each component.
            scale=[scale1, scale2, scale3]))   # And same here.
    return bimix_gauss.prob(coords)

params = jnp.array([0.6, 0.3, 0.1, .3, .2, .3])

y = model(params)

hist(e1, 256, density=True);
plot(coords, y, label=r'Gaussian Mixture')
legend()

# +
loss = lambda p: jnp.sum((a - model(p))**2)

"""
learning_rate = 1e-4
optimizer = optax.adam(learning_rate)
opt_state = optimizer.init(params)
grads = jax.grad(loss)(params)
for i in range(100):
    updates, opt_state = optimizer.update(grads, opt_state)
    params = optax.apply_updates(params, updates)
"""
results = minimize(loss, params, method='BFGS')
print(results.x, results.nit)
# -

#z = model(params)
z = model(results.x)

hist(e1, 256, density=True);
plot(coords, z, label=r'Gaussian Mixture')
legend()

print('phi_1=',results.x[0])
print('phi_2=',results.x[1])
print('phi_3=',results.x[2])
#print('loc_1=',results.x[3])
print('scale_1=',results.x[3])
#print('loc_2=',results.x[5])
print('scale_2=',results.x[4])
#print('loc_3=',results.x[7])
print('scale_3=',results.x[5])

# Galaxy-count map for the COSMOS field.
n_gal = fits.getdata('../data/COSMOS/cosmos_full_ngal_0.29arcmin360.fits').astype('float32')

print(np.mean(n_gal[n_gal>0]))

imshow(n_gal)
colorbar()

e1_6 = e1[:-3].reshape((-1,6)).mean(axis=-1)
e2_6 = e2[:-3].reshape((-1,6)).mean(axis=-1)

sigma_noise = 0.148  # We have a noise of 0.148

# And, last but not least, we can also include reduced shear
g1 = e1 / (1 - k)
g2 = e2 / (1 - k)

subplot(131)
imshow(e1)
subplot(132)
imshow(g1 + sigma_noise*randn(map_size,map_size))
subplot(133)
mock_cosmos_e1 = mask*(g1 + sigma_noise*randn(map_size,map_size))
m = mock_cosmos_e1 == 0
hist(mock_cosmos_e1[~m], 100);
print(jnp.std(mock_cosmos_e1[~m]))

# +
# We can save for testing this map with added noise
#fits.writeto('../data/ktng/ktng_cosmos_kappa{}.fits'.format(map_size),k)
#fits.writeto('../data/ktng/ktng_cosmos_e{}.fits'.format(map_size),
#             np.stack([mask*(g1 + sigma_noise*randn(map_size,map_size)),
#                       mask*(g2 + sigma_noise*randn(map_size,map_size))],axis=-1))
# -

1+1

e1[:-7].shape[0] / 10
notebooks/KappaTNG_COSMOS_emulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#libraries
import glob, os
import re
import random
import json
from itertools import combinations

INPUT_PATH = ''
OUTPUT_PATH = ''
ARCH = 'skylakeX'

# +
# Events excluded from grouping (currently unused by the functions below).
excludes = ['MEM_LOAD_RETIRED_L3_HIT', 'MEM_LOAD_RETIRED_L3_MISS', 'UOPS_RETIRED_ALL', 'MEM_INST_RETIRED_ALL_STORES', 'MEM_INST_RETIRED_ALL_LOADS']


def shuffleGroups(path='', arch=''):
    """Read 'Allgroups.txt' in *path* and split it into event groups.

    Groups in the file are separated by lines containing '-'.  Returns a
    dict mapping group index (0-based) -> list of event names.  Empty
    arguments fall back to the INPUT_PATH / ARCH module constants.
    Note: the os.chdir side effect of the original script is preserved.
    """
    if path == '':
        path = INPUT_PATH
    if arch == '':
        arch = ARCH
    evDic = {}
    counter = 0
    os.chdir(path)
    # FIX: the file handle was previously opened and never closed.
    with open('Allgroups.txt', 'r') as f:
        lines = f.readlines()
    for l in lines:
        if "-" in l:
            counter = counter + 1  # separator line: start a new group
        else:
            evDic.setdefault(counter, []).append(l.strip('\n'))
    return evDic


def writeShuffledGroups(dic, path=''):
    """Write every ordered pair of groups in *dic* to GroupN.txt files.

    Each output file concatenates the events of two groups, assigning
    consecutive PMC slots; events already carrying an option (e.g.
    ':EDGEDETECT') keep it.  An empty *path* falls back to OUTPUT_PATH.

    FIX: *path* now has a default — the module-level call below passes a
    single argument, which previously raised a TypeError.  File handles are
    also closed via a context manager now (they leaked when a loop body
    raised).
    """
    if path == '':
        path = OUTPUT_PATH
    fileCounter = 0
    os.chdir(path)

    def _write_events(f, events, start_counter):
        # Append *events* to *f* starting at PMC index *start_counter*;
        # return the next free index.
        counter = start_counter
        for val in events:
            split = val.split(':')
            if len(split) > 1:
                f.write(split[0] + ":PMC" + str(counter) + ":EDGEDETECT,")
            else:
                f.write(split[0] + ":PMC" + str(counter) + ",")
            counter = counter + 1
        return counter

    for key in dic:
        Kkey = key + 1
        while Kkey in dic:
            with open('Group' + str(fileCounter) + '.txt', 'w') as f:
                PCMcounter = _write_events(f, dic[key], 0)
                _write_events(f, dic[Kkey], PCMcounter)
            Kkey = Kkey + 1
            fileCounter = fileCounter + 1
# -

sG = shuffleGroups()
print(sG)

writeShuffledGroups(sG)
applications/matrix_example/experiment_helper/LIKWID_group_generator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %pylab
# %matplotlib inline

# ## Slicing, extraction, assignment, indexing

# ### 1. Build a prime mask with the sieve of Eratosthenes:

is_prime = np.ones((100,), dtype=bool)
N_max = int(np.sqrt(len(is_prime)))
for j in range(2, N_max):
    is_prime[2*j::j]=False
# NOTE(review): indices 0 and 1 stay True; the mask is only meaningful for n >= 2.
(is_prime[17], is_prime[18])

# ### 2. Using boolean masks:

np.random.seed(3)
a = np.random.randint(0, 20, 15)
mask = (a % 3 == 0)
extract_from_a = a[mask]
extract_from_a

a[a % 3 == 0] = -1
a

# #### Combining #1 and #2:

a = np.arange(100)
is_prime = np.ones((100,), dtype=bool)
N_max = int(np.sqrt(len(is_prime)))
for j in range(2, N_max):
    is_prime[2*j::j]=False
primes = a[is_prime]
primes

# ### 3. flat:

x = np.reshape(np.arange(25.0), (5,5))
x
x[0]
x.flat[0]       # linear (flattened) indexing
x.flat[12:15]

# ## Operations

# ### 1. Sums along rows and columns

x = np.array([[1,1],[2,2]])
x
x.sum(axis=0)  # sum along columns, i.e. column by column
x.sum(axis=1)  # sum along rows, i.e. row by row

# ### 2. Broadcasting

# #### Pairwise distances between cities

mileposts = np.array([0, 198, 303, 736, 871, 1175, 1475, 1544, 1913, 2448])
distance_array = np.abs(mileposts - mileposts[:, np.newaxis])  # broadcasting
distance_array

# #### Distance from each grid point to the origin (a 5x5 grid here)

x, y = np.arange(5), np.arange(5)[:, np.newaxis]
distance = np.sqrt(x ** 2 + y ** 2)  # broadcasting
distance
plt.pcolor(distance)
plt.colorbar()

# ##### Simplified with numpy.ogrid

x, y = np.ogrid[0:5, 0:5]
x, y
distance = np.sqrt(x ** 2 + y ** 2)
distance

# ##### numpy.mgrid provides the full (already broadcast) matrices

x, y = np.mgrid[0:4, 0:4]
x
y

# ### 3. Matrix multiplication (Python >= 3.5)

x = np.array([[1.0, 2], [3, 2], [3, 4]])
y = np.array([[9.0, 8], [7, 6]])
x @ y
z = np.asmatrix(x)
w = np.asmatrix(y)
z * w
x * y  # This will not work

# ### 4. Matrix power

x = np.array([[2, 2, 2], [4, 5, 6], [7, 8, 9]])
x ** 3  # element-by-element
y = np.asmatrix(x)
y ** 3  # y*y*y

# ### 5. Matrix transpose

x = np.asmatrix(randn(2, 2))
x
x.T

# ## Maskedarray

# ### 1. Handling missing data

x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
x
y = np.ma.array([1, 2, 3, 4], mask=[0, 1, 1, 1])
x + y
np.ma.sqrt([1, -1, 2, -2])  # invalid entries become masked, not NaN

# ## Polynomials

# #### 1. Polynomials

p = np.poly1d([3, 2, -1])
p(0)
p.roots
p.order

# #### Polynomial fitting

x = np.linspace(0, 1, 20)
y = np.cos(x) + 0.3*np.random.rand(20)
p = np.poly1d(np.polyfit(x, y, 3))
t = np.linspace(0, 1, 200)
plt.plot(x, y, 'o', t, p(t), '-')

# ## Basic Functions and Numerical Indexing

# ### 1. Row and column vectors

r_[0:10:1]
r_[0:10:5j]   # imaginary step = number of points (like linspace)
c_[0:5:2]
c_[0:5:4j]

# ### 2. Submatrix

ix_([2, 3], [0, 1, 2])  # (column vector, row vector), from array
x = reshape(arange(25.0), (5, 5))
x
x[ix_([2, 3], [0, 1, 2])]
ogrid[0:3, 0:2:.5]  # from range/linspace

# ### 3. meshgrid

x = arange(5)
y = arange(5)
X, Y = meshgrid(x, y)  # from array, axis=0 first
X
Y
mgrid[0:3, 0:2:.5]  # from range/linspace, but axis=1 first

# ### 4. Rounding

x = randn(3)
x
around(x)      # round to the nearest integer
around(x, 2)
x.round(2)     # The same
floor(x)       # round down
ceil(x)        # round up
notebooks/learn_numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tianyaoZhang/DeepRL/blob/master/week6_outro/bandits.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="totl6gO3TIWp" outputId="8708ad43-19fc-4e8e-fd8e-1bd30c11175b" colab={"base_uri": "https://localhost:8080/"}
# Environment setup: fetch the course helpers (Colab only) and start a
# virtual display on headless machines.
import sys, os
if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'):
    # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/setup_colab.sh -O- | bash
    # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py
    # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week6_outro/submit.py
    # !touch .setup_complete

# This code creates a virtual display to draw game images on.
# It will have no effect if your machine has a monitor.
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
    # !bash ../xvfb start
    os.environ['DISPLAY'] = ':1'

# + id="1up_gGDVTIWs"
from abc import ABCMeta, abstractmethod, abstractproperty
import enum

import numpy as np
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)

import pandas

import matplotlib.pyplot as plt
# %matplotlib inline

# + [markdown] id="iUofFjDJTIWt"
# ## Bernoulli Bandit
#
# We are going to implement several exploration strategies for the simplest problem - the Bernoulli bandit.
#
# The bandit has $K$ actions. An action produces a 1.0 reward $r$ with probability $0 \le \theta_k \le 1$, which is unknown to the agent but fixed over time. The agent's objective is to minimize regret over a fixed number $T$ of action selections:
#
# $$\rho = T\theta^* - \sum_{t=1}^T r_t$$
#
# Where $\theta^* = \max_k\{\theta_k\}$
#
# **Real-world analogy:**
#
# Clinical trials - we have $K$ pills and $T$ ill patients. After taking a pill, a patient is cured with probability $\theta_k$. The task is to find the most efficient pill.
#
# A research on clinical trials - https://arxiv.org/pdf/1507.08025.pdf

# + id="4iZbcWkgTIWu"
class BernoulliBandit:
    """K-armed Bernoulli bandit with fixed, hidden success probabilities."""

    def __init__(self, n_actions=5):
        self._probs = np.random.random(n_actions)

    @property
    def action_count(self):
        return len(self._probs)

    def pull(self, action):
        # Bernoulli draw: reward 1.0 with probability _probs[action].
        if np.any(np.random.random() > self._probs[action]):
            return 0.0
        return 1.0

    def optimal_reward(self):
        """ Used for regret calculation """
        return np.max(self._probs)

    def step(self):
        """ Used in nonstationary version """
        pass

    def reset(self):
        """ Used in nonstationary version """


# + id="AZnMsAtLTIWu"
class AbstractAgent(metaclass=ABCMeta):
    def init_actions(self, n_actions):
        # Per-action success/failure counts — the sufficient statistics of a
        # Beta posterior over each arm's success probability.
        self._successes = np.zeros(n_actions)
        self._failures = np.zeros(n_actions)
        self._total_pulls = 0

    @abstractmethod
    def get_action(self):
        """
        Get current best action
        :rtype: int
        """
        pass

    def update(self, action, reward):
        """
        Observe reward from action and update agent's internal parameters
        :type action: int
        :type reward: int
        """
        self._total_pulls += 1
        if reward == 1:
            self._successes[action] += 1
        else:
            self._failures[action] += 1

    @property
    def name(self):
        return self.__class__.__name__


class RandomAgent(AbstractAgent):
    def get_action(self):
        # Uniformly random arm — baseline with linear regret.
        return np.random.randint(0, len(self._successes))


# + [markdown] id="iw8GH_aeTIWv"
# ### Epsilon-greedy agent
#
# **for** $t = 1,2,...$ **do**
#
# &nbsp;&nbsp; **for** $k = 1,...,K$ **do**
#
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; $\hat\theta_k \leftarrow \alpha_k / (\alpha_k + \beta_k)$
#
# &nbsp;&nbsp; **end for**
#
# &nbsp;&nbsp; $x_t \leftarrow argmax_{k}\hat\theta$ with probability $1 - \epsilon$ or random action with probability $\epsilon$
#
# &nbsp;&nbsp; Apply $x_t$ and observe $r_t$
#
# &nbsp;&nbsp; $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# **end for**
#
# Implement the algorithm above in the cell below:

# + id="k9nfAHPPTIWv"
class EpsilonGreedyAgent(AbstractAgent):
    def __init__(self, epsilon=0.01):
        self._epsilon = epsilon

    def get_action(self):
        # Empirical success rate per arm (0 for arms never pulled).
        theta = np.zeros(len(self._successes))
        for i in range(len(self._successes)):
            plus = self._successes[i] + self._failures[i]
            theta[i] = self._successes[i] / plus if plus != 0 else 0
        # print(theta, np.argmax(theta))
        # Explore with probability epsilon (and on the very first pull).
        if np.random.random() < self._epsilon or self._total_pulls==0:
            return np.random.randint(0, len(self._successes))
        return np.argmax(theta)

    @property
    def name(self):
        return self.__class__.__name__ + "(epsilon={})".format(self._epsilon)


# + [markdown] id="hSfo95NiTIWw"
# ### UCB Agent
# The epsilon-greedy strategy has no preference for actions. It would be better to select among actions that are uncertain or have potential to be optimal. One can come up with the idea of an index for each action that represents optimality and uncertainty at the same time. One efficient way to do it is to use the UCB1 algorithm:
#
# **for** $t = 1,2,...$ **do**
#
# &nbsp;&nbsp; **for** $k = 1,...,K$ **do**
#
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; $w_k \leftarrow \alpha_k / (\alpha_k + \beta_k) + \sqrt{2log\ t \ / \ (\alpha_k + \beta_k)}$
#
# &nbsp;&nbsp; **end for**
#
# &nbsp;&nbsp; $x_t \leftarrow argmax_{k}w$
#
# &nbsp;&nbsp; Apply $x_t$ and observe $r_t$
#
# &nbsp;&nbsp; $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# **end for**
#
# __Note:__ in practice, one can multiply $\sqrt{2log\ t \ / \ (\alpha_k + \beta_k)}$ by some tunable parameter to regulate the agent's optimism and willingness to abandon non-promising actions.
#
# More versions and optimality analysis - https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf

# + id="zN7TqekLoumb" outputId="c14dd3ea-9aed-4d2b-9f8c-c75a6d639569" colab={"base_uri": "https://localhost:8080/"}
# Sanity check: magnitude of the exploration bonus for an unpulled arm.
np.sqrt(2*np.log(2)/1e-10)

# + id="qum5mybUTIWw"
class UCBAgent(AbstractAgent):
    def get_action(self):
        # Pull a random arm first so that log(_total_pulls) is defined.
        if self._total_pulls==0:
            return np.random.randint(0, len(self._successes))
        w = np.zeros(len(self._successes))
        for i in range(len(self._successes)):
            # 1e-10 avoids division by zero for arms never pulled; their huge
            # bonus makes them be tried first.
            plus = self._successes[i] + self._failures[i] + 1e-10
            w[i] = self._successes[i] / plus + np.sqrt(2*np.log(self._total_pulls)/plus)
        # print(w,np.argmax(w))
        return np.argmax(w)


# + [markdown] id="mBDnIp9_TIWx"
# ### Thompson sampling
#
# The UCB1 algorithm does not take into account the actual distribution of rewards. If we know the distribution - we can do much better by using Thompson sampling:
#
# **for** $t = 1,2,...$ **do**
#
# &nbsp;&nbsp; **for** $k = 1,...,K$ **do**
#
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; Sample $\hat\theta_k \sim beta(\alpha_k, \beta_k)$
#
# &nbsp;&nbsp; **end for**
#
# &nbsp;&nbsp; $x_t \leftarrow argmax_{k}\hat\theta$
#
# &nbsp;&nbsp; Apply $x_t$ and observe $r_t$
#
# &nbsp;&nbsp; $(\alpha_{x_t}, \beta_{x_t}) \leftarrow (\alpha_{x_t}, \beta_{x_t}) + (r_t, 1-r_t)$
#
# **end for**
#
#
# More on Thompson Sampling:
# https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf

# + id="7Ge209wcTIWx"
class ThompsonSamplingAgent(AbstractAgent):
    def get_action(self):
        theta = []
        for k in range(len(self._successes)):
            # np.random.beta rejects zero parameters, so nudge by 1e-10 for
            # arms with no observed successes or failures yet.
            if self._successes[k] * self._failures[k]==0:
                theta.append(np.random.beta(self._successes[k] + 1e-10,self._failures[k] + 1e-10))
                # print(self._successes[k],self._failures[k])
                continue
            theta.append(np.random.beta(self._successes[k],self._failures[k]))
        return np.argmax(np.array(theta))


# + id="pQXTQlX_TIWx"
from collections import OrderedDict

def get_regret(env, agents, n_steps=5000, n_trials=50):
    """Return each agent's cumulative regret averaged over n_trials runs."""
    scores = OrderedDict({
        agent.name: [0.0 for step in range(n_steps)] for agent in agents
    })

    for trial in range(n_trials):
        env.reset()

        for a in agents:
            a.init_actions(env.action_count)

        for i in range(n_steps):
            optimal_reward = env.optimal_reward()

            for agent in agents:
                action = agent.get_action()
                reward = env.pull(action)
                agent.update(action, reward)
                scores[agent.name][i] += optimal_reward - reward

            env.step()  # change bandit's state if it is unstationary

    for agent in agents:
        scores[agent.name] = np.cumsum(scores[agent.name]) / n_trials

    return scores


def plot_regret(agents, scores):
    """Plot cumulative regret curves, one per agent."""
    for agent in agents:
        plt.plot(scores[agent.name])

    plt.legend([agent.name for agent in agents])

    plt.ylabel("regret")
    plt.xlabel("steps")

    plt.show()


# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="wDuskaZATIWy" outputId="7543dcd5-17ba-45d2-dd1b-2d8b0846e7a8"
# Uncomment agents
agents = [
    EpsilonGreedyAgent(),
    UCBAgent(),
    ThompsonSamplingAgent()
]

regret = get_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)
plot_regret(agents, regret)

# + outputId="0f180f53-467a-44c5-ffa6-c548a5bd518a" id="_0mw6kguaDf5"
# Uncomment agents
agents = [
    # EpsilonGreedyAgent(),
    # UCBAgent(),
    # ThompsonSamplingAgent()
]

regret = get_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)
plot_regret(agents, regret)

# + [markdown] id="vUGGW0lwTIWy"
# ### Submit to coursera

# + id="aFGIBOyDTIWz" outputId="c365b306-228c-44a1-c19d-45513f12b290" colab={"base_uri": "https://localhost:8080/"}
from submit import submit_bandits

submit_bandits(agents, regret, '<EMAIL>', 'IEoys0sg8fy2jqMN')
week6_outro/bandits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf import numpy as np #for slicing error message import sys, os # + x = np.random.sample((100,2)) print("First 5 pairs in x: \n", x[:5], "\n" ) # make a dataset from a numpy array dataset = tf.data.Dataset.from_tensor_slices(x) iter = dataset.make_one_shot_iterator() el = iter.get_next() with tf.Session() as sess: print(sess.run(el)) # + # using two numpy arrays features, labels = (np.random.sample((100,2)), np.random.sample((100,1))) print("First 5 features in features: \n", features[:5], "\n" ) print("First 5 labels in labels: \n", labels[:5], "\n" ) dataset = tf.data.Dataset.from_tensor_slices((features,labels)) iter = dataset.make_one_shot_iterator() el = iter.get_next() with tf.Session() as sess: print(sess.run(el)) # + # using a tensor dataset = tf.data.Dataset.from_tensor_slices(tf.random_uniform([100, 2])) print("dataset shape: ", np.shape(dataset)) print("dataset: ", (dataset)) iter = dataset.make_initializable_iterator() el = iter.get_next() with tf.Session() as sess: sess.run(iter.initializer) print(sess.run(el)) # + # using a placeholder x = tf.placeholder(tf.float32, shape=[None,2]) dataset = tf.data.Dataset.from_tensor_slices(x) data = np.random.sample((100,2)) print("First 5 pairs in data: \n", data[:5], "\n" ) iter = dataset.make_initializable_iterator() el = iter.get_next() with tf.Session() as sess: sess.run(iter.initializer, feed_dict={ x: data }) print(sess.run(el)) # + # from generator sequence = np.array([[[1]],[[2],[3]],[[3],[4],[5]]]) def generator(): for el in sequence: yield el dataset = tf.data.Dataset().batch(1).from_generator(generator, output_types= tf.int64, output_shapes=(tf.TensorShape([None, 1]))) iter = dataset.make_initializable_iterator() el = iter.get_next() with tf.Session() as sess: 
sess.run(iter.initializer) print(sess.run(el),"\n") print(sess.run(el),"\n") print(sess.run(el),"\n") # + # initializable iterator to switch between data EPOCHS = 10 x, y = tf.placeholder(tf.float32, shape=[None,2]), tf.placeholder(tf.float32, shape=[None,1]) dataset = tf.data.Dataset.from_tensor_slices((x, y)) train_data = (np.random.sample((100,2)), np.random.sample((100,1))) test_data = (np.array([[1,2]]), np.array([[0]])) iter = dataset.make_initializable_iterator() features, labels = iter.get_next() with tf.Session() as sess: # initialise iterator with train data sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1]}) for _ in range(EPOCHS): sess.run([features, labels]) # switch to test data sess.run(iter.initializer, feed_dict={ x: test_data[0], y: test_data[1]}) print(sess.run([features, labels])) # + # Reinitializable iterator to switch between Datasets #EVIDENCE: Can re-read the same data NO_OF_BATCHES = 10 DATA_ITEMS = 10 NO_OF_EPOCHS = 2 # making fake data using numpy train_data = (np.random.sample((DATA_ITEMS,2)), np.random.sample((DATA_ITEMS,1))) test_data = (np.random.sample((10,2)), np.random.sample((10,1))) # create two datasets, one for training and one for test train_dataset = tf.data.Dataset.from_tensor_slices(train_data) test_dataset = tf.data.Dataset.from_tensor_slices(test_data) # create a iterator of the correct shape and type iter = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes) assert(train_dataset.output_types == test_dataset.output_types) assert(train_dataset.output_shapes == test_dataset.output_shapes) features, labels = iter.get_next() # create the initialisation operations train_init_op = iter.make_initializer(train_dataset) test_init_op = iter.make_initializer(test_dataset) with tf.Session() as sess: for epoch in range(NO_OF_EPOCHS): print("Epoch: ", epoch) sess.run(train_init_op) # switch to train dataset for _ in range(NO_OF_BATCHES): print("Train: ",sess.run([features, 
labels])) print() COMMENT_OUT = True if(not COMMENT_OUT): sess.run(train_init_op) # switch to train dataset for _ in range(NO_OF_BATCHES): print("Train: ",sess.run([features, labels])) print() sess.run(test_init_op) # switch to val dataset for _ in range(NO_OF_BATCHES): print("Test: ",sess.run([features, labels])) # + # feedable iterator to switch between iterators EPOCHS = 10 # making fake data using numpy train_data = (np.random.sample((100,2)), np.random.sample((100,1))) test_data = (np.random.sample((10,2)), np.random.sample((10,1))) # create placeholder x, y = tf.placeholder(tf.float32, shape=[None,2]), tf.placeholder(tf.float32, shape=[None,1]) # create two datasets, one for training and one for test train_dataset = tf.data.Dataset.from_tensor_slices((x,y)) test_dataset = tf.data.Dataset.from_tensor_slices((x,y)) # create the iterators from the dataset train_iterator = train_dataset.make_initializable_iterator() test_iterator = test_dataset.make_initializable_iterator() # same as in the doc https://www.tensorflow.org/programmers_guide/datasets#creating_an_iterator handle = tf.placeholder(tf.string, shape=[]) iter = tf.data.Iterator.from_string_handle( handle, train_dataset.output_types, train_dataset.output_shapes) next_elements = iter.get_next() with tf.Session() as sess: train_handle = sess.run(train_iterator.string_handle()) test_handle = sess.run(test_iterator.string_handle()) # initialise iterators. 
In our case we could have used the 'one-shot' iterator instead, # and directly feed the data insted the Dataset.from_tensor_slices function, but this # approach is more general sess.run(train_iterator.initializer, feed_dict={ x: train_data[0], y: train_data[1]}) sess.run(test_iterator.initializer, feed_dict={ x: test_data[0], y: test_data[1]}) for _ in range(EPOCHS): x,y = sess.run(next_elements, feed_dict = {handle: train_handle}) print(x, y) x,y = sess.run(next_elements, feed_dict = {handle: test_handle}) print(x,y) # + # BATCHING BATCH_SIZE = 4 x = np.random.sample((100,2)) print("First 8 pairs in x: \n", x[:8], "\n" ) # make a dataset from a numpy array dataset = tf.data.Dataset.from_tensor_slices(x).batch(BATCH_SIZE) iter = dataset.make_one_shot_iterator() el = iter.get_next() with tf.Session() as sess: print("First 2 batches (each of 4 pairs) in dataset:") print(sess.run(el)); print(sess.run(el)) # - # REPEAT BATCH_SIZE = 4 x = np.array([[1],[2],[3],[4]]) # make a dataset from a numpy array dataset = tf.data.Dataset.from_tensor_slices(x) dataset = dataset.repeat() #<<<<<======================== #^ iter = dataset.make_one_shot_iterator() #^ el = iter.get_next() #^ #^ #with tf.Session() as sess: #^ # this will run forever #==Because of ==========>>>>>> # while True: # print(sess.run(el)) # + # MAP x = np.array([[1],[2],[3],[4]]) # make a dataset from a numpy array dataset = tf.data.Dataset.from_tensor_slices(x) print("Type of dataset before map(): ", dataset) dataset = dataset.map(lambda x: x*2) print("Type of dataset after map(): ", dataset) iter = dataset.make_one_shot_iterator() el = iter.get_next() with tf.Session() as sess: # this will run forever. #RM: NOT correct. There is no dataset = dataset.repeat() here. for i in range(len(x)): try: print("i: ", i, ". 
x[i]: ", sess.run(el)) print("i: ", i, ", x[i]: ", sess.run(el)) print("i: ", i, ", x[i]: ", sess.run(el)) except: """ from: https://stackoverflow.com/questions/1278705/python-when-i-catch-an-exception-how-do-i-get-the-type-file-and-line-number """ exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print("i: ", i, "; Exception thrown as iterator is at the end. \n\tException Type: ", exc_type,\ "; \n\tIn File: ", fname, "; ", \ "\n\tAt Line No: ",exc_tb.tb_lineno) # + # SHUFFLE BATCH_SIZE = 4 x = np.array([[1],[2],[3],[4]]) # make a dataset from a numpy array dataset = tf.data.Dataset.from_tensor_slices(x) dataset = dataset.shuffle(buffer_size=100) dataset = dataset.batch(BATCH_SIZE) iter = dataset.make_one_shot_iterator() el = iter.get_next() with tf.Session() as sess: try: print(sess.run(el), "\n") print(sess.run(el), "\n") except: """ from: https://stackoverflow.com/questions/1278705/python-when-i-catch-an-exception-how-do-i-get-the-type-file-and-line-number """ exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print("Exception thrown as iterator is at the end. 
\n\tException Type: ", exc_type,\ "; \n\tIn File: ", fname, "; ", \ "\n\tAt Line No: ",exc_tb.tb_lineno) #With new session there should not be any problem with tf.Session() as sess: print("But With new session there should not be any problem.\n", sess.run(el), "\n") # + # how to pass the value to a model EPOCHS = 10 BATCH_SIZE = 16 # using two numpy arrays features, labels = (np.array([np.random.sample((100,2))]), np.array([np.random.sample((100,1))])) dataset = tf.data.Dataset.from_tensor_slices((features,labels)).repeat().batch(BATCH_SIZE) iter = dataset.make_one_shot_iterator() x, y = iter.get_next() # make a simple model net = tf.layers.dense(x, 8, activation=tf.tanh) # pass the first value from iter.get_next() as input net = tf.layers.dense(net, 8, activation=tf.tanh) prediction = tf.layers.dense(net, 1, activation=tf.tanh) loss = tf.losses.mean_squared_error(prediction, y) # pass the second value from iter.get_net() as label train_op = tf.train.AdamOptimizer().minimize(loss) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(EPOCHS): _, loss_value = sess.run([train_op, loss]) print("Iter: {}, Loss: {:.4f}".format(i, loss_value)) # + # Wrapping all together -> Switch between train and test set using Initializable iterator NO_OF_EPOCHS = 10 ###################### #RM NoOfTrainingSamples = 100 NoOfTestSamples=20 INPUT_SHAPE = [None, 2] #Input data. 2-D Array of features OUTPUT_SHAPE = [None, 1] #Output Classes. 
1-D array of Labels BATCH_SIZE = 10 ####################### # create a placeholder to dynamically switch between batch sizes batch_size = tf.placeholder(tf.int64) x, y = tf.placeholder(tf.float32, shape=INPUT_SHAPE), \ tf.placeholder(tf.float32, shape=OUTPUT_SHAPE) dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size).repeat() iter = dataset.make_initializable_iterator() features, labels = iter.get_next() ############################ # make a simple model net = tf.layers.dense(features, 8, activation=tf.tanh) # pass the first value from iter.get_next() as input net = tf.layers.dense(net, 8, activation=tf.tanh) prediction = tf.layers.dense(net, 1, activation=tf.tanh) loss = tf.losses.mean_squared_error(prediction, labels) # pass the second value from iter.get_net() as label train_op = tf.train.AdamOptimizer().minimize(loss) No_Of_Batches = NoOfTrainingSamples//BATCH_SIZE with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # using two numpy arrays train_data = (np.random.sample((NoOfTrainingSamples,INPUT_SHAPE[1])), \ np.random.sample((NoOfTrainingSamples,OUTPUT_SHAPE[1]))) test_data = (np.random.sample((NoOfTestSamples,INPUT_SHAPE[1])), \ np.random.sample((NoOfTestSamples,OUTPUT_SHAPE[1]))) # initialise iterator with train data #See https://www.tensorflow.org/api_docs/python/tf/data/Dataset#make_initializable_iterator # The initializer property returns the tf.operation that is run. It will initialize the iterator. 
# See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#initializer sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1], batch_size: BATCH_SIZE}) ListOfFeatureLabelPairs = sess.run(iter.get_next()) print(ListOfFeatureLabelPairs) print("List Of features: ", ListOfFeatureLabelPairs[0]) print("List of labels: ", ListOfFeatureLabelPairs[1]) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # using two numpy arrays train_data = (np.random.sample((NoOfTrainingSamples,INPUT_SHAPE[1])), \ np.random.sample((NoOfTrainingSamples,OUTPUT_SHAPE[1]))) test_data = (np.random.sample((NoOfTestSamples,INPUT_SHAPE[1])), \ np.random.sample((NoOfTestSamples,OUTPUT_SHAPE[1]))) # initialise iterator with train data #See https://www.tensorflow.org/api_docs/python/tf/data/Dataset#make_initializable_iterator # The initializer property returns the tf.operation that is run. It will initialize the iterator. # See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#initializer sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1], batch_size: BATCH_SIZE}) print('Training...') for epoch in range(NO_OF_EPOCHS): tot_loss = 0 for _ in range(No_Of_Batches + 1): _, loss_value = sess.run([train_op, loss]) tot_loss += loss_value print("Epoch No: {}, Loss: {:.4f}".format(epoch, tot_loss / No_Of_Batches)) # initialise iterator with test data sess.run(iter.initializer, feed_dict={ x: test_data[0], y: test_data[1], batch_size: test_data[0].shape[0]}) print('Test Loss: {:4f}'.format(sess.run(loss))) ########################################################## # Repeat above to prove that the iterator does not stall when it comes to the end # initialise iterator with train data sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1], batch_size: BATCH_SIZE}) print('Training Again...') for epoch in range(NO_OF_EPOCHS): tot_loss = 0 for _ in range(No_Of_Batches): _, loss_value = sess.run([train_op, loss]) tot_loss 
+= loss_value print("Epoch No: {}, Loss: {:.4f}".format(epoch, tot_loss / No_Of_Batches)) # initialise iterator with test data sess.run(iter.initializer, feed_dict={ x: test_data[0], y: test_data[1], batch_size: test_data[0].shape[0]}) print('Test Loss: {:4f}'.format(sess.run(loss))) ########################################################## # Repeat yet again to prove that the iterator does not stall when it comes to the end # initialise iterator with train data sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1], batch_size: BATCH_SIZE}) print('Training yet Again...') for epoch in range(NO_OF_EPOCHS): tot_loss = 0 for _ in range(No_Of_Batches): _, loss_value = sess.run([train_op, loss]) tot_loss += loss_value print("Epoch No: {}, Loss: {:.4f}".format(epoch, tot_loss / No_Of_Batches)) # initialise iterator with test data sess.run(iter.initializer, feed_dict={ x: test_data[0], y: test_data[1], batch_size: test_data[0].shape[0]}) print('Test Loss: {:4f}'.format(sess.run(loss))) ########################################################## # Repeat yet again to prove that the iterator does not stall when it comes to the end # initialise iterator with train data sess.run(iter.initializer, feed_dict={ x: train_data[0], y: train_data[1], batch_size: BATCH_SIZE}) print('Training yet Again...') for epoch in range(NO_OF_EPOCHS): tot_loss = 0 for _ in range(No_Of_Batches): _, loss_value = sess.run([train_op, loss]) tot_loss += loss_value print("Epoch No: {}, Loss: {:.4f}".format(epoch, tot_loss / No_Of_Batches)) # initialise iterator with test data sess.run(iter.initializer, feed_dict={ x: test_data[0], y: test_data[1], batch_size: test_data[0].shape[0]}) print('Test Loss: {:4f}'.format(sess.run(loss))) # + # Wrapping all together -> Switch between train and test set using Reinitializable iterator EPOCHS = 10 ###################### #RM NoOfTrainingSamples = 100 NoOfTestSamples=20 ####################### # create a placeholder to dynamically switch between 
batch sizes batch_size = tf.placeholder(tf.int64) x, y = tf.placeholder(tf.float32, shape=[None,2]), tf.placeholder(tf.float32, shape=[None,1]) train_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(batch_size).repeat() test_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(batch_size) # always batch even #if you want to one shot it # using two numpy arrays train_data = (np.random.sample((NoOfTrainingSamples,2)), np.random.sample((NoOfTrainingSamples,1))) test_data = (np.random.sample((NoOfTestSamples,2)), np.random.sample((NoOfTestSamples,1))) # create a iterator of the correct shape and type iter = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes) features, labels = iter.get_next() # create the initialisation operations train_init_op = iter.make_initializer(train_dataset) test_init_op = iter.make_initializer(test_dataset) # make a simple model net = tf.layers.dense(features, 8, activation=tf.tanh) # pass the first value from iter.get_next() as input net = tf.layers.dense(net, 8, activation=tf.tanh) prediction = tf.layers.dense(net, 1, activation=tf.tanh) loss = tf.losses.mean_squared_error(prediction, labels) # pass the second value from iter.get_net() as label train_op = tf.train.AdamOptimizer().minimize(loss) n_batches = int(NoOfTrainingSamples/BATCH_SIZE) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # initialise iterator with train data sess.run(train_init_op, feed_dict = {x : train_data[0], y: train_data[1], batch_size: 16}) print('Training...') for i in range(EPOCHS): tot_loss = 0 for _ in range(n_batches): _, loss_value = sess.run([train_op, loss]) tot_loss += loss_value print("Iter: {}, Loss: {:.4f}".format(i, tot_loss / n_batches)) # initialise iterator with test data sess.run(test_init_op, feed_dict = {x : test_data[0], y: test_data[1], batch_size:len(test_data[0])}) print('Test Loss: {:4f}'.format(sess.run(loss))) # + # load a csv CSV_PATH = './tweets.csv' dataset = 
tf.contrib.data.make_csv_dataset(CSV_PATH, batch_size=32, shuffle=False) #RM: Original code default value of shuffle (True) was used. With shuffle set to False we read the CSV #file, row-by-row. iter = dataset.make_one_shot_iterator() next = iter.get_next() #print("iter.get_next(): ", next, "\n") # next is a dict with key=columns names and value=column data ################################################# # inputs, labels = next['text'], next['sentiment'] ################################################# with tf.Session() as sess: print(sess.run([inputs,labels]), "\n") print(sess.run([inputs,labels]), "\n") # - log_time = {} # copied form https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d def how_much(method): def timed(*args, **kw): ts = time.time() result = method(*args, **kw) te = time.time() if 'log_time' in kw: name = kw.get('log_name', method.__name__) kw['log_time'][name] = (te - ts) return result return timed # + # benchmark import time DATA_SIZE = 5000 DATA_SHAPE = ((32,32),(20,)) BATCH_SIZE = 64 N_BATCHES = DATA_SIZE // BATCH_SIZE EPOCHS = 10 test_size = (DATA_SIZE//100)*20 DUMMY = -1 train_shape = (DATA_SHAPE) print("DATA_SHAPE:",train_shape) train_shape = (DATA_SHAPE[0]) print("DATA_SHAPE[0]:",train_shape) train_shape = ((DUMMY, *DATA_SHAPE)) print("DUMMY, *DATA_SHAPE:",train_shape) train_shape = ((DUMMY, (*DATA_SHAPE))[0]) print("DUMMY, (*DATA_SHAPE))[0]:",train_shape) train_shape = ((DUMMY, (*DATA_SHAPE))) print("DUMMY, (*DATA_SHAPE):",train_shape, "\n") train_shape = ((DATA_SIZE, *DATA_SHAPE[0]),(DATA_SIZE, *DATA_SHAPE[1])) test_shape = ((test_size, *DATA_SHAPE[0]),(test_size, *DATA_SHAPE[1])) print(train_shape, test_shape) # + train_data = (np.random.sample(train_shape[0]), np.random.sample(train_shape[1])) test_data = (np.random.sample(test_shape[0]), np.random.sample(test_shape[1])) # used to keep track of the methodds log_time = {} tf.reset_default_graph() #sess = tf.InteractiveSession() #This 
needs to be closed below. Replaced it with "with tf.Session() as sess" in #each of the functions input_shape = [None, *DATA_SHAPE[0]] # [None, 64, 64, 3] output_shape = [None,*DATA_SHAPE[1]] # [None, 20] print("input_shape: {} output_shape: {}".format(input_shape, output_shape)) x, y = tf.placeholder(tf.float32, shape=input_shape), tf.placeholder(tf.float32, shape=output_shape) @how_much def one_shot(**kwargs): print('\none_shot:') #The datasets - both train and test - are loaded #Two data sets. Two iterators. One for each # #From: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#make_one_shot_iterator #Note: The returned iterator will be ***initialized automatically***. #A "one-shot" iterator ***does not*** currently support re-initialization. # train_dataset = tf.data.Dataset.from_tensor_slices(train_data).batch(BATCH_SIZE).repeat() train_iter = train_dataset.make_one_shot_iterator() train_element = train_iter.get_next() test_dataset = tf.data.Dataset.from_tensor_slices(test_data).batch(BATCH_SIZE).repeat() test_iter = test_dataset.make_one_shot_iterator() test_element = test_iter.get_next() with tf.Session() as sess: for epoch_no in range(EPOCHS): print(epoch_no, end="") for batch_no in range(N_BATCHES): element = sess.run(train_element) training_data_batch = element[0] if(0 == epoch_no): assert(BATCH_SIZE == np.size(training_data_batch, 0)) else: if((epoch_no - 1) == batch_no): assert((DATA_SIZE % BATCH_SIZE) == np.size(training_data_batch, 0)) #N_BATCHES (given by DATA_SIZE // BATCH_SIZE) is 78. #78 batches are read every epoch. That leaves 8 slices at the end of the epoch_no 0 #Epoch 1 batch_no 0 reads only 8 when the iterator cycles back. In the remaining 77, #(77 * BATCH_SIZE) = 4928 slices would be read. Leaving (5000 - 4928) 72 slices. #So Epoch 2 will read 64 slices in batch_no 0 and then 8 slices in batch_no 1 before #the iterator goes back. 
So we have a batch of 8 slices rippling forward with each epoch #if (2 == epoch_no): else: assert(BATCH_SIZE == np.size(training_data_batch,0)) for _ in range(N_BATCHES): sess.run(test_element) @how_much def initialisable(**kwargs): print('\ninitialisable:') #x and y are placeholders. They have to be loaded. The types and shapes are known #not the values. #The same placeholder is used both for train and for test data. #Hence the graph needs only one dataset node and one iterator dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(BATCH_SIZE).repeat() iter = dataset.make_initializable_iterator() iter_init = iter.initializer elements = iter.get_next() with tf.Session() as sess: for i in range(EPOCHS): print(i, end="") #Initialize to run dataset node that has been loaded with training data sess.run(iter_init, feed_dict={ x: train_data[0], y: train_data[1]}) for _ in range(N_BATCHES): sess.run(elements) #Re-initialize to run the ***same dataset node*** *** but with test data *** sess.run(iter_init, feed_dict={ x: test_data[0], y: test_data[1]}) for _ in range(N_BATCHES): sess.run(elements) @how_much def reinitializable(**kwargs): print('\nreinitializable:') # create two datasets, one for training and one for test train_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(BATCH_SIZE).repeat() test_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(BATCH_SIZE).repeat() # create ***an*** iterator of the correct shape and type iter = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes) elements = iter.get_next() # create the nodes to initialize the iterators to iterate over the train_dataset and the test_dataset # See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#make_initializer. 
#The same iterator node (iter) created above is re-initialized train_init_op = iter.make_initializer(train_dataset) test_init_op = iter.make_initializer(test_dataset) with tf.Session() as sess: for i in range(EPOCHS): print(i, end="") #Load the placeholders with train data and train label #Iniitialize the iterator to ***iterate over training data*** sess.run(train_init_op, feed_dict={ x: train_data[0], y: train_data[1]}) for _ in range(N_BATCHES): sess.run(elements) #Load the placeholders with test data and test label #RE-INITIALIZE the iter to ***now iterate over test data*** sess.run(test_init_op, feed_dict={ x: test_data[0], y: test_data[1]}) for _ in range(N_BATCHES): sess.run(elements) @how_much def feedable(**kwargs): print('\nfeedable:') # create two datasets, one for training and one for test train_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(BATCH_SIZE).repeat() test_dataset = tf.data.Dataset.from_tensor_slices((x,y)).batch(BATCH_SIZE).repeat() # create the iterators from the dataset train_iterator = train_dataset.make_initializable_iterator() test_iterator = test_dataset.make_initializable_iterator() handle = tf.placeholder(tf.string, shape=[]) #See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#from_string_handle iter = tf.data.Iterator.from_string_handle( handle, \ train_dataset.output_types, \ train_dataset.output_shapes) elements = iter.get_next() make_train_string_handle = train_iterator.string_handle() make_test_string_handle = test_iterator.string_handle() init_train_iterator = train_iterator.initializer init_test_iterator = test_iterator.initializer with tf.Session() as sess: #See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#string_handle train_string_handle = sess.run(make_train_string_handle) test_string_handle = sess.run(make_test_string_handle) #See https://www.tensorflow.org/api_docs/python/tf/data/Iterator#initializer sess.run(init_train_iterator, feed_dict={ x: train_data[0], y: train_data[1]}) 
sess.run(init_test_iterator, feed_dict={ x: test_data[0], y: test_data[1]}) for i in range(EPOCHS): print(i, end="") for _ in range(N_BATCHES): sess.run(elements, feed_dict={handle: train_string_handle}) for _ in range(N_BATCHES): sess.run(elements, feed_dict={handle: test_string_handle}) print("") one_shot(log_time=log_time) print("") initialisable(log_time=log_time) print("") reinitializable(log_time=log_time) print("") feedable(log_time=log_time) sorted((value,key) for (key,value) in log_time.items()) # -
dataset_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import random

# +
# Build a list of squares with a basic for loop.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
squares = []
for i in numbers:
    square = i**2
    squares.append(square)
    print(square)

print(squares)

# +
# Die-guessing game: keep prompting until the user's guess matches the roll.
value = 0
guess = 1
while guess != value:
    guess = int(input("Guess the value of the die:"))
    value = random.randint(1, 6)
    if guess == value:
        print("Congratulations you did it!")
    else:
        print("Sorry, that is incorrect")
# -

# # While Loop Integration Example

# +
import numpy as np


def while_int(g, a, b, dx):
    """Approximate the integral of g over [a, b] with a left Riemann sum.

    Parameters
    ----------
    g : callable
        Integrand; called with a scalar x.
    a, b : float
        Integration limits (assumes a < b).
    dx : float
        Step width; smaller values give a better approximation.

    Returns
    -------
    float
        The accumulated sum of g(x) * dx over the grid.
    """
    x = a
    area = 0
    while x < b:
        y = g(x)
        area += y * dx
        x += dx
    return area


print(while_int(lambda x: np.cos(x), 0, np.pi / 2, 0.00001))
# -

# ## Derivative Example
#
# Lets do some basic derivatives:

# +
# NOTE: `scipy.misc.derivative` was removed in SciPy 1.12, so this notebook now
# provides a drop-in central-difference replacement with the same signature.
import numpy as np
import matplotlib.pyplot as plt


def derivative(func, x0, dx=1e-6, n=1):
    """First derivative of *func* at x0 via the symmetric (central) difference.

    Only n=1 is supported (all this demo ever used). x0 may be a scalar
    or a NumPy array, matching the old scipy.misc.derivative behaviour.
    """
    if n != 1:
        raise NotImplementedError("only the first derivative (n=1) is supported")
    return (func(x0 + dx) - func(x0 - dx)) / (2.0 * dx)


def f(x):  # our generic function
    return np.sin(x)


# take the derivative at a specific point
y_at_zero = derivative(f, 0, dx=1e-6, n=1)
print("dy/dx @ x=0 is: ", y_at_zero)

# take the derivative over a range of x values
x = np.linspace(-np.pi, np.pi, 100)
y_derivative = derivative(f, x, dx=1e-6, n=1)

# plot
plt.figure(figsize=(10, 6))
plt.plot(x, f(x), 'r-', label='f(x)')
plt.plot(x, y_derivative, 'b-', label="f'(x)")
plt.legend()
plt.show()
# -

# ## Integration Example
#
# We will do some simple integration!

# +
# NOTE: `trapz` and `simps` were removed in SciPy 1.14; the modern names are
# `trapezoid` and `simpson` (numerically identical).
from scipy.integrate import quad, trapezoid, simpson

# gaussian quadrature: returns (value, estimated absolute error)
y_int_quad = quad(f, -np.pi, np.pi)

# the grid-based methods need the function sampled on a domain
x_domain = np.linspace(-np.pi, np.pi, 1000)

# trapezoid method
y_int_trapz = trapezoid(f(x_domain), x_domain)

# simpson method
y_int_simps = simpson(f(x_domain), x_domain)

print(y_int_quad)
print(y_int_trapz)
print(y_int_simps)
# -
Demos/Loops/While_Loop_Demos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exercise 5.05: K-NN Classification

import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.model_selection import train_test_split

# Read in the breast cancer diagnosis dataset
df = pd.read_csv('../Datasets/breast-cancer-data.csv')
df.head()

# We use mean radius and worst radius as the two classification features for
# this exercise. First, visualise the measurements coloured by class.

# +
# Per-class plotting style: green circles for benign, red crosses for malignant.
marker_styles = {
    'benign': {'marker': 'o', 'facecolor': 'g', 'edgecolor': 'g'},
    'malignant': {'marker': 'x', 'facecolor': 'r', 'edgecolor': 'r'},
}

plt.figure(figsize=(10, 7))
for diagnosis_label, subset in df.groupby('diagnosis'):
    style = marker_styles[diagnosis_label]
    plt.scatter(
        subset['mean radius'],
        subset['worst radius'],
        label=diagnosis_label,
        marker=style['marker'],
        facecolors=style['facecolor'],
        edgecolor=style['edgecolor'],
    )
plt.title('Breast Cancer Diagnosis Classification Mean Radius vs Worst Radius');
plt.xlabel('Mean Radius');
plt.ylabel('Worst Radius');
plt.legend();
# -

# Hold out 20% of the data as a validation set so the model can be evaluated
# impartially later.

train_X, valid_X, train_y, valid_y = train_test_split(
    df[['mean radius', 'worst radius']],
    df.diagnosis,
    test_size=0.2,
    random_state=123,
)

# Fit a K-NN classifier with 3 nearest neighbours. K-NN works directly with
# string class labels, so the diagnosis column needs no encoding.

model = KNN(n_neighbors=3)
model.fit(X=train_X, y=train_y)

# Validation-set accuracy:
model.score(X=valid_X, y=valid_y)
Chapter05/Exercise5.05/Exercise5.05.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import numpy.linalg as npl

# Problem size: fit a linear map from 3 features to 2 outputs on 100 samples.
trainset_size = 100
num_features = 3
output_size = 2

# Columns are samples: x_arr is (features, samples), y_arr is (outputs, samples).
x_arr = np.random.randn(num_features, trainset_size) * 10
y_arr = np.random.randn(output_size, trainset_size)
print(x_arr.shape, y_arr.shape)

# +
# Batch least squares over the full training set:
#   theta = Y X^T (X X^T)^{-1}
P = npl.inv(x_arr @ x_arr.T)
theta = (P @ x_arr @ y_arr.T).T
print(theta, theta.shape)

y_hat = theta @ x_arr
print(y_hat.shape)

# +
# Same fit, but holding back the last sample (the "old" estimate theta_0).
x_arr_0 = x_arr[:, :trainset_size - 1]
y_arr_0 = y_arr[:, :trainset_size - 1]
print(x_arr_0.shape, y_arr_0.shape)

P_0 = npl.inv(x_arr_0 @ x_arr_0.T)
theta_0 = (P_0 @ x_arr_0 @ y_arr_0.T).T
print(theta_0, theta.shape)
theta_0 @ x_arr_0

# +
# Choose next sample (the one we held back), kept as a column vector.
print(x_arr.shape)
x_next = x_arr[:, trainset_size - 1:trainset_size]
y_next = y_arr[:, trainset_size - 1:trainset_size]
x_next.shape

# +
# Recursive least-squares update: fold the held-back sample into theta_0.
# Algebraically this reproduces the full-batch solution theta exactly.
P = npl.inv(x_arr @ x_arr.T)
theta_1 = theta_0 + (y_next - theta_0 @ x_next) @ x_next.T @ P_0 / (1 + x_next.T @ P_0 @ x_next)
print(theta_1)
# -

print(theta_1)
print(theta)

# ## mu P x

print(x_arr.shape)
mu = x_arr.mean(axis=1, keepdims=True)
num_features - mu.T @ P @ mu
np.diag(x_arr.T @ npl.inv(P) @ x_arr)
mu.shape
notebooks/recursive_least_sqaures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.9 64-bit (''site_similarity'': conda)'
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import sys
import os

# Make the project root importable so the local helper modules resolve.
sys.path.insert(0, '../../../')

from notebooks.utils import _ALEXA_DATA_PATH, load_node_features, load_level_data, create_audience_overlap_nodes, export_model_as_feature
from train import run_experiment
# -

# # Load audience overlap edges for level 3

# +
# Crawl depth of the audience-overlap graph to load.
level = 3
audience_overlap_sites = load_level_data(os.path.join(_ALEXA_DATA_PATH, 'corpus_2020_audience_overlap_sites_scrapping_result.json'), level=level)
# NOTE(review): presumably (site, overlapping_site) pairs, one per edge --
# confirm against notebooks.utils.create_audience_overlap_nodes.
audience_overlap_sites_NODES = create_audience_overlap_nodes(audience_overlap_sites)
print(audience_overlap_sites_NODES[:5])

# +
# Edge list as a DataFrame in the (source, target) layout StellarGraph expects.
edge_df = pd.DataFrame(audience_overlap_sites_NODES, columns=['source', 'target'])
edge_df.head()
# -

# ### Find all unique nodes in edges

nodes_in_edges = list(set(edge_df.source.unique().tolist() + edge_df.target.unique().tolist()))
print('Number of unique nodes in edges:', len(nodes_in_edges), 'Sample:', nodes_in_edges[:5])

# ### 1. Load all node features

node_features_df = load_node_features()
node_features_df = node_features_df.set_index('site')
node_features_df.head()

# # Subset node_features

# Keep only the nodes that actually appear in the edge list.
node_features_df = node_features_df.loc[nodes_in_edges]
node_features_df.info()

# ### 2. Fill all missing alexa_rank and total_sites_linking_in with 0

node_features_df.alexa_rank = node_features_df.alexa_rank.fillna(0)
node_features_df.total_sites_linking_in = node_features_df.total_sites_linking_in.fillna(0)
node_features_df.info()

# ### 3. Normalizing features

# +
import math

# Alexa rank: lower is better, so take the reciprocal (0 = "missing" maps to 0).
node_features_df['normalized_alexa_rank'] = node_features_df['alexa_rank'].apply(lambda x: 1/x if x else 0)
# Sites-linking-in is heavy-tailed, so compress it with log2 (0 stays 0).
node_features_df['normalized_total_sites_linked_in'] = node_features_df['total_sites_linking_in'].apply(lambda x: math.log2(x) if x else 0)
# -

# # Create Graph

# +
import stellargraph as sg

# Attributed graph: the two normalized columns become the node feature vectors.
G = sg.StellarGraph(nodes=node_features_df.loc[nodes_in_edges, ['normalized_alexa_rank', 'normalized_total_sites_linked_in']], edges=edge_df)
print(G.info())
# -

# # Unsupervised Attrib2Vec

# +
from stellargraph.mapper import Attri2VecLinkGenerator, Attri2VecNodeGenerator
from stellargraph.layer import Attri2Vec, link_classification
from stellargraph.data import UnsupervisedSampler
from tensorflow import keras

# 1. Specify the other optional parameter values: root nodes, the number of walks to take per node, the length of each walk, and random seed.
nodes = list(G.nodes())
number_of_walks = 1
length = 5

# 2. Create the UnsupervisedSampler instance with the relevant parameters passed to it.
unsupervised_samples = UnsupervisedSampler(G, nodes=nodes, length=length, number_of_walks=number_of_walks)

# 3. Create a node pair generator:
batch_size = 50
epochs = 4
num_samples = [10, 5]  # NOTE(review): unused below -- looks like a leftover from a GraphSAGE-style example

generator = Attri2VecLinkGenerator(G, batch_size)
train_gen = generator.flow(unsupervised_samples)

layer_sizes = [128]  # embedding dimension of the single attri2vec layer
attri2vec = Attri2Vec(layer_sizes=layer_sizes, generator=generator, bias=False, normalize=None)

# Build the model and expose input and output sockets of attri2vec, for node pair inputs:
x_inp, x_out = attri2vec.in_out_tensors()

# Binary link-prediction head over node-pair embeddings ("ip" = inner product).
prediction = link_classification(output_dim=1, output_act="sigmoid", edge_embedding_method="ip")(x_out)

model = keras.Model(inputs=x_inp, outputs=prediction)

model.compile(
    optimizer=keras.optimizers.Adam(lr=1e-3),  # `lr` is the legacy Keras argument name (TF 1.x / early 2.x)
    loss=keras.losses.binary_crossentropy,
    metrics=[keras.metrics.binary_accuracy],
)
# -

history = model.fit(train_gen, epochs=epochs, verbose=2, use_multiprocessing=False, workers=1, shuffle=True)

# +
# The source-node half of the trained link model doubles as the node-embedding model.
x_inp_src = x_inp[0]
x_out_src = x_out[0]
embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)

node_gen = Attri2VecNodeGenerator(G, batch_size).flow(node_features_df.index.tolist())
node_embeddings = embedding_model.predict(node_gen, workers=1, verbose=1)

# site -> embedding-vector mapping
embeddings_wv = dict(zip(node_features_df.index.tolist(), node_embeddings.tolist()))
print('Sample:', embeddings_wv['crooked.com'][:10], len(embeddings_wv['crooked.com']))
# -

# # Export embeddings as feature

export_model_as_feature(embeddings_wv, f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}')

run_experiment(features=f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}')

# +
level = 3
epochs = 4
run_experiment(features=f'attrib2vec_audience_overlap_level_{level}_epochs_{epochs}', task='bias')
# -
notebooks/graph_algos/attrib2vec/corpus 2020 audience_overlap lvl data 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pgayn # language: python # name: pgayn # --- # ## Configuration for Colab # + import sys IN_COLAB = "google.colab" in sys.modules if IN_COLAB: # !apt install python-opengl # !apt install ffmpeg # !apt install xvfb # !pip install pyvirtualdisplay from pyvirtualdisplay import Display # Start virtual display dis = Display(visible=0, size=(600, 400)) dis.start() # - # # 03. DDPG # # [<NAME> et al., "Continuous control with deep reinforcement learning." arXiv preprint arXiv:1509.02971, 2015.](https://arxiv.org/pdf/1509.02971.pdf) # # Deep Q Network(DQN)([Mnih et al., 2013;2015](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)) algorithm is combined advances in deep learning with reinforcement learning. However, while DQN solves problems with high-dimentional observation spaces, it can only handle discrete and low-dimentional action spaces because of using greedy policy. For learning in high-dimentional and continous action spaces, the authors combine the actor-critic approach with insights from the recent success of DQN. Deep DPG(DDPG) is based on the deterministic policy gradient(DPG) algorithm ([Silver et al., 2014](http://proceedings.mlr.press/v32/silver14.pdf)). # # ### Deterministic policy gradient # The DPG algorithm maintains a parameterized actor function $\mu(s|\theta^{\mu})$ which specifies the current policy by deterministically mapping states to a specific action. The critic $Q(s, a)$ is learned using the Bellman equation as in Q-learning. 
The actor is updated by applying the chain rule to the expected return from the start distribution $J$ with respect to the actor parameters
# # ### Exploration for continuous action space # An advantage of offpolicies algorithms such as DDPG is that we can treat the problem of exploration independently from the learning algorithm. The authors construct an exploration policy $\mu'$ by adding noise sampled from a noise process $\mathcal{N}$ to the actor policy # # $$ # \mu'(s_t) = \mu(s_t|\theta^{\mu}_t) + \mathcal{N} # $$ # # $\mathcal{N}$ can be chosen to suit the environment. The authors used **Ornstein-Uhlenbeck process** to generate temporally correlated exploration. # ## Import modules # + import copy import random from typing import Dict, List, Tuple import gym import matplotlib.pyplot as plt import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from IPython.display import clear_output # - # ## Set random seed # + if torch.backends.cudnn.enabled: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True seed = 777 torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) # - # ## Replay buffer # Typically, people implement replay buffers with one of the following three data structures: # # - collections.deque # - list # - numpy.ndarray # # **deque** is very easy to handle once you initialize its maximum length (e.g. deque(maxlen=buffer_size)). However, the indexing operation of deque gets terribly slow as it grows up because it is [internally doubly linked list](https://wiki.python.org/moin/TimeComplexity#collections.deque). On the other hands, **list** is an array, so it is relatively faster than deque when you sample batches at every step. Its amortized cost of Get item is [O(1)](https://wiki.python.org/moin/TimeComplexity#list). # # Last but not least, let's see **numpy.ndarray**. 
numpy.ndarray is even faster than list due to the fact that it is [a homogeneous array of fixed-size items](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray), so you can get the benefits of [locality of reference](https://en.wikipedia.org/wiki/Locality_of_reference), . Whereas list is an array of pointers to objects, even when all of them are of the same type. # # Here, we are going to implement a replay buffer using numpy.ndarray. # # Reference: # - [OpenAI spinning-up](https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py#L10) # - [rainbow-is-all-you-need](https://render.githubusercontent.com/view/ipynb?commit=032d11277cf2436853478a69ca5a4aba03202598&enc_url=68747470733a2f2f7261772e67697468756275736572636f6e74656e742e636f6d2f437572742d5061726b2f7261696e626f772d69732d616c6c2d796f752d6e6565642f303332643131323737636632343336383533343738613639636135613461626130333230323539382f30312e64716e2e6970796e62&nwo=Curt-Park%2Frainbow-is-all-you-need&path=01.dqn.ipynb&repository_id=191133946&repository_type=Repository#Replay-buffer) class ReplayBuffer: """A simple numpy replay buffer.""" def __init__(self, obs_dim: int, size: int, batch_size: int = 32): """Initializate.""" self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32) self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32) self.acts_buf = np.zeros([size], dtype=np.float32) self.rews_buf = np.zeros([size], dtype=np.float32) self.done_buf = np.zeros([size], dtype=np.float32) self.max_size, self.batch_size = size, batch_size self.ptr, self.size, = 0, 0 def store( self, obs: np.ndarray, act: np.ndarray, rew: float, next_obs: np.ndarray, done: bool, ): """Store the transition in buffer.""" self.obs_buf[self.ptr] = obs self.next_obs_buf[self.ptr] = next_obs self.acts_buf[self.ptr] = act self.rews_buf[self.ptr] = rew self.done_buf[self.ptr] = done self.ptr = (self.ptr + 1) % self.max_size self.size = min(self.size + 1, self.max_size) def sample_batch(self) -> 
Dict[str, np.ndarray]: """Randomly sample a batch of experiences from memory.""" idxs = np.random.choice(self.size, size=self.batch_size, replace=False) return dict(obs=self.obs_buf[idxs], next_obs=self.next_obs_buf[idxs], acts=self.acts_buf[idxs], rews=self.rews_buf[idxs], done=self.done_buf[idxs]) def __len__(self) -> int: return self.size # ## OU Noise # **Ornstein-Uhlenbeck** process generates temporally correlated exploration, and it effectively copes with physical control problems of inertia. # # $$ # dx_t = \theta(\mu - x_t) dt + \sigma dW_t # $$ # # Reference: # - [Udacity github](https://github.com/udacity/deep-reinforcement-learning/blob/master/ddpg-pendulum/ddpg_agent.py) # - [Wiki](https://en.wikipedia.org/wiki/Ornstein%E2%80%93Uhlenbeck_process) class OUNoise: """Ornstein-Uhlenbeck process. Taken from Udacity deep-reinforcement-learning github repository: https://github.com/udacity/deep-reinforcement-learning/blob/master/ ddpg-pendulum/ddpg_agent.py """ def __init__( self, size: int, mu: float = 0.0, theta: float = 0.15, sigma: float = 0.2, ): """Initialize parameters and noise process.""" self.state = np.float64(0.0) self.mu = mu * np.ones(size) self.theta = theta self.sigma = sigma self.reset() def reset(self): """Reset the internal state (= noise) to mean (mu).""" self.state = copy.copy(self.mu) def sample(self) -> np.ndarray: """Update internal state and return it as a noise sample.""" x = self.state dx = self.theta * (self.mu - x) + self.sigma * np.array( [random.random() for _ in range(len(x))] ) self.state = x + dx return self.state # ## Network # We are going to use two separated networks for actor and critic. The actor network has three fully connected layers and three non-linearity functions, **ReLU** for hidden layers and **tanh** for the output layer. On the other hand, the critic network has three fully connected layers, but it used two activation functions for hidden layers **ReLU**. 
Plus, its input sizes of critic network are sum of state sizes and action sizes. One thing to note is that we initialize the final layer's weights and biases so that they are **uniformly distributed.** # + class Actor(nn.Module): def __init__( self, in_dim: int, out_dim: int, init_w: float = 3e-3, ): """Initialize.""" super(Actor, self).__init__() self.hidden1 = nn.Linear(in_dim, 128) self.hidden2 = nn.Linear(128, 128) self.out = nn.Linear(128, out_dim) self.out.weight.data.uniform_(-init_w, init_w) self.out.bias.data.uniform_(-init_w, init_w) def forward(self, state: torch.Tensor) -> torch.Tensor: """Forward method implementation.""" x = F.relu(self.hidden1(state)) x = F.relu(self.hidden2(x)) action = self.out(x).tanh() return action class Critic(nn.Module): def __init__( self, in_dim: int, init_w: float = 3e-3, ): """Initialize.""" super(Critic, self).__init__() self.hidden1 = nn.Linear(in_dim, 128) self.hidden2 = nn.Linear(128, 128) self.out = nn.Linear(128, 1) self.out.weight.data.uniform_(-init_w, init_w) self.out.bias.data.uniform_(-init_w, init_w) def forward( self, state: torch.Tensor, action: torch.Tensor ) -> torch.Tensor: """Forward method implementation.""" x = torch.cat((state, action), dim=-1) x = F.relu(self.hidden1(x)) x = F.relu(self.hidden2(x)) value = self.out(x) return value # - # ## DDPG Agent # Here is a summary of DDPGAgent class. # # | Method | Note | # |--- |--- | # |select_action | select an action from the input state. | # |step | take an action and return the response of the env. | # |update_model | update the model by gradient descent. | # |train | train the agent during num_frames. | # |test | test the agent (1 episode). | # |\_target_soft_update| soft update from the local model to the target model.| # |\_plot | plot the training progresses. | class DDPGAgent: """DDPGAgent interacting with environment. 
Attribute: env (gym.Env): openAI Gym environment actor (nn.Module): target actor model to select actions actor_target (nn.Module): actor model to predict next actions actor_optimizer (Optimizer): optimizer for training actor critic (nn.Module): critic model to predict state values critic_target (nn.Module): target critic model to predict state values critic_optimizer (Optimizer): optimizer for training critic memory (ReplayBuffer): replay memory to store transitions batch_size (int): batch size for sampling gamma (float): discount factor tau (float): parameter for soft target update initial_random_steps (int): initial random action steps noise (OUNoise): noise generator for exploration device (torch.device): cpu / gpu transition (list): temporory storage for the recent transition total_step (int): total step numbers is_test (bool): flag to show the current mode (train / test) """ def __init__( self, env: gym.Env, memory_size: int, batch_size: int, ou_noise_theta: float, ou_noise_sigma: float, gamma: float = 0.99, tau: float = 5e-3, initial_random_steps: int = 1e4, ): """Initialize.""" obs_dim = env.observation_space.shape[0] action_dim = env.action_space.shape[0] self.env = env self.memory = ReplayBuffer(obs_dim, memory_size, batch_size) self.batch_size = batch_size self.gamma = gamma self.tau = tau self.initial_random_steps = initial_random_steps # noise self.noise = OUNoise( action_dim, theta=ou_noise_theta, sigma=ou_noise_sigma, ) # device: cpu / gpu self.device = torch.device( "cuda" if torch.cuda.is_available() else "cpu" ) print(self.device) # networks self.actor = Actor(obs_dim, action_dim).to(self.device) self.actor_target = Actor(obs_dim, action_dim).to(self.device) self.actor_target.load_state_dict(self.actor.state_dict()) self.critic = Critic(obs_dim + action_dim).to(self.device) self.critic_target = Critic(obs_dim + action_dim).to(self.device) self.critic_target.load_state_dict(self.critic.state_dict()) # optimizer self.actor_optimizer = 
optim.Adam(self.actor.parameters(), lr=3e-4) self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3) # transition to store in memory self.transition = list() # total steps count self.total_step = 0 # mode: train / test self.is_test = False def select_action(self, state: np.ndarray) -> np.ndarray: """Select an action from the input state.""" # if initial random action should be conducted if self.total_step < self.initial_random_steps and not self.is_test: selected_action = self.env.action_space.sample() else: selected_action = self.actor( torch.FloatTensor(state).to(self.device) ).detach().cpu().numpy() # add noise for exploration during training if not self.is_test: noise = self.noise.sample() selected_action = np.clip(selected_action + noise, -1.0, 1.0) self.transition = [state, selected_action] return selected_action def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]: """Take an action and return the response of the env.""" next_state, reward, done, _ = self.env.step(action) if not self.is_test: self.transition += [reward, next_state, done] self.memory.store(*self.transition) return next_state, reward, done def update_model(self) -> torch.Tensor: """Update the model by gradient descent.""" device = self.device # for shortening the following lines samples = self.memory.sample_batch() state = torch.FloatTensor(samples["obs"]).to(device) next_state = torch.FloatTensor(samples["next_obs"]).to(device) action = torch.FloatTensor(samples["acts"].reshape(-1, 1)).to(device) reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device) done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device) masks = 1 - done next_action = self.actor_target(next_state) next_value = self.critic_target(next_state, next_action) curr_return = reward + self.gamma * next_value * masks # train critic values = self.critic(state, action) critic_loss = F.mse_loss(values, curr_return) self.critic_optimizer.zero_grad() critic_loss.backward() 
self.critic_optimizer.step() # train actor actor_loss = -self.critic(state, self.actor(state)).mean() self.actor_optimizer.zero_grad() actor_loss.backward() self.actor_optimizer.step() # target update self._target_soft_update() return actor_loss.data, critic_loss.data def train(self, num_frames: int, plotting_interval: int = 200): """Train the agent.""" self.is_test = False state = self.env.reset() actor_losses = [] critic_losses = [] scores = [] score = 0 for self.total_step in range(1, num_frames + 1): action = self.select_action(state) next_state, reward, done = self.step(action) state = next_state score += reward # if episode ends if done: state = env.reset() scores.append(score) score = 0 # if training is ready if ( len(self.memory) >= self.batch_size and self.total_step > self.initial_random_steps ): actor_loss, critic_loss = self.update_model() actor_losses.append(actor_loss) critic_losses.append(critic_loss) # plotting if self.total_step % plotting_interval == 0: self._plot( self.total_step, scores, actor_losses, critic_losses, ) self.env.close() def test(self): """Test the agent.""" self.is_test = True state = self.env.reset() done = False score = 0 frames = [] while not done: frames.append(self.env.render(mode="rgb_array")) action = self.select_action(state) next_state, reward, done = self.step(action) state = next_state score += reward print("score: ", score) self.env.close() return frames def _target_soft_update(self): """Soft-update: target = tau*local + (1-tau)*target.""" tau = self.tau for t_param, l_param in zip( self.actor_target.parameters(), self.actor.parameters() ): t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data) for t_param, l_param in zip( self.critic_target.parameters(), self.critic.parameters() ): t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data) def _plot( self, frame_idx: int, scores: List[float], actor_losses: List[float], critic_losses: List[float], ): """Plot the training progresses.""" def 
subplot(loc: int, title: str, values: List[float]): plt.subplot(loc) plt.title(title) plt.plot(values) subplot_params = [ (131, f"frame {frame_idx}. score: {np.mean(scores[-10:])}", scores), (132, "actor_loss", actor_losses), (133, "critic_loss", critic_losses), ] clear_output(True) plt.figure(figsize=(30, 5)) for loc, title, values in subplot_params: subplot(loc, title, values) plt.show() # ## Environment # *ActionNormalizer* is an action wrapper class to normalize the action values ranged in (-1. 1). Thanks to this class, we can make the agent simply select action values within the zero centered range (-1, 1). class ActionNormalizer(gym.ActionWrapper): """Rescale and relocate the actions.""" def action(self, action: np.ndarray) -> np.ndarray: """Change the range (-1, 1) to (low, high).""" low = self.action_space.low high = self.action_space.high scale_factor = (high - low) / 2 reloc_factor = high - scale_factor action = action * scale_factor + reloc_factor action = np.clip(action, low, high) return action def reverse_action(self, action: np.ndarray) -> np.ndarray: """Change the range (low, high) to (-1, 1).""" low = self.action_space.low high = self.action_space.high scale_factor = (high - low) / 2 reloc_factor = high - scale_factor action = (action - reloc_factor) / scale_factor action = np.clip(action, -1.0, 1.0) return action # You can see [the code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py) and [configurations](https://github.com/openai/gym/blob/cedecb35e3428985fd4efad738befeb75b9077f1/gym/envs/__init__.py#L81) of Pendulum-v0 from OpenAI's repository. 
# environment env_id = "Pendulum-v0" env = gym.make(env_id) env = ActionNormalizer(env) # ## Set random seed # + def seed_torch(seed): torch.manual_seed(seed) if torch.backends.cudnn.enabled: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True seed = 777 random.seed(seed) np.random.seed(seed) seed_torch(seed) env.seed(seed) # - # ## Initialize # + # parameters num_frames = 50000 memory_size = 100000 batch_size = 128 ou_noise_theta = 1.0 ou_noise_sigma = 0.1 initial_random_steps = 10000 agent = DDPGAgent( env, memory_size, batch_size, ou_noise_theta, ou_noise_sigma, initial_random_steps=initial_random_steps ) # - # ## Train agent.train(num_frames) # ## Test # Run the trained agent (1 episode). # test if IN_COLAB: agent.env = gym.wrappers.Monitor(agent.env, "videos", force=True) frames = agent.test() # ## Render # + if IN_COLAB: # for colab import base64 import glob import io import os from IPython.display import HTML, display def ipython_show_video(path: str) -> None: """Show a video at `path` within IPython Notebook.""" if not os.path.isfile(path): raise NameError("Cannot access: {}".format(path)) video = io.open(path, "r+b").read() encoded = base64.b64encode(video) display(HTML( data=""" <video alt="test" controls> <source src="data:video/mp4;base64,{0}" type="video/mp4"/> </video> """.format(encoded.decode("ascii")) )) list_of_files = glob.glob("videos/*.mp4") latest_file = max(list_of_files, key=os.path.getctime) print(latest_file) ipython_show_video(latest_file) else: # for jupyter from matplotlib import animation from JSAnimation.IPython_display import display_animation from IPython.display import display def display_frames_as_gif(frames): """Displays a list of frames as a gif, with controls.""" patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation( plt.gcf(), animate, frames = len(frames), interval=50 ) display(display_animation(anim, default_mode='loop')) # display 
display_frames_as_gif(frames) # -
03.DDPG.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Creating MNE's data structures from scratch # =========================================== # # MNE provides mechanisms for creating various core objects directly from # NumPy arrays. # import mne import numpy as np # ------------------------------------------------------ # Creating :class:`~mne.Info` objects # ------------------------------------------------------ # # <div class="alert alert-info"><h4>Note</h4><p>for full documentation on the :class:`~mne.Info` object, see # `tut-info-class`. See also `ex-array-classes`.</p></div> # # Normally, :class:`mne.Info` objects are created by the various # `data import functions <ch_convert>`. # However, if you wish to create one from scratch, you can use the # :func:`mne.create_info` function to initialize the minimally required # fields. Further fields can be assigned later as one would with a regular # dictionary. # # The following creates the absolute minimum info structure: # # # Create some dummy metadata n_channels = 32 sampling_rate = 200 info = mne.create_info(n_channels, sampling_rate) print(info) # You can also supply more extensive metadata: # # # + # Names for each channel channel_names = ['MEG1', 'MEG2', 'Cz', 'Pz', 'EOG'] # The type (mag, grad, eeg, eog, misc, ...) of each channel channel_types = ['grad', 'grad', 'eeg', 'eeg', 'eog'] # The sampling rate of the recording sfreq = 1000 # in Hertz # The EEG channels use the standard naming strategy. 
# By supplying the 'montage' parameter, approximate locations # will be added for them montage = 'standard_1005' # Initialize required fields info = mne.create_info(channel_names, sfreq, channel_types, montage) # Add some more information info['description'] = 'My custom dataset' info['bads'] = ['Pz'] # Names of bad channels print(info) # - # <div class="alert alert-info"><h4>Note</h4><p>When assigning new values to the fields of an # :class:`mne.Info` object, it is important that the # fields are consistent: # # - The length of the channel information field `chs` must be # `nchan`. # - The length of the `ch_names` field must be `nchan`. # - The `ch_names` field should be consistent with the `name` field # of the channel information contained in `chs`.</p></div> # # ------------------------------------- # Creating :class:`~mne.io.Raw` objects # ------------------------------------- # # To create a :class:`mne.io.Raw` object from scratch, you can use the # :class:`mne.io.RawArray` class, which implements raw data that is backed by a # numpy array. The correct units for the data are: # # - V: eeg, eog, seeg, emg, ecg, bio, ecog # - T: mag # - T/m: grad # - M: hbo, hbr # - Am: dipole # - AU: misc # # The :class:`mne.io.RawArray` constructor simply takes the data matrix and # :class:`mne.Info` object: # # # + # Generate some random data data = np.random.randn(5, 1000) # Initialize an info structure info = mne.create_info( ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'], ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'], sfreq=100 ) custom_raw = mne.io.RawArray(data, info) print(custom_raw) # - # ------------------------------------- # Creating :class:`~mne.Epochs` objects # ------------------------------------- # # To create an :class:`mne.Epochs` object from scratch, you can use the # :class:`mne.EpochsArray` class, which uses a numpy array directly without # wrapping a raw object. The array must be of `shape(n_epochs, n_chans, # n_times)`. 
The proper units of measure are listed above. # # # + # Generate some random data: 10 epochs, 5 channels, 2 seconds per epoch sfreq = 100 data = np.random.randn(10, 5, sfreq * 2) # Initialize an info structure info = mne.create_info( ch_names=['MEG1', 'MEG2', 'EEG1', 'EEG2', 'EOG'], ch_types=['grad', 'grad', 'eeg', 'eeg', 'eog'], sfreq=sfreq ) # - # It is necessary to supply an "events" array in order to create an Epochs # object. This is of `shape(n_events, 3)` where the first column is the sample # number (time) of the event, the second column indicates the value from which # the transition is made from (only used when the new value is bigger than the # old one), and the third column is the new event value. # # # Create an event matrix: 10 events with alternating event codes events = np.array([ [0, 0, 1], [1, 0, 2], [2, 0, 1], [3, 0, 2], [4, 0, 1], [5, 0, 2], [6, 0, 1], [7, 0, 2], [8, 0, 1], [9, 0, 2], ]) # More information about the event codes: subject was either smiling or # frowning # # event_id = dict(smiling=1, frowning=2) # Finally, we must specify the beginning of an epoch (the end will be inferred # from the sampling frequency and n_samples) # # # Trials were cut from -0.1 to 1.0 seconds tmin = -0.1 # Now we can create the :class:`mne.EpochsArray` object # # # + custom_epochs = mne.EpochsArray(data, info, events, tmin, event_id) print(custom_epochs) # We can treat the epochs object as we would any other _ = custom_epochs['smiling'].average().plot(time_unit='s') # - # ------------------------------------- # Creating :class:`~mne.Evoked` Objects # ------------------------------------- # If you already have data that is collapsed across trials, you may also # directly create an evoked array. Its constructor accepts an array of # `shape(n_chans, n_times)` in addition to some bookkeeping parameters. # The proper units of measure for the data are listed above. 
# # # + # The averaged data data_evoked = data.mean(0) # The number of epochs that were averaged nave = data.shape[0] # A comment to describe to evoked (usually the condition name) comment = "<NAME>" # Create the Evoked object evoked_array = mne.EvokedArray(data_evoked, info, tmin, comment=comment, nave=nave) print(evoked_array) _ = evoked_array.plot(time_unit='s')
stable/_downloads/f51d54a1c1f3584f45318492102672d3/plot_creating_data_structures.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Gensim - Doc2vec untuk Similarity Content # Similarity Content menggunakan vector merupakan cara sederhana untuk mendapatkan kesamaan dari sebuah artikel. # Dalam kasus ini saya akan menggunakan hasil scraping data google news indonesia berjumlah 77 documents saja. # # Adapun module yang digunakan adalah menggunkan gensim # ## Requirement # - Gensim 2.0 # ## Kode Sederhana # + # import logging # logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # - from gensim.models.doc2vec import Doc2Vec, TaggedDocument import gensim from pprint import pprint import multiprocessing import os import re def get_stoplist(): stop = [] for line in open('stopwords.txt','rU'): stop += line.replace(',','').split() return stop stopwords = get_stoplist() print(len(stopwords)) print(stopwords[:20]) # + dirname = 'google_news' documents_file = os.listdir(dirname) documents = [] for fname in documents_file: f = open(os.path.join(dirname,fname),'rU') content = f.read().decode('utf-8').lower() words_split = re.findall(r"[\w']+|[.,!?;]",content) # memotong kalimat berdasarkan punctuation words_split = [word for word in words_split if word not in stopwords] phrases = gensim.models.phrases.Phrases(words_split,min_count=20, threshold=100) bigram = gensim.models.phrases.Phraser(phrases) trigram = gensim.models.phrases.Phrases(bigram[words_split],min_count=20, threshold=100) for idx in range(len(words_split)): for token in bigram[words_split[idx]]: if '_' in token: # Token is a bigram, add to document. # print(words_split[idx]) # words_split[idx].append(token) words_split[idx] += token for idx in range(len(words_split)): for token in trigram[words_split[idx]]: if '_' in token: # Token is a trigram, add to document. 
# words_split[idx].append(token) words_split[idx] += token # print(words_split) filtered_tokens = [] for token in words_split: if re.search('[a-zA-Z]', token): # filter hanya huruf saja filtered_tokens.append(token) title = fname.replace('.txt','') documents.append(TaggedDocument(filtered_tokens,[title])) # - pprint(documents[:1][0].tags) # pprint(documents[4].words) print(documents[:2][0].words[:100]) cores = multiprocessing.cpu_count() model = Doc2Vec(dm=0, dbow_words=1, size=100, window=8, min_count=20, iter=100, workers=cores, sample=1e-4, negative=2) model.scan_vocab(documents,update=False) print(str(model)) model.build_vocab(documents,update=False) print(str(model)) # %time model.train(documents, total_examples=model.corpus_count, epochs=model.iter) print(str(model)) pprint(model.docvecs.most_similar(positive=["5 Kesamaan Xiaomi Mi 6 dan iPhone 7 Plus"], topn=10)) text_search = '''busa sabun jalan sudirman''' inferred_vector = model.infer_vector(text_search.lower().split()) model.docvecs.most_similar([inferred_vector], topn=10) model.docvecs['5 Kesamaan Xiaomi Mi 6 dan iPhone 7 Plus']
3_Gensim_Doc2vec_Similarity_Content.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.12 64-bit (conda) # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt from scipy import interpolate from utils.bigbord import bigbord # %load_ext autoreload # %autoreload 2 # - # ## Generate Fractal distribution of magnetization # # This cell defines a function to generate a fractal distribution in 3D space, # according to a given fractal scaling parameter # See for example Pilkington et al, 1994, Geophysical Prospecting # def fractal3(griddim,nlay,beta=-4,dx=1): nrow = griddim ncol = griddim # make random numbers with a lognormal distribution mu = 1 sigma = 10**0.25 data = 200 * np.exp(np.random.randn(nrow,ncol,nlay) * sigma + mu) data_trn = np.fft.fftshift(np.fft.fftn(data)) kmax = (2*np.pi)/(dx*2) kxinc = kmax/(ncol/2) kyinc = kmax/(nrow/2) kzinc = kmax/(nlay/2) #% make grid of correctly scaled frequencies kx,ky,kz = np.meshgrid(np.arange(-kmax,kmax,kxinc), np.arange(-kmax,kmax,kyinc), np.arange(-kmax,kmax,kzinc)) op = (kx**2+ky**2+kz**2)**(beta/4) #print(nrow/2,ncol/2,np.ceil(nlay/2.)) op[int(nrow/2),int(ncol/2),int(np.ceil(nlay/2.))] = 0. data_trn_frctl = data_trn*op data_frctl = np.fft.ifftn(np.fft.ifftshift(data_trn_frctl)) data_frctl = np.real(data_frctl) return data_frctl # ## Magnetic Forward modelling functions # # These functions are mostly python translations of subroutines from Blakely's textbook # # There are also functions to compute radially-averaged power spectrum for a 2D magnetic anomaly map # + def mlayer(mag,dx,z1,z2,mi,md,fi,fd): """ % [mag] = mlayer(mag,dx,z1,z2,mi,md,fi,fd) % matlab translation of Blakely subroutine % % subroutine mlayer(mag,nx,ny,dx,dy,z1,z2,mi,md,fi,fd,store) % c % c Subroutine MLAYER calculates the total-field anomaly on a two- % c dimensional grid due to a horizontal layer with two- % c dimensional magnetization. 
The following steps are involved: % c (1) Fourier transform the magnetization, (2) multiply by the % c earth filter, and (3) inverse Fourier transform the product. % c Magnetization is specified on a rectangular grid with x and y % c axes directed north and east, respectively. Z axis is down. % c Distance units irrelevant but must be consistent. Requires % c subroutines FOURN, DIRCOS, KVALUE, and MFILT. % c % c Input parameters: % c nx - number of elements in the sout_to_north direction. % c ny - number of elements in the west-to-east direction. % c (NOTE: both nx and ny must be a power of two.) % c mag - a singly dimensioned real array containing the % c two-dimensional magnetization (in A/m). Elements should % c be in order of west to east, then south to north (i.e., % c element 1 is southwest corner, element ny is % c southeast corner, element (nx-1)*ny+1 is northwest % c corner, and element ny*nx is northeast corner. % c store - a singly dimensioned real array used internally. % c It should be dimensioned at least 2*nx*ny. % c dx - sample interval in the x direction. % c dy - sample interval in the y direction. % c z1 - depth to top of layer. Must be > 0. % c z2 - depth to bottom of layer. Must be > z1. % c mi - inclination of magnetization, in degrees positive below % c horizontal. % c md - declination of magnetization, in degrees east of north. % c fi - inclination of regional field. % c fd - declination of regional field. % c % c Output parameters: % c mag - upon output, mag contains the total-field anomaly % c (in nT) with same orientation as above. % c % complex cmag,mfilt,cmplx % real mag,mi,md,mx,my,mz,kx,ky % dimension mag(nx*ny),store(2*nx*ny),nn(2) % data pi/3.14159265/,t2nt/1.e9/ """ #% NB for matlab to agree with blakely definitions, need to transpose inputs #% then transpose again at the end mag = mag.T #print(np.arange(mag.shape[1])) newx, newy, mag = bigbord(np.arange(mag.shape[1]), np.arange(mag.shape[0]), mag) t2nt = 1e9 #% tesla to nanotesla?? 
#%% IMPORTANT - the convention used by Blakely is for the x direction to be #%% North-South. This code retains that convention nx,ny = mag.shape dy = dx #nn[0]=ny #nn[1]=nx #ndim=2 [mx,my,mz] = dircos(mi,md,0) [fx,fy,fz] = dircos(fi,fd,0) #dkx=2.*pi/(nx*dx); #% not used in matlab version #dky=2.*pi/(ny*dy); #% not used in matlab version store = np.fft.fft2(mag) kx,ky = kvalue(mag,dx,dy) kx,ky = np.meshgrid(kx,ky) mf = mfilt(kx,ky,mx,my,mz,fx,fy,fz,z1,z2) #print(mf) store = store*mf store = np.fft.ifft2(store) mag = np.real(store) * t2nt/(nx*ny) mag = mag[newx[0]:newx[1],newy[0]:newy[1]] mag = mag.T return mag def dircos(incl,decl,azim): """ % subroutine dircos(incl,decl,azim,a,b,c) % c % c Subroutine DIRCOS computes direction cosines from inclination % c and declination. % c % c Input parameters: % c incl: inclination in degrees positive below horizontal. % c decl: declination in degrees positive east of true north. % c azim: azimuth of x axis in degrees positive east of north. % c % c Output parameters: % c a,b,c: the three direction cosines. % c % real incl % data d2rad/.017453293/ """ xincl=np.radians(incl) xdecl=np.radians(decl) xazim=np.radians(azim) a=np.cos(xincl)*np.cos(xdecl-xazim) b=np.cos(xincl)*np.sin(xdecl-xazim) c=np.sin(xincl) return a,b,c def mfilt (kx,ky,mx,my,mz,fx,fy,fz,z1,z2): """ % function mfilt(kx,ky,mx,my,mz,fx,fy,fz,z1,z2) % c % c Function MFILT calculates the complex value of the earth % c filter at a single (kx,ky) location. % c % c Input parameters: % c kx - the wavenumber coordinate in the kx direction. % c ky - the wavenumber coordinate in the ky direction. % c mx - the x direction cosine of the magnetization vector. % c my - the y direction cosine of the magnetization vector. % c mz - the z direction cosine of the magnetization vector. % c fx - the x direction cosine of the regional field vector. % c fy - the y direction cosine of the regional field vector. % c fz - the z direction cosine of the regional field vector. 
% c z1 - the depth to the top of the layer. % c z2 - the depth to the bottom of the layer. % c % c Output parameters: % c mfilt - the complex value of the earth filter. % c % complex mfilt,thetam,thetaf,cmplx % real kx,ky,k,mx,my,mz % data pi/3.14159265/,cm/1.e-7/ """ cm = 1e-7 k = np.sqrt(kx**2+ky**2) thetam = mz + 1j * (kx*mx+ky*my)/k thetaf = fz + 1j * (kx*fx+ky*fy)/k mf = 2*np.pi*cm*thetam*thetaf*(np.exp(-k*z1)-np.exp(-k*z2)) mf[k==0] = 0; return mf def kvalue(data,dx,dy=None): """ % c Subroutine KVALUE finds the wavenumber coordinates of one % c element of a rectangular grid from subroutine FOURN. % c % c Input parameters: % c i - index in the ky direction. % c j - index in the kx direction. % c nx - dimension of grid in ky direction (a power of two). % c ny - dimension of grid in kx direction (a power of two). % c dkx - sample interval in the kx direction. % c dky - sample interval in the ky direction. % c % c Output parameters: % c kx - the wavenumber coordinate in the kx direction. % c ky - the wavenumber coordinate in the ky direction. % c """ if not dy: dy = dx ny,nx = data.shape i=np.arange(ny) j=np.arange(nx) dkx = (2*np.pi)/(nx*dx) dky = (2*np.pi)/(ny*dy) nyqx=nx/2+1 nyqy=ny/2+1 kx = np.zeros_like(j, dtype=np.double) ky = np.zeros_like(i, dtype=np.double) ind = j<nyqx kx[ind] = j[ind] * dkx ind = j>=nyqx kx[ind] = (j[ind]-nx) * dkx ind = i<nyqy ky[ind] = i[ind] * dky ind = i>=nyqy ky[ind] = (i[ind]-ny) * dky return kx,ky def spec2(mag): T = np.fft.fft2(mag) S = T * np.conj(T) return np.fft.fftshift(S) def rpsinterp(S,dx): """ % [Sra,dr,Rs] = rps (S,dx) % Sra is the Radially Averaged Power Spectrum of the spectrum S, evaluated at points % along lines extending radially outwards from the centre of the 2D spectrum at, at 1 % degree increments. Linear interpolation is used to map spectrum onto these points. 
The % radial wavenumber increment is given by dr (in radians/distance unit), based on the % input data spacing dg """ ny,nx = S.shape nr = int(np.floor(np.min((nx,ny))/2)) na = int(360) #%% number of angles - hardwired in to code, could be altered?? R = np.zeros((na,nr)) dr = 2*np.pi/(dx*(nx-1)) Rad = np.tile(np.arange(0,nr),(na,1)) # points onto which we will sample radial spectrum, # defined by angle and distance from # centre of spectrum da = 2*np.pi/(na); A = np.tile(da*np.arange(0,na), (nr,1)).T nx2 = np.floor(nx/2) ny2 = np.floor(ny/2) if is_odd(nx): Wx=np.arange(-nx2,nx2+1) else: Wx=np.arange(-nx2,nx2) if is_odd(ny): Wy=np.arange(-ny2,ny2+1) else: Wy=np.arange(-ny2,ny2) Vxi = Rad*np.cos(A) Vyi = Rad*np.sin(A) #% NB in next line, switched Vxi and Vyi so that Angular spectrum starts at #% 0 degrees f = interpolate.RectBivariateSpline(Wx,Wy,S) Rs = f.ev(Vyi,Vxi) #Rs = interp2(Wx,Wy,S,Vyi,Vxi); #%% Rs is the radial spectrum #%%% Rla is the Radially Averaged Spectrum - note take the log first then avarage, #%%% as proposed by Maus and Dimri Sra = np.sum(np.log(np.abs(Rs)), axis=0) / Rs.shape[0] return Sra,dr,Rs def is_odd(num): return num & 0x1 def cosine_filter(mag, dx, cuts, highpass=True): dy=dx store = np.fft.fft2(mag) kx,ky = kvalue(mag,dx,dy) kx,ky = np.meshgrid(kx,ky) k = np.sqrt(kx**2+ky**2) filter_response = design_cosine_filter(k, cuts) if highpass: filter_response = (filter_response-1) * -1 store = np.fft.ifft2(store * filter_response) return np.real(store) # - # ## Example Application # # Based on the example in Pilkington and Cowan (2006), Geophysics (though actually a bit simpler, we don't use a true 3D distribution of magnetization but instead two layers of limited depth-extent where the magnetiation varies in x and y but not z) # # We define the same two layers, where # - layer one is from 1000-3000 m depth # - layer two is from 6000-22000 m depth # # Magnetizing field is vertical for simplicity # + griddim = 1000 # number of grid nodes in x and y 
direction (Assume a square area) nlay = 2 beta = -3 # fractal scaling parameter dx = 200 # spacing between grid points, in metres layer1_top = 1000. layer1_bottom = 3000. layer2_top = 6000. layer2_bottom = 22000. # This number controls the factor by which we assume the magnetization of the deep layer is greater than that of the shallow layer # (making this number larger will generally make the separation more tractable) deep_layer_multipication_factor = 2. # Generate a 3D array as a function of (x,y,depth) data_frctl = fractal3(griddim,nlay,beta); res1 = mlayer(data_frctl[:,:,0], dx, layer1_top, layer1_bottom, 90, 0, 90, 0) res2 = mlayer(data_frctl[:,:,1], dx, layer2_top, layer2_bottom, 90, 0, 90, 0) * deep_layer_multipication_factor fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,5)) p1 = ax[0].pcolormesh(res1, cmap='seismic') ax[0].set_title('TMI for shallow layer') p2 = ax[1].pcolormesh(res2, cmap='seismic') ax[1].set_title('TMI for deep layer') p2 = ax[2].pcolormesh(res1+res2, cmap='seismic') ax[2].set_title('TMI for combined layers') plt.show() # - # ## Comparison of layer responses in wavenumber domain # + s = spec2(res1 + res2) Sra,dr,Rs = rpsinterp(s,dx) Sra1,dr,Rs1 = rpsinterp(spec2(res1),dx) Sra2,dr,Rs1 = rpsinterp(spec2(res2),dx) wavenumber_scale = np.arange(len(Sra))*dr ind = wavenumber_scale<0.003 #plt.pcolormesh(np.log(np.abs(s))) #plt.show() plt.semilogy(wavenumber_scale[ind], np.exp(Sra)[ind], label='total') plt.semilogy(wavenumber_scale[ind], np.exp(Sra1)[ind], label='shallow layer') plt.semilogy(wavenumber_scale[ind], np.exp(Sra2)[ind], label='deep layer') #plt.xlim(0,0.003) #plt.ylim(1e2, 1e12) plt.legend() plt.show() # + def design_cosine_filter(wavenumber_scale, cuts): cut_one = np.min(cuts) cut_zero = np.max(cuts) filter_response = np.zeros_like(wavenumber_scale) filter_response[wavenumber_scale>cut_zero] = np.pi ind = (wavenumber_scale>cut_one) & (wavenumber_scale<cut_zero) filter_response[ind] = 
np.linspace(0,np.pi,np.count_nonzero(ind)+2)[1:-1] filter_response = (np.cos(filter_response)+1.)/2. return filter_response filter_response = design_cosine_filter(wavenumber_scale, (0.0003, 0.0008)) plt.plot(wavenumber_scale, filter_response) plt.xlim(0,0.001) # + f = cosine_filter(res1 + res2, dx, (0.0002, 0.0008), highpass=True) plt.pcolormesh(f) plt.show() plt.pcolormesh(res1 + res2) plt.show() # -
DepthSlicing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (higgs_inference) # language: python # name: higgs_inference # --- # # MadMiner particle physics tutorial # # # Part 2a: Analyzing events at parton level # # <NAME>, <NAME>, <NAME>, and <NAME> 2018-2019 # In this second part of the tutorial, we'll generate events and extract the observables and weights from them. You have two options: In this notebook we'll do this at parton level, in the alternative part 2b we use Delphes. # ## 0. Preparations # Before you execute this notebook, make sure you have a running installation of MadGraph. # + from __future__ import absolute_import, division, print_function, unicode_literals import logging import numpy as np import matplotlib from matplotlib import pyplot as plt # %matplotlib inline # + # MadMiner output logging.basicConfig( format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s', datefmt='%H:%M', level=logging.DEBUG ) # Output of all other modules (e.g. matplotlib) for key in logging.Logger.manager.loggerDict: if "madminer" not in key: logging.getLogger(key).setLevel(logging.WARNING) # - from madminer import MadMiner, LHEReader, combine_and_shuffle, plot_distributions # Please enter here the path to your MG5 root directory: mg_dir = '/madminer/software/MG5_aMC_v2_6_7' # ## 1. Generate events # Let's load our setup: miner = MadMiner() miner.load("data/setup.h5") # In a next step, MadMiner starts MadGraph to generate events and calculate the weights. You can use `run()` or `run_multiple()`; the latter allows to generate different runs with different run cards and optimizing the phase space for different benchmark points. # # In either case, you have to provide paths to the process card, run card, param card (the entries corresponding to the parameters of interest will be automatically adapted), and an empty reweight card. 
Log files in the `log_directory` folder collect the MadGraph output and are important for debugging. # # The `sample_benchmark` (or in the case of `run_all`, `sample_benchmarks`) option can be used to specify which benchmark should be used for sampling, i.e. for which benchmark point the phase space is optimized. If you just use one benchmark, reweighting to far-away points in parameter space can lead to large event weights and thus large statistical fluctuations. It is therefore often a good idea to combine a lot of events at the "reference hypothesis" (for us the SM) and smaller samples from other benchmarks that span the parameter space. # # One slight annoyance is that MadGraph only supports Python 2. The `run()` and `run_multiple()` commands have a keyword `initial_command` that let you load a virtual environment in which `python` maps to Python 2 (which is what we do below). Alternatively / additionally you can set `python2_override=True`, which calls `python2.7` instead of `python` to start MadGraph. miner.run( sample_benchmark='sm', mg_directory=mg_dir, mg_process_directory='./mg_processes/signal1', proc_card_file='cards/proc_card_signal.dat', param_card_template_file='cards/param_card_template.dat', run_card_file='cards/run_card_signal_large.dat', log_directory='logs/signal', initial_command="source activate python2" ) additional_benchmarks = ['w', 'ww', 'neg_w', 'neg_ww'] miner.run_multiple( sample_benchmarks=additional_benchmarks, mg_directory=mg_dir, mg_process_directory='./mg_processes/signal2', proc_card_file='cards/proc_card_signal.dat', param_card_template_file='cards/param_card_template.dat', run_card_files=['cards/run_card_signal_small.dat'], log_directory='logs/signal', initial_command="source activate python2" ) # This will take a moment -- time for a coffee break! # # After running any event generation through MadMiner, you should check whether the run succeeded: are the usual output files there, do the log files show any error messages? 
MadMiner does not (yet) perform any explicit checks, and if something went wrong in the event generation, it will only notice later when trying to load the event files. # ### Backgrounds # We can also easily add other processes like backgrounds. An important option is the `is_background` keyword, which should be used for processes that do *not* depend on the parameters theta. `is_background=True` will disable the reweighting and re-use the same weights for all cross sections. # # To reduce the runtime of the notebook, the background part is commented out here. Feel free to activate it and let it run during a lunch break. """ miner.run( is_background=True, sample_benchmark='sm', mg_directory=mg_dir, mg_process_directory='./mg_processes/background', proc_card_file='cards/proc_card_background.dat', param_card_template_file='cards/param_card_template.dat', run_card_file='cards/run_card_background.dat', log_directory='logs/background', ) """ # Finally, note that both `MadMiner.run()` and `MadMiner.run_multiple()` have a `only_create_script` keyword. If that is set to True, MadMiner will not start the event generation directly, but prepare folders with all the right settings and ready-to-run bash scripts. This might make it much easier to generate Events on a high-performance computing system. # ## 2. Prepare analysis of the LHE samples # The `madminer.lhe` submodule allows us to extract observables directly from the parton-level LHE samples, including an approximate description of the detector response with smearing functions. The central object is an instance of the `LHEProcessor` class, which has to be initialized with a MadMiner file: lhe = LHEReader('data/setup.h5') # After creating the `LHEReader` object, one can add a number of event samples (the output of running MadGraph in step 1) with the `add_sample()` function. 
# # In addition, you have to provide the information which sample was generated from which benchmark with the `sampled_from_benchmark` keyword, and set `is_background=True` for all background samples. # + lhe.add_sample( lhe_filename='mg_processes/signal1/Events/run_01/unweighted_events.lhe.gz', sampled_from_benchmark='sm', is_background=False, k_factor=1., ) for i, benchmark in enumerate(additional_benchmarks): lhe.add_sample( lhe_filename='mg_processes/signal2/Events/run_0{}/unweighted_events.lhe.gz'.format(i+1), sampled_from_benchmark=benchmark, is_background=False, k_factor=1., ) """ lhe.add_sample( lhe_filename='mg_processes/background/Events/run_01/unweighted_events.lhe.gz', sampled_from_benchmark='sm', is_background=True, k_factor=1.0, """ # - # ## 3. Smearing functions to model the detector response # Now we have to define the smearing functions that are used (in lieu of a proper shower and detector simulation). Here we will assume a simple 10% uncertainty on the jet energy measurements and a $\pm 0.1$ smearing for jet $\eta$ and $\phi$. The transverse momenta of the jets are then derived from the smeared energy and the on-shell condition for the quarks (this is what `pt_resolution_abs=None` does). The photons from the Higgs are assumed to be measured perfectly (otherwise we'd have to call `set_smearing` another time with `pdgis=[22]`). lhe.set_smearing( pdgids=[1,2,3,4,5,6,9,21,-1,-2,-3,-4,-5,-6], # Partons giving rise to jets energy_resolution_abs=0., energy_resolution_rel=0.1, pt_resolution_abs=None, pt_resolution_rel=None, eta_resolution_abs=0.1, eta_resolution_rel=0., phi_resolution_abs=0.1, phi_resolution_rel=0., ) # In addition, we can define noise that only affects MET. This adds Gaussian noise with mean 0 and std `abs_ + rel * HT` to MET_x and MET_y separately. lhe.set_met_noise(abs_=10., rel=0.05) # ## 4. Observables and cuts # The next step is the definition of observables, either through a Python function or an expression that can be evaluated. 
Here we demonstrate the latter, which is implemented in `add_observable()`. In the expression string, you can use the terms `j[i]`, `e[i]`, `mu[i]`, `a[i]`, `met`, where the indices `i` refer to a ordering by the transverse momentum. In addition, you can use `p[i]`, which denotes the `i`-th particle in the order given in the LHE sample (which is the order in which the final-state particles where defined in MadGraph). # # All of these represent objects inheriting from scikit-hep [LorentzVectors](http://scikit-hep.org/api/math.html#vector-classes), see the link for a documentation of their properties. In addition, they have `charge` and `pdg_id` properties. # # `add_observable()` has an optional keyword `required`. If `required=True`, we will only keep events where the observable can be parsed, i.e. all involved particles have been detected. If `required=False`, un-parseable observables will be filled with the value of another keyword `default`. # # In a realistic project, you would want to add a large number of observables that capture all information in your events. Here we will just define two observables, the transverse momentum of the leading (= higher-pT) jet, and the azimuthal angle between the two leading jets. lhe.add_observable( 'pt_j1', 'j[0].pt', required=False, default=0., ) lhe.add_observable( 'delta_phi_jj', 'j[0].deltaphi(j[1]) * (-1. + 2.*float(j[0].eta > j[1].eta))', required=True, ) lhe.add_observable( 'met', 'met.pt', required=True, ) # We can also add cuts, again in parse-able strings. In addition to the objects discussed above, they can contain the observables: lhe.add_cut('(a[0] + a[1]).m > 122.') lhe.add_cut('(a[0] + a[1]).m < 128.') lhe.add_cut('pt_j1 > 20.') # ## 5. 
Run analysis and store processed events

# The function `analyse_samples` then calculates all observables from the LHE file(s) generated before, applies the smearing, and checks which events pass the cuts:

lhe.analyse_samples()

# The values of the observables and the weights are then saved in the HDF5 file. It is possible to overwrite the same file, or to leave the original file intact and save all the data into a new file as follows:

lhe.save('data/lhe_data.h5')

# ## 6. Plot distributions

# Let's see what our MC run produced:

_ = plot_distributions(
    filename='data/lhe_data.h5',
    parameter_points=['sm', np.array([10.,0.])],
    line_labels=['SM', 'BSM'],
    uncertainties='none',
    n_bins=20,
    n_cols=3,
    normalize=True,
    sample_only_from_closest_benchmark=True
)

# ## 7. Combine and shuffle different samples

# To reduce disk usage, you can generate several small event samples with the steps given above, and combine them now. Note that (for now) it is essential that all of them are generated with the same setup, including the same benchmark points / morphing basis!
#
# This is generally good practice even if you use just one sample, since the events might have some inherent ordering (e.g. from sampling from different hypotheses). Later when we split the events into a training and test fraction, such an ordering could cause problems.

combine_and_shuffle(
    ['data/lhe_data.h5'],
    'data/lhe_data_shuffled.h5'
)
examples/tutorial_particle_physics/2a_parton_level_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Differential UWB model # # The ideas behind this model improve on the previous ones by adding a second UWB transceiver to the system. The idea is to be able to measure heading directly from the UWB beacon. # # Assumptions: # # * Robot moves in the 2D plane # * Gyro in the yaw axis # * Gyroscope has constant bias ($b_\omega$) and random noise. # * UWB receivers are separated by a fixed distance $d$. # # As previously, the state of the robot is: # # \begin{equation} # \mathbf{x} = \begin{pmatrix} # x\\ # y\\ # \dot{x}\\ # \dot{y}\\ # \theta\\ # b_{\omega} # \end{pmatrix} # \end{equation} # # # + import sympy as sp import numpy as np import matplotlib.pyplot as plt # %matplotlib inline sp.init_printing() # defines useful variables x, y, xdot, ydot, theta, b_omega, d = sp.symbols('x y v_x v_y theta b_{\omega} d') state = sp.Matrix([x, y, xdot, ydot, theta, b_omega]) dt = sp.symbols('Delta_t') # - # # Prediction step # # The prediction step is the same as in the gyro integrating model. a_x, a_y, omega = sp.symbols('a_x a_y omega') # in body frame u = [a_x, a_y, omega] dx = sp.Matrix([xdot, ydot, sp.cos(theta) * a_x - sp.sin(theta) * a_y, sp.sin(theta) * a_x + sp.cos(theta) * a_y, omega - b_omega, 0]) g = state + dt * dx # ## Measurement step # # For the measurement, the UWB system gives us the distance $d$ to a beacon. # The beacon's position $\mathbf{b}$ is known and assumed to be fixed. # # We first compute the position of each UWB receiver. Receiver $n$ is assumed to be at position $\mathbf{x}_{UWB,n}^R$ in robot frame. 
# # \begin{equation} # \mathbf{x}_{UWB,n}^W = \mathbf{x}_{robot}^W + \begin{pmatrix} # \cos \theta & - \sin \theta \\ # \sin \theta & \cos \theta # \end{pmatrix} \mathbf{x}_{UWB,n}^R # \end{equation} # # Then the measurement model is: # # \begin{equation} # h_n(\mathbf{x}, \mathbf{b}) = \sqrt{(x_{UWB,n}^W - b_x)^2 + (y_{UWB,n}^W - b_y)^2} # \end{equation} b_x, b_y = sp.symbols('b_x b_y') # beacon position uwb_x, uwb_y = sp.symbols('x_{UWB} y_{UWB}') uwb_pos_robot = sp.Matrix([uwb_x, uwb_y]) uwb_pos_world = sp.Matrix([x, y]) + sp.Matrix([[sp.cos(theta), -sp.sin(theta)], [sp.sin(theta), sp.cos(theta)]]) @ uwb_pos_robot h_b = sp.sqrt((uwb_pos_world[0] - b_x)**2 + (uwb_pos_world[1] - b_y)**2) #h_b = h_b.replace(uwb_x, 0).replace(uwb_y, 0) # Put it in a matrix to conform with EKF framework h_b = sp.Matrix([h_b]) # ## EKF implementation # # As before, we derive the calculation using Sympy: g g.jacobian(state) h_b h_b.jacobian(state) # ## Simulation f = 200 # Hz, sample rate of the filter g_num = sp.lambdify([state, u], g.replace(dt, 1/f), 'numpy') G_num = sp.lambdify([state, u], g.jacobian(state).replace(dt, 1/f), 'numpy') h_num = sp.lambdify([[b_x, b_y], [uwb_x, uwb_y], state], h_b, 'numpy') H_num = sp.lambdify([[b_x, b_y], [uwb_x, uwb_y], state], h_b.jacobian(state), 'numpy') # quick sanity checks s = np.array([1, 1, 0, 0, 3.14, 0]) h_num((0, 0), (1, 1), s) u = np.array([0, 0 ,0]) g_num(s, u) g.jacobian(state).replace(dt, 1) # + # Defines the kalman filter import functools import ekf # Create a dummy variance model R = np.diag([0.1, # pos 0.1, 1e-4, # speed 1e-4, 1e-4, # angle 1e-4 # bias ]) Q = np.diag([0.03]) # distance measurement predictor = ekf.Predictor(g_num, G_num, R) BEACON_POS = [ (-1.5, 0), (1.5, 1), (1.5, -1), ] d = 0.1# Distance between receivers, in meters UWB_POS = [ (-d / 2, 0), ( d / 2, 0) ] correctors = [ ekf.Corrector(functools.partial(h_num, pos, uwb_pos), functools.partial(H_num, pos, uwb_pos), Q ) for pos in BEACON_POS for uwb_pos in UWB_POS] # + 
# Runs the kalman filter on a circular trajectory import trajectories f_uwb = 10 # Update frequency for the UWB beacons bias = np.deg2rad(5) # rad / s bias_var = np.deg2rad(3.3) mu = np.array([0, 0, 0, 0.1, 0, bias]) # initial state sigma = np.diag([1e-1, 1e-1, # pos 1e-3, 1e-3, # speed 1e-1, # angle bias_var, ]) angle, angle_estimated = [], [] x, xhat = [], [] y, yhat = [], [] acc = [] ts = [] for i, p in zip(range(10000), trajectories.generate_circular_traj(1, np.deg2rad(10), 1/f)): # add noise & bias to the measurements acc = [p.acc[0], p.acc[1]] + np.random.normal(0, 0.2, 2) omega = p.omega + np.random.normal(bias, np.sqrt(bias_var)) # feeds the input into Kalman filter_input = np.array([acc[0], acc[1], omega]) mu, sigma = predictor(mu, sigma, filter_input) # If we received a radio update, feed it into the EKF if i % (f // f_uwb) == 0: for corrector in correctors: z = corrector.h(np.array([p.pos[0], p.pos[1], 0, 0, p.theta, 0])) z += np.random.normal(0, 0.03) mu, sigma = corrector(mu, sigma, z) # Saves the data ts.append(p.timestamp) angle.append(p.theta) angle_estimated.append(mu[4]) x.append(p.pos[0]) xhat.append(mu[0]) y.append(p.pos[1]) yhat.append(mu[1]) # + # Plot various results rms = np.sqrt(np.mean([(a - b)**2 for a, b in zip(angle, angle_estimated)])) plt.plot(ts, np.degrees(angle)) plt.plot(ts, np.degrees(angle_estimated)) plt.xlabel('time [s]') plt.ylabel('heading [degrees]') plt.legend(['truth', 'estimated']) plt.title('Theta (RMS={:.2f}°)'.format(np.degrees(rms))) plt.gcf().savefig('differential_model_angle.pdf') plt.show() plt.plot(ts, x) plt.plot(ts, xhat) plt.plot(ts, y) plt.plot(ts, yhat) plt.xlabel('time') plt.ylabel('meters') plt.title('position') plt.legend(['x', 'x (estimated)', 'y', 'y (estimated)']) plt.show() plt.plot(x, y) plt.plot(xhat, yhat) plt.plot([x for x, y in BEACON_POS],[y for x, y in BEACON_POS], 'x') plt.legend(('Simulated trajectory', 'EKF output', 'anchors')) plt.title('trajectory') plt.xlabel('x [m]') plt.ylabel('y [m]') 
plt.gcf().savefig('differential_model_trajectory.pdf')
plt.show()

# Euclidean position error of the EKF estimate at each time step.
error = [np.sqrt((x-xh)**2+(y-yh)**2) for x,xh,y,yh in zip(x, xhat,y,yhat)]
plt.plot(ts, error)
plt.xlabel('time [s]')
plt.ylabel('position error [m]')
# Report the true root-mean-square error, consistent with the RMS computed for
# the heading plot above. The previous code used np.mean(error) (the mean
# absolute error) while labelling the figure "RMS".
plt.title('Position error (RMS = {:.3f} m)'.format(np.sqrt(np.mean(np.square(error)))))
plt.ylim(0, 0.1)
plt.gcf().savefig('differential_model_error.pdf')
plt.show()
# -
uwb-beacon-firmware/doc/report/models/Differential 2 beacons.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="rpZiUllIKheS"
# 0. Imports

# + id="4RzJ03PpKVsC"
import pandas as pd

# + [markdown] id="DdBaHDDBKk_0"
# 1. Load csv

# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="87TgmlrbKlyo" outputId="efe13535-8283-4e4e-9fb3-90eec1f8e81e"
# change to your file location
# NOTE: the separator is passed as the keyword `sep=` -- the positional form
# was deprecated in pandas 1.4 and removed in 2.0.
df = pd.read_csv('/content/drive/MyDrive/Škola/DM/spravanie_zamestnancov_v_zavislosti_od_casu_II/MLM_ZAM_stats.csv',
                 sep=';', usecols=range(0, 10))
df.head(10)

# + [markdown] id="KvcF5DLJLCm7"
# 2. Create collection of weekdays

# + id="6tmMjFTNK8-b"
days = ['PO', 'UT', 'STR', 'STVR', 'PIA']

# + [markdown] id="ScZrHi-8LG9W"
# 3. Create logits estimate

# + id="w9lO10toLFbF"
# Build one table of multinomial-logit linear predictors per weekday.
# df is assumed to hold one row per outcome category
# (index 0 = uvod, 1 = studium, 2 = oznamy) with the intercept, the hour
# terms and one dummy column per weekday -- TODO confirm against the CSV.
dataframe_collection = {}
index = 0


def _logit(row, hour, day):
    """Linear predictor for one outcome row of df at the given hour and weekday."""
    return (df.at[row, 'Intercept']
            + df.at[row, 'HODINA'] * hour
            + df.at[row, 'HODINA_STV'] * (hour * hour)
            + df.at[row, day])


# Cycle through weekdays
for day in days:
    # Collect one record per hour (7..22), then build the frame in a single call.
    # (DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.)
    records = []
    for x in range(7, 23):
        # Hours stored as float to match the dtype the old row-wise
        # DataFrame.append produced (it upcast whole rows to float64).
        records.append({'0_hod': float(x),
                        '1_uvod': _logit(index, x, day),
                        '2_studium': _logit(index + 1, x, day),
                        '3_oznamy': _logit(index + 2, x, day)})
    # add dataframe to dictionary of dataframes
    dataframe_collection[day] = pd.DataFrame(records)

# + [markdown] id="Teckag1PLS2K"
# 4. Export to excel

# + id="iqiYAYhhLQ6-"
# Creating Excel Writer Object from Pandas
writer = pd.ExcelWriter('ZAM_logits.xlsx', engine='xlsxwriter')
workbook = writer.book
worksheet = workbook.add_worksheet('ZAM')
writer.sheets['ZAM'] = worksheet

# Each weekday block is 4 columns wide; leave one empty spacer column between blocks.
for col, day in enumerate(days):
    start_col = col * 5
    worksheet.write(0, start_col, str(day + " - ODHAD LOGITOV"))
    dataframe_collection[day].to_excel(writer, sheet_name='ZAM',
                                       startrow=1, startcol=start_col, index=False)

# ExcelWriter.save() was removed in pandas 2.0; close() flushes and saves the file.
writer.close()
spravanie_zamestnancov_v_zavislosti_od_casu_II/ZAM_logits_estimate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np # import tensorflow.keras as keras from keras.optimizers import Adam from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate from keras.layers import BatchNormalization, Activation, ZeroPadding2D from keras.layers import LeakyReLU, UpSampling2D, Conv2D from keras.models import Sequential, Model, load_model from keras import backend as K from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import scipy from tensorflow import logging import imageio, skimage import matplotlib.image as mpimg import datetime, os, pickle from os import listdir from os.path import isfile, join # - import warnings warnings.filterwarnings('ignore') # + def get_img(img_filepath,target_size): _, _, n_C = target_size if n_C == 1: mode='L' elif n_C == 3: mode='RGB' else: raise Exception('Unexpected number of chanel '+str(n_C)+'!') # x = imageio.imread(img_filepath,as_gray=as_gray).astype(np.float) x = scipy.misc.imread(img_filepath, mode=mode).astype(np.float) x = scipy.misc.imresize(x, target_size) if n_C == 1 : x = np.stack((x,)*1, -1) x = np.array(x)/127.5 - 1. 
return x def load_images(target_size): n_H, n_W, n_C = input_shape data_dir='data/raw/' img_files = [f for f in listdir(data_dir) if isfile(join(data_dir, f)) and '_x.jpg' in f] n_x=len(img_files) n_x=10 X=np.zeros((n_x,n_H, n_W, n_C)) Y=np.zeros((n_x,n_H, n_W, n_C)) for i in range(n_x): img_id=img_files[i].strip('_x.jpg').strip('data_') X[i,:,:,:] = get_img(data_dir+'data_'+str(img_id)+'_x.jpg',target_size) Y[i,:,:,:] = get_img(data_dir+'data_'+str(img_id)+'_y.jpg',target_size) return X,Y def load_facades_images(input_shape): n_H, n_W, n_C = input_shape test_data='data/facades/train/' img_files = [f for f in listdir(test_data) if isfile(join(test_data, f)) and '.jpg' in f] n_x=len(img_files) n_x=10 X=np.zeros((n_x,n_H, n_W, n_C)) Y=np.zeros((n_x,n_H, n_W, n_C)) for i in range(n_x): img = get_img(test_data+img_files[i],[n_H,n_W*2,n_C]) Y[i,:,:,:], X[i,:,:,:] = img[:, :n_W, :], img[:, n_W:, :] return X,Y def load_realworld_images(input_shape): test_data_dir='data/test/' img_training_files = [f for f in listdir(test_data_dir) if isfile(join(test_data_dir, f)) and '.jpg' in f] X_test=np.zeros((len(img_training_files),target_size[0], target_size[1], target_size[2])) for i in range(len(img_training_files)): X_test[i,:,:,:] = get_img(test_data_dir+img_training_files[i],target_size) return X_test # + # SOURCE https://github.com/eriklindernoren/Keras-GAN def build_generator(input_shape,gf,name): """U-Net Generator""" n_H, n_W, n_C = input_shape def conv2d(layer_input, filters, f_size=4, bn=True): """Layers used during downsampling""" d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input) d = LeakyReLU(alpha=0.2)(d) if bn: d = BatchNormalization(momentum=0.8)(d) return d def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0): """Layers used during upsampling""" u = UpSampling2D(size=2)(layer_input) u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u) if dropout_rate: u = Dropout(dropout_rate)(u) 
# NOTE(review): this chunk begins mid-way through build_generator(); the
# function signature, the channel unpacking that defines n_C, and the
# conv2d()/first half of deconv2d() helpers sit above this view.
        u = BatchNormalization(momentum=0.8)(u)
        # U-Net skip connection: concatenate the upsampled features with the
        # mirrored encoder activation along the channel axis.
        u = Concatenate()([u, skip_input])
        return u

    # Image input
    d0 = Input(shape=input_shape)

    # Downsampling path (encoder). `gf` is the base generator filter count;
    # it doubles until gf*8. conv2d is defined outside this chunk — presumably
    # a stride-2 conv mirroring deconv2d's upsampling; TODO confirm.
    d1 = conv2d(d0, gf, bn=False)
    d2 = conv2d(d1, gf*2)
    d3 = conv2d(d2, gf*4)
    d4 = conv2d(d3, gf*8)
    d5 = conv2d(d4, gf*8)
    d6 = conv2d(d5, gf*8)
    d7 = conv2d(d6, gf*8)

    # Upsampling path (decoder), each step fed the mirrored encoder layer
    # as its skip connection (U-Net).
    u1 = deconv2d(d7, d6, gf*8)
    u2 = deconv2d(u1, d5, gf*8)
    u3 = deconv2d(u2, d4, gf*8)
    u4 = deconv2d(u3, d3, gf*4)
    u5 = deconv2d(u4, d2, gf*2)
    u6 = deconv2d(u5, d1, gf)

    u7 = UpSampling2D(size=2)(u6)
    # tanh keeps output pixels in [-1, 1], matching the 0.5*x + 0.5
    # de-normalisation used when these images are plotted below.
    output_img = Conv2D(n_C, kernel_size=4, strides=1, padding='same', activation='tanh')(u7)

    return Model(d0, output_img,name=name)


def build_discriminator(input_shape, df, name):
    """Build the PatchGAN discriminator.

    Args:
        input_shape: (height, width, channels) of one image.
        df: base number of discriminator filters; doubled each layer.
        name: Keras model name.

    Returns:
        Model mapping [img_A, img_B] -> per-patch validity map
        (1-channel conv output, no final activation).
    """
    n_H, n_W, n_C = input_shape

    def d_layer(layer_input, filters, f_size=4, bn=True):
        """Discriminator layer"""
        # Stride-2 conv halves the spatial resolution at every layer.
        d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.2)(d)
        if bn:
            d = BatchNormalization(momentum=0.8)(d)
        return d

    img_A = Input(shape=input_shape)
    img_B = Input(shape=input_shape)

    # Concatenate image and conditioning image by channels to produce input
    combined_imgs = Concatenate(axis=-1)([img_A, img_B])

    d1 = d_layer(combined_imgs, df, bn=False)
    d2 = d_layer(d1, df*2)
    d3 = d_layer(d2, df*4)
    d4 = d_layer(d3, df*8)

    # One validity score per receptive-field patch (PatchGAN). After four
    # stride-2 layers the map is input_size/16 on each spatial axis, which
    # train_epoch's disc_patch shape relies on.
    validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

    return Model([img_A, img_B], validity, name=name)
# -


def build_model(input_shape, gf=64, df=64, name='combined',init_model=True):
    """Assemble the pix2pix training graph.

    Args:
        input_shape: (height, width, channels) shared by both image domains.
        gf, df: base filter counts for the generator / discriminator.
        name: name of the combined model.
        init_model: NOTE(review) — accepted but never used in this body.

    Returns:
        (generator, discriminator, combined): discriminator compiled with
        MSE (LSGAN-style patch loss), combined compiled with MSE + L1
        reconstruction loss weighted 1:100.
    """
    # Build and compile the discriminator
    discriminator = build_discriminator(input_shape,df,'discriminator')
    discriminator.compile(loss='mse',optimizer=Adam(0.0002, 0.5),metrics=['accuracy'])

    #-------------------------
    # Build the generator
    generator = build_generator(input_shape,gf,'generator')

    # Input images and their conditioning images
    img_A = Input(shape=input_shape)
    img_B = Input(shape=input_shape)

    # By conditioning on B generate a fake version of A
    fake_A = generator(img_B)

    # For the combined model we will only train the generator
    """By setting trainable=False after the discriminator has been compiled the discriminator is still trained during discriminator.train_on_batch but since it's set to non-trainable before the combined model is compiled it's not trained during combined.train_on_batch."""
    discriminator.trainable = False

    # Discriminators determines validity of translated images / condition pairs
    valid = discriminator([fake_A, img_B])

    # Combined model: adversarial patch loss (mse on `valid`) plus 100x L1
    # ('mae') reconstruction loss between fake_A and the real target img_A.
    combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A], name=name)
    combined.compile(loss=['mse', 'mae'],loss_weights=[1, 100],optimizer=Adam(0.0002, 0.5))
    return generator, discriminator, combined


def sample_images(generator, imgs_X, imgs_Y,epoch):
    """Save a grid of (input, generated, target) image triples to output/.

    Images are assumed normalised to [-1, 1]; 0.5*x + 0.5 maps them back to
    [0, 1] for imshow. One row per sample, three columns.
    """
    m, n_H, n_W, _ = imgs_X.shape
    DPI = plt.gcf().get_dpi()
    # Figure sized so each image is rendered at roughly native resolution.
    figsize=((3*n_W)/float(DPI),(m*n_H)/float(DPI))
    generated_Y = generator.predict(imgs_X,batch_size=1)
    titles = ['Original', 'Generated', 'Condition']
    fig, axs = plt.subplots(m, len(titles),figsize=figsize)
    for r in range(m):
        axs[r,0].imshow(0.5 * imgs_X[r,:,:,:]+ 0.5)
        axs[r,0].set_title(titles[0])
        axs[r,0].axis('off')
        axs[r,1].imshow(0.5 * generated_Y[r,:,:,:]+ 0.5)
        axs[r,1].set_title(titles[1])
        axs[r,1].axis('off')
        axs[r,2].imshow(0.5 * imgs_Y[r,:,:,:]+ 0.5)
        axs[r,2].set_title(titles[2])
        axs[r,2].axis('off')
    fig.savefig("output/pix2pix_epoch_%d.png" % (epoch))
    plt.close()


def train_epoch(generator, discriminator, combined,imgs_A, imgs_B, epochs=1, batch_size=1):
    """Run one alternating D/G training pass over the whole dataset.

    Args:
        imgs_A: target images, shape (m, n_H, n_W, n_C), values in [-1, 1].
        imgs_B: conditioning input images, same shape.
        epochs, batch_size: forwarded to the underlying Keras fit() calls.

    Returns:
        (generator, discriminator, combined, loss) where `loss` is a dict of
        the Keras History objects from the three fit() calls.
    """
    # Calculate output shape of D (PatchGAN)
    # /16 matches the four stride-2 layers in build_discriminator.
    m, n_H, n_W, n_C = imgs_B.shape
    disc_patch = (int(n_H/16), int(n_W/16), 1)

    # Adversarial loss ground truths
    valid = np.ones((m,) + disc_patch)
    fake = np.zeros((m,) + disc_patch)

    logging.info('Training Discriminator')
    # Condition on B and generate a translated version
    fake_A = generator.predict(imgs_B,batch_size=batch_size)

    # Train the discriminators (original images = real / generated = Fake)
    d_loss_real = discriminator.fit(x=[imgs_A, imgs_B], y=valid, batch_size=batch_size, epochs=epochs, verbose=0)
    d_loss_fake = discriminator.fit(x=[fake_A, imgs_B], y=fake, batch_size=batch_size, epochs=epochs, verbose=0)
    # NOTE(review): d_loss is computed but neither returned nor logged.
    d_loss = 0.5 * np.add(d_loss_real.history['loss'], d_loss_fake.history['loss'])

    logging.info('Training Generator')
    # Train the generators. SET Discriminator trainable false.
    g_loss = combined.fit(x=[imgs_A, imgs_B], y=[valid, imgs_A], batch_size=batch_size, epochs=epochs)

    loss={'d_loss_real':d_loss_real,'d_loss_fake':d_loss_fake,'g_loss':g_loss}
    return generator, discriminator, combined, loss


# +
# Script: load the paired dataset, preview one random pair, then train.
input_shape=[256,256,3]
X,Y = load_images(input_shape)
#X,Y = load_facades_images(input_shape)
# X_train, X_val, Y_train, Y_val = train_test_split(X,Y,test_size=0.1,random_state=2)
# X_train, X_val, Y_train, Y_val = train_test_split(X,Y,test_size=2,random_state=2)

# Sanity check: show one (input, target) pair, de-normalised to [0, 1].
if True:
    i_sample=np.random.randint(len(X))
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    ax1.imshow(0.5 * X[i_sample,:,:,:] + 0.5)
    ax2.imshow(0.5 * Y[i_sample,:,:,:] + 0.5)
    plt.show()
# +
logging.set_verbosity(logging.ERROR)
generator, discriminator, combined = build_model(input_shape,init_model=True)
for epoch in range(0, 200):
    # Learn to generate Y (imgs_A, the target) conditioned on X (imgs_B).
    generator, discriminator, combined ,_=train_epoch(generator, discriminator, combined,imgs_A=Y, imgs_B=X, epochs=1, batch_size=16)
    logging.info('saving model')
    # NOTE(review): pickling a compiled Keras model is fragile — confirm it
    # round-trips (model.save()/load_model is the supported route), and the
    # file handles opened inline here are never explicitly closed.
    pickle.dump(combined,open('saved_model/pix2pix_emoji_combined.pkl',"wb" ))
    if epoch % 5 == 0 :
        pickle.dump(combined,open('saved_model/pix2pix_emoji_combined_epoch'+str(epoch)+'.pkl',"wb" ))
    # generate sample images from dataset
    # Fixed seed means the same 5-image window is sampled every epoch,
    # making the saved grids comparable across epochs.
    np.random.seed(3)
    m = X.shape[0]
    _s=np.random.randint(m-5)
    X_sample, Y_sample =X[_s:_s+5,:,:,:], Y[_s:_s+5,:,:,:]
    sample_images(generator,X_sample, Y_sample,epoch)
# -
emoji_net/pix2pix_emoji.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.2 64-bit (''packt-repo-M2qY5kM-'': pipenv)' # name: python3 # --- from utils import * from azureml.core import Workspace, Experiment ws = Workspace.from_config() exp = Experiment(workspace=ws, name='access_dataset') # + from azureml.core import Dataset path ='https://dprepdata.blob.core.windows.net/demo/Titanic.csv' # Create a direct dataset ds = Dataset.Tabular.from_delimited_files(path) ds # - # Register the dataset ds = ds.register(workspace=ws, name="titanic") ds # list all datasets from a workspace datasets = Dataset.get_all(ws) datasets # Now we can retrieve the dataset by name (and version) ds = Dataset.get_by_name(ws, "titanic") ds ds.to_pandas_dataframe() # + from azureml.core import ScriptRunConfig src = ScriptRunConfig(source_directory="code", script='access_data_from_dataset.py', arguments=['--input', ds.as_named_input('titanic')], environment=get_current_env()) # Submit the run configuration for your training run run = exp.submit(src) run.wait_for_completion(show_output=True) # -
chapter04/Registered_tabular_dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: feedforward # language: python # name: feedforward # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mella30/Deep-Learning-with-Tensorflow-2/blob/main/Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Iei1HrhZazP_" # # Kullback-Leibler divergence # + [markdown] id="uLMh2drgazQA" # This reading will review the definition of the Kullback-Leibler (or KL) divergence, look at some of its important properties, see how it can be computed in practice with TensorFlow Probability. # + id="9jxJx_cXazQB" colab={"base_uri": "https://localhost:8080/"} outputId="1a963e5b-5fed-47dc-b917-5cf9193c0bf8" import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions print("TF version:", tf.__version__) print("TFP version:", tfp.__version__) # Additional packages for the reading import matplotlib.pyplot as plt import numpy as np from matplotlib.patches import Ellipse # + [markdown] id="0Qb18CzWazQE" # ## Introduction # # As you have already seen, the KL divergence is used in variational inference to score the dissimilarity between two distributions. In this reading, we will examine KL divergence more closely. We will see the definition of the KL divergence and some important properties, as well as how it can be computed using `tfd.kl_divergence` and Monte Carlo estimation. 
# + [markdown] id="VfgtknLXazQF" # ## Definition of the Kullback-Leibler divergence # # Given two probability density or mass functions $q(x)$ and $p(x)$, the Kullback-Leibler divergence between them is defined as # # \begin{equation} # D_{KL}\big[q \ || \ p\big] =\begin{cases} # \text{E}_{X\sim q}\big[ # \log q(X) - \log p(X)\big] &\text{if } p(x) = 0 \implies q(x) = 0,\\ # \infty &\text{otherwise.} # \end{cases} # \end{equation} # # The condition $p(x) = 0 \implies q(x) = 0$ - _absolute continuity_ - ensures that the $\log$ in the expectation is well-defined for all $x$ in the support of $q$. # # As was mentioned, the KL divergence is a score for the disagreement of two distributions in their placement of probability mass. A smaller score indicates a greater degree of agreement. # + [markdown] id="TuuMpRQpazQG" # ## Properties # + [markdown] id="BY2vPhtBazQG" # The Kullback-Leibler divergence is asymmetric. In general, # # \begin{equation} # D_{KL}\big[q \ || \ p\big] \neq D_{KL}\big[p \ || \ q \big] # \end{equation} # # In variational inference, $q$ is the approximating distribution, while $p$ is the distribution being approximated. The other KL divergence - $D_{KL}[p \ || \ q ]$ - is also sometimes used as a loss function, for reasons that will become clear later in this reading. # # # + [markdown] id="IMpomHFIazQH" # ### Gibbs' inequality # # A crucial property of the KL divergence is that for all $q$ and $p$, # # \begin{equation} # D_{KL}\big[q \ || \ p\big] \geq 0, # \end{equation} # # with equality if and only if $q(x) = p(x)$ almost everywhere. This property is very useful when we are trying to learn a $q$ that is similar to a $p$: if $D_{KL}[q \ || \ p] = 0$, then we know that $q$ is identical to $p$. # + [markdown] id="uAdeuoLBazQH" # ### What causes KL divergence to increase? # # As an example, take $q(x)$ and $p(x)$ to be probability mass functions, and let $\mathcal{X}$ be $q$'s support. 
Provided $q$ is absolutely continuous with respect to $p$, we have # # \begin{equation} # D_{KL}\big[q \ || \ p\big] = \sum_{x \in \mathcal{X}} q(x) \log \frac{q(x)}{p(x)}. # \end{equation} # # Values of $x$ that $p$ assigns mass to but $q$ does not do not feature in this sum. Superficially, this may suggest that divergence is not increased if $q$ fails to place mass where $p$ does. However, $q$ is a probability mass function, so will inevitably place more mass than $p$ at some other value(s) of $x$. At those other locations, $\log q(x)/p(x) > 0$, so the divergence is increased. # # On the other hand, if $q$ places probability mass where $p$ does not, then $D_{KL}\big[q \ || \ p\big]$ is $+\infty$ - the KL divergence severely penalizes $q$ for locating probability mass where $p$ does not! # # From this combination of effects, we can conclude that # # \begin{equation} # \text{support}(q) \subseteq \text{support}(p) \implies D_{KL}\big[ q \ || \ p \big] < \infty, # \end{equation} # # while # # \begin{equation} # \text{support}(p) \subset \text{support}(q) \implies D_{KL}\big[ q \ || \ p \big] = \infty # \end{equation} # # Consequently, the KL divergence favours distributions $q$ that have a support contained in the target distribution's (i.e. $p$'s). # # The diagram below illustrates how the KL divergence is affected by the support of two bivariate density functions $q$ and $p$. The hatched regions indicate the support of either function. 
# + id="3Iib1KfqazQI" colab={"base_uri": "https://localhost:8080/", "height": 324} outputId="f90db314-afe7-4f5d-edac-9a2fbdd5a394" _, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(11, 5)) delta = 45.0 # degrees q_ell_inf = Ellipse((0, 0), 2, 1.5, 45, ec='blue', fc='none', alpha=0.5, label='q(x)', hatch='/') q_ell_fin = Ellipse((0, 0), 0.5, 0.75, 45, ec='blue', fc='none', alpha=0.5, label='q(x)', hatch='/') p_ell_inf = Ellipse((0, 0), 1, 1, 45, ec='red', fc='none', alpha=0.5, label='p(x)', hatch='\\') p_ell_fin = Ellipse((0, 0), 1, 1, 45, ec='red', fc='none', alpha=0.5, label='p(x)', hatch='\\') # KL divergence is infinite for ell in [q_ell_inf, p_ell_inf]: axs[0].add_artist(ell) axs[0].legend([q_ell_inf, p_ell_inf], ['Support of q', 'Support of p'], loc='lower right') axs[0].get_xaxis().set_ticks([]) axs[0].get_yaxis().set_ticks([]) # KL divergence is finite for ell in [q_ell_fin, p_ell_fin]: axs[1].add_artist(ell) axs[1].legend([q_ell_fin, p_ell_fin], ['Support of q', 'Support of p'], loc='lower right') axs[1].get_xaxis().set_ticks([]) axs[1].get_yaxis().set_ticks([]) axs[0].set_title(r'$D_{KL}[q \ || \ p] = +\infty$') axs[1].set_title(r'$D_{KL}[q \ || \ p]$ is finite but non-zero') plt.xlim(-1, 1) plt.ylim(-1, 1); # + [markdown] id="HTHMV1xpazQL" # ## Computing KL divergence in TensorFlow # # For some choices of $q$ and $p$, the KL divergence can be evaluated to a closed-form expression. # # `tfd.kl_divergence` computes the KL divergence between two distributions analytically, provided the divergence in question has been implemented in the TensorFlow Probability library. # # Below is an example that uses `tfd.kl_divergence` to compute $D_{KL}\big[q \ || \ p \big]$ when $q$ and $p$ are univariate normal distributions. # + id="UqzL-8LjazQL" colab={"base_uri": "https://localhost:8080/"} outputId="9c480a64-dd6b-40a5-88d0-baa1446f0368" # Simple example mu_q = 0. sigma_q = 1. mu_p = 0. 
sigma_p = 0.5 distribution_q = tfd.Normal(loc=mu_q, scale=sigma_q) distribution_p = tfd.Normal(loc=mu_p, scale=sigma_p) tfd.kl_divergence(distribution_q, distribution_p) # D_{KL}[q || p] # + [markdown] id="TNH4cmjfazQO" # Let's check this value. The KL divergence between two univariate normal distributions can be derived directly from the definition of the KL divergence as # # \begin{equation} # D_{KL}\big[ q \ || \ p\big] = \frac{1}{2}\bigg(\frac{\sigma_q^2}{\sigma_p^2} + \frac{(\mu_q - \mu_p)^2}{\sigma_p^2} + 2\log \frac{\sigma_p}{\sigma_q} - 1\bigg) # \end{equation} # # The value of this function should be equal to that returned by `kl_divergence(distribution_q, distribution_p)`. # + id="MPG30ExfazQO" colab={"base_uri": "https://localhost:8080/"} outputId="747204d9-8680-45a1-dff5-f4eeaf642df3" # Analytical expression for KL divergence between two univariate Normals 0.5*( (sigma_q/sigma_p)**2 + ((mu_q - mu_p)/sigma_p)**2 + 2*np.log(sigma_p/sigma_q) - 1) # + [markdown] id="jUXjb5JQazQR" # Sure enough, it is. # # If a batch of distributions is passed to `kl_divergence`, then a batch of divergences will be returned. `kl_divergence` also supports broadcasting. # + id="e0w8mqrwazQR" # Batch example with broadcasting distributions_q = tfd.Normal(loc=[0., 1.], scale=1.) distribution_p = tfd.Normal(loc=0., scale=0.5) # + id="-71tRoeXazQU" colab={"base_uri": "https://localhost:8080/"} outputId="873529d6-1625-412f-da4f-7274cf30c0f0" # Notice the batch_shape distributions_q # + id="0bHb-P3bazQW" colab={"base_uri": "https://localhost:8080/"} outputId="1726f27b-0263-4d0d-ab26-830eae04b8a5" # [D_{KL}[q_1 || p], D_{KL}[q_2 || p] tfd.kl_divergence(distributions_q, distribution_p) # + [markdown] id="KbQwyb5XazQY" # `kl_divergence` provides a convenient way of computing the KL divergence for many TensorFlow distributions. As a rule of thumb, it will evaluate successfully provided you pass in two distributions of the same parametric family. 
# + id="B5LM7WLrazQZ" colab={"base_uri": "https://localhost:8080/"} outputId="8dc5c8af-40d8-4cd0-e0b9-04a50358524a" # An example with another distribution beta_q = tfd.Beta(concentration1=12, concentration0=3) beta_p = tfd.Beta(concentration1=9, concentration0=3) tfd.kl_divergence(beta_q, beta_p) # + id="EXi7YbGjazQb" colab={"base_uri": "https://localhost:8080/"} outputId="b41420e8-981a-4ab8-9857-ee7bd756141a" # An example with a multivariate distribution cov_q = np.array([[1., 0.5], [0.5, 1.]]) cov_p = np.array([[1., 0.], [0., 1.]]) mvtnormal_q = tfd.MultivariateNormalTriL(loc=[0., 0.], scale_tril=tf.linalg.cholesky(cov_q)) mvtnormal_p = tfd.MultivariateNormalTriL(loc=[0., 0.], scale_tril=tf.linalg.cholesky(cov_p)) tfd.kl_divergence(mvtnormal_q, mvtnormal_p) # + [markdown] id="JlO7m3cYazQe" # To see a complete list of distributions for which a KL method is defined, refer to `help(tfd.kl_divergence)`. # # If you pass `kl_divergence` a pair distributions for which a KL divergence method is not implemented, an error will be raised: # + id="b7OEI-p3azQe" # uniform_q and beta_p are both uniform distributions with support [0, 1] uniform_q = tfd.Uniform(low=0., high=1.) beta_p = tfd.Beta(concentration1=0., concentration0=0.) # + id="oj8MqjReazQg" colab={"base_uri": "https://localhost:8080/"} outputId="091c465a-98fc-4b26-c09a-6c787f360e0a" # kl_divergence has no method for computing their divergence try: tfd.kl_divergence(uniform_q, beta_p) except Exception as e: print(e) # + [markdown] id="JFIzPMkaazQi" # ### When `kl_divergence` fails # # If you do not have a closed-form expression for your KL divergence, and it is not implemented in `tfd.kl_divergence`, then you can make a Monte Carlo estimate of it. 
Simply sample $n$ values $x_1, \ldots, x_n$ from $q$, then evaluate the estimate # # \begin{equation} # \frac{1}{n}\sum_{i=1}^n \log\big[q(x_i)\big] - \log\big[p(x_i)\big] # \end{equation} # # In general, the Monte Carlo estimator is unbiased and its variance is inversely proportional to $n$. # # To show how the variance of the Monte Carlo estimator varies with $n$, let's attempt to estimate $D_{KL}\big[q \ | \ p\big]$ when $q$ and $p$ are univariate normal distributions. We'll make many estimates for several values of $n$, then plot their absolute error as a function of $n$. # # We'll start by evaluating the exact value $D_{KL}\big[q \ | \ p\big]$ using `kl_divergence`. Bear in mind that the Monte Carlo estimate will only be useful in situations where this not possible! # + id="cNLfNifSazQi" colab={"base_uri": "https://localhost:8080/"} outputId="b8898054-6d7e-4def-af61-e7a391126c28" # Evaluate the exact KL divergence distribution_q = tfd.Normal(loc=0., scale=1.) distribution_p = tfd.Normal(loc=0., scale=0.5) exact_kl_divergence = tfd.kl_divergence(distribution_q, distribution_p).numpy() # D_{KL}[q || p] exact_kl_divergence # + [markdown] id="muVRbJlcazQk" # Next, we'll define a function for making a Monte Carlo estimate for a given $q$, $p$, and $n$. # + id="jltAO4IeazQl" # Function to estimate the KL divergence with Monte Carlo samples def monte_carlo_estimate_of_kl_divergence(n, q_sampler, q_density, p_density): ''' Computes a Monte Carlo estimate of D_{KL}[q || p] using n samples from q_sampler. q_sampler is a function that receives a positive integer and returns as many samples from q. Given samples x_1, ..., x_n from q_sampler, the Monte Carlo estimate is \frac{1}{n}\sum_{i=1}^n \log(q(x_i)) - \log(p(x_i)) where q and p are density/mass functions. 
''' x = q_sampler(n) KL_estimate = np.mean(np.log(q_density(x)) - np.log(p_density(x))) return(KL_estimate) # + [markdown] id="V0_LwbzTazQn" # The code below shows how this function can be used to make a single estimate. # + id="AOVKz5ifazQn" colab={"base_uri": "https://localhost:8080/"} outputId="b2704c38-2bdb-41e4-a0f7-8deec96b7c88" # Single MC estimate n = 1000 # number of samples used in MC estimate q_sampler = distribution_q.sample q_density = distribution_q.prob p_density = distribution_p.prob monte_carlo_estimate_of_kl_divergence(n, q_sampler, q_density, p_density) # + [markdown] id="23OpUuqVazQq" # To see how the estimator's variance decreases with increasing $n$, let's evaluate a few hundred estimates for each point in a grid of $n$ values. # + id="3zBlR0LBazQq" # Create a grid of 8 points n_grid = 10**np.arange(1, 8) samples_per_grid_point = 100 # Number of MC estimates to make for each value of n # + id="xJKsfDwGazQt" # Array to store results kl_estimates = np.zeros(shape=[samples_per_grid_point, len(n_grid), 2]) # + id="ZPCTW9xiazQv" # Make 100 MC estimates for each value of n, store the results in kl_estimates for sample_num in range(samples_per_grid_point): for grid_num, n in enumerate(n_grid): kl_estimates[sample_num, grid_num, 0] = n kl_estimates[sample_num, grid_num, 1] = monte_carlo_estimate_of_kl_divergence(n, q_sampler, q_density, p_density) # + id="dj16qyFYazQw" # Compute RMSE of estimates (this is approximately equal to the standard deviation of the MC estimator) rmse_of_kl_estimates = np.sqrt(np.mean((kl_estimates[:, :, 1] - exact_kl_divergence)**2, axis=0)) # + id="nq73oJm5azQy" # Compute absolute error of the MC estimates abs_error_of_kl_estimates = abs(kl_estimates[:, :, 1].flatten() - exact_kl_divergence) # + id="aWi66xX3azQ0" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="9aa59bbe-e3b2-41da-dd7c-5840073ba220" # Plot the results _, ax = plt.subplots(1, 1, figsize=(15, 5)) plt.xlabel(r'Number of samples in Monte 
Carlo estimate, $n$') ax.scatter(kl_estimates[:, :, 0], abs_error_of_kl_estimates, marker='.', color='red', alpha=0.1, label='Absolute error of Monte Carlo estimates') ax.plot(n_grid, rmse_of_kl_estimates, color='k', label='RMSE of Monte Carlo estimates') ax.set_xscale('log'); ax.set_yscale('log'); ax.set_ylim([1e-6, 10]) ax.legend(); # + [markdown] id="-VzZ7TsiazQ2" # You can see that the gradient of the estimates' RMSE, an estimate of the MC estimator's standard deviation, with respect to $n$ is $-\frac{1}{2}$. This is unsurprising: the estimator's variance is inversely proportional to $n$, so its log standard deviation is a linear function of $\log n$ with gradient $-\frac{1}{2}$. As $n$ increases, the Monte Carlo estimates approach the exact value of the KL divergence. # + [markdown] id="Tup8g-P2azQ2" # ### Summary # # You should now feel confident about how the Kullback-Leibler divergence is motivated and defined, what its key properties are and why it is used in variational inference, and how it can be computed or estimated in TensorFlow. # + [markdown] id="1rkUpU6_azQ2" # ### Further reading and resources # # * TensorFlow documentation on `tfd.kl_divergence`: https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/kl_divergence # + [markdown] id="H0CLIEBqazQ3" # ## Appendix # # #### Information gain, relative entropy, and Bayesian inference # # This section provides further context for the Kullback-Leibler divergence. It is not essential, but it will give you a more complete understanding of what the divergence measures. # # The Kullback-Leibler divergence has its origins in information theory. The Shannon entropy, defined as # # \begin{equation} # H(P) := E_{X \sim P(x)}[-\log P(X) ] # \end{equation} # # is the greatest lower bound on the average number of nats ($\log 2$ nats are equal to $1$ bit) required to losslessly encode an observation sampled from $P(x)$. This is an informal statement of a result known as the _source coding theorem_. 
$-\log P(x)$ is the number of bits used to encode $x$ in the lossless encoding scheme. # # Say that a lossless compression algorithm instead encodes observations using a scheme that would be optimal for distribution $Q(x)$. Then the average number of bits required to encode an observation sampled from $P(x)$ would be # # \begin{equation} # H(P, Q) := E_{X \sim P(x)}[-\log Q(X)] # \end{equation} # # This quantity is referred to as the _cross-entropy_ between $P$ and $Q$. Since $H(P)$ is the minimum average information for encoding observations from $P(x)$ by definition, it follows that $H(P, Q) \geq H(P)$. # # The Kullback-Leibler divergence is defined as the average additional information required to encode observations from $P(x)$ using an optimal code for $Q(x)$: # # \begin{align} # D_{KL}(P \ || \ Q) &:= E_{X \sim P(x)}[-\log Q(X)] - E_{X \sim P(x)}[-\log P(X)] \\ # &= H(P, Q) - H(P) # \end{align} # # The KL divergence therefore tells us how inefficient the optimal coding scheme for $Q$ is when applied to data source $P$. # # That the KL divergence is the difference between a cross-entropy and a Shannon entropy sheds light on why the KL divergence has another moniker - _relative entropy_. # # Alternatively, we might consider encoding observations in the context of Bayesian inference. Let $P(y)$ be the prior and $P(y|x)$ be the posterior. Then the Kullback-Leibler divergence # # \begin{equation} # D_{KL}(P(y|x) \ || \ P(y)) = E_{Y \sim P(y|x)}[-\log P(Y)] - E_{Y \sim P(y|x)}[-\log P(Y|x)] # \end{equation} # # is the average number of bits saved if observations are encoded using an optimal code for the posterior rather than the prior. In this sense, the KL divergence tells us how much information is gained by conditioning on $X$.
Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week4_KL_divergence.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# +
# **Choropleth** This notebook demonstrates a choropleth, a thematic map in which polygon areas are coloured to represent a value. The notebook loads a polygon GeoJSON dataset and produces a choropleth visualisation. The key parameters need to be edited in the Parameters section below, in particular for selecting the data of interest.

# +
# Load the required libraries
library(sf)
library(ggplot2)
library(classInt)
# -

# ## Parameters
#
# Edit the key parameters here; the dataset, the variable of interest and the number of categories for the variable to be classified into.

# +
# Insert the dataset name and variable of interest

## Dataset
dataset.param <- "CancerMortality-1614033420629.geojson"

## Variable of Interest
variable.param <- "all_canc_rate"

## Number of Categories
intervals.param <- 6

# +
## Load Dataset

# +
# The data are loaded here and converted to an sf dataframe.
setwd("/home/jovyan/aurin") # Set the working directory
getwd()
dataset.sf <- st_read(paste("data/choropleth/", dataset.param, sep = "")) # Load the GeoJSON dataset
# -

# ## Prepare Data
#
# This cell creates a new variable in the dataset that is a categorisation of the variable of interest into a series of intervals.

# +
# Generate a list of intervals
cint <- classIntervals(dataset.sf[[variable.param]], intervals.param, style = "quantile") # Set the type of classification, e.g. "equal" for equal intervals
print(cint)

# Create the categorised variable according to the intervals generated above
dataset.sf$choropleth_cat <- cut(dataset.sf[[variable.param]], breaks = data.frame(cint[2])[,1], include.lowest = TRUE, dig.lab = 7) # dig.lab sets the number of digits in the label before scientific notation is used
# -

# ## Plot Visualisation
#
# This cell contains the code for plotting the choropleth using `ggplot()`. See https://ggplot2.tidyverse.org/ for further information.

ggplot()+
  geom_sf(aes(fill=dataset.sf$choropleth_cat),colour='grey', size = 0.2, data=dataset.sf)+
  scale_fill_viridis_d( # Set the colour scheme here. See https://ggplot2.tidyverse.org/reference/scale_viridis.html
    name='Legend', # Set the title of the legend
    guide=guide_legend(
      direction='vertical', # Set the orientation of the legend
      title.position='top', # Set the position of the legend title
      title.hjust = .5, # Adjust horizontal position of the legend title
      label.hjust = .5, # Adjust horizontal position of the legend labels
      label.position = 'right', # Position of labels. Change to e.g. 'bottom'
      keywidth = 1, # Legend width
      keyheight = 1 # Legend height
    ))+
  labs(title="Choropleth Map", # Map title
    subtitle= paste('Map of Variable', variable.param), # Map subtitle
    caption=c('Source: AURIN aurin.org.au'))+ # Caption
  theme_void()+
  theme(title=element_text(face='bold'), legend.position = 'right') # Position of legend. Change to e.g. 'bottom'

# ## Save Result

ggsave("img/choropleth.png", plot = last_plot())
scenarios/Choropleth.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mfernandes61/python-intro-gapminder/blob/binder/colab/03_types_conversion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="gM6wykorsfn3" # + [markdown] id="hYhX1mFYsv1F" # --- # title: "Data Types and Type Conversion" # teaching: 10 # exercises: 10 # questions: # - "What kinds of data do programs store?" # - "How can I convert one type to another?" # objectives: # - "Explain key differences between integers and floating point numbers." # - "Explain key differences between numbers and character strings." # - "Use built-in functions to convert between integers, floating point numbers, and strings." # keypoints: # - "Every value has a type." # - "Use the built-in function `type` to find the type of a value." # - "Types control what operations can be done on values." # - "Strings can be added and multiplied." # - "Strings have a length (but numbers don't)." # - "Must convert numbers to strings or vice versa when operating on them." # - "Can mix integers and floats freely in operations." # - "Variables only change value when something is assigned to them." # --- # ## Every value has a type. # # * Every value in a program has a specific type. # * Integer (`int`): represents positive or negative whole numbers like 3 or -512. # * Floating point number (`float`): represents real numbers like 3.14159 or -2.5. # * Character string (usually called "string", `str`): text. # * Written in either single quotes or double quotes (as long as they match). # * The quote marks aren't printed when the string is displayed. # # ## Use the built-in function `type` to find the type of a value. 
# # * Use the built-in function `type` to find out what type a value has. # * Works on variables as well. # * But remember: the *value* has the type --- the *variable* is just a label. # # ~~~ # print(type(52)) # ~~~ # {: .language-python} # ~~~ # <class 'int'> # ~~~ # {: .output} # # ~~~ # fitness = 'average' # print(type(fitness)) # ~~~ # {: .language-python} # ~~~ # <class 'str'> # ~~~ # {: .output} # # ## Types control what operations (or methods) can be performed on a given value. # # * A value's type determines what the program can do to it. # # ~~~ # print(5 - 3) # ~~~ # {: .language-python} # ~~~ # 2 # ~~~ # {: .output} # # ~~~ # print('hello' - 'h') # ~~~ # {: .language-python} # ~~~ # --------------------------------------------------------------------------- # TypeError Traceback (most recent call last) # <ipython-input-2-67f5626a1e07> in <module>() # ----> 1 print('hello' - 'h') # # TypeError: unsupported operand type(s) for -: 'str' and 'str' # ~~~ # {: .error} # # ## You can use the "+" and "*" operators on strings. # # * "Adding" character strings concatenates them. # # ~~~ # full_name = 'Ahmed' + ' ' + 'Walsh' # print(full_name) # ~~~ # {: .language-python} # ~~~ # Ahmed Walsh # ~~~ # {: .output} # # * Multiplying a character string by an integer _N_ creates a new string that consists of that character string repeated _N_ times. # * Since multiplication is repeated addition. # # ~~~ # separator = '=' * 10 # print(separator) # ~~~ # {: .language-python} # ~~~ # ========== # ~~~ # {: .output} # # ## Strings have a length (but numbers don't). # # * The built-in function `len` counts the number of characters in a string. # # ~~~ # print(len(full_name)) # ~~~ # {: .language-python} # ~~~ # 11 # ~~~ # {: .output} # # * But numbers don't have a length (not even zero). 
# # ~~~ # print(len(52)) # ~~~ # {: .language-python} # ~~~ # --------------------------------------------------------------------------- # TypeError Traceback (most recent call last) # <ipython-input-3-f769e8e8097d> in <module>() # ----> 1 print(len(52)) # # TypeError: object of type 'int' has no len() # ~~~ # {: .error} # # ## <a name='convert-numbers-and-strings'></a> Must convert numbers to strings or vice versa when operating on them. # # * Cannot add numbers and strings. # # ~~~ # print(1 + '2') # ~~~ # {: .language-python} # ~~~ # --------------------------------------------------------------------------- # TypeError Traceback (most recent call last) # <ipython-input-4-fe4f54a023c6> in <module>() # ----> 1 print(1 + '2') # # TypeError: unsupported operand type(s) for +: 'int' and 'str' # ~~~ # {: .error} # # * Not allowed because it's ambiguous: should `1 + '2'` be `3` or `'12'`? # * Some types can be converted to other types by using the type name as a function. # # ~~~ # print(1 + int('2')) # print(str(1) + '2') # ~~~ # {: .language-python} # ~~~ # 3 # 12 # ~~~ # {: .output} # # ## Can mix integers and floats freely in operations. # # * Integers and floating-point numbers can be mixed in arithmetic. # * Python 3 automatically converts integers to floats as needed. # # ~~~ # print('half is', 1 / 2.0) # print('three squared is', 3.0 ** 2) # ~~~ # {: .language-python} # ~~~ # half is 0.5 # three squared is 9.0 # ~~~ # {: .output} # # ## Variables only change value when something is assigned to them. # # * If we make one cell in a spreadsheet depend on another, # and update the latter, # the former updates automatically. # * This does **not** happen in programming languages. 
# # ~~~ # variable_one = 1 # variable_two = 5 * variable_one # variable_one = 2 # print('first is', variable_one, 'and second is', variable_two) # ~~~ # {: .language-python} # ~~~ # first is 2 and second is 5 # ~~~ # {: .output} # # * The computer reads the value of `variable_one` when doing the multiplication, # creates a new value, and assigns it to `variable_two`. # * After that, `variable_two` does not remember where it came from. # # > ## Fractions # > # > What type of value is 3.4? # > How can you find out? # > # > > ## Solution # > > # > > It is a floating-point number (often abbreviated "float"). # > > It is possible to find out by using the built-in function `type()`. # > > # > > ~~~ # > > print(type(3.4)) # > > ~~~ # > > {: .language-python} # > > ~~~ # > > <class 'float'> # > > ~~~ # > > {: .output} # > {: .solution} # {: .challenge} # # > ## Automatic Type Conversion # > # > What type of value is 3.25 + 4? # > # > > ## Solution # > > # > > It is a float: # > > integers are automatically converted to floats as necessary. # > > # > > ~~~ # > > result = 3.25 + 4 # > > print(result, 'is', type(result)) # > > ~~~ # > > {: .language-python} # > > ~~~ # > > 7.25 is <class 'float'> # > > ~~~ # > > {: .output} # > {: .solution} # {: .challenge} # # > ## Choose a Type # > # > What type of value (integer, floating point number, or character string) # > would you use to represent each of the following? Try to come up with more than one good answer for each problem. For example, in # 1, when would counting days with a floating point variable make more sense than using an integer? # > # > 1. Number of days since the start of the year. # > 2. Time elapsed from the start of the year until now in days. # > 3. Serial number of a piece of lab equipment. # > 4. A lab specimen's age # > 5. Current population of a city. # > 6. Average population of a city over time. # > # > > ## Solution # > > # > > The answers to the questions are: # > > 1. 
Integer, since the number of days would lie between 1 and 365. # > > 2. Floating point, since fractional days are required # > > 3. Character string if serial number contains letters and numbers, otherwise integer if the serial number consists only of numerals # > > 4. This will vary! How do you define a specimen's age? whole days since collection (integer)? date and time (string)? # > > 5. Choose floating point to represent population as large aggregates (eg millions), or integer to represent population in units of individuals. # > > 6. Floating point number, since an average is likely to have a fractional part. # > {: .solution} # {: .challenge} # # > ## Division Types # > # > In Python 3, the `//` operator performs integer (whole-number) floor division, the `/` operator performs floating-point # > division, and the `%` (or *modulo*) operator calculates and returns the remainder from integer division: # > # > ~~~ # > print('5 // 3:', 5 // 3) # > print('5 / 3:', 5 / 3) # > print('5 % 3:', 5 % 3) # > ~~~ # > {: .language-python} # > # > ~~~ # > 5 // 3: 1 # > 5 / 3: 1.6666666666666667 # > 5 % 3: 2 # > ~~~ # > {: .output} # > # > If `num_subjects` is the number of subjects taking part in a study, # > and `num_per_survey` is the number that can take part in a single survey, # > write an expression that calculates the number of surveys needed # > to reach everyone once. # > # > > ## Solution # > > We want the minimum number of surveys that reaches everyone once, which is # > > the rounded up value of `num_subjects/ num_per_survey`. This is # > > equivalent to performing a floor division with `//` and adding 1. Before # > > the division we need to subtract 1 from the number of subjects to deal with # > > the case where `num_subjects` is evenly divisible by `num_per_survey`. 
# > > ~~~ # > > num_subjects = 600 # > > num_per_survey = 42 # > > num_surveys = (num_subjects - 1) // num_per_survey + 1 # > > # > > print(num_subjects, 'subjects,', num_per_survey, 'per survey:', num_surveys) # > > ~~~ # > > {: .language-python} # > > ~~~ # > > 600 subjects, 42 per survey: 15 # > > ~~~ # > > {: .output} # > {: .solution} # {: .challenge} # # > ## Strings to Numbers # > # > Where reasonable, `float()` will convert a string to a floating point number, # > and `int()` will convert a floating point number to an integer: # > # > ~~~ # > print("string to float:", float("3.4")) # > print("float to int:", int(3.4)) # > ~~~ # > {: .language-python} # > # > ~~~ # > string to float: 3.4 # > float to int: 3 # > ~~~ # > {: .output} # > # > If the conversion doesn't make sense, however, an error message will occur. # > # > ~~~ # > print("string to float:", float("Hello world!")) # > ~~~ # > {: .language-python} # > # > ~~~ # > --------------------------------------------------------------------------- # > ValueError Traceback (most recent call last) # > <ipython-input-5-df3b790bf0a2> in <module> # > ----> 1 print("string to float:", float("Hello world!")) # > # > ValueError: could not convert string to float: 'Hello world!' # > ~~~ # > {: .error} # > # > Given this information, what do you expect the following program to do? # > # > What does it actually do? # > # > Why do you think it does that? # > # > ~~~ # > print("fractional string to int:", int("3.4")) # > ~~~ # > {: .language-python} # > # > > ## Solution # > > What do you expect this program to do? It would not be so unreasonable to expect the Python 3 `int` command to # > > convert the string "3.4" to 3.4 and an additional type conversion to 3. After all, Python 3 performs a lot of other # > > magic - isn't that part of its charm? 
# > > # > > ~~~ # > > int("3.4") # > > ~~~ # > > {: .language-python} # > > ~~~ # > > --------------------------------------------------------------------------- # > > ValueError Traceback (most recent call last) # > > <ipython-input-2-ec6729dfccdc> in <module> # > > ----> 1 int("3.4") # > > ValueError: invalid literal for int() with base 10: '3.4' # > > ~~~ # > > {: .output} # > > However, Python 3 throws an error. Why? To be consistent, possibly. If you ask Python to perform two consecutive # > > typecasts, you must convert it explicitly in code. # > > ~~~ # > > int(float("3.4")) # > > ~~~ # > > {: .language-python} # > > ~~~ # > > 3 # > > ~~~ # > > {: .output} # > {: .solution} # {: .challenge} # # > ## Arithmetic with Different Types # > # > Which of the following will return the floating point number `2.0`? # > Note: there may be more than one right answer. # > # > ~~~ # > first = 1.0 # > second = "1" # > third = "1.1" # > ~~~ # > {: .language-python} # > # > 1. `first + float(second)` # > 2. `float(second) + float(third)` # > 3. `first + int(third)` # > 4. `first + int(float(third))` # > 5. `int(first) + int(float(third))` # > 6. `2.0 * second` # > # > > ## Solution # > > # > > Answer: 1 and 4 # > {: .solution} # {: .challenge} # # > ## Complex Numbers # > # > Python provides complex numbers, # > which are written as `1.0+2.0j`. # > If `val` is a complex number, # > its real and imaginary parts can be accessed using *dot notation* # > as `val.real` and `val.imag`. # > # > ~~~ # > a_complex_number = 6 + 2j # > print(a_complex_number.real) # > print(a_complex_number.imag) # > ~~~ # > {: .language-python} # > # > ~~~ # > 6.0 # > 2.0 # > ~~~ # > {: .output} # > # > # > 1. Why do you think Python uses `j` instead of `i` for the imaginary part? # > 2. What do you expect `1 + 2j + 3` to produce? # > 3. What do you expect `4j` to be? What about `4 j` or `4 + j`? # > # > > ## Solution # > > # > > 1. 
Standard mathematics treatments typically use `i` to denote an imaginary number. However, from media reports it # > > was an early convention established from electrical engineering that now presents a technically expensive area to # > > change. [Stack Overflow provides additional explanation and # > > discussion.](http://stackoverflow.com/questions/24812444/why-are-complex-numbers-in-python-denoted-with-j-instead-of-i) # > > 2. `(4+2j)` # > > 3. `4j` and `Syntax Error: invalid syntax`. In the latter cases, `j` is considered a variable and the statement # > > depends on if `j` is defined and if so, its assigned value. # > {: .solution} # {: .challenge}
colab/03_types_conversion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tmv
#     language: python
#     name: tmv
# ---

# +
# Bootstrap Django so the scoping models can be queried from this notebook.
import django, sys, os
sys.path.append('/home/max/software/django-tmv/tmv_mcc-apsis/BasicBrowser')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BasicBrowser.settings")
django.setup()

from scoping.models import *
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from itertools import product, combinations
# NOTE(review): the star imports above/below presumably provide `re`, `np`,
# `stoplist`, `snowball_stemmer` and `climate_words` used later — confirm.
from utils.text import *
from scipy.sparse import find
import matplotlib.pyplot as plt


def translate_patterns(pats):
    """Translate simplified keyword patterns into (regex, WoS query).

    Each pattern is either a plain phrase (``\w*`` acts as a wildcard) or a
    proximity pattern of the form ``term1 (\w+\s*\W*){n} term2`` meaning
    "term1 within n words of term2".

    Returns a tuple:
      rpat -- one combined case-insensitive regex usable in Django __iregex
      wpat -- the equivalent Web of Science TS=() advanced-search string
    """
    rpats = []
    for p in pats:
        if re.search('\{([0-9])+\}', p):
            # Proximity pattern: allow the two terms in either order.
            ps = p.split()
            n = re.search('\{([0-9])+\}', p).group(1)
            p = "(" + ps[0] + " *(\w+\s*\W*){0,"+n+"} " + ps[2] + ")|(" + ps[2] + " *(\w+\s*\W*){0,"+n+"} " + ps[0] + ")"
        else:
            # Plain phrase: also match hyphenated variants of the phrase.
            p = p.replace(" ","( |-)")
        rpats.append(p)
    rpat = "|".join(rpats)
    #" OR ".join([x.replace('\w*','*').replace('(\w+\s*\W*){','NEAR/').replace('}','') for x in pats])
    # Plain phrases become quoted OR-ed terms in the WoS query...
    opats = [x.replace('\w*','*') for x in pats if '(\w+\s*\W*){' not in x]
    opats = " OR ".join([f'"{x}"' for x in opats])
    # ...and proximity patterns are expressed with the WoS NEAR/n operator.
    npats = [x.replace('\w*','*').replace('(\w+\s*\W*){','NEAR/').replace('}',' ') for x in pats if '(\w+\s*\W*){' in x]
    npats = " OR ".join([f'"{x}"' for x in npats])
    if npats:
        wpat = f'TS=({opats}) OR TS=({npats})'
    else:
        wpat = f'TS=({opats})'
    return rpat, wpat


def match_docs(rpat, docs, fields):
    """Split queryset `docs` into (matching, non-matching) querysets.

    A document matches if ANY of the given model `fields` matches the
    case-insensitive regex `rpat`.
    """
    mdocs = set([])
    for f in fields:
        # Collect pks matching on this field; union across fields.
        tdocs = set(docs.filter(**{f"{f}__iregex": rpat}).values_list('pk',flat=True))
        mdocs = mdocs | tdocs
    nmdocs = docs.exclude(id__in=mdocs)
    mdocs = docs.filter(id__in=mdocs)
    return mdocs, nmdocs


def mash_texts(x, pat=None):
    """Concatenate a Doc's keyword (de/kwp), abstract (ab) and title (ti)
    fields into one string, optionally removing substrings matching `pat`."""
    s = ""
    for at in ["de", "kwp","ab", "ti",]:
        if hasattr(x.wosarticle, at) and getattr(x.wosarticle, at):
            t = getattr(x.wosarticle, at)
            if pat:
                t = re.sub(pat, "", t)
            s+=f" {t}"
    return s.strip()


# +
from matplotlib.patches import Rectangle

def lit_plot():
    """Draw the nested 'all / relevant / identified relevant literature'
    diagram and return the axes for further annotation."""
    fig, ax = plt.subplots()
    cols = ['#edf8b1','#7fcdbb','#2c7fb8']
    rect = Rectangle(
        (0,0),1,1,
        facecolor=cols[0],
        edgecolor="grey"
    )
    ax.add_patch(rect)
    ax.annotate(
        "All literature",
        (0.7,0.7),
        ha="center",
        va="center",
        #arrowprops=dict(facecolor='black', shrink=0.05,width=1),
    )
    rect = Rectangle(
        (0,0),.4,.4,
        facecolor=cols[1],
        edgecolor="grey"
    )
    ax.add_patch(rect)
    ax.annotate(
        "Relevant literature",
        (0.3,0.3),(0.7,0.3),
        ha="center",
        va="center",
        arrowprops=dict(facecolor='black', shrink=0.05,width=1),
    )
    rect = Rectangle(
        (0,0),.2,.2,
        facecolor=cols[2],
        edgecolor="grey"
    )
    ax.add_patch(rect)
    ax.annotate(
        "Identified relevant literature",
        (0.1,0.1),(0.7,0.1),
        ha="center",
        va="center",
        arrowprops=dict(facecolor='black', shrink=0.05,width=1),
    )
    #ax.axis('off')
    ax.set_xticks([])
    ax.set_yticks([])
    return ax

lit_plot()
plt.savefig("../plots/basic_lit_plot.png")

# +
# Overlay a semi-transparent rectangle representing one query's coverage.
ax = lit_plot()
rect = Rectangle(
    (0,0),.472,.65,
    facecolor="white",
    edgecolor="grey",
    linewidth=3,
    alpha=0.4,
)
ax.add_patch(rect)
plt.savefig("../plots/lit_plot_query_1.png")
plt.show()

# +
ax = lit_plot()
rect = Rectangle(
    (0,0),.18,.85,
    facecolor="white",
    edgecolor="grey",
    linewidth=3,
    alpha=0.4,
)
ax.add_patch(rect)
plt.savefig("../plots/lit_plot_query_2.png")
plt.show()

# +
ax = lit_plot()
rect = Rectangle(
    (0,0),.38,.75,
    facecolor="white",
    edgecolor="grey",
    linewidth=3,
    alpha=0.4,
)
ax.add_patch(rect)
plt.savefig("../plots/lit_plot_query_3.png")
plt.show()

# +
# Pull the screened document set (query 6956) and drop manually identified
# false positives by primary key.
docs = Doc.objects.filter(query=6956, content__iregex="\w").order_by('id')

## WHY IS OLIVEIRA, 2007 included?
### Also, Nepstad and Stickler, 2008
### Polidoro 2010, very weak - not main conclusion

# Exclude non-climate docs
docs = docs.exclude(pk__in=[
    # ENSO
    1461495, # Espinoza 2013
    1626875, # White, 2007
    1622783, # Why is Oliveira, 2007 included?
    1627593, # Killeen 2008
    1627701, # Also, Nepstad and Stickler, 2008
    2093363, # Etter
    2090005, # Nepstad, 2006
    1627734, # Polidoro 2010
    2089982, # Mars, 2007
    2090013, # Randolph 2010
    2093360, # Petney 2012
    2089960, # Wassenaar, 2007 - LUC
    1627733, # Polidoro 2010, very weak - not main conclusion
    2090004, # Veran 2007 - long line fishing
    2089967, # Giri - mangrove database, climate only mentioned in conclusion and with reference
    2089944, # Jellyman - eels
    2091676, # Novelo-Casanova, 2010: Cayman Islands climate??
    2089959, # Bruno, 2007
    1514856, # de Waroux 2012
    2091680, # Guzman, 2008, we consider the main anthropogenic threats to the coral reefs and communities of the islands are overfishing, sedimentation and tourism
    2089950, # Razumov - permafrost and erosion - climate? is the ref there as counter-evidence Nevertheless, the longterm average annual rate
    # of erosion of the coast line of the bottom of the Arctic
    # Seas is much lower than in the seas outside the cry
    # olithozone even under the conditions of the climate’s
    # warming accompanied by the activation of thermo
    # abrasion and during the subaqual abrasion processes
    # (Fig. 5). In particular, this proves the development of
    # ordinary abrasionaccumulation processes in the Arc
    # tic Seas, and, here, we cannot say about the influence
    # of thermal slumps on the formation of the underwater
    # slope relief
])
# docs.count()

# +
# Build a binary (presence/absence) uni+bigram document-term matrix over the
# mashed keyword/abstract/title texts.
#X = vec.fit_transform(docs.values_list('content',flat=True))
vec = CountVectorizer(
    ngram_range=(1,2),
    min_df=10,
    strip_accents='unicode',
    max_features=10000,
    tokenizer=snowball_stemmer()
)
texts = [mash_texts(x) for x in docs]
# NOTE(review): `X = X =` double assignment is redundant but harmless.
X = X = vec.fit_transform(texts)
vocab = vec.get_feature_names()
X[X.nonzero()] = 1
X
# -

# ## Only Climate keywords

# ## Still unresolved:
#
# - La Nina, El Nino

# +
# Climate-signal patterns: a doc must mention climate (change) somewhere.
pats = [
    "climate model",
    "elevated\w* temperatur",
    "ocean\w* warming",
    "saline\w* intrusion",
    "chang\w* climat",
    "environment\w* change",
    "climat\w* change",
    "climat\w* warm",
    "warming\w* climat",
    "climat\w* varia",
    "global\w* warming",
    "global\w* change",
    "greenhouse\w* effect",
    "anthropogen\w*",
    "sea\w* level",
    "precipitation variabil\w*",
    "precipitation change\w*",
    "temperature\w* impact",
    "environmental\w* variab",
    "change\w* (\w+\s*\W*){5} cryosphere",
    "increase\w* (\w+\s*\W*){3} temperatur*",
    "weather\w* pattern",
    "weather\w* factor\w*",
    "climat\w*"
]

crpat, cwpat = translate_patterns(pats)
mdocs, nmdocs = match_docs(crpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])

# Additionally accept any doc whose *title* contains "climat*".
ctrpat, ctwpat = translate_patterns(["climat\w*"])
cmdocs, cnmdocs = match_docs(ctrpat, docs, ["title"])

cids = cmdocs.values_list('pk',flat=True)

mids = set(mdocs.values_list('pk',flat=True)) | set(cmdocs.values_list('pk', flat=True))
mdocs = docs.filter(pk__in=mids)
nmdocs = docs.exclude(pk__in=mids)

print(cwpat)
print(f"starting point: {docs.count()} docs")
print(mdocs.count())
# Inspect the first non-matching docs to spot recall failures.
for d in nmdocs[:10]:
    print('\n#####')
    print(d.id, d.title)
    print(d.docauthinst_set.all().order_by('position'))
    print(d.PY)
    print(d.content)
    print(d.wosarticle.de)
    print(d.wosarticle.kwp)
# -

# Ablation: drop each climate pattern in turn and count documents lost.
for p in pats:
    trpat, twpat = translate_patterns([x for x in pats if x!=p])
    mdocs, nmdocs = match_docs(trpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])
    nmdocs = nmdocs.exclude(pk__in=cids.values_list('pk',flat=True))
    print(p)
    print(f'removing "{p}" loses {nmdocs.count()} documents')

# +
# Refit the vectorizer with the climate terms stripped from the texts, so
# the remaining vocabulary highlights non-climate (impact) terms.
vec = CountVectorizer(
    ngram_range=(1,2),
    min_df=5,
    strip_accents='unicode',
    max_features=10000,
    stop_words=stoplist,
    tokenizer=snowball_stemmer()
)
texts = [mash_texts(x, crpat) for x in docs]
X = X = vec.fit_transform(texts)
vocab = vec.get_feature_names()
X[X.nonzero()] = 1
X
# -

# +
# Most frequent remaining terms.
colsum = np.array(X.sum(axis=0))[0]
for i in np.argsort(colsum)[-10:]:
    print(vocab[i])
    print(colsum[i])

# +
# Impact-signal patterns: a doc must mention an observed climate impact.
pats = [
    "impact\w*",
    "specie\w*",
    "mortality\w*",
    "ecosystem\w*",
    #"snowmelt\w*",
    "mass balance",
    #"landslide\w*",
    "flood\w*",
    "drought",
    "disease\w*",
    #"desertification",
    "glacier\w* (\w+\s*\W*){3} melt\w*",
    "glacier\w* (\w+\s*\W*){3} mass\w*",
    "adaptation",
    "malaria",
    #"population dynamic",
    "fire",
    #"coral bleaching",
    "water scarcity",
    "water supply",
    #"yield response\w*",
    "erosion\w* (\w+\s*\W*){5} coast\w*",
    "glacier\w* (\w+\s*\W*){5} retreat\w*",
    #"rainfall\w* (\w+\s*\W*){5} decline\w*",
    "rainfall\w* (\w+\s*\W*){5} reduc\w*",
    "coral\w* (\w+\s*\W*){5} stress\w*",
    #"coral\w* (\w+\s*\W*){5} declin\w*",
    "precip\w* (\w+\s*\W*){5} \w*crease\w*",
    "permafrost",
    "biological response",
    "food availability",
    "food security",
    "vegetation dynamic\w*",
    "river (\w+\s*\W*){5} flow",
    "cyclone\w*",
    "yield\w*",
    #"deglacier\w*",
    "snow water equival\w*",
    "surface temp\w*"
]

irpat, iwpat = translate_patterns(pats)
mdocs, nmdocs = match_docs(irpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])

print(iwpat)
print(f"starting point: {docs.count()} docs")
print(mdocs.count())
#for d in random.sample(list(nmdocs), 10):
for d in nmdocs[:10]:
    print('\n#####')
    print(d.id, d.title)
    print(d.docauthinst_set.all().order_by('position'))
    print(d.PY)
    print(d.content)
    print(d.wosarticle.de)
    print(d.wosarticle.kwp)
# -

# Ablation over the impact patterns.
for p in pats:
    trpat, twpat = translate_patterns([x for x in pats if x!=p])
    mdocs, nmdocs = match_docs(trpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])
    #nmdocs = nmdocs.exclude(pk__in=cnmdocs.values_list('pk',flat=True))
    print(p)
    print(f'removing "{p}" loses {nmdocs.count()} documents')

# +
# Attribution/observation-signal patterns: words suggesting observed,
# historical (rather than purely projected) change.
pats = [
    "recent",
    "current",
    "modern",
    "observ\w*",
    #"case study",
    "evidence\w*",
    "past",
    "local",
    "region\w*",
    "significant",
    "driver\w*",
    "response",
    #"have responded",
    "were responsible",
    "was responsible",
    "exhibited",
    "witnessed",
    "attribut\w*",
    "has increased",
    "has decreased",
    #"have increased",
    #"have decreased",
    "histor\w*",
    #"recorded",
    "correlation",
    "evaluation",
]

arpat, awpat = translate_patterns(pats)
mdocs, nmdocs = match_docs(arpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])

print(awpat)
print(f"starting point: {docs.count()} docs")
print(mdocs.count())
#for d in random.sample(list(nmdocs), 10):
for d in nmdocs[:10]:
    print('\n#####')
    print(d.id, d.title)
    print(d.docauthinst_set.all().order_by('position'))
    print(d.PY)
    print(d.content)
    print(d.wosarticle.de)
    print(d.wosarticle.kwp)
# -

# ### go through each keyword (for all pattern lists), checking what changes if it is removed.
# +
# Documents matched by the attribution pattern set alone.
mdocs, nmdocs = match_docs(arpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])
mdocs.count()
# -

# Ablation: report only the attribution keywords whose removal loses nothing.
for p in pats:
    trpat, twpat = translate_patterns([x for x in pats if x!=p])
    mdocs, nmdocs = match_docs(trpat, docs, ["wosarticle__de","wosarticle__kwp","title","content"])
    if nmdocs.count()==0:
        print(f'removing "{p}" loses {nmdocs.count()} documents')

## All queries together
f"({awpat}) AND ({iwpat}) AND ({cwpat})"

# +
# Load the large candidate corpus (query 7368) as plain dicts.
query_docs = Doc.objects.filter(query=7368)
qds = list(query_docs.order_by('id').values("id","wosarticle__de","wosarticle__kwp","wosarticle__ab","wosarticle__ti"))
qds[0]

# +
ids = []
texts = []
subset_ids = []
# Row positions (within qds) of the known-relevant docs.
subset_docs = list(mdocs.values_list('pk',flat=True))

def mash_texts(x, pat=None):
    """Dict-based variant of mash_texts for `.values()` rows: concatenate the
    keyword/abstract/title values, optionally stripping matches of `pat`.

    NOTE(review): this redefines the model-based mash_texts from the first
    cell; later cells that iterate over Doc objects will now hit this
    dict-style version — confirm intended.
    """
    s = ""
    for at in ["wosarticle__de", "wosarticle__kwp","wosarticle__ab", "wosarticle__ti",]:
        if at in x:
            t = x[at]
            if pat:
                t = re.sub(pat, "", t)
            s+=f" {t}"
    return s.strip()

for i, d in enumerate(qds):
    if i % 100000 == 0:
        # Progress indicator for the large corpus.
        print(i)
    ids.append(i)
    texts.append(mash_texts(d))
    if d['id'] in subset_docs:
        subset_ids.append(i)
# -

X = X = vec.fit_transform(texts)
vocab = vec.get_feature_names()
X[X.nonzero()] = 1
X

#subset = X[random.sample(range(X.shape[0]),round(X.shape[0]*0.1))]
# Rows of the matrix corresponding to the known-relevant documents.
subset = X[subset_ids]
subset

# Vocabulary terms that never occur in the relevant subset...
colsum = np.array(subset.sum(axis=0))[0]
zeros = np.argwhere(colsum==0)[:,0]
print(len(zeros))

X[:,zeros]

# ...ranked by how common they are in the whole corpus (exclusion candidates).
zsums = np.array(X[:,zeros].sum(axis=0))
zsorted = np.argsort(zsums)[0]
for i in zsorted[-200:][::-1]:
    z = int(zeros[i])
    v = vocab[z]
    n = zsums[0][i]
    print(v, n)

# - impact not impact assessment

exclusions = [
    "carbon captur*",
    "life cycle assessment",
    "pleistocen*",
    "fuel cell"
]

for d in mdocs.filter(content__icontains="model").exclude(content__iregex="examin|evidenc|observ|measure").values('content'):
    print(d)
    print()

zeros[205]

texts = [mash_texts(x) for x in mdocs]
X = X = vec.fit_transform(texts)
vocab = vec.get_feature_names()
X[X.nonzero()] = 1

# +
# Find combinations of vocab (except for those that contain the stopwords below)
indices, vals = np.array(X.sum(0))[0].argsort()[::-1], np.sort(np.array(X.sum(0))[0])[::-1]
all_cs = list(combinations(indices[:300],2))
print(len(all_cs))
c_lengths = []
cs = []
max_x = 0
max_ind = 0
local_stops = set([
    "use","also","studi","may","larg","dure","occur","result",
    "climat chang","year"
]) | set(climate_words)
stop_combos = [
    ["studi","year"],
    ["year","data"],
    ["year","observ"],
    ["year","dure"],
    ["data","indic"],
    ["water","ocean"],
    ["increase", "result"],
    ["sea","ocean"],
    ["chang", "increas"],
    ["increas", "temperatur"],
    ["data", "analysi"],
    ["impact","effect"],
    ["result", "high"],
    ["chang","warm"],
    ["climat","warm"],
    ["climat","temperatur"],
    ["show", "data"]
]
for c in all_cs:
    words = [vocab[x] for x in c]
    if len(local_stops & set(words))>0:
        continue
    if words in stop_combos:
        continue
    if len(set(words) & set(climate_words)) > 0:
        continue
    # Number of docs containing BOTH terms of the pair (AND semantics).
    x = sum([X[:,x] for x in c])
    l = x[x > 1].shape[1]
    c_lengths.append(l)
    cs.append(c)
print(len(cs))

# +
ids = []
# Go through the combinations, choosing the one that adds the most relevant documents
seen = []
for i in range(25):
    if i==0:
        # Seed with the pair covering the most documents.
        c = cs[np.argsort(c_lengths)[::-1][0]]
        x = sum([X[:,xi] for xi in c])
        x[x==1] = 0
        x.eliminate_zeros()
        x[x>0] = 1
        ids.append(c)
        continue
    base_l = len(x.data)
    max_l = len(x.data)
    max_ind = None
    for ind in np.argsort(c_lengths)[::-1]:
        if ind in seen:
            continue
        c = cs[ind]
        words = [vocab[x] for x in c]
        if len(local_stops & set(words))>0:
            continue
        if words in stop_combos:
            continue
        if c in ids:
            continue
        # Binary vector of docs matched by this AND-pair.
        tmp_x = sum([X[:,xi] for xi in c])
        for w in range(1, len(c)):
            tmp_x[tmp_x==w] = 0
        tmp_x.eliminate_zeros()
        tmp_x[tmp_x>0] = 1
        tmp_x = x + tmp_x
        l = len(tmp_x.data)
        if l > max_l:
            max_l = l
            max_ind = ind
        if l <= base_l:
            # This pair adds no new coverage — never revisit it.
            seen.append(ind)
    if not max_ind:
        break
    else:
        # Commit the best pair found this round.
        c = cs[max_ind]
        tmp_x = sum([X[:,xi] for xi in c])
        for w in range(1, len(c)):
            tmp_x[tmp_x==w] = 0
        tmp_x.eliminate_zeros()
        tmp_x[tmp_x>0] = 1
        x = x + tmp_x
        x[x>1]=1
        ids.append(c)
        print(f"{i+1}: {max_l}")
        if x.shape[0] == max_l:
            break

c = [" AND ".join([vocab[x]+"*" for x in c]) for c in ids]
print(c)
print(max_l)
# -

t = " OR ".join([f"({x})" for x in c])
q = f"TS=({t})"
print(q)

X.shape

# First document not covered by the induced query.
texts[np.where(x.A==0)[0][0]]

# +
np.where(x.A==0)

for j in list(find(X[353]))[1]:
    print(vocab[j])

# +
# Climate words
cwords = ["climate change","climat"]
for t in texts:
    tc = False
    for w in cwords:
        if w in t.lower():
            tc=True
    if not tc:
        print(t)
        break

# +
# for i,c in enumerate(combinations(indices, 4)):
#     x = sum([X[:,x] for x in c])
#     l = len(x.data)
#     if l > 740:
#         print(f'{" OR ".join([vocab[x] for x in c])} returns {l}')

# +
# Greedy single-term selection: repeatedly add the term that covers the most
# additional documents.
ids = []
for i in range(10):
    if i==0:
        x = X[:,indices[0]]
        ids.append(indices[0])
        continue
    max_x = len(x.data)
    max_ind = None
    for ind in indices:
        if ind in ids:
            continue
        tmp_x = x + X[:,ind]
        if len(tmp_x.data) > max_x:
            max_x = len(tmp_x.data)
            max_ind = ind
    if not max_ind:
        break
    else:
        x = x + X[:,max_ind]
        ids.append(max_ind)
        print(max_x)
        print(i)

print(f'{" OR ".join([vocab[x]+"*" for x in ids])} returns {len(x.data)}')

# +
# Second batch of 10 greedy terms, excluding the first batch.
n_ids = []
for i in range(10):
    if i==0:
        for ind in indices:
            if ind not in ids:
                x = X[:,ind]
                n_ids.append(ind)
                break
        continue
    max_x = len(x.data)
    max_ind = None
    for ind in indices:
        if ind in ids or ind in n_ids:
            continue
        tmp_x = x + X[:,ind]
        if len(tmp_x.data) > max_x:
            max_x = len(tmp_x.data)
            max_ind = ind
    if not max_ind:
        break
    else:
        x = x + X[:,max_ind]
        n_ids.append(max_ind)
        print(max_x)
        print(i)

strings = [f'"{vocab[x]}*"' for x in n_ids]
print(f'{" OR ".join(strings)} returns {len(x.data)}')

# +
# Third batch.
ids += n_ids
n_ids = []
for i in range(10):
    if i==0:
        for ind in indices:
            if ind not in ids:
                x = X[:,ind]
                n_ids.append(ind)
                break
        continue
    max_x = len(x.data)
    max_ind = None
    for ind in indices:
        if ind in ids or ind in n_ids:
            continue
        tmp_x = x + X[:,ind]
        if len(tmp_x.data) > max_x:
            max_x = len(tmp_x.data)
            max_ind = ind
    if not max_ind:
        break
    else:
        x = x + X[:,max_ind]
        n_ids.append(max_ind)
        print(max_x)
        print(i)

# NOTE(review): prints the *previous* batch's `strings` — looks like a stale
# variable; confirm whether strings should be rebuilt from this n_ids.
print(f'{" OR ".join(strings)} returns {len(x.data)}')

# +
# Fourth batch.
ids += n_ids
n_ids = []
for i in range(10):
    if i==0:
        for ind in indices:
            if ind not in ids:
                x = X[:,ind]
                n_ids.append(ind)
                break
        continue
    max_x = len(x.data)
    max_ind = None
    for ind in indices:
        if ind in ids or ind in n_ids:
            continue
        tmp_x = x + X[:,ind]
        if len(tmp_x.data) > max_x:
            max_x = len(tmp_x.data)
            max_ind = ind
    if not max_ind:
        break
    else:
        x = x + X[:,max_ind]
        n_ids.append(max_ind)
        print(max_x)
        print(i)

print(f'{" OR ".join([vocab[x] for x in n_ids])} returns {len(x.data)}')

# +
# Fifth batch.
ids += n_ids
n_ids = []
for i in range(10):
    if i==0:
        for ind in indices:
            if ind not in ids:
                x = X[:,ind]
                n_ids.append(ind)
                break
        continue
    max_x = len(x.data)
    max_ind = None
    for ind in indices:
        if ind in ids or ind in n_ids:
            continue
        tmp_x = x + X[:,ind]
        if len(tmp_x.data) > max_x:
            max_x = len(tmp_x.data)
            max_ind = ind
    if not max_ind:
        break
    else:
        x = x + X[:,max_ind]
        n_ids.append(max_ind)
        print(max_x)
        print(i)

print(f'{" OR ".join([vocab[x] for x in n_ids])} returns {len(x.data)}')
# -
literature_identification/induce_query.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercice 2 - Algorithme de Shor # ## Contexte historique # # En informatique, nous mesurons souvent les performances d'un algorithme en fonction de sa croissance avec la taille du problème d'entrée. Par exemple, l'addition a un algorithme qui croît linéairement avec la taille des nombres que nous ajoutons. Il existe certains problèmes informatiques pour lesquels les meilleurs algorithmes dont nous disposons augmentent _exponentiellement_ avec la taille de l'entrée, ce qui signifie que les entrées de taille relativement modeste sont trop grandes pour être résolues avec n'importe quel ordinateur sur terre. Nous en sommes tellement sûrs qu'une grande partie de la sécurité d'Internet dépend du fait que certains problèmes sont insolubles. # # En 1994, <NAME> a montré qu'il est possible de factoriser efficacement un nombre dans ses nombres premiers sur un ordinateur quantique.[1] C'est une grande nouvelle, car le meilleur algorithme classique que nous connaissons est l'un de ces algorithmes qui croît de façon exponentielle. Et en fait, le [cryptage RSA](https://en.wikipedia.org/wiki/RSA_(cryptosystem)) repose sur le fait que la factorisation de nombres suffisamment grands est infaisable. Pour factoriser des nombres entiers trop grands pour nos ordinateurs classiques actuels, il faudra des millions de qubits et de portes, et ces circuits sont beaucoup trop gros pour fonctionner avec succès sur les ordinateurs quantiques d'aujourd'hui. # # Alors, comment <NAME>, <NAME>, <NAME>, <NAME>, <NAME> et <NAME> parviennent à factoriser 15 sur un ordinateur quantique, depuis 2001 ?![2] # # La difficulté de créer des circuits pour l'algorithme de Shor est de créer le circuit qui calcule un $ay \bmod N$ contrôlé. 
Bien que nous sachions créer ces circuits en utilisant un nombre polynomial de portes, celles-ci sont encore trop grandes pour les ordinateurs d'aujourd'hui. Heureusement, si nous connaissons a priori quelques informations sur le problème, alors nous pouvons parfois « tricher » et créer des circuits plus efficaces. # # Pour exécuter ce circuit sur le matériel dont ils disposaient, les auteurs de l'article ci-dessus ont trouvé un circuit très simple qui a effectué $7y \bmod 15$. Cela a rendu le circuit suffisamment petit pour fonctionner sur leur matériel. À la fin de cet exercice, vous aurez créé un circuit pour $35y \bmod N$ qui peut être utilisé dans l'algorithme de Shor et peut fonctionner sur `ibmq_santiago`. # # Si vous voulez comprendre ce qui se passe dans cet exercice, vous devriez consulter la [page Qiskit Textbook sur l'algorithme de Shor](https://qiskit.org/textbook/ch-algorithms/shor.html), mais si c'est trop impliqué pour vous, vous pouvez terminer l'exercice sans cela. # # ### Les références # 1. Shor, <NAME>. "Algorithms for quantum computation: discrete logarithms and factoring." Proceedings 35th annual symposium on foundations of computer science. Ieee, 1994. # 2. Vandersypen, <NAME>, et al. "Experimental realization of Shor's quantum factoring algorithm using nuclear magnetic resonance." Nature 414.6866 (2001): 883-887. # # tl;dr : algorithme de Shor # # Il existe un algorithme appelé [_quantum phase estimation_](https://qiskit.org/textbook/ch-algorithms/quantum-phase-estimation.html) qui nous indique la phase qu'une porte introduit dans un certain type d'état. Par exemple, les entrées de l'algorithme d'estimation de phase pourraient être l'état $|1\rangle$ et la porte $Z$. Si la $Z$-gate agit sur l'état $|1\rangle$, on retrouve le même état avec une phase globale ajoutée de $\pi$ : # # $$ # Z|1\rangle = -|1\rangle = e^{i\pi} |1\rangle # $$ # # Et l'algorithme d'estimation de phase quantique pourrait résoudre ce problème pour nous. 
Vous pouvez voir un autre exemple [ici](https://qiskit.org/textbook/ch-algorithms/quantum-phase-estimation.html#2.-Example:-T-gate-). # # Shor a montré que si nous faisons une estimation de phase sur une porte, $U$, qui a le comportement $U|y\rangle = |a y\bmod N\rangle$, nous pouvons rapidement obtenir des informations sur les facteurs de $N$. # ## Le problème # # Dans cet exercice, nous allons factoriser 35 en effectuant une estimation de phase sur un circuit qui implémente $13y \bmod 35$. L'exercice consiste à créer un circuit qui fait cela, et qui est également assez petit pour fonctionner sur `ibmq_santiago` ! Ce n'est pas une tâche facile, donc la première chose que nous allons faire est de tricher. # # Un détail de l'algorithme de Shor est que notre circuit n'a besoin de travailler que sur des états que nous pouvons atteindre en appliquant $U$ à l'état de départ $|1\rangle$. C'est à dire. nous pouvons utiliser _any_ circuit qui a le comportement : # # $$ # \begin{aligned} # U|1\rangle &= |13\rangle \\ # UU|1\rangle &= |29\rangle \\ # UUU|1\rangle &= |27\rangle \\ # UUUU|1\rangle &= |1\rangle \\ # \end{aligned} # $$ # # Alors, comment pouvons-nous rendre cela plus facile pour nous? Comme nous n'avons besoin que de transformer correctement 4 états différents, nous pouvons les encoder sur deux qubits. Pour cet exercice, nous choisirons de mapper les états de base de calcul à 2 qubits aux nombres comme suit : # # $$ # \begin{aligned} # |1\rangle &\rightarrow |00\rangle \\ # |13\rangle &\rightarrow |01\rangle \\ # |29\rangle &\rightarrow |10\rangle \\ # |27\rangle &\rightarrow |11\rangle \\ # \end{aligned} # $$ # # Pourquoi est-ce de la « tricherie » ? Eh bien, pour tirer parti de cette optimisation, nous devons connaître tous les états que $U$ va affecter, ce qui signifie que nous devons calculer $ay \bmod N$ jusqu'à ce que nous revenions à 1, et cela signifie que nous connaissons le période de $a^x \bmod N$ et peut donc obtenir les facteurs de $N$. 
Une optimisation comme celle-ci, dans laquelle nous utilisons des informations qui nous indiqueraient la valeur $r$, ne va évidemment pas s'adapter à des problèmes que les ordinateurs classiques ne peuvent pas résoudre. # # Mais le but de cet exercice est juste de vérifier que l'algorithme de Shor fonctionne bien comme prévu, et nous n'allons pas nous inquiéter du fait que nous avons triché pour obtenir un circuit pour $U$. # # <div id='u-definition'></div> # <div class="alert alert-block alert-success"> # # **Exercice 2a :** Créez un circuit ($U$) qui effectue la transformation : # # $$ # \begin{aligned} # U|00\rangle &= |01\rangle \\ # U|01\rangle &= |10\rangle \\ # U|10\rangle &= |11\rangle \\ # U|11\rangle &= |00\rangle \\ # \end{aligned} # $$ # # et est contrôlé par un autre qubit. Le circuit agira sur un registre cible à 2 qubits nommé « cible » et sera contrôlé par un autre registre à un seul qubit nommé « contrôle ». Vous devez affecter votre circuit fini à la variable '`cu`'. # # </div> # + from qiskit import QuantumCircuit from qiskit import QuantumRegister, QuantumCircuit c = QuantumRegister(1, 'control') t = QuantumRegister(2, 'target') cu = QuantumCircuit(c, t, name="Controlled 13^x mod 35") # ECRIVEZ VOTRE CODE ENTRE CES LIGNES - COMMENCER # ÉCRIVEZ VOTRE CODE ENTRE CES LIGNES - FIN cu.draw('mpl') # - # Et exécutez la cellule ci-dessous pour vérifier votre réponse : # Vérifiez votre réponse en utilisant le code suivant from qc_grader import grade_ex2a grade_ex2a(cu) # Toutes nos félicitations! Vous avez terminé la partie difficile. # # Nous lisons la sortie de l'algorithme d'estimation de phase en mesurant les qubits, nous devrons donc nous assurer que notre registre de « comptage » contient suffisamment de qubits pour lire $r$. 
Dans notre cas, $r = 4$, ce qui signifie que nous n'avons besoin que de $\log_2(4) = 2$ qubits (en trichant encore car nous connaissons $r$ à l'avance), mais comme Santiago a 5 qubits, et nous n'avons utilisé que 2 pour le registre "cible", nous utiliserons les 3 qubits restants comme registre de comptage. # # Pour faire une estimation de phase sur $U$, nous devons créer des circuits qui effectuent $U^{2^x}$ ($U$ répétés $2^x$ fois) pour chaque qubit (d'indice $x$) dans notre registre de $n$ compter les qubits. Dans notre cas, cela signifie que nous avons besoin de trois circuits qui implémentent : # # $$U, \; U^2, \; \text{et} \; U^4 $$ # # L'étape suivante consiste donc à créer un circuit qui effectue $U^2$ (c'est-à-dire un circuit équivalent à appliquer $U$ deux fois). # # <div class="alert alert-block alert-success"> # # **Exercice 2b :** Créez un circuit ($U^2$) qui effectue la transformation : # # $$ # \begin{aligned} # U|00\rangle &= |10\rangle \\ # U|01\rangle &= |11\rangle \\ # U|10\rangle &= |00\rangle \\ # U|11\rangle &= |01\rangle \\ # \end{aligned} # $$ # # et est contrôlé par un autre qubit. Le circuit agira sur un registre cible à 2 qubits nommé « cible » et sera contrôlé par un autre registre à un seul qubit nommé « contrôle ». Vous devez affecter votre circuit fini à la variable '`cu2`'. # </div> # + c = QuantumRegister(1, 'control') t = QuantumRegister(2, 'target') cu2 = QuantumCircuit(c, t) # ECRIVEZ VOTRE CODE ENTRE CES LIGNES - COMMENCER # ÉCRIVEZ VOTRE CODE ENTRE CES LIGNES - FIN cu2.draw('mpl') # - # Et vous pouvez vérifier votre réponse ci-dessous : # Vérifiez votre réponse en utilisant le code suivant from qc_grader import grade_ex2b grade_ex2b(cu2) # Enfin, nous avons également besoin d'un circuit qui équivaut à appliquer $U$ quatre fois (c'est-à-dire que nous avons besoin du circuit $U^4$). 
# # <div class="alert alert-block alert-success"> # # **Exercice 2c :** Créez un circuit ($U^4$) qui effectue la transformation : # # $$ # \begin{aligned} # U|00\rangle &= |00\rangle \\ # U|01\rangle &= |01\rangle \\ # U|10\rangle &= |10\rangle \\ # U|11\rangle &= |11\rangle \\ # \end{aligned} # $$ # # et est contrôlé par un autre qubit. Le circuit agira sur un registre cible à 2 qubits nommé « cible » et sera contrôlé par un autre registre à un seul qubit nommé « contrôle ». Vous devez affecter votre circuit fini à la variable '`cu4`'. _Indice : La meilleure solution est très simple._ # </div> # + c = QuantumRegister(1, 'control') t = QuantumRegister(2, 'target') cu4 = QuantumCircuit(c, t) # ÉCRIVEZ VOTRE CODE ENTRE CES LIGNES - COMMENCER # ÉCRIVEZ VOTRE CODE ENTRE CES LIGNES - FIN cu4.draw('mpl') # - # Vous pouvez vérifier votre réponse en utilisant le code ci-dessous : # Vérifiez votre réponse en utilisant le code suivant from qc_grader import grade_ex2c grade_ex2c(cu4) # <div class="alert alert-block alert-success"> # # **Exercice 2 final :** Maintenant que nous avons contrôlé $U$, $U^2$ et $U^4$, nous pouvons combiner cela dans un circuit qui exécute la partie quantique de l'algorithme de Shor. # # La partie initialisation est simple : nous devons mettre le registre de comptage dans l'état $|{+}{+}{+}\rangle$ (ce que nous pouvons faire avec trois portes H) et nous avons besoin que le registre cible soit dans l'état $|1\rangle$ (que nous avons mappé à l'état de base de calcul $|00\rangle$, nous n'avons donc rien à faire ici). Nous ferons tout cela pour vous. # # _Votre_ tâche consiste à créer un circuit qui exécute les $U$ contrôlés, qui seront utilisés entre l'initialisation et la transformée de Fourier quantique inverse. 
Plus formellement, on veut un circuit : # # # $$ # CU_{c_0 t}CU^2_{c_1 t}CU^4_{c_2 t} # $$ # # Où $c_0$, $c_1$ et $c_2$ sont les trois qubits dans le registre 'comptage', $t$ est le registre 'cible' et $U$ est comme <a href="#u-definition"> défini dans la première partie de cet exercice</a>. Dans cette notation, $CU_{a b}$ signifie que $CU$ est contrôlé par $a$ et agit sur $b$. Une solution simple consiste à simplement combiner les circuits `cu`, `cu2` et `cu4` que vous avez créés ci-dessus, mais vous trouverez très probablement un circuit plus efficace qui a le même comportement ! # # </div> # <div class="alert alert-block alert-danger"> # # Votre circuit ne peut contenir que des [CNOTs](https://qiskit.org/documentation/stubs/qiskit.circuit.library.CXGate.html) et un seul qubit [U-gates](https://qiskit.org/documentation/stubs/qiskit.circuit.library.UGate.html). Votre score sera le nombre de CNOTs que vous utilisez (moins c'est mieux), car les portes multi-qubit sont généralement beaucoup plus difficiles à réaliser sur le matériel que les portes à qubit unique. Si vous rencontrez des difficultés avec cette exigence, nous avons inclus une ligne de code à côté de la soumission qui convertira votre circuit sous cette forme, bien que vous feriez probablement mieux à la main. 
# # </div> # Code pour combiner vos solutions précédentes dans votre soumission finale cqr = QuantumRegister(3, 'control') tqr = QuantumRegister(2, 'target') cux = QuantumCircuit(cqr, tqr) solutions = [cu, cu2, cu4] for i in range(3): cux = cux.compose(solutions[i], [cqr[i], tqr[0], tqr[1]]) cux.draw('mpl') # Vérifiez votre réponse en utilisant le code suivant from qc_grader import grade_ex2_final # Décommentez les deux lignes ci-dessous si vous devez convertir votre circuit en CNOTs et en portes à qubit unique #from qiskit import transpile #cux = transpile(cux, basis_gates=['cx','u']) grade_ex2_final(cux) # Une fois que vous êtes satisfait du circuit, vous pouvez le soumettre ci-dessous : # Soumettez votre réponse. Vous pouvez soumettre à nouveau à tout moment. from qc_grader import submit_ex2_final submit_ex2_final(cux) # Toutes nos félicitations! Vous avez terminé l'exercice. Lisez la suite pour voir votre circuit utilisé pour le facteur 35 et voir comment il fonctionne. # # ## Utilisation de votre circuit pour factoriser 35 # # La cellule de code ci-dessous prend votre soumission pour l'exercice et l'utilise pour créer un circuit qui nous donnera $\tfrac{s}{r}$, où $s$ est un entier aléatoire entre $0$ et $r-1$, et $r$ est la période de la fonction $f(x) = 13^x \bmod 35$. 
# + from qiskit.circuit.library import QFT from qiskit import ClassicalRegister # Créer l'objet circuit cr = ClassicalRegister(3) shor_circuit = QuantumCircuit(cqr, tqr, cr) # Initialiser les qubits shor_circuit.h(cqr) # Ajoutez votre circuit shor_circuit = shor_circuit.compose(cux) # Effectuer le QFT inverse et extraire la sortie shor_circuit.append(QFT(3, inverse=True), cqr) shor_circuit.measure(cqr, cr) shor_circuit.draw('mpl') # - # Transpilons ce circuit et voyons quelle est sa taille et combien de CNOTs il utilise : from qiskit import Aer, transpile, assemble from qiskit.visualization import plot_histogram qasm_sim = Aer.get_backend('aer_simulator') tqc = transpile(shor_circuit, basis_gates=['u', 'cx'], optimization_level=3) print(f"circuit depth: {tqc.depth()}") print(f"Circuit contains {tqc.count_ops()['cx']} CNOTs") # Et voyons ce que nous obtenons : counts = qasm_sim.run(tqc).result().get_counts() plot_histogram(counts) # En supposant que tout a fonctionné correctement, nous devrions voir une probabilité égale de mesurer les nombres 0$, 2$, 4$ et 8$. En effet, l'estimation de phase nous donne $2^n \cdot \tfrac{s}{r}$, où $n$ est le nombre de qubits dans notre registre de comptage (ici $n = 3$, $s$ est un entier aléatoire entre $0$ et $r-1$, et $r$ est le nombre que nous essayons de calculer). Convertissons-les en fractions qui nous disent $s/r$ (c'est quelque chose que nous pouvons facilement calculer classiquement) : from fractions import Fraction n = 3 # n est le nombre de qubits dans notre registre de "comptage" # Parcourez chaque chaîne de mesure for measurement in counts.keys(): # Convertissez la chaîne binaire en 'int' et divisez par 2^n decimal = int(measurement, 2)/2**n # Utilisez l'algorithme des fractions continues pour convertir en forme a/b print(Fraction(decimal).limit_denominator()) # Nous pouvons voir que le dénominateur de certains des résultats nous dira la bonne réponse $r = 4$. 
# On peut vérifier $r = 4$ rapidement :

13**4 % 35

# Alors, comment pouvons-nous en tirer les facteurs ? Il y a une forte probabilité que le plus grand commun diviseur de $N$ et de $a^{r/2}-1$, ou de $N$ et de $a^{r/2}+1$, soit un facteur de $N$, et le plus grand commun diviseur est aussi quelque chose que nous pouvons facilement calculer de manière classique.

from math import gcd # Greatest common divisor
# Try both a^(r/2) - 1 and a^(r/2) + 1 (here a = 13, r = 4, N = 35).
for x in [-1, 1]:
    print(f"Guessed factor: {gcd(13**(4//2)+x, 35)}")

# Nous n'avons besoin que de trouver un facteur et pouvons l'utiliser pour diviser $N$ pour trouver l'autre facteur. Mais dans ce cas, $a^{r/2}-1$ et $a^{r/2}+1$ nous donnent tous les deux les facteurs de $35$. Nous pouvons à nouveau vérifier que cela est correct :

7*5

# ## Exécution sur `ibmq_santiago`
#
# Nous avons promis que cela fonctionnerait sur Santiago, nous allons donc vous montrer ici comment faire cela. Dans cet exemple, nous utiliserons un appareil Santiago simulé pour plus de commodité, mais vous pouvez le remplacer par l'appareil réel si vous le souhaitez :

# +
from qiskit.test.mock import FakeSantiago
from qiskit import assemble
from qiskit.visualization import plot_histogram
santiago = FakeSantiago()
real_device = False

# Décommentez ce bloc de code pour qu'il s'exécute sur l'appareil réel
#from qiskit import IBMQ
#IBMQ.load_account()
#provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
#santiago = provider.get_backend('ibmq_santiago')
#real_device = True

# Nous devons transpiler pour Santiago
tqc = transpile(shor_circuit, santiago, optimization_level=3)
# NOTE(review): per this code path, the fake backend is given an assembled
# Qobj while the real backend receives the transpiled circuit directly.
if not real_device:
    tqc = assemble(tqc)

# Exécutez le circuit et imprimez les comptes
counts = santiago.run(tqc).result().get_counts()
plot_histogram(counts)
# -

# Si votre score était suffisamment bas, vous devriez voir que nous avons une forte probabilité de mesurer $0$, $2$, $4$ ou $8$ comme nous l'avons vu avec la simulation parfaite.
Vous verrez des résultats supplémentaires en raison d'inexactitudes dans le processeur et d'éléments indésirables interagissant avec nos qubits. Ce "bruit" s'aggrave à mesure que notre circuit est long, car un temps de calcul plus long signifie plus de temps pour les interactions indésirables, et plus de portes signifie plus d'erreurs potentielles. C'est pourquoi nous avons dû tricher pour créer le plus petit circuit possible. # # Dans un avenir proche, nos systèmes quantiques s'amélioreront suffisamment pour que nous puissions commencer à utiliser des techniques d'atténuation des erreurs plus avancées pour surmonter ces problèmes, ce qui signifie que nous pourrons exécuter des circuits suffisamment grands pour [exécuter l'algorithme de Shor sans tricher](https://arxiv.org/pdf/quant-ph/0205095.pdf). # ## Additional information # # **Created by:** <NAME> # # **Version:** 1.0.0
content/ex2/ex2-fr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # lab 1

# ## demo of print

print("hello world")

1 + 2

a = 3

print(1)

# ### demo of add
# 1. item 1
# 2. item 2
#
# JMU [website](https://www.jmu.edu)

5 + 4

a = 5
b = 3
a + b

# %matplotlib inline

# +
import numpy as np
import matplotlib.pyplot as plt

# Sample one period of cosine and sine on 256 evenly spaced points.
xs = np.linspace(-np.pi, np.pi, 256, endpoint=True)
cos_vals, sin_vals = np.cos(xs), np.sin(xs)

plt.plot(xs, cos_vals)
plt.plot(xs, sin_vals)
plt.show()
My First Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## 截断 & 考虑用户频率
#
# 此代码目对应解决方案中对于similarity的改进
#
# This code is related to the improvement for similarity metrics

import pandas as pd
import numpy as np
import time
from scipy.sparse import *
import os
import re

## 统计每个商品的打分次数(用train)
# Load the per-item rating-count ("hotness") map built from the training set.
# NOTE(review): `eval` on file contents executes arbitrary code; the file is
# produced by an earlier pipeline step, but ast.literal_eval would be safer.
with open('hot_items_map.txt', 'r') as f:
    rating_times_map = eval(f.read())

# +
# For every batch file: divide each similarity value sim(i, j) by item i's
# rating count, penalising overly popular items (a very popular item co-occurs
# with almost everything, which would otherwise inflate its similarities), and
# keep only the 500 most similar items per item (truncation).
#
# (This paragraph was originally left as bare, uncommented Chinese text inside
# the cell, which made the script a SyntaxError. Translated: each file name
# encodes the item-index range of its batch, e.g.
# 'common_matrix_from_3598500_to_3958350.npz'; the range is recovered with a
# regular expression and the penalised similarity lists are stored in item_dict.)
item_dict = {}
for name in os.listdir('common_dense_valued_small/'):
    start_time = time.time()
    with open('common_dense_valued_small/' + name, 'r') as f:
        # NOTE(review): see the eval caveat above.
        l = eval(f.read())
    end_time = time.time()
    print('load file: %d sec'%((end_time - start_time)))
    # The file name encodes the [start, end) item-index range of this batch.
    name = re.findall(r'\d+', name)
    start = int(name[0])
    end = int(name[1])
    start_time = time.time()
    for i in range(start, end):
        # Drop self-similarity and penalise by item i's popularity.
        tmp_list = [(x[0], round(x[1] / rating_times_map[i], 4))
                    for x in l[i - start] if x[0] != i]
        if len(tmp_list) > 0:
            # Keep only the 500 most similar items, best first.
            item_dict[i] = sorted(tmp_list, key=lambda x: x[1], reverse=True)[:500]
    end_time = time.time()
    print('This batch is finished, time cost: %d sec'%((end_time - start_time)))
# -

len(item_dict)

# Persist the truncated, popularity-penalised similarity table.
with open('item_Apriori.txt','w') as f:
    f.write(str(item_dict))
Semi-Finals/underline_trainning/Step1 itemCF_based_on_Apriori/6_Sta_for_SparseMatrix.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Exercise 2 # * Due: 21.11. at noon # * Max points: 8 # ## General rules # * Send the source code or the written notes of your answers as an attachment to <EMAIL> before the due time # * You will get feedback about your answers a week from the due date # * From the second week on, the exercises will be given on the previous Wednesday's lecture # ## Exercises # # 1. **(2 points)** Assume that that you are buying and selling electricity. The more electricity you buy, the better price you get per kwH and the buying prize of electricity follows function $f(x) = 1-0.01x$, where $0\leq x\leq50$ is the amount of electricity you buy. On the other hand, the price that you get from selling electricity follows function $g(x) = 2-0.01x^2$ with $x$ again the amount of electricity that you sell. Formulate an optimization problem that maximizes the profit. # 2. **(2 points)** Use bisection search to optimize problem # $$ # \begin{align} # \min & (1-x)^2+x\\ # \text{s.t. }&x\in [0,2]. # \end{align} # $$ # 3. **(2 points)** Use golden section search to optimize the problem from above exercise. # 4. **(2 points)** Use differentiation to optimize the optimization problem of exercise 1 and to verify the answer of optimization problem 2. #
Exercise 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # HW3: Part 2 - GPUs # # v1.0 (2021 Spring): <NAME>, <NAME> # # **Time Budget: 3 Hours** # # Imagine you took a really wide SIMD unit, bolted it onto a really minimal processor, crammed a bunch of them onto one chip, and then burried it under several hundred layers of abstraction and NDAs. For some reason, we call this device a GPU which stands for "Graphics Processing Unit". # # **NOTE: Unless you are willing and able to do a SIGNIFICANT amount of setup on your computer and happen to have a Nvidia GPU lying around, you MUST run this notebook on the [course server](http://jupyter.ld-cd.net). Simply enter your preferred/first name as the login, and enter whatever password you would like.** # # ## Computation Model # *(Note: Most of the things in this section are NVIDIA specific, but other architectures are pretty similar)* # # The computation model for GPUs is really confusing, but it makes a little more sense if you understand how the GPUs are put together: # # ![image-3.png](attachment:image-3.png) # # The diagram shown here is a high level overview of a single "Stream Module". The stream module consists of a relatively simple processor that interprets your program and manually schedules the execution of incredibly simple instructions inside each of the blocks labelled "Core" which are essentially very simple floating-point arithmetic logic units. These cores share a single set of registers (well, there are two banks technically) and are packed together with a few double precision cores, load/store cores, and "Special Function Units" which compute things like sin, cos, sqrt, and so on. 
# # ![image-4.png](attachment:image-4.png) # # The entire GPU consists of a few dozen to a few hundred Stream Modules grouped together onto a single die along with blocks for interfacing with memory and driving a display. There's also a couple general purpose CPUs that pull the strings to make the whole thing dance. # # Programs for NVIDIA GPUs are typically called kernels. The code that you write in a kernel will typically be code required to produce a single element of the output, a lot like Numba's vectorize operator. This code can then be spawned on thousands of times, once for each output element. Threads are spawned in blocks of typically between 64 and 1024 threads (usually in powers of 2). A Stream Module is then assigned to each block and each thread can query which block it is in, and which thread it is in in that block. If your output is a 1D array, then the position of a thread in the output is given by `blockid*BLOCKSIZE + threadinblock`. Fortunately, Numba includes a shorthand for this in the form of `cuda.grid(1)` which returns your position in all the threads. Because you instantiate kernels with an integer number of blocks, if your output is not a multiple of the size of the block, then there will be several threads spawned without a corresponding element in the result. To manage this you usually want to have a condition in your code so that only threads that operate on an output element run anything. 
This is pretty confusing, and best expressed by example: # # ```python # from numba import cuda # # @cuda.jit # def square_cuda(x): # """ # The actual CUDA kernel which gets called on every thread # """ # # Retrieve theis threads position in the (in this case 1D) "grid" of all threads of this kernel # pos = cuda.grid(1) # if pos < x.size: # # In this case we overwite the original values # x[pos] = x[pos]*x[pos] # # def square_wrap(x, blocksize = 128): # """ # Wrap the CUDA kernel with some code to launch it # """ # # Compute the smallest number of blocks required to launch enough threads to handle every element # blocks = (x.size + blocksize - 1) // blocksize # # Launch `blocks` blocks of this kernel, each with blocksize threads # square_cuda[blocks, blocksize](x) # # # The GPU we are working with has best performance with 32 bit floats by an order of magnitude # square_wrap(np.float32(np.arange(2**18))) # ``` # # This example builds a simple kernel which replaces every element in an array by its square. This kernel is then launched on each of the threads. With a blocksize of 128 we have `2**18/128 = 2048` blocks which the stream modules will work their way through. In this case, the array size was a multiple of the element size so we had no unused threads. However, if we had `2**18 + 1` elements in our array we would have launched 2049 blocks, and only one of the threads in the extra block would run anything, with the condition failing for the rest of them. # # ## Memory Model # # In most of the programming we have done so far, all memory is essentially equal. There might be multiple levels of cache with different properties, but it's all essentially completely transparent. We may be able to observe its effects, but we don't (and indeed can't) manipulate it directly. When you attach any type of accelerator to a computer, things unfortunatly get quite a bit more complicated. 
# # ![image-5.png](attachment:image-5.png) # # The image above is a fairly high level overview of all the systems on the computer the notebook is presumably running on. Up to now, all of our computation has occured on the CPU and operated on data in the RAM attached to the CPU. Now that we're dealing with the GPU we can choose to place our computations there or on the CPU. Further, we can choose to keep our data stored in the RAM of either one. The majority of the work in extracting performance out of a GPU in real world workloads is often going to happen around shipping data in and out of the GPU. On the diagram above, the widths of each leg represent the total bandwith each connection has available to it. # # The CPU's memory has the lowest latency of any link in the computer, but on this old system, it's fairly bandwith constrained. The GPU's memory, on the other hand, has about 4 times the bandwith, but it trades this off for noticeably more latency, and in this case, less total memory. This means that the GPU is REALLY good at operations on lots of data which are simple enough for the GPU to be able to predict what parts of memory you will need before you access them, so that it can fetch them early so you don't notice the extra latency on the other hand. The CPU, on the other hand, is better at performing strange operations on small to medium sized pieces of data. In other words, if you were writing a web server, you probably want to use the CPU. If you're taking a Fourier transform of radio data, that's an excellent task for the GPU. # # In between the GPU and the CPU is a PCI Express link, which is what your computer uses for attaching network cards, fast Solid-State Disks, and accelerators like GPUs. This link has relatively low bandwith and relatively high latency compared to the memory buses, which means we want to avoid shipping data between the CPU and GPU as much as possible. 
Generally you'll want to ship it there once, do all the processing you can, and then ship only the result you need back. # # This is why you attach your screen to your GPU instead of to your motherboard. When you start a program, you can just load the GPU up with all the images and 3D models it will need. The GPU can just punt along, generating every frame in its own memory and shipping them out to the display. This doesn't require any traffic over the PCIe bus other than commands to move objects around in a scene. It was a lucky coincidence that this architecture, originally designed for 3D graphics, fits the computational model we want for scientific computing so well. # # Although Numba's CUDA interface lets the CPU transparently operate on data in the GPU and vice versa, in actuality to make this happen Numba has to ship the data back and forth. This is an easy mistake to make, and is probably what is happening if you notice your code suddenly taking a lot more time. # # Numba provides a couple simple functions to manage all this that are much easier to show than tell: # # ```python # import numpy as np # from numba import cuda # # # Create an array in the cpu's memory using numpy like normal # cpuarr = np.linspace(0, 1000, 10000, dtype=np.float32) # # # Copy the contents of that array into a new array in the GPU's memory, moving the data over the PCIe link # gpuarr = cuda.to_device(cpuarr) # # # Create a new array on the GPU which is empty (contains unspecified junk data) with the same size as the input to store our result (note cpuarr and gpuarr are interchangeable, we are just using them to get size information) # gpures = cuda.device_array(cpuarr.size) # # Or my preferred way: # gpures = cuda.device_array_like(cpuarr) # # # Perform a computation # some_gpu_func(gpuarr, gpures) # # # Copy our result back to a numpy array on the host # cpures = gpures.to_host() # ``` # # There are a number of other potentially handy memory management functions which are 
(nonexhaustively) listed [here](https://numba.pydata.org/numba-doc/latest/cuda/memory.html). # # In addition to what we discussed here, the GPU also has an additional class of memory which we won't really be touching on in this class but is handy to know about. This is ["shared memory"](https://developer.nvidia.com/blog/using-shared-memory-cuda-cc/). What NVIDIA calls Shared Memory is a small amount of memory in each Stream Module that is shared between all the threads in the block running on said SM, which is much faster than the off chip memory. This is really handy for algorithims like matrix multiplication where you can decompose the problem into many smaller problems. By using shared memory, each thread block can perform a small matrix multiplication much more quickly than using main memory, and then these results can be quickly aggregated to produce a final result. At its essence the so-called "Shared Memory" is essentially an explicitly managed cache. In fact, physically, the cache and shared memory are just a single piece of memory that's been partitioned into two. # ### Question 1: # # Run the cell below, and note the execution time on both the CPU and the GPU. Explain why the GPU execution time is so much worse than the CPU execution time. 
# # **Your Answer** # + from numba import cuda import numpy as np @cuda.jit def square_cuda(x): """ The actual CUDA kernel which gets called on every thread """ # Retrieve theis threads position in the (in this case 1D) "grid" of all threads of this kernel pos = cuda.grid(1) if pos < x.size: # In this case we overwite the original values x[pos] = x[pos]*x[pos] def square_wrap(x, blocksize = 128): """ Wrap the CUDA kernel with some code to launch it """ # Compute the smallest number of blocks required to launch enough threads to handle every element blocks = (x.size + blocksize - 1) // blocksize # Launch `blocks` blocks of this kernel, each with blocksize threads square_cuda[blocks, blocksize](x) data = np.linspace(0, 10, int(1e6), dtype=np.float32) print("GPU execution time") # %timeit _ = square_wrap(data) print("CPU execution time") # %timeit data*data # - # ### Question 2: # # Alter how the code is invoked using what you know about memory management, so that you reduce the GPU execution time to below the CPU time. # # *(Note: This may seem like a bit of a synthetic result: after all, we just move part of the process out of the timeit, but this is just a stepping stone. As we will see in the next question we can generate a lot of things, like linspaces, on the device itself without involving the CPU or its memory)* # + # you need to add a line up here print("New GPU execution time") # %timeit _ = square_wrap(_) # What goes here print("CPU execution time") # %timeit data*data # - # ### Question 3: # # Fill out the cell below to create a linspace function on the GPU. Your code should exist in two parts: a cuda kernel which takes an empty array and fills each element i according to the function `arr[i] = start + i * (stop-start)/num`, and a wrapper function which allocates an empty array on the device and invokes the kernel to fill it. This is equivalent to numpy's linspace function in `endpoint=False` mode. 
# + # Note: The naive way of doing this is still pretty fast, but if you realize that this is basically y = m*x + b # and compute the slope a single time outside of the kernel, you can make it run even faster @cuda.jit def linspace_kernel(arr, start, stop, num): # Find our position in the array pos = cuda.grid(1) # Your code here: def linspace_wrap(start, stop, num, blocksize = 128): # Compute the smallest number of blocks required to launch enough threads to handle every element blocks = (num + blocksize - 1) // blocksize # Create a block of memory to store your result: arr = _ # Invoke your kernel: # Return a result: return arr gpu_l = linspace_wrap(0, 100, int(1e6)) cpu_l = np.linspace(0, 100, int(1e6), endpoint=False, dtype=np.float32) assert np.allclose(gpu_l, cpu_l) print('GPU Linspace Performance:') # %timeit linspace_wrap(0, 100, int(1e6)) print('CPU Linspace Performance:') # %timeit np.linspace(0, 100, int(1e6), endpoint=False, dtype=np.float32) # - # ### Question 4: # # Play with the `num` parameter. Try generating an array with a few dozen elements and a few million (but don't go over 2-3 million; this GPU is shared between this whole class, and it doesn't have a lot of memory). Note how the performance changes. Make a quick plot with 5 or 6 points of its performance, and speculate on the cause of any non-linearities you observe. # # You may want to use the Python `time` library instead of the IPython `%timeit` line magic, so that you don't have to manually write down the data you want to plot. The `time.time()` function returns the number of seconds (with a fractional component) since a reference time (usually 1 January 1970 at midnight UTC - run `time.gmtime(0)` to see when it is) - think about how you can use this to time individual operations! 
import time
time.time() # run me a few times to see

# +
import matplotlib.pyplot as plt

# You don't need anything fancy here, just make a relatively simple plot with a couple points
# -

# **YOUR ANSWER**

# ### Question 5:
#
# Decrease the blocksize parameter to 4 (try to only use powers of 2), then increase it to 1024 (the max supported). Note how decreasing the blocksize at a certain point starts to decrease performance, but increasing it past a point doesn't do anything. Speculate on the cause of this effect.

# **YOUR ANSWER**

# A lot of problems, like the monte-carlo pi estimation problem for example, fit into a class of problems called map-reduce problems; you have a large set of data which you perform a mapping on, and then you perform a reduction step to distill that data down into a single, smaller, answer. So far, everything we have done here is just the mapping step.
#
# Note that the idea of reduction is also expressed in the Python `functools.reduce` function, which repeatedly applies a function to an iterable (a list, a `range` object, etc.) and returns the end result. `map` is also built into Python, which makes it easy to express problems in a map-reduce format:

# +
from functools import reduce

# reduce(f, it) folds the iterable left to right: f(f(f(it[0], it[1]), it[2]), ...)
print("Sum of ints 0-9: ", reduce(lambda a, b: a + b, range(10)))
print("Sum of squares of ints 0-9:", reduce(lambda a, b: a + b, map(lambda x: x ** 2, range(10))))
# -

# Writing reductions on the GPU can be difficult, but fortunately numba provides a decorator that lets us express simple reductions of two variables. Run the cell below to see how this works.

# +
# @cuda.reduce turns this binary op into a full-array reduction: calling
# sum_cuda(a) on an array sums all of its elements on the GPU.
@cuda.reduce
def sum_cuda(a, b):
    return a + b

a = linspace_wrap(0, 1, 10000)
sum_gpu = sum_cuda(a)
sum_cpu = np.sum(np.linspace(0, 1, 10000, endpoint=False))
print("GPU Sum = {}, CPU Sum = {}".format(sum_gpu, sum_cpu))
# -

# ### Question 6:
#
# This question is pretty open ended because generating random numbers on the GPU is actually pretty complicated.
Each thread needs to have a separate random number generator with separately initialized seeds and state.
static/homeworks/physcat_sp21_hw3_p2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Generate short poems with GPT-2 (TensorFlow 1.x): prime each generation with
# a random chunk of a scraped poem plus a one-word seed, then dump the results
# to JSON.

import json
import os

import numpy as np
import tensorflow as tf

import model, sample, encoder

# +
# # !ln -s ../models models  # hack to make models "appear" in two places

# +
# Sampling configuration.
model_name = '117M'
seed = None          # RNG seed; None leaves sampling non-deterministic
nsamples = 10
batch_size = 10
length = 40          # number of tokens to generate per sample
temperature = 0.8    # 0 is deterministic
top_k = 40           # 0 means no restrictions

assert nsamples % batch_size == 0

enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
    hparams.override_from_dict(json.load(f))

if length is None:
    length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
    raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

# +
sess = tf.InteractiveSession()
# replace with this in script:
# with tf.Session(graph=tf.Graph()) as sess:

# +
# Build the sampling graph once; `context` feeds the priming tokens at run time.
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
    hparams=hparams, length=length,
    context=context,
    batch_size=batch_size,
    temperature=temperature, top_k=top_k
)

saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)

# +
from utils.list_all_files import *
import unicodedata
import os, re, random

# ASCII-fold the special characters that appear in the scraped corpus.
mapping = {
    '\xa0': ' ',
    'Æ': 'AE',
    'æ': 'ae',
    'è': 'e',
    'é': 'e',
    'ë': 'e',
    'ö': 'o',
    '–': '-',
    '—': '-',
    '‘': "'",
    '’': "'",
    '“': '"',
    '”': '"'
}

def remove_special(text):
    """Replace characters listed in `mapping`; leave everything else untouched."""
    return ''.join([mapping.get(e, e) for e in text])

def strip_word(word):
    """Strip leading/trailing non-word characters and lowercase."""
    # Raw string: '\W' in a plain literal is an invalid escape in modern Python.
    return re.sub(r'^\W*|\W*$', '', word).lower()

# Load every scraped poem. File layout: line 0 = url, line 1 = title,
# line 2 = author, remaining lines = the poem body.
basenames = []
all_poems = {}
total_lines = 0
words = set()
for fn in list_all_files('../../scraping/poetry/output'):
    # Bug fix: the original read via a second `open(fn).read()` inside the
    # `with open(fn)` block, leaking that second file handle. Read from `f`.
    with open(fn) as f:
        original = f.read()
    text = remove_special(original).split('\n')
    poem = text[3:]
    basename = os.path.basename(fn)
    basename = os.path.splitext(basename)[0]
    basenames.append(basename)
    all_poems[basename] = {
        'url': text[0],
        'title': text[1],
        'author': text[2],
        'poem': poem
    }
    total_lines += len(poem)
    poem = '\n'.join(poem)
    words.update([strip_word(e) for e in poem.split()])
# discard() instead of remove(): don't crash if no empty token was produced.
words.discard('')
words = list(words)
print(total_lines)

# +
def titlecase_word(word):
    """Uppercase only the first letter (str.title would also mangle "carpenter's")."""
    return word[0].upper() + word[1:]
titlecase_word("carpenter's"), "carpenter's".title()

# +
def random_chunk(array, length):
    """Return a random contiguous slice of `array` with up to `length` items."""
    start = random.randint(0, max(0, len(array) - length - 1))
    return array[start:start+length]

def random_item(array):
    """Return one uniformly random element of `array`."""
    return array[random.randint(0, len(array) - 1)]

random_chunk(all_poems[basenames[0]]['poem'], 2), titlecase_word(random_item(words))
# -

# One-word seeds; each becomes the forced first word of a generated poem.
seeds = '''
blue
epoch
ethereal
ineffable
iridescent
nefarious
oblivion
quiver
solitude
sonorous
'''.split()
len(seeds)

from utils.progress import progress

# +
def clean(text):
    """Truncate generated text at GPT-2's end-of-text marker."""
    return text.split('<|endoftext|>')[0]

def generate(inspiration, seed):
    """Generate `nsamples` continuations primed with `inspiration` + `seed`.

    The inspiration text is ASCII-folded, the seed word is title-cased, and
    only the newly generated tokens (prefixed with the seed word) are kept.
    """
    inspiration = remove_special(inspiration).strip()
    seed = titlecase_word(seed).strip()
    raw_text = inspiration + '\n' + seed
    context_tokens = enc.encode(raw_text)
    n_context = len(context_tokens)
    results = []
    for _ in range(nsamples // batch_size):
        out = sess.run(output, feed_dict={
            context: [context_tokens for _ in range(batch_size)]
        })
        # Renamed from `sample`, which shadowed the imported gpt-2 module.
        for token_ids in out:
            text = enc.decode(token_ids[n_context:])
            results.append(seed + text)
    return results

# +
# For every seed word x every poem: prime with a random 16-line chunk of the
# poem and generate continuations starting with the seed word.
inspiration_lines = 16
all_results = {}
for seed in seeds:
    print(seed)
    cur = {}
    for basename in basenames:
        inspiration = random_chunk(all_poems[basename]['poem'], inspiration_lines)
        inspiration = '\n'.join(inspiration)
        results = generate(inspiration, seed)
        cur[basename] = results
    all_results[seed] = cur
# -

import json
with open('poems.json', 'w') as f:
    json.dump(all_poems, f, separators=(',', ':'))
with open('generated.json', 'w') as f:
    json.dump(all_results, f, separators=(',', ':'))
Generate GPT-2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + #####
"""Parse word2vec analogy-accuracy logs and plot total accuracy vs. vector length."""
import re

# One pattern per interesting kind of log line:
#  - a category header, e.g. "gram1-adjective-to-adverb:" (prefix "gram" marks syntactic)
#  - a per-category "ACCURACY TOP1" summary carrying success/attempt counts
#  - a marker announcing the vector length of the section that follows
category_re = re.compile(r'^(?P<prefix>[^ ]{4})[^ ]*:$')
accuracy_re = re.compile(r'^ACCURACY TOP1:.*\((?P<successes>[0-9]*) / (?P<attempts>[0-9]*)\)$')
veclen_re = re.compile(r'^\[INFO\] Current vec len (?P<veclen>[0-9]*):$')


def _new_counters():
    """Fresh success/attempt counters plus per-vec-len accuracy maps."""
    return {
        category_type: {'successes': 0, 'attempts': 0, 'accuracies': {}}
        for category_type in ('semantic', 'syntactic', 'total')
    }


def _flush_section(counters, veclen):
    """Record the accuracy accumulated for *veclen* (in percent) and reset the counts."""
    for category_type in 'semantic', 'syntactic', 'total':
        successes = counters[category_type]['successes']
        attempts = counters[category_type]['attempts']
        accuracy = successes / attempts * 100.0 if attempts > 0 else 0.0
        counters[category_type]['accuracies'][veclen] = accuracy
        counters[category_type]['successes'] = 0
        counters[category_type]['attempts'] = 0


def parse_accuracy_log(lines):
    """Parse one accuracy log given as an iterable of lines.

    Returns a dict with 'semantic', 'syntactic' and 'total' entries, each holding
    an 'accuracies' map of {vector length: accuracy in percent}.
    """
    counters = _new_counters()
    current_category_type = None
    current_veclen = None
    for line in lines:
        veclen_match = re.match(veclen_re, line)
        if veclen_match:
            # A new section starts: flush the counts of the previous one first.
            if current_veclen is not None:
                _flush_section(counters, current_veclen)
            current_veclen = int(veclen_match.group('veclen'))
        category_match = re.match(category_re, line)
        if category_match:
            if category_match.group('prefix') == 'gram':
                current_category_type = 'syntactic'
            else:
                current_category_type = 'semantic'
        accuracy_match = re.match(accuracy_re, line)
        if accuracy_match:
            assert current_category_type is not None
            for category_type in current_category_type, 'total':
                for counter_type in 'successes', 'attempts':
                    counters[category_type][counter_type] += int(
                        accuracy_match.group(counter_type))
    # BUG FIX: the original only flushed counters when the *next* vec-len marker
    # appeared, so the final section's accuracies were silently dropped at EOF.
    if current_veclen is not None:
        _flush_section(counters, current_veclen)
    return counters


total_accs = dict()
basepath = '/var/tmp/xstefan3/vectors/accuracies/'
pathnames = ['acc_1b_pca_600_to_x_f.log',
             'acc_1b.log',
             'acc_1b_sparsepca_600_to_X_f.log',
             'acc_32b.log',
             'acc_32b_pca_600_to_x.log',
             'acc_0b.log',
             'acc_0b_pca_600_to_x.log']


def main():
    """Parse every configured log and plot total accuracy against vector length."""
    # %matplotlib inline
    import matplotlib.pyplot as plt

    for log_pathname in pathnames:
        with open(basepath + log_pathname, 'rt') as f:
            counters = parse_accuracy_log(f.readlines())
        total_accs[log_pathname] = counters['total']['accuracies']

    # detail of dimensions below 200
    plt.figure(figsize=(15, 7))
    for pname in pathnames:
        plt.plot(*zip(*sorted(total_accs[pname].items())))
    plt.legend(pathnames)
    plt.grid(True)


if __name__ == "__main__":
    main()
# -
notebooks/accuracies_plot_reduced_vecs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Using submodel loss of active materials in PyBaMM
# In this notebook we show how to use the loss of active materials (LAM) submodel
# in pybamm. The LAM model follows equation (25) from [[6]](#References), and the
# stresses are calculated with equations (7)-(9) in [[1]](#References). To see all
# of the models and submodels available in PyBaMM, please take a look at the
# documentation here.

# %pip install pybamm -q    # install PyBaMM if it is not installed
import pybamm
import os
import numpy as np
import matplotlib.pyplot as plt
os.chdir(pybamm.__path__[0]+'/..')

# The LAM option below works for SPM and SPMe just as well as for DFN.
model = pybamm.lithium_ion.DFN(
    options=
    {
        "particle": "Fickian diffusion",
        "SEI": "solvent-diffusion limited",
        "SEI film resistance": "distributed",
        "SEI porosity change": "false",
        "particle cracking": "no cracking",
        "loss of active material": "both",
    }
)

# Ai2020 parameters, with a small LAM proportionality constant on each electrode.
# (the misspelled key "propotional" must match the parameter set and is kept as-is)
chemistry = pybamm.parameter_sets.Ai2020
param = pybamm.ParameterValues(chemistry=chemistry)
param.update({"Negative electrode LAM constant propotional term": 1e-4})
param.update({"Positive electrode LAM constant propotional term": 1e-4})

# Two full discharge/rest/charge/hold cycles.
total_cycles = 2
experiment = pybamm.Experiment(
    [
        "Discharge at 1C until 3 V",
        "Rest for 600 seconds",
        "Charge at 1C until 4.2 V",
        "Hold at 4.199 V for 600 seconds",
    ]
    * total_cycles
)

sim1 = pybamm.Simulation(
    model,
    experiment=experiment,
    parameter_values=param,
    solver=pybamm.CasadiSolver(dt_max=100),
)
solution = sim1.solve()

# Extract the quantities of interest from the solved simulation.
t_all = solution["Time [h]"].entries
v_all = solution["Terminal voltage [V]"].entries
I_if_n = solution["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p = solution["Sum of x-averaged positive electrode interfacial current densities"].entries

# +
# plotting the results
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 4))

ax1.plot(t_all, v_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("Terminal voltage [V]")
# ax1.legend()

ax2.plot(t_all, I_if_p, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("Positive electrode interfacial current densities")
# ax2.legend()
# ax2.set_xlim(6000,7000)

ax3.plot(t_all, I_if_n, label="loss of active material model")
ax3.set_xlabel("Time [h]")
ax3.set_ylabel("Negative electrode interfacial current densities")
ax3.legend(bbox_to_anchor=(1, 1.2))
# ax3.set_xlim(10000,15000)
# fig.tight_layout(pad=1.0)
plt.show()

# +
# Active material volume fraction over time, both electrodes.
LAM_n_all = solution["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all = solution["X-averaged positive electrode active material volume fraction"].entries

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(t_all, LAM_n_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode active material volume fraction")
ax2.plot(t_all, LAM_p_all, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode active material volume fraction")
fig.tight_layout(pad=3.0)
plt.show()

# +
# Surface tangential stresses (normalised by the Young's moduli E_n / E_p).
S_t_n_all = solution["X-averaged negative particle surface tangential stress"].entries
S_t_p_all = solution["X-averaged positive particle surface tangential stress"].entries

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(t_all, S_t_n_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative tangential stress/ $E_n$")
ax2.plot(t_all, S_t_p_all, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive tangential stress/ $E_p$")
fig.tight_layout(pad=3.0)
plt.show()
# -

# Repeat the experiment with larger LAM proportionality constants.
k1 = 1e-4
k2 = 1e-3
k3 = 1e-2

param.update({"Positive electrode LAM constant propotional term": k2})
param.update({"Negative electrode LAM constant propotional term": k2})
sim2 = pybamm.Simulation(
    model,
    experiment=experiment,
    parameter_values=param,
    solver=pybamm.CasadiSolver(dt_max=100),
)
solution2 = sim2.solve()

param.update({"Positive electrode LAM constant propotional term": k3})
param.update({"Negative electrode LAM constant propotional term": k3})
sim3 = pybamm.Simulation(
    model,
    experiment=experiment,
    parameter_values=param,
    solver=pybamm.CasadiSolver(dt_max=100),
)
solution3 = sim3.solve()

# +
# Compare the active material volume fraction across the three constants.
t_all2 = solution2["Time [h]"].entries
t_all3 = solution3["Time [h]"].entries
LAM_n_all2 = solution2["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all2 = solution2["X-averaged positive electrode active material volume fraction"].entries
LAM_n_all3 = solution3["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all3 = solution3["X-averaged positive electrode active material volume fraction"].entries

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(t_all, LAM_n_all, label="k_LAM = " + str(k1))
ax1.plot(t_all2, LAM_n_all2, label="k_LAM = " + str(k2))
ax1.plot(t_all3, LAM_n_all3, label="k_LAM = " + str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode active material volume fraction")
ax1.legend()
ax2.plot(t_all, LAM_p_all, label="k_LAM = " + str(k1))
ax2.plot(t_all2, LAM_p_all2, label="k_LAM = " + str(k2))
ax2.plot(t_all3, LAM_p_all3, label="k_LAM = " + str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode active material volume fraction")
fig.tight_layout(pad=3.0)
ax2.legend()
plt.show()

# +
# Compare the surface area to volume ratio across the three constants.
t_all2 = solution2["Time [h]"].entries
t_all3 = solution3["Time [h]"].entries
a_n_all = solution["X-averaged negative electrode surface area to volume ratio"].entries
a_p_all = solution["X-averaged positive electrode surface area to volume ratio"].entries
a_n_all2 = solution2["X-averaged negative electrode surface area to volume ratio"].entries
a_p_all2 = solution2["X-averaged positive electrode surface area to volume ratio"].entries
# NOTE(review): unlike the cases above, solution3 reads the non-averaged variable
# at a single mesh node ([-1,:] and [0,:]) — presumably a deliberate boundary
# sample, but confirm it should not be the X-averaged quantity.
a_n_all3 = solution3["Negative electrode surface area to volume ratio"].entries[-1, :]
a_p_all3 = solution3["Positive electrode surface area to volume ratio"].entries[0, :]

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(t_all, a_n_all, label="k_LAM = " + str(k1))
ax1.plot(t_all2, a_n_all2, label="k_LAM = " + str(k2))
ax1.plot(t_all3, a_n_all3, label="k_LAM = " + str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode surface area to volume ratio")
ax1.legend()
ax2.plot(t_all, a_p_all, label="k_LAM = " + str(k1))
ax2.plot(t_all2, a_p_all2, label="k_LAM = " + str(k2))
ax2.plot(t_all3, a_p_all3, label="k_LAM = " + str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode surface area to volume ratio")
fig.tight_layout(pad=3.0)
ax2.legend()
plt.show()

# +
# Compare terminal voltage and interfacial current densities.
v_all = solution["Terminal voltage [V]"].entries
v_all2 = solution2["Terminal voltage [V]"].entries
v_all3 = solution3["Terminal voltage [V]"].entries
I_if_n = solution["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p = solution["Sum of x-averaged positive electrode interfacial current densities"].entries
I_if_n2 = solution2["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p2 = solution2["Sum of x-averaged positive electrode interfacial current densities"].entries
I_if_n3 = solution3["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p3 = solution3["Sum of x-averaged positive electrode interfacial current densities"].entries

fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18, 5))
ax1.plot(t_all, v_all, label="k_LAM = " + str(k1))
ax1.plot(t_all2, v_all2, label="k_LAM = " + str(k2))
ax1.plot(t_all3, v_all3, label="k_LAM = " + str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("Terminal voltage [V]")
# ax1.legend()
# ax1.set_xlim(0.5,0.8)

ax2.plot(t_all, I_if_n, label="k_LAM = " + str(k1))
ax2.plot(t_all2, I_if_n2, label="k_LAM = " + str(k2))
ax2.plot(t_all3, I_if_n3, label="k_LAM = " + str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("Negative electrode interfacial current densities")
# ax2.legend()
# ax2.set_xlim(6000,7000)
ax2.set_ylim(2.2155, 2.2165)

ax3.plot(t_all, I_if_p, label="k_LAM = " + str(k1))
ax3.plot(t_all2, I_if_p2, label="k_LAM = " + str(k2))
ax3.plot(t_all3, I_if_p3, label="k_LAM = " + str(k3))
ax3.set_xlabel("Time [h]")
ax3.set_ylabel("Positive electrode interfacial current densities")
ax3.legend(bbox_to_anchor=(0.68, 1.3), ncol=2)
# ax3.set_xlim(2,2.8)
# ax3.set_ylim(2.492,2.494)
ax3.set_ylim(-2.494, -2.492)
plt.tight_layout(pad=1.0)
# -

# ## References
#
# The relevant papers for this notebook are:
pybamm.print_citations()
examples/notebooks/submodel_loss_of_active_materials.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Getting Started with Data Augmentation

# ## Before you start!
#
# - This notebook assumes that the shapeworks conda environment has been activated
#   using `conda activate shapeworks` on the terminal.
# - See [Getting Started with Notebooks](getting-started-with-notebooks.ipynb) for
#   information on setting up your environment and notebook shortcuts.
# - Note: example output was generated on a Linux/Mac environment and may look
#   different on Windows.
#
# ## In this notebook, you will learn:
#
# 1. How to generate realistic synthetic data from an existing dataset using
#    different parametric distributions.
# 2. How to visualize the statistical distribution of the generated data compared
#    to the original data.
#
# ### Data Augmentation Overview
#
# ShapeWorks includes a Python package, DataAugmentationUtils, that supports
# model-based data augmentation. This package is useful to increase the training
# sample size to train deep networks such as DeepSSM (see
# [SSMs Directly from Images](http://sciinstitute.github.io/ShapeWorks/deep-learning/deep-ssm.html)).
#
# A preliminary requirement for data augmentation is a set of images and shape
# models from real data on which to base augmentation. Once that is acquired, the
# process includes:
# 1. Embedding the real data into a low-dimensional space using principal
#    component analysis (PCA).
# 2. Fitting a parametric distribution to the subspace for sampling.
# 3. Sampling from the distribution to create new instances.
# 4. Projecting the samples back into the high-dimensional space of the original data.
# 5. Completing the sample generation by creating a corresponding synthetic image.
#
# This notebook shows how the distribution of the original data can be visually
# compared to the distribution of the synthetic data to motivate the choice of
# parametric distribution in step 2.
#
# For a full explanation of the data augmentation process and package please see:
# [Data Augmentation for Deep Learning](http://sciinstitute.github.io/ShapeWorks/deep-learning/data-augmentation.html).

# ### Import shapeworks and relevant libraries
import os
import sys
import shapeworks

# ### Import Data Augmentation Package
import DataAugmentationUtils

# ## 1. Defining the original dataset
#
# ### Defining dataset location
#
# You can download exemplar datasets from the
# [ShapeWorks data portal](https://girder.shapeworks-cloud.org) after you login.
# For new users, you can [register](https://girder.shapeworks-cloud.org#?dialog=register)
# an account for free. Please do not use an important password.
#
# After you login, click `Collections` on the left panel and then `use-case-data-v2`.
# Select the dataset you would like to download by clicking on the checkbox on the
# left of the dataset name. See the video below. After you download the dataset zip
# file, make sure you unzip/extract the contents in the appropriate location.
#
# **This notebook assumes that you have downloaded `femur-v0` and you have placed
# the unzipped folder `femur-v0` in `Examples/Python/Data`.** Feel free to use
# your own dataset.
#
# <p><video src="https://sci.utah.edu/~shapeworks/doc-resources/mp4s/portal_data_download.mp4" autoplay muted loop controls style="width:100%"></p>

# +
# dataset name is the folder name for your dataset
datasetName = 'femur-v0'

# path to the dataset where we can find shape data
# here we assume shape data are given as binary segmentations
data_dir = '../../Data/' + datasetName + '/'

print('Dataset Name: ' + datasetName)
print('Directory: ' + data_dir)
# -

# ### Get file lists
# Now we need the .particle files and corresponding raw images for the original
# dataset.

# +
# Sorted list of image paths for the dataset.
img_dir = data_dir + "groomed/images/"
img_list = sorted(img_dir + fname for fname in os.listdir(img_dir))

# Sorted list of local particle files (world files may also exist; we want local).
model_dir = data_dir + "shape_models/femur/1024/"
local_particle_list = sorted(model_dir + fname
                             for fname in os.listdir(model_dir)
                             if "local" in fname)

print("Total shapes in original dataset: " + str(len(img_list)))
# -

# ## Run data augmentation using a Gaussian Distribution
#
# Below is the command for running the complete data augmentation process:
#
# ```python
# DataAugmentationUtils.runDataAugmentation(out_dir, img_list,
#                                           local_point_list, num_samples,
#                                           num_dim, percent_variability,
#                                           sampler_type, mixture_num,
#                                           world_point_list)
# ```
# **Input arguments:**
#
# * `out_dir`: Path to the directory where augmented data will be stored.
# * `img_list`: List of paths to images of the original dataset.
# * `local_point_list`: List of paths to local `.particles` files of the original
#   dataset. Note, this list should be ordered in correspondence with the `img_list`.
# * `num_dim`: The number of dimensions to reduce to in PCA embedding. If zero or
#   not specified, the percent_variability option is used to select the number of
#   dimensions.
# * `percent_variability`: The proportion of variability in the data to be
#   preserved in embedding. Used if `num_dim` is zero or not specified. Default
#   value is 0.95 which preserves 95% of the variability in the data.
# * `sampler_type`: The type of parametric distribution to fit and sample from.
#   Options: `gaussian`, `mixture`, or `kde`. Default: `kde`.
# * `mixture_num`: Only necessary if `sampler_type` is `mixture`. The number of
#   clusters (i.e., mixture components) to be used in fitting a mixture model. If
#   zero or not specified, the optimal number of clusters will be automatically
#   determined using the [elbow method](https://en.wikipedia.org/wiki/Elbow_method_(clustering)).
# * `world_point_list`: List of paths to world `.particles` files of the original
#   dataset. This is optional and should be provided in cases where procrustes was
#   used for the original optimization, resulting in a difference between world
#   and local particle files. Note, this list should be ordered in correspondence
#   with the `img_list` and `local_point_list`.
#
# In this notebook we will keep most arguments the same and explore the effect of
# changing the `sampler_type`. First, we will try a Gaussian distribution. For
# further explanation about each distribution, see
# [Data Augmentation for Deep Learning](http://sciinstitute.github.io/ShapeWorks/deep-learning/data-augmentation.html).

# Augmentation variables to keep constant
num_samples = 50
num_dim = 0
percent_variability = 0.95

output_directory = '../Output/GaussianAugmentation/'
sampler_type = "gaussian"
embedded_dim = DataAugmentationUtils.runDataAugmentation(output_directory, img_list,
                                                         local_particle_list, num_samples,
                                                         num_dim, percent_variability,
                                                         sampler_type)
aug_data_csv = output_directory + "/TotalData.csv"

# ### Visualize distribution of real and augmented data
#
# Below is the command for visualizing the original and augmented data:
# ```
# DataAugmentationUtils.visualizeAugmentation(data_csv, viz_type)
# ```
# **Input arguments:**
#
# * `data_csv`: The path to the CSV file created by running the data augmentation process.
# * `viz_type`: The type of visualization to display. Options `splom` or `violin`
#   (default: `splom`). If set to `splom`, a scatterplot matrix of pairwise PCA
#   comparisons will open in the default browser. If set to `violin` a violin plot
#   or rotated kernel density plot will be displayed.
#
# We will use a violin plot to visualize the difference in the real and augmented
# distributions.

DataAugmentationUtils.visualizeAugmentation(aug_data_csv, 'violin')

# Example output:
# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/data_aug_gaussian.png"></p>

# ## Run data augmentation using a Mixture of Gaussian Distribution

output_directory = '../Output/MixtureAugmentation/'
sampler_type = "mixture"
embedded_dim = DataAugmentationUtils.runDataAugmentation(output_directory, img_list,
                                                         local_particle_list, num_samples,
                                                         num_dim, percent_variability,
                                                         sampler_type)
aug_data_csv = output_directory + "/TotalData.csv"

# ### Visualize distribution of real and augmented data

DataAugmentationUtils.visualizeAugmentation(aug_data_csv, 'violin')

# Example output:
# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/data_aug_mixture.png"></p>

# ## Run data augmentation using Kernel Density Estimation

output_directory = '../Output/KDEAugmentation/'
sampler_type = "kde"
embedded_dim = DataAugmentationUtils.runDataAugmentation(output_directory, img_list,
                                                         local_particle_list, num_samples,
                                                         num_dim, percent_variability,
                                                         sampler_type)
aug_data_csv = output_directory + "/TotalData.csv"

# ### Visualize distribution of real and augmented data

DataAugmentationUtils.visualizeAugmentation(aug_data_csv, 'violin')

# Example output:
# <p><img src="https://sci.utah.edu/~shapeworks/doc-resources/pngs/data_aug_kde.png"></p>
docs/notebooks/getting-started-with-data-augmentation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## WHO Coronavirus disease (COVID-2019) 24 hour reports
#
# 24 hour report from https://covid19.who.int/table

import pandas as pd
import pycountry
import requests
import os
import re
import numpy
from datetime import datetime

# + tags=["parameters"]
# papermill parameters
output_folder = "../output/"
# -

url = "https://covid19.who.int/WHO-COVID-19-global-table-data.csv"
df = pd.read_csv(url)

df["Date"] = datetime.utcnow().strftime("%Y-%m-%d")
df["Transmission Classification"] = ""
# Strip footnote markers ("[1]") and trailing parentheticals from country names.
# FIX: raw string + explicit regex=True — the original "\[1\]" relied on pandas'
# deprecated implicit-regex str.replace behavior and emitted escape warnings.
df["Name"] = df["Name"].str.replace(r"\[1\]", "", regex=True)
df["Name"] = df["Name"].replace(r"(.*)\s+\(.*\)", r"\1", regex=True)
df["ISO3166_1"] = ""

# WHO country names that pycountry cannot resolve directly, mapped to a
# searchable alias.
SPECIAL_NAMES = {
    "The United Kingdom": "United Kingdom",
    "United States of America": "United States",
    "occupied Palestinian territory, including east Jerusalem": "Jerusalem",
    "Pitcairn Islands": "Pitcairn",
    "Côte d’Ivoire": "Côte d'Ivoire",
    "Democratic Republic of the Congo": "Congo, The Democratic Republic of the",
    "United States Virgin Islands": "Virgin Islands, U.S.",
}


def resolve_country(search_name):
    """Return (alpha_2, canonical_name) for *search_name*, or None if unresolved.

    Tries an exact pycountry lookup first, then a fuzzy search. Lookup failures
    are swallowed (best-effort) — unresolved names keep an empty ISO code.
    """
    try:
        pyc = pycountry.countries.get(name=search_name)
        if pyc:
            return pyc.alpha_2, pyc.name
    except LookupError:
        pass
    try:
        matches = pycountry.countries.search_fuzzy(search_name)
        if matches:
            return matches[0].alpha_2, matches[0].name
    except LookupError:
        pass
    return None


countries = list(df["Name"].unique())
for name in countries:
    if name == "Global":
        continue
    resolved = resolve_country(SPECIAL_NAMES.get(name, name))
    if resolved is not None:
        alpha_2, canonical_name = resolved
        # BUG FIX: the original used chained assignment
        # (df["col"].loc[mask] = value), which writes to a temporary and is not
        # guaranteed to update df (pandas SettingWithCopyWarning). Use a single
        # df.loc[mask, col] assignment instead.
        mask = df["Name"] == name
        df.loc[mask, "ISO3166_1"] = alpha_2
        df.loc[mask, "Name"] = canonical_name

column_map = {
    "Name": "COUNTRY_REGION",
    "Cases - cumulative total": "CASES_TOTAL",
    "Cases - cumulative total per 100000 population": "CASES_TOTAL_PER_100000",
    "Cases - newly reported in last 24 hours": "CASES",
    "Deaths - cumulative total": "DEATHS_TOTAL",
    "Deaths - cumulative total per 100000 population": "DEATHS_TOTAL_PER_100000",
    "Deaths - newly reported in last 24 hours": "DEATHS",
    "Transmission Classification": "TRANSMISSION_CLASSIFICATION",
    "Date": "DATE",
    "ISO3166_1": "ISO3166_1"
}
df = df.rename(columns=column_map)

df.dtypes

# ```sql
# CREATE TABLE WHO_DAILY_REPORT (
#     COUNTRY_REGION varchar,
#     CASES_TOTAL int,
#     CASES_TOTAL_PER_100000 float,
#     CASES int,
#     DEATHS_TOTAL int,
#     DEATHS_TOTAL_PER_100000 float,
#     DEATHS int,
#     TRANSMISSION_CLASSIFICATION varchar,
#     DATE timestamp_ntz,
#     ISO3166_1 VARCHAR(2)
# )
# ```

df.to_csv(output_folder + "WHO_DAILY_REPORT.csv", index=False, columns=column_map.values())
notebooks/WHO_DAILY_REPORT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:anaconda] # language: python # name: conda-env-anaconda-py # --- # + [markdown] slideshow={"slide_type": "slide"} # # # *** # *** # # 数据抓取 # > # 抓取历届政府工作报告 # *** # *** # # 王成军 # # <EMAIL> # # 计算传播网 http://computational-communication.com # + slideshow={"slide_type": "slide"} import requests from bs4 import BeautifulSoup # + slideshow={"slide_type": "slide"} from IPython.display import display_html, HTML HTML('<iframe src=http://www.hprc.org.cn/wxzl/wxysl/lczf/ width=1000 height=500></iframe>') # the webpage we would like to crawl # - # # Inspect # # # <td width="274" class="bl">·&nbsp;<a href="./d12qgrdzfbg/201603/t20160318_369509.html" target="_blank" title="2016年政府工作报告">2016年政府工作报告</a></td> # # <td width="274" class="bl">·&nbsp;<a href="./d12qgrdzfbg/201603/t20160318_369509.html" target="_blank" title="2016年政府工作报告">2016年政府工作报告</a></td> # # # + slideshow={"slide_type": "slide"} # get the link for each year url = "http://www.hprc.org.cn/wxzl/wxysl/lczf/" content = requests.get(url) content.encoding # + [markdown] slideshow={"slide_type": "slide"} # ## Encoding # # - ASCII # - 7位字符集 # - 美国标准信息交换代码(American Standard Code for Information Interchange)的缩写, 为美国英语通信所设计。 # - 它由128个字符组成,包括大小写字母、数字0-9、标点符号、非打印字符(换行符、制表符等4个)以及控制字符(退格、响铃等)组成。 # - iso8859-1 通常叫做Latin-1。 # - 和ascii编码相似。 # - 属于单字节编码,最多能表示的字符范围是0-255,应用于英文系列。比如,字母a的编码为0x61=97。 # - 无法表示中文字符。 # - 单字节编码,和计算机最基础的表示单位一致,所以很多时候,仍旧使用iso8859-1编码来表示。在很多协议上,默认使用该编码。 # + [markdown] slideshow={"slide_type": "subslide"} # - gb2312/gbk/gb18030 # - 是汉字的国标码,专门用来表示汉字,是双字节编码,而英文字母和iso8859-1一致(兼容iso8859-1编码)。 # - 其中gbk编码能够用来同时表示繁体字和简体字,K 为汉语拼音 Kuo Zhan(扩展)中“扩”字的声母 # - gb2312只能表示简体字,gbk是兼容gb2312编码的。 # - gb18030,全称:国家标准 GB 18030-2005《信息技术中文编码字符集》,是中华人民共和国现时最新的内码字集 # + [markdown] slideshow={"slide_type": "subslide"} # - unicode # - 最统一的编码,用来表示所有语言的字符。 
# - 占用更多的空间,定长双字节(也有四字节的)编码,包括英文字母在内。 # - 不兼容iso8859-1等其它编码。相对于iso8859-1编码来说,uniocode编码只是在前面增加了一个0字节,比如字母a为"00 61"。 # - 定长编码便于计算机处理(注意GB2312/GBK不是定长编码),unicode又可以用来表示所有字符,所以在很多软件内部是使用unicode编码来处理的,比如java。 # - UTF # - unicode不便于传输和存储,产生了utf编码 # - utf编码兼容iso8859-1编码,同时也可以用来表示所有语言的字符 # - utf编码是不定长编码,每一个字符的长度从1-6个字节不等。 # - 其中,utf8(8-bit Unicode Transformation Format)是一种针对Unicode的可变长度字符编码,又称万国码。 # - 由<NAME>于1992年创建。现在已经标准化为RFC 3629。 # + [markdown] slideshow={"slide_type": "subslide"} # # decode # <del>urllib2.urlopen(url).read().decode('gb18030') </del> # # content.encoding = 'gb18030' # # content = content.text # # Or # # content = content.text.encode(content.encoding).decode('gb18030') # # # # # html.parser # BeautifulSoup(content, 'html.parser') # + slideshow={"slide_type": "fragment"} # Specify the encoding content.encoding = 'utf8' # 'gb18030' content = content.text # + slideshow={"slide_type": "subslide"} soup = BeautifulSoup(content, 'html.parser') # links = soup.find_all('td', {'class', 'bl'}) links = soup.select('.bl a') print(links[0]) # + slideshow={"slide_type": "slide"} len(links) # + slideshow={"slide_type": "subslide"} links[-1]['href'] # + slideshow={"slide_type": "subslide"} links[0]['href'].split('./')[1] # + slideshow={"slide_type": "subslide"} url + links[0]['href'].split('./')[1] # + slideshow={"slide_type": "subslide"} hyperlinks = [url + i['href'].split('./')[1] for i in links] hyperlinks[:5] # + slideshow={"slide_type": "slide"} hyperlinks[-5:] # + slideshow={"slide_type": "slide"} hyperlinks[12] # 2007年有分页 # + slideshow={"slide_type": "slide"} from IPython.display import display_html, HTML HTML('<iframe src=http://www.hprc.org.cn/wxzl/wxysl/lczf/dishiyijie_1/200908/t20090818_3955570.html width=1000 height=500></iframe>') # 2007年有分页 # + [markdown] slideshow={"slide_type": "subslide"} # # Inspect 下一页 # # <a href="t20090818_27775_1.html"><span style="color:#0033FF;font-weight:bold">下一页</span></a> # # <a href="t20090818_27775_1.html"><span 
style="color:#0033FF;font-weight:bold">下一页</span></a> # # - a # - script # - td # + slideshow={"slide_type": "subslide"} url_i = 'http://www.hprc.org.cn/wxzl/wxysl/lczf/dishiyijie_1/200908/t20090818_3955570.html' content = requests.get(url_i) content.encoding = 'utf8' content = content.text #content = content.text.encode(content.encoding).decode('gb18030') soup = BeautifulSoup(content, 'html.parser') #scripts = soup.find_all('script') #scripts[0] scripts = soup.select('td script')[0] # - scripts # + slideshow={"slide_type": "subslide"} scripts.text # + slideshow={"slide_type": "subslide"} # countPage = int(''.join(scripts).split('countPage = ')\ # [1].split('//')[0]) # countPage countPage = int(scripts.text.split('countPage = ')[1].split('//')[0]) countPage # + code_folding=[1] slideshow={"slide_type": "slide"} import sys def flushPrint(s): sys.stdout.write('\r') sys.stdout.write('%s' % s) sys.stdout.flush() def crawler(url_i): content = requests.get(url_i) content.encoding = 'utf8' content = content.text soup = BeautifulSoup(content, 'html.parser') year = soup.find('span', {'class', 'huang16c'}).text[:4] year = int(year) report = ''.join(s.text for s in soup('p')) # 找到分页信息 scripts = soup.find_all('script') countPage = int(''.join(scripts[1]).split('countPage = ')[1].split('//')[0]) if countPage == 1: pass else: for i in range(1, countPage): url_child = url_i.split('.html')[0] +'_'+str(i)+'.html' content = requests.get(url_child) content.encoding = 'gb18030' content = content.text soup = BeautifulSoup(content, 'html.parser') report_child = ''.join(s.text for s in soup('p')) report = report + report_child return year, report # + slideshow={"slide_type": "slide"} # 抓取50年政府工作报告内容 reports = {} for link in hyperlinks: year, report = crawler(link) flushPrint(year) reports[year] = report # + slideshow={"slide_type": "slide"} with open('../data/gov_reports1954-2019.txt', 'w', encoding = 'utf8') as f: for r in reports: line = str(r)+'\t'+reports[r].replace('\n', '\t') +'\n' 
f.write(line) # + slideshow={"slide_type": "slide"} import pandas as pd df = pd.read_table('../data/gov_reports1954-2019.txt', names = ['year', 'report']) # + slideshow={"slide_type": "subslide"} df[-5:] # + [markdown] slideshow={"slide_type": "slide"} # # This is the end. # > ## Thank you for your attention.
code/04.PythonCrawlerGovernmentReport.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # A5 Extension Plan

# +
import pandas as pd
import numpy as np
import os
import requests
import json
import datetime
import statsmodels
import matplotlib.pyplot as plt
# %matplotlib inline

# +
# Load the cleaned COVID case data and derive log-scale / rolling-average
# columns used for plotting and the later export-to-R step.
covid_data = pd.read_csv('data_clean/cases_clean.csv')
covid_data['log_cases'] = np.log(covid_data.cases)
covid_data['date'] = pd.to_datetime(covid_data.date)
# day-over-day change in log cases
covid_data['log_daily'] = covid_data.log_cases - covid_data.log_cases.shift(1)
covid_data['log_14D_avg'] = covid_data.log_cases.rolling(14).mean()
covid_data['log_ravg'] = covid_data.log_cases.rolling(7).mean()
covid_data['pct_chg_log'] = covid_data.log_daily.pct_change()
covid_data['log_ravg_pct'] = covid_data.log_ravg.pct_change()

# +
# Load ridership data and compute 7-day rolling averages for each mode.
ridership = pd.read_csv('data_raw/2021_ridership.csv')
ridership['date'] = pd.to_datetime(ridership.date)
ridership['ravg_rail'] = ridership.Rail.rolling(7).mean()
ridership['ravg_bus'] = ridership.Adjusted_Bus.rolling(7).mean()
# -

ridership.head()

# +
# quick visuals
plt.plot(ridership.date, ridership.ravg_rail)

# +
# quick visuals
plt.plot(ridership.date, ridership.ravg_bus)

# +
# Merge covid data with ridership (inner join on the shared columns).
data = covid_data.merge(ridership)
data.head()


# +
def get_ma(var, order):
    """Return column ``var`` of ``data`` as a masked array.

    Values on rows where ``data.order_code == order`` are masked, so a
    single series can be drawn as two visually distinct segments
    (mask-mandate vs. no-mandate periods).
    """
    return np.ma.masked_where(data.order_code == order, data[var])


# +
# get variables
x = pd.to_datetime(data.date)

# get masked arrays (for different masking orders)
daily_mask = get_ma('daily_cases', 2)
daily_no_mask = get_ma('daily_cases', 1)
total_mask = get_ma('cases', 2)
total_no_mask = get_ma('cases', 1)
roll7_avg_mask = get_ma('rolling_avg_7', 2)
roll7_avg_no_mask = get_ma('rolling_avg_7', 1)
roll14_avg_mask = get_ma('rolling_avg_14', 2)
roll14_avg_no_mask = get_ma('rolling_avg_14', 1)
bus_mask = get_ma('ravg_bus', 2)
bus_no_mask = get_ma('ravg_bus', 1)
rail_mask = get_ma('ravg_rail', 2)
rail_no_mask = get_ma('ravg_rail', 1)

# +
# Plot merged df: cases on the left axis, ridership on a twin right axis.
fig = plt.figure(figsize=(20, 10), facecolor='white')
ax = fig.add_subplot()
ax.set_title('Daily COVID19 Cases and Transit Ridership in Montgomery County, MD \nJun 2020 - Oct 2021')

# plot daily cases
ax.bar(x, daily_mask, alpha=0.8, linewidth=2, color='mistyrose',
       label='Daily Cases-Mask Mandate (LHS)')
ax.bar(x, daily_no_mask, alpha=0.5, color='silver', label='Daily Cases-No Mask Mandate (LHS)')

# plot rolling avg 7D
ax.plot(x, roll7_avg_mask, alpha=0.8, linewidth=2, color='crimson',
        label='7D Rolling Avg of Cases (LHS)')
ax.plot(x, roll7_avg_no_mask, alpha=0.5, color='crimson', linestyle='dashed')

# plot rolling avg 14D
ax.plot(x, roll14_avg_mask, alpha=0.8, linewidth=2, color='darkmagenta',
        label='14D Rolling Avg of Cases (LHS)')
ax.plot(x, roll14_avg_no_mask, alpha=0.5, color='darkmagenta', linestyle='dashed')

# set labels
ax.set_xlabel('Date')
ax.set_ylabel('Number of Positive Cases')
plt.legend(loc='upper left')

# plot ridership
ax2 = ax.twinx()
ax2.plot(x, bus_mask, c='navy', linewidth=3, label='7D Rolling Avg of Bus Ridership (RHS)')
ax2.plot(x, bus_no_mask, c='navy', alpha=0.3, linestyle='dashed', linewidth=4)
ax2.plot(x, rail_mask, c='darkgreen', linewidth=3,
         label='7D Rolling Avg of Rail Ridership-Mask Mandate (RHS)')
ax2.plot(x, rail_no_mask, c='darkgreen', alpha=0.8, linestyle='dashed', linewidth=4)
ax2.set_ylabel('Total Ridership')
ax2.set_ylim([min(data.ravg_bus.min(), data.ravg_rail.min()) * 0.8,
              max(data.ravg_bus.max(), data.ravg_rail.max()) * 1.1])
plt.legend(loc='upper right')
plt.savefig('visualization/plot_ridership.png', facecolor=fig.get_facecolor(),
            bbox_inches='tight')

# +
# load crash data
crashes = pd.read_csv('data_raw/mont_county_crash.csv')

# +
# # schema?
for col in crashes.columns:
    print('Data type of {} is {}'.format(col, type(crashes[col][0])))

# +
# change to datetime
crashes['date'] = pd.to_datetime(crashes['Crash Date/Time'])

# +
# inspect crash df
print('crashes.shape: ', crashes.shape)
print('crashes.date.min: ', crashes.date.min())
print('crashes.date.max: ', crashes.date.max())

# +
# map severity
# NOTE(review): this mapping is defined but never applied in the visible code.
sev = {'Property Damage Crash': 0, 'Injury Crash': 1, 'Fatal Crash': 2}

# +
# get just relevant features
keep_cols = ['date', 'Latitude', 'Longitude', 'Hit/Run', 'Weather', 'Road Grade',
             'NonTraffic', 'Collision Type', 'Road Condition', 'Number of Lanes',
             'Surface Condition', 'ACRS Report Type']
crash_data = crashes[keep_cols].copy().reset_index()

# +
# Pivot the report type into one indicator column per severity class.
df_temp = pd.pivot_table(crash_data, values='date', index='index',
                         columns='ACRS Report Type', aggfunc='count').reset_index()
crash_data['fatal'] = df_temp['Fatal Crash'].fillna(0)
crash_data['injury'] = df_temp['Injury Crash'].fillna(0)
crash_data['pdo'] = df_temp['Property Damage Crash'].fillna(0)

# +
# Aggregate crashes to daily counts and add rolling averages.
crash_agg = crash_data.groupby(crash_data.date.dt.date).sum()[['fatal', 'injury', 'pdo']].reset_index()
crash_agg['total'] = crash_agg.fatal + crash_agg.injury + crash_agg.pdo
crash_agg['total_crash_7d'] = (crash_agg.fatal + crash_agg.injury + crash_agg.pdo).rolling(7).mean()
crash_agg['total_crash_14d'] = (crash_agg.fatal + crash_agg.injury + crash_agg.pdo).rolling(14).mean()
crash_agg['date'] = pd.to_datetime(crash_agg.date)

data2 = data.merge(crash_agg)
print('data2.date.min: ', data2.date.min())
print('data2.date.max: ', data2.date.max())

# +
# get np arrays
pdo = np.array(data2.pdo)
injury = np.array(data2.injury)
fatal = np.array(data2.fatal)
total_7d = np.array(data2.total_crash_7d)
total_14d = np.array(data2.total_crash_14d)


# +
def get_ma2(var, order):
    """Same as :func:`get_ma` but over the crash-merged frame ``data2``."""
    return np.ma.masked_where(data2.order_code == order, data2[var])


# +
# get new variables
x_new = pd.to_datetime(data2.date)

# get masked arrays (for different masking orders)
roll7_avg_mask2 = get_ma2('rolling_avg_7', 2)
roll7_avg_no_mask2 = get_ma2('rolling_avg_7', 1)
roll14_avg_mask2 = get_ma2('rolling_avg_14', 2)
roll14_avg_no_mask2 = get_ma2('rolling_avg_14', 1)

# +
# plot crash df
fig = plt.figure(figsize=(20, 10), facecolor='white')
ax = fig.add_subplot()
ax.set_title('Daily COVID19 Cases and Collisions in Montgomery County, MD \nJun 2020 - Aug 2021')

# plot daily cases
#ax.bar(x, daily_mask, alpha=0.8, linewidth=2, color='mistyrose',
#       label='Daily Cases-Mask Mandate (LHS)')
#ax.bar(x, daily_no_mask, alpha=0.5, color='silver', label='Daily Cases-No Mask Mandate (LHS)')

# plot rolling avg 7D
ax.plot(x_new, roll7_avg_mask2, alpha=0.8, linewidth=2, color='crimson',
        label='7D Rolling Avg of Cases (LHS)')
ax.plot(x_new, roll7_avg_no_mask2, alpha=0.5, color='crimson', linestyle='dashed')

# plot rolling avg 14D
ax.plot(x_new, roll14_avg_mask2, alpha=0.8, linewidth=2, color='darkmagenta',
        label='14D Rolling Avg of Cases (LHS)')
ax.plot(x_new, roll14_avg_no_mask2, alpha=0.5, color='darkmagenta', linestyle='dashed')

# set labels
ax.set_xlabel('Date')
ax.set_ylabel('Number of Positive Cases')
plt.legend(loc='upper left')

# plot crashes (stacked bars by severity, plus rolling-average lines)
ax2 = ax.twinx()
ax2.bar(x_new, pdo, color='navy', linewidth=2, alpha=0.3, label='Collisions- Property Damage')
ax2.bar(x_new, injury, color='slateblue', linewidth=2, alpha=0.3, bottom=pdo,
        label='Collisions- Injury Crashes')
ax2.bar(x_new, fatal, color='darkcyan', linewidth=2, alpha=0.3, bottom=pdo + injury,
        label='Collisions- Fatal Crashes')
ax2.plot(x_new, total_7d, c='darkgreen', label='7D Rolling Avg of Total Collisions')
# BUGFIX: this series is the 14-day average; the label previously duplicated
# the 7D label, producing two identical legend entries.
ax2.plot(x_new, total_14d, c='navy', label='14D Rolling Avg of Total Collisions')
ax2.set_ylabel('Number of Collisions')
plt.legend(loc='upper right')
plt.savefig('visualization/plot_crash.png', facecolor=fig.get_facecolor(),
            bbox_inches='tight')

# +
# preprocess here for R - ridership
transit = data[['date', 'cases', 'Rail', 'Adjusted_Bus', 'rolling_avg_14',
                'ravg_rail', 'ravg_bus', 'log_ravg_pct']].copy()
transit['all_modes'] = transit.Rail + transit.Adjusted_Bus
transit['all_pct'] = transit.all_modes.pct_change()
transit['ravg_all'] = transit.all_modes.rolling(7).mean()
transit['ravg_all_pct'] = transit.ravg_all.pct_change()
transit['cases_pct'] = transit.cases.pct_change()
transit['rail_pct'] = transit.Rail.pct_change()
transit['bus_pct'] = transit.Adjusted_Bus.pct_change()
transit['ravg_cases_pct'] = transit.rolling_avg_14.pct_change()
transit['ravg_rail_pct'] = transit.ravg_rail.pct_change()
transit['ravg_bus_pct'] = transit.ravg_bus.pct_change()

# +
# preprocess here for R- crash
crash = data2[['date', 'cases', 'total']].copy()
crash['ravg_crash'] = crash.total.rolling(7).mean()
crash['crash_pct'] = crash.total.pct_change()
crash['ravg_crash_pct'] = crash.ravg_crash.pct_change()
crash['cases_pct'] = crash.cases.pct_change()

# +
# export to csv
transit.to_csv('data_clean/transit_data.csv', index=False)
crash.to_csv('data_clean/crashes_data.csv', index=False)
A5-Extension-Plan.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# +
# BUGFIX: `os` is required by os.makedirs() in the Download Data cell below
# but was never imported, which raised NameError on first run.
import os
from zipfile import ZipFile

import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json

from azureml.core import Workspace
from azureml.core import Experiment
from azureml.core import Workspace, Datastore, Dataset

# Connect to the AzureML workspace described by the local config.json.
ws = Workspace.from_config()
# -

# ### Download Data

# +
os.makedirs('./data', exist_ok=True)

url = 'https://github.com/asherif844/MLOps/raw/master/data/AdventureWorks-oltp-install-script.zip'
zip_data = requests.get(url)

with open('./data/adventureworks.zip', 'wb') as f:
    f.write(zip_data.content)

with ZipFile('./data/adventureworks.zip', 'r') as fzip:
    fzip.extractall('./data/csv_data')
# -

# ### Transform Data

# +
header = ['TransactionID', 'ProductID', 'ReferenceOrderID', 'ReferenceOrderLineID',
          'TransactionDate', 'TransactionType', 'Quantity', 'ActualCost', 'ModifiedDate']
trans_hist_df = pd.read_csv('./data/csv_data/TransactionHistory.csv', sep='\t', names=header)
trans_hist_df['PaidAmount'] = trans_hist_df['Quantity'] * trans_hist_df['ActualCost']
trans_hist_df['TransactionDate'] = pd.to_datetime(trans_hist_df['TransactionDate'])

# Resample to a daily mean series, interpolating gaps, and restrict to the
# Jul 2013 - May 2014 window; then split by year for separate datasets.
df = trans_hist_df[['TransactionDate', 'PaidAmount']]
df.set_index('TransactionDate', inplace=True)
df = df.resample('D').mean().interpolate()
df = df['2013-07':'2014-05']
df1 = df['2013']
df2 = df['2014']

df.to_csv('./data/mlops_forecast_data.csv', index=True, header=True)
df1.to_csv('./data/mlops_forecast_data2013.csv', index=True, header=True)
df2.to_csv('./data/mlops_forecast_data2014.csv', index=True, header=True)
# -

# ### Upload data

# +
#datastore = Datastore(ws, 'demostore')
datastore = ws.get_default_datastore()

datastore.upload_files(files=['./data/mlops_forecast_data.csv'],
                       target_path='mlops_timeseries/',
                       overwrite=True, show_progress=True)
datastore.upload_files(files=['./data/mlops_forecast_data2013.csv'],
                       target_path='mlops_timeseries/',
                       overwrite=True, show_progress=True)
datastore.upload_files(files=['./data/mlops_forecast_data2014.csv'],
                       target_path='mlops_timeseries/',
                       overwrite=True, show_progress=True)

dataset = Dataset.Tabular.from_delimited_files(path=datastore.path('mlops_timeseries/mlops_forecast_data.csv'))
dataset1 = Dataset.Tabular.from_delimited_files(path=datastore.path('mlops_timeseries/mlops_forecast_data2013.csv'))
dataset2 = Dataset.Tabular.from_delimited_files(path=datastore.path('mlops_timeseries/mlops_forecast_data2014.csv'))
# -

# ### Register dataset

dataset.register(workspace=ws, name='transaction_ts',
                 description='time series dataset for mlops',
                 create_new_version=True)
dataset1.register(workspace=ws, name='transaction_ts2013',
                  description='2013 time series dataset for mlops',
                  create_new_version=True)
dataset2.register(workspace=ws, name='transaction_ts2014',
                  description='2014 time series dataset for mlops',
                  create_new_version=True)
00_LoadData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # [ATM 623: Climate Modeling](../index.ipynb) # # [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany # # # Lecture 4: Building simple climate models using `climlab` # + [markdown] slideshow={"slide_type": "skip"} # ### About these notes: # # This document uses the interactive [`Jupyter notebook`](https://jupyter.org) format. The notes can be accessed in several different ways: # # - The interactive notebooks are hosted on `github` at https://github.com/brian-rose/ClimateModeling_courseware # - The latest versions can be viewed as static web pages [rendered on nbviewer](http://nbviewer.ipython.org/github/brian-rose/ClimateModeling_courseware/blob/master/index.ipynb) # - A complete snapshot of the notes as of May 2017 (end of spring semester) are [available on Brian's website](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2017/Notes/index.html). # # [Also here is a legacy version from 2015](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/Notes/index.html). # # Many of these notes make use of the `climlab` package, available at https://github.com/brian-rose/climlab # - # Ensure compatibility with Python 2 and 3 from __future__ import print_function, division # + [markdown] slideshow={"slide_type": "slide"} # ## Contents # # 1. [Introducing `climlab`](#section1) # 2. [Using `climlab` to implement the zero-dimensional energy balance model](#section2) # 3. [Run the zero-dimensional EBM out to equilibrium](#section3) # 4. [A climate change scenario in the EBM](#section4) # 5. [Further `climlab` resources](#section5) # - # ____________ # <a id='section1'></a> # # ## 1. 
Introducing `climlab` # ____________ # # `climlab` is a python package for process-oriented climate modeling. # # It is based on a very general concept of a model as a collection of individual, # interacting processes. `climlab` defines a base class called `Process`, which # can contain an arbitrarily complex tree of sub-processes (each also some # sub-class of `Process`). Every climate process (radiative, dynamical, # physical, turbulent, convective, chemical, etc.) can be simulated as a stand-alone # process model given appropriate input, or as a sub-process of a more complex model. # New classes of model can easily be defined and run interactively by putting together an # appropriate collection of sub-processes. # # `climlab` is an open-source community project. The latest code can always be found on `github`: # # https://github.com/brian-rose/climlab # # You can install `climlab` by doing # # ``` # conda install -c conda-forge climlab # ``` # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import climlab # ____________ # <a id='section2'></a> # # ## 2. Using `climlab` to implement the zero-dimensional energy balance model # ____________ # # Recall that we have worked with a zero-dimensional Energy Balance Model # # $$ C \frac{dT_s}{dt} = (1-\alpha) Q - \tau \sigma T_s^4 $$ # Here we are going to implement this exact model using `climlab`. # # Yes, we have already written code to implement this model, but we are going to repeat this effort here as a way of learning how to use `climlab`. # # There are tools within `climlab` to implement much more complicated models, but the basic interface will be the same. 
# create a zero-dimensional domain with a single surface temperature state = climlab.surface_state(num_lat=1, # a single point water_depth = 100., # 100 meters slab of water (sets the heat capacity) ) state # Here we have created a dictionary called `state` with a single item called `Ts`: state['Ts'] # This dictionary holds the state variables for our model -- which is this case is a single number! It is a **temperature in degrees Celsius**. # For convenience, we can access the same data as an attribute (which lets us use tab-autocomplete when doing interactive work): state.Ts # It is also possible to see this `state` dictionary as an `xarray.Dataset` object: climlab.to_xarray(state) # create the longwave radiation process olr = climlab.radiation.Boltzmann(name='OutgoingLongwave', state=state, tau = 0.612, eps = 1., timestep = 60*60*24*30.) # Look at what we just created print(olr) # create the shortwave radiation process asr = climlab.radiation.SimpleAbsorbedShortwave(name='AbsorbedShortwave', state=state, insolation=341.3, albedo=0.299, timestep = 60*60*24*30.) # Look at what we just created print(asr) # couple them together into a single model ebm = olr + asr # Give the parent process name ebm.name = 'EnergyBalanceModel' # Examine the model object print(ebm) # The object called `ebm` here is the entire model -- including its current state (the temperature `Ts`) as well as all the methods needed to integrated forward in time! # The current model state, accessed two ways: ebm.state ebm.Ts # Here is some internal information about the timestep of the model: print(ebm.time['timestep']) print(ebm.time['steps']) # This says the timestep is 2592000 seconds (30 days!), and the model has taken 0 steps forward so far. # To take a single step forward: ebm.step_forward() ebm.Ts # The model got colder! # # To see why, let's look at some useful diagnostics computed by this model: ebm.diagnostics # This is another dictionary, now with two items. They should make sense to you. 
# # Just like the `state` variables, we can access these `diagnostics` variables as attributes: ebm.OLR ebm.ASR # So why did the model get colder in the first timestep? # # What do you think will happen next? # ____________ # <a id='section3'></a> # # ## 3. Run the zero-dimensional EBM out to equilibrium # ____________ # Let's look at how the model adjusts toward its equilibrium temperature. # # Exercise: # # - Using a `for` loop, take 500 steps forward with this model # - Store the current temperature at each step in an array # - Make a graph of the temperature as a function of time # ____________ # <a id='section4'></a> # # ## 4. A climate change scenario # ____________ # Suppose we want to investigate the effects of a small decrease in the transmissitivity of the atmosphere `tau`. # # Previously we used the zero-dimensional model to investigate a **hypothetical climate change scenario** in which: # - the transmissitivity of the atmosphere `tau` decreases to 0.57 # - the planetary albedo increases to 0.32 # # How would we do that using `climlab`? # Recall that the model is comprised of two sub-components: for name, process in ebm.subprocess.items(): print(name) print(process) # The parameter `tau` is a property of the `OutgoingLongwave` subprocess: ebm.subprocess['OutgoingLongwave'].tau # and the parameter `albedo` is a property of the `AbsorbedShortwave` subprocess: ebm.subprocess['AbsorbedShortwave'].albedo # Let's make an exact clone of our model and then change these two parameters: ebm2 = climlab.process_like(ebm) print(ebm2) ebm2.subprocess['OutgoingLongwave'].tau = 0.57 ebm2.subprocess['AbsorbedShortwave'].albedo = 0.32 # Now our model is out of equilibrium and the climate will change! # # To see this without actually taking a step forward: # Computes diagnostics based on current state but does not change the state ebm2.compute_diagnostics() ebm2.ASR - ebm2.OLR # Shoud the model warm up or cool down? 
# Well, we can find out: ebm2.Ts ebm2.step_forward() ebm2.Ts # ### Automatic timestepping # Often we want to integrate a model forward in time to equilibrium without needing to store information about the transient state. # # `climlab` offers convenience methods to do this easily: ebm3 = climlab.process_like(ebm2) ebm3.integrate_years(50) # What is the current temperature? ebm3.Ts # How close are we to energy balance? ebm3.ASR - ebm3.OLR # We should be able to accomplish the exact same thing with explicit timestepping for n in range(608): ebm2.step_forward() ebm2.Ts ebm2.ASR - ebm2.OLR # ____________ # <a id='section5'></a> # # ## 5. Further `climlab` resources # ____________ # We will be using `climlab` extensively throughout this course. Lots of examples of more advanced usage are found here in the course notes. Here are some links to other resources: # # - The documentation is hosted at <https://climlab.readthedocs.io/en/latest/> # - Source code (for both software and docs) are at <https://github.com/brian-rose/climlab> # - [A video of a talk I gave about `climlab` at the 2018 AMS Python symposium](https://ams.confex.com/ams/98Annual/videogateway.cgi/id/44948?recordingid=44948) (January 2018) # - [Slides from a talk and demonstration that I gave in Febrary 2018](https://livealbany-my.sharepoint.com/:f:/g/personal/brose_albany_edu/EuA2afxy5-hNkzNhHgkp_HYBYcJumR3l6ukRVIEl4W3MmA?e=sbXN0d) (The Apple Keynote version contains some animations that will not show up in the pdf version) # + [markdown] slideshow={"slide_type": "skip"} # ____________ # ## Version information # ____________ # # + slideshow={"slide_type": "skip"} # %load_ext version_information # %version_information numpy, matplotlib, climlab # + [markdown] slideshow={"slide_type": "skip"} # ____________ # # ## Credits # # The author of this notebook is [<NAME>](http://www.atmos.albany.edu/facstaff/brose/index.html), University at Albany. 
# # It was developed in support of [ATM 623: Climate Modeling](http://www.atmos.albany.edu/facstaff/brose/classes/ATM623_Spring2015/), a graduate-level course in the [Department of Atmospheric and Envionmental Sciences](http://www.albany.edu/atmos/index.php) # # Development of these notes and the [climlab software](https://github.com/brian-rose/climlab) is partially supported by the National Science Foundation under award AGS-1455071 to <NAME>. Any opinions, findings, conclusions or recommendations expressed here are mine and do not necessarily reflect the views of the National Science Foundation. # ____________
Lectures/Lecture04 -- Intro to CLIMLAB.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Audio Feature Augmentation
# ====================
#

# +
# When running this tutorial in Google Colab, install the required packages
# with the following.
# # !pip install torchaudio librosa

import torch
import torchaudio
import torchaudio.transforms as T

print(torch.__version__)
print(torchaudio.__version__)
# -

# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
#

# +
#@title Prepare data and utility functions. {display-mode: "form"}
#@markdown
#@markdown You do not need to look into this cell.
#@markdown Just execute once and you are good to go.
#@markdown
#@markdown In this tutorial, we will use a speech data from [VOiCES dataset](https://iqtlabs.github.io/voices/), which is licensed under Creative Commos BY 4.0.

#-------------------------------------------------------------------------------
# Preparation of data and helper functions.
#-------------------------------------------------------------------------------

import os
import requests
import librosa
import matplotlib.pyplot as plt


_SAMPLE_DIR = "_sample_data"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")

os.makedirs(_SAMPLE_DIR, exist_ok=True)


def _fetch_data():
  # Download each (url, destination) pair into the sample directory.
  uri = [
    (SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
  ]
  for url, path in uri:
    with open(path, 'wb') as file_:
      file_.write(requests.get(url).content)

_fetch_data()

def _get_sample(path, resample=None):
  # Mix down to a single channel; optionally low-pass and resample.
  effects = [
    ["remix", "1"]
  ]
  if resample:
    effects.extend([
      ["lowpass", f"{resample // 2}"],
      ["rate", f'{resample}'],
    ])
  return torchaudio.sox_effects.apply_effects_file(path, effects=effects)

def get_speech_sample(*, resample=None):
  return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)

def get_spectrogram(
    n_fft = 400,
    win_len = None,
    hop_len = None,
    power = 2.0,
):
  # Build a Spectrogram transform with the given STFT parameters and apply
  # it to the downloaded speech sample.
  waveform, _ = get_speech_sample()
  spectrogram = T.Spectrogram(
      n_fft=n_fft,
      win_length=win_len,
      hop_length=hop_len,
      center=True,
      pad_mode="reflect",
      power=power,
  )
  return spectrogram(waveform)

def plot_spectrogram(spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None):
  # Render a (power) spectrogram in dB with an optional x-axis limit.
  fig, axs = plt.subplots(1, 1)
  axs.set_title(title or 'Spectrogram (db)')
  axs.set_ylabel(ylabel)
  axs.set_xlabel('frame')
  im = axs.imshow(librosa.power_to_db(spec), origin='lower', aspect=aspect)
  if xmax:
    axs.set_xlim((0, xmax))
  fig.colorbar(im, ax=axs)
  plt.show(block=False)
# -

# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements ``TimeStretch``, ``TimeMasking`` and
# ``FrequencyMasking``.
#
# TimeStretch
# ~~~~~~~~~~
#
#

# +
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()

rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect='equal', xmax=304)

plot_spectrogram(torch.abs(spec[0]), title="Original", aspect='equal', xmax=304)

rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect='equal', xmax=304)
# -

# TimeMasking
# ~~~~~~~~~~~
#
#

# +
torch.random.manual_seed(4)

spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")

masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)

plot_spectrogram(spec[0], title="Masked along time axis")
# -

# FrequencyMasking
# ~~~~~~~~~~~~~~~~
#
#

# +
torch.random.manual_seed(4)

spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")

masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)

plot_spectrogram(spec[0], title="Masked along frequency axis")
docs/_downloads/c7aabc084299906190044ec9d37b7d0e/audio_feature_augmentation_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from kirby.experiment import Experiment
from kirby.run_params import RunParams
import wandb

# Authenticate with Weights & Biases before constructing the experiment.
wandb.login()

# Train/validation CSV splits for the "description" run.
data_files = {
    'train': ['data/augmented_datasets/description_train.csv'],
    'valid': ['data/augmented_datasets/description_valid.csv'],
}

run_params = RunParams(
    run_name='description',
    debug=False,
    data_files=data_files,
    data_file_type='csv',
)
experiment = Experiment(run_params)
notebooks/exp_01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Authors:** # # <NAME> - 500513 - <EMAIL> # # <NAME> - 500093 - <EMAIL> # # Cross Validation and RBF Kernel # # ## Learning from Data: Homework 8 # # For this homework, the library _libsvm_ is needed. The easiest way to install this on _Mac OS X_ is using [_Homebrew_](http://brew.sh/) with the following command: `brew install libsvm`. # # The following questions from this homework will be answered and its solutions will be explained: questions 7, 8, 9 and 10. # ## Polynomial kernel # In the first 2 questions, we need to experiment with 10-fold cross-validation using a polynomial kernel. We will first explain the workings of support vector machines, after which we will look at the use of a polynomial kernel. Afterwards, we see why cross-validation is useful and how it works. # # The purpose of support vector machines is to find dichotomies with border margins that are as large as possible. # As we saw in the lectures, we first need the distance between the hyperplane $w^Tx=0$ and the nearest point to that hyperplane, in which $w$ are weights and $x$ are data points. This was found to be $\frac{1}{\|w\|}$. # This is the value that needs to be maximized in order to get a margin as big as possible. This can be restated by saying that we need to minimize the value $\frac{1}{2}w^\intercal w$. This is allowed because minimizing this quantity maximizes the original quantity. <br /> # The optimization should also be subject to $|w^\intercal x_n + b| \geq 1$ (in which $b$ is the bias) for $n=1,2,\dots,N$, because the distance from the separating hyperplane to its nearest point should be normalized to be 1. 
Eventually, we get the following Lagrangian formulation which incorporates both the quantity to minimize and the constraint: # $$ # \mathcal{L}(\alpha) = \sum_{n=1}^N \alpha_n - \frac{1}{2} \sum_{n=1}^N \sum_{m=1}^N y_ny_m \alpha_n\alpha_m x_n^\intercal x_m # $$ # In which $y_n$ and $y_m$ are target values and $\alpha_n$ and $\alpha_m$ are Lagrange multipliers. # Using quadratic programming, we eventually become $\alpha=\alpha_1, \alpha_2, \dots, \alpha_N$, which maximizes $\frac{1}{2}w^\intercal w$, while taking into account the constraint. When $\alpha_n > 0$, we say that the corresponding data point $x_n$ is a support vector. The hyperplane that separates the points is completely determined by those support vectors and must lie between the support vectors of different target values. # # For data which is not linearly separable, a nonlinear transform is needed, in which a transformation (using the function $\phi$) of the data is done from the $\mathcal{X}$ to the $\mathcal{Z}$ space. The resulting data in this $\mathcal{Z}$ is then linearly separable. # # # In a polynomial kernel, we use a nonlinear transformation function $\phi: \mathcal{X} \rightarrow \mathcal{Z}$ that is a polynomial of a certain order $Q$. # It is possible to do this nonlinear transformation first and then apply quadratic programming using the transformed data to obtain our solution. This can however be infeasible when we have a high-order $\phi$ function, because transforming all the data is computationally expensive. When we look at the Lagrange formulation however, we see that the training data is only used in a dot product of 2 such data points: $x_n^\intercal x_m$. Thus, we actually only need the result of this dot product in the $\mathcal{Z}$ space. The transformed data is not used in any other way. A function that takes 2 data points in the $\mathcal{X}$ space and computes their inner product in the $\mathcal{Z}$ space is called a kernel function. 
In a polynomial kernel, this function is $K(x,x')=(1+x^\intercal x')^Q$. We can thus replace the inner product in the Lagrange formulation with this kernel function. # # # There are however cases where the data is slightly non-separable but not so much that a non-linear transformation is needed, or the that is still non-separable after a transformation. As a result, some points violate the margin ($|w^\intercal x_n + b| \geq 1$ is violated). The total violation was found to be $\sum_{n=1}^N \xi_n$. An order of this value (the value $C$) is added to the value that needs to be minimized using quadratic programming in order to allow some errors. The higher $C$, the more errors are allowed in separating the data. Intuitively, we see that increasing $C$ makes the decision surface more smooth and simple. A margin that uses this $C$ is called a soft margin. # # # For the first 2 exercises, we use cross validation. In the slides, we first saw that using small partition of the training set to validate the learned hypothesis leads to a bad estimate because the points taken out of the training set ($K$ points out of $N$) for validation may not be representative for estimating the out-of-sample error, $E_{out}$. When, we take a large partition however, we get again an accurate validation error ($E_{val}$), but because the model is learned from a small number of data points, we have a bigger chance of getting a bad model for the data. Thus, we need to balance $K$. # # The optimal situation however would be to have $K$ both small and large, thus getting a good model and a good estimation of $E_{out}$. To achieve this, we use points one time for validating, and other times for training a model. # We separate the $N$ datapoints into a number of folds, $F$. These number of folds could be as large as $N$ itself, in which $N$ iterations will take place and when only one point is used for validation. 
# In cross-validation, $K=\frac{N}{F}$, but these K points differ each time as a different fold is used for validation. # The total cross-validation error will then be $E_{cv}=\frac{1}{F}\sum_{n=1}^F e_n$, in which $e_n=E_{val}(g_n^-)$ and $g_n^-$ is the model trained on $N-K$ datapoints. # In all the exercises, we only take data with as target value (digit) 1 or 5 and replace this target values by respectively -1 and 1. # After listing all the possible values on $C$ that need to be used with the support vector machine, we do a number of runs. In each run, we separate the training data randomly into 10 folds. Then, for each value of $C$, we iterate 10 times. Each time take a different fold on which the classifier will be tested and the other 9 folds to train the classifier. This classifier is made using a support vector machine which is given the current value of $C$. from svmutil import * import pandas as pd from sklearn.cross_validation import KFold from collections import Counter import numpy as np import matplotlib.pyplot as plt import operator pd.options.mode.chained_assignment = None # %matplotlib inline train = pd.read_table("features.train", sep=" +", header=None, engine='python') train.columns = ["digit", "intensity", "symmetry"] test = pd.read_table("features.test", sep=" +", header=None, engine='python') test.columns = ["digit", "intensity", "symmetry"] train.head() filtered = train[(train.digit == 1) | (train.digit == 5)] filtered.loc[filtered.digit == 1, "digit"] = -1 filtered.loc[filtered.digit == 5, "digit"] = 1 filtered = filtered.reset_index(drop=True) filtered.head() # + possible_C = np.array([0.0001, 0.001, 0.01, 0.1, 1]) runs = 100 best_run_C = np.zeros(runs) C_val_errors = {c: np.zeros(runs) for c in possible_C} for run in range(runs): kf = KFold(len(filtered), n_folds=10, shuffle=True) C_errors = np.zeros(len(possible_C)) for i,C in enumerate(possible_C): fold_errors = np.zeros(len(kf)) for j, index_pair in enumerate(kf): train_index, 
val_index = index_pair train_fold = filtered.ix[train_index] train_x = train_fold[["intensity", "symmetry"]].values.tolist() train_y = train_fold["digit"].values.tolist() val_fold = filtered.ix[val_index] val_x = val_fold[["intensity", "symmetry"]].values.tolist() val_y = val_fold["digit"].values.tolist() m = svm_train(train_y, train_x, '-q -t 1 -d 2 -c {} -r 1 -g 1'.format(C)) p_label, p_acc, p_val = svm_predict(val_y, val_x, m, "-q") fold_errors[j] = (100-p_acc[0])/100. mean_fold_error = np.mean(fold_errors) C_val_errors[C][run] = mean_fold_error C_errors[i] = mean_fold_error best_run_C[run] = possible_C[np.argmin(C_errors)] # - # ### Question 7 # Consider the 1 versus 5 classifier with Q = 2. We use $E_{cv}$ to select $C \in \{0.0001, 0.001, 0.01, 0.1, 1\}$. If there is a tie in $E_{cv}$ , select the smaller C . Within the 100 random runs, which of the following statements is correct? # # # After training, the classifier is tested and the errors on the test fold are saved. Afterwards, we average all errors for specific $C$ values for each fold over all the runs. We then calculate which $C$ yielded the lowest error rate in each run. By then calculating which $C$ was the most number of times selected as the one with the lowest error rate, we know which $C$ value is the best. ctr = Counter(best_run_C) best_c = ctr.most_common(1)[0][0] print("Best C: ", best_c) X = np.arange(len(ctr)) sorted_ctr = sorted(ctr.items(), key=operator.itemgetter(0)) keys = [t[0] for t in sorted_ctr] values = [t[1] for t in sorted_ctr] plt.bar(X, values, align='center', width=0.5, color="black") plt.xticks(X, keys) ymax = max(values) + 1 plt.ylim(0, ymax) plt.title("Number of times each C is selected as the best") plt.xlabel("C value") plt.ylabel("Times selected") plt.show() # The lowest cross validation error is achieved when $C=0.001$. As a result, our answer to this question is B. # ### Question 8 # # Again, consider the 1 versus 5 classifier with $Q = 2$. 
For the winning selection # in the previous problem, the average value of $E_{cv}$ over the 100 runs is closest to... # # In this question, we need to report the error of the results using the best C value. As already saved this in order to calculate which C yielded the best results, reporting this value is easy. print("Cross validation error when using C=", best_c, ": ", np.mean(C_val_errors[best_c])) # The achieved cross validation error is closest to $0.005$, which means that our answer is C. # ## RBF kernel # For the next questions, a radial basis function (RBF) kernel is used. This kernel is of the following form: # $$ # K(x_n,x_m)=e^{-\gamma\|x_n-x_m\|^2} # $$ # Here, $x_n$ and $x_m$ are again data points. Besides the different kernel, this approach is the same as with the support vector machine using the polynomial kernel which was explained earlier. $\gamma$ is a parameter passed to the algorithm and influences the variance of the obtained function. Each data point has an influence on the hypothesis based on its radial distance $\|x-x_n\|$. Intuitively, this means that nearby points have more influence than data points that are farther away. A small $\gamma$ means that a single data point has a lot of influence and leads to low bias and high variance models, while a high $\gamma$ leads to less influence and will give you a higher bias and lower variance. # # Notice that the corresponding non-linear transform ($\phi$) of the kernel generates infinite-dimensional data points. As such, first computing the transform and then applying the inner product of 2 points would not be possible. # # For the following questions, we always use $\gamma=1$ and we use the soft margin approach which was explained earlier. The experiments then involve trying different values for C. 
filtered_test = test[(test.digit == 1) | (test.digit == 5)] filtered_test["digit"].replace(1,float(-1),inplace=True) filtered_test["digit"].replace(5,float(1),inplace=True) filtered_test = filtered_test.reset_index(drop=True) # + possible_C = [0.01, 1, 100, 10**4, 10**6] E_ins = np.zeros(len(possible_C)) E_outs = np.zeros(len(possible_C)) for i,C in enumerate(possible_C): train_x = filtered[["intensity", "symmetry"]].values.tolist() train_y = filtered["digit"].values.tolist() test_x = filtered_test[["intensity", "symmetry"]].values.tolist() test_y = filtered_test["digit"].values.tolist() m = svm_train(train_y, train_x, '-q -t 2 -d 2 -c {} -g 1'.format(C)) train_label, train_acc, train_val = svm_predict(train_y, train_x, m, "-q") test_label, test_acc, test_val = svm_predict(test_y, test_x, m, "-q") E_ins[i] = (100-train_acc[0])/100. E_outs[i] = (100-test_acc[0])/100. # - # ### Question 9 # # Which of the following values of C results in the lowest $E_{in}$? # # For this question, we had to report the C value which yielded the lowest in-sample error. To do this, we train for each value of C the SVM with that particular C on all data (of digits 1 and 5) and use this classifier on the training and test data (of digits 1 and 5). The errors on the training data and test data are then saved to separate lists. # We then report the value of C with the lowest corresponding error in the list of in-sample errors. possible_C[np.argmin(E_ins)] X = np.arange(len(E_ins)) plt.bar(X, E_ins, align='center', width=0.5, color="black") plt.xticks(X, possible_C) ymax = max(E_ins) + 0.001 plt.ylim(0, ymax) plt.title("In-sample error for different values of C") plt.xlabel("C value") plt.ylabel("Error") plt.show() # We see that we get the lowest in-sample error when $C=1000000$. This means that the answer to this question is E. # ### Question 10 # # Which of the following values of C results in the lowest $E_{out}$? 
# # Here, we need to report the C value that gave us the lowest out-of-sample error. As already mentioned, the errors on the test data were already saved to a list. Thus, we now need to report the value of C with the lowest corresponding error in this list of out-of-sample errors possible_C[np.argmin(E_outs)] X = np.arange(len(E_outs)) plt.bar(X, E_outs, align='center', width=0.5, color="black") plt.xticks(X, possible_C) ymax = max(E_outs) + 0.001 plt.ylim(0, ymax) plt.title("Out-of-sample error for different values of C") plt.xlabel("C value") plt.ylabel("Error") plt.show() # The lowest out-of-sample error is achieved when $C=100$. This is equal to the value of answer C.
notebooks/Homework 8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # clean dataset

import pandas as pd
import re
import numpy as np
import math
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_style("darkgrid")

# Load the Cleveland heart-disease data set.
df_heart = pd.read_csv('Downloads/heart-disease-cleveland.csv')

# Missing-value count per column, most affected columns first.
df_heart.isnull().sum().sort_values(ascending=False)

# overview of dtypes, content and dimensions
df_heart.dtypes
df_heart
df_heart.shape

# number of rows missing in each column
# df_heart.isnull().sum()

# share (%) of rows missing in each column
for column, null_fraction in df_heart.isnull().mean().items():
    print(f'{column}: {round(null_fraction*100, 2)}%')

# 1. drop: remove the column at position 11
df_heart = df_heart.drop(columns=df_heart.columns[[11]])
df_heart

# persist the cleaned data
df_heart.to_csv("Downloads/heart.csv")
Clean_dataset_heart_disease.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Needed Modules
# NOTE(review): matplotlib.pyplot and tqdm are imported twice below; harmless but redundant.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
import os
import sys
import networkx as nx
from tqdm import tqdm
import json
from pathlib import Path
import matplotlib.pyplot as plt
print("Starting Notebook.")
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
# -

tqdm.pandas()

# Jira instances included in the study.
SOURCES = ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB', 'Mindville', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'SecondLife', 'Sonatype', 'Spring']

# High-level link categories the raw link types are mapped onto.
categories = ['General Relation', 'Duplication', 'Temporal/Causal', 'Composition', 'Workflow']

# Raw Jira link-type name -> link category.
type_dict={'Backports': 'Workflow', 'Blocked': 'Temporal/Causal', 'Blocker': 'Temporal/Causal', 'Blocks': 'Temporal/Causal',
           'Bonfire Testing': 'Workflow', 'Bonfire testing': 'Workflow', 'Git Code Review': 'Workflow', 'Testing': 'Workflow',
           'Causality': 'Temporal/Causal', 'Cause': 'Temporal/Causal', 'Caused': 'Temporal/Causal', 'Problem/Incident': 'Temporal/Causal',
           'Child-Issue': 'Composition', 'Parent Feature': 'Composition', 'Parent/Child': 'Composition',
           'multi-level hierarchy [GANTT]': 'Composition', 'Parent-Relation': 'Composition',
           'Cloners': 'Duplication', 'Cloners (old)': 'Duplication',
           'Collection': 'Composition', 'Container': 'Composition', 'Contains(WBSGantt)': 'Composition',
           'Incorporate': 'Composition', 'Incorporates': 'Composition', 'Part': 'Composition', 'PartOf': 'Composition',
           'Superset': 'Composition', 'Completes': 'Workflow', 'Fixes': 'Workflow', 'Resolve': 'Workflow',
           'Depend': 'Temporal/Causal', 'Dependency': 'Temporal/Causal', 'Dependent': 'Temporal/Causal',
           'Depends': 'Temporal/Causal', 'Gantt Dependency': 'Temporal/Causal', 'dependent': 'Temporal/Causal',
           'Derived': 'Workflow', 'Detail': 'Workflow', 'Documentation': 'Workflow', 'Documented': 'Workflow',
           'Duplicate': 'Duplication', 'Epic': 'Composition', 'Epic-Relation': 'Composition',
           'Finish-to-Finish link (WBSGantt)': 'Temporal/Causal', 'Gantt End to End': 'Temporal/Causal',
           'Gantt: finish-finish': 'Temporal/Causal', 'finish-finish [GANTT]': 'Temporal/Causal',
           'Gantt End to Start': 'Temporal/Causal', 'Gantt: finish-start': 'Temporal/Causal',
           'finish-start [GANTT]': 'Temporal/Causal', 'Gantt Start to Start': 'Temporal/Causal',
           'Gantt: start-finish': 'Temporal/Causal', 'Follows': 'Temporal/Causal', 'Sequence': 'Temporal/Causal',
           'Implement': 'Workflow', 'Implements': 'Workflow', 'Issue split': 'Composition', 'Split': 'Composition',
           'Work Breakdown': 'Composition', 'Preceded By': 'Temporal/Causal',
           'Reference': 'General Relation', 'Relate': 'General Relation', 'Related': 'General Relation',
           'Relates': 'General Relation', 'Relationship': 'General Relation',
           'Regression': 'Workflow', 'Replacement': 'Duplication', 'Required': 'Temporal/Causal',
           'Supercedes': 'Workflow', 'Supersede': 'Workflow', 'Supersession': 'Workflow',
           'Test': 'Workflow', 'Tested': 'Workflow', 'Trigger': 'Workflow', 'Subtask': 'Composition'}

# Link category -> sources in which that category occurs.
source_lt_dict={'General Relation': ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB', 'Mindville', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'Sonatype', 'Spring', 'SecondLife'],
                'Duplication': ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB', 'Mindville', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'Sonatype', 'Spring', 'SecondLife'],
                'Composition': ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'Sonatype', 'Spring', 'SecondLife'],
                'Temporal/Causal': ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB', 'Mindville', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'Sonatype', 'Spring', 'SecondLife'],
                'Workflow': ['Apache', 'Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MongoDB', 'Qt', 'RedHat', 'Sakai', 'Sonatype', 'Spring'],
               }

def load_data(source):
    """Load the crawled issues and cleaned links CSVs for one source.

    Returns a (issues, links) pair of DataFrames.
    """
    # Loading Issues (semicolon-separated export)
    filename = '../data/crawl/issues_'+source.lower()+'.csv'
    issues = pd.read_csv(filename, encoding="UTF-8", low_memory=False, sep=';')
    # Loading Links (first column is the index)
    filename = '../data/crawl/clean_links_'+source.lower()+'.csv'
    links = pd.read_csv(filename, encoding="UTF-8", low_memory=False, index_col=0)
    return issues, links

# +
# Load every source once and cache the frames in dicts keyed by source name.
link_dict = {}
issue_dict = {}
for s in SOURCES:
    print(s.upper())
    issues, links = load_data(s)
    link_dict[s] = links
    issue_dict[s] = issues
    print(f'Loaded {len(issues)} issues and {len(links)} links')
# -

def add_mean(df):
    """Append a column-wise mean row and a standard-deviation row to df (in place)."""
    og_df = df
    averages = og_df.mean()
    avg_row = averages.to_list()
    avg_row.insert(0, "Mean")
    stds = og_df.std()
    std_row = stds.to_list()
    std_row.insert(0, "Standard Dev.")
    df.loc[len(df)] = avg_row
    df.loc[len(df)] = std_row
    return df

def calc_graph_structures(issues, links):
    """Compute structural metrics of the undirected issue-link graph of one source.

    Returns a tuple of counts/percentages: orphan issues, 2-node components,
    3+-node ("complex") components, assortativity, and averages over the
    complex components (diameter, density, cycle/tree/star shares).
    """
    num_issues = len(set(issues['issue_id']))
    G = nx.Graph()
    print("Add Edges")
    for i in tqdm(links.index):
        G.add_edge(links['issue_id_1'].iloc[i], links['issue_id_2'].iloc[i])
    # Issues that never appear in any link are orphans.
    num_orphans = num_issues-G.number_of_nodes()
    components = nx.connected_components(G)
    assortativity = nx.degree_assortativity_coefficient(G)
    assortativity = round(assortativity, 3)
    num_comp = 0
    sum_den = 0
    sum_diam = 0
    cycle_count = 0
    star_count = 0
    tree_count = 0
    count_2_component = 0
    count_3m_component = 0
    print("Load Components")
    for c in tqdm(components):
        SG = nx.subgraph(G, c)
        n = SG.number_of_nodes()
        num_comp += 1
        if n==2:
            count_2_component += 1
        else:
            count_3m_component+=1
            diam = nx.diameter(SG)
            den = nx.density(SG)
            sum_diam += diam
            sum_den += den
            # A component either contains a cycle or is a tree.
            try:
                cycles = list(nx.find_cycle(SG, orientation="ignore"))
                cycle_count+=1
            except:
                tree_count+=1
            # Star: diameter 2, >=3 nodes, exactly n-1 edges.
            if (diam == 2) & (n>=3) & (SG.number_of_edges()==n-1):
                star_count+=1
    num_issues_in_complex = round(((num_issues-num_orphans-2*count_2_component)/num_issues)*100,2)
    try:
        share_complex_components = round(((count_3m_component)/num_comp)*100,2)
    except:
        share_complex_components = 0
    per_orphans = round((num_orphans/num_issues)*100,2)
    avg_diam = sum_diam/count_3m_component
    avg_diam = round(avg_diam, 3)
    avg_dens = sum_den/count_3m_component
    avg_dens = round(avg_dens, 3)
    per_cycles = round((cycle_count/count_3m_component)*100,2)
    per_tree = round((tree_count/count_3m_component)*100,2)
    per_star = round((star_count/count_3m_component)*100,2)
    try:
        per_2_comp = round((count_2_component/num_comp)*100,2)
        per_3_comp = round((count_3m_component/num_comp)*100,2)
    except:
        per_2_comp = 0
        per_3_comp = 0
    return num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star

# +
# Collect per-source graph metrics; sources are processed in batches below
# (the expensive ones split across cells) and appended row by row via `j`.
overall_struc_df = pd.DataFrame(columns=['source', 'num_issues', 'num_orphans', 'per_orphans', 'num_comp', 'count_2_component', 'per_2_comp', 'count_3m_component', 'per_3_comp', 'num_issues_in_complex', 'share_complex_components', 'assortativity', 'avg_diam', 'avg_dens', 'per_cycles', 'per_tree', 'per_star'])
j = 0
for s in ['Hyperledger', 'IntelDAOS', 'JFrog', 'Jira', 'JiraEcosystem', 'MariaDB']:
    print(s.upper())
    issues = issue_dict[s]
    links = link_dict[s]
    num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star = calc_graph_structures(issues, links)
    overall_struc_df.loc[j] = [s, num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star]
    j+=1
# -

overall_struc_df

for s in ['Mindville', 'MongoDB', 'Qt']:
    print(s.upper())
    issues = issue_dict[s]
    links = link_dict[s]
    num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star = calc_graph_structures(issues, links)
    overall_struc_df.loc[j] = [s, num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star]
    j+=1

overall_struc_df

for s in ['RedHat', 'Sakai', 'SecondLife', 'Sonatype', 'Spring']:
    print(s.upper())
    issues = issue_dict[s]
    links = link_dict[s]
    num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star = calc_graph_structures(issues, links)
    overall_struc_df.loc[j] = [s, num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star]
    j+=1

# +
# Apache is the largest source and gets its own cell.
s = 'Apache'
issues = issue_dict[s]
links = link_dict[s]
num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star = calc_graph_structures(issues, links)
overall_struc_df.loc[j] = [s, num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, avg_diam, avg_dens, per_cycles, per_tree, per_star]
j+=1
# -

add_mean(overall_struc_df)

# +
def calc_subgraph_structures(issues, links, linktype):
    """Like calc_graph_structures, but restricted to links of one mapped category.

    Additionally returns graph transitivity and the number of edges; empty or
    degenerate subgraphs fall back to 0 for the affected metrics.
    """
    num_issues = len(set(issues['issue_id']))
    # Keep only the links of the requested category.
    links_lt = links[links['mappedtype']==linktype]
    links_lt.reset_index(inplace=True)
    G = nx.Graph()
    for i in links_lt.index:
        G.add_edge(links_lt['issue_id_1'].iloc[i], links_lt['issue_id_2'].iloc[i])
    num_orphans = num_issues-G.number_of_nodes()
    # Assortativity is undefined for some degenerate graphs; fall back to 0.
    try:
        assortativity = nx.degree_assortativity_coefficient(G)
        assortativity = round(assortativity, 3)
    except:
        assortativity = 0
    transitivity = nx.transitivity(G)
    transitivity = round(transitivity, 3)
    num_links = G.number_of_edges()
    components = nx.connected_components(G)
    num_comp = 0
    sum_den = 0
    sum_diam = 0
    cycle_count = 0
    star_count = 0
    tree_count = 0
    count_2_component = 0
    count_3m_component = 0
    for c in components:
        SG = nx.subgraph(G, c)
        n = SG.number_of_nodes()
        num_comp += 1
        if n==2:
            count_2_component += 1
        else:
            count_3m_component+=1
            diam = nx.diameter(SG)
            den = nx.density(SG)
            sum_diam += diam
            sum_den += den
            # A component either contains a cycle or is a tree.
            try:
                cycles = list(nx.find_cycle(SG, orientation="ignore"))
                cycle_count+=1
            except:
                tree_count+=1
            # Star: diameter 2, >=3 nodes, exactly n-1 edges.
            if (diam == 2) & (n>=3) & (SG.number_of_edges()==n-1):
                star_count+=1
#     if num_comp == 0:
#         num_comp = 1
#     if count_3m_component == 0:
#         count_3m_component = 1
    num_issues_in_complex = round(((num_issues-num_orphans-2*count_2_component)/num_issues)*100,2)
    try:
        share_complex_components = round(((count_3m_component)/num_comp)*100,2)
    except:
        share_complex_components = 0
    per_orphans = round((num_orphans/num_issues)*100,2)
    # Division by zero when there are no complex components -> default metrics to 0.
    try:
        avg_diam = sum_diam/count_3m_component
        avg_diam = round(avg_diam, 3)
        avg_dens = sum_den/count_3m_component
        avg_dens = round(avg_dens, 3)
        per_cycles = round((cycle_count/count_3m_component)*100,2)
        per_tree = round((tree_count/count_3m_component)*100,2)
        per_star = round((star_count/count_3m_component)*100,2)
    except:
        avg_diam = 0
        avg_dens = 0
        per_cycles = 0
        per_tree = 0
        per_star = 0
    try:
        per_2_comp = round((count_2_component/num_comp)*100,2)
        per_3_comp = round((count_3m_component/num_comp)*100,2)
    except:
        per_2_comp = 0
        per_3_comp = 0
    return num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, transitivity, avg_diam, avg_dens, per_cycles, per_tree, per_star, num_links

# +
# Per-category metrics: one DataFrame per link category, stored back into
# link_dict keyed by the category name.
# NOTE(review): the column label 'share_complext_components' contains a typo
# ("complext"); rows are assigned positionally so values are still correct,
# but any later lookup must use the misspelled name.
linktype_metrics = {}
for lt in categories:
    print(lt.upper())
    lt_struc_df = pd.DataFrame(columns=['source', 'num_issues', 'num_orphans', 'per_orphans', 'num_comp', 'count_2_component', 'per_2_comp', 'count_3m_component', 'per_3_comp', 'num_issues_in_complex', 'share_complext_components', 'assortativity', 'transitivity', 'avg_diam', 'avg_dens', 'per_cycles', 'per_tree', 'per_star', 'num_edges'])
    j = 0
    source_list = source_lt_dict[lt]
    total_links = 0
    for s in source_list:
        issues = issue_dict[s]
        links = link_dict[s]
        # Map raw link-type names onto the five categories.
        links['mappedtype'] = links['linktype'].map(type_dict)
        num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, transitivity, avg_diam, avg_dens, per_cycles, per_tree, per_star, num_links = calc_subgraph_structures(issues, links, lt)
        lt_struc_df.loc[j] = [s, num_issues, num_orphans, per_orphans, num_comp, count_2_component, per_2_comp, count_3m_component, per_3_comp, num_issues_in_complex, share_complex_components, assortativity, transitivity, avg_diam, avg_dens, per_cycles, per_tree, per_star, num_links]
        j+=1
        total_links += num_links
    print(total_links)
    link_dict[lt] = lt_struc_df
# -

for lt in categories:
    add_mean(link_dict[lt])

# +
def get_latex_print(df):
    """Print a LaTeX table of the key percentage/coefficient columns of df."""
    temp = df[['source', 'per_orphans', 'per_2_comp', 'per_3_comp',
#                'transitivity',
               'assortativity', 'avg_dens', 'per_tree', 'per_star']]
    # Format percentages with one decimal and a trailing '%'.
    temp['per_tree'] = temp['per_tree'].apply(lambda x: str(round(x,1))+'%')
    temp['per_star'] = temp['per_star'].apply(lambda x: str(round(x,1))+'%')
    temp['per_2_comp'] = temp['per_2_comp'].apply(lambda x: str(round(x,1))+'%')
    temp['per_3_comp'] = temp['per_3_comp'].apply(lambda x: str(round(x,1))+'%')
    temp['per_orphans'] = temp['per_orphans'].apply(lambda x: str(round(x,1))+'%')
    temp['assortativity'] = temp['assortativity'].apply(lambda x: round(x,3))
#     blub['transitivity'] = blub['transitivity'].apply(lambda x: round(x,3))
    temp['avg_dens'] = temp['avg_dens'].apply(lambda x: round(x,3))
#     blub.at[len(blub)-2,'num_orphans'] = str(blub.loc[len(blub)-2]['num_orphans']).split(".")[0]
#     blub.at[len(blub)-2,'num_comp'] = str(blub.loc[len(blub)-2]['num_comp']).split(".")[0]
#     blub.at[len(blub)-1,'num_orphans'] = str(blub.loc[len(blub)-1]['num_orphans']).split(".")[0]
#     blub.at[len(blub)-1,'num_comp'] = str(blub.loc[len(blub)-1]['num_comp']).split(".")[0]
    print(temp.to_latex(index=False))
# -

link_dict['Composition']

link_dict['General Relation']

link_dict['Duplication']

link_dict['Temporal/Causal']

link_dict['Workflow']

get_latex_print(link_dict['General Relation'])

get_latex_print(link_dict['Duplication'])

get_latex_print(link_dict['Temporal/Causal'])

get_latex_print(link_dict['Composition'])

get_latex_print(link_dict['Workflow'])

get_latex_print(overall_struc_df.sort_values(by='source'))
RQ2_link_gt_metrics/LinkCategoryMetrics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Load the combined water-quality export.
water_df = pd.read_csv('../SourceMaterial/Water_FINAL.csv')
water_df.head()

water_df.describe()

# Columns present in the export.
column_names = list(water_df.columns)
column_names

# Columns kept for every per-parameter extract.
keep_cols = ['Date', 'ModifiedDate', 'HUCNAME_', 'Latitude', 'Longitude',
             'Parameter', 'ParameterName_CBP', 'ParameterName_CMC', 'MeasureValue']

def _parameter_subset(code):
    # Rows whose Parameter code contains `code`, restricted to keep_cols.
    return water_df.loc[water_df['Parameter'].str.contains(code), keep_cols]

# Water temperature measurements (parameter codes containing 'WT').
water_temps = _parameter_subset('WT')
water_temps

# pH measurements.
ph_df = _parameter_subset('PH')
ph_df.head()

# Conductivity measurements.
conductivity_df = _parameter_subset('CO')
conductivity_df.head()

water_temps.to_csv('../SourceMaterial/Measures/water_temps.csv', index=False)
ph_df.to_csv('../SourceMaterial/Measures/ph.csv', index=False)
conductivity_df.to_csv('../SourceMaterial/Measures/conductivity.csv', index=False)
Hack_the_bay_exploratory.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Experiment 1.3 XGBoost with class balancing # # Try to improve on the xgboost model score by treating the class imbalance in the training data. Can do this by resampling using various techniques: # # 1. Adjust the weight of the positive class in xgboost (scale_pos_weight) # 2. Upsample or downsample the adta # 3. Synthetic Minority Oversampling Technique (SMOTE) # # Experiments will be tried in this order (from least to most destructive) # # Simple examples to follow # # https://machinelearningmastery.com/xgboost-for-imbalanced-classification/ # # https://elitedatascience.com/imbalanced-classes # # https://machinelearningmastery.com/smote-oversampling-for-imbalanced-classification/ # # The previous kaggle AUC to beat is 0.70615. # The best validation AUC so far is 0.70257. import pandas as pd import numpy as np import matplotlib.pyplot as plt from xgboost import XGBClassifier # + # import load_data function from helper file # %load_ext autoreload # %autoreload 2 # fix system path import sys sys.path.append("/home/jovyan/work") # - # Since the last experiment, some adjustments to the training data were made. # + from src.features.helper_functions import load_sets X_train, y_train, X_val, y_val, X_test = load_sets() # - # ## 1. Scaling positive class weight # Check the class counts again # check distribution of the target unique_elements, counts_elements = np.unique(y_train, return_counts=True) print(np.asarray((unique_elements, counts_elements))) # The documentation for xgboost suggests that the number to use in the scale_pos_weight argument is the ratio of negative (0) to positive (1). 
# # https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html # https://xgboost.readthedocs.io/en/latest/parameter.html # https://stats.stackexchange.com/questions/243207/what-is-the-proper-usage-of-scale-pos-weight-in-xgboost-for-imbalanced-datasets # # In this case, the positive class outweights the negative, so the imbalance is flipped, therefore our ratio will be <1. 1074/5326 # Ratio is 20% 0 to 1, so for the weights in xgboost use # scale_pos_weight=0.2. # ## Train using class weights # Use stratified kfold for cross validation - need to use this to assess the effectiveness of applying weights # + from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import cross_val_score # create kfold object kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7) # - # instatiate model model = XGBClassifier(scale_pos_weight=0.2) # evaluate model scores = cross_val_score(model, X_train, y_train, scoring='roc_auc', cv=kfold, n_jobs=-1) # summarise performance print('Mean ROC AUC: %.5f' % np.mean(scores)) scores # Not the best mean AUC, but the highest fold achieved 0.68. Try a small grid search experiment using a range of weights to see if a slightly different value is better. 
# + from sklearn.model_selection import GridSearchCV # define grid weights = [0.2, 0.1, 0.3, 0.4] param_grid = dict(scale_pos_weight=weights) # define grid search grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=kfold, scoring='roc_auc') # + # execute the grid search grid_result = grid.fit(X_train, y_train) # report the best configuration print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) # report all configurations means = grid_result.cv_results_['mean_test_score'] stds = grid_result.cv_results_['std_test_score'] params = grid_result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) # - # The AUC was best with weight at 0.1, which was higher than the mean AUC from above. Try smaller increments between 0.1 and 0.2. # + # define grid weights = [0.125, 0.15, 0.175] param_grid = dict(scale_pos_weight=weights) # define grid search grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=kfold, scoring='roc_auc') # execute the grid search grid_result = grid.fit(X_train, y_train) # report the best configuration print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_)) # report all configurations means = grid_result.cv_results_['mean_test_score'] stds = grid_result.cv_results_['std_test_score'] params = grid_result.cv_results_['params'] for mean, stdev, param in zip(means, stds, params): print("%f (%f) with: %r" % (mean, stdev, param)) # - # Smaller increments did not make a difference, use weight = 0.1. # # Can the AUC score be improved using the previous tuned model with this added weight parameter? 
# ## Use weight scale with previous best model parameters # instantiate model with weights and best params model = XGBClassifier(max_depth = 2, n_estimators = 140, learning_rate = 0.05, scale_pos_weight=0.1) # fit model model_weight_bp = model.fit(X_train, y_train) from src.features.helper_functions import save_model save_model(model_weight_bp, 'rez_xgb_bestparam_weight') # ## Predict on train & val set # predict class y_train_preds = model_weight_bp.predict(X_train) y_val_preds = model_weight_bp.predict(X_val) # predict proabilities y_train_preds_prob = model_weight_bp.predict_proba(X_train) y_val_preds_prob = model_weight_bp.predict_proba(X_val) # #### Train set metrics # + from sklearn.metrics import roc_auc_score ,recall_score, precision_score, accuracy_score, classification_report, confusion_matrix accuracy = accuracy_score(y_train, y_train_preds) precision=precision_score(y_train, y_train_preds) recall=recall_score(y_train, y_train_preds) roc=roc_auc_score(y_train, y_train_preds) print("Accuracy: %.2f%%" % (accuracy * 100.0)) print("Precision: %.2f%% " % (precision *100)) print("Recall: %.2f%% " % (recall * 100)) print("AUC: %.3f%% " % (roc *100)) class_report = classification_report(y_train, y_train_preds) print(class_report) confusion_matrix(y_train,y_train_preds) # - roc_auc_score(y_train, y_train_preds_prob[:,1]) # + from sklearn.metrics import plot_confusion_matrix plot_confusion_matrix(model_weight_bp, X_train, y_train, cmap=plt.cm.Blues, values_format='4d') plt.show() # - # #### val set metrics # + accuracy = accuracy_score(y_val, y_val_preds) precision=precision_score(y_val, y_val_preds) recall=recall_score(y_val, y_val_preds) roc=roc_auc_score(y_val, y_val_preds) print("Validation Accuracy: %.2f%%" % (accuracy * 100.0)) print("Validation Precision: %.2f%% " % (precision *100)) print("Validation Recall: %.2f%% " % (recall * 100)) print("Validation AUC: %.3f%% " % (roc *100)) class_report = classification_report(y_val, y_val_preds) print(class_report) 
confusion_matrix(y_val,y_train_preds)
# -

# AUC computed on the positive-class probability rather than hard labels.
roc_auc_score(y_train, y_train_preds_prob[:,1])

# +
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(model_weight_bp, X_train, y_train, cmap=plt.cm.Blues, values_format='4d')
plt.show()
# -

# #### val set metrics

# +
accuracy = accuracy_score(y_val, y_val_preds)
precision=precision_score(y_val, y_val_preds)
recall=recall_score(y_val, y_val_preds)
roc=roc_auc_score(y_val, y_val_preds)

print("Validation Accuracy: %.2f%%" % (accuracy * 100.0))
print("Validation Precision: %.2f%% " % (precision *100))
print("Validation Recall: %.2f%% " % (recall * 100))
print("Validation AUC: %.3f%% " % (roc *100))

class_report = classification_report(y_val, y_val_preds)
print(class_report)

confusion_matrix(y_val, y_val_preds)
# -

plot_confusion_matrix(model_weight_bp, X_val, y_val, cmap=plt.cm.Blues, values_format='4d')
plt.show()

roc_auc_score(y_val, y_val_preds_prob[:,1])

# +
from sklearn.metrics import roc_curve

# roc curve for models
fpr, tpr, thresh = roc_curve(y_val, y_val_preds_prob[:,1], pos_label=1)

# roc curve for tpr = fpr (random-classifier baseline)
random_probs = [0 for i in range(len(y_val))]
p_fpr, p_tpr, _ = roc_curve(y_val, random_probs, pos_label=1)

# +
# use matplotlib to plot ROC curve
plt.style.use('seaborn')

# plot roc curves
plt.plot(fpr, tpr, linestyle='--',color='orange', label='Weight & best params')
plt.plot(p_fpr, p_tpr, linestyle='--', color='blue')

# title
plt.title('ROC curve')
# x label
plt.xlabel('False Positive Rate')
# y label
plt.ylabel('True Positive rate')

plt.legend(loc='best')
plt.savefig('../src/visualization/Rez XGBoost weight best params',dpi=300)
plt.show()
# -

# The AUC scores for the training and validation sets are slightly higher than the previous experiment (by about 0.002) but have not moved closer i.e. the model is still not predicting more generally.
#
# The weight adjustment seems to lessen the model's ability to predict the 1 class, and not improve the model's sensitivity to the 0 class.
#
# This demonstrates that adjusting the scale_pos_weight parameter is not an overly effective method to treat the class imbalance.

# create an output for kaggle testing anyway.

y_test_preds = model_weight_bp.predict(X_test)
y_test_preds_prob = model_weight_bp.predict_proba(X_test)

print(y_test_preds)
print(y_test_preds_prob)

unique_elements, counts_elements = np.unique(y_test_preds, return_counts=True)
print(np.asarray((unique_elements, counts_elements)))

# Project helper: format predictions into the kaggle submission layout.
from src.features.helper_functions import create_output
output = create_output(X_test, y_test_preds_prob)

# save to csv
output.to_csv('../data/processed/output_xgboost_weight_bp_wk2.csv',index=False)

# # 2. Upsampling the minority class 0
#
# Here I will upsample the minority class 0, since there isn't enough observations of class zero to downsample and still have an effective xgboost model.
#
# https://imbalanced-learn.org/stable/generated/imblearn.over_sampling.RandomOverSampler.html

from imblearn.over_sampling import RandomOverSampler
from collections import Counter
import sklearn

print('Original dataset shape %s' % Counter(y_train))

# +
# the ros automatically uses the "not majority" strategy to resample
ros = RandomOverSampler(random_state=42)
X_res, y_res = ros.fit_resample(X_train, y_train)
print('Resampled dataset shape %s' % Counter(y_res))
# -

# save new upsampled data to interim
np.save('../data/interim/X_train_os', X_res)
np.save('../data/interim/y_train_os', y_res)

# #### Train

# instantiate model with the previously tuned parameters
model = XGBClassifier(max_depth = 2, n_estimators = 140, learning_rate = 0.05)

# +
# train model with new over-sampled data
model_os = model.fit(X_res, y_res)
# -

from src.features.helper_functions import save_model
save_model(model_os, 'rez_xgb_bestparam_os')

# #### Predict - Check results

# predict class
y_train_preds = model_os.predict(X_res)
y_val_preds = model_os.predict(X_val)

# predict probabilities
y_train_preds_prob = model_os.predict_proba(X_res)
y_val_preds_prob = model_os.predict_proba(X_val)

# Project helper: prints/returns the standard metric summary for predictions.
from src.features.helper_functions import result_metrics
result_metrics(y_res, y_train_preds,y_train_preds_prob)

result_metrics(y_val, y_val_preds,y_val_preds_prob)

# Results are very bad. Try training without the tuned parameters, since they were tuned on the imbalanced training set.
print('Resampled dataset shape %s' % Counter(y_val))

# Default (untuned) xgboost as a baseline on the resampled data.
model_basic = XGBClassifier()

# +
# train model with new over-sampled data
model_os_basic = model_basic.fit(X_res, y_res)
# -

# predict class
y_train_preds = model_os_basic.predict(X_res)
y_val_preds = model_os_basic.predict(X_val)

# predict probabilities
y_train_preds_prob = model_os_basic.predict_proba(X_res)
y_val_preds_prob = model_os_basic.predict_proba(X_val)

result_metrics(y_res, y_train_preds,y_train_preds_prob)

result_metrics(y_val, y_val_preds,y_val_preds_prob)

# The model is really overfitting this time, as the AUC for training is very high.
#
# However, the ability for the model to differentiate between the classes is slightly improved, since we are no longer just wrongly classifying 0's as 1's (the f1 score has improved from 0.04 to 0.23). The ability to predict the 0 class however has not improved as the metrics for class 1 have dropped.
#
# Perhaps we can reduce overfitting here by tuning the basic model, but first try smote to see if the basic model can be improved.

# # 3. SMOTE - create synthetic samples
#
#

from imblearn.over_sampling import SMOTE

# transform the dataset - will do automatic minority resampling
oversample = SMOTE()
X_smote, y_smote = oversample.fit_resample(X_train, y_train)

print('Resampled dataset shape %s' % Counter(y_smote))

# train model with new smote over-sampled data
model_basic = XGBClassifier()
model_smote_basic = model_basic.fit(X_smote, y_smote)

# predict class
y_train_preds = model_smote_basic.predict(X_smote)
y_val_preds = model_smote_basic.predict(X_val)

# predict probabilities
y_train_preds_prob = model_smote_basic.predict_proba(X_smote)
y_val_preds_prob = model_smote_basic.predict_proba(X_val)

result_metrics(y_smote, y_train_preds,y_train_preds_prob)

result_metrics(y_val, y_val_preds,y_val_preds_prob)

# SMOTE seems to perform slightly better than just oversampling, but is still severely overfitting. Should run some cross validation tests to get an average for a few different data splits.

# +
from imblearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score

# define pipeline: SMOTE is applied inside each CV fold to avoid leakage
steps = [('over', SMOTE()), ('model', model_basic)]
pipeline = Pipeline(steps=steps)

# evaluate pipeline
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
scores = cross_val_score(pipeline, X_train, y_train, scoring='roc_auc', cv=cv, n_jobs=-1)
# -

print('Mean ROC AUC: %.3f' % np.mean(scores))

scores

# Try smote with down sampling, as suggested by https://machinelearningmastery.com/smote-oversampling-for-imbalanced-classification/
#
# Use smote to upsample by 30% (creates obs to 30% of the majority class), then undersample by 70% (undersamples the majority to by 70% more than the minority)

# +
from imblearn.under_sampling import RandomUnderSampler

# define pipeline combining moderate over- and under-sampling
over = SMOTE(sampling_strategy=0.3)
under = RandomUnderSampler(sampling_strategy=0.7)
steps = [('over', over), ('under', under), ('model', model_basic)]
pipeline = Pipeline(steps=steps)
# -

# evaluate pipeline
scores_2 = cross_val_score(pipeline, X_train, y_train, scoring='roc_auc', cv=cv, n_jobs=-1)

# +
print('Mean ROC AUC: %.3f' % np.mean(scores_2))
print(np.sort(scores_2))
# -

# The combination with undersampling did perform better, but not significantly different than a 50/50 class split with smote alone.

# # 4. Hyperparameter tuning with Smote

# SMOTE with undersampling was slightly better on average than just SMOTE. With some tuning I can perhaps reduce overfitting and hopefully improve the AUC.
#
# To perform tuning faster than grid search, implement tuning using hyperopt sklearn API. Can try more parameters at the same time.
# # https://www.analyticsvidhya.com/blog/2020/09/alternative-hyperparameter-optimization-technique-you-need-to-know-hyperopt/ # + # transform the dataset using combo resmapling # define pipeline over = SMOTE(sampling_strategy=0.3) under = RandomUnderSampler(sampling_strategy=0.7) steps = [('over', over), ('under', under)] pipeline = Pipeline(steps=steps) # fit on train X_smote_us, y_smote_us = pipeline.fit_resample(X_train, y_train) # - print('Resampled dataset shape %s' % Counter(y_smote_us)) from hyperopt import Trials, STATUS_OK, tpe, hp, fmin space = { 'max_depth' : hp.choice('max_depth', range(2, 10, 1)), # From exp 1 we know this should be pretty low 'learning_rate' : hp.quniform('learning_rate', 0.01, 0.1,0.05),# from exp 1 this should be low too 'min_child_weight' : hp.quniform('min_child_weight', 1, 10, 1), 'subsample' : hp.quniform('subsample', 0.1, 1, 0.05), 'colsample_bytree' : hp.quniform('colsample_bytree', 0.1, 1.0, 0.05), 'reg_lambda': hp.choice('reg_lambda', range(1,3,1)) # try some values to see if overfitting can be improved } def objective(space): from sklearn.model_selection import cross_val_score xgboost = XGBClassifier( max_depth = int(space['max_depth']), learning_rate = space['learning_rate'], min_child_weight = space['min_child_weight'], subsample = space['subsample'], colsample_bytree = space['colsample_bytree'], reg_lambda = space['reg_lambda'] ) auc = cross_val_score(xgboost, X_smote_us, y_smote_us, cv=10, scoring="roc_auc").mean() return{'loss': -auc, 'status': STATUS_OK } # + # Initialize trials object trials = Trials() best = fmin( fn=objective, space=space, algo=tpe.suggest, max_evals=5 ) # - print("Best: ", best) # ## Train using best params xgboost_best = XGBClassifier( max_depth = best['max_depth'], learning_rate = best['learning_rate'], min_child_weight = best['min_child_weight'], subsample = best['subsample'], colsample_bytree = best['colsample_bytree'], reg_lambda = best['reg_lambda'] ) xgboost_best # + # train model with new 
over-sampled data model_xgb_smoteus_ho = xgboost_best.fit(X_smote_us, y_smote_us) # - from src.features.helper_functions import save_model save_model(model_xgb_smoteus_ho, 'rez_xgb_smoteus_ho') # predict class y_train_preds = model_xgb_smoteus_ho.predict(X_smote_us) y_val_preds = model_xgb_smoteus_ho.predict(X_val) # predict proabilities y_train_preds_prob = model_xgb_smoteus_ho.predict_proba(X_smote_us) y_val_preds_prob = model_xgb_smoteus_ho.predict_proba(X_val) result_metrics(y_smote_us, y_train_preds,y_train_preds_prob) result_metrics(y_val, y_val_preds,y_val_preds_prob) # AUC did not improve even with tuning. Make a prediction anyway, and see how it fits to the test data. # save new smote upsampled data to interim np.save('../data/interim/X_train_smoteus', X_smote_us) np.save('../data/interim/y_train_smoteus', y_smote_us) # create an output for kaggle testing anyway. y_test_preds = model_xgb_smoteus_ho.predict(X_test) y_test_preds_prob = model_xgb_smoteus_ho.predict_proba(X_test) print(y_test_preds) print(y_test_preds_prob) unique_elements, counts_elements = np.unique(y_test_preds, return_counts=True) print(np.asarray((unique_elements, counts_elements))) from src.features.helper_functions import create_output output = create_output(y_test_preds_prob) output # save to csv output.to_csv('../data/processed/output_xgboost_smoteus_ho_wk2.csv',index=False) # # KAGGLE score for weights = 0.70290 # # KAGGLE score for smote-upsampling and hyperopt = 0.67427
notebooks/tith_reasmey-10845345-week2_xgb_class_balance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import numpy as np import pandas as pd import pickle as pkl import matplotlib.pyplot as plt from scipy import interpolate # - plt.rcParams['figure.figsize'] = (25, 30) # + with open('./Data/ECG_001_TH.pkl', 'rb') as input_file: ECG_thin = pkl.load(input_file) _ = plt.imshow( ECG_thin, cmap='gray' ) # + with open('./Data/ECG_001_SK.pkl', 'rb') as input_file: ECG_skel = pkl.load(input_file) _ = plt.imshow( ECG_skel, cmap='gray' ) # + height, _ = ECG_thin.shape data_crop = int(height * 0.1) ECG_no_data_thin = ECG_thin[:height-data_crop, :] _ = plt.imshow( ECG_no_data_thin, cmap='gray' ) # - # ## I, aVr, V1, V4 # + height, _ = ECG_thin.shape crop = int(height/4) - 50 ECG_thin_I_row = ECG_no_data_thin[:crop, :] _ = plt.imshow( ECG_thin_I_row, cmap='gray' ) # + _, width = ECG_thin_I_row.shape left_side_crop = int(width * 0.04) right_side_crop = width - int(width * 0.07) ECG_thin_I_row_crop = ECG_thin_I_row[:, left_side_crop:right_side_crop] _ = plt.imshow( ECG_thin_I_row_crop, cmap='gray' ) # + _, width = ECG_thin_I_row_crop.shape split_crop = int(width / 4) ECG_I = ECG_thin_I_row_crop[:, 0:split_crop] _ = plt.imshow( ECG_I, cmap='gray' ) # + ECG_aVR = ECG_thin_I_row_crop[:, split_crop:2*split_crop] _ = plt.imshow( ECG_aVR, cmap='gray' ) # - # ## II, aVl, V2, V5 # + ECG_thin_II_row = ECG_no_data_thin[crop:2*crop, :] _= plt.imshow( ECG_thin_II_row, cmap='gray' ) # + _, width = ECG_thin_I_row.shape left_side_crop = int(width * 0.04) right_side_crop = width - int(width * 0.07) ECG_thin_II_row_crop = ECG_thin_II_row[:, left_side_crop:right_side_crop] _ = plt.imshow( ECG_thin_I_row_crop, cmap='gray' ) # + _, width = ECG_thin_II_row_crop.shape split_crop = int(width / 4) ECG_II = ECG_thin_II_row_crop[:, 0:split_crop] _ = plt.imshow( 
ECG_II, cmap='gray' ) # + ECG_aVL = ECG_thin_II_row_crop[:, split_crop:2*split_crop] _ = plt.imshow( ECG_aVL, cmap='gray' ) # - # ## III, aVf, V3, V6 # + ECG_thin_III_row = ECG_no_data_thin[2*crop:3*crop, :] _= plt.imshow( ECG_thin_III_row, cmap='gray' ) # + _, width = ECG_thin_III_row.shape left_side_crop = int(width * 0.04) right_side_crop = width - int(width * 0.07) ECG_thin_III_row_crop = ECG_thin_III_row[:, left_side_crop:right_side_crop] _ = plt.imshow( ECG_thin_III_row_crop, cmap='gray' ) # + _, width = ECG_thin_III_row_crop.shape split_crop = int(width / 4) ECG_II = ECG_thin_III_row_crop[:, 0:split_crop] _ = plt.imshow( ECG_II, cmap='gray' ) # + ECG_aVF = ECG_thin_III_row_crop[:, split_crop:2*split_crop] _ = plt.imshow( ECG_aVF, cmap='gray' ) # - # ## II # + ECG_thin_IV = ECG_no_data_thin[3*crop:, :] _ = plt.imshow( ECG_thin_IV, cmap='gray' ) # - def filter_lead(lead): ECG_pure = np.zeros(lead.shape) ECG_points = pd.DataFrame( np.dstack(np.nonzero(ECG_pure))[0] ) ECG_points = ECG_points.groupby(1) \ .min() \ .reset_index() \ .values \ .T ECG_points[( ECG_points[1], ECG_points[0] )] = 1 ECG_pure = ECG_points[:, 60:3000] _ = plt.imshow( ECG_thin_IV_pure, cmap='gray' ) return ECG_pure filter_lead(ECG_I) # + ECG_thin_IV_pure_points = np.nonzero(ECG_thin_IV_pure) ECG_thin_IV_pure_inter = interpolate.interp1d(ECG_thin_IV_pure_points[1], ECG_thin_IV_pure_points[0], kind='quadratic') ECG_thin_IV_pure_data = ECG_thin_IV_pure_inter(np.arange(500, 1500, 0.01)) _ = plt.plot(np.arange(500, 1500, 0.01), (ECG_thin_IV_pure_data * -1) + (ECG_thin_IV_pure_data.mean()), '-') # -
Experiments/4 - Automated Derivation Extraction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pcsilcan/pcd/blob/master/20202/pcd_20202_0902_conway.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="H3xzcmv2txCu" # # Conway's problem # + id="rIifu8RRuAtK" # !sudo apt install golang-go # + id="_xWPFgDYmNYf" outputId="ab4ce6c5-3585-4867-e8db-51d071226ea8" colab={"base_uri": "https://localhost:8080/", "height": 34} # %%writefile 1.go package main import ( "fmt" ) const ( MAX = 9 K = 4 ) func compress(inC, pipe chan rune) { n := 0 previous := <-inC for c := range inC { if c == previous && n < MAX-1 { n++ } else { if n > 0 { pipe<- rune(n+1 + 48) n = 0 } pipe<- previous previous = c } } close(pipe) } func output(pipe, outC chan rune) { m := 0 for c := range pipe { outC<- c m++ if m == K { outC <- '\n' m = 0 } } close(outC) } func main() { inC := make(chan rune) pipe := make(chan rune) outC := make(chan rune) go compress(inC, pipe) go output(pipe, outC) go func() { str := "aasbdsssdbsbdjjjfffskskkjsjhsshss." 
for _, c := range str { inC<- c } close(inC) }() for c := range outC { fmt.Printf("%c", c) } fmt.Println() } # + id="kDngwpaauDlT" outputId="8b0901b8-39e3-4d60-c630-cae0262fffa7" colab={"base_uri": "https://localhost:8080/", "height": 151} # !go run 1.go # + id="9g-3LKCJ5XbI" package main import ( "fmt" ) var end chan bool func zero(n int, west chan float64) { for i := 0; i < n; i++ { west <- 0.0 } close(west) } func source(row []float64, south chan float64) { for _, element := range row { south <- element } close(south) } func sink(north chan float64) { for range north { } } func result(c [][]float64, i int, east chan float64) { j := 0 for element := range east { c[i][j] = element j++ } end <- true } func multiplier(firstElement float64, north, east, south, west chan float64) { for secondElement := range north { sum := <-east sum = sum + firstElement*secondElement south <- secondElement west <- sum } close(south) close(west) } func main() { a := [][]float64{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}} b := [][]float64{{1, 0, 2}, {0, 1, 2}, {1, 0, 0}} c := [][]float64{{0, 0, 0}, {0, 0, 0}, {0, 0, 0}} end = make(chan bool) nra := len(a) nca := len(a[0]) ns := make([][]chan float64, nra+1) // canales norte sur, matrix de 4x3 for i := range ns { ns[i] = make([]chan float64, nca) for j := range ns[i] { ns[i][j] = make(chan float64) } } ew := make([][]chan float64, nra) // canales easte oeste, matrix de 3x4 for i := range ew { ew[i] = make([]chan float64, nca+1) for j := range ew[i] { ew[i][j] = make(chan float64) } } for i := 0; i < nra; i++ { go zero(nra, ew[i][nca]) go result(c, i, ew[i][0]) } for i := 0; i < nca; i++ { go source(b[i], ns[0][i]) go sink(ns[nra][i]) } for i := 0; i < nra; i++ { for j := 0; j < nca; j++ { go multiplier(a[i][j], ns[i][j], ew[i][j+1], ns[i+1][j], ew[i][j]) } } for i := 0; i < nra; i++ { <-end } fmt.Println(c) } # + id="kh2dcWSd5XWv" outputId="6ba8e41c-d378-427d-f578-cca845c10765" colab={"base_uri": "https://localhost:8080/", "height": 
34} # %%writefile 3.go package main import ( "fmt" ) var end chan bool func zero(n int, west chan float64) { for i := 0; i < n; i++ { west <- 0.0 } close(west) } func source(row []float64, south chan float64) { for _, element := range row { south <- element } close(south) } func sink(north chan float64) { for range north { } } func result(c [][]float64, i int, east chan float64) { j := 0 for element := range east { c[i][j] = element j++ } end <- true } func multiplier(firstElement float64, north, east, south, west chan float64) { for secondElement := range north { sum := <-east sum = sum + firstElement*secondElement south <- secondElement west <- sum } close(south) close(west) } func main() { a := [][]float64{{ 1, 2, 3, 4}, { 5, 6, 7, 8}} b := [][]float64{{ 1, 2}, { 5, 6}, { 9, 10}, {13, 14}} c := [][]float64{{0, 0}, {0, 0}} end = make(chan bool) nra := len(a) nca := len(a[0]) ns := make([][]chan float64, nra+1) // canales norte sur, matrix de 4x3 for i := range ns { ns[i] = make([]chan float64, nca) for j := range ns[i] { ns[i][j] = make(chan float64) } } ew := make([][]chan float64, nra) // canales easte oeste, matrix de 3x4 for i := range ew { ew[i] = make([]chan float64, nca+1) for j := range ew[i] { ew[i][j] = make(chan float64) } } for i := 0; i < nra; i++ { go zero(nra, ew[i][nca]) go result(c, i, ew[i][0]) } for i := 0; i < nca; i++ { go source(b[i], ns[0][i]) go sink(ns[nra][i]) } for i := 0; i < nra; i++ { for j := 0; j < nca; j++ { go multiplier(a[i][j], ns[i][j], ew[i][j+1], ns[i+1][j], ew[i][j]) } } for i := 0; i < nra; i++ { <-end } fmt.Println(c) } # + id="fCRUsCky5vAx" outputId="4c88123d-2377-40fb-cacd-a943aa0d8da2" colab={"base_uri": "https://localhost:8080/", "height": 34} # !go run 3.go # + id="SW5jbJx_5wIe" outputId="fa4ebf7d-13dc-4e5e-a8d7-4089b949cd73" colab={"base_uri": "https://localhost:8080/", "height": 50} import numpy as np a = np.array([[ 1, 2, 3, 4], [ 5, 6, 7, 8]]) b = np.array([[ 1, 2], [ 5, 6], [ 9, 10], [13, 14]]) a.dot(b) # + 
id="C2l_kT3r6jkd"
20202/pcd_20202_0902_conway.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

from sklearn.datasets import load_diabetes
import numpy as np
import pandas as pd

# Load the diabetes toy dataset and assemble features + target into one frame.
diabetes = load_diabetes()
dfX_diabetes = pd.DataFrame(diabetes.data, columns=["X%d" % (i+1) for i in range(np.shape(diabetes.data)[1])])
dfy_diabetes = pd.DataFrame(diabetes.target, columns=["target"])
df_diabetes0 = pd.concat([dfX_diabetes, dfy_diabetes], axis=1)
df_diabetes0.tail()

from sklearn.linear_model import LinearRegression
# Fit a plain OLS regression on the full dataset and inspect the coefficients.
model_diabetes = LinearRegression().fit(diabetes.data, diabetes.target)
print(model_diabetes.coef_)

import matplotlib.pyplot as plt
# Scatter the true targets against the model's in-sample predictions.
predictions = model_diabetes.predict(diabetes.data)
plt.scatter(diabetes.target, predictions)
plt.show()

# +
def preprocess(phonenumber):
    """Normalize a Korean spelled-out phone number to plain digits.

    Replaces Hangul digit words with digits and strips dashes and spaces.
    """
    phonenumber_process_dict = {
        "공": 0,
        "영": 0,
        "일": 1,
        "이": 2,
        "삼": 3,
        "사": 4,
        "오": 5,
        "육": 6,
        "칠": 7,
        "팔": 8,
        "구": 9,
        "-" : "",
        " " : "",
    }
    for key, value in phonenumber_process_dict.items():
        phonenumber = phonenumber.replace(key, str(value))
    return phonenumber

preprocess("공일공2220-57삼육")
# -

class Student():
    """Toy class demonstrating constructors and instance methods."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
        print("student {name}({age}) is born".format(name = self.name, age = self.age))

    # BUG FIX: an instance method must take self as its first parameter;
    # without it, a.introduce() raised TypeError.
    def introduce(self):
        print("hi , i am {name} i am {age} years old.".format(name = self.name,age = self.age,))

# BUG FIX: the original first called Student() with no arguments, which raises
# TypeError because __init__ requires name and age; construct with the
# arguments directly instead of assigning attributes afterwards.
a = Student("jy", 26)
# BUG FIX: introduce was referenced but never called (missing parentheses).
a.introduce()
Practice/04-19.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Creating pandapipes Networks # This tutorial will introduce the user into the pandapipes datastructure and how to create networks through the pandapipes API. The following minimal example contains the most common elements that are supported by the pandapipes format. # # <img src="pics/simple_network-1.png"> # # The datastructure of the pandapipes framework is based on the Python library pandas. A pandapipes network consist of separate tables for each component type that is used in the network and each table holds the elements of its component type. Each row is indexed and represents a single element. The parameters are organized in columns. # # By executing the follwing code cells you generate the different component tables. You can find detailed descriptions about each parameter in the pandapipes documentation under bulletpoint "Datastructure and Components". # ### Empty Network # First, we import pandapipes and create an empty network: # We have state the name of the fluid for the network. In pandapipes, some fluids are already pre-defined, e.g. <span style="color:green">hgas</span>, <span style="color:green">lgas</span> (high and low caloric natural gas), <span style="color:green">water</span>, and <span style="color:green">air</span>, but you can also create your own fluid if you prefer (tutorial in preparation). # # In this example, we will create a medium pressure gas network and choose <span style="color:green">lgas</span> from the predefined fluids in pandapipes. 
try: # import pandapipes import pandapipes as pp except ImportError: # add pandapipes to system path, if it is not found import sys sys.path.insert(0, "..") import pandapipes as pp net = pp.create_empty_network(fluid="lgas") # create an empty network # This network does not hold any elements yet. It serves as a container for the component tables and network specific parameters. net # We will now use the <span style="color:blue">create</span> functions to create elements. They will be automatically added to the respective tables. # ### Junctions # <img src="pics/simple_network-junc.png"> # # We now create 6 junctions with an intital pressure for the pipeflow calculation of 1.0 bar. For improved readability, we add names (optional). junction1 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Connection to External Grid") junction2 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Junction 2") junction3 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Junction 3") junction4 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Junction 4") junction5 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Junction 5") junction6 = pp.create_junction(net, pn_bar=1.0, tfluid_k=293.15, name="Junction 6") # The junctions are now in the net and can be called. Some parameters were filled with default values. net.junction # show junction table # All create functions return the pandapipes index of the element that was created, for example the variable junction1 is now equal to the index of the junction with the name "Connection to External Grid" (which is 0): # junction1 net.junction.loc[junction1] # We use these variables for creating bus and branch elements in the following. # ### External Grid # <img src="pics/simple_network-ext_grid.png"> # # We now create a medium pressure external grid connection that serves as slack node for the pipe flow calculation. 
The pressure is set to 1.1 bar: # + medium_pressure_grid = pp.create_ext_grid(net, junction=junction1, p_bar=1.1, t_k=293.15, name="Grid Connection") net.ext_grid # show external grid table # - # ### Pipes # The network includes 5 pipes between two junctions each. The junctions and pipes lengths are defined in the network diagram: # # <img src="pics/simple_network-pipes.png"> # # For all pipes we want a diameter of 300 mm. pipe1 = pp.create_pipe_from_parameters(net, from_junction=junction1, to_junction=junction2, length_km=10, diameter_m=0.3, name="Pipe 1") pipe2 = pp.create_pipe_from_parameters(net, from_junction=junction2, to_junction=junction3, length_km=2, diameter_m=0.3, name="Pipe 2") pipe3 = pp.create_pipe_from_parameters(net, from_junction=junction2, to_junction=junction4, length_km=2.5, diameter_m=0.3, name="Pipe 3") pipe4 = pp.create_pipe_from_parameters(net, from_junction=junction3, to_junction=junction5, length_km=1, diameter_m=0.3, name="Pipe 4") pipe5 = pp.create_pipe_from_parameters(net, from_junction=junction4, to_junction=junction6, length_km=1, diameter_m=0.3, name="Pipe 5") # The full pipe table looks like this: net.pipe # show pipe table # The parameters from_junction and to_junction define the orientation of the pipe. If the fluid streams in fact from the from_junction to to_junction, the resulting fluid velocity is positive. If the fluid stream direction is opposite, the resulting velocity is negative. # ### Valve # There is one valve between Junction 5 and Juncion 6. It is open so that the gas can circulate. # # <img src="pics/simple_network-valve.png"> valve = pp.create_valve(net, from_junction=junction5, to_junction=junction6, diameter_m=0.3, opened=True) # As we see in the valve table below the default value for the loss_coefficient of the valve is zero. Also, all valves have a length of 0 meter, because we consider ideal valves. However, if needed you can change the loss_coefficient parameter. 
net.valve # show valve table # ### Sink # # <img src="pics/simple_network-sink.png"> # The sink element is used to model by default a constant consumption. We create here a gas consumption sink with a mass flow of 545 gramm per second. Sinks and sources always have to be assigned to a particular junction. The sign of the mass flow is positive. sink = pp.create_sink(net, junction=junction4, mdot_kg_per_s=0.545, name="Sink 1") net.sink # ### Source # # <img src="pics/simple_network-source.png"> # The source element is used to model a generation of heat or gas. It could be for example a biogas plant that is directly connected to the gas network or a combined heat and power plant. # In this example, we assume that a biogas plant is directly feeding into the gas grid with a constant value of 234 gramm per second. The sign of the mass flow is also positive. source = pp.create_source(net, junction=junction3, mdot_kg_per_s=0.234, name="Source 1") net.source # We are done. Now, the net includes all the elements from the picture. net # If you want to learn about how to run a pipe flow, continue with the pipe flow tutorial. (in preparation)
tutorials/Creating a simple network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import keras dir(keras) # ### Keras Basic # #### Models 序贯模型,函数式模型,layers 层,如Dense,Conv,activations;losses损失函数,optimizers,datasets from keras.models import Sequential model=Sequential() #model.add() from keras.layers import Dense #Dense() from keras import optimizers optimizers.SGD(lr=0.01,momentum=0,decay=0,nesterov=False)
keras/Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''base'': conda)' # language: python # name: python385jvsc74a57bd0c27704c5320a3c8d1c115124d48a7f290b37dfd48f24cfb20184db681f071451 # --- # + from trellocore import TrelloCore import os import json trellocore = None with open(os.path.join(os.path.abspath(''),"ENV",".env"),"r") as file: key =json.load(file) trellocore = TrelloCore(app_key=key["api_key"], token=key["admin_token"]) # - member_id = trellocore.token().get_member()['id'] boards = trellocore.member().get_boards(id=member_id, fields="id,name") print(boards) ufficio = [board['id'] for board in boards if "UFFICIO" == board['name']][0] print(ufficio) # + tags=[] board = trellocore.board(id=ufficio) #print(board_id) liste = board.get_lists(fields="id,name", filter='open') #[print(elemento) for elemento in liste] bacheche_personali = [name['id'] for name in liste if "bacheche personali" == name['name'] ] print(bacheche_personali) # - cards = trellocore.list(id=bacheche_personali[0]).get_cards() cards = [elemento['name'].split(':') for elemento in cards][1:] bacheche_personali = [{'username': elemento[0],'board_name': elemento[1] } for elemento in cards] print(bacheche_personali) # + for elemento in bacheche_personali: elemento['id'] = (trellocore.member(id=elemento['username']).get_member(fields="id")['id']) # - for elemento in bacheche_personali: if elemento['username'] == 'zaninelli': elemento['type'] = 1 elemento['token'] = key["admin_token"] else: elemento['type'] = 0 elemento['token'] = None print(elemento) for elemento in bacheche_personali: for board in boards: if board['name'] == elemento['board_name']: elemento['board_id'] = board['id'] #print(elemento) # + for elemento in bacheche_personali: board = trellocore.board(id=elemento['board_id']) liste = board.get_lists(fields="id,name", filter='open') 
elemento['list_id'] = [name['id'] for name in liste if "MANAGMEENT" == name['name'] ] print(bacheche_personali[0].keys()) # - import sqlite3 import os db_file = os.path.join(os.path.abspath(''),"ENV", "database.sqlite3") conn = sqlite3.connect(db_file) cur = conn.cursor() query = """DROP TABLE Employee;""" cur.execute(query) query = """ CREATE TABLE IF NOT EXISTS Employee ( id CHAR(24), username CHAR(50), board_id CHAR(24), board_name CHAR(50), list_id CHAR(24), token CHAR(50), type INT, PRIMARY KEY (id) ); """ cur.execute(query) for line in bacheche_personali: print(line) for line in bacheche_personali: sql = f""" INSERT INTO Employee(id, username, board_id, board_name, list_id, token, type) VALUES(?,?,?,?,?,?,?); """ value =(line['id'],line['username'],line['board_id'],line['board_name'],line['list_id'][0],line['token'],line['type']) cur.execute(sql,value) # + select = "SELECT * FROM Employee;" cur.execute(select) print(cur.fetchall()) # + pycharm={"name": "#%%\n"} var = 5 print(var)
trelloengine/esperimenti.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/m4rk00s/nlp-project/blob/master/Text_Summarization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="KqYg2lDkR6eB" colab_type="text" # # Extraction-based Text Summarization # + [markdown] id="cUCj0jkrSAc6" colab_type="text" # # Introduction # + [markdown] id="eB9u2s-iU9hU" colab_type="text" # Text summarization is an act to generate a short text that describe the text wholly. Sometimes, text summarization is called automatic summarization, because the process is automated by a software. # # There's a number of way to build a text summarization, one particular is using **SumBasic** system. Here's the algorithm: # # 1. Compute the probability of a word $w$ # $$P(w) = \frac{f(w)}{N}$$ # where $f(w)$ is the number of occurences of the word, and $N$ is the number of all words in the input. # # 2. For each sentence, $S_j$, in the input, assigns a weight equal to the average probability of the words in the sentence: # $$g(S_j) =\frac{\sum_{w_i\in S_j} P(w_i)}{|\{w_i|w_i\in S_j\}|}$$ # # 3. Pick the best scoring sentnece that contains the highest probability word. # # 4. For each word in the chosen sentence, the weight is updated: # $$p_{\text{new}}(w_i) = p_{\text{old}}(w_i)p_{\text{old}}(w_i)$$ # This word weight update indicates that the probability of a word appearing in the summary is lower than a word occuring once. # # 5. If the desired summary length has not been reached, go back to step 2 # + [markdown] id="vpsZXol1U_Y5" colab_type="text" # ## Step 1. Load the data # + [markdown] id="4qpkDbkUVJEW" colab_type="text" # First step is prepearing the data. 
Here's the explaination of the whole process: # 1. Fetch the data from the URL using `urlopen` # 2. Read the fetched data # 3. Using `BeautifulSoup`, parse the data and collect the article (usually found in `<p>` tag) # + id="mFmo-Z6HVLGi" colab_type="code" outputId="19c06046-3e1d-4e56-e0ee-404b15d4464b" colab={"base_uri": "https://localhost:8080/", "height": 52} import bs4 import urllib.request from nltk.tokenize import sent_tokenize # only when needed import nltk nltk.download('punkt') # Fetch the content from the URL url = 'https://www.theverge.com/circuitbreaker/2019/9/18/20868935/google-pixel-4-xl-rumors-leaks-specs-details-colors-cameras-soli' fetched_data = urllib.request.urlopen(url) article_read = fetched_data.read() # Parsing the URL content and storing in a variable soup = bs4.BeautifulSoup(article_read, 'html.parser') article_content = [] for element in soup.find_all('p'): article_content.append(element.text) article_content = ' '.join(article_content) article_content = sent_tokenize(article_content) # + [markdown] id="eAUdHlBFVMV4" colab_type="text" # Below are the first 5 sentences from the article. # + id="LpaPlVY6VRhc" colab_type="code" outputId="dab7b272-a726-41d6-bf53-6c72a188e09a" colab={"base_uri": "https://localhost:8080/", "height": 105} article_content[:5] # + [markdown] id="DzfYxE28VdMx" colab_type="text" # ## Step 2. Preprocessing the data # + [markdown] id="utAlgPGpVf2t" colab_type="text" # Next, we will create a frequency table, but first we need a cleaned version of the article content (which mean we have to lemmatize the content and remove the stop words). 
# + id="7LInkmVxVmB5" colab_type="code" outputId="2cc13067-630e-4a87-dcaf-8648a52ea05a" colab={"base_uri": "https://localhost:8080/", "height": 175} import string # only when needed nltk.download('stopwords') nltk.download('wordnet') clean_data = [] def preprocessing(text): lemmatizer = nltk.stem.WordNetLemmatizer() stopwords = set(nltk.corpus.stopwords.words('english')) text = text.lower() result = [] for token in nltk.word_tokenize(text): root = lemmatizer.lemmatize(token) if root in string.punctuation: continue if root in stopwords: continue result.append(root) return ' '.join(result) for sent in article_content: clean_data.append(preprocessing(sent)) clean_data[:5] # + [markdown] id="3JCb4q1nVo8b" colab_type="text" # After cleaning the data, next step we can make a frequency table. # + id="VgaP8f4cV-z7" colab_type="code" outputId="d4db5c56-9999-495d-955f-b73f7a476a3f" colab={"base_uri": "https://localhost:8080/", "height": 52} from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer() vectors = vectorizer.fit_transform(clean_data) vectors # + [markdown] id="B2hczkBKWBYq" colab_type="text" # As you can see size of the matrix is $121\times650$, where each rows represent each sentences and the columns represent the number of word occurance. Below are the top 5 words with the highest frequency. # + id="fYeSSTnqWEce" colab_type="code" outputId="27409fd5-47cf-465f-ffa4-afdf6aa06d98" colab={"base_uri": "https://localhost:8080/", "height": 105} import numpy as np res = np.sum(vectors.toarray(), axis=0) vocab = vectorizer.vocabulary_ freq_table = dict() for word, ix in vocab.items(): freq_table[word] = res[ix] N = len(freq_table.values()) for word in freq_table: freq_table[word] /= N sorted(freq_table.items(), key=lambda x: x[1], reverse=True)[:5] # + [markdown] id="AWlWVhUJWIcw" colab_type="text" # As we can expect, the article is about the Pixel 4, a new phone from Google, so the result is suitable with the article. 
# + [markdown] id="WDtjqXgnWxVh" colab_type="text" # ## Step 3. Finding the weighted frequencies of the sentences # + [markdown] id="tPIfL5k8Xd71" colab_type="text" # Now, we already have the tokenize version of the article and the frequency table for each token. Pretty much can now begin to implement the algorithms! # + id="X_DRIAVsX_ec" colab_type="code" outputId="74048e71-35ae-43ff-ccab-5072379bb083" colab={"base_uri": "https://localhost:8080/", "height": 105} from nltk.tokenize import word_tokenize sent_weight = dict() for ix, sent in enumerate(clean_data): list_word = word_tokenize(sent) g_sent = 0 for word in list_word: if word in freq_table: g_sent += freq_table[word] sent_weight[ix] = g_sent / len(list_word) top5 = sorted(sent_weight.items(), key=lambda x: x[1], reverse=True)[:5] top5 # + [markdown] id="Bx6r1wKCbjs8" colab_type="text" # ## Step 4. Getting the summary # + [markdown] id="lqv1Gna2ZKUs" colab_type="text" # It's turn out the 55th sentence is the best pick to summarize the article. # + id="RN3aJM_ybOqG" colab_type="code" outputId="3440997a-8d05-4bd5-e189-6170f6da45a1" colab={"base_uri": "https://localhost:8080/", "height": 34} article_content[54] # + [markdown] id="jM6UB2TFbQM8" colab_type="text" # But for sake of curiosty, let's looks the top 5 sentence! # + id="d5zlJ9q-btF-" colab_type="code" outputId="e0791d1e-6064-44b5-e31e-61fc9a4120fd" colab={"base_uri": "https://localhost:8080/", "height": 105} for ix, _ in top5: print(article_content[ix]) # + [markdown] id="6dWuwnBGcs7t" colab_type="text" # ## Conclusion # + [markdown] id="nvLid7gpb5cO" colab_type="text" # I've to admit that the result is good so far! The article is about the leaked-information on Pixel 4. It's turn out that the writer is not quite sure if he know much about the specs despite having those information. Of course, we have to look at the article by ourselves if we want to make sure if this summary is satisfying. 
But I leave the homework for you, the reader, to comment about those results. # + [markdown] id="ae_tU7N_cgLb" colab_type="text" # ## Reference # # This project is nothing without these awesome sources: # 1. *Text Summarization Techniques: A Brief Survey* - https://arxiv.org/pdf/1707.02268 # 2. *Beyond SumBasic: Task-Focused Summarization with Sentence Simplification and Lexical Expansion* - https://www.cis.upenn.edu/~nenkova/papers/ipm.pdf # 3. *Applied Text Analysis with Python* - https://learning.oreilly.com/library/view/applied-text-analysis/9781491963036/ # 4. *Natural Language Processing with Python* - http://www.nltk.org/book/ # + id="FymkC9Hcc7xf" colab_type="code" colab={}
Text Summarization/sumbasic.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 第4章 方差掉期:复制的一课 # # 本章的通过期权复制方差掉期的损益的方法相当有技巧性,还展示了离散的期权分段线性复制的可实操的方法和分析。关于波动率掉期和方差掉期的复制思想值得深入学习理解。 # # 利用本章介绍的技术,可以获得一个波动率掉期头寸(甚至在P&L上更优)。可以和VIX衍生品套利。 # ## 期权波动率敏感性 # # BSM模型 # $$\frac{\partial C}{\partial t}+rS\frac{\partial C}{\partial S}+\frac{1}{2}\sigma^2S^2\frac{\partial^2C}{\partial S^2}=rC$$ # 欧式看涨期权定价: # $$C(S,K,\tau,\sigma,r)=SN(d_1)+Ke^{-r\tau}N(d_2)$$ # $$d_{1,2}=\frac{\ln\left(\frac{S}{K}\right)+\left(r\pm\frac{\sigma^2}{2}\right)\tau}{\sigma\sqrt{\tau}}$$ # $$N(z)=\frac{1}{\sqrt{2\pi}}\int_{-\infty}^xe^{-\frac{1}{2}y^2}\mathrm{d}y$$ # 将期权在到期日之前的总波动率定义为$v$,则$v=\sigma\sqrt{\tau}$。为了便于分析,假设有效期内无风险利率为0.于是可得 # $$C(S,K,v)=SN(d_1)-KN(d_2)$$ # $$d_{1,2}=\frac{1}{v}\ln\left(\frac{S}{K}\right)\pm\frac{v}{2}$$ # 定义期权对于波动率的敏感性: # $$V=\frac{\partial C}{\partial\sigma}=\frac{S\sqrt{\tau}}{\sqrt{2\pi}}e^{-\frac{1}{2}d_1^2}$$ # $$\kappa=\frac{\partial C}{\partial\sigma^2}=\frac{S\sqrt{\tau}}{2\sigma\sqrt{2\pi}}e^{-\frac{1}{2}d_1^2}$$ # $V$称为vega, $\kappa$也就是kappa,称为方差的vega。 # # 对于任意一个期权而言,当标的股票价格接近行权价格时,其方差vega就达到峰值。 # > $$\frac{\partial\kappa}{\partial S}=\frac{\sqrt{\tau}}{2\sigma\sqrt{2\pi}}e^{-\frac{1}{2}d_1^2}\left(\frac{1}{2}-\frac{1}{v^2}\ln\left(\frac{S}{K}\right)\right)$$ # > 为找到当$S=S^*$时,$\kappa$最大,须使此时$\frac{\partial\kappa}{\partial S}=0$,即: # > $$\frac{1}{2}-\frac{1}{v^2}\ln\left(\frac{S^*}{K}\right)=0$$ # > 因此有: # > $$S^*=Ke^{\frac{1}{2}v^2}$$ # > 对一般的波动率和到期期限而言,$v^2$非常接近于0,即$S^*$略大于$K$。 # ## 波动率和方差掉期 # # 一个普通期权对波动率或者方差的敞口是一个与标的股票价格相关的峰值函数。 # # 波动率掉期合约的价值就是合约有效期内的实际波动率$\sigma_R$,和之前双方约定的交割波动率$\sigma_K$的差。波动率掉期合约就是: # $$\pi=N(\sigma_R-\sigma_K)$$ # 其中$N$代表名义价值,通常指波动率掉期的名义vega值。 # # 与此相似,一份方差掉期就是针对实际方差的远期合约,在到期日,其价值等于: # $$\pi=N(\sigma_R^2-\sigma_K^2)$$ # 方差合约的名义价值$N$,通常也叫做名义方差。 # # 
如果$(\sigma_R-\sigma_K)$的值很小,并且保持一阶值,则可以用波动率掉期的价格近似算出方差掉期的价格: # $$\sigma_R^2-\sigma_K^2\approx2\sigma_K(\sigma_R-\sigma_K)$$ # # 我们会发现,方差(而不是波动率)才是复制策略用的最多的参数。 # ## 复制波动率掉期 # # 通常,掉期合约的初始价格为0。虽然期权交易员习惯按照波动率来思考,但实际上方差掉期更容易复制并进行对冲。 # # 方差掉期的到期损益总是大于等于波动率掉期。**理论上说,可以通过交易方差掉期动态复制一个波动率掉期。**尽管方差掉期的流动性较差,可能会使这个策略产生一些问题。 # ## 在BSM模型环境下,用期权复制一个方差掉期 # # $$利润=\frac{1}{2}\Gamma S^2(\sigma_R^2-\Sigma^2)\mathrm{d}t$$ # 期权随着时间或者$S$的变化,$\Gamma S^2$也会变化。由于期权流动性差,我们很难在$\Gamma S^2$低的时候加仓;在$\Gamma S^2$高的时候减仓。即使在最好的市场环境中,动态对冲很有挑战性,而且用流动性差的工具进行动态对冲非常困难,在实际市场中也非常贵。所以,**静态复制更有吸引力**。 # # 我们可以**用多个普通期权构建一个组合**,使组合的$\Gamma$等于$1/S^2$。这个组合对方差的敞口将不受股票价格的影响。可以将多个期权组合在一起,得到一个稳定的$\kappa$值,使其不受$S$的影响。**这样的组合就是纯粹的对方差下赌注。** # # 可以证明(证明略),构建这个组合所需要的普通期权合约数量,与期权行权价格的平方成倒数关系。**一个普通期权的连续密度函数,如果其权重下降$1/K^2$,那么它对于方差的敏感性就与股票价格无关,因此可以复制一个方差掉期。**在实践中,可以在一定的股票价格范围内构建一个对方差敏感性相对稳定的组合。 # ## 权重为$1/K^2$的普通期权组合的对数损益 # # $$\pi(S,S^*,v)=\int_0^{S^*}\frac{1}{K^2}P(S,K,v)\mathrm{d}K+\int_{S^*}^\infty\frac{1}{K^2}C(S,K,v)\mathrm{d}K$$ # $$ # \begin{align*} # \pi(S_T,S^*,0)&=\int_0^{S^*}\frac{1}{K^2}P(S,K,0)\mathrm{d}K+\int_{S^*}^\infty\frac{1}{K^2}C(S,K,0)\mathrm{d}K \\ # &=\int_{S^*}^{S^T}\frac{1}{K^2}(S_T-K)\mathrm{d}K=\left(\frac{S_T-S^*}{S^*}\right)-\ln\left(\frac{S_T}{S^*}\right) # \end{align*} # $$ # 可以用BSM等式计算这些期权**到期前**的价值,假设无风险利率为0,因此有: # $$\pi(S,S^*,v)=\left(\frac{S-S^*}{S^*}\right)-\ln\left(\frac{S}{S^*}\right)+\frac{1}{2}v^2$$ # ## 在布莱克-斯科尔斯-默顿条件下计算对数合约的价值 # # 对数合约$L$在到期日$T$的损益可以表达为: # $$L(S,S^*)=\ln\left(\frac{S_T}{S^*}\right)$$ # 其中$S_T$是股票在$T$时的价格,$S^*$是固定的期权价格。 # # 对数合约也可以delta对冲,得到方程: # $$\frac{\partial L}{\partial t}+\frac{1}{2}\sigma^2S^2\frac{\partial^2 L}{\partial S^2}=0$$ # 解得: # $$L(S,S^*,t,T)=\ln\left(\frac{S}{S^*}\right)-\frac{1}{2}\sigma^2(T-t)$$ # 一份对数合约空头: # $$-L(S,S^*,t,T)=-\ln\left(\frac{S}{S^*}\right)+\frac{1}{2}\sigma^2(T-t)=-\ln\left(\frac{S}{S^*}\right)+\frac{1}{2}v^2$$ # # # **为了复制一个方差掉期,而由权重为$1/K^2$的看跌期权和看涨期权的组合,其到期损益的情况就等于一份行权价为$S^*$的对数合约空头,加上$1/S^*$份交割价为$S^*$的远期合约多头。** # # 对数合约的一些性质: 
# * 一份空头对数合约的delta为$-\partial L/\partial S=-1/S$,进行delta对冲只需要持有$1/S$份标的资产,也就是1美元的资产即可。 # * $-L(S,S^*,t,T)$的$\Gamma$是$1/S^2$。与普通期权不同,对数合约的$\Gamma S^2$是一个固定值,不受股票价格或者时间的影响。所以**对数合约是一个纯粹基于波动率的工具。远期合约(加上这个对数合约)可以消除对于股票价格的任何敞口。** # * $-L(S,S^*,t,T)$对方差的敏感性$\kappa$值是$(T-t)/2$。在合约起始日,也就是$t=0$,就有$\kappa=T/2$。 # # 若使组合$\pi$中的复制合约同时乘以$2/T$,初始$\kappa$值就等于1.新组合的价值为: # $$\pi(S,S^*,t,T)=\frac{2}{T}\left[\left(\frac{S-S^*}{S^*}\right)-\ln{\left(\frac{S}{S^*}\right)}\right]+\frac{T-t}{T}\sigma^2$$ # # 另合约初始时股票价格为$S_0$,设$S^*$等于$S_0$,那么初始时就有$S=S_0=S^*$,于是: # $$\pi(S_0,S_0,0,T)=\sigma^2$$ # # 在到期日,这个组合的损益情况为: # $$\pi(S_T,S_0,T,T)=\frac{2}{T}\left[\left(\frac{S_T-S_0}{S_0}\right)-\ln{\left(\frac{S_T}{S_0}\right)}\right]$$
volatility-smile/.ipynb_checkpoints/chapter-4-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Part I: Decision trees
#
#
# Here, we will use decision trees for classification of Iris species (Setosa, Versicolor, Virginica). Use random_state=0 for splitting and building all models.
#
# #### 1) Fit decision tree with maximum depth (max_depth) of 2 and the default gini index for building the tree. Find the classification accuracy. (3pts)
#
#
# - To visualize the tree (optional), first, import the graphviz package from terminal using the following:
#
# brew install graphviz
#
# OR
#
# #conda install -c anaconda graphviz
# #conda install -c anaconda python-graphviz
#
# Then, we can use the package to visulaize the decision tree as follows:
#
# from sklearn.tree import export_graphviz
# import graphviz
#
# dot_data=export_graphviz(FittedTreeModel,class_names=iris_dataset.target_names, feature_names=iris_dataset.feature_names,out_file=None)
#
# graph = graphviz.Source(dot_data)
# graph
#

# +
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import export_graphviz
import graphviz

# Load Iris and hold out a test split with the required seed.
iris_dataset = load_iris()
X_train, X_test, Y_train, Y_test = train_test_split(iris_dataset['data'],
                                                    iris_dataset['target'],
                                                    random_state=0)

# Depth-2 tree with the (default) Gini impurity criterion.
tree_clf = DecisionTreeClassifier(criterion='gini', max_depth=2)
tree_clf.fit(X_train, Y_train)
print('Accuracy: ', tree_clf.score(X_test, Y_test))

# Render the fitted tree with graphviz for inspection.
tree_dot = export_graphviz(tree_clf,
                           class_names=iris_dataset.target_names,
                           feature_names=iris_dataset.feature_names,
                           out_file=None)
tree_graph = graphviz.Source(tree_dot)
tree_graph
# -

# #### 2) Use random forests to classify the Iris species.
# The random forests combines 4 decision trees, each of maximum depth 2 and maximum number of features considered at each split is 2. What is the model accuracy? (3pts)
#
#

forestModel = RandomForestClassifier(n_estimators=4, max_features=2, max_depth=2, random_state=0)
forestModel.fit(X_train, Y_train)
print('Accuracy', forestModel.score(X_test, Y_test))

# #### 3) Use AdaBoost with 4 decision tree models to perform the classification of the Iris species. What is the model accuracy? Comment on results. (3pts)

# Fix: the assignment header says "Use random_state=0 for splitting and
# building all models", but the original omitted random_state here, so the
# AdaBoost result was not pinned to the required seed. Added for
# reproducibility, consistent with the other models above.
BoostModel = AdaBoostClassifier(n_estimators=4, random_state=0)
BoostModel.fit(X_train, Y_train)
print('Accuracy', BoostModel.score(X_test, Y_test))

# **Comment:**
#
# Compare all these 3 models, the AdaBoost model has the highest accuracy and the Decision tree model has the lowest accuracy.
#
# Overall, combining trees have higher accuracy in this case.

# ## Part II: Neural networks

# #### Apply Neural networks (multilayer perceptron) to classify the Iris species, Build a model that has two hidden layers, the first layer has 10 neurons and second layer has 5 neurons. Use 'tanh' activation function, and set the regularization parameter alpha=0.5. Scale the feautures with MinMaxScaler. Try the following settings (a)-(c) and report the accuracy, then comment on the results.
#
# a) Use gradient descent to solve the optimization problem (i.e. get the weights), and choose random_state=0 (which corresponds to a particular initialization of weight values), and set max_iter=5000. Print the test accuracy. (3pts)
#
# b) Repeat (a) above but with a model that uses random_state=10 to initialize the weights. Print the test accuracy. (2pts)
#
#
# c) Repeat (b) but with model that use L-BFGS (a numerical quasi-Newton method of optimization) instead of stochastic gradient descent to find the weights.
# Print the test accuracy (3pts)
#
# d) Comment on results (3pts)

# +
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler
from sklearn.neural_network import MLPClassifier

# Rescale features to [0, 1]; the scaler is fitted on the training split only.
scaler = MinMaxScaler().fit(X_train)
X_train_transformed = scaler.transform(X_train)
X_test_transformed = scaler.transform(X_test)

# The three requested settings differ only in solver and weight seed, so run
# them through one loop: (label, solver, random_state). Fit order and
# printed output are identical to running the three cells separately.
for label, solver, seed in (('(a)', 'sgd', 0), ('(b)', 'sgd', 10), ('(c)', 'lbfgs', 10)):
    MLPmodel = MLPClassifier(solver=solver, activation='tanh', random_state=seed,
                             hidden_layer_sizes=[10, 5], alpha=0.5, max_iter=5000)
    MLPmodel.fit(X_train_transformed, Y_train)
    print(label, 'Accuracy:', MLPmodel.score(X_test_transformed, Y_test))
# -

# **Comment:**
#
# 1. The random state affects the result to some extent.
#
# 2. The model that use L-BFGS has a better performance than the model which uses stochastic gradient descent in this case.
Exercise8_DT_NN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Abstract
# - Utiliser un réseau RNN pour prédire les quelques n+ièmes pas des cours de bourses de sociétés du SP500.
# - Qualifier et quantifier les écarts.
# - En déduire quels sont les sociétés dont les cours sont les plus "prévisibles" ou à défauts ceux dont les cours sont le moins erratiques.

# ## Bibliothèques

# +
# Générique data project
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go

# Pour la manipulation de fichiers
import os
import glob
from pathlib import Path

# Pour RNN
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.keras.layers import Dense, RNN, GRUCell, InputLayer, Embedding, Dropout

# MISC
import warnings
warnings.filterwarnings("ignore")
import swifter
import random
# -

# ## Set up

df = pd.read_csv('sp500_stocks.csv')
df = df.dropna()

# ## Paramètres

sequence_len = 100
predict_len = 5
train_prop = 0.8
batch_size = 32
epok = 20
cwd = os.getcwd()
path = cwd

# ## Fonction de généralisation

# On applique la démarche pour chaque cours et on stocke les valeurs dans une dataframe

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# + code_folding=[1]
def evalue(x):
    """Train a small GRU forecaster on the closing prices of ticker *x*.

    Returns (x_test, mae, loaded_model): the standardised test series, the
    best checkpoint's test MAE, and the reloaded best model.
    """
    DF = df[df['Symbol'] == x]
    dataset = np.array(DF.Close)
    k = int(len(dataset) * train_prop)
    x_train = dataset[:k]
    x_test = dataset[k:]
    # Standardise with *training* statistics only (no test leakage).
    mean = x_train.mean()
    std = x_train.std()
    x_train = (x_train - mean) / std
    x_test = (x_test - mean) / std
    x_train = x_train.reshape(-1, 1)
    x_test = x_test.reshape(-1, 1)
    train_generator = TimeseriesGenerator(x_train, x_train, length=sequence_len, batch_size=batch_size)
    test_generator = TimeseriesGenerator(x_test, x_test, length=sequence_len, batch_size=batch_size)
    model = keras.models.Sequential()
    model.add(keras.layers.GRU(64, return_sequences=False, input_shape=(sequence_len, 1), activation='relu'))
    model.add(Dropout(0.3))
    model.add(keras.layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    # Checkpoint the best (lowest validation loss) weights, then reload them
    # so the returned model is the best epoch, not the last one.
    bestmodel_callback = tf.keras.callbacks.ModelCheckpoint(filepath=path, verbose=0, save_best_only=True)
    training_history = model.fit(train_generator, epochs=epok, validation_data=test_generator,
                                 verbose=0, callbacks=[bestmodel_callback])
    loaded_model = tf.keras.models.load_model(cwd)
    loss, mae = loaded_model.evaluate(test_generator)
    return x_test, mae, loaded_model


# + code_folding=[0]
def plot_last_seq(x_test, model):
    """Plot the last observed sequence plus the one-step-ahead prediction."""
    # On sélectionne la dernière séquence
    # NOTE(review): this slice has length sequence_len - 1 (the -1 drops the
    # last point) while the model was built for sequence_len-long inputs;
    # the GRU accepts variable lengths, but confirm the off-by-one is intended.
    sequence = x_test[-sequence_len:-1]
    # On définit la séquence réelle à observer
    sequence_true = x_test[-sequence_len:]
    # On prédit la valeur suivante de sequence
    prediction = model.predict(np.array([sequence]))
    # On affiche
    plt.plot(np.concatenate([sequence, prediction]), label='Predit')
    plt.plot(sequence_true, label='Vrai')
    plt.legend()


# + code_folding=[0]
def predicat(x_test, model):
    """Back-test: predict each of the last `step` points from real history."""
    Predicat = []
    step = 4
    iteration = list(reversed(range(step)))
    for i in iteration:
        sequence = x_test[-sequence_len - i:-i - 1]
        prediction = model.predict(np.array([sequence]))
        Predicat.append(prediction)
    # np.concatenate accepts the whole list directly; the original
    # hard-coded Predicat[0..3], which would silently break for step != 4.
    vue = np.concatenate(Predicat)
    plt.plot(np.concatenate([x_test[-sequence_len:-step], vue]), label='Prédit')
    plt.plot(x_test[-sequence_len:], label='vrai')
    plt.legend()


# + code_folding=[0]
def boule_cristal(x_test, model):
    """Forecast `step` points ahead by feeding predictions back as inputs."""
    Predicat = []
    step = 4
    iteration = list(reversed(range(step)))
    x_dessein = x_test.copy()
    for i in iteration:
        sequence = x_dessein[-sequence_len:]
        prediction = model.predict(np.array([sequence]))
        Predicat.append(prediction)
        # Append the prediction so the next step conditions on it.
        x_dessein = np.concatenate([x_dessein, Predicat[-1]])
    # Same generalisation as in predicat(): concatenate the whole list.
    vue = np.concatenate(Predicat)
    plt.plot(np.concatenate([x_test[-sequence_len:], vue]), label='Prédit')
    plt.plot(x_test[-sequence_len:], label='vrai')
    plt.legend()
# -

# ## Titre à suivre

bourse = ['DXC', 'VTRS', 'NWL', 'BKR', 'MRO', 'INCY', 'WMB']
a = len(bourse)

# + code_folding=[]
plt.figure(figsize=(15, 15))
j = 1
for titre in bourse:
    x_test, mae, model = evalue(titre)
    plt.subplot(a, 3, j)
    plot_last_seq(x_test, model)
    plt.title(f'{titre} à J+1')
    j += 1
    plt.subplot(a, 3, j)
    predicat(x_test, model)
    plt.title(f'{titre} sur J-4')
    j += 1
    plt.subplot(a, 3, j)
    boule_cristal(x_test, model)
    plt.title(f'{titre} sur J+4')
    j += 1
plt.show()
# -

# Bug fix: the original called evalue('DXC') and discarded its return value,
# then plotted with the x_test/model globals left over from the loop above
# (i.e. the *last* ticker, 'WMB'), not DXC. Capture the results explicitly.
x_test, mae, model = evalue('DXC')
boule_cristal(x_test, model)
plt.show()
III - OUTIL_BOURSE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Bubble Charts: An in-class challenge
#
# A bubble chart is a scatterplot where the size of the bubble is scaled by some value. Bubble charts were made small f famous by the late [<NAME> in one of the most watched Ted talks ever](https://www.ted.com/talks/hans_rosling_shows_the_best_stats_you_ve_ever_seen).
#
# Your challenge in class today: Make a bubble chart out of the dataset of majors by race and sex. I'm interested in gender differences by majors.
#
# I'll help you set up the data.

# Wrangling (dplyr), plotting (ggplot2) and reshaping (reshape2) toolkits.
library(dplyr)
library(ggplot2)
library(reshape2)

enrollment <- read.csv("../../Data/collegeenrollment.csv")
head(enrollment)

# Note we have data that is long -- data by race and gender. We just want it by gender. So we need to do some grouping together and later we need to make this wider. So we have to first group by College, Major and Gender and add them up.

# Collapse the race breakdown: one Total per College / Major / Gender.
majors <- enrollment %>%
    group_by(College, MajorName, Gender) %>%
    summarize(Total = sum(Count))

# Now we need to make that long data wide, so Male and Female are on the same line.

majors_bubble <- dcast(majors, College + MajorName ~ Gender)
head(majors_bubble)

# We now have enough to do a scatterplot, but what are we lacking for a bubble chart? Some kind of weighting. So let's create a couple.

# Two candidate weights: overall enrollment and the gender gap.
bubble <- majors_bubble %>%
    mutate(Total = Male + Female,
           Difference = abs(Male - Female))

# The `abs()` bits there mean give me the absolute value -- so everything is above zero, regardless of which is larger.
#
# So let's try a plot:

ggplot(bubble, aes(x = Male, y = Female, size = Difference)) +
    geom_point(alpha = 0.4, color = 'red') +
    scale_size_continuous(range = c(.1, 20)) +
    scale_colour_continuous(guide = FALSE) +
    geom_text(data = bubble, aes(label = MajorName, size = 10), check_overlap = TRUE)

# Your challenge: Make this better.
[Here's some guidance on things you can change](https://www.r-graph-gallery.com/320-the-basis-of-bubble-plot/). [Here's more](http://t-redactyl.io/blog/2016/02/creating-plots-in-r-using-ggplot2-part-6-weighted-scatterplots.html). We'll talk at the end of class.
Assignments/12_BubbleCharts/BubbleCharts.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:.conda-venv37-tf15-cpu] # language: python # name: conda-env-.conda-venv37-tf15-cpu-py # --- # # Optical RL-Gym # # ## Training the Stable Baselines agents using the DeepRMSA environment # # This file contains examples of how to train agents for the DeepRMSA environment. # # The agents used in this file come from the [Stable baselines](https://github.com/hill-a/stable-baselines) framework. # # This notebook is based upon the one available [here](https://github.com/Stable-Baselines-Team/rl-colab-notebooks/blob/master/monitor_training.ipynb). # # Before running this notebook, make sure to install Stable Baselines and the Optical RL-Gym in your Python environment. # ### General imports # + import os import gym import pickle import numpy as np from IPython.display import clear_output # %matplotlib inline # %config InlineBackend.figure_format = 'svg' # - # ### TensorFlow imports # + import tensorflow as tf # silencing tensorflow warnings import logging logging.getLogger('tensorflow').setLevel(logging.FATAL) tf.__version__ # - # ### Stable Baselines imports import stable_baselines from stable_baselines.common.callbacks import BaseCallback from stable_baselines.results_plotter import load_results, ts2xy from stable_baselines import PPO2 from stable_baselines.bench import Monitor from stable_baselines.common.policies import MlpPolicy from stable_baselines import results_plotter stable_baselines.__version__ # ### Environment imports import gym from optical_rl_gym.envs.qos_constrained_ra import MatrixObservationWithPaths # ### Define a callback function class SaveOnBestTrainingRewardCallback(BaseCallback): """ Callback for saving a model (the check is done every ``check_freq`` steps) based on the training reward (in practice, we recommend using ``EvalCallback``). 
:param check_freq: (int) :param log_dir: (str) Path to the folder where the model will be saved. It must contains the file created by the ``Monitor`` wrapper. :param verbose: (int) """ def __init__(self, check_freq: int, log_dir: str, verbose=1): super(SaveOnBestTrainingRewardCallback, self).__init__(verbose) self.check_freq = check_freq self.log_dir = log_dir self.save_path = os.path.join(log_dir, 'best_model') self.best_mean_reward = -np.inf def _init_callback(self) -> None: # Create folder if needed if self.save_path is not None: os.makedirs(self.save_path, exist_ok=True) def _on_step(self) -> bool: if self.n_calls % self.check_freq == 0: # Retrieve training reward x, y = ts2xy(load_results(self.log_dir), 'timesteps') if len(x) > 0: # Mean training reward over the last 100 episodes mean_reward = np.mean(y[-100:]) if self.verbose > 0: print("Num timesteps: {} - ".format(self.num_timesteps), end="") print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward)) # New best model, you could save the agent here if mean_reward > self.best_mean_reward: self.best_mean_reward = mean_reward # Example for saving best model if self.verbose > 0: print("Saving new best model to {}".format(self.save_path)) self.model.save(self.save_path) if self.verbose > 0: clear_output(wait=True) return True # ### Setting up the environment # # The parameters are set as in the [this work](https://doi.org/10.1364/NETWORKS.2018.NeW3F.5). 
env_args = dict(seed=10, allow_rejection=True, load=50, episode_length=100, num_spectrum_resources=16, k_paths=3, num_service_classes=2, classes_arrival_probabilities=[0.5, 0.5], classes_reward=[10., 1.]) # ### Creating the monitors and agent # + # Create log dir log_dir = "./tmp/qosconstrainedra-ppo/" os.makedirs(log_dir, exist_ok=True) callback = SaveOnBestTrainingRewardCallback(check_freq=100, log_dir=log_dir) env = gym.make('QoSConstrainedRA-v0', **env_args) # uses a matrix observation with paths env = MatrixObservationWithPaths(env) # logs will be saved in log_dir/monitor.csv # in our case, on top of the usual monitored metrics, we also monitor service blocking rate env = Monitor(env, log_dir + 'training', info_keywords=('service_blocking_rate_since_reset',)) policy_args = dict(net_arch=5*[128], act_fun=tf.nn.elu) # the neural network has four layers with 150 neurons each agent = PPO2(MlpPolicy, env, verbose=0, tensorboard_log="./tb/PPO-QoSConstrainedRA-v0/", policy_kwargs=policy_args, gamma=.95, learning_rate=10e-5) # - # ### Training the agent agent.learn(total_timesteps=100000, callback=callback) # ### Plotting the training progress results_plotter.plot_results([log_dir], 1e5, results_plotter.X_TIMESTEPS, "QoS Constrained RA PPO")
examples/stable_baselines/QoSConstrainedRA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # Typeform Data API Test # ทดสอบการใช้งาน Data API ของ Typeform form_uid = 'HQnDRM' #form_uid = 'iSEGWq' typeform_api_key = '_API_KEY_' url = 'https://api.typeform.com/v1/form/' + form_uid + '?key=' + typeform_api_key import requests response = requests.get(url) results = response.json() results questions = results.get('questions') questions responses = results.get('responses') responses print('Total Responses: ' + str(len(responses))) # เปลี่ยน rating ให้กลายเป็น integer เนื่องจากตอนที่ดึงข้อมูลจาก API ข้อมูลส่วนของ rating จะมาเป็น string answers = [each['answers'] for each in responses] for answer in answers: for each in answer: if 'rating_' in each: answer[each] = int(answer[each]) # แสดงคำตอบของแต่ละคนเพื่อเช็คว่าค่า rating ที่ได้มาเป็น integer แล้ว print(answers) print('Completion rate: ' + str(len(list(filter(lambda x: x, answers))) / len(answers))) # ### ใช้ Pandas เข้ามาช่วยจัดการข้อมูล import pandas as pd df = pd.DataFrame(answers) df df[df['list_53368385_choice'] == 'จตุจักร'] df[df['list_53368385_choice'] == 'คลองสาน'] df_questions = pd.DataFrame(questions) df_questions # ### กลุ่มคำถามทั้งหมด question_groups = list(filter(lambda x: type(x) == str, df_questions.group.unique())) question_groups # ### คะแนน "โดยรวมทุกสาขา" จากคำตอบของแต่ละกลุ่มคำถาม for each_question_group in question_groups: print('--- ' + df_questions[df_questions['id'] == each_question_group]['question'].iloc[0] + ' ---') questions_in_group = df_questions[df_questions['group'] == each_question_group][['id', 'question']] for idx, question in zip(questions_in_group.id, questions_in_group.question): print(question + ': ' + str(df[idx].mean())) print() # ### สาขาทั้งหมด branch_column_name = 'list_53368385_choice' branches = list(filter(lambda x: type(x) 
== str, df[branch_column_name].unique())) branches df[df[branch_column_name] == 'จตุจักร'] df_questions questions_in_no_group = df_questions[ df_questions['group'].isnull() & df_questions['id'].str.contains('rating_') ][['id', 'question']] questions_in_no_group df_questions[df_questions['group'] == 'group_53368461'] df_questions[df_questions['group'] == 'group_53368461'].shape[0] # ### คะแนน "แยกแต่ละสาขา" จากคำตอบของแต่ละกลุ่มคำถาม # + charts = {} for each_branch in branches: branch = [] df_branch = df[df[branch_column_name] == each_branch] for each_question_group in question_groups: chart = {'data': []} chart['id'] = each_question_group chart['label'] = df_questions[df_questions['id'] == each_question_group]['question'].iloc[0] + '...' questions_in_group = df_questions[df_questions['group'] == each_question_group][['id', 'question']] for idx, question in zip(questions_in_group.id, questions_in_group.question): chart['data'].append( [ question, df_branch[idx].mean() ] ) branch.append(chart) chart = {'data': []} chart['id'] = 'group_others' chart['label'] = 'Others' questions_in_no_group = df_questions[df_questions['group'].isnull() & df_questions['id'].str.contains('rating_')][['id', 'question']] for idx, question in zip(questions_in_no_group.id, questions_in_no_group.question): chart['data'].append( [ question, df_branch[idx].mean() ] ) branch.append(chart) charts[each_branch] = branch charts # - #for each_question_group in question_groups: # count = df_questions[df_questions['group'] == each_question_group].shape[0] for each_branch in branches: #for idx in range(count): print(charts[each_branch][2]['data']) print('------') for idx, _ in enumerate(charts): for each_branch in branches: print(charts[each_branch][idx]['id']) print('------') scores = [] for each_branch in branches: score_each_branch = [] for each in charts[each_branch][0]['data']: score_each_branch.append(each[1]) scores.append(score_each_branch) scores list(zip(*scores)) list(map(list, 
zip(*scores))) # + import numpy as np np_scores = np.array(scores) np_scores.T # - # * ส่งข้อมูลออกไปเป็น group ของ questions จะ render ได้ง่ายกว่า # * ส่ง list ของ branches ออกไปแยก a = [['xx', 8.0, 9.0], ['yy', 5.0, 7.0]] a b = list(zip(*a)) b # ถ้าจัดข้อมูลแบบตัวแปร b ได้ เราสามารถ transpose และส่งออกไป render ได้ง่าย list(zip(*b)) # หลังจากา loop แต่ละ question group แล้ว ให้ loop แต่ละ สาขา (branch) เพื่อสร้าง list ของคำถามของสาขานั้นๆ มาก่อน x = [ ('คำตอบคำถาม1-สาขา1', 'คำตอบคำถาม2-สาขา1', 'คำตอบคำถาม3-สาขา1'), ('คำตอบคำถาม1-สาขา2', 'คำตอบคำถาม2-สาขา2', 'คำตอบคำถาม3-สาขา2'), ('คำตอบคำถาม1-สาขา3', 'คำตอบคำถาม2-สาขา3', 'คำตอบคำถาม3-สาขา3'), ('คำตอบคำถาม1-สาขา4', 'คำตอบคำถาม2-สาขา4', 'คำตอบคำถาม3-สาขา4'), ] x = [('คำถาม 1', 'คำถาม 2', 'คำถาม 3'), ] + x list(map(list, zip(*x))) df_answers = df df_questions df_answers question_groups branches df_questions[df_questions['group'] == 'group_53368461'][['id', 'question']] df_answers[df_answers[branch_column_name] == 'บางนา'] # + charts = [] # for each question group # for questions in each question group filtered_questions = df_questions[df_questions['group'] == 'group_53368461'][['id', 'question']] for question_id, question in zip(filtered_questions.id, filtered_questions.question): for idx, each in enumerate(branches): if idx == 0: chart = [question] df_branch = df_answers[df_answers[branch_column_name] == each] chart.append(df_branch[question_id].mean()) charts.append(chart) charts # - filtered_questions df_branch # ### [IGNORE] ทดลองโค้ดบางอย่าง prefixes = ['list_', 'rating_', 'textfield_'] for each in questions: if any([each['id'].startswith(prefix) for prefix in prefixes]): try: print(df[each['id']]) except KeyError: print('No one answers..') form_uid = 'iSEGWq' typeform_api_key = '__API_KEY__' url = 'https://api.typeform.com/v1/forms?key=' + typeform_api_key response = requests.get(url) results = response.json() results
docs/typeform-data-api-test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plot

# MNIST pixel data: drop the label column and scale pixels to [0, 1].
train_data = pd.read_csv('train.csv')
train_data = train_data.drop('label', axis=1)
train_data = train_data.as_matrix() / 255

IMAGE_SIZE = 28
BATCH_SIZE = 512
NUM_ITERATIONS = 30000
HIDDEN_LAYERS_GEN = 128
LEARNING_RATE = 0.0001

# +
# Loading Batches
epochs_completed = 0
index_in_epoch = 0
num_examples = train_data.shape[0]

def next_batch(batch_size):
    """Return the next batch_size training rows, reshuffling the dataset
    each time an epoch is exhausted (epoch-cursor mini-batching)."""
    global train_data
    global index_in_epoch
    global epochs_completed
    start = index_in_epoch
    index_in_epoch += batch_size
    if index_in_epoch > num_examples:
        # finished epoch
        epochs_completed += 1
        # shuffle the data
        perm = np.arange(num_examples)
        np.random.shuffle(perm)
        train_data = train_data[perm]
        # start next epoch
        start = 0
        index_in_epoch = batch_size
        assert batch_size <= num_examples
    end = index_in_epoch
    return train_data[start:end]


# +
def get_sample_z(size=(1, 100)):
    """Latent noise for the generator. Default changed from a mutable list
    to an equivalent tuple (same shape, avoids the mutable-default pitfall)."""
    return np.random.normal(size=size)

def display_image(image_data):
    """Reshape a flat pixel vector and show it as a grayscale image."""
    img = image_data.reshape([IMAGE_SIZE, IMAGE_SIZE])
    plot.axis('off')
    plot.imshow(img, cmap=matplotlib.cm.binary)
    plot.show()


# +
# Model
Z_in = tf.placeholder(tf.float32, shape=[None, 100])
image_in = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE])

def generator(z):
    """Map latent noise z to a flat image with pixel values in (0, 1)."""
    with tf.variable_scope('generator'):
        h = tf.layers.dense(z, HIDDEN_LAYERS_GEN)
        # Bug fix: the original used tf.minimum(h, 0.01), which *caps* every
        # activation at the constant 0.01 instead of applying the evidently
        # intended leaky ReLU max(h, 0.01*h).
        h = tf.maximum(h, 0.01 * h)
        logits = tf.layers.dense(h, IMAGE_SIZE * IMAGE_SIZE)
        # shift tanh's (-1, 1) range onto the (0, 1) pixel range
        output = (tf.nn.tanh(logits) + tf.ones_like(logits)) / 2
        return output

def discriminator(image, reuse=False):
    """Score an image as real (1) vs generated (0)."""
    with tf.variable_scope('discriminator', reuse=reuse):
        h = tf.layers.dense(image, HIDDEN_LAYERS_GEN)
        # leaky ReLU (same fix as in generator)
        h = tf.maximum(h, 0.01 * h)
        logits = tf.layers.dense(h, 1)
        output = tf.nn.sigmoid(logits)
        return output


# +
gen_sample = generator(Z_in)
discriminator_data = discriminator(image_in)
discriminator_model = discriminator(gen_sample, reuse=True)

# +
# losses: standard GAN objectives — the discriminator maximises
# log D(x) + log(1 - D(G(z))), the generator maximises log D(G(z)).
discriminator_loss = -tf.reduce_mean(tf.log(discriminator_data) + tf.log(tf.ones_like(discriminator_model) - discriminator_model))
generator_loss = -tf.reduce_mean(tf.log(discriminator_model))

# +
# Split the trainable variables by scope so each optimizer only updates
# its own network.
all_vars = tf.trainable_variables()
generator_vars = [var for var in all_vars if var.name.startswith('generator')]
discriminator_vars = [var for var in all_vars if var.name.startswith('discriminator')]

discriminator_optimize = tf.train.AdamOptimizer(LEARNING_RATE).minimize(discriminator_loss, var_list=discriminator_vars)
generator_optimize = tf.train.AdamOptimizer(LEARNING_RATE).minimize(generator_loss, var_list=generator_vars)

# +
init = tf.global_variables_initializer()
sess = tf.InteractiveSession()
sess.run(init)
# -

# Alternate one discriminator step and one generator step per iteration,
# sampling a generated image every 1000 iterations for visual inspection.
for i in range(NUM_ITERATIONS):
    image_batch = next_batch(BATCH_SIZE)
    _ = sess.run(discriminator_optimize, feed_dict={Z_in: get_sample_z([BATCH_SIZE, 100]), image_in: image_batch})
    _ = sess.run(generator_optimize, feed_dict={Z_in: get_sample_z([BATCH_SIZE, 100])})
    disc_losses = sess.run(discriminator_loss, feed_dict={Z_in: get_sample_z([BATCH_SIZE, 100]), image_in: image_batch})
    gen_losses = sess.run(generator_loss, feed_dict={Z_in: get_sample_z([BATCH_SIZE, 100])})
    if i % 1000 == 0:
        sample = sess.run(gen_sample, feed_dict={Z_in: get_sample_z()})
        display_image(sample)
        print('Step {} => Discriminator: {} | Generator: {}'.format(i, disc_losses, gen_losses))
GAN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"] = "0" import glob from PIL import Image import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import pickle from tqdm import tqdm import pandas as pd from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import LSTM, Embedding, TimeDistributed, Dense, RepeatVector, Merge, Activation, Flatten from keras.layers.wrappers import Bidirectional from keras.applications.inception_v3 import InceptionV3 from keras.preprocessing import image import nltk import matplotlib.pyplot as plt # %matplotlib inline from collections import Counter, OrderedDict import json # - # ## 1. Load Flickr8k dataset caption_file = 'Flickr8k_text/Flickr8k.token.txt' img2captions = {} for row in open(caption_file): row = row.strip() row = row.split('\t') img = row[0][:len(row[0])-2] cap = row[1].lower() if img not in img2captions: img2captions[img] = [] img2captions[img].append(cap) img2captions['1000268201_693b08cb0e.jpg'] # + images_dir = 'Flickr8k_Dataset/' train_images_file = 'Flickr8k_text/Flickr_8k.trainImages.txt' train_imgs = [line.strip() for line in open(train_images_file)] print(len(train_imgs), train_imgs[:3]) val_images_file = 'Flickr8k_text/Flickr_8k.devImages.txt' val_imgs = [line.strip() for line in open(val_images_file)] print(len(val_imgs), val_imgs[:3]) test_images_file = 'Flickr8k_text/Flickr_8k.testImages.txt' test_imgs = [line.strip() for line in open(test_images_file)] print(len(test_imgs), test_imgs[:3]) # - img = train_imgs[0] plt.imshow(Image.open(images_dir + '/' + img)) print('\n'.join(img2captions[img])) # ## 2. 
Build vocabulary # + # example for understanding Counter counter = Counter() counter.update(["aaa", "bbb", "aaa"]) counter.update(["aaa", "ccc"]) counter.update(["ccc"]) print(len(counter)) print(counter) counts = [x for x in counter.items()] print(counts) counts.sort(key=lambda x: x[1], reverse=True) print(counts) json.dump(counts, open('counts.json', "w"), indent=2) print(counts) words = [w for w, c in counts if c >= 1] print(words) # + from collections import Counter, OrderedDict import json word_counter = Counter() n_sample = 0 maxlen = 0 for img, captions in img2captions.items(): for caption in captions: n_sample += 1 caption = caption.lower() caption = str(caption) tokens = caption.split() maxlen = max([maxlen,len(tokens)]) word_counter.update(tokens) print('number of sample = ' + str(n_sample)) print('max len = ' + str(maxlen)) word_counts = [x for x in word_counter.items()] word_counts.sort(key=lambda x: x[1], reverse=True) json.dump(word_counts, open('word_counts.json', "w"), indent=2) vocab = [w for w, c in word_counts if c >= 1] start_word = '<start>' end_word = '<end>' vocab = [start_word, end_word] + vocab print('vocabulary size = %d (<start> and <end> included)'%len(vocab)) word2idx = OrderedDict(zip(vocab,range(len(vocab)))) idx2word = OrderedDict(zip(range(len(vocab)), vocab)) json.dump(word2idx, open('word2idx.json', 'w'), indent=2) # - caption = 'I am a student .' caption = caption.lower() tokens = caption.split() print(caption) print(tokens) # ## 3. Extract features for images # We will feed these images to VGG-16 to get the encoded images. Hence we need to preprocess the images as the authors of VGG-16 did. The last layer of VGG-16 is the softmax classifier(FC layer with 1000 hidden neurons) which returns the probability of a class. This layer should be removed so as to get a feature representation of an image. We will use the last Dense layer(4096 hidden neurons) after popping the classifier layer. 
Hence the shape of the encoded image will be (1, 4096) def preprocess_input(x): x /= 255. x -= 0.5 x *= 2. return x def preprocess(image_path): img = image.load_img(image_path, target_size=(299, 299)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return x plt.imshow((1 + np.squeeze(preprocess(images_dir + '/' + train_imgs[0])))/2.0) model = InceptionV3(weights='inception_v3_weights_tf_dim_ordering_tf_kernels.h5') model.summary() # + from keras.models import Model new_input = model.input hidden_layer = model.layers[-2].output model_new = Model(new_input, hidden_layer) # - tryi = model_new.predict(preprocess(images_dir + '/' + train_imgs[0])) tryi[:10] def encode(image): image = preprocess(image) temp_enc = model_new.predict(image) temp_enc = np.reshape(temp_enc, temp_enc.shape[1]) return temp_enc encoding_train = {} for img in tqdm(train_imgs): encoding_train[img] = encode(images_dir + '/' + img) with open("encoded_images_train_inceptionV3.p", "wb") as encoded_pickle: pickle.dump(encoding_train, encoded_pickle) encoding_test = {} for img in tqdm(test_imgs): encoding_test[img] = encode(images_dir + '/' + img) with open("encoded_images_test_inceptionV3.p", "wb") as encoded_pickle: pickle.dump(encoding_test, encoded_pickle) encoding_train = pickle.load(open('encoded_images_train_inceptionV3.p', 'rb')) encoding_test = pickle.load(open('encoded_images_test_inceptionV3.p', 'rb')) # ## 4. Preprocess the captions # Adding '< start >' and '< end >' to all the captions to indicate the starting and ending of a sentence. 
# + f = open('flickr8k_train_dataset.txt', 'w') f.write("image_id\tcaptions\n") for img in train_imgs: for cap in img2captions[img]: f.write(img + "\t" + "<start> " + cap +" <end>" + "\n") f.close() # - f = open('flickr8k_val_dataset.txt', 'w') f.write("image_id\tcaptions\n") for img in val_imgs: for cap in img2captions[img]: f.write(img + "\t" + "<start> " + cap +" <end>" + "\n") f.close() f = open('flickr8k_test_dataset.txt', 'w') f.write("image_id\tcaptions\n") for img in test_imgs: for cap in img2captions[img]: f.write(img + "\t" + "<start> " + cap +" <end>" + "\n") f.close() df = pd.read_csv('flickr8k_train_dataset.txt', delimiter='\t') len(df) c = [i for i in df['captions']] len(c) imgs = [i for i in df['image_id']] a = c[-1] a, imgs[-1] for i in a.split(): print (i, "=>", word2idx[i]) samples_per_epoch = 0 for cap in df['captions']: samples_per_epoch += len(cap.split())-1 print(samples_per_epoch) # ## 5. Data generator # + max_len = 40 vocab_size = len(word2idx) def data_generator(batch_size = 128, split='train'): partial_caps = [] next_words = [] images = [] csv_file = 'flickr8k_%s_dataset.txt'%split encoding_img_feat = pickle.load(open('encoded_images_%s_inceptionV3.p'%split, 'rb')) df = pd.read_csv(csv_file, delimiter='\t') df = df.sample(frac=1) iter = df.iterrows() c = [] imgs = [] for i in range(df.shape[0]): x = next(iter) c.append(x[1][1]) imgs.append(x[1][0]) count = 0 while True: for j, text in enumerate(c): current_image = encoding_img_feat[imgs[j]] for i in range(len(text.split())-1): count+=1 partial = [word2idx[txt] for txt in text.split()[:i+1]] partial_caps.append(partial) # Initializing with zeros to create a one-hot encoding matrix # This is what we have to predict # Hence initializing it with vocab_size length n = np.zeros(vocab_size) # Setting the next word to 1 in the one-hot encoded matrix n[word2idx[text.split()[i+1]]] = 1 next_words.append(n) images.append(current_image) if count>=batch_size: next_words = np.asarray(next_words) images 
= np.asarray(images) partial_caps = sequence.pad_sequences(partial_caps, maxlen=max_len, padding='post') yield [[images, partial_caps], next_words] partial_caps = [] next_words = [] images = [] count = 0 train_set = data_generator(split='train') val_set = data_generator(split='val') test_set = data_generator(split='test') # - # image, little girl running in field # # X1, X2 (text sequence), y (word) # image < start >, little # # image < start >, little, girl # # image < start >, little, girl, running # # image < start >, little, girl, running, in # # image < start >, little, girl, running, in, field # # image < start >, little, girl, running, in, field, < end > # x = next(train_set) print(x[0][0].shape) print(x[0][1][:3]) print(np.argmax(x[1][:10], axis=1)) print(x[0][0][:10]) # ## 6. Build the model # + embedding_size = 300 image_model = Sequential([ Dense(embedding_size, input_shape=(2048,), activation='relu'), RepeatVector(1) ]) word_embedding_model = Sequential([ Embedding(vocab_size, embedding_size, input_length=max_len), TimeDistributed(Dense(embedding_size, activation='relu')) ]) final_model = Sequential([ Merge([image_model, word_embedding_model], mode='concat', concat_axis=1), LSTM(256, return_sequences=False), Dense(vocab_size), Activation('softmax') ]) from keras.optimizers import Adam, RMSprop final_model.summary() final_model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) # - batch_size = 256 steps_per_epoch = samples_per_epoch // batch_size final_model.fit_generator(train_set, steps_per_epoch=steps_per_epoch, epochs=30, verbose=1) import keras keras.__version__ final_model.save_weights('saved_model.h5') # ## Test the captioning model final_model.load_weights('saved_model.h5') def predict_captions(image): start_word = ["<start>"] e = encode(image) while True: print(start_word) par_caps = [word2idx[i] for i in start_word] par_caps = sequence.pad_sequences([par_caps], maxlen=max_len, padding='post') preds = 
final_model.predict([np.array([e]), np.array(par_caps)]) word_pred = idx2word[np.argmax(preds[0])] start_word.append(word_pred) if word_pred == "<end>" or len(start_word) > max_len: break print(start_word) return ' '.join(start_word[1:-1]) try_image = images_dir + '/' + test_imgs[0] plt.imshow(Image.open(try_image)) plt.show() print (predict_captions(try_image))
ImageCaptioning_Flickr8K/ImageCaptioning_Flickr8K.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Fibonnaci Sequence # # In this interview excercise we will begin to get a feel of having to solve a single problem multiple ways! # # ## Problem Statement # # Implement a [Fibonnaci Sequence](https://en.wikipedia.org/wiki/Fibonacci_number) in three different ways: # # * Recursively # * Dynamically (Using Memoization to store results) # * Iteratively # ___ # #### Function Output # Your function will accept a number **n** and return the **nth** number of the fibonacci sequence # ___ # Remember that a fibonacci sequence: 0,1,1,2,3,5,8,13,21,... starts off with a base case checking to see if n = 0 or 1, then it returns 1. # # Else it returns fib(n-1)+fib(n+2). # # ____ # # ## Fill Out Your Solutions Below # ### Recursively # # Solve the problem using simple recursion. def fib_rec(n): if n <=2: return int((n+1)/2) else: return fib_rec(n-1) + fib_rec(n-2) pass fib_rec(10) # ### Dynamically # # Implement the function using dynamic programming by using a cache to store results (memoization). # + # Instantiate Cache information n = 10 cache = [None] * (n + 1) print(cache) def fib_dyn(n): if cache[n] is not None: return cache[n] if n <=2: cache[n] = int((n+1)/2) else: cache[n] = fib_dyn(n-1) + fib_dyn(n-2) return cache[n] pass # - fib_dyn(10) # ### Iteratively # # Implement the solution with simple iteration. def fib_iter(n): # Set starting point a = 0 b = 1 # Follow algorithm for i in range(n): a, b = b, a + b return a fib_iter(23) # # Test Your Solution # # Run the cell below to test your solutions, simply uncomment the solution functions you wish to test! # + """ UNCOMMENT THE CODE AT THE BOTTOM OF THIS CELL TO SELECT WHICH SOLUTIONS TO TEST. THEN RUN THE CELL. 
""" from nose.tools import assert_equal class TestFib(object): def test(self,solution): assert_equal(solution(10),55) assert_equal(solution(1),1) assert_equal(solution(23),28657) print ('Passed all tests.') # UNCOMMENT FOR CORRESPONDING FUNCTION t = TestFib() t.test(fib_rec) #t.test(fib_dyn) # Note, will need to reset cache size for each test! #t.test(fib_iter) # - # # Conclusion # # Hopefully this interview question served as a good excercise in exploring recursion, dynamic programming, and iterative solutions for a single problem! Its good to work through all three because in an interview a common question may just begin with requesting a recursive solution and then checking to se if you can implement the other forms!
code/algorithms/course_udemy_1/Recursion/Recursion Interview Problems/Recursion Problems - PRACTICE/Recursion Problem 3 - Fibonacci Sequence.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Einführung in Machine Learning - Schnelldurchgang import warnings warnings.filterwarnings('ignore') # %matplotlib inline # %pylab inline import matplotlib.pylab as plt import numpy as np from distutils.version import StrictVersion # + import sklearn print(sklearn.__version__) assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1') # - # # Zuerst laden wir den Iris Datensatz und verschaffen uns einen ersten Eindruck # https://de.wikipedia.org/wiki/Portal:Statistik/Datensaetze#Iris from sklearn.datasets import load_iris iris = load_iris() print(iris.DESCR) X = iris.data y = iris.target X.shape, y.shape X[0] y[0] X_sepal_length = X[:, 0] X_sepal_width = X[:, 1] X_petal_length = X[:, 2] X_petal_width = X[:, 3] X_petal_width.shape # ## Aufteilung der Daten in Training (60%) und Test (40%) # http://scikit-learn.org/stable/modules/cross_validation.html from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42, stratify=y) X_train.shape, y_train.shape, X_test.shape, y_test.shape # ## Wir trainieren einen einfachen KNN Klassifikator und überprüfen die Ergebnisse # http://scikit-learn.org/stable/modules/neighbors.html#classification from sklearn import neighbors clf = neighbors.KNeighborsClassifier(1) clf.fit(X_train, y_train) sample_id = 32 sample_feature = X_test[sample_id] sample_label = y_test[sample_id] sample_feature sample_label clf.predict([sample_feature]) clf.predict([[6.3, 2.7, 5.5, 1.5]]) # slightly different from above, still gives 2 clf.score(X_train, y_train) clf.score(X_test, y_test) # ## Um zu versehen, was durch das Training passiert ist, zeichnen wir die Decision Boundaries ein # + # ignore this, it is just technical code # should come 
from a lib, consider it to appear magically # http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) font_size=25 def meshGrid(x_data, y_data): h = .02 # step size in the mesh x_min, x_max = x_data.min() - 1, x_data.max() + 1 y_min, y_max = y_data.min() - 1, y_data.max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) return (xx,yy) def plotPrediction(clf, x_data, y_data, x_label, y_label, colors, title="", mesh=True): xx,yy = meshGrid(x_data, y_data) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(figsize=(20,10)) if mesh: plt.pcolormesh(xx, yy, Z, cmap=cmap_light) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.scatter(x_data, y_data, c=colors, cmap=cmap_bold, s=80, marker='o') plt.xlabel(x_label, fontsize=font_size) plt.ylabel(y_label, fontsize=font_size) plt.title(title, fontsize=font_size) # - # ### Zuerst für die Sepal Features X_train_sepal_only = X_train[:, :2] X_test_sepal_only = X_test[:, :2] clf_sepal = neighbors.KNeighborsClassifier(1) clf_sepal.fit(X_train_sepal_only, y_train) plotPrediction(clf_sepal, X_train_sepal_only[:, 0], X_train_sepal_only[:, 1], 'Sepal length', 'Sepal width', y_train, mesh=False, title="Train Data for Sepal Features") # plt.savefig('ML_0201.png', bbox_inches='tight') # ### Scores sind gut für die Trainingsdaten, aber nicht so toll für Testdaten clf_sepal.score(X_train_sepal_only, y_train) clf_sepal.score(X_test_sepal_only, y_test) # ### Das sieht nach Overfittung aus, das siehst du auch beim Plotting der Deciscion Boundaries plotPrediction(clf_sepal, X_train_sepal_only[:, 0], X_train_sepal_only[:, 1], 'Sepal length', 'Sepal width', y_train, title="Highly Fragmented Decision Boundaries for 
Train Data") # plt.savefig('ML_0202.png', bbox_inches='tight') plotPrediction(clf_sepal, X_test_sepal_only[:, 0], X_test_sepal_only[:, 1], 'Sepal length', 'Sepal width', y_test, title="Same Decision Boundaries don't work well for Test Data") # plt.savefig('ML_0203.png', bbox_inches='tight') # ## Wir machen das Modell weniger komplex, allgemeiner clf_sepal_10 = neighbors.KNeighborsClassifier(10) clf_sepal_10.fit(X_train_sepal_only, y_train) clf_sepal_10.score(X_train_sepal_only, y_train) clf_sepal_10.score(X_test_sepal_only, y_test) plotPrediction(clf_sepal_10, X_train_sepal_only[:, 0], X_train_sepal_only[:, 1], 'Sepal length', 'Sepal width', y_train, title="Model too simple even for Train Data") # plt.savefig('ML_0204.png', bbox_inches='tight') # ## Mit den Sepal Features werden wir immer entweder overfitten oder underfitten # ## Wir versuchen es noch einmal mit den Petal Features X_train_petal_only = X_train[:, 2:] X_test_petal_only = X_test[:, 2:] clf_petal_10 = neighbors.KNeighborsClassifier(10) clf_petal_10.fit(X_train_petal_only, y_train) clf_petal_10.score(X_train_petal_only, y_train) clf_petal_10.score(X_test_petal_only, y_test) plotPrediction(clf_petal_10, X_train_petal_only[:, 0], X_train_petal_only[:, 1], 'Petal length', 'Petal width', y_train, title="Simple model looks good for Train Data") # plt.savefig('ML_0205.png', bbox_inches='tight') plotPrediction(clf_petal_10, X_test_petal_only[:, 0], X_test_petal_only[:, 1], 'Petal length', 'Petal width', y_test, title="Simple model looks good even for Test Data") # plt.savefig('ML_0206.png', bbox_inches='tight') # Ein deutlich besseres Ergebnis, obwohl wir wieder nur 2 Features genommen haben. Es kann also entscheident sein, welche Features man nimmt.
data/mlg/02-1-QuickStart.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 10장. 특성 선택을 사용한 차원 축소 # 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다. # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/machine-learning-with-python-cookbook/blob/master/10.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/rickiepark/machine-learning-with-python-cookbook/blob/master/10.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # </table> # ## 10.1 분산을 기준으로 수치 특성 선택하기 # + # 라이브러리를 임포트합니다. from sklearn import datasets from sklearn.feature_selection import VarianceThreshold # 예제 데이터를 로드합니다. iris = datasets.load_iris() # 특성과 타깃을 만듭니다. features = iris.data target = iris.target # 기준값을 만듭니다. thresholder = VarianceThreshold(threshold=.5) # 기준값보다 높은 특성을 선택합니다. features_high_variance = thresholder.fit_transform(features) # 선택한 특성을 확인합니다. features_high_variance[0:3] # - # 분산을 확인합니다. thresholder.variances_ # + # 라이브러리를 임포트합니다. from sklearn.preprocessing import StandardScaler # 특성 행렬을 표준화합니다. scaler = StandardScaler() features_std = scaler.fit_transform(features) # 각 특성의 분산을 계산합니다. selector = VarianceThreshold() selector.fit(features_std).variances_ # - # ## 10.2 분산을 기준으로 이진 특성 선택하기 # + # 라이브러리를 임포트합니다. from sklearn.feature_selection import VarianceThreshold # 예제 특성 행렬을 만듭니다. # 특성 0: 80%가 클래스 0 # 특성 1: 80%가 클래스 1 # 특성 2: 60%가 클래스 0, 40%는 클래스 1 features = [[0, 1, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0]] # 분산을 기준으로 선택합니다. 
thresholder = VarianceThreshold(threshold=(.75 * (1 - .75)))
thresholder.fit_transform(features)
# -

# ### Appendix

thresholder.variances_

import numpy as np
np.var(features, axis=0)

# ## 10.3 Handling highly correlated features

# +
# Import libraries.
import pandas as pd
import numpy as np

# Build a feature matrix that contains two highly correlated features
# (columns 0 and 1 track each other closely).
features = np.array([[1, 1, 1],
                     [2, 2, 0],
                     [3, 3, 1],
                     [4, 4, 0],
                     [5, 5, 1],
                     [6, 6, 0],
                     [7, 7, 1],
                     [8, 7, 0],
                     [9, 7, 1]])

# Convert the feature matrix into a DataFrame.
dataframe = pd.DataFrame(features)

# Compute the absolute correlation matrix.
corr_matrix = dataframe.corr().abs()

# Select the upper triangle of the correlation matrix.
# NOTE: the deprecated ``np.bool`` alias was removed in NumPy 1.24;
# use the builtin ``bool`` instead.
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))

# Find the indices of feature columns with a correlation greater than 0.95.
to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]

# Drop those features.
dataframe.drop(dataframe.columns[to_drop], axis=1).head(3)
# -

dataframe.corr()

upper

# ### Appendix
np.corrcoef(features, rowvar=False)

np.triu(np.ones((4, 4)), k=2)

np.tril(np.ones((4, 4)), k=0)

# ## 10.4 Removing features irrelevant to a classification task

# +
# Import libraries.
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2, f_classif

# Load the data.
iris = load_iris()
features = iris.data
target = iris.target

# Convert the features to integers (chi2 expects non-negative counts).
features = features.astype(int)

# Select the two features with the largest chi-squared statistics.
chi2_selector = SelectKBest(chi2, k=2)
features_kbest = chi2_selector.fit_transform(features, target)

# Show the result.
print("원본 특성 개수:", features.shape[1])
print("줄어든 특성 개수:", features_kbest.shape[1])

# +
# Select the two features with the highest F-values.
fvalue_selector = SelectKBest(f_classif, k=2)
features_kbest = fvalue_selector.fit_transform(features, target)

# Show the result.
print("원본 특성 개수:", features.shape[1])
print("줄어든 특성 개수:", features_kbest.shape[1])

# +
# Import the library.
from sklearn.feature_selection import SelectPercentile

# Select the top 75% of features by F-value.
fvalue_selector = SelectPercentile(f_classif, percentile=75) features_kbest = fvalue_selector.fit_transform(features, target) # 결과를 선택합니다. print("원본 특성 개수:", features.shape[1]) print("줄어든 특성 개수:", features_kbest.shape[1]) # - # ### 붙임 # 카이제곱 계산 target observed = np.sum(features.reshape(3, 50, 4), axis=1) observed expected = features.sum(axis=0) / 3 expected np.sum((observed - expected)**2 / expected, axis=0) chi2_selector.scores_ # ANOVA 계산 total_mean = np.mean(features, axis=0) total_mean class_mean = np.mean(features.reshape(3, 50, 4), axis=1) class_mean ss_between = np.sum(50 * (class_mean - total_mean)**2, axis=0) ss_between ss_total = np.sum((features - total_mean)**2, axis=0) ss_total f = (ss_between/(3-1)) / ((ss_total-ss_between)/(150-3)) f fvalue_selector.scores_ # ## 10.5 재귀적 특성 제거 # + # 라이브러리를 임포트합니다. from sklearn.datasets import make_regression from sklearn.feature_selection import RFECV from sklearn import datasets, linear_model # 특성 행렬과 타깃 벡터를 생성합니다. features, target = make_regression(n_samples = 10000, n_features = 100, n_informative = 2, random_state = 1) # 선형 회귀 모델을 만듭니다. ols = linear_model.LinearRegression() # 재귀적으로 특성을 제거합니다. rfecv = RFECV(estimator=ols, step=1, scoring="neg_mean_squared_error") rfecv.fit(features, target) rfecv.transform(features) # - # 최선의 특성 개수 rfecv.n_features_ # 선택된 특성이 표시된 불리언 마스크 rfecv.support_ # 특성의 순위: 최고(1)에서 최악(96)까지 rfecv.ranking_ # ### 붙임 # + from sklearn.feature_selection import RFE rfe = RFE(estimator=ols, n_features_to_select=3) rfe.fit(features, target) rfe.transform(features) # - np.all(rfe.support_ == rfecv.support_)
10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Singular Value Decomposition (SVD) Tutorial
# SVD is a form of matrix decomposition commonly used for dimension reduction, denoising, and several applications. Another similar matrix decomposition method is the eigen decomposition. The [key differences](https://math.stackexchange.com/questions/320220/intuitively-what-is-the-difference-between-eigendecomposition-and-singular-valu), however, are the following:
#
# Consider the eigendecomposition $A=PDP^{-1}$ and the SVD $A=U\Sigma V^*$:
# 1. The vectors in the eigen decomposition matrix $P$ are not necessarily orthogonal, so the change of basis isn't a simple rotation. On the other hand, the vectors in the matrices $U$ and $V$ in the SVD are orthonormal (a set of vectors, both orthogonal and normalized), so they do represent rotations and possibly flips.
# 2. In the SVD, the nondiagonal matrices $U$ and $V$ are not necessarily the inverse of one another. They are usually not related to each other at all. In the eigen decomposition, the nondiagonal matrices $P$ and $P^{-1}$ are the inverses of each other.
# 3. In the SVD the entries in the diagonal matrix $\Sigma$ are all real and nonnegative. In the eigen decomposition, the entries of $D$ can be any complex number - negative, positive, imaginary.
# 4. The SVD always exists for any sort of rectangular or square matrix, whereas the eigen decomposition can only exist for square matrices, and even among square matrices it may not exist.
#
# **Parts of the tutorial**
# 1. Singular-Value Decomposition
# 2. Calculate Singular-Value Decomposition
# 3. Reconstruct Matrix from SVD
# 4. SVD for Pseudoinverse
# 5. SVD for Dimensionality Reduction

# ## SVD
# - The diagonal values in the Sigma matrix are known as the singular values of the original matrix A.
# - The columns of the U matrix are called the left-singular vectors of A, and the columns of V are called the right-singular vectors of A.
# - SVD is calculated via iterative numerical methods.

# ## Calculate SVD
import numpy as np
from scipy.linalg import svd

A = np.array([[1, 2], [3, 4], [5, 6]])
U, s, VT = svd(A)

A

U

s

VT

# ## Reconstruct matrix from SVD
# ``svd`` returns the singular values as a 1-D vector ``s``; rebuild the
# (possibly rectangular) Sigma matrix before multiplying the factors back.
Sigma = np.zeros((A.shape[0], A.shape[1]))
Sigma.shape

# populate sigma with nxn diagonal matrix
Sigma[:s.shape[0], :s.shape[0]] = np.diag(s)

# reconstruct matrix
B = U.dot(Sigma.dot(VT))
print(B)

Sigma

# ## SVD for Pseudoinverse
# It is the generalization of the matrix inverse for square matrices to rectangular matrices. The pseudoinverse is denoted as $A^+ = VD^+U^T$. SVD can provide $U$ and $V$. The pseudoinverse of the diagonal matrix, $D^+$, can be calculated by creating a diagonal matrix from Sigma, calculating the reciprocal of each non-zero element in Sigma, and taking the transpose if the original matrix was rectangular.

A = np.array([
    [0.1, 0.2],
    [0.3, 0.4],
    [0.5, 0.6],
    [0.7, 0.8]
])
print(A)
print(np.linalg.pinv(A))

# manually calculating the pinverse
U, s, VT = svd(A)
d = 1.0 / s                       # reciprocals of the singular values
D = np.zeros(A.shape)
D[:d.shape[0], :d.shape[0]] = np.diag(d)
B = VT.T.dot(D.T).dot(U.T)        # A^+ = V D^+ U^T
B

# ## SVD for dimensionality reduction
# For a given matrix $A$, an approximate matrix $B$ could be calculated as: $B=U\Sigma_k V^T_k$. This is called latent semantic analysis or indexing in natural language processing. The objective is to retain and work with a descriptive subset of the data $T$. This is a dense summary of the matrix or a projection $T = U\Sigma_k$. The same transformation can also be applied directly to the original matrix $A$: $T = A V_k$ (i.e. ``A.dot(VT.T)`` in the code below).

A = np.array([
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
    [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
    [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]])
print(A, ', Shape =', A.shape)

U, s, VT = svd(A)
Sigma = np.zeros(A.shape)
print(Sigma)
Sigma[:s.shape[0], :s.shape[0]] = np.diag(s)

# Keep only the top ``n_elements`` singular values / singular vectors.
n_elements = 2
Sigma = Sigma[:, :n_elements]
print(Sigma)
VT = VT[:n_elements, :]
print(VT)

# **Reconstruct**

B = U.dot(Sigma.dot(VT))
print(B)

# **Transform: approach 1**

T = U.dot(Sigma)
print(T)

# **Transformation: approach 2**

T = A.dot(VT.T)
print(T)

# ## Using scikit-learn svd for reduction
from sklearn.decomposition import TruncatedSVD

# NOTE: named ``tsvd`` (not ``svd``) so the estimator instance does not
# shadow the ``scipy.linalg.svd`` function imported and used above.
tsvd = TruncatedSVD(n_components=2)
tsvd.fit(A)
result = tsvd.transform(A)
print(result)

U.T.dot(U)

U.dot(U.T)
svd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Monetary Economics: Chapter 4 # ### Preliminaries # + # This line configures matplotlib to show figures embedded in the notebook, # instead of opening a new window for each figure. More about that later. # If you are using an old version of IPython, try using '%pylab inline' instead. # %matplotlib inline import matplotlib.pyplot as plt from pysolve.model import Model from pysolve.utils import is_close,round_solution # - # ### Model PC def create_pc_model(): model = Model() model.set_var_default(0) model.var('Bcb', desc='Government bills held by the Central Bank') model.var('Bh', desc='Government bills held by households') model.var('Bs', desc='Government bills supplied by the government') model.var('C', desc='Consumption goods') model.var('Hh', desc='Cash held by households') model.var('Hs', desc='Cash supplied by the central bank') model.var('R', desc='Interest rate on government bills') model.var('T', desc='Taxes') model.var('V', desc='Household wealth') model.var('Y', desc='Income = GDP') model.var('YD', desc='Disposable income of households') model.param('alpha1', desc='Propensity to consume out of income', default=0.6) model.param('alpha2', desc='Propensity to consume out of wealth', default=0.4) model.param('lambda0', desc='Parameter in asset demand function', default=0.635) model.param('lambda1', desc='Parameter in asset demand function', default=5.0) model.param('lambda2', desc='Parameter in asset demand function', default=0.01) model.param('theta', desc='Tax rate', default=0.2) model.param('G', desc='Government goods', default=20.) 
model.param('Rbar', desc='Interest rate as policy instrument') model.add('Y = C + G') # 4.1 model.add('YD = Y - T + R(-1)*Bh(-1)') # 4.2 model.add('T = theta*(Y + R(-1)*Bh(-1))') #4.3, theta < 1 model.add('V = V(-1) + (YD - C)') # 4.4 model.add('C = alpha1*YD + alpha2*V(-1)') # 4.5, 0<alpha2<alpha1<1 model.add('Hh = V - Bh') # 4.6 model.add('Bh = V*lambda0 + V*lambda1*R - lambda2*YD') # 4.7 model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))') # 4.8 model.add('Hs - Hs(-1) = Bcb - Bcb(-1)') # 4.9 model.add('Bcb = Bs - Bh') # 4.10 model.add('R = Rbar') # 4.11 return model steady = create_pc_model() steady.set_values({'alpha1': 0.6, 'alpha2': 0.4, 'lambda0': 0.635, 'lambda1': 5.0, 'lambda2': 0.01, 'G': 20, 'Rbar': 0.025}) for _ in xrange(100): steady.solve(iterations=100, threshold=1e-5) if is_close(steady.solutions[-2], steady.solutions[-1], atol=1e-4): break # ### Model PCEX def create_pcex_model(): model = Model() model.set_var_default(0) model.var('Bcb', desc='Government bills held by the Central Bank') model.var('Bd', desc='Demand for government bills') model.var('Bh', desc='Government bills held by households') model.var('Bs', desc='Government bills supplied by the government') model.var('C', desc='Consumption goods') model.var('Hd', desc='Demand for cash') model.var('Hh', desc='Cash held by households') model.var('Hs', desc='Cash supplied by the central bank') model.var('R', desc='Interest rate on government bills') model.var('T', desc='Taxes') model.var('V', desc='Household wealth') model.var('Ve', desc='Expected household wealth') model.var('Y', desc='Income = GDP') model.var('YD', desc='Disposable income of households') model.var('YDe', desc='Expected disposable income of households') model.set_param_default(0) model.param('alpha1', desc='Propensity to consume out of income', default=0.6) model.param('alpha2', desc='Propensity to consume o of wealth', default=0.4) model.param('lambda0', desc='Parameter in asset demand function', default=0.635) 
model.param('lambda1', desc='Parameter in asset demand function', default=5.0) model.param('lambda2', desc='Parameter in asset demand function', default=0.01) model.param('theta', desc='Tax rate', default=0.2) model.param('G', desc='Government goods', default=20.) model.param('Ra', desc='Random shock to expectations', default=0.0) model.param('Rbar', desc='Interest rate as policy instrument', default=0.025) model.add('Y = C + G') # 4.1 model.add('YD = Y - T + R(-1)*Bh(-1)') # 4.2 model.add('T = theta*(Y + R(-1)*Bh(-1))') #4.3, theta < 1 model.add('V = V(-1) + (YD - C)') # 4.4 model.add('C = alpha1*YDe + alpha2*V(-1)') # 4.5E model.add('Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe') # 4.7E model.add('Hd = Ve - Bd') # 4.13 model.add('Ve = V(-1) + (YDe - C)') # 4.14 model.add('Hh = V - Bh') # 4.6 model.add('Bh = Bd') # 4.15 model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))') # 4.8 model.add('Hs - Hs(-1) = Bcb - Bcb(-1)') # 4.9 model.add('Bcb = Bs - Bh') # 4.10 model.add('R = Rbar') # 4.11 model.add('YDe = YD * (1 + Ra)') # 4.16 return model # ### Steady state and shocks pcex_steady = create_pcex_model() pcex_steady.set_values([('alpha1', 0.6), ('alpha2', 0.4), ('lambda0', 0.635), ('lambda1', 5.0), ('lambda2', 0.01), ('theta', 0.2), ('G', 20), ('Rbar', 0.025), ('Ra', 0), ('Bcb', 116.36), ('Bh', 363.59), ('Bs', 'Bh + Bcb'), ('Hh', 116.35), ('Hs', 'Hh'), ('V', 'Bh + Hh'), ('R', 'Rbar')]) for _ in xrange(100): pcex_steady.solve(iterations=100, threshold=1e-5) if is_close(pcex_steady.solutions[-2], pcex_steady.solutions[-1], atol=1e-4): break # + import random random.seed(6) shocks = create_pcex_model() shocks.set_values(pcex_steady.solutions[-1], ignore_errors=True) for _ in xrange(50): shocks.parameters['Ra'].value = random.gauss(0,1) / 10. 
shocks.solve(iterations=100, threshold=1e-3) # - # #### Figure 4.1 # + caption = ''' Figure 4.1 Money demand and held money balances, when the economy is subjected to random shocks.''' hddata = [s['Hd'] for s in shocks.solutions[25:]] hhdata = [s['Hh'] for s in shocks.solutions[25:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(min(hddata+hhdata)-2, max(hddata+hhdata)+2) axes.plot(hhdata, 'b') axes.plot(hddata, linestyle='--', color='r') # add labels plt.text(13, 35, 'Held money balances') plt.text(13, 34, '(continuous line)') plt.text(16, 12, 'Money demand') plt.text(16, 11, '(dotted line)') fig.text(0.1, -.05, caption); # - # ###### Figure 4.2 # + caption = ''' Figure 4.2 Changes in money demand and in money balances held (first differences), when the economy is subjected to random shocks. ''' hddata = [s['Hd'] for s in shocks.solutions[24:]] hhdata = [s['Hh'] for s in shocks.solutions[24:]] for i in xrange(len(hddata)-1, 0, -1): hddata[i] -= hddata[i-1] hhdata[i] -= hhdata[i-1] hddata = hddata[1:] hhdata = hhdata[1:] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(min(hddata+hhdata)-2, max(hddata+hhdata)+2) axes.plot(hhdata, 'b') axes.plot(hddata, linestyle='--', color='r') # add labels plt.text(13, 20, 'Held money balances') plt.text(13, 18, '(continuous line)') plt.text(15, -18, 'Money demand') plt.text(15, -20, '(dotted line)') fig.text(0.1, -.05, caption); # - # ### Scenario: Model PC, Steady state with increase in interest rate # + rate_shock = create_pc_model() rate_shock.set_values({'Bcb': 21.576, 'Bh': 64.865, 'Bs': 86.441, 'Hh': 21.62, 'Hs': 21.62, 'V': 86.485, 'alpha1': 0.6, 'alpha2': 0.4, 'lambda0': 0.635, 'lambda1': 5.0, 'lambda2': 0.01, 'G': 20, 'Rbar': 
0.025}) # solve until stable for i in xrange(50): rate_shock.solve(iterations=100, threshold=1e-5) if is_close(rate_shock.solutions[-2], rate_shock.solutions[-1], atol=1e-4): break rate_shock.parameters['Rbar'].value = 0.035 for i in xrange(40): rate_shock.solve(iterations=100, threshold=1e-5) # - # ###### Figure 4.3 # + caption = ''' Figure 4.3 Evolution of the shares of bills and money balances in the portfolio of households, following an increase of 100 points in the rate of interest on bills.''' hhdata = [s['Hh']/s['V'] for s in rate_shock.solutions[15:]] bhdata = [s['Bh']/s['V'] for s in rate_shock.solutions[15:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off') axes.spines['top'].set_visible(False) axes.set_ylim(0.19, 0.26) axes.plot(hhdata, 'b') axes2 = axes.twinx() axes2.tick_params(top='off') axes2.spines['top'].set_visible(False) axes2.set_ylim(0.74, 0.81) axes2.plot(bhdata, linestyle='--', color='r') plt.text(1, 0.81, 'Share of') plt.text(1, 0.807, 'money balances') plt.text(45, 0.81, 'Share of') plt.text(45, 0.807, 'bills') plt.text(15, 0.795, 'Share of bills in') plt.text(15, 0.792, 'household portfolios') plt.text(15, 0.755, 'Share of money balances') plt.text(15, 0.752, 'in household portfolios') fig.text(0.1, -.05, caption); # - # ###### Figure 4.4 # + caption = ''' Figure 4.4 Evolution of disposable income and household consumption following an increase of 100 points in the rate of interest on bills. 
''' yddata = [s['YD'] for s in rate_shock.solutions[20:]] cdata = [s['C'] for s in rate_shock.solutions[20:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(86, 91) axes.plot(yddata, 'b') axes.plot(cdata, linestyle='--', color='r') # add labels plt.text(10, 90.2, 'Disposable') plt.text(10, 90.0, 'Income') plt.text(10, 88, 'Consumption') fig.text(0.1, -0.05, caption); # - # ### Model PCEX1 def create_pcex1_model(): model = Model() model.set_var_default(0) model.var('Bcb', desc='Government bills held by the Central Bank') model.var('Bd', desc='Demand for government bills') model.var('Bh', desc='Government bills held by households') model.var('Bs', desc='Government bills supplied by the government') model.var('C', desc='Consumption goods') model.var('Hd', desc='Demand for cash') model.var('Hh', desc='Cash held by households') model.var('Hs', desc='Cash supplied by the central bank') model.var('R', 'Interest rate on government bills') model.var('T', desc='Taxes') model.var('V', desc='Household wealth') model.var('Ve', desc='Expected household wealth') model.var('Y', desc='Income = GDP') model.var('YD', desc='Disposable income of households') model.var('YDe', desc='Expected disposable income of households') model.set_param_default(0) model.param('alpha1', desc='Propensity to consume out of income', default=0.6) model.param('alpha2', desc='Propensity to consume o of wealth', default=0.4) model.param('lambda0', desc='Parameter in asset demand function', default=0.635) model.param('lambda1', desc='Parameter in asset demand function', default=5.0) model.param('lambda2', desc='Parameter in asset demand function', default=0.01) model.param('theta', desc='Tax rate', default=0.2) model.param('G', desc='Government goods', default=20.) 
model.param('Rbar', desc='Interest rate as policy instrument', default=0.025) model.add('Y = C + G') # 4.1 model.add('YD = Y - T + R(-1)*Bh(-1)') # 4.2 model.add('T = theta*(Y + R(-1)*Bh(-1))') #4.3, theta < 1 model.add('V = V(-1) + (YD - C)') # 4.4 model.add('C = alpha1*YDe + alpha2*V(-1)') # 4.5E model.add('Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe') # 4.7E model.add('Hd = Ve - Bd') # 4.13 model.add('Ve = V(-1) + (YDe - C)') # 4.14 model.add('Hh = V - Bh') # 4.6 model.add('Bh = Bd') # 4.15 model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))') # 4.8 model.add('Hs - Hs(-1) = Bcb - Bcb(-1)') # 4.9 model.add('Bcb = Bs - Bh') # 4.10 model.add('R = Rbar') # 4.11 model.add('YDe = YD(-1)') # 4.16A return model # + pcex1 = create_pcex1_model() pcex1.set_values({'Bcb': 21.576, 'Bh': 64.865, 'Bs': 86.441, 'Hh': 21.62, 'Hs': 21.62, 'V': 86.485, 'YD': 90, 'alpha1': 0.6, 'alpha2': 0.4, 'lambda0': 0.635, 'lambda1': 5.0, 'lambda2': 0.01, 'G': 20, 'Rbar': 0.025}) for i in xrange(10): pcex1.solve(iterations=100, threshold=1e-5) pcex1.parameters['alpha1'].value = 0.7 for i in xrange(40): pcex1.solve(iterations=100, threshold=1e-5) # - # ###### Figure 4.5 # + caption = ''' Figure 4.5 Rise and fall of national income (GDP) following an increase in the propensity to consume out of expected disposable income ($\\alpha_1$) ''' ydata = [s['Y'] for s in pcex1.solutions[8:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(104, 123) axes.plot(ydata, 'b') # add labels plt.text(10, 116, 'National Income (GDP)') fig.text(0.1, -0.05, caption); # - # ###### Figure 4.6 # + caption = ''' Figure 4.6 Evolution of consumtion, expected disposable income and lagged wealth, following an increase in the propensity to consume out of expected disposable income ($\\alpha_1$).''' vdata = [s['V'] for s in pcex1.solutions[8:]] ydedata = [s['YDe'] for 
s in pcex1.solutions[8:]] cdata = [s['C'] for s in pcex1.solutions[8:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(60, 106) axes.plot(cdata, linestyle=':', color='r') axes.plot(ydedata, linestyle='--', color='b') axes.plot(vdata, color='k') # add labels plt.text(5, 102, 'Consumption') plt.text(5, 90, 'Expected') plt.text(5, 88, 'disposable') plt.text(5, 86, 'income') plt.text(10, 70, 'Lagged wealth') fig.text(0.1, -.1, caption); # - # ### Model PCEX2 def create_pcex2_model(): model = Model() model.set_var_default(0) model.var('Bcb', desc='Government bills held by the Central Bank') model.var('Bd', desc='Demand for government bills') model.var('Bh', desc='Government bills held by households') model.var('Bs', desc='Government bills supplied by the government') model.var('C', desc='Consumption goods') model.var('Hd', desc='Demand for cash') model.var('Hh', desc='Cash held by households') model.var('Hs', desc='Cash supplied by the central bank') model.var('R', 'Interest rate on government bills') model.var('T', desc='Taxes') model.var('V', desc='Household wealth') model.var('Ve', desc='Expected household wealth') model.var('Y', desc='Income = GDP') model.var('YD', desc='Disposable income of households') model.var('YDe', desc='Expected disposable income of households') model.var('alpha1', desc='Propensity to consume out of income') model.set_param_default(0) model.param('alpha2', desc='Propensity to consume out of wealth', default=0.6) model.param('alpha10', desc='Propensity to consume out of income - exogenous') model.param('iota', desc='Impact of interest rate on the propensity to consume out of income') model.param('lambda0', desc='Parameter in asset demand function', default=0.635) model.param('lambda1', desc='Parameter in asset demand function', default=5.0) model.param('lambda2', desc='Parameter in asset demand 
function', default=0.01) model.param('theta', desc='Tax rate', default=0.2) model.param('G', desc='Government goods') model.param('Rbar', desc='Interest rate as policy instrument') model.add('Y = C + G') # 4.1 model.add('YD = Y - T + R(-1)*Bh(-1)') # 4.2 model.add('T = theta*(Y + R(-1)*Bh(-1))') #4.3, theta < 1 model.add('V = V(-1) + (YD - C)') # 4.4 model.add('C = alpha1*YDe + alpha2*V(-1)') # 4.5E model.add('Bd = Ve*lambda0 + Ve*lambda1*R - lambda2*YDe') # 4.7E model.add('Hd = Ve - Bd') # 4.13 model.add('Ve = V(-1) + (YDe - C)') # 4.14 model.add('Hh = V - Bh') # 4.6 model.add('Bh = Bd') # 4.15 model.add('Bs - Bs(-1) = (G + R(-1)*Bs(-1)) - (T + R(-1)*Bcb(-1))') # 4.8 model.add('Hs - Hs(-1) = Bcb - Bcb(-1)') # 4.9 model.add('Bcb = Bs - Bh') # 4.10 model.add('R = Rbar') # 4.11 model.add('YDe = YD(-1)') # 4.16A model.add('alpha1 = alpha10 - iota*R(-1)') return model # + pcex2 = create_pcex2_model() pcex2.set_values({'Bcb': 21.576, 'Bh': 64.865, 'Bs': 86.441, # Bs = Bh + Bcb 'Hh': 21.62, 'Hs': 21.62, # Hs = Hh 'R': 0.025, 'V': 86.485, # V = Bh + Hh 'YD': 90, 'alpha1': 0.6, 'alpha2': 0.4, 'alpha10': 0.7, 'iota': 4, 'lambda0': 0.635, 'lambda1': 5, 'lambda2': 0.01, 'theta': 0.2, 'G': 20, 'Rbar': 0.025}) for i in xrange(15): pcex2.solve(iterations=100, threshold=1e-5) # Introduce the rate shock pcex2.parameters['Rbar'].value += 0.01 for i in xrange(40): pcex2.solve(iterations=100, threshold=1e-5) # - # ###### Figure 4.9 # + caption = ''' Figure 4.9 Evolution of GDP, disposable income, consumptiona and wealth, following an increase of 100 points in the rate of interest on bills, in Model PCEX2 where the propensity to consume reacts negatively to higher interest rates''' vdata = [s['V'] for s in pcex2.solutions[12:]] ydata = [s['Y'] for s in pcex2.solutions[12:]] yddata = [s['YD'] for s in pcex2.solutions[12:]] cdata = [s['C'] for s in pcex2.solutions[12:]] fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') 
axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(80, 116) axes.plot(ydata, linestyle=':', color='b') axes.plot(vdata, linestyle='-', color='r') axes.plot(yddata, linestyle='-.', color='k') axes.plot(cdata, linestyle='--', color='g') # add labels plt.text(15, 112, 'National income (GDP)') plt.text(15, 101, 'Household wealth') plt.text(8, 89, 'Disposable') plt.text(8, 87.5, 'income') plt.text(12, 84, 'Consumption') fig.text(0.1, -0.1, caption); # - # ###### Figure 4.10 # + caption = ''' Figure 4.10 Evolution of tax revenues and government expenditures including net debt servicing, following an increase of 100 points in the rate of interest on bills, in Model PCEX2 where the propensity to consume reacts negatively to higher interest rates''' tdata = list() sumdata = list() for i in xrange(12, len(pcex2.solutions)): s = pcex2.solutions[i] s_1 = pcex2.solutions[i-1] sumdata.append( s['G'] + s_1['R']*s_1['Bh']) tdata.append(s['T']) fig = plt.figure() axes = fig.add_axes([0.1, 0.1, 1.1, 1.1]) axes.tick_params(top='off', right='off') axes.spines['top'].set_visible(False) axes.spines['right'].set_visible(False) axes.set_ylim(20.5, 23) axes.plot(sumdata, linestyle='-', color='r') axes.plot(tdata, linestyle='--', color='k') # add labels plt.text(6, 22.9, 'Government expenditures plus net debt service') plt.text(15, 22, 'Tax revenues') fig.text(0.1, -0.15, caption); # -
monetary-economics/Chapter 4 Model PC.ipynb
# !pip install --upgrade --force-reinstall --user git+https://github.com/callysto/nbplus.git#egg=geogebra\&subdirectory=geogebra
# Let us find all of the triangles in this scenario.
# For a better understanding, consider the following animation. Change the slider value of $n$ and observe how we count the triangles.
# To determine the number of triangles in a rectangle, first label every smallest triangle. The total number of triangles is then double the highest label number.
# ## 4. Test yourself

def display(question, answerList):
    """Print the quiz question, then render its ipywidgets answer widget below it."""
    print(question)
    IPython.display.display(answerList)

# Consider the following triangle ABC:
# ![](images/ex1.png)

# +
# Quiz 4.1 — a radio-button multiple choice. The widget's `observe` hook fires
# `check642` whenever the selected value changes, which redraws the question
# and prints feedback. Each quiz cell below rebinds `answer642`/`question642`/
# `check642`; earlier observers keep closures over their own widgets, so every
# quiz keeps working independently.
answer642 = widgets.RadioButtons(options=['Select the best one', '20', '10', '22', '12', 'None of the above'],
                                 value = 'Select the best one', description='Choices:')
question642 = "4.1 How many triangles are in the above triangle?"

def check642(g):
    # Redraw question + widget on every change event.
    IPython.display.clear_output(wait=False)
    display(question642, answer642)
    if answer642.value == '20':
        print("Correct Answer!")
    else:
        if answer642.value == 'Select the best one':
            # Placeholder option re-selected: give no feedback.
            pass
        else:
            print("Wrong answer! Try again.")
            # NOTE(review): the collapsed source makes the indentation of the
            # next two lines ambiguous; they appear to follow the wrong-answer
            # message — confirm against the original notebook.
            IPython.display.clear_output(wait=False)
            display(question642, answer642)

answer642.observe(check642, 'value')
# -

# In the following figure two small rectangles (ADEF and BCEF) join each other and produce another large rectangle ABCD:
# ![](images/ex3.png)

# +
# Quiz 4.2 — same pattern as 4.1; correct answer is '26'.
answer642 = widgets.RadioButtons(options=['Select the best one', '20', '22', '24', '26', 'None of the above'],
                                 value = 'Select the best one', description='Choices:')
question642 = "4.2 How many triangles are in the large rectangle?"

def check642(g):
    IPython.display.clear_output(wait=False)
    display(question642, answer642)
    if answer642.value == '26':
        print("Correct Answer!")
    else:
        if answer642.value == 'Select the best one':
            pass
        else:
            print("Wrong answer! Try again.")
            IPython.display.clear_output(wait=False)
            display(question642, answer642)

answer642.observe(check642, 'value')
# -

# Consider a pentagon as drawn in the following figure:
# ![](images/ex4.png)

# +
# Quiz 4.3 — same pattern; correct answer is '17'.
answer642 = widgets.RadioButtons(options=['Select the best one', '20', '17', '15', '13', 'None of the above'],
                                 value = 'Select the best one', description='Choices:')
question642 = "4.3 How many triangles are in the above pentagon?"

def check642(g):
    IPython.display.clear_output(wait=False)
    display(question642, answer642)
    if answer642.value == '17':
        print("Correct Answer!")
    else:
        if answer642.value == 'Select the best one':
            pass
        else:
            print("Wrong answer! Try again.")
            IPython.display.clear_output(wait=False)
            display(question642, answer642)

answer642.observe(check642, 'value')
# -

# A pentagon ABCDE and a rectangle touch, as in the following figure:
# ![](images/ex5.png)

# +
# Quiz 4.4 — same pattern; correct answer is '18'.
answer642 = widgets.RadioButtons(options=['Select the best one', '20', '18', '16', '14', 'None of the above'],
                                 value = 'Select the best one', description='Choices:')
question642 = "4.4 How many triangles are in the above figure?"

def check642(g):
    IPython.display.clear_output(wait=False)
    display(question642, answer642)
    if answer642.value == '18':
        print("Correct Answer!")
    else:
        if answer642.value == 'Select the best one':
            pass
        else:
            print("Wrong answer! Try again.")
            IPython.display.clear_output(wait=False)
            display(question642, answer642)

answer642.observe(check642, 'value')
# -

# Again consider a pentagon:
# ![](images/ex6.png)

# +
# Quiz 4.5 — same pattern; correct answer is '28'.
answer642 = widgets.RadioButtons(options=['Select the best one', '20', '24', '28', '29', 'None of the above'],
                                 value = 'Select the best one', description='Choices:')
question642 = "4.5 How many triangles are in the above pentagon?"

def check642(g):
    IPython.display.clear_output(wait=False)
    display(question642, answer642)
    if answer642.value == '28':
        print("Correct Answer!")
    else:
        if answer642.value == 'Select the best one':
            pass
        else:
            print("Wrong answer! Try again.")
            IPython.display.clear_output(wait=False)
            display(question642, answer642)

answer642.observe(check642, 'value')
# -

# [![Callysto.ca License](https://github.com/callysto/curriculum-notebooks/blob/master/callysto-notebook-banner-bottom.jpg?raw=true)](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
Mathematics/CountingTriangles/counting-triangles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from agent.TradingAgent import TradingAgent import pandas as pd import numpy as np import os from contributed_traders.util import get_file class SimpleAgent(TradingAgent): """ Simple Trading Agent that compares the past mid-price observations and places a buy limit order if the first window mid-price exponential average >= the second window mid-price exponential average or a sell limit order if the first window mid-price exponential average < the second window mid-price exponential average """ def __init__(self, id, name, type, symbol, starting_cash, min_size, max_size, wake_up_freq='60s', log_orders=False, random_state=None): super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state) self.symbol = symbol self.min_size = min_size # Minimum order size self.max_size = max_size # Maximum order size self.size = self.random_state.randint(self.min_size, self.max_size) self.wake_up_freq = wake_up_freq self.mid_list, self.avg_win1_list, self.avg_win2_list = [], [], [] self.log_orders = log_orders self.state = "AWAITING_WAKEUP" #self.window1 = 100 #self.window2 = 5 def kernelStarting(self, startTime): super().kernelStarting(startTime) # Read in the configuration through util with open(get_file('simple_agent.cfg'), 'r') as f: self.window1, self.window2 = [int(w) for w in f.readline().split()] #print(f"{self.window1} {self.window2}") def wakeup(self, currentTime): """ Agent wakeup is determined by self.wake_up_freq """ can_trade = super().wakeup(currentTime) if not can_trade: return self.getCurrentSpread(self.symbol) self.state = 'AWAITING_SPREAD' def dump_shares(self): # get rid of any outstanding shares we have if self.symbol in self.holdings and len(self.orders) == 0: order_size = 
self.holdings[self.symbol] bid, _, ask, _ = self.getKnownBidAsk(self.symbol) if bid: self.placeLimitOrder(self.symbol, quantity=order_size, is_buy_order=False, limit_price=0) def receiveMessage(self, currentTime, msg): """ Momentum agent actions are determined after obtaining the best bid and ask in the LOB """ super().receiveMessage(currentTime, msg) if self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD': dt = (self.mkt_close - currentTime) / np.timedelta64(1, 'm') if dt < 25: self.dump_shares() else: bid, _, ask, _ = self.getKnownBidAsk(self.symbol) if bid and ask: self.mid_list.append((bid + ask) / 2) if len(self.mid_list) > self.window1: self.avg_win1_list.append(pd.Series(self.mid_list).ewm(span=self.window1).mean().values[-1].round(2)) if len(self.mid_list) > self.window2: self.avg_win2_list.append(pd.Series(self.mid_list).ewm(span=self.window2).mean().values[-1].round(2)) if len(self.avg_win1_list) > 0 and len(self.avg_win2_list) > 0 and len(self.orders) == 0: if self.avg_win1_list[-1] >= self.avg_win2_list[-1]: # Check that we have enough cash to place the order if self.holdings['CASH'] >= (self.size * ask): self.placeLimitOrder(self.symbol, quantity=self.size, is_buy_order=True, limit_price=ask) else: if self.symbol in self.holdings and self.holdings[self.symbol] > 0: order_size = min(self.size, self.holdings[self.symbol]) self.placeLimitOrder(self.symbol, quantity=order_size, is_buy_order=False, limit_price=bid) self.setWakeup(currentTime + self.getWakeFrequency()) self.state = 'AWAITING_WAKEUP' def getWakeFrequency(self): return pd.Timedelta(self.wake_up_freq) # + from agent.TradingAgent import TradingAgent import pandas as pd import numpy as np import os import pandas as pd from contributed_traders.util import get_file class mliu420_blazeit(TradingAgent): """ <NAME>'s Market Making Algo """ def __init__(self, id, name, type, symbol, starting_cash, min_size, max_size , wake_up_freq='10s', log_orders=False, random_state=None): 
super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state) self.symbol = symbol # Symbol traded self.min_size = min_size # Minimum order size self.max_size = max_size # Maximum order size self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2) # order size per LOB side self.wake_up_freq = wake_up_freq # Frequency of agent wake up self.log_orders = log_orders self.state = "AWAITING_WAKEUP" # Percentage of the order size to be placed at different levels is determined by levels_quote_dict ###################### self.orders_executed = 0 self.can_cancel_request = False self.paOrders = 0 #parameters self.pricingVolume = 100 self.depthLevels = 10 def kernelStarting(self, startTime): super().kernelStarting(startTime) def wakeup(self, currentTime): """ Agent wakeup is determined by self.wake_up_freq """ can_trade = super().wakeup(currentTime) if not can_trade: return #check if current time greater than wait time if self.cancelCheck(currentTime): self.cancelOrders() self.getCurrentSpread(self.symbol, depth=self.depthLevels) self.state = 'AWAITING_SPREAD' self.orders_executed = 0 def receiveMessage(self, currentTime, msg): """ Market Maker actions are determined after obtaining the bids and asks in the LOB """ super().receiveMessage(currentTime, msg) try: dt = (self.mkt_close - currentTime).totalSeconds() if dt < 25: self.dump_shares() return 0 except: pass if msg.body['msg'] == 'ORDER_EXECUTED': self.orders_executed += 1 if msg.body['msg'] == 'ORDER_ACCEPTED': self.can_cancel_request = True if self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD': self.calculateAndOrder(currentTime) self.setWakeup(currentTime + self.getWakeFrequency()) #do nothing till other leg executed elif self.state == 'AWAITING CONFIRMATION' and msg.body['msg'] == 'ORDER_ACCEPTED': self.paOrders -= 1 if self.paOrders == 0: self.state = 'AWAITING EXECUTION' self.exec_time_order = currentTime elif self.state == 
'AWAITING_EXECUTION' and msg.body['msg'] == 'ORDER_EXECUTED': #use a condition to see if holdings close to reduce exposure to JPM #self.fOrderTime = currentTime if len(self.orders) == 0: self.setWakeup(currentTime + self.getWakeFrequency()) self.orders_executed = 0 elif self.cancelCheck: self.cancelOrders() elif msg.body['msg'] == 'ORDER_CANCELLED': if len(self.orders) == 0: self.orders_executed = 0 self.can_cancel_request = False self.setWakeup(currentTime + self.getWakeFrequency()) def cancelOrders(self): """ cancels all resting limit orders placed by the market maker """ for _, order in self.orders.items(): self.cancelOrder(order) self.can_cancel_request = False def cancelCheck(self, currentTime): if self.orders and self.can_cancel_request: if self.orders_executed == 0: return True else: try: if int(currentTime - self.exec_time_order).totalSeconds() >= 5: return True except: self.exec_time_order = currentTime return False def calculateAndOrder(self, currentTime): bid, ask = self.getKnownBidAsk(self.symbol, best=False) if bid and ask: sumBid = 0 sumBidVol = 0 sumAsk = 0 sumAskVol = 0 try: for i in range(self.depthLevels): if sumBidVol < 100: if sumBidVol + bid[i][1] > self.pricingVolume: sumBidVol = self.pricingVolume sumBid += (self.pricingVolume - bid[i][1]) * bid[i][0] else: sumBid += bid[i][1] * bid[i][0] if sumAskVol < 100: if sumAskVol + ask[i][1] > self.pricingVolume: sumAskVol = self.pricingVolume sumAsk += (self.pricingVolume - ask[i][1]) * ask[i][0] else: sumAsk += ask[i][1] * ask[i][0] if sumBid == sumAsk: if sumBid == self.pricingVolume: askP = sumAsk / self.pricingVolume bidP = sumBid / self.pricingVolume self.placeLimitOrder(self.symbol, ) askVol = self.holdings['CASH'] / askP + max(0, self.holdings[self.symbol]) bidVol = self.holdings['CASH'] / bidP + max(0, -self.holdings[self.symbol]) self.placeLimitOrder(self.symbol, bidVol, True, bidP) self.paOrders += 1 self.placeLimitOrder(self.symbol, askVol, False, askP) self.paOrders += 1 self.state = 
'AWAITING_CONFIRMATION' #place orders and await execution except: self.setWakeup(currentTime + self.getWakeFrequency()) def dump_shares(self): # get rid of any outstanding shares we have if self.symbol in self.holdings and len(self.orders) == 0: bid, _, ask, _ = self.getKnownBidAsk(self.symbol) order_size = self.holdings[self.symbol] if order_size > 0 if bid: self.placeLimitOrder(self.symbol, quantity=order_size, is_buy_order=False, limit_price=0) elif ask: self.placeLimitOrder(self.symbol, quantity=abs(order_size), is_buy_order=True, limit_price=0) def getWakeFrequency(self): return pd.Timedelta(self.wake_up_freq) def dump_shares(self): # get rid of any outstanding shares we have if self.symbol in self.holdings and len(self.orders) == 0: order_size = self.holdings[self.symbol] bid, _, ask, _ = self.getKnownBidAsk(self.symbol) if bid: self.placeLimitOrder(self.symbol, quantity=order_size, is_buy_order=False, limit_price=0) # + from agent.TradingAgent import TradingAgent import pandas as pd import numpy as np import os import math class mliu420_blazeit(TradingAgent): """ This agent was built on the market maker agent with some caveats. Prices are determined by a variable pricingVolume. Roughly the price of a stock is going to be the average price if to buy/sell pricingVolume amount of stock. This average between the buy and sell for 100 shares is the price. My first iteration of this agent tried to be fancy with calculations of price happening at different times to speed up the order placing process. This caused issues with too many iterations. 
""" def __init__(self, id, name, type, symbol, starting_cash, min_size, max_size , wake_up_freq='10s', log_orders=False, random_state=None): super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state) self.symbol = symbol # Symbol traded self.min_size = min_size # Minimum order size self.max_size = max_size # Maximum order size self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2) # order size per LOB side self.wake_up_freq = wake_up_freq # Frequency of agent wake up self.log_orders = log_orders self.state = "AWAITING_WAKEUP" #parameters self.sc = starting_cash self.pricingVolume = 100 self.depthLevels = 10 self.starting_cash = starting_cash self.pOrders = 0 self.stdSpread = pd.DataFrame([50, 51]) self.close = False self.wait = 0 def kernelStarting(self, startTime): super().kernelStarting(startTime) def wakeup(self, currentTime): print(currentTime) """ Agent wakeup is determined by self.wake_up_freq """ can_trade = super().wakeup(currentTime) if not can_trade: return print('true holdings??') print(self.holdings) print(self.markToMarket(self.holdings)) if self.wait <= 0 and self.pOrders == 0: self.cancelOrders() try: self.stdS = self.stdSpread.std()[0] except: self.stdS = 50 if not(self.close): self.state = 'AWAITING_SPREAD' self.getCurrentSpread(self.symbol, depth=self.depthLevels) else: self.wait -= 1 self.state = 'AWAITING_WAKEUP' self.setWakeup(currentTime + self.getWakeFrequency()) if self.close: self.cancelOrders() self.dump_shares() def receiveMessage(self, currentTime, msg): """ Market Maker actions are determined after obtaining the bids and asks in the LOB """ super().receiveMessage(currentTime, msg) if self.close: if msg.body['msg'] == 'ORDER_EXECUTED': try: if self.holdings[self.symbol] != 0: print('dumping shares') self.cancelOrders() self.dump_shares() self.state = 'AWAITING_WAKEUP' except: pass elif self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD': 
self.calculateAndOrder(currentTime) dt = (self.mkt_close - currentTime) / np.timedelta64(1, 'm') if dt < 5: self.close = True print('DUMP SHARES') self.dump_shares() self.state = 'AWAITING_WAKEUP' #place orders and await execution self.setWakeup(currentTime + self.getWakeFrequency()) elif self.state == 'AWAITING_WAKEUP' and msg.body['msg'] == 'ORDER_EXECUTED': if len(self.orders) > 0 and self.wait == 0: self.wait = 1 else: self.wait = 0 elif msg.body['msg'] == 'ORDER_ACCEPTED': self.pOrders -= 1 #print(msg) def cancelOrders(self): """ cancels all resting limit orders placed by the market maker """ for _, order in self.orders.items(): self.cancelOrder(order) def calculateAndOrder(self, currentTime): bid, ask = self.getKnownBidAsk(self.symbol, best=False) if bid and ask: sumBid = 0 sumBidVol = 0 sumAsk = 0 sumAskVol = 0 try: for i in range(self.depthLevels): if sumBidVol < self.pricingVolume: if sumBidVol + bid[i][1] > self.pricingVolume: sumBid += (self.pricingVolume - sumBidVol) * bid[i][0] sumBidVol = self.pricingVolume else: sumBid += bid[i][1] * bid[i][0] sumBidVol += bid[i][1] if sumAskVol < self.pricingVolume: if sumAskVol + ask[i][1] > self.pricingVolume: sumAsk += (self.pricingVolume - sumAskVol) * ask[i][0] sumAskVol = self.pricingVolume else: sumAsk += ask[i][1] * ask[i][0] sumAskVol += ask[i][1] if sumAskVol == self.pricingVolume and sumBidVol == self.pricingVolume: break if sumBidVol == sumAskVol: if sumBidVol == self.pricingVolume: askM = sumAsk / self.pricingVolume bidM = sumBid / self.pricingVolume midM = (askM + bidM) / 2 print('Spread:',askM,bidM, askM - bidM) bidVol = math.floor(max(0, self.holdings['CASH'] / midM)) try: askVol = math.floor(max(0,2 * max(0,self.holdings[self.symbol])+(self.holdings['CASH'] - 2*abs(min(0,self.holdings[self.symbol]*askM))) / midM )) except: askVol = math.floor(max(0,self.holdings['CASH'] / midM ) ) print('Volumes ask and bid:',askVol,bidVol) midP = midM + self.stdS / 7 * bidVol / (bidVol + askVol) - self.stdS / 14 
bidP = math.floor( min(midP - self.stdS/1.5, bidM + 1) ) askP = math.ceil( max(midP + self.stdS/1.5, askM - 1) ) print('Algo Spread:',askP,bidP, askP - bidP) if bidVol > 0: self.placeLimitOrder(self.symbol, bidVol, True, bidP) self.pOrders += 1 if askVol > 0: self.placeLimitOrder(self.symbol, askVol, False, askP) self.pOrders += 1 self.stdSpread = self.stdSpread.append([askM-bidM], ignore_index=True) except Exception as e: print(e) pass def dump_shares(self): # get rid of any outstanding shares we have if self.symbol in self.holdings: bid, _, ask, _ = self.getKnownBidAsk(self.symbol) order_size = self.holdings[self.symbol] print('order size',order_size) print('ask',ask) if order_size > 0: if bid: self.placeLimitOrder(self.symbol, quantity=order_size, is_buy_order=False, limit_price=0) if order_size < 0: if ask: self.placeLimitOrder(self.symbol, quantity=abs(order_size), is_buy_order=True, limit_price=round(2 * ask[0][0])) def getWakeFrequency(self): return pd.Timedelta(self.wake_up_freq) # + from agent.TradingAgent import TradingAgent import pandas as pd import math class mliu420_blazeit(TradingAgent): """ This agent was built on the market maker agent with some caveats. Prices are determined by a variable pricingVolume. Roughly the price of a stock is going to be the average price if to buy/sell pricingVolume amount of stock. This average between the buy and sell for 100 shares is the price. My first iteration of this agent tried to be fancy with calculations of price happening at different times to speed up the order placing process. This caused issues with too many iterations. 
""" def __init__(self, id, name, type, symbol, starting_cash, min_size, max_size , wake_up_freq='10s', log_orders=False, random_state=None): super().__init__(id, name, type, starting_cash=starting_cash, log_orders=log_orders, random_state=random_state) self.symbol = symbol # Symbol traded self.min_size = min_size # Minimum order size self.max_size = max_size # Maximum order size self.size = round(self.random_state.randint(self.min_size, self.max_size) / 2) # order size per LOB side self.wake_up_freq = wake_up_freq # Frequency of agent wake up self.log_orders = log_orders self.state = "AWAITING_WAKEUP" #parameters self.buy = True self.fake = True self.fakePrice = 0 #### self.pricingVolume = 30 self.depthLevels = 10 self.starting_cash = starting_cash self.pOrders = 0 self.stdSpread = pd.DataFrame([50]) def kernelStarting(self, startTime): super().kernelStarting(startTime) def wakeup(self, currentTime): """ Agent wakeup is determined by self.wake_up_freq """ can_trade = super().wakeup(currentTime) if not can_trade: return if self.pOrders == 0: self.cancelOrders() try: self.stdS = self.stdSpread.std()[0] except: self.stdS = 50 self.getCurrentSpread(self.symbol, depth=self.depthLevels) self.state = 'AWAITING_SPREAD' print('true holdings??') print(self.holdings) print(self.markToMarket(self.holdings)) def receiveMessage(self, currentTime, msg): """ Market Maker actions are determined after obtaining the bids and asks in the LOB """ super().receiveMessage(currentTime, msg) if self.state == 'AWAITING_SPREAD' and msg.body['msg'] == 'QUERY_SPREAD': self.calculateAndOrder(currentTime) if msg.body['msg'] == 'ORDER_ACCEPTED': self.pOrders -= 1 #print(msg) def cancelOrders(self): """ cancels all resting limit orders placed by the market maker """ for _, order in self.orders.items(): self.cancelOrder(order) def calculateAndOrder(self, currentTime): bid, ask = self.getKnownBidAsk(self.symbol, best=False) if bid and ask: sumBid = 0 sumBidVol = 0 sumAsk = 0 sumAskVol = 0 try: for i 
in range(self.depthLevels): if sumBidVol < 100: if sumBidVol + bid[i][1] > self.pricingVolume: sumBid += (self.pricingVolume - sumBidVol) * bid[i][0] sumBidVol = self.pricingVolume else: sumBid += bid[i][1] * bid[i][0] sumBidVol += bid[i][1] if sumAskVol < 100: if sumAskVol + ask[i][1] > self.pricingVolume: sumAsk += (self.pricingVolume - sumAskVol) * ask[i][0] sumAskVol = self.pricingVolume else: sumAsk += ask[i][1] * ask[i][0] sumAskVol += ask[i][1] if sumBidVol == sumAskVol: if sumBidVol == self.pricingVolume: askP = sumAsk / self.pricingVolume bidP = sumBid / self.pricingVolume print('Spread:',askP,bidP, askP - bidP) bidVol = math.floor(max(0, self.holdings['CASH']) / bidP/2) askVol = math.floor(max(0, self.holdings['CASH']) / askP/2) try: #print('bidvol, askvol, jpm, cash',bidVol, askVol, self.holdings[self.symbol],self.holdings['CASH']) bidVol = max(0,bidVol - self.holdings[self.symbol]) askVol = max(0,askVol + self.holdings[self.symbol]) #print('bidvol, askvol, jpm',bidVol, askVol, self.holdings) except: pass askM = askP bidM = bidP midP = (askM + bidM) / 2 self.stdSpread = self.stdSpread.append([askM-bidM], ignore_index=True) print('status:') print(self.fake,self.buy) if self.buy: if self.fake: if askM - bidM > 30: if bid[0][0] < midP: self.fakePrice = ask[0][0]-1 self.placeLimitOrder(self.symbol, 1, True, self.fakePrice) self.fake = False else: self.placeLimitOrder(self.symbol, bidVol - 1, False, self.fakePrice) self.fake = True self.buy = False else: if self.fake: if askM - bidM > 30: if ask[0][0] > midP: self.fakePrice = bid[0][0]+1 self.placeLimitOrder(self.symbol, 1, False, self.fakePrice) self.fake = False else: self.placeLimitOrder(self.symbol, askVol - 1, True, self.fakePrice) self.fake = True self.buy = True except Exception as e: print(e) pass self.state = 'AWAITING_WAKEUP' #place orders and await execution self.setWakeup(currentTime + self.getWakeFrequency()) def getWakeFrequency(self): return pd.Timedelta(self.wake_up_freq) # - try: print(xx) 
except: print('wow') z = {'hello':-100} print(max(0,-z['hello'])) print(z['test']) print(not(True)) import numpy as np a = [100,300] a.append(200) print(np.std(a)) import pandas as pd a = pd.DataFrame([100,200]) a = a.append([6], ignore_index=True) print(a.std()[0])
.ipynb_checkpoints/agent-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Imports from IPython.display import SVG from keras.utils.vis_utils import model_to_dot import base from src.models import build_deep_q_model # # Architecture model = build_deep_q_model() SVG(model_to_dot(model, show_shapes=True, show_layer_names=False).create(prog='dot', format='svg')) # # Summary model.summary() #
ipynb/deep-q-model-analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: exercise # language: python # name: exercise # --- # + from sympy.matrices import Matrix import sympy as sp import numpy as np from Exercise import Exercise, MarkdownBlock from process_latex import process_sympy try: from config import URL, TOKEN except: None # TODO: replace with supplied strings Exercise.URL = URL Exercise.TOKEN = TOKEN # + tags=[] # - # ## Introduction # In this notebook, you are about to create some (linear algebra) exercises using the developed `Exercise` Python library aiming to facilitate authoring parameterized mathematics exercises at a high level of abstraction (i.e. access to a scripting language and the libraries available in there, including as SymPy, NumPy and Matplotlib). # Created exercises can be 'played' inline, using the web-based player developed as part of this project. # Roughly speaking this project is new combination of existing approaches: MEGUA-like parameterized text, SymPy's CAS functionality and exercise-setup as used by Grasple and SageMath for working with mathematical objects in notebooks. # # The goal is to evaluate the usability of the developed library and the authoring setup (this notebook). # Note that by no means you or your skills are being tested, it is by no means a problem if exercises are left uncompleted. # Notes, comments and suggestions are very welcome, please write these either as code-comments or in the Markdown cells in the notebook. # All feedback will be reported and reflected upon anonymously. # Completing the notebook should take about 30 minutes, depending on setup time, prior knowledge about this project, familiarity with linear algebra and the supplied frameworks etc. # Please download the notebook when done and send it by email. 
# After completion, in a brief semi-structured interview, you can further elaborate upon your experiences. # # To start creating exercises, please replace the `URL` and `TOKEN` in the block above with the strings supplied by email: # ``` # Exercise.URL = "<supplied_url_here>" # Exercise.TOKEN = "<supplied_token_here>" # ``` # # Assumptions: # - Familiarity with Python, Markdown, LaTeX # - Familiarity with Jupyter-Notebook # - Familiarity with the very basics of linear algebra # # Recommendations: # - Use Binder (www.mybinder.org) to edit this notebook, if you prefer local setup instead, see README.md. # - Use Firefox, the iFrame exercise player embeddings do not work in Chrome or Safari due to global cross-origin policies set by these browsers. # - Other browsers (Chrome, Safari) can still be used, however, playing exercises is only possible outside of the notebook by clicking the generated exercise links, which is rather inconvenient. # # Notes: # - Documentation can for the Python library can be found in the `html` directory. # - Within Jupyter-Notebook, function documentation can be viewed by writing a `?` after the function, like so: `Exercise("What is $1 + 1$?").add_answer?` # - Within exercises, only inline math notation is supported. # - Preview-exercises are purged from the server from time to time, don't expect long-term, persistent availability of any played exercises. # - Please skip an exercise in case completing it requires more than a few minutes. # # Happy coding ;) # ## Exercise Basics # The most basic exercise contains a Markdown string with the exercise content and a single answer rule specifying the correct answer. # Mathematics notation can be written inline in LaTeX between dollar signs. 
# Create an exercise instance e = Exercise("What is $1 + 1$?") # Add 2 as a correct answer e.add_answer(2, True, "Correct!") # Verify that the exercise is working correctly e.play() # Note: as of now, all basic arithmatic is simplified by sp.simplify(...), there is not yet a way to control this behaviour; # therefore writing 1 + 1 in the answer box is accepted correct # Details on what is simplified: https://docs.sympy.org/latest/tutorial/simplification.html # Let's imagine the typical student mistake for this exercise is computing $1 - 1 = 0$ instead. # We add an answer rule to catch that error and provide the student with answer-specific feedback. e.add_answer(0, False, "🤔 That's not right, did you compute $1 - 1 = 0$ instead?") # Verify that the specific feedback is shown e.play() # ### Task 1 # Create an exercise asking learners to compute $3/3$. # Provide answer-specific feedback in case learners compute $3*3$ instead. # Add default feedback (using `e.add_default_feedback(...)`) with a link pointing to a source of preference explaining (integer) devision (hint: `[link](www.example.com)`). # Feel free to embed your favorite meme or xkcd at a correct/incorrect answer (hint `![img](www.example.com/img)`). # + # Task 1 user code: # - # ## Templating Exercises # Exercises can be parameterized/templated (still looking for the correct terminology on this one), this allows for two things: # 1. Randomization. By making part of the content random, multiple instances can be generated, allowing for repeated practice. # 2. Abstraction. By utilizing the functionality of SymPy objects to be translated to LaTeX, authoring exercises remains efficient and effective. 
# # The integer-exercise can be randomized as follows: # + string = """ ### Integer addition Please compute $@a + @b$ """ params = {} # avoid 0 + 0 instance, since 0 + 0 == 0 - 0, answer same in case our typical mistake is made params["a"] = np.random.randint(0, 10) params["b"] = np.random.randint(1, 10) params["ans_correct"] = params["a"] + params["b"] params["ans_incorrect"] = params["a"] - params["b"] e = Exercise(MarkdownBlock(string, params)) e.add_answer(params["ans_correct"], True, "Correct!") e.add_answer(params["ans_incorrect"], False, MarkdownBlock("Did you compute $@a - @b = @ans_incorrect$ instead?", params)) e.play() # + s = """ What is $@a^\intercal$? """ params = {} params["a"] = sp.Matrix([[1, 2], [3, 4]]) params["ans"] = params["a"].T e = Exercise(MarkdownBlock(s, params)) e.add_answer(params["ans"], True, "You are right!") e.write("demo_transpose") # e.play() # + s = "What is $@a^\intercal$?" params = {} params["a"] = sp.Matrix([[1, 2], [3, 4]]) params["ans"] = params["a"].T e = Exercise(MarkdownBlock(s, params)) e.add_answer(params["ans"], True, "You are right!") e.play() # - # Currently, only a single instance is generated played at a time. Support for multi-instance generation is planned. # ### Working with SymPy objects to represent mathematical objects # We can work with SymPy objects to represent mathematical objects, like vectors and matrices. # An vector addition exercise can be created as follows: # + string = "What is $@v_1 + @v_2$?" params["v_1"] = sp.Matrix([1, 2, 3]) params["v_2"] = sp.Matrix([4, 5, 6]) params["ans"] = params["v_1"] + params["v_2"] e = Exercise(MarkdownBlock(string, params)) e.add_answer(params["ans"], True, "That's right!") e.play() # - # ### Task 2 Parameterized vector addition # Create an exercise asking learners to compute the sum of two vectors of random length (within reasonable limits), with random integer values. # Note: if you prefer NumPy for working with matrices, you are in luck! 
NumPy objects can be passed to the SymPy matrix constructor, e.g. `sp.Matrix(np.arange(4))`. # + # Task 2 user code: # - # ### Task 3 - Matrix indexing # Create an exercise asking learners to identify a value at randomized indices (but within bounds) in a 5 by 5 matrix. # Please make sure all values are unique so there is only one correct answer. # + # Task 3 user code: # - # ### Task 4 - Matrix multiplication # Create an exercise asking users to multiply two matrices. # Provide a default answer explaining the procedure in case a wrong answer is supplied. # You can use the `symbolic_matrix` and `explain_multiply` functions supplied in `helpers.py` as follows: # + from helpers import symbolic_matrix, explain_multiply a = symbolic_matrix("a", 2, 2) b = symbolic_matrix("b", 2, 2) display(explain_multiply(a, b)) a = sp.Matrix([1,2,3]) b = sp.Matrix(np.matrix([5,6,7]).reshape(-1)) display(explain_multiply(a, b)) # + # Task 4 user code: # - # Hooray! # If you made it this far, you completed the notebook! # Please add any additonal comments below. # Thank you for participating! # Write any additional comments here...
usability_evaluation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.11
#     language: python
#     name: python3
# ---

# # Python_Homework

# Import pathlib and csv library
import csv
from pathlib import Path

# Portable path built from components instead of the original hard-coded
# Windows separators ('..\\Resources\\budget_data.csv'), so the script also
# runs on macOS/Linux.
csvpath = Path('..', 'Resources', 'budget_data.csv')


def analyze_budget(rows):
    """Summarize a sequence of (month, profit/loss) records.

    Parameters
    ----------
    rows : iterable of sequences
        Each row has the month label at index 0 and the net profit/loss at
        index 1 (strings as read from CSV are converted with int()).

    Returns
    -------
    dict with keys:
        total_months      -- number of data rows
        total             -- sum of all profit/loss values
        average_change    -- mean month-over-month change (0 if < 2 rows)
        greatest_increase -- [month, change] of the largest positive change
        greatest_decrease -- [month, change] of the largest negative change
    """
    total_months = 0
    total_net_profit_loss = 0
    net_change_list = []
    greatest_increase_profits = ['', 0]  # [month, change]
    greatest_decrease_losses = ['', 0]
    last_months_value = None             # previous month's profit/loss

    for row in rows:
        value = int(row[1])
        total_months += 1
        total_net_profit_loss += value
        if last_months_value is not None:
            # Month-over-month change (only defined from the second row on).
            change = value - last_months_value
            net_change_list.append(change)
            if change > greatest_increase_profits[1]:
                greatest_increase_profits = [row[0], change]
            if change < greatest_decrease_losses[1]:
                greatest_decrease_losses = [row[0], change]
        last_months_value = value

    # Guard against empty/single-row input: the original crashed with
    # ZeroDivisionError when net_change_list was empty.
    if net_change_list:
        total_average = sum(net_change_list) / len(net_change_list)
    else:
        total_average = 0

    return {
        'total_months': total_months,
        'total': total_net_profit_loss,
        'average_change': total_average,
        'greatest_increase': greatest_increase_profits,
        'greatest_decrease': greatest_decrease_losses,
    }


def main():
    """Read the budget CSV (skipping its header) and print the analysis."""
    with open(csvpath) as budget_data:
        read_budget_data = csv.reader(budget_data)
        next(read_budget_data)  # skip header row
        stats = analyze_budget(read_budget_data)

    # Print final result :]
    print(f'Financial Analysis')
    print(f"Total Months:{stats['total_months']}")
    print(f"Total:{stats['total']}")
    print(f"Average Change:{stats['average_change']}")
    print(f"Greatest Increase in Profits:{stats['greatest_increase']}")
    print(f"Greatest Decrease in Profits:{stats['greatest_decrease']}")

    # Celebrate!
    print(f'Boom-Shakalaka!!!')


# In a notebook __name__ is "__main__", so top-to-bottom execution still
# runs the analysis; the guard only keeps library imports side-effect free.
if __name__ == '__main__':
    main()
PyBank/Resources/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # グリッド細胞の発火パターンをPythonで可視化する # > # - toc: true # - badges: true # - comments: true # - categories: [neuroscience] # - author: 山拓 # - image: # ## 概要 # Edvard Moser博士の研究室が公開している、グリッド細胞の活動をPythonで可視化してみました。データは<https://www.ntnu.edu/kavli/research/grid-cell-data>からダウンロードできます。 # # コードを書く上で<http://felix11h.github.io/blog/grid-cell-rate-maps>を参考にしました。一部の関数はこのブログから引用しています。今回は上記のサイトで実装されていない、Gaussian kernelを用いたSmoothed rate mapとAutocorrelation mapの実装をしてみます。 # # > Important: 著者はGrid cellsの研究をしていません。実際の研究で用いられるコードと異なる可能性があります。 # ## グリッド細胞(Grid Cells)について # 実装とは関係ないですが、グリッド細胞についてまとめておきます。 # # ### 空間基底としてのグリッド細胞 # 詳しくは[場所細胞 - 脳科学辞典](https://bsd.neuroinf.jp/wiki/場所細胞#.E5.86.85.E5.81.B4.E5.97.85.E5.86.85.E7.9A.AE.E8.B3.AA.E3.81.AE.E6.A0.BC.E5.AD.90.E7.B4.B0.E8.83.9E)や[2014年のノーベル生理学・医学賞の解説(神経科学学会)](https://www.jnss.org/141031-03/)、[Grid cells (Scholarpedia)](http://www.scholarpedia.org/article/Grid_cells)などをお読みいただければと思います。簡単にまとめると、海馬には場所特異的に発火する**場所細胞**(place cell)があり、これはO'keefe博士によって発見されました。次にMay-Britt Moser博士とEdvard Moser博士は六角形格子状の場所受容野を持つ**グリッド細胞**(格子細胞, grid cell)を内側嗅内皮質(medial entorhinal cortex; MEC)で発見しました。この3人は2014年のノーベル生理学・医学賞を受賞しています。 # # # ![](images/grid_cells_figs/1543060133.jpg) # <http://www.scholarpedia.org/article/Grid_cells>より。左図の黒線はラットの経路、赤は発火が生じた位置。右図は発火率マップ(rate map)。 # # 最近、外側膝状体背側核(dorsal lateral geniculate nucleus)で場所細胞が見つかったそうです(V Hok, et al., 2018, [bioRxiv](https://www.biorxiv.org/content/early/2018/11/19/473520))。 # ## データについて # 公開されているデータはMatLabのmatファイル形式です。しかし、[scipy.io.loadmat](https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.loadmat.html)を用いることでpythonでデータの中身を取得することができます。 # # 使用するデータは以下の通りです。 # # - 
[10704-07070407_POS.mat](https://github.com/Salad-bowl-of-knowledge/hp/blob/master/_notebooks/data/grid_cells_data/10704-07070407_POS.mat) # - [10704-07070407_T2C3.mat](https://github.com/Salad-bowl-of-knowledge/hp/blob/master/_notebooks/data/grid_cells_data/10704-07070407_T2C3.mat) # # これらのファイルは<https://archive.norstore.no/pages/public/datasetDetail.jsf?id=8F6BE356-3277-475C-87B1-C7A977632DA7>からダウンロードできるファイルの一部です。ただし全体で2.23GBあるので、簡単に試したい場合は上記のリンクからダウンロードしてください。以下では`./data/grid_cells_data/`ディレクトリの下にファイルを置いています。 # # データの末尾の"POS"と"T2C3"の意味について説明しておきます。まず、"POS"はpost, posx, posyを含む構造体でそれぞれ試行の経過時間、x座標, y座標です。座標は-50~50で記録されています。恐らく1m四方の正方形の部屋で、原点を部屋の中心としているのだと思います。"T2C3"はtがtetrode(テトロード電極)でcがcell(細胞)を意味します。後ろの数字は番号付けたものと思われます。 # ## Smoothed Rate Mapについて # # 発火率$\lambda(\boldsymbol{x})$は、場所$\boldsymbol{x}=(x,y)$で記録されたスパイクの回数を、場所$\boldsymbol{x}$における滞在時間(s)で割ることで得られます。 $$ \lambda(\boldsymbol{x})=\frac{\displaystyle \sum_{i=1}^n # g\left(\frac{\boldsymbol{s}_i-\boldsymbol{x}}{h}\right)}{\displaystyle \int_0^T g\left(\frac{\boldsymbol{y}(t)-\boldsymbol{x}}{h}\right)dt} $$ ただし、$n$はスパイクの回数、$T$は計測時間、$g(\cdot)$はGaussain # Kernel(中身の分子が平均、分母が標準偏差)、$\boldsymbol{s}_i$は$i$番目のスパイクの発生した位置、$\boldsymbol{y}(t)$は時刻$t$でのラットの位置です。分母は積分になっていますが、実際には離散的に記録をするので、累積和に変更し、$dt$を時間のステップ幅(今回は0.02s)とします。 # # Gaussian Kernelを用いて平滑化することで「10cm四方での発火を同じ位置での発火とする」などとした場合よりも、得られるマップは滑らかになります。 # ### 実装 # まず、ライブラリをインポートしてデータを読み込みます。 # + import numpy as np import matplotlib.pyplot as plt from scipy import io as io from tqdm import tqdm # from http://www.ntnu.edu/kavli/research/grid-cell-data pos = io.loadmat('./data/grid_cells_data/10704-07070407_POS.mat') spk = io.loadmat('./data/grid_cells_data/10704-07070407_T2C3.mat') # - # posファイル内の構造は次のようになっています。 # - `pos["post"]`: times at which positions were recorded # - `pos["posx"]`: x positions # - `pos["posy"]`: y positions # - `spk["cellTS"]`: spike times # # 次に種々の関数を実装します。 def nearest_pos(array, value): k = (np.abs(array - value)).argmin() return k def 
GaussianKernel(sizex, sizey, sigma=0.5, center=None): """ sizex : kernel width sizey : kernel height sigma : gaussian Sd center : gaussian mean return gaussian kernel """ x = np.arange(0, sizex, 1, float) y = np.arange(0, sizey, 1, float) x, y = np.meshgrid(x,y) if center is None: x0 = sizex // 2 y0 = sizey // 2 else: if np.isnan(center[0])==False and np.isnan(center[1])==False: x0 = center[0] y0 = center[1] else: return np.zeros((sizey,sizex)) return np.exp(-((x-x0)**2 + (y-y0)**2) / 2*sigma**2) def smoothed_rate_map(pos, spk, kernel_sigma=0.1, W=100, H=100): # load datas posx = pos["posx"].flatten() posy = pos["posy"].flatten() spkt = spk["cellTS"].flatten() #change positions range: -50 ~ 50 -> 0 ~ H or W posx = (posx + 50) / 100 * W posy = (posy + 50) / 100 * H # find nearest positions when spikes occur indx = [nearest_pos(pos["post"],t) for t in spkt] indy = [nearest_pos(pos["post"],t) for t in spkt] # occup position while trajectory occup_m_list = [] for i in tqdm(range(len(posx))): occup_m_list.append(GaussianKernel(W, H, kernel_sigma, (posx[i], posy[i]))) occup_m = sum(occup_m_list) occup_m *= 0.02 # one time step is 0.02s occup_m[occup_m==0] = 1 # avoid devide by zero # activation activ_m_list = [] for i in tqdm(range(len(spkt))): activ_m_list.append(GaussianKernel(W, H, kernel_sigma, (posx[indx][i] ,posy[indy][i]))) activ_m = sum(activ_m_list) rate_map = activ_m / occup_m return rate_map # 最後に実行します。 rm = smoothed_rate_map(pos, spk, 0.2, 100, 100) plt.figure(figsize=(6,4)) plt.imshow(rm, cmap="jet") plt.colorbar(label="Hz") plt.gca().invert_yaxis() plt.tight_layout() # plt.savefig("smoothed_rate_map.png") plt.show() # ## Autocorrelation Mapについて # # https://core.ac.uk/download/pdf/30859910.pdfのSupporting Online Materialに書いてある式通りに実装してみましたが、遅い&論文と見た目が全く異なるので、[scipy.signal.correlate2d](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.correlate2d.html)を使いました。 # + from scipy.signal import correlate2d rm = smoothed_rate_map(pos, spk, 0.5, 100, 
100) a_corr = correlate2d(rm, rm, fillvalue=5) plt.figure(figsize=(6,4)) plt.imshow(a_corr, cmap="jet") plt.colorbar(label="Autocorrelation") plt.tight_layout() # plt.savefig("autocorr.png") plt.show() # - # 若干論文と図が異なる上、cross-correlationが-1~1の範囲でないのはおかしい気がするのですが、六角形格子が見えているので良しとします。 # ## 参考にした文献・サイト # - <https://github.com/Felix11H/grid_cell_rate_map> # - <https://www.ntnu.edu/kavli/research/grid-cell-data> # - <https://core.ac.uk/download/pdf/30859910.pdf>のSupporting Online Material # - <https://github.com/MattNolanLab/gridcells> # - <https://arxiv.org/pdf/1810.07429.pdf>
_notebooks/2018-11-23-grid_cells.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Calculating Annotation Coverage

# This section shows how to calculate annotation coverage as described here:
#
#     Annotation coverage of Gene Ontology (GO) terms to individual
#     gene products is high for human or model organisms:
#     * 87% of ~20k human protein-coding genes have GO annotations
#     * 76% of ~14k fly protein-coding genes have GO annotations
#     (Apr 27, 2016)
#
# ## 1. Download associations
# NCBI's gene2go file contains annotations of GO terms to Entrez GeneIDs for
# over 35 different species. We are interested in human and fly which have
# the taxids 9606 and 7227 respectively.

# wget ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz
from goatools.base import download_ncbi_associations

# Downloads NCBI's gene2go annotation file (third-party goatools helper).
gene2go = download_ncbi_associations()

# ## 2. Read associations
#
# ### 2a. You can read the associations one species at a time...

# +
from goatools.anno.genetogo_reader import Gene2GoReader

geneid2gos_human = Gene2GoReader(gene2go, taxids=[9606])
geneid2gos_fly = Gene2GoReader(gene2go, taxids=[7227])
# -

# ### 2b. Or you can read 'gene2go' once and load all species...

# +
from collections import defaultdict, namedtuple

# One reader holding the annotations for both taxids at once.
geneid2gos_all = Gene2GoReader(gene2go, taxids=[9606, 7227])
# -

# ## 3. Import protein-coding information for human and fly
# In this example, the background is all human and fly protein-coding genes.
#
# Follow the instructions in the `background_genes_ncbi` notebook to download
# a set of background population genes from NCBI.

# Project-local modules generated from NCBI data: GeneID -> gene-info record.
from genes_ncbi_9606_proteincoding import GENEID2NT as GeneID2nt_human
from genes_ncbi_7227_proteincoding import GENEID2NT as GeneID2nt_fly

# (taxid, background GeneID dict) pairs processed in the loop below.
lst = [
    (9606, GeneID2nt_human),
    (7227, GeneID2nt_fly)
]

print('{N:,} human genes'.format(N=len(GeneID2nt_human)))
print('{N:,} fly genes'.format(N=len(GeneID2nt_fly)))

# ## 4. Calculate Gene Ontology coverage
# Store GO coverage information for *human* and *fly* in the list, **cov_data**.

cov_data = []
# One record per species: taxid, #GO terms seen, #genes with >=1 GO term,
# percent coverage, and #background protein-coding genes.
NtCov = namedtuple("NtCov", "taxid num_GOs num_covgenes coverage num_allgenes")
for taxid, pcGeneID2nt in lst:
    # Get GeneID2GOs association for current species
    geneid2gos = geneid2gos_all.get_id2gos_nss(taxid=taxid)
    # Restrict GeneID2GOs to only protein-coding genes for this report
    pcgene_w_gos = set(geneid2gos.keys()).intersection(set(pcGeneID2nt.keys()))
    num_pcgene_w_gos = len(pcgene_w_gos)
    num_pc_genes = len(pcGeneID2nt)
    # Number of GO terms annotated to protein-coding genes
    gos_pcgenes = set()
    for geneid in pcgene_w_gos:
        gos_pcgenes |= geneid2gos[geneid]
    # Print report data
    cov_data.append(NtCov(
        taxid = taxid,
        num_GOs = len(gos_pcgenes),
        num_covgenes = num_pcgene_w_gos,
        coverage = 100.0*num_pcgene_w_gos/num_pc_genes,
        num_allgenes = num_pc_genes))

# ## 5 Report Gene Ontology coverage for human and fly
# Print the *human* and *fly* GO coverage information that is stored in the
# list, **cov_data**.

# +
# NOTE(review): `from __future__ import ...` is only legal at the very top of
# a module. This works when the cell is executed on its own in a notebook,
# but running this jupytext script top-to-bottom as plain Python would raise
# a SyntaxError here.
from __future__ import print_function

print(" taxid GOs GeneIDs Coverage")
print("------ ------ ------- ----------------------")
fmtstr = "{TAXID:>6} {N:>6,} {M:>7,} {COV:2.0f}% GO coverage of {TOT:,} protein-coding genes"
for nt in cov_data:
    print(fmtstr.format(
        TAXID = nt.taxid,
        N = nt.num_GOs,
        M = nt.num_covgenes,
        COV = nt.coverage,
        TOT = nt.num_allgenes))
# -

# Copyright (C) 2016-present, <NAME>, <NAME>. All rights reserved.
notebooks/annotation_coverage.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
# -

# Evaluation grid: (cwid+1) x-coordinates by (chei+1) y-coordinates,
# flattened into an (N, 2) design matrix X with N = 130*66 = 8580 points.
chei = 129  # number of cells vertically (y direction)
cwid = 65   # number of cells horizontally (x direction)
x = np.linspace(0.0, 3.467, cwid+1)
y = np.linspace(-0.95, -7.05, chei+1)
xx,yy = np.meshgrid(x,y)
X = np.vstack((xx.flatten(), yy.flatten())).T

X.shape

# Set vertical scale to 0.25 for the channels facies and 0.2 for the background till facies.

# Anisotropic RBF: horizontal length scale 1.0, vertical 0.2.
kernel = 1.0 * RBF(length_scale=[1.0,0.2]) #, length_scale_bounds=[(1e-1, 10.0),(1e-1,10.0)])
#kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0))
gp = GaussianProcessRegressor(kernel=kernel)

# The GP is never fitted, so predict/sample_y return the *prior* mean,
# covariance and draws of the kernel evaluated at X (intended here).
y_mean, y_cov = gp.predict(X, return_cov=True)
y_samples = gp.sample_y(X, 1, random_state=5478)

y_samples.shape

# +
# Display one prior sample on the physical grid (half-cell padding so pixel
# centers line up with the grid coordinates).
fig,ax = plt.subplots()
dx = (x[1]-x[0])/2.
dy = (y[1]-y[0])/2.
extent = [x[0]-dx, x[-1]+dx, y[0]-dy, y[-1]+dy]
ax.imshow(y_samples[:,0].reshape(chei+1,cwid+1), extent=extent)
#fig,ax = plt.subplots()
#ax.pcolormesh(xx,yy,y_samples[:,0].reshape(chei+1,cwid+1))
#ax.axis('equal')
# -

# Karhunen-Loeve basis: eigendecomposition of the prior covariance.
# NOTE(review): y_cov is symmetric, so np.linalg.eigh would be the natural
# choice here; np.linalg.eig guarantees no eigenvalue ordering, and the
# [:nc] slices below implicitly assume the leading eigenvalues come first —
# confirm against the plot of keigvals before relying on this.
keigvals, keigvecs = np.linalg.eig(y_cov)
keigvecs = np.real(keigvecs)
keigvals = np.real(keigvals)

plt.plot(keigvals[:200])

# Project the sample onto the first nc scaled eigenvectors -> KL
# coefficients zs (expected to be ~ standard normal; see histogram below).
nc = 150
zs = (1/np.sqrt(keigvals[:nc]).reshape(-1,1)*keigvecs[:,:nc].T)@y_samples

zs.shape

plt.hist(zs)

# Use the first nc eigenvectors

# Reconstruct the original sample from its truncated KL expansion...
recon = (np.sqrt(keigvals[:nc])*zs[:nc,0]).T@keigvecs[:,:nc].T

# ...and generate a brand-new realization from fresh standard-normal
# coefficients. 8580 = (chei+1)*(cwid+1), the number of grid points.
stnorm = np.random.randn(8580)
generated = (np.sqrt(keigvals[:nc])*stnorm[:nc]).T@keigvecs[:,:nc].T

fig,ax = plt.subplots(1,2)
ax[0].imshow(recon.reshape(chei+1,cwid+1))
ax[1].imshow(generated.reshape(chei+1,cwid+1))

# Prior covariance evaluated directly from the kernel, for visual
# comparison with y_cov.
Ki = kernel.__call__(X)

fig,ax = plt.subplots(figsize=(15,15))
plt.imshow(Ki[:,:])

# Save eigenvalues and eigenvectors:

np.save('keigvalsh1v02.npy', keigvals)
np.save('keigvecsh1v02.npy', keigvecs)
training/GPsimulation_KL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- <NAME>, Data Scientist - Who has fun LEARNING, EXPLORING & GROWING import nltk # ## Simple sentiment analysis # Compute the proportion of positive and negative words in a text. # # Check the Hu and Liu's sentiment alalysis lexicon: words coded as either positive or negative: p_url = 'http://ptrckprry.com/course/ssd/data/positive-words.txt' n_url = 'http://ptrckprry.com/course/ssd/data/negative-words.txt' def get_words(url): import requests words = requests.get(url).content.decode('latin-1') word_list = words.split('\n') index = 0 while index < len(word_list): word = word_list[index] if ';' in word or not word: word_list.pop(index) else: index+=1 return word_list positive_words = get_words(p_url) positive_words negative_words = get_words(n_url) negative_words # #### Read the text being analyzed and count the proportion of positive and negative words in the text with open('data/community.txt','r') as f: community = f.read() with open('data/le_monde.txt','r')as f: le_monde = f.read() # #### Compute sentiment by looking at the proportion of positive and negative words it the text # + from nltk import word_tokenize cpos = cneg = lpos = lneg = 0 for word in word_tokenize(community): if word in positive_words: cpos += 1 if word in negative_words: cneg += 1 for word in word_tokenize(le_monde): if word in positive_words: lpos += 1 if word in negative_words: lneg += 1 # - community print("text \t pos \t neg \t\t diff") print("-------------------------------------------------") print("community {0:1.2f}%\t {1:1.2f}%\t {2:1.2f}%". format(cpos/len(word_tokenize(community))*100, cneg/len(word_tokenize(community))*100, (cpos-cneg)/len(word_tokenize(community))*100)) print("le_monde {0:1.2f}%\t {1:1.2f}%\t {2:1.2f}%". 
format(lpos/len(word_tokenize(le_monde))*100, lneg/len(word_tokenize(le_monde))*100, (lpos-lneg)/len(word_tokenize(le_monde))*100)) # ## Simple sentiment analysis using NRC data # - NRC data codifies words with emotions # - 14,182 words are coded into 2 sentimets and 8 emotions # + nrc = "data/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt" count = 0 emotion_dict = dict() with open(nrc,'r') as f: all_lines = list() for line in f: if count < 46: count+=1 continue line = line.strip().split('\t') if int(line[2]) == 1: if emotion_dict.get(line[0]): emotion_dict[line[0]].append(line[1]) else: emotion_dict[line[0]] = [line[1]] # - emotion_dict # #### Fanctionalize this def get_nrc_data(): nrc = "data/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt" count = 0 emotion_dict = dict() with open(nrc,'r') as f: all_lines = list() for line in f: if count < 46: count+=1 continue line = line.strip().split('\t') if int(line[2]) == 1: if emotion_dict.get(line[0]): emotion_dict[line[0]].append(line[1]) else: emotion_dict[line[0]] = [line[1]] return emotion_dict emotion_dict = get_nrc_data() emotion_dict['cliff'] # ### YELP Fallback # + import pickle with open('yelp_data.pickle','rb') as fp: all_snippets = pickle.load(fp) all_snippets # - # ## A function that analyzes emotions emotion_dict emotion_dict.values() def emotion_analyzer(text,emotion_dict=emotion_dict): # Set up the result dictionary emotions = {x for y in emotion_dict.values() for x in y} # print(type(emotions),emotions) emotion_count = dict() for emotion in emotions: emotion_count[emotion] = 0 # Analyze the text and normalize by total number of words total_words = len(text.split()) for word in text.split(): if emotion_dict.get(word): for emotion in emotion_dict.get(word): emotion_count[emotion] += 1/len(text.split()) return emotion_count # ### Now we can analyze the emotional content of the review snippets # + print("%-12s %1s\t%1s %1s %1s %1s %1s %1s %1s %1s"%( 
"restaurant","fear","trust","negative","positive","joy","disgust","anticip", "sadness","surprise")) for snippet in all_snippets: text = snippet[2] result = emotion_analyzer(text) print("%-12s %1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f"%( snippet[1][0:10],result['fear'],result['trust'], result['negative'],result['positive'],result['joy'],result['disgust'], result['anticipation'],result['sadness'],result['surprise'])) # - # ### Let's functionalize this def comparitive_emotion_analyzer(text_tuples): print("%-20s %1s\t%1s %1s %1s %1s %1s %1s %1s %1s"%( "restaurant","fear","trust","negative","positive","joy","disgust","anticip", "sadness","surprise")) for text_tuple in text_tuples: text = text_tuple[2] result = emotion_analyzer(text) print("%-20s %1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f"%( text_tuple[1][0:20],result['fear'],result['trust'], result['negative'],result['positive'],result['joy'],result['disgust'], result['anticipation'],result['sadness'],result['surprise'])) comparitive_emotion_analyzer(all_snippets) # ### and let's functionalize the yelp stuff as well # CLIENT_ID = '' API_KEY = '' with open('Yelp_API_ID_Key.txt','r') as f: count = 0 for line in f: if count == 0: CLIENT_ID = line.strip() if count == 1: API_KEY = line.strip() count+=1 print(CLIENT_ID,API_KEY,sep='\n') # API constants NOT to change API_HOST = 'https://api.yelp.com' #The API url header SEARCH_PATH = '/v3/businesses/search' #The path for an API request to find businesses BUSINESS_PATH = '/v3/businesses/' # The path to get data for a single business # ### Now we can get reviews # - get_reviews(location,number=15) returns the reviews of 'number' (default=15) restaurants in the vicinity of 'location' # - First, we'll write a function that gets restaurants in the vicinity of location def get_restaurants(api_key,location,number=15): import requests # First, we get the access token # Set up the search data dictionary search_data = { 'term':"restaurant", 
'location':location.replace(' ', '+'), 'limit': number } url = API_HOST + SEARCH_PATH headers = { 'Authorization':"Bearer %s" % api_key, } response = requests.request('GET',url,headers=headers,params=search_data).json() businesses = response.get('businesses') return businesses get_restaurants(API_KEY,'Columbia University,New York,NY') # Then a function, that given a business id, returns a string containing the reviews def get_business_review(api_key,business_id): import json import requests business_path = BUSINESS_PATH + business_id+'/reviews' url = API_HOST + business_path headers = { 'Authorization':'Bearer %s' % api_key, } response = requests.request('GET',url,headers=headers).json() review_text = '' for review in response['reviews']: review_text += review['text'] return review_text get_business_review(API_KEY,'friedmans-new-york-62') # #### Finally, put all this together to get review data for the set of restaurants def get_reviews(location,number=15): restaurants = get_restaurants(API_KEY,location,number) if not restaurants: return None review_list = list() for restaurant in restaurants: restaurant_name = restaurant['name'] restaurant_id = restaurant['id'] review_text = get_business_review(API_KEY,restaurant_id) review_list.append((restaurant_id,restaurant_name,review_text)) return review_list all_snippets = get_reviews('Columbia University,New York,NY') all_snippets def analyze_nearby_restaurants(address,number=15): snippets = get_reviews(address,number) comparitive_emotion_analyzer(snippets) analyze_nearby_restaurants("Community Food and Juice, New York, NY",15) analyze_nearby_restaurants('Traffalgar Square,London,UK',15) # ## Simple analysis: Word Clouds # #### Let's see what sort of words the snippets use # 1. We'll combine all snippets into one string # 2. Then we'll generate a word cloud using the words in the string # 3. 
You may need to install wordcloud using _pip install wordcloud_ # !pip install wordcloud all_snippets text='' for snippet in all_snippets: text+=snippet[2] text from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt # %matplotlib inline # + wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white',width=3000,height=3000).generate(text) plt.imshow(wordcloud) plt.axis('off') plt.show() # - # ### Let's do a detailed comparison of local restaurants # I've saved a few reviews for each restaurant in four directories. We'll use the PlaintextCorpusReader to read these directories: # - PlaintextCorpusReader reads all matching files in a directory and saves them by file-ids # + import nltk from nltk.corpus import PlaintextCorpusReader community_root = "data/community" le_monde_root = "data/le_monde" community_files = "community.*" le_monde_files = "le_monde.*" heights_root = "data/heights" heights_files = "heights.*" amigos_root = "data/amigos" amigos_files = "amigos.*" community_data = PlaintextCorpusReader(community_root,community_files) le_monde_data = PlaintextCorpusReader(le_monde_root,le_monde_files) heights_data = PlaintextCorpusReader(heights_root,heights_files) amigos_data = PlaintextCorpusReader(amigos_root,amigos_files) # - amigos_data.fileids() amigos_data.raw() # #### We need to modify comparitive_emotion_analyzer to tell it where the restaurant name and the text is in the tuple # + def comparative_emotion_analyzer(text_tuples,name_location=1,text_location=1): print("%-20s %1s\t%1s %1s %1s %1s %1s %1s %1s %1s"%( "restaurant","fear","trust","negative","positive","joy","disgust","anticip", "sadness","surprise")) for text_tuple in text_tuples: text = text_tuple[text_location] result = emotion_analyzer(text) print("%-20s %1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f\t%1.2f"%( text_tuple[name_location][0:20],result['fear'],result['trust'], result['negative'],result['positive'],result['joy'],result['disgust'], 
result['anticipation'],result['sadness'],result['surprise'])) #And test it comparative_emotion_analyzer(all_snippets) # - restaurant_data = [('community',community_data.raw()),('le monde',le_monde_data.raw()) ,('heights',heights_data.raw()), ('amigos',amigos_data.raw())] comparative_emotion_analyzer(restaurant_data,0,1) # ## Simple Analysis: Complexity Factors: # - average word lengh: longer words adds to complexity # - average sentence lengh: longer sentences are more complex # - vocabulary: the ratio of unique words used to the total number of words # # __token:__ A sequence (or group) of characters of interest. For e.g., in the below analysis, a token = a word # # - Generally: A token is the base unit of analysis # - So, the first step is to convert text into tokens and __nltk__ text object # Construct tokens (words/sentences) from the text text = le_monde_data.raw() text import nltk from nltk import sent_tokenize, word_tokenize # + sentences = nltk.Text(sent_tokenize(text)) print(len(sentences)) words = nltk.Text(word_tokenize(text)) print(len(words)) # + num_chars = len(text) num_words = len(word_tokenize(text)) num_sentences = len(sent_tokenize(text)) vocab = {x.lower() for x in word_tokenize(text)} print(num_chars,int(num_chars/num_words),int(num_words/num_sentences),(len(vocab)/num_words)) # - # ### Functionalize this def get_complexity(text): num_chars = len(text) num_words = len(word_tokenize(text)) num_sentences = len(sent_tokenize(text)) vocab = {x.lower() for x in word_tokenize(text)} return len(vocab),int(num_chars/num_words),int(num_words/num_sentences),len(vocab)/num_words get_complexity(le_monde_data.raw()) for text in restaurant_data: (vocab,word_size,sent_size,vocab_to_text) = get_complexity(text[1]) print("{0:15s}\t{1:1.2f}\t{2:1.2f}\t{3:1.2f}\t{4:1.2f}". 
format(text[0],vocab,word_size,sent_size,vocab_to_text)) # ### We could do a word cloud comparison # We'll remove short words and look only at words longer than 6 letters # + texts = restaurant_data from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt # %matplotlib inline #Remove unwanted words #As we look at the cloud, we can get rid of words that don't make sense by adding them to this variable DELETE_WORDS = [] def remove_words(text_string,DELETE_WORDS=DELETE_WORDS): for word in DELETE_WORDS: text_string = text_string.replace(word,' ') return text_string #Remove short words MIN_LENGTH = 0 def remove_short_words(text_string,min_length = MIN_LENGTH): word_list = text_string.split() for word in word_list: if len(word) < min_length: text_string = text_string.replace(' '+word+' ',' ',1) return text_string #Set up side by side clouds COL_NUM = 2 ROW_NUM = 2 fig, axes = plt.subplots(ROW_NUM, COL_NUM, figsize=(12,12)) for i in range(0,len(texts)): text_string = remove_words(texts[i][1]) text_string = remove_short_words(text_string) ax = axes[i%2] ax = axes[i//2, i%2] #Use this if ROW_NUM >=2 ax.set_title(texts[i][0]) wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white', width=1200,height=1000,max_words=20).generate(text_string) ax.imshow(wordcloud) ax.axis('off') plt.show() # - # ### Comparing complexity of restaurant reviews won't get us anything useful - let's lool at # # <h3>ntlk documentation link:</h3> http://www.nltk.org/api/nltk.html # <h3>Commands cheat sheet</h3> https://blogs.princeton.edu/etc/wp-content/uploads/sites/32/2014/03/Text-Analysis-with-NLTK-Cheatsheet.pdf # <h3>nltk book</h3>http://www.nltk.org/book/ # # ### nltk contains a large corpora of pre-tokenized text # Load it using the command: # # __nltk.download()__ from nltk.book import * # <h1>Often, a comparitive analysis helps us understand text better</h1> # <h2>Let's look at US President inaugural speeches</h2> # <h4>Copy the files 2013-Obama.txt and 
2017-Trump.txt to the nltk_data/corpora/inaugural directory. nltk_data should be under your home directory</h4> inaugural.fileids() inaugural.raw('1861-Lincoln.txt') texts = [('trump',inaugural.raw('2017-Trump.txt')), ('obama',inaugural.raw('2009-Obama.txt')+inaugural.raw('2013-Obama.txt')), ('jackson',inaugural.raw('1829-Jackson.txt')+inaugural.raw('1833-Jackson.txt')), ('washington',inaugural.raw('1789-Washington.txt')+inaugural.raw('1793-Washington.txt'))] for text in texts: (vocab,word_size,sent_size,vocab_to_text) = get_complexity(text[1]) print("{0:15s}\t{1:1.2f}\t{2:1.2f}\t{3:1.2f}\t{4:1.2f}". format(text[0],vocab,word_size,sent_size,vocab_to_text)) # <h2>Analysis over time</h2> # # <h3>The files are arranged over time so we can analyze how complexity has changed between Washington and Trump</h3> from nltk.corpus import inaugural sentence_lengths = list() for fileid in inaugural.fileids(): sentence_lengths.append(get_complexity(' '.join(inaugural.words(fileid)))[2]) plt.plot(sentence_lengths) # ## Dispersion plots show the relative frequency of words over the text # Let's see how the frequency of some words has changed over the course of the republic. 
That should give us some idea of how the focus of the nation changed text4.dispersion_plot(["government", "citizen", "freedom", "duties", "America",'independence','God','patriotism']) text4 # __We may want to use word stems rather than the part of speech form__ # # - For example: patriot, patriotic, patriotism all express roughly the same idea # - nltk has a stemmer that implements the "Porter Stemming Algorithm" (https://tartarus.org/martin/PorterStemmer/) # - We'll push everything to lowercase as well from nltk.stem.porter import PorterStemmer p_stemmer = PorterStemmer() text = inaugural.raw() striptext = text.replace('\n\n',' ') striptext = striptext.replace('\n',' ') sentences = sent_tokenize(striptext) words = word_tokenize(striptext) text = nltk.Text([p_stemmer.stem(i).lower() for i in words]) text.dispersion_plot(['govern','citizen','free','america','independ','god','patriot']) # !pip install vaderSentiment from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer # ## Weighted word analysis using Vader # Vader contains a list of 7500 features weighted by how positive or negative they are. It uses these features to calculate stats on how positive or negative or neutral a passage is. And combimes these results to give a compound sentiment (higher = more positive) for the passage. # # Human trained on twitter data and generally consedered good for informal communication. 10 humans rated each feature in each tweet in context from -4 to +4. 
# # #### Calculates the sentiment in a sentence using word order analysis # # - 'Marginally good' will get a lower positive score than 'extremely good' # # #### Computes a 'compound' score based on heuristics (between -1 and +1) # Includes sentiment of emoticons, punctuation, and other 'social media' lexicon elements headers = ['pos','neg','neu','compound'] texts = restaurant_data analyzer = SentimentIntensityAnalyzer() for i in range(len(texts)): name = texts[i][0] sentences = sent_tokenize(texts[i][1]) pos=compound=neu=neg=0 for sentence in sentences: vs = analyzer.polarity_scores(sentence) pos+=vs['pos']/(len(sentences)) neg+=vs['neg']/(len(sentences)) neu+=vs['neu']/(len(sentences)) compound+=vs['compound']/(len(sentences)) print(name,pos,neg,neu,compound) # #### And functionalize this as well def vader_comparison(texts): from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer headers = ['pos','neg','neu','compound'] print("Name\t",' pos\t','neg\t','neu\t','compound') analyzer = SentimentIntensityAnalyzer() for i in range(len(texts)): name = texts[i][0] sentences = sent_tokenize(texts[i][1]) pos=compound=neu=neg=0 for sentence in sentences: vs = analyzer.polarity_scores(sentence) pos+=vs['pos']/(len(sentences)) compound+=vs['compound']/(len(sentences)) neu+=vs['neu']/(len(sentences)) neg+=vs['neg']/(len(sentences)) print('%-10s'%name,'%1.2f\t'%pos,'%1.2f\t'%neg,'%1.2f\t'%neu,'%1.2f\t'%compound) vader_comparison(restaurant_data) # ## Named Entities # ### People, places, organizations # Named entities are often the subject of sentiments so identifying them can be very useful. # # ### Named entity detection is based on Part-of-speech tagging of words and chuncks (group of words) # # - Start with sentences (using a sentence tokenizer) # - tokenize words in each sentence # - chunck them. 
ne_chunk identifies likely chunked candidates (ne = named entity) # - Finally, build chunks using nltk's __guess__ on what members of chunck represent (people,place,organization) en={} try: sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') sentences = sent_detector.tokenize(community_data.raw().strip()) for sentence in sentences: tokenized = nltk.word_tokenize(sentence) tagged = nltk.pos_tag(tokenized) chunked = nltk.ne_chunk(tagged) for tree in chunked: if hasattr(tree, 'label'): ne = ' '.join(c[0] for c in tree.leaves()) en[ne] = [tree.label(), ' '.join(c[1] for c in tree.leaves())] except Exception as e: print(str(e)) import pprint pp = pprint.PrettyPrinter(indent=4) pp.pprint(en) # __Assuming we've done a good job of identifying named entities, we can get an affect score on entities__ # + meaningful_sents = list() i = 0 for sentence in sentences: if 'service' in sentence: i+=1 meaningful_sents.append((i,sentence)) vader_comparison(meaningful_sents) # - # __We could also develop a affect calculator for common terms in our domain (e.g., food items)__ def get_affect(text,word,lower=False): import nltk from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') sentences = sent_detector.tokenize(text.strip()) sentence_count =0 running_total = 0 for sentence in sentences: if lower: sentence = sentence.lower() if word in sentence: vs = analyzer.polarity_scores(sentence) running_total += vs['compound'] sentence_count += 1 if sentence_count == 0: return 0 return running_total/sentence_count get_affect(community_data.raw(),'service',True) get_affect(le_monde_data.raw(),'service',True) get_affect(amigos_data.raw(),'service',True) get_affect(community_data.raw(),'brunch',True) get_affect(community_data.raw(),'dinner',True) get_affect(heights_data.raw(),'dinner',True) # __The nltk function concordance returns text fragments around a word__ 
nltk.Text(community_data.words()).concordance('service',100) nltk.Text(le_monde_data.words()).concordance('service',100) # <h2>Text summarization</h2> # <h4>Text summarization is useful because you can generate a short summary of a large piece of text automatically</h4> # <h4>Then, these summaries can serve as an input into a topic analyzer to figure out what the main topic of the text is</h4> # # A naive form of summarization is to identify the most frequent words in a piece of text and use the occurrence of these words in sentences to rate the importance of a sentence. from nltk.tokenize import word_tokenize from nltk.tokenize import sent_tokenize from nltk.probability import FreqDist from nltk.corpus import stopwords from collections import OrderedDict import pprint # __Then prep the text. Get did of end of line chars__ text = community_data.raw() summary_sentences=[] candidate_sentences={} candidate_sentence_counts={} striptext = text.replace('\n\n',' ') striptext = striptext.replace('\n',' ') # #### Construct a list of words after getting rid of unimportant ones and numbers words = word_tokenize(striptext) lowercase_words = [word.lower() for word in words if word not in stopwords.words() and word.isalpha()] # #### Construct word frequencies and choose the most common (20) # + word_frequenceies = FreqDist(lowercase_words) most_frequent_words = FreqDist(lowercase_words).most_common(20) pp = pprint.PrettyPrinter(indent=4) pp.pprint(most_frequent_words) # - # __lowercase the sentences__ # candidate_sentences is a dictionary with the original sentence as the key, and its lowercase version as the value sentences = sent_tokenize(striptext) for sentence in sentences: candidate_sentences[sentence] = sentence.lower() candidate_sentences # for Uppercase, lowercase in candidate_items(): for long,short in candidate_sentences.items(): count = 0 for freq_word, frequency_score in most_frequent_words: if freq_word in short: count += frequency_score 
candidate_sentence_counts[long] = count candidate_sentences.items() candidate_sentence_counts sorted_sentences = OrderedDict(sorted( candidate_sentence_counts.items(), key = lambda x: x[1], reverse = True)[:4]) pp.pprint(sorted_sentences) # __Packaging all this into a function__ def build_naive_summary(text): from nltk.tokenize import word_tokenize from nltk.tokenize import sent_tokenize from nltk.probability import FreqDist from nltk.corpus import stopwords from collections import OrderedDict summary_sentences = [] candidate_sentences = {} candidate_sentence_counts = {} striptext = text.replace('\n\n', ' ') striptext = striptext.replace('\n', ' ') words = word_tokenize(striptext) lowercase_words = [word.lower() for word in words if word not in stopwords.words() and word.isalpha()] word_frequencies = FreqDist(lowercase_words) most_frequent_words = FreqDist(lowercase_words).most_common(20) sentences = sent_tokenize(striptext) for sentence in sentences: candidate_sentences[sentence] = sentence.lower() for long, short in candidate_sentences.items(): count = 0 for freq_word, frequency_score in most_frequent_words: if freq_word in short: count += frequency_score candidate_sentence_counts[long] = count sorted_sentences = OrderedDict(sorted( candidate_sentence_counts.items(), key = lambda x: x[1], reverse = True)[:4]) return sorted_sentences summary = '\n'.join(build_naive_summary(community_data.raw())) print(summary) summary = '\n'.join(build_naive_summary(le_monde_data.raw())) print(summary) # #### We can summarize George Washington's first inaugural speech build_naive_summary(inaugural.raw('1789-Washington.txt')) build_naive_summary(inaugural.raw('2017-Trump.txt')) # ## Gensim: another text summarizer # Gensim uses a network with sentences as nodes and 'lexical similarity' as weights on the arcs between nodes. 
# # First, let's check all our imports and stuff: from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt # %matplotlib inline import nltk from nltk.corpus import PlaintextCorpusReader from nltk import sent_tokenize,word_tokenize from nltk.book import * from nltk.corpus import PlaintextCorpusReader community_root = "data/community" le_monde_root = "data/le_monde" community_files = "community.*" le_monde_files = "le_monde.*" heights_root = "data/heights" heights_files = "heights.*" amigos_root = "data/amigos" amigos_files = "amigos.*" community_data = PlaintextCorpusReader(community_root,community_files) le_monde_data = PlaintextCorpusReader(le_monde_root,le_monde_files) heights_data = PlaintextCorpusReader(heights_root,heights_files) amigos_data = PlaintextCorpusReader(amigos_root,amigos_files) type(community_data) text = community_data.raw() summary_sentences = [] candidate_sentences = {} candidate_sentence_counts = {} striptext = text.replace('\n\n', ' ') striptext = striptext.replace('\n', ' ') # ### Now install gensim # !pip install gensim import gensim.summarization summary = gensim.summarization.summarize(striptext,word_count=100) print(summary) summary = '\n'.join(build_naive_summary(community_data.raw())) print(summary) print(gensim.summarization.keywords(striptext,words=10)) # <h1>Topic modeling</h1> # <h4>The goal of topic modeling is to identify the major concepts underlying a piece of text</h4> # <h4>Topic modeling uses "Unsupervised Learning". No apriori knowledge is necessary # <li>Though it is helpful in cleaning up results! 
# <h3>LDA: Latent Dirichlet Allocation Model</h3> # <li>Identifies potential topics using pruning techniques like 'upward closure' # <li>Computes conditional probabilities for topic word sets # <li>Identifies the most likely topics # <li>Does this over multiple passes probabilistically picking topics in each pass # <li>Good intuitive explanation: http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/ from gensim import corpora from gensim.models.ldamodel import LdaModel from gensim.parsing.preprocessing import STOPWORDS import pprint # #### Prepare the text text = PlaintextCorpusReader('data/','Nikon_coolpix_4300.txt').raw() striptext = text.replace('\n\n',' ') striptext = striptext.replace('\n',' ') sentences = sent_tokenize(striptext) texts = [[word for word in sentence.lower().split() if word not in STOPWORDS and word.isalnum()] for sentence in sentences] len(texts) # #### Create a (word,frequency) dictionary for each word in the text print(text) texts dictionary = corpora.Dictionary(texts) print(dictionary) print(type(dictionary)) print('-------------------------------------------------------------------------------------------') print(dictionary.token2id) # (word_id,frequency) pairs corpus = [dictionary.doc2bow(text) for text in texts] #(word_id,freq) pairs be sentence print(type(corpus)) print(corpus[9]) # shows how many times the word (replaced by number) occurs in the sentebce 9 print(texts[9]) print(dictionary[73]) # ## Do the LDA itself # ### Parameters: # - Numbers of topics: The number of topics you want generated. The larger the document the more the desirable topics # - Passes: The LDA model makes through the document. 
More passes, slower analysis # Set parameters num_topics = 5 passes = 10 lda = LdaModel(corpus, id2word=dictionary, num_topics=num_topics, passes=passes) # #### We'll get the main words of the topic with the weight (essentially, the probability that this word will be on this topic) for each of them: pp = pprint.PrettyPrinter(indent=4) pp.pprint(lda.print_topics(num_words=3)) pp.pprint(lda.print_topics(num_words=5)) # #### We can see that words _camera_ and _digital_ are redundant here. Let's rid out them: texts[2] text = PlaintextCorpusReader('data/','Nikon_coolpix_4300.txt').raw() my_stopwords=['camera','digital'] striptext = text.replace('\n\n',' ') striptext = striptext.replace('\n',' ') sentences = sent_tokenize(striptext) texts = [[word for word in sentence.lower().split() if word not in STOPWORDS and word.isalnum() and word not in my_stopwords] for sentence in sentences] texts[2] dictionary = corpora.Dictionary(texts) corpus = [dictionary.doc2bow(text) for text in texts] corpus[2] lda = LdaModel(corpus, id2word=dictionary, num_topics=num_topics, passes=passes) pp.pprint(lda.print_topics(num_words=3)) # So how do we match topics to documents? # Well, the idea roughly here is that what you do is you-- # let's say you have 1,000 different documents, # and you have a new document coming in. # What you want to do really is you # want to take your initial set of 1,000 documents, # print it up into a training and test sample, # take the training sample, look at that for possible topics, # apply those, do some analysis of the clause we've been doing # and figure out what the topics are, apply those to the-- # take the documents in the test sample # and run them through the topic analyzer to see, # given the trained set of topics, what # topics show up inside the test one, # and see if that makes sense. # So you have to do some work upfront, right? 
# And then what you want to be able to do # is that, as new documents come in, # you want to be able to say what are the main topics # that this new document has using the analysis you've # done before. # ## Matching topics to documents # ### Sort topics by probability # #### We're using sentences as documents here, so this is less than ideal # We can take our documents here and sort them by topic ID # here. # And we give it one document here-- corpus 0. # So corpus 0 is the first sentence in our document from operator import itemgetter lda.get_document_topics(corpus[0],minimum_probability=0.05,per_word_topics=False) sorted(lda.get_document_topics(corpus[0],minimum_probability=0,per_word_topics=False), key=itemgetter(1),reverse=True) # So, our first sentence is focused on the topic number 4 # ## Making sense of the topics # Draw wordclouds def draw_wordcloud(lda,topicnum,min_size=0,STOPWORDS=[]): word_list=[] prob_total = 0 for word,prob in lda.show_topic(topicnum,topn=50): prob_total += prob for word,prob in lda.show_topic(topicnum,topn=50): if word in STOPWORDS or len(word)<min_size: continue freq = int(prob/prob_total*1000) alist=[word] word_list.extend(alist*freq) from wordcloud import WordCloud,STOPWORDS import matplotlib.pyplot as plt # %matplotlib inline text = ' '.join(word_list) wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white', width=3000,height=3000).generate(' '.join(word_list)) plt.imshow(wordcloud) plt.axis('off') plt.show() draw_wordcloud(lda,2) # <h3>Let's look at Presidential addresses to see what sorts of topics emerge from there</h3> # <li>Each document will be analyzed for topic</li> # <li>The corpus will consist of 58 documents, one per presidential address # + REMOVE_WORDS = {'shall','generally','spirit','country','people','nation','nations','great','better'} #Create a word dictionary (id, word) texts = [[word for word in sentence.lower().split() if word not in STOPWORDS and word not in REMOVE_WORDS and word.isalnum()] for 
sentence in sentences] dictionary = corpora.Dictionary(texts) #Create a corpus of documents text_list = list() for fileid in inaugural.fileids(): text = inaugural.words(fileid) doc=list() for word in text: if word in STOPWORDS or word in REMOVE_WORDS or not word.isalpha() or len(word) <5: continue doc.append(word) text_list.append(doc) by_address_corpus = [dictionary.doc2bow(text) for text in text_list] # - # ## Create the model lda = LdaModel(by_address_corpus, id2word=dictionary, num_topics=20, passes=10) pp=pprint.PrettyPrinter(indent=4) pp.pprint(lda.print_topics(num_words=10)) # ## We can now compare presidential addresses by topics len(by_address_corpus) from operator import itemgetter sorted(lda.get_document_topics(by_address_corpus[57],minimum_probability=0,per_word_topics=False),key=itemgetter(1),reverse=True) draw_wordcloud(lda,4) print(lda.show_topic(4,topn=5)) print(lda.show_topic(11,topn=5)) # ## Similarity # ### Given a corpus of documents, when a new document arrives, find the document that is the most similar # + doc_list = [community_data,le_monde_data,amigos_data,heights_data] all_text = community_data.raw()+le_monde_data.raw()+amigos_data.raw()+heights_data.raw() documents = [doc.raw() for doc in doc_list] texts = [[word for word in document.lower().split() if word not in STOPWORDS and word.isalnum()] for document in documents] dictionary = corpora.Dictionary(texts) corpus = [dictionary.doc2bow(text) for text in texts] # + from gensim.similarities.docsim import Similarity from gensim import corpora, models, similarities lsi = models.LsiModel(corpus,id2word=dictionary,num_topics=2) doc = """ Many, many years ago, I used to frequent this place for their amazing french toast. It's been a while since then and I've been hesitant to review a place I haven't been to in 7-8 years... but I passed by French Roast and, feeling nostalgic, decided to go back. It was a great decision. 
Their Bloody Mary is fantastic and includes bacon (which was perfectly cooked!!), olives, cucumber, and celery. The Irish coffee is also excellent, even without the cream which is what I ordered. Great food, great drinks, a great ambiance that is casual yet familiar like a tiny little French cafe. I highly recommend coming here, and will be back whenever I'm in the area next. Juan, the bartender, is great!! One of the best in any brunch spot in the city, by far. """ vec_bow = dictionary.doc2bow(doc.lower().split()) vec_lsi = lsi[vec_bow] index = similarities.MatrixSimilarity(lsi[corpus]) sims = index[vec_lsi] sims = sorted(enumerate(sims),key=lambda item: -item[1]) # - sims doc=""" I went to Mexican Festival Restaurant for Cinco De Mayo because I had been there years prior and had such a good experience. This time wasn't so good. The food was just mediocre and it wasn't hot when it was brought to our table. They brought my friends food out 10 minutes before everyone else and it took forever to get drinks. We let it slide because the place was packed with people and it was Cinco De Mayo. Also, the margaritas we had were slamming! Pure tequila. But then things took a turn for the worst. As I went to get something out of my purse which was on the back of my chair, I looked down and saw a huge water bug. I had to warn the lady next to me because it was so close to her chair. We called the waitress over and someone came with a broom and a dustpan and swept it away like it was an everyday experience. No one seemed phased. Even though our waitress was very nice, I do not think we will be returning to Mexican Festival again. It seems the restaurant is a shadow of its former self. """ vec_bow = dictionary.doc2bow(doc.lower().split()) vec_lsi = lsi[vec_bow] index = similarities.MatrixSimilarity(lsi[corpus]) sims = index[vec_lsi] sims = sorted(enumerate(sims), key=lambda item: -item[1]) sims
Analytics & Python/All iPython Notebooks/_7_BAMM_101_W_9_Working_With_Text.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import datetime


class Person():
    """Base class that stores a person's first and last name."""

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name


class Baby(Person):
    """A Person too young to introduce themselves properly."""

    def speak(self):
        # Babies babble the same way no matter what their name is.
        print('Blah blah blah')


class Adult(Person):
    """A Person who introduces themselves by first name."""

    def speak(self):
        greeting = 'Hello, my name is %s' % self.first_name
        print(greeting)
# -


class Calendar():
    """Mixin that provides appointment booking."""

    def book_appointment(self, date):
        message = 'Booking appointment for date %s' % date
        print(message)


# +
# Multiple inheritance: combine a Person subclass with the Calendar mixin.
class OrganizedAdult(Adult, Calendar):
    pass


class OrganizedBaby(Baby, Calendar):
    pass


# +
andres = OrganizedAdult('Andres', 'Gomez')
boris = OrganizedBaby('Boris', 'Bumblebutton')
andres.speak()
boris.speak()
boris.book_appointment(datetime.date(2018, 1, 1))
# -


# Redefine OrganizedBaby so that booking first warns the caller and then
# delegates to the Calendar mixin via super() (following the MRO).
class OrganizedBaby(Baby, Calendar):
    def book_appointment(self, date):
        print('Note that you are booking an appointment with a baby.')
        super().book_appointment(date)


boris = OrganizedBaby('Boris', 'Bumblebutton')
boris.book_appointment(datetime.date(2018, 1, 1))
Chapter05/Exercise84/Exercise84.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # _This notebook contains code and comments from Section 5.1 of the book [Ensemble Methods for Machine Learning](https://www.manning.com/books/ensemble-methods-for-machine-learning). Please see the book for additional details on this topic. This notebook and code are released under the [MIT license](https://github.com/gkunapuli/ensemble-methods-notebooks/blob/master/LICENSE)._ # # ## 5.1 Gradient Descent for Minimization # # When learning a machine learning model, we typically aim to find the model that fits the training data. The "goodness of this fit" is measured using the loss function. Model training is essentially finding the model parameters that minimize the loss function. Training of most machine learning algorithms can ultimately be cast into this framework, and attemtp to minimize the loss function in many different ways. # # [Gradient descent](https://en.wikipedia.org/wiki/Gradient_descent) is one such way. It is an iterative technique that can be used to find the (local) minimimum of an objective function. It is an example of a first-order optimization technique as it uses first-derivative information, that is, the gradient. # # # --- # ### 5.1.1 Gradient Descent with an Illustrative Example # We will use the [Branin function](https://uqworld.org/t/branin-function/53) as a test function to visualize how gradient descent works. 
# The Branin function is a function of two variables $w_1$ and $w_2$:
#
# \\[
# f(w_1, w_2) = a (w_2 - b w_1^2 + c w_1 - r)^2 + s (1-t) \cos{w_1} + s
# \\]
#
# Since we will be performing gradient descent, we will need the gradient of $f(w_1, w_2)$ with respect to both $w_1$ and $w_2$, which we collect into a two dimensional vector:
#
# \\[
# g(w_1, w_2) = \left[ \begin{array}{c}
# \frac{\partial f(w_1, w_2)}{\partial w_1} \\
# \frac{\partial f(w_1, w_2)}{\partial w_2}
# \end{array} \right]
# = \left[ \begin{array}{c}
# 2a(w_2 - b w_1^2 + c w_1 - r) \cdot (-2 b w_1 + c) - s (1 - t) \sin{w_1} \\
# 2a(w_2 - b w_1^2 + c w_1 - r) \end{array} \right]
# \\]

# +
import numpy as np


def branin(w, a, b, c, r, s, t):
    """Evaluate the Branin test function at the point w = [w1, w2]."""
    # The parabolic term w2 - b*w1^2 + c*w1 - r is shared with the gradient.
    parabola = w[1] - b * w[0] ** 2 + c * w[0] - r
    return a * parabola ** 2 + s * (1 - t) * np.cos(w[0]) + s


def branin_gradient(w, a, b, c, r, s, t):
    """Return the gradient of the Branin function as a length-2 array."""
    parabola = w[1] - b * w[0] ** 2 + c * w[0] - r
    d_w1 = 2 * a * parabola * (c - 2 * b * w[0]) - s * (1 - t) * np.sin(w[0])
    d_w2 = 2 * a * parabola
    return np.array([d_w1, d_w2])
# -

# We can visualize the function in three dimensions, as well as its contours in two dimensions.
import matplotlib.pyplot as plt # + # %matplotlib inline # Set the constants of the Branin function a, b, c, r, s, t = 1, 5.1 / (4 * np.pi ** 2), 5 / np.pi, 6, 10, 1 / (8 * np.pi) # Set plotting boundaries and generate the mesh w1Min, w1Max, w2Min, w2Max = -5, 18, -10, 18 w1, w2 = np.meshgrid(np.arange(w1Min, w1Max, 0.1), np.arange(w2Min, w2Max, 0.1)) # Compute the Branin function over this mesh z = np.apply_along_axis(branin, 1, np.c_[w1.ravel(), w2.ravel()], a, b, c, r, s, t) z = z.reshape(w1.shape) # Visualize the Branin function in 3d fig = plt.figure(figsize=(9, 5)) ax = fig.add_subplot(1, 2, 1, projection='3d') ax.set_position([0.025, 0.15, 0.5, 0.9]) ax.plot_surface(w1, w2, z, rstride=20, cstride=20, alpha=0.6, linewidth=0.25, edgecolors='k', cmap='viridis') ax.view_init(elev=25.0, azim=-100.0) ax.contour(w1, w2, z, zdir='z', levels=np.array([1, 2, 4, 7, 12, 20, 50, 75, 125, 175]), offset=-50, cmap='viridis', alpha=0.5) ax.set_xlabel('$w_1$') ax.set_xlim(w1Min, w1Max) ax.set_ylabel('$w_2$') ax.set_ylim(w2Min, w2Max) ax.set_zlabel('$f(w_1, w_2)$') ax.set_zlim(-50, 400) ax.dist = 8 ax.set_title('Branin function: surface') # Visualize the Branin function in 2d ax = fig.add_subplot(1, 2, 2) # ax.set_position([0.55, 0.11, 0.425, 0.85]) ctr = ax.contour(w1, w2, z, levels=np.array([1, 2, 4, 7, 12, 20, 50, 75, 125, 175]), cmap='viridis', alpha=0.75) ax.clabel(ctr, inline=1, fontsize=6) ax.set_xlabel('$w_1$') ax.set_ylabel('$w_2$') ax.set_title('Branin function: contours') fig.tight_layout() pngFile = './figures/CH05_F02_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight', pad_inches=0) # - # This function has **four global minima**, which are the centers of the elliptical regions in contours. Gradient descent will aim to find one of these four global minima. # # # Gradient descent performs the following steps: # # Initialize: ``x_old`` = initial guess # while not converged: # 1. compute the negative gradient and normalize to unit length (direction) # 2. 
# compute the step length using line search (distance)
#         3. update the solution: x_new = x_old + distance * direction
#         4. check for convergence: if amount of change between x_new and x_old is below our tolerance threshold
#
# We can implement a basic version of gradient descent that can take a function $$f$$ and corresponding gradient $$g$$ as input and returns a locally optimal solution.
#
# **Listing 5.1**: Gradient Descent

# +
from scipy.optimize import line_search


# Gradient descent with function f, and gradient g
def gradient_descent(f, g, x_init, max_iter=100, args=(), tol=1e-3):
    """Minimize f by gradient descent with a line search for the step length.

    Parameters
    ----------
    f : callable
        Objective function, invoked as ``f(x, *args)``.
    g : callable
        Gradient of ``f``, invoked as ``g(x, *args)``.
    x_init : array_like
        Initial guess for the minimizer.
    max_iter : int, optional
        Maximum number of descent iterations (default 100).
    args : tuple, optional
        Extra arguments forwarded to ``f`` and ``g``.
    tol : float, optional
        Convergence tolerance on the step size (default 1e-3, the value
        that was previously hard-coded).

    Returns
    -------
    x_best : ndarray
        Final iterate (a local minimizer, up to ``tol``).
    descent_path : ndarray of shape (max_iter + 1, len(x_init))
        Iterates visited so far; rows that were never reached remain NaN.
    """
    x_old = np.array(x_init, dtype=float)
    n_iter = 0

    # Save the descent path. The number of columns now matches the dimension
    # of x_init instead of being hard-coded to 2, so the routine works for
    # objectives of any dimension.
    descent_path = np.full((max_iter + 1, x_old.size), fill_value=np.nan)
    descent_path[n_iter] = x_old

    converged = False
    while not converged:
        n_iter += 1

        gradient = -g(x_old, *args)                     # Compute the negative gradient
        gradient_norm = np.linalg.norm(gradient)
        if gradient_norm <= 1e-12:
            # Fix: the original divided by a zero norm at stationary points,
            # which propagated NaNs. A (near-)zero gradient means we are done.
            descent_path[n_iter] = x_old
            return x_old, descent_path
        direction = gradient / gradient_norm            # Normalize the gradient

        step = line_search(f, g, x_old, direction, args=args)  # Compute the step length using line search
        if step[0] is None:   # If line search doesn't return a useful value, make it 1.0
            distance = 1.0
        else:
            distance = step[0]

        x_new = x_old + distance * direction            # Compute the update
        descent_path[n_iter] = x_new

        err = np.linalg.norm(x_new - x_old)             # Compute amount of change between x_new and x_old
        if err <= tol or n_iter >= max_iter:            # Check for convergence
            converged = True

        x_old = x_new   # Get ready for the next iteration

    return x_new, descent_path
# -

# We perform gradient descent on the Branin function, initializing our solution at $w = [-4, -5]$, and visualize our solution path.
# + # %matplotlib inline # Set the constants of the Branin function a, b, c, r, s, t = 1, 5.1 / (4 * np.pi ** 2), 5 / np.pi, 6, 10, 1 / (8 * np.pi) # Initialize and perform gradient descent w_init = np.array([-4, -5]) w_optimal, w_path = gradient_descent(branin, branin_gradient, w_init, args=(a, b, c, r, s, t)) # Plot optimization path over all iterations fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(9, 4)) ax[0].contour(w1, w2, z, levels=np.array([1, 2, 4, 7, 12, 20, 50, 75, 125, 175]), cmap='viridis', alpha=0.75) ax[0].plot(w_path[:, 0], w_path[:, 1], 'k', linewidth=2) ax[0].scatter(w_init[0], w_init[1], s=50, marker='s') ax[0].scatter(w_optimal[0], w_optimal[1], s=50, c='r') ax[0].set_xlabel('$w_1$') ax[0].set_ylabel('$w_2$') ax[0].set_title('Gradient descent') # Plot optimization path zoomed in ax[1].contour(w1, w2, z, levels=np.array([1, 2, 4, 7, 12, 20, 50, 75, 125, 175]), cmap='viridis', alpha=0.75) ax[1].plot(w_path[:, 0], w_path[:, 1], 'k', linewidth=2) ax[1].scatter(w_optimal[0], w_optimal[1], s=50, c='r') ax[1].set_xlim(1.5, 5.5) ax[1].set_ylim(-1, 3) ax[1].set_xlabel('$w_1$') ax[1].set_ylabel('$w_2$') ax[1].set_title('Gradient descent (zoomed in)') fig.tight_layout() pngFile = './figures/CH05_F03_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight', pad_inches=0) # - # Two important things to note here: # 1. **Gradient descent typically demonstrates zig-zagging behavior**, especially in narrow valleys. This is a consequence of rapidly changing gradient direction. The magnitude of the gradient also becomes smaller as we approach the minima. # # 2. Of the four minima, which one will gradient descent converge to? That depends on the initial guess. As we see below, **different initializations will cause gradient descent to reach different minima**. 
# + # %matplotlib inline n = 20 inits = np.r_[np.c_[np.random.uniform(0, 15, n), np.random.uniform(10, 16, n)], np.c_[np.random.uniform(-5, 0, n), np.random.uniform(-10, 0, n)], np.c_[np.random.uniform(12, 17, n), np.random.uniform(-10, 0, n)]] plt.figure() plt.contour(w1, w2, z, levels=np.array([1, 2, 4, 7, 12, 20, 50, 75, 125, 175]), cmap='viridis', alpha=0.5) # For each initialization, optimize and plot the path for i, w_init in enumerate(inits): w, path = gradient_descent(branin, branin_gradient, w_init, args=(a, b, c, r, s, t)) plt.plot(path[:, 0], path[:, 1], 'r', linewidth=1) plt.scatter(inits[:, 0], inits[:, 1], s=20) plt.xlabel('$w_1$') plt.ylabel('$w_2$') plt.title('Gradient descent with different initializations'); fig.tight_layout() pngFile = './figures/CH05_F04_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight', pad_inches=0) # - # --- # ### 5.1.2 Gradient Descent over Loss Functions for Training # # Let's consider a simple classification problem in a two-dimensional feature space # + # %matplotlib inline from sklearn.datasets import make_blobs X, y = make_blobs(n_samples=200, n_features=2, centers=[[-1.5, -1.5], [1.5, 1.5]], random_state=42) from visualization import plot_2d_data fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 4)) plot_2d_data(ax, X, y, xlabel='x', ylabel='y', title='Simple classification problem', legend=['pos', 'neg'], colormap='RdBu') fig.tight_layout() pngFile = './figures/CH05_F05_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight') # - # Now, let's say that we want to learn a linear classifier $h(\mathbf{x})$ of the form shown below: # # \\[ # h_\mathbf{w}(\mathbf{x}) = w_1 x_1 + w_2 x_2, # \\] # # which takes a training example $\mathbf{x} = [x_1, x_2]^T$. The classifier is parameterized by $\mathbf{w} = [w_1, w_2]^T$, which we have to learn using the training examples. In order to train a classifier, we'll need a loss function. 
#
# For this problem, we will choose the **squared loss** of the classifier $h_\mathbf{w}(\mathbf{x})$ over the training set of $n$ training examples $\mathbf{x}_i = [x_1^i, x_2^i]^T$, $i=1, ..., n$, with corresponding labels $y_i$.
#
# \\[
# f_{loss}(w_1, w_2) = \frac{1}{2} \sum_{i=1}^n \left( y_i - h_\mathbf{w}(\mathbf{x}_i) \right)^2 = \frac{1}{2} \sum_{i=1}^n \left( y_i - w_1 x_1^i - w_2 x_2^i \right)^2 = \frac{1}{2} (\mathbf{y} - X\mathbf{w})^T (\mathbf{y} - X\mathbf{w}).
# \\]

# Similar to the Branin function in Section 5.2.1, we can compute the gradient of this loss function with respect to $w_1$ and $w_2$.
#
# \\[
# g(w_1, w_2) = \left[ \begin{array}{c}
# \frac{\partial f_{loss}(w_1, w_2)}{\partial w_1} \\
# \frac{\partial f_{loss}(w_1, w_2)}{\partial w_2}
# \end{array} \right]
# = \left[ \begin{array}{c}
# - \sum_{i=1}^n \left( y_i - w_1 x_1 - w_2 x_2 \right) x_1\\
# - \sum_{i=1}^n \left( y_i - w_1 x_1 - w_2 x_2 \right) x_2 \end{array} \right]
# = -X^T (\mathbf{y} - X\mathbf{w})
# \\]
#
# In both the equations above, the expressions on the far right are the vectorized versions of the loss function, where $X$ is the data matrix and $\mathbf{y}$ is the label vector. The vectorized version is more compact and easier and more efficient to implement as it avoids explicit loops for summation.

# +
def squared_loss(w, X, y):
    """Squared loss 0.5 * ||y - X w||^2 summed over the whole training set."""
    residual = y - np.dot(X, w)
    return 0.5 * np.dot(residual, residual)


def squared_loss_gradient(w, X, y):
    """Gradient of the squared loss with respect to w, i.e. -X^T (y - X w)."""
    residual = y - np.dot(X, w)
    return -np.dot(X.T, residual)
# -

# As before, we visualize the function we want to optimize.
# + # %matplotlib inline fig = plt.figure(figsize=(9, 5)) # Plot the loss function w1Min, w1Max, w2Min, w2Max = -1, 1, -1, 1 w1, w2 = np.meshgrid(np.arange(w1Min, w1Max, 0.05), np.arange(w2Min, w2Max, 0.05)) z = np.apply_along_axis(squared_loss, 1, np.c_[w1.ravel(), w2.ravel()], X, y) z = z.reshape(w1.shape) ax = fig.add_subplot(1, 2, 1, projection='3d') ax.plot_surface(w1, w2, z, rstride=5, cstride=5, alpha=0.5, linewidth=0.25, edgecolors='k', cmap='viridis') ax.view_init(elev=34, azim=-40.0) ax.contour(w1, w2, z, zdir='z', levels=np.array([50, 100, 150, 200, 300, 400, 600, 800, 1000]), offset=-50, cmap='viridis', alpha=0.5) ax.set_xlabel('$w_1$, weight for $x_1$') ax.set_ylabel('$w_2$, weight for $x_2$') ax.set_zlabel('$L(w)$ ') ax.set_title('Loss function, $f_{loss}(w_1, w_2)$: surface') # pos1 = ax.get_position() # get the original position # pos2 = [pos1.x0 - 0.1, pos1.y0, pos1.width, pos1.height] # ax.set_position(pos2) # set a new position # Plot the contour ax = fig.add_subplot(1, 2, 2) ctr = ax.contour(w1, w2, z, levels=np.array([50, 100, 150, 200, 300, 400, 600, 800, 1000]), cmap='viridis', alpha=0.75) ax.clabel(ctr, inline=1, fontsize=6) ax.set_xlabel('$w_1$, weight for $x_1$') ax.set_ylabel('$w_2$, weight for $x_2$') ax.set_title('Loss function, $f_{loss}(w_1, w_2)$: contours') pos1 = ax.get_position() # get the original position pos2 = [pos1.x0 + 0.1, pos1.y0, pos1.width, pos1.height] ax.set_position(pos2) # set a new position # fig.tight_layout() pngFile = './figures/CH05_F06_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight') # - # As before, we perform gradient descent, this time initializing at $\mathbf{w} = [0.0, -0.99]$. 
# + # %matplotlib inline w_init = np.array([0.0, -0.99]) w, path = gradient_descent(squared_loss, squared_loss_gradient, w_init, args=(X, y)) # Compute the Loss function in this domain w1Min, w1Max, w2Min, w2Max = -1, 1, -1, 1 w1, w2 = np.meshgrid(np.arange(w1Min, w1Max, 0.05), np.arange(w2Min, w2Max, 0.05)) z = np.apply_along_axis(squared_loss, 1, np.c_[w1.ravel(), w2.ravel()], X, y) z = z.reshape(w1.shape) # Plot optimization path over all iterations fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(9, 4)) ax[0].contour(w1, w2, z, levels=np.array([25, 50, 100, 150, 200, 300, 400, 600, 800, 1000]), cmap='viridis', alpha=0.75) ax[0].scatter(w_init[0], w_init[1], s=50, marker='s') ax[0].scatter(w[0], w[1], c='r', s=50) ax[0].plot(path[:, 0], path[:, 1], 'k', linewidth=2) ax[0].set_xlabel('$w_1$') ax[0].set_ylabel('$w_2$') ax[0].set_title('Gradient descent over the loss function') # Plot the solution xMin, xMax = X[:, 0].min() - 0.25, X[:, 0].max() + 0.25 yMin, yMax = X[:, 1].min() - 0.25, X[:, 1].max() + 0.25 xMesh, yMesh = np.meshgrid(np.arange(xMin, xMax, 0.05), np.arange(yMin, yMax, 0.05)) zMesh = np.dot(np.c_[xMesh.ravel(), yMesh.ravel()], w) zMesh = (zMesh.reshape(xMesh.shape) + 1) / 2 ax[1].contourf(xMesh, yMesh, zMesh, cmap='RdBu', alpha=0.75) ax[1].contour(xMesh, yMesh, zMesh, levels=[0.5]) plot_2d_data(ax[1], X, y, colormap='RdBu') ax[1].set_xlabel('$x_1$, first feature') ax[1].set_ylabel('$x_2$, second feature') ax[1].set_title('Simple classification problem'); # fig.tight_layout() pngFile = './figures/CH05_F07_Kunapuli.png' plt.savefig(pngFile, dpi=300, bbox_inches='tight') # - print(w) # The final $\mathbf{w}$ is trained by performing gradient descent on the loss function defined over the training examples. From the figure on the right above, we can see that gradient descent does, in fact, produce a nice fit. ypred = (np.dot(X, w) >= 0).astype(int) from sklearn.metrics import accuracy_score accuracy_score(y, ypred)
ensemble-methods-notebooks-master/Ch5.1-gradient-descent-for-minimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Introduction to Data Science # # Lecture 13: Linear Regression 2 # *COMP 5360 / MATH 4100, University of Utah, http://datasciencecourse.net/* # # In this lecture, we'll discuss: # * overfitting, model generalizability, and the bias-variance tradeoff # * cross validation # * using categorical variables for regression # # Recommended reading: # * <NAME>, <NAME>, <NAME>, and <NAME>, An Introduction to Statistical Learning, Ch. 3 [digitial version available here](http://www-bcf.usc.edu/~gareth/ISL/) # # + [markdown] slideshow={"slide_type": "slide"} # ## Review from Lecture 9 (Linear Regression 1) # # ### Simple Linear Regression (SLR) # # **Data**: We have $n$ samples $(x, y)_i$, $i=1,\ldots n$. # # **Model**: $y \sim \beta_0 + \beta_1 x$ # # **Goal**: Find the best values of $\beta_0$ and $\beta_1$, denoted $\hat{\beta}_0$ and $\hat{\beta}_1$, so that the prediction $y = \hat{\beta}_0 + \hat{\beta}_1 x$ "best fits" the data. # # <img src="438px-Linear_regression.png" width="40%" alt="https://en.wikipedia.org/wiki/Linear_regression"> # # **Theorem.** # The parameters that minimize the "residual sum of squares (RSS)", # $RSS = \sum_i (y_i - \beta_0 - \beta_1 x_i)^2$, # are: # $$ # \hat{\beta}_1 = \frac{\sum_{i=1}^n (x_i - \overline{x})(y_i - \overline{y}) }{\sum_{i=1}^n (x_i - \overline{x})^2} # \qquad \textrm{and} \qquad # \hat{\beta}_0 = \overline{y} - \hat{\beta}_1 \overline{x}. # $$ # where $\overline{x} = \frac{1}{n} \sum_{i=1}^n x_i$ and $\overline{y} = \frac{1}{n} \sum_{i=1}^n y_i$. # # # ### Multilinear regression # # **Data**: We have $n$ samples of the form $\big(x_1, x_2 , \ldots, x_m , y \big)_i$, $i=1,\ldots n$. 
# # **Model**: $y \sim \beta_0 + \beta_1 x_1 + \cdots + \beta_m x_m $ # # ### Nonlinear relationships # # **Data**: We have $n$ samples $\big(x_1, x_2 , \ldots, x_m , y \big)_i$, $i=1,\ldots n$. # # **Model**: $y \sim \beta_0 + \beta_1 f_1(x_1,x_2,\ldots,x_m) + \cdots + \beta_k f_k(x_1,x_2,\ldots,x_m)$ # # + [markdown] slideshow={"slide_type": "slide"} # ## Regression with python # # There are several different python packages that do regression: # 1. [statsmodels](http://statsmodels.sourceforge.net/) # # + [scikit-learn](http://scikit-learn.org/) # # + [SciPy](http://www.scipy.org/) # # + ... # # Last time, I commented that statsmodels approaches regression from a statistics viewpoint, while scikit-learn approaches from a machine learning viewpoint. I'll say more about this today. # # SciPy has some regression tools, but compared to these other two packages, they are relatively limited. # # + slideshow={"slide_type": "-"} # imports and setup import scipy as sc import pandas as pd import statsmodels.formula.api as sm from sklearn import linear_model import matplotlib.pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize'] = (10, 6) from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm # + [markdown] slideshow={"slide_type": "slide"} # ## Advertisement dataset # Consider the 'Advertising' dataset from # [here](http://www-bcf.usc.edu/~gareth/ISL/data.html). # # # For 200 different ‘markets’ (think different cities), this dataset consists of the number of sales of a particular product as well as the advertising budget for three different media: TV, radio, and newspaper. # # Last time, after trying a variety of linear models, we discovered the following one, which includes a nonlinear relationship between the TV budget and Radio budget: # $$ # \text{Sales} = \beta_0 + \beta_1 * \text{TV_budget} + \beta_2*\text{Radio_budget} + \beta_3 * \text{TV_budget} *\text{Radio_budget}. 
# $$ # + slideshow={"slide_type": "-"} advert = pd.read_csv('Advertising.csv',index_col=0) #load data ad_NL = sm.ols(formula="Sales ~ TV + Radio + TV*Radio", data=advert).fit() ad_NL.summary() # + [markdown] slideshow={"slide_type": "-"} # This model is really excellent: # - $R^2 = 97\%$ of the variability in the data is accounted for by the model. # - The $p$-value for the F-statistic is very small # - The $p$-values for the individual coefficients are small # # Interpretation: # - In a particular market, if I spend an additional $1k on TV advertising, what do I expect sales to do? # - Should I spend additional money on TV or Radio advertising? # + slideshow={"slide_type": "-"} fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(xs=advert['TV'], ys=advert['Radio'], zs=advert['Sales']) x = sc.linspace(advert['TV'].min(), advert['TV'].max(), 100) y = sc.linspace(advert['Radio'].min(), advert['Radio'].max(), 100) X,Y = sc.meshgrid(x,y) par = dict(ad_NL.params) Z = par["Intercept"] + par["TV"]*X + par["Radio"]*Y + par["TV:Radio"]*X*Y surf = ax.plot_surface(X, Y, Z,cmap=cm.Greys, alpha=0.2) ax.view_init(25,-71) ax.set_xlabel('TV budget') ax.set_ylabel('Radio budget') ax.set_zlabel('Sales') plt.show() # + [markdown] slideshow={"slide_type": "-"} # ### A word of caution on overfitting # # It is tempting to include a lot of terms in the regression, but this is problematic. A useful model will *generalize* beyond the data given to it. # # # **Questions?** # + [markdown] slideshow={"slide_type": "slide"} # ## Overfitting, underfitting, model generalizability, and the bias–variance tradeoff # # In regression, and other prediction problems, we would like to develop a model on a dataset, that would preform well, not only on that dataset, but on similar data that the model hasn't yet seen by the model. If a model satisfies this criterion, we say that it is *generalizable*. 
# # Consider the following data, that has been fit with a linear polynomial model (black) and a high degree polynomial model (blue). For convenience, let me call these the black and blue models, respectively. # # <img src="overfitted_data.png" title="https://commons.wikimedia.org/w/index.php?curid=47471056" width="40%"> # # Let's call the dataset that we train the model on the *training dataset* and the dataset that we test the model on the *testing dataset*. In the above figure, the training dataset are the black points and the testing dataset is not shown, but we imagine it to be similar to the points shown. # # Which model is better? # # The blue model has 100% accuracy on the training dataset, while the black model has much smaller accuracy. However, the blue model is highly oscillatory and might not generalize well to new data. For example, the model would wildly miss the test point $(3,0)$. We say that the blue model has *overfit* the data. On the other hand, it isn't difficult to see that we could also *underfit* the data. In this case, the model isn't complex enough to have good accuracy on the training dataset. # # This phenomena is often described in terms of the *bias-variance tradeoff*. Here, we decompose the error of the model into three terms: # $$ # \textrm{Error} = # \textrm{Bias} + # \textrm{Variance} + # \textrm{Irreducible Error}. # $$ # - The *bias* of the method is the error caused by the simplifying assumptions built into the method. # # + The *variance* of the method is how much the model will change based on the sampled data. # # + The *irreducible error* is error in the data itself, so no model can capture this error. # # There is a tradeoff between the bias and variance of a model. # High-variance methods (e.g., the blue method) are accurate on the training set, but overfit noise in the data, so don't generalized well to new data. 
High-bias models (e.g., the black method) are too simple to fit the data, but are better at generalizing to new test data. # # + [markdown] slideshow={"slide_type": "slide"} # ## Generalizability in practice # # Consider the Auto dataset, which contains 9 features (mpg, cylinders, displacement, horsepower, weight, acceleration, year, origin, name) for 397 different used cars. This dataset is available digitally [here](http://www-bcf.usc.edu/~gareth/ISL/). # + slideshow={"slide_type": "-"} auto = pd.read_csv('Auto.csv') #load data # one of the horsepowers is '?', so we just remove it and then map the remaining strings to integers auto = auto[auto.horsepower != '?'] auto['horsepower'] = auto['horsepower'].map(int) auto # + slideshow={"slide_type": "-"} print(auto.describe()) # + [markdown] slideshow={"slide_type": "-"} # Let's consider the relationship between mpg and horsepower. # + slideshow={"slide_type": "-"} plt.scatter(auto['horsepower'],auto['mpg'],color='black',linewidth=1) plt.xlabel('horsepower'); plt.ylabel('mpg') plt.ylim((0,50)) plt.show() # + [markdown] slideshow={"slide_type": "-"} # We consider the linear model # $$ # \text{mpg} = \beta_0 + \beta_1 \text{horsepower} + \beta_2 \text{horsepower}^2 + \cdots + \beta_m \text{horsepower}^m # $$ # It might seem that choosing $m$ to be large would be a good thing. After all, a high degree polynomial is more flexible than a small degree polynomial. 
# + slideshow={"slide_type": "-"} # fit polynomial models mr1 = sm.ols(formula="mpg ~ horsepower", data=auto).fit() par1 = dict(mr1.params) mr2 = sm.ols(formula="mpg ~ horsepower + I(horsepower ** 2.0)", data=auto).fit() par2 = dict(mr2.params) mr3 = sm.ols(formula="mpg ~ horsepower + I(horsepower ** 2.0) + I(horsepower ** 3.0)", data=auto).fit() par3 = dict(mr3.params) mr4 = sm.ols(formula="mpg ~ horsepower + I(horsepower ** 2.0) + I(horsepower ** 3.0) + I(horsepower ** 4.0)", data=auto).fit() par4 = dict(mr4.params) plt.scatter(auto['horsepower'],auto['mpg'],color='black',label="data") x = sc.linspace(0,250,1000) y1 = par1["Intercept"] + par1['horsepower']*x y2 = par2["Intercept"] + par2['horsepower']*x + par2['I(horsepower ** 2.0)']*x**2 y3 = par3["Intercept"] + par3['horsepower']*x + par3['I(horsepower ** 2.0)']*x**2 + par3['I(horsepower ** 3.0)']*x**3 y4 = par4["Intercept"] + par4['horsepower']*x + par4['I(horsepower ** 2.0)']*x**2 + par4['I(horsepower ** 3.0)']*x**3 + par4['I(horsepower ** 4.0)']*x**4 plt.plot(x,y1,label="degree 1",linewidth=2) plt.plot(x,y2,label="degree 2",linewidth=2) plt.plot(x,y3,label="degree 3",linewidth=2) plt.plot(x,y4,label="degree 4",linewidth=2) plt.legend() plt.xlabel('horsepower'); plt.ylabel('mpg') plt.ylim((0,50)) plt.show() # + slideshow={"slide_type": "-"} print('mr1:',mr1.rsquared) print('mr2:',mr2.rsquared) print('mr3:',mr3.rsquared) print('mr4:',mr4.rsquared) # - # As $m$ increases, the $R^2$ value is becoming larger. (You can prove that this is always true if you add more predictors.) # # Let's check the $p$-values for the coefficients for the degree 4 fit. # + slideshow={"slide_type": "-"} mr4.summary() # + [markdown] slideshow={"slide_type": "-"} # For $m>2$, the $p$-values are very large, so we don't have a strong relationship between the variables. # # We could rely on *Occam's razor* to decide between models. 
Occam's razor can be stated: among many different models that explain the data, the simplest one should be used. Since we don't get much benefit in terms of $R^2$ values by choosing $m>2$, we should use $m=2$. # # But there are even better criterion for deciding between models. # + [markdown] slideshow={"slide_type": "slide"} # ## Cross-validation # # There is a clever method for developing generalizable models that aren't underfit or overfit, called *cross validation*. # # **Cross-validation** is a general method for assessing how the results of a predictive model (regression, classification,...) will *generalize* to an independent data set. In regression, cross-validation is a method for assessing how well the regression model will predict the dependent value for points that weren't used to *train* the model. # # The idea of the method is simple: # 1. Split the dataset into two groups: the training dataset and the testing dataset. # # + Train a variety of models on the training dataset. # # + Check the accuracy of each model on the testing dataset. # # + By comparing these accuracies, determine which model is best. # # In practice, you have to decide how to split the data into groups (i.e. how large the groups should be). You might also want to repeat the experiment so that the assessment doesn't depend on the way in which you split the data into groups. We'll worry about these questions in a later lecture. # # As the model becomes more complex ($m$ increases), the accuracy always increases for the training dataset. But, at some point, it starts to overfit the data and the accuracy decreases for the test dataset! Cross validation techniques will allow us to find the sweet-spot for the parameter $m$! (Think: Goldilocks and the Three Bears.) # # Let's see this concept for the relationship between mpg and horsepower in the Auto dataset. 
We'll use the scikit-learn package for the cross validation analysis instead of statsmodels, because it is much easier to do cross validation there. # + slideshow={"slide_type": "-"} lr = linear_model.LinearRegression() # create a linear regression object # with scikit-learn, we have to extract values from the pandas dataframe for m in sc.arange(2,6): auto['h'+str(m)] = auto['horsepower']**m X = auto[['horsepower','h2','h3','h4','h5']].values.reshape(auto['horsepower'].shape[0],5) y = auto['mpg'].values.reshape(auto['mpg'].shape[0],1) plt.scatter(X[:,0], y, color='black',label='data') # make data for plotting xs = sc.linspace(20, 250, num=100) Xs = sc.zeros([100,5]) Xs[:,0] = xs for m in sc.arange(1,5): Xs[:,m] = xs**(m+1) for m in sc.arange(1,6): lr.fit(X=X[:,:m], y=y) plt.plot(xs, lr.predict(X=Xs[:,:m]), linewidth=3, label = "m = " + str(m) ) plt.legend(loc='upper right') plt.xlabel('horsepower'); plt.ylabel('mpg') plt.ylim((0,50)) plt.show() # + [markdown] slideshow={"slide_type": "-"} # ### Cross validation using scikit-learn # # - In scikit-learn, you can use the [*train_test_split*](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function to split the dataset into a training dataset and a test dataset. # # + The *score* function returns the coefficient of determination, $R^2$, of the prediction. # # In the following code, I've split the data in an unusual way - taking the test set to be 90% - to illustrate the point more clearly. Typically, we might make the training set to be 90% of the dataset. 
# + slideshow={"slide_type": "-"} from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.9) print(X_train.shape, y_train.shape) print(X_test.shape, y_test.shape) plt.scatter(X_train[:,0], y_train, color='red',label='training data') plt.scatter(X_test[:,0], y_test, color='black',label='test data') for m in sc.arange(1,6): lr.fit(X=X_train[:,:m], y=y_train) print('m=', m, ', train: ', lr.score(X_train[:,:m], y_train), ' test: ', lr.score(X_test[:,:m], y_test)) plt.plot(xs, lr.predict(X=Xs[:,:m]), linewidth=3, label = "m = " + str(m) ) plt.legend() plt.xlabel('horsepower'); plt.ylabel('mpg') plt.ylim((0,50)) plt.show() # + [markdown] slideshow={"slide_type": "-"} # We observe that as the model complexity increases, # - the accuracy on the training data increases, but # # + the generalizability of the model to the test set decreases. # # Our job as data analysts is to find a model that is sufficiently complex to describe the training data, but not so complex that it isn't generalizable to new data. # + [markdown] slideshow={"slide_type": "slide"} # ## Class exercise: analysis of the credit dataset # # Next, we'll use [Statsmodels](http://statsmodels.sourceforge.net/) to study a dataset related to credit cards. # We'll use the 'Credit' dataset, available # [here](http://www-bcf.usc.edu/~gareth/ISL/data.html). # This dataset consists of some credit card information for 400 people. # # Of course, a *credit card* is a card issued to a person ("cardholder"), typically from a bank, that can be used as a method of payment. The card allows the cardholder to borrow money from the bank to pay for goods and services. Credit cards have a *limit*, the maximum amount you can borrow, which is determined by the bank. The limit is determined from information collected from the cardholder (income, age, ...) and especially (as we will see) the cardholders "credit rating". 
The *credit rating* is an evaluation of the (1) ability of the cardholder to pay back the borrowed money and (2) the likelihood of the cardholder to defaulting on the borrowed money. # # Our focus will be on the use of regression tools to study this dataset. Ideally, we'd like to understand what factors determine *credit ratings* and *credit limits*. We can think about this either from the point of view of (1) a bank who wants to protect their investments by minimizing credit defaults or (2) a person who is trying to increase their credit rating and/or credit limit. # # A difficulty we'll encounter is including categorical data in regression models. # - # Import data from Credit.csv file credit = pd.read_csv('Credit.csv',index_col=0) #load data credit # + # Summarize and describe data print(credit.dtypes, '\n') print(credit['Gender'].value_counts(), '\n') print(credit['Student'].value_counts(), '\n') print(credit['Married'].value_counts(), '\n') print(credit['Ethnicity'].value_counts()) credit.describe() # + [markdown] slideshow={"slide_type": "-"} # The column names of this data are: # 1. Income # # + Limit # # + Rating # # + Cards # # + Age # # + Education # # + Gender (categorial: M,F) # # + Student (categorial: Y,N) # # + Married (categorial: Y,N) # # + Ethnicity (categorial: Caucasian, Asian, African American) # # + Balance # # **Question:** What is wrong with the income data? How can it be fixed? # # The file 'Credit.csv' is a comma separated file. I assume a period was used instead of a comma to indicate thousands in income so it wouldn't get confused with the separating value? Or maybe this is a dataset from Europe? Or maybe the income is just measured in \$1k units? To change the income data, we can use the Pandas series 'map' function. # # - credit["Income"] = credit["Income"].map(lambda x: 1000*x) print(credit[:10]) # We can also look at the covariances in the data. (This is how the variables vary together.) There are two ways to do this: # 1. 
Quantitatively: Compute the correlation matrix. For each pair of variables, $(x_i,y_i)$, we compute # $$ # \frac{\sum_i (x_i - \bar x) (y_i - \bar y)}{s_x s_y} # $$ # where $\bar x, \bar y$ are sample means and $s_x, s_y$ are sample variances. # + Visually: Make a scatter matrix of the data # # + slideshow={"slide_type": "-"} credit.corr() # + slideshow={"slide_type": "-"} # trick: semi-colon prevents output pd.plotting.scatter_matrix(credit, figsize=(10, 10), diagonal='kde'); # + [markdown] slideshow={"slide_type": "-"} # **Observations:** # 1. Limit and Rating are highly correlated ($99.7\%$) # # + Income strongly correlates with Limit ($79\%$) and Rating ($79\%$) # # + Balance correlates with Limit ($86\%$) and Rating ($86\%$) # # + There are "weird stripes" in some of the data. Why? # # + Categorical information doesn't appear in this plot. Why? How can I visualize the categorical variables? # + # Plot Categorical variables: Gender, Student, Married, Ethnicity fig, axes = plt.subplots(nrows=2, ncols=2,figsize=(10,10)) credit["Gender"].value_counts().plot(kind='bar',ax=axes[0,0]); credit["Student"].value_counts().plot(kind='bar',ax=axes[1,0]); credit["Married"].value_counts().plot(kind='bar',ax=axes[0,1]); credit["Ethnicity"].value_counts().plot(kind='bar',ax=axes[1,1]); # - # ## A first regression model # # **Exercise:** First regress Limit on Rating: # $$ # \text{Limit} = \beta_0 + \beta_1 \text{Rating}. # $$ # Since credit ratings are primarily used by banks to determine credit limits, we expect that Rating is very predictive for Limit, so this regression should be very good. # # Use the 'ols' function from the statsmodels python library. 
#

# your code goes here
# Regress Limit on Rating; since banks set limits from ratings, this fit
# should be extremely good.
rating_model = sm.ols(formula='Limit ~ Rating', data=credit)
limit_ols = rating_model.fit()
limit_ols.summary()

# + [markdown] slideshow={"slide_type": "slide"}
# ## Predicting Limit without Rating
#
# Since Rating and Limit are almost the same variable, next we'll forget about Rating and just try to predict Limit from the real-valued variables (non-categorical variables): Income, Cards, Age, Education, Balance.
#
# **Exercise:** Develop a multilinear regression model to predict Limit. Interpret the results.
#
# For now, just focus on the real-valued variables (Income, Cards, Age, Education, Balance)
# and ignore the categorical variables (Gender, Student, Married, Ethnicity).
#

# + slideshow={"slide_type": "-"}
# your code goes here
# A multilinear model for Limit using two of the real-valued predictors.
balance_income_model = sm.ols(formula='Limit ~ Balance + Income', data=credit)
limit_ols = balance_income_model.fit()
limit_ols.summary()

# + [markdown] slideshow={"slide_type": "-"}
# Which independent variables are good/bad predictors?
#
# **Your observations:**
#

# + [markdown] slideshow={"slide_type": "slide"}
# ## Incorporating categorical variables into regression models
#
# We have four categorical variables (Gender, Student, Married, Ethnicity). How can we include them in a regression model?
#
# Let's start with a categorical variable with only 2 categories: Gender (Male, Female).
#
# Idea: Create a "dummy variable" that turns Gender into a real value:
# $$
# \text{Gender_num}_i = \begin{cases}
# 1 & \text{if $i$-th person is female} \\
# 0 & \text{if $i$-th person is male}
# \end{cases}.
# $$
# Then we could try to fit a model of the form
# $$
# \text{Income} = \beta_0 + \beta_1 \text{Gender_num}.
# $$

# + slideshow={"slide_type": "-"}
# Numeric dummy variables for the two-category columns.
# NOTE(review): the leading space in ' Male' appears to match the raw
# values stored in Credit.csv — confirm against credit['Gender'].value_counts().
credit["Gender_num"] = credit["Gender"].map({' Male':0, 'Female':1})
credit["Student_num"] = credit["Student"].map({'Yes':1, 'No':0})
credit["Married_num"] = credit["Married"].map({'Yes':1, 'No':0})
credit_model = sm.ols(formula="Income ~ Gender_num", data=credit).fit()
# Display the fit so the p-value discussed below is actually visible.
credit_model.summary()

# + [markdown] slideshow={"slide_type": "-"}
# Since the $p$-value for the Gender_num coefficient is very large, there is no support for the conclusion that there is a difference in income between genders.
#
# **Exercise**: Try to find a meaningful relationship in the data including one of the categorical variables (Gender, Student, Married), for example, Balance vs. Student, Credit vs. Married, etc...
#

# + slideshow={"slide_type": "-"}
# your code here
# (Note: OLS on a 0/1 response is a "linear probability model"; logistic
# regression would be the more standard tool for a binary outcome.)
limit_model = sm.ols(formula='Married_num ~ Balance + Income', data=credit).fit()
limit_model.summary()

# + [markdown] slideshow={"slide_type": "slide"}
# ## What about a categorical variable with 3 categories?
#
# The Ethnicity variable takes three values: Caucasian, Asian, and African American.
#
# What's wrong with the following?
# $$
# \text{Ethnicity_num}_i = \begin{cases}
# 0 & \text{if $i$-th person is Caucasian} \\
# 1 & \text{if $i$-th person is Asian} \\
# 2 & \text{if $i$-th person is African American}
# \end{cases}.
# $$
#
# Hint: Recall Nominal, Ordinal, Interval, Ratio variable types from Lecture 4 (Descriptive Statistics).
#
# We'll need more than one dummy variable:
# $$
# \text{Asian}_i = \begin{cases}
# 1 & \text{if $i$-th person is Asian} \\
# 0 & \text{otherwise}
# \end{cases}.
# $$
# $$
# \text{Caucasian}_i = \begin{cases}
# 1 & \text{if $i$-th person is Caucasian} \\
# 0 & \text{otherwise}
# \end{cases}.
# $$
# The value with no dummy variable--African American--is called the *baseline*.
#
# We can use the *get_dummies* function to automatically get these values

# + slideshow={"slide_type": "-"}
# One 0/1 indicator column per ethnicity value; the column names come
# straight from the data values ('African American', 'Asian', 'Caucasian').
dummy = pd.get_dummies(credit['Ethnicity'])
credit = pd.concat([credit, dummy], axis=1)
credit

# + [markdown] slideshow={"slide_type": "-"}
# **Exercise**: Can you find a relationship in the data involving the variable ethnicity?

# + slideshow={"slide_type": "-"}
# your code here
# The dummy column is named 'African American' (with a space), which is not
# a valid Python identifier, so the patsy formula must quote it with Q(...);
# the bare name AfricanAmerican would fail to resolve during the fit.
credit_limit = sm.ols(formula='Q("African American") ~ Income', data=credit).fit()
credit_limit.summary()
# -

# ![image](https://imgs.xkcd.com/comics/error_bars_2x.png)
#
13-LinearRegression2/13-LinearRegression2.ipynb