code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Python was not designed to be very good at parallel processing. There are two major problems at the core of the language that make it hard to implement parallel algorithms. # # * The Global Interpreter Lock # * Flexible object model # # The first of these issues is the most famous obstacle towards a convincing multi-threading approach, where a single instance of the Python interpreter runs in several threads. The second point is more subtle, but makes it harder to do multi-processing, where several independent instances of the Python interpreter work together to achieve parallelism. We will first explain an elegant way to work around the Global Interpreter Lock, or GIL: use Cython. # # ### Using Cython to lift the GIL # The GIL means that the Python interpreter will only operate on one thread at a time. Even when we think we run in a gazillion threads, Python itself uses only one. Multi-threading in Python is only usefull to wait for I/O and to perform system calls. To do useful CPU intensive work in multi-threaded mode, we need to develop functions that are implemented in C, and tell Python when we call these functions not to worry about the GIL. The easiest way to achieve this, is to use Cython. We develop a number-crunching prime adder, and have it run in parallel threads. # # We'll load the ``multiprocessing``, ``threading`` and ``queue`` modules to do our plumbing, and the ``cython`` extension so we can do the number crunching. # %load_ext cython import multiprocessing import threading import queue # We define a function that computes the sum of all primes below a certain integer `n`, and don't try to be smart about it; the point is that it needs a lot of computation. 
These functions are designated ``nogil``, so that we can be certain no Python objects are accessed. Finally we create a single Python exposed function that uses the: # # ```python # with nogil: # ... # ``` # # statement. This is a context-manager that lifts the GIL for the duration of its contents. # + language="cython" # # from libc.math cimport ceil, sqrt # # # cdef inline int _is_prime(int n) nogil: # """return a boolean, is the input integer a prime?""" # if n == 2: # return True # cdef int max_i = <int>ceil(sqrt(n)) # cdef int i = 2 # while i <= max_i: # if n % i == 0: # return False # i += 1 # return True # # # cdef unsigned long _sum_primes(int n) nogil: # """return sum of all primes less than n """ # cdef unsigned long i = 0 # cdef int x # for x in range(2, n): # if _is_prime(x): # i += x # return i # # # def sum_primes(int n): # with nogil: # result = _sum_primes(n) # return result # - # In fact, we only loaded the ``multiprocessing`` module to get the number of CPUs on this machine. We also get a decent amount of work to do in the ``input_range``. input_range = range(int(1e6), int(2e6), int(5e4)) ncpus = multiprocessing.cpu_count() print("We have {} cores to work on!".format(ncpus)) # Let's first run our tests in a single thread: # + # %%time for i in input_range: print(sum_primes(i), end=' ', flush=True) print() # - # We can do better than that! We now create a queue containing the work to be done, and a pool of threads eating from this queue. The workers will keep on working as long as the queue has work for them. # + # %%time ### We need to define a worker function that fetches jobs from the queue. 
def worker(q): while True: try: x = q.get(block=False) print(sum_primes(x), end=' ', flush=True) except queue.Empty: break ### Create the queue, and fill it with input values work_queue = queue.Queue() for i in input_range: work_queue.put(i) ### Start a number of threads threads = [ threading.Thread(target=worker, args=(work_queue,)) for i in range(ncpus)] for t in threads: t.start() ### Wait until all of them are done for t in threads: t.join() # - # On my laptop, a dual-core hyper-threaded `Intel(R) Core(TM) i5-5300U CPU`, this runs just over two times faster than the single threaded code. Setting up a queue and a pool of workers is quite cumbersome. Also, this approach doesn't scale up if the dependencies between our computations get more complex. Next we'll use Noodles to provide the multi-threaded environment to execute our work. We'll need three functions: # * ``schedule`` to decorate our work function # * ``run_parallel`` to run the work in parallel # * ``gather`` to collect our work into a workflow from noodles import (schedule, run_parallel, gather) # + # %%time @schedule def s_sum_primes(n): result = sum_primes(n) print(result, end=' ', flush=True) return result p_prime_sums = gather(*(s_sum_primes(i) for i in input_range)) prime_sums = run_parallel(p_prime_sums, n_threads=ncpus) print() # -
primes/Parallel Cython.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import math import librosa import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline from datetime import datetime from IPython.display import Image df1 = pd.read_csv('echonest.csv') df2 = pd.read_csv('features.csv') df3 = pd.read_csv('tracks.csv') # # 2.1 # I look up information in datasets df1 df2 df3 # Merge df1 df2 df3 by key_id:track_id df = pd.merge(df2, df1, on="track_id", how="right") df= pd.merge(df3, df, on="track_id", how="right") # Get dataset which content~13k rows df # Use basic pandas tools to understand new df df.tail() df.info() df.describe() # Change df to csv and xlsx, to see every column info df.to_csv('df.csv', index=False) df.to_excel('df.xlsx') from IPython.display import Image Image('df_xlsx.png') # Check columns for isnull() df.isnull().sum() null_column_count=0 null_list=df.isnull().sum() for i in range(len(null_list)): if(null_list[i]!=0): null_column_count+=1 print(i) print("null_column_count:",null_column_count) # Check columns for duplicates df = df.loc[:,~df.columns.duplicated()] df # # 2.2 # #Methods of reduce dimensiolity # # Principal Component Analysis, Multiple Correspondence Analysis, Singular Value Decomposition, Factor Analysis for Mixed Data, Two-Steps clustering # HINT: We don't want to miss relevant variables like song's duration, language, etc., after the dimensionality reduction. To keep those variables, you can apply the dimensionality reduction method(s) on features coming from the same file. Later you can stack them with the variables selected from another file. # MAIN:Apply the selected method(s) to your data. Make sure that the chosen method retains > 70% of the total variance. 
# ### Principal Component Analysis # https://en.wikipedia.org/wiki/Principal_component_analysis # # https://www.youtube.com/watch?v=kApPBm1YsqU&ab_channel=MichaelGalarnyk # # https://www.youtube.com/watch?v=Lsue2gEM9D0&ab_channel=StatQuestwithJoshStarmer # ### Multiple Correspondence Analysis # https://www.youtube.com/watch?v=aZAn0rjJWQc&ab_channel=StatisticsNinja # # https://www.youtube.com/watch?v=gZ_7WWEVlTg&ab_channel=Fran%C3%A7oisHusson # ### Singular Value Decomposition # https://www.youtube.com/watch?v=gXbThCXjZFM&ab_channel=SteveBrunton # # https://www.youtube.com/watch?v=mBcLRGuAFUk&ab_channel=MITOpenCourseWare # # https://www.youtube.com/watch?v=pcUPelQ5bMM&ab_channel=TheEngineeringWorld # ### Factor Analysis for Mixed Data # https://en.wikipedia.org/wiki/Factor_analysis_of_mixed_data # # https://www.youtube.com/watch?v=ttBs_wfw_6U # # ### Two-Steps clustering # Аружан
main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # importing libraries import pandas as pd import numpy as np # For mathematical calculations import seaborn as sns # For data visualization import matplotlib.pyplot as plt import seaborn as sn # For plotting graphs # %matplotlib inline import warnings # To ignore any warnings warnings.filterwarnings("ignore") # loading the data train = pd.read_csv('train.csv') test = pd.read_csv('test.csv') train.columns test.columns train.shape, test.shape # Print data types for each variable train.dtypes #printing first five rows of the dataset train.head() train['subscribed'].value_counts() # Normalize can be set to True to print proportions instead of number train['subscribed'].value_counts(normalize=True) # plotting the bar plot of frequencies train['subscribed'].value_counts().plot.bar() # So, 3715 users out of total 31647 have subscribed which is around 12%. Let's now explore the variables to have a better understanding of the dataset. We will first explore the variables individually using univariate analysis, then we will look at the relation between various independent variables and the target variable. We will also look at the correlation plot to see which variables affects the target variable most. # # Let's first look at the distribution of age variable to see how many people belongs to a particular age group. sn.distplot(train["age"]) # We can infer that most of the clients fall in the age group between 20-60. Now let's look at what are the different types of jobs of the clients. As job is a categorical variable, we will look at its frequency table train['job'].value_counts().plot.bar() # We see that most of the clients belongs to blue-collar job and the students are least in number as students generally do not take a term deposit. 
Let's also look at how many clients have default history. train['default'].value_counts().plot.bar() # More than 90% of the clients have no default history. Now we will explore these variables against the target variable using bivariate analysis. We will make use of scatter plots for continuous or numeric variables and crosstabs for the categorical variables. Let's start with job and subscribed variable. # + print(pd.crosstab(train['job'],train['subscribed'])) job=pd.crosstab(train['job'],train['subscribed']) job.div(job.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(8,8)) plt.xlabel('Job') plt.ylabel('Percentage') # + print(pd.crosstab(train['default'],train['subscribed'])) default=pd.crosstab(train['default'],train['subscribed']) default.div(default.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(8,8)) plt.xlabel('default') plt.ylabel('Percentage') # - train['subscribed'].replace('no', 0,inplace=True) train['subscribed'].replace('yes', 1,inplace=True) corr = train.corr() mask = np.array(corr) mask[np.tril_indices_from(mask)] = False fig,ax= plt.subplots() fig.set_size_inches(20,10) sn.heatmap(corr, mask=mask,vmax=.9, square=True,annot=True, cmap="YlGnBu") # We can infer that duration of the call is highly correlated with the target variable. This can be verified as well. As the duration of the call is more, there are higher chances that the client is showing interest in the term deposit and hence there are higher chances that the client will subscribe to term deposit. # # Next we will look for any missing values in the dataset. train.isnull().sum() # ## Model Building target = train['subscribed'] train = train.drop('subscribed',1) # applying dummies on the train dataset train = pd.get_dummies(train) from sklearn.model_selection import train_test_split # splitting into train and validation with 20% data in validation set and 80% data in train set. 
X_train, X_val, y_train, y_val = train_test_split(train, target, test_size = 0.2, random_state=12) from sklearn.linear_model import LogisticRegression # defining the logistic regression model lreg = LogisticRegression() # fitting the model on X_train and y_train lreg.fit(X_train,y_train) # making prediction on the validation set prediction = lreg.predict(X_val) # Now we will evaluate how accurate our predictions are. As the evaluation metric for this problem is accuracy, let's calculate the accuracy on validation set. from sklearn.metrics import accuracy_score # calculating the accuracy score accuracy_score(y_val, prediction) # defining the decision tree model with depth of 4, you can tune it further to improve the accuracy score clf = DecisionTreeClassifier(max_depth=4, random_state=0) # fitting the decision tree model clf.fit(X_train,y_train) # making prediction on the validation set predict = clf.predict(X_val) # calculating the accuracy score accuracy_score(y_val, predict) # We got an accuracy of more than 90% on the validation set. You can try to improve the score by tuning hyperparameters of the model. Let's now make the prediction on test dataset. We will make the similar changes in the test set as we have done in the training set before making the predictions. test = pd.get_dummies(test) test_prediction = clf.predict(test) # Finally, we will save these predictions into a csv file. You can then open this csv file and copy paste the predictions on the provided excel file to generate score. submission = pd.DataFrame() # creating a Business_Sourced column and saving the predictions in it submission['ID'] = test['ID'] submission['subscribed'] = test_prediction # Since the target variable is yes or no, we will convert 1 and 0 in the predictions to yes and no respectively. submission['subscribed'].replace(0,'no',inplace=True) submission['subscribed'].replace(1,'yes',inplace=True) submission.to_csv('submission.csv', header=True, index=False)
ClientPre.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # PHYS 2211 - Introductory Physics Laboratory I # # Measurement andError Propagation # ### Name: <NAME> # ### Partners: <NAME> # #### Annex A import matplotlib import numpy as np import matplotlib.pyplot as plt import sympy # %matplotlib inline # #### Annex A - Data and Calculations # #### 1. Rectangular Block class ListTable(list): """ Overridden list class which takes a 2-dimensional list of the form [[1,2,3],[4,5,6]], and renders an HTML Table in IPython Notebook. """ def _repr_html_(self): html = ["<table>"] for row in self: html.append("<tr>") for col in row: html.append("<td>{0}</td>".format(col)) html.append("</tr>") html.append("</table>") return ''.join(html) # plain text plt.title('alpha > beta') # math text plt.title(r'$\alpha > \beta$') from sympy import symbols, init_printing init_printing(use_latex=True) delta = symbols('delta') delta**2/3 from sympy import symbols, init_printing init_printing(use_latex=True) delta = symbols('delta') table = ListTable() table.append(['measuring device', ' ', 'delta', 'w', 'delta w', 'h', 'delta h']) table.append([' ', '(cm)', '(cm)', '(cm)','(cm)', '(cm)', '(cm)']) lr=4.9 wr=2.5 hr=1.2 lc=4.90 wc=2.54 hc=1.27 deltar=0.1 deltac=0.01 table.append(['ruler',lr, deltar, wr, deltar, hr, deltar]) table.append(['vernier caliper', lc, deltac, wc, deltac, hc, deltac]) table s(t) = \mathcal{A}\/\sin(2 \omega t) # + table = ListTable() table.append(['l', 'deltal', 'w', 'deltaw', 'h', 'deltah']) table.append(['(cm)', '(cm)', '(cm)','(cm)', '(cm)', '(cm)']) lr=4.9 wr=2.5 hr=1.2 lc=4.90 wc=2.54 hc=1.27 deltar=0.1 deltac=0.01 for i in range(0,len(x)): xx = x[i] yy = y[i] ttable.append([lr, deltar, wr, deltar, hr, deltar])able.append([lr, deltar, wr, deltar, hr, deltar]) table # + # code below demonstrates... 
import numpy as np x = [7,10,15,20,25,30,35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95] y= [0.228,0.298,0.441,0.568,0.697,0.826,0.956, 1.084, 1.211, 1.339,1.468, 1.599, 1.728, 1.851, 1.982, 2.115, 2.244, 2.375, 2.502] plt.scatter(x, y) plt.title('Linearity test') plt. xlabel('Length (cm)') plt. ylabel('Voltage (V)') fit = np.polyfit(x,y,1) fit_fn = np.poly1d(fit) plt.plot(x,y, 'yo', x, fit_fn(x), '--k') m,b = np.polyfit(x, y, 1) print ('m={0}'.format(m)) print ('b={0}'.format(b)) plt.show() # - # #### 2. Wheatstone bridge measurements Rk = 3.5 # kOhms table = ListTable() table.append(['Ru', 'Ru, acc', 'L1', 'L2', 'Ru, wheatstone', 'Disc']) table.append(['(kOhms)', '(kOhms)', '(cm)', '(cm)', '(kOhms)', ' % ']) x = [0.470,0.680,1.000, 1.500] y= [0.512,0.712,1.131,1.590] z= [88.65, 84.50, 76.90, 69.80] for i in range(0,len(x)): xx = x[i] yy = y[i] zz = z[i] Rw = (100.0 - zz)/zz*Rk Disc = (Rw-yy)/yy*100.0 table.append([xx, yy, zz, 100.0-zz,Rw, Disc]) table # + x = [0.470,0.680,1.000, 1.500] y= [0.512,0.712,1.131,1.590] z= [88.65, 84.50, 76.90, 69.80] for i in range(0,len(x)): xx = x[i] yy = y[i] zz = z[i] Rw = (100.0 - zz)/zz*Rk Disc = (Rw-yy)/yy*100.0 plt.scatter(yy, Disc) plt.title('Discrepancy vs Resistance') plt. xlabel('Resistance (kOhms)') plt. ylabel('Discrepancy (%)') plt.show() # -
PHYS2211.Measurement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # # Facedetection for Escape Game # + import cv2 from matplotlib import pyplot as plt import numpy as np image1='/Users/stijnvanhulle/GitHub/EscapePlan/python/test_data/schilderij_1.jpg' image2='/Users/stijnvanhulle/GitHub/EscapePlan/python/test_data/schilderij_1_small.jpg' sift= cv2.xfeatures2d.SIFT_create() surf= cv2.xfeatures2d.SURF_create() orb = cv2.ORB_create() # - img1 = cv2.imread(image1,0) # queryImage img2 = cv2.imread(image2,0) # trainImage # + def readPercent(matches): good = [] for m,n in matches: if m.distance < 0.25*n.distance: good.append([m]) amount=len(good) percent=amount/len(matches) return percent *100 def getGood(matches): good = [] for m,n in matches: if m.distance < 0.25*n.distance: good.append([m]) return good # - # ## Trainen aan de hand van orb # + kp1, des1 = orb.detectAndCompute(img1,None) kp2, des2 = orb.detectAndCompute(img2,None) bf = cv2.BFMatcher() matches = bf.knnMatch(des1,des2, k=2) good=getGood(matches) print('ORB:', readPercent(matches)) # - img_plt = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2) plt.imshow(img_plt) plt.show() # ## Trainen aan de hand van SIFT # + kp1, des1 = sift.detectAndCompute(img1,None) kp2, des2 = sift.detectAndCompute(img2,None) bf = cv2.BFMatcher() matches = bf.knnMatch(des1,des2, k=2) good=getGood(matches) print('SIFT:', readPercent(matches)) # - img_plt = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2) plt.imshow(img_plt) plt.show() # #### Hierboven kan je zien dat het percentage waarmee de 2 afbeeldingen overeenkomen 100% bedraagt en dit zal daarom over dezelfde afbeelding gaan. We hebben sift gebruikt omdat we daarom de beste gelijkenissen konden vinden,
python/faceDetection.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,md:myst # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="O-SkdlPxvETZ" # # Just In Time Compilation with JAX # # [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/jax/blob/main/docs/jax-101/02-jitting.ipynb) # # *Authors: <NAME> & <NAME>* # # In this section, we will further explore how JAX works, and how we can make it performant. # We will discuss the `jax.jit()` transform, which will perform *Just In Time* (JIT) compilation # of a JAX Python function so it can be executed efficiently in XLA. # # ## How JAX transforms work # # In the previous section, we discussed that JAX allows us to transform Python functions. This is done by first converting the Python function into a simple intermediate language called jaxpr. The transformations then work on the jaxpr representation. # # We can show a representation of the jaxpr of a function by using `jax.make_jaxpr`: # + id="P9Xj77Wx3Z2P" outputId="5a0597eb-86c9-4762-ce10-2811debbc732" import jax import jax.numpy as jnp global_list = [] def log2(x): global_list.append(x) ln_x = jnp.log(x) ln_2 = jnp.log(2.0) return ln_x / ln_2 print(jax.make_jaxpr(log2)(3.0)) # + [markdown] id="jiDsT7y0RwIp" # The [Understanding Jaxprs](https://jax.readthedocs.io/en/latest/jaxpr.html) section of the documentation provides more information on the meaning of the above output. # # Importantly, note how the jaxpr does not capture the side-effect of the function: there is nothing in it corresponding to `global_list.append(x)`. This is a feature, not a bug: JAX is designed to understand side-effect-free (a.k.a. functionally pure) code. 
If *pure function* and *side-effect* are unfamiliar terms, this is explained in a little more detail in [🔪 JAX - The Sharp Bits 🔪: Pure Functions](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#pure-functions). # # Of course, impure functions can still be written and even run, but JAX gives no guarantees about their behaviour once converted to jaxpr. However, as a rule of thumb, you can expect (but shouldn't rely on) the side-effects of a JAX-transformed function to run once (during the first call), and never again. This is because of the way that JAX generates jaxpr, using a process called 'tracing'. # # When tracing, JAX wraps each argument by a *tracer* object. These tracers then record all JAX operations performed on them during the function call (which happens in regular Python). Then, JAX uses the tracer records to reconstruct the entire function. The output of that reconstruction is the jaxpr. Since the tracers do not record the Python side-effects, they do not appear in the jaxpr. However, the side-effects still happen during the trace itself. # # Note: the Python `print()` function is not pure: the text output is a side-effect of the function. Therefore, any `print()` calls will only happen during tracing, and will not appear in the jaxpr: # + id="JxV2p7e2RawC" outputId="9dfe8a56-e553-4640-a04e-5405aea7832d" def log2_with_print(x): print("printed x:", x) ln_x = jnp.log(x) ln_2 = jnp.log(2.0) return ln_x / ln_2 print(jax.make_jaxpr(log2_with_print)(3.)) # + [markdown] id="f6W_YYwRRwGp" # See how the printed `x` is a `Traced` object? That's the JAX internals at work. # # The fact that the Python code runs at least once is strictly an implementation detail, and so shouldn't be relied upon. However, it's useful to understand as you can use it when debugging to print out intermediate values of a computation. 
# + [markdown] id="PgVqi6NlRdWZ" # A key thing to understand is that jaxpr captures the function as executed on the parameters given to it. For example, if we have a conditional, jaxpr will only know about the branch we take: # + id="hn0CuphEZKZm" outputId="99dae727-d2be-4577-831c-e1e14af5890a" def log2_if_rank_2(x): if x.ndim == 2: ln_x = jnp.log(x) ln_2 = jnp.log(2.0) return ln_x / ln_2 else: return x print(jax.make_jaxpr(log2_if_rank_2)(jax.numpy.array([1, 2, 3]))) # + [markdown] id="Qp3WhqaqvHyD" # ## JIT compiling a function # # As explained before, JAX enables operations to execute on CPU/GPU/TPU using the same code. # Let's look at an example of computing a *Scaled Exponential Linear Unit* # ([SELU](https://proceedings.neurips.cc/paper/6698-self-normalizing-neural-networks.pdf)), an # operation commonly used in deep learning: # + id="JAXFYtlRvD6p" outputId="e94d7dc2-a9a1-4ac2-fd3f-152e3f<PASSWORD>" import jax import jax.numpy as jnp def selu(x, alpha=1.67, lambda_=1.05): return lambda_ * jnp.where(x > 0, x, alpha * jnp.exp(x) - alpha) x = jnp.arange(1000000) # %timeit selu(x).block_until_ready() # + [markdown] id="ecN5lEXe6ncy" # The code above is sending one operation at a time to the accelerator. This limits the ability of the XLA compiler to optimize our functions. # # Naturally, what we want to do is give the XLA compiler as much code as possible, so it can fully optimize it. For this purpose, JAX provides the `jax.jit` transformation, which will JIT compile a JAX-compatible function. The example below shows how to use JIT to speed up the previous function. # + id="nJVEwPcH6bQX" outputId="289eb2f7-a5ce-4cec-f652-5c4e5b0b86cf" selu_jit = jax.jit(selu) # Warm up selu_jit(x).block_until_ready() # %timeit selu_jit(x).block_until_ready() # + [markdown] id="hMNKi1mYXQg5" # Here's what just happened: # # 1) We defined `selu_jit` as the compiled version of `selu`. # # 2) We ran `selu_jit` once on `x`. 
This is where JAX does its tracing -- it needs to have some inputs to wrap in tracers, after all. The jaxpr is then compiled using XLA into very efficient code optimized for your GPU or TPU. Subsequent calls to `selu_jit` will now use that code, skipping our old Python implementation entirely. # # (If we didn't include the warm-up call separately, everything would still work, but then the compilation time would be included in the benchmark. It would still be faster, because we run many loops in the benchmark, but it wouldn't be a fair comparison.) # # 3) We timed the execution speed of the compiled version. (Note the use of `block_until_ready()`, which is required due to JAX's [Asynchronous execution](https://jax.readthedocs.io/en/latest/async_dispatch.html) model). # + [markdown] id="DRJ6R6-d9Q_U" # ## Why can't we just JIT everything? # # After going through the example above, you might be wondering whether we should simply apply `jax.jit` to every function. To understand why this is not the case, and when we should/shouldn't apply `jit`, let's first check some cases where JIT doesn't work. # + id="GO1Mwd_3_W6g" outputId="a6fcf6d1-7bd6-4bb7-99c3-2a5a827183e2" tags=["raises-exception"] # Condition on value of x. def f(x): if x > 0: return x else: return 2 * x f_jit = jax.jit(f) f_jit(10) # Should raise an error. # + id="LHlipkIMFUhi" outputId="54935882-a180-45c0-ad03-9dfb5e3baa97" tags=["raises-exception"] # While loop conditioned on x and n. def g(x, n): i = 0 while i < n: i += 1 return x + i g_jit = jax.jit(g) g_jit(10, 20) # Should raise an error. # + [markdown] id="isz2U_XX_wH2" # The problem is that we tried to condition on the *value* of an input to the function being jitted. The reason we can't do this is related to the fact mentioned above that jaxpr depends on the actual values used to trace it. # # The more specific information about the values we use in the trace, the more we can use standard Python control flow to express ourselves. 
However, being too specific means we can't reuse the same traced function for other values. JAX solves this by tracing at different levels of abstraction for different purposes. # # For `jax.jit`, the default level is `ShapedArray` -- that is, each tracer has a concrete shape (which we're allowed to condition on), but no concrete value. This allows the compiled function to work on all possible inputs with the same shape -- the standard use case in machine learning. However, because the tracers have no concrete value, if we attempt to condition on one, we get the error above. # # In `jax.grad`, the constraints are more relaxed, so you can do more. If you compose several transformations, however, you must satisfy the constraints of the most strict one. So, if you `jit(grad(f))`, `f` mustn't condition on value. For more detail on the interaction between Python control flow and JAX, see [🔪 JAX - The Sharp Bits 🔪: Control Flow](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#control-flow). # # One way to deal with this problem is to rewrite the code to avoid conditionals on value. Another is to use special [control flow operators](https://jax.readthedocs.io/en/latest/jax.lax.html#control-flow-operators) like `jax.lax.cond`. However, sometimes that is impossible. In that case, you can consider jitting only part of the function. For example, if the most computationally expensive part of the function is inside the loop, we can JIT just that inner part (though make sure to check the next section on caching to avoid shooting yourself in the foot): # + id="OeR8hF-NHAML" outputId="d47fd6b2-8bbd-4939-a794-0b80183d3179" # While loop conditioned on x and n with a jitted body. 
@jax.jit
def loop_body(prev_i):
    # The body is a pure, value-independent computation, so it can be jitted
    # even though the surrounding `while` condition depends on a runtime value.
    return prev_i + 1

def g_inner_jitted(x, n):
    # Keep the value-dependent `while` loop in ordinary Python and only jit
    # the expensive inner step.
    i = 0
    while i < n:
        i = loop_body(i)
    return x + i

g_inner_jitted(10, 20)

# + [markdown] id="5XUT2acoHBz-"
# If we really need to JIT a function that has a condition on the value of an input, we can tell JAX to help itself to a less abstract tracer for a particular input by specifying `static_argnums`. The cost of this is that the resulting jaxpr is less flexible, so JAX will have to re-compile the function for every new value of the specified input. It is only a good strategy if the function is guaranteed to get limited different values.

# + id="2yQmQTDNAenY" outputId="c48f07b8-c3f9-4d2a-9dfd-663838a52511"
f_jit_correct = jax.jit(f, static_argnums=0)
print(f_jit_correct(10))

# + id="R4SXUEu-M-u1" outputId="9e712e14-4e81-4744-dcf2-a10f470d9121"
g_jit_correct = jax.jit(g, static_argnums=1)
print(g_jit_correct(10, 20))

# + [markdown] id="LczjIBt2X2Ms"
# ## When to use JIT
#
# In many of the examples above, jitting is not worth it:

# + id="uMOqsNnqYApD" outputId="2d6c5122-43ad-4257-e56b-e77c889131c2"
print("g jitted:")
# %timeit g_jit_correct(10, 20).block_until_ready()

print("g:")
# %timeit g(10, 20)

# + [markdown] id="cZmGYq80YP0j"
# This is because `jax.jit` introduces some overhead itself. Therefore, it usually only saves time if the compiled function is complex and you will run it numerous times. Fortunately, this is common in machine learning, where we tend to compile a large, complicated model, then run it for millions of iterations.
#
# Generally, you want to jit the largest possible chunk of your computation; ideally, the entire update step. This gives the compiler maximum freedom to optimise.

# + [markdown] id="hJMjUlRcIzVS"
# ## Caching
#
# It's important to understand the caching behaviour of `jax.jit`.
#
# Suppose I define `f = jax.jit(g)`. When I first invoke `f`, it will get compiled, and the resulting XLA code will get cached.
# Subsequent calls of `f` will reuse the cached code. This is how `jax.jit` makes up for the up-front cost of compilation.
#
# If I specify `static_argnums`, then the cached code will be used only for the same values of arguments labelled as static. If any of them change, recompilation occurs. If there are many values, then your program might spend more time compiling than it would have executing ops one-by-one.
#
# Avoid calling `jax.jit` inside loops. For most cases, JAX will be able to use the compiled, cached function in subsequent calls to `jax.jit`. However, because the cache relies on the hash of the function, it becomes problematic when equivalent functions are redefined. This will cause unnecessary compilation each time in the loop:

# + id="6MDSXCfmSZVZ" outputId="a035d0b7-6a4d-4a9e-c6b4-7521970829fc"
from functools import partial

def unjitted_loop_body(prev_i):
    return prev_i + 1

def g_inner_jitted_partial(x, n):
    i = 0
    while i < n:
        # Don't do this! Each iteration, `partial` returns a brand-new
        # function object with a different hash, so the jit cache misses
        # and the body is recompiled every time.
        i = jax.jit(partial(unjitted_loop_body))(i)
    return x + i

def g_inner_jitted_lambda(x, n):
    i = 0
    while i < n:
        # Don't do this! The lambda is likewise a new function object on
        # every iteration, so it also gets a different hash each time.
        i = jax.jit(lambda x: unjitted_loop_body(x))(i)
    return x + i

def g_inner_jitted_normal(x, n):
    i = 0
    while i < n:
        # This is OK: `unjitted_loop_body` is the same function object on
        # every iteration, so JAX can find the cached, compiled function.
        i = jax.jit(unjitted_loop_body)(i)
    return x + i

print("jit called in a loop with partials:")
# %timeit g_inner_jitted_partial(10, 20).block_until_ready()

print("jit called in a loop with lambdas:")
# %timeit g_inner_jitted_lambda(10, 20).block_until_ready()

print("jit called in a loop with caching:")
# %timeit g_inner_jitted_normal(10, 20).block_until_ready()
docs/jax-101/02-jitting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="8v2jx2HkGVOx" colab_type="text" # **How to save this notebook to your personal Drive** # # To copy this notebook to your Google Drive, go to File and select "Save a copy in Drive", where it will automatically open the copy in a new tab for you to work in. This notebook will be saved into a folder on your personal Drive called "Colab Notebooks". # # Still stumped? Check out <a href="https://www.youtube.com/watch?v=dQw4w9WgXcQ"> this video</a> for help # + [markdown] id="wijE69PoSfp7" colab_type="text" # # What is CLEO? # + id="iRVoaWohSfp-" colab_type="code" colab={} from IPython.display import Image Image(url='https://raw.githubusercontent.com/particle-physics-playground/playground/master/activities/images/cleo_det_proc.jpg',width=400) # + [markdown] id="0tzzNzzpSfqK" colab_type="text" # $$e^+e^- \rightarrow \chi \chi$$ # + [markdown] id="-r7LSslcSfqL" colab_type="text" # The <a href="http://www.lns.cornell.edu/public/lab-info/cleo.html">CLEO-II detector</a> was designed to measure the properties of particles produced in the collisions of electrons and positrons supplied by <a href="https://www.classe.cornell.edu/Research/CESR/WebHome.html">CESR</a>. # # The CLEO-II detector was made of many sub-detectors. When the particles are created in each electron-positron collision, they fly through these detectors and we are able to measure the direction in which all these particles went. 
# + id="B2WoyEN9SfqM" colab_type="code" colab={} from IPython.display import Image Image(url='https://raw.githubusercontent.com/particle-physics-playground/playground/master/activities/images/kpipi_color_enhanced-resized.png',width=400) # + [markdown] id="T_yrKAdASfqT" colab_type="text" # Displays like the one above can be difficult to understand, but they are not what we physicists actually analyze. Instead, we use the displays to get information about the electric charge, energy, and momentum of the particles, and that is the data we use. # # <b>Let's go take a look at some of <i>that</i> data!</b> # + [markdown] id="khX3Q9seSfqV" colab_type="text" # The first step is to import some helper functions. One is to get the collisions data out of the files, and the other is to display the particles that are produced in these collisions. # + id="xLvSmMUTSfqX" colab_type="code" colab={} ###### This cell need only be run once per session ############## ###### Make sure your runtime type is Python 3 ######### # Import h5hep from Github. This is to allow us to read these # particular files. # !pip install git+https://github.com/mattbellis/h5hep.git # Import custom tools package from Github. These are some simple accessor functions # to make it easier to work with these data files. # !pip install git+https://github.com/mattbellis/particle_physics_simplified.git import pps_tools as pps import h5hep # + [markdown] id="LHxyG_LDSfqb" colab_type="text" # Next, we will open the file and pull out the collision data. This will return a Python list of all the collisions in that file. # # You can use these data to visualize individual collisions or to perform a data analysis on <i>all</i> the collisions. 
# + id="IHJnxpUGSfqc" colab_type="code" colab={} pps.download_from_drive('small_CLEO_test_file.hdf5') infile = 'data/small_CLEO_test_file.hdf5' collisions = pps.get_collisions(infile,experiment="CLEO",verbose=False) number_of_collisions = len(collisions) print("# of electron-positron collisions: %d" % (number_of_collisions)) import matplotlib.pylab as plt # + [markdown] id="b_gVWb3nSfqh" colab_type="text" # Let's take a look at some of these collisions! # + id="ngd6GlmWSfqh" colab_type="code" colab={} pps.display_collision3D(collisions[6],experiment='CLEO') # + id="eHPSo4EMSfql" colab_type="code" colab={} pps.display_collision3D(collisions[3],experiment='CLEO') # + id="aIsvolSVSfqp" colab_type="code" colab={} pps.display_collision3D(collisions[6],experiment='CLEO') # + [markdown] id="sZOCdvisSfqt" colab_type="text" # What are we looking at here? # # * The green lines represent the electrons colliding. # * The other lines represent particles created in the collisions. The length of these lines tell us how much momentum (or energy) they have. The colors are different particles/object. # * Red - <a href="http://en.wikipedia.org/wiki/Pion">pions</a> # * Orange - <a href="http://en.wikipedia.org/wiki/Kaon">kaons</a> # * Blue - <a href="http://en.wikipedia.org/wiki/Muon">muons</a> # * Green - <a href="http://en.wikipedia.org/wiki/Electron">electrons</a> # * Gray - <a href="http://en.wikipedia.org/wiki/Photon">photons</a> # + [markdown] id="76Zw7hCjSfqy" colab_type="text" # You can also make plots of the properties of the particles. # + id="H_t4Ao0gSfqz" colab_type="code" colab={} energies = [] for collision in collisions: pions = collision['pions'] for pion in pions: energy = pion['e'] energies.append(energy) plt.figure(figsize=(4,4)) h = plt.hist(energies) plt.xlabel('Energy'),plt.ylabel('Frequency'),plt.title('Histogram of Pion Energies'); # + [markdown] id="JAzw5ws2Sfq2" colab_type="text" # So now you know how to play around with data from CLEO-II. 
What do you want to do next? :)
experiment_CLEO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Revisiting Lambert's problem in Python # + import numpy as np import matplotlib.pyplot as plt from cycler import cycler from poliastro.core import iod from poliastro.iod import izzo plt.ion() plt.rc('text', usetex=True) # - # ## Part 1: Reproducing the original figure x = np.linspace(-1, 2, num=1000) M_list = 0, 1, 2, 3 ll_list = 1, 0.9, 0.7, 0, -0.7, -0.9, -1 # + fig, ax = plt.subplots(figsize=(10, 8)) ax.set_prop_cycle(cycler('linestyle', ['-', '--']) * (cycler('color', ['black']) * len(ll_list))) for M in M_list: for ll in ll_list: T_x0 = np.zeros_like(x) for ii in range(len(x)): y = iod._compute_y(x[ii], ll) T_x0[ii] = iod._tof_equation(x[ii], y, 0.0, ll, M) if M == 0 and ll == 1: T_x0[x > 0] = np.nan elif M > 0: # Mask meaningless solutions T_x0[x > 1] = np.nan l, = ax.plot(x, T_x0) ax.set_ylim(0, 10) ax.set_xticks((-1, 0, 1, 2)) ax.set_yticks((0, np.pi, 2 * np.pi, 3 * np.pi)) ax.set_yticklabels(('$0$', '$\pi$', '$2 \pi$', '$3 \pi$')) ax.vlines(1, 0, 10) ax.text(0.65, 4.0, "elliptic") ax.text(1.16, 4.0, "hyperbolic") ax.text(0.05, 1.5, "$M = 0$", bbox=dict(facecolor='white')) ax.text(0.05, 5, "$M = 1$", bbox=dict(facecolor='white')) ax.text(0.05, 8, "$M = 2$", bbox=dict(facecolor='white')) ax.annotate("$\lambda = 1$", xy=(-0.3, 1), xytext=(-0.75, 0.25), arrowprops=dict(arrowstyle="simple", facecolor="black")) ax.annotate("$\lambda = -1$", xy=(0.3, 2.5), xytext=(0.65, 2.75), arrowprops=dict(arrowstyle="simple", facecolor="black")) ax.grid() ax.set_xlabel("$x$") ax.set_ylabel("$T$"); # - # ## Part 2: Locating $T_{min}$ # + for M in M_list: for ll in ll_list: x_T_min, T_min = iod._compute_T_min(ll, M, 10, 1e-8) ax.plot(x_T_min, T_min, 'kx', mew=2) fig # - # ## Part 3: Try out solution # + T_ref = 1 ll_ref = 0 (x_ref, _), = 
iod._find_xy(ll_ref, T_ref, 0, 10, 1e-8) x_ref # + ax.plot(x_ref, T_ref, 'o', mew=2, mec='red', mfc='none') fig # - # ## Part 4: Run some examples # + from astropy import units as u from poliastro.bodies import Earth # - # ### Single revolution # + k = Earth.k r0 = [15945.34, 0.0, 0.0] * u.km r = [12214.83399, 10249.46731, 0.0] * u.km tof = 76.0 * u.min expected_va = [2.058925, 2.915956, 0.0] * u.km / u.s expected_vb = [-3.451569, 0.910301, 0.0] * u.km / u.s (v0, v), = izzo.lambert(k, r0, r, tof) v # + k = Earth.k r0 = [5000.0, 10000.0, 2100.0] * u.km r = [-14600.0, 2500.0, 7000.0] * u.km tof = 1.0 * u.h expected_va = [-5.9925, 1.9254, 3.2456] * u.km / u.s expected_vb = [-3.3125, -4.1966, -0.38529] * u.km / u.s (v0, v), = izzo.lambert(k, r0, r, tof) v # - # ### Multiple revolutions # + k = Earth.k r0 = [22592.145603, -1599.915239, -19783.950506] * u.km r = [1922.067697, 4054.157051, -8925.727465] * u.km tof = 10 * u.h expected_va = [2.000652697, 0.387688615, -2.666947760] * u.km / u.s expected_vb = [-3.79246619, -1.77707641, 6.856814395] * u.km / u.s expected_va_l = [0.50335770, 0.61869408, -1.57176904] * u.km / u.s expected_vb_l = [-4.18334626, -1.13262727, 6.13307091] * u.km / u.s expected_va_r = [-2.45759553, 1.16945801, 0.43161258] * u.km / u.s expected_vb_r = [-5.53841370, 0.01822220, 5.49641054] * u.km / u.s # - (v0, v), = izzo.lambert(k, r0, r, tof, M=0) v (_, v_l), (_, v_r) = izzo.lambert(k, r0, r, tof, M=1) v_l v_r
docs/source/examples/Revisiting Lambert's problem in Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="L30JbHSkiVZx" # ##### Copyright 2021 The TensorFlow Authors. # + cellView="form" id="ZtimvKLdili0" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="QXdiroR-Ue-Z" # # Human Pose Classification with MoveNet and TensorFlow Lite # # This notebook teaches you how to train a pose classification model using MoveNet and TensorFlow Lite. The result is a new TensorFlow Lite model that accepts the output from the MoveNet model as its input, and outputs a pose classification, such as the name of a yoga pose. # # The procedure in this notebook consists of 3 parts: # * Part 1: Preprocess the pose classification training data into a CSV file that specifies the landmarks (body keypoints) detected by the MoveNet model, along with the ground truth pose labels. # * Part 2: Build and train a pose classification model that takes the landmark coordinates from the CSV file as input, and outputs the predicted labels. # * Part 3: Convert the pose classification model to TFLite. # # By default, this notebook uses an image dataset with labeled yoga poses, but we've also included a section in Part 1 where you can upload your own image dataset of poses. 
# # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/lite/tutorials/pose_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/tutorials/pose_classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # <td> # <a href="https://tfhub.dev/s?q=movenet"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> # </td> # </table> # + [markdown] id="IfQ3xP6-EY5r" # ## Preparation # + [markdown] id="Jpy4A1Vpi9jH" # In this section, you'll import the necessary libraries and define several functions to preprocess the training images into a CSV file that contains the landmark coordinates and ground truth labels. # # Nothing observable happens here, but you can expand the hidden code cells to see the implementation for some of the functions we'll be calling later on. 
# # **If you only want to create the CSV file without knowing all the details, just run this section and proceed to Part 1.** # + id="PWlbrkMCx-W-" # !pip install -q opencv-python # + id="KTkttSWnUi1Q" import csv import cv2 import itertools import numpy as np import pandas as pd import os import sys import tempfile import tqdm from matplotlib import pyplot as plt from matplotlib.collections import LineCollection import tensorflow as tf import tensorflow_hub as hub from tensorflow import keras from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score, classification_report, confusion_matrix # + [markdown] id="KwRwEssyTciI" # ### Code to run pose estimation using MoveNet # + cellView="form" id="48kW1c2F5l1R" #@title Functions to run pose estimation with MoveNet #@markdown You'll download the MoveNet Thunder model from [TensorFlow Hub](https://www.google.com/url?sa=D&q=https%3A%2F%2Ftfhub.dev%2Fs%3Fq%3Dmovenet), and reuse some inference and visualization logic from the [MoveNet Raspberry Pi (Python)](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/raspberry_pi) sample app to detect landmarks (ear, nose, wrist etc.) from the input images. #@markdown *Note: You should use the most accurate pose estimation model (i.e. MoveNet Thunder) to detect the keypoints and use them to train the pose classification model to achieve the best accuracy. When running inference, you can use a pose estimation model of your choice (e.g. 
either MoveNet Lightning or Thunder).* # Download model from TF Hub and check out inference code from GitHub # !wget -q -O movenet_thunder.tflite https://tfhub.dev/google/lite-model/movenet/singlepose/thunder/tflite/float16/4?lite-format=tflite # !git clone https://github.com/tensorflow/examples.git pose_sample_rpi_path = os.path.join(os.getcwd(), 'examples/lite/examples/pose_estimation/raspberry_pi') sys.path.append(pose_sample_rpi_path) # Load MoveNet Thunder model import utils from movenet import Movenet movenet = Movenet('movenet_thunder') # Define function to run pose estimation using MoveNet Thunder. # You'll apply MoveNet's cropping algorithm and run inference multiple times on # the input image to improve pose estimation accuracy. def detect(input_tensor, inference_count=3): """Runs detection on an input image. Args: input_tensor: A [1, height, width, 3] Tensor of type tf.float32. Note that height and width can be anything since the image will be immediately resized according to the needs of the model within this function. inference_count: Number of times the model should run repeatly on the same input image to improve detection accuracy. Returns: A dict containing 1 Tensor of shape [1, 1, 17, 3] representing the keypoint coordinates and scores. """ image_height, image_width, channel = input_tensor.shape # Detect pose using the full input image movenet.detect(input_tensor.numpy(), reset_crop_region=True) # Repeatedly using previous detection result to identify the region of # interest and only croping that region to improve detection accuracy for _ in range(inference_count - 1): keypoint_with_scores = movenet.detect(input_tensor.numpy(), reset_crop_region=False) return keypoint_with_scores # + cellView="form" id="fKo0NzwQJ5Rm" #@title Functions to visualize the pose estimation results. def draw_prediction_on_image( image, keypoints_with_scores, crop_region=None, close_figure=True, keep_input_size=False): """Draws the keypoint predictions on image. 
Args: image: An numpy array with shape [height, width, channel] representing the pixel values of the input image. keypoints_with_scores: An numpy array with shape [1, 1, 17, 3] representing the keypoint coordinates and scores returned from the MoveNet model. crop_region: Set the region to crop the output image. close_figure: Whether to close the plt figure after the function returns. keep_input_size: Whether to keep the size of the input image. Returns: An numpy array with shape [out_height, out_width, channel] representing the image overlaid with keypoint predictions. """ height, width, channel = image.shape aspect_ratio = float(width) / height fig, ax = plt.subplots(figsize=(12 * aspect_ratio, 12)) # To remove the huge white borders fig.tight_layout(pad=0) ax.margins(0) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.axis('off') im = ax.imshow(image) line_segments = LineCollection([], linewidths=(2), linestyle='solid') ax.add_collection(line_segments) # Turn off tick labels scat = ax.scatter([], [], s=60, color='#FF1493', zorder=2) # Calculate visualization items from pose estimation result (keypoint_locs, keypoint_edges, edge_colors) = utils.keypoints_and_edges_for_display( keypoints_with_scores, height, width) edge_colors = [(r/255.0, g/255.0, b/255.0) for (r ,g , b) in edge_colors] line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_edges.shape[0]: line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_locs.shape[0]: scat.set_offsets(keypoint_locs) if crop_region is not None: xmin = max(crop_region['x_min'] * width, 0.0) ymin = max(crop_region['y_min'] * height, 0.0) rec_width = min(crop_region['x_max'], 0.99) * width - xmin rec_height = min(crop_region['y_max'], 0.99) * height - ymin rect = patches.Rectangle( (xmin,ymin),rec_width,rec_height, linewidth=1,edgecolor='b',facecolor='none') ax.add_patch(rect) fig.canvas.draw() image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), 
dtype=np.uint8) image_from_plot = image_from_plot.reshape( fig.canvas.get_width_height()[::-1] + (3,)) if close_figure: plt.close(fig) if keep_input_size: image_from_plot = cv2.resize(image_from_plot, dsize=(width, height), interpolation=cv2.INTER_CUBIC) return image_from_plot # + cellView="form" id="QUkOW_26S6K-" #@title Code to load the images, detect pose landmarks and save them into a CSV file class MoveNetPreprocessor(object): """Helper class to preprocess pose sample images for classification.""" def __init__(self, images_in_folder, images_out_folder, csvs_out_path): """Creates a preprocessor to detection pose from images and save as CSV. Args: images_in_folder: Path to the folder with the input images. It should follow this structure: yoga_poses |__ downdog |______ 00000128.jpg |______ 00000181.bmp |______ ... |__ goddess |______ 00000243.jpg |______ 00000306.jpg |______ ... ... images_out_folder: Path to write the images overlay with detected landmarks. These images are useful when you need to debug accuracy issues. csvs_out_path: Path to write the CSV containing the detected landmark coordinates and label of each image that can be used to train a pose classification model. """ self._images_in_folder = images_in_folder self._images_out_folder = images_out_folder self._csvs_out_path = csvs_out_path self._messages = [] # Create a temp dir to store the pose CSVs per class self._csvs_out_folder_per_class = tempfile.mkdtemp() # Get list of pose classes and print image statistics self._pose_class_names = sorted( [n for n in os.listdir(self._images_in_folder) if not n.startswith('.')] ) def process(self, per_pose_class_limit=None, detection_threshold=0.1): """Preprocesses images in the given folder. Args: per_pose_class_limit: Number of images to load. As preprocessing usually takes time, this parameter can be specified to make the reduce the dataset for testing. detection_threshold: Only keep images with all landmark confidence score above this threshold. 
""" # Loop through the classes and preprocess its images for pose_class_name in self._pose_class_names: print('Preprocessing', pose_class_name, file=sys.stderr) # Paths for the pose class. images_in_folder = os.path.join(self._images_in_folder, pose_class_name) images_out_folder = os.path.join(self._images_out_folder, pose_class_name) csv_out_path = os.path.join(self._csvs_out_folder_per_class, pose_class_name + '.csv') if not os.path.exists(images_out_folder): os.makedirs(images_out_folder) # Detect landmarks in each image and write it to a CSV file with open(csv_out_path, 'w') as csv_out_file: csv_out_writer = csv.writer(csv_out_file, delimiter=',', quoting=csv.QUOTE_MINIMAL) # Get list of images image_names = sorted( [n for n in os.listdir(images_in_folder) if not n.startswith('.')]) if per_pose_class_limit is not None: image_names = image_names[:per_pose_class_limit] # Detect pose landmarks from each image for image_name in tqdm.tqdm(image_names): image_path = os.path.join(images_in_folder, image_name) try: image = tf.io.read_file(image_path) image = tf.io.decode_jpeg(image) except: self._messages.append('Skipped ' + image_path + '. Invalid image.') continue else: image = tf.io.read_file(image_path) image = tf.io.decode_jpeg(image) image_height, image_width, channel = image.shape # Skip images that isn't RGB because Movenet requires RGB images if channel != 3: self._messages.append('Skipped ' + image_path + '. Image isn\'t in RGB format.') continue keypoint_with_scores = detect(image) # Save landmarks if all landmarks were detected min_landmark_score = np.amin(keypoint_with_scores[:2]) should_keep_image = min_landmark_score >= detection_threshold if not should_keep_image: self._messages.append('Skipped ' + image_path + '. 
No pose was confidentlly detected.') continue # Draw the prediction result on top of the image for debugging later output_overlay = draw_prediction_on_image( image.numpy().astype(np.uint8), keypoint_with_scores, crop_region=None, close_figure=True, keep_input_size=True) # Write detection result to into an image file output_frame = cv2.cvtColor(output_overlay, cv2.COLOR_RGB2BGR) cv2.imwrite(os.path.join(images_out_folder, image_name), output_frame) # Get landmarks and scale it to the same size as the input image pose_landmarks = np.array( [[lmk[0] * image_width, lmk[1] * image_height, lmk[2]] for lmk in keypoint_with_scores], dtype=np.float32) # Write the landmark coordinates to its per-class CSV file coordinates = pose_landmarks.flatten().astype(np.str).tolist() csv_out_writer.writerow([image_name] + coordinates) # Print the error message collected during preprocessing. print('\n'.join(self._messages)) # Combine all per-class CSVs into a single output file all_landmarks_df = self._all_landmarks_as_dataframe() all_landmarks_df.to_csv(self._csvs_out_path, index=False) def class_names(self): """List of classes found in the training dataset.""" return self._pose_class_names def _all_landmarks_as_dataframe(self): """Merge all per-class CSVs into a single dataframe.""" total_df = None for class_index, class_name in enumerate(self._pose_class_names): csv_out_path = os.path.join(self._csvs_out_folder_per_class, class_name + '.csv') per_class_df = pd.read_csv(csv_out_path, header=None) # Add the labels per_class_df['class_no'] = [class_index]*len(per_class_df) per_class_df['class_name'] = [class_name]*len(per_class_df) # Append the folder name to the filename column (first column) per_class_df[per_class_df.columns[0]] = (os.path.join(class_name, '') + per_class_df[per_class_df.columns[0]].astype(str)) if total_df is None: # For the first class, assign its data to the total dataframe total_df = per_class_df else: # Concatenate each class's data into the total dataframe 
total_df = pd.concat([total_df, per_class_df], axis=0) list_name = [[key + '_x', key + '_y', key + '_score'] for key in utils.KEYPOINT_DICT.keys()] header_name = [] for columns_name in list_name: header_name += columns_name header_name = ['file_name'] + header_name header_map = {total_df.columns[i]: header_name[i] for i in range(len(header_name))} total_df.rename(header_map, axis=1, inplace=True) return total_df # + cellView="form" id="LB3QIVrdU108" #@title (Optional) Code snippet to try out the Movenet pose estimation logic #@markdown You can download an image from the internet, run the pose estimation logic on it and plot the detected landmarks on top of the input image. #@markdown *Note: This code snippet is also useful for debugging when you encounter an image with bad pose classification accuracy. You can run pose estimation on the image and see if the detected landmarks look correct or not before investigating the pose classification logic.* test_image_url = "https://cdn.pixabay.com/photo/2017/03/03/17/30/yoga-2114512_960_720.jpg" #@param {type:"string"} # !wget -O /tmp/image.jpeg {test_image_url} if len(test_image_url): image = tf.io.read_file('/tmp/image.jpeg') image = tf.io.decode_jpeg(image) keypoint_with_scores = detect(image) _ = draw_prediction_on_image(image, keypoint_with_scores, crop_region=None, close_figure=False, keep_input_size=True) # + [markdown] id="L24GWhgo4WAl" # ## Part 1: Preprocess the input images # # Because the input for our pose classifier is the *output* landmarks from the MoveNet model, we need to generate our training dataset by running labeled images through MoveNet and then capturing all the landmark data and ground truth labels into a CSV file. # # The dataset we've provided for this tutorial is a CG-generated yoga pose dataset. It contains images of multiple CG-generated models doing 5 different yoga poses. The directory is already split into a `train` dataset and a `test` dataset. 
# So in this section, we'll download the yoga dataset and run it through MoveNet so we can capture all the landmarks into a CSV file... **However, it takes about 15 minutes to feed our yoga dataset to MoveNet and generate this CSV file**. So as an alternative, you can download a pre-existing CSV file for the yoga dataset by setting `is_skip_step_1` parameter below to **True**. That way, you'll skip this step and instead download the same CSV file that will be created in this preprocessing step.
#
# On the other hand, if you want to train the pose classifier with your own image dataset, you need to upload your images and run this preprocessing step (leave `is_skip_step_1` **False**)—follow the instructions below to upload your own pose dataset.

# + cellView="form" id="Kw6jwOFD40Fr"
is_skip_step_1 = False #@param ["False", "True"] {type:"raw"}

# + [markdown] id="TJXSR2CQhm-z"
# ### (Optional) Upload your own pose dataset

# + cellView="form" id="iEnjgeKeS_VP"
use_custom_dataset = False #@param ["False", "True"] {type:"raw"}

dataset_is_split = False #@param ["False", "True"] {type:"raw"}

# + [markdown] id="YiqF3sRf3LLC"
# If you want to train the pose classifier with your own labeled poses (they can be any poses, not just yoga poses), follow these steps:
#
# 1. Set the above `use_custom_dataset` option to **True**.
#
# 2. Prepare an archive file (ZIP, TAR, or other) that includes a folder with your images dataset. The folder must include sorted images of your poses as follows.
#
#   If you've already split your dataset into train and test sets, then set `dataset_is_split` to **True**. That is, your images folder must include "train" and "test" directories like this:
#
#   ```
#   yoga_poses/
#   |__ train/
#       |__ downdog/
#           |______ 00000128.jpg
#           |______ ...
#   |__ test/
#       |__ downdog/
#           |______ 00000181.jpg
#           |______ ...
#   ```
#
#   Or, if your dataset is NOT split yet, then set
#   `dataset_is_split` to **False** and we'll split it up based
#   on a specified split fraction. That is, your uploaded images
#   folder should look like this:
#
#   ```
#   yoga_poses/
#   |__ downdog/
#       |______ 00000128.jpg
#       |______ 00000181.jpg
#       |______ ...
#   |__ goddess/
#       |______ 00000243.jpg
#       |______ 00000306.jpg
#       |______ ...
#   ```
# 3. Click the **Files** tab on the left (folder icon) and then click **Upload to session storage** (file icon).
# 4. Select your archive file and wait until it finishes uploading before you proceed.
# 5. Edit the following code block to specify the name of your archive file and images directory. (By default, we expect a ZIP file, so you'll need to also modify that part if your archive is another format.)
# 6. Now run the rest of the notebook.

# + cellView="form" id="joAHy_r62dsI"
#@markdown Be sure you run this cell. It's hiding the `split_into_train_test()` function that's called in the next code block.

import os
import random
import shutil

def split_into_train_test(images_origin, images_dest, test_split):
  """Splits a directory of sorted images into training and test sets.

  The source images are *copied* (not moved), so `images_origin` is left
  untouched. Only PNG, JPEG, and BMP files are considered; anything else
  is ignored. The split is deterministic: filenames are sorted and then
  shuffled with a fixed random seed before being divided.

  Args:
    images_origin: Path to the directory with your images. This directory
      must include subdirectories for each of your labeled classes. For
      example:
      yoga_poses/
      |__ downdog/
          |______ 00000128.jpg
          |______ 00000181.jpg
          |______ ...
      |__ goddess/
          |______ 00000243.jpg
          |______ 00000306.jpg
          |______ ...
      ...
    images_dest: Path to a directory where you want the split dataset to be
      saved. The result looks like this:
      split_yoga_poses/
      |__ train/
          |__ downdog/
              |______ 00000128.jpg
              |______ ...
      |__ test/
          |__ downdog/
              |______ 00000181.jpg
              |______ ...
    test_split: Fraction of data to reserve for test (float between 0 and 1).
  """
  _, dirs, _ = next(os.walk(images_origin))

  TRAIN_DIR = os.path.join(images_dest, 'train')
  TEST_DIR = os.path.join(images_dest, 'test')
  os.makedirs(TRAIN_DIR, exist_ok=True)
  os.makedirs(TEST_DIR, exist_ok=True)

  # `class_name` (instead of `dir`) avoids shadowing the `dir` builtin.
  for class_name in dirs:
    # Get all filenames for this class, filtered by filetype.
    filenames = os.listdir(os.path.join(images_origin, class_name))
    filenames = [os.path.join(images_origin, class_name, f) for f in filenames if (
        f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.bmp'))]
    # Shuffle the files, deterministically: sort first so the shuffle input
    # doesn't depend on filesystem listing order, then use a fixed seed.
    filenames.sort()
    random.seed(42)
    random.shuffle(filenames)
    # Divide them into train/test dirs. The first `test_count` shuffled
    # files go to the test set, the remainder to the training set.
    os.makedirs(os.path.join(TEST_DIR, class_name), exist_ok=True)
    os.makedirs(os.path.join(TRAIN_DIR, class_name), exist_ok=True)
    test_count = int(len(filenames) * test_split)
    for i, file in enumerate(filenames):
      if i < test_count:
        destination = os.path.join(TEST_DIR, class_name, os.path.split(file)[1])
      else:
        destination = os.path.join(TRAIN_DIR, class_name, os.path.split(file)[1])
      shutil.copyfile(file, destination)
    # "Copied" (not "Moved"): shutil.copyfile leaves the originals in place.
    print(f'Copied {test_count} of {len(filenames)} from class "{class_name}" into test.')
  print(f'Your split dataset is in "{images_dest}"')

# + id="IfpNIjAmR0lp"
if use_custom_dataset:
  # ATTENTION:
  # You must edit these two lines to match your archive and images folder name:
  # # !tar -xf YOUR_DATASET_ARCHIVE_NAME.tar
  # !unzip -q YOUR_DATASET_ARCHIVE_NAME.zip
  dataset_in = 'YOUR_DATASET_DIR_NAME'

  # You can leave the rest alone:
  if not os.path.isdir(dataset_in):
    raise Exception("dataset_in is not a valid directory")
  if dataset_is_split:
    IMAGES_ROOT = dataset_in
  else:
    dataset_out = 'split_' + dataset_in
    split_into_train_test(dataset_in, dataset_out, test_split=0.2)
    IMAGES_ROOT = dataset_out

# + [markdown] id="IPkTA5-sNF7W"
# **Note:** If you're using `split_into_train_test()` to split the dataset, it expects all images to be PNG, JPEG, or BMP—it ignores other file types.
# + [markdown] id="dcoak0QHW5d1" # ### Download the yoga dataset # + id="GVpOi5Hr4Xxt" if not is_skip_step_1 and not use_custom_dataset: # !wget -O yoga_poses.zip http://download.tensorflow.org/data/pose_classification/yoga_poses.zip # !unzip -q yoga_poses.zip -d yoga_cg IMAGES_ROOT = "yoga_cg" # + [markdown] id="vxOkXvm-TvOZ" # ### Preprocess the `TRAIN` dataset # + id="OsdqxGfxTE2H" if not is_skip_step_1: images_in_train_folder = os.path.join(IMAGES_ROOT, 'train') images_out_train_folder = 'poses_images_out_train' csvs_out_train_path = 'train_data.csv' preprocessor = MoveNetPreprocessor( images_in_folder=images_in_train_folder, images_out_folder=images_out_train_folder, csvs_out_path=csvs_out_train_path, ) preprocessor.process(per_pose_class_limit=None) # + [markdown] id="cQtgAeHVT0UE" # ### Preprocess the `TEST` dataset # + id="hddKVPjrTNbt" if not is_skip_step_1: images_in_test_folder = os.path.join(IMAGES_ROOT, 'test') images_out_test_folder = 'poses_images_out_test' csvs_out_test_path = 'test_data.csv' preprocessor = MoveNetPreprocessor( images_in_folder=images_in_test_folder, images_out_folder=images_out_test_folder, csvs_out_path=csvs_out_test_path, ) preprocessor.process(per_pose_class_limit=None) # + [markdown] id="UevEKViRT_6J" # ## Part 2: Train a pose classification model that takes the landmark coordinates as input, and output the predicted labels. # # You'll build a TensorFlow model that takes the landmark coordinates and predicts the pose class that the person in the input image performs. The model consists of two submodels: # # * Submodel 1 calculates a pose embedding (a.k.a feature vector) from the detected landmark coordinates. # * Submodel 2 feeds pose embedding through several `Dense` layer to predict the pose class. # # You'll then train the model based on the dataset that were preprocessed in part 1. 
# + [markdown] id="E2D1czPJazvb"
# ### (Optional) Download the preprocessed dataset if you didn't run part 1

# + id="ShpOD7yb4MRp"
# Download the preprocessed CSV files which are the same as the output of step 1
if is_skip_step_1:
    # !wget -O train_data.csv http://download.tensorflow.org/data/pose_classification/yoga_train_data.csv
    # !wget -O test_data.csv http://download.tensorflow.org/data/pose_classification/yoga_test_data.csv

    csvs_out_train_path = 'train_data.csv'
    csvs_out_test_path = 'test_data.csv'
    # (The original notebook set an unused `is_skipped_step_1 = True` here —
    # a typo of `is_skip_step_1`, which is already True in this branch, so the
    # dead assignment has been removed.)

# + [markdown] id="iGMcoSwLwRSD"
# ### Load the preprocessed CSVs into `TRAIN` and `TEST` datasets.

# + id="pOUcc8EL5rrj"
def load_pose_landmarks(csv_path):
    """Loads a CSV created by the MoveNetPreprocessor.

    Args:
      csv_path: Path to a CSV produced in part 1. Each row holds one image's
        file name, class name, class number, and the detected landmarks.

    Returns:
      X: Detected landmark coordinates and scores of shape (N, 17 * 3)
      y: One-hot ground truth labels of shape (N, label_count)
      classes: The list of all class names found in the dataset
      dataframe: The CSV loaded as a Pandas dataframe with the features (X)
        and ground truth labels (y) to use later to train a pose
        classification model.
    """
    # Load the CSV file
    dataframe = pd.read_csv(csv_path)
    df_to_process = dataframe.copy()

    # Drop the file_name columns as you don't need it during training.
    df_to_process.drop(columns=['file_name'], inplace=True)

    # Extract the list of class names
    classes = df_to_process.pop('class_name').unique()

    # Extract the labels
    y = df_to_process.pop('class_no')

    # Convert the input features and labels into the correct format for training.
    X = df_to_process.astype('float64')
    y = keras.utils.to_categorical(y)

    return X, y, classes, dataframe

# + [markdown] id="UMrLzfPz7E1U"
# Load and split the original `TRAIN` dataset into `TRAIN` (85% of the data) and `VALIDATE` (the remaining 15%).
# + id="xawmSDGXUUzW"
# Load the train data
X, y, class_names, _ = load_pose_landmarks(csvs_out_train_path)

# Split training data (X, y) into (X_train, y_train) and (X_val, y_val)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.15)

# + id="R42kicUMaTX0"
# Load the test data
X_test, y_test, _, df_test = load_pose_landmarks(csvs_out_test_path)

# + [markdown] id="ydb-bd_UWXMq"
# ### Define functions to convert the pose landmarks to a pose embedding (a.k.a. feature vector) for pose classification
#
# Next, convert the landmark coordinates to a feature vector by:
# 1. Moving the pose center to the origin.
# 2. Scaling the pose so that the pose size becomes 1
# 3. Flattening these coordinates into a feature vector
#
# Then use this feature vector to train a neural-network based pose classifier.

# + id="HgQMdfeT65Z5"
def get_center_point(landmarks, left_name, right_name):
    """Calculates the center point of the two given landmarks.

    Args:
      landmarks: Tensor of keypoint coordinates; axis 1 indexes the 17
        keypoints, looked up by name through utils.KEYPOINT_DICT.
      left_name: Name of the left-side keypoint, e.g. "left_hip".
      right_name: Name of the right-side keypoint, e.g. "right_hip".

    Returns:
      The element-wise midpoint of the two keypoints.
    """
    left = tf.gather(landmarks, utils.KEYPOINT_DICT[left_name], axis=1)
    right = tf.gather(landmarks, utils.KEYPOINT_DICT[right_name], axis=1)
    center = left * 0.5 + right * 0.5
    return center


def get_pose_size(landmarks, torso_size_multiplier=2.5):
    """Calculates pose size.

    It is the maximum of two values:
      * Torso size multiplied by `torso_size_multiplier`
      * Maximum distance from pose center to any pose landmark
    """
    # Hips center
    hips_center = get_center_point(landmarks, "left_hip", "right_hip")

    # Shoulders center
    shoulders_center = get_center_point(landmarks, "left_shoulder", "right_shoulder")

    # Torso size as the minimum body size
    torso_size = tf.linalg.norm(shoulders_center - hips_center)

    # Pose center
    pose_center_new = get_center_point(landmarks, "left_hip", "right_hip")
    pose_center_new = tf.expand_dims(pose_center_new, axis=1)
    # Broadcast the pose center to the same size as the landmark vector to
    # perform subtraction
    pose_center_new = tf.broadcast_to(pose_center_new, [tf.size(landmarks) // (17*2), 17, 2])

    # Dist to pose center
    # NOTE(review): tf.gather(..., 0, axis=0) keeps only the FIRST sample of
    # the batch, so max_dist below is computed from that sample alone. This
    # matches the upstream TensorFlow tutorial code, but confirm it is
    # intentional before reusing with batch size > 1.
    d = tf.gather(landmarks - pose_center_new, 0, axis=0, name="dist_to_pose_center")
    # Max dist to pose center
    max_dist = tf.reduce_max(tf.linalg.norm(d, axis=0))

    # Normalize scale
    pose_size = tf.maximum(torso_size * torso_size_multiplier, max_dist)
    return pose_size


def normalize_pose_landmarks(landmarks):
    """Normalizes the landmarks translation by moving the pose center to (0,0) and
    scaling it to a constant pose size.
    """
    # Move landmarks so that the pose center becomes (0,0)
    pose_center = get_center_point(landmarks, "left_hip", "right_hip")
    pose_center = tf.expand_dims(pose_center, axis=1)
    # Broadcast the pose center to the same size as the landmark vector to perform
    # subtraction
    pose_center = tf.broadcast_to(pose_center, [tf.size(landmarks) // (17*2), 17, 2])
    landmarks = landmarks - pose_center

    # Scale the landmarks to a constant pose size
    pose_size = get_pose_size(landmarks)
    landmarks /= pose_size

    return landmarks


def landmarks_to_embedding(landmarks_and_scores):
    """Converts the input landmarks into a pose embedding."""
    # Reshape the flat input into a matrix with shape=(17, 3)
    reshaped_inputs = keras.layers.Reshape((17, 3))(landmarks_and_scores)

    # Normalize landmarks 2D (drop the per-keypoint score, keep x and y only)
    landmarks = normalize_pose_landmarks(reshaped_inputs[:, :, :2])

    # Flatten the normalized landmark coordinates into a vector
    embedding = keras.layers.Flatten()(landmarks)

    return embedding

# + [markdown] id="PI7Wb3Bagau3"
# ### Define a Keras model for pose classification
#
# Our Keras model takes the detected pose landmarks, then calculates the pose embedding and predicts the pose class.

# + id="1Pte6b1bgWKv"
# Define the model: input is the flat (17 * 3 = 51) landmark vector.
inputs = tf.keras.Input(shape=(51))
embedding = landmarks_to_embedding(inputs)

layer = keras.layers.Dense(128, activation=tf.nn.relu6)(embedding)
layer = keras.layers.Dropout(0.5)(layer)
layer = keras.layers.Dense(64, activation=tf.nn.relu6)(layer)
layer = keras.layers.Dropout(0.5)(layer)
# 5 output units — one per yoga pose class in this dataset.
outputs = keras.layers.Dense(5, activation="softmax")(layer)

model = keras.Model(inputs, outputs)
model.summary()

# + id="5ZuMwd7Ugtsa"
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Add a checkpoint callback to store the checkpoint that has the highest
# validation accuracy.
checkpoint_path = "weights.best.hdf5"
# Keep only the weights with the best validation accuracy seen so far.
checkpoint = keras.callbacks.ModelCheckpoint(checkpoint_path,
                                             monitor='val_accuracy',
                                             verbose=1,
                                             save_best_only=True,
                                             mode='max')
# Stop training early if validation accuracy hasn't improved for 20 epochs.
earlystopping = keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                              patience=20)

# Start training
history = model.fit(X_train, y_train,
                    epochs=200,
                    batch_size=16,
                    validation_data=(X_val, y_val),
                    callbacks=[checkpoint, earlystopping])

# + id="pNVqmd2JO6Rp"
# Visualize the training history to see whether you're overfitting.
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['TRAIN', 'VAL'], loc='lower right')
plt.show()

# + id="m_byMBVQgyQm"
# Evaluate the model using the TEST dataset
loss, accuracy = model.evaluate(X_test, y_test)

# + [markdown] id="JPnPmwjn9452"
# ### Draw the confusion matrix to better understand the model performance

# + id="CJuVw7gygyyd"
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Plots the confusion matrix.

    Args:
      cm: Confusion matrix (2-D array of counts), e.g. the output of
        sklearn's confusion_matrix.
      classes: Class names used as tick labels on both axes.
      normalize: If True, divide each row by its sum so cells show
        per-class fractions instead of raw counts.
      title: Title drawn above the plot.
      cmap: Matplotlib colormap for the cells.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=55)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # Threshold used to pick a text color that contrasts with the cell shade.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()

# Classify pose in the TEST dataset using the trained model
y_pred = model.predict(X_test)

# Convert the prediction result to class name
y_pred_label = [class_names[i] for i in np.argmax(y_pred, axis=1)]
y_true_label = [class_names[i] for i in np.argmax(y_test, axis=1)]

# Plot the confusion matrix
cm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))
plot_confusion_matrix(cm,
                      class_names,
                      title ='Confusion Matrix of Pose Classification Model')

# Print the classification report
print('\nClassification Report:\n', classification_report(y_true_label,
                                                          y_pred_label))

# + [markdown] id="YPmtRf79GkCa"
# ### (Optional) Investigate incorrect predictions
#
# You can look at the poses from the `TEST` dataset that were incorrectly predicted to see whether the model accuracy can be improved.
#
# *Note: This only works if you have run step 1 because you need the pose image files on your local machine to display them.*

# + id="bdJdwOkFGonK"
if is_skip_step_1:
    print('ERROR: You must have run step 1 to run this cell.')
else:
    # Step 1 was run, so the pose image files exist locally and can be shown.
    IMAGE_PER_ROW = 3
    MAX_NO_OF_IMAGE_TO_PLOT = 30

    # Extract the list of incorrectly predicted poses
    false_predict = [id_in_df for id_in_df in range(len(y_test)) \
                     if y_pred_label[id_in_df] != y_true_label[id_in_df]]
    if len(false_predict) > MAX_NO_OF_IMAGE_TO_PLOT:
        false_predict = false_predict[:MAX_NO_OF_IMAGE_TO_PLOT]

    # Plot the incorrectly predicted images
    row_count = len(false_predict) // IMAGE_PER_ROW + 1
    fig = plt.figure(figsize=(10 * IMAGE_PER_ROW, 10 * row_count))
    for i, id_in_df in enumerate(false_predict):
        ax = fig.add_subplot(row_count, IMAGE_PER_ROW, i + 1)
        image_path = os.path.join(images_out_test_folder,
                                  df_test.iloc[id_in_df]['file_name'])
        image = cv2.imread(image_path)
        plt.title("Predict: %s; Actual: %s"
                  % (y_pred_label[id_in_df], y_true_label[id_in_df]))
        # cv2 reads BGR; convert to RGB for matplotlib.
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.show()

# + [markdown] id="uhY0VeDkFK7W"
# ## Part 3: Convert the pose classification model to TensorFlow Lite
#
# You'll convert the Keras pose classification model to the TensorFlow Lite format so that you can deploy it to mobile apps, web browsers and IoT devices. When converting the model, you'll apply [dynamic range quantization](https://www.tensorflow.org/lite/performance/post_training_quant) to reduce the pose classification TensorFlow Lite model size by about 4 times with insignificant accuracy loss.
#
# *Note: TensorFlow Lite supports multiple quantization schemes. See the [documentation](https://www.tensorflow.org/lite/performance/model_optimization) if you are interested to learn more.*

# + id="FmwEAgi2Flb3"
converter = tf.lite.TFLiteConverter.from_keras_model(model)
# Dynamic-range quantization: shrinks the model roughly 4x.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()

print('Model size: %dKB' % (len(tflite_model) / 1024))

with open('pose_classifier.tflite', 'wb') as f:
    f.write(tflite_model)

# + [markdown] id="XUOQwuBP6jMH"
# Then you'll write the label file which contains mapping from the class indexes to the human readable class names.
# + id="ZVW9j5vF6hBM"
# Write the label file: one class name per line, in class-index order.
with open('pose_labels.txt', 'w') as f:
    f.write('\n'.join(class_names))

# + [markdown] id="H4T0HFGCve-Y"
# As you've applied quantization to reduce the model size, let's evaluate the quantized TFLite model to check whether the accuracy drop is acceptable.

# + id="rv4fZFNcsN-1"
def evaluate_model(interpreter, X, y_true):
    """Evaluates the given TFLite model and return its accuracy.

    Args:
      interpreter: A tf.lite.Interpreter with tensors already allocated.
      X: Input features, one row per sample (convertible to float32).
      y_true: One-hot ground truth labels of shape (N, num_classes).

    Returns:
      Classification accuracy as a float in [0, 1].
    """
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]

    # Run predictions on all given poses.
    y_pred = []
    for i in range(len(y_true)):
        # Pre-processing: add batch dimension and convert to float32 to match with
        # the model's input data format.
        test_image = X[i: i + 1].astype('float32')
        interpreter.set_tensor(input_index, test_image)

        # Run inference.
        interpreter.invoke()

        # Post-processing: remove batch dimension and find the class with highest
        # probability.
        output = interpreter.tensor(output_index)
        predicted_label = np.argmax(output()[0])
        y_pred.append(predicted_label)

    # Compare prediction results with ground truth labels to calculate accuracy.
    # Pass num_classes explicitly: without it, if the model never predicts the
    # highest class index, to_categorical would produce fewer columns than
    # y_true and accuracy_score would fail on mismatched shapes.
    y_pred = keras.utils.to_categorical(y_pred, num_classes=y_true.shape[1])
    return accuracy_score(y_true, y_pred)

# Evaluate the accuracy of the converted TFLite model
classifier_interpreter = tf.lite.Interpreter(model_content=tflite_model)
classifier_interpreter.allocate_tensors()
print('Accuracy of TFLite model: %s' % evaluate_model(classifier_interpreter, X_test, y_test))

# + [markdown] id="-HWqcersePiY"
# Now you can download the TFLite model (`pose_classifier.tflite`) and the label file (`pose_labels.txt`) to classify custom poses. See the [Android](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/android) and [Python/Raspberry Pi](https://github.com/tensorflow/examples/tree/master/lite/examples/pose_estimation/raspberry_pi) sample app for an end-to-end example of how to use the TFLite pose classification model.
# + id="KvcM_LkApOT3" # !zip pose_classifier.zip pose_labels.txt pose_classifier.tflite # + id="VQ-i27VypI1u" # Download the zip archive if running on Colab. try: from google.colab import files files.download('pose_classifier.zip') except: pass
site/en-snapshot/lite/tutorials/pose_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import os.path as op

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -

# ## Function definitions

def edgelist2mat(edgelist, N):
    """Builds a symmetric N x N adjacency matrix from a weighted edge list.

    Args:
      edgelist: Iterable of (node_a, node_b, weight) rows; node IDs are
        1-indexed in the edge-list files.
      N: Number of nodes in the graph.

    Returns:
      An (N, N) float array with the weight stored at both [a-1, b-1]
      and [b-1, a-1]; absent edges stay 0.
    """
    mat = np.zeros((N, N))
    for edge in edgelist:
        # Shift the 1-indexed node IDs to 0-indexed matrix positions.
        mat[int(edge[0]-1), int(edge[1]-1)] = edge[2]
        mat[int(edge[1]-1), int(edge[0]-1)] = edge[2]
    return mat

def load_graphs(directory, N):
    """Loads every edge-list file in `directory` into one stacked array.

    Args:
      directory: Directory containing one whitespace-separated edge-list
        file per subject.
      N: Number of nodes per graph.

    Returns:
      An (N, N, n_files) array of adjacency matrices, stacked in sorted
      filename order.
    """
    # Sort so the stacking order is deterministic; os.listdir() order is
    # arbitrary and platform-dependent.
    fls = sorted(op.join(directory, f) for f in os.listdir(directory))
    stacked = np.empty((N, N, len(fls)))
    for idx, fl in enumerate(fls):
        # ndmin=2 keeps a single-edge file as shape (1, 3); without it,
        # np.loadtxt returns a 1-D array and edgelist2mat would iterate
        # over scalars and crash.
        tmp_elist = np.loadtxt(fl, ndmin=2)
        stacked[:, :, idx] = edgelist2mat(tmp_elist, N)
    return stacked

# ## Diffusion

data_dir_dwi = '/Users/greg/code/gkiar/3d-cnn-ae/hcp1200_dwi_DKT/'
N = 83

stacked_dwi = load_graphs(data_dir_dwi, N)

avg_dwi = np.mean(stacked_dwi, axis=2)

# Log-scale the mean structural connectome for display (+1 avoids log10(0)).
plt.imshow(np.log10(avg_dwi+1))
plt.colorbar()

# ## Functional

data_dir_fmri = '/Users/greg/code/gkiar/3d-cnn-ae/hcp1200_fmri_DKT/'
N = 83

stacked_fmri = load_graphs(data_dir_fmri, N)

avg_fmri = np.mean(stacked_fmri, axis=2)

plt.imshow(avg_fmri)
plt.colorbar()
hcp1200_connectome_viz.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:pancancer-classifier]
#     language: python
#     name: conda-env-pancancer-classifier-py
# ---

# # Processing TP53 Exon-Exon Junction Data
#
# **<NAME>17**
#
# Reading in junction and sample files and merging with _TP53_ mutation data.

import os
import pandas as pd


def get_rail_id(row):
    """
    Extract specific rail_ids from complex data structure that assigns
    rail_ids (Sample IDs) to snaptron_ids (exon-exon junctions).
    Designed to be used as a pd.DataFrame().apply() function.

    Arguments:
    row - a row in the junction dataframe

    Output:
    a list of sample IDs with the specific snaptron ID
    """
    # The 'samples' column is a comma-separated string; for each non-empty
    # entry keep only the part before ':' (the rail_id).
    row = row['samples'].split(',')
    all_sample_ids = []
    for sample_id in row:
        if sample_id != '':
            sample_id = sample_id.split(':')[0]
            all_sample_ids.append(sample_id)
    return(all_sample_ids)


# ## First, load several files required for processing

# +
# Load Sample File (maps rail_id -> TCGA submitter ID)
sample_file = 'samples.tsv.gz'
dictionary_df = (
    pd.read_table(sample_file, low_memory=False)
    .loc[:, ['rail_id', 'gdc_cases.samples.submitter_id']]
)

dictionary_df.head(2)

# +
# Load junctions file (snaptron exon-exon junction records for TP53)
junction_file = 'tp53_junctions.txt.gz'
junction_df = pd.read_table(junction_file)

junction_df.head(2)

# +
# Load mutation classification scores file
file = os.path.join('..', '..', 'classifiers', 'TP53', 'tables',
                    'mutation_classification_scores.tsv')
mut_scores_df = pd.read_table(file, index_col=0)

mut_scores_df.head(2)

# +
# Load raw mutation file (MC3 public MAF)
file = os.path.join('..', '..', 'data', 'raw', 'mc3.v0.2.8.PUBLIC.maf.gz')
raw_mut_df = pd.read_table(file)

raw_mut_df.head()

# +
# Load binary mutation file (sample x gene mutation indicator matrix)
file = os.path.join('..', '..', 'data', 'pancan_mutation_freeze.tsv')
mut_df = pd.read_table(file, index_col=0)

mut_df.head(2)
# -

# ## Next, select the samples with the specific mutation of interest
#
# Also make sure that we only select samples in which this silent mutation is the _only_ _TP53_ mutation present.

# +
# Subset mutation file to samples with c375GT TP53 mutations
silent_mut_df = (
    raw_mut_df.query('Hugo_Symbol == "TP53"')
    .query('HGVSc == "c.375G>T"')
)

# Obtain the samples with the specific mutation
# (first 15 characters of the barcode identify the TCGA sample)
silent_mut_samples = silent_mut_df.Tumor_Sample_Barcode.str.slice(start=0, stop=15)
print(len(silent_mut_samples))

# From these, remove samples that also have a different mutation in TP53.
# The binary matrix marks c.375G>T (silent) as 0, so TP53 == 0 here means
# no other (non-silent) TP53 mutation was called for the sample.
only_silent_mut_samples = (
    mut_df.reindex(silent_mut_samples)
    .loc[:, 'TP53']
)

only_silent_mut_samples = (
    only_silent_mut_samples.loc[only_silent_mut_samples == 0]
    .index
    .tolist()
)
print(len(only_silent_mut_samples))

# Select those samples in which we have mutation classification scores
mut_silent_scores_df = (
    mut_scores_df
    .loc[mut_scores_df.index.isin(only_silent_mut_samples), :]
)

print(mut_silent_scores_df.shape)
mut_silent_scores_df.head(2)
# -

# ## Finally, process the junctions file and output the results for plotting

# +
# Process and output junction file
out_file = 'tp53_junctions_with_mutations.csv.gz'

# Distance of each junction's start/end to the coordinate of interest.
# NOTE(review): 7675994 is presumably the genomic position of the TP53
# c.375G>T variant — confirm the coordinate and genome build.
junctions_process_df = (
    junction_df.assign(diff_start = abs(7675994 - junction_df['start']),
                       diff_end = abs(7675994 - junction_df['end']))
    .sort_values(by = "diff_start")
)

# Expand the per-junction list of rail_ids into one row per (junction, rail_id).
junctions_process_df = (
    junctions_process_df
    .assign(rail_id = junctions_process_df.apply(get_rail_id, axis=1))
    .set_index(['snaptron_id', 'start', 'end', 'length', 'strand',
                'left_motif', 'right_motif', 'samples_count', 'coverage_sum',
                'coverage_avg', 'coverage_median', 'diff_start',
                'diff_end'])['rail_id']
    .apply(pd.Series)
    .stack()
    .reset_index()
)

junctions_process_df[0] = junctions_process_df[0].astype(int)

# Attach the TCGA submitter ID for each rail_id (column 0 after the stack).
junctions_process_df = (
    junctions_process_df
    .merge(dictionary_df, left_on=0, right_on='rail_id')
)

# Derive the 15-character TCGA sample ID and join the classifier scores.
junctions_process_df = (
    junctions_process_df
    .assign(
        tcga_id = junctions_process_df['gdc_cases.samples.submitter_id']
        .str
        .slice(start=0, stop=15)
    )
    .merge(mut_scores_df, left_on='tcga_id', right_index=True)
)

junctions_process_df.to_csv(out_file, compression='gzip')

junctions_process_df.head(2)
scripts/snaptron/process-tp53-exons.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="tAX6dynh1ENW" colab_type="text" # # Playing Pong with Deep Reinforcement Learning # # --- # # Read the paper [Playing Atari with Deep Reinforcement Learning](https://arxiv.org/pdf/1312.5602.pdf) (the paper is also inside the 'Papers' folder in the course materials), and implement a model that can play atari games. # # The goals of this project are the following: # # - Read and understand the paper. # - Add a brief summary of the paper at the start of the notebook. # - Mention and implement the preprocessing needed; you can add your own steps if needed. # - Load an Atari environment from OpenAI Gym; start with Pong, and try with at least one more. # - Define the convolutional model needed for training. # - Apply deep q learning with your model. # - Use the model to play a game and show the result. # # **Rubric:** # # 1. A summary of the paper was included. The summary covered what the paper does, and why, as well as the preprocessing steps and the model they introduced. # 2. Read images from the environment, and performed the correct preprocessing steps. # 3. Defined an agent class with the needed functions. # 4. Defined the model within the agent class. # 5. Trained the model with the Pong environment. Save the weights after each episode. # 6. Test the model by making it play Pong. # 7. Train and test the agent with another Atari environment of your choosing. 
#

# + [markdown] id="ahIZHdp12nY-" colab_type="text"
# ## Add a summary of the paper in this cell

# + [markdown] id="NrmWWhzE1Vg1" colab_type="text"
# ### Basic installs and imports for Colab

# + id="cmTlRpCTiUeo" colab_type="code" outputId="5caa9644-6ff5-4c5a-cc3b-27869dd6c8e1" colab={"base_uri": "https://localhost:8080/", "height": 34}
#remove " > /dev/null 2>&1" to see what is going on under the hood
# !pip install gym pyvirtualdisplay > /dev/null 2>&1
# !apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
# !apt-get update > /dev/null 2>&1
# !apt-get install cmake > /dev/null 2>&1
# !pip install --upgrade setuptools 2>&1
# !pip install ez_setup > /dev/null 2>&1
# !pip install gym[atari] > /dev/null 2>&1
# !pip install gym[box2d] > /dev/null 2>&1

# + id="SOAgdSpoi8Md" colab_type="code" outputId="d2b9eb37-5547-4fb5-c1b0-733c7be15684" colab={"base_uri": "https://localhost:8080/", "height": 34}
import gym
from gym import logger as gymlogger
from gym.wrappers import Monitor
import matplotlib
import matplotlib.pyplot as plt
import cv2
import numpy as np
import random, math
from keras import models, layers, optimizers
from collections import deque
import glob, io, base64
from IPython.display import HTML
from IPython import display as ipythondisplay
from pyvirtualdisplay import Display
gymlogger.set_level(40) #error only
# %matplotlib inline

# + [markdown] id="JCXxMUco1aoh" colab_type="text"
# ### Functions that wraps a video in colab

# + id="xguRhza7jSRD" colab_type="code" colab={}
"""
Utility functions to enable video recording of gym environment and displaying it
To enable video, just do "env = wrap_env(env)""
"""

def show_video():
    # Find the first recorded episode in ./video, base64-encode it, and embed
    # it as an HTML5 <video> element in the notebook output.
    mp4list = glob.glob('video/*.mp4')
    if len(mp4list) > 0:
        mp4 = mp4list[0]
        video = io.open(mp4, 'r+b').read()
        encoded = base64.b64encode(video)
        ipythondisplay.display(HTML(data='''<video alt="test" autoplay loop controls style="height: 400px;">
        <source src="data:video/mp4;base64,{0}" type="video/mp4" />
        </video>'''.format(encoded.decode('ascii'))))
    else:
        print("Could not find video")


def wrap_env(env):
    # Record every episode into ./video (force=True overwrites old recordings).
    env = Monitor(env, './video', force=True)
    return env

# + id="2HI_wN3OjVd2" colab_type="code" outputId="1b836643-39db-4f25-ba63-3f3ef83eb015" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Start a virtual display so gym can render frames on a headless Colab VM.
display = Display(visible=0, size=(1400, 900))
display.start()

# + id="6OtOEN9Cc9Zx" colab_type="code" outputId="a66dbc7c-5221-4f50-dd0e-1b787a2e83a1" colab={"base_uri": "https://localhost:8080/", "height": 289}
# !ls video

# + id="C3jpZDbFjJu8" colab_type="code" outputId="8c387b6d-d9fd-425d-bb42-0baa94207fb6" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Loads the Pong environment (the original comment said cartpole by mistake).
env = wrap_env(gym.make('PongDeterministic-v4'))

state_size = env.observation_space.shape[0]
action_size = env.action_space.n
print(state_size, action_size)
actions = env.unwrapped.get_action_meanings()
# right is up, left is down
print(actions)
batch_size = 32
n_episodes = 10000
print(np.random.choice([2,3]))

# + id="GeYGaLVfjhKl" colab_type="code" outputId="60712e6b-c3bf-47e3-be11-7b6aa78e2aae" colab={"base_uri": "https://localhost:8080/", "height": 421}
env = wrap_env(gym.make('PongDeterministic-v4'))

observation = env.reset()

# Play one episode with uniformly random actions (0=NOOP, 2=UP, 3=DOWN) just
# to verify the environment and the video recording work end to end.
while True:
    env.render()
    #your agent goes here
    action = np.random.choice([0, 2,3])
    #action = env.action_space.sample()
    observation, reward, done, info = env.step(action)
    if done:
        break;

env.close()
show_video()

# + [markdown] id="Q1qPElHYmVhT" colab_type="text"
# ## Paper notes
#
# Authors present the first deep learning model to successfully learn control policies directly from high-dimensional sensory input using reinforcement learning:
#
# - High dimensional input: sensory inputs like vision and speech
#
# - The model is a convolutional neural network, trained with a variant of Q-learning, whose input is raw pixels and whose output is a value function estimating future rewards.
#
# - Applied to seven Atari games, outperforming all previous approaches on six of the games and surpassing a human expert on three of them.
#
# ### Issues
#
# - Most successful RL applications that operate on high dimensional domains have relied on hand-crafted features combined with linear value functions or policy representations. Performance of such systems heavily relies on the quality of the feature representation.
#
# - Most deep learning algorithms assume the data samples to be independent, while in reinforcement learning one typically encounters sequences of highly correlated states.
#
# - In RL the data distribution changes as the algorithm learns new behaviours, which can be problematic for deep learning methods that assume a fixed underlying distribution.
#
# To alleviate the problems of correlated data and non-stationary distributions, we use an experience replay mechanism, which randomly samples previous transitions, and thereby smooths the training distribution over many past behaviors.
#
# - Experience replay: we store the agent’s experiences at each time-step, $e_t = (s_t, a_t, r_t, s_{t+1})$ in a data-set $D = e_1, \dots, e_N$, pooled over many episodes into a replay memory.
#
# - During the inner loop of the algorithm, we apply Q-learning updates, or minibatch updates, to samples of experience drawn at random from the pool of stored samples.
#
# ### Background
#
# Consider tasks in which an agent interacts with an environment $\varepsilon$, in this case the Atari emulator, in a sequence of actions, observations and rewards. At each time-step the agent selects an action $a_t$ from the set of legal game actions, $A = \{1, \dots, K\}$. The emulator’s
# internal state is not observed by the agent; instead it observes an image $x_t \in \mathbb{R}^d$ from the emulator, which is a vector of raw pixel values representing the current screen.
# # **Note that in general the game score may depend on the whole prior sequence of actions and observations; feedback about an action may only be received after many thousands of time-steps have elapsed.**
#
# Since the agent only observes images of the current screen, the task is partially observed, i.e. it is impossible to fully understand the current situation from only the current screen $x_t$. We therefore consider sequences of actions and observations, $s_t = x_1; a_1; x_2;\dots; a_{t-1}; x_t$, and learn game strategies that depend upon these sequences.
#
# All sequences in the emulator are assumed to terminate in a finite number of time-steps. This formalism gives rise to a large but finite Markov decision process (MDP) in which each sequence is a distinct state.
#
# - Markov decision process (MDP): A Markov decision process (MDP) is a discrete time stochastic control process. It provides a mathematical framework for modeling decision making in situations where outcomes are partly random and partly under the control of a decision maker. MDPs are useful for studying optimization problems solved via dynamic programming and reinforcement learning.
#
#     - A Markov decision process is a 4-tuple $(S,A,P_{a},R_{a})$
#         - $S$ is a finite set of states
#         - $A$ is a finite set of actions (alternatively, $A_s$ is the finite set of actions available from state $s$),
#         - $P_{a}(s,s')=\Pr(s_{t+1}=s' | s_{t}=s,a_{t}=a)$ is the probability that action a in state s at time t will lead to state s' at time t+1
#         - $R_{a}(s,s')$ is the immediate reward (or expected immediate reward) received after transitioning from state s to state s', due to action a
#
# The goal of the agent is to interact with the emulator by selecting actions in a way that maximises future rewards. We make the standard assumption that future rewards are discounted by a factor of $\gamma$ per time-step, and define the future discounted return at time $t$ as
#
# $R_t = \sum_{t'=t}^T \gamma^{t'-t} r_{t'}$
#
# where T is the time-step at which the game terminates.
#
# We define the optimal action-value function $Q^*(s, a)$ as the maximum expected return achievable by following any strategy, after seeing some sequence $s$ and then taking some action $a$, $Q^*(s, a) = \max_\pi \mathbb{E}[R_t | s_t = s; a_t = a, \pi]$, where $\pi$ is a policy mapping sequences to actions (or distributions over actions).
#
# The optimal strategy is to select the action $a'$ maximizing the expected value, $\mathbb{E}$, of $r + \gamma Q^*(s', a')$:
#
# $Q^*(s,a) = \mathbb{E}_{s'\sim\varepsilon}[r + \gamma \max_{a'} Q^*(s',a') | s, a]$
#
# To estimate the action-value function, the $Q^*$ function, it is common to use a function approximator; in this case a neural network. The function is as follows:
#
# $Q(s, a; \theta)\approx Q^*(s,a)$
#
# where $\theta$ are the weights of the Q-network. The network can be trained by minimising a sequence of loss functions $L_i(\theta_i)$ that changes at each iteration, $i$:
#
# $L_i (\theta_i)=\mathbb{E}[(y_i - Q(s, a;\theta_i))^2]$
#
# where $y_i = \mathbb{E}[r + \gamma \max_{a'}Q(s', a'; \theta_{i-1})|s, a]$
#
# ### Algorithm
#

# + [markdown] id="h3Ww0L4w1hE4" colab_type="text"
# ## Define the Deep Q learning Agent

# + id="nbtt8Frsjvm8" colab_type="code" colab={}
class DQNAgent:
    """Deep Q-Network agent (Mnih et al. 2013 style) for stacked 84x84x4 frames.

    The agent keeps a bounded replay memory of single-frame transitions and
    reassembles 4-frame stacks at sampling time (see get_batch).
    """

    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        # Events that are near in time are too correlated and do not add much
        # information, so training draws transitions from a large replay memory
        # rather than learning on consecutive steps.
        self.max_memory = 300000
        self.memory = []  # list of (state, action, reward, next_state, done)
        # Discount factor for future rewards.
        self.gamma = 0.99
        # Epsilon-greedy exploration, linearly annealed from 1.0 down to 0.1.
        self.epsilon = 1.0
        self.epsilon_min = 0.1
        self.epsilon_decay = (1 - self.epsilon_min) / 1000
        self.learning_rate = 0.00025
        self.model = self._build_model()

    def _build_model(self):
        """Return a compiled Keras CNN mapping (84, 84, 4) inputs to Q-values."""
        model = models.Sequential()
        model.add(layers.Conv2D(16, kernel_size=(8, 8), strides=(4, 4), padding='valid',
                                kernel_initializer='glorot_uniform', input_shape=(84, 84, 4)))
        model.add(layers.LeakyReLU(alpha=0.3))
        model.add(layers.Conv2D(32, kernel_size=(4, 4), strides=(2, 2), padding='valid',
                                kernel_initializer='glorot_uniform'))
        model.add(layers.LeakyReLU(alpha=0.3))
        model.add(layers.Flatten())
        model.add(layers.Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
        model.add(layers.Dense(self.action_size, kernel_initializer='glorot_uniform',
                               activation='linear'))
        model.compile(loss='mse',
                      optimizer=optimizers.RMSprop(lr=self.learning_rate, rho=0.95, epsilon=0.01))
        return model

    def remember(self, state, action, reward, next_state, done):
        """Store one transition.

        state, action, reward belong to the current time-step; next_state is
        the frame observed after taking the action; done marks episode end.
        """
        # Bound the replay memory by dropping the oldest transition first.
        if len(self.memory) > self.max_memory:
            self.memory.pop(0)
        self.memory.append((state, action, reward, next_state, done))

    def action(self, state):
        """Epsilon-greedy action for a (1, 84, 84, 4) state tensor."""
        # Exploration mode
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        # Otherwise act greedily with respect to the current Q-network.
        act_values = self.model.predict(state)
        return np.argmax(act_values[0])

    def get_batch(self, batch_size):
        """Sample indices and build 4-frame stacks (state, next_state) around each.

        The reward of a stacked transition is the sum of the 4 per-step rewards
        in the window; done is taken from the most recent step.
        """
        minibatch = random.sample(range(4, len(self.memory)), batch_size)
        batch = [
            (
                np.expand_dims(np.stack((self.memory[i-3][0], self.memory[i-2][0],
                                         self.memory[i-1][0], self.memory[i][0]), axis=2), axis=0),
                self.memory[i][1],
                np.sum((self.memory[i-3][2], self.memory[i-2][2],
                        self.memory[i-1][2], self.memory[i][2])),
                np.expand_dims(np.stack((self.memory[i-3][3], self.memory[i-2][3],
                                         self.memory[i-1][3], self.memory[i][3]), axis=2), axis=0),
                self.memory[i][4]
            )
            for i in minibatch
        ]
        return batch

    def train(self, batch_size):
        """One Q-learning update per sampled transition, then anneal epsilon."""
        batch = self.get_batch(batch_size)
        for state, action, reward, next_state, done in batch:
            target = reward
            if not done:
                # Bellman target: r + gamma * max_a' Q(s', a')
                target = (reward + self.gamma * np.amax(self.model.predict(next_state)[0]))
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if self.epsilon > self.epsilon_min:
            self.epsilon -= self.epsilon_decay

    def load(self, name):
        """Load network weights from file `name`."""
        self.model.load_weights(name)

    def save(self, name):
        """Save network weights to file `name`."""
        self.model.save_weights(name)


# + id="jyAVZOyPjzTr" colab_type="code" outputId="8f4af8d5-ddcb-4ccf-e8bd-e63b7f1f45f6" colab={"base_uri": "https://localhost:8080/", "height": 462}
agent = DQNAgent(state_size, action_size)
agent.model.summary()

# + [markdown] id="94rKT1nj1nMj" colab_type="text"
# ### Needed preprocessing steps

# + id="F0YR2iCYd4OV" colab_type="code" colab={}
def preprocessFrame(image):
    """Grayscale, resize to 84x110, crop to 84x84 and normalize to [0, 1]."""
    # Grayscale
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Resize
    resized_img = cv2.resize(gray_img, (84, 110))
    # Crop (drops the score area at the top of the screen)
    cropped_img = resized_img[16:100, 0:84]
    # Normalize to float32 in [0, 1]
    normalized_img = cv2.normalize(cropped_img, None, alpha=0, beta=1,
                                   norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    return normalized_img


# + [markdown] id="OVf_LPgl1u3n" colab_type="text"
# ## Training with the environment images

# + id="eHcl0-LAj38H" colab_type="code" outputId="0bca1dfd-e74a-40f4-f1ed-f5c23593d241" colab={"base_uri": "https://localhost:8080/", "height": 837}
env = wrap_env(gym.make('PongDeterministic-v4'))
try:
    for e in range(n_episodes):
        state = preprocessFrame(env.reset())
        states = deque((state, state, state, state), maxlen=4)
        states_tensor = None
        total_reward = 0
        done = False
        while not done:
            states_tensor = np.stack((states), axis=2).reshape((1, 84, 84, 4))
            action = agent.action(states_tensor)
            next_state, reward, done, info = env.step(action)
            next_state = preprocessFrame(next_state)
            total_reward += reward
            # BUG FIX: store the per-step reward, not the running episode total.
            # get_batch() sums rewards over the 4-frame window, so storing the
            # cumulative total double-counted every earlier reward.
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            states.append(next_state)
            if len(agent.memory) > batch_size + 4:
                agent.train(batch_size)
        if e % 50 == 0:
            print("Episode: {}/{}, score: {}, e: {:.9}, m: {}".format(
                e, n_episodes, total_reward, agent.epsilon, len(agent.memory)))
            agent.save('max_reward_weights.hdf5')
finally:
    env.close()

# + id="yoWQOzwWkzXs" colab_type="code" colab={}
# !ls

# + [markdown] id="25FqNG8u2dDa" colab_type="text"
# ### Test your model

# + id="o1Ti62JUgiNm" colab_type="code" colab={}
env = wrap_env(gym.make('PongDeterministic-v4'))
#agent.load('0700hdf5')
try:
    # BUG FIX: the network expects a stack of four preprocessed 84x84 frames,
    # not a flattened raw screen, so the evaluation loop now mirrors the
    # preprocessing and frame stacking used during training.
    state = preprocessFrame(env.reset())
    states = deque((state, state, state, state), maxlen=4)
    total_reward = 0
    done = False
    while not done:
        env.render()
        states_tensor = np.stack((states), axis=2).reshape((1, 84, 84, 4))
        action = agent.action(states_tensor)
        next_state, reward, done, info = env.step(action)
        total_reward += reward
        states.append(preprocessFrame(next_state))
finally:
    env.close()

show_video()

# + [markdown] id="ca3zSLF02x1E" colab_type="text"
# ## Train and test your agent with another atari environment
Notebooks/Reinforcement/Pong_Deep_Q_Learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/"} id="TPt7dI9uyscj" outputId="4aa615a6-b576-4acb-d438-b7b132d65d88"
# !pip install requests
# !pip install bs4
# !pip install datetime
# !pip install pymongo
# !pip install pymongo[srv]
# !pip install pymongo[tls]

# + id="UeZr9RSrycfL"
import requests
import re
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from pymongo import MongoClient

# + colab={"base_uri": "https://localhost:8080/"} id="RIMU6Orx4Ff7" outputId="30829627-f6cc-4337-a835-3776d3776943"
clusterInfo = input("Please enter your MongoDB connection string (starts with mongodb+srv://)")
cluster = MongoClient(clusterInfo, tls=True, tlsAllowInvalidCertificates=True)
cluster.stats

# + colab={"base_uri": "https://localhost:8080/", "height": 86} id="Z_KL2MjFyjiD" outputId="735440b2-84e8-43af-9282-df7c0ce81732"
import time

databaseName = input('Please enter database name')
# BUG FIX: the variable holding the database name is `databaseName`;
# `databaseInfo` was never defined and raised a NameError here.
db = cluster[databaseName]
collectionName = input('Please enter collection name')
collection = db[collectionName]
start_date = input("Enter date: (YYYY-MM-DD) ")
formatted_start_date = datetime.strptime(start_date, "%Y-%m-%d")
program_begins = time.perf_counter()


def askURL(url):
    """Fetch one URL and return the `requests` response object.

    Uses a rotating proxy (webshare.io) and a desktop Chrome User-Agent header
    so the server treats the crawler like an ordinary browser.
    """
    proxy = {  # rotating proxy from webshare.io
        "http": "http://bfqighfc-rotate:<EMAIL>:80/",
        "https": "http://bfqighfc-rotate:a73kohayjb8b@p.webshare.io:80/"
    }
    header = {  # tell the web server what kind of client we claim to be
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36"
    }
    page = requests.get(url, headers=header, proxies=proxy)
    return page


post_date = []
data_no_hr = []
formatted_post_date = []
# Unicode ranges used to guess the language of a comment.
cn_pattern = r'[\u4E00-\u9FFF\u3400-\u4DBF\uF900-\uFAFF]'
ko_pattern = r'[\u3131-\ucb4c]'
jp_pattern = r'[ぁ-んァ-ン]'
en_pattern = r'[a-zA-Z]'
find_id_pattern = r'\/(\d+)\/'

n_days_ago = 730  # crawl content up to 730 days back from the requested date
stop_date = formatted_start_date - timedelta(days=n_days_ago)
stop_date = stop_date.date()  # date-only cutoff for the crawl

date_not_reached = True
post_date = []
data_no_hr = []
page_number = 22
while date_not_reached:
    url_list = []
    baseurl = "https://e-hentai.org/?page={}&f_cats=767".format(page_number)
    page_number += 1
    html = askURL(baseurl)
    url_bs = BeautifulSoup(html.text, 'html.parser')
    # Collect the post dates listed on this index page.
    for day in url_bs.find_all('div', id=re.compile('^posted_')):
        post_date.append(day.text)
    data_no_hr = [day[:10] for day in post_date]  # keep only the YYYY-MM-DD part
    if start_date in data_no_hr:
        tags = url_bs.find_all('a')
        for tag in tags:
            url_list.append(tag.get('href'))
        url_list = [k for k in url_list if 'https://e-hentai.org/g/' in k]
        for urls in url_list:  # visit every gallery page on this index page
            html2 = askURL(urls)
            url_bs = BeautifulSoup(html2.text, 'html.parser')
            clean_score_list = []  # comment scores
            comment_list = []  # comment texts
            for score in url_bs.find_all('span', id=re.compile('^comment_score_')):
                clean_score_list.append(int(score.text))
            if clean_score_list and max(clean_score_list) > 25:
                # only keep galleries whose best scored comment is rated > 25
                cn_count = 0
                ko_count = 0
                jp_count = 0
                en_count = 0
                best_index = clean_score_list.index(max(clean_score_list))
                # every non-poster comment; the poster's own comment is comment_0
                for comment in url_bs.find_all('div', id=re.compile(r'^comment_\d+\d+')):
                    comment_list.append(comment.text)
                mongo_best_comment = comment_list[best_index]
                if 'http' in mongo_best_comment:  # the comment contains a URL
                    mongo_best_comment_has_url = True
                    mongo_language = 'url'
                else:
                    mongo_best_comment_has_url = False
                    # Guess the comment language by counting characters per script.
                    cn_count = len(re.findall(cn_pattern, mongo_best_comment))
                    ko_count = len(re.findall(ko_pattern, mongo_best_comment))
                    jp_count = len(re.findall(jp_pattern, mongo_best_comment))
                    en_count = len(re.findall(en_pattern, mongo_best_comment))
                    lang_count = {'cn': cn_count, 'ko': ko_count, 'jp': jp_count, 'en': en_count}
                    mongo_language = max(lang_count, key=lang_count.get)
                mongo_best_comment_url = urls
                mongo_best_comment_score = max(clean_score_list)
                mongo_score = url_bs.find('span', id=re.compile('^comment_score_')).text
                mongo_date = datetime.strptime(url_bs.find('td', {'class': 'gdt2'}).text[:10], "%Y-%m-%d")
                mongo_en_title = url_bs.find("h1", id="gn").text
                mongo_non_en_title = url_bs.find("h1", id="gj").text
                # re.findall returns a list of strings; the gallery id is stored as int
                mongo_gallery_id = int(re.findall(find_id_pattern, urls)[0])
                post = {
                    'URL': mongo_best_comment_url,
                    "English Title": mongo_en_title,
                    "Non-English Title": mongo_non_en_title,
                    'Score': mongo_best_comment_score,
                    'Comment': mongo_best_comment,
                    'Contains URL': mongo_best_comment_has_url,
                    'Language': mongo_language,
                    'Date': mongo_date}
                collection.insert_one(post)
    # Stop once the oldest date seen on the index pages is past the cutoff.
    if datetime.strptime(data_no_hr[-1][:10], "%Y-%m-%d").date() < stop_date:
        date_not_reached = False

program_ends = time.perf_counter()
# BUG FIX: the old extra print labelled two raw perf_counter values as
# "Elapsed time"; only the difference below is meaningful.
print("Elapsed time during the whole program in seconds:", program_ends - program_begins)
e-crawler.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Grids: Non-Uniform Grids

# Some data cannot be easily represented on a grid of uniformly spaced vertices. It is still possible to create a grid object to represent such a dataset.

# +
# %matplotlib inline
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np

from plasmapy.plasma import grids

# -

# Build a non-uniform Cartesian grid spanning -1 cm to +1 cm on each axis,
# with 50 points requested per axis.
grid = grids.NonUniformCartesianGrid(
    np.array([-1, -1, -1]) * u.cm, np.array([1, 1, 1]) * u.cm, num=(50, 50, 50)
)

# Currently, all non-uniform data is stored as an unordered 1D array of points. Therefore, although the dataset created above falls approximately on a Cartesian grid, its treatment is identical to a completely unordered set of points

grid.shape

# Many of the properties defined for uniform grids are inaccessible for non-uniform grids. For example, it is not possible to pull out an axis. However, the following properties still apply

print(f"Grid points: {grid.grid.shape}")
print(f"Units: {grid.units}")

# Properties can be added in the same way as on uniform grids.

# B_x here is random values in tesla, used purely to demonstrate attaching a
# quantity to the grid.
Bx = np.random.rand(*grid.shape) * u.T
grid.add_quantities(B_x=Bx)
print(grid)

# ## Methods

# Many of the methods defined for uniform grids also work for non-uniform grids, however there is usually a substantial performance penalty in the non-uniform case.
#
# For example, `grid.on_grid` behaves similarly. In this case, the boundaries of the grid are defined by the furthest point away from the origin in each direction.

# The first test point lies inside the grid; the second (x = 3 cm) lies outside.
pos = np.array([[0.1, -0.3, 0], [3, 0, 0]]) * u.cm
print(grid.on_grid(pos))

# The same definition is used to define the grid boundaries in `grid.vector_intersects`

# +
pt0 = np.array([3, 0, 0]) * u.cm
pt1 = np.array([-3, 0, 0]) * u.cm
pt2 = np.array([3, 10, 0]) * u.cm

# The pt0->pt1 segment passes through the grid volume; pt0->pt2 does not.
print(f"Line from pt0 to pt1 intersects: {grid.vector_intersects(pt0, pt1)}")
print(f"Line from pt0 to pt2 intersects: {grid.vector_intersects(pt0, pt2)}")
# -

# ## Interpolating Quantities

# Nearest-neighbor interpolation also works identically. However, volume-weighted interpolation is not implemented for non-uniform grids.

# +
pos = np.array([[0.1, -0.3, 0], [0.5, 0.25, 0.8]]) * u.cm
print(f"Pos shape: {pos.shape}")
print(f"Position 1: {pos[0,:]}")
print(f"Position 2: {pos[1,:]}")

# Interpolate the previously attached "B_x" quantity at both positions.
Bx_vals = grid.nearest_neighbor_interpolator(pos, "B_x")
print(f"Bx at position 1: {Bx_vals[0]:.2f}")
# -
docs/notebooks/plasma/grids_nonuniform.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.1.0
#     language: julia
#     name: julia-1.1
# ---

# # Hilfekarte 10

# Wir schauen uns die Problemstellung zunächst einmal übertragen auf eine Gerade an.
#
# Die Gerade $f$ ist beschrieben durch folgende Gleichung in Parameterdarstellung: $f:\vec{x} = \begin{pmatrix}0\\3\end{pmatrix}+t\cdot\begin{pmatrix}1\\2\end{pmatrix}$.
#
# Berechnet den Punkt auf der Geraden $f$, der am nächsten zu dem orangenen Punkt $\begin{pmatrix}2&1\end{pmatrix}$ ist.
#
# ![alternative text](figs/Gerade2.png)

# Übertragt diese Idee auf die gegebene Situation mit dem Punkt und der Ebene.
#
# Lest erst weiter, wenn ihr den nächsten Punkt selbst berechnet habt und eure Lösung vergleichen wollt.

# # Lösung

# Der nächste Punkt auf der Geraden $f$ zu $\begin{pmatrix}2&1\end{pmatrix}$ ist der Schnittpunkt zwischen der Geraden und der senkrecht auf ihr stehenden Geraden, die durch den orangenen Punkt verläuft.
#
# Um einen Vektor zu bestimmen, der senkrecht zur Geraden steht, wird ein Vektor gesucht, dessen Skalarprodukt mit dem Richtungsvektor der Geraden Null ergibt.
#
# $\begin{eqnarray}
# \begin{pmatrix}1\\2\end{pmatrix}\cdot\begin{pmatrix}x_1\\x_2\end{pmatrix}&=&0\nonumber\\
# \Leftrightarrow x_1+2\cdot x_2 &=& 0
# \end{eqnarray}$
#
# An dieser Stelle muss für entweder $x_1$ oder $x_2$ ein Wert eingesetzt werden, um einen Vektor auszuwählen. Hier wird $x_1=2$ gesetzt, woraus sich $x_2=-1$ ergibt. Nun muss mit Hilfe dieses Vektors und dem orangenen Punkt eine Gerade aufgestellt werden $g:\vec{x} = \begin{pmatrix}2\\1\end{pmatrix}+s\cdot\begin{pmatrix}2\\-1\end{pmatrix}$ und der Schnittpunkt mit der blauen Geraden gesucht werden.
# #
# $\begin{pmatrix}0\\3\end{pmatrix}+t\cdot\begin{pmatrix}1\\2\end{pmatrix}=\begin{pmatrix}2\\1\end{pmatrix}+s\cdot\begin{pmatrix}2\\-1\end{pmatrix}\\
# \begin{eqnarray}\Leftrightarrow I.\qquad t &=& 2+2s\nonumber\\
# II. 3+2t&=&1-s
# \end{eqnarray}$
#
# Einsetzen der ersten Gleichung in die zweite ergibt: $3+4+4s=1-s \Leftrightarrow s = -\frac{6}{5}$ und somit $t = -\frac{2}{5}$.
#
# Daraus folgt durch Einsetzen der beiden Parameter in die Geraden $f$ und $g$, dass die Geraden sich im Punkt $P(-\frac{2}{5}|\frac{11}{5})$ schneiden.
Computertomographie_Blaetter/Hilfekarte10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import numpy.linalg as la

datasets = ['CIFAR', 'MNIST']
net_names = ['FFNN', 'ConvMed', 'ConvBig']
perturbations = ['Haze', 'ContrastVariation', 'Rotation']

# +
import re


class Interval:
    """A closed interval parsed from the string form "[lower,upper]"."""

    def __init__(self, interval_str):
        m = re.match(r'\[(.*),(.*)\]', interval_str)
        self.lower, self.upper = float(m.group(1)), float(m.group(2))

    def get_abs_max(self):
        """Largest absolute endpoint value."""
        return max(abs(self.lower), abs(self.upper))

    def mono(self):
        """True if the interval does not strictly straddle zero (sign is fixed)."""
        return not (self.lower < 0 and self.upper > 0)

    def more_than_zero(self):
        return (self.lower > 0 and self.upper > 0)

    def less_than_zero(self):
        return (self.lower < 0 and self.upper < 0)


# +
# Shared helpers: the parsing and aggregation logic below was previously
# duplicated verbatim across the experiment cells.

def _read_split25_results(filename, size_index, num_images):
    """Parse one results file into (interval_size, time, jacobian_mono) records.

    Records come in line pairs: a comma-separated header (interval size at
    column `size_index`, time at column 7) followed by a ';'-separated list of
    intervals, one per entry of the (num_images x 10*25) Jacobian.
    """
    records = []
    with open(filename) as f:
        content = [x.strip() for x in f.readlines()]
    for header, arr in zip(content[::2], content[1::2]):
        items = header.split(',')
        interval_size = float(items[size_index])
        time = float(items[7])
        jacobians = np.array(
            [Interval(x).mono() for x in arr.split(';')[:-1]]
        ).reshape(num_images, 10 * 25)
        records.append((interval_size, time, jacobians))
    return records


def _avg_best_interval_size(data, num_images, images_per_category):
    """Average over images of the best monotonicity-weighted interval size.

    For each image, every record's 25x10 Jacobian slice is scored by how many
    of the 25 splits are monotone in the image's true category; the best
    interval_size/25 * count over all records is kept (0 if none is monotone).
    """
    best_sizes = []
    for img_i in range(num_images):
        category = img_i // images_per_category
        found_size = False
        for (interval_size, time, jacobians) in data:
            jacobi_reshaped = jacobians[img_i].reshape(25, 10)
            mono = [0 for _ in range(10)]
            for row in jacobi_reshaped:
                for i, elem in enumerate(row):
                    if elem:
                        mono[i] += 1
            if mono[category] > 0:
                candidate = interval_size / 25 * mono[category]
                if not found_size:
                    best_sizes.append(candidate)
                    found_size = True
                elif candidate > best_sizes[-1]:
                    best_sizes[-1] = candidate
        if not found_size:
            best_sizes.append(0)
    assert len(best_sizes) == num_images
    return sum(best_sizes) / num_images


# +
# Experiment for MNIST varying layers from 3-9 (figure 9)
NUM_IMAGES = 100

all_data_splitting25_layers = {}
perturbations = ['Haze', 'ContrastVariation', 'Rotation']
for layers in range(3, 10):
    all_data_splitting25_layers[layers] = {}
    for perturbation in perturbations:
        filename = f'results_optim_landscape/results_optim_landscape_split25/layer{layers}_{perturbation}_split25.txt'
        # In these files the interval size sits in header column 3.
        all_data_splitting25_layers[layers][perturbation] = \
            _read_split25_results(filename, 3, NUM_IMAGES)

# +
toplot = {}
for layers in range(3, 10):
    toplot[layers] = {}
    for perturbation in perturbations:
        toplot[layers][perturbation] = _avg_best_interval_size(
            all_data_splitting25_layers[layers][perturbation],
            NUM_IMAGES, int(NUM_IMAGES / 10))

# +
import seaborn as sns
import matplotlib.pyplot as plt

params = {'axes.labelsize': 12.5, 'axes.titlesize': 14.0,
          'xtick.labelsize': 11.5, 'ytick.labelsize': 11.5}
plt.rcParams.update(params)
sns.set_style("darkgrid", {'font.family': 'serif', 'font.serif': 'Times New Roman'})

fig, axs = plt.subplots(1, 3, figsize=(13, 2.7), dpi=500)
plt.subplots_adjust(wspace=0.4)

hazes = []
contrasts = []
rotations = []
for l in range(3, 10):
    hazes.append(toplot[l]['Haze'])
    contrasts.append(toplot[l]['ContrastVariation'])
    rotations.append(toplot[l]['Rotation'])

axs[0].set_xlabel('Layers in Network')
axs[0].set_ylabel('Interval Size')
axs[0].plot(range(3, 10), hazes, '-^')
axs[0].yaxis.set_major_locator(plt.MaxNLocator(4))
plt.locator_params(axis='y', nbins=4)

axs[1].set_xlabel('Layers in Network')
axs[1].set_ylabel('Interval Size')
axs[1].plot(range(3, 10), contrasts, '-^')
axs[1].set_yticks([0, 0.015, 0.032])
axs[1].yaxis.set_major_locator(plt.MaxNLocator(4))
plt.locator_params(axis='y', nbins=4)

axs[2].set_xlabel('Layers in Network')
axs[2].set_ylabel('Interval Size')
axs[2].plot(range(3, 10), rotations, '-^')
axs[2].set_yticks([0, 0.001, 0.002, 0.003])

plt.savefig('optimization_mnist_varylayers.png', bbox_inches='tight')

# +
# Experiment for ConvBig, ConvMed, FFNN networks (table 3)
from operator import itemgetter

NUM_IMAGES = 100
all_data_splitting25 = {}
for dataset in datasets:
    all_data_splitting25[dataset] = {}
    for net in net_names:
        all_data_splitting25[dataset][net] = {}
        for perturbation in perturbations:
            # Results for each configuration are split across two directories;
            # in these files the interval size sits in header column 4.
            data = _read_split25_results(
                f'results/results_split25/{net}_{dataset}_{perturbation}_split25.txt',
                4, NUM_IMAGES)
            data += _read_split25_results(
                f'results/results_split25_add/{net}_{dataset}_{perturbation}_split25.txt',
                4, NUM_IMAGES)
            data.sort(key=itemgetter(0))
            all_data_splitting25[dataset][net][perturbation] = data

# +
tocompute = {}
for dataset in datasets:
    tocompute[dataset] = {}
    for net in net_names:
        tocompute[dataset][net] = {}
        for perturbation in perturbations:
            tocompute[dataset][net][perturbation] = _avg_best_interval_size(
                all_data_splitting25[dataset][net][perturbation], NUM_IMAGES, 10)
# -

tocompute
eval/Analyze_Landscape.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (vibrio) # language: python # name: vibrio # --- # + from pandas import read_csv, DataFrame import pandas as pd import os import re #Both patterns and strings to be searched can be Unicode strings as well as 8-bit strings. import math import cobra import cobra.test from __future__ import print_function from os.path import join from cobra.io import write_sbml_model from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" data_dir = "/Users/lizrad/Dev/iVnat" print("files found: ") print(", ".join(i for i in os.listdir(data_dir) if i.startswith("iVnat"))) model=cobra.io.read_sbml_model(join(data_dir, "iVnat.xml")) #model=cobra.io.read_legacy_sbml("C:\\Users\Asus\Documents\Vibrio_project_literature\GCF_001456255.1_rast_metabolic_model.SBML\GCF_001456255.1_rast_metabolic_model.xml") model # - solution = model.optimize() print(solution) # + solution.status solution.fluxes solution.shadow_prices solution.objective_value solution.reduced_costs # - model.medium model.summary() model.reactions.bio1 print(model.metabolites.NADH_c0.summary()) # ### Running FVA # + from cobra.flux_analysis import flux_variability_analysis # - flux_variability_analysis(model, model.reactions[:10]) cobra.flux_analysis.flux_variability_analysis( model, model.reactions[:10], fraction_of_optimum=0.9) model.optimize() model.summary(fva=0.95) # ### Running pFBA fba_solution = model.optimize() pfba_solution = cobra.flux_analysis.pfba(model) pfba_solution # + abs(fba_solution.fluxes["bio1"] - pfba_solution.fluxes[ "bio1"]) # - # ### Running geometric FBA geometric_fba_sol = cobra.flux_analysis.geometric_fba(model) geometric_fba_sol
ComplementaryScripts/Optimalization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notebook for Testing

# ## Importing preprocessed Data

# Import desired packages
import xarray as xr
from matplotlib import pyplot as plt
from matplotlib import gridspec
from cartopy import crs as ccrs
import cartopy.feature as cfeature
from cartopy.util import add_cyclic_point
import glob
import decimal

# %reload_ext autoreload
# %autoreload 2
from chazbpei2020.preprocessing import *

# ## Testing Glob

# +
# files = glob.glob('/local/ss23/GFDL_LEs/OCN/METABOLIC_INDEX/RCP85/*')
# print('all files in Omega_Arag RCP85 directory:')
# for file in files:
#     print(file)
# print(str(len(files)) + ' files total')
# -

# ## Testing accessing preprocessed ensemble mean/min data

# Testing with preprocessed data
path = '~/chazbpei2020/data/processed/Omega_Arag/omega_arag_irr_0.1Wm2_1x1_ensAvg_1950_2100.nc'
ds_ensAvg = xr.open_dataset(path)
# BUG FIX: the original `.sel((TIME=...` had a stray opening parenthesis and
# unbalanced parens (SyntaxError); label-based selection takes keywords directly.
print(ds_ensAvg['OMEGA_ARAG_IRR'].sel(TIME='1990', XT_OCEAN=70.5, YT_OCEAN=12.5))

# Testing with preprocessed data
path = '~/chazbpei2020/data/processed/Omega_Arag/omega_arag_irr_0.1Wm2_1x1_ensMin_1950_2100.nc'
ds_ensMin = xr.open_dataset(path)
print(ds_ensMin['OMEGA_ARAG_IRR'].sel(TIME='1990', XT_OCEAN=70.5, YT_OCEAN=12.5))

path = '/local/ss23/GFDL_LEs/OCN/OMEGA_ARAG/RCP85/omega_arag_irr_0.1Wm2_1x1_ens103_1950_2100.nc'
ds_all = xr.open_dataset(path)
print(ds_all['OMEGA_ARAG_IRR'].sel(TIME='1990', XT_OCEAN=70.5, YT_OCEAN=12.5))

# +
# NOTE(review): `avg` was used below but never defined (NameError); the
# preprocessed ensemble-average dataset loaded above appears to be what was
# intended — confirm against chazbpei2020.preprocessing usage.
avg = ds_ensAvg

# Specify variables
X = avg['XT_OCEAN']
Y = avg['YT_OCEAN']
Z = annual_mean(avg, 'OMEGA_ARAG_IRR', 1990)

# call default_contour function specifying variables, plot size (10x10 default)
fig = default_contour(X, Y, Z, 10, 5)
ax = fig.get_axes()[0]
ax.set_title('Avg Omega Aragonite in 1990 at Surface irr0.1', fontsize=20)
im = ax.contourf(X, Y, Z)  # can add levels parameter levels=[0, 1, 2]

cbar = plt.colorbar(im, ax=ax, orientation='horizontal', fraction=0.05, pad=0.05)
cbar.set_label('$^\circ\,K$', fontsize=12)
# -

# ## -------------------------------------------------------

# + jupyter={"outputs_hidden": true}
# Specify the path to the data on flyingfish
rootdir = '/local/ss23/GFDL_LEs/'
experiment = '/OCN/OMEGA_ARAG/RCP85/omega_arag_irr_*'
# The combination of these defines the path
directory = rootdir + experiment

# Get all files in directory
files = glob.glob(directory)
ds_all = xr.open_mfdataset(files, engine='netcdf4', concat_dim='ensemble', combine='nested')
# -

ds_ensMean = ds_all.mean('ensemble')  # take the mean across ensemble "dimension"
# BUG FIX: xarray Datasets have no `.minimum` method (AttributeError); the
# min-reduction over a dimension is `.min`.
ds_ensMin = ds_all.min('ensemble')  # take the minimum across ensemble "dimension"

ds_annualMean = ds_ensMean.groupby('TIME.year').mean(dim='TIME')
# NOTE(review): this takes the annual MEAN of the ensemble minimum; if the
# annual minimum was intended, use .min(dim='TIME') here — confirm.
ds_annualMin = ds_ensMin.groupby('TIME.year').mean(dim='TIME')

# +
# NOTE(review): as above, `avg` was undefined; the freshly computed ensemble
# mean is the natural candidate here — confirm.
avg = ds_ensMean

# Specify variables
X = avg['XT_OCEAN']
Y = avg['YT_OCEAN']
Z = annual_mean(avg, 'OMEGA_ARAG_IRR', 1990)

# call default_contour function specifying variables, plot size (10x10 default)
fig = default_contour(X, Y, Z, 10, 5)
ax = fig.get_axes()[0]
ax.set_title('Avg Omega Aragonite in 1990 at Surface irr0.1', fontsize=20)
im = ax.contourf(X, Y, Z)  # can add levels parameter levels=[0, 1, 2]
# levels=[0.0, 0.02, 0.04, 0.06, 0.08, 0.10, 0.12, 0.14, 0.16])
# fig.savefig("omega_arag_")

cbar = plt.colorbar(im, ax=ax, orientation='horizontal', fraction=0.05, pad=0.05)
cbar.set_label('$^\circ\,K$', fontsize=12)
notebooks/Omega_Arag_Plots/testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + language="javascript" # MathJax.Hub.Config({ # TeX: { equationNumbers: { autoNumber: "AMS" } } # }); # - # # Exercise 1 # # Let us consider the sequence $U_n$ given by # \begin{equation}\label{fib} # \left\lbrace # \begin{array}{ll} # U_0 &= 1,\\ # U_1 &= 2,\\ # U_{n} &=-3U_{n-1} +U_{n-2}, \;\; \forall\; n=2,3,4\cdots # \end{array}\right. # \end{equation} # # Write a python function named <b>SeqTerms</b> that takes as input an integer $n,\;\;n\geq 0$ and return an array of the first $n$ terms (i.e. $U_0, \cdots, U_{n-1}$) of the sequence \eqref{fib}. import numpy as np def Seq(n): a=1 b=2 if n==0: return 1 if n==1: return 2 for i in range(2,n+1): c=-3*b+a a=b b=c return c Seq(2) def SeqTerms(n): l=[] g=np.vectorize(Seq) for i in range(n): l+=[Seq(i)] return l SeqTerms(4) # # Exercise 2 # # Let $\{ x_k\}$ be a partition of $[a,b]$ such that $a=x_0<x_1<\cdots<x_{N-1}<x_N=b$ and $H$ be the length of the $k$-th subinterval ($H = x_k - x_{k-1}$), # then we have # $$\int_a^bf(x)dx \approx \sum_{k=1}^N \frac{f(x_{k-1})+f(x_k)}{2}H = A$$ # # # 1. Write a function named <b>Trap</b> that takes $a,b,N, f$ as inputs and return A # def trap(a,b,N,f): C=np.linspace(a,b,N+1) g=np.vectorize(f) A=g(C) S=0 for i in range(1,len(A)): S+=A[i]+A[i-1] K=1/2*S*((b-a)/N) return K f= lambda x: x**3+7 trap(0,1,10**6,f) # 2. Write a Python code to compute and display an approximation $Aquad$ of the integral bellow using the Python function $quad$ # $$A = \int_{0}^{2} \dfrac{x^3+5x-20}{x^2+3}dx$$ # # from scipy.integrate import quad a = 0 b = 2 f = lambda x: (x**3+5*x-20)/(x**2+3) Aquad= quad(f, a, b)[0] print(Aquad) # 3. write a Python function <b>ErrorTrap</b> that takes $M$ as input and return an arrays $ErrorInt$ and $ListN$. 
Here, $ErrorInt$ contains the absolute errors between $Aquad$ and the approximation of the integral $A$ obtained using the function <b>Trap</b> for all positve intergers $N$ in $ListN$ the set of all multiples of 10 less or equal to $M$. # def ErrorTrap(M): u= lambda x: abs(quad(f,0,2)[0]-trap(0,2,x,f)) ListN=[] #ErrorInt=np.zeros(M) for i in range(1,M+1): if i%10==0: ListN+=[i] g=np.vectorize(u) ErrorInt=g(ListN) return ErrorInt, ListN ErrorTrap(30) # 4. Plot the output $ErrorInt$ against $ListN$ for $M=200$ 𝐸𝑟𝑟𝑜𝑟𝐼𝑛𝑡 , 𝐿𝑖𝑠𝑡𝑁 = ErrorTrap(200) print(𝐿𝑖𝑠𝑡𝑁) print(ErrorInt) # # Exercise 3 # # 1. Write code to solve the following system of ordinary differential equations using the Python function odeint. # # $$ # \begin{cases} # \dfrac{dx_1}{dt}& = & -\dfrac{1}{2}x_1\\\\ # \dfrac{dx_2}{dt}& = & \dfrac{1}{2}x_1-\dfrac{1}{4}x_2\\\\ # \dfrac{dx_3}{dt}& = & \dfrac{1}{4}x_2-\dfrac{1}{6}x_3 # \end{cases}, \text{ on } [0,4] # $$ # # Subject to the initial conditions $x_1(0) = 1, x_2(0) = 1, x_3(0) = 1$. # + import numpy as np from scipy.integrate import odeint import matplotlib.pyplot as plt # function that returns dz/dt def model(z,t): x_1,x_2,x_3 = z dx_1dt = -1/2*x_1 dx_2dt = 1/2*x_1 -1/4*x_2 dx_3dt = 1/4*x_2-1/6*x_3 return dx_1dt,dx_2dt,dx_3dt # initial condition z0 = [1,1,1] # time points a = 0 b = 4 N = 100 t = np.linspace(a,b,N+1) # solve ODE z = odeint(model,z0,t) x_1 = z[:,0] x_2 = z[:,1] x_3=z[:,2] plt.plot(t,x_1,'b-') plt.plot(t,x_3,'r--') plt.plot(t,x_2,'green'); # - def f(z,t): x1,x2,x3=z dx1dt=-1/2*z[0] dx2dt=1/2*z[0]-1/4*z[1] dx3dt=1/4*z[1]-1/6*z[2] return dx1dt, dx2dt,dx3dt f(6,7) # 2. 
# The exact solution of the above system of ODEs is given by
#
# $$
# \begin{cases}
# x_1(t)& = & e^{-t/2}\\
# x_2(t)& = & -2e^{-t/2}+3e^{-t/4}\\
# x_3(t)& = & \dfrac{3}{2}e^{-t/2} - 9e^{-t/4} + \dfrac{17}{2}e^{-t/6}
# \end{cases}
# $$
#
# Use $Subplot$ to plot side by side
#
# - each exact and approximate solution in the same window
# - and their absolute error vs the time
#

# +
import numpy as np
import matplotlib.pyplot as plt

# Closed-form solutions of the system, as callables of t.
exact = [
    lambda s: np.exp(-s/2),
    lambda s: -2*np.exp(-s/2) + 3*np.exp(-s/4),
    lambda s: 3/2*np.exp(-s/2) - 9*np.exp(-s/4) + 17/2*np.exp(-s/6),
]
approx = [x_1, x_2, x_3]
exact_styles = ['b', 'y-', 'r-']
approx_styles = ['y--', 'g--', 'b--']

# One panel per component: exact curve overlaid with the odeint result.
for k in range(3):
    plt.subplot(3, 1, k + 1)
    plt.plot(t, exact[k](t), exact_styles[k])
    plt.plot(t, approx[k], approx_styles[k])
    plt.xlabel('time')
    plt.ylabel('x_%d(t)' % (k + 1))
    plt.show()
# -

# +
import numpy as np
import matplotlib.pyplot as plt

# Absolute error |numeric - exact| of each component against time.
err_styles = ['b-', 'g-', 'r-']
for k in range(3):
    plt.subplot(3, 1, k + 1)
    if k == 0:
        plt.title("Absolute Error vs Times")
    plt.plot(t, abs(approx[k] - exact[k](t)), err_styles[k])
    plt.xlabel('time')
    plt.ylabel('absolute error of x_%d' % (k + 1))
    plt.show()

#plot results
# plt.subplot(3,1,3)
# plt.plot(x,y)
# plt.xlabel('x')
# plt.ylabel('y')
# plt.show()
# -

# # Exercise 4
#
# Let $\{ t_k\}$ be a partition of $[a,b]$ such that $a=t_1<t_2<\cdots<t_{N}=b$ and $H$ be the constant length of the $k$-th subinterval ($H = t_k - t_{k-1}$). Let us consider initial value problem
#
# \begin{equation}\label{eul2}
# \begin{cases}
# \dfrac{dz}{dt} = f(z,t), & \quad \text{on } [a, b]\\\\
# z(a) = c,
# \end{cases}
# \end{equation}
# where $z,f,c\in R^M$ i.e. $z = [x_1, x_2,\cdots, x_{M}]$, $c = [x_1(a), x_2(a),\cdots, x_{M}(a)]$ and $f = [f_1, f_2,\cdots, f_{M}]$. Note that \eqref{eul2} is the general form of a system of ODEs.
#
# Let $t, z_k,Z$ defined as follows $$t=[t_1,t_2,\cdots,t_{N-1},t_{N}],\quad z_k = [x_1(t_k), x_2(t_k),\cdots, x_{M}(t_k)], \quad
# Z =\begin{pmatrix}
# x_1(t_1)& x_2(t_1)&\cdots& x_{M}(t_1)\\
# x_1(t_2)& x_2(t_2)&\cdots& x_{M}(t_2)\\
# \vdots& \vdots&\ddots& \vdots\\
# x_1(t_{N})& x_2(t_{N})&\cdots& x_{M}(t_{N})
# \end{pmatrix}
# $$
#
# 1. Write a python function <b> EulerOdeSys </b> that takes $f,c,t$ and return the solution $Z$ of the initial value problem \eqref{eul2} using Euler method i.e.
# $$ z_{k+1} = z_k + Hf(z_k,t_k) $$
#

def EulerOdeSys(f, c, t):
    """Integrate dz/dt = f(z, t), z(t[0]) = c, with the explicit Euler
    scheme and return the (len(t),) + shape(c) array of states.

    The grid t may be non-uniform: each step uses its own width.
    """
    Z = np.zeros((len(t),) + np.shape(c))
    Z[0] = c
    for k in range(len(t) - 1):
        step = t[k + 1] - t[k]                 # local step size H_k
        Z[k + 1] = Z[k] + step * f(Z[k], t[k])
    return Z


def f(x, y):
    return x + y


c = [5, 3]
t = np.linspace(0, 4, 10)
EulerOdeSys(f, c, t)

# 2. Write a python function <b> RK4OdeSys </b> that takes $f,c,t$ and return the solution $Z$ of the initial value problem (1) using the fourth order Runge-Kutta method i.e.
#
# \begin{equation}
# \begin{cases}
# k_1 = f(z_k,t_k),\\\\
# k_2 = f(z_k+H\dfrac{k_1}{2}, t_k + \dfrac{H}{2}),\\\\
# k_3 = f(z_k+H\dfrac{k_2}{2}, t_k + \dfrac{H}{2}),\\\\
# k_4 = f(z_k+Hk_3, t_k + H),\\\\
# z_{k+1} = z_k + \dfrac{H}{6}(k_1+2k_2+2k_3+k_4)
# \end{cases}
# \end{equation}
#

# +
def RK4OdeSys(f, c, t):
    """Integrate dz/dt = f(z, t), z(t[0]) = c, with the classical
    fourth-order Runge-Kutta scheme; returns the (len(t),) + shape(c)
    array of states.  Written with the half-step H/2 as the working
    quantity, so k_4 uses 2*half = H and the update uses half/3 = H/6.
    """
    Z = np.zeros((len(t),) + np.shape(c))
    Z[0] = c
    for k in range(len(t) - 1):
        half = (t[k + 1] - t[k]) / 2
        s1 = f(Z[k], t[k])
        s2 = f(Z[k] + half * s1, t[k] + half)
        s3 = f(Z[k] + half * s2, t[k] + half)
        s4 = f(Z[k] + 2 * half * s3, t[k] + 2 * half)
        Z[k + 1] = Z[k] + half / 3 * (s1 + 2 * s2 + 2 * s3 + s4)
    return Z


def f(x, y):
    return x + y**2


c = [5, 2]
t = np.linspace(0, 4, 10)
RK4OdeSys(f, c, t)
#plt.plot(RK4OdeSys1(f,c,t),'b-')
# -

# 3. Solve the system of ODEs in $Exercise2$ using your function <b> EulerOdeSys </b> and <b> RK4OdeSys </b>

# 4. By plotting the absolute error in the approximate and exact solutions, tell us which function gives a more accurate solution of a system of ODEs.

# # Exercise 5
#
# Let consider us consider the function <b> primes </b> that takes $n$ as input and return a list of primes less than $n$

# +
# This cell is only to import the libraries
import numpy as np
import time


def primes(n):
    """ Returns a list of primes < n """
    # Odd-only sieve of Eratosthenes: slot i (i >= 1) stands for 2*i + 1.
    odd_is_prime = [True] * (n // 2)
    for cand in range(3, int(n**0.5) + 1, 2):
        if odd_is_prime[cand // 2]:
            # Strike every odd multiple of cand, starting at cand**2.
            count = (n - cand * cand - 1) // (2 * cand) + 1
            odd_is_prime[cand * cand // 2::cand] = [False] * count
    return [2] + [2 * i + 1 for i in range(1, n // 2) if odd_is_prime[i]]
# -

# For any integer $n>0$ and a prime number $p$, define $\nu_p(n)$ as the greatest integer $r$ such that $p^r$ divides $n$.
# Define $$ D(n,m) = \sum_{p\; prime} \Bigl| \nu_p(n) - \nu_p(m)\Bigr| $$
#
# For example $D(14,24)=4$.
#
# Furthermore, define
#
# $$S(N) = \sum_{n=1}^{N}\sum_{m=1}^{N}D(n,m).$$
#
# You are given $S(10)=210$.
#
# 1. Write an efficient python function, <b>Func_S </b>, that takes $N$ as input and return the value $S(N)$.
#

# +
from math import floor
from math import log as ln   # kept from the original cell; no longer needed by nu


def nu(n, p):
    """Greatest integer r such that p**r divides n (n >= 1, prime p >= 2).

    Rewritten as a direct division loop; the original scanned a
    log-derived range and returned the last divisor found, which is
    equivalent but slower and harder to read.
    """
    r = 0
    while n % p == 0:
        n //= p
        r += 1
    return r


def D(n, m):
    """D(n, m) = sum over primes p of |nu_p(n) - nu_p(m)|."""
    return sum(abs(nu(n, p) - nu(m, p)) for p in primes(max(m, n) + 1))


print(D(14, 15))


def Func_S(N):
    """Return S(N) = sum_{n=1..N} sum_{m=1..N} D(n, m).

    Instead of the original O(N^2) double loop over pairs (each pair
    itself factoring both numbers), work one prime at a time: for a
    prime p, counts[r] = #{1 <= n <= N : nu_p(n) == r} equals
    floor(N/p^r) - floor(N/p^(r+1)), and p's contribution to S(N) is
    the ordered-pair sum of |r - s| over that tiny distribution.
    Total cost is roughly O(pi(N) * log N).  Sanity check: S(10) == 210.
    """
    total = 0
    for p in primes(N + 1):
        # Exponent distribution of p among 1..N.
        counts = []
        pk = 1
        while pk <= N:
            counts.append(N // pk - N // (pk * p))
            pk *= p
        # Ordered pairs: each unordered (r, s), r < s, counts twice.
        for r in range(len(counts)):
            for s in range(r + 1, len(counts)):
                total += 2 * (s - r) * counts[r] * counts[s]
    return total


Func_S(10)

nu(7, 23)
# -

# 2. Compute $S(10)$ and display its computational time

N = 10
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# 3. Compute $S(100)$ and display its computational time

N = 100
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# 4. Compute $S(1000)$ and display its computational time

N = 1000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# 5. Compute $S(10000)$ and display its computational time

N = 10000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# 6. Compute $S(100000)$ and display its computational time

N = 100000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# 7. Compute $S(1000000)$ and display its computational time

N = 1000000
time_start = time.perf_counter()
S = Func_S(N)
time_elapsed = (time.perf_counter() - time_start)
print('S({}) = {}'.format(N, S))
print('computational Time = ', time_elapsed)

# # Exercise 6
# 1. Read the Covid-19 dataset

import pandas as pd
import numpy as np
a = pd.read_csv('Covid-19.csv')   # expects Covid-19.csv next to the notebook
a

# 2.
# Drop the Country_code column
#

del a['Country_code']
a

# 3. Randomly choose three different countries

a.sample(n=3)

# Sample from the *unique* country names: sampling raw rows of the
# Country column (as before) could return the same country twice.
rand = a['Country'].drop_duplicates().sample(n=3)
rand

# 4. Select and display the records for those three countries

q = a[a['Country'].isin(rand)]
q

# 5. Calculate and display the sum, the average of the cumulative cases of each WHO region.

# Aggregate only the column of interest: calling .mean()/.sum() on the
# whole frame would try to aggregate non-numeric columns as well.
M = a.groupby('WHO_region')['Cumulative_cases'].mean()
print("the average of cumulative cases of each WHO region is\n", M)
S = a.groupby('WHO_region')['Cumulative_cases'].sum()
print("the sum of cumulative cases of each WHO region is\n", S)

# 6. Calculate and display sum, the average of the cumulative deaths of each WHO region.

M = a.groupby('WHO_region')['Cumulative_deaths'].mean()
print("the average of cumulative deaths of each WHO region is\n", M)
S = a.groupby('WHO_region')['Cumulative_deaths'].sum()
# Fixed label: the original printed "cumulative case" for the deaths sum.
print("the sum of cumulative deaths of each WHO region is\n", S)

# 7. Produce plots that look like the following three figures. Pay attention to the annotations.
#
# 7.a. <img src="Assign1.png">

import seaborn as sns
sns.boxplot(x="Country", y="New_cases", data=q)
sns.stripplot(x="Country", y="New_cases", data=q)
# (the original plt.legend() call was dropped: no labelled artists exist
# here, so it only produced a "no handles" warning)

# List indexer instead of the deprecated tuple indexer after groupby.
a.groupby('WHO_region')[['Cumulative_cases', 'Cumulative_deaths']].sum().plot.bar(grid=False);

# 7.b. <img src="Assign2.png">

import matplotlib.pyplot as plt
sns.lineplot(x="Date_reported", y="Cumulative_cases", hue="Country", linewidth=5, data=q)

# 7.c. <img src="Assign3.png">
louismozart_teyou_Python2-Copy1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from utils import GraphMaker
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np

# Survey column headers, named once so the long question strings are not
# repeated throughout the notebook.
REASON_COL = 'What was your MAIN reason for choosing SYDE?'
GRADE_161_COL = 'What was your final grade for 161?'
AVG_COL = ('What was your top 6 highschool average for engineering? Please enter just the number '
           'with NO % sign (Calc, Adv Functions, English, Physics, Chemistry, 6th course in your top 6) ')
SALARY_COL = 'What is your salary (in CAD/h, do not include "$")?'

# +
reason_v_161_data = GraphMaker(x=REASON_COL,
                               y=GRADE_161_COL,
                               data=class_df,
                               x_label="Why Syde",
                               y_label="161 Grade",
                               title="Reason vs 161")
boxplot = reason_v_161_data.boxplot(rotation=50, show_points=True)
# -

barplot = reason_v_161_data.barplot(figsize=(5, 5), orient='h')

# How many respondents picked each reason.
col_name = REASON_COL
reasons_161 = class_df.groupby(col_name)[col_name].count()
print(reasons_161)
pie_chart = reason_v_161_data.pie(x=reasons_161.values, labels=reasons_161.index, figsize=(5, 5))

scatter_plot = reason_v_161_data.scatterplot()

# +
cols = [AVG_COL, SALARY_COL]
df = class_df[cols].rename(columns=dict(zip(cols, ['avg', 'salary'])))

# Keep only respondents who reported both an average and a salary.
df = df[df['salary'].notnull() & df['avg'].notnull()]


def clean_avg_and_salary(row):
    """Round the entrance average half-up to an int and null out salaries
    outside the plausible 13-30 CAD/h range; returns the mutated row."""
    avg = row['avg']
    row['avg'] = int(avg + 1) if avg % 1 >= 0.5 else int(avg)
    if not 13 <= row['salary'] <= 30:
        row['salary'] = None
    return row


df = df.apply(clean_avg_and_salary, axis=1).dropna()
# -

grade_v_pay = GraphMaker(x='avg',
                         y='salary',
                         data=df,
                         title='Entrance avg vs salary',
                         x_label='Entrance avg',
                         y_label='1A pay')
scatter = grade_v_pay.scatterplot(ylim=(-1, 30))

scatter_reg = grade_v_pay.scatterplot(reg_line=True)
nico/test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Build a Neural Network with Tensorflow Keras on AirBNB prices in Berlin, Germany

# This is a cleaned dataset that I worked with in Unit 3 of Lambda School. The unit project was to use a neural network that would be pickled into an API for a web team to utilize in a web app. Here's my version of the project:

# Import tensorflow
import tensorflow as tf

# Imports
import pandas as pd
# Fix: seaborn and matplotlib were used below but never imported in this
# notebook, so the plotting cells raised NameError in a fresh kernel.
import seaborn as sns
import matplotlib.pyplot as plt

# Read in data with shape and head
df = pd.read_csv('data/airbnb data cleaned.csv')
print(df.shape)
df.head()

df = df.drop(['Unnamed: 0'], axis=1)

# # Get a feel for the data:

# Mean price of the rentals is $57 per night
df.describe().T

# Have a look at the corr table
df.corr()

# Visualize correlation of features to price
df.corr()['price'].sort_values()[:-2].plot(kind='bar')

# Distributions between 'price' and 'accommodates'
sns.boxplot(x='accommodates', y='price', data=df)

# Distributions between 'price' and 'bedrooms'
sns.boxplot(x='bedrooms', y='price', data=df)

# # Construct the model:

# +
# Train test split:
from sklearn.model_selection import train_test_split

X = df.drop('price', axis=1).values
y = df['price'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)

# +
# Scale data:
from sklearn.preprocessing import MinMaxScaler

# Create scaler object
sc = MinMaxScaler()

# Fit scaler on X_train only (no test-set leakage), then transform both.
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)

# Viz of X_train[0] as scaled
print(X_train[0])
# -

X_train.shape

X_test.shape

# +
# Create the Neural Network
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras import metrics

# Create model object
model = Sequential()

# Input, hidden, output layers; dropout between layers to limit overfitting.
model.add(Dense(128, activation='relu', input_shape=(28,)))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1))

# Compile model: regression, so the loss and the tracked metric are MAE.
model.compile(loss='mean_absolute_error',
              optimizer='adam',
              metrics=[metrics.mae])

# +
# Fit model with validation data to test for overfitting
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          batch_size=128,
          epochs=400)
# -

# # Model Evaluations:

# Here's the baseline mean absolute error for the model.
# Fix: the original multiplied the MAE by 100 and labelled it like an
# accuracy percentage; MAE is in price units and is reported as-is.
scores = model.evaluate(X_train, y_train)
print(f"{model.metrics_names[1]}: {scores[1]}")

# +
# Plot the model's loss to see if model is overfitting
model_loss = pd.DataFrame(model.history.history)
model_loss.plot()
# -

# See predictions
from sklearn.metrics import mean_absolute_error
predictions = model.predict(X_test)
mean_absolute_error(y_test, predictions)

# +
# Random test:
index = 90
X_pred = sc.transform([df.drop('price', axis=1).iloc[index]])
y_real = df.iloc[index]['price']
# X_pred is already a 2-D array; no need to wrap it in a list again.
y_pred = model.predict(X_pred)
print(f'Prediction: ${y_pred[0][0]} | Real: ${y_real}')
AirBNB_Tensorflow_keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # --- # # ## Project: Build a Traffic Sign Recognition Classifier # # In this notebook, a modified LeNET is trained to classify traffic sign based on the dataset [German Traffic Sign]( http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset) # # The goals / steps of this project are the following: # * Load the data set # * Explore, summarize and visualize the data set # * Perform data augmentation # * Design, train and test a model architecture # * Use the model to make predictions on new images # * Analyze the softmax probabilities of the new images # * Summarize the results with a written report # ## Step 0: Load The Data # + # Load pickled data import pickle # TODO: Fill this in based on where you saved the training and testing data training_file = '../data/train.p' validation_file='../data/valid.p' testing_file = '../data/test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] # - # --- # # ## Step 1: Dataset Summary & Exploration # # The pickled data is a dictionary with 4 key/value pairs: # # - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). # - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. # - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image. 
# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** # # Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. # ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas # + ### Replace each question mark with the appropriate value. ### Use python, pandas or numpy methods rather than hard coding the results import pandas as pd import numpy as np import tensorflow as tf from sklearn.utils import shuffle # TODO: Number of training examples n_train = len(X_train) # TODO: Number of validation examples n_validation = len(X_valid) # TODO: Number of testing examples. n_test = len(X_test) # TODO: What's the shape of an traffic sign image? image_shape = X_train[0].shape # TODO: How many unique classes/labels there are in the dataset. signList = pd.read_csv('signnames.csv') #signList=signList.set_index('ClassId') n_classes = len(signList['SignName']) print("Number of training examples =", n_train) print("Number of validation examples =",n_validation) print("Number of testing examples =", n_test) print("Image data shape =", image_shape) print("Number of classes =", n_classes) # - # ### Include an exploratory visualization of the dataset # Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. 
#
# The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.

# +
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import cv2
# Visualizations will be shown in the notebook.
# %matplotlib inline

print('German Traffic Signs Dataset Images & ClassID:')
fig1 = plt.figure(figsize=[15, 40])
columns = 4
rows = 11
# One representative image per class, titled with its id and name.
for i in range(0, n_classes):
    img = X_train[y_train == i][0]
    ax = fig1.add_subplot(rows, columns, i + 1)
    ax.title.set_text('\n' + str(signList.ClassId[i]) + '\n' + signList.SignName[i])
    plt.axis('off')
    plt.imshow(img)
# -


# +
def plot_class_distributions(title):
    """Plot side-by-side class histograms of y_train / y_valid / y_test.

    Factored into a helper because the same cell appeared verbatim both
    before and after augmentation; reads the y_* globals at call time.
    """
    print(title)
    fig2, [ax1, ax2, ax3] = plt.subplots(1, 3)
    fig2.set_figwidth(15)
    fig2.tight_layout(w_pad=5)
    for ax, labels, name in ((ax1, y_train, 'Training'),
                             (ax2, y_valid, 'Validation'),
                             (ax3, y_test, 'Test')):
        ax.hist(x=labels, bins=n_classes, rwidth=0.5)
        ax.grid(axis='y')
        ax.set_xlabel('Class ID')
        ax.set_ylabel('Frequency')
        ax.set_title(name + ' Classes Distribution')


plot_class_distributions('Classes Distributions:')
# -

# ### Data Augmentation


# +
def show_before_after(transform, title):
    """Show X_train[20000] next to transform(X_train[20000])."""
    fig, axs = plt.subplots(1, 2)
    sample = X_train[20000]
    axs[0].imshow(sample)
    axs[0].set_title('Original Image')
    axs[1].imshow(transform(sample))
    axs[1].set_title(title)


def random_translate(img):
    """Shift img by a random offset in [-2, 2] pixels on each axis."""
    px_thres = 2
    # Fix: np.random.randint's `high` is exclusive, so the original
    # jitter was asymmetric ([-2, 1]); +1 restores [-2, 2].
    dx, dy = np.random.randint(-px_thres, px_thres + 1, size=2)
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))


show_before_after(random_translate, 'Translated Image')
# -


# +
def random_rotate(img):
    """Rotate img about its centre by a random angle in [-3, 3] degrees,
    with a random scale factor around 1."""
    angle = np.random.randint(-3, 4)   # +1 on high: randint's bound is exclusive
    scale = 1 + np.random.randint(-20, 20) * 0.01
    # Fix: the original passed the literal 5 here, so the random `angle`
    # was computed but never used and every image was rotated by 5 deg.
    M = cv2.getRotationMatrix2D(((img.shape[1] - 1) / 2.0, (img.shape[0] - 1) / 2.0),
                                angle, scale)
    return cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))


show_before_after(random_rotate, 'Rotated Image')
# -


# +
def random_affine(img):
    """Apply a small random affine warp (corner jitter up to +/-0.25 px)."""
    x1 = img.shape[1] / 4
    x2 = 3 * img.shape[1] / 4
    y1 = img.shape[0] / 4
    y2 = 3 * img.shape[0] / 4
    delta = (np.random.rand(3) - 0.5) * 0.5
    pts1 = np.float32([[y1, x1], [y2, x1], [y1, x2]])
    pts2 = np.float32([[y1 + delta[0], x1 + delta[0]],
                       [y2 + delta[1], x1 + delta[1]],
                       [y1 + delta[2], x2 + delta[2]]])
    M = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))


show_before_after(random_affine, 'Transformed Image')
# -


# +
def random_brightness(img):
    """Randomly brighten/darken img, clipped back to uint8 range.

    NOTE(review): the alpha range depends on np.max(img); presumably
    tuned for uint8 images — confirm before reusing on float inputs.
    """
    alpha = (2 - np.random.randint(np.max(img) - 40, np.max(img) + 40) / 255)
    img = np.clip(img * alpha, 0, 255)
    return np.array(img, dtype=np.uint8)


show_before_after(random_brightness, 'Adjusted Image')
# -

# ### Sample Image After Data Augmentation

show_before_after(
    lambda s: random_rotate(random_translate(random_affine(random_brightness(s)))),
    'Augmented Image')

# +
print("Data Augmentation Start...")
# Top up every class to at least 800 samples with randomly transformed
# copies of its existing images.
new_images = []
new_labels = []
for class_id in range(n_classes):
    print(class_id, end=' ')
    class_indx = np.where(y_train == class_id)
    n_samples = len(class_indx[0])
    if n_samples < 800:
        for i in range(800 - n_samples):
            new_img = X_train[class_indx[0][i % n_samples]]
            new_img = random_rotate(random_translate(random_affine(random_brightness(new_img))))
            # Collect in Python lists: the original np.concatenate per
            # image copied the whole training set every iteration
            # (quadratic); one concatenate at the end is linear.
            new_images.append(new_img)
            new_labels.append(class_id)
            if i % 50 == 0:
                print('|', end='')
            elif i % 10 == 0:
                print('-', end='')
        print('')
if new_images:
    X_train = np.concatenate((X_train, np.asarray(new_images)), axis=0)
    y_train = np.concatenate((y_train, np.asarray(new_labels)), axis=0)
print("Data Augmentation Completed...")
# -

# +
plot_class_distributions('Classes Distributions After Data Augmentation:')
# -

# ----
#
# ## Step 2: Design and Test a Model Architecture
#
# Design and implement a deep learning model that learns to recognize traffic signs. Model is trained and tested on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
#
# There are various aspects to consider when thinking about this problem:
#
# - Neural network architecture (is the network over or underfitting?)
# - Play around preprocessing techniques (normalization, rgb to grayscale, etc)
# - Number of examples per label (some have more than others).
# - Generate fake data.

# ### Pre-process the Data Set (normalization, grayscale, etc.)

# Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.

# Convert images from RGB to grayscale
fig, axs = plt.subplots(1, 2)
sample = X_train[20000]
axs[0].imshow(sample)
axs[0].set_title('Original Image')
axs[1].imshow(np.sum(sample / 3, axis=2), cmap='gray')
axs[1].set_title('Grayscale Image')

# +
### Preprocess the data here. It is required to normalize the data.
Other preprocessing steps could include ### converting to grayscale, etc.The ### Feel free to use as many code cells as needed. X_train = np.sum(X_train/3, axis=3, keepdims=True) X_valid = np.sum(X_valid/3, axis=3, keepdims=True) X_test = np.sum(X_test/3, axis=3, keepdims=True) X_train = (X_train-128)/128 X_valid = (X_valid-128)/128 X_test = (X_test-128)/128 # - # ### Setup Tensorflow and Hyperparameter EPOCHS = 60 BATCH_SIZE = 128 learn_rate = 0.001 # ### Model Architecture def LeNet(x,keep_prob): mu = 0 sigma = 0.1 # Convolutional Layer 1 Input = 32x32x1 Output = 28x28x16 conv1_W = tf.Variable(tf.truncated_normal((5,5,1,16),mean=mu,stddev=sigma),name="conv1_W") conv1_b = tf.Variable(tf.zeros(16),name="conv1_b") conv1 = tf.nn.relu(tf.nn.conv2d(x,conv1_W,[1,1,1,1],padding='VALID')+conv1_b) # Max Pooling Layer 1. Input = 28x28x16 Output = 14x14x16 pool1 = tf.nn.max_pool(conv1,[1,2,2,1],[1,2,2,1],'VALID') print("pool1 : ") print(pool1.shape) # 1x1 Convolutional Layer 1 Input = 14x14x16 Output = 14x14x6 conv1x1_1_W = tf.Variable(tf.truncated_normal((1,1,16,6),mean=mu,stddev=sigma),name="conv1x1_1_W") conv1x1_1_b = tf.Variable(tf.zeros(6),name="conv1x1_1_b") conv1x1_1 = tf.nn.relu(tf.nn.conv2d(pool1,conv1x1_1_W,[1,1,1,1],padding='VALID')+conv1x1_1_b) print('conv1x1_1 : ') print(conv1x1_1.shape) # Max Pooling Layer 2 Input = 14x14x16 Output = 14x14x16 pool2 = tf.nn.max_pool(pool1,[1,3,3,1],[1,1,1,1],'SAME') print("pool2 : ") print(pool2.shape) # Convolutional Layer 2 Input = 14x14x16 Output = 14x14x12 conv2_W = tf.Variable(tf.truncated_normal((3,3,16,12),mean=mu,stddev=sigma),name="conv2_W") conv2_b = tf.Variable(tf.zeros(12),name="conv2_b") conv2 = tf.nn.relu(tf.nn.conv2d(pool1,conv2_W,[1,1,1,1],padding='SAME')+conv2_b) print("conv2 : ") print(conv2.shape) # Convolutional Layer 3 Input = 14x14x16. 
Output = 14x14x6 conv3_W = tf.Variable(tf.truncated_normal((5,5,16,6),mean=mu,stddev=sigma),name="conv3_W") conv3_b = tf.Variable(tf.zeros(6),name="conv3_b") conv3 = tf.nn.relu(tf.nn.conv2d(pool1,conv3_W,[1,1,1,1],padding='SAME')+conv3_b) print("conv3 : ") print(conv3.shape) # 1x1 Convolutional Layer 4 Input = 14x14x16 Output = 14x14x6 conv1x1_4_W = tf.Variable(tf.truncated_normal((1,1,16,6),mean=mu,stddev=sigma),name="conv1x1_4_W") conv1x1_4_b = tf.Variable(tf.zeros(6),name="conv1x1_4_b") conv1x1_4 = tf.nn.relu(tf.nn.conv2d(pool2,conv1x1_4_W,[1,1,1,1],padding='VALID')+conv1x1_4_b) print("conv1x1_4 : ") print(conv1x1_4.shape) # Channel Concat Output = 14x14x30 concat1 = tf.concat([conv1x1_1,conv2,conv3,conv1x1_4],3) print("concat1 : ") print(concat1.shape) # Flatten Layer Input = 14x14x30 Output = 5880 flat1 = tf.contrib.layers.flatten(concat1) flat1 = tf.nn.dropout(flat1,keep_prob) # Fully Connected Layer 1 Input = 5880 Output = 600 fc1_W = tf.Variable(tf.truncated_normal((5880,600),mean=mu,stddev=sigma),name="fc1_W") fc1_b = tf.Variable(tf.zeros(600),name="fc1_b") fc1 = tf.nn.relu(tf.matmul(flat1,fc1_W)+fc1_b) fc1 = tf.nn.dropout(fc1,keep_prob) # Fully Connected Layer 2 Input = 600 Output = 50 fc2_W = tf.Variable(tf.truncated_normal((600,50),mean=mu,stddev=sigma),name="fc2_W") fc2_b = tf.Variable(tf.zeros(50),name="fc2_b") fc2 = tf.nn.relu(tf.matmul(fc1,fc2_W)+fc2_b) fc2 = tf.nn.dropout(fc2,keep_prob) # Fully Connected Layer 3 Input = 50 Output = 43 fc3_W = tf.Variable(tf.truncated_normal((50,n_classes),mean=mu,stddev=sigma),name="fc3_W") fc3_b = tf.Variable(tf.zeros(n_classes),name="fc3_b") logits = tf.nn.relu(tf.matmul(fc2,fc3_W)+fc3_b,name="logits") return logits def LeNet2(x,keep_prob): mu = 0 sigma = 0.1 # Convolutional Layer 1 Input = 32x32x1 Output = 28x28x6 conv1_W = tf.Variable(tf.truncated_normal((5,5,1,6),mean=mu,stddev=sigma)) conv1_b = tf.Variable(tf.zeros(6)) conv1 = tf.nn.relu(tf.nn.conv2d(x,conv1_W,[1,1,1,1],padding='VALID')+conv1_b) print("conv1 
shape: ") print(conv1.shape) # Max Pooling Layer 1 Input = 28x28x6 Output = 14x14x6 pool1 = tf.nn.max_pool(conv1,[1,2,2,1],[1,2,2,1],padding='VALID') print("pool1 shape: ") print(pool1.shape) # Convolutional Layer 2 Input = 14x14x6 Output = 10x10x16 conv2_W = tf.Variable(tf.truncated_normal((5,5,6,16),mean=mu,stddev=sigma)) conv2_b = tf.Variable(tf.zeros(16)) conv2 = tf.nn.relu(tf.nn.conv2d(pool1,conv2_W,[1,1,1,1],padding='VALID')+conv2_b) print("conv2 shape: ") print(conv2.shape) # Max Pooling Layer 2 Input = 10x10x16 Output = 5x5x16 pool2 = tf.nn.max_pool(conv2,[1,2,2,1],[1,2,2,1],padding='VALID') print("pool2 shape: ") print(pool2.shape) # Convolutional Layer 3 Input = 5x5x16 Output = 1x1x200 conv3_W = tf.Variable(tf.truncated_normal((5,5,16,400),mean=mu,stddev=sigma)) conv3_b = tf.Variable(tf.zeros(400)) conv3 = tf.nn.relu(tf.nn.conv2d(pool2,conv3_W,[1,1,1,1],padding='VALID')+conv3_b) print("conv3 shape: ") print(conv3.shape) # Flatten Layer 1 Input = 5x5x16 Output = 400 flat1 = tf.contrib.layers.flatten(pool2) print("flat1 shape: ") print(flat1.shape) # Flatten Layer 2 Input = 1x1x200 Output = 200 flat2 = tf.contrib.layers.flatten(conv3) print("flat2 shape: ") print(flat2.shape) # Channel Concat Output = 600 concat1 = tf.concat([flat1,flat2],1) concat1 = tf.nn.dropout(concat1,keep_prob) print("concat1 : ") print(concat1.shape) # Fully Connected Layer 1 Input = 600 Output = 200 fc1_W = tf.Variable(tf.truncated_normal((800,43),mean=mu,stddev=sigma)) fc1_b = tf.Variable(tf.zeros(43)) logits = tf.nn.relu(tf.matmul(concat1,fc1_W)+fc1_b) #fc1 = tf.nn.dropout(fc1,keep_prob) # Fully Connected Layer 2 Input = 120 Output = 84 #fc2_W = tf.Variable(tf.truncated_normal((200,43),mean=mu,stddev=sigma)) #fc2_b = tf.Variable(tf.zeros(43)) #logits = tf.nn.relu(tf.matmul(fc1,fc2_W)+fc2_b) #fc2 = tf.nn.dropout(fc2,keep_prob) # Fully Connected Layer 3 Input = 84 Output = 43 #fc3_W = tf.Variable(tf.truncated_normal((84,n_classes),mean=mu,stddev=sigma)) #fc3_b = 
tf.Variable(tf.zeros(n_classes)) #logits = tf.nn.relu(tf.matmul(fc2,fc3_W)+fc3_b) return logits x = tf.placeholder(tf.float32,(None,32,32,1),name="x") y = tf.placeholder(tf.int32,(None),name="y") keep_prob = tf.placeholder(tf.float32,name="keep_prob") one_hot_y = tf.one_hot(y,n_classes) logits = LeNet(x,keep_prob) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels = one_hot_y,logits = logits,name="cross_entropy") loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate) training_operation = optimizer.minimize(loss_operation) # + correct_prediction = tf.equal(tf.argmax(logits,1),tf.argmax(one_hot_y,1),name="correct_prediction") accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction,tf.float32),name="accuracy_operation") def evaluate(X_data,y_data): num_data = len(X_data) total_accuracy = 0 sess=tf.get_default_session() for batches in range(0,num_data,BATCH_SIZE): batch_x,batch_y = X_data[batches:batches+BATCH_SIZE],y_data[batches:batches+BATCH_SIZE] accuracy = sess.run(accuracy_operation,feed_dict={x:batch_x,y:batch_y,keep_prob:1.0}) total_accuracy += (accuracy*len(batch_x)) return total_accuracy/num_data # - # ### Train, Validate and Test the Model # A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation # sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting. ### Train your model here. ### Calculate and report the accuracy on the training and validation set. ### Once a final model architecture is selected, ### the accuracy on the test set should be calculated and reported as well. 
with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() num_data = len(X_train) for i in range(EPOCHS): X_train,y_train = shuffle(X_train,y_train) for batches in range(0,num_data,BATCH_SIZE): batch_x,batch_y = X_train[batches:batches+BATCH_SIZE],y_train[batches:batches+BATCH_SIZE] sess.run(training_operation,feed_dict={x:batch_x,y:batch_y,keep_prob:0.5}) validation_accuracy = evaluate(X_valid,y_valid) print(" EPOCH {} ...".format(i+1)) print(" Validation Accuracy = {:.3f}".format(validation_accuracy)) saver.save(sess, './lenet') print("Model saved") # ## Test the model with test data with tf.Session() as sess: saver.restore(sess,'lenet') test_accuracy = evaluate(X_test,y_test) print("Accuracy : {:.3f}".format(test_accuracy)) # --- # # ## Step 3: Test a Model on New Images # # To test the model, five pictures of German traffic signs from the web are used on the model to predict the traffic sign type. # # ### Load and Output the Images # + ### Load the images and plot them here. ### Feel free to use as many code cells as needed. import glob import matplotlib.image as mpimg print("Loading web images ...") images = [] fig, axs = plt.subplots(1,5) fig.set_figwidth(15) fig.tight_layout(w_pad=1) for i,img in enumerate(glob.glob('Image00?.jpg')): image = cv2.cvtColor(cv2.imread(img),cv2.COLOR_BGR2RGB) axs[i].imshow(image) images.append(image) images = np.asarray(images) images = np.sum(images/3, axis=3, keepdims=True) normalized_images = (images-128)/128 #saver = tf.train.Saver() labels = [4,39,40,17,28] print("Done") # - # ### Predict the Sign Type for Each Image ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. 
with tf.Session() as sess:
    saver.restore(sess,'lenet')
    predictions_operation = tf.argmax(logits,1)
    # Fix: feed the fully pre-processed inputs.  The network was trained on
    # grayscale images scaled with (pixel - 128) / 128, but this cell
    # previously fed the un-normalized grayscale `images`, while
    # `normalized_images` was computed in the loading cell and never used.
    web_logits = sess.run(predictions_operation,feed_dict={x:normalized_images,y:labels,keep_prob:1.0})

# Show each grayscale image with its true label and predicted class id.
fig, axs = plt.subplots(1,5)
fig.set_figwidth(15)
fig.tight_layout(w_pad=1)
for i,img in enumerate(images):
    axs[i].imshow(img.squeeze(),cmap='gray')
    axs[i].set_title("Label: " + str(labels[i]) +" Prediction: " + str(web_logits[i]))

# ### Analyze Performance

### Calculate the accuracy for these 5 new images.
with tf.Session() as sess:
    saver.restore(sess,'lenet')
    # Fix: evaluate on the normalized images (same preprocessing as training).
    perf = evaluate(normalized_images,labels)
    print("Accuracy : {:.3f}".format(perf))

# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web
# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
#
# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
#
# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
#
# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes.
`tf.nn.top_k` is used to choose the three classes with the highest probability: # # ``` # # (5, 6) array # a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, # 0.12789202], # [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, # 0.15899337], # [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , # 0.23892179], # [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , # 0.16505091], # [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, # 0.09155967]]) # ``` # # Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces: # # ``` # TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], # [ 0.28086119, 0.27569815, 0.18063401], # [ 0.26076848, 0.23892179, 0.23664738], # [ 0.29198961, 0.26234032, 0.16505091], # [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], # [0, 1, 4], # [0, 5, 1], # [1, 3, 5], # [1, 4, 3]], dtype=int32)) # ``` # # Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. 
with tf.Session() as sess:
    saver.restore(sess,'lenet')
    softmax_logits = tf.nn.softmax(logits)
    top_k = tf.nn.top_k(softmax_logits, k=5)
    # Fix: feed the normalized images — the model was trained on inputs
    # scaled with (pixel - 128) / 128, so the raw grayscale `images` fed
    # previously did not match the training distribution.
    web_top_k = sess.run(top_k,feed_dict={x:normalized_images,y:labels,keep_prob:1.0})

# Report the top-5 class probabilities for each of the five web images:
# web_top_k[0] holds the probabilities, web_top_k[1] the class ids.
# (Successive plt.imshow calls in one cell draw on the same axes, so only
# the last image is actually displayed — unchanged from the original.)
for i, name in enumerate(["First", "Second", "Third", "Fourth", "Fifth"]):
    print("{} Image".format(name))
    plt.imshow(images[i].squeeze(),cmap='gray')
    print("Softmax Probabilities: {}".format(web_top_k[0][i]))
    print("Prediction Label: {}".format(web_top_k[1][i]))

# ---
#
# ## Step 4 (Optional): Visualize the Neural Network's State with Test Images
#
# This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting.
For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
#
# Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer you could enter conv2 as the tf_activation variable.
#
# For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
#
# <figure>
#  <img src="visualize_cnn.png" width="380" alt="Combined Image" />
#  <figcaption>
#  <p></p>
#  <p style="text-align: center;"> Your output should look something like this (above)</p>
#  </figcaption>
# </figure>
#  <p></p>

# +
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.

# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
    """Plot every feature map of `tf_activation` for a single stimulus image.

    The activations are evaluated in the globally available session `sess`
    and fed through the global placeholder `x`, so `image_input` must already
    be pre-processed exactly like the training data.
    """
    # Here make sure to preprocess your image_input in a way your network expects
    # with size, normalization, ect if needed
    # image_input =
    # Note: x should be the same name as your network's tensorflow data placeholder variable
    # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
    activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15,15))
    # NOTE: the fixed 6x8 subplot grid supports at most 48 feature maps.
    for featuremap in range(featuremaps):
        plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
        plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        # Fix: the original condition was `activation_min != -1 & activation_max != -1`.
        # Bitwise `&` binds tighter than `!=`, so it parsed as the chained
        # comparison `activation_min != (-1 & activation_max) != -1`, which is
        # NOT the intended "both bounds supplied" check (e.g. it is False
        # whenever min == max).  Use a boolean `and` instead.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min !=-1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
Traffic_Sign_Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction to Modeling Libraries import numpy as np import pandas as pd np.random.seed(12345) import matplotlib.pyplot as plt plt.rc('figure', figsize=(10, 6)) PREVIOUS_MAX_ROWS = pd.options.display.max_rows pd.options.display.max_rows = 20 np.set_printoptions(precision=4, suppress=True) # ## Interfacing Between pandas and Model Code import pandas as pd import numpy as np data = pd.DataFrame({ 'x0': [1, 2, 3, 4, 5], 'x1': [0.01, -0.01, 0.25, -4.1, 0.], 'y': [-1.5, 0., 3.6, 1.3, -2.]}) data data.columns data.values df2 = pd.DataFrame(data.values, columns=['one', 'two', 'three']) df2 model_cols = ['x0', 'x1'] data.loc[:, model_cols].values data['category'] = pd.Categorical(['a', 'b', 'a', 'a', 'b'], categories=['a', 'b']) data dummies = pd.get_dummies(data.category, prefix='category') data_with_dummies = data.drop('category', axis=1).join(dummies) data_with_dummies # ## Creating Model Descriptions with Patsy # y ~ x0 + x1 data = pd.DataFrame({ 'x0': [1, 2, 3, 4, 5], 'x1': [0.01, -0.01, 0.25, -4.1, 0.], 'y': [-1.5, 0., 3.6, 1.3, -2.]}) data import patsy y, X = patsy.dmatrices('y ~ x0 + x1', data) y X np.asarray(y) np.asarray(X) patsy.dmatrices('y ~ x0 + x1 + 0', data)[1] coef, resid, _, _ = np.linalg.lstsq(X, y) coef coef = pd.Series(coef.squeeze(), index=X.design_info.column_names) coef # ### Data Transformations in Patsy Formulas y, X = patsy.dmatrices('y ~ x0 + np.log(np.abs(x1) + 1)', data) X y, X = patsy.dmatrices('y ~ standardize(x0) + center(x1)', data) X new_data = pd.DataFrame({ 'x0': [6, 7, 8, 9], 'x1': [3.1, -0.5, 0, 2.3], 'y': [1, 2, 3, 4]}) new_X = patsy.build_design_matrices([X.design_info], new_data) new_X y, X = patsy.dmatrices('y ~ I(x0 + x1)', data) X # ### Categorical Data and Patsy data = pd.DataFrame({ 'key1': ['a', 'a', 
'b', 'b', 'a', 'b', 'a', 'b'],
    'key2': [0, 1, 0, 1, 0, 1, 0, 0],
    'v1': [1, 2, 3, 4, 5, 6, 7, 8],
    'v2': [-1, 0, 2.5, -0.5, 4.0, -1.2, 0.2, -1.7]
})
y, X = patsy.dmatrices('v2 ~ key1', data)
X
y, X = patsy.dmatrices('v2 ~ key1 + 0', data)
X
# C(...) forces a numeric column to be treated as categorical.
y, X = patsy.dmatrices('v2 ~ C(key2)', data)
X
data['key2'] = data['key2'].map({0: 'zero', 1: 'one'})
data
y, X = patsy.dmatrices('v2 ~ key1 + key2', data)
X
# key1:key2 adds the interaction term between the two categoricals.
y, X = patsy.dmatrices('v2 ~ key1 + key2 + key1:key2', data)
X

# ## Introduction to statsmodels

# ### Estimating Linear Models

import statsmodels.api as sm
import statsmodels.formula.api as smf

# +
def dnorm(mean, variance, size=1):
    """Draw `size` normal samples with the given mean and variance."""
    if isinstance(size, int):
        size = size,
    return mean + np.sqrt(variance) * np.random.randn(*size)

# For reproducibility
np.random.seed(12345)

# Synthetic regression problem: three noisy features, known coefficients.
N = 100
X = np.c_[dnorm(0, 0.4, size=N),
          dnorm(0, 0.6, size=N),
          dnorm(0, 0.2, size=N)]
eps = dnorm(0, 0.1, size=N)
beta = [0.1, 0.3, 0.5]
y = np.dot(X, beta) + eps
# -

X[:5]
y[:5]
X_model = sm.add_constant(X)
X_model[:5]
model = sm.OLS(y, X)
results = model.fit()
results.params
print(results.summary())
data = pd.DataFrame(X, columns=['col0', 'col1', 'col2'])
data['y'] = y
data[:5]
results = smf.ols('y ~ col0 + col1 + col2', data=data).fit()
results.params
results.tvalues
results.predict(data[:5])

# ### Estimating Time Series Processes

# +
# Simulate an AR(2) process with coefficients b0, b1.
# NOTE(review): `import random` appears unused in this chapter — verify.
init_x = 4
import random
values = [init_x, init_x]
N = 1000
b0 = 0.8
b1 = -0.4
noise = dnorm(0, 0.1, N)
for i in range(N):
    new_x = values[-1] * b0 + values[-2] * b1 + noise[i]
    values.append(new_x)
# -

# NOTE(review): sm.tsa.AR is deprecated in newer statsmodels in favour of
# AutoReg — confirm the installed version still provides it.
MAXLAGS = 5
model = sm.tsa.AR(values)
results = model.fit(MAXLAGS)
results.params

# ## Introduction to scikit-learn

train = pd.read_csv('datasets/titanic/train.csv')
test = pd.read_csv('datasets/titanic/test.csv')
train[:4]
train.isnull().sum()
test.isnull().sum()
# Impute missing ages with the TRAINING median in both splits (no leakage).
impute_value = train['Age'].median()
train['Age'] = train['Age'].fillna(impute_value)
test['Age'] = test['Age'].fillna(impute_value)
train['IsFemale'] = (train['Sex'] == 'female').astype(int)
test['IsFemale'] =
(test['Sex'] == 'female').astype(int)
# Select the three model features and extract plain numpy arrays.
predictors = ['Pclass', 'IsFemale', 'Age']
X_train = train[predictors].values
X_test = test[predictors].values
y_train = train['Survived'].values
X_train[:5]
y_train[:5]
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
y_predict[:10]
# (y_true == y_predict).mean()
# LogisticRegressionCV picks the regularization strength by cross-validation.
from sklearn.linear_model import LogisticRegressionCV
model_cv = LogisticRegressionCV(10)
model_cv.fit(X_train, y_train)
from sklearn.model_selection import cross_val_score
model = LogisticRegression(C=10)
scores = cross_val_score(model, X_train, y_train, cv=4)
scores

# ## Continuing Your Education

# Restore the display option saved at the top of the chapter.
pd.options.display.max_rows = PREVIOUS_MAX_ROWS
Ch13 - Introduction to Modeling Libraries.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # <div class='bar_title'></div> # # *Practical Data Science* # # # Deep Learning on Tabular Data # # <NAME><br> # Chair of Information Systems and Management # # Winter Semester 19/20 # + [markdown] slideshow={"slide_type": "subslide"} toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Motivation" data-toc-modified-id="Motivation-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Motivation</a></span></li><li><span><a href="#Artificial-Neural-Networks" data-toc-modified-id="Artificial-Neural-Networks-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Artificial Neural Networks</a></span><ul class="toc-item"><li><span><a href="#The-Perceptron" data-toc-modified-id="The-Perceptron-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>The Perceptron</a></span></li><li><span><a href="#Multi-layer-Perceptron-aka.-Neural-Networks" data-toc-modified-id="Multi-layer-Perceptron-aka.-Neural-Networks-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Multi-layer Perceptron aka. 
Neural Networks</a></span></li><li><span><a href="#Training-Neural-Networks" data-toc-modified-id="Training-Neural-Networks-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Training Neural Networks</a></span></li></ul></li><li><span><a href="#Deep-Learning-on-Tabular-Data-with-fast.ai" data-toc-modified-id="Deep-Learning-on-Tabular-Data-with-fast.ai-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Deep Learning on Tabular Data with <em>fast.ai</em></a></span><ul class="toc-item"><li><span><a href="#fast.ai-Datasets" data-toc-modified-id="fast.ai-Datasets-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span><em>fast.ai</em> Datasets</a></span></li><li><span><a href="#Tabular-data-preprocessing" data-toc-modified-id="Tabular-data-preprocessing-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Tabular data preprocessing</a></span></li></ul></li><li><span><a href="#Defining-a-model" data-toc-modified-id="Defining-a-model-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Defining a model</a></span></li><li><span><a href="#Model-Evaluation" data-toc-modified-id="Model-Evaluation-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Model Evaluation</a></span></li><li><span><a href="#Embeddings-for-Categorical-Variables" data-toc-modified-id="Embeddings-for-Categorical-Variables-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Embeddings for Categorical Variables</a></span><ul class="toc-item"><li><span><a href="#Taking-Inspiration-from-Word-Embeddings" data-toc-modified-id="Taking-Inspiration-from-Word-Embeddings-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>Taking Inspiration from Word Embeddings</a></span></li><li><span><a href="#Applying-Embeddings-for-Categorical-Variables" data-toc-modified-id="Applying-Embeddings-for-Categorical-Variables-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Applying Embeddings for Categorical Variables</a></span></li><li><span><a href="#Visualizing-Embeddings-with-Tensorboard" 
data-toc-modified-id="Visualizing-Embeddings-with-Tensorboard-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>Visualizing Embeddings with Tensorboard</a></span></li></ul></li><li><span><a href="#Wrapping-up" data-toc-modified-id="Wrapping-up-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Wrapping up</a></span></li></ul></div> # + [markdown] slideshow={"slide_type": "subslide"} # __Lessons Learned Assingment 1__ # # - Use descriptive commit messages (see example) # - Don't merge pull request # + [markdown] slideshow={"slide_type": "subslide"} # __Credits__ # # - https://www.fast.ai/2018/04/29/categorical-embeddings/ # - https://confusedcoders.com/data-science/deep-learning/how-to-apply-deep-learning-on-tabular-data-with-fastai # + [markdown] slideshow={"slide_type": "slide"} # ## Motivation # - # There is a powerful technique that is winning Kaggle competitions and is widely used at Google (according to [<NAME>](https://twimlai.com/twiml-talk-124-systems-software-machine-learning-scale-jeff-dean/)), [Pinterest](https://medium.com/the-graph/applying-deep-learning-to-related-pins-a6fee3c92f5e), and [Instacart](https://tech.instacart.com/deep-learning-with-emojis-not-math-660ba1ad6cdc), yet that many people don’t even realize is possible: # # __the use of deep learning for tabular data, and in particular, the creation of embeddings for categorical variables.__ # # Despite what you may have heard, you can use deep learning for the type of data you might keep in a SQL database, a Pandas DataFrame, or an Excel spreadsheet (including time-series data). 
# + [markdown] slideshow={"slide_type": "subslide"} # #### Pinterest: Finding Related Pins # # <img src="https://miro.medium.com/max/676/0*i-U3QUkyBhWVX4UK.png" style="width:70%" /> # # *Source: [Pinterest/Medium](https://medium.com/the-graph/applying-deep-learning-to-related-pins-a6fee3c92f5e)* # + [markdown] slideshow={"slide_type": "subslide"} # __Instacart: Sorting shopping lists with deep learning__ # # # <img src="https://miro.medium.com/max/2454/1*LE3oybWmVghSDjPP99gxYg.png" style="width:80%" /> # # # *Source: [Instacart](https://tech.instacart.com/deep-learning-with-emojis-not-math-660ba1ad6cdc)* # + [markdown] slideshow={"slide_type": "slide"} # ## Artificial Neural Networks # + [markdown] slideshow={"slide_type": "subslide"} # __What are neural networks?__ # + [markdown] cell_style="split" # - Biological neural networks have interconnected neurons with dendrites that receive inputs, then based on these inputs they produce an output signal through an axon to another neuron # - Artificial Neural Networks (ANN) are a machine learning framework that attempts to mimic the learning pattern of natural biological neural networks # - The creation of ANN begins with the most basic form, a single perceptron # + [markdown] cell_style="split" # <img src="https://www.extremetech.com/wp-content/uploads/2013/09/340-640x426.jpg" style="width:100%" /> # # # # # # + [markdown] slideshow={"slide_type": "subslide"} # ### The Perceptron # + [markdown] cell_style="split" # Developed by <NAME> in 1957 # # - Perceptrons have one or more weighted inputs, a bias, an activation function, and a single output # - A perceptron receives inputs, multiplies them by some weight, and then passes them into an activation function to produce an output # - The key idea is to “fire” / activate the neuron only if a sufficiently strong input signal is detected # + [markdown] cell_style="split" # <img src="https://miro.medium.com/max/2870/1*n6sJ4yZQzwKL9wnF5wnVNg.png" style="width:100%" /> # # + 
[markdown] slideshow={"slide_type": "subslide"} # __Different Activation Functions and their Graphs__ # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # <img src="https://miro.medium.com/max/1200/1*ZafDv3VUm60Eh10OeJu1vw.png" style="width:100%" /> # # [Image Source](https://medium.com/@shrutijadon10104776/survey-on-activation-functions-for-deep-learning-9689331ba092) # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # ReLU is the most commonly used Activation Functions, because of its simplicity during backpropagation and its not computationally expensive # + [markdown] slideshow={"slide_type": "subslide"} # ### Multi-layer Perceptron aka. Neural Networks # - # A MLP is composed of multiple layers of perceptrons # # <img src="https://camo.githubusercontent.com/d95fb90b396fc77c614cc6b176dd049066273f96/68747470733a2f2f7777772e64726f70626f782e636f6d2f732f717334746f6a763575356834386c662f6d756c74696c617965725f70657263657074726f6e2e706e673f7261773d31" style="width:80%" /> # # [Image Source](https://github.com/PetarV-/TikZ/tree/master/Multilayer%20perceptron) # + [markdown] slideshow={"slide_type": "subslide"} # __Layers of a MLP__ # # - Initial layer = input layer which is fed by the feature inputs # - Last layer = output layer which creates the resulting outputs # - Any layers in between are known as hidden layers because they do not directly “observe” the feature inputs or outputs # + [markdown] slideshow={"slide_type": "subslide"} # __Universal approximation theorem__ # # From Wikipedia: # # _"In the mathematical theory of artificial neural networks, the universal approximation theorem states that a feed-forward network with __a single hidden layer__ containing a finite number of neurons can approximate continuous functions [...] 
when given appropriate parameters; however, it does not touch upon __the algorithmic learnability of those parameters__."_ # + [markdown] slideshow={"slide_type": "subslide"} # ### Training Neural Networks # # Learning is adjustment of the weights of the connections between perceptrons according to some modification rule. # # - The Backpropagation algorithm searches for weight values that minimize the total error of the network over the set of training examples # # It consists of the repeated application of the following two passes. # # - __Forward pass__: in this step the network is activated on one example and the error of (each neuron of) the output layer is computed # - __Backward pass__: in this step the network error is used for updating the weights # + [markdown] slideshow={"slide_type": "subslide"} # __Forward and Backward paths__ # # <img src="https://miro.medium.com/max/3108/1*6q2Rgd8W9DoCN9Wfwc_9gw.png" style="width:60%" /> # # [Image Source](https://medium.com/datathings/neural-networks-and-backpropagation-explained-in-a-simple-way-f540a3611f5e) # + [markdown] slideshow={"slide_type": "subslide"} # __MLP Example__ # # We will work with the same dataset as in the last lecture, a sample of the adult dataset which has some census information on individuals. Again, we'll use it to train a model to predict whether salary is greater than $50k or not. 
# -

# Load packages
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score

# + [markdown] slideshow={"slide_type": "fragment"}
# Load data set
# -

file_path = 'https://raw.githubusercontent.com/wi3jmu/PDS1920/master/Lecture/data/adult.csv'
adult_data = pd.read_csv(file_path)
# Binarize the target: 1 if salary >= 50k, else 0.
adult_data = adult_data.assign(salary=(adult_data['salary']=='>=50k').astype(int))
y = adult_data['salary']
# Fix: the original built remove_cols = ['salary', 'salary'] (a duplicated
# entry), dropped it, then immediately overwrote X with an equivalent drop.
# A single drop of the target column is all that is needed.
X = adult_data.drop(columns=['salary'])

# + [markdown] slideshow={"slide_type": "subslide"}
# Split data set
# -

train_X, val_X, train_y, val_y = train_test_split(X, y, test_size=0.2, random_state = 0)

# Fix: numCols is used by the imputation cell below but was only defined in
# the later standardization cell, so running the notebook top-to-bottom
# raised a NameError.  Define the numeric column list before its first use.
numCols = X.select_dtypes(['int', 'float']).columns.to_list()

# Impute missing values (we will omit the categorical features here)
simple_imputer = SimpleImputer()
train_X_num = pd.DataFrame(simple_imputer.fit_transform(train_X[numCols]), columns=numCols, index=train_X.index)
val_X_num = pd.DataFrame(simple_imputer.transform(val_X[numCols]), columns=numCols, index=val_X.index)

# + [markdown] cell_style="center" slideshow={"slide_type": "fragment"}
# Standardize numeric features
# -

# Fit the scaler on the training split only; reuse it on validation data.
scaler = StandardScaler()
train_X_num_standardized = pd.DataFrame(scaler.fit_transform(train_X_num), columns=numCols, index=train_X.index)
val_X_num_standardized = pd.DataFrame(scaler.transform(val_X_num), columns=numCols, index=val_X.index)

# + [markdown] slideshow={"slide_type": "subslide"}
# Train model
# -

model = MLPClassifier()
model.fit(train_X_num_standardized, train_y)

# Evaluate predictions
preds = model.predict(val_X_num_standardized)
accuracy_score(val_y, preds)

# + [markdown] slideshow={"slide_type": "subslide"}
# __Advantages of Multi-layer Perceptrons__
#
# - Capability to learn non-linear models.
# - Capability to learn models in real-time (on-line learning) using `partial_fit` # # __The disadvantages of Multi-layer Perceptrons__ # - MLP with hidden layers have a non-convex loss function where there exists more than one local minimum. Therefore different random weight initializations can lead to different validation accuracy. # - MLP requires tuning a number of hyperparameters such as the number of hidden neurons, layers, and iterations. # - MLP is sensitive to feature scaling. # # [from scikit-learn](https://scikit-learn.org/stable/modules/neural_networks_supervised.html) # + [markdown] slideshow={"slide_type": "subslide"} # __Is this already deep learning?__ # # From Wikipedia: # # _"Deep learning [...] uses multiple layers to progressively extract higher level features from the raw input. For example, in image processing, lower layers may identify edges, while higher layers may identify the concepts relevant to a human such as digits or letters or faces."_ # # + [markdown] cell_style="center" slideshow={"slide_type": "slide"} # ## Deep Learning on Tabular Data with *fast.ai* # + [markdown] slideshow={"slide_type": "subslide"} # __Mission of fast.ai__ # # Deep learning is transforming the world. We are making deep learning easier to use and getting more people from all backgrounds involved through our: # # - free courses for coders # - software library # - cutting-edge research # - community # # The world needs everyone involved with AI, no matter how unlikely your background. # # from [fast.ai](https://www.fast.ai/about/) # + [markdown] slideshow={"slide_type": "subslide"} # First, let's import everything we need for the tabular application. 
# - from fastai import * from fastai.tabular import * # `from <module> import *` means “I want access to all the names in <module> that I’m meant to have access to” # + [markdown] slideshow={"slide_type": "subslide"} # ### *fast.ai* Datasets # # Tabular data usually comes in the form of a delimited file (such as .csv) containing variables of different kinds: text/category, numbers, and perhaps some missing values. # # *Fast.ai's* [dataset module](https://docs.fast.ai/datasets.html#datasets) has necessary functions to be able to download several useful [datasets](https://course.fast.ai/datasets) that we might be interested in using in our models. # # We will work with the same dataset as in the last lecture, a sample of the __adult dataset__ which has some census information on individuals. Again, we'll use it to train a model to predict whether salary is greater than \$50k or not. # - path = untar_data(url=URLs.ADULT_SAMPLE); path # `untar_data()`downloads a dataset from `url` and unpacks it to `path`. # + slideshow={"slide_type": "subslide"} df = pd.read_csv(path/'adult.csv') df.head() # + [markdown] slideshow={"slide_type": "-"} # Here all the information that will form our input is in the 14 first columns, and the dependent variable is the last column. We will split our input between two types of variables: categorical and continuous. # + [markdown] slideshow={"slide_type": "subslide"} # - **Categorical variables** will be replaced by a category - a unique id that identifies them - before they are passed through an embedding layer. # - **Continuous variables** will be normalized and then directly fed to the model. # # Another thing we need to handle are the missing values: our model isn't going to like receiving NaNs so we should remove them in a smart way. 
# # # + [markdown] slideshow={"slide_type": "subslide"} # ### Tabular data preprocessing # # fast.ai contains classes that define [transformations](https://docs.fast.ai/tabular.transform.html) for preprocessing dataframes of tabular data. Preprocessing includes things like # # - `Categorify`: replacing non-numerical variables by categories, i.e, their unique category id # - `FillMissing`: filling missing values (default fill strategy: median) # - `Normalize:` normalizing continuous variables # + [markdown] slideshow={"slide_type": "fragment"} # We can define a list of Transforms that will be applied to our variables. Here we transform all categorical variables into categories. We also replace missing values for continuous variables by the median column value and normalize those. # - procs = [FillMissing, Categorify, Normalize] # + [markdown] slideshow={"slide_type": "subslide"} # __Categorical and continuous variables__ # # Then let's manually assign our variables to dependent, categorical and continuous variables # - fast.ai will assume all variables that aren't dependent or categorical are continuous, unless we explicitly pass a list to the `cont_names` parameter when constructing our DataBunch. # - dep_var = 'salary' cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country'] # + [markdown] slideshow={"slide_type": "subslide"} # __Training and validation sets__ # # To split our data into training and validation sets, we use valid indexes # - train_idx, valid_idx = train_test_split(range(len(df)), test_size=0.2, random_state = 0) train_idx[:5] # + [markdown] slideshow={"slide_type": "subslide"} # __Creating the DataBunch__ # # Now we're ready to pass this information to `TabularDataBunch.from_df` to create the DataBunch that we'll use for training. We will learn the details of the `DataBunch` class in the next lecture. 
# - data = TabularDataBunch.from_df(path, df, dep_var, valid_idx=valid_idx, procs=procs, cat_names=cat_names, bs=32) # + [markdown] slideshow={"slide_type": "subslide"} # We can grab a mini-batch of data and take a look. `show_batch` shows a batch of data in a convenient way: # - data.show_batch(2) # After being processed, the categorical variables are replaced by ids and the continuous variables are normalized. The codes corresponding to categorical variables are all put together, as are all the continuous variables. # # __Note__: As we pick out batches randomly, the output of `show_batch` may not correspond to the output below. next(iter(data.train_dl)) # + [markdown] slideshow={"slide_type": "subslide"} # __A few things to keep in mind here__ # # - __Data Bunch__: A data format for fast.ai input # - __Dependent variable__: The variable to predict # - __Categorical columns__: The text/label columns. Or the columns with low cardinality, eg. gender, type, year etc. # - __Continuous columns__: Numeric value columns, usually with higher cardinality, eg. salary, price, temperature. # - __Transformations__: Feature engineering and handling data, eg. Missing values, Normalisation etc # + [markdown] slideshow={"slide_type": "slide"} # ## Defining a model # + [markdown] slideshow={"slide_type": "subslide"} # Once we have our data ready in a `DataBunch`, we just need to create a model to then define a Learner and start training. # # This is typically composed of following steps : # # 1. __Create Learner__: Create an appropriate learner for data. A learner creates a neural network for us. # 2. __Find the learning rate__: We need to find a suitable learning rate for our training # 3. __Fit the model__ # + [markdown] slideshow={"slide_type": "subslide"} # __Create Learner__ # # The fastai library has a flexible and powerful `TabularModel` in `models.tabular`. To use that function, we just need to specify the embedding sizes for each of our categorical variables. 
# - learn = tabular_learner(data, layers=[200,100], emb_szs={'native-country': 10}, metrics=accuracy) # + [markdown] slideshow={"slide_type": "subslide"} # __Schematic Network architecture__ # # Fastai figures out the default values for our model but lot of these can be customised while creating the data bunch and learner. # # # <img src="https://confusedcoders.com/wp-content/uploads/2019/06/untitled-3-1024x403.jpg" style="width:100%" /> # # [Source](https://confusedcoders.com/wp-content/uploads/2019/06/untitled-3-1024x403.jpg) # + [markdown] slideshow={"slide_type": "subslide"} # Let's print a summary of the model. # - learn.summary(); # + [markdown] slideshow={"slide_type": "subslide"} # __Find the learning rate__ # - learn.lr_find() learn.recorder.plot() # We typically find the point where the slope is steepest # + [markdown] slideshow={"slide_type": "subslide"} # __Fit the model__ based on selected learning rate # - learn.fit_one_cycle(2, max_lr=slice(1e-02)) # + [markdown] slideshow={"slide_type": "slide"} # ## Model Evaluation # + [markdown] slideshow={"slide_type": "subslide"} # __Get predictions__ # + [markdown] slideshow={"slide_type": "-"} # We can use the `Learner.predict` method to get predictions. In this case, we need to pass the row of a dataframe that has the same names of categorical and continuous variables as our training or validation dataframe. # - learn.predict(df.iloc[1]) # + [markdown] slideshow={"slide_type": "fragment"} # To get predictions on the entire training dataset, simply set the ds_type argument accordingly. # - learn.get_preds(ds_type=DatasetType.Valid) # + [markdown] slideshow={"slide_type": "subslide"} # __Show rows result of predictions on thme dataset__ # - learn.show_results(ds_type=DatasetType.Valid) # + [markdown] slideshow={"slide_type": "subslide"} # __Get metric scores__ # - str(learn.metrics) learn.validate(learn.data.valid_dl) # So there is a scope of improving the deep learning model here. 
However this is not bad at all, without any feature engineering and network tuning. # + [markdown] slideshow={"slide_type": "slide"} # ## Embeddings for Categorical Variables # + [markdown] slideshow={"slide_type": "subslide"} # A key technique to making the most of deep learning for tabular data is to use embeddings for your categorical variables. This approach allows for __relationships between categories__ to be captured. # # Examples: # - Saturday and Sunday may have similar behavior, and maybe Friday behaves like an average of a weekend and a weekday. # - Similarly, for zip codes, there may be patterns for zip codes that are geographically near each other, and for zip codes that are of similar socio-economic status. # + [markdown] slideshow={"slide_type": "subslide"} # ### Taking Inspiration from Word Embeddings # # A way to capture these multi-dimensional relationships between categories is to use embeddings. This is the same idea as is used with word embeddings, such as Word2Vec. # # <img src="https://www.tensorflow.org/images/linear-relationships.png" style="width:100%" /> # # [Source](https://www.tensorflow.org/images/linear-relationships.png) # # + [markdown] slideshow={"slide_type": "subslide"} # ### Applying Embeddings for Categorical Variables # # Similarly, when working with categorical variables, we will represent each category by a vector of floating point numbers (the values of this representation are learned as the network is trained). # # For instance, a 4-dimensional version of an embedding for day of week could look like: # # __Sunday [.8, .2, .1, .1]__<br> # __Monday [.1, .2, .9, .9]__<br> # __Tuesday [.2, .1, .9, .8]__ # # Here, Monday and Tuesday are fairly similar, yet they are both quite different from Sunday. # # Again, this is a toy example. 
In practice, our neural network would learn the best representations for each category while it is training, and each dimension (or direction, which doesn’t necessarily line up with ordinal dimensions) could have multiple meanings. Rich relationships can be captured in these distributed representations. # + [markdown] slideshow={"slide_type": "subslide"} # ### Visualizing Embeddings with Tensorboard # - # TensorBoard is a tool for providing the measurements and visualizations needed during the machine learning workflow. It enables # # - tracking experiment metrics like loss and accuracy, # - visualizing the model graph, # - projecting embeddings to a lower dimensional space, # - and much more. # # Let's the TensorBoard notebook extension # %load_ext tensorboard # + [markdown] slideshow={"slide_type": "subslide"} # The SummaryWriter class is your main entry to log data # - from torch.utils.tensorboard import SummaryWriter # %load_ext tensorboard writer = SummaryWriter() # + [markdown] slideshow={"slide_type": "fragment"} # Write model architecture: # - writer.add_graph(learn.model, next(iter(data.train_dl))[0]) # + [markdown] slideshow={"slide_type": "fragment"} # Export embeddings: # - emb_names = list(learn.data.x.classes.keys()) for i, emb in enumerate(learn.model.embeds): emb_name = emb_names[i] writer.add_embedding(emb.weight.data, metadata=learn.data.x[0].classes[emb_name], global_step=i, tag=emb_name) # + [markdown] slideshow={"slide_type": "-"} # Finally, start tensorboard # + slideshow={"slide_type": "subslide"} # %tensorboard --logdir runs --host localhost # + [markdown] slideshow={"slide_type": "subslide"} # ___Colab Workaround___ # # In Colab the dynamic tensorborad plugin isn’t supported yet, but you can still access the data and visualize the embeddings somewhere else: # # 1. Download the desired embedding file (*tensors.tsv*) and metadata # # <img src="images/05/download_embeddings.png" style="width:30%"/> # # 2. 
Upload the files on the official Tensorflow [Embedding Projector](https://projector.tensorflow.org/)
Lecture/05_Deep_Larning_Tabular.ipynb
# --- # jupyter: # jupytext: # split_at_heading: true # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + hide_input=false #hide #default_exp cli from nbdev.showdoc import show_doc # - # # Command line functions # # > Console commands added by the nbdev library #export from nbdev.imports import * from nbdev.export import * from nbdev.sync import * from nbdev.merge import * from nbdev.export2html import * from nbdev.clean import * from nbdev.test import * from fastcore.script import * from subprocess import check_output,STDOUT # `nbdev` comes with the following commands. To use any of them, you must be in one of the subfolders of your project: they will search for the `settings.ini` recursively in the parent directory but need to access it to be able to work. Their names all begin with nbdev so you can easily get a list with tab completion. # - `nbdev_build_docs` builds the documentation from the notebooks # - `nbdev_build_lib` builds the library from the notebooks # - `nbdev_bump_version` increments version in `settings.py` by one # - `nbdev_clean_nbs` removes all superfluous metadata form the notebooks, to avoid merge conflicts # - `nbdev_detach` exports cell attachments to `dest` and updates references # - `nbdev_diff_nbs` gives you the diff between the notebooks and the exported library # - `nbdev_fix_merge` will fix merge conflicts in a notebook file # - `nbdev_install_git_hooks` installs the git hooks that use the last two command automatically on each commit/merge # - `nbdev_nb2md` converts a notebook to a markdown file # - `nbdev_new` creates a new nbdev project # - `nbdev_read_nbs` reads all notebooks to make sure none are broken # - `nbdev_test_nbs` runs tests in notebooks # - `nbdev_trust_nbs` trusts all notebooks (so that the HTML content is shown) # - `nbdev_update_lib` propagates any change in the library back 
to the notebooks # ## Navigating from notebooks to script and back show_doc(nbdev_build_lib) # By default (`fname` left to `None`), the whole library is built from the notebooks in the `lib_folder` set in your `settings.ini`. show_doc(nbdev_update_lib) # By default (`fname` left to `None`), the whole library is treated. Note that this tool is only designed for small changes such as typo or small bug fixes. You can't add new cells in notebook from the library. show_doc(nbdev_diff_nbs) # ## Running tests show_doc(nbdev_test_nbs) # By default (`fname` left to `None`), the whole library is tested from the notebooks in the `lib_folder` set in your `settings.ini`. # ## Building documentation show_doc(nbdev_build_docs) # By default (`fname` left to `None`), the whole documentation is build from the notebooks in the `lib_folder` set in your `settings.ini`, only converting the ones that have been modified since the their corresponding html was last touched unless you pass `force_all=True`. The index is also converted to make the README file, unless you pass along `mk_readme=False`. show_doc(nbdev_nb2md) show_doc(nbdev_detach) # ## Other utils show_doc(nbdev_read_nbs) # By default (`fname` left to `None`), the all the notebooks in `lib_folder` are checked. show_doc(nbdev_trust_nbs) # By default (`fname` left to `None`), the all the notebooks in `lib_folder` are trusted. To speed things up, only the ones touched since the last time this command was run are trusted unless you pass along `force_all=True`. show_doc(nbdev_fix_merge) # When you have merge conflicts after a `git pull`, the notebook file will be broken and won't open in jupyter notebook anymore. This command fixes this by changing the notebook to a proper json file again and add markdown cells to signal the conflict, you just have to open that notebook again and look for `>>>>>>>` to see those conflicts and manually fix them. 
The old broken file is copied with a `.ipynb.bak` extension, so it is still accessible in case the merge wasn't successful.
True --fname """) cmd = "git config --local include.path ../.gitconfig" print(f"Executing: {cmd}") result = subprocess.run(cmd.split(), shell=False, check=False, stderr=subprocess.PIPE) if result.returncode == 0: print("Success: hooks are installed and repo's .gitconfig is now trusted") else: print("Failed to trust repo's .gitconfig") if result.stderr: print(f"Error: {result.stderr.decode('utf-8')}") try: nb_path = Config().nbs_path except: nb_path = Path.cwd() with open(nb_path/'.gitattributes', 'w') as f: f.write("""**/*.ipynb filter=clean-nbs **/*.ipynb diff=ipynb """ ) # This command installs git hooks to make sure notebooks are cleaned before you commit them to GitHub and automatically trusted at each merge. To be more specific, this creates: # - an executable '.git/hooks/post-merge' file that contains the command `nbdev_trust_nbs` # - a `.gitconfig` file that uses `nbev_clean_nbs` has a filter/diff on all notebook files inside `nbs_folder` and a `.gitattributes` file generated in this folder (copy this file in other folders where you might have notebooks you want cleaned as well) # ## Starting a new project #export _template_git_repo = "https://github.com/fastai/nbdev_template.git" #export @call_parse def nbdev_new(name: Param("A directory to create the project in", str), template_git_repo: Param("url to template repo", str)=_template_git_repo): "Create a new nbdev project with a given name." path = Path(f"./{name}").absolute() if path.is_dir(): print(f"Directory {path} already exists. Aborting.") return print(f"Creating a new nbdev project {name}.") def rmtree_onerror(func, path, exc_info): "Use with `shutil.rmtree` when you need to delete files/folders that might be read-only." 
os.chmod(path, stat.S_IWRITE) func(path) try: subprocess.run(['git', 'clone', f'{template_git_repo}', f'{path}'], check=True, timeout=5000) # Note: on windows, .git is created with a read-only flag shutil.rmtree(path/".git", onerror=rmtree_onerror) subprocess.run("git init".split(), cwd=path, check=True) subprocess.run("git add .".split(), cwd=path, check=True) subprocess.run("git commit -am \"Initial\"".split(), cwd=path, check=True) print(f"Created a new repo for project {name}. Please edit settings.ini and run nbdev_build_lib to get started.") except Exception as e: print("An error occured while copying nbdev project template:") print(e) if os.path.isdir(path): try: shutil.rmtree(path, onerror=rmtree_onerror) except Exception as e2: print(f"An error occured while cleaning up. Failed to delete {path}:") print(e2) # `nbdev_new` is a command line tool that creates a new nbdev project based on the [nbdev_template repo](https://github.com/fastai/nbdev_template). You can use a custom template by passing in `template_git_repo`. It'll initialize a new git repository and commit the new project. # # After you run `nbdev_new`, please edit `settings.ini` and run `nbdev_build_lib`. # ## Export - #hide from nbdev.export import notebook2script notebook2script()
nbs/06_cli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MNISTでセグメンテーションに挑戦 # # # + import os import shutil import random import pickle import numpy as np import matplotlib.pyplot as plt from tqdm.notebook import tqdm #from tqdm import tqdm import torch import torchvision import torchvision.transforms as transforms import binarybrain as bb # - print(bb.get_version_string()) #print(bb.get_device_name(0)) bb.get_device_allocated_memory_size() # ## 初期設定 # + # configuration bb.set_device(0) net_name = 'MnistSegClassDistillation' data_path = os.path.join('./data/', net_name + '') rtl_sim_path = '../../verilog/mnist' rtl_module_name = 'MnistSegmentationAndClassification' output_velilog_file = os.path.join(data_path, net_name + '.v') sim_velilog_file = os.path.join(rtl_sim_path, rtl_module_name + '.v') bin_mode = True frame_modulation_size = 3 depth_integration_size = 1 epochs = 0 mini_batch_size = 16 # - # ## データセット準備 # # データセットを自作する # 数値が中央に来るピクセル以外も学習させる必要がるため、28x28のMNSIT画像をタイル状に並べて学習データを作る # + # 並べるタイル数 rows=3 cols=3 # 面積の比率で重みを作っておく if False: areas = np.zeros((11)) for img, label in dataset_train: img = img.numpy() areas[label] += np.mean(img) areas[10] += np.mean(1.0-img) areas /= len(dataset_train) wight = 1 / areas wight /= np.max(wight) def make_teacher_image(gen, rows, cols, margin=0): source_img = np.zeros((1, rows*28, cols*28), dtype=np.float32) teaching_img = np.zeros((11, rows*28, cols*28), dtype=np.float32) for row in range(rows): for col in range(cols): x = col*28 y = row*28 img, label = gen.__next__() source_img[0,y:y+28,x:x+28] = img teaching_img[label,y:y+28,x:x+28] = img teaching_img[10,y:y+28,x:x+28] = (1.0-img) teaching_img = (teaching_img > 0.5).astype(np.float32) # ランダムに反転 if random.random() > 0.5: source_img = 1.0 - source_img return source_img, 
teaching_img[:,margin:-margin,margin:-margin] def transform_data(dataset, n, rows, cols, margin): def data_gen(): l = len(dataset) i = 0 while True: yield dataset[i%l] i += 1 gen = data_gen() source_imgs = [] teaching_imgs = [] for _ in range(n): x, t = make_teacher_image(gen, rows, cols, margin) source_imgs.append(x) teaching_imgs.append(t) return source_imgs, teaching_imgs class MyDatasets(torch.utils.data.Dataset): def __init__(self, source_imgs, teaching_imgs, transforms=None): self.transforms = transforms self.source_imgs = source_imgs self.teaching_imgs = teaching_imgs def __len__(self): return len(self.source_imgs) def __getitem__(self, index): source_img = self.source_imgs[index] teaching_img = self.teaching_imgs[index] if self.transforms: source_img, teaching_img = self.transforms(source_img, teaching_img) return source_img, teaching_img # dataset dataset_path = './data/' dataset_train = torchvision.datasets.MNIST(root=dataset_path, train=True, transform=transforms.ToTensor(), download=True) dataset_test = torchvision.datasets.MNIST(root=dataset_path, train=False, transform=transforms.ToTensor(), download=True) dataset_fname = os.path.join(data_path, 'dataset.pickle') if os.path.exists(dataset_fname): with open(dataset_fname, 'rb') as f: source_imgs_train = pickle.load(f) teaching_imgs_train = pickle.load(f) source_imgs_test = pickle.load(f) teaching_imgs_test = pickle.load(f) else: os.makedirs(data_path, exist_ok=True) source_imgs_train, teaching_imgs_train = transform_data(dataset_train, 4096, rows, cols, 29) source_imgs_test, teaching_imgs_test = transform_data(dataset_test, 128, rows, cols, 29) with open(dataset_fname, 'wb') as f: pickle.dump(source_imgs_train, f) pickle.dump(teaching_imgs_train, f) pickle.dump(source_imgs_test, f) pickle.dump(teaching_imgs_test, f) my_dataset_train = MyDatasets(source_imgs_train, teaching_imgs_train) my_dataset_test = MyDatasets(source_imgs_test, teaching_imgs_test) loader_train = 
torch.utils.data.DataLoader(dataset=my_dataset_train, batch_size=mini_batch_size, shuffle=True) loader_test = torch.utils.data.DataLoader(dataset=my_dataset_test, batch_size=mini_batch_size, shuffle=False) # - def plt_data(x, y): plt.figure(figsize=(16,8)) plt.subplot(1,12,1) plt.imshow(x[0], 'gray') for i in range(11): plt.subplot(1,12,2+i) plt.imshow(y[i], 'gray') plt.show() plt.figure(figsize=(16,8)) for source_imgs, teaching_imgs in loader_test: print(source_imgs[0].shape) print(teaching_imgs[0].shape) for i in range(min(mini_batch_size, 10)): plt_data(source_imgs[i], teaching_imgs[i]) break def view(net, loader): num = 0; for x_imgs, t_imgs in loader: plt.figure(figsize=(16,8)) x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32)) # t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32)) # t1_buf = bb.FrameBuffer.from_numpy(np.array(1.0 - t_imgs[:,10:11,:,:]).astype(np.float32)) y0_buf, y1_buf = net.forward(x_buf, train=False) result_imgs0 = y0_buf.numpy() result_imgs1 = y1_buf.numpy() result_imgs = np.hstack((result_imgs0, result_imgs1)) plt_data(x_imgs[0], result_imgs[0]) num += 1 if num >= 2: break # ## ネットワーク構築 # + # バイナリ時は BIT型を使えばメモリ削減可能 bin_dtype = bb.DType.BIT if bin_mode else bb.DType.FP32 def create_lut_depthwise_conv(name, output_ch, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype): """LUTのDepthwiseConv層生成""" return bb.Convolution2d( bb.Sequential([ bb.DifferentiableLut([output_ch, 1, 1], connection='depthwise', batch_norm=batch_norm, name='lut_dl_depthwise_' + name, bin_dtype=fw_dtype), ]), filter_size=filter_size, padding=padding, name='lut_conv_depthwise_' + name, fw_dtype=fw_dtype) def create_lut_conv1(name, output_ch, filter_size=(1, 1), padding='valid', connection='serial', batch_norm=True, fw_dtype=bin_dtype): """LUTのConv層生成""" return bb.Convolution2d( bb.DifferentiableLut([output_ch, 1, 1], connection=connection, batch_norm=batch_norm, name=(name + '_lut_dl'), 
bin_dtype=fw_dtype), filter_size=filter_size, padding=padding, name=(name + '_lut_conv'), fw_dtype=fw_dtype) def create_lut_conv2(name, output_ch, filter_size=(1, 1), padding='valid', connection='serial', batch_norm=True, fw_dtype=bin_dtype): """LUTのConv層生成""" return bb.Convolution2d( bb.Sequential([ bb.DifferentiableLut([output_ch*6, 1, 1], connection=connection, batch_norm=batch_norm, name=(name + '_lut_dl0'), bin_dtype=fw_dtype), bb.DifferentiableLut([output_ch, 1, 1], connection='serial', batch_norm=batch_norm, name=(name + '_lut_dl1'), bin_dtype=fw_dtype), ]), filter_size=filter_size, padding=padding, name=(name + '_lut_conv'), fw_dtype=fw_dtype) def create_lut_conv_mn(name, input_ch, output_ch, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype): return bb.Sequential([ create_lut_depthwise_conv(name, input_ch, filter_size=filter_size, padding=padding, fw_dtype=fw_dtype), create_lut_conv2(name, output_ch, filter_size=(1, 1), fw_dtype=fw_dtype), ]) def create_dense_affine(name, output_ch, fw_dtype=bin_dtype): """バイナリ化したDenseAffine層生成""" return bb.Sequential([ bb.DenseAffine([output_ch, 1, 1], name=(name + '_dense_affine')), bb.BatchNormalization(name=(name + '_dense_bn')), bb.Binarize(name=(name + '_dense_act'), bin_dtype=fw_dtype), ]) def create_dense_conv(name, output_ch, filter_size=(1, 1), padding='valid', fw_dtype=bin_dtype): """バイナリ化したDenseConv層生成""" return bb.Convolution2d( create_dense_affine(name, output_ch, fw_dtype), filter_size=filter_size, padding=padding, name=(name + '_dense_conv'), fw_dtype=fw_dtype) class SegmentationNetwork(bb.Sequential): """蒸留用ネットワーク""" def __init__(self): self.input_r2b = bb.RealToBinary(frame_modulation_size=frame_modulation_size, bin_dtype=bin_dtype) self.cls_b2r = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) self.seg_b2r = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) # 入力層生成 layer_name = 'input' self.input_lut = 
create_lut_conv1(layer_name, 36, filter_size=(3, 3), connection='random', batch_norm=True, fw_dtype=bin_dtype) self.input_dense = create_dense_conv(layer_name, 36, filter_size=(3, 3), fw_dtype=bin_dtype) self.net_input = bb.Switcher({'lut': self.input_lut, 'dense': self.input_dense}, init_model_name='dense') # Conv層生成 self.net_cnv = bb.Sequential() for i in range(28): layer_name = 'cnv%d'%(i) cnv_lut = create_lut_conv_mn(layer_name, 36, 36, filter_size=(3, 3), padding='valid', batch_norm=True, fw_dtype=bin_dtype) cnv_dense = create_dense_conv(layer_name, 36, filter_size=(3, 3), padding='valid', fw_dtype=bin_dtype) self.net_cnv.append( bb.Switcher({ 'lut': cnv_lut, 'dense': cnv_dense }, init_model_name='dense')) # classifier self.net_cls = bb.Sequential([ bb.Switcher({ 'lut': create_lut_conv2('cls0', 2*36, filter_size=(1, 1)), 'dense': create_dense_conv('cls0', 2*36, filter_size=(1, 1)), }, init_model_name='dense'), bb.Switcher({ 'lut': create_lut_conv2('cls1', 10, filter_size=(1, 1)), 'dense': create_dense_conv('cls1', 10, filter_size=(1, 1)), }, init_model_name='dense') ]) # segmentation self.net_seg = bb.Sequential([ bb.Switcher({ 'lut': create_lut_conv2('seg0', 2*36, filter_size=(1, 1)), 'dense': create_dense_conv('seg0', 2*36, filter_size=(1, 1)), }, init_model_name='dense'), bb.Switcher({ 'lut': create_lut_conv2('seg1', 1, filter_size=(1, 1)), 'dense': create_dense_conv('seg1', 1, filter_size=(1, 1)), }, init_model_name='dense') ]) super(SegmentationNetwork, self).__init__([self.net_input, self.net_cnv, self.net_cls, self.net_seg]) def set_input_shape(self, shape): shape = self.input_r2b.set_input_shape(shape) shape = self.net_input.set_input_shape(shape) shape = self.net_cnv.set_input_shape(shape) shape_cls = self.net_cls.set_input_shape(shape) self.cls_b2r.set_input_shape(shape_cls) shape_seg = self.net_seg.set_input_shape(shape) self.seg_b2r.set_input_shape(shape_seg) def forward(self, x, train): x = self.input_r2b.forward(x, train) x = 
self.net_input.forward(x, train) x = self.net_cnv.forward(x, train) y0 = self.net_cls.forward(x, train) y0 = self.cls_b2r.forward(y0) y1 = self.net_seg.forward(x, train) y1 = self.seg_b2r.forward(y1) return y0, y1 def backward(self, dy0, dy1): dy0 = self.cls_b2r.backward(dy0) dy0 = self.net_cls.backward(dy0) dy1 = self.seg_b2r.backward(dy1) dy1 = self.net_seg.backward(dy1) dy = self.net_cnv.backward(dy0*0.3 + dy1*0.7) dx = self.net_input.backward(dy) return dx # - net = SegmentationNetwork() net.send_command("switch_model dense") net.set_input_shape([1, rows*28, cols*28]) net.set_name(net_name) net.send_command("binary true") #bb.load_networks(data_path, net) bb.load_networks(data_path, net, name='dense_base') # ## 学習実施 # # 学習を行います def learning(data_path, net, epochs=2): # learning loss0 = bb.LossSoftmaxCrossEntropy() loss1 = bb.LossSigmoidCrossEntropy() metrics0 = bb.MetricsCategoricalAccuracy() metrics1 = bb.MetricsBinaryCategoricalAccuracy() optimizer = bb.OptimizerAdam() optimizer.set_variables(net.get_parameters(), net.get_gradients()) for epoch in range(epochs): # learning loss0.clear() metrics0.clear() loss1.clear() metrics1.clear() with tqdm(loader_train) as tqdm_loadr: for x_imgs, t_imgs in tqdm_loadr: x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32)) t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32)) t1_buf = bb.FrameBuffer.from_numpy(1.0 - np.array(t_imgs[:,10:11,:,:]).astype(np.float32)) y0_buf, y1_buf = net.forward(x_buf, train=True) dy0_buf = loss0.calculate(y0_buf, t0_buf) dy1_buf = loss1.calculate(y1_buf, t1_buf) metrics0.calculate(y0_buf, t0_buf) metrics1.calculate(y1_buf, t1_buf) net.backward(dy0_buf, dy1_buf) optimizer.update() tqdm_loadr.set_postfix(loss0=loss0.get(), acc0=metrics0.get(), loss1=loss1.get(), acc1=metrics1.get()) # test loss0.clear() metrics0.clear() loss1.clear() metrics1.clear() for x_imgs, t_imgs in loader_test: x_buf = 
bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32)) t0_buf = bb.FrameBuffer.from_numpy(np.array(t_imgs[:,0:10,:,:]).astype(np.float32)) t1_buf = bb.FrameBuffer.from_numpy(1.0 - np.array(t_imgs[:,10:11,:,:]).astype(np.float32)) y0_buf, y1_buf = net.forward(x_buf, train=False) loss0.calculate(y0_buf, t0_buf) loss1.calculate(y1_buf, t1_buf) metrics0.calculate(y0_buf, t0_buf) metrics1.calculate(y1_buf, t1_buf) bb.save_networks(data_path, net) print('epoch[%d] : loss0=%f acc0=%f loss1=%f acc1=%f' % (epoch, loss0.get(), metrics0.get(), loss1.get(), metrics1.get())) view(net, loader_test) def distillation_input(data_path, net, epochs=4): # learning loss = bb.LossMeanSquaredError() optimizer = bb.OptimizerAdam() net_input = net.net_input bin2real0 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) bin2real1 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) # LUT層をOptimizerに接続 net_input.send_command("switch_model lut") net_input.send_command('parameter_lock false') optimizer.set_variables(net_input.get_parameters(), net_input.get_gradients()) for epoch in range(epochs): # learning loss.clear() with tqdm(loader_train) as tqdm_loadr: for x_imgs, t_imgs in tqdm_loadr: x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32)) x_buf = net.input_r2b.forward(x_buf, train=False) # dense に切り替えて教師データ生成 net_input.send_command("switch_model dense") t_buf = net_input.forward(x_buf, train=False) t_buf = bin2real0.forward(t_buf, train=False) # LUTに戻して学習 net_input.send_command("switch_model lut") y_buf = net_input.forward(x_buf, train=True) y_buf = bin2real1.forward(y_buf, train=True) dy_buf = loss.calculate(y_buf, t_buf) dy_buf = bin2real1.backward(dy_buf) net_input.backward(dy_buf) optimizer.update() tqdm_loadr.set_postfix(loss=loss.get()) bb.save_networks(data_path, net) print('distillation epoch[%d] : loss=%f' % (epoch, loss.get())) def distillation_cnv(data_path, net, index, epochs=4): # 
learning loss = bb.LossMeanSquaredError() optimizer = bb.OptimizerAdam() cnv_layer = net.net_cnv[index] bin2real0 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) bin2real1 = bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype) # LUT層をOptimizerに接続 cnv_layer.send_command("switch_model lut") cnv_layer.send_command('parameter_lock false') optimizer.set_variables(cnv_layer.get_parameters(), cnv_layer.get_gradients()) for epoch in range(epochs): # learning loss.clear() with tqdm(loader_train) as tqdm_loadr: for x_imgs, t_imgs in tqdm_loadr: # LUTに切り替えて前段計算 net.send_command("switch_model lut") x_buf = bb.FrameBuffer.from_numpy(np.array(x_imgs).astype(np.float32)) x_buf = net.input_r2b.forward(x_buf, train=False) x_buf = net.net_input.forward(x_buf, train=False) for i in range(index): x_buf = net.net_cnv[i].forward(x_buf, train=False) # dense に切り替えて教師データ生成 cnv_layer.send_command("switch_model dense") t_buf = cnv_layer.forward(x_buf, train=False) t_buf = bin2real0.forward(t_buf, train=False) # LUTに戻して学習 cnv_layer.send_command("switch_model lut") y_buf = cnv_layer.forward(x_buf, train=True) y_buf = bin2real1.forward(y_buf, train=True) dy_buf = loss.calculate(y_buf, t_buf) dy_buf = bin2real1.backward(dy_buf) cnv_layer.backward(dy_buf) optimizer.update() tqdm_loadr.set_postfix(loss=loss.get()) bb.save_networks(data_path, net) print('distillation epoch[%d] : loss=%f' % (epoch, loss.get())) # 基準となるDenseAffineで学習 if not bb.load_networks(data_path, net, name='dense_base'): learning(os.path.join(data_path, 'dense'), net, epochs=32) bb.save_networks(data_path, net, name='dense_split', write_layers=True) bb.save_networks(data_path, net, name='dense_base') bb.save_networks(data_path, net) # 入力層のLUT学習 layer_name = 'input' if not bb.load_networks(data_path, net, name=layer_name): # 蒸留 distillation_input(os.path.join(data_path, layer_name), net, epochs=4) # 全体初期化 net.send_command("switch_model dense") 
net.send_command('parameter_lock true') view(net, loader_test) # LUT切り替え net.net_input.send_command("switch_model lut") view(net, loader_test) # LUT個別学習 net.net_input.send_command('parameter_lock false') # learning(os.path.join(data_path, layer_name), net, epochs=2) # 蒸留で代替 # 後段含めた学習 net.send_command('parameter_lock false') learning(os.path.join(data_path, layer_name), net, epochs=2) # 保存 bb.save_networks(data_path, net, name=(layer_name + '_split'), write_layers=True) bb.save_networks(data_path, net, name=layer_name) bb.save_networks(data_path, net) # 畳み込み層のLUT学習 for i in range(0, 29): layer_name = 'cnv%d'%i print('----- %s -----'%layer_name) if not bb.load_networks(data_path, net, name=layer_name): # 蒸留 distillation_cnv(os.path.join(data_path, layer_name), net, i, epochs=2) # 全体初期化 net.send_command("switch_model dense") net.send_command('parameter_lock true') # LUT切り替え net.net_input.send_command("switch_model lut") for j in range(i+1): net.net_cnv[j].send_command("switch_model lut") view(net, loader_test) # 個別学習 net.net_cnv[i].send_command('parameter_lock false') # learning(os.path.join(data_path, layer_name), net, epochs=2) # 蒸留で代替 # 後段含めた学習 net.send_command('parameter_lock false') net.net_input.send_command("parameter_lock true") for j in range(i): net.net_cnv[j].send_command("parameter_lock true") learning(os.path.join(data_path, layer_name), net, epochs=2) # 保存 bb.save_networks(data_path, net, name=(layer_name + '_split'), write_layers=True) bb.save_networks(data_path, net, name=layer_name) bb.save_networks(data_path, net) ---------------------- bb.load_networks(data_path, net, name='cnv0') print(bb.get_device_allocated_memory_size()) gc.collect() bb.garbage_collect_device_memory() bb.get_device_allocated_memory_size()
tests/python/mnist/MnistSegDistillation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Objectives of Preprocessing
#
# - data should be a 2D array
# - input data has to be numbers
# - no nan or inf
# - columns are scaled to similar ranges (mean=0, variance=1)
# - columns should not be collinear (cx1!=k*cx2)
# - rows should not be causally dependent
# - data should be 100 times larger than the number of columns

# ## Import Python Libraries

import shutil
import os
import pathlib
import subprocess  # used by the area-of-interest clipping cell (ogr2ogr call) below
from pprint import pprint
from pathlib import Path
import csv

# +
import rasterio
from rasterio.warp import calculate_default_transform, reproject, Resampling
import fiona
import pandas as pd
import xgboost as xgb
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import preprocessing
# -

# %load_ext memory_profiler

# ## Define Input Datasets
#
# - features
# - targets
# - out of sample
# - area of interest

# +
# %%time
# Covariate rasters used as model features. Duplicates in this list are
# detected and removed by the de-duplication cell below.
feature_file_paths = [
    Path('/g/data/ge3/sheece/LOC_distance_to_coast.tif'),
    Path('/g/data/ge3/sheece/mrvbf_9.tif'),
    Path('/g/data/ge3/sheece/relief_mrvbf_3s_mosaic.tif'),
    Path('/g/data/ge3/sheece/relief_elev_focalrange1000m_3s.tif'),
    Path('/g/data/ge3/sheece/relief_elev_focalrange300m_3s.tif'),
    Path('/g/data/ge3/sheece/saga_wetSM_85.tif'),
    Path('/g/data/ge3/sheece/tpi_300.tif'),
    Path('/g/data/ge3/sheece/slope_fill2.tif'),
    Path('/g/data/ge3/sheece/dem_fill.tif'),
    Path('/g/data/ge3/sheece/3dem_mag2.tif'),
    Path('/g/data/ge3/sheece/3dem_mag1_fin.tif'),
    Path('/g/data/ge3/sheece/3dem_mag0.fin.tif'),
    Path('/g/data/ge3/sheece/relief_roughness.tif'),
    Path('/g/data/ge3/sheece/LATITUDE_GRID1_clip.tif'),
    Path('/g/data/ge3/sheece/LATITUDE_GRID1_clip.tif'),
    Path('/g/data/ge3/sheece/Dose_2016.tif'),
    Path('/g/data/ge3/sheece/Potassium_2016.tif'),
    Path('/g/data/ge3/sheece/Thorium_2016.tif'),
    Path('/g/data/ge3/sheece/Rad2016U_Th.tif'),
    Path('/g/data/ge3/sheece/Rad2016K_Th.tif'),
    Path('/g/data/ge3/sheece/national_Wii_RF_multirandomforest_prediction.tif'),
    Path('/g/data/ge3/sheece/si_geol1.tif'),
    Path('/g/data/ge3/sheece/ceno_euc_aust1.tif'),
    Path('/g/data/ge3/sheece/Grav_lane_clip.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-ND-NIR-GREEN.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-ND-RED-BLUE.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-ND-SWIR1-SWIR2.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg_BLUE+SWIR2.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-ND-SWIR1-NIR.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-CLAY-PC2.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-ND-RED-BLUE.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-RED.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-GREEN.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-l8-all-85m-avg-BLUE.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-l8-all-85m-avg-NIR.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-SWIR1.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/be-30y-85m-avg-SWIR2.filled.lzw.nodata.tif'),
    Path('/g/data/ge3/sheece/s2-dpca-85m.tif'),
    Path('/g/data/ge3/sheece/water-85m.tif'),
    Path('/g/data/ge3/sheece/clim_EPA_albers.tif'),
    Path('/g/data/ge3/sheece/Clim_Prescott_LindaGregory.tif'),
    Path('/g/data/ge3/sheece/clim_PTA_albers.tif'),
    Path('/g/data/ge3/sheece/clim_WDA_albers.tif'),
    Path('/g/data/ge3/sheece/clim_RSM_albers.tif'),
    Path('/g/data/ge3/sheece/LONGITUDE_GRID1_clip.tif')
]

#target dataset small
# /g/data/ge3/sheece/0_50cm_2021_albers_C_sm_T_resampled_small.shp
#target dataset complete
# /g/data/ge3/sheece/0_50cm_2021_albers_C_sm_T_resampled.shp
target_file_path = Path("/g/data/ge3/sheece/0_50cm_2021_albers_C_sm_T_resampled.shp")

# define a shape for area of interest
area_of_interest_file_path = None

#OOS
# /g/data/ge3/sheece/0_50cm_2021_albers_C_oos.shp
out_of_sample_file_path = Path("/g/data/ge3/sheece/0_50cm_2021_albers_C_oos.shp")

root = Path('/g/data/ge3/sheece')

# +
# remove any duplicate datasets in input
# (removing one occurrence of each unique value leaves exactly the duplicates behind)
temp_feature_file_paths = feature_file_paths.copy()
unique_values = list(set(temp_feature_file_paths))
for unique_value in unique_values:
    temp_feature_file_paths.remove(unique_value)
print("Following are duplicates:")
print(temp_feature_file_paths)
feature_file_paths = list(set(feature_file_paths))
# -

# ensure that the datasets provided in input exist
import os.path
# BUG FIX: iterate over a copy — removing from the list while iterating it
# skips the element following each removal.
for feature_file_path in list(feature_file_paths):
    if not os.path.isfile(feature_file_path):
        feature_file_paths.remove(feature_file_path)
        print("File not exist", feature_file_path)

# ## Standardising Datasets

# +
# check for multiple bands
# if a dataset contains more than one band then create a new dataset for each band
# BUG FIX: iterate over a copy so that append/remove do not disturb the loop.
for feature_file_path in list(feature_file_paths):
    with rasterio.open(feature_file_path) as dataset:
        if len(dataset.indexes) > 1:
            print("Processing dataset: ", feature_file_path)
            for band in dataset.indexes:
                path_to_new_dataset = root / str(feature_file_path.stem + "_" + str(band) + ".tif")
                print("Reading data from band: ", band)
                new_dataset = rasterio.open(
                    path_to_new_dataset,
                    'w',
                    driver='GTiff',
                    height=dataset.shape[0],
                    width=dataset.shape[1],
                    count=1,
                    dtype=dataset.meta['dtype'],
                    crs=dataset.crs,
                    transform=dataset.transform
                )
                new_dataset.write(dataset.read(band), 1)
                new_dataset.close()
                print("New dataset added:", path_to_new_dataset)
                feature_file_paths.append(path_to_new_dataset)
            print("Removing multiband dataset:", feature_file_path)
            feature_file_paths.remove(feature_file_path)

# +
# check projection
# if a dataset is not in EPSG:3577; reproject it in EPSG:3577 projection
crs_epsg3577 = rasterio.crs.CRS.from_string('EPSG:3577')

# BUG FIX: iterate over a copy (see above).
for feature_file_path in list(feature_file_paths):
    with rasterio.open(feature_file_path) as src:
        if crs_epsg3577 != src.crs:
            print("Converting dataset: " + str(feature_file_path))
            transform, width, height = calculate_default_transform(
                src.crs, crs_epsg3577, src.width, src.height, *src.bounds)
            kwargs = src.meta.copy()
            kwargs.update({'crs': crs_epsg3577, 'transform': transform,
                           'width': width, 'height': height})
            new_feature_file_path = feature_file_path.parent / str(feature_file_path.stem + "_reprojected.tif")
            with rasterio.open(new_feature_file_path, 'w', **kwargs) as dst:
                reproject(
                    source=rasterio.band(src, 1),
                    destination=rasterio.band(dst, 1),
                    src_transform=src.transform,
                    src_crs=src.crs,
                    dst_transform=transform,
                    dst_crs=crs_epsg3577,
                    resampling=Resampling.nearest)
            feature_file_paths.remove(feature_file_path)
            feature_file_paths.append(new_feature_file_path)
            print("Dataset: ", feature_file_path, " has been reprojected and saved as: ", new_feature_file_path)

# +
# check for area of interest
# if a user has provided a smaller area of interest then crop the input datasets accordingly
import fiona
import rasterio
from rasterio.mask import mask

if area_of_interest_file_path is not None:
    # apply crop to feature datasets
    with fiona.open(area_of_interest_file_path) as shapefile:
        geoms = [feature["geometry"] for feature in shapefile]
    # BUG FIX: iterate over a copy, and point the pipeline at the cropped
    # rasters — the originals are moved to old/ and would otherwise be
    # opened (and fail) by the csv-creation cell below.
    for feature_file_path in list(feature_file_paths):
        if "cropped" not in feature_file_path.name:
            print("Cropping : " + feature_file_path.stem)
            # load the raster, mask it by the polygon and crop it
            with rasterio.open(feature_file_path) as src:
                out_image, out_transform = mask(src, geoms, crop=True)
                out_meta = src.meta.copy()
            # save the resulting raster
            out_meta.update({
                "driver": "GTiff",
                "height": out_image.shape[1],
                "width": out_image.shape[2],
                "transform": out_transform
            })
            cropped_path = root / (feature_file_path.stem + '_cropped.tif')
            with rasterio.open(cropped_path, "w", **out_meta) as dest:
                dest.write(out_image)
            shutil.move(feature_file_path, root / ("old/" + feature_file_path.name))
            feature_file_paths.remove(feature_file_path)
            feature_file_paths.append(cropped_path)

    # apply crop to target vector files(.shp)
    if "cropped" not in target_file_path.name:
        print("Cropping: " + target_file_path.name)
        clipped_file = root / (target_file_path.stem + '_cropped.shp')
        callstr = ['ogr2ogr',
                   '-clipsrc',
                   area_of_interest_file_path,
                   clipped_file,
                   target_file_path]
        proc = subprocess.Popen(callstr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        shutil.move(target_file_path, root / ("old/" + target_file_path.name))
        # BUG FIX: use the clipped shapefile from here on.
        target_file_path = clipped_file
else:
    print("No area of interest provided.")

# +
# %%time
# %%memit
# create a vector csv file from the input datasets
# this csv file will be used by different machine learning algorithms

# create iterators to inputs feature dataset and target dataset
datasets = []
for feature_file_path in feature_file_paths:
    datasets.append(rasterio.open(feature_file_path))
target_handle = fiona.open(target_file_path)

# create the first row containing col names
head_row = ['target']
for feature_file_path in feature_file_paths:
    head_row.append(feature_file_path.stem)
head_row.append("x")
head_row.append("y")
csv_rowlist = [head_row]

if not os.path.isdir("../data"):
    os.mkdir("../data")

with open('../data/input_dataset.csv', 'w', newline='', encoding='utf-8') as file:
    writer = csv.writer(file)
    # iterate through list of targets
    print("Creating csv file ...... ")
    print("Please wait.")
    for target in target_handle:
        value = target["properties"]["con"]
        x, y = target["geometry"]["coordinates"]
        new_row = [value]
        # iterate through list of features and sample each raster at (x, y)
        for dataset in datasets:
            new_row.append(next(dataset.sample([(x, y)]))[0])
        new_row.append(x)
        new_row.append(y)
        csv_rowlist.append(new_row)
        # buffer 1000 rows in memory then write them to csv
        # this reduces the I/O operations required and speeds up creating csv file
        if len(csv_rowlist) % 1000 == 0:
            writer.writerows(csv_rowlist)
            csv_rowlist = []
    # BUG FIX: flush the remaining buffered rows — the row count is rarely an
    # exact multiple of 1000, and with fewer than 1000 rows nothing (not even
    # the header) was being written at all.
    writer.writerows(csv_rowlist)
print("Output file has been created.")

# close iterators
target_handle.close()
for dataset in datasets:
    dataset.close()
# -

# ## Cleaning dataset

# +
# %%time
input_file = '../data/input_dataset.csv'
print("input_file: ", input_file)
df = pd.read_csv(input_file)
input_shape = df.shape

# drop positional arguments
df = df.drop(["x", "y"], axis=1).astype('float32')

# drop no data values (nan/inf and the -9999.0 raster nodata sentinel)
df = df[~df.isin([np.nan, np.inf, -np.inf, -9999.0]).any(axis=1)]

# write updated dataframe to csv file
df.to_csv("../data/formated_dataset.csv", index=None, header=df.columns.values)
output_shape = df.shape
print("Number of rows removed: ", input_shape[0] - output_shape[0])
print("Number of cols removed: ", input_shape[1] - output_shape[1])
# -

# ## Save Normalized Dataset

# #### perform maximum absolute transformation

# +
# %%time
def maximum_absolute_scaling(df):
    """Scale every column of *df* into [-1, 1] by dividing by its max |value|."""
    # copy the dataframe
    df_scaled = df.copy()
    # apply maximum absolute scaling
    for column in df_scaled.columns:
        df_scaled[column] = df_scaled[column] / df_scaled[column].abs().max()
    return df_scaled

input_file = '../data/formated_dataset.csv'
print("input_file: ", input_file)
df = pd.read_csv(input_file)
df = maximum_absolute_scaling(df)
df.to_csv("../data/max_abs_df.csv", index=None, header=df.columns.values)
print("output_file: ../data/max_abs_df.csv")
# -

# #### perform quantile transformation

# %%time
input_file = '../data/formated_dataset.csv'
print("input_file: ", input_file)
df = pd.read_csv(input_file)
scaler = preprocessing.QuantileTransformer().fit(df)
quantile_df = scaler.transform(df)
pd.DataFrame(quantile_df).to_csv("../data/quantile_df.csv", index=None, header=df.columns.values)
print("output: quantile_df")

# #### perform standard scaler transformation

# %%time
input_file = '../data/formated_dataset.csv'
print("input_file: ", input_file)
df = pd.read_csv(input_file)
scaler = preprocessing.StandardScaler().fit(df)
scaled_df = scaler.transform(df)
# BUG FIX: this previously wrote quantile_df (the output of the *previous*
# cell) into scaler_df.csv; write the standard-scaled array instead.
pd.DataFrame(scaled_df).to_csv("../data/scaler_df.csv", index=None, header=df.columns.values)
print("output: scaler_df")

# ## Exploring Normalization
# - Rescaling (min-max normalization)
# - Standardization (Z-scale normalization)
#

# ## density plots for input features

dataset_id = 23

plt.rcParams['figure.figsize'] = [8, 4]
plt.rcParams['figure.dpi'] = 100
col_name = df.columns.values[dataset_id]
plt.xlabel('Values')
plt.ylabel('Count')
plt.title(col_name)
_ = plt.hist(df[col_name], color='blue', edgecolor='black',
             bins=np.arange(-10, 100, 1), label=col_name)

# +
plt.rcParams['figure.figsize'] = [8, 4]
plt.rcParams['figure.dpi'] = 100
col_name = df.columns.values[dataset_id]
plt.xlabel('Values')
plt.ylabel('Count')
plt.title(col_name)
# BUG FIX: scaled_df is a plain ndarray, so scaled_df[dataset_id] selected a
# *row*; select the column matching col_name instead.
_ = plt.hist(scaled_df[:, dataset_id], color='blue', edgecolor='black',
             bins=np.arange(-5, 4, 0.1), label=col_name)
# -

plt.rcParams['figure.figsize'] = [8, 4]
plt.rcParams['figure.dpi'] = 100
col_name = df.columns.values[dataset_id]
plt.xlabel('Values')
plt.ylabel('Count')
plt.title(col_name)
# BUG FIX: column, not row (see above).
_ = plt.hist(quantile_df[:, dataset_id], color='blue', edgecolor='black',
             bins=np.arange(-5, 4, 0.1), label=col_name)

# ### references
notebooks/preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- #load it like this import Pseudo_Goldstone as pg # + dimN=2 gauge='feyn' Fields ,ParameterSymbol=pg.Definitions(dimN,gauge) LMassInt=pg.GetLagrangian(Fields) Point_N=pg.IdentifyInteractions(LMassInt,Fields ,Parallel=True) pg.Make_Feynman_Rules(Point_N) pg.CheckInteractions(Point_N,LMassInt,Fields) pg.StoreVert(Point_N,Fields,ParameterSymbol,dimN,gauge,Directory='ttt') # -
How_it_works.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9 (tensorflow) # language: python # name: tensorflow # --- # + [markdown] id="3CF2edFAI4Uj" # <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_02_tokenizers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # # + [markdown] id="RvbM-RwHI4Ul" # # T81-558: Applications of Deep Neural Networks # **Module 11: Natural Language Processing with Hugging Face** # * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) # * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # + [markdown] id="5dE7A-0aI4Ul" # # Module 11 Material # # * Part 11.1: Introduction to Hugging Face [[Video]](https://www.youtube.com/watch?v=1IHXSbz02XM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_01_huggingface.ipynb) # * **Part 11.2: Hugging Face Tokenizers** [[Video]](https://www.youtube.com/watch?v=U-EGU1RyChg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_02_tokenizers.ipynb) # * Part 11.3: Hugging Face Datasets [[Video]](https://www.youtube.com/watch?v=Mq5ODegT17M&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_03_hf_datasets.ipynb) # * Part 11.4: Training Hugging Face Models [[Video]](https://www.youtube.com/watch?v=https://www.youtube.com/watch?v=l69ov6b7DOM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) 
[[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_04_hf_train.ipynb) # * Part 11.5: What are Embedding Layers in Keras [[Video]](https://www.youtube.com/watch?v=OuNH5kT-aD0list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN&index=58) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_11_05_embedding.ipynb) # + [markdown] id="9Z4A091yI4Um" # # Google CoLab Instructions # # The following code ensures that Google CoLab is running the correct version of TensorFlow. # + colab={"base_uri": "https://localhost:8080/"} id="RJmzbge9I4Um" outputId="4685a76c-cc37-490a-d589-db16eb6e9c4c" try: # %tensorflow_version 2.x COLAB = True print("Note: using Google CoLab") except: print("Note: not using Google CoLab") COLAB = False # + [markdown] id="vdNN6e45I4Un" # # Part 11.2: Hugging Face Tokenizers # # Tokenization is the task of chopping it up into pieces, called tokens, perhaps at the same time throwing away certain characters, such as punctuation. Consider how the program might break up the following sentences into words. # # * This is a test. # * Ok, but what about this? # * Is U.S.A. the same as USA.? # * What is the best data-set to use? # * I think I will do this-no wait; I will do that. # # The hugging face includes tokenizers that can break these sentences into words and subwords. Because English, and some other languages, are made up of common word parts, we tokenize subwords. For example, a gerund word, such as "sleeping," will be tokenized into "sleep" and "##ing". # # We begin by installing Hugging Face if needed. # # + colab={"base_uri": "https://localhost:8080/"} id="3jGET2abMjcl" outputId="35b8be0f-bc7e-42d0-f177-ee097d4a076c" # HIDE OUTPUT # !pip install transformers # !pip install transformers[sentencepiece] # + [markdown] id="Wa1ncodn8y0r" # First, we create a Hugging Face tokenizer. There are several different tokenizers available from the Hugging Face hub. 
For this example, we will make use of the following tokenizer:
#
# * distilbert-base-uncased
#
# This tokenizer is based on BERT and assumes case-insensitive English text.
# + id="kSGtW0E7xcK9"
from transformers import AutoTokenizer

model = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model)

# + [markdown] id="YofjyJw59U2x"
# We can now tokenize a sample sentence.

# + colab={"base_uri": "https://localhost:8080/"} id="NVCmyao2zLQ3" outputId="6b33031a-8667-4431-aae2-26595195d69d"
encoded = tokenizer('Tokenizing text is easy.')
print(encoded)

# + [markdown] id="4QgmlpyezhMy"
# The result of this tokenization contains two elements:
# * input_ids - The individual subword indexes, each index uniquely identifies a subword.
# * attention_mask - Which values in *input_ids* are meaningful and not padding.
# This sentence had no padding, so all elements have an attention mask of "1". Later, we will request the output to be of a fixed length, introducing padding, which always has an attention mask of "0". Though each tokenizer can be implemented differently, the attention mask of a tokenizer is generally either "0" or "1".
#
# Due to subwords and special tokens, the number of tokens may not match the number of words in the source string. We can see the meanings of the individual tokens by converting these IDs back to strings.

# + colab={"base_uri": "https://localhost:8080/"} id="Ww3XPc-i2Y6c" outputId="6179b416-61cd-4142-fa15-ae2296e9eab7"
tokenizer.convert_ids_to_tokens(encoded.input_ids)

# + [markdown] id="7KM7VRJECoGU"
# As you can see, there are two special tokens placed at the beginning and end of each sequence. We will soon see how we can include or exclude these special tokens. These special tokens can vary per tokenizer; however, [CLS] begins a sequence for this tokenizer, and [SEP] ends a sequence. You will also see that the gerund "tokenizing" is broken into "token" and "##izing".
#
# For this tokenizer, the special tokens occur between 100 and 103.
Most Hugging Face tokenizers use this approximate range for special tokens. The value zero (0) typically represents padding. We can display all special tokens with this command. # # # + colab={"base_uri": "https://localhost:8080/"} id="EtQiOmSl2rXt" outputId="6f5f2aef-d718-4675-d0a5-9c6c9d960c87" tokenizer.convert_ids_to_tokens([0, 100, 101, 102, 103]) # + [markdown] id="1nQ-r6bz3ESN" # This tokenizer supports these common tokens: # # * \[CLS\] - Sequence beginning. # * \[SEP\] - Sequence end. # * \[PAD\] - Padding. # * \[UNK\] - Unknown token. # * \[MASK\] - Mask out tokens for a neural network to predict. Not used in this book, see [MLM paper](https://arxiv.org/abs/2109.01819). # # It is also possible to tokenize lists of sequences. We can pad and truncate sequences to achieve a standard length by tokenizing many sequences at once. # # # + colab={"base_uri": "https://localhost:8080/"} id="TI4RZXhc4v9k" outputId="2ae7f2f9-3d8a-421e-c9b9-4dd933817cd3" text = [ "This movie was great!", "I hated this move, waste of time!", "Epic?" ] encoded = tokenizer(text, padding=True, add_special_tokens=True) print("**Input IDs**") for a in encoded.input_ids: print(a) print("**Attention Mask**") for a in encoded.attention_mask: print(a) # + [markdown] id="1rIF8TEEF9C-" # Notice the **input_id**'s for the three movie review text sequences. Each of these sequences begins with 101 and we pad with zeros. Just before the padding, each group of IDs ends with 102. The attention masks also have zeros for each of the padding entries. # # We used two parameters to the tokenizer to control the tokenization process. Some other useful [parameters](https://huggingface.co/docs/transformers/main_classes/tokenizer) include: # # * add_special_tokens (defaults to True) Whether or not to encode the sequences with the special tokens relative to their model. # * padding (defaults to False) Activates and controls truncation. 
# * max_length (optional) Controls the maximum length to use by one of the truncation/padding parameters. # -
t81_558_class_11_02_tokenizers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data Mining Challange: *Reddit Gender Text-Classification* # # ### Modules # + # Numpy & matplotlib for notebooks # %pylab inline # Pandas for data analysis and manipulation import pandas as pd # Sparse matrix package for numeric data. from scipy import sparse # Module for word embedding (word2vector) import gensim # Module for progress monitoring import tqdm # Sklearn from sklearn.preprocessing import StandardScaler # to standardize features by removing the mean and scaling to unit variance (z=(x-u)/s) from sklearn.neural_network import MLPClassifier # Multi-layer Perceptron classifier which optimizes the log-loss function using LBFGS or sdg. from sklearn.svm import SVC # Support Vector Classification from sklearn.ensemble import RandomForestClassifier # A meta-estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting from sklearn.decomposition import PCA, TruncatedSVD # Principal component analysis (PCA); dimensionality reduction using truncated SVD. 
from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB # Naive Bayes classifier for multinomial models from sklearn.feature_extraction.text import CountVectorizer # Convert a collection of text documents to a matrix of token counts from sklearn.metrics import roc_auc_score as roc # Compute Area Under the Receiver Operating Characteristic Curve from prediction scores from sklearn.metrics import roc_curve, auc # Compute ROC; Compute Area Under the Curve (AUC) using the trapezoidal rule from sklearn.model_selection import RandomizedSearchCV, GridSearchCV # Exhaustive search over specified parameter values for a given estimator from sklearn.model_selection import cross_val_score # Evaluate a score by cross-validation from sklearn.model_selection import train_test_split # to split arrays or matrices into random train and test subsets from sklearn.model_selection import KFold # K-Folds cross-validator providing train/test indices to split data in train/test sets. 
from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import StratifiedKFold import nltk import re from nltk.stem import WordNetLemmatizer from bs4 import BeautifulSoup from nltk.corpus import stopwords from collections import defaultdict from nltk.tokenize import word_tokenize from nltk import pos_tag from nltk.corpus import wordnet as wn #XGBoost from xgboost import XGBRegressor # Matplotlib import matplotlib # Data visualization import matplotlib.pyplot as plt import matplotlib.patches as mpatches # Seaborn import seaborn as sns # Statistical data visualization (based on matplotlib) # Joblib import joblib # To save models # - # ## Data Loading and Manipulation # + # load data train_data = pd.read_csv("../input/dataset/train_data.csv") target = pd.read_csv("../input/dataset/train_target.csv") test_data = pd.read_csv("../input/dataset/test_data.csv") # create authors gender dictionary author_gender = {} for i in range(len(target)): author_gender[target.author[i]] = target.gender[i] # X is the aggregated comments list X = [] # the genders y = [] # lengths of X elements X_len = [] for author, group in train_data.groupby("author"): X.append(group.body.str.cat(sep = " ")) X_len.append([len(group.body)]) y.append(author_gender[author]) # - # ## Preprocessing # + # preprocessing functions def remove_number(text): num = re.compile(r'[-+]?[.\d]*[\d]+[:,.\d]*') return num.sub(r'NUMBER', text) def remove_URL(text): url = re.compile(r'https?://\S+|www\.\S+') return url.sub(r'URL',text) def remove_repeat_punct(text): rep = re.compile(r'([!?.]){2,}') return rep.sub(r'\1 REPEAT', text) def remove_elongated_words(text): rep = re.compile(r'\b(\S*?)([a-z])\2{2,}\b') return rep.sub(r'\1\2 ELONG', text) def remove_allcaps(text): caps = re.compile(r'([^a-z0-9()<>\'`\-]){2,}') return caps.sub(r'ALLCAPS', text) def transcription_smile(text): eyes = "[8:=;]" nose = "['`\-]" smiley = re.compile(r'[8:=;][\'\-]?[)dDp]') #smiley = 
re.compile(r'#{eyes}#{nose}[)d]+|[)d]+#{nose}#{eyes}/i') return smiley.sub(r'SMILE', text) def transcription_sad(text): eyes = "[8:=;]" nose = "['`\-]" smiley = re.compile(r'[8:=;][\'\-]?[(\\/]') return smiley.sub(r'SADFACE', text) def transcription_heart(text): heart = re.compile(r'<3') return heart.sub(r'HEART', text) # + # tags Part of Speech (POS), because teh lemmatizer needs it tag_map = defaultdict(lambda : wn.NOUN) # wn does a grammatical analysis tag_map['J'] = wn.ADJ tag_map['V'] = wn.VERB tag_map['R'] = wn.ADV # create lemmatizer word_Lemmatized = WordNetLemmatizer() def review_to_words(raw_body): # remove html tags body_text = BeautifulSoup(raw_body).get_text() #letters_only = re.sub("[^a-zA-Z]", " ", body_text) # lowercase all text words = body_text.lower() # remove urls text = remove_URL(words) # remove numbers text = remove_number(text) # remove smiles text = transcription_sad(text) text = transcription_smile(text) text = transcription_heart(text) text = remove_elongated_words(text) words = remove_repeat_punct(text) # tokenizes and pass to lemmatizer, which lemmatizes taking tags into account (see before) words = word_tokenize(words) # we don't remove stop words, because doing it on combination with removing the 40 (trial & error estimated parameter) most utilized words (see below) decreases performance #stops = set(stopwords.words("english")) #meaningful_words = [w for w in words if not w in stops] Final_words = [] for word, tag in pos_tag(words): word_Final = word_Lemmatized.lemmatize(word,tag_map[tag[0]]) Final_words.append(word_Final) # returns lemmatized texts as strings return( " ".join(Final_words)) # - clean_train_comments = [review_to_words(x) for x in X] # ## Train Countvectorizer, Optimize Input for Model Training # + # We tried both tfidf and countvectorizer bow. 
The best performing turned out to be the countvectorizer vectorizer = CountVectorizer(analyzer = "word", max_features = 2000) train_data_features = vectorizer.fit_transform(clean_train_comments).toarray() print(train_data_features.shape) # Take a look at the words in the vocabulary vocab = vectorizer.get_feature_names() print(vocab) import numpy as np # Sum up the counts of each vocabulary word dist = np.sum(train_data_features, axis=0) # For each, print the vocabulary word and the number of times it # appears in the training set for tag, count in zip(vocab, dist): print(count, tag) # removes the 40 most utilized words for _ in range(40): index = np.argmax(dist) train_data_features = np.delete(train_data_features, index, axis = 1) print(train_data_features.shape) s = np.concatenate((train_data_features,np.array(X_len)),axis = 1) print(s.shape) y = np.array(y) # - # ## Train Data TruncatedSVD visualization # + # Plot the test data along the 2 dimensions of largest variance def plot_LSA(test_data, test_labels, savepath="PCA_demo.csv", plot=True): lsa = TruncatedSVD(n_components=2) lsa.fit(test_data) lsa_scores = lsa.transform(test_data) color_mapper = {label:idx for idx,label in enumerate(set(test_labels))} color_column = [color_mapper[label] for label in test_labels] colors = ['orange','blue'] if plot: plt.scatter(lsa_scores[:,0], lsa_scores[:,1], s=8, alpha=.8, c=test_labels, cmap=matplotlib.colors.ListedColormap(colors)) orange_patch = mpatches.Patch(color='orange', label='M') blue_patch = mpatches.Patch(color='blue', label='F') plt.legend(handles=[orange_patch, blue_patch], prop={'size': 20}) fig = plt.figure(figsize=(8, 8)) plot_LSA(s, y) plt.show() # - # ## Model Training and Prediction # + # XGBoost model with parameters set with a RandomGridSearch # subsample: Subsample ratio of the training instances. Prevents overfitting. # subsample ratio of columns when constructing each tree. 
my_model = XGBRegressor(objective = "reg:logistic",n_estimators=3550, learning_rate=0.01, n_jobs=4,subsample = 0.9, min_child_weight = 1,max_depth=4,gamma=1.5,colsample_bytree=0.6 ) # fits my_model.fit(s, y) # + # Prepare and predict the test dataset X_test = [] X_len_test = [] for author, group in test_data.groupby("author"): X_test.append(group.body.str.cat(sep = " ")) X_len_test.append([len(group.body)]) clean_comments_test = [review_to_words(x) for x in X_test] data_features = vectorizer.transform(clean_comments_test).toarray() for _ in range(40): index = np.argmax(dist) data_features = np.delete(data_features, index, axis = 1) s_test = np.concatenate((data_features,X_len_test),axis = 1) # - # Save the predictions y_predict = my_model.predict(s_test) np.save('y_predict_testXGBnS.csv',y_predict)
Notebooks/successful-models/xgb-5000.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Tutorial notebook: NumPy scalars, vectors, matrices, tensors and the
# basic element-wise / matrix operations on them.
import numpy as np

# #### Scalars

# Define scalars here: a 0-d array holding a single value.
s = np.array(5)
print(s)

s.shape  # () — a 0-d array has an empty shape

# BUG FIX: renamed from `sum` — that name shadowed the built-in sum().
total = s + 3
print(total)

# #### Vectors

V = np.array([1, 2, 3])  # 1-d array, shape (3,)
print(V)

V.shape

V[1]  # element access by index

# #### Matrices

M = np.array([[1, 2, 3]])  # 2-d array, shape (1, 3)  #[4,5,6], [7,8,9]])
print(M)

M.shape

# #### Tensors

# 3-d array, shape (2, 3, 3)
T = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
print(T)

# reshape changes the view of the data without copying the values
x = V.reshape(1, 3)
print(x)
print('\n', V.reshape(3, 1))

# ### Matrix Operations

values = np.array([1, 2, 3, 4, 5])
print(values)

values = values + 5  # broadcasting: the scalar is added to every element
print(values)

values -= 5  # in-place element-wise subtraction
print(values)

# ### Matrix Multiplication

a = np.array([[1, 2, 3, 4], [1, 2, 2, 1]])
# print(a)
print(a.T)  # Transpose: (2, 4) -> (4, 2)

b = (a.T) + 2
print(b)

mul = np.matmul(a, b)  # (2, 4) @ (4, 2) -> (2, 2)
print(mul)

print(np.matmul(mul, mul))

# ### Matrix Transpose Use

inputs = np.array([[-0.27, 0.45, 0.64, 0.31]])

weights = np.array([[0.02, 0.001, -0.03, 0.036], \
                    [0.04, -0.003, 0.025, 0.009], \
                    [0.012, -0.045, 0.28, -0.067]])

bias = np.array([1, 2, 5, 2])  # defined for illustration; not used below

print('Shape of Input - X is ', inputs.shape)
print('Shape of Weights (W) is ', weights.shape)

# Y= WX +b <br>
# X - Input<br>
# W - Weights<br>
# b - Bias<br>

# transpose makes the inner dimensions agree: (1, 4) @ (4, 3) -> (1, 3)
Y = np.matmul(inputs, weights.T)
print(Y)

Y.shape

print(np.matmul(weights, inputs.T).T)  # equivalent result, transposed the other way

a

np.add(a, a)

np.subtract(a, a)

np.divide(a, 2)

np.divide(a, a)

# deliberate 0/0 demo: emits a RuntimeWarning and produces nan/inf, not an error
np.divide(a, np.subtract(a, a))

np.multiply(a, 4)
01_Welcome_numpy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Lab 02 : Training with epochs -- demo import torch # ### Lets make an artificial training set of 10 images (28x28 pixels) # + train_data = torch.rand(10,28,28) print(train_data.size()) # - # ### Lets define a the random order in which we are going to visit these images: # 先随机排序 shuffled_indices = torch.randperm(10) print(shuffled_indices) # ### Visit the training set in this random order and do minibatch of size 2 # + bs=2 # 遍历上面已经随机排序的shuffled_indices for count in range(0,10,bs): batch_of_indices = shuffled_indices[count:count+bs] print(batch_of_indices) batch_of_images = train_data[ batch_of_indices ] # -
codes/labs_lecture05/lab02_epoch/epoch_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import psycopg2
import pandas as pd
import numpy as np
import os
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks", color_codes=True)
import matplotlib
import warnings
import yellowbrick as yb
# %matplotlib inline
warnings.simplefilter(action='ignore', category=FutureWarning)
# -

# Connect to the project's AWS RDS Postgres instance.
# NOTE(review): credentials are hard-coded in the notebook ('<PASSWORD>' looks
# like a scrubbed placeholder) — move them into environment variables or a
# secrets store before running/sharing this notebook.
conn = psycopg2.connect(
    host = 'project.cgxhdwn5zb5t.us-east-1.rds.amazonaws.com',
    port = 5432,
    user = 'postgres',
    password = '<PASSWORD>',
    database = 'postgres')
cursor = conn.cursor()

# Register a type adapter so Postgres DECIMAL columns come back as Python
# floats instead of decimal.Decimal objects.
DEC2FLOAT = psycopg2.extensions.new_type(
    psycopg2.extensions.DECIMAL.values,
    'DEC2FLOAT',
    lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)

# +
# Pull the whole ahs_household_class table into a DataFrame, using the
# cursor description to recover the column names.
cursor.execute('Select * from "ahs_household_class"')
rows = cursor.fetchall()
col_names = []
for elt in cursor.description:
    col_names.append(elt[0])
df = pd.DataFrame(data=rows, columns=col_names )
# -

df.head()

# ## Balanced Binning

# +
# NOTE(review): load_concrete is imported but never used in this cell.
from yellowbrick.datasets import load_concrete
from yellowbrick.target import BalancedBinningReference

# Instantiate the visualizer with bin edges over the 1-10 RATINGHS scale.
visualizer = BalancedBinningReference(bins=[0,7,8,9,10])

y = df['RATINGHS']

visualizer.fit(y)        # Fit the data to the visualizer
visualizer.show()        # Finalize and render the figure

# +
# Collapse the 1-10 housing rating into four satisfaction buckets.
LABEL_MAP = {
    1: "Un-Satisfied",
    2: "Un-Satisfied",
    3: "Un-Satisfied",
    4: "Un-Satisfied",
    5: "Un-Satisfied",
    6: "Un-Satisfied",
    7: "Satisfied",
    8: "Satisfied",
    9: "Highly Satisfied",
    10: "Extreme Satisfied"
}

# Convert class labels into text
df_conv = df['RATINGHS'].map(LABEL_MAP)
# -

#df_sub['RATINGHS'] = df_sub['RATINGHS'].astype(str)
df_conv

# ### Class Imbalanced

X = df
y = df_conv

# +
from yellowbrick.target import ClassBalance

X = df
y = df_conv

# Instantiate the visualizer
visualizer = ClassBalance(
    labels=["Un-Satisfied", "Satisfied", "Highly Satisfied","Extreme Satisfied"], size=(1080, 720)
)

visualizer.fit(y)
visualizer.show()
# -

# Oversample minority classes with SMOTE until the classes are balanced.
# NOTE(review): fit_sample was renamed fit_resample in imbalanced-learn 0.6 —
# this call requires an older pinned version; confirm the dependency.
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state = 33)
X_sm, y_sm = sm.fit_sample(X, y.ravel())
# observe that data has been balanced
pd.Series(y_sm).value_counts().plot.bar()

# Repeated resampling passes; after the first pass the classes are already
# balanced, so these should be no-ops on the class counts.
X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel())
pd.Series(y_sm).value_counts().plot.bar()

X_sm, y_sm = sm.fit_sample(X_sm, y_sm.ravel())
pd.Series(y_sm).value_counts().plot.bar()
Archive/Household_Class_Imbalanced.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from glob import glob import pandas as pd import codecs from collections import Counter import re import string import nltk nltk.download('punkt') from nltk.corpus import stopwords stop_words = stopwords.words('english') exclude = set(string.punctuation) exclude.add("‘") exclude.add("“") from nltk import word_tokenize from nltk.corpus import brown from nltk.corpus import wordnet as wn from nltk.stem import SnowballStemmer snowball_stemmer = SnowballStemmer("english") from nltk.stem import PorterStemmer from nltk.stem.wordnet import WordNetLemmatizer wordnet_lemmatizer = WordNetLemmatizer() # There are different ways to clean the text. Perhaps we should consider the method we want to use: naive, tokenizer, lemmatization, or stemming? Below I have used a single case to demonstrate naive, tokenizer, lemmatizer (couldn't figure out stemmer, but will do this upcoming week. 
#naive pipeline
def clean1(x):
    """Naively clean a raw text string.

    Lower-cases, strips line breaks, punctuation and digits, splits on
    whitespace and removes English stopwords.  Returns a list of tokens.
    """
    x=x.replace('\n\n','') # remove the line breaks
    x=x.lower()# lower text
    x = ''.join(ch for ch in x if ch not in exclude) #remove punctuation
    x=re.sub('[0-9]+', '', x) # remove numbers
    x=x.split() #split words
    # Use the module-level stop_words list instead of calling
    # stopwords.words('english') here — the original reloaded the whole
    # corpus on every call; the contents are identical.
    x=[word for word in x if word not in stop_words]#remove stopwords
    #x=" ".join(str(x) for x in x) # you can do this if you want to remove list structure
    return x

#tokenizer
def nlp_pipeline1(text):
    """Tokenize with nltk, then drop digits, punctuation tokens and stopwords.

    Returns a list of lower-cased tokens.
    """
    text=text.lower()
    #tokenize words for each sentence
    text = nltk.word_tokenize(text)
    # Fixed: the original applied ''.join over the *token list*, which glued
    # every word together into one string before splitting on apostrophes.
    # Instead, remove digits inside each token, then drop tokens that consist
    # purely of punctuation.
    text = [re.sub('[0-9]+', '', token) for token in text]
    text = [token for token in text if token and any(ch not in exclude for ch in token)]
    # remove stopwords - be careful with this step
    text = [token for token in text if token not in stop_words]
    return text

#lemmatization
def nlp_lem(text):
    """Tokenize, POS-tag and lemmatize, then drop digits/punctuation/stopwords.

    Verb POS tags are lemmatized as verbs, everything else as nouns.
    Returns a list of lemmatized, lower-cased tokens.
    """
    #tokenize words for each sentence
    text = nltk.word_tokenize(text)
    # pos tagger
    text = nltk.pos_tag(text)
    # lemmatizer: verb lemma for V* tags, default (noun) lemma otherwise
    text = [wordnet_lemmatizer.lemmatize(token.lower(),"v") if "V" in pos
            else wordnet_lemmatizer.lemmatize(token.lower()) for token,pos in text]
    # remove punctuation and numbers token-wise (see nlp_pipeline1 for why)
    text = [re.sub('[0-9]+', '', token) for token in text]
    text = [token for token in text if token and any(ch not in exclude for ch in token)]
    # remove stopwords — fixed: the original referenced the undefined name
    # `stop_word_list` (NameError); the module defines `stop_words`.
    text = [token for token in text if token not in stop_words]
    return text

#stemming
#stem_list1 = [snowball_stemmer.stem(word) for word in list1]
#def nlp_stem(text):
    #tokenize words for each sentence
    #text = nltk.word_tokenize(text)
    # pos tagger
    #text = nltk.pos_tag(text)
    # stemmer
    #text = [snowball_stemmer.stem(word) for word in text]
    # remove punctuation and numbers
    #text = ''.join(ch for ch in text if ch not in exclude) #remove punctuation
    #text=re.sub('[0-9]+', '', text)
    #text=text.split("'") #split words
    # remove stopwords - be careful with this step
    #text = [token for token in text if token not in stop_word_list]
    #return text

#random case, D4.Feb23.2001.MAJ
# NOTE(review): .read().strip().split() produces a *list* of words which is
# then stringified with str(...), so the cleaners see the list's brackets,
# commas and quotes as text — presumably unintended; consider passing the raw
# string (drop .split() and str(...)) — confirm against expected counts.
d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
d4feb232001maj = str(d4feb232001maj)
#cleaning using naive pipeline
maj = clean1(d4feb232001maj)
print (Counter(maj).most_common())

token_d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
token_d4feb232001maj = str(token_d4feb232001maj)
#cleaning using tokenizer pipeline
token_maj = nlp_pipeline1(token_d4feb232001maj)
print (Counter(token_maj).most_common())

lem_d4feb232001maj = codecs.open("/Users/schap/Desktop/TA Data/AC/2002/1/TXT/D1.Mar26.2002.MAJ.txt", "r", "utf-8").read().strip().split()
lem_d4feb232001maj = str(lem_d4feb232001maj)
#cleaning using lemmaztizer pipeline
lem_maj = nlp_lem(lem_d4feb232001maj)
print (Counter(lem_maj).most_common())

# # Word Count Using all 2002 documents
# This is just using naive pipeline
# NOTE(review): all paths below are absolute paths on the author's machine.

#using all decisions
all2002 = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/ALL/all2002text.txt", "r", "utf-8").read().strip().split()
all2002 = str(all2002)
a2002 = clean1(all2002)
print (Counter(a2002).most_common())

#using only AC
all2002ac = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/AC/all2002AC.txt", "r", "utf-8").read().strip().split()
all2002ac = str(all2002ac)
a2002ac = clean1(all2002ac)
print (Counter(a2002ac).most_common())

#using only dissent
all2002diss = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/Dissent/all2002dissent.txt", "r", "utf-8").read().strip().split()
all2002diss = str(all2002diss)
a2002d = clean1(all2002diss)
print (Counter(a2002d).most_common())

#using only majority
all2002maj = codecs.open("/Users/schap/Desktop/TA Data/All Text Files Combined/Majority/all2002majority.txt", "r", "utf-8").read().strip().split()
all2002maj = str(all2002maj)
a2002m = clean1(all2002maj)
print (Counter(a2002m).most_common())
models/word_count_pipelines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import matplotlib.image as mpimg

# Resolve the images/ and model/ folders relative to the notebook's cwd.
execution_path = os.getcwd()
image_path = os.path.join(execution_path, 'images/')
model_path = os.path.join(execution_path, 'model/')

from imageai.Prediction import ImagePrediction

# Classify a single image with imageai's pretrained ResNet50 weights.
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(os.path.join(model_path, "resnet50_weights_tf_dim_ordering_tf_kernels.h5"))
prediction.loadModel()

file_path = os.path.join(image_path, "car_1.jpg")
predictions, probabilities = prediction.predictImage(os.path.join(execution_path, file_path), result_count=5)

# Show the image that was classified.
img = mpimg.imread(file_path)
imgplot = plt.imshow(img)
plt.show()

# Top-5 class labels with their confidence scores.
for eachPrediction, eachProbability in zip(predictions, probabilities):
    print(eachPrediction , " : " , eachProbability)
# -

os.getcwd()

execution_path = ''
image_path = os.path.join(execution_path, 'images/')
image_path

onlyfiles = [f for f in os.listdir(image_path)]
onlyfiles

# +
# Streamlit app version of the same classifier.
import pandas as pd
import numpy as np
import streamlit as st
import matplotlib.image as mpimg
from imageai.Prediction import ImagePrediction
import os

# Get path
# NOTE(review): these are hard-coded absolute paths on the author's machine —
# parameterize (env var / st.sidebar input) before deploying anywhere else.
# execution_path = '/Users/xmpuspus/Desktop/Lectures/notebooks/image_recog_app'
image_path = '/Users/xmpuspus/Desktop/Lectures/notebooks/image_recog_app/images/'
model_path = '/Users/xmpuspus/Desktop/Lectures/notebooks/image_recog_app/model/'
model = '/Users/xmpuspus/Desktop/Lectures/notebooks/image_recog_app/model/resnet50_weights_tf_dim_ordering_tf_kernels.h5'

# Set Title
st.title("Image Recognition")

# Set Sidebar Options
st.sidebar.title('About')
st.sidebar.info('Choose an image to test the ResNet50 classifier.')
st.sidebar.title("Predict New Images")
onlyfiles = [f for f in os.listdir(image_path)]

# Actual Prediction on image using Resnet
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(model)
# NOTE(review): the earlier cell calls predictImage on the ImagePrediction
# object itself; here the *return value* of loadModel() is used instead —
# confirm loadModel() actually returns a predictor in the pinned imageai
# version, otherwise `predictor` is None.
predictor = prediction.loadModel()

# Select image path from options
imageselect = st.sidebar.selectbox("Pick an image.", onlyfiles)

# Read image metadata
img = mpimg.imread(os.path.join(image_path, imageselect))

# Plot on streamlit
st.image(img, caption="Let's predict the image!", use_column_width=True)

if st.sidebar.button('Predict Image'):
    # Get predictions
    predictions, probabilities = predictor.predictImage(os.path.join(image_path, imageselect), result_count=5)
    # Print out predictions
    for eachPrediction, eachProbability in zip(predictions, probabilities):
        st.write(eachPrediction , " : " , eachProbability)
# -
test_image_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd

# Fix the RNG so the train/test split is reproducible.
np.random.seed(1)

full_labels = pd.read_csv('data/raccoon_labels.csv')
full_labels.head()

# One image (filename) may contain several annotated boxes; show the
# distribution of boxes-per-image.
grouped = full_labels.groupby('filename')
grouped.apply(lambda x: len(x)).value_counts()

# ### split each file into a group in a list
gb = full_labels.groupby('filename')
grouped_list = [gb.get_group(x) for x in gb.groups]
n_files = len(grouped_list)
n_files

# 80/20 split by *image* rather than by row, so boxes from one image never
# end up in both sets.  The original hard-coded size=160 and range(200),
# which silently breaks if the dataset size ever changes; derive both from
# the actual number of images (160/200 for the intended 200-image raccoon
# set, so the resulting split is unchanged).
train_size = int(0.8 * n_files)
train_index = np.random.choice(n_files, size=train_size, replace=False)
test_index = np.setdiff1d(np.arange(n_files), train_index)
len(train_index), len(test_index)

train = pd.concat([grouped_list[i] for i in train_index])
test = pd.concat([grouped_list[i] for i in test_index])
len(train), len(test)

train.to_csv('train_labels.csv', index=None)
test.to_csv('test_labels.csv', index=None)
split labels.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## Coding Basics for Researchers - Day 1 # # *Notebook by [<NAME>](https://github.com/pedrohserrano)* # # + [markdown] slideshow={"slide_type": "-"} # --- # # 3. Python and Automation # * [3.1. Creating basic functions](#3.1) # * [3.2. Sharing is caring](#3.2) # # + [markdown] colab_type="text" id="vrRXNXdPAcBu" slideshow={"slide_type": "slide"} # --- # ## 3.1. Creating basic functions # <a id="3.1"> # - # # A function is a block of organized, reusable code that can make your scripts more effective, easier to read, and simple to manage. You can think of functions as small self-contained programs that can perform a specific task which you can repeatedly use in your code. # # We have already used some functions, such as the `print()` command, a built-in Python function. # # Steps: # - Begin the definition of a new function with def. # - Followed by the name of the function. # - Must obey the same rules as variable names. # - Then parameters in parentheses. # - Empty parentheses if the function doesn’t take any inputs. # - Then a colon. # - Then, an indented block of code. def print_greeting(): print('Hello! Maastricht') # - Defining a function does not run it. # - Like assigning a value to a variable. # - Must call the function to execute the code it contains. # + # print_greeting? # - print_greeting() # - More useful when we can specify parameters when defining a function. # - These become variables when the function is executed. # - Are assigned the arguments in the call (i.e., the values passed to the function). # - If you don’t name the arguments when using them in the call, the arguments will be matched to parameters in the order the parameters are defined in the function. 
# Building a date string by hand with str() and concatenation:
str(1988) + '/' + str(9) + '/' + str(23)

def print_date(year, month, day):
    """Print a date formatted as 'year/month/day'.

    This function's job is the side effect of printing; it returns None.
    (The original used `return print(joined)` — print() always returns
    None, so plain print with an implicit return is equivalent and clearer.)
    """
    joined = str(year) + '/' + str(month) + '/' + str(day)
    print(joined)

print_date(1871, 3, 19)

# Arguments may also be passed by keyword, in any order.
print_date(month=3, year=1871, day=19)

# + [markdown] slideshow={"slide_type": "subslide"}
# - Let's create a temperature converter

# + slideshow={"slide_type": "fragment"}
def celsiusToFahr(tempCelsius):
    '''This function converts celsius to fahrenheit
    Example of use:
    freezingPoint = celsiusToFahr(0)
    '''
    # Renamed the local from `celsius_value`: it holds the *Fahrenheit* result.
    fahrenheit_value = 9/5 * tempCelsius + 32
    return fahrenheit_value


# +
# celsiusToFahr?

# + slideshow={"slide_type": "fragment"}
freezingPoint = celsiusToFahr(0)
print('The freezing point of water in Fahrenheit is:', freezingPoint)
print('The boiling point of water in Fahrenheit is:', celsiusToFahr(100))

# + [markdown] colab_type="text" id="rQ7m4bCqAcB9" slideshow={"slide_type": "subslide"}
# Having a **docstring** in the function, helps to know what the function is
# about through the python command line:

# + colab={} colab_type="code" id="X0wG5V-oAcB-" outputId="0ccca1a4-91ba-43fd-8747-6b3c637b2d53" slideshow={"slide_type": "subslide"}
help(celsiusToFahr)

# + [markdown] colab_type="text" id="wMn-M9qCAcCA" slideshow={"slide_type": "fragment"}
# If you define a docstring for all of your functions, it makes it easier for
# other people to use them since they can get help on the arguments and return
# values of the function.
#
# Next, note that rather than commenting on what input values lead to errors,
# we have some testing of these values, followed by a warning if the value is
# invalid, and some conditional code to handle exceptional cases.
# -

# ---
# ## 3.2.
Sharing is caring # <a id="3.2"> # # + [markdown] slideshow={"slide_type": "subslide"} # - Posting your work in Github will automatically be rendered by **NBviewer**(https://nbviewer.jupyter.org/) # # - Uploading your work in **Google Colab** can make it sharable immediately (https://colab.research.google.com/) # # # - Markdown cells can contain embedded links and images # # Add a link using the following pattern: `[link text](URL_or_relative_path)` # gives the clickable link: [Maastricht University](https://www.maastrichtuniversity.nl). # # Add an image using the following pattern: `![image alt text](URL_or_path)` # embeds the following image: ![UM logo](https://logos-download.com/wp-content/uploads/2017/11/Maastricht_University_logo.png) # + [markdown] slideshow={"slide_type": "subslide"} # - Markdown cells can include Latex Expressions # # Mathematical expessions can be rendered inline by wrapping a LaTeX expression (no spaces) with a $ either side. # # For example, `$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$` is rendered as the inline $e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$ expression. # # Wrapping the expression with `$$` either side forces it to be rendered on a new line in the centre of the cell: $$e^x=\sum_{i=0}^\infty \frac{1}{i!}x^i$$ # + [markdown] slideshow={"slide_type": "slide"} # - Checking Reproducibility # # One of the aims of using notebooks is to produce an executable document that can be rerun to reproduce the results. # # To run cells from scratch (i.e. from a fresh kernel), `Kernel -> Restart and Clear Output` and then run the cells you want. # # To run all the cells in the notebook from scratch: `Kernel -> Restart and Run All` # + [markdown] slideshow={"slide_type": "subslide"} # - Licensing # # [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) # More info: https://reproducible-science-curriculum.github.io/sharing-RR-Jupyter/LICENSE.html # - # --- # ## EXERCISES # + _1. Start with a simple function. 
# # This function shall be able to add 100 to a given X value.
# Fill the blanks and test it.
#
# ```Python
# def adding100(x):
#     value = x + _____
#     return _____
# ```
#

def adding_hundred(x):
    """Return x + 100 (exercise 1 solution)."""
    value = x + 100
    return value

adding_hundred(0)

# + _2. Reusing an existing function.
#
# In section 3.1 of this notebook, we have created a function to convert
# Celsius to Fahrenheit and we called it `celsiusToFahr()`.
#
# Let’s now create a second function called `kelvinsToCelsius()`
# Fill the blanks and test it.
#
# ```Python
# ___ kelvinsToCelsius(tempKelvins):
#     return tempKelvins ______
# ```
#

# Fixed: the original subtracted 274.15; the Kelvin-to-Celsius conversion is
# T(°C) = T(K) - 273.15 (0 K is -273.15 °C).
def kelvinsToCelsius(tempKelvins):
    """Convert kelvins to degrees Celsius (one-line version)."""
    return tempKelvins - 273.15

# Same conversion written with an intermediate variable (this definition
# replaces the one above; both are kept to mirror the lesson's two variants).
def kelvinsToCelsius(tempKelvins):
    """Convert kelvins to degrees Celsius."""
    celsius_value = tempKelvins - 273.15
    return celsius_value

# +
# kelvinsToCelsius?

# + [markdown] slideshow={"slide_type": "fragment"}
# - Having the new function `kelvinsToCelsius`, we could now use it in the
#   same way as in the example of section 3.1
#

# +
absoluteZero = kelvinsToCelsius(0)
print('Absolute zero in Celsius is:', absoluteZero)

# + [markdown] slideshow={"slide_type": "subslide"}
# - What is next?
# Let's now try to convert Kelvins to Fahrenheit. We could write out a new
# formula for it, but perhaps we don't need to.
# Instead, we can do the conversion using the two functions existing functions `celsiusToFahr()` and `kelvinsToCelsius()` and create a new function reusing those previous calculations # # ```Python # def kelvinsToFahrenheit(______): # '''This function converts kelvin to fahrenheit''' # ______ # ______ # return ______ # ``` # - value = kelvinsToCelsius(20) celsiusToFahr(kelvinsToCelsius(20)) def kelvinsToFahrenheit(tempKelvins): '''This function converts kelvin to fahrenheit''' value_C = kelvinsToCelsius(tempKelvins) value_F = celsiusToFahr(value_C) return value_F # + [markdown] slideshow={"slide_type": "subslide"} # - Having the new function `kelvinsToFahrenheit()`, we could now use it # + absoluteZeroF = kelvinsToFahrenheit(tempKelvins=0) print('Absolute zero in Fahrenheit is:', absoluteZeroF) # + _3. Add a new cell below and add a License to your notebook. (Check section 3.2) # Upload your finalized notebook to Google Colab for you to share with others
notebooks-solutions/3-Python-and-Automation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %pylab inline
import os, sys
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from tqdm.notebook import tqdm
tqdm.pandas()
sys.path.append('..')
import assign_loop_type
from assign_loop_type import write_loop_assignments
# -

# ## Code to reproduce DegScore from Kaggle datasets

# +
def encode_input(df, window_size=1, pad=10, seq=True, struct=True, ensemble_size=0):
    '''Create input for a regression model predicting structure probing data.

    Inputs:
    df: dataframe (in EternaBench RDAT format)
    window_size: size of window (in one direction). so window_size=1 is a total window size of 3
    pad: number of nucleotides at start to not include
    seq (bool): include sequence encoding
    struct (bool): include bpRNA structure encoding
    ensemble_size: if > 0, average loop types over a sampled structure ensemble
                   instead of the single MEA structure

    Outputs:
    Input array (n_samples x n_features): array of windowed input features
    feature_names (list): feature names
    labels (list): per-sample ids of the form '<row id>_<position>'
    '''
    #MAX_LEN = 68
    BASES = ['A','U','G','C']
    STRUCTS = ['H','E','I','M','B','S']   # bpRNA loop-type codes
    inpts = []
    labels = []
    # One-hot kernel: 4 base channels and/or 6 loop-type channels per position.
    feature_kernel=[]
    if seq:
        feature_kernel.extend(BASES)
    if struct:
        feature_kernel.extend(STRUCTS)
    feature_names = ['%s_%d' % (k, val) for val in range(-1*window_size, window_size+1) for k in feature_kernel]
    for i, row in tqdm(df.iterrows(), desc='Encoding inputs', total=len(df)):
        # Only the first len(sequence)-39 positions are scored per row.
        MAX_LEN = len(row['sequence'])-39 #68 for RYOS-I
        arr = np.zeros([MAX_LEN,len(feature_kernel)])
        if ensemble_size > 0:
            # stochastically sample ensemble
            # NOTE(review): get_ensemble is not defined in this notebook —
            # presumably provided by assign_loop_type or an earlier cell;
            # confirm before running with ensemble_size > 0.
            ensemble = get_ensemble(row['sequence'], n=ensemble_size)
        else:
            # use MEA structure (single-structure "ensemble" of size 1)
            ensemble = np.array([list(row['predicted_loop_type'])])
        for index in range(pad,MAX_LEN):
            ctr=0
            #encode sequence
            if seq:
                for char in BASES:
                    if row['sequence'][index]==char:
                        arr[index,ctr]+=1
                    ctr+=1
            if struct:
                # Fraction of ensemble structures with each loop type here.
                loop_assignments = ''.join(ensemble[:,index])
                for char in STRUCTS:
                    prob = loop_assignments.count(char) / len(loop_assignments)
                    arr[index,ctr]+=prob
                    ctr+=1
        # add zero padding to the side so edge windows are well-defined
        padded_arr = np.vstack([np.zeros([window_size,len(feature_kernel)]),arr[pad:], np.zeros([window_size,len(feature_kernel)])])
        # Slice out a (2*window_size+1)-position window around each scored
        # nucleotide and flatten it into one feature row.
        for index in range(pad,MAX_LEN):
            new_index = index+window_size-pad
            tmp = padded_arr[new_index-window_size:new_index+window_size+1]
            inpts.append(tmp.flatten())
            labels.append('%s_%d' % (row['id'], index))
    return np.array(inpts), feature_names, labels

def encode_output(df, data_type='reactivity', pad=10):
    '''Create regression targets aligned with encode_input's samples.

    Inputs:
    df: dataframe (in EternaBench RDAT format)
    data_type: column name for the degradation/reactivity values
    pad: number of nucleotides at start to not include (must match encode_input)

    Outputs:
    outpts (list, n_samples): per-position target values
    labels (list): per-sample ids of the form '<row id>_<position>'
    '''
    #MAX_LEN = 68
    outpts = []
    labels = []
    # output identity should be in form id_00073f8be_0
    for i, row in df.iterrows():
        MAX_LEN = len(row['sequence'])-39
        for index in range(pad,MAX_LEN):
            outpts.append(row[data_type][index])
            labels.append('%s_%d' % (row['id'], index))
    return outpts, labels
# -

# #### Load data

# +
kaggle_train = pd.read_json('train.json',lines=True)
# Keep only rows passing the Kaggle signal-to-noise filter.
kaggle_train = kaggle_train.loc[kaggle_train['SN_filter']==1]

kaggle_test = pd.read_json('test.json',lines=True)
# -

# #### Encode data

# ###### Max.
# expected accuracy (continuation of the "Max. expected accuracy" heading)

# Encode train/test with a 12-nt window on each side:
# 25 window positions x 10 features per position.
inputs_train, feature_names, _ = encode_input(kaggle_train, window_size=12)
inputs_test, _, test_labels = encode_input(kaggle_test, window_size=12)

# #### Visualize encoding for an example nucleotide

figure(figsize=(10,4))
subplot(1,2,1)
title('MFE encoding')
imshow(np.array(inputs_train[33].reshape(25,10)).T,cmap='gist_heat_r')
yticks(range(10), ['A','U','G','C','H','E','I','M','B','S'])
xlabel('window position')

# #### To set up kaggle submission format:

sample_submission = pd.read_csv('sample_submission.csv.zip')
# Only fill rows whose id_seqpos we actually predicted.
mask = sample_submission['id_seqpos'].isin(test_labels)

# #### Train models

# #### Model based on single MFE structure (primary DegScore model used)

# Fixed: the original loop referenced the undefined names `mea_inputs_train`
# and `mea_inputs_test` (the encoded arrays are bound to `inputs_train` /
# `inputs_test` above) and assigned into `mea_models` without ever
# initialising it — all three were NameErrors.
mea_models = {}
for output_type in ['deg_Mg_pH10']: #['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C','deg_50C']:
    mea_outputs_train, mea_outputs_labels = encode_output(kaggle_train, data_type=output_type)
    reg = Ridge(alpha=0.15, fit_intercept=False)
    print('Fitting %s ...' % output_type)
    #reg.fit(mea_inputs_train_construct, mea_outputs_train)
    reg.fit(inputs_train, mea_outputs_train)
    mea_models[output_type] = reg
    test_prediction = reg.predict(inputs_test)
    sample_submission.loc[mask, output_type] = test_prediction
ReproduceDegScore/reproduce_degscore.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Load CSV files with `CSV Datasets` # # This tutorial shows how to load data from CSV files based on the `CSVDataset` and `CSVIterableDataset` modules. And execute post-processing logic on the data. # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/main/modules/csv_datasets.ipynb) # ## Setup environment # !python -c "import monai" || pip install -q "monai-weekly[pandas, pillow]" # %matplotlib inline # ## Setup imports # + tags=[] # Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import shutil import sys import matplotlib.pyplot as plt import pandas as pd import PIL import numpy as np from monai.data import CSVDataset, CSVIterableDataset, DataLoader from monai.apps import download_and_extract from monai.config import print_config from monai.transforms import Compose, LoadImaged, ToNumpyd print_config() # - # ## Setup data directory # # You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. # This allows you to save results and reuse downloads. # If not specified a temporary directory will be used. 
# + tags=[] directory = os.environ.get("MONAI_DATA_DIRECTORY") root_dir = tempfile.mkdtemp() if directory is None else directory print(root_dir) # - # ## Download dataset # # Here we use several images of MedNIST dataset in the demo. Downloads and extracts the dataset. # # The MedNIST dataset was gathered from several sets from [TCIA](https://wiki.cancerimagingarchive.net/display/Public/Data+Usage+Policies+and+Restrictions), # [the RSNA Bone Age Challenge](http://rsnachallenges.cloudapp.net/competitions/4), # and [the NIH Chest X-ray dataset](https://cloud.google.com/healthcare/docs/resources/public-datasets/nih-chest). # # The dataset is kindly made available by [Dr. <NAME>., Ph.D.](https://www.mayo.edu/research/labs/radiology-informatics/overview) (Department of Radiology, Mayo Clinic) # under the Creative Commons [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/). # + tags=[] resource = "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz" md5 = "0bc7306e7427e00ad1c5526a6677552d" compressed_file = os.path.join(root_dir, "MedNIST.tar.gz") data_dir = os.path.join(root_dir, "MedNIST") if not os.path.exists(data_dir): download_and_extract(resource, compressed_file, root_dir, md5) # - # ## Plot several medical images in the hand category plt.subplots(1, 5, figsize=(10, 10)) for i in range(5): filename = f"00000{i}.jpeg" im = PIL.Image.open(os.path.join(data_dir, "Hand", filename)) arr = np.array(im) plt.subplot(3, 3, i + 1) plt.xlabel(filename) plt.imshow(arr, cmap="gray", vmin=0, vmax=255) plt.tight_layout() plt.show() # ## Generate 3 CSV files for test # Here we generate 3 CSV files to store properties of the images, contains missing values. 
# + test_data1 = [ ["subject_id", "label", "image", "ehr_0", "ehr_1", "ehr_2"], ["s000000", 5, os.path.join(data_dir, "Hand", "000000.jpeg"), 2.007843256, 2.29019618, 2.054902077], ["s000001", 0, os.path.join(data_dir, "Hand", "000001.jpeg"), 6.839215755, 6.474509716, 5.862744808], ["s000002", 4, os.path.join(data_dir, "Hand", "000002.jpeg"), 3.772548914, 4.211764812, 4.635294437], ["s000003", 1, os.path.join(data_dir, "Hand", "000003.jpeg"), 3.333333254, 3.235294342, 3.400000095], ["s000004", 9, os.path.join(data_dir, "Hand", "000004.jpeg"), 6.427451134, 6.254901886, 5.976470947], ] test_data2 = [ ["subject_id", "ehr_3", "ehr_4", "ehr_5", "ehr_6", "ehr_7", "ehr_8"], ["s000000", 3.019608021, 3.807843208, 3.584313869, 3.141176462, 3.1960783, 4.211764812], ["s000001", 5.192157269, 5.274509907, 5.250980377, 4.647058964, 4.886274338, 4.392156601], ["s000002", 5.298039436, 9.545097351, 12.57254887, 6.799999714, 2.1960783, 1.882352948], ["s000003", 3.164705753, 3.086274624, 3.725490093, 3.698039293, 3.698039055, 3.701960802], ["s000004", 6.26274538, 7.717647076, 9.584313393, 6.082352638, 2.662744999, 2.34117651], ] test_data3 = [ ["subject_id", "ehr_9", "ehr_10", "meta_0", "meta_1", "meta_2"], ["s000000", 6.301961422, 6.470588684, "TRUE", "TRUE", "TRUE"], ["s000001", 5.219608307, 7.827450752, "FALSE", "TRUE", "FALSE"], ["s000002", 1.882352948, 2.031372547, "TRUE", "FALSE", "TRUE"], ["s000003", 3.309803963, 3.729412079, "FALSE", "FALSE", "TRUE"], ["s000004", 2.062745094, 2.34117651, "FALSE", "TRUE", "TRUE"], # generate missing values in the row ["s000005", 3.353655643, 1.675674543, "TRUE", "TRUE", "FALSE"], ] def prepare_csv_file(data, filepath): with open(filepath, "w") as f: for d in data: f.write((",".join([str(i) for i in d])) + "\n") filepath1 = os.path.join(data_dir, "test_data1.csv") filepath2 = os.path.join(data_dir, "test_data2.csv") filepath3 = os.path.join(data_dir, "test_data3.csv") prepare_csv_file(test_data1, filepath1) prepare_csv_file(test_data2, 
filepath2) prepare_csv_file(test_data3, filepath3) # - # ## Load a single CSV file with `CSVDataset` dataset = CSVDataset(src=filepath1) # construct pandas table to show the data, `CSVDataset` inherits from PyTorch Dataset print(pd.DataFrame(dataset.data)) # ## Load multiple CSV files and join the tables dataset = CSVDataset(src=[filepath1, filepath2, filepath3], on="subject_id") # construct pandas table to show the joined data of 3 tables print(pd.DataFrame(dataset.data)) # ## Only load selected rows and selected columns from 3 CSV files # Here we load rows: 0 - 1 and 3, columns: "subject_id", "label", "ehr_1", "ehr_7", "meta_1". dataset = CSVDataset( src=[filepath1, filepath2, filepath3], row_indices=[[0, 2], 3], # load row: 0, 1, 3 col_names=["subject_id", "label", "ehr_1", "ehr_7", "meta_1"], ) # construct pandas table to show the joined and selected data print(pd.DataFrame(dataset.data)) # ## Load and group columns to generate new columns # Here we load 3 CSV files and group all the `ehr_*` columns to generate a new `ehr` column, and group all the `meta_*` columns to generate a new `meta` column. dataset = CSVDataset( src=[filepath1, filepath2, filepath3], col_names=["subject_id", "image", *[f"ehr_{i}" for i in range(11)], "meta_0", "meta_1", "meta_2"], col_groups={"ehr": [f"ehr_{i}" for i in range(11)], "meta": ["meta_0", "meta_1", "meta_2"]}, ) # construct pandas table to show the joined, selected and generated data print(pd.DataFrame(dataset.data)) # ## Load and fill the missing values and convert data types # In this tutorial, the `s000005` image has many missing values in CSV file1 and file2. Here we select some columns and set the default value to the missing value of `image` column, and also try to convert `ehr_1` to `int` type. 
dataset = CSVDataset( src=[filepath1, filepath2, filepath3], col_names=["subject_id", "label", "ehr_0", "ehr_1", "ehr_9", "meta_1"], col_types={"label": {"default": "No label"}, "ehr_1": {"type": int, "default": 0}}, how="outer", # will load the NaN values in this merge mode ) # construct pandas table to show the joined, selected and converted data print(pd.DataFrame(dataset.data)) # ## Execute transforms on the loaded data # Here we load the JPG image from the `image` value, and convert `ehr` group to numpy array. # + dataset = CSVDataset( src=[filepath1, filepath2, filepath3], col_groups={"ehr": [f"ehr_{i}" for i in range(5)]}, transform=Compose([LoadImaged(keys="image"), ToNumpyd(keys="ehr")]), ) # test the transformed `ehr` data: for item in dataset: print(type(item["ehr"]), item["ehr"]) # plot the transformed image array plt.subplots(1, 5, figsize=(10, 10)) for i in range(5): plt.subplot(3, 3, i + 1) plt.xlabel(dataset[i]["subject_id"]) plt.imshow(dataset[i]["image"], cmap="gray", vmin=0, vmax=255) plt.tight_layout() plt.show() # - # ## Load CSV files with `CSVIterableDataset` # `CSVIterableDataset` is designed to load data chunks from stream or very big CSV files, it doesn't need to load all the content at the beginning. And it can support most of above features of `CSVDataset` except for selecting rows. # # Here we load CSV files with `CSVIterableDataset` in multi-processing method of DataLoader. # + tags=[] dataset = CSVIterableDataset(src=[filepath1, filepath2, filepath3], shuffle=False) # set num workers = 0 for mac / win num_workers = 2 if sys.platform == "linux" else 0 dataloader = DataLoader(dataset=dataset, num_workers=num_workers, batch_size=2) for item in dataloader: print(item) # - # ## Shuffle content with `CSVIterableDataset` # # To effectively shuffle the data in the big dataset, `CSVIterableDataset` supports to set a big buffer to continuously store the loaded chunks, then always randomly pick data from the buffer for following tasks. 
# + dataset = CSVIterableDataset( chunksize=2, buffer_size=4, src=[filepath1, filepath2, filepath3], col_names=["subject_id", "label", "ehr_1", "ehr_7", "meta_1"], transform=ToNumpyd(keys="ehr_1"), shuffle=True, seed=123, ) # set num workers = 0 for mac / win num_workers = 2 if sys.platform == "linux" else 0 dataloader = DataLoader(dataset=dataset, num_workers=num_workers, batch_size=2) for item in dataloader: print(item) # - # ## Cleanup data directory # # Remove directory if a temporary was used. if directory is None: shutil.rmtree(root_dir)
modules/csv_datasets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + cell_id="00000-aaffa051-d399-4810-af1b-81151aff9c14" deepnote_cell_type="code"
from jkg_evaluators import dragonfind_10_to_500

# + cell_id="00001-0392ccd4-dfac-4084-8ae0-b80d0882b3ba" deepnote_cell_type="code"
# A partitioned herd: dead cows (False) come first, alive cows (True) after.
cow_alive_list_test_1 = [False, False, True, True, True]

# + cell_id="00002-ef67ffe2-b42b-4402-b29c-f2c9393285cd" deepnote_cell_type="code"
def dani_solution(cow_alive_list):
    """Return the index of the first alive cow (first True) in the list.

    Assumes the list is partitioned: every False (dead) precedes every
    True (alive) -- presumably the dragon ate the herd from one end;
    TODO confirm against the dragonfind evaluator's spec.
    Uses binary search, so only O(log n) elements are inspected.

    Bug fix vs. the previous version: the old code used ``==`` (a no-op
    comparison) where ``=`` assignment was intended, so the search bounds
    never moved and the loop either spun forever or returned a wrong
    middle index; it also contained unreachable ``return``/``continue``
    statements after ``break``.
    """
    lo = 0
    hi = len(cow_alive_list) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if cow_alive_list[mid]:
            # mid is alive: the first alive cow is at mid or earlier.
            hi = mid
        else:
            # mid is dead: the first alive cow must be strictly after mid.
            lo = mid + 1
    return lo

# + cell_id="00003-b520c290-19be-4154-a440-2353ed1dc86a" deepnote_cell_type="code"
# Fixed NameError: the evaluator was previously called with the undefined
# name `my_solution2`; it should receive the solution defined above.
dragonfind_10_to_500.evaluate(dani_solution)

# + [markdown] tags=[] created_in_deepnote_cell=true deepnote_cell_type="markdown"
# <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=978e47b7-a961-4dca-a945-499e8b781a34' target="_blank"> # <img 
alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94P<KEY> > </img> # Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
members/dani/6_hazi/dragon_dani.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Generating Simpson's Paradox # # We have been maually setting, but now we should also be able to generate it more programatically. his notebook will describe how we develop some functions that will be included in the `sp_data_util` package. # + # # %load code/env # standard imports we use throughout the project import numpy as np import pandas as pd import seaborn as sns import scipy.stats as stats import matplotlib.pyplot as plt import mlsim from mlsim import sp_plot # - # We have been thinking of SP hrough gaussian mixture data, so we'll first work wih that. To cause SP we need he clusters to have an opposite trend of the per cluster covariance. # setup r_clusters = -.6 # correlation coefficient of clusters cluster_spread = .8 # pearson correlation of means p_sp_clusters = .5 # portion of clusters with SP k = 5 # number of clusters cluster_size = [2,3] domain_range = [0, 20, 0, 20] N = 200 # number of points p_clusters = [1.0/k]*k # + # keep all means in the middle 80% mu_trim = .2 # sample means center = [np.mean(domain_range[:2]),np.mean(domain_range[2:])] mu_transform = np.repeat(np.diff(domain_range)[[0,2]]*(mu_trim),2) mu_transform[[1,3]] = mu_transform[[1,3]]*-1 # sign flip every other mu_domain = [d + m_t for d, m_t in zip(domain_range,mu_transform)] corr = [[1, cluster_spread],[cluster_spread,1]] d = np.sqrt(np.diag(np.diff(mu_domain)[[0,2]])) cov = np.dot(d,corr).dot(d) # sample a lot of means, just for vizualization # mu = np.asarray([np.random.uniform(*mu_domain[:2],size=k*5), # uniform in x # np.random.uniform(*mu_domain[2:],size=k*5)]).T # uniform in y mu = np.random.multivariate_normal(center, cov,k*50) sns.regplot(mu[:,0], mu[:,1]) plt.axis(domain_range); # mu # - # However independent sampling isn't really very uniform and 
we'd like to ensure the clusters are more spread out, so we can use some post processing to thin out close ones. # + mu_thin = [mu[0]] # keep the first one p_dist = [1] # we'll use a gaussian kernel around each to filter and only the closest point matters dist = lambda mu_c,x: stats.norm.pdf(min(np.sum(np.square(mu_c -x),axis=1))) for m in mu: p_keep = 1- dist(mu_thin,m) if p_keep > .99: mu_thin.append(m) p_dist.append(p_keep) mu_thin = np.asarray(mu_thin) sns.regplot(mu_thin[:,0], mu_thin[:,1]) plt.axis(domain_range) # - # Now, we can sample points on top of that, also we'll only use the first k sns.regplot(mu_thin[:k,0], mu_thin[:k,1]) plt.axis(domain_range) # Keeping only a few, we can end up with ones in the center, but if we sort them by the distance to the ones previously selected, we get them spread out a little more # + # sort by distance mu_sort, p_sort = zip(*sorted(zip(mu_thin,p_dist), key = lambda x: x[1], reverse =True)) mu_sort = np.asarray(mu_sort) sns.regplot(mu_sort[:k,0], mu_sort[:k,1]) plt.axis(domain_range) # + # cluster covariance cluster_corr = np.asarray([[1,r_clusters],[r_clusters,1]]) cluster_std = np.diag(np.sqrt(cluster_size)) cluster_cov = np.dot(cluster_std,cluster_corr).dot(cluster_std) # sample from a GMM z = np.random.choice(k,N,p_clusters) x = np.asarray([np.random.multivariate_normal(mu_sort[z_i],cluster_cov) for z_i in z]) # make a dataframe latent_df = pd.DataFrame(data=x, columns = ['x1', 'x2']) # code cluster as color and add it a column to the dataframe latent_df['color'] = z sp_plot(latent_df,'x1','x2','color') # - # We might not want all of the clusters to have the reveral though, so we can also sample the covariances # + # cluster covariance p_sp_clusters =.8 cluster_size = [4,4] cluster_std = np.diag(np.sqrt(cluster_size)) cluster_corr_sp = np.asarray([[1,r_clusters],[r_clusters,1]]) # correlation with sp cluster_cov_sp = np.dot(cluster_std,cluster_corr_sp).dot(cluster_std) #cov with sp cluster_corr = 
np.asarray([[1,-r_clusters],[-r_clusters,1]]) #correlation without sp cluster_cov = np.dot(cluster_std,cluster_corr).dot(cluster_std) #cov wihtout sp cluster_covs = [cluster_corr_sp, cluster_corr] # sample the[0,1] k times c_sp = np.random.choice(2,k,p=[p_sp_clusters,1-p_sp_clusters]) print(c_sp) # sample from a GMM z = np.random.choice(k,N,p_clusters) print(z) cov_noise = lambda : np.random.permutation([.5*np.random.random(),np.random.random()]) # cluster_covs_all = [cluster_covs[c_i]*np.random.random()/5*(c_i+1) for c_i in c_sp] cluster_covs_all = [cluster_covs[c_i]*np.random.random()*2*(i+1) for i,c_i in enumerate(c_sp)] mu_p = [np.random.multivariate_normal(mu,cov) for mu,cov in zip(mu_sort,cluster_covs_all)] x = np.asarray([np.random.multivariate_normal(mu_sort[z_i],cluster_covs_all[z_i]) for z_i in z]) x2 = np.asarray([np.random.multivariate_normal(mu_p[z_i],cluster_covs_all[z_i]) for z_i in z]) # x = np.asarray([np.random.multivariate_normal(mu_sort[z_i],[[1,.5],[.5,.1]]) for z_i in z]) x = np.concatenate((x,x2),axis=0) # make a dataframe latent_df = pd.DataFrame(data=x, columns = ['x1', 'x2']) # code cluster as color and add it a column to the dataframe latent_df['color'] = list(z)*2 sp_plot(latent_df,'x1','x2','color') # - b.shape x.shape np.random.permutation cluster_covs[0]*.1 [p_sp_clusters,1-p_sp_clusters] c_sp # We'll call this construction of SP `geometric_2d_gmm_sp` and it's included in the `sp_data_utils` module now, so it can be called as follows. We'll change the portion of clusters with SP to 1, to ensure that all are SP. 
p_sp_clusters = .9 sp_df2 = mlsim.geometric_2d_gmm_sp(r_clusters,cluster_size,cluster_spread, p_sp_clusters, domain_range,k,N,p_clusters) sp_plot(sp_df2,'x1','x2','color') # With this, we can start to see how the parameters control a little # + # setup r_clusters = -.9 # correlation coefficient of clusters cluster_spread = .1 # pearson correlation of means p_sp_clusters = 1 # portion of clusters with SP k = 5 # number of clusters cluster_size = [1,1] domain_range = [0, 20, 0, 20] N = 200 # number of points p_clusters = [.5, .2, .1, .1, .1] sp_df3 = mlsim.geometric_2d_gmm_sp(r_clusters,cluster_size,cluster_spread, p_sp_clusters, domain_range,k,N,p_clusters) sp_plot(sp_df3,'x1','x2','color') # - # We might want to add multiple views, so we added a function that takes the same parameters or lists to allow each view to have different parameters. We'll look first at just two views with the same parameters, both as one another and as above # + many_sp_df = mlsim.geometric_indep_views_gmm_sp(2,r_clusters,cluster_size,cluster_spread,p_sp_clusters, domain_range,k,N,p_clusters) sp_plot(many_sp_df,'x1','x2','A') sp_plot(many_sp_df,'x3','x4','B') many_sp_df.head() # - # We can also look at the pairs of variables that we did not design SP into and see that they have vey different structure # + # f, ax_grid = plt.subplots(2,2) # , fig_size=(10,10) sp_plot(many_sp_df,'x1','x4','A') sp_plot(many_sp_df,'x2','x4','B') sp_plot(many_sp_df,'x2','x3','B') sp_plot(many_sp_df,'x1','x3','B') # - # And we can set up the views to be different from one another by design # + # setup r_clusters = [.8, -.2] # correlation coefficient of clusters cluster_spread = [.8, .2] # pearson correlation of means p_sp_clusters = [.6, 1] # portion of clusters with SP k = [5,3] # number of clusters cluster_size = [4,4] domain_range = [0, 20, 0, 20] N = 200 # number of points p_clusters = [[.5, .2, .1, .1, .1],[1.0/3]*3] many_sp_df_diff = 
mlsim.geometric_indep_views_gmm_sp(2,r_clusters,cluster_size,cluster_spread,p_sp_clusters, domain_range,k,N,p_clusters) sp_plot(many_sp_df_diff,'x1','x2','A') sp_plot(many_sp_df_diff,'x3','x4','B') many_sp_df.head() # - # And we can run our detection algorithm on this as well. many_sp_df_diff_result = dsp.detect_simpsons_paradox(many_sp_df_diff) many_sp_df_diff_result # We designed in SP to occur between attributes `x1` and `x2` with respect to `A` and 2 & 3 in grouby by B, for portions fo the subgroups. We detect other occurences. It can be interesting to exmine trends between the deisnged and spontaneous occurences of SP, so designed_SP = [('x1','x2','A'),('x3','x4','B')] des = [] for i,r in enumerate(many_sp_df_diff_result[['attr1','attr2','groupbyAttr']].values): if tuple(r) in designed_SP: des.append(i) many_sp_df_diff_result['designed'] = 'no' many_sp_df_diff_result.loc[des,'designed'] = 'yes' many_sp_df_diff_result.head() # + r_clusters = -.9 # correlation coefficient of clusters cluster_spread = .6 # pearson correlation of means p_sp_clusters = .5 # portion of clusters with SP k = 5 # number of clusters cluster_size = [5,5] domain_range = [0, 20, 0, 20] N = 200 # number of points p_clusters = [1.0/k]*k many_sp_df_diff = mlsim.geometric_indep_views_gmm_sp(3,r_clusters,cluster_size,cluster_spread,p_sp_clusters, domain_range,k,N,p_clusters) sp_plot(many_sp_df_diff,'x1','x2','A') sp_plot(many_sp_df_diff,'x3','x4','B') sp_plot(many_sp_df_diff,'x3','x4','A') many_sp_df_diff.head() # -
sandbox/generate_regression_sp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 (tensorflow-2.0) # language: python # name: tensorflow-2.0 # --- # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import math import pylab import matplotlib.patches as patches np.random.seed(410) def sigmoid(x): return 1 / (1 + np.exp(-x)) x = (np.random.rand(15) * 20) - 10 y = sigmoid(x) pylab.ylim([-1,1.5]) pylab.xlim([-20,20]) plt.scatter(x, y, s=100, c="b", alpha=0.5, marker='.') plt.xlabel("x") plt.ylabel("y") # Create a Rectangle patch c = '#aa880086' r1 = patches.Rectangle((-20,-0.5),10,0.2,linewidth=1,edgecolor=c,facecolor=c) r2 = patches.Rectangle((-10,-0.5),20,0.2,linewidth=1,edgecolor=c,facecolor=c) r3 = patches.Rectangle((10,-0.5),10,0.2,linewidth=1,edgecolor=c,facecolor=c) plt.gca().add_patch(r1) #plt.gca().add_patch(r2) plt.gca().add_patch(r3) plt.text(-19, -0.45, "Extrapolate", fontsize=12) #plt.text(-4, -0.45, "Interpolate", fontsize=12) plt.text(11, -0.45, "Extrapolate", fontsize=12) plt.savefig('lin-ext.png', dpi=300) plt.show() # - # # Neural Network Extrapolation # + # %matplotlib inline import matplotlib.pyplot as plt import numpy as np import math import pylab import matplotlib.patches as patches np.random.seed(410) def sigmoid(x): return 1 / (1 + np.exp(-x)) x = (np.random.rand(15) * 20) - 10 y = sigmoid(x) # - y # + from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation from sklearn import metrics model = Sequential() model.add(Dense(20, input_dim=1, activation='relu')) model.add(Dense(10, activation='relu')) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(x,y,verbose=0,epochs=1000) # Predict and measure RMSE pred = model.predict(x) score = np.sqrt(metrics.mean_squared_error(pred,y)) print("Score (RMSE): {}".format(score)) # - x2 = np.arange(-50.0, 
50.0, 2.0) y_hat2 = model.predict(x2) y2 = sigmoid(x2) import pandas as pd df = pd.DataFrame() df['x'] = x2 df['y'] = y2 df['yHat'] = y_hat2 df plt.plot(x2, df['y'].tolist(), label='expected') plt.plot(x2, df['yHat'].tolist(), label='prediction') plt.ylabel('output') plt.legend() plt.savefig('nn-ext.png', dpi=300) plt.show() # # Bimodal Distribution # + import matplotlib.pyplot as plt import numpy as np import matplotlib.pyplot as plt import matplotlib.mlab as mlab import math s1 = np.random.normal(20, 6.4, 10000) s2 = np.random.normal(37, 6.3, 10000) s = np.concatenate((s1, s2), axis=0) count, bins, ignored = plt.hist(s, 80, density=True) plt.savefig('bimodal.png', dpi=300) plt.show() # - # # Mahalanobis Distance from scipy.spatial import distance iv = [ [1 , 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]] distance.mahalanobis([1, 0, 0], [0, 1, 0], iv) distance.mahalanobis([0, 2, 0], [0, 1, 0], iv) distance.mahalanobis([2, 0, 0], [0, 1, 0], iv) # # Diabetes # + import pandas as pd df = pd.read_csv('pima-indians-diabetes.csv', na_values=['NA', '?']) df.rename(columns={'diabetes': 'ds'},inplace=True) df[0:7] # - # Remove missing values df = df[df.plasma != 0] df = df[df.diastolic != 0] df = df[df.triceps != 0] df = df[df.insulin != 0] df = df[df.bmi != 0] # + # %matplotlib inline # library & dataset import seaborn as sns import matplotlib.pyplot as plt # Basic correlogram cols = list(df.columns) cols.remove('class') sns.pairplot(df, hue="class", vars=cols) # + import base64 import os import matplotlib.pyplot as plt import numpy as np import pandas as pd import requests from sklearn import preprocessing # Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue) def encode_text_dummy(df, name): dummies = pd.get_dummies(df[name]) for x in dummies.columns: dummy_name = f"{name}-{x}" df[dummy_name] = dummies[x] df.drop(name, axis=1, inplace=True) # Encode text values to a single dummy variable. 
The new columns (which do not replace the old) will have a 1 # at every location where the original column (name) matches each of the target_values. One column is added for # each target value. def encode_text_single_dummy(df, name, target_values): for tv in target_values: l = list(df[name].astype(str)) l = [1 if str(x) == str(tv) else 0 for x in l] name2 = f"{name}-{tv}" df[name2] = l # Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue). def encode_text_index(df, name): le = preprocessing.LabelEncoder() df[name] = le.fit_transform(df[name]) return le.classes_ # Encode a numeric column as zscores def encode_numeric_zscore(df, name, mean=None, sd=None): if mean is None: mean = df[name].mean() if sd is None: sd = df[name].std() df[name] = (df[name] - mean) / sd # Convert all missing values in the specified column to the median def missing_median(df, name): med = df[name].median() df[name] = df[name].fillna(med) # Convert all missing values in the specified column to the default def missing_default(df, name, default_value): df[name] = df[name].fillna(default_value) # Convert a Pandas dataframe to the x,y inputs that TensorFlow needs def to_xy(df, target): result = [] for x in df.columns: if x != target: result.append(x) # find out the type of the target column. Is it really this hard? :( target_type = df[target].dtypes target_type = target_type[0] if hasattr( target_type, '__iter__') else target_type # Encode to int for classification, float otherwise. TensorFlow likes 32 bits. if target_type in (np.int64, np.int32): # Classification dummies = pd.get_dummies(df[target]) return df[result].values.astype(np.float32), dummies.values.astype(np.float32) # Regression return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32) # Nicely formatted time string def hms_string(sec_elapsed): h = int(sec_elapsed / (60 * 60)) m = int((sec_elapsed % (60 * 60)) / 60) s = sec_elapsed % 60 return f"{h}:{m:>02}:{s:>05.2f}" # Regression chart. 
def chart_regression(pred, y, sort=True):
    """Plot predicted vs. expected regression values.

    pred -- 1-D array-like of model predictions.
    y    -- array-like of ground-truth values; flattened before plotting.
    sort -- when True (default), sort both series by the true value so the
            expected curve is monotone and deviations are easy to see.
    Displays a matplotlib figure; returns None.
    """
    t = pd.DataFrame({'pred': pred, 'y': y.flatten()})
    if sort:
        t.sort_values(by=['y'], inplace=True)
    plt.plot(t['y'].tolist(), label='expected')
    plt.plot(t['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()


# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
    """Drop rows of *df* in place where df[name] lies at least *sd*
    standard deviations (sample std, ddof=1) from the column mean."""
    drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]
    df.drop(drop_rows, axis=0, inplace=True)


# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
                         data_low=None, data_high=None):
    """Min-max scale df[name] in place onto [normalized_low, normalized_high].

    data_low / data_high default to the column's min / max and are now
    computed independently.  Bug fix: the original only computed data_high
    when data_low was None, so passing data_low alone left data_high as
    None and raised a TypeError in the arithmetic below.
    Note: a constant column (data_high == data_low) still divides by zero,
    matching the original behavior.
    """
    if data_low is None:
        data_low = min(df[name])
    if data_high is None:
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low


# This function submits an assignment. You can submit an assignment as much
# as you like, only the final submission counts. The parameters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number, should be 1 through 1.
# source_file - The full path to your Python or IPYNB file. This must have
#   "_class1" as part of its name.
# . The number must match your assignment number. For example "_class2" for class assignment #2.
def submit(data,key,no,source_file=None): if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.') if source_file is None: source_file = __file__ suffix = '_class{}'.format(no) if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix)) with open(source_file, "rb") as image_file: encoded_python = base64.b64encode(image_file.read()).decode('ascii') ext = os.path.splitext(source_file)[-1].lower() if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext)) r = requests.post("https://api.heatonresearch.com/assignment-submit", headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"), 'assignment': no, 'ext':ext, 'py':encoded_python}) if r.status_code == 200: print("Success: {}".format(r.text)) else: print("Failure: {}".format(r.text)) # + import pandas as pd import io import requests import numpy as np import os from sklearn.model_selection import train_test_split from sklearn import metrics from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Activation from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.callbacks import ModelCheckpoint x,y = to_xy(df,"class") # Split into train/test x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.25, random_state=42) model = Sequential() model.add(Dense(50, input_dim=x.shape[1], activation='relu')) model.add(Dense(25,activation='relu')) model.add(Dense(y.shape[1],activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=50, restore_best_weights=True, verbose=1, mode='auto') model.fit(x_train, y_train,validation_data=(x_test,y_test),callbacks=[monitor],verbose=2,epochs=1000) # + import numpy as np import matplotlib.pyplot as plt from itertools import cycle 
from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.model_selection import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from scipy import interp y_score = model.predict(x_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(2): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) # - plt.figure() lw = 2 plt.plot(fpr[1], tpr[1], color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1]) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Diabetes ROC') plt.legend(loc="lower right") plt.show() # + from sklearn import metrics import scipy as sp import numpy as np import math from sklearn import metrics def perturbation_rank(model, x, y, names, regression): errors = [] for i in range(x.shape[1]): hold = np.array(x[:, i]) np.random.shuffle(x[:, i]) if regression: pred = model.predict(x) error = metrics.mean_squared_error(y, pred) else: pred = model.predict_proba(x) error = metrics.log_loss(y, pred) errors.append(error) x[:, i] = hold max_error = np.max(errors) importance = [e/max_error for e in errors] data = {'name':names,'error':errors,'importance':importance} result = pd.DataFrame(data, columns = ['name','error','importance']) result.sort_values(by=['importance'], ascending=[0], inplace=True) result.reset_index(inplace=True, drop=True) return result # + # Rank the features from IPython.display import display, HTML names = list(df.columns) # x+y column names names.remove("class") # remove the target(y) rank = perturbation_rank(model, x_test, y_test, 
names, False) display(rank) # - cols = list(df.columns) cols.remove('class') # + import itertools s = [[df[col].max(), df[col].min()] for col in cols] outer = list(itertools.product(*s)) outer = pd.DataFrame(outer, columns = cols) outerMatrix = outer.values # - list(zip(s,df.columns)) s shuffle = outer.reindex(np.random.permutation(outer.index)) shuffle.reset_index(inplace=True) #shuffle.drop('index',inplace=True,axis=0) shuffle[0:10] cv = df[cols].cov() invCV = sp.linalg.inv(cv) invCV # + dfMatrix = df[cols].values def maxDistance(a,dfMatrix,invCV): result = None for itm in dfMatrix: d = distance.mahalanobis(a,itm,invCV) if not result: result = d else: result = max(d,result) return result outer['dist'] = [maxDistance(itm,dfMatrix,invCV) for itm in outerMatrix] # - outer.sort_values(by=['dist'],ascending=False,inplace=True) outer
enough_data/howMuchData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt raw_data = pd.read_csv('car data.csv') raw_data raw_data.corr() # * Linear regression between present and selling price # + from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split X = raw_data['Present_Price'].values y = raw_data['Selling_Price'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) linear_model = LinearRegression() linear_model.fit(X = np.reshape(a = X_train,newshape = (len(X_train), 1)), y = np.reshape(a = y_train,newshape = (len(y_train), 1))) y_predicted_test = linear_model.predict(X = np.reshape(a = X_test,newshape = (len(X_test), 1))) plt.scatter(x = X_test, y = y_test) plt.xlabel(raw_data.columns[3]) plt.ylabel(raw_data.columns[2]) plt.plot(X_test, y_predicted_test, 'r') plt.title("Linear Regression for test data") plt.show() # y_predict = linear_model.predict(X = X) # plt.scatter(x = X, y = y) # plt.xlabel(raw_data.columns[0]) # plt.ylabel(raw_data.columns[1]) # plt.plot(X, y_predict, 'r') # plt.title("Linear Regression for Entire data set") # plt.show() # accuracy score on test data a_1 = linear_model.score(X = np.reshape(a = X_test,newshape = (len(X_test), 1)), y = np.reshape(a = y_test,newshape = (len(y_test), 1))) b = a_1 * 100 print('Equation of best fit line is: y = {} x'.format(linear_model.coef_), ' + {}'.format(linear_model.intercept_)) print() print('Accuracy of Linear regression model on test data is: ', round(b,2)) # - final_dataset = raw_data.drop(labels=raw_data.columns[0], axis=1) final_dataset final_dataset['no_year'] = 2020 - final_dataset['Year'] final_dataset = final_dataset.drop(labels='Year', axis=1) final_dataset for i in range(-1,-6,-1): 
print('{}'.format(final_dataset.columns[i]),(pd.unique(final_dataset.iloc[:,i]))) for i in range(-1,-6,-1): ((final_dataset[final_dataset.columns[i]].value_counts()).plot(kind='barh')) plt.title(final_dataset.columns[i]) plt.show() final_dataset = pd.get_dummies(data=final_dataset, drop_first=True) final_dataset final_dataset.corr() # + import seaborn as sns sns.pairplot(final_dataset) # + # create a heatmap corrmat = final_dataset.corr() top_corr_features = corrmat.index plt.figure(figsize=(15,15)) g = sns.heatmap(final_dataset[top_corr_features].corr(), annot = True, cmap = 'RdYlGn') # - # features and labels X = final_dataset.iloc[:,1:] y = final_dataset.iloc[:,0:1] # + # feature importance from sklearn.ensemble import ExtraTreesRegressor model = ExtraTreesRegressor() model.fit(X = X, y = y) # + feature_importance = model.feature_importances_ for i in range(len(X.columns)): print('Feature importance of {} is: '.format(X.columns[i]), round(feature_importance[i], 2)) # - plt.figure(figsize=(15,10)) plt.bar(x = X.columns, height= feature_importance, width=0.5) plt.xticks(rotation=90,fontsize = 15) plt.yticks(fontsize =15) plt.xlabel('Features', fontsize = 18) plt.ylabel('Importance', fontsize = 18) plt.title('Importance of features', fontsize=30) plt.show() # + # plot graph of feature importances for better visualization feat_importances = pd.Series(model.feature_importances_, index = X.columns) feat_importances.nlargest(5).plot(kind = 'barh') plt.show() # - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # + from sklearn.ensemble import RandomForestRegressor rf_random = RandomForestRegressor() # - # * Below are the parameters inside the random forest regressor, which are used for hyper parameter tuning; # # * n_estimators='warn', # * criterion='mse', # * max_depth=None, # * min_samples_split=2, # * min_samples_leaf=1, # * min_weight_fraction_leaf=0.0, # * max_features='auto', # * max_leaf_nodes=None, # * 
min_impurity_decrease=0.0, # * min_impurity_split=None, # * bootstrap=True, # * oob_score=False, # * n_jobs=None, # * random_state=None, # * verbose=0, # * cwarm_start=False, # + ### HYPERPARAMETERS # number of trees in random forest n_estimators = [int(x) for x in np.linspace(start = 100, stop = 1200, num = 12)] # number of features to consider at every split max_features = ['auto', 'sqrt'] # maximum number of levels in tree max_depth = [int(x) for x in np.linspace(start = 5, stop = 30, num = 6)] # minimum number of samples required to split a node min_samples_split = [2,5,10,15,100] # minimum number of samples required at each leaf node min_samples_leaf = [1,2,5,10] # + from sklearn.model_selection import RandomizedSearchCV # this will help us to find the best parameters # + # create the random grid random_grid = { 'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf } random_grid # - rf = RandomForestRegressor() rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, scoring = 'neg_mean_squared_error', n_iter = 10, cv = 5, verbose = 2, random_state = 42, n_jobs = 1) rf_random.fit(X = X_train, y = y_train) predict_result = rf_random.predict(X = X_test) plt.scatter(x = list(range(len(predict_result))), y = y_test) plt.scatter(x = list(range(len(predict_result))), y = predict_result) plt.legend(['original', 'predicted']) plt.show() sns.distplot(a = y_test.values - predict_result) plt.scatter(x = y_test, y = predict_result) # + import pickle # open a file, where you ant to store the data file = open('random_forest_regression_model.pkl', 'wb') # dump information to that file pickle.dump(obj = rf_random, file = file) # -
random_forest_and_linear_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Classes: extending the Python environment # ----------------------------------------- # # * Introduction # * Methods # * Making instances # * Adding methods # * Multiple instances # * Inheritance # * Child class methods # * Overriding parent class methods # * Storing classes in modules # # Introduction # ------------ dogs = ['maru', 'phoebe'] dir(dogs) # + # "method" - a function called on an object instance # - list type(list) dogs dogs dogs.sort() dogs.reverse() dogs dogs[0] dogs # + class Dog(): """Modelling a dog""" def __init__(self, name): print(f"This is self: {self}") self.my_name = name phoebe = Dog(name='Phoebe') print(f"This is phoebe: {phoebe}") # - Dog # Classes are a solution to this problem: dogs # + def speak(dog_name): print(f"Woof, I'm {dog_name}") for dog in dogs: speak(dog) # - phoebe phoebe.name phoebe = Dog(name='Phoebe') phoebe.my_name maru = {'name': 'Maru'} maru['name'] # Methods # ------- # + class Dog(): """Modelling a dog""" def __init__(self, name): self.my_name = name def speak(self): print(f"Woof, I'm {self.my_name}") maru_the_dog = Dog(name='Maru') maru_the_dog.speak() # - maru maru_the_dog speak maru_the_dog.speak Dog.speak # Multiple instances # ------------------ # + class Dog(): """Modelling a dog""" def __init__(self, name): self.my_name = name def __repr__(self): if '1' in self.my_name: return "Not telling you!" else: return self.my_name def speak(self): if self.do_you_shed(): extra = "and yeah, I shed" else: extra = "and I'm non-shedding!" 
print(f"Woof, I'm {self.my_name} {extra}") def is_better_than(self, other_dog): return self.my_name < other_dog.my_name def do_you_shed(self): return True one_hundred_dogs = [Dog(name=f'Dog #{i}') for i in range(100)] print(one_hundred_dogs) # - one_hundred_dogs[0].is_better_than(other_dog=one_hundred_dogs[-1]) # + first_dog = one_hundred_dogs[0] second_dog = one_hundred_dogs[1] print('Here they are:') print(first_dog) print(second_dog) # - Dog('x') got_it = _ got_it type(got_it) # + class Poodle(Dog): def do_you_shed(self): return False fifi = Poodle(name='Fifi') # - fifi.my_name fifi.is_better_than(first_dog) first_dog.do_you_shed() fifi.do_you_shed() d = Dog('d') d.do_you_shed() # + class MiniaturePoodle(Poodle): pass gigi = MiniaturePoodle('Gigi') # - print(gigi) type(gigi) gigi.is_better_than(fifi) # + # How class inheritance can get crazy: mutt = Dog('Charley') mutt.speak() # - phoebe = Poodle('Phoebe') phoebe.speak() # + class CircusPoodle(MiniaturePoodle): def __init__(self, name, how_many_tricks_i_know): super().__init__(name) self.__tricks_count = how_many_tricks_i_know majesto = CircusPoodle('Mr. Majesto', 4) # - majesto majesto.speak() majesto.tricks_count majesto.__tricks_count
april-2019/6-classes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/alivemachine/frankmocap/blob/master/vibe_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="9QmY4dDYpmfB" # # VIBE: Video Inference for Human Body Pose and Shape Estimation # # Demo of the original PyTorch based implementation provided here: https://github.com/mkocabas/VIBE # # ## Note # Before running this notebook make sure that your runtime type is 'Python 3 with GPU acceleration'. Go to Edit > Notebook settings > Hardware Accelerator > Select "GPU". # # ## More Info # - Paper: https://arxiv.org/abs/1912.05656 # - Repo: https://github.com/mkocabas/VIBE # + id="Tvd4cfPk5a0e" # Clone the repo # !git clone https://github.com/mkocabas/VIBE.git # + id="Sui0UeZR5vCy" # %cd VIBE/ # + id="Rs6UTvVO6Fxf" # Install the other requirements # !pip install torch==1.4.0 numpy==1.17.5 # !pip install git+https://github.com/giacaglia/pytube.git --upgrade # !pip install -r requirements.txt # + id="brZt0q3Y6X5W" # Download pretrained weights and SMPL data # !source scripts/prepare_data.sh # + [markdown] id="J7A7eakXIn9A" # ### Run the demo code. # # Check https://github.com/mkocabas/VIBE/blob/master/doc/demo.md for more details about demo. # # **Note:** Final rendering is slow compared to inference. We use pyrender with GPU accelaration and it takes 2-3 FPS per image. Please let us know if you know any faster alternative. # + id="qVNszfLQ7rC9" # Run the demo # !python demo.py --vid_file sample_video.mp4 --output_folder output/ --sideview # You may use --sideview flag to enable from a different viewpoint, note that this doubles rendering time. 
# # !python demo.py --vid_file sample_video.mp4 --output_folder output/ --sideview # You may also run VIBE on a YouTube video by providing a link # python demo.py --vid_file https://www.youtube.com/watch?v=c4DAnQ6DtF8 --output_folder output/ --display # + id="j8zxBa_K-FJf" # Play the generated video from IPython.display import HTML from base64 import b64encode def video(path): mp4 = open(path,'rb').read() data_url = "data:video/mp4;base64," + b64encode(mp4).decode() return HTML('<video width=500 controls loop> <source src="%s" type="video/mp4"></video>' % data_url) video('output/sample_video/sample_video_vibe_result.mp4') # + id="FGcw0HzhtPxj" # Inspect the output file content import joblib output = joblib.load('output/sample_video/vibe_output.pkl') print('Track ids:', output.keys(), end='\n\n') print('VIBE output file content:', end='\n\n') for k,v in output[1].items(): if k != 'joints2d': print(k, v.shape)
vibe_demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="kIBdrsIpOpUS" # # Setting the environment for Colab # # + id="sPv4y2CsOjdu" executionInfo={"status": "ok", "timestamp": 1605398975678, "user_tz": 360, "elapsed": 21577, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="6c663b45-5a4a-4fce-b518-f59433af6620" colab={"base_uri": "https://localhost:8080/"} from google.colab import drive drive.mount('/content/drive') # + id="zfps5MhgOjQP" executionInfo={"status": "ok", "timestamp": 1605399013956, "user_tz": 360, "elapsed": 750, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="c20097ac-1e65-49e1-ff9e-9cb7c9763f6a" colab={"base_uri": "https://localhost:8080/"} # %cd "/content/drive/My Drive/Colab Notebooks/w266_final/project_re" # + id="crdK-8aDO9PH" executionInfo={"status": "ok", "timestamp": 1605399052415, "user_tz": 360, "elapsed": 6955, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="5b49598a-0cbe-45b1-e3d8-fc10a1704f2b" colab={"base_uri": "https://localhost:8080/"} # !pip install transformers # + id="66B6wWMJOiRz" import torch import pickle import numpy as np from sklearn.metrics import matthews_corrcoef, confusion_matrix from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.nn import CrossEntropyLoss, MSELoss from tqdm import tqdm_notebook, trange import os #from pytorch_pretrained_bert import BertForSequenceClassification, 
BertForTokenClassification #from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule from transformers import BertTokenizer, BertModel from transformers import BertForMaskedLM, BertForSequenceClassification, BertForTokenClassification from transformers.optimization import AdamW from transformers import AutoTokenizer, AutoModel from model.MedClinical import Biobert_fc from multiprocessing import Pool, cpu_count from util.tools import * from util import convert_examples_to_features # OPTIONAL: if you want to have more information on what's happening, activate the logger as follows import logging logging.basicConfig(level=logging.INFO) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = "cpu" # + id="W81QFQnxOiR6" DATA_DIR = "data_divided/" # Bert pre-trained model selected in the list: bert-base-uncased, # bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, # bert-base-multilingual-cased, bert-base-chinese. #BERT_MODEL = 'bert-base-uncased' BERT_MODEL = 'bionlp/bluebert_pubmed_mimic_uncased_L-12_H-768_A-12' # The name of the task to train.I'm going to name this 'yelp'. TASK_NAME = 'Relation Extraction' # The output directory where the fine-tuned model and checkpoints will be written. OUTPUT_DIR = f'outputs/{TASK_NAME}/' # The directory where the evaluation reports will be written to. REPORTS_DIR = f'reports/{TASK_NAME}_evaluation_report/' # This is where BERT will look for pre-trained models to load parameters from. CACHE_DIR = 'cache/' # The maximum total input sequence length after WordPiece tokenization. # Sequences longer than this will be truncated, and sequences shorter than this will be padded. 
MAX_SEQ_LENGTH = 128 TRAIN_BATCH_SIZE = 24 EVAL_BATCH_SIZE = 8 LEARNING_RATE = 1e-5 NUM_TRAIN_EPOCHS = 10 RANDOM_SEED = 42 GRADIENT_ACCUMULATION_STEPS = 1 WARMUP_PROPORTION = 0.1 CONFIG_NAME = "config.json" WEIGHTS_NAME = "pytorch_model.bin" # + id="B_Uh-b83OiR9" if os.path.exists(REPORTS_DIR) and os.listdir(REPORTS_DIR): REPORTS_DIR += f'/report_{len(os.listdir(REPORTS_DIR))}' os.makedirs(REPORTS_DIR) if not os.path.exists(REPORTS_DIR): os.makedirs(REPORTS_DIR) REPORTS_DIR += f'/report_{len(os.listdir(REPORTS_DIR))}' os.makedirs(REPORTS_DIR) # + id="3WcJzb8UOiSA" executionInfo={"status": "ok", "timestamp": 1605399309711, "user_tz": 360, "elapsed": 1542, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="1cdb1c1d-ee4e-40e9-d927-a4456725d59e" colab={"base_uri": "https://localhost:8080/", "height": 120, "referenced_widgets": ["497d5f39115d4a3f9cc88cb1ebdc5850", "989bc03c88864ab3ab080404876c5f47", "3fb60ea2748c45b0bea1cccd5e22e208", "26204cd8ff8d4393ac380cdc9e29120b", "558de407367c4f5ca207d5db23efd63d", "d26708ce9d584962ba0c8b76667b270e", "775c8913522e4cd3bc5d541bb9da6325", "1a0c8440e0c645cf90b138cafdeef144"]} # tokenizer = BertTokenizer.from_pretrained(OUTPUT_DIR + 'vocab.txt', do_lower_case=False) #tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False) tokenizer = AutoTokenizer.from_pretrained(BERT_MODEL, do_lower_case=False) # + id="ule10vhIOiSC" processor = MultiClassificationProcessor() eval_examples = processor.get_dev_examples(DATA_DIR) eval_examples_len = len(eval_examples) # + id="wrxdzMOXOiSF" executionInfo={"status": "ok", "timestamp": 1605399315470, "user_tz": 360, "elapsed": 409, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="112f526f-1213-4f92-ffd4-8a61b468d7df" 
colab={"base_uri": "https://localhost:8080/"} label_list = processor.get_labels() # [0, 1] for binary classification num_labels = len(label_list) num_labels # + id="8ibMsZoUOiSI" eval_examples_for_processing = [(example, MAX_SEQ_LENGTH, tokenizer) for example in eval_examples] # + id="b1M32yC8OiSK" executionInfo={"status": "ok", "timestamp": 1605399324226, "user_tz": 360, "elapsed": 4546, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="82bb371a-5f6a-4e05-e88a-634472c1c1d0" colab={"base_uri": "https://localhost:8080/", "height": 137, "referenced_widgets": ["3d82f01584fc49ac9e8357d669a805a4", "b0a63cad83ff4bbbbcf7d09eb8d78664", "b69f4418e33b4056baa1457c02d2ac61", "fa5f4935cd0c4024b986ea7b58faa9c4", "bd5e44db990148b99f8aaed6061b1af3", "f76fcf7b165e4d20be7856aefa8824af", "07c45ca556a8428c906280500a0ca8cc", "2eda2285359b4298a1f0820108920f0e"]} process_count = cpu_count() - 1 with Pool(process_count) as p: eval_features = list(tqdm_notebook(p.imap(convert_examples_to_features.convert_example_to_feature, eval_examples_for_processing), total=eval_examples_len)) # + id="w9DySReSOiSN" all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([int(f.label_id) for f in eval_features], dtype=torch.long) # + id="zZTKwcU2OiSP" eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE) # + id="gFusK5MsOiSR" executionInfo={"status": "ok", "timestamp": 1605399355699, "user_tz": 360, "elapsed": 24147, "user": {"displayName": "<NAME>", 
"photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="68374a7e-e487-4d17-a8ad-364c3d203e1b" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["7a036e917ad14430ae3e03824805aae4", "37119bc8c8584190a3a649454312154a", "d5200c2deebd4894a1acce4dfec8dedb", "1b6fb0160f104a7999bd2bf9cbce8208", "<KEY>", "<KEY>", "d74b9b65e1704231be834128db09e427", "a310333e0d6d48b9ac58e69570f6e2d9", "<KEY>", "<KEY>", "e93b3cc7b9684a32b2759511e941e40f", "0816133f22254a9d918faeed9d1bd391", "<KEY>", "<KEY>", "5a9233f5ee85471ab01c1cccc1abe1be", "<KEY>"]} ## model = BertForSequenceClassification.from_pretrained(CACHE_DIR + BERT_MODEL, cache_dir=CACHE_DIR, num_labels=len(label_list)) # model = BertForSequenceClassification.from_pretrained(BERT_MODEL, cache_dir=CACHE_DIR, num_labels=num_labels) # model = BertForSequenceClassification.from_pretrained(BERT_MODEL, cache_dir=CACHE_DIR, num_labels=num_labels) #model = Biobert_fc() # model = BertModel.from_pretrained((BERT_MODEL)) model = AutoModel.from_pretrained(BERT_MODEL) path = OUTPUT_DIR + 'pytorch_model.bin' model.load_state_dict(torch.load(path)) model.eval() # + id="paGg33AJOiSU" executionInfo={"status": "ok", "timestamp": 1605399374569, "user_tz": 360, "elapsed": 14779, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="22d592ef-fd45-4dc9-cb0b-e85ac626f045" colab={"base_uri": "https://localhost:8080/"} model.to(device) # + id="XzZMrQoYOiSW" executionInfo={"status": "ok", "timestamp": 1605399378427, "user_tz": 360, "elapsed": 1080, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="e3f505c0-8787-4d56-fcfc-32378fb95df1" colab={"base_uri": "https://localhost:8080/", 
"height": 137, "referenced_widgets": ["6ee897e260ac404e8b3b997499e8687d", "7d94f713951e4403bbae84cec06e67e7", "5eb51a588625402c987b7e765328a626", "1ec3f80ae70f42339da1ffab4cdb63ed", "<KEY>", "<KEY>", "<KEY>", "1d2d2e50da8f4105be3aa7c72ddfa3e9"]} model.eval() eval_loss = 0 nb_eval_steps = 0 preds = [] for input_ids, input_mask, segment_ids, label_ids in tqdm_notebook(eval_dataloader, desc="Evaluating"): input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) label_ids = label_ids.to(device) with torch.no_grad(): logits = model(input_ids, segment_ids, input_mask) # create eval loss and other metric required by the task loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if len(preds) == 0: preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append( preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = preds[0] preds = np.argmax(preds, axis=1) # + id="1hd2uoQvOiSa" executionInfo={"status": "ok", "timestamp": 1605399383956, "user_tz": 360, "elapsed": 466, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="30cf7f55-963e-4f76-eecf-85546424e0fc" colab={"base_uri": "https://localhost:8080/"} len(preds), len(all_label_ids) # + id="hHQRp6hSOiSc" def get_eval_report(task_name, labels, preds): mcc = matthews_corrcoef(labels, preds) cm = confusion_matrix(labels, preds) return { "task": task_name, "mcc": mcc, "cm": cm } def compute_metrics(task_name, labels, preds): assert len(preds) == len(labels) return get_eval_report(task_name, labels, preds) # + id="8KRj3lj9OiSf" import json CONFIG_FOLDER = 'config/' id_label_file = 'id_2_label.json' with open(CONFIG_FOLDER + id_label_file) as infile: id2label = json.load(infile) # + id="Cc-og928OiSh" 
executionInfo={"status": "ok", "timestamp": 1605399392207, "user_tz": 360, "elapsed": 709, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="95a91065-3c9f-4687-8008-fe7d43fcf028" colab={"base_uri": "https://localhost:8080/"} preds_labels = [id2label[str(p)] for p in preds] all_labels = [id2label[str(l)] for l in all_label_ids.numpy()] mcc = matthews_corrcoef(all_labels, preds_labels) print('Correlation Coefficient is ', mcc) mismatches = [] all_rels = [] for row in range(len(all_labels)): all_rels.append([all_labels[row], preds_labels[row]]) if preds_labels[row] != all_labels[row]: mismatches.append([all_labels[row], preds_labels[row]]) # + id="YPxFz1HCOiSn" executionInfo={"status": "ok", "timestamp": 1605399395623, "user_tz": 360, "elapsed": 1118, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="eb1f41b5-a4d4-4de4-cbf4-e62ca76f05e1" colab={"base_uri": "https://localhost:8080/", "height": 708} # %matplotlib inline from sklearn.metrics import plot_confusion_matrix import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sn df = pd.DataFrame(all_rels, columns = ['labels', 'predicted']) # df.head(10) plt.figure(figsize=(24,14)) plt.title(" all relationships") confusion_matrix = pd.crosstab(df['labels'], df['predicted'], rownames=['Actual'], colnames=['Predicted']) sn.heatmap(confusion_matrix, annot=True) plt.show() # + id="1XoEtQyNOiSp" executionInfo={"status": "ok", "timestamp": 1605399406484, "user_tz": 360, "elapsed": 1023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="793029e8-bcea-4008-bea6-093a20231043" colab={"base_uri": 
"https://localhost:8080/", "height": 419} df # + id="_L3O3YlgOiSr" executionInfo={"status": "ok", "timestamp": 1605399412423, "user_tz": 360, "elapsed": 413, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="7adb9f3a-791a-420d-bfe6-386e9b84d0cd" colab={"base_uri": "https://localhost:8080/"} from sklearn import metrics metrics.f1_score(df["labels"], df["predicted"], average='micro') # + id="orR6v4BZOiSu" executionInfo={"status": "ok", "timestamp": 1605399415905, "user_tz": 360, "elapsed": 540, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="794be04c-1627-45b1-b972-f99a812be30a" colab={"base_uri": "https://localhost:8080/", "height": 421} df["matched"] = df["labels"] == df["predicted"] # df["nomatch"] = df["labels"] != df["predicted"] df.groupby(["labels", "matched"]).count() # + id="gkPgmyi0OiSw" executionInfo={"status": "ok", "timestamp": 1605399597651, "user_tz": 360, "elapsed": 606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEI3USGoSv9o6JShJGhR2V47o7KYZh-Ya1FgZ6=s64", "userId": "13066601150058162597"}} outputId="121a96ce-fa54-429e-c7f3-889ca6ab0d4b" colab={"base_uri": "https://localhost:8080/"} from sklearn.metrics import classification_report print(classification_report(df["labels"], df["predicted"])) # + id="raOQxsFTRc-x"
project_re/val_experiments/MIMIC_BERT_RE_n2c2_1k_eval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # unsupervised learning soal nomor 1
# ## Nama: <NAME>
# ## Nim: 170411100090
# ## Kelas: DSI UTM

# +
import pandas as pd
from pandas import read_csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
# -

df = read_csv("../dataset.csv", index_col=0)

df.head()

df.info()

np.sum(df.isnull())
# no null values in the data

# # Preprocessing the dataset

# +
# Scale every feature into the range [0, 10].
minmax_form = preprocessing.MinMaxScaler(feature_range=(0, 10))
normalized_data = minmax_form.fit_transform(df)
normalized_data

# +
# Elbow sweep with the K-means algorithm on the raw (not yet
# normalized) data: record the within-cluster variance for k = 1..10.
sum_of_squared_distances = {}
range_n_clusters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

for n_cluster in range_n_clusters:
    model = KMeans(n_clusters=n_cluster, init='random', n_init=10, random_state=0)
    # training step
    model.fit(df)
    sum_of_squared_distances[n_cluster] = model.inertia_
# -

sum_of_squared_distances

# +
# Elbow chart for the raw data
plt.figure(figsize=(8, 8))
plt.title("Elbow Method for Optimal K")
plt.xlabel("Number of k Cluster")
plt.ylabel("Sum of Within-Cluster Variance")
plt.xticks(range_n_clusters)
plt.plot(range_n_clusters, sum_of_squared_distances.values(), '-o', color="Blue")
plt.show()

# +
# Same sweep on the normalized data
sum_of_squared_distances = {}
range_n_clusters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

for n_cluster in range_n_clusters:
    model = KMeans(n_clusters=n_cluster, init='random', n_init=10, random_state=0)
    # training step
    model.fit(normalized_data)
    sum_of_squared_distances[n_cluster] = model.inertia_
# -

sum_of_squared_distances

# +
# Visualize the elbow chart again
plt.figure(figsize=(8, 6))
plt.title("Elbow Method for Optimal K")
plt.xlabel("Number of k Cluster")
plt.ylabel("Sum of Within-Cluster Variance")
plt.xticks(range_n_clusters)
plt.plot(range_n_clusters, sum_of_squared_distances.values(), '-o', color="Blue")
plt.show()
# -

# # Next, evaluate with the silhouette score

# +
# Normalized data
sil = {}
range_n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]

for n_cluster in range_n_clusters:
    clusterer = KMeans(n_clusters=n_cluster, init='random', n_init=10, random_state=0)
    preds = clusterer.fit_predict(normalized_data)
    # BUGFIX: score the same matrix the clusterer was fitted on; the
    # original scored the raw ``df`` against labels derived from the
    # normalized data, which made the two sweeps incomparable.
    score = silhouette_score(normalized_data, preds, metric="euclidean")
    sil[n_cluster] = score
# -

for n_cluster, sil_score in sil.items():
    print(n_cluster, ":", sil_score)

# +
# Visualize the scores
plt.figure(figsize=(8, 6))
plt.title("Choosing Optimal Cluster")
plt.xlabel("Number of k Cluster")
plt.ylabel("Silhouette")
plt.xticks(range_n_clusters)
plt.plot(range_n_clusters, sil.values(), '-o', color="Blue")
plt.show()

# +
# Raw (un-normalized) data
sil = {}
range_n_clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]

for n_cluster in range_n_clusters:
    clusterer = KMeans(n_clusters=n_cluster, init='random', n_init=10, random_state=0)
    preds = clusterer.fit_predict(df)
    score = silhouette_score(df, preds, metric="euclidean")
    sil[n_cluster] = score
# -

for n_cluster, sil_score in sil.items():
    print(n_cluster, ":", sil_score)

# +
plt.figure(figsize=(8, 6))
plt.title("Choosing Optimal Cluster")
plt.xlabel("Number of k Cluster")
plt.ylabel("Silhouette")
plt.xticks(range_n_clusters)
plt.plot(range_n_clusters, sil.values(), '-o', color="Blue")
plt.show()
# -

# # Clustering stage after the data went through preprocessing

# +
# Final model with the k chosen from the charts above.
# NOTE(review): this fits the *raw* df although the section title says
# "after preprocessing" — kept as in the original; confirm intent.
kmins = KMeans(n_clusters=5, init='random', n_init=10, random_state=0)
kmins.fit(df)
y_kmins = kmins.predict(df)
# -

kmins_df = pd.DataFrame(y_kmins, columns=['cluster'])
kmins_df

df['segmen'] = y_kmins
df

df['segmen'].value_counts().plot(kind='bar')

df

df.to_csv(r'export_dataframe2.csv', index=False, header=True)
Pertemuan 12 - Projek 3/Jawaban terkumpul/Mochamad Dani Hartanto/soal-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook preamble: analysis imports and pandas display configuration.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# some pandas settings: widen notebook output so large frames stay readable
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 50)
pd.set_option('display.width', 1024)
# BUGFIX: the bare 'precision' alias was deprecated in pandas 1.0 and later
# removed; the fully-qualified key works on every pandas version.
pd.set_option('display.precision', 5)
pd.set_option('display.notebook_repr_html', True)
Online Shoppers Purchasing Intentions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.1 64-bit
#     language: python
#     name: python38164bit9887c53e05b84e05a93bb75b31561849
# ---

# +
from typing import Iterable, Union
from random import random, randint


class skipnode:
    """A single node of a skip list.

    ``nxt[level]`` is the next node at that level; a level's chain is
    terminated by ``None``.
    """

    __slots__ = ("value", "nxt", "_down")

    def __init__(self, value=None, height=1):
        self.value = value
        self.nxt = [None] * height

    def __repr__(self):
        return f"{self.__class__.__qualname__}(value={self.value})"

    def __str__(self):
        return repr(self)

    def __next__(self):
        # Advance along the bottom (fully linked) level.
        return self.nxt[0]


class skiplist:
    """
    randomization at work. optimizing for kn^(1/ k).
    dT(n, k)/dk = 0  =>  k = ln(n)
    O(lg n) expected updates and queries w.h.p

    maintains a dynamic set of elements (duplicates allowed).
    methods: insert, access (search), delete, successor, predecessor
    """

    __slots__ = ("header", "count", "nil", "height", "maximum")

    MAXHEIGHT = 32
    INF = (1 << 63) - 1  # sentinel value held by the header node

    def __init__(self, items: Iterable = ()):
        self.count = 0
        self.header = self.nil = skipnode(self.INF, self.MAXHEIGHT)
        self.height = 1
        # BUGFIX: ``maximum`` must exist *before* the initial inserts run
        # (insert() reads and updates it); the original set it to None
        # afterwards, wiping out the maximum of the seed items.  The seed
        # iterable also has to be unpacked — insert() takes *values.
        self.maximum = None
        if items:
            self.insert(*items)

    @staticmethod
    def gen_height():
        """Geometric(1/2) height via repeated coin flips (reference version)."""
        c = 1
        while random() < 0.5 and c < skiplist.MAXHEIGHT:
            c += 1
        return c

    @staticmethod
    def genheight():
        """Geometric(1/2) height from the lowest set bit of one random word."""
        x = int(random() * 0xffffffffff) & ((1 << skiplist.MAXHEIGHT) - 1)
        return (x & -x).bit_length()

    @property
    def isempty(self) -> bool:
        return self.count == 0

    def access(self, needle) -> Union[skipnode, None]:
        """Return the node holding ``needle``, else its predecessor node
        (possibly the header when ``needle`` precedes everything)."""
        s = self.header
        for level in reversed(range(self.height)):
            while s.nxt[level] is not None and s.nxt[level].value <= needle:
                s = s.nxt[level]
            if s.value == needle:
                return s
        return s

    def insert(self, *values) -> None:
        """Insert each value; each new node gets an independent random height."""
        for value in values:
            h, H = skiplist.genheight(), self.height
            self.height = h if h > H else H
            elt = skipnode(value, h)
            s = self.header
            # Descend through the levels above the new node's height...
            for level in reversed(range(h, self.height)):
                while s.nxt[level] and s.nxt[level].value < value:
                    s = s.nxt[level]
            # ...then splice the node into every level it participates in.
            for level in reversed(range(h)):
                while s.nxt[level] and s.nxt[level].value < value:
                    s = s.nxt[level]
                elt.nxt[level] = s.nxt[level]
                s.nxt[level] = elt
            self.count += 1
            # BUGFIX: compare against None explicitly — the original's
            # ``not self.maximum`` mishandled a stored maximum of 0.
            if self.maximum is None or value > self.maximum:
                self.maximum = value

    @property
    def minimum(self):
        """Smallest stored value, or None when empty.

        BUGFIX: the original signature was missing ``self`` and indexed
        ``header[0].nxt`` instead of ``header.nxt[0]``.
        """
        return None if self.isempty else self.header.nxt[0].value

    def delete(self, value) -> bool:
        """Remove one occurrence of ``value``; return whether it was found."""
        target = s = self.header
        for level in reversed(range(self.height)):
            while target.nxt[level] and target.nxt[level].value < value:
                target = target.nxt[level]
        target = target.nxt[0]
        if not target or target.value != value:
            return False
        # Unlink the found node from every level it appears in.
        for level in reversed(range(self.height)):
            while s.nxt[level] and s.nxt[level].value < value:
                s = s.nxt[level]
            if s.nxt[level] == target:
                s.nxt[level] = target.nxt[level]
        self.count -= 1
        if value == self.maximum:
            self.maximum = self._calc_max()
        return True

    def _calc_max(self):
        """Recompute the maximum by walking the bottom level.

        BUGFIX: the original lacked ``self``, used a nonexistent ``next``
        attribute, and returned a node where a *value* is expected.
        """
        s = self.header.nxt[0]
        if s is None:
            return None
        while s.nxt[0]:
            s = s.nxt[0]
        return s.value

    def successor(self, value) -> Union[skipnode, None]:
        """First node with value strictly greater than ``value``, or None."""
        p = self.predecessor(value)
        # BUGFIX: chains terminate in None, not in the header/nil node, so
        # guard against None before dereferencing ``.value`` (the original
        # raised AttributeError when ``value`` >= the stored maximum).
        while p.nxt[0] is not None and p.nxt[0].value <= value:
            p = p.nxt[0]
        return p.nxt[0]

    def predecessor(self, value) -> skipnode:
        """Last node with value strictly less than ``value`` (the header
        when none exists).  If duplicate values exist, duplicate is returned."""
        target = self.header
        for level in reversed(range(self.height)):
            while target.nxt[level] and target.nxt[level].value < value:
                target = target.nxt[level]
        return target

    def __contains__(self, item):
        # BUGFIX: compare against the probed value; the original compared
        # against the header sentinel, so membership was always False
        # (except for INF itself).
        return self.access(item).value == item

    def __len__(self):
        return self.count

    def __iter__(self):
        # Bottom level is fully linked and sorted: yields values in order.
        s = self.header.nxt[0]
        while s:
            yield s.value
            s = s.nxt[0]

    def __repr__(self):
        return f"{self.__class__.__qualname__}({str(self)})"

    def __str__(self):
        # BUGFIX: the original left a dangling ", " before the closing
        # bracket; join produces a clean rendering.  Duplicate second
        # definitions of __len__/__iter__/__repr__/__str__ were removed.
        return "[" + ", ".join(str(v) for v in self) + "]"
# -
skiplist/skiplist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import networkx as nx # + FW_Mangwet = nx.read_edgelist("datasets/FW-Mangwet", create_using=nx.DiGraph, data = (('weight', float),)) #FW_Everglades = nx.read_edgelist("datasets/FW-Everglades", create_using=nx.DiGraph, data = (('weight', float),)) #FW_Chesa = nx.read_edgelist("datasets/FW-Chesa", create_using=nx.DiGraph) FW_Baywet = nx.read_edgelist("datasets/FW-Baywet", create_using=nx.DiGraph, data = (('weight', float),)) FW_Baydry = nx.read_edgelist("datasets/FW-Baydry", create_using=nx.DiGraph, data = (('weight', float),)) FW_LittleRock = nx.read_edgelist("datasets/FW-LittleRock", create_using=nx.DiGraph) # Soc_Alpha = nx.read_edgelist("datasets/BTC-Alpha", delimiter = ',', create_using=nx.DiGraph, data=(('weight', int),('sec', int),)) # Soc_OTC = nx.read_edgelist("datasets/BTC-OTC", delimiter = ',', create_using=nx.DiGraph, data=(('weight', int),('sec', float),)) # Soc_Epinions = nx.read_edgelist("datasets/Soc_Epinions.csv", delimiter = ',', create_using=nx.DiGraph) # Soc_Advogato = nx.read_edgelist("datasets/Soc_Advogato.csv", delimiter = ',', create_using=nx.DiGraph, data=(('weight', float),)) # Cit_Cora = nx.read_edgelist("datasets/Cit-Cora", create_using=nx.DiGraph) # Cit_HepPh = nx.read_edgelist("datasets/Cit-HepPh", create_using=nx.DiGraph) # Cit_HepTh = nx.read_edgelist("datasets/Cit-HepTh", create_using=nx.DiGraph) # Cit_Citeseer = nx.read_edgelist("datasets/Cit-Citeseer", delimiter = ',', create_using=nx.DiGraph) # Web_Stanford = nx.read_edgelist("datasets/web-Stanford.txt", create_using=nx.DiGraph) # Web_NotreDame = nx.read_edgelist("datasets/web-NotreDame.txt", create_using=nx.DiGraph) # Web_BerkStan = nx.read_edgelist("datasets/web-BerkStan.txt", create_using=nx.DiGraph) # Web_Google = nx.read_edgelist("datasets/web-Google.txt", 
create_using=nx.DiGraph) # WordAdj_Spa = nx.read_edgelist("datasets/WordAdj_Spa.csv", delimiter = ',', create_using=nx.DiGraph) # WordAdj_Fr = nx.read_edgelist("datasets/WordAdj_Fr.csv", delimiter = ',', create_using=nx.DiGraph) # WordAdj_Jp = nx.read_edgelist("datasets/WordAdj_Jp.csv", delimiter = ',', create_using=nx.DiGraph) # WordAdj_Drw = nx.read_edgelist("datasets/WordAdj_Drw.csv", delimiter = ',', create_using=nx.DiGraph) # SW_Jdk = nx.read_edgelist("datasets/SW_Jdk.csv", delimiter = ',', create_using=nx.DiGraph) # SW_Jung = nx.read_edgelist("datasets/SW_Jung.csv", delimiter = ',', create_using=nx.DiGraph) # SW_Weka = nx.read_edgelist("datasets/SW_Weka", create_using=nx.DiGraph) # SW_Lucene = nx.read_edgelist("datasets/SW_Lucene", create_using=nx.DiGraph, data=(('weight', str),)) # - nx.info(FW_Mangwet) # + #nx.link_pred_directed_network(FW_Chesa, "link_pred_results/FW-Chesa") nx.link_pred_directed_network(FW_Mangwet, "link_pred_results/FW-Mangwet") nx.link_pred_directed_network(FW_Baydry, "link_pred_results/FW-BayDry") nx.link_pred_directed_network(FW_Baywet, "link_pred_results/FW-BayWet") nx.link_pred_directed_network(FW_LittleRock, "link_pred_results/FW-LittleRock") nx.link_pred_directed_network(Soc_Alpha, "link_pred_results/Soc-Alpha", repeat=1) nx.link_pred_directed_network(Soc_OTC, "link_pred_results/Soc-OTC", repeat=1) # - nx.link_pred_directed_network(Soc_Epinions, "link_pred_results/Soc_Epinions") nx.link_pred_directed_network(Soc_Advogato, "link_pred_results/Soc_Advogato")
experiment_for_di-clo/link_prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- # # Image Segmentation # # #### Task # * GIANA dataset으로 위내시경 이미지에서 용종을 segmentation 해보자 # * Image size: 256으로 변경하여 수행 (baseline code는 `image_size`: 64) # * 밑에 제시된 여러가지 시도를 해보자 # * This code is borrowed from [TensorFlow tutorials/Image Segmentation](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb) which is made of `tf.keras.layers` and `tf.enable_eager_execution()`. # * You can see the detail description [tutorial link](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb) # # #### Dataset # * I use below dataset instead of [carvana-image-masking-challenge dataset](https://www.kaggle.com/c/carvana-image-masking-challenge/rules) in TensorFlow Tutorials which is a kaggle competition dataset. 
# * carvana-image-masking-challenge dataset: Too large dataset (14GB)
# * [Gastrointestinal Image ANAlysis Challenges (GIANA)](https://giana.grand-challenge.org) Dataset (345MB)
# * Train data: 300 images with RGB channels (bmp format)
# * Train labels: 300 images with 1 channel (bmp format)
# * Image size: 574 x 500
tensorflow as tf tf.enable_eager_execution() from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models tf.logging.set_verbosity(tf.logging.INFO) os.environ["CUDA_VISIBLE_DEVICES"]="0" # - # ## Get all the files # Since this tutorial will be using a dataset from [Giana Dataset](https://giana.grand-challenge.org/Dates/). # + # Unfortunately you cannot downlaod GIANA dataset from website # So I upload zip file on my dropbox # if you want to download from my dropbox uncomment below # if use_colab: # DATASET_PATH='./gdrive/My Drive/datasets' # else: # DATASET_PATH='../../datasets' # # !wget https://goo.gl/mxikqa # # !mv mxikqa sd_train.zip # # !unzip sd_train.zip # if not os.path.isdir(DATASET_PATH): # os.makedirs(DATASET_PATH) # shutil.move(os.path.join('sd_train'), os.path.join(DATASET_PATH)) # # !rm sd_train.zip # - # ## Copy dataset to Google drvie if use_colab: dataset_dir = './gdrive/My Drive/datasets/sd_train' else: dataset_dir = '../../datasets/sd_train' img_dir = os.path.join(dataset_dir, "train") label_dir = os.path.join(dataset_dir, "train_labels") x_train_filenames = [os.path.join(img_dir, filename) for filename in os.listdir(img_dir)] x_train_filenames.sort() y_train_filenames = [os.path.join(label_dir, filename) for filename in os.listdir(label_dir)] y_train_filenames.sort() x_train_filenames, x_test_filenames, y_train_filenames, y_test_filenames = \ train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=219) # + num_train_examples = len(x_train_filenames) num_test_examples = len(x_test_filenames) print("Number of training examples: {}".format(num_train_examples)) print("Number of test examples: {}".format(num_test_examples)) # - # ## Visualize # Let's take a look at some of the examples of different images in our dataset. 
# + display_num = 5 r_choices = np.random.choice(num_train_examples, display_num) plt.figure(figsize=(10, 15)) for i in range(0, display_num * 2, 2): img_num = r_choices[i // 2] x_pathname = x_train_filenames[img_num] y_pathname = y_train_filenames[img_num] plt.subplot(display_num, 2, i + 1) plt.imshow(Image.open(x_pathname)) plt.title("Original Image") example_labels = Image.open(y_pathname) label_vals = np.unique(example_labels) plt.subplot(display_num, 2, i + 2) plt.imshow(example_labels) plt.title("Masked Image") plt.suptitle("Examples of Images and their Masks") plt.show() # - # ## Set up # Let’s begin by setting up some parameters. We’ll standardize and resize all the shapes of the images. We’ll also set up some training parameters: # + # Set hyperparameters image_size = 64 img_shape = (image_size, image_size, 3) batch_size = 8 max_epochs = 2 print_steps = 10 save_epochs = 1 if use_colab: train_dir='./gdrive/My Drive/train_ckpt/segmentation/exp1' if not os.path.isdir(train_dir): os.makedirs(train_dir) else: train_dir = 'train/exp1' # - # ## Build our input pipeline with `tf.data` # Since we begin with filenames, we will need to build a robust and scalable data pipeline that will play nicely with our model. If you are unfamiliar with **tf.data** you should check out my other tutorial introducing the concept! # # ### Our input pipeline will consist of the following steps: # 1. Read the bytes of the file in from the filename - for both the image and the label. Recall that our labels are actually images with each pixel annotated as car or background (1, 0). # 2. Decode the bytes into an image format # 3. Apply image transformations: (optional, according to input parameters) # * `resize` - Resize our images to a standard size (as determined by eda or computation/memory restrictions) # * The reason why this is optional is that U-Net is a fully convolutional network (e.g. with no fully connected units) and is thus not dependent on the input size. 
However, if you choose to not resize the images, you must use a batch size of 1, since you cannot batch variable image size together # * Alternatively, you could also bucket your images together and resize them per mini-batch to avoid resizing images as much, as resizing may affect your performance through interpolation, etc. # * `hue_delta` - Adjusts the hue of an RGB image by a random factor. This is only applied to the actual image (not our label image). The `hue_delta` must be in the interval `[0, 0.5]` # * `horizontal_flip` - flip the image horizontally along the central axis with a 0.5 probability. This transformation must be applied to both the label and the actual image. # * `width_shift_range` and `height_shift_range` are ranges (as a fraction of total width or height) within which to randomly translate the image either horizontally or vertically. This transformation must be applied to both the label and the actual image. # * `rescale` - rescale the image by a certain factor, e.g. 1/ 255. # 4. Shuffle the data, repeat the data (so we can iterate over it multiple times across epochs), batch the data, then prefetch a batch (for efficiency). # # It is important to note that these transformations that occur in your data pipeline must be symbolic transformations. # #### Why do we do these image transformations? # This is known as **data augmentation**. Data augmentation "increases" the amount of training data by augmenting them via a number of random transformations. During training time, our model would never see twice the exact same picture. This helps prevent [overfitting](https://developers.google.com/machine-learning/glossary/#overfitting) and helps the model generalize better to unseen data. 
# ## Processing each pathname def _process_pathnames(fname, label_path): # We map this function onto each pathname pair img_str = tf.read_file(fname) img = tf.image.decode_bmp(img_str, channels=3) label_img_str = tf.read_file(label_path) label_img = tf.image.decode_bmp(label_img_str, channels=1) resize = [image_size, image_size] img = tf.image.resize_images(img, resize) label_img = tf.image.resize_images(label_img, resize) scale = 1 / 255. img = tf.to_float(img) * scale label_img = tf.to_float(label_img) * scale return img, label_img def get_baseline_dataset(filenames, labels, threads=5, batch_size=batch_size, shuffle=True): num_x = len(filenames) # Create a dataset from the filenames and labels dataset = tf.data.Dataset.from_tensor_slices((filenames, labels)) # Map our preprocessing function to every element in our dataset, taking # advantage of multithreading dataset = dataset.map(_process_pathnames, num_parallel_calls=threads) if shuffle: dataset = dataset.shuffle(num_x * 10) dataset = dataset.batch(batch_size) return dataset # ## Set up train and test datasets # Note that we apply image augmentation to our training dataset but not our validation dataset. train_dataset = get_baseline_dataset(x_train_filenames, y_train_filenames) test_dataset = get_baseline_dataset(x_test_filenames, y_test_filenames, shuffle=False) train_dataset # ### Plot some train data for images, labels in train_dataset.take(1): # Running next element in our graph will produce a batch of images plt.figure(figsize=(10, 10)) img = images[0] plt.subplot(1, 2, 1) plt.imshow(img) plt.subplot(1, 2, 2) plt.imshow(labels[0, :, :, 0]) plt.show() # ## Build the model # We'll build the U-Net model. U-Net is especially good with segmentation tasks because it can localize well to provide high resolution segmentation masks. 
In addition, it works well with small datasets and is relatively robust against overfitting as the training data is in terms of the number of patches within an image, which is much larger than the number of training images itself. Unlike the original model, we will add batch normalization to each of our blocks. # # The Unet is built with an encoder portion and a decoder portion. The encoder portion is composed of a linear stack of [`Conv`](https://developers.google.com/machine-learning/glossary/#convolution), `BatchNorm`, and [`Relu`](https://developers.google.com/machine-learning/glossary/#ReLU) operations followed by a [`MaxPool`](https://developers.google.com/machine-learning/glossary/#pooling). Each `MaxPool` will reduce the spatial resolution of our feature map by a factor of 2. We keep track of the outputs of each block as we feed these high resolution feature maps with the decoder portion. The Decoder portion is comprised of UpSampling2D, Conv, BatchNorm, and Relus. Note that we concatenate the feature map of the same size on the decoder side. Finally, we add a final Conv operation that performs a convolution along the channels for each individual pixel (kernel size of (1, 1)) that outputs our final segmentation mask in grayscale. # ### The Keras Functional API # The Keras functional API is used when you have multi-input/output models, shared layers, etc. It's a powerful API that allows you to manipulate tensors and build complex graphs with intertwined datastreams easily. In addition it makes layers and models both callable on tensors. # # * To see more examples check out the [get started guide](https://keras.io/getting-started/functional-api-guide/). # # We'll build these helper functions that will allow us to ensemble our model block operations easily and simply. 
# + def conv_block(input_tensor, num_filters): encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor) encoder = layers.BatchNormalization()(encoder) encoder = layers.Activation('relu')(encoder) return encoder def encoder_block(input_tensor, num_filters): encoder = conv_block(input_tensor, num_filters) encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder) return encoder_pool, encoder def decoder_block(input_tensor, concat_tensor, num_filters): decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor) decoder = layers.concatenate([concat_tensor, decoder], axis=-1) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) return decoder # + inputs = layers.Input(shape=img_shape) # 256 encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128 encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64 encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32 encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16 center = conv_block(encoder3_pool, 512) # center decoder3 = decoder_block(center, encoder3, 256) # 32 decoder2 = decoder_block(decoder3, encoder2, 128) # 64 decoder1 = decoder_block(decoder2, encoder1, 64) # 128 decoder0 = decoder_block(decoder1, encoder0, 32) # 256 outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0) # - # ### Create a model (UNet) # Using functional API, you must define your model by specifying the inputs and outputs associated with the model. model = models.Model(inputs=[inputs], outputs=[outputs]) model.summary() # ## Defining custom metrics and loss functions # # Defining loss and metric functions are simple with Keras. 
# Simply define a function that takes both the True labels for a given example
# and the Predicted labels for the same given example.
#
# Dice loss is a metric that measures overlap. More info on optimizing for
# Dice coefficient (our dice loss) can be found in the
# [paper](http://campar.in.tum.de/pub/milletari2016Vnet/milletari2016Vnet.pdf),
# where it was introduced.
#
# We use dice loss here because it performs better at class imbalanced problems
# by design. In addition, maximizing the dice coefficient and IoU metrics are
# the actual objectives and goals of our segmentation task. Using cross entropy
# is more of a proxy which is easier to maximize. Instead, we maximize our
# objective directly.

def dice_coeff(y_true, y_pred):
    """Soft Dice coefficient: (2*sum(true*pred) + s) / (sum(true) + sum(pred) + s).

    Computed over the flattened masks; the smoothing term `s` keeps the ratio
    defined (and gradients stable) when both masks are empty.
    """
    smooth = 1.
    # Flatten
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
    return score


def dice_loss(y_true, y_pred):
    """Dice loss = 1 - dice coefficient; minimized when overlap is maximal."""
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss


# Here, we'll use a specialized loss function that combines binary cross
# entropy and our dice loss. This is based on
# [individuals who competed within this competition obtaining better results
# empirically](https://www.kaggle.com/c/carvana-image-masking-challenge/discussion/40199).
# Try out your own custom losses to measure performance
# (e.g. bce + log(dice_loss), only bce, etc.)!
def bce_dice_loss(y_true, y_pred):
    """Binary cross-entropy (mean over pixels) plus dice loss.

    BCE provides a smooth, well-behaved gradient signal while the dice term
    directly optimizes the overlap objective used for evaluation.
    """
    loss = tf.reduce_mean(losses.binary_crossentropy(y_true, y_pred)) + dice_loss(y_true, y_pred)
    return loss

# ## Set up an optimizer

optimizer = tf.train.AdamOptimizer(learning_rate=2e-4)

# ## Checkpoints (Object-based saving)

checkpoint_dir = train_dir
if not tf.gfile.Exists(checkpoint_dir):
    tf.gfile.MakeDirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
# Object-based checkpoint: captures optimizer state as well as model weights.
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)

# ## Train your model
# The training below uses a custom loop with `tf.GradientTape` over the
# `tf.data` training dataset (rather than `model.fit`). Model and optimizer
# state are saved every `save_epochs` epochs via the object-based
# `tf.train.Checkpoint` created above, so training can be resumed with the
# optimizer state intact.
# + # %%time tf.logging.info('Start Training.') # save loss values for plot loss_history = [] global_step = tf.train.get_or_create_global_step() for epoch in range(max_epochs): for images, labels in train_dataset: start_time = time.time() with tf.GradientTape() as tape: predictions = model(images) loss = bce_dice_loss(labels, predictions) gradients = tape.gradient(loss, model.variables) optimizer.apply_gradients(zip(gradients, model.variables), global_step=global_step) epochs = global_step.numpy() * batch_size / float(num_train_examples) duration = time.time() - start_time if global_step.numpy() % print_steps == 0: clear_output(wait=True) examples_per_sec = batch_size / float(duration) print("Epochs: {:.2f} global_step: {} loss: {:.3f} ({:.2f} examples/sec; {:.3f} sec/batch)".format( epochs, global_step.numpy(), loss, examples_per_sec, duration)) loss_history.append([epochs, loss]) # print sample image for test_images, test_labels in test_dataset.take(1): predictions = model(test_images) plt.figure(figsize=(10, 20)) plt.subplot(1, 3, 1) plt.imshow(test_images[0,: , :, :]) plt.title("Input image") plt.subplot(1, 3, 2) plt.imshow(test_labels[0, :, :, 0]) plt.title("Actual Mask") plt.subplot(1, 3, 3) plt.imshow(predictions[0, :, :, 0]) plt.title("Predicted Mask") plt.show() # saving (checkpoint) the model periodically if (epoch+1) % save_epochs == 0: checkpoint.save(file_prefix = checkpoint_prefix) tf.logging.info('complete training...') # - # ### Plot the loss loss_history = np.asarray(loss_history) plt.plot(loss_history[:,0], loss_history[:,1]) plt.show()
cnn/image_segmentation/image_segmentation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chandelier Exit # https://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:chandelier_exit # + outputHidden=false inputHidden=false import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") # yfinance is used to fetch data import yfinance as yf yf.pdr_override() # + outputHidden=false inputHidden=false # input symbol = 'AAPL' start = '2018-08-01' end = '2019-01-01' # Read data df = yf.download(symbol,start,end) # View Columns df.head() # + outputHidden=false inputHidden=false import talib as ta # + outputHidden=false inputHidden=false df['ATR'] = ta.ATR(df['High'], df['Low'], df['Adj Close'], timeperiod=22) # + outputHidden=false inputHidden=false df['High_22'] = df['High'].rolling(22).max() df['Low_22'] = df['Low'].rolling(22).min() # + outputHidden=false inputHidden=false df['CH_Long'] = df['High_22'] - df['ATR'] * 3 df['CH_Short'] = df['Low_22'] + df['ATR'] * 3 # + outputHidden=false inputHidden=false df = df.dropna() df.head() # + outputHidden=false inputHidden=false plt.figure(figsize=(16,10)) plt.plot(df['Adj Close']) plt.plot(df['CH_Long']) plt.title('Chandelier Exit for Long') plt.legend(loc='best') plt.ylabel('Price') plt.xlabel('Date') plt.show() # + outputHidden=false inputHidden=false plt.figure(figsize=(16,10)) plt.plot(df['Adj Close']) plt.plot(df['CH_Short']) plt.title('Chandelier Exit for Short') plt.legend(loc='best') plt.ylabel('Price') plt.xlabel('Date') plt.show() # + outputHidden=false inputHidden=false plt.figure(figsize=(16,10)) plt.plot(df['Adj Close']) plt.plot(df['CH_Long']) plt.plot(df['CH_Short']) plt.title('Chandelier Exit for Long & Short') plt.legend(loc='best') plt.ylabel('Price') plt.xlabel('Date') 
plt.show() # - # ## Candlestick with Chandelier Exit # + from matplotlib import dates as mdates import datetime as dt df['VolumePositive'] = df['Open'] < df['Adj Close'] df = df.dropna() df = df.reset_index() df['Date'] = mdates.date2num(df['Date'].astype(dt.date)) df.head() # + outputHidden=false inputHidden=false from mpl_finance import candlestick_ohlc fig = plt.figure(figsize=(16,8)) ax1 = plt.subplot(111) candlestick_ohlc(ax1,df.values, width=0.5, colorup='g', colordown='r', alpha=1.0) ax1.plot(df.Date, df['CH_Long']) ax1.xaxis_date() ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y')) #ax1.axhline(y=dfc['Adj Close'].mean(),color='r') ax1v = ax1.twinx() colors = df.VolumePositive.map({True: 'g', False: 'r'}) ax1v.bar(df.Date, df['Volume'], color=colors, alpha=0.4) ax1v.axes.yaxis.set_ticklabels([]) ax1v.set_ylim(0, 3*df.Volume.max()) ax1.set_title('Chandelier Exit for Long') ax1.set_ylabel('Price') ax1.set_xlabel('Date') ax1.legend(loc='best') # + outputHidden=false inputHidden=false fig = plt.figure(figsize=(16,8)) ax1 = plt.subplot(111) candlestick_ohlc(ax1,df.values, width=0.5, colorup='g', colordown='r', alpha=1.0) ax1.plot(df.Date, df['CH_Short'], color='Orange') ax1.xaxis_date() ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y')) #ax1.axhline(y=dfc['Adj Close'].mean(),color='r') ax1v = ax1.twinx() colors = df.VolumePositive.map({True: 'g', False: 'r'}) ax1v.bar(df.Date, df['Volume'], color=colors, alpha=0.4) ax1v.axes.yaxis.set_ticklabels([]) ax1v.set_ylim(0, 3*df.Volume.max()) ax1.set_title('Chandelier Exit for Short') ax1.set_ylabel('Price') ax1.set_xlabel('Date') ax1.legend(loc='best') # + outputHidden=false inputHidden=false fig = plt.figure(figsize=(16,8)) ax1 = plt.subplot(111) candlestick_ohlc(ax1,df.values, width=0.5, colorup='g', colordown='r', alpha=1.0) ax1.plot(df.Date, df['CH_Long']) ax1.plot(df.Date, df['CH_Short']) ax1.xaxis_date() ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y')) 
#ax1.axhline(y=dfc['Adj Close'].mean(),color='r') ax1v = ax1.twinx() colors = df.VolumePositive.map({True: 'g', False: 'r'}) ax1v.bar(df.Date, df['Volume'], color=colors, alpha=0.4) ax1v.axes.yaxis.set_ticklabels([]) ax1v.set_ylim(0, 3*df.Volume.max()) ax1.set_title('Chandelier Exit for Long & Short') ax1.set_ylabel('Price') ax1.set_xlabel('Date') ax1.legend(loc='best')
Python_Stock/Technical_Indicators/Chandelier_Exit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Facility Location # ## Objective and Prerequisites # # Facility location problems can be commonly found in many industries, including logistics and telecommunications. In this example, we’ll show you how to tackle a facility location problem that involves determining the number and location of warehouses that are needed to supply a group of supermarkets. We’ll demonstrate how to construct a mixed-integer programming (MIP) model of this problem, implement this model in the Gurobi Python API, and then use the Gurobi Optimizer to find an optimal solution. # # This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge about building mathematical optimization models. # # **Download the Repository** <br /> # You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). # ## Motivation # # The study of facility location problems - also known as "location analysis" [1] - is a branch of operations research and computational geometry concerned with the optimal placement of facilities to minimize transportation costs while considering factors like safety (e.g. by avoiding placing hazardous materials near housing) and the location of competitors' facilities. # # The Fermat-Weber problem, formulated in the 17'th century, was one of the first facility location problems ever devised. # The Fermat-Weber problem can be described as follows: Given three points in a plane, find a fourth point such that the sum of its distances to the three given points is minimal. 
This problem can be viewed as a variation of the facility location problem, where the assumption is made that the transportation costs per distance are the same for all destinations. # # Facility location problems have applications in a wide variety of industries. For supply chain management and logistics, this problem can be used to find the optimal location for stores, factories, warehouses, etc. Other applications range from public policy (e.g. positioning police officers in a city), telecommunications (e.g. cell towers in a network), and even particle physics (e.g. separation distance between repulsive charges). Another application of the facility location problem is to determine the locations for natural gas transmission equipment. Finally, facility location problems can be applied to cluster analysis. # ## Problem Description # # # A large supermarket chain in the UK needs to build warehouses for a set of supermarkets it is opening in Northern England. The locations of the supermarkets have been identified, but the locations of the warehouses have yet to be determined. # # Several good candidate locations for the warehouses have been identified, but decisions must be made regarding # how many warehouses to open and at which candidate locations to build them. # # Opening many warehouses would be advantageous as this would reduce the average distance a truck has to drive from the warehouse to the supermarket, and hence reduce the delivery cost. However, opening a warehouse has a fixed cost associated with it. # # In this example, our goal is to find the optimal tradeoff between delivery costs and the costs of building new facilities. # ## Solution Approach # # Mathematical programming is a declarative approach where the modeler formulates a mathematical optimization model that captures the key aspects of a complex business problem. The Gurobi Optimizer solves such models using state-of-the-art mathematics and computer science. 
# # A mathematical optimization model has five components, namely: # # * Sets and indices. # * Parameters. # * Decision variables. # * Objective function(s). # * Constraints. # # We now present a MIP formulation for the facility location problem. # ## Model Formulation # # ### Sets and Indices # # $i \in I$: Index and set of supermarket (or customer) locations. # # $j \in J$: Index and set of candidate warehouse (or facility) locations. # # ### Parameters # # $f_{j} \in \mathbb{R}^+$: Fixed cost associated with constructing facility $j \in J$. # # $d_{i,j} \in \mathbb{R}^+$: Distance between facility $j \in J$ and customer $i \in I$. # # $c_{i,j} \in \mathbb{R}^+$: Cost of shipping between candidate facility site $j \in J$ and customer location $i \in I$. We assume that this cost is proportional to the distance between the facility and the customer. That is, $c_{i,j} = \alpha \cdot d_{i,j}$, where $\alpha$ is the cost per mile of driving, adjusted to incorporate the average number of trips a delivery truck would be expected to make over a five year period. # # ### Decision Variables # # $select_{j} \in \{0, 1 \}$: This variable is equal to 1 if we build a facility at candidate location $j \in J$; and 0 otherwise. # # $0 \leq assign_{i,j} \leq 1$: This non-negative continuous variable determines the fraction of supply received by customer $i \in I$ from facility $j \in J$. # # ### Objective Function # # - **Total costs**. We want to minimize the total cost to open and operate the facilities. This is the sum of the cost of opening facilities and the cost related to shipping between facilities and customers. This total cost measures the tradeoff between the cost of building a new facility and the total delivery cost over a five year period. # # \begin{equation} # \text{Min} \quad Z = \sum_{j \in J} f_{j} \cdot select_{j} + \sum_{j \in J} \sum_{i \in I} c_{i,j} \cdot assign_{i,j} # \tag{0} # \end{equation} # # ### Constraints # # - **Demand**. 
For each customer $i \in I$ ensure that its demand is fulfilled. That is, the sum of the fraction received from each facility for each customer must be equal to 1: # # \begin{equation} # \sum_{j \in J} assign_{i,j} = 1 \quad \forall i \in I # \tag{1} # \end{equation} # # - **Shipping**. We need to ensure that we only ship from facility $j \in J$, if that facility has actually been built. # # \begin{equation} # assign_{i,j} \leq select_{j} \quad \forall i \in I \quad \forall j \in J # \tag{2} # \end{equation} # ## Python Implementation # # This example considers two supermarkets and nine warehouse candidates. The coordinates of each supermarket are provided in the following table. # # | <i></i> | Coordinates | # | --- | --- | # | Supermarket 1 | (0,1.5) | # | Supermarket 2 | (2.5,1.2) | # # The following table shows the coordinates of the candidate warehouse sites and the fixed cost of building the warehouse in millions of GBP. # # | <i></i> | coordinates | fixed cost | # | --- | --- | --- | # | Warehouse 1 | (0,0) | 3 | # | Warehouse 2 | (0,1) | 2 | # | Warehouse 3 | (0,2) | 3 | # | Warehouse 4 | (1,0) | 1 | # | Warehouse 5 | (1,1) | 3 | # | Warehouse 6 | (1,2) | 3 | # | Warehouse 7 | (2,0) | 4 | # | Warehouse 8 | (2,1) | 3 | # | Warehouse 9 | (2,2) | 2 | # # # The cost per mile is one million GBP. # # ## Python Implementation # # We now import the Gurobi Python Module and other Python libraries. Then, we initialize the data structures with the given data. # %pip install gurobipy # + from itertools import product from math import sqrt import gurobipy as gp from gurobipy import GRB # tested with Gurobi v9.1.0 and Python 3.7.0 # Parameters customers = [(0,1.5), (2.5,1.2)] facilities = [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2), (2,0), (2,1), (2,2)] setup_cost = [3,2,3,1,3,3,4,3,2] cost_per_mile = 1 # - # ### Preprocessing # # We define a function that determines the Euclidean distance between each facility and customer sites. 
# In addition, we compute key parameters required by the MIP model
# formulation of the facility location problem.

# +
# This function determines the Euclidean distance between a facility and customer sites.
def compute_distance(loc1, loc2):
    """Return the Euclidean distance between two (x, y) coordinate pairs."""
    return sqrt((loc1[0] - loc2[0]) ** 2 + (loc1[1] - loc2[1]) ** 2)

# Compute key parameters of MIP model formulation
num_facilities = len(facilities)
num_customers = len(customers)
cartesian_prod = list(product(range(num_customers), range(num_facilities)))

# Shipping cost is proportional to the customer-facility distance.
shipping_cost = {
    (cust, fac): cost_per_mile * compute_distance(customers[cust], facilities[fac])
    for cust, fac in cartesian_prod
}
# -

# ### Model Deployment
#
# We now determine the MIP model for the facility location problem, by defining
# the decision variables, constraints, and objective function. Next, we start
# the optimization process and Gurobi finds the plan to build facilities that
# minimizes total costs.

# +
# MIP model formulation
m = gp.Model('facility_location')

# Decision variables: build facility f (binary); fraction of customer c's
# demand served from facility f (continuous in [0, 1]).
select = m.addVars(num_facilities, vtype=GRB.BINARY, name='Select')
assign = m.addVars(cartesian_prod, ub=1, vtype=GRB.CONTINUOUS, name='Assign')

# A customer can only be served from a facility that is actually built.
m.addConstrs((assign[(c, f)] <= select[f] for c, f in cartesian_prod),
             name='Setup2ship')
# Every customer's demand must be fully covered.
m.addConstrs((gp.quicksum(assign[(c, f)] for f in range(num_facilities)) == 1
              for c in range(num_customers)),
             name='Demand')

# Minimize fixed construction costs plus total shipping costs.
m.setObjective(select.prod(setup_cost) + assign.prod(shipping_cost),
               GRB.MINIMIZE)

m.optimize()
# -

# ## Analysis
#
# The result of the optimization model shows that the minimum total cost value
# is 4.72 million GBP. Let's see the solution that achieves that optimal result.
#
# ### Warehouse Build Plan
#
# This plan determines at which site locations to build a warehouse.
# + # display optimal values of decision variables for facility in select.keys(): if (abs(select[facility].x) > 1e-6): print(f"\n Build a warehouse at location {facility + 1}.") # - # ### Shipment Plan # # This plan determines the percentage of shipments to be sent from each facility built to each customer. # + # Shipments from facilities to customers. for customer, facility in assign.keys(): if (abs(assign[customer, facility].x) > 1e-6): print(f"\n Supermarket {customer + 1} receives {round(100*assign[customer, facility].x, 2)} % of its demand from Warehouse {facility + 1} .") # - # ## Conclusion # In this example, we addressed a facility location problem where we want to build warehouses to supply a large number of supermarkets while minimizing the fixed total costs of building warehouses and the total variable shipping costs from warehouses to supermarkets. We learned how to formulate the problem as a MIP model. Also, we learned how to implement the MIP model formulation and solve it using the Gurobi Python API. # ## References # [1] <NAME>, <NAME>, and <NAME>, Francisco. Location Science. Springer, 2015. # Copyright © 2020 Gurobi Optimization, LLC
facility_location/facility_location_gcl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lab 13 # - PCA # - TSNE # ## PCA (Principal Component Analysis) # Analiza głównych składników jest szybką metodą redukcji wymiarowości danych. # Polega na znalezieniu liniowej transformacji zbioru zmiennych początkowych w mniej liczny zbiór zmiennych, zwanych składowymi głównymi. Jest to pewien sposób kompresji danych, dlatego metodę tę można wykorzystać w wielu dziedzinach nauki. # # # **Algorytm PCA:** # 1. Standaryzacja danych # 2. Wyznaczenie macierzy kowariancji $Σ$ między zmiennymi początkowymi # 3. Wyznaczenie wartości własnych macierzy kowariancji # 4. Wybranie $k$ największych wartości własnych i wyznaczenie dla nich wektorów własnych # 5. Utworzenie macierzy przekształcenia liniowego $W$, bazującej na wektorach własnych # 6. Przekształcenie zmiennych początkowych według wzoru $Y = WX$ # # ![](./fig/pca_matrix.png) # ![](./fig/pca_gif.gif) # ### Przykład dla sztucznych danych # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import seaborn as sns; sns.set() rng = np.random.RandomState(1) X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T X.shape plt.figure(figsize=(8,5)) plt.scatter(X[:, 0], X[:, 1]) plt.show() # #### Jeden komponent from sklearn.decomposition import PCA pca = PCA(n_components=1) pca.fit(X) # Kierunki zmiennych PCA pca.components_ # Wyjaśniona wariancja print(f'Total variance: {np.sqrt(np.std(X)):.4f}') pca.explained_variance_ # Procent wyjaśnionej wariancji pca.explained_variance_ratio_ # + def draw_vector(v0, v1, ax=None): ax = ax or plt.gca() arrowprops=dict(arrowstyle='->', linewidth=4, shrinkA=0, shrinkB=0, color='black') ax.annotate('', v1, v0, arrowprops=arrowprops) # plot data plt.figure(figsize=(8,5)) plt.scatter(X[:, 0], X[:, 1]) plt.axis('equal') for length, vector in 
zip(pca.explained_variance_, pca.components_): v = vector * 3 * np.sqrt(length) draw_vector(pca.mean_, pca.mean_ + v) plt.show() # - # #### 2 komponenty from sklearn.decomposition import PCA pca = PCA(n_components=2) pca.fit(X) # Kierunki zmiennych PCA pca.components_ # Wyjaśniona wariancja print(f'Total variance: {np.sqrt(np.std(X)):.4f}') pca.explained_variance_ # Procent wyjaśnionej wariancji pca.explained_variance_ratio_ # plot data plt.figure(figsize=(8,5)) plt.scatter(X[:, 0], X[:, 1]) plt.axis('equal') for length, vector in zip(pca.explained_variance_, pca.components_): v = vector * 3 * np.sqrt(length) draw_vector(pca.mean_, pca.mean_ + v) plt.show() # ### Dobieranie odpowiedniej liczby komponentów import pandas as pd hitters = pd.read_csv('hitters.csv') hitters.head() # #### Wydzielenie zmiennej celu i podział na zbiór treningowy i testowy # + from sklearn.model_selection import train_test_split y = hitters['NewLeague'] X = hitters.drop(['Name','NewLeague','League','Division'], axis=1) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=0) # + from sklearn.decomposition import PCA pca = PCA().fit(X_train) plt.figure(figsize=(9,6)) plt.plot(range(1, len(pca.explained_variance_ratio_)+1), np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); # - # #### Zobaczmy jak poradzi sobie klasyfikacja dla surowych danych i dla PCA # + from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score model = RandomForestClassifier(random_state=0) # Surowe dane y_hat = model.fit(X_train,y_train).predict(X_test) print(f'Accuracy bez PCA: {accuracy_score(y_test, y_hat):.4f}') # PCA - odczytane z wykresu comp_acc_pairs = [] for i in range(1,17): pca = PCA(n_components=i) pca.fit(X_train) y_hat = model.fit(pca.transform(X_train),y_train).predict(pca.fit_transform(pca.transform(X_test))) comp_acc_pairs.append((i, accuracy_score(y_test, y_hat))) 
plt.figure(figsize=(9,6)) plt.plot(*list(zip(*comp_acc_pairs)),'bx-') plt.xlabel('number of components') plt.ylabel('Accuracy'); # - # ## T-SNE # Pytanie: Jak zwizualizować dane wielowymiarowe? # **t-distributed Stochastic Neighbor Embedding** # Cel: Pokazać ukryte zależności wielowymiarowych danych w 2D lub 3D. # # Idea: Chcemy, aby obserwacje podobne do siebie w wielu wymiarach były blisko siebie w podprzestrzeni. # # Wyróżnijmy: # **obserwacja x** - wielowymiarowy wektor cech o wymiarze zgodnym z wymiarem danych. # **mapowanie y** - dwu- lub trzy- wymiarowy wektor określający pozycje obserwacji na mapie. # # 1. Obliczamy odległosci pomiędzy obserwacjami poprzez wyliczenie odpowiednich prawdopodobieństw warunkowych. # Im bardziej obserwacje są podobne do siebie, tym większe prawdopodobieństwo. # # $$p_{j|i} = \frac{\exp{(-d(\boldsymbol{x}_i, \boldsymbol{x}_j) / (2 \sigma_i^2)})}{\sum_{i \neq k} \exp{(-d(\boldsymbol{x}_i, \boldsymbol{x}_k) / (2 \sigma_i^2)})}, \quad p_{i|i} = 0,$$ # # Powyższy wzorek określa podobieństwo dwóch obserwacji z wykorzystaniem rozkładu normalnego scentrowanego w $x_{i}$. $\sigma_{i}$ wyznaczana jest osobno dla każdej cechy w zależności od jej wariancji. W `sklearn` możemy wpływać na "spłaszczenie" lub "zwężanie" rozkładu poprzez parametr `perplexity`. # # Aby skorzystać z symetrii wprowadzamy: # # $$p_{ij} = \frac{p_{j|i} + p_{i|j}}{2N}.$$ # # W ten sposób otrzymujemy macierz podobieństwa **P** (ang. *similarity matrix*) # # 2. Następnie rozważamy macierz **Q** stanowiącą podobne odwzorowanie co powyżej, ale dla mapowań. Z przyczyn, które na razie pomińmy prawdopodobieństwa nie są liczone z wykorzystaniem rozkładu normalnego, ale t-Studenta o jednym stopniu swobody (rozkład Cauchy'ego): # # $$q_{ij} = \frac{(1 + ||\boldsymbol{y}_i - \boldsymbol{y}_j||^2)^{-1}}{\sum_{k \neq l} (1 + ||\boldsymbol{y}_k - \boldsymbol{y}_l||^2)^{-1}},$$ # # 3. Zaszliśmy daleko. 
Rozważamy teraz takie "umiejscowienie" obserwacji w przestrzeni o 2 lub 3 wymiarach, aby jak najbardziej zminimalizować różnicę pomiędzy dwiema macierzami (P i Q). Zmieniamy oczywiście macierz mapowań **Q**. Odpowiada to minimalizacji dywergencji Kullbacka-Leibera (https://pl.wikipedia.org/wiki/Dywergencja_Kullbacka-Leiblera, https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence): # # $$KL(P|Q) = \sum_{i \neq j} p_{ij} \log \frac{p_{ij}}{q_{ij}}$$ # # ## Przykład 1 # <img src="fig/caltech101_tsne.jpg" alt="drawing" width="600"/> # ## Przykład 2 # 1. Użyjemy zbioru :https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits # + from sklearn.manifold import TSNE from sklearn.datasets import load_digits from sklearn.preprocessing import scale import matplotlib.pyplot as plt import numpy as np import seaborn as sns # - # 2. Zawiera on 1797 obrazków cyfr w formacie 8x8 pikseli. digits = load_digits() digits.data.shape # 3. Przykładowe obserwacje: nrows, ncols = 2, 5 plt.figure(figsize=(10,5)) plt.gray() for i in range(ncols * nrows): ax = plt.subplot(nrows, ncols, i + 1) ax.matshow(digits.images[i,...]) plt.xticks([]); plt.yticks([]) plt.title(digits.target[i]) plt.show() # 4. Uporządkowanie danych (tylko, aby pomóc nam w wizualizacji, algorytm tego nie wymaga) X = np.vstack([digits.data[digits.target==i] for i in range(10)]) y = np.hstack([digits.target[digits.target==i] for i in range(10)]) # 5. Wywołanie PCA i wizualizacja def scatter(x, colors): palette = np.array(sns.color_palette("hls", 10)) f = plt.figure(figsize=(8, 8)) ax = plt.subplot(aspect='equal') sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors.astype(np.int)]) plt.xlim(-25, 25) plt.ylim(-25, 25) ax.axis('off') ax.axis('tight') # add labels txts = [] for i in range(10): # Position of each label. 
xtext, ytext = np.median(x[colors == i, :], axis=0) txt = ax.text(xtext, ytext, str(i), fontsize=24) txts.append(txt) return f, ax, sc, txts pca = PCA(n_components = 2) digits_proj_pca = pca.fit_transform(X) scatter(digits_proj_pca, y) plt.show() # 6. Wywołanie t-SNE # %%time random_state = 1500100900 tSNE = TSNE(random_state=random_state, verbose=1) digits_proj = tSNE.fit_transform(X) scatter(digits_proj, y) plt.show() tSNE.fit(X).get_params() # 7. Jak widzieliśmy powyżej, t-SNE jest kosztowne obliczeniowo nawet dla tak niewielkiego zbioru danych. Aby skrócić czas obliczeń, jeśli liczba cech jest znacząca, można połączyć oba podejścia: PCA + tSNE. # + pca = PCA().fit(X) plt.figure(figsize=(9,6)) plt.plot(range(1, len(pca.explained_variance_ratio_)+1), np.cumsum(pca.explained_variance_ratio_)) plt.xlabel('number of components') plt.ylabel('cumulative explained variance'); # - # 8. Spróbujmy z `n_components=40` np.cumsum(PCA(n_components=40).fit(X).explained_variance_ratio_)[-1] # %%time X_pca = PCA(n_components=40).fit_transform(X) random_state = 1500100900 digits_proj = TSNE(random_state=random_state).fit_transform(X_pca) scatter(digits_proj, y) plt.show() # 9. Dlaczego t? # W algorytmie SNE zarówno dla macierzy **P** (odwzorowuje odległości między obserwacjami) jak i **Q** (między mapowaniami) wykorzystuje się rozkład Gaussa. Okazuje się, że prowadzi to często do zagęszczenia mapowań. Obserwacje średnio odległe od siebie uzyskują bliskie sobie mapowania. (the crowding problem) # # Problem ten można zniwelować poprzez wykorzystanie dla mapowań rozkładu t-Studenta z jednym stopniem swobody (rozkład Cauchy'ego), który pozwala na lepsze odwzorowanie tych odległości dzięki własności grubych ogonów. Prowadzi to do lepszego odseparowania danych w mapowaniu. z = np.linspace(0., 5., 1000) gauss = np.exp(-z**2) cauchy = 1/(1+z**2) plt.plot(z, gauss, label='Gaussian distribution') plt.plot(z, cauchy, label='Cauchy distribution') plt.legend() plt.show() # 10. 
Uwagi końcowe o t-SNE: # - służy do wizualizacji danych wielowymiarowych, # - zwykle znajduje zastosowanie dla danych od 5 do 50 wymiarów, # - wykorzystuje rozkład t-Studenta zamiast rozkładu normalnego (SNE), aby przeciwdziałać zbyt małym odstępom pomiędzy średnio-odległymi obserwacjami, # - często jest wykorzystywane wraz z PCA, aby zmiejszyć czas obliczeń. # Źródła: # - https://github.com/oreillymedia/t-SNE-tutorial, # - http://jmlr.csail.mit.edu/papers/volume9/vandermaaten08a/vandermaaten08a.pdf, # - https://nbviewer.jupyter.org/urls/gist.githubusercontent.com/AlexanderFabisch/1a0c648de22eff4a2a3e/raw/59d5bc5ed8f8bfd9ff1f7faa749d1b095aa97d5a/t-SNE.ipynb
Materialy/Grupa1/lab_13/lab13.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TheDenk/augmixations/blob/feature%2Fno_blots/examples/cutmix_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="CcdL4fUyfJJb" colab={"base_uri": "https://localhost:8080/"} outputId="a34856b5-b871-4189-f9e4-abbd2227ed75"
# !pip install augmixations

# + id="DhEY9Kr0fOp8"
import cv2
import numpy as np
from matplotlib import pyplot as plt

from augmixations import Cutmix

# + [markdown] id="BHGorvvGf_Pt"
# ##Help functions

# + id="ouQsobBIfV6L"
def show_img(image, figsize=(4, 6), title=None):
    """Display a BGR (OpenCV-convention) image with matplotlib."""
    img = image.copy()
    # OpenCV stores channels as BGR; matplotlib expects RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=figsize)
    plt.title(title)
    plt.imshow(img)
    plt.axis(False)
    plt.show()


def create_img_with_rect(img_h=500, img_w=500,
                         x1=200, y1=200, x2=300, y2=300,
                         color=(80, 80, 80)):
    """Return a light-grey image with a filled rectangle, plus its
    bounding box array (x1, y1, x2, y2) and label array."""
    label = 'rectangle'
    img = np.ones((img_h, img_w, 3), dtype=np.uint8)*230
    img = cv2.rectangle(img, (x1, y1), (x2, y2), color, -1)
    return img, np.array([np.array([x1, y1, x2, y2])]), np.array([label])


def create_img_with_circle(img_h=500, img_w=500,
                           xc=250, yc=250, r=100,
                           color=(150, 150, 150)):
    """Return a white image with a filled circle centred at (xc, yc),
    plus its bounding box array and label array."""
    label = 'circle'
    img = np.ones((img_h, img_w, 3), dtype=np.uint8)*255
    img = cv2.circle(img, (xc, yc), r, color, -1)
    # BUG FIX: the vertical box coordinates must come from yc. The
    # original returned [xc - r, xc - r, xc + r, xc + r], which is only
    # correct in the special case xc == yc (as with the defaults).
    return img, np.array([np.array([xc - r, yc - r, xc + r, yc + r])]), np.array([label])


def draw_boxes(new_img, boxes, labels):
    """Return a copy of the image with labelled bounding boxes drawn."""
    frame = new_img.copy()
    for (x1, y1, x2, y2), label in zip(boxes, labels):
        thickness = 2
        font_scale = 1
        frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 3)
        frame = cv2.putText(frame, label, (int(x1), int(y1 - 15)),
                            cv2.FONT_HERSHEY_SIMPLEX, font_scale,
                            (0, 0, 255), thickness, cv2.LINE_AA)
    return frame

# + [markdown] 
id="WEkaoKvwgM7K" # ##Create images # + id="MK6gD4_ZfWAJ" bg_img, bg_boxes, bg_labels = create_img_with_rect() fg_img, fg_boxes, fg_labels = create_img_with_circle() # + id="nD_Ldq3nfWDE" colab={"base_uri": "https://localhost:8080/", "height": 254} outputId="d8440022-4956-4b5b-d037-b8c4f80196ff" bg_marked = draw_boxes(bg_img, bg_boxes, bg_labels) show_img(bg_marked) # + id="Tb_8dDw3fWFc" colab={"base_uri": "https://localhost:8080/", "height": 254} outputId="6d98ebde-5446-4387-d668-3271694b1008" fg_marked = draw_boxes(fg_img, fg_boxes, fg_labels) show_img(fg_marked) # + [markdown] id="XovspJJ0gTst" # ##Simple usage # + id="LlfZQraRffDn" colab={"base_uri": "https://localhost:8080/", "height": 254} outputId="854d78f6-34c3-47ff-9f79-1152b2e9a8ef" cutmix = Cutmix() new_img, new_boxes, new_labels = cutmix( bg_img, bg_boxes, bg_labels, fg_img, fg_boxes, fg_labels, ) img_with_rect = draw_boxes(new_img, new_boxes, new_labels) show_img(img_with_rect) # + [markdown] id="Sg8UL3jCgXWM" # ##Advanced usage (with configs) # + id="e7TgBDt9fpBf" crop_rect_config = { 'crop_x' : 100, 'crop_y' : 100, 'rect_h' : 200, 'rect_w' : 200, 'insert_x' : None, 'insert_y' : None, } process_box_config = { 'max_overlap_area_ratio': 0.75, 'min_height_result_ratio': 0.25, 'min_width_result_ratio': 0.25, 'max_height_intersection': 0.9, 'max_width_intersection': 0.9, } # + id="ft79hl-gfpD9" colab={"base_uri": "https://localhost:8080/", "height": 254} outputId="d8f4e432-3679-49c9-b697-4b173212a5d4" cutmix = Cutmix(crop_rect_config, process_box_config) new_img, new_boxes, new_labels = cutmix( bg_img, bg_boxes, bg_labels, fg_img, fg_boxes, fg_labels, ) img_with_rect = draw_boxes(new_img, new_boxes, new_labels) show_img(img_with_rect)
examples/cutmix_example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import matplotlib.pyplot as plt plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro') plt.axis([0, 6, 0, 20]) plt.show() import numpy as np t = np.arange(0., 5., 0.2) plt.plot(t,t,'r--', t, t**2, 'bs', t, t**3, 'g^') # + data = {'a': np.arange(50), 'c': np.random.randint(0, 50, 50), 'd': np.random.randn(50)} data['b'] = data['a'] + 10 * np.random.randn(50) data['d'] = np.abs(data['d']) * 100 #print(data) plt.scatter('a', 'b', c='c', s='d', data=data) plt.xlabel('entry a') plt.ylabel('entry b') plt.show() # + names = ['group_a', 'group_b', 'group_c'] values = [1, 10, 100] plt.figure(1, figsize=(9,3)) plt.subplot(131) plt.bar(names, values) plt.subplot(132) plt.scatter(names, values) plt.subplot(133) plt.plot(names, values) plt.suptitle('Categorical Plotting') plt.show() # + names = ['group_a', 'group_b', 'group_c'] values = [1, 10, 100] plt.figure(1, figsize=(15,3)) plt.subplot(151) plt.bar(names, values) plt.subplot(153) plt.scatter(names, values) plt.subplot(154) plt.plot(names, values) plt.suptitle('Categorical Plotting') plt.show() # + lines = plt.plot([1, 2, 3]) plt.scatter((1,2),(3,4)) plt.setp(lines) # + def f(t): return np.exp(-t) * np.cos(2*np.pi*t) t1 = np.arange(0.0, 5.0, 0.1) t2 = np.arange(0.0, 5.0, 0.02) plt.figure(1) plt.subplot(211) plt.plot(t1, f(t1), 'bo', t2, f(t2), 'r--') plt.subplot(212) plt.plot(t2, np.cos(2*np.pi*t2), 'r--') plt.show() # + def f(t): return np.exp(-t) * np.cos(2*np.pi*t) t1 = np.arange(0.0, 5.0, 0.1) t2 = np.arange(0.0, 5.0, 0.02) plt.figure(figsize=(15,5)) plt.subplot(121) plt.plot(t1, f(t1), 'bo', t2, f(t2), 'r--') plt.subplot(122) plt.plot(t2, np.cos(2*np.pi*t2), 'r--') plt.show() # + plt.figure(100) # the first figure plt.subplot(211) # the first subplot in the first figure plt.plot([1, 2, 3]) plt.subplot(212) # the 
second subplot in the first figure plt.plot([4, 5, 6]) plt.figure(101) # a second figure plt.plot([4, 5, 6]) # creates a subplot(111) by default plt.figure(100) # figure 1 current; subplot(212) still current plt.subplot(211) # make subplot(211) in figure1 current plt.title('Easy as 1, 2, 3') # subplot 211 title # + slideshow={"slide_type": "slide"} mu, sigma = 100, 15 x = mu + sigma * np.random.randn(10000) n, bins, patches = plt.hist(x, 50, density=1, facecolor='g', alpha=0.75) plt.xlabel('Smarts') plt.ylabel('Proability') plt.title('Histogram of IQ') plt.text(60, .025, r'$\mu=100,\ \sigma=15$') plt.axis([40, 160, 0, 0.03]) plt.grid(True) plt.show() # + ax = plt.subplot(111) t = np.arange(0.0, 5.0, 0.01) s = np.cos(2*np.pi*t) line, = plt.plot(t, s, lw=2) plt.annotate('local max', xy=(2, 1), xytext=(3, 1.5), arrowprops=dict(facecolor='black', shrink=0.05), ) plt.ylim(-2, 2) plt.show() # + from matplotlib.ticker import NullFormatter np.random.seed(19680801) y = np.random.normal(loc=0.5, scale=0.4, size=1000) y = y[(y > 0) & (y < 1)] y.sort() x = np.arange(len(y)) plt.figure(1) plt.subplot(221) plt.plot(x, y) plt.yscale('linear') plt.title('linear') plt.grid(True) plt.subplot(222) plt.plot(x, y) plt.yscale('log') plt.title('log') plt.grid(True) plt.subplot(223) plt.plot(x, y - y.mean()) plt.yscale('symlog', linthreshy=0.01) plt.title('symlog') plt.grid(True) plt.subplot(224) plt.plot(x, y) plt.yscale('logit') plt.title('logit') plt.grid(True) # Format the minor tick labels of the y-axis into empty strings with # `NullFormatter`, to avoid cumbering the axis with too many labels. 
plt.gca().yaxis.set_minor_formatter(NullFormatter()) # Adjust the subplot layout, because the logit one may take more space # than usual, due to y-tick labels like "1 - 10^{-3}" plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.5, wspace=0.35) plt.show() # - count, bins, ignored = plt.hist(s, 30, density=True) plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ), linewidth=2, color='r') plt.show()
python/learn/matplotlib/pyplot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Students Turn Activity 1 Instructions
#
# In this activity, we are going to review Tweepy.
#
# * Loop through 5 pages of tweets (100 tweets total)
#
# * Print out each tweet with a number keeping track the number of tweets. For example, the most recent tweet should read "Tweet 1: <Text of the most recent tweet>".
#
# ### Hints
#
# * Use a nested for loop and a counter at the end of your loop to increment.
#
# * Refer to the [Tweepy Documentation](http://docs.tweepy.org/en/v3.5.0/api.html#timeline-methods).
#
# - - -

# +
# Dependencies
import tweepy
import json
from config import (consumer_key, consumer_secret,
                    access_token, access_token_secret)

# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())

# +
# Target User Account
target_user = "@github"

# Counter
counter = 1

# Loop through 5 pages of tweets (total 100 tweets)
for x in range(1,6):

    # Get all tweets from home feed
    # BUG FIX: the keyword argument is `page`, not `pagex` — tweepy's
    # user_timeline raises a TypeError on the unknown keyword.
    public_tweets = api.user_timeline(target_user, page=x)

    for tweet in public_tweets:
        # BUG FIX: the activity asks for output of the form
        # "Tweet <n>: <text>"; the original printed "Tip <n>: ...".
        print(f'Tweet {counter}: {tweet["text"]}')
        counter = counter + 1
# -

# # Instructor Turn Activity 2

# #### Introducing Valence Aware Dictionary and sentiment Reasoner (VADER).

# #### Resource https://github.com/cjhutto/vaderSentiment

# +
# !pip install vaderSentiment==2.5
# -

# Import and Initialize Sentiment Analyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()

# +
# Sample Strings
happy_string = "Your humble instructor is smart, beautiful, and funny!"
# angry_string = ("Ugh. I am feeling so distraught! "
#                 "I hate everything. 
" # "I am mad at everyone.") happy_emoticon_string = ":-) :) :-D ;-) :-P" # angry_emoticon_string = ":-( :( D-< :'(" # funny_slang_string = "lol rofl haha" # angry_slang_string = "Sux meh grr" # Target String Setting target_string = happy_emoticon_string # - # Run analysis results = analyzer.polarity_scores(target_string) results # Run analysis # Compound score is computing by summing the valence scores of each word # in the lexicon according to the rules and then normalized to be between -1 # most extreme negative and +1 most extreme positive. compound = results["compound"] pos = results["pos"] neu = results["neu"] neg = results["neg"] # Print Analysis print(target_string) print("Compound Score:", compound) print("Positive Score:", pos) print("Neutral Score:", neu) print("Negative Score: ", neg) # ## Students Turn Activity 3 # ## Sentiment, I am your analysis # # Your turn to run a VADER analysis. # # ### Instructions # # * Open and read the three sample texts. # # * For each sample, print out the sample text and the "compound", "positive", "neutral" and "negative" score for each. # # ### Hints # # * Start with reading files and printing them. Once you have this, treat the files as if they were in your script the entire time. (What would be good to do this?) 
# # - - - # Import and Initialize Sentiment Analyzer from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() # + # Placeholder for strings sample1 = "" sample2 = "" sample3 = "" # Open each of the samples with open('./Resources/Sample1.txt') as sample: sample1 = sample.read() with open('./Resources/Sample2.txt') as sample: sample2 = sample.read() with open('./Resources/Sample3.txt') as sample: sample3 = sample.read() # + # Run Vader Sentiment Analysis on Each of the Samples samples = [sample1, sample2, sample3] # Loop through Each Sample for sample in samples: results = analyzer.polarity_scores(sample) compound = results["compound"] pos = results["pos"] neu = results["neu"] neg = results["neg"] print(sample) print("Compound Score:", compound) print("Positive Score:", pos) print("Neutral Score:", neu) print("Negative Score:", neg) print() # Run Vader Analysis on each Sample # Print Samples and Analysis # - # ## Students Turn Activity 4 Twitter Analysis # # In this activity, you will inspect the sentiment of a famous individual's tweets. # # ### Instructions # # * Using the tweepy package, analyze the tweets from "@DalaiLama". # # * Analyze a total of 200 tweets. (How many tweets are on a page?) # # * Store the different scores (positive, negative, neutral, compound) in different lists. # # * Print the user's name and the mean of the following scores: # # * Compound # # * Positive # # * Neutral # # * Negative # # ### Bonus # # * Compare the analysis to two other twitter accounts. Display the results in a Pandas DataFrame. # # * Feel free to use accounts of your own but remember you need to have at least 200 tweets. 
# # * If you don't have an account or enough tweets, try using "@RealDonaldtrump" and "@Katyperry" # # - - - # # + # Dependencies import tweepy import numpy as np # Import and Initialize Sentiment Analyzer from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer analyzer = SentimentIntensityAnalyzer() # Twitter API Keys from config import (consumer_key, consumer_secret, access_token, access_token_secret) # Setup Tweepy API Authentication auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth, parser=tweepy.parsers.JSONParser()) # + # Target User Account target_user = "@RealDonaldtrump" # Variables for holding sentiments compound_list = [] positive_list = [] negative_list = [] neutral_list = [] # Loop through 10 pages of tweets (total 200 tweets) for x in range(1,11): # Get all tweets from home feed public_tweets = api.user_timeline(target_user, page=x) # Loop through all tweets for tweet in public_tweets: # Run Vader Analysis on each tweet results = analyzer.polarity_scores(tweet["text"]) compound = results["compound"] pos = results["pos"] neu = results["neu"] neg = results["neg"] # Add each value to the appropriate list compound_list.append(compound) positive_list.append(pos) negative_list.append(neg) neutral_list.append(neu) # - # Print the Averages print(f"User: {target_user}") print(f"Compound: {np.mean(compound_list):.3f}") print(f"Positive: {np.mean(positive_list):.3f}") print(f"Neutral: {np.mean(neutral_list):.3f}") print(f"Negative: {np.mean(negative_list):.3f}") # # Instructor Turn Activity 5 # + # Dependencies import tweepy import numpy as np # Twitter API Keys from config import (consumer_key, consumer_secret, access_token, access_token_secret) # Setup Tweepy API Authentication auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth, parser=tweepy.parsers.JSONParser()) # + # Search 
for People Tweeting about <NAME> search_term = "<NAME>" # Retrieve 100 tweets public_tweets = api.search(search_term, count=100) # Print Tweets for tweet in public_tweets["statuses"]: # Print the username print(tweet["user"]["screen_name"]) # Print the tweet text print(tweet["text"]) print() # - # Print total number of tweets retrieved print(len(public_tweets["statuses"])) # + # With Filter # Dependencies import tweepy # Twitter API Keys from config import (consumer_key, consumer_secret, access_token, access_token_secret) # Setup Tweepy API Authentication auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth, parser=tweepy.parsers.JSONParser()) # - # "Real Person" Filters min_tweets = 5 max_tweets = 10000 max_followers = 2500 max_following = 2500 lang = "en" # + # Search for People Tweeting about <NAME> search_term = "<NAME>" # Create variable for holding the oldest tweet oldest_tweet = None # List to hold unique IDs unique_ids = [] # Counter to keep track of the number of tweets retrieved counter = 0 # Loop through 5 times (total of 500 tweets) for x in range(5): # Retrieve 100 most recent tweets -- specifying a max_id public_tweets = api.search(search_term, count=100, result_type="recent", max_id=oldest_tweet) # Print Tweets for tweet in public_tweets["statuses"]: tweet_id = tweet["id"] # Use filters to check if user meets conditions if (tweet["user"]["followers_count"] < max_followers and tweet["user"]["statuses_count"] > min_tweets and tweet["user"]["statuses_count"] < max_tweets and tweet["user"]["friends_count"] < max_following and tweet["user"]["lang"] == lang): # Print the username print(tweet["user"]["screen_name"]) # Print the tweet id print(tweet["id_str"]) # Print the tweet text print(tweet["text"]) print() # Append tweet_id to ids list if it doesn't already exist # This allows checking for duplicate tweets if tweet_id not in unique_ids: unique_ids.append(tweet_id) # 
Increase counter by 1 counter += 1 # Reassign the the oldest tweet (i.e. the max_id) # Subtract 1 so the previous oldest isn't included # in the new search oldest_tweet = tweet_id - 1 # - # Print total number of tweets retrieved print(counter) # Print the number of unique ids retrieved print(len(unique_ids)) # # Students Turn Activity 7 Frequent Twitter Miles # # Airlines get a lot of flack on twitter, especially from big time Journalists who think they are the only ones affected by flying. Lets see what kind of tone everyone else takes when tweeting about them. # # ### Instructions # # * Your goal is to retrieve 1000 tweets for each of the 7 popular airlines and run a VADER sentiment analysis on them. # # * Create an empty list to hold to results from each airline, # # * Filter the tweets using the given "Real Person" filter variables. # # * Create a "sentiment" dictionary for each airline that includes the search term, and the averages of the compound, neutral, positive, and negative scores. Print this dictionary and append it to a list holding each airline's results. # # * Create a DataFrame to display the results. # # ### HINTS # # * Start with a subset of data for each airline while testing. Then, adapt your code to collect all 1000 tweets per airline. 
#
# * For a reference on using max_id, see this link: [Working with Timelines](https://developer.twitter.com/en/docs/tweets/timelines/guides/working-with-timelines)
#
# - - -
#

# +
# Dependencies
import tweepy
import numpy as np
import pandas as pd

# Import and Initialize Sentiment Analyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()

# Twitter API Keys
from config import (consumer_key, consumer_secret,
                    access_token, access_token_secret)

# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())

# +
# Target Search Term
# BUG FIX: "@Virginalantic" misspells the Virgin Atlantic handle, so that
# airline would silently return no (relevant) results.
target_terms = ["@SouthwestAir", "@AmericanAir", "@SpiritAirlines",
                "@VirginAtlantic", "@Delta", "@AlaskaAir", "@KLM"]

# "Real Person" Filters
min_tweets = 5
max_tweets = 10000
max_followers = 2500
max_following = 2500
lang = "en"

# List to hold results
results_list = []

# Loop through all target users
for target in target_terms:

    # Variable for holding the oldest tweet
    oldest_tweet = None

    # Variables for holding sentiments
    compound_list = []
    positive_list = []
    negative_list = []
    neutral_list = []

    # Loop through 10 times
    for x in range(10):

        # Run search around each tweet
        # BUG FIX: result_type must be one of "recent" / "popular" /
        # "mixed"; "result" is not a valid value for the search API
        # (the earlier example correctly used "recent").
        public_tweets = api.search(
            target, count=100, result_type="recent", max_id=oldest_tweet
        )

        for tweet in public_tweets["statuses"]:
            # Loop through all tweets
            # Use filters to check if user meets conditions
            if(tweet["user"]["followers_count"] < max_followers and
               tweet["user"]["statuses_count"] > min_tweets and
               tweet["user"]["statuses_count"] < max_tweets and
               tweet["user"]["friends_count"] < max_following and
               tweet["user"]["lang"] == lang):

                # Run Vader Analysis on each tweet
                results = analyzer.polarity_scores(tweet["text"])
                compound = results["compound"]
                pos = results["pos"]
                neu = results["neu"]
                neg = results["neg"]

                compound_list.append(compound)
                positive_list.append(pos)
negative_list.append(neg) neutral_list.append(neu) oldest_tweet = tweet["id"] - 1 sentiment = { "User": target, "Compound": np.mean(compound_list), "Positive": np.mean(positive_list), "Neutral": np.mean(neutral_list), "Negative": np.mean(negative_list), "Tweet Count": len(compound_list) } print(sentiment) print() results_list.append(sentiment) # Set the new oldest_tweet value # Store the Average Sentiments # Print the Sentiments # Append airline results to 'results_list' # - # Create a DataFrame using results_list and display airline_df = pd.DataFrame(results_list).set_index("User") airline_df # # Students Activity 8 # # Now Plot it out # # Sentiment Analysis is already crazy fun, but how can we visualize it? In this exercise we will do just that! # # ### Instructions # # * Run a sentiment analysis on tweets from "@SouthwestAir". # # * On top of you regular analysis, you should also keep track of how many tweets ago was it tweeted. # # * Next store our results into a pandas DataFrame and read results. # # * Finally create a plot with the follow labels. # # * Title as "Sentiment Analysis of Tweet (`date`) for `Twitter Handle`. # # * Y label as "Tweet Polarity" # # * X label as "Tweets Ago" # # ### Hints # # * Make sure you are using the correct method from Tweepy to gather statuses from a specific user. # # ### Bonus # # * Order the tweets from oldest to newest in the plot. 
# # - - -

# +
# Dependencies
import tweepy
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')

# Import and Initialize Sentiment Analyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()

# Twitter API Keys
from config import (consumer_key, consumer_secret,
                    access_token, access_token_secret)

# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())

# +
# Target Account
target_user = "@SouthwestAir"

# Counter: how many tweets ago each tweet was seen (1 = most recent)
counter = 1

# Variables for holding sentiments
sentiments = []

# Variable for max_id: None on the first page; afterwards the oldest seen
# tweet id minus 1, so each user_timeline() call pages further back in time
oldest_tweet = None

# Loop through 5 pages of tweets (total 100 tweets)
for x in range(5):

    # Get one page of tweets from the target account's timeline
    public_tweets = api.user_timeline(target_user, max_id=oldest_tweet)

    # Loop through all tweets
    for tweet in public_tweets:

        # Print Tweets
        # print("Tweet %s: %s" % (counter, tweet["text"]))

        # Run Vader Analysis on each tweet
        results = analyzer.polarity_scores(tweet["text"])
        compound = results["compound"]
        pos = results["pos"]
        neu = results["neu"]
        neg = results["neg"]
        tweets_ago = counter

        # Get Tweet ID, subtract 1, and assign to oldest_tweet
        oldest_tweet = tweet["id"] - 1

        # Add sentiments for each tweet into a list.
        # BUG FIX: the positive-score key was misspelled "Positivie";
        # also record the neutral score, which was computed above but
        # previously dropped.
        sentiments.append({"Date": tweet["created_at"],
                           "Compound": compound,
                           "Positive": pos,
                           "Neutral": neu,
                           "Negative": neg,
                           "Tweets ago": counter})

        # Add to counter
        counter += 1
# -

# Convert sentiments to DataFrame
sentiments_pd = pd.DataFrame.from_dict(sentiments)
sentiments_pd.head()

# Create plot
x_vals = sentiments_pd["Tweets ago"]
y_vals = sentiments_pd["Compound"]
plt.plot(x_vals, y_vals, marker="o", linewidth=0.5, alpha=0.8)

# # Incorporate the other graph properties
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M")
# BUG FIX: the closing parenthesis belongs after the date, matching the
# requested title format "Sentiment Analysis of Tweet (<date>) for <handle>"
plt.title(f"Sentiment Analysis of Tweets ({now}) for {target_user}")
plt.ylabel("Tweet Polarity")
plt.xlabel("Tweets Ago")
plt.show()

# # Instructor Turn Activity 9 In Tweets Out

# +
# Dependencies
import tweepy

# Twitter API Keys
from config import (consumer_key, consumer_secret,
                    access_token, access_token_secret)

# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())

# +
# Create a status update
api.update_status("Hey! I'm tweeting programmatically!")

# Create a status update with an attached image
api.update_with_media("./Resources/too-much-big-data.jpg",
                      "And now... I just tweeted an image programmatically!")
# -

# # Students Activity 10 Hello Twitter World
#
# You've spent so much time analyzing and reading everyone else's tweets that you now have a lot on your mind. Well fear not! Now is your time to talk to the world.
#
# ### Instructions
#
# * Update your twitter with two separate status updates.
#
# * Update your twitter with a photo and a witty comment to go with it.
#
# * Look up how to add friends by using "Friendship Methods". Programmatically add someone in class as your friend, and have them add you as a friend.
#
# * Once you are both twitter followers of each other you can then send direct messages to one another. Give it a try!
#
# * Use the [tweepy docs](http://tweepy.readthedocs.io/en/v3.5.0/api.html) for reference
#
# ### Bonus
#
# * Retweet a tweet from someone else's account.
#
# ### Double Bonus
#
# * Delete some of your most recent tweets.
#
# - - -

# +
# Note: Twitter prevents from tweeting the same status or message multiple
# times. Be sure to change the text when testing.
# Create two status updates
api.update_status("Hello There!")
api.update_status("It's a great day")  # BUG FIX: was "It;s" (typo)
# -

# Create a status update with an image
api.update_with_media("./Resources/image.jpg", "Programming skills == ")

# Create a friendship with another user
api.create_friendship(screen_name="@codewithcorgis", follow=True)

# Send a direct message to another user (Hint: You will need them to
# follow your account)
api.send_direct_message(user="codewithcorgis", text="hiiiiiiiii!!!!!")

# Bonus: Retweet any tweet from someone else's account (Hint: You will
# need to locate a tweet's id)
target_user = "@ddjournalism"
public_tweets = api.user_timeline(target_user)
# BUG FIX: was public_tweets[0][id] — that indexes with the builtin `id`
# function instead of the string key "id", and raises at runtime.
tweet_id = public_tweets[0]["id"]
api.retweet(tweet_id)

# Bonus: Delete your most recent tweet (Hint: "Destroy")
my_tweets = api.user_timeline()
tweet_id = my_tweets[0]["id"]
api.destroy_status(tweet_id)
Activities Week 7 (social analytics)/Social_Analytics_Part3/Day2/Day2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #hide from public_data_food_analysis_3 import * # # Public data food analysis # # > # ## Install # `pip install public_data_food_analysis_3` # ## Example for data analysis on the Columbia study import public_data_food_analysis_3.columbia as pdfac import pandas as pd # ### Take a brief look on the food logging dataset and the reference information sheet pdfac.read_logging_data('data/col_test_data').head(2) pd.read_excel('data/col_test_data/toy_data_17May2021.xlsx').head(2) # ### make the table that contains extra analytic information that we want df = pdfac.make_table(pdfac.read_logging_data('data/col_test_data')\ , pd.read_excel('data/col_test_data/toy_data_17May2021.xlsx')) df df.iloc[0] df.iloc[1] # ## Example for data analysis using public data food analysis module import public_data_food_analysis_3.core as pdfaco import pandas as pd # ### take a look at the original dataset df = pdfaco.universal_key('data/test_food_details.csv') df.head(2) # ### preprocess the data to have extra basic features df = pdfaco.load_public_data(df) df.head(2) # ### do a brief annalysis df = pdfaco.summarize_data(df, 'local_time', 'unique_code') df.head(2) df.iloc[0] df.iloc[1]
index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/abegpatel/Gradient-descent-implementation/blob/master/Gradient_descent.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="mJ1ngFliNbla" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="cbf85e24-2895-4dda-9a2d-942e35a0ef96"
#gradient descent
#minimize the error
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

def draw(x1,x2):
    # Draw the current decision boundary, pause so the figure refreshes,
    # then remove the line so the next call animates the training progress.
    ln=plt.plot(x1,x2,'-')
    plt.pause(0.0001)
    ln[0].remove()

def sigmoid(score):
    # Logistic function: maps any real-valued score into (0, 1).
    return 1/(1+np.exp(-score))

def calculate_error(line_parameters, points , y):
    # Mean binary cross-entropy of the model's predictions over all points.
    # NOTE(review): defined but never called below — presumably intended
    # for monitoring the loss during training.
    # `points` is the (n, 3) matrix of homogeneous coordinates; `y` the
    # (n, 1) label vector — matrix ops assume np.matrix semantics.
    n=points.shape[0]
    p= sigmoid(points*line_parameters)
    cross_entropy=-(1/n)*(np.log(p).T*y + np.log(1-p).T*(1-y))
    return cross_entropy

def gradient_descent(line_parameters, points, y , alpha):
    # Run 2000 steps of batch gradient descent on the logistic-regression
    # parameters [w1, w2, b], redrawing the decision boundary every step.
    n=points.shape[0]
    for i in range(2000):
        p=sigmoid(points*line_parameters)
        gradient= points.T*(p-y)*(alpha/n)
        line_parameters = line_parameters - gradient
        w1=line_parameters.item(0)
        w2=line_parameters.item(1)
        b=line_parameters.item(2)
        # Boundary is w1*x1 + w2*x2 + b = 0, solved for x2 at the two
        # extreme x1 values of the data.
        x1=np.array([points[:,0].min(), points[:,0].max()])
        x2= -b/w2 + (x1*(-w1/w2))
        draw(x1,x2)

# Two Gaussian clusters with a bias column appended (homogeneous coords).
n_pts=100
np.random.seed(0)
bias= np.ones(n_pts)
top_region=np.array([np.random.normal(10,2,n_pts), np.random.normal(12,2,n_pts), bias]).T
bottom_region= np.array([np.random.normal(5,2, n_pts), np.random.normal(6,2, n_pts), bias]).T
all_points=np.vstack((top_region, bottom_region))
# Parameters start at zero; np.matrix so `*` means matrix multiplication.
line_parameters = np.matrix([np.zeros(3)]).T
# x1=np.array([bottom_region[:,0].min(), top_region[:,0].max()])
# x2= -b/w2 + (x1*(-w1/w2))
# Labels: 0 for the top (red) cluster, 1 for the bottom (blue) cluster.
y=np.array([np.zeros(n_pts), np.ones(n_pts)]).reshape(n_pts*2, 1)
_, ax= plt.subplots(figsize=(4,4))
ax.scatter(top_region[:,0], top_region[:,1], color='r')
ax.scatter(bottom_region[:,0], bottom_region[:,1], color='b')
gradient_descent(line_parameters, all_points, y , 0.06)
plt.show()

# + id="NycJxNJzNiE8" colab_type="code" colab={}
Gradient_descent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Observations and Insights # Conclusions # # 1.- The gender distrubuiton in the the mice population was balanced, almost the same number of males and female mice, there isn't any indicator that the gender plays an important role in the effectiveness of any of the drug regimens. # # 2.- Based on the data provided with can see that mice Capomulin have proven to have the highest survival rate compare to the other drug regimens used in the study. Ramicane has the lowest tumor volume median followed by Capomulin. # # 3.- The correlation between body weight and average tumor volume among mice treated with Capomulin shows that tumor volume is directly related to the weight of a the trated mice. # # 4.- Mice treated with Capomulin presented a clear improvement in the tumor volume by the end of the study. Out of all the drug regimens Capomulin showed second best results right after Ramicane regimen. # # 5.- Infubinol Proved to be the least efficient drug as a treatment to treat the tumors. # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st from scipy.stats import linregress # Study data files mouse_metadata_path = "data/Mouse_metadata.csv" study_results_path = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata_path) study_results = pd.read_csv(study_results_path) # Combine the data into a single dataset merged_df = pd.merge(mouse_metadata,study_results, on='Mouse ID') # Display the data table for preview merged_df.head() # - # Checking the number of mice. merged_df['Mouse ID'].nunique() # Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. 
duplicateMiceData = merged_df[merged_df.duplicated(['Mouse ID',"Timepoint"])] duplicateMiceData # Optional: Get all the data for the duplicate mouse ID. duplicateMiceData = merged_df[merged_df["Mouse ID"].isin(['g989'])] duplicateMiceData # Create a clean DataFrame by dropping the duplicate mouse by its ID. Cleaned_df = merged_df.loc[merged_df["Mouse ID"] != 'g989'] # Checking the number of mice in the clean DataFrame. Cleaned_df['Mouse ID'].nunique() # Number of rows has decreased too, because we dropped the duplicate mouse Cleaned_df.count() # ## Summary Statistics # + # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen grouped_by_drugs = Cleaned_df.groupby("Drug Regimen") # Use groupby and summary statistical methods to calculate the following properties of each drug regimen: # mean, median, variance, standard deviation, and SEM of the tumor volume. # Calculating Mean Volume mean_volume = grouped_by_drugs['Tumor Volume (mm3)'].mean() # Calculating Median median_volume = grouped_by_drugs['Tumor Volume (mm3)'].median() # Calculating Variance variance_volume = grouped_by_drugs['Tumor Volume (mm3)'].var() # Calculating Standard Deviation std = grouped_by_drugs['Tumor Volume (mm3)'].std() # Calculating SEM sem = grouped_by_drugs['Tumor Volume (mm3)'].sem() # Assemble the resulting series into a single summary dataframe. 
volume_stats_by_drug = pd.DataFrame({"Mean":mean_volume, "Median":median_volume, "Variance":variance_volume, "Standard Deviation":std, "Standard Error of the Mean":sem }) #summary DataFrame volume_stats_by_drug # - # Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen # Using the aggregation method, produce the same summary statistics in a single line # using the groupby object we will use the Aggregation method volume_stats = {"Tumor Volume (mm3)":["mean","median","var","std","sem"]} summarry_table = grouped_by_drugs.agg(volume_stats) summarry_table # ## Bar and Pie Charts # + # Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas. total_counts = Cleaned_df['Drug Regimen'].value_counts() chart1 = total_counts.plot.bar(rot=45,figsize=(11,7),fontsize=12, edgecolor='gray'); # Formatting Text and limits chart1.set_title("Total number of measurements taken on each drug regimen", fontsize=17) chart1.set_ylabel("Count",fontsize=14) chart1.set_xlabel("Drug Regimen",fontsize=14) chart1.set_ylim(0,250); # style # + # Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot. 
drugs = Cleaned_df['Drug Regimen'].value_counts().index counts = list(Cleaned_df['Drug Regimen'].value_counts()) plt.figure(figsize=(11,7)); plt.style.use('seaborn') plt.xticks(rotation=45, fontsize=12) plt.bar(drugs,counts,width=0.55) # setting labels plt.title("Total number of measurements taken on each drug regimen", fontsize=17) plt.xlabel("Drug Regimen",fontsize=14) plt.ylabel("Count",fontsize=14); # + # Generate a pie plot showing the distribution of female versus male mice using pandas genderdf = Cleaned_df.drop_duplicates(subset='Mouse ID', keep='first')['Sex'].value_counts() piechart1 = genderdf.plot.pie(startangle=-45, autopct="%1.1f%%", figsize=(7,7), fontsize=15, colors=['skyblue','lightpink'], shadow=True); piechart1.set_ylabel("") piechart1.axis("equal") piechart1.set_title("Distribution of female versus male mice", fontsize=20); # + # Generate a pie plot showing the distribution of female versus male mice using pyplot genders = genderdf.index # getting the 2 genders and store them in a list gender_count = genderdf.values textprops = {"fontsize":14} plt.figure(figsize=(8,8)) plt.title("Distribution of female versus male mice", fontsize=20) plt.pie(gender_count, labels=genders, startangle=-45, autopct="%1.1f%%", colors=['skyblue','lightpink'], shadow=True, textprops=textprops); # - # ## Quartiles, Outliers and Boxplots # + # Calculate the final tumor volume of each mouse across four of the treatment regimens: # Capomulin, Ramicane, Infubinol, and Ceftamin # Start by getting the last (greatest) timepoint for each mouse final_volume = Cleaned_df.drop_duplicates(subset='Mouse ID', keep='last') final_volume = final_volume[['Mouse ID','Timepoint']] # Merge this group df with the original dataframe to get the tumor volume at the last timepoint final_volume_last_timepoint = pd.merge(final_volume, Cleaned_df, how='left', on=['Mouse ID','Timepoint']) final_volume_last_timepoint.head() # + # Put treatments into a list for for loop (and later for plot labels) 
regimens= ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"] # Create empty list to fill with tumor vol data (for plotting) tumors_vols = [] # Calculate the IQR and quantitatively determine if there are any potential outliers. # Locate the rows which contain mice on each drug and get the tumor volumes for regimen in regimens: tumor_vol = final_volume_last_timepoint.loc[final_volume_last_timepoint['Drug Regimen'].isin([regimen])]['Tumor Volume (mm3)'] # print(tumor_vol['Tumor Volume (mm3)']) # add subset tumors_vols.append(tumor_vol) # Determine outliers using upper and lower bounds quartiles = tumor_vol.quantile(q=[0.25,0.5,0.75]) lower_quartile = quartiles[0.25] upper_quartile = quartiles[0.75] median = quartiles[0.5] iqr = upper_quartile-lower_quartile lower_boundary = lower_quartile - (1.5 * iqr) upper_boundary = upper_quartile + (1.5 * iqr) print("┌-------------------------------------------------------------------┐") print(f"|\t\t\tPotential Outliers for {regimen} ") print(f"| The lower quartile of the final tumor volumes is {lower_quartile:.3f}") print(f"| The median of the final tumor volumes is {median:.3f}") print(f"| The upper quartile of the final tumor volumes is {upper_quartile:.3f}") print(f"| The Inner quartile is {iqr:.3f}\n|\n|") print(f"| Values located below {lower_boundary:.3f} could be considered outliers") print(f"| Values located above {upper_boundary:.3f} could be considered outliers") print("└-------------------------------------------------------------------┘\n") # + # Generate a box plot of the final tumor volume of each mouse across four regimens of interest # using "tumors_vols" list, previously poupulated fig = plt.figure(figsize =(10, 7)) # Creating axes instance ax = fig.add_axes([0, 0, 1, 1]) # Creating plot bp = ax.boxplot(tumors_vols, patch_artist=True) # Setting title and labels plt.xticks([1, 2, 3, 4], regimens, fontsize=15) plt.ylabel("Final tumor volume of each mouse across the regimens", fontsize=16) 
############################################################ # Formatting style # ############################################################ # Applying seaborn style plt.style.use('seaborn') # Change color and linewidth of the medians: for median in bp['medians']: median.set(color='yellow', linewidth=4) # Changing the Fliers: for flier in bp['fliers']: flier.set(marker='o', markersize=10, markerfacecolor='red', alpha=0.6) # Changing fill color of boxes: for box in bp['boxes']: box.set(color='lightblue') # plt.title("Potencial Outliers", fontsize=25) plt.show() # - # ## Line and Scatter Plots # Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin Capomulin_df = Cleaned_df.loc[Cleaned_df['Drug Regimen'].isin(["Capomulin"])] # Randomly picking a mouse random_mouse = Capomulin_df.sample()['Mouse ID'] mouse_data = Capomulin_df.loc[Capomulin_df["Mouse ID"].isin(random_mouse)] mouse_data = mouse_data[["Timepoint","Tumor Volume (mm3)"]] line_chart =mouse_data.plot.line(x='Timepoint', y='Tumor Volume (mm3)', figsize=(14,8),fontsize=15, marker='o') line_chart.set_xlabel("Tiemepoint", fontsize=18) line_chart.set_ylabel("Tumor Volume (mm3)", fontsize=18) line_chart.set_title(f"Capomulin Treatment of mouse {random_mouse.values[0]}", fontsize=20) line_chart.set_xlim(min(mouse_data['Timepoint'])-2,max(mouse_data['Timepoint'])+2) # + # Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen avg_values_df = Capomulin_df.groupby("Mouse ID").mean() plt.figure(figsize=(14,9)) plt.title("Mouse weight vs. 
Tumor volume on Capomulin Regimen", fontsize=24) plt.scatter(avg_values_df["Weight (g)"],avg_values_df["Tumor Volume (mm3)"], s = 100) plt.xlabel("Mouse weight (gr)", fontsize=17) plt.ylabel("Average tumor volume (mm3)", fontsize=17) plt.xticks(fontsize= 15) plt.yticks(fontsize= 15); # - # ## Correlation and Regression # + # Calculate the correlation coefficient and linear regression model # for mouse weight and average tumor volume for the Capomulin regimen # Correlation correlation = st.pearsonr(avg_values_df["Weight (g)"],avg_values_df["Tumor Volume (mm3)"]) print(f"The correlation between both factors is {round(correlation[0],2)}") # + # Linear Regresion model x = avg_values_df["Weight (g)"] y = avg_values_df["Tumor Volume (mm3)"] (slope, intercept, rvalue, pvalue, stderr) = linregress(x, y) regress_values = x * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.figure(figsize=(14,9)) plt.title("Mouse weight vs. Tumor volume on Capomulin Regimen", fontsize=24, ) plt.scatter(x,y, s=100) plt.plot(x,regress_values,"r-") plt.annotate(line_eq,(18,40.4),fontsize=15,color="red") plt.xlabel("Mouse weight (gr)", fontsize=17) plt.ylabel("Average tumor volume (mm3)", fontsize=17) plt.xticks(fontsize= 15) plt.yticks(fontsize= 15); print(f"The r-squared value is: {rvalue**2}") # -
Pymaceuticals/pymaceuticals.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 6. előadás # _Tartalom_: Dict, tuple, kivételkezelés (try / except) # ### A tuple # Eddig ha egy objektumban több más objektum felsorolását akartuk tárolni, akkor listákat használtunk: [1, 2, 3, 4] ["Bill", "Charlie", "Percy"] # Általában akkor szokás a lista típust használni, ha az adatunk valóban lista-szerű: nem fontos, hogy pontosan hány eleme van, és egy-egy elem funkcióját nem az határozza meg, hogy ő hányadik a listában. Ilyen volt a filmes táblázatunk. import os film_fajl = os.path.join("data", "movies.tsv") filmek = [line.strip().split('\t') for line in open(film_fajl)] filmek[:3] # Amikor azonban egy ilyen felsorolásnak van egy kötött _szerkezete_, mint például a filmes táblázat egyes sorainál, ahol fontos, hogy a felsorolás pontosan 3 hosszúságú, és minden pozíciónak szerepe van, akkor a lista helyett egy másik típust szoktunk használni, a __tuple__-t, vagy más néven rendezett n-es véges listát: filmek[0] tuple(filmek[0]) list("pingvin") # A tuple elemeit sima kerek zárójelek közé írjuk. 
Egy-egy elemét ugyanúgy tudjuk lekérdezni, mint egy listáét: film = tuple(filmek[0]) film film[0] # Viszont egy tuple-höz nem fűzhetünk hozzá újabb elemet: # `film.append("x")` # # ``` python # AttributeError: 'tuple' object has no attribute 'append' # ``` # És nem változtathatjuk meg 1-1 elemét: # `film[1] = "1994"` # # ``` python # TypeError: 'tuple' object does not support item assignment # ``` # Cserébe több előnye is van a tuple-öknek a listákkal szemben, _ezek egy részéről még fogunk tanulni_, de egyelőre annyi elég, hogy "tisztább helyzetet" teremt a kódunkban: ha egy felsorolásunknak fix szerkezete van, ha nem dolga, hogy változzon a hosszúsága vagy hogy változzanak az elemei, akkor tuple-ban tároljuk, így nem fogjuk még tévedésből sem "elrontani" a benne lévő adatot: filmek = [tuple(film) for film in filmek] filmek[:3] # # # # # ### A dictionary # Az eddig megismert típusok (int, float, string, lista, tuple) számokat vagy szövegeket illetve ezek felsorolásait tárolta. Most egy olyan típust ismerünk meg, ami egy _leképezést_ vagy _megfeleltetést_ tárol különböző objektumok párjai között, ez a típus a __dictionary__ (magyarul: szótár). # Nevével ellentétben a dictionary nem definíciókat tárol, hanem egyszerűen a benne tárolt objektumok mindegyikét hozzákapcsolja valamilyen másik objektumhoz. Az alábbi dictionary például neveket tárol, és mindegyikhez rendel egy-egy számot, ami lehet például az adott nevű ember életkora: weasleyk = {"Bill": 21, "Charlie": 19, "Percy": 15, "Fred": 13, "George": 13, "Ron": 11, "Ginny": 10} # A dictionary-k elemeit kapcsos zárójelek között ({}) soroljuk fel, a felsorolt párok két felét pedig, amelyeket __kulcsnak__ ill. __értéknek__ hívunk, kettősponttal választjuk el egymástól. 
# A dictionary-k legfontosabb funkciója, hogy bármelyik kulcshoz lekérhetjük a hozzátartozó értéket, méghozzá úgy, hogy a dictionary után szögletes zárójelbe írjuk a kulcsot: weasleyk["Charlie"] weasleyk["Ron"] # Ha olyan kulcsot keresünk a dictionary-ben, ami nem szerepel benne, hibát kapunk: # `weasleyk["Arthur"]` # ``` python # KeyError: 'Arthur' # ``` # Hogy egy adott kulcs szerepel-e egy dictionary-ben, ugyanúgy tudjuk ellenőrizni, mint listák esetében: "Arthur" in weasleyk # Új párokat így adhatunk hozzá a dictionary-hez: weasleyk["Arthur"] = 41 # És hasonlóképp tudjuk módosítani az egy már létező kulcshoz tartozó értéket: weasleyk["Arthur"] = 42 # Ha _for_ ciklussal megyünk végig egy dictionary elemein, akkor a kulcsain megyünk végig. Az alábbi ciklus például minden Weasley-t egy évvel "öregít": for nev in weasleyk: weasleyk[nev] += 1 weasleyk # Ha szükségünk van rá, egy dictionary összes kulcsát, összes értékét, vagy összes kulcs-érték párját is lekérhetjük: list(weasleyk.keys()) weasleyk.values() list(weasleyk.items()) # Ezek a típusok nem listák, hanem annál "okosabb" típusok, de egyrészt bármikor listává tudjuk őket alakítani: list(weasleyk.keys()) # Másrészt egy for ciklussal végig tudunk menni az elemein: for eletkor in weasleyk.values(): print(eletkor) # Az _items_ által létrehozott listák elemei tuple-ök: list(weasleyk.items())[3] # **Fontos:** A dictionary-k elemeinek nincsen sorrendjük! Amikor egy for cilussal bejárjuk az elemeit, egy véletlenszerű sorrendben fogjuk egymásután kapni a kulcsokat. # Az értékek bármilyen olbjektumok lehetnek, a kulcsok viszont nem. 
Az általunk már ismert típusok közül a listák nem lehetnek kulcsok: d = {} # + # d[[1,2]] = 3 # - # Lehetnek viszont tuple-ök: d[(1,2)] = 3 d # Egy kulcshoz tartozó érték lehet akár egy újabb dictionary, így már összetettebb adatot is tárolhatunk egy-egy objektumban: hallgato = {"vezetéknév": "Róbert", "keresztnév": "Gida", "felhasználónév": "robertgida", "születésnap": (1920, 8, 21), "kedvenc": {"étel": "r<NAME>", "állat": "medve", "szín": "kék", "zene": "<NAME>"}, "hobbik": ["fáramászás", "színezés", "sütievés"]} hallgato["kedvenc"]["étel"] # Készítsük el a filmes adatunk egy ilyen reprezentációját: cim_szerint = {} for film in filmek: cim = film[0].strip() ev = int(film[1]) mufajok = film[2].split(',') cim_szerint[cim] = {"ev": ev, "mufajok": mufajok} # Most olyan dictionary-ben tároljuk az adatot, aminek a kulcsai a filmcímek, így nagyon könnyű cím alapján lekérni egy-egy filmet: cim_szerint["Die Hard"] cim_szerint["Toy Story"] # De ugyanígy elkészíthetnénk azt a dictionary-t is, ami az évekhez rendel filmeket, ekkor persze egy-egy évhez egy egész lista tartozna: ev_szerint = {} for film in filmek: cim = film[0].strip() ev = int(film[1]) mufajok = film[2].split(',') if ev not in ev_szerint: ev_szerint[ev] = [] ev_szerint[ev].append({"cim": cim, "mufajok": mufajok}) ev_szerint[1986] # Hogy milyen szerkezetű adatot építünk, az attól függ, hogyan akarjuk majd használni. 
Ha például szeretnénk tudni műfaj alapján szűkíteni és utána adott évre is keresni, akkor eszerint kell felépítenünk az adatunkat: mufaj_ev_szerint = {} for film in filmek: cim = film[0].strip() ev = int(film[1]) mufajok = film[2].split(',') for mufaj in mufajok: if mufaj not in mufaj_ev_szerint: mufaj_ev_szerint[mufaj] = {} if ev not in mufaj_ev_szerint[mufaj]: mufaj_ev_szerint[mufaj][ev] = [] mufaj_ev_szerint[mufaj][ev].append(cim) # Ekkor az 1986-os akciófilmeket így tudom kilistázni: mufaj_ev_szerint["action"][1986] # Az 1998-as drámákat pedig így: mufaj_ev_szerint["drama"][1998] # Olvassuk be a második filmes adatbázisunkat is egy dictionary-be! Először használjuk az eredeti beolvasó függvényt: def adatot_beolvas(fajlnev): f = open(fajlnev, 'r', encoding="utf-8") fejlec = f.readline().strip().split('\t') mezok_szama = len(fejlec) adat = [] for i in range(mezok_szama): adat.append([]) print('mezok szama:', mezok_szama) for sor in f: mezok = sor.strip('\n').split('\t') for i in range(mezok_szama): adat[i].append(mezok[i].strip()) return adat, fejlec import os adat_fajl = os.path.join("data", "movie_data.tsv") adat, fejlec = adatot_beolvas(adat_fajl) fejlec # Majd írjunk egy függvényt, ami dict-ekbe konvertálja az adatot. 
Azt is meg kell tudnunk adni a függvénynek, hogy melyik mező legyen a kulcs: def adatbol_dict(adat, fejlec, kulcs_mezo): kimenet = {} mezok_szama = len(adat) adat_meret = len(adat[0]) kulcs_oszlop = fejlec.index(kulcs_mezo) for i in range(adat_meret): kulcs = adat[kulcs_oszlop][i] if kulcs not in kimenet: kimenet[kulcs] = [] film = {} for n in range(mezok_szama): mezo = fejlec[n] ertek = adat[n][i] film[mezo] = ertek kimenet[kulcs].append(film) return kimenet # Próbáljuk ki, éptsünk adatot, amiben a főszereplő neve a kulcs: szinesz_szerint = adatbol_dict(adat, fejlec, "actor_1_name") # Most nézzük meg, mit tudunk <NAME> első filmjéről az adatban: szinesz_szerint["Brad Pitt"][0] szinesz_szerint["Brad Pitt"][-1] # Látható, hogy egy sokkal olvashatóbb formátumot hoztunk létre, amiben keresni is egyszerűbb # ### A kivételkezelés # Nézzük meg, miből épül fel egy kivétel, okozzunk egyet: # # `4/0` # # ``` python # --------------------------------------------------------------------------- # ValueError # Traceback (most recent call last) # <ipython-input-73-2107a36c2657> in <module>() # ----> 1 int("pingvin") # ZeroDivisionError: division by zero # ``` # # Az utolsó sor megadja a kivétel típusát (jelen esetben`ZeroDivisionError`), ezt követi bármilyen további információ, amit tudhatunk az adott hiba okáról. Az utolsó sor előtti rész mutatja meg, hogy melyik sorok futtatásánál keletkezett a hiba. Nézzünk még egy példát: # # # `int("pingvin")` # # ``` python # --------------------------------------------------------------------------- # ValueError Traceback (most recent call last) # <ipython-input-73-2107a36c2657> in <module>() # ----> 1 int("pingvin") # # ValueError: invalid literal for int() with base 10: 'pingvin' # ``` # # Amikor egy szót próbálunk számmá konvertálni, `ValueError` keletkezik, de ennél többet is tudunk, a : utáni rész részletezi a hibát, ti. hogy a `"pingvin"` nem értelmezhető számként (10-es számrendszerben). 
# # Kivételkezelésnek azt hívjuk, amikor a kódot előre felkészítjük arra, hogy bizonyos típusú kivételeket "elviseljen", vagyis ha adott típusú hibákat okoz a futása, akkor ne álljon le, hanem valamit reagáljon. Az alábbi kód például beolvas egy számot, és ha nem tudja int-té konvertálni, akkor ezt írja ki. lista = [10, 11, 12, 'asdf', 13] for elem in lista: try: szam = int(elem) print(szam, " négyzete ", szam ** 2) except ValueError: print(elem, " >> nem szám!") # A `try` és `except` szavak közötti blokkba kell írni azokat a parancsokat, amelyek során hibára számítunk. Az except után kell felsorolni azokat a hibatípusokat, amelyeket "el kell kapni", és ezt követi az a blokk, ami megadja, a hiba jelentkezése esetén milyen kód fusson le. # # Így aztán például megírhatunk egy fájl-beolvasó függvényt úgy, hogy ne okozzon problémát, ha a fájl egy-két sora hibás, vagy csak másmilyen, mint a többi. # ## _Used sources_ / Felhasznált források # - [<NAME>: Python lessons repository](https://github.com/shannonturner/python-lessons) MIT license (c) <NAME> 2013-2014 # - [Siki Zoltán: Python mogyoróhéjban](http://www.agt.bme.hu/gis/python/python_oktato.pdf) GNU FDL license (c) Siki Zoltán # - [BME AUT](https://github.com/bmeaut) MIT License Copyright (c) BME AUT 2016-2018
eload/.ipynb_checkpoints/ea06-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Object Oriented Programming
#
# ## Defining a class

class Student(object):
    """We can create a simple empty class.

    This is a definition that says what a student is.
    """

# + tags=["nbval-ignore-output"]
vince = Student() # Creating an instance
vince

# + tags=["nbval-ignore-output"]
zoe = Student() # Creating a different instance
zoe
# -

# ## Attributes

class Student(object):
    # Class attributes: shared by every instance until shadowed.
    courses = ["Biology", "Mathematics", "English"]
    age = 5
    gender = "Male"

#Let us now create Vince again:
vince = Student()

# Accessing these attributes:
vince.courses

vince.age

vince.gender

# We can manipulate these attributes just like **any other** python variable:
# (note: appending mutates the *class-level* list, so all instances see it —
# this is the teaching point of this cell)
vince.courses.append("Photography")
vince.courses

# Assignment, by contrast, creates an *instance* attribute that shadows
# the class attribute for this one object only.
vince.age = 28
vince.age

vince.gender = "M"
vince.gender

# ## Methods

class Student():
    courses = ["Biology", "Mathematics", "English"]
    age = 5
    sex = "Male"

    def have_a_birthday(self):
        """This method increments the age of our instance."""
        self.age += 1

vince = Student()
vince.age

vince.have_a_birthday()
vince.age

# ## The `__init__` method

class Student():
    def __init__(self, courses, age, sex):
        """ What the class should do when it is used to create an instance """
        self.courses = courses
        self.age = age
        self.sex = sex

    def have_a_birthday(self):
        # Increment this instance's age by one year.
        self.age += 1

vince = Student(["Biology","Math"],28,"Male")
vince.courses, vince.age, vince.sex

# ## Inheritance
#
# We can use a class to create new classes:

class Math_Student(Student):
    """ A Math student: behaves exactly like a Student but also has a favourite class attribute. """
    # Inherits __init__ and have_a_birthday from Student; only adds this.
    favourite_class = "Mathematics"

becky = Math_Student(["Mathematics", "Biology"], 29, "Female")
becky.courses, becky.age, becky.sex, becky.favourite_class

#This class has the methods of the parent class:
becky.have_a_birthday()
becky.age

# ## Summary
#
# - Classes
# - Attributes
# - Methods
# - Inheritance
#
# ## Advantages
#
# - Simplicity
# - Modularity
# - Modifiability
# - Extensibility
# - Re-usability
notebooks/07_classes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Automated Preprocessing
#
# This script is used for preprocessing the data, annotating noisy data segments, channels and ICA components. This file was used in the main analysis, providing the marked data to be used in the analysis files.

# ### Import libraries and define subject path

# +
# This is our preprocessing script.
# We select one recording, clean stuff and then perform the ICA
# During this process, we save all bad components segments and channels
import sys
import os.path as op

# make the repository root importable so data_analysis.* resolves
module_path = op.abspath(op.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import pandas as pd
import numpy as np
import mne
from autoreject import AutoReject, get_rejection_threshold
from data_analysis.functions_preprocessing import \
    (split_raws, mark_bads, save_bads, run_ica, save_ica,
     save_autoreject, load_autoreject, BAD_AR_PATH)
from data_analysis.functions_behavioral import \
    (create_event_df, remove_ghost_triggers, calculate_alpha,
     join_event_dfs, remove_outliers, events_from_event_df)

# raw EEG recordings (BIDS-like layout) and behavioral CSVs live on the lab share
subject_dir = '/net/store/nbp/projects/hyperscanning/hyperscanning-2.0/mne_data/sourcedata/'
behav_dir = "/net/store/nbp/projects/hyperscanning/study_project/NBP_Hyperscanning/data_analysis/behavioral_data"

# %matplotlib widget
import matplotlib
matplotlib.get_backend()
# -

# ## Define the subject you want to clean

# +
subj_pair = input("Please Type in, which subject pair you want to clean.\n"
                  "For the pilot study, possible choices are:\n"
                  "[202, 203, 204, 205, 206, 207, 208, 209, 211, 212]\n")
# NOTE(review): this prompt text looks copy-pasted from the pair prompt above —
# it actually asks for the participant (0 or 1) within the pair; confirm and reword.
participant = input("\nPlease Type in, which subject pair you want to clean.\n"
                    "Type: 0 for the first participant and: 1 for the second.\n")
# -

# ## Load and prepare the EEG recording

# +
# define the subjects id and its path
subj_id = "sub-{0}_p-{1}".format(subj_pair, participant)
subs_path = subject_dir + "sub-{0}/eeg/sub-{0}_task-hyper_eeg.fif".format(subj_pair)
behav_path = op.join(behav_dir, "{0}.csv".format(subj_pair))

## overwrite it for the test
# TODO: This line should be removed for the actual cleaning
#subj_id = "test_2"

# load the data (both participants of the pair are in one combined fif file)
combined_raw = mne.io.read_raw_fif(subs_path, preload=True)

# split the subjects and delete the raw file (free memory: data is preloaded)
raw = split_raws(combined_raw)[int(participant)]
del combined_raw

# set reference
raw.set_eeg_reference(["Cz"])

# set the EEG Montage. We use 64 chans from the standard 10-05 system.
montage = mne.channels.make_standard_montage("standard_1005")
raw.set_montage(montage)

# filter (band-pass 0.1–120 Hz)
raw.filter(l_freq=0.1, h_freq=120)
#raw.notch_filter(freqs=[50])
# notch filters were put out because they did not seem to add to the analysis
# -

# ## Define events and epoch the data

# +
# define the window length for for epoching (seconds)
tmin = 0
tmax = 1.5

# do the behavioral analysis and get the epochs
behavioral_df = calculate_alpha(pd.read_csv(behav_path))
event_df = create_event_df(raw)
event_df = remove_ghost_triggers(event_df)
event_df = join_event_dfs(event_df, behavioral_df)

# get the first tap by looking at the first sample in each trial
min_idx = event_df.groupby(["trial"])["sample"].idxmin()
early_df = event_df[event_df.index.isin(min_idx)]
early_events = events_from_event_df(early_df)

# get the late taps by looking at the last sample - 1.5 seconds
max_idx = event_df.groupby(["trial"])["sample"].idxmax()
late_df = event_df[event_df.index.isin(max_idx)]
late_events = events_from_event_df(late_df)
# shift event onsets back by one epoch length (samples)
late_events[:,0] -= int(raw.info["sfreq"] * (tmax - tmin))

# get the baseline events (an equally scaled window right before the early epochs)
base_events = early_events.copy()
base_events[:,0] -= int(raw.info["sfreq"] * (tmax - tmin))
# -

# ## Run autoreject on the data

# +
# check if any of the autoreject files already exists
ar_paths = [op.join(BAD_AR_PATH, subj_id + "-" + key + "-ar.hdf5")
            for key in ["baseline", "early", "late"]]
if any(op.isfile(ar_path) for ar_path in ar_paths):
    # NOTE(review): implicit string concatenation here produces a message with
    # no spaces between the sentences — confirm intended wording.
    print("Some autoreject files for this subject already exist."
          "Remove them from the BAD_AUTOREJECT PATH if you want to overwrite them."
          "Else, existing ARs will be loaded.")

epochs_list = []
reject_list = []

# fit (or load) one AutoReject instance per condition
for index, (key, events) in enumerate({"baseline":base_events,
                                       "early":early_events,
                                       "late":late_events}.items()):
    epochs = mne.Epochs(raw, events, tmin=tmin, tmax=tmax, baseline=(0, 0), preload=True) #baseline=(0, 0)
    picks = mne.pick_types(epochs.info, eeg=True)

    if op.isfile(ar_paths[index]):
        # load an existing AR
        print("Loading preexisting AR: " + ar_paths[index])
        ar = load_autoreject(subj_id + "-" + key)
    else:
        # define an autoreject object
        ar = AutoReject(consensus=[0.1, 0.2, 0.3, 0.4, 0.5], thresh_method="random_search",
                        picks=picks, verbose="tqdm_notebook")
        # [0.1, 0.2, 0.3, 0.4, 0.5], "bayesian_optimization"
        # fit the epochs
        ar.fit(epochs)
        # save the autoreject
        save_autoreject(ar, subj_id + "-" + key)

    # get the rejection threshold for ICA
    reject = get_rejection_threshold(epochs)

    # plot it
    reject_log = ar.get_reject_log(epochs)
    reject_log.plot()

    # plot the rejected epochs
    scalings = dict(eeg=12e-5, eog=150e-6, misc=1e-3)
    reject_log.plot_epochs(epochs, scalings=scalings)

    print("Rejected {} out of {} epochs.".format(sum(reject_log.bad_epochs), len(epochs)))

    # remove the bad epochs and add them to the epochs list
    epochs_list.append(epochs[~reject_log.bad_epochs])

    # add the rejects to the reject list
    reject_list.append(reject)
# -

# ## Concatenate the autoreject cleaned epochs and their average reject thresholds

# +
def dict_key_mean(dict_list):
    """Calculate the mean value for each key between multiple dicts.
    To return correct results, each key must be present in all of the dicts in dict_list."""
    import collections, functools, operator
    # sum the values with same keys
    sum_dict = dict(functools.reduce(operator.add, map(collections.Counter, dict_list)))
    return {key: val/len(dict_list) for key, val in sum_dict.items()}

# average the per-condition rejection thresholds into a single dict for the ICA
reject = dict_key_mean(reject_list)
print("All reject dicts: ", reject_list)
print("Average reject dict: ", str(reject))
epochs = mne.concatenate_epochs(epochs_list)
# -

# ## Run (or load) the ICA and plot all components

# +
#print(len(epochs.copy().drop_bad(reject).pick_types(eeg=True).ch_names))
# filter again for ICA (high-pass at 2 Hz; ICA is sensitive to slow drifts)
epochs.filter(l_freq=2, h_freq=None)

# run the ICA and save the marked components (Cz is the reference, so exclude it)
picks = list(mne.pick_types(epochs.info, eeg=True, exclude=["Cz"]))
ica = run_ica(epochs, subj_id, picks=picks, reject=reject, n_components=63, method="fastica")
# -

# ## Get ICA components based on their correlation with EOG

# +
# each participant has their own EOG bipolar channel in the combined recording
eog_name = "BIP1" if (participant == "0") else "BIP5"
eog_idx, eog_scores = ica.find_bads_eog(epochs, ch_name=eog_name)

# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)

# plot diagnostics
ica.plot_properties(epochs, picks=eog_idx)
# -

# ## Choose specific component properties to inspect

# +
inp = input("Please type in which components you want to further "
            "inspect.\nE.G. 3, 4,15 for components 3, 4, and 15.\n")
inp = [int(n) for n in inp.split(",") if n != ""]
if len(inp) > 0:
    ica.plot_properties(epochs, picks=inp, reject=None)
# -

# ## Exclude specific ICA components

# +
print("Excluded ICA components: ", ica.exclude)
inp = input("\nPlease type in which components you want to exclude.\n"
            "E.G. 2, 3,14 for components 2, 3, and 14.\n")
bad_comps = [int(comp) for comp in inp.split(",") if comp != ""]
# only add components that are not already marked
bad_comps = [comp for comp in set(bad_comps) if comp not in ica.exclude]
ica.exclude.extend(bad_comps)
print("\nExcluded ICA components: ", ica.exclude)
# -

# ## Save the ICA and its excluded components

# +
inp = input("Do you really want to save the components?\n"
            "Enter 'save' or 's' to save the data. Else, "
            "changes will be discarded.\n")
# NOTE(review): inp[0] raises IndexError if the user just presses Enter — consider inp[:1].
if inp[0] == "s":
    save_ica(ica, subj_id)
# -

# ### give everyone access to the new marked files you've created

# !cd /net/store/nbp/projects/hyperscanning/study_project
# !chown -hR $USER:nbp *; chmod -R 770 *
#
#
# ### Everything done. Thanks for cleaning :)
data_analysis/main_preprocessing_auto.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Example of extracting tables on selectable Pdfs

# ## Install spark-ocr python package
# Need specify path to `spark-ocr-assembly-[version].jar` or `secret`
secret = ""
license = ""
# NOTE(review): with an empty secret this yields version == "" — the pip install
# cell below only works when a real secret is pasted in.
version = secret.split("-")[0]
spark_ocr_jar_path = "../../target/scala-2.11"

# + language="bash"
# if python -c 'import google.colab' &> /dev/null; then
#     echo "Run on Google Colab!"
#     echo "Install Open JDK"
#     apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
#     java -version
# fi

# +
import os
import sys

# Colab needs JAVA_HOME pointed at the JDK installed by the bash cell above
if 'google.colab' in sys.modules:
    os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
    os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]

# +
# install from PYPI using secret
# #%pip install spark-ocr==$version\.spark24 --extra-index-url=https://pypi.johnsnowlabs.com/$secret --upgrade

# +
# or install from local path
# # %pip install ../../python/dist/spark-ocr-1.9.0.tar.gz
# -

# ## Initialization of spark session

# +
from sparkocr import start

if license:
    os.environ['JSL_OCR_LICENSE'] = license

spark = start(secret=secret, jar_path=spark_ocr_jar_path)
spark
# -

# ## Import OCR transformers
from sparkocr.transformers import *
from sparkocr.utils import display_image
from pyspark.sql.functions import collect_list,col

# ## Read PDF document as binary file
import pkg_resources
# sample tabular PDF shipped with the sparkocr package
pdf_example = pkg_resources.resource_filename('sparkocr', 'resources/ocr/pdfs/tabular-pdf/data.pdf')
pdf_example_df = spark.read.format("binaryFile").load(pdf_example).cache()

# ## Display document
image_df = PdfToImage() \
    .setInputCol("content") \
    .setOutputCol("image") \
    .transform(pdf_example_df.select("content", "path"))
for r in image_df.limit(1).collect():
    display_image(r.image)

# ## Extract table from PDF document per page

# +
pdf_to_text_table = PdfToTextTable()
pdf_to_text_table.setInputCol("content")
pdf_to_text_table.setOutputCol("table")
pdf_to_text_table.setPageIndex(1)     # extract only from this page
pdf_to_text_table.setMethod("basic")  # basic text-position based table detection

table = pdf_to_text_table.transform(pdf_example_df)
# -

# ## Show first row
table.select(table["table.chunks"].getItem(1)["chunkText"]).show(1, False)

# ## Display first element in first row
table.select(table["table.chunks"].getItem(1)["chunkText"][0]).show()

# ## Display first element in first row with coordinates
table.select(table["table.chunks"].getItem(1)[0]).show(1, False)
jupyter/SparkOcrPdfToTextTables.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .cpp // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: C++11 // language: C++11 // name: xcpp11 // --- #include <iostream> using namespace std; // ## enumerate 상수 조합 테스트 enum class CamParam{ IMG_FORMAT, ZERO_ROT, OFFSET_X, OFFSET_Y, WIDTH, HEIGHT, TIMING_MODE, TARGET_FPS, EXPOSURE, BUFFER_POLICY, MAX }; enum class ParamModifier{ INFO_MIN, INFO_MAX, INFO_INCREMENT, MAX }; int val = 0; cout << val; val = static_cast<int>(CamParam::WIDTH CamParam::HEIGHT); cout << val;
notebook/Untitled2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Neural-ODE experiment: simulate a forced harmonic oscillator with a DMT tip-sample
# force, then train a neural network (Fint) embedded in an ODE to recover that force.

import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from cp_detection.NeuralODE import GeneralModeDataset, LightningTrainer, TrainModel, LoadModel
from cp_detection.ForceSimulation import ForcedHarmonicOscillator, DMT_Maugis, SimulateGeneralMode

# DMT (Maugis) contact-mechanics force model and oscillator parameters
DMT = DMT_Maugis(0.2, 10, 2, 130, 1, 0.3, 0.3)
ode_params = {'Q':12000, 'A0':0.5, 'Om':1., 'k':1000}
FHO = ForcedHarmonicOscillator(**ode_params, Fint = DMT.F)
d_array = np.linspace(1, 10, 20)
t, z_array = SimulateGeneralMode(FHO, d_array, 0.1, 1000, relaxation = 5, rtol = 1e-7)

z_array.shape

_, ax = plt.subplots(1, 1, figsize = (16, 5))
ax.plot(t[-1000:], z_array[0,:], 'k')
ax.grid(ls = '--')
ax.axvline(x = 5*2*ode_params['Q'], color = 'b')
#ax.axvline(x = 10*2*ode_params['Q'], color = 'r')

# persist the simulation so training can be re-run without re-simulating
import json
savepath = './Data/digital.json'
savedata = {'ode_params':ode_params, 'd_array': d_array.tolist(), 'z_array': z_array.tolist(), 't' : t.tolist()}
with open(savepath, 'w') as savefile:
    json.dump(savedata, savefile)

savepath = './Data/digital.json'
train_dataset = GeneralModeDataset.load(savepath)

import torch
# FIX: the original tested `torch.cuda.is_available` (the function object itself,
# which is always truthy), so the CUDA branch was taken even on CPU-only machines.
# The function must be *called*.
if torch.cuda.is_available():
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")

from argparse import Namespace
hparams = Namespace(**{'train_dataset': train_dataset, 'hidden_nodes': [20, 20], 'lr': 0.02, 'batch_size': 20, 'solver': 'rk4'})
model = LightningTrainer(hparams)

import os
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

# keep only the best checkpoint by training loss
checkpoint_callback = ModelCheckpoint(filepath = './checkpoints', save_best_only = True, verbose = True, monitor = 'loss', mode = 'min', prefix = '')
trainer = Trainer(gpus = 1, early_stop_callback = None, checkpoint_callback = checkpoint_callback, show_progress_bar = True, max_nb_epochs=10000)
trainer.fit(model)

# ## Load trained model, evaluate results
checkpoint_path = './hall_of_fame/20200206/_ckpt_epoch_1256.ckpt'
model = LoadModel(checkpoint_path)
d = np.linspace(3.0, 10.0, 40)
model.cuda()
F_pred = model.predict_force(d)

fig, ax = plt.subplots(1, 1, figsize = (7, 5))
ax.plot(d, F_pred, '.r', label = 'NN prediction')
# NOTE(review): `F` is not defined in this notebook (likely DMT.F from a deleted cell) — confirm.
ax.plot(d, F(d), '.k', label = 'True Force')
ax.legend()
ax.grid(ls = '--')

# NOTE(review): `ODE` and `x0` are also undefined here — these cells depend on
# earlier, removed scratch cells and will NameError if run top-to-bottom.
sol = solve_ivp(ODE, (0, 50), x0, t_eval = np.linspace(0, 50, 1000))
data = sol.y[1,:] + np.random.normal(scale = 0.3, size = sol.y[1,:].shape)
fig, axes = plt.subplots(1, 2, figsize = (16, 5))
axes[0].plot(sol.t, sol.y[1,:], '.k')
axes[1].plot(sol.t, data, '.k')
for ax in axes:
    ax.grid(ls = '--')

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torchdiffeq import odeint_adjoint as odeint
from torchviz import make_dot, make_dot_from_trace

# +
class Fint(nn.Module):
    """Small MLP approximating the tip-sample interaction force F(z)."""
    def __init__(self, ndense):
        super(Fint, self).__init__()
        self.elu = nn.ELU()
        self.tanh = nn.Tanh()
        self.fc1 = nn.Linear(1, ndense)
        self.fc2 = nn.Linear(ndense, ndense)
        self.fc3 = nn.Linear(ndense, 1)

    def forward(self, x):
        out = self.fc1(x)
        out = self.elu(out)
        out = self.fc2(out)
        out = self.elu(out)
        # NOTE(review): fc2 is applied twice, sharing one weight matrix across two
        # layers — confirm this weight tying is intentional.
        out = self.fc2(out)
        out = self.elu(out)
        out = self.fc3(out)
        out = self.tanh(out)
        return out


class NN_ODE(nn.Module):
    """Forced harmonic oscillator ODE with the learned force term plugged in.

    State x = (velocity, position); B implements the linear oscillator dynamics,
    C injects the scalar drive/force into the velocity equation.
    """
    def __init__(self, ndense, Q, A0, Om, k, d):
        super(NN_ODE, self).__init__()
        self.F = Fint(ndense)
        self.Q = Q
        self.A0 = A0
        self.Om = Om
        self.k = k
        self.d = d
        self.nfe = 0  # number of function evaluations (solver diagnostics)
        self.B = torch.tensor([[-1./self.Q, -1.], [1., 0.]], device = device)
        self.C = torch.tensor([1.,0.], device = device)

    def forward(self, t, x):
        self.nfe+=1
        F = self.F(x[1].unsqueeze(-1))
        #ode = torch.matmul(self.B, x) + (self.d + self.A0*torch.cos(self.Om*t)/self.Q + F/self.k) * self.C
        ode = torch.matmul(self.B, x) + (self.d + self.A0*torch.cos(self.Om*t)/self.Q + F) * self.C # Currently, force term is self.k times larger
        return ode
# -

# NOTE(review): `params` is undefined in this notebook (probably meant ode_params plus d) — confirm.
nnode = NN_ODE(4, **params)
nnode.float()
nnode.cuda()
nnode.parameters

# +
optimizer = torch.optim.Adam(nnode.parameters(), lr = 0.01)
loss_function = nn.MSELoss()
x0_tensor = torch.from_numpy(x0).cuda(non_blocking = True).float()
t_samp = torch.from_numpy(sol.t).cuda(non_blocking = True).float()
data = torch.from_numpy(data).cuda(non_blocking = True).float()

# +
# loss is computed on the log power spectrum of the trajectory, not the raw signal
data_fft = torch.rfft(data, 1, onesided = True)
data_amp = torch.sum(data_fft**2, dim = -1)
data_logamp = torch.log1p(data_amp)
print(data_logamp.size())
logamp_array = data_logamp.cpu().detach().numpy()
plt.plot(logamp_array[0:50])

# +
x_pred = odeint(nnode, x0_tensor, t_samp)
z_pred = x_pred[:,1]
z_fft = torch.rfft(z_pred, 1)
z_amp = torch.sum(z_fft**2, dim = -1)
z_logamp = torch.log1p(z_amp)
z_logamp.size()
loss = loss_function(z_logamp, data_logamp)
zlogamp_array = z_logamp.cpu().detach().numpy()
plt.plot(zlogamp_array[0:50])
# -

make_dot(loss, params=dict(nnode.named_parameters()))

# +
N_epochs = 500
history = np.zeros((N_epochs, 1))

for epoch in range(N_epochs):
    # zero the parameter gradients
    optimizer.zero_grad()
    running_loss = 0.0
    solut = odeint(nnode, x0_tensor, t_samp, method = 'adams')
    z_pred = solut[:,1]
    z_fft = torch.rfft(z_pred, 1)
    z_amp = torch.sum(z_fft**2, dim = -1)
    #z_fft = torch.rfft(z_pred, 1)
    z_logamp = torch.log1p(z_amp)
    #z_logamp.size()
    loss = loss_function(z_logamp, data_logamp)
    #loss = loss_function(z_amp, data_amp)
    #loss = loss_function(z_pred, data)
    loss.backward()
    optimizer.step()
    # print statistics
    running_loss += loss.item()
    print('[%d] loss: %.12e' %(epoch + 1, running_loss))

print('Training Finished')
# -

fig, ax = plt.subplots(1, 1, figsize = (7, 5))
ax.plot(history)
ax.set_yscale('log')
ax.grid(ls = '--')
ax.set_title('Learning Curve', fontsize = 14)

sol = odeint(nnode, x0_tensor, t_samp)
z_final = sol[:,1].cpu().detach().numpy()
t = t_samp.cpu().detach().numpy()
z_true = data.cpu().detach().numpy()

fig, ax = plt.subplots(1, 1, figsize = (7, 5))
ax.plot(t, z_true, '.k', label = 'Data')
ax.plot(t, z_final, '.r', label = 'Prediction')
ax.legend()
ax.grid(ls = '--')

# evaluate the learned force curve against the ground truth
d_array = np.linspace(1, 8, 1000)
d_tensor = torch.from_numpy(d_array).cuda(non_blocking = True).float()
F_true = F(d_array)
F_pred = np.zeros(d_array.shape)
for i in range(len(F_pred)):
    F_pred[i] = nnode.F(d_tensor[i].unsqueeze(-1)).cpu().detach().numpy()

fig, ax = plt.subplots(1, 1, figsize = (7, 5))
ax.plot(d_array, F_true, '.k', label = 'True Force')
ax.plot(d_array, F_pred, '.r', label = 'NN Prediction')
ax.axhline(F_true.mean())
ax.legend()
ax.grid(ls = '--')

F_pred
NeuralODE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # NUMPY - Multidimensional Data Arrays
# It is a package that provides high-performance vector, matrix and higher-dimensional data structures for Python. NumPy brings the computational power of languages like C and Fortran to Python, a language much easier to learn and use.

# ### Import Numpy Library
import numpy as np

np.__version__

# ### Getting Help
np.info(np.array)

# NOTE: this cell raises TypeError on purpose — np.array() requires at least one argument
np.array()

# ## What is Array?
# ![](image/array.png)

# ### List vs Numpy

# +
# Python List
list1 = [1, 2, 3, 4, 5]
list2 = [i*2 for i in range(1, 6)] # 1 2 3 4 5
print('list1:', list1)
print('list2:', list2)
# -

print('tipedata dari list1:', type(list1))

# + concatenates lists; elementwise addition needs an explicit loop
list1 + list2

list3 = []
for a, b in zip(list1, list2):
    list3.append(a+b)
list3

# NOTE: this cell raises TypeError on purpose — lists do not support elementwise multiplication
list1 * list2

list3 = []
for a, b in zip(list1, list2):
    list3.append(a*b)
list3

# In the `numpy` package the terminology used for vectors, matrices and higher-dimensional data sets is *array*.
#
#

# +
# numpy: arithmetic operators work elementwise
np1 = np.array([1, 2, 3, 4, 5])
np2 = np.array([i*2 for i in range(1, 6)])
print(np1)
print(np2)
# -

type(np1)

np1 + np2

np1 * np2

np1 ** np2

# memory comparison: Python list of ints vs numpy array
list1 = list(range(1, 1001))
', '.join([str(i) for i in list1])

len(list1)

import sys
a = 1
type(a)

sys.getsizeof(a)

sys.getsizeof(a) * len(list1)

np1 = np.arange(1, 1001)
len(np1)

np1.size

np1.itemsize

np1.size * np1.itemsize

# speed comparison: list comprehension vs vectorized numpy addition
size = 10_000_000
list1 = list(range(size))
list2 = list(range(size))
np1 = np.arange(size)
np2 = np.arange(size)

import time
mulai = time.time()
result = [a+b for a, b in zip(list1, list2)]
print('python list membutuhkan waktu:', time.time() - mulai, 's')

mulai = time.time()
result = np1 + np2
print('python numpy membutuhkan waktu:', time.time() - mulai, 's')

# ## Creating `numpy` arrays
# There are some ways to initialize new numpy arrays:
# * a Python list or tuples
# * using functions that are dedicated to generating numpy arrays, such as `arange`, `linspace`, etc.
# * reading data from files

# ### Lists

# +
# vector: the argument to the array function is a list
v = np.array([1, 2, 3, 4, 5])
v
# -

type(v)

# +
# matrix: the argument to the array function is a nested list
m = np.array([[1, 2, 3], [4, 5, 6]])
m
# -

# The `v` and `m` objects are both of the type `ndarray` that the `numpy` module provides.
type(v), type(m)

a = [1,2,3]
type(a)

# The difference between the `v` and `m` arrays is only their shapes. We can get information about the shape of an array by using the `ndarray.shape` property.
v.shape

m.shape

# The number of elements in the array is available through the `ndarray.size` property
m.size

# `numpy.ndarray` looks very similar to the `list`. So, why not use the list instead?
# `numpy.ndarray` is used for several reasons:
# 1. Lists are very general. They can contain any kind of object. They do not support mathematical functions such as matrix and dot multiplication, etc.
# 2. Numpy arrays are statically typed and homogenous. The type of the elements is determined when the array is created
# 3. Numpy arrays are memory efficient
# 4. It is fast for implementation of mathematical function

# We can see the type of data of an array using `dtype`
m.dtype

# If we want, we can explicitly define the type of the array data when we create it, using the `dtype` keyword argument:

# +
# NOTE(review): np.float is deprecated and removed in NumPy >= 1.24 — this cell
# only runs on older NumPy; plain `float` is the modern equivalent.
m = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float)
m
# -

# Common data types that can be used with `dtype` are: `int`, `float`, `complex`, `bool`, `object`, etc.

# ### Create Matrix Zeros

# +
# One dimension
zeros_matrix = np.zeros(5)
zeros_matrix

# +
# two dimensions
# NOTE: this cell raises TypeError on purpose — the shape must be passed as one tuple
zeros_matrix2 = np.zeros(5,2)

# +
# should be in tuple format
zeros_matrix2 = np.zeros((5,2)) # 5 rows, 2 columns
zeros_matrix2
# -

# ### Matrix ones

# +
# one dimension
matrix_ones = np.ones(5)
matrix_ones
# -

m2 = np.ones((5, 5), dtype=int)
m2

# +
# 3 dimensions
matrix_ones2 = np.ones((3, 4, 2)) #3 rows, 4 columns, 2 depth
matrix_ones2
# -

# ---
# ## > Exercise 1
# 1. Create a matrix from a list which has 4 rows and 3 columns
n1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
n1

n1.shape

# 2. Create the following matrix
# ![](image/lat11.png)

# +
n2 = np.array([[2, 7, 12, 0], [3, 9, 3, 4], [4, 0, 1, 3]])
brs, klm = n2.shape
for i in range(0, brs):
    for j in range(0, klm):
        print(f'{n2[i, j]:2} ', end='')
    print()
# -

n2.shape

n2.size

# 3. Create a 2D matrix with size of 10
n3 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
n3.size

n4 = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])
n4.size

n5 = np.zeros(10)
n5.size

n6 = np.ones((2, 5))
n6.size

# 4. Create a 3D matrix of ones which has 2 rows, 3 columns, and 3 depth
n7 = np.zeros((2, 3, 3))
n7

n7.shape

n8 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
n8

n8.shape

# 5. Make the following arrays from zeros arrays and with for loops
# ![](image/exercise1.png)
n9 = np.zeros((5, 3))
n9

for line in n9:
    print(line + 2)

# ---
# ### Using array-generating functions
# For larger arrays it is impractical to initialize the data manually, using explicit python lists. Instead we can use one of the many functions in `numpy` that generate arrays of different forms. Some of the more common are:

# **arange**

# +
# create a range
x = np.arange(10)
x
# -

for i in range(10):
    print(i)

# +
# create a range
x = np.arange(10, 20) # arguments: start, stop
x

# +
# create a range
x = np.arange(10, 20, 2) # arguments: start, stop, step
x

# +
x = np.arange(-1, 1, 0.1)
x
# -

# The number 9.00000000e-01 already is a floating point number.
# It's written in scientific notation and is equivalent to 9 * 10**-1 or 0.9.

# #### linspace

# using linspace, both end points ARE included
np.linspace(0, 10, 10)

# Unlike arange that uses step, linspace uses the number of samples
np.linspace(1, 10, 4)

# #### random data
from numpy import random

# uniform random numbers in [0,1]
random.rand(5,5)

# standard normal distributed random numbers
x = random.randn(5,5)
x

x.dtype

x = np.ones(2, dtype = np.int64)
x

random.randint(10)

random.randint(2, 10, size=4)

random.randint(2, 10, size=(4,2,2))

# ---
# ## Exercise 2
# 1. Generate a 1-D array containing 5 random integers from 0 to 100:
import numpy as np
a = np.random.randint(0, 101, size=5)
a

# 2. Generate a 2-D array with 3 rows, each row contains 5 random integers from 0 to 100
b = np.random.randint(0, 101, size=(3, 5))
b

# 3. Generate a 1-D array of 30 evenly spaced elements between 1.5 and 5.5, inclusive.
c = np.linspace(1.5, 5.5, 30)
c
week-1/Pert1_Numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# In an earlier [post](), I explained how to apply a Bayesian linear regression model.
# Here I use the historically accurate dataset behind the development of NASA OBPG's
# chlorophyll algorithms.
import pandas as pd
import matplotlib.pyplot as pl
from sklearn.linear_model import LinearRegression
import re
import os
import numpy as np
import seaborn as sb
from mpl_toolkits.basemap import Basemap
import pymc3 as pm
import warnings
from cmocean import cm
warnings.filterwarnings('ignore')
# %matplotlib inline


# +
def ParseTextFile(textFileHandle, topickle=False, convert2DateTime=False, **kwargs):
    """Parse a SeaBASS-style text file (header lines starting with '/') into a DataFrame.

    * topickle: pickle resulting DataFrame if True
    * convert2DateTime: join date/time columns and convert entries to datetime objects
    * kwargs:
        pkl_fname: pickle file name to save DataFrame by, if topickle=True
    """
    # Pre-compute some regex
    columns = re.compile('^/fields=(.+)')   # to get field/column names
    units = re.compile('^/units=(.+)')      # to get units -- optional
    endHeader = re.compile('^/end_header')  # to know when to start storing data
    # Set some milestones
    noFields = True
    getData = False
    # loop through the text data
    for line in textFileHandle:
        if noFields:
            # still scanning the header for the /fields= line
            fieldStr = columns.findall(line)
            if len(fieldStr) > 0:
                noFields = False
                fieldList = fieldStr[0].split(',')
                dataDict = dict.fromkeys(fieldList)
            continue  # nothing left to do with this line, keep looping
        if not getData:
            if endHeader.match(line):
                # end of header reached, start acquiring data
                getData = True
        else:
            dataList = line.split(',')
            for field, datum in zip(fieldList, dataList):
                if not dataDict[field]:
                    dataDict[field] = []
                dataDict[field].append(datum)
    df = pd.DataFrame(dataDict, columns=fieldList)
    if convert2DateTime:
        datetimelabels = ['year', 'month', 'day', 'hour', 'minute', 'second']
        df['Datetime'] = pd.to_datetime(df[datetimelabels], format='%Y-%m-%dT%H:%M:%S')
        df.drop(datetimelabels, axis=1, inplace=True)
    if topickle:
        fname = kwargs.pop('pkl_fname', 'dfNomad2.pkl')
        df.to_pickle(fname)
    return df


def FindNaNs(df):
    """Print, per column, how many entries are the string 'NaN' and how many are '-999'."""
    for col in df.columns:
        sn = np.where(df[col].values == 'NaN', True, False).sum()
        # FIX: the original used `np.where('-999' in df[col].values, ...)`, which
        # evaluates a single membership boolean and therefore reported 0 or 1
        # instead of the actual count; compare elementwise like the NaN check above.
        s9 = np.where(df[col].values == '-999', True, False).sum()
        print("%s: %d NaNs & %d -999s" % (col, sn, s9))


def FitPoly(X, y, order=4, lin=False):
    """ Numpy regression. Returns coeffs.
    kwargs:
        lin: set True when X/y are in linear space; they are log10-transformed first.
             Leave False when the data is already log transformed."""
    if lin:
        X = np.log10(X)
        y = np.log10(y)
    coeffs = np.polyfit(X, y, deg=order)
    return coeffs
# -

# FIX: savDir was originally defined *after* its first use in the ParseTextFile call
# below, which raised a NameError; the assignment is moved up here.
savDir = '/accounts/ekarakoy/DEV-ALL/BLOGS/DataScienceCorner/posts/bayesianChl_stuff/'

with open('/accounts/ekarakoy/DATA/ocprep_v4_iop.txt') as fdata:
    df = ParseTextFile(fdata, topickle=True, convert2DateTime=True,
                       pkl_fname=os.path.join(savDir, 'JeremyOCx_data'))

df.info()  # skipping output which shows a lot of unnecessary features for this exercise

# Select features I want for this modeling bit.
basicCols = ['cruise', 'lat', 'lon', 'type', 'chl', 'Datetime']
IwantCols = basicCols + [col for col in df.columns if 'rrs' in col]
dfRrs = df[IwantCols]
# SeaWiFS band labels (remote-sensing reflectance at each wavelength)
swflbls = ['rrs411','rrs443','rrs489','rrs510','rrs555','rrs670']
swfCols = basicCols + swflbls
dfSwf = dfRrs[swfCols]
df.to_pickle(os.path.join(savDir, 'dfOcPrepHistoric.pkl'))
dfRrs.to_pickle(os.path.join(savDir, 'dfOcPrepRrs.pkl'))
del df, dfRrs

dfSwf.info()  # skipping the output which shows that most columns are object type...

FindNaNs(dfSwf)
dfSwf.replace(to_replace='NaN', value=np.NaN, inplace=True)
dfSwf.dropna(inplace=True)
numCols = ['chl','lat','lon','rrs411','rrs443','rrs489','rrs510','rrs555','rrs670']
dfSwf[numCols] = dfSwf[numCols].apply(pd.to_numeric)
dfSwf.info()

# OCx blue/green band-ratio predictor: max of the blue bands over the green band
dfSwf['maxBlue'] = dfSwf[['rrs443', 'rrs489', 'rrs510']].max(axis=1)
dfSwf['OCxRatio'] = dfSwf.maxBlue/dfSwf.rrs555
dfLogOCx = pd.DataFrame(columns = ['OCxRatio','chl','type','cruise'])
dfLogOCx.OCxRatio = np.log10(dfSwf.OCxRatio)
dfLogOCx.chl = np.log10(dfSwf.chl)
dfLogOCx[['type','cruise']] = dfSwf[['type','cruise']]
dfSwf.to_pickle(os.path.join(savDir, 'dfSwf'))
dfLogOCx.to_pickle(os.path.join(savDir, 'dfLogOCx'))

sb.set(font_scale=1.5)
g = sb.PairGrid(dfLogOCx, hue='type', vars=['chl','OCxRatio'], size=5,
                palette=sb.color_palette("cubehelix", 2))
g = g.map_upper(pl.scatter, alpha=0.5, edgecolor='k', linewidth=2)
g = g.map_diag(sb.kdeplot, lw=3)
g = g.map_lower(sb.kdeplot, cmap="Reds_d")
g.add_legend();

f, ax2 = pl.subplots(ncols=2, figsize=(14, 6))
sb.violinplot(x='OCxRatio', y='type', data=dfLogOCx, hue='type', ax=ax2[0])
sb.violinplot(x='chl', y='type', data=dfLogOCx, hue='type', ax=ax2[1]);
ax2[0].legend().set_visible(False)
ax2[1].legend().set_visible(False)

dfSwf.type.unique()

# Pooled bayesian model: one 4th-order polynomial in log10(band ratio) for all data,
# with the operational OC4v6 coefficients as prior means.
logChlObs = dfLogOCx.chl.values
logOCxRatio = dfLogOCx.OCxRatio.values
OC4v6_coeffs = {'a0': 0.3272, 'a1': -2.9940, 'a2': 2.7218, 'a3': -1.2259, 'a4': -0.5683}
with pm.Model() as pooled_model:
    a0 = pm.Normal('a0', mu=OC4v6_coeffs['a0'], sd=10)
    a1 = pm.Normal('a1', mu=OC4v6_coeffs['a1'], sd=10)
    a2 = pm.Normal('a2', mu=OC4v6_coeffs['a2'], sd=10)
    a3 = pm.Normal('a3', mu=OC4v6_coeffs['a3'], sd=10)
    a4 = pm.Normal('a4', mu=OC4v6_coeffs['a4'], sd=10)
    epsilon = pm.Uniform('epsilon', lower=0, upper=10)
    mu = a0 + a1 * logOCxRatio + a2 * logOCxRatio**2 + a3 *\
        logOCxRatio**3 + a4 * logOCxRatio**4
    logChlPred = pm.Normal('chlPred', mu=mu, sd=epsilon, observed=logChlObs)
    start = pm.find_MAP()
    step = pm.NUTS(scaling=start)
    traceOCx_pooled = pm.sample(10000, step=step, start=start)

# drop the first 1000 samples as burn-in
chainOCx_pooled = traceOCx_pooled[1000:]
varnames = ['a%d' % d for d in range(5)] + ['epsilon']
#refvals = [chainOCx_pooles['a%d'] % d for d in arange(5)]
#refval = {'a%d' % d: rv for d,rv in zip(range(5), chainOCx_pooled['a%d'] )}
pm.traceplot(chainOCx_pooled, varnames=varnames, grid=True);

# compare with a plain least-squares polynomial fit
cfs = FitPoly(logOCxRatio, logChlObs)
{'a%d' % d: rv for d, rv in zip(range(5), cfs[::-1])}

OC4v6_coeffs

refvals = [chainOCx_pooled['a%d' % d].mean() for d in range(5)]
# bayes means with OC4_v6 mean normal priors
refvals

# bayes means with 0-mean normal priors
refvals
posts/developing-a-hierarchical-bayesian-linear-regression-model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # <span style="color:Maroon">Crab Age Prediction - Random Forest Model # # Import required libraries import os import pandas as pd import numpy as np import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") np.random.seed(0) os.getcwd() # Read the csv file data os.chdir('..\\Data\\') df = pd.read_csv('data_treated.csv') df.head() df.describe() # ## <span style="color:Maroon">Part 3: Predictive Model # #### <span style="color:Maroon">Performance Metrics: # <span style="color:Green">To pick the final model, we shall look at three performance metrics: # # <span style="color:Green">__Mean Absolute Error:__ The mean_absolute_error function computes mean absolute error, a risk metric corresponding to the expected value of the absolute error loss or -norm loss. For more details, please refer the below link: https://scikit-learn.org/stable/modules/model_evaluation.html#mean-absolute-error # # # <span style="color:Green">__Mean Squared Error:__ The mean_squared_error function computes mean square error, a risk metric corresponding to the expected value of the squared (quadratic) error or loss. For more details, please refer the below link: https://scikit-learn.org/stable/modules/model_evaluation.html#mean-squared-error # # <span style="color:Green">__Rsquare:__ The r2_score function computes the coefficient of determination, usually denoted as R². It represents the proportion of variance (of y) that has been explained by the independent variables in the model. It provides an indication of goodness of fit and therefore a measure of how well unseen samples are likely to be predicted by the model, through the proportion of explained variance. 
As such variance is dataset dependent, R² may not be meaningfully comparable across different datasets. Best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R² score of 0.0. For more details, please refer the below link: https://scikit-learn.org/stable/modules/model_evaluation.html#r2-score # # # ## <span style="color:Maroon">Model 2: Random Forest model # # <span style="color:Green">Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks that operate by constructing a multitude of decision trees at training time and outputting the class that is the mode of the classes or mean prediction of the individual trees. # # <span style="color:Green">Hyper-parameters to be tuned in Random Forest model are: # 1. n_estimators = number of trees in the foreset # 2. max_features = max number of features considered for splitting a node # 3. max_depth = max number of levels in each decision tree # 4. min_samples_split = min number of data points placed in a node before the node is split # 5. min_samples_leaf = min number of data points allowed in a leaf node # 6. 
# bootstrap = method for sampling data points (with or without replacement)

# Import required libraries
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn import metrics

# Change directory to Images (all figures below are saved there)
os.chdir("..\\Images\\")

# Divide the dataset into 70:30 for train and test purpose
X_data = df.drop("Age", axis=1)
y_data = df["Age"]
X_train, X_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.30, random_state=42)
y_train = y_train.ravel()
y_test = y_test.ravel()

# Declare the hyper-parameters for grid search
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]

# Random grid ***(Total 10*2*12*3*3*2) = 4320 models will be built ***
random_grid = {'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}

# RF Model: randomized search over the grid above (samples 200 of the 4320 combinations)
rf = RandomForestRegressor()
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,
                               n_iter=200, cv=3, verbose=2, random_state=42, n_jobs=-1)
# Fit the random search model
rf_random.fit(X_train, y_train)


def evaluate(model, X, y_act, data_str):
    """Print and return MAE, MSE and R2 of `model` on (X, y_act).

    Args:
        model: fitted regressor exposing .predict
        X: feature matrix
        y_act: true target values
        data_str: label used in the printed header (e.g. 'Train Sample')

    Returns:
        (MAE, MSE, r2) tuple of floats
    """
    y_pred = model.predict(X)
    MAE = metrics.mean_absolute_error(y_act, y_pred)
    MSE = metrics.mean_squared_error(y_act, y_pred)
    r2 = metrics.r2_score(y_act, y_pred)
    print('Model Performance:{}'.format(data_str))
    print('Mean Absolute Error: {:0.4f}.'.format(MAE))
    print('Mean Square Error = {:0.4f}.'.format(MSE))
    print('Rsquare = {:0.4f}'.format(r2))
    return MAE, MSE, r2


best_random = rf_random.best_estimator_
best_random

MAE, MSE, r2 = evaluate(best_random, X_train, y_train, 'Train Sample')
MAE, MSE, r2 = evaluate(best_random, X_test, y_test, 'Test Sample')

# ###### The selected model does well on "Train" sample, but the performance deteriorates
# on Test sample. We shall try to select the model manually, rather than using Grid search


def RF_Iter(X_train, y_train, ntrees, depth, minsampleleaf, maxfeatures, X_test, y_test):
    """Fit one RandomForestRegressor and return its train and test metrics.

    Returns:
        list [train_MAE, train_MSE, train_R2, test_MAE, test_MSE, test_R2]
    """
    out_metrics = [None] * 6
    # NOTE: criterion is left at its default (squared error). The original passed
    # criterion='mse', which is the same criterion, but that alias was removed in
    # scikit-learn >= 1.2 and raises there.
    clf = RandomForestRegressor(n_estimators=ntrees, max_depth=depth,
                                min_samples_leaf=minsampleleaf,
                                max_features=maxfeatures, verbose=0)
    clf.fit(X_train, y_train)
    # Predicting train output
    y_pred = clf.predict(X_train)
    # Getting train metrics
    out_metrics[0] = metrics.mean_absolute_error(y_train, y_pred)
    out_metrics[1] = metrics.mean_squared_error(y_train, y_pred)
    out_metrics[2] = metrics.r2_score(y_train, y_pred)
    # Predicting test output
    y_pred = clf.predict(X_test)
    # Getting test metrics
    out_metrics[3] = metrics.mean_absolute_error(y_test, y_pred)
    out_metrics[4] = metrics.mean_squared_error(y_test, y_pred)
    out_metrics[5] = metrics.r2_score(y_test, y_pred)
    return out_metrics


# + Manual (exhaustive) grid over a small set of hyper-parameters
ntrees = [50, 100]
maxfeatures = ['auto', 'sqrt']
depth = [10, 50, 100]
minsampleleaf = [1, 2, 4]
parameters = []
results = []
for i in range(0, len(ntrees)):
    for j in range(0, len(maxfeatures)):
        for k in range(0, len(depth)):
            for l in range(0, len(minsampleleaf)):
                parameters.append([ntrees[i], maxfeatures[j], depth[k], minsampleleaf[l]])
                results.append(RF_Iter(X_train, y_train, ntrees[i], depth[k],
                                       minsampleleaf[l], maxfeatures[j], X_test, y_test))
# -

# +
# Convert Parameters to pandas dataframe
parameters = np.array(parameters)
parameters = parameters.reshape(-1, 4)
parameters = pd.DataFrame(parameters)
parameters.columns = ["n_estimators", "max_features", "max_depth", "min_samples_leaf"]
# Convert results to pandas dataframe
results = np.array(results)
results = results.reshape(-1, 6)
results = pd.DataFrame(results)
results.columns = ["Train_MAE", "Train_MSE", "Train_R2", "Test_MAE", "Test_MSE", "Test_R2"]
# Iteration number
ite = np.arange(1, results.shape[0] + 1)
# Merge parameters to performance dataframe
Performance_RF = pd.DataFrame(ite)
Performance_RF.columns = ["Iteration_No"]
Performance_RF = Performance_RF.join(parameters)
Performance_RF = Performance_RF.join(results)
Performance_RF.head()
# -

# + Let's plot the three performance metrics for Train and test to select the best model
plt.rcParams['figure.figsize'] = [15, 5]
# Plot MAE for train and test
plt.subplot(1, 3, 1)
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Train_MAE"], 'r-', label="Train MAE")
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Test_MAE"], 'b-', label="Test MAE")
plt.xlabel("Iteration number")
plt.ylabel("MAE")
plt.legend()
# Plot MSE for train and test
plt.subplot(1, 3, 2)
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Train_MSE"], 'r-', label="Train MSE")
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Test_MSE"], 'b-', label="Test MSE")
plt.xlabel("Iteration number")
plt.ylabel("MSE")
plt.legend()
# Plot r2 for train and test
plt.subplot(1, 3, 3)
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Train_R2"], 'r-', label="Train R2")
plt.plot(Performance_RF["Iteration_No"], Performance_RF["Test_R2"], 'b-', label="Test R2")
plt.xlabel("Iteration number")
plt.ylabel("R2")
plt.legend()
plt.savefig("Manual_trainedRF_Perf.png")
plt.show()
# -

# ###### Comments:
# Based on the plots, hyper-parameter tuning doesn't seem to have any significant effect
# on random forest regressor results
best_random

# + Report train/test metrics of the RF model selected using Grid search
clf = best_random
print("++++++++++++++++++++++++\n")
MAE1, MSE1, r21 = evaluate(best_random, X_train, y_train, 'Train Sample')
print("++++++++++++++++++++++++\n")
plt.rcParams['figure.figsize'] = [15, 5]
MAE2, MSE2, r22 = evaluate(best_random, X_test, y_test, 'Test Sample')
# -


def plot_ActvsPred(model, X, y, sample_name):
    """Plot actual vs predicted Age (sorted by actual) and save the figure.

    Args:
        model: fitted regressor
        X, y: features and true target of the sample to plot
        sample_name: 'Train' or 'Test'; used in the title and the file name
    """
    plt.rcParams['figure.figsize'] = [10, 5]
    y_hat = model.predict(X)
    y_hat = pd.DataFrame(y_hat)
    y_hat.columns = ["Predicted"]
    y_hat["Actual"] = y
    obs_np = np.arange(0, len(y))
    # Sort by actual age so the red curve is monotone and the prediction scatter is visible
    y_hat = y_hat.sort_values("Actual", ascending=True)
    plt.plot(obs_np, y_hat["Actual"], 'r-', label="Actual Age")
    plt.plot(obs_np, y_hat["Predicted"], 'b-', label="Predicted Age")
    plt.xlabel("observation")
    plt.ylabel("Age")
    plt.title("Actual Vs Predicted plot for {} sample".format(sample_name))
    plt.legend()
    plt.savefig(f'RF_{sample_name}_actual_predicted.png')
    plt.show()
    return


# Plot Actual Versus Predicted for Train Sample
plot_ActvsPred(clf, X_train, y_train, "Train")

# Plot Actual Versus Predicted for Test Sample
# BUG FIX: the original call passed the *train* data here, so the "Test" figure
# actually showed train-sample predictions.
plot_ActvsPred(clf, X_test, y_test, "Test")

# ###### Comments:
# The Random Forest model gives good predictions for most of the age range. It seems to
# do worse on the lower and upper side of Age variable. This could be due to data
# capturing error

# Plot Variable Importance of the features in the final Model
variable_importance = clf.feature_importances_
variables = list(X_train.columns)
importance = pd.DataFrame(variables)
importance.columns = ["Variables"]
importance["Importance"] = variable_importance
importance = importance.sort_values("Importance", ascending=False)
plt.bar(importance["Variables"], importance["Importance"])
plt.xlabel("Variable")
plt.ylabel("Importance")
plt.title("Variable Importance for Random Forest Model")
plt.xticks(rotation=45)
plt.savefig('VarImp_RF.png')
plt.show()

# ###### Comments:
# From the independent variables, the shell weight is the maximum driver of age
#
# ### Summary:
# ###### Comparison on Model Performance:
#
# | Model                       | R2 (Train)  | MAE (Train) | MSE (Train) |R2 (Test)  | MAE (Test) | MSE (Test) |
# |-----------------------------|:-----------:|:-----------:|:-----------:|:---------:|:----------:|:-----------|
# | Linear Regression           | 0.4180      | 1.7887      | 5.7849      | 0.3825    | 1.7622     | 5.7419     |
# | Random Forest Regressor     | 0.8013      | 1.0054      | 1.9748      | 0.5535    | 1.4682     | 4.1519     |

# ###### Comments:
# Based on performance metrics $ R^2 $, MAE
# and MSE for test, the models in order of performance are:
# Random Forest Regressor > Linear Regression

# Save the RF Model
import pickle

os.chdir('..\\Models\\')
filename = "RF_Regressor.sav"
# BUG FIX: use a context manager so the file handle is always closed; the original
# passed a bare open() to pickle.dump and never closed it.
with open(filename, 'wb') as model_file:
    pickle.dump(best_random, model_file)
#
Codes/04Crab_Age_Prediction_RFModel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression

# Toy 1-D example: two well separated groups of points, labelled 0 and 1
X = np.array([1, 2, 3, 4, 5, 10, 11, 12, 13, 14])
X_ = X.reshape(-1, 1)  # sklearn expects a 2-D feature matrix
y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])

plt.scatter(X, y)

clf = LogisticRegression()
clf.fit(X_, y)
y_pred = clf.predict(X_)

plt.scatter(X, y, color='r')
plt.plot(X, y_pred)

# Accuracy = fraction of points where the prediction equals the label.
# (vectorised; the original counted zeros of y - y_pred in an explicit python loop)
corr = int((y == y_pred).sum())
print('accuracy=', corr / len(y_pred) * 100)

# As evident from graph

# Breast-cancer dataset: binary classification with 30 features
data_set = datasets.load_breast_cancer()
X = data_set.data
y = data_set.target
print('Data fields data set:')
print(data_set.feature_names)
print('Classification outcomes:')
print(data_set.target_names)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
y_train

# Standardise features on the train statistics only, then apply to both splits
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

lr = LogisticRegression()
lr.fit(X_train_std, y_train)
y_pred = lr.predict(X_test_std)
y_pred

# +
correct = (y_test == y_pred).sum()
incorrect = (y_test != y_pred).sum()
accuracy = correct / (correct + incorrect) * 100
print('\nPercent Accuracy: %0.1f' % accuracy)

# +
prediction = pd.DataFrame()
prediction['actual'] = data_set.target_names[y_test]
prediction['predicted'] = data_set.target_names[y_pred]
prediction['correct'] = prediction['actual'] == prediction['predicted']
print('\nDetailed results for first 20 tests:')
print(prediction.head(20))
# -

# NOTE(review): these imports are unused in this notebook; kept so as not to break any
# downstream cells, but consider removing them.
import tensorflow as tf
from tensorflow import keras
1 Supervised Learning/Classification/LogisticRegression using SKLearn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Programming Extras # + [markdown] slideshow={"slide_type": "skip"} toc=true # <h1>Table of Contents<span class="tocSkip"></span></h1> # <div class="toc"><ul class="toc-item"><li><span><a href="#Testing" data-toc-modified-id="Testing-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Testing</a></span><ul class="toc-item"><li><span><a href="#Docstrings" data-toc-modified-id="Docstrings-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Docstrings</a></span></li><li><span><a href="#Doctest" data-toc-modified-id="Doctest-1.2"><span class="toc-item-num">1.2&nbsp;&nbsp;</span>Doctest</a></span></li><li><span><a href="#Unit-testing" data-toc-modified-id="Unit-testing-1.3"><span class="toc-item-num">1.3&nbsp;&nbsp;</span>Unit testing</a></span></li></ul></li><li><span><a href="#Debugging" data-toc-modified-id="Debugging-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Debugging</a></span></li><li><span><a href="#Profiling" data-toc-modified-id="Profiling-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Profiling</a></span><ul class="toc-item"><li><span><a href="#Within-jupyter-notebook" data-toc-modified-id="Within-jupyter-notebook-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Within jupyter notebook</a></span></li><li><span><a href="#Profiling-your-entire-code" data-toc-modified-id="Profiling-your-entire-code-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Profiling your entire code</a></span></li><li><span><a href="#Lineprofiling-your-code" data-toc-modified-id="Lineprofiling-your-code-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Lineprofiling your code</a></span></li></ul></li><li><span><a href="#Speed-up-your-code" data-toc-modified-id="Speed-up-your-code-4"><span 
# class="toc-item-num">4&nbsp;&nbsp;</span>Speed up your code</a></span><ul class="toc-item"><li><span><a href="#Ufuncs" data-toc-modified-id="Ufuncs-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Ufuncs</a></span></li><li><span><a href="#Numba" data-toc-modified-id="Numba-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Numba</a></span></li></ul></li><li><span><a href="#Git(hub)" data-toc-modified-id="Git(hub)-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Git(hub)</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#What-can-it-look-like?" data-toc-modified-id="What-can-it-look-like?-5.0.1"><span class="toc-item-num">5.0.1&nbsp;&nbsp;</span>What can it look like?</a></span></li></ul></li></ul></li><li><span><a href="#Github" data-toc-modified-id="Github-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Github</a></span></li><li><span><a href="#Publishing-code" data-toc-modified-id="Publishing-code-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Publishing code</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Software-Citation-Principles" data-toc-modified-id="Software-Citation-Principles-7.0.1"><span class="toc-item-num">7.0.1&nbsp;&nbsp;</span>Software Citation Principles</a></span></li></ul></li></ul></li></ul></div>

# + [markdown] slideshow={"slide_type": "slide"}
# ## Testing
# *Ensure your code never breaks*

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Docstrings

# + slideshow={"slide_type": "-"}
def func(arg1, arg2):
    """Summary line.

    Extended description of function.

    Args:
        arg1 (int): Description of arg1
        arg2 (str): Description of arg2

    Returns:
        bool: Description of return value

    Raises:
        ValueError: If `arg2` is equal to `arg1`.

    Examples:
        Examples should be written in doctest format, and should illustrate how
        to use the function.

        >>> a = [1,2,3]
        >>> print([x + 3 for x in a])
        [4, 5, 6]

    """
    if arg1 == arg2:
        raise ValueError('arg1 may not be equal to arg2')
    return True


# + [markdown] slideshow={"slide_type": "subslide"}
# ### Doctest

# + slideshow={"slide_type": "-"}
def fib(n):
    """Calculates the n-th Fibonacci number.

    >>> fib(0)
    0
    >>> fib(15)
    610

    """
    # BUG FIX: the docstring used to end with a bare '>>>' prompt; doctest rejects
    # that as an empty example, so `python -m doctest` errored out on this file.
    a, b = 0, 1
    for i in range(n):
        a, b = b, a + b
    return a
# -

# Which can be run with
# ```
# $ python3 -m doctest -v <file>
# ```

# + [markdown] slideshow={"slide_type": "subslide"}
# Producing
# ```
# Trying:
#     fib(0)
# Expecting:
#     0
# ok
# Trying:
#     fib(15)
# Expecting:
#     610
# ok
# 1 items had no tests:
#     test
# 1 items passed all tests:
#     2 tests in test.fib
# 2 tests in 2 items.
# 2 passed and 0 failed.
# Test passed.
# ```

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Unit testing

# +
import unittest


# Define the function
def fun(x):
    return x + 1


# Define the tests
class MyTest(unittest.TestCase):
    def test(self):
        self.assertEqual(fun(3), 4)


# Run the unit test (the argv is just for jupyter notebooks)
if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)

# + [markdown] slideshow={"slide_type": "subslide"}
# ## Debugging
# *When your computer makes you feel stupid*
# -

# Most people simply use `print()` statements to debug. But you can do better than that...
# + slideshow={"slide_type": "subslide"}
import time


def complicated_function():
    # NOTE: deliberately buggy demo -- x is an int and y a str, so x+y+z raises
    # TypeError; the slide shows print() debugging of exactly this situation.
    time.sleep(2)
    x, y, z = 1, '2', 3
    # Usually you might do this
    print(y)
    return x+y+z


complicated_function()

# + slideshow={"slide_type": "subslide"}
import time


def complicated_function():
    time.sleep(0.5)
    x, y, z = 1, '2', 3
    # But how about dropping into an interactive shell right at the point of interest:
    import IPython; IPython.embed()
    return x+y+z


complicated_function()

# + [markdown] slideshow={"slide_type": "slide"}
# ## Profiling
# *Find the bottleneck in your code*

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Within jupyter notebook

# +
# %%time
def upper_func(x):
    return x + 1


def middle_func(x):
    [upper_func(i) for i in range(10000)]
    return upper_func(x) + 1


def lower_func(x):
    return middle_func(x) + 1


lower_func(5)

# + slideshow={"slide_type": "subslide"}
# %%timeit
def upper_func(x):
    return x + 1


def middle_func(x):
    [upper_func(i) for i in range(10000)]
    return upper_func(x) + 1


def lower_func(x):
    return middle_func(x) + 1


lower_func(5)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Profiling your entire code
# -

# Try profiling your code using a bash function
# (shell snippet, not python -- kept commented so the file stays importable):
# profile() { python3 -m cProfile -o ~/Downloads/temp.profile $1; snakeviz ~/Downloads/temp.profile;}

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Lineprofiling your code
# -

# Or if that's not detailed enough, place the `@profile` decorator above a function in
# your code, and then run the following (shell snippet, kept commented):
# lineprofile() { kernprof -l -v $1;}

# + [markdown] slideshow={"slide_type": "slide"}
# ## Speed up your code
# *Speed up for-loops*

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ufuncs
# -

import numpy as np

g = np.array([1, 2, 3, 4])
np.sin(g)


# +
def step_function(x):
    # Scalar-only: `x > 0` on an array raises "truth value is ambiguous",
    # which is exactly what the np.vectorize demo below addresses.
    if x > 0:
        return 1
    else:
        return 0


ar = np.array([-10, 10, 100])
step_function(ar)

# + slideshow={"slide_type": "fragment"}
ustep_function = np.vectorize(step_function)
ustep_function(ar)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Numba

# +
ar = np.random.random(12345678)


# Silly function
def step_function_python(a):
    output = np.zeros_like(a)
    for i, nr in enumerate(a):
        if nr > 0:
            output[i] = 1
    # BUG FIX: the result was computed but never returned
    return output


# %time step_function_python(ar)

# + slideshow={"slide_type": "subslide"}
# Numpy version of step function
def step_function_numpy(a):
    output = np.zeros_like(a)
    # BUG FIX: the original wrote `a[a > 0] = 1`, mutating the *input* array and
    # leaving `output` unused; write into output and return it instead.
    output[a > 0] = 1
    return output


# %time step_function_numpy(ar)

# + slideshow={"slide_type": "subslide"}
import numba as nb


# +
@nb.jit()
def step_function_python(a):
    output = np.zeros_like(a)
    for i, nr in enumerate(a):
        if nr > 0:
            output[i] = 1
    return output


# %time step_function_python(ar)
# %time step_function_python(ar)
# %time step_function_python(ar)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Git(hub)
# *Version control your software*

# + [markdown] slideshow={"slide_type": "subslide"}
# Everyone should use git. Seriously. You'll no longer need to worry about breaking a
# working version of your code. Don't worry about learning all the commands - these days
# there are GUIs like Gitkraken which do the hard work for you.

# + [markdown] slideshow={"slide_type": "subslide"}
# ![final_version](media/final.png)

# + [markdown] slideshow={"slide_type": "subslide"}
# #### What can it look like?
# ![git](media/git.png)

# + [markdown] slideshow={"slide_type": "subslide"}
# For a full introduction, see [this presentation](https://davidgardenier.com/talks/201710_git.pdf)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Github
# *Backup your code*

# + [markdown] slideshow={"slide_type": "subslide"}
# Want to have a backup of your data? Or collaborate on code without having to send
# through files or code fragments? Check out Github and apply for a Student Developer
# Pack or an Academic Research Pack.
#
# Want to share a snippet of code? Try using gists
#
# Want your code to automatically be tested when it arrives on Github? Try linking it
# up with Travis
#
# And want to know which percentage of your code you've tested?
Then try Coveralls # + [markdown] slideshow={"slide_type": "slide"} # ## Publishing code # *How to ensure your software is accessible* # + [markdown] slideshow={"slide_type": "subslide"} # > Integrity of research depends on transparency and reproducibility # # Quote by <NAME> # + [markdown] slideshow={"slide_type": "subslide"} # #### Software Citation Principles # * Importance | Software is as important as a paper # * Credit and attribution | Software should be quoted # * Unique identification | Globally unique # * Persistence | The identifiers have to persist # * Accessibility | The code, data etc, should be available # * Specificity | Version of software # + [markdown] slideshow={"slide_type": "subslide"} # * Astrophysics Source Code Library (ASCL, ascl.net) | A place to put software # + [markdown] slideshow={"slide_type": "subslide"} # What do you need to do? # * Release your code # * Specify how you want your code to be cited # * License your code # * Register your code # * Archive your code
7_programming_extras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np from scipy import optimize import matplotlib.pyplot as plt dataSealvl=pd.read_csv("data/EarthViewer/EarthViewer_Data/SEALVL.txt",header=0,sep="/") dataSealvl dataSealvl.drop(["A"],axis=1,inplace=True) dataSealvl.set_index(["year"]) plt.figure(figsize=(20,10)) plt.plot(dataSealvl["year"],dataSealvl["GMSL_SI_GIA_20y"]) plt.grid() plt.xlim(1993,2020.5) # + def func(x,a,b,c,d,e,f,g,h): return a+b*x+ c*np.cos(d*x+e)+f*np.sin(g*x+h) nres_max, cov_max = optimize.curve_fit(func,dataSealvl["year"], dataSealvl["GMSL_SI_GIA_20y"]) # - nres_max # + #nres_max[1]=0.02 plt.figure(figsize=(20,10)) plt.plot(dataSealvl["year"],dataSealvl["GMSL_SI_GIA_20y"]) plt.plot(dataSealvl["year"],func(dataSealvl["year"],*nres_max),color="red") plt.grid() plt.xlim(1993,2020.5) # - nres_max, cov_max plt.figure(figsize=(20,10)) plt.plot(dataSealvl["year"],dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"]) plt.grid() plt.xlim(1993,2020.5) # + def func2(x,a,b,c,d,e,f,g,h,i,j): return a*x+b+c*np.sin(d*x+h)**j+e*np.cos(d*x**2+h) SMOOTHres_max, SMOOTHcov_max = optimize.curve_fit(func,dataSealvl["year"], dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"]) # - plt.figure(figsize=(20,10)) plt.plot(dataSealvl["year"],dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"]) plt.plot(dataSealvl["year"],func(dataSealvl["year"],*SMOOTHres_max),color="red") plt.grid() plt.xlim(1993,2020.5) plt.figure(figsize=(20,10)) plt.plot(dataSealvl["year"][0:100],dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"][0:100]) plt.grid() #plt.xlim(1993,2020.5) # + def func3(x,a,b,c,d,e,f,g): return a*x+b+c*np.sin(d*x+e)+d*x**2+e*x**3+f*x**4+g*x**5 #SHORTres_max, SHORTcov_max = optimize.curve_fit(func,dataSealvl["year"][0:100], dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"][0:100]) # - SHORTres_max 
GBFIT = np.loadtxt("data/EarthViewer/EarthViewer_Data/SEALVLGB.txt")

# + Smoothed data vs the periodic regression and the gradient-boost fit
plt.figure(figsize=(20, 10))
plt.scatter(dataSealvl["year"], dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"], label="Datos filtrados", s=7)
plt.plot(dataSealvl["year"], func(dataSealvl["year"], *SMOOTHres_max), color="red", label="regresión periódica")
plt.plot(dataSealvl["year"], GBFIT, color="green", label="Gradient boost fit", linestyle="--")
plt.grid()
plt.legend(fontsize=15)
plt.xlim(1993, 2020.5)
# BUG FIX: the x axis is the year; "Altura media" (mean sea level) is the y axis.
plt.xlabel("Año")
plt.ylabel("Altura media")
# -


def ftry(x, a1, a2, a3, a4, b1, b2, b3, b4, c1, c2, c3, c4, a9, b9):
    """Truncated Fourier-style series (4 sine terms) plus a linear trend a9 + b9*x."""
    return (a1*np.sin(b1*x + c1) + a2*np.sin(b2*x + c2) + a3*np.sin(b3*x + c3)
            + a4*np.sin(b4*x + c4) + a9 + b9*x)


FOURIERres_max, FOURIERcov_max = optimize.curve_fit(
    ftry, dataSealvl["year"], dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"], maxfev=15000)

# Terms kept from an earlier, larger model (8 sine terms) for reference:
# +a5*np.sin(b5*x+c5)+a6*np.sin(b6*x+c6)+a7*np.sin(b7*x+c7)+a8*np.sin(b8*x+c8)+a9+b9*x
# ,c5,c6,c7,c8
# ,b5,b6,b7,b8
# ,a5,a6,a7,a8

plt.figure(figsize=(20, 10))
plt.scatter(dataSealvl["year"], dataSealvl["11_GMSL_SMOOTH_SI_GIA_20y"], label="Real data", s=7)
plt.plot(dataSealvl["year"], func(dataSealvl["year"], *SMOOTHres_max), color="red", label="2-polinomial fit")
plt.plot(dataSealvl["year"], GBFIT, color="green", label="Gradient boost fit", linestyle="--")
# BUG FIX: this curve is the Fourier-series fit defined above; it was mislabelled
# "Linear fit" and drawn in the same green as the gradient-boost curve.
plt.plot(dataSealvl["year"], ftry(dataSealvl["year"], *FOURIERres_max), color="orange", label="Fourier fit", linestyle=":")
plt.grid()
plt.legend(fontsize=15)
plt.xlim(1993, 2020.5)

# Load the remaining EarthViewer datasets
lateco2 = pd.read_csv("data/EarthViewer/EarthViewer_Data/modern_CO2.txt", header=5, sep=" ")
earlyco2 = pd.read_csv("data/EarthViewer/EarthViewer_Data/early_CO2.txt", header=8, sep=" ")
earlyo2 = pd.read_csv("data/EarthViewer/EarthViewer_Data/Oxygen.txt", header=4, sep=" ")
# The O2 column comes in as strings like "20.9%": strip the percent sign, cast to float
earlyo2["%O2"] = earlyo2["%O2"].apply(lambda x: float(x.replace("%", "")))
modtemp = pd.read_csv("data/EarthViewer/EarthViewer_Data/Modern_temp.txt", header=7, sep=" ")
earlytemp = pd.read_csv("data/EarthViewer/EarthViewer_Data/Early_temp.txt", header=9, sep=" ")
paleotemp = pd.read_csv("data/EarthViewer/EarthViewer_Data/Paleo_temp.txt", header=10, sep=" ")
paleoco2=pd.read_csv("data/EarthViewer/EarthViewer_Data/Paleo_CO2.txt",header=9,sep=" ") modbio=pd.read_csv("data/EarthViewer/EarthViewer_Data/Modern_biodiversity.txt",header=6,sep=" ") paleoco2 plt.figure(figsize=(20,10)) plt.plot(earlyo2["Ma"],earlyo2["%O2"],c="red",linewidth=4) plt.xlim(4560, 0) plt.figure(figsize=(20,10)) plt.subplot(2,2,1) #plt.plot(earlyo2["Ma"],earlyo2["%O2"],c="red",linewidth=4) plt.plot(lateco2["Year"],lateco2["CO2 (ppm)"],c="red",linewidth=4) plt.xlabel("Years") plt.ylabel("CO2 (ppm)") plt.subplot(2,2,2) plt.plot(modtemp["Year"],modtemp["Temperature anomaly (C)"],c="b",linewidth=4) plt.xlabel("Years") plt.ylabel("Temperature anomaly (C)") plt.subplot(2,2,3) plt.plot(modbio["Year"],modbio["Living Planet Index"],c="g",linewidth=4) plt.xlabel("Years") plt.ylabel("Living Planet Index") plt.subplot(2,2,4) plt.plot(earlyo2["Ma"],earlyo2["%O2"],c="orange",linewidth=4) plt.xlim(3000, 0) plt.xlabel("Years ago (Millones)") plt.ylabel("$O_2$ concentration (%)") #plt.xlim(4560, 0) def valuescloseto(X,val,umbral): closers=[] for i in range(len(X)): if (X[i]<(val+umbral) and X[i]>(val-umbral)): print(i,X[i]) closers.append(i) return closers oxig=valuescloseto(earlyo2["%O2"],20.95,0.1) earlyo2["Ma"][33] paleoco2 codos=valuescloseto(paleoco2["ppm"],412.15,10) paleoco2["Ma"][1203] codos paleoco2["Ma"][1161] earlyo2["Ma"][35] paleotemp["Ma"][154] temps=valuescloseto(paleotemp["Temp °C"],22.4,2) paleotemp["Ma"][293] # + plt.figure(figsize=(20,10)) plt.subplot(3,1,1) #plt.plot(earlyo2["Ma"],earlyo2["%O2"],c="red",linewidth=4) plt.plot(paleoco2["Ma"],paleoco2["ppm"],c="red",linewidth=2) dotco2=1161 plt.scatter(paleoco2["Ma"][dotco2],paleoco2["ppm"][dotco2],c="black",linewidth=4,marker="x") plt.vlines(paleoco2["Ma"][dotco2]-30,ymin=-10000,ymax=1400000) plt.vlines(paleoco2["Ma"][dotco2]+30,ymin=-10000,ymax=1400000) plt.ylim(0,120000) plt.xlim(3000, 0) plt.xlabel("Years ago (Millions)") plt.ylabel("CO2 (ppm)") plt.subplot(3,1,2) 
plt.plot(paleotemp["Ma"],paleotemp["Temp °C"],c="b",linewidth=2) dottemp=154 plt.scatter(paleotemp["Ma"][dottemp],paleotemp["Temp °C"][dottemp],c="black",linewidth=4,marker="x") plt.xlabel("Years") plt.ylabel("Temperature (C)") plt.ylim(10,32) plt.xlim(530, 0) plt.vlines(paleotemp["Ma"][dottemp]-10,ymin=-10000,ymax=1400000) plt.vlines(paleotemp["Ma"][dottemp]+10,ymin=-10000,ymax=1400000) plt.subplot(3,1,3) plt.plot(earlyo2["Ma"],earlyo2["%O2"],c="orange",linewidth=2) doto2=35 plt.scatter(earlyo2["Ma"][doto2],earlyo2["%O2"][doto2],c="black",linewidth=4,marker="x") plt.ylim(0,36) plt.xlim(3000, 0) plt.xlabel("Years ago (Millones)") plt.ylabel("$O_2$ concentration (%)") plt.vlines(earlyo2["Ma"][doto2]-60,ymin=-10000,ymax=1400000) plt.vlines(earlyo2["Ma"][doto2]+60,ymin=-10000,ymax=1400000) #plt.xlim(4560, 0) # -
EarthStuff.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## How to BRUTE-FORCE a Hash function
# ### *<NAME>*
# #### 2nd year, Khulna University of Engineering and Technology

# To be a good Hash Function H(x), where y is the Hash value:
# 1. H must be efficient to compute
# 2. H must be deterministic
# 3. y must be random looking
# 4. H must be resistant to forgery
#     * It should be very time consuming to find collisions
#     * y should depend on every bit of the origin

# Using the standard library hashlib module I computed the MD5, SHA1 and SHA256 (that's
# SHA2 with a hash size of n=256) of the string "Hello, world!"

# +
import hashlib

md = hashlib.md5()
md.update(b"Hello, world!")
sha1 = hashlib.sha1()
sha1.update(b"Hello, world!")
sha2 = hashlib.sha256()
sha2.update(b"Hello, world!")
print(md.hexdigest())
print(sha1.hexdigest())
print(sha2.hexdigest())
# -

# I implemented a hash function `simple_hash` that given a string `s`, computes its hash
# as follows: it starts with r = 7, and for every character in the string, multiplies r
# by 31, adds that character to r, and keeps everything modulo 2**16.


def simple_hash(s):
    """Toy 16-bit polynomial rolling hash: r -> (r*31 + ord(c)) mod 2**16, r0 = 7."""
    r = 7
    for c in s:
        r = (r * 31 + ord(c)) % 2**16
    return r


# I'll now Brute-force the hash function that I've just written in the above cell!
#
# I've implemented a function `crack` that given a string s, loops until it finds a
# different string that collides with it, and returns the different string.

# +
import random
import string


def get_random_string(length):
    """Return a random lowercase ascii string of the given length."""
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(length))


def crack(s):
    """Return a string s2 != s with simple_hash(s2) == simple_hash(s).

    Randomly samples 4-character strings (log(2**16)/log(26) ~ 4, so 4 characters
    already cover the whole 16-bit hash space). Raises RuntimeError in the
    practically impossible case that no collision shows up within 10 * 2**16 samples
    (the original silently returned the last, non-colliding candidate instead).
    """
    hash1 = simple_hash(s)
    for i in range(10 * 2**16):
        s2 = get_random_string(4)
        # BUG FIX: also require s2 != s, as the contract promises, in case the input
        # itself is a 4-character lowercase string.
        if s2 != s and simple_hash(s2) == hash1:
            return s2
    raise RuntimeError("no collision found (should be practically impossible)")


print(crack('hello'))
# -

# The function `weak_md5` is a "weaker" version of MD5, using only the first 5 bytes of
# the MD5 hash. This means its hashing size is n=40 and it can be brute forced rather
# easily.
#
# I implemented a function `find_collisions` that loops over all the possible strings
# until it finds an arbitrary collision - that is, two different strings whose hash is
# the same - and returns them (as a tuple).

# +
import hashlib
import itertools
from itertools import product
import string


def weak_md5(s):
    """First 5 bytes (40 bits) of the MD5 digest of bytes `s`."""
    return hashlib.md5(s).digest()[:5]


def find_collisions():
    """Enumerate short alphanumeric strings until two share a weak_md5 digest.

    Returns:
        (s1, s2) such that s1 != s2 and weak_md5(s1) == weak_md5(s2)
    """
    chars = string.ascii_letters + '1234567890'
    d = {}
    for i in range(40):
        generator = itertools.product(chars, repeat=i)
        for password in generator:
            password = ''.join(password)
            h1 = weak_md5(password.encode('utf-8'))
            if h1 not in d:
                d[h1] = password
            else:
                return (password, d[h1])
# -

# To see how hard it is to brute force a real hash function, I tried running the
# function that I wrote in the previous cell, but using the full MD5.

# +
import hashlib


def md5(s):
    """Full 128-bit MD5 digest of bytes `s`."""
    return hashlib.md5(s).digest()


def find_collisions():
    """Same brute force as above, but against the full MD5 (do not wait for it...).

    With a 128-bit digest a collision is only expected after ~2**64 attempts, so this
    will not terminate in any reasonable amount of time.
    """
    chars = string.ascii_letters + '1234567890'
    d = {}
    for i in range(40):
        generator = itertools.product(chars, repeat=i)
        for password in generator:
            # BUG FIX: this line was corrupted ("<PASSWORD>(password)") and the digest
            # below still called weak_md5; use ''.join and the full md5 as intended.
            password = ''.join(password)
            h1 = md5(password.encode('utf-8'))
            if h1 not in d:
                d[h1] = password
            else:
                return (password, d[h1])
How to BRUTE-FORCE a Hash function.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Distribution of predictions
#
# Right now, sums are generated by randomly sampling `n_terms` numbers in the range
# [0, 10**n_digits - 1]. The problem with this is that sums summing "around the middle"
# occur most often. For example, if `n_digits=2` and `n_terms=2`, then sums are from
# 0+0 to 99+99, giving a range of 0 to 198. Thus sums summing to the midpoint of 99
# occur the most often, and very few training examples are generated for sums summing
# to the lower or higher end. So the point of this notebook is to write functions that
# can generate a uniform distribution of sums with respect to the sum.

import numpy as np
from matplotlib import pyplot as plt
import random


# ## Baseline

def generate_sample(n_terms, n_digits):
    """Draw `n_terms` terms uniformly from [0, 10**n_digits - 1]; return (terms, sum)."""
    # BUG FIX: np.random.randint's upper bound is *exclusive*, so the original
    # randint(10 ** n_digits - 1) could never draw the largest n_digits number
    # (e.g. 99 for n_digits=2).
    x = [np.random.randint(10 ** n_digits) for _ in range(n_terms)]
    y = np.sum(x)
    return x, y


# +
sums = []
x_s = []
for _ in range(10**5):
    x, y = generate_sample(3, 2)
    x_s.extend(x)
    sums.append(y)
# -

plt.figure(figsize=(12, 8))
plt.hist(sums, bins=100);

plt.figure(figsize=(12, 8))
plt.hist(x_s, bins=100);


# ## Uniform sampling

def generate_uniform_sample(n_terms, n_digits, y):
    """Draw `n_terms` terms in [0, 10**n_digits - 1] summing exactly to `y`.

    Assumes 0 <= y <= n_terms * (10**n_digits - 1). Returns (terms, y).
    """
    max_term = 10 ** n_digits - 1
    x = []
    # Draw the first n_terms-1 terms. Each draw is bounded below *and* above so that
    # the remaining terms can still reach the target sum while staying in range.
    # (BUG FIX: the original only bounded draws from above, so the final term
    # y - sum(x) could fall outside the n_digits range, e.g. 0 + 198 for y=198.)
    for terms_left in range(n_terms - 1, 0, -1):
        remaining = y - sum(x)
        lo = max(0, remaining - terms_left * max_term)
        hi = min(remaining, max_term)
        x.append(int(np.random.randint(lo, hi + 1)))
    x.append(y - sum(x))
    random.shuffle(x)
    return x, y


def uniform_samples(n_terms, n_digits):
    """Generate 10**5 samples whose target sums are uniform over the achievable range."""
    max_sum = (10**n_digits - 1) * n_terms
    possible_sums = range(max_sum + 1)
    sums = []
    x_s = []
    for _ in range(10**5):
        x, y = generate_uniform_sample(n_terms, n_digits, np.random.choice(possible_sums))
        sums.append(y)
        x_s.extend(x)
    return x_s, sums


x_s, sums = uniform_samples(n_terms=2, n_digits=2)

plt.figure(figsize=(12, 8))
plt.hist(sums, bins=100); plt.figure(figsize=(12, 8)) plt.hist(x_s, bins=100);
Notebooks/Distribution of predictions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# <div class="alert alert-block alert-success">
# <b><center>CNN Basic Examples</center></b>
# <b><center>RSNET</center></b>
# </div>

# # Configure Learning Environment

import os
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

# # !pip install git+https://github.com/nockchun/rspy --force
# # !pip install mybatis_mapper2sql
import rspy as rsp
rsp.setSystemWarning(off=True)
rsp.fixMemoryProblem()

import numpy as np
from ipywidgets import interact
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from tensorflow.keras import utils, layers, activations, models, losses, optimizers, metrics, callbacks, datasets, preprocessing

tf.__version__

# # CIFAR10 Dataset
# The CIFAR10 dataset contains 60,000 color images in 10 classes, with 6,000
# images in each class. The dataset is divided into 50,000 training images and
# 10,000 testing images. The classes are mutually exclusive and there is no
# overlap between them.

# Download the CIFAR-10 dataset using keras (comment fixed: this loads
# cifar10, not mnist)
(train_data, train_label), (test_data, test_label) = datasets.cifar10.load_data()
train_data.shape, train_label.shape, test_data.shape, test_label.shape

# Counting unique value of train data
unique, counts = np.unique(train_label, return_counts=True)
num_labels = len(unique)
f"Train labels: {dict(zip(unique, counts))}"

# # Preprocessing

# +
# Normalizing pixel values to [0, 1]
train_data_proc = train_data / 255
test_data_proc = test_data / 255

# One hot encoding
train_label_proc = utils.to_categorical(train_label, 10)
test_label_proc = utils.to_categorical(test_label, 10)
# -

# Online data augmentation for the training set
image_gen = preprocessing.image.ImageDataGenerator(
    width_shift_range=0.3,
    height_shift_range=0.3,
    horizontal_flip=True,
    rotation_range=20,
    shear_range=0.05,
    zoom_range=0.3
)

train_gen = image_gen.flow(train_data_proc, train_label_proc, batch_size=128)

# + [markdown] tags=[]
# # Create Model

# + tags=[]
mInput = layers.Input((32, 32, 3))
# -

# Block 1: two 3x3 convs + batch norm, overlapping max pool, dropout
mB1 = layers.Conv2D(64, 3, padding="same", activation="relu", name="B1_Conv_1")(mInput)
mB1 = layers.BatchNormalization(name="B1_Norm_1")(mB1)
mB1 = layers.Conv2D(64, 3, padding="same", activation="relu", name="B1_Conv_2")(mB1)
mB1 = layers.BatchNormalization(name="B1_Norm_2")(mB1)
mB1 = layers.MaxPool2D(2, strides=1, name="B1_Pool")(mB1)
mB1 = layers.Dropout(0.5, name="B1_Drop")(mB1)

# +
# mB2 = layers.Conv2D(64, 3, padding="same", activation="relu", name="B2_Conv_1")(mB1)
# mB2 = layers.BatchNormalization(name="B2_Norm_1")(mB2)
# mB2 = layers.Conv2D(128, 3, dilation_rate=2, activation="relu", name="B2_Conv_2")(mB2)
# mB2 = layers.BatchNormalization(name="B2_Norm_2")(mB2)
# mB2 = layers.MaxPool2D(2, strides=1, name="B2_Pool")(mB2)
# mB2 = layers.Dropout(0.5, name="B2_Drop")(mB2)
# -

# Block 3: mB3Pool is kept separately because it feeds the residual adds in
# blocks 4 and 8 below
mB3 = layers.Conv2D(128, 3, padding="same", activation="relu", name="B3_Conv_1")(mB1)
mB3 = layers.BatchNormalization(name="B3_Norm_1")(mB3)
mB3 = layers.Conv2D(256, 3, padding="same", dilation_rate=2, activation="relu", name="B3_Conv_2")(mB3)
mB3 = layers.BatchNormalization(name="B3_Norm_2")(mB3)
mB3Pool = layers.MaxPool2D(2, strides=1, name="B3_Pool")(mB3)
mB3 = layers.Dropout(0.5, name="B3_Drop")(mB3Pool)

# Block 4: residual add with the block-3 pool output
mB4 = layers.Conv2D(128, 3, padding="same", activation="relu", name="B4_Conv_1")(mB3)
mB4 = layers.BatchNormalization(name="B4_Norm_1")(mB4)
mB4 = layers.Conv2D(256, 3, padding="same", activation="relu", name="B4_Conv_2")(mB4)
mB4 = layers.add([mB4, mB3Pool], name="B4_Add")
mB4Nor = layers.BatchNormalization(name="B4_Norm_2")(mB4)
mB4Dout = layers.Dropout(0.5, name="B4_Drop")(mB4Nor)

# Blocks 5-7: three parallel branches off block 4, merged in B7_Add
mB5 = layers.Conv2D(128, 3, dilation_rate=2, padding="same", activation="relu", name="B5_Conv_1")(mB4Nor)
mB5 = layers.BatchNormalization(name="B5_Norm")(mB5)
mB5 = layers.Conv2D(256, 3, dilation_rate=3, padding="same", activation="relu", name="B5_Conv_2")(mB5)

mB6 = layers.Conv2D(128, 5, padding="same", activation="relu", name="B6_Conv_1")(mB4Nor)
mB6 = layers.BatchNormalization(name="B6_Norm")(mB6)
mB6 = layers.Conv2D(256, 5, padding="same", activation="relu", name="B6_Conv_2")(mB6)

mB7 = layers.Conv2D(128, 3, padding="same", activation="relu", name="B7_Conv_1")(mB4Dout)
mB7 = layers.BatchNormalization(name="B7_Norm_1")(mB7)
mB7 = layers.Conv2D(256, 3, padding="same", activation="relu", name="B7_Conv_2")(mB7)
mB7 = layers.add([mB5, mB6, mB7], name="B7_Add")
mB7 = layers.BatchNormalization(name="B7_Norm_2")(mB7)
mB7 = layers.Dropout(0.5, name="B7_Drop")(mB7)

# Block 8: merges skip connections from blocks 3, 5 and 7
mB8 = layers.Conv2D(128, 3, padding="same", activation="relu", name="B8_Conv_1")(mB7)
mB8 = layers.BatchNormalization(name="B8_Norm_1")(mB8)
mB8 = layers.Conv2D(256, 3, padding="same", dilation_rate=2, activation="relu", name="B8_Conv_2")(mB8)
mB8 = layers.add([mB3Pool, mB5, mB7, mB8], name="B8_Add")
mB8 = layers.BatchNormalization(name="B8_Norm_2")(mB8)
mB8 = layers.Dropout(0.5, name="B8_Drop")(mB8)

mB9 = layers.Conv2D(256, 3, activation="relu", name="B9_Conv_1")(mB8)
mB9 = layers.BatchNormalization(name="B9_Norm_1")(mB9)
mB9 = layers.Conv2D(256, 3, dilation_rate=2, activation="relu", name="B9_Conv_2")(mB9)
mB9 = layers.BatchNormalization(name="B9_Norm_2")(mB9)
mB9 = layers.MaxPool2D(2, name="B9_Pool")(mB9)
mB9 = layers.Dropout(0.5, name="B9_Drop")(mB9)

# Renamed from "B8_Conv" (copy-paste slip): this conv belongs to block 10 and
# the B8_* prefix is already used by block 8's layers above.
mB10 = layers.Conv2D(512, 3, padding="same", activation="relu", name="B10_Conv")(mB9)
mB10 = layers.GlobalAveragePooling2D(name="B10_Pool")(mB10)
mB10 = layers.BatchNormalization(name="B10_Norm")(mB10)

mOutput = layers.Dense(10, activation="softmax", name="Output")(mB10)

model = models.Model(mInput, mOutput)
model.summary()

utils.plot_model(model, "intermediate/model.png", True, dpi=70)

model.compile(
    loss=losses.CategoricalCrossentropy(),
    optimizer=optimizers.Adadelta(),
    metrics=[metrics.CategoricalAccuracy()],
)

history_aug = model.fit(
    train_gen,
    validation_data=(test_data_proc, test_label_proc),
    epochs=100
)

pd.DataFrame(history_aug.history).plot()

# # Predict

# +
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

@interact(idx=(0, 9999, 1))
def showTestImage(idx):
    """Show test image `idx` with its true label and the model's prediction."""
    data = test_data[idx].reshape(-1, 32, 32, 3)/255
    dataPred = model.predict(data)
    plt.imshow(test_data[idx])
    plt.grid(False)
    plt.title(f"LABEL: {class_names[test_label[idx][0]]}, PREDICT: {class_names[np.argmax(dataPred)]}")
    plt.show()
# -

# # Error Find

# +
# Collect indices of misclassified test images
err_idx = []
pred_result = model.predict(test_data_proc)

for idx in range(10000):
    if np.argmax(pred_result[idx]) != test_label[idx][0]:
        err_idx.append(idx)

# + tags=[]
len(err_idx), err_idx
# -
lecture_source/practical_ai/006_cifar10_deep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#default_exp metrics
# -

# First of all, let us get all the data that we need. Through the magic of
# `nbdev`, we will use the functionality we defined in `01_gettin_started`

# +
from birdcall.data import *
from birdcall.metrics import *

import pandas as pd
# -

classes = pd.read_pickle('data/classes.pkl')

train_ds = SpectrogramDataset(pd.read_pickle('data/train_set.pkl'), classes, len_mult=100, spec_max=80, spec_min=-100)
valid_ds = SpectrogramDataset(pd.read_pickle('data/val_set.pkl'), classes, len_mult=20)

len(train_ds), len(valid_ds)

from fastai2.vision.all import *

train_ds.show(0)

# +
BS = 160

dls = DataLoaders(
    DataLoader(dataset=train_ds, bs=BS, num_workers=NUM_WORKERS, shuffle=True),
    DataLoader(dataset=valid_ds, bs=BS, num_workers=NUM_WORKERS)
).cuda()
# -

b = dls.train.one_batch()

b[0].shape

b[0].mean(), b[0].std()


def get_arch():
    """Assemble the model: a pretrained resnet50 backbone split into two
    sequential groups (so the splitter below can give them different learning
    rates), a head sized for the 264 bird classes, and a sigmoid so the
    outputs are per-class probabilities for multi-label training with BCE.

    Replaces the former ``get_arch = lambda: ...``: assigning a lambda to a
    name hurts tracebacks and violates PEP 8 (E731); a ``def`` is
    call-compatible.
    """
    return nn.Sequential(
        nn.Sequential(*list(resnet50(pretrained=True).children())[:6]),
        nn.Sequential(*list(resnet50(pretrained=True).children())[6:-2]),
        create_head(4096, 264),
        nn.Sigmoid()
    )


# A couple of functions to help us calculate metrics for diagnostics

def custom_splitter(model):
    """Split trainable parameters into three groups (early backbone, late
    backbone, head) for discriminative learning rates / freezing."""
    return trainable_params(model[0]), trainable_params(model[1]), trainable_params(model[2])


learn = Learner(
    dls,
    get_arch(),
    metrics=[AccumMetric(precision), AccumMetric(recall), AccumMetric(f1)],
    loss_func=BCELossFlat(),
    splitter=custom_splitter
)

learn.freeze_to(-1)

learn.fit(5, 5e-2)

learn.save('res50_train_head')

learn.unfreeze()

learn.fit_one_cycle(60, max_lr=(1e-4, 1e-3, 1e-2))

learn.save('res50')

learn.validate()

# The model does not improve, fails to generalize to the validation set. At
# this point, I am concerned that maybe there is something not-intuitive
# happening during training, that I have a bug somewhere. I will reimplement
# the pipeline in pure pytorch and see what results I get.
02a_train_on_spectrograms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import sys
sys.path.append('..')

from deepgraph.utils.logging import log
from deepgraph.utils.common import batch_parallel, ConfigMixin, shuffle_in_unison_inplace, pickle_dump
from deepgraph.utils.image import batch_pad_mirror
from deepgraph.constants import *
from deepgraph.conf import rng

from deepgraph.pipeline import Processor, Packet
# -

from deepgraph.nn.init import *

# NOTE(review): `np`, `time`, and the LOG_LEVEL_* / PHASE_* constants are used
# below but not imported explicitly here -- presumably they arrive via the
# `from deepgraph.constants import *` wildcard; confirm.


class Transformer(Processor):
    """
    Apply online random augmentation.

    Pipeline stage that pulls (data, label) packets, optionally subtracts a
    precomputed mean image and scales by ``std``, applies random horizontal
    flips and per-channel RGB jitter during training, and pushes the
    transformed packet downstream.
    """
    def __init__(self, name, shapes, config, buffer_size=10):
        super(Transformer, self).__init__(name, shapes, config, buffer_size)
        # Mean image; loaded lazily in init() from the "mean_file" config key.
        self.mean = None

    def init(self):
        """Load the mean image if a mean file was configured."""
        if self.conf("mean_file") is not None:
            self.mean = np.load(self.conf("mean_file"))
        else:
            log("Transformer - No mean file specified.", LOG_LEVEL_WARNING)

    def process(self):
        """Pull one packet, augment it according to its phase, push it on.

        Returns False when no packet is available, True otherwise.
        """
        packet = self.pull()
        # Return if no data is there
        if not packet:
            return False
        # Unpack
        data, label = packet.data
        # Do processing
        log("Transformer - Processing data", LOG_LEVEL_VERBOSE)
        # Target crop size (most cropping below is currently disabled).
        h = 240
        w = 320
        start = time.time()
        # Mean
        if packet.phase == PHASE_TRAIN or packet.phase == PHASE_VAL:
            data = data.astype(np.float32)
            if self.mean is not None:
                std = self.conf("std")
                for idx in range(data.shape[0]):
                    # Subtract mean
                    data[idx] = data[idx] - self.mean.astype(np.float32)
                    if std is not None:
                        data[idx] = data[idx] * std
            if self.conf("offset") is not None:
                label -= self.conf("offset")
        if packet.phase == PHASE_TRAIN:
            # Do elementwise operations
            data_old = data
            label_old = label
            data = np.zeros((data_old.shape[0], data_old.shape[1], h, w), dtype=np.float32)
            label = np.zeros((label_old.shape[0], h, w), dtype=np.float32)
            for idx in range(data.shape[0]):
                # Rotate
                # We rotate before cropping to be able to get filled corners
                # Maybe even adjust the border after rotating
                deg = np.random.randint(-5,6)
                # Operate on old data. Careful - data is already in float so we need to normalize and rescale afterwards
                # data_old[idx] = 255. * rotate_transformer_rgb_uint8(data_old[idx] * 0.003921568627, deg).astype(np.float32)
                # label_old[idx] = rotate_transformer_scalar_float32(label_old[idx], deg)
                # Take care of any empty areas, we crop on a smaller surface depending on the angle
                # TODO Remove this once loss supports masking
                shift = 0 #np.tan((deg/180.) * math.pi)
                # Random crops
                #cy = rng.randint(data_old.shape[2] - h - shift, size=1)
                #cx = rng.randint(data_old.shape[3] - w - shift, size=1)
                data[idx] = data_old[idx]
                label[idx] = label_old[idx]
                # Flip horizontally with probability 0.5
                p = rng.randint(2)
                if p > 0:
                    data[idx] = data[idx, :, :, ::-1]
                    label[idx] = label[idx, :, ::-1]
                # RGB we mult with a random value between 0.8 and 1.2
                r = rng.randint(80,121) / 100.
                g = rng.randint(80,121) / 100.
                b = rng.randint(80,121) / 100.
                data[idx, 0] = data[idx, 0] * r
                data[idx, 1] = data[idx, 1] * g
                data[idx, 2] = data[idx, 2] * b
            # Shuffle
            data, label = shuffle_in_unison_inplace(data, label)
        elif packet.phase == PHASE_VAL:
            # Center crop
            pass
            #cy = (data.shape[2] - h) // 2
            #cx = (data.shape[3] - w) // 2
            #data = data[:, :, cy:cy+h, cx:cx+w]
            #label = label[:, cy:cy+h, cx:cx+w]
        end = time.time()
        log("Transformer - Processing took " + str(end - start) + " seconds.", LOG_LEVEL_VERBOSE)
        # Try to push into queue as long as thread should not terminate
        self.push(Packet(identifier=packet.id, phase=packet.phase, num=2, data=(data, label)))
        return True

    def setup_defaults(self):
        """Register config defaults: no mean file, no label offset, std 1.0."""
        super(Transformer, self).setup_defaults()
        self.conf_default("mean_file", None)
        self.conf_default("offset", None)
        self.conf_default("std", 1.0)


# +
from theano.tensor.nnet import relu

from deepgraph.graph import *
from deepgraph.nn.core import *
from deepgraph.nn.conv import *
from deepgraph.nn.loss import *
from deepgraph.solver import *
from deepgraph.nn.init import *

from deepgraph.pipeline import Optimizer, H5DBLoader, Pipeline


def build_u_graph():
    """Build the U-Net-style depth regression graph.

    Encoder (conv_1..pool_8) downsamples 240x320 RGB input; a dense
    bottleneck (fc_8/fc_9) is reshaped back to a feature map; the decoder
    (up_11..conv_23) upsamples while concatenating encoder skip connections
    (concat_11/14/17/20). Trained with a Euclidean loss against the depth
    label; RMSE is reported during training.
    """
    graph = Graph("u_depth")
    """
    Inputs
    """
    data = Data(graph, "data", T.ftensor4, shape=(-1, 3, 240, 320))
    label = Data(graph, "label", T.ftensor3, shape=(-1, 1, 240, 320), config={
        "phase": PHASE_TRAIN
    })
    """
    Contractive part
    """
    conv_1 = Conv2D(
        graph,
        "conv_1",
        config={
            "channels": 64,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_2 = Conv2D(
        graph,
        "conv_2",
        config={
            "channels": 64,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    pool_2 = Pool(graph, "pool_2", config={
        "kernel": (2, 2)
    })
    conv_3 = Conv2D(
        graph,
        "conv_3",
        config={
            "channels": 128,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_4 = Conv2D(
        graph,
        "conv_4",
        config={
            "channels": 128,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    pool_4 = Pool(graph, "pool_4", config={
        "kernel": (2, 2)
    })
    conv_5 = Conv2D(
        graph,
        "conv_5",
        config={
            "channels": 256,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_6 = Conv2D(
        graph,
        "conv_6",
        config={
            "channels": 256,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    pool_6 = Pool(graph, "pool_6", config={
        "kernel": (2, 2)
    })
    conv_7 = Conv2D(
        graph,
        "conv_7",
        config={
            "channels": 512,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_8 = Conv2D(
        graph,
        "conv_8",
        config={
            "channels": 512,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    pool_8 = Pool(graph, "pool_8", config={
        "kernel": (2, 2)
    })
    """
    conv_9 = Conv2D(
        graph,
        "conv_9",
        config={
            "channels": 1024,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_10 = Conv2D(
        graph,
        "conv_10",
        config={
            "channels": 1024,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    """
    fl = Flatten(graph, "fl", config={
        "dims": 2
    })
    fc_8 = Dense(graph, "fc_8", config={
        "out": 4096,
        "activation": relu,
        "weight_filler": xavier(),
        "bias_filler": constant(0.1)
    })
    dp_8 = Dropout(graph, "dp_8")
    fc_9 = Dense(graph, "fc_9", config={
        "out": 19200,
        "activation": relu,
        "weight_filler": xavier(),
        "bias_filler": constant(0.1)
    })
    dp_9 = Dropout(graph, "dp_9")
    rs_10 = Reshape(graph, "rs_10", config={
        "shape": (-1, 64, 15, 20)
    })
    """
    Expansive path
    """
    up_11 = Upsample(graph, "up_11", config={
        "kernel": (2, 2)
    })
    conv_11 = Conv2D(
        graph,
        "conv_11",
        config={
            "channels": 512,
            "kernel": (3, 3),
            "border_mode": 1,
            "weight_filler": xavier(),
            "bias_filler": constant(0)
        }
    )
    conv_12 = Conv2D(
        graph,
        "conv_12",
        config={
            "channels": 512,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_13 = Conv2D(
        graph,
        "conv_13",
        config={
            "channels": 512,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    up_14 = Upsample(graph, "up_14", config={
        "kernel": (2, 2)
    })
    conv_14 = Conv2D(
        graph,
        "conv_14",
        config={
            "channels": 256,
            "kernel": (3, 3),
            "border_mode": 1,
            "weight_filler": xavier(),
            "bias_filler": constant(0)
        }
    )
    conv_15 = Conv2D(
        graph,
        "conv_15",
        config={
            "channels": 256,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_16 = Conv2D(
        graph,
        "conv_16",
        config={
            "channels": 256,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    up_17 = Upsample(graph, "up_17", config={
        "kernel": (2, 2)
    })
    conv_17 = Conv2D(graph, "conv_17", config={
        "channels": 128,
        "kernel": (3, 3),
        "border_mode": 1,
        "weight_filler": xavier(),
        "bias_filler": constant(0)
    })
    conv_18 = Conv2D(
        graph,
        "conv_18",
        config={
            "channels": 128,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_19 = Conv2D(
        graph,
        "conv_19",
        config={
            "channels": 128,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    up_20 = Upsample(graph, "up_20", config={
        "mode": "constant",
        "kernel": (2, 2)
    })
    conv_20 = Conv2D(graph, "conv_20", config={
        "channels": 64,
        "kernel": (3, 3),
        "border_mode": 1,
        "weight_filler": xavier(),
        "bias_filler": constant(0)
    })
    conv_21 = Conv2D(
        graph,
        "conv_21",
        config={
            "channels": 64,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_22 = Conv2D(
        graph,
        "conv_22",
        config={
            "channels": 64,
            "kernel": (3, 3),
            "border_mode": (1, 1),
            "activation": relu,
            "weight_filler": xavier(gain="relu"),
            "bias_filler": constant(0)
        }
    )
    conv_23 = Conv2D(
        graph,
        "conv_23",
        config={
            "channels": 1,
            "kernel": (1, 1),
            "activation": None,
            "weight_filler": xavier(),
            "bias_filler": constant(0)
        }
    )
    """
    Feed forward nodes
    """
    concat_20 = Concatenate(graph, "concat_20", config={
        "axis": 1
    })
    concat_17 = Concatenate(graph, "concat_17", config={
        "axis": 1
    })
    concat_14 = Concatenate(graph, "concat_14", config={
        "axis": 1
    })
    concat_11 = Concatenate(graph, "concat_11", config={
        "axis": 1
    })
    """
    Losses / Error
    """
    loss = EuclideanLoss(graph, "loss")
    error = MSE(graph, "mse", config={
        "root": True,
        "is_output": True,
        "phase": PHASE_TRAIN
    })
    """
    Make connections
    """
    data.connect(conv_1)
    conv_1.connect(conv_2)
    conv_2.connect(concat_20)
    conv_2.connect(pool_2)
    pool_2.connect(conv_3)
    conv_3.connect(conv_4)
    conv_4.connect(concat_17)
    conv_4.connect(pool_4)
    pool_4.connect(conv_5)
    conv_5.connect(conv_6)
    conv_6.connect(concat_14)
    conv_6.connect(pool_6)
    pool_6.connect(conv_7)
    conv_7.connect(conv_8)
    conv_8.connect(concat_11)
    conv_8.connect(pool_8)
    pool_8.connect(fl)
    fl.connect(fc_8)
    fc_8.connect(dp_8)
    dp_8.connect(fc_9)
    fc_9.connect(dp_9)
    dp_9.connect(rs_10)
    rs_10.connect(up_11)
    up_11.connect(conv_11)
    conv_11.connect(concat_11)
    concat_11.connect(conv_12)
    conv_12.connect(conv_13)
    conv_13.connect(up_14)
    up_14.connect(conv_14)
    conv_14.connect(concat_14)
    concat_14.connect(conv_15)
    conv_15.connect(conv_16)
    conv_16.connect(up_17)
    up_17.connect(conv_17)
    conv_17.connect(concat_17)
    concat_17.connect(conv_18)
    conv_18.connect(conv_19)
    conv_19.connect(up_20)
    up_20.connect(conv_20)
    conv_20.connect(concat_20)
    concat_20.connect(conv_21)
    conv_21.connect(conv_22)
    conv_22.connect(conv_23)
    conv_23.connect(loss)
    label.connect(loss)
    conv_23.connect(error)
    label.connect(error)

    return graph


if __name__ == "__main__":
    batch_size = 4
    chunk_size = 10*batch_size
    transfer_shape = ((chunk_size, 3, 240, 320), (chunk_size, 240, 320))

    g = build_u_graph()

    # Build the training pipeline
    db_loader = H5DBLoader("db", ((chunk_size, 3, 480, 640), (chunk_size, 1, 480, 640)), config={
        "db": "/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.hdf5",
        # "db": '../data/nyu_depth_unet_large.hdf5',
        "key_data": "images",
        "key_label": "depths",
        "chunk_size": chunk_size
    })
    transformer = Transformer("tr", transfer_shape, config={
        # Measured empirically for the data-set
        # "offset": 2.7321029
        "mean_file": "/home/ga29mix/nashome/data/nyu_depth_v2_combined_50.npy",
    })
    optimizer = Optimizer("opt", g, transfer_shape, config={
        "batch_size": batch_size,
        "chunk_size": chunk_size,
        "learning_rate": 0.0001,# for step 1
        # "learning_rate": 0.00001, # for step 2
        "momentum": 0.9,
        "weight_decay": 0.0005,
        "print_freq": 20,
        "save_freq": 15000,
        "weights": "/data/vnet2_pretrained_with_low_lr_batch2_iter_18000.zip",
        "save_prefix": "/data/vnet2_pretrained_with_low_lr_step2"
    })

    p = Pipeline(config={
        "validation_frequency": 20,
        "cycles": 3100
    })
    p.add(db_loader)
    p.add(transformer)
    p.add(optimizer)
    p.run()
# -

# %matplotlib inline
import matplotlib.pyplot as plt
# Plot loss and RMSE curves collected by the optimizer (Python 2 print).
l = np.array([s["loss"] for s in optimizer.losses])
e = np.array([s["mse"] for s in optimizer.losses])
print l.mean()
plt.plot(l)
plt.show()
plt.plot(e)
plt.show()
experiments/vnet_2_pretrained_with_low_lr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Orphaned Blocks Analyzer
# Chart the distribution of orphaned blocks, show top winners and losers.

# +
import glob
import json

from pandas import DataFrame
from pandas import json_normalize
import pandas
import requests


# Load blocks from disk into dataframe
def load_blocks_from_disk(path_to_blocks="./archive-blocks/"):
    """Read every ``*.json`` block file under `path_to_blocks` into a list of
    parsed block dicts."""
    block_files = glob.glob(path_to_blocks + "*.json")
    blocks = []
    for file in block_files:
        with open(file) as fp:
            blocks.append(json.load(fp))
    return blocks


# GraphQL query template. The block limit is interpolated by
# load_blocks_from_block_explorer: previously it was hard-coded to 4000,
# which silently ignored the function's `limit` argument.
blocks_query = '''
query BlocksQuery {
  blocks(limit: %d) {
    protocolState {
      consensusState {
        slot
        blockHeight
        blockchainLength
      }
    }
    canonical
    creator
    stateHash
    receivedTime
    dateTime
  }
}
'''


def load_blocks_from_block_explorer(url="https://graphql.minaexplorer.com/", limit=4000):
    """Fetch up to `limit` blocks from the Mina block explorer GraphQL API and
    flatten each into a dict of the fields used by this notebook.

    The default of 4000 preserves the value previously hard-coded in the
    query, so existing calls with no arguments behave as before.
    """
    r = requests.post(url, json={'query': blocks_query % limit})
    payload = json.loads(r.text)
    blocks = payload["data"]["blocks"]
    cleaned = []
    for block in blocks:
        cleaned.append({
            "slot": block["protocolState"]["consensusState"]["slot"],
            "blockHeight": block["protocolState"]["consensusState"]["blockHeight"],
            "canonical": block["canonical"],
            "creator": block["creator"],
            "stateHash": block["stateHash"],
            "receivedTime": block["receivedTime"],
            "dateTime": block["dateTime"],
        })
    return cleaned


blocks = load_blocks_from_block_explorer()
print(len(blocks))
df = DataFrame(blocks)
display(df)

# +
# Count how many (possibly competing) blocks were produced per slot
vc = df["slot"].value_counts().reset_index(name="count")
pandas.set_option('display.max_rows', 500)
pandas.set_option('display.max_columns', 500)
pandas.set_option('display.width', 1000)
vc
# -

fullSlots = df.slot.unique()
handicap = 1000
nFullSlots = len(df.slot.unique())
max_slot = 4324
# max_slot - (count of unique slots) = nEmptySlots
emptySlots = max_slot - nFullSlots - handicap
ratioEmpty = emptySlots/(max_slot-handicap)
print(f"Total Slots: {max_slot}")
print(f"Slot Handicap: {handicap}")
print(f"Filled Slots: {nFullSlots}")
print(f"Empty Slots: {emptySlots}")
print(f"Ratio Empty: {ratioEmpty}")

import plotly.express as px
fig = px.bar(vc, x="index", y="count")
fig.show()
notebooks/OrphanedBlocks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Specviz2d Demonstration Notebook # This notebook demonstrates the Specviz2d API in the Notebook setting. General documentation about Jdaviz UI and interactions can be found here: https://jdaviz.readthedocs.io/en/latest/index.html # ## Create Specviz2d via Helper from jdaviz import Specviz2d specviz2d = Specviz2d() # ## Display Specviz2d specviz2d.app # ## Load a File from astropy.utils.data import download_file fn = download_file('https://stsci.box.com/shared/static/exnkul627fcuhy5akf2gswytud5tazmw.fits', cache=True) specviz2d.load_data(fn)
notebooks/Specviz2dExample.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Process and Plot _siextentn_ from CMIP6 historical experiments
#
# ## Introduction
# This notebook describes how to download, process and plot northern hemisphere sea ice extent from CMIP6 historical experiments. I only include the first ensemble member from each model. An alternative approach would be to use all ensemble members and calculate the model ensemble mean for models with multiple ensembles.
#
# Data are processed into a single `pandas DataFrame` and written to a csv file `siextentn.CMIP6.historical.csv`. A plot is generated and saved as `siextentn.CMIP6.historical.png`.
#
# ## Data
# I use model files downloaded from `esfg-node.llnl.gov`. The archive can be searched and files downloaded using the ESGF Search RESTful API. Search strings can be entered into a browser as a URL or performed using the `python` `requests` module.
#
# An example search for `project=CMIP6`, `variable_id=siextentn`, `table_id=SImon`, `experiment_id=historical`, and `member_id=r1i1p1f1` (the first ensemble member) looks like.
#
#
#
# A `wget` script can be generated and downloaded by copy and pasting the following URL into a browser.
#
# https://esgf-node.llnl.gov/esg-search/wget?limit=500&project=CMIP6&variable_id=siextentn&table_id=SImon&experiment_id=historical&member_id=r1i1p1f1
#
# This automatically downloads a `wget` script. See https://www.earthsystemcog.org/projects/cog/doc/wget for further details.
#
# You will need an ESGF login and OpenID to get the data. This is easily set up.
#
# # Searching for data
#
# I use the ESGF API to search for data.

# +
import glob
import os
import sys
import calendar
import itertools

import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

sys.path.append('../cmip6')
import munge

DATADIR = '/home/apbarret/Data/CMIP6'  # Change this to the location in which you ran the wget script
VARIABLE = 'siextentn'
# -

# ## Directory structure and processing
# Data for each model are contained in one or more files. The download script places are the files in the directory in which the script was run. Running the following cell tidys things up placing files in a directory tree by `variable`, `table`, `model`, `experiment`, and `ensemble-member`.

# def clean_up_downloaded_files(datadir):
#     '''Moves ESGF CMIP6 files into a directory structure'''
#     filelist = glob.glob(os.path.join(datadir, '*.nc'))
#     if filelist:
#         for f in filelist:
#             variable, table, model, experiment, member, grid, time_range = f.split('_')
#             dirpath = os.path.join(datadir, variable, table, model, experiment, member)
#             try:
#                 os.makedirs(dirpath)
#             except FileExistsError:
#                 pass  # It is OK is directory path exists
#             except:
#                 print('Unexpected error: ', sys.exc_info()[0])
#                 raise
#             os.rename(f, os.path.join(dirpath, f))
#     else:
#         print(f'No files found in {datadir}: nothing to be done')
#
# clean_up_downloaded_files(DATADIR)

# Northern hemisphere sea ice extent files are time series of scalars; one data point represent sea ice extent in millions of square kilometers. I use `xarray` to read files for a single model ensemble member. These are then place in a `pandas DataFrame`. To facilitate this, I generate a dictionary using `munge.generate_catalog` with models as keys and lists of files as values. You can find details of the code in `../CMIP6/cmip6/munge.py`

historical_catalog = munge.generate_catalog('siextentn', 'SImon', 'historical', 'r1i1p1f1', datadir=DATADIR)
scenario_catalog = munge.generate_catalog('siextentn', 'SImon', 'ssp585', 'r1i1p1f1', datadir=DATADIR)

# Next I read the models using `xarray`. Some models (e.g. CESM2) use a non-standard calendar with no leap year. These data are y xarray as `CFTimeIndex` object. To facilitate plotting and allow data to be combined, I convert `time` indices from `CFTimeIndex` to `datetime64`. Time series are then concatenated into a `pandas` `DataFrame`. The time conversion may not be necessary but I get errors when trying to plot using `xarray.DataFrame.siextentn.plot()`. Timestamps for each month vary between models. Some models use day 15 of a month as the central date, others use day 16. When time series are concatenated into a single table, dates do not line up. So I use a helper function `_normalize_datetime` to set timestamps for all time steps to day 15 of each month. These tools are in `munge.py`.
#
# The `pandas` `Dataframe` containing all models is written to a `csv` file.

# def read_ensemble(model):
#     '''Reads file for a given model ensemble into an xarray Dataset'''
#     ds = xr.open_mfdataset(catalog[model], combine='by_coords')
#     if isinstance(ds.indexes['time'], xr.CFTimeIndex):
#         ds['time'] = ds.indexes['time'].to_datetimeindex()
#     return ds
#
#
# def _normalize_datetime(TimeIndex):
#     import datetime as dt
#     return [dt.datetime(y, m, 15) for y, m in zip(ts.index.year, ts.index.month)]
#
#
# def dataset2timeseries(ds):
#     '''Converts ds.siextent to a pandas timeseries. Times are set to midnight 00:00:00'''
#     ts = ds.siextentn.squeeze().to_series()
#     ts.index = _normalize_datetime(ts.index)
#     return ts

# +
import warnings
warnings.simplefilter("ignore")  # Brute force way to ignore RuntimeWarning about converting from noleap calendar

series = []
# FIX: this loop previously iterated `catalog.items()`, but no `catalog`
# variable is ever defined in this notebook (a NameError at run time). The
# cell builds the *historical* table (see the commented csv path below), so
# it must iterate `historical_catalog`.
for model, filelist in historical_catalog.items():
    ds = munge.read_ensemble(filelist)
    ts = munge.dataset2timeseries(ds[VARIABLE])
    ts.name = model
    series.append(ts)

warnings.simplefilter("default")  # Turn warnings back on

df = pd.concat(series, axis=1)
#df.to_csv(os.path.join(DATADIR, 'siextentn', 'SImon', 'siextentn.SImon.CMIP6.historical.csv'))
df.head()
# -

# The `csv` file is read using `pandas.read_csv` as follows.

historical_df = pd.read_csv(os.path.join(DATADIR, 'siextentn', 'SImon', 'siextentn.SImon.CMIP6.historical.csv'),
                            index_col=0, header=0, parse_dates=True)
historical_df.head()

# Using the same command, we can read data for scenario SSP5-8.5

scenario_df = pd.read_csv(os.path.join(DATADIR, 'siextentn', 'SImon', 'siextentn.SImon.CMIP6.ssp585.csv'),
                          index_col=0, header=0, parse_dates=True)
scenario_df.head()

# Not all models with _historical_ runs available have _ssp585_ runs. These models can be compared as follows:

hist_set = set(historical_df.columns)
scen_set = set(scenario_df.columns)
joint_set = hist_set.intersection(scen_set)
print(f"{len(joint_set)} models have data for historical and SSP5-8.5 experiments.")
print(f"These models are {', '.join(joint_set)}.")

# `historical_df` and `scenario_df` can be concatenated using `pd.concat`. `.dropna` removes models that do not have data for SSP5-8.5.

concat_df = pd.concat([historical_df, scenario_df]).dropna(axis=1)

# A "quick and dirty" plot of northern hemisphere September sea ice extent is made.

# +
# Make color and symbols for models
it = itertools.product(['-', '--'], mcolors.TABLEAU_COLORS)
style, color = zip(*[next(it) for i in range(df.shape[1])])

fig, ax = plt.subplots(figsize=(10, 7))
concat_df[concat_df.index.month == 9].plot(ax = ax, linewidth=2, style=list(style), color=list(color))
ax.set_ylim(0, 12)
ax.set_xlim('1979-01-01', '2100-12-31');
ax.set_title("CMIP6 northern hemisphere sea ice extent: scenario is SSP5-8.5")
ax.set_ylabel("Extent $10^6 km^2$");
ax.legend(loc="upper left", bbox_to_anchor=(1,1))
ax.axhline(1., c='k', zorder=1)
fig.savefig(os.path.join(DATADIR, 'siextentn', 'SImon', 'siextentn.SImon.CMIP6.historical_and_ssp585.png'))
# -

# The mean seasonal cycle of sea ice extent for 1979 to 2014 is show below.

dfClim = historical_df['1979':'2014'].groupby(historical_df['1979':'2014'].index.month).mean()

fig, ax = plt.subplots(figsize=(8,9))
dfClim.plot(ax=ax, linewidth=2, style=list(style), color=list(color))
ax.set_ylim(0, 20)
ax.set_ylabel("Extent $10^6 km^2$")
ax.set_xticks(range(1,13))
ax.set_xticklabels([m[0] for m in calendar.month_abbr if len(m) > 0]);
notebooks/process_and_plot_siextentn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 32-bit
#     metadata:
#       interpreter:
#         hash: dcfa17eba1dc66845e0904f61caf004065b70fa6c516601030b63e53c64813c1
#     name: python3
# ---

import math
import numpy as np
import numpy_financial as npf


# Define functions
def tasa_equivalente(tasa: float, nper: int, mper: int) -> float:
    '''Convert an effective rate between two compounding period lengths.

    Returns the rate over ``nper`` time units that is financially
    equivalent to ``tasa`` compounded over ``mper`` time units,
    i.e. ``(1 + tasa) ** (nper / mper) - 1``.
    '''
    return (1+tasa)**(nper/mper)-1


# 1
periodos = 10
valor_futuro = 380_000_000
tasa_semestral = 6.8/100
# Semi-annual effective rate expressed as an effective annual rate (12 vs 6 months).
tasa_anual = tasa_equivalente(tasa_semestral, 12, 6)
# Present value of a single future amount after `periodos` periods
# (was a hard-coded 10; now uses the variable defined for that purpose).
valor_presente = npf.pv(tasa_anual, periodos, 0, valor_futuro)
print(f"deberías invertir : {-valor_presente:,.2f}")

# 5
tasa_anual = 19/100
# Effective daily rate equivalent to the annual rate.
tasa_diaria = tasa_equivalente(tasa_anual, 1, 365)
print(f"tasa diaria equivalente : {100*tasa_diaria:,.4f}%")

# 7
tasa_anual_nominal = 32/100
# Nominal annual rate compounded quarterly -> effective quarterly rate.
tasa_trimestral_efectiva = tasa_anual_nominal/4
tae = tasa_equivalente(tasa_trimestral_efectiva, 4, 1)
print(f"tasa anual equivalente : {100*tae:,.2f}%")

# 8
capital_objetivo = 1_000_000
trm = 3_953.88
tae = 3/100
pesos = capital_objetivo * trm * (1+tae)
# Bug fix: `pesos` is an amount of money, not a rate -- the old label
# ("tasa anual equivalente : ...%") was copy-pasted from exercise 7.
print(f"capital necesario en pesos : {pesos:,.2f}")
5_Solucion_taller_3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Writing and displaying depth file data

# Note that a lot of functions assume a square grid with equal x and y gridsteps! Also assumes grid width and length are even (cleanly divisible by 2), so watch it.

# %matplotlib widget
from JulesD3D.SlopeBreak import SlopeBreak
from JulesD3D.utils import quickDF
from os import path, mkdir

# %load_ext autoreload
# %reload_ext autoreload

# +
# Better to give channel width an int of x gridsteps width!
# TODO improvement: Define whole channel dimensions in gridsteps!

# Geometry of the incised channel (metres / degrees).
channel = {
    "width": 200,     # [m] has to be at least x gridstep wide
    "slope": 0.85,    # [deg]
    "length": 15000,  # [m] has to be multiple of y gridstep
    "depth": 150,     # [m]
#     "bank_depth": 100 # [m] Ignored if channel depth is given
}

# Overall model grid extents and resolution.
grid = {
    "length": 36000,    # y [m] has to be multiple of y gridstep
    "width": 26000,     # x [m] has to be multiple of x gridstep
    "x_gridstep": 200,  # [m]
    "y_gridstep": 200,  # [m]
    "dims": [],         # presumably filled in by SlopeBreak -- TODO confirm
}

bathymetry = {
    "initial_depth": 150,  # [m]
    "slope": 0.2,          # [m] the 'basin' slope
}

folder = '/Users/julesblom/ThesisPython/generated'

# Human-readable filename components derived from the grid dimensions.
width_in_km_str = str(int(grid['width']/1000))
length_in_km_str = str(int(grid['length']/1000))
grid_str = str(grid['x_gridstep'])

dep_filename = f"gen_{length_in_km_str}_by_{width_in_km_str}km.dep"
grid_filename = f"{length_in_km_str}km_{width_in_km_str}km_W60Channel.grd"
enc_filename = f"{length_in_km_str}km_1cellchannel_15km.enc"

# Output paths for the depth (.dep), grid (.grd) and enclosure (.enc) files
# (presumably Delft3D model input -- confirm against JulesD3D docs).
filenames = {
    "grid": path.join(folder, grid_filename),
    "dep": path.join(folder, dep_filename),
    "enc": path.join(folder, enc_filename)
}
# -

# # Remember to change MNKMax in .mdf and location in .bnd files

new_model = SlopeBreak(filenames=filenames, channel=channel, bathymetry=bathymetry, grid=grid)
new_model

new_model.makeModelAtOnce(new_model)

# ## Easy copy new boundary conditions strings

dims = new_model.grid['dims']
xDim, yDim = dims[0]
xDim, yDim

print(f'''For mdf file MNKmax = {xDim+1} {yDim+1} 80 ''')

# +
# Discharge placed at the mid-point of the x axis -- TODO confirm intent.
discharge_location = int((xDim-1)/2+1)

new_boundary_file = f'''Discharge Q T {discharge_location} 1 {discharge_location} 1 2.0000000e+002 3d-profile deep Z T 2 {yDim+1} {xDim} {yDim+1} 2.0000000e+002'''

print(new_boundary_file)
# -

new_model.plot3D()

new_model.plotCrossSection()

# +
# quickDF(new_model.bathymetry['depth'])
# -

new_model.plotGrid()

new_model.plotDepthAndGrid()

# <hr/>

# ### Generate bathymetries with different slopes

# +
# channel_slopes = [1.00, 1.25, 1.5, 1.75] # [degrees]
# +
# for channel_slope in channel_slopes:
#     print("---------- Making depth model with slope:", channel_slope, '----------')
#     # modify slope in channel object defined above!
#     channel['slope'] = channel_slope
# #     bathymetry['slope'] = basin_slope
#     new_folder = '/Users/julesblom/ThesisPython/generated/Slope{}/'.format(str(channel_slope))
#     try:
#         print("Making new folder:", new_folder)
#         mkdir(new_folder)
#     except OSError:
#         dir(OSError)
#         print("Creation of the directory failed, it probably already exists!")
#     new_filenames = {
#         "grid": path.join(new_folder, '45km_300m_W60Channel.grd'),
#         "dep": path.join(new_folder, 'gen_18_by_45km.dep'), # folder_name + '/slopeBreakSmooth.dep'
# #         "enc": path.join(new_folder, '45km_1cellchannel_15km.enc')
#     }
#     new_slope_model = DepthModel(filenames=new_filenames, channel=channel, bathymetry=bathymetry, grid=grid)
#     new_slope_model.makeModelAtOnce()
# -

# # PyVista 3D generated bathymetry

import pyvista as pv
# from JulesD3D.plotPyVista import makeBottomSurface
from cmocean.cm import deep_r
# from JulesD3D.enc import Enclosure
import pandas as pd
from numpy import ones

# +
# Shared scalar-bar styling for the PyVista plots below.
sargs = dict(height=0.25, vertical=True, position_x=0.05, position_y=0.05)
bottom_surface_scalar_args = {'vertical': True}
annotations = {-350: "[m]"}
# +
# Trim the outer ring of depth cells; z is plotted as negative depth (down).
depth = new_model.bathymetry['depth'][1:-1,1:-1]
plot_x_mesh = new_model.grid['x_grid'][:-1,:-1]
plot_y_mesh = new_model.grid['y_grid'][:-1,:-1]
plot_z_mesh = -depth

bottom_surface = pv.StructuredGrid(plot_x_mesh, plot_y_mesh, plot_z_mesh)
bottom_surface.field_arrays['depth'] = -depth.T
bottom_surface
# -

p = pv.Plotter(notebook=False)
p.add_mesh(bottom_surface, show_edges=False, cmap=deep_r, scalar_bar_args=bottom_surface_scalar_args, annotations=annotations)
# p.add_lines(plot_enclosure)
p.show_grid()
# Exaggerate the vertical scale so the bathymetry relief is visible.
p.set_scale(zscale=25)
p.show()
p.close()
GenerateDepthAndGridFile.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Selections
#
# **Learning Objective:** Use selections to add interactivity to charts, through data queries, conditional encodings, data filtering, etc.
#
# These examples are taken from the Altair documentation:
#
# https://altair-viz.github.io/user_guide/selections.html

import altair as alt
# Serialize data to JSON files instead of embedding it in the spec.
alt.data_transformers.enable('json')

from vega_datasets import data

# ## Example: linked-brush scatter plot

# Let's work through adding interactivity to a basic scatter plot. Here is the initial scatter plot with the cars dataset:

# +
cars = data.cars.url

alt.Chart(cars).mark_point().encode(
    x='Miles_per_Gallon:Q',
    y='Horsepower:Q',
    color='Origin:N'
)
# -

# All interactions begin with a selection object. Here we create a selection of type *interval*, which will allow the user to select a range of data.

brush = alt.selection_interval()  # selection of type "interval"

# The selection can then be added to the chart as follows:

alt.Chart(cars).mark_point().encode(
    x='Miles_per_Gallon:Q',
    y='Horsepower:Q',
    color='Origin:N'
).properties(
    selection=brush
)

# Now that the chart knows about the selection, we can do conditional encoding, to color the selected points differently from the unselected ones:

alt.Chart(cars).mark_point().encode(
    x='Miles_per_Gallon:Q',
    y='Horsepower:Q',
    # Selected points keep the Origin color; everything else turns gray.
    color=alt.condition(brush, 'Origin:N', alt.value('lightgray'))
).properties(
    selection=brush
)

# Selections link across different subcharts:

# +
chart = alt.Chart(cars).mark_point().encode(
    y='Horsepower:Q',
    color=alt.condition(brush, 'Origin:N', alt.value('lightgray'))
).properties(
    width=250,
    height=250,
    selection=brush
)

# Brushing in either panel highlights the same cars in both.
chart.encode(x='Acceleration:Q') | chart.encode(x='Miles_per_Gallon:Q')
# -

# The selection can be modified to only affect the x encoding:

# +
brush = alt.selection_interval(encodings=['x'])

chart = alt.Chart(cars).mark_point().encode(
    y='Horsepower:Q',
    color=alt.condition(brush, 'Origin:N', alt.value('lightgray'))
).properties(
    width=250,
    height=250,
    selection=brush
)

chart.encode(x='Acceleration:Q') | chart.encode(x='Miles_per_Gallon:Q')
# -

# ## Selection types: interval, single, multi

def make_example(selector):
    """Return a count heatmap of Cylinders x Origin wired to *selector*.

    Cells inside the given selection keep the 'count()' color scale;
    cells outside it are drawn light gray.
    """
    cars = data.cars.url
    return alt.Chart(cars).mark_rect().encode(
        x="Cylinders:O",
        y="Origin:N",
        color=alt.condition(selector, 'count()', alt.value('lightgray'))
    ).properties(
        width=300,
        height=180,
        selection=selector
    )

# ### Interval selections

interval = alt.selection_interval()
make_example(interval)

# empty='none' means no cells are highlighted until a selection exists.
interval_x = alt.selection_interval(encodings=['x'], empty='none')
make_example(interval_x)

# +
# Binding an interval selection to 'scales' gives pan/zoom behavior.
scales = alt.selection_interval(bind='scales')

alt.Chart(cars).mark_point().encode(
    x='Horsepower:Q',
    y='Miles_per_Gallon:Q',
    color='Origin:N'
).properties(
    selection=scales
)
# -

# ## Single selections

# A *single* selection enables a user to select a single mark or data point using mouse actions.

single = alt.selection_single()
make_example(single)

# nearest=True snaps the selection to the closest point under the cursor.
single_nearest = alt.selection_single(on='mouseover', nearest=True)
make_example(single_nearest)

# ### Multiple selections

# A *multiple* selection enables you to select multiple marks or data points on a chart using mouse actions.

multi = alt.selection_multi()
make_example(multi)

multi_mouseover = alt.selection_multi(on='mouseover', toggle=False, empty='none')
make_example(multi_mouseover)
Content/Visualize/07-Selections.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Question 1:
# Is there a statistical difference in the odds of winning a game when a team is playing in front of their home crowd?
#
# ## Hypothesis:
# **Null Hypothesis** H<sub>0</sub> = There is *no statistical difference* in the odds of winning a game when a team is playing in front of their home crowd
#
# **Alternative Hypothesis** H<sub>a</sub> = There is *always* a difference in the odds of winning a game when a team is playing in front of their home crowd.

# +
import pandas as pd
import numpy as np
import psycopg2
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
# -

database_name = 'football_db'
conn = psycopg2.connect(f'dbname={database_name}')
cur = conn.cursor()

# +
columns = ['id', 'home_goal', 'away_goal', 'result']

# Label every match from the home team's perspective: Win / Lose / Draw.
query = f"""
    SELECT match_api_id, home_team_goal, away_team_goal,
    CASE
    WHEN home_team_goal > away_team_goal THEN 'Win'
    WHEN home_team_goal < away_team_goal THEN 'Lose'
    ELSE 'Draw'
    END AS result
    FROM match
"""

cur.execute(query)
data = cur.fetchall()
# -

df = pd.DataFrame(data, columns=columns)
df.head()

total_wins = len(df[df['result'] == 'Win'])
total_wins

total_games = len(df['result'])
total_games

# set the (μ) value -- population home-win proportion
mu = total_wins / total_games

sample_size = 100
number_of_games = 1000

# Bootstrap `sample_size` win rates, each from `number_of_games` games
# drawn with replacement.
samples = np.zeros(sample_size)
for i in range(sample_size):
    """Taking a sample size of the win rate for home games"""
    games = df.iloc[np.random.randint(low=0, high=len(df), size=number_of_games), :]
    win_rate = len(games[games['result'] == 'Win']) / number_of_games
    samples[i] = win_rate

samples

sample_mean = samples.mean()
sample_mean

std = np.std(samples, ddof=1)
std

# T-Test to compare the average mean to the population mean
t = (sample_mean - mu) / (std / np.sqrt(sample_size))
t

# Degrees of Freedom
df_value = sample_size - 1

# ### Calculating Critical T-Value

# If the test statistic is more extreme than the critical value, then the null hypothesis is rejected in favor of the alternative hypothesis. If the test statistic is not as extreme as the critical value, then the null hypothesis is not rejected.

t_crit = np.round(stats.t.ppf(1 - 0.05, df_value), 3)
t_crit

# +
results = stats.ttest_1samp(a=samples, popmean=mu)
print(results)
print('\n')

if (results[0]>t_crit) and (results[1]<0.05):
    print ("Null hypothesis rejected. Results are statistically significant with t-value =", round(results[0], 2),
           "and p-value =", np.round((results[1]), 4))
else:
    print ("Null hypothesis is Accepted")
# -

sns.set(color_codes=True)
sns.set(rc={'figure.figsize':(8,6)})
sns.distplot(samples)

# ## Result:
# While limiting alpha to 0.05, the P-value is > 0.05. This concludes that there is no statistical difference in the odds of winning a game in front of a home crowd, therefore we fail to reject the null hypothesis.

# ---

# ## Question 2:
# Is there a statistical difference in the odds of winning a game when the height of a team is taller than the other team?
#
# ## Hypothesis:
# **Null Hypothesis** H<sub>0</sub> = There is *no statistical difference* in the odds of winning a game dependent on a teams height.
#
# **Alternative Hypothesis** H<sub>a</sub> = There is *always* a difference in the odds of winning a game dependent on a teams height.

conn = psycopg2.connect('dbname=football_db')
cur = conn.cursor()

# +
columns = ['game_date', 'home_height', 'away_height', 'result' ]

# Average the 11 starters' heights for each side, and label the result
# from the home team's perspective.
query = f"""
    select m.date,
    (H1.height + H2.height + H3.height + H4.height + H5.height + H6.height + H7.height + H8.height + H9.height + H10.height + H11.height) / 11 H_HEIGHT,
    (A1.height + A2.height + A3.height + A4.height + A5.height + A6.height + A7.height + A8.height + A9.height + A10.height + A11.height) / 11 A_HEIGHT,
    CASE
    WHEN home_team_goal > away_team_goal THEN 'Win'
    WHEN home_team_goal = away_team_goal THEN 'Draw'
    ELSE 'Lose'
    END as result
    FROM Match M
    JOIN Player H1 ON M.home_player_1 = H1.player_api_id
    JOIN Player H2 ON M.home_player_2 = H2.player_api_id
    JOIN Player H3 ON M.home_player_3 = H3.player_api_id
    JOIN Player H4 ON M.home_player_4 = H4.player_api_id
    JOIN Player H5 ON M.home_player_5 = H5.player_api_id
    JOIN Player H6 ON M.home_player_6 = H6.player_api_id
    JOIN Player H7 ON M.home_player_7 = H7.player_api_id
    JOIN Player H8 ON M.home_player_8 = H8.player_api_id
    JOIN Player H9 ON M.home_player_9 = H9.player_api_id
    JOIN Player H10 ON M.home_player_10 = H10.player_api_id
    JOIN Player H11 ON M.home_player_11 = H11.player_api_id
    JOIN Player A1 ON M.away_player_1 = A1.player_api_id
    JOIN Player A2 ON M.away_player_2 = A2.player_api_id
    JOIN Player A3 ON M.away_player_3 = A3.player_api_id
    JOIN Player A4 ON M.away_player_4 = A4.player_api_id
    JOIN Player A5 ON M.away_player_5 = A5.player_api_id
    JOIN Player A6 ON M.away_player_6 = A6.player_api_id
    JOIN Player A7 ON M.away_player_7 = A7.player_api_id
    JOIN Player A8 ON M.away_player_8 = A8.player_api_id
    JOIN Player A9 ON M.away_player_9 = A9.player_api_id
    JOIN Player A10 ON M.away_player_10 = A10.player_api_id
    JOIN Player A11 ON M.away_player_11 = A11.player_api_id
"""

cur.execute(query)
data = cur.fetchall()
# -

df = pd.DataFrame(data, columns=columns)
df.head(5)

win_df = df[df['result']=='Win']

number_of_games = 1500
# Sample home-won games with replacement; compare home vs away heights.
sample_df = win_df.iloc[np.random.randint(low=0, high=len(win_df), size=number_of_games), :]

experimental = np.array(sample_df['home_height'])
control = np.array(sample_df['away_height'])

mean_home_height = sample_df['home_height'].mean()
mean_home_height

mean_away_height = sample_df['away_height'].mean()
mean_away_height

mean_home_height - mean_away_height

def variance(sample):
    """Return the sum of squared deviations from the sample mean.

    Note: this is the (un-normalized) sum of squares, not the variance
    divided by n; sample_variance() below does the pooling/normalizing.
    """
    sample_mean = np.mean(sample)
    return sum([(i - sample_mean) ** 2 for i in sample])

def sample_variance(sample1, sample2):
    """Return the pooled variance of the two samples (df = n1 + n2 - 2)."""
    n_1, n_2 = len(sample1), len(sample2)
    var_1, var_2 = variance(sample1), variance(sample2)
    return (var_1 + var_2)/((n_1 + n_2)-2)

def twosample_tstatistic(expr, ctrl):
    """Return the two-sample (pooled-variance) t statistic for expr vs ctrl."""
    exp_mean, ctrl_mean = np.mean(expr), np.mean(ctrl)
    samp_var = sample_variance(expr, ctrl)
    n_e, n_c = len(expr), len(ctrl)
    num = exp_mean - ctrl_mean
    denom = np.sqrt(samp_var * ((1/n_e)+(1/n_c)))
    return num / denom

t_stat = twosample_tstatistic(experimental, control)
t_stat

# +
def visualize_t(t_stat, n_control, n_experimental):
    """Plot the t distribution with the +/- t_stat cutoffs marked."""
    # initialize a matplotlib "figure"
    fig = plt.figure(figsize=(8,5))
    ax = fig.gca()
    # generate points on the x axis between -4 and 4:
    xs = np.linspace(-4, 4, 500)

    # use stats.t.pdf to get values on the probability density function for the t-distribution
    ys= stats.t.pdf(xs, (n_control+n_experimental-2), 0, 1)
    ax.plot(xs, ys, linewidth=3, color='darkred')

    ax.axvline(t_stat, color='black', linestyle='--', lw=5)
    ax.axvline(-t_stat, color='black', linestyle='--', lw=5)
    plt.show()
    return None

n_control = len(control)
n_experimental = len(experimental)
visualize_t(t_stat, n_control, n_experimental)
# -

# Two-tailed p-value from the t statistic.
stats.t.sf(abs(t_stat), len(experimental)+len(control)-1) * 2

stats.ttest_ind(experimental, control)

# ## Result:
# While limiting alpha to 0.05, The P-value is > 0.05. This concludes that there is no statistical difference when a team wins based on the height of teams players, therefore accepting the null hypothesis.

# ---

# ## Question 3:
# Is there a statistical difference in the odds of losing a game when the defense pressure of a team is higher than the other team?
#
# ## Hypothesis:
# **Null Hypothesis** H<sub>0</sub> = There is *no statistical difference* in the odds of losing a game dependent on a teams defense pressure.
#
# **Alternative Hypothesis** H<sub>a</sub> = There is *always* a difference in the odds of losing a game dependent on a teams defense pressure.

# +
conn = psycopg2.connect('dbname=football_db')
cur = conn.cursor()

# Join each match to both teams' attributes for the matching season year.
query = f"""
    SELECT M.date,
    CASE
    WHEN home_team_goal > away_team_goal THEN 'Win'
    WHEN home_team_goal = away_team_goal THEN 'Draw'
    ELSE 'Lose'
    END AS result,
    T.*, TT.*
    FROM Match M
    JOIN Team_Attributes AS T
    ON M.home_team_api_id = T.team_api_id
    AND SUBSTRING(M.season, 1, 4) = SUBSTRING(T.date, 1, 4)
    JOIN Team_Attributes AS TT
    ON M.away_team_api_id = TT.team_api_id
    AND SUBSTRING(M.season, 1, 4) = SUBSTRING(TT.date, 1, 4)
"""

cur.execute(query)
data = cur.fetchall()
# -

df = pd.DataFrame(data)
df.head()

# Drop all attribute columns except the two defence-pressure positions
# (column indexes are positional because the query selected T.*, TT.*).
df = df.drop(range(2,20), axis=1)
df = df.drop(range(21, 27), axis=1)
df = df.drop(range(27, 45), axis=1)
df = df.drop(range(46, 52), axis=1)
df.head()

df.columns = ['date', 'result', 'Home_DP', 'Away_DP']
df.head()

lose_df = df[df['result'] == 'Lose']
len(lose_df)

number_of_games = 500
sample_df = lose_df.iloc[np.random.randint(low=0, high=len(lose_df), size=number_of_games), :]

experimental = np.array(sample_df['Home_DP'])
control = np.array(sample_df['Away_DP'])

mean_home_dp = experimental.mean()
mean_home_dp

mean_away_dp = control.mean()
mean_away_dp

mean_home_dp - mean_away_dp

t_stat = twosample_tstatistic(experimental, control)
t_stat

n_control = len(control)
n_experimental = len(experimental)
visualize_t(t_stat, n_control, n_experimental)

stats.t.sf(abs(t_stat), len(experimental)+len(control)-1) * 2

stats.ttest_ind(experimental, control)

# ## Result:
# While limiting alpha to 0.05, The P-value is < 0.05. This concludes that there is a statistical difference when a team loses based on the teams' defense pressure, therefore rejecting the null hypothesis.

# ---

# ## Question 4:
# Is there a statistical difference in the odds of winning a game when the offensive attributes(passing & shooting) of a team is higher than the other team?
#
# ## Hypothesis:
# **Null Hypothesis** H<sub>0</sub> = There is *no statistical difference* in the odds of winning a game dependent on a teams offensive attributes.
#
# **Alternative Hypothesis** H<sub>a</sub> = There is *always* a difference in the odds of winning a game dependent on a teams offensive attributes.

# +
# NOTE(review): dbname here is 'football', but every other cell uses
# 'football_db' -- likely a typo; confirm which database actually exists.
conn = psycopg2.connect('dbname=football')
cur = conn.cursor()

query = f"""
    SELECT M.date,
    CASE
    WHEN home_team_goal > away_team_goal THEN 'Win'
    WHEN home_team_goal = away_team_goal THEN 'Draw'
    ELSE 'Lose'
    END AS result,
    T.buildupplaypassing + T.chancecreationshooting AS Home_Offense,
    TT.buildupplaypassing + TT.chancecreationshooting AS Away_Offense
    FROM Match M
    JOIN Team_Attributes AS T
    ON M.home_team_api_id = T.team_api_id
    AND SUBSTRING(M.season, 1, 4) = SUBSTRING(T.date, 1, 4)
    JOIN Team_Attributes AS TT
    ON M.away_team_api_id = TT.team_api_id
    AND SUBSTRING(M.season, 1, 4) = SUBSTRING(TT.date, 1, 4)
"""

cur.execute(query)
data = cur.fetchall()
# -

df = pd.DataFrame(data)
df.columns = ['date', 'result', 'Home_Offense', 'Away_Offense']
df.head()

win_df = df[df['result'] == 'Win']

number_of_games = 2000
sample_df = win_df.iloc[np.random.randint(low=0, high=len(win_df), size=number_of_games), :]

experimental = np.array(sample_df['Home_Offense'])
control = np.array(sample_df['Away_Offense'])

mean_home_offence = experimental.mean()
mean_home_offence

mean_away_offence = control.mean()
mean_away_offence

mean_home_offence - mean_away_offence

t_stat = twosample_tstatistic(experimental, control)
t_stat

n_control = len(control)
n_experimental = len(experimental)
visualize_t(t_stat, n_control, n_experimental)

stats.t.sf(abs(t_stat), len(experimental)+len(control)-1) * 2

stats.ttest_ind(experimental, control)

# ## Result:
# While limiting alpha to 0.05, The P-value is > 0.05. This concludes that there is no statistical difference when a team wins based on the teams' offensive attributes(passing & shooting), therefore accepting the null hypothesis.

# ---

# ## Question 5:
# Is there a statistical difference in the odds of winning a game when a team is playing in front of their home crowd vs away crowd?
#
# ## Hypothesis:
# **Null Hypothesis** H<sub>0</sub> = There is *no statistical difference* in the odds of winning a game when a team is playing in front of their home crowd
#
# **Alternative Hypothesis** H<sub>a</sub> = There is *always* a difference in the odds of winning a game when a team is playing in front of their home crowd or away crowd?.

# +
cur = conn.cursor()

query = f"""
    SELECT M.date,
    CASE
    WHEN home_team_goal > away_team_goal THEN 'Win'
    WHEN home_team_goal = away_team_goal THEN 'Draw'
    ELSE 'Lose'
    END AS result
    FROM Match M
"""

cur.execute(query)
data = cur.fetchall()
# -

df = pd.DataFrame(data, columns=['date', 'result'])
df.head()

# A home-side 'Lose' is an away-side win.
h_win_df = df[df['result'] == "Win"]
a_win_df = df[df['result'] == 'Lose']

total_games = len(df)
home_wins = len(h_win_df)
away_wins = len(a_win_df)

#H0: odds of winning at home is same with odds of winning at away is same
#H1: odds of winning at home is higher than odds of winning at away
#P(H) = P(A)
## mu = home_wins / total_games - away_wins / total_games
# Under H0 the expected difference in win rates is zero.
mu = 0

sample_size = 30
number_of_games = 500

# Bootstrap the home-minus-away win-rate difference.
samples = np.zeros(sample_size)
for i in range(sample_size):
    games = df.iloc[np.random.randint(low=0, high=len(df), size=number_of_games), :]
    h_win_rate = len(games[games['result'] == 'Win']) / number_of_games
    a_win_rate = len(games[games['result'] == 'Lose']) / number_of_games
    samples[i] = h_win_rate - a_win_rate

sample_mean = samples.mean()
sample_mean

std = np.std(samples, ddof=1)
std

t = (sample_mean - mu) / (std / np.sqrt(sample_size))
t

#degree of freedom
df_value = sample_size - 1

t_crit = np.round(stats.t.ppf(1 - 0.05, df_value), 3)
t_crit

# +
results = stats.ttest_1samp(a=samples, popmean=mu)
print(results)
print('\n')

if (results[0]>t_crit) and (results[1]<0.05):
    print ("Null hypothesis rejected. Results are statistically significant with t-value =", round(results[0], 2),
           "and p-value =", np.round((results[1]), 4))
else:
    print ("Null hypothesis is Accepted")
# -

sns.set(color_codes=True)
sns.set(rc={'figure.figsize':(8,6)})
sns.distplot(samples);

# ## Result:
# While limiting alpha to 0.05, the P-value is < 0.05. This concludes that there is a statistical difference between winning in front of a home crowd and winning away, therefore we reject the null hypothesis in favour of the alternative.
Hypothesis_Test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Find the index of an array such that the sum of elements left side of the index == the sum of elements on the right side of the index

# +
# Time Complexity : O(n^2)

def range_sum(arr, s, e):
    """Return the sum of arr[s:e].

    Renamed from `sum` -- the old name shadowed the builtin, which is
    exactly why the O(n) cell below could not call sum(arr).
    """
    total = 0
    for i in range(s, e):
        total += arr[i]
    return total


# The interactive driver runs in a notebook kernel (__name__ == "__main__")
# but is guarded so the functions can be imported without blocking on input().
if __name__ == "__main__":
    n = int(input("Number of elements in an array: "))
    arr = list(int(x) for x in input("Enter the elements with a space in between : ").strip().split(' '))

    # Bug fix: `flag` must be initialised before the loop, otherwise
    # `if not flag` raises NameError whenever no equilibrium index exists.
    flag = False
    for i in range(0, len(arr)):
        # Left side is arr[0:i], right side is arr[i+1:len(arr)].
        if range_sum(arr, 0, i) == range_sum(arr, i + 1, len(arr)):
            print(i)
            flag = True
            break
    if not flag:
        print("-1")
# -

# #### Sorting the array doesn't work here as it jumbles the indexes

# +
# Optimised version: one prefix-sum pass, O(n) time and O(1) extra space.
def equilibriumIndex(arr):
    """Return the first index i with sum(arr[:i]) == sum(arr[i+1:]), else -1.

    Fix: the previous version started comparing at index 1 and stopped
    before the last index, so it disagreed with the brute-force cell
    above for arrays whose pivot is the first or last element.  `sum`
    is the builtin again now that the shadowing helper was renamed.
    """
    total = sum(arr)
    left = 0
    for i, value in enumerate(arr):
        # Right-hand sum is total - left - value; no slicing needed.
        if left == total - left - value:
            return i
        left += value
    return -1


if __name__ == "__main__":
    n = int(input())
    arr = [int(i) for i in input().strip().split()]
    print(equilibriumIndex(arr))
# -
08 Time Complexity Improvement/8.3 Equilibrium Index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="FinoqdSheE7e" colab_type="code" colab={}
import tensorflow as tf
import tensorflow.keras
import numpy as np
import os
import pandas as pd

# + id="qEUzUiAdfAXq" colab_type="code" colab={}
# reviews.txt: one review per line; labels.txt: matching positive/negative labels.
with open('reviews.txt','r') as f:
    reviews=f.read()
with open('labels.txt','r') as f:
    labels=f.read()

# + id="KxpaKsDYftig" colab_type="code" outputId="53c31355-2744-49a2-8c54-4ce2986f38c2" colab={"base_uri": "https://localhost:8080/", "height": 54}
reviews[:2000]

# + [markdown] id="rWv7YcllCisG" colab_type="text"
# #Data Preprocessing
#

# + id="L0rKJchjf6Q5" colab_type="code" colab={}
# Strip punctuation, then split back into individual reviews and a word list.
from string import punctuation
all_text=''.join([c for c in reviews if c not in punctuation])
reviews = all_text.split('\n')

all_text = ' '.join(reviews)
words = all_text.split()

# + id="4tWaxE3VB1pa" colab_type="code" colab={}
all_text[:2000]

# + id="vNRSxxh0CsUs" colab_type="code" colab={}
words[:100]

# + [markdown] id="xrziW56lCt4H" colab_type="text"
# ##Encoding the words

# + id="bE9Uf1teCwZX" colab_type="code" colab={}
# Map each word to an integer, most frequent word -> 1 (0 is reserved for padding).
from collections import Counter
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}

reviews_ints = []
for each in reviews:
    reviews_ints.append([vocab_to_int[word] for word in each.split()])

# + [markdown] id="UrKpeIKoC1c3" colab_type="text"
# ##Encoding the labels

# + id="W_UG3DAoC32n" colab_type="code" colab={}
# 'positive' -> 1, anything else -> 0.
labels = labels.split('\n')
labels = np.array([1 if each == 'positive' else 0 for each in labels])

# + [markdown] id="Goq3op_FDGj3" colab_type="text"
# ##Removing Zero length reviews and their labels

# + id="uZyuQfDZC7Pl" colab_type="code" outputId="61f991c8-b441-4552-caee-8879e1e80fb9" colab={"base_uri": "https://localhost:8080/", "height": 50}
review_lens = Counter([len(x) for x in reviews_ints])
print("Zero-length reviews: {}".format(review_lens[0]))
print("Maximum review length: {}".format(max(review_lens)))

# + id="FDiyshA8DAoH" colab_type="code" outputId="efb7f035-9b46-4e93-fc23-23dbf439972a" colab={"base_uri": "https://localhost:8080/", "height": 34}
non_zero_idx = [ii for ii, review in enumerate(reviews_ints) if len(review) != 0]
len(non_zero_idx)

# + id="-DI-QjqIDD0Z" colab_type="code" colab={}
reviews_ints = [reviews_ints[ii] for ii in non_zero_idx]
labels = np.array([labels[ii] for ii in non_zero_idx])

# + [markdown] id="HHWOBbqdDQEJ" colab_type="text"
# ##Padding reviews

# + id="Mw9yiQjhDTsQ" colab_type="code" colab={}
# Left-pad (or truncate) every review to exactly seq_len tokens; 0 = padding.
seq_len = 200
features = np.zeros((len(reviews_ints), seq_len), dtype=int)
for i, row in enumerate(reviews_ints):
    features[i, -len(row):] = np.array(row)[:seq_len]

# + id="lwt-2pp8DYcx" colab_type="code" colab={}
features[:10,:100]

# + [markdown] id="dnTFebh4DcZ3" colab_type="text"
# ##Training,Validation,Test Split
#

# + id="fzX2UM0nGRl2" colab_type="code" colab={}
import numpy as np

# + id="9ePCdDI2DjYw" colab_type="code" colab={}
# 80/20 split; replace=0 (falsy) means sampling WITHOUT replacement.
split_frac=0.8
split_idx=int(len(features)*split_frac)
train_idx=np.random.choice(len(features),split_idx,replace=0)
# val_idx=[x for x in range(25000) if x not in train_idx]

# + id="a7Rjdkq1GgFD" colab_type="code" colab={}
# NOTE(review): 25000 is hard-coded here; presumably the dataset size --
# confirm it matches len(features) after the zero-length filtering above.
x_train,x_val=[features[x] for x in train_idx],[features[x] for x in range(25000) if x not in train_idx]
y_train,y_val=[labels[x] for x in train_idx],[labels[x] for x in range(25000) if x not in train_idx]

# + id="0LrbXpN3KxpI" colab_type="code" outputId="7f11b5c2-ee6a-48cc-bdb4-559c94fdc1c6" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(x_val)

# + id="Go-QVnBSHinU" colab_type="code" colab={}
# Split the held-out 20% in half: first half test, second half validation.
test_idx=int(len(x_val)*0.5)
x_test,y_test=x_val[:test_idx],y_val[:test_idx]
x_val,y_val=x_val[test_idx:],y_val[test_idx:]

# + id="ZYGiAaWVIKTH" colab_type="code" outputId="4f12437f-ee94-4956-b196-9fcf333d1440" colab={"base_uri": "https://localhost:8080/", "height": 84}
x_train,y_train,x_val,y_val,x_test,y_test=np.array(x_train),np.array(y_train),np.array(x_val),np.array(y_val),np.array(x_test),np.array(y_test)
print("\t\t\tFeature Shapes:")
print("Train set: \t\t{}".format(x_train.shape),
      "\nValidation set: \t{}".format(x_val.shape),
      "\nTest set: \t\t{}".format(x_test.shape))

# + [markdown] id="0fc1fiQeIVWb" colab_type="text"
# #Building Graph

# + [markdown] id="Vo7rKbHuL0Sp" colab_type="text"
# ##Hyperparameters

# + id="B0lbWO1SIYDo" colab_type="code" colab={}
lstm_size=256
lstm_layers=1
batch_size=500
learning_rate=1e-3

# + [markdown] id="7QiupBfdMY6I" colab_type="text"
# ##Defining inputs, labels and placeholders

# + id="ISZg3ESWMP0B" colab_type="code" colab={}
# +1 because word ids start at 1 and 0 is the padding token.
n_words=len(vocab_to_int)+1

graph=tf.Graph()
with graph.as_default():
    inputs_=tf.placeholder(tf.int32,[None,None],name='inputs')
    labels_=tf.placeholder(tf.int32,[None,None],name='labels')
    keep_prob=tf.placeholder(tf.float32,name='keep_prob')

# + [markdown] id="bVei1D60NmZv" colab_type="text"
# ##Embedding

# + id="1S467mdpNorA" colab_type="code" colab={}
embed_size=300

with graph.as_default():
    # NOTE(review): tf.random_uniform(shape, minval, maxval) -- here the
    # arguments are (+1, -1), i.e. minval > maxval; the intent was almost
    # certainly uniform in [-1, 1].  Confirm and swap.
    embedding=tf.Variable(tf.random_uniform((n_words,embed_size),+1,-1))
    embed=tf.nn.embedding_lookup(embedding,inputs_)

# + [markdown] id="WYjnAlGNPEwo" colab_type="text"
# ##LSTM cell

# + id="-XCIGAMbOtU4" colab_type="code" colab={}
with graph.as_default():
    lstm=tf.nn.rnn_cell.LSTMCell(lstm_size)
    dropout=tf.nn.rnn_cell.DropoutWrapper(lstm,output_keep_prob=keep_prob)
    lstm_layer=tf.nn.rnn_cell.MultiRNNCell([dropout]*lstm_layers)
    initial_state=lstm_layer.zero_state(batch_size,tf.float32)

# + [markdown] id="4HwKvg-4RYRg" colab_type="text"
# ##RNN forward pass

# + id="9vKK76kORLvC" colab_type="code" colab={}
with graph.as_default():
    outputs,final_state=tf.nn.dynamic_rnn(lstm_layer,embed,initial_state=initial_state)

# + [markdown] id="KcxsNZqnTE3R" colab_type="text"
# ##Output

# + id="dBwu02cRTHvg" colab_type="code" outputId="d18d875d-1da6-4440-dacb-09f2b74da990" colab={"base_uri": "https://localhost:8080/", "height": 222}
with graph.as_default():
    # Sigmoid output on the last timestep only; MSE loss against the 0/1 label.
    predictions=tf.contrib.layers.fully_connected(outputs[:,-1],1,activation_fn=tf.sigmoid)
    cost=tf.losses.mean_squared_error(labels_,predictions)
    optimizer=tf.train.AdamOptimizer(learning_rate).minimize(cost)

# + [markdown] id="t4AEnAKDUEXx" colab_type="text"
# ##Validation Accuracy

# + id="MJVn0b8vUUcJ" colab_type="code" colab={}
with graph.as_default():
    correct_pred=tf.equal(tf.cast(tf.round(predictions),tf.int32),labels_)
    accuracy=tf.reduce_mean(tf.cast(correct_pred,tf.float32))

# + [markdown] id="FdKrwz2DUzCO" colab_type="text"
# ##Batching

# + id="Mo1LBosiU3GQ" colab_type="code" colab={}
def get_batches(x,y,batch_size=100):
    """Yield (x, y) batches of exactly batch_size; trailing remainder is dropped."""
    n_batches=len(x)//batch_size
    x,y=x[:n_batches*batch_size],y[:n_batches*batch_size]
    for i in range(0,len(x),batch_size):
        yield x[i:i+batch_size],y[i:i+batch_size]

# + [markdown] id="8rztNuDjVilC" colab_type="text"
# #Training

# + id="qshYhNtmVkrh" colab_type="code" colab={}
epochs=10

with graph.as_default():
    saver=tf.train.Saver()

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    iteration=1
    for e in range(epochs):
        # LSTM state is carried across batches within an epoch.
        state=sess.run(initial_state)

        for i,(x,y) in enumerate(get_batches(x_train,y_train,batch_size),1):
            feed={inputs_:x,
                  labels_:y[:,None],
                  keep_prob:0.5,
                  initial_state:state}
            loss,state,_ = sess.run([cost,final_state,optimizer],feed_dict=feed)

            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))

            if iteration%25==0:
                # Periodic validation pass with dropout disabled (keep_prob=1).
                val_acc=[]
                val_state=sess.run(lstm_layer.zero_state(batch_size,tf.float32))
                for x,y in get_batches(x_val,y_val,batch_size):
                    feed={inputs_:x,
                          labels_:y[:,None],
                          keep_prob:1,
                          initial_state:val_state}
                    batch_acc,val_state=sess.run([accuracy,final_state],feed_dict=feed)
                    val_acc.append(batch_acc)
                print("Val acc: {:.3f}".format(np.mean(val_acc)))
            iteration +=1
    saver.save(sess, "checkpoints/sentiment.ckpt")

# + [markdown] id="4bGD0hs3Ygq6" colab_type="text"
# #Testing

# + id="nwIyCyiCYosA" colab_type="code" outputId="6914817d-fa75-41f6-b8d5-44e0778be4f4" colab={"base_uri": "https://localhost:8080/", "height": 34}
test_acc = []
with tf.Session(graph=graph) as sess:
    # Restore the latest training checkpoint and evaluate on the test split.
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    test_state = sess.run(lstm_layer.zero_state(batch_size, tf.float32))
    for ii, (x, y) in enumerate(get_batches(x_test, y_test, batch_size), 1):
        feed = {inputs_: x,
                labels_: y[:, None],
                keep_prob: 1,
                initial_state: test_state}
        batch_acc, test_state = sess.run([accuracy, final_state], feed_dict=feed)
        test_acc.append(batch_acc)
    print("Test accuracy: {:.3f}".format(np.mean(test_acc)))
RNN/Sentiment Analysis/Sentiment_Analysis_RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic Regression with `tf.data` # # * [참고: TensorFlow.org](https://www.tensorflow.org/get_started/mnist/beginners) # * [소스: mnist_softmax.py in verion 1.4](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_softmax.py) # * `tf.data`를 이용하여 input pipeline을 바꿔보자 # ### Import modules # + """A very simple MNIST classifier. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np import tensorflow as tf sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) np.random.seed(219) tf.set_random_seed(219) # - # ### Import data # + # Load training and eval data from tf.keras (train_data, train_labels), (test_data, test_labels) = \ tf.keras.datasets.mnist.load_data() train_data = train_data / 255. train_data = train_data.reshape(-1, 784) train_labels = np.asarray(train_labels, dtype=np.int32) test_data = test_data / 255. 
test_data = test_data.reshape(-1, 784) test_labels = np.asarray(test_labels, dtype=np.int32) # - # ### Show the MNIST # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline index = 10000 print("label = {}".format(train_labels[index])) plt.imshow(train_data[index].reshape(28, 28), cmap='gray') plt.show() # - # ### Set up dataset with `tf.data` # # #### input pipeline `tf.data.Dataset` and Transformation # + batch_size = 32 # for train train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels)) train_dataset = train_dataset.shuffle(buffer_size = 10000) train_dataset = train_dataset.batch(batch_size = batch_size) print(train_dataset) # for test test_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels)) test_dataset = test_dataset.shuffle(buffer_size = 10000) test_dataset = test_dataset.batch(batch_size = len(test_data)) print(test_dataset) # - # #### Define Iterator # + # tf.data.Iterator.from_string_handle의 output_shapes는 default = None이지만 꼭 값을 넣는 게 좋음 # 여기를 직접 채워 넣으시면 됩니다. handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes) # 여기를 직접 채워 넣으시면 됩니다. x, y = iterator.get_next() x = tf.cast(x, dtype = tf.float32) y = tf.cast(y, dtype = tf.int32) # - # ## Build a graph # ### Create weight and bias # 여기를 직접 채워 넣으시면 됩니다. # create Variables using `get_variable` W = tf.get_variable(name='W', shape=[784, 10], initializer=tf.random_normal_initializer()) b = tf.get_variable(name='b', shape=[10], initializer=tf.random_normal_initializer()) # ### Build a model: $y = Wx + b$ # 여기를 직접 채워 넣으시면 됩니다. 
y_pred = tf.matmul(x, W) + b # ### Define loss function # # * [`tf.nn.softmax_cross_entropy_with_logits_v2`](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits_v2) # * [`tf.losses.softmax_cross_entropy`](https://www.tensorflow.org/api_docs/python/tf/losses/softmax_cross_entropy) y_one_hot = tf.one_hot(y, depth=10) cross_entropy = tf.losses.softmax_cross_entropy(onehot_labels=y_one_hot, logits=y_pred) # ### Create a optimizer train_op = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) # ### `tf.Session()` and train # + sess = tf.Session(config=sess_config) sess.run(tf.global_variables_initializer()) # train_iterator train_iterator = train_dataset.make_initializable_iterator() train_handle = sess.run(train_iterator.string_handle()) max_epochs = 1 # Train for only ten epochs step = 0 losses = [] start_time = time.time() for epochs in range(max_epochs): sess.run(train_iterator.initializer) while True: try: _, loss = sess.run([train_op, cross_entropy], feed_dict={handle: train_handle}) losses.append(loss) if step % 100 == 0: print("step: {}, loss: {}".format(step, loss)) step += 1 except tf.errors.OutOfRangeError: print("End of dataset") # ==> "End of dataset" break print("Epochs: {}, Elapsed time: {}".format(epochs, time.time() - start_time)) print("training done!") # - # ### Plot the loss funtion plt.plot(losses, label='loss') plt.legend() plt.show() # ### Test trained model # * test accuracy: 0.8863 # test_iterator test_iterator = test_dataset.make_initializable_iterator() test_handle = sess.run(test_iterator.string_handle()) sess.run(test_iterator.initializer) correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y, tf.int64)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print("test accuracy:", sess.run(accuracy, feed_dict={handle: test_handle})) # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline test_batch_size = 16 batch_index = np.random.choice(len(test_data), 
size=test_batch_size, replace=False) batch_xs = test_data[batch_index] y_pred_ = sess.run(y_pred, feed_dict={x: batch_xs}) fig = plt.figure(figsize=(16, 10)) for i, (px, py) in enumerate(zip(batch_xs, y_pred_)): p = fig.add_subplot(4, 8, i+1) p.set_title("y_pred: {}".format(np.argmax(py))) p.imshow(px.reshape(28, 28), cmap='gray') p.axis('off')
week02/07_logistic_regression_with_tf.data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from udntools.region import ServiceRegion from udntools.channel import BaseChannel from udntools.utils.plot import get_circle import warnings warnings.filterwarnings("ignore") import numpy as np import matplotlib.pyplot as plt from udntools.utils import cdf_y_axis region = ServiceRegion(0, 100, 0, 100, 100, 10000, bs_distribution="single_circle", ue_distribution="gaussian", ue_sigma=5.0, bs_radius_1=30, if_fix_bs=False) # + number_array = np.arange(2, 21, 1) radius = np.arange(1, 53, 2.0) # - N, R = np.meshgrid(radius, number_array) ase_uniform = np.zeros((np.shape(number_array)[0], np.shape(radius)[0])) ase_gaussian = np.zeros((np.shape(number_array)[0], np.shape(radius)[0])) # + channel = BaseChannel(4.0) region.set_ue_distribution('uniform') for i, value_i in enumerate(number_array): region.bs_number_ = value_i for j, value_j in enumerate(radius): region.set_bs_radius_1(value_j, fresh_ue=True) sir_user_sim = channel.sir_vector(region.bs_position_, region.ue_position_) capacity = np.reshape(np.log2(1 + sir_user_sim), -1) ase_uniform[i, j] = np.sum(capacity) / region.ue_number_ \ * region.bs_number_ / 100 / 100 region.set_ue_distribution('gaussian') region.set_ue_sigma(5.0) for i, value_i in enumerate(number_array): region.bs_number_ = value_i for j, value_j in enumerate(radius): region.set_bs_radius_1(value_j, fresh_ue=True) sir_user_sim = channel.sir_vector(region.bs_position_, region.ue_position_) capacity = np.reshape(np.log2(1 + sir_user_sim), -1) ase_gaussian[i, j] = np.sum(capacity) / region.ue_number_ \ * region.bs_number_ / 100 / 100 # + import matplotlib params = {'axes.labelsize': 15,'axes.titlesize':10, 'text.fontsize': 15, 'legend.fontsize': 15, 'xtick.labelsize': 15, 'ytick.labelsize': 15} matplotlib.rcParams.update(params) # 使能中文字体 
# -*- coding:utf-8 -*- plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签 plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号 fig = plt.figure(figsize=(13, 6)) ax = fig.add_subplot(121) for i in range(150): im1 = ax.contourf(N,R,ase_uniform, np.linspace(0, 0.01, 51), alpha=1, antialiased=True, cmap='plasma') plt.xlim(1,50) plt.ylim(2,20) plt.xlabel("部署半径($R$)") plt.ylabel("基站数($N$)") plt.title("(a) 用户随机分布",fontproperties = 'SimHei', fontsize=15) ax = fig.add_subplot(122) for i in range(150): im1 = ax.contourf(N,R,ase_gaussian, np.linspace(0, 0.01, 51), alpha=1, antialiased=True, cmap='plasma') plt.xlim(1,50) plt.ylim(2,20) plt.xlabel("部署半径($R$)") plt.ylabel("基站数($N$)") plt.title("(b) 用户混合二维高斯分布",fontproperties = 'SimHei') fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.7]) fig.colorbar(im1, cax=cbar_ax).set_label("$\mathrm{bps/Hz/m^2}$", labelpad=-20, y=1.08, rotation=0) fig.savefig('pc_single_circle_ase_show.pdf') fig.savefig('pc_single_circle_ase_show.png') plt.show() # -
examples/single_circle_bs/single_circle_ase.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analyzing Image Metadata # # Exchangeable image file format (Exif) has been a standard around since 1998 to include metadata in image file formats like JPEG, WAV, HEIC, and WEBP. Digital cameras and smart phones with GPS receivers have also included geolocation coordinates. This is a good application of reverse geocoding. # # ![image.jpg](./image.jpg) # # Read this blog post for more background: # # https://developer.here.com/blog/getting-started-with-geocoding-exif-image-metadata-in-python3 # + from PIL import Image def get_exif(filename): image = Image.open(filename) image.verify() return image._getexif() # - exif = get_exif('image.jpg') exif # + from PIL.ExifTags import TAGS def get_labeled_exif(exif): labeled = {} for (key, val) in exif.items(): labeled[TAGS.get(key)] = val return labeled # - exif = get_exif('image.jpg') labeled = get_labeled_exif(exif) labeled # + from PIL.ExifTags import GPSTAGS def get_geotagging(exif): if not exif: raise ValueError("No EXIF metadata found") geotagging = {} for (idx, tag) in TAGS.items(): if tag == 'GPSInfo': if idx not in exif: raise ValueError("No EXIF geotagging found") for (key, val) in GPSTAGS.items(): if key in exif[idx]: geotagging[val] = exif[idx][key] return geotagging # - exif = get_exif('image.jpg') geotags = get_geotagging(exif) geotags # + def get_decimal_from_dms(dms, ref): degrees = dms[0][0] / dms[0][1] minutes = dms[1][0] / dms[1][1] / 60.0 seconds = dms[2][0] / dms[2][1] / 3600.0 if ref in ['S', 'W']: degrees = -degrees minutes = -minutes seconds = -seconds return round(degrees + minutes + seconds, 5) def get_coordinates(geotags): lat = get_decimal_from_dms(geotags['GPSLatitude'], geotags['GPSLatitudeRef']) lon = get_decimal_from_dms(geotags['GPSLongitude'], geotags['GPSLongitudeRef']) 
return (lat,lon) # - exif = get_exif('image.jpg') geotags = get_geotagging(exif) coords = get_coordinates(geotags) coords # # Try It # # Find an image that has some geotags and try to find the corresponding street address with the reverse geocoder examples.
python/pycon19/01_geocoding_images.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 64-bit # name: python385jvsc74a57bd04c42a2e06d46085fb9cb46dd32d1902313d4f1f33eee621b248093f1771ebe8f # --- import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # data visualisation and plotting import matplotlib.pyplot as plt # data plotting import warnings #Uploading the data into df df=pd.read_csv('0. Asset\data\iris.csv') df.head() #Defining variables X=df[['sepal_length','sepal_width','petal_length','petal_width']] y=df['species'] #Importing label encoder to encode species column from sklearn.preprocessing import LabelEncoder le=LabelEncoder() df['species']=le.fit_transform(df['species']) y=df['species'] #Importing train test split from sklearn.model_selection import train_test_split #Using train test split to slit the data, keeping 20% as test data X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2) #Importing Linear Regression from sklearn.linear_model import LinearRegression #assinging reg as function of Linear Regression reg=LinearRegression() #Fitting data into training and testing reg.fit(X_train,y_train) #Predicting test data reg.predict(X_test) #Checking accuracy reg.score(X_test,y_test)
3.Linear_Regression/Iris Linear Regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import numpy as np import matplotlib.pyplot as plt # Intialize random number generator np.random.seed(123) # True parameter values alpha, sigma = 1, 1 beta = [1, 2.5] # Size of dataset size = 100 # Predictor variable X1 = np.linspace(0, 1, size) X2 = np.linspace(0,.2, size) # Simulate outcome variable Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma # + # %matplotlib inline fig, axes = plt.subplots(1, 2, sharex=True, figsize=(10,4)) axes[0].scatter(X1, Y) axes[1].scatter(X2, Y) axes[0].set_ylabel('Y'); axes[0].set_xlabel('X1'); axes[1].set_xlabel('X2'); # - from pymc3 import Model, Normal, HalfNormal # + basic_model = Model() with basic_model: # Priors for unknown model parameters alpha = Normal('alpha', mu=0, sd=10) beta = Normal('beta', mu=0, sd=10, shape=2) sigma = HalfNormal('sigma', sd=1) # Expected value of outcome mu = alpha + beta[0]*X1 + beta[1]*X2 # Likelihood (sampling distribution) of observations Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y) # + basic_model = Model() with basic_model: # Priors for unknown model parameters alpha = Normal('alpha', mu=0, sd=10) beta = Normal('beta', mu=0, sd=10, shape=2) sigma = HalfNormal('sigma', sd=1) # Expected value of outcome mu = alpha + beta[0]*X1 + beta[1]*X2 # Likelihood (sampling distribution) of observations Y = Normal('Y_obs', mu=mu, sd=sigma) # + alpha = Normal('alpha', mu=0, sd=10) beta = Normal('beta', mu=0, sd=10, shape=2) sigma = HalfNormal('sigma', sd=1) # Expected value of outcome mu = alpha + beta[0]*X1 + beta[1]*X2 # Likelihood (sampling distribution) of observations Y = Normal('Y_obs', mu=mu, sd=sigma) # + from pymc3 import find_MAP map_estimate = find_MAP(model=basic_model) print(map_estimate) # + from scipy import optimize from pymc3 import NUTS, sample 
with basic_model: # obtain starting values via MAP start = find_MAP(fmin=optimize.fmin_powell) # instantiate sampler step = NUTS(scaling=start) # draw 2000 posterior samples trace = sample(2000, step, start=start) # + from pymc3 import traceplot traceplot(trace); # + from pymc3 import summary summary(trace) # - type(basic_model) type(trace) dir(trace) trace.varnames trace.get_values('alpha') dir(basic_model) basic_model.observed_RVs basic_model.unobserved_RVs
pymc3_examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="6MHFrYVOxv7l" # ### Creation of the environment # + id="kSChpBmA53GI" cellView="both" colab={"base_uri": "https://localhost:8080/"} outputId="5adfba81-72a2-4063-87db-4b56f8926fc0" # %tensorflow_version 2.x # !pip3 install --upgrade pip # #!pip install -qU t5 # !pip3 install git+https://github.com/google-research/text-to-text-transfer-transformer.git #extra_id_x support import functools import os import time import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) import tensorflow.compat.v1 as tf import tensorflow_datasets as tfds import t5 #Set the base dir(Google cloud bucket) BASE_DIR = "gs://bucket_code_completion" if not BASE_DIR or BASE_DIR == "gs://": raise ValueError("You must enter a BASE_DIR.") ON_CLOUD = True if ON_CLOUD: import tensorflow_gcs_config from google.colab import auth # Set credentials for GCS reading/writing from Colab and TPU. TPU_TOPOLOGY = "2x2" try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection TPU_ADDRESS = tpu.get_master() print('Running on TPU:', TPU_ADDRESS) except ValueError: raise BaseException('ERROR: Not connected to a TPU runtime; please see the previous cell in this notebook for instructions!') auth.authenticate_user() tf.config.experimental_connect_to_host(TPU_ADDRESS) tensorflow_gcs_config.configure_gcs_from_colab_auth() tf.disable_v2_behavior() # Improve logging. 
from contextlib import contextmanager import logging as py_logging if ON_CLOUD: tf.get_logger().propagate = False py_logging.root.setLevel('INFO') @contextmanager def tf_verbosity_level(level): og_level = tf.logging.get_verbosity() tf.logging.set_verbosity(level) yield tf.logging.set_verbosity(og_level) # + [markdown] id="zkpoXzzFx3OS" # ### Path to csv file # This variable contains the path to the tsv file for training loaded on the bucket. Please be sure to insert the correct path # + id="p4UHw7Yo6GCK" cellView="both" nq_tsv_path = { "train":'gs://bucket_code_completion/T5_extension/data/code.tsv', "validation":'gs://bucket_code_completion/T5_extension/data/code.tsv', } # + [markdown] id="PD-lKDAfynn-" # ### Preprocess of the dataset # In this step we preprocess the dataset. # You have to change the path to vocab files (*vocab_model_path* and *vocab_path*) # # + id="k5DJVOe896Lw" from t5.data import postprocessors as t5_postprocessors from t5.seqio import Feature,SentencePieceVocabulary # # Set the path of sentencepiece model and vocab files vocab_model_path = 'gs://bucket_code_completion/T5_extension/code.model' vocab_path = 'gs://bucket_code_completion/T5_extension/code.vocab' TaskRegistry = t5.data.TaskRegistry TfdsTask = t5.data.TfdsTask def get_default_vocabulary(): return SentencePieceVocabulary(vocab_model_path, 100) DEFAULT_OUTPUT_FEATURES = { "inputs": Feature( vocabulary=get_default_vocabulary(), add_eos=True, required=False), "targets": Feature( vocabulary=get_default_vocabulary(), add_eos=True) } # + id="ncBkh1fH7yh0" colab={"base_uri": "https://localhost:8080/"} outputId="8395743b-335c-4c2c-c9f3-16298a41e7e8" def nq_dataset_fn(split, shuffle_files=True): # We only have one file for each split. del shuffle_files # Load lines from the text file as examples. 
ds = tf.data.TextLineDataset(nq_tsv_path[split]) ds = ds.map( functools.partial(tf.io.decode_csv, record_defaults=["string","string"], field_delim="\t", use_quote_delim=False), num_parallel_calls=tf.data.experimental.AUTOTUNE) ds = ds.map(lambda *ex: dict(zip(["input", "output"], ex))) return ds print("A few raw train examples...") for ex in tfds.as_numpy(nq_dataset_fn("train").take(5)): print(ex) # + id="z998BqAT42eL" def preprocessing(ds): def to_inputs_and_targets(ex): inputs = tf.strings.join([ ex['input']], separator=' ') class_label = tf.strings.join([ex['output']], separator=' ') return {'inputs': inputs, 'targets': class_label } return ds.map(to_inputs_and_targets, num_parallel_calls=tf.data.experimental.AUTOTUNE) # + id="Pq_ljAs373oK" colab={"base_uri": "https://localhost:8080/"} outputId="2bd2b680-aa84-4d23-ce93-68ebd29f5a1b" #Create a new training task t5.data.TaskRegistry.remove('pretraining') t5.data.TaskRegistry.add( "pretraining", dataset_fn=nq_dataset_fn, splits=["train", "validation"], text_preprocessor=[preprocessing], output_features = DEFAULT_OUTPUT_FEATURES, metric_fns=[t5.evaluation.metrics.accuracy], ) # + id="XFAAvLfG7528" colab={"base_uri": "https://localhost:8080/"} outputId="11edf0da-2630-4fb8-9655-34609f01ed34" nq_task = t5.data.TaskRegistry.get("pretraining") ds = nq_task.get_dataset(split="train", sequence_length={"inputs": 256, "targets": 256}) print("A few preprocessed training examples...") for ex in tfds.as_numpy(ds.take(5)): print(ex) # + [markdown] id="ZyCj-Fa-zo4V" # ### Pretraining of the model # You can pretrain the model running the following two cells. 
# Please set the correct path of the variable *MODEL_DIR* (the path to save the pretrained model in) and *PATH_GIN_FILE* (the gin file configuration for the pre-training) # + id="YgvnbVWU78nz" from mesh_tensorflow.transformer.learning_rate_schedules import learning_rate_schedule_noam #See https://github.com/google-research/text-to-text-transfer-transformer if you want to scale up the model MODEL_SIZE = "small" MODEL_DIR = 'gs://bucket_code_completion/T5_extension/pretrained_with_masking' model_parallelism, train_batch_size, keep_checkpoint_max = { "small": (1, 256, 16), "base": (2, 128, 8), "large": (8, 64, 4), "3B": (8, 16, 1), "11B": (8, 16, 1)}[MODEL_SIZE] tf.io.gfile.makedirs(MODEL_DIR) model = t5.models.MtfModel( model_dir=MODEL_DIR, tpu=TPU_ADDRESS, tpu_topology=TPU_TOPOLOGY, model_parallelism=model_parallelism, batch_size=train_batch_size, sequence_length={"inputs": 256, "targets": 256}, learning_rate_schedule = learning_rate_schedule_noam, save_checkpoints_steps=5000, keep_checkpoint_max=keep_checkpoint_max if ON_CLOUD else None ) # + id="tmdx3v9z8DY6" colab={"base_uri": "https://localhost:8080/"} outputId="6f00dcc3-fdee-404a-a2d3-c51fbffb0763" PATH_GIN_FILE = 'gs://bucket_code_completion/T5_extension/pretrain_config/operative_config.gin' import gin with gin.unlock_config(): gin.parse_config_file(PATH_GIN_FILE) TRAIN_STEPS = 200000 model.train("pretraining", steps=TRAIN_STEPS) # + id="vjOczu_Fb2Kn"
Pretrain/pretrain.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import cv2 import numpy as np img = cv2.imread('girl.jpg') gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) sift = cv2.xfeatures2d.SIFT_create() kp,des = sift.detectAndCompute(gray,None) img=cv2.drawKeypoints(gray,kp,img) cv2.imwrite('sift_keypoints.jpg',img) from IPython.display import Image Image(filename='girl.jpg') Image(filename='sift_keypoints.jpg') des.shape
Python/Sift_mark1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + import os import time from selenium import webdriver from selenium.webdriver.common.by import By chromedriver_path = "C:\\Users\\Araf\\Desktop\\Script\\chromedriver_win32" os.environ['PATH'] += chromedriver_path # + driver = webdriver.Chrome() driver.implicitly_wait(40) driver.get("https://atcoder.jp") # do login manually # + # there are 9 pages of contests page = 9 driver.get(f"https://atcoder.jp/contests/archive?page={page}") time.sleep(7) contests = driver.find_elements(By.XPATH, "//div/div/div/div[3]/div[2]/div/table/tbody/tr") # all contest links contest_links = [] for _ in contests: contest_links.append(str(_.find_element(By.CSS_SELECTOR, 'a[href*="/contests"]').get_attribute("href"))) # + lt = 0 rt = len(contest_links)-1 for i in range(lt, rt+1): # for each contest print(f"parsing: {i} of {rt}") driver.get(contest_links[i]+"/submissions/me") # go to my submission page time.sleep(6) all_text = driver.find_element(By.TAG_NAME, "body").text if "No Submissions" in all_text: continue rows = driver.find_elements(By.XPATH, "//div/table/tbody/tr") # for each submission code_pages = [] for row in rows: cols = row.find_elements(By.TAG_NAME, "td") if len(cols)<10: continue code_pages.append(cols[9].find_element(By.TAG_NAME, "a").get_attribute("href")) for j in range(len(code_pages)): driver.get(code_pages[j]) time.sleep(6) detail = driver.find_elements(By.XPATH, "//body/div[3]/div/div/div[2]/div[3]/table/tbody/tr/td") verdict = detail[6].text.strip() created = detail[0].text.strip() title = detail[1].text.strip() p_link = detail[1].find_element(By.CSS_SELECTOR, "a[href*='tasks']").get_attribute("href") language = detail[3].text.strip() run_time = detail[7].text.strip() mem_used = detail[8].text.strip() code = 
driver.find_element(By.CSS_SELECTOR, ".linenums").text table_data=[ [f'* @author: kzvd4729', f'created: {created}'], [f'* solution_verdict: {verdict}', f'language: {language}'], [f'* run_time: {run_time}', f'memory_used: {mem_used}'] ] printer = '/****************************************************************************************\n' for row in table_data: printer+=("{: <60} {: <50}".format(*row))+'\n' printer += f"* problem: {p_link}\n" printer +='****************************************************************************************/\n' printer += code if len(printer)<590: print("got one bad") exit() for c in "\/:*?\"<>|.": # folder name can't have these character title = title.replace(c, 'x') current = f"atcoder/{title}" if not os.path.exists(current): os.mkdir(current) current = current+f'/{verdict}' for k in range(1, 1000): if (k==1): now = current+'.cpp' else: now = current+f' ({k}).cpp' if not os.path.exists(now): with open(now, 'w') as f: f.write(printer) f.close() break print(f"submission: {j}/{len(code_pages)-1}") # -
__Scripts__/script_atcoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import tensorflow as tf @tf.function def f(): a = tf.constant([[10,10],[11.,1.]]) x = tf.constant([[1.,0.],[0.,1.]]) b = tf.Variable(12.) y = tf.matmul(a, x) + b print("PRINT: ", y) tf.print("TF-PRINT: ", y) return y f() # + b = None @tf.function def f(): a = tf.constant([[10, 10], [11., 1.]]) x = tf.constant([[1., 0.], [0., 1.]]) global b if b is None: b = tf.Variable(12.) y = tf.matmul(a, x) + b print("PRINT: ", y) tf.print("TF-PRINT: ", y) return y f() # + class F(): def __init__(self): self._b = None @tf.function def __call__(self): a = tf.constant([[10, 10], [11., 1.]]) x = tf.constant([[1., 0.], [0., 1.]]) if self._b is None: self._b = tf.Variable(12.) y = tf.matmul(a, x) + self._b print("PRINT: ", y) tf.print("TF-PRINT: ", y) return y f = F() f() # + @tf.function def f(x): if x > 0: import pdb pdb.set_trace() x = x + 1 return x tf.config.experimental_run_functions_eagerly(True) f(tf.constant(1)) # + def batch_fetch_element_per_group(data,idx): #here data is of shape [?,m,n] ? is the data batch size, m - groups, n candidates in each group #idx is of share [?,m] select a particular from each group. 
nRows = tf.shape(data)[0] print(nRows) nCols = tf.constant(tf.shape(data)[1] , dtype=tf.int32) print(nCols) m1 = tf.reshape(tf.tile(tf.range(nCols), [nRows]), shape=[nRows, nCols]) print(m1) m2 = tf.transpose(tf.reshape(tf.tile(tf.range(nRows), [nCols]), shape=[nCols, nRows])) print(m2) indices = tf.stack([m2, m1, idx], axis=-1) # indices should be of shape [?, 5, 3] with indices[i,j]==[i,j,idx[i,j]] print(indices) output = tf.gather_nd(data, indices=indices) print(output) return output data = tf.constant([[[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4]], [[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4]], [[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4]]]) idx= tf.constant ([[0,1,2,3,3],[0,1,2,3,3],[0,1,2,3,3]]) data1 = tf.constant([[1,2,3,4], [1,2,3,4], [1,2,3,4]]) idx1= tf.constant ([[0],[0],[0]]) batch_fetch_element_per_group(data,idx) # + import numpy as np import tensorflow as tf N_CHANNELS = 5 pl=tf.placeholder(dtype=tf.int32, shape=(None, 28, 28, N_CHANNELS)) # Indices we'll use. batch_size = 4 here. 
label_predictions = tf.constant([0, 2, 0, 3]) # Indices of shape [?, 2], with indices[i] = [i, self.label_predictions[i]], # which is easy to do with tf.range() and tf.stack() indices = tf.stack([tf.range(tf.size(label_predictions)), label_predictions], axis=-1) # [[0, 0], [1, 2], [2, 0], [3, 3]] transposed = tf.transpose(pl, perm=[0, 3, 1, 2]) gathered = tf.gather_nd(transposed, indices) # Should be of shape (4, 2, 3) result = tf.expand_dims(gathered, -1) initial_value = np.arange(4*28*28*N_CHANNELS).reshape((4, 28, 28, N_CHANNELS)) sess = tf.InteractiveSession() res = sess.run(result, feed_dict={pl: initial_value}) # print(res) print("checking validity") for i in range(4): for x in range(28): print(x) for y in range(28): assert res[i, x, y, 0] == initial_value[i, x, y, indices[i, 1].eval()] print("All assertions passed") # + import tensorflow as tf ids=tf.constant([[2,3,4,5,6],[9,8,7,6,5]]) start_logits= tf.constant([[0.5,0.1,0.1,0.1,0.1],[0.6,0.1,0.1,0.1,0.1]]) end_logits= tf.constant([[0.1,0.1,0.1,0.1,0.7],[0.2,0.1,0.7,0.1,0.1]]) _, starts= tf.nn.top_k(start_logits, k=1) _, ends = tf.nn.top_k(end_logits, k=1) spanarray = [] maskarray =[] starts = tf.unstack(starts, axis=0) ends = tf.unstack(ends, axis=0) ids = tf.unstack(ids, axis=0) batch_size = len(ids) str_len = len(ids[0]) for i in range(batch_size): spanarray.append(tf.strided_slice(ids[i], starts[i], ends[i] + 1)) maskarray.append(tf.strided_slice(tf.fill([str_len],1),starts[i], ends[i] + 1)) for j in range(str_len - len(spanarray[i])): spanarray[i] = tf.concat([spanarray[i], [0]], axis=0) maskarray[i] = tf.concat([maskarray[i], [0]], axis=0) spans = tf.stack(spanarray, axis=0) masks= tf.stack(maskarray, axis=0) print(spans) print(masks) # + def get_best_span_prediction(ids,start_logits, end_logits): _, starts= tf.nn.top_k(start_logits, k=1) _, ends = tf.nn.top_k(end_logits, k=1) spanarray = [] maskarray =[] starts = tf.unstack(starts, axis=0) ends = tf.unstack(ends, axis=0) ids = tf.unstack(ids, axis=0) 
batch_size = len(ids) str_len = len(ids[0]) for i in range(batch_size): spanarray.append(tf.strided_slice(ids[i], starts[i], ends[i] + 1)) maskarray.append(tf.strided_slice(tf.fill([str_len],1),starts[i], ends[i] + 1)) for j in range(str_len - len(spanarray[i])): spanarray[i] = tf.concat([spanarray[i], [0]], axis=0) maskarray[i] = tf.concat([maskarray[i], [0]], axis=0) spans = tf.stack(spanarray, axis=0) masks= tf.stack(maskarray, axis=0) return spans,masks ids=tf.constant([[2,3,4,5,6],[9,8,7,6,5]]) start_logits= tf.constant([[0.5,0.1,0.1,0.1,0.1],[0.6,0.1,0.1,0.1,0.1]]) end_logits= tf.constant([[0.1,0.1,0.1,0.1,0.7],[0.2,0.1,0.7,0.1,0.1]]) get_best_span_prediction(ids,start_logits, end_logits) # + # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DNC addressing modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import sonnet as snt import tensorflow as tf from dnc import util # Ensure values are greater than epsilon to avoid numerical instability. 
_EPSILON = 1e-6 TemporalLinkageState = collections.namedtuple('TemporalLinkageState', ('link', 'precedence_weights')) def _vector_norms(m): squared_norms = tf.reduce_sum(m * m, axis=2, keepdims=True) return tf.sqrt(squared_norms + _EPSILON) def weighted_softmax(activations, strengths, strengths_op): """Returns softmax over activations multiplied by positive strengths. Args: activations: A tensor of shape `[batch_size, num_heads, memory_size]`, of activations to be transformed. Softmax is taken over the last dimension. strengths: A tensor of shape `[batch_size, num_heads]` containing strengths to multiply by the activations prior to the softmax. strengths_op: An operation to transform strengths before softmax. Returns: A tensor of same shape as `activations` with weighted softmax applied. """ transformed_strengths = tf.expand_dims(strengths_op(strengths), -1) sharp_activations = activations * transformed_strengths softmax = snt.BatchApply(module_or_op=tf.nn.softmax) return softmax(sharp_activations) # + # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DNC addressing modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import sonnet as snt import tensorflow as tf from dnc import util # Ensure values are greater than epsilon to avoid numerical instability. 
# Small constant added inside norms, denominators and cumulative products so
# they never see an exact zero.
_EPSILON = 1e-6

# Recurrent state of the temporal-linkage machinery: `link` holds the
# write-order graph(s), `precedence_weights` aggregates recent write
# locations.
TemporalLinkageState = collections.namedtuple('TemporalLinkageState',
                                              ('link', 'precedence_weights'))


def _vector_norms(m):
    """Return the L2 norm of `m` along its last (word-size) axis.

    The reduced axis is kept as a singleton dimension so the result
    broadcasts against `m`; `_EPSILON` keeps sqrt (and its gradient)
    finite at zero.
    """
    squared_norms = tf.reduce_sum(m * m, axis=2, keepdims=True)
    return tf.sqrt(squared_norms + _EPSILON)


def weighted_softmax(activations, strengths, strengths_op):
    """Returns softmax over activations multiplied by positive strengths.

    Args:
      activations: A tensor of shape `[batch_size, num_heads, memory_size]`, of
        activations to be transformed. Softmax is taken over the last dimension.
      strengths: A tensor of shape `[batch_size, num_heads]` containing strengths
        to multiply by the activations prior to the softmax.
      strengths_op: An operation to transform strengths before softmax.

    Returns:
      A tensor of same shape as `activations` with weighted softmax applied.
    """
    # Broadcast the (transformed) per-head strength over the memory axis,
    # sharpening or flattening each head's distribution before the softmax.
    transformed_strengths = tf.expand_dims(strengths_op(strengths), -1)
    sharp_activations = activations * transformed_strengths
    # BatchApply folds the leading [batch, heads] dims so softmax runs over
    # the memory dimension only.
    softmax = snt.BatchApply(module_or_op=tf.nn.softmax)
    return softmax(sharp_activations)


class CosineWeights(snt.AbstractModule):
  """Cosine-weighted attention.

  Calculates the cosine similarity between a query and each word in memory,
  then applies a weighted softmax to return a sharp distribution.
  """

  def __init__(self,
               num_heads,
               word_size,
               strength_op=tf.nn.softplus,
               name='cosine_weights'):
    """Initializes the CosineWeights module.

    Args:
      num_heads: number of memory heads.
      word_size: memory word size.
      strength_op: operation to apply to strengths (default is tf.nn.softplus).
      name: module name (default 'cosine_weights')
    """
    super(CosineWeights, self).__init__(name=name)
    self._num_heads = num_heads
    self._word_size = word_size
    # softplus by default, so strengths are always positive.
    self._strength_op = strength_op

  def _build(self, memory, keys, strengths):
    """Connects the CosineWeights module into the graph.

    Args:
      memory: A 3-D tensor of shape `[batch_size, memory_size, word_size]`.
      keys: A 3-D tensor of shape `[batch_size, num_heads, word_size]`.
      strengths: A 2-D tensor of shape `[batch_size, num_heads]`.

    Returns:
      Weights tensor of shape `[batch_size, num_heads, memory_size]`.
    """
    # Calculates the inner product between the query vector and words in memory.
    dot = tf.matmul(keys, memory, adjoint_b=True)

    # Outer product to compute denominator (euclidean norm of query and memory).
    memory_norms = _vector_norms(memory)
    key_norms = _vector_norms(keys)
    norm = tf.matmul(key_norms, memory_norms, adjoint_b=True)

    # Calculates cosine similarity between the query vector and words in memory.
    similarity = dot / (norm + _EPSILON)

    return weighted_softmax(similarity, strengths, self._strength_op)


class TemporalLinkage(snt.RNNCore):
  """Keeps track of write order for forward and backward addressing.

  This is a pseudo-RNNCore module, whose state is a pair `(link,
  precedence_weights)`, where `link` is a (collection of) graphs for (possibly
  multiple) write heads (represented by a tensor with values in the range
  [0, 1]), and `precedence_weights` records the "previous write locations" used
  to build the link graphs.

  The function `directional_read_weights` computes addresses following the
  forward and backward directions in the link graphs.
  """

  def __init__(self, memory_size, num_writes, name='temporal_linkage'):
    """Construct a TemporalLinkage module.

    Args:
      memory_size: The number of memory slots.
      num_writes: The number of write heads.
      name: Name of the module.
    """
    super(TemporalLinkage, self).__init__(name=name)
    self._memory_size = memory_size
    self._num_writes = num_writes

  def _build(self, write_weights, prev_state):
    """Calculate the updated linkage state given the write weights.

    Args:
      write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
          containing the memory addresses of the different write heads.
      prev_state: `TemporalLinkageState` tuple containg a tensor `link` of
          shape `[batch_size, num_writes, memory_size, memory_size]`, and a
          tensor `precedence_weights` of shape `[batch_size, num_writes,
          memory_size]` containing the aggregated history of recent writes.

    Returns:
      A `TemporalLinkageState` tuple `next_state`, which contains the updated
      link and precedence weights.
    """
    # Link must be updated before the precedence weights, since it is built
    # from the *previous* precedence weights.
    link = self._link(prev_state.link, prev_state.precedence_weights,
                      write_weights)
    precedence_weights = self._precedence_weights(
        prev_state.precedence_weights, write_weights)
    return TemporalLinkageState(
        link=link, precedence_weights=precedence_weights)

  def directional_read_weights(self, link, prev_read_weights, forward):
    """Calculates the forward or the backward read weights.

    For each read head (at a given address), there are `num_writes` link graphs
    to follow. Thus this function computes a read address for each of the
    `num_reads * num_writes` pairs of read and write heads.

    Args:
      link: tensor of shape `[batch_size, num_writes, memory_size,
          memory_size]` representing the link graphs L_t.
      prev_read_weights: tensor of shape `[batch_size, num_reads,
          memory_size]` containing the previous read weights w_{t-1}^r.
      forward: Boolean indicating whether to follow the "future" direction in
          the link graph (True) or the "past" direction (False).

    Returns:
      tensor of shape `[batch_size, num_reads, num_writes, memory_size]`
    """
    with tf.name_scope('directional_read_weights'):
      # We calculate the forward and backward directions for each pair of
      # read and write heads; hence we need to tile the read weights and do a
      # sort of "outer product" to get this.
      expanded_read_weights = tf.stack([prev_read_weights] * self._num_writes,
                                       1)
      # adjoint_b=forward transposes the link graph, which reverses the
      # direction of the edges being followed.
      result = tf.matmul(expanded_read_weights, link, adjoint_b=forward)
      # Swap dimensions 1, 2 so order is [batch, reads, writes, memory]:
      return tf.transpose(result, perm=[0, 2, 1, 3])

  def _link(self, prev_link, prev_precedence_weights, write_weights):
    """Calculates the new link graphs.

    For each write head, the link is a directed graph (represented by a matrix
    with entries in range [0, 1]) whose vertices are the memory locations, and
    an edge indicates temporal ordering of writes.

    Args:
      prev_link: A tensor of shape `[batch_size, num_writes, memory_size,
          memory_size]` representing the previous link graphs for each write
          head.
      prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
          memory_size]` which is the previous "aggregated" write weights for
          each write head.
      write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
          containing the new locations in memory written to.

    Returns:
      A tensor of shape `[batch_size, num_writes, memory_size, memory_size]`
      containing the new link graphs for each write head.
    """
    with tf.name_scope('link'):
      batch_size = tf.shape(prev_link)[0]
      # Expand to [batch, writes, memory, 1] ("row" index i) and
      # [batch, writes, 1, memory] ("column" index j) so products below
      # broadcast into full memory_size x memory_size matrices.
      write_weights_i = tf.expand_dims(write_weights, 3)
      write_weights_j = tf.expand_dims(write_weights, 2)
      prev_precedence_weights_j = tf.expand_dims(prev_precedence_weights, 2)
      # Old edges touching a freshly-written row or column are decayed away;
      # new edges point from the written locations to the previously-written
      # (precedence) locations.
      prev_link_scale = 1 - write_weights_i - write_weights_j
      new_link = write_weights_i * prev_precedence_weights_j
      link = prev_link_scale * prev_link + new_link
      # Return the link with the diagonal set to zero, to remove self-looping
      # edges.
      return tf.matrix_set_diag(
          link,
          tf.zeros(
              [batch_size, self._num_writes, self._memory_size],
              dtype=link.dtype))

  def _precedence_weights(self, prev_precedence_weights, write_weights):
    """Calculates the new precedence weights given the current write weights.

    The precedence weights are the "aggregated write weights" for each write
    head, where write weights with sum close to zero will leave the precedence
    weights unchanged, but with sum close to one will replace the precedence
    weights.

    Args:
      prev_precedence_weights: A tensor of shape `[batch_size, num_writes,
          memory_size]` containing the previous precedence weights.
      write_weights: A tensor of shape `[batch_size, num_writes, memory_size]`
          containing the new write weights.

    Returns:
      A tensor of shape `[batch_size, num_writes, memory_size]` containing the
      new precedence weights.
    """
    with tf.name_scope('precedence_weights'):
      # Interpolate between keeping the old weights (no write happened) and
      # replacing them with the new write locations (a full write happened).
      write_sum = tf.reduce_sum(write_weights, 2, keepdims=True)
      return (1 - write_sum) * prev_precedence_weights + write_weights

  @property
  def state_size(self):
    """Returns a `TemporalLinkageState` tuple of the state tensors' shapes."""
    return TemporalLinkageState(
        link=tf.TensorShape(
            [self._num_writes, self._memory_size, self._memory_size]),
        precedence_weights=tf.TensorShape([self._num_writes,
                                           self._memory_size]),)


class Freeness(snt.RNNCore):
  """Memory usage that is increased by writing and decreased by reading.

  This module is a pseudo-RNNCore whose state is a tensor with values in
  the range [0, 1] indicating the usage of each of `memory_size` memory slots.

  The usage is:

  *   Increased by writing, where usage is increased towards 1 at the write
      addresses.
  *   Decreased by reading, where usage is decreased after reading from a
      location when free_gate is close to 1.

  The function `write_allocation_weights` can be invoked to get free locations
  to write to for a number of write heads.
  """

  def __init__(self, memory_size, name='freeness'):
    """Creates a Freeness module.

    Args:
      memory_size: Number of memory slots.
      name: Name of the module.
    """
    super(Freeness, self).__init__(name=name)
    self._memory_size = memory_size

  def _build(self, write_weights, free_gate, read_weights, prev_usage):
    """Calculates the new memory usage u_t.

    Memory that was written to in the previous time step will have its usage
    increased; memory that was read from and the controller says can be "freed"
    will have its usage decreased.

    Args:
      write_weights: tensor of shape `[batch_size, num_writes,
          memory_size]` giving write weights at previous time step.
      free_gate: tensor of shape `[batch_size, num_reads]` which indicates
          which read heads read memory that can now be freed.
      read_weights: tensor of shape `[batch_size, num_reads,
          memory_size]` giving read weights at previous time step.
      prev_usage: tensor of shape `[batch_size, memory_size]` giving
          usage u_{t - 1} at the previous time step, with entries in range
          [0, 1].

    Returns:
      tensor of shape `[batch_size, memory_size]` representing updated memory
      usage.
    """
    # Calculation of usage is not differentiable with respect to write weights.
    write_weights = tf.stop_gradient(write_weights)
    usage = self._usage_after_write(prev_usage, write_weights)
    usage = self._usage_after_read(usage, free_gate, read_weights)
    return usage

  def write_allocation_weights(self, usage, write_gates, num_writes):
    """Calculates freeness-based locations for writing to.

    This finds unused memory by ranking the memory locations by usage, for
    each write head. (For more than one write head, we use a "simulated new
    usage" which takes into account the fact that the previous write head
    will increase the usage in that area of the memory.)

    Args:
      usage: A tensor of shape `[batch_size, memory_size]` representing
          current memory usage.
      write_gates: A tensor of shape `[batch_size, num_writes]` with values in
          the range [0, 1] indicating how much each write head does writing
          based on the address returned here (and hence how much usage
          increases).
      num_writes: The number of write heads to calculate write weights for.

    Returns:
      tensor of shape `[batch_size, num_writes, memory_size]` containing the
          freeness-based write locations. Note that this isn't scaled by
          `write_gate`; this scaling must be applied externally.
    """
    with tf.name_scope('write_allocation_weights'):
      # expand gatings over memory locations
      write_gates = tf.expand_dims(write_gates, -1)

      # Heads are processed sequentially: each later head sees the simulated
      # usage left behind by the earlier heads.
      allocation_weights = []
      for i in range(num_writes):
        allocation_weights.append(self._allocation(usage))
        # update usage to take into account writing to this new allocation
        usage += ((1 - usage) * write_gates[:, i, :] * allocation_weights[i])

      # Pack the allocation weights for the write heads into one tensor.
      return tf.stack(allocation_weights, axis=1)

  def _usage_after_write(self, prev_usage, write_weights):
    """Calculates the new usage after writing to memory.

    Args:
      prev_usage: tensor of shape `[batch_size, memory_size]`.
      write_weights: tensor of shape `[batch_size, num_writes, memory_size]`.

    Returns:
      New usage, a tensor of shape `[batch_size, memory_size]`.
    """
    with tf.name_scope('usage_after_write'):
      # Calculate the aggregated effect of all write heads
      write_weights = 1 - util.reduce_prod(1 - write_weights, 1)
      # Usage moves towards 1 at the written locations.
      return prev_usage + (1 - prev_usage) * write_weights

  def _usage_after_read(self, prev_usage, free_gate, read_weights):
    """Calculates the new usage after reading and freeing from memory.

    Args:
      prev_usage: tensor of shape `[batch_size, memory_size]`.
      free_gate: tensor of shape `[batch_size, num_reads]` with entries in the
          range [0, 1] indicating the amount that locations read from can be
          freed.
      read_weights: tensor of shape `[batch_size, num_reads, memory_size]`.

    Returns:
      New usage, a tensor of shape `[batch_size, memory_size]`.
    """
    with tf.name_scope('usage_after_read'):
      free_gate = tf.expand_dims(free_gate, -1)
      free_read_weights = free_gate * read_weights
      # phi is the retention vector: the fraction of usage kept at each slot
      # after all read heads have (possibly) freed it.
      phi = util.reduce_prod(1 - free_read_weights, 1, name='phi')
      return prev_usage * phi

  def _allocation(self, usage):
    r"""Computes allocation by sorting `usage`.

    This corresponds to the value a = a_t[\phi_t[j]] in the paper.

    Args:
      usage: tensor of shape `[batch_size, memory_size]` indicating current
          memory usage. This is equal to u_t in the paper when we only have one
          write head, but for multiple write heads, one should update the usage
          while iterating through the write heads to take into account the
          allocation returned by this function.

    Returns:
      Tensor of shape `[batch_size, memory_size]` corresponding to allocation.
    """
    with tf.name_scope('allocation'):
      # Ensure values are not too small prior to cumprod.
      usage = _EPSILON + (1 - _EPSILON) * usage

      # top_k on non-usage sorts the slots from least-used to most-used.
      nonusage = 1 - usage
      sorted_nonusage, indices = tf.nn.top_k(
          nonusage, k=self._memory_size, name='sort')
      sorted_usage = 1 - sorted_nonusage
      # Exclusive cumprod gives, for each slot, the product of the usages of
      # all less-used slots before it.
      prod_sorted_usage = tf.cumprod(sorted_usage, axis=1, exclusive=True)
      sorted_allocation = sorted_nonusage * prod_sorted_usage
      inverse_indices = util.batch_invert_permutation(indices)

      # This final line "unsorts" sorted_allocation, so that the indexing
      # corresponds to the original indexing of `usage`.
      return util.batch_gather(sorted_allocation, inverse_indices)

  @property
  def state_size(self):
    """Returns the shape of the state tensor."""
    return tf.TensorShape([self._memory_size])
DNC study report.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/iamsoroush/DeepEEGAbstractor/blob/master/st_dfb_tests_8s_hmdd.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="_JCUvRGHlz5M" colab_type="code" colab={}
#@title # Clone the repository and upgrade Keras {display-mode: "form"}
# !git clone https://github.com/iamsoroush/DeepEEGAbstractor.git
# !pip install --upgrade keras

# + id="7_sHB4_nsc8J" colab_type="code" colab={}
# !rm -r DeepEEGAbstractor

# + id="-dMfhjVypjFa" colab_type="code" colab={}
#@title # Imports {display-mode: "form"}
import os
import pickle
import sys
sys.path.append('DeepEEGAbstractor')

import numpy as np

from src.helpers import CrossValidator
from src.models import DeepEEGAbstractor
from src.dataset import DataLoader, Splitter, FixedLenGenerator

from google.colab import drive
drive.mount('/content/gdrive')

# + id="gwzjFnB5ptIL" colab_type="code" colab={}
#@title # Set data path {display-mode: "form"}
#@markdown ---
#@markdown Type in the folder in your google drive that contains numpy _data_ folder:
parent_dir = 'soroush'  #@param {type:"string"}

gdrive_path = os.path.abspath(os.path.join('gdrive/My Drive', parent_dir))
data_dir = os.path.join(gdrive_path, 'data')
cv_results_dir = os.path.join(gdrive_path, 'cross_validation')
if not os.path.exists(cv_results_dir):
    os.mkdir(cv_results_dir)
print('Data directory: ', data_dir)
print('Cross validation results dir: ', cv_results_dir)

# + id="_NdDuoHWpwe4" colab_type="code" cellView="form" colab={}
#@title ## Set Parameters
# Settings shared by every experiment below.
batch_size = 80          # training batch size
epochs = 100             # training epochs per fold
k = 10                   # folds per cross-validation run
t = 10                   # repetitions of the k-fold CV
instance_duration = 8    # seconds per EEG instance
instance_overlap = 2     # seconds of overlap between instances
sampling_rate = 256      # Hz
n_channels = 20          # EEG channels
task = 'hmdd'
data_mode = 'cross_subject'


def run_experiment(model_name, **model_kwargs):
    """Run one repeated k-fold cross-validation experiment.

    The original notebook repeated this cell near-verbatim for every model
    variant; only the model name and the `DeepEEGAbstractor` keyword
    overrides ever changed, so that shared boilerplate lives here once.

    Args:
        model_name: Name used for the model and its results directory.
        **model_kwargs: Extra keyword arguments forwarded to
            `DeepEEGAbstractor` (e.g. `normalization='batch'`).

    Returns:
        The scores returned by `CrossValidator.do_cv`.
    """
    train_generator = FixedLenGenerator(batch_size=batch_size,
                                        duration=instance_duration,
                                        overlap=instance_overlap,
                                        sampling_rate=sampling_rate,
                                        is_train=True)
    # Evaluation always runs with small fixed batches of 8.
    test_generator = FixedLenGenerator(batch_size=8,
                                       duration=instance_duration,
                                       overlap=instance_overlap,
                                       sampling_rate=sampling_rate,
                                       is_train=False)
    validator = CrossValidator(task=task,
                               data_mode=data_mode,
                               main_res_dir=cv_results_dir,
                               model_name=model_name,
                               epochs=epochs,
                               train_generator=train_generator,
                               test_generator=test_generator,
                               t=t,
                               k=k,
                               channel_drop=True)
    dataloader = DataLoader(data_dir, task, data_mode, sampling_rate,
                            instance_duration, instance_overlap)
    data, labels = dataloader.load_data()
    input_shape = (sampling_rate * instance_duration, n_channels)
    model_obj = DeepEEGAbstractor(input_shape, model_name=model_name,
                                  **model_kwargs)
    return validator.do_cv(model_obj, data, labels)


# One call per architecture variant (previously one near-identical cell each),
# in the original execution order.

# + id="2aqgfC5aqCkP" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Default params
scores = run_experiment('Deep-EEG-Abstractor')

# + id="__tuaOtB6u-x" colab_type="code" colab={} cellView="form"
#@title ## DeepEEGAbstractor - Without WN
scores = run_experiment('Deep-EEG-Abstractor-NoWN', weight_norm=False)

# + id="2GjXcRU6qeem" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - BatchNormalization
scores = run_experiment('Deep-EEG-Abstractor-BN', normalization='batch')

# + id="8joDeUgoqn3a" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - InstanceNormalization
scores = run_experiment('Deep-EEG-Abstractor-IN', normalization='instance')

# + id="Dwh-LF1q5QUr" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Deeper
scores = run_experiment('Deep-EEG-Abstractor-Deeper', n_kernels=(6, 6, 6, 4, 4))

# + id="OzpnGNueqy58" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Wider
scores = run_experiment('Deep-EEG-Abstractor-Wider', n_kernels=(6, 6, 8, 10))

# + id="8-Qoh8U6rH9K" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Attv1
scores = run_experiment('Deep-EEG-Abstractor-Attv1', attention='v1')

# + id="nVwDca8GrTeY" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Attv2
scores = run_experiment('Deep-EEG-Abstractor-Attv2', attention='v2')

# + id="3DhxG62HrWlI" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - Attv3
scores = run_experiment('Deep-EEG-Abstractor-Attv3', attention='v3')

# + id="ohouDCANrduQ" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - HDropout
scores = run_experiment('Deep-EEG-Abstractor-HDropout',
                        spatial_dropout_rate=0.2, dropout_rate=0.5)

# + id="J48vIT0nrn1z" colab_type="code" cellView="form" colab={}
#@title ## DeepEEGAbstractor - InputDropout
scores = run_experiment('Deep-EEG-Abstractor-InputDropout', input_dropout=True)
st_dfb_tests_8s_hmdd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Features/Attributes # # 1. Rainfall # 2. Temperature # 3. Vegetation # 4. Potential evapotranspiration # 5. Length of growing period as a function of rainfall. # 6. Soil storage # 7. Soil scape # 8. Soil type # 9. Current season # 10. Companion crops # 11. Time for plant to grow # # Source: [How to determine the kinds of crops suitable to different types of soil? - ResearchGate](https://www.researchgate.net/post/How_to_determine_the_kinds_of_crops_suitable_to_different_types_of_soil) # ## Classes/Labels/Crops # # CEREALS # # 1. Rice # 2. Jowar (Cholam) # 3. Bajra (Cumbu) # 4. Ragi # # # PULSES # # 9. Bengalgram # 10. Redgram # # Source: [Season and Crop Report of Tamil Nadu](http://www.tn.gov.in/crop/AreaProduction.htm) # # which gives us 6 classes. # + import numpy as np import pandas as pd from sklearn.datasets import make_classification # - X, y = make_classification(n_samples=(24*60*60*7), n_features=11, n_classes=6,n_informative=5, random_state=42) pd.Series(y).value_counts() X.shape df = pd.DataFrame(X) df['class'] = y df.head() df.shape df.to_csv('kaala-init.csv', header=None, index=False) # ## Building the model. 
# helper tools from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score # ## Applying PCA from sklearn.decomposition import PCA pca = PCA(n_components=5) pca.fit(X) X_dash = pca.transform(X) X_train, X_test, y_train, y_test = train_test_split(X_dash, y, test_size=0.2, random_state=69) from sklearn.neighbors import KNeighborsClassifier model = KNeighborsClassifier(n_neighbors = 9) model.fit(X_train, y_train) y_pred = model.predict(X_test) print (accuracy_score(y_test, y_pred)) seed = np.random.randint(0, 1000) seed X_test[seed] print(model.predict_proba(X_test[seed].reshape(1, -1))) from sklearn.neighbors import KNeighborsClassifier model = KNeighborsClassifier(n_neighbors = 30) model.fit(X_train, y_train) y_pred = model.predict(X_test) print (accuracy_score(y_test, y_pred)) print(model.predict_proba(X_test[seed].reshape(1, -1))) # ## Now testing with random sample from the dataframe df.iloc[[seed]] # selects random observation from the df pca.transform(df.iloc[[seed], :-1]) # passing only the features of random observation to the PCA to reduce it to 5 componenets print(model.predict_proba(pca.transform(df.iloc[[seed], :-1])))
kaala-mark2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # FileManager # # The `FileManager` widget handles the loading and parsing of graphs from files. # + import ipywidgets as W import pandas as pd import traitlets as T from ipyradiant import FileManager, PathLoader, UpLoader from ipyradiant.loader.util import SUFFIX_FORMAT_MAP # - FileManager() # ## Loaders # ## UpLoader # # The default `UpLoader` wraps up the # [FileUpload](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html#File-Upload) # widget, and limits the options to the [formats](#Formats) which `rdflib` (and plugins) # can parse. # ## PathLoader # # The `PathLoader` offers all of the parseable files in a given path, in this case the # [example data](./data/README.md). FileManager(loader=PathLoader(path="data")) # ## Formats # The `FileManager` accepts a number of # [formats](https://rdflib.readthedocs.io/en/stable/intro_to_parsing.html): columns = ["file extension", "format"] pd.DataFrame(SUFFIX_FORMAT_MAP.items(), columns=columns).sort_values( columns[::-1] ).set_index(["format"]).stack().apply(pd.Series)
examples/FileManager.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.9.6 64-bit ('python@3.9')
#     name: python3
# ---

# # Approximation simulations [INCOMPLETE]
#
# One of the limitations of creating sufficient statistics via batch updating rather than finding the statistics all at once is that they are approximate due to rounding errors.
#
# How serious of a problem is this?
#
# Let's consider this problem through two sets of simulations:
#
# 1. How does the approximation error change as the number of batches increases?
#
# 2. How does the approximation error change as the variance of the samples increases?
#

# ## Many batches

# ## High variance samples
examples/approximation-simulations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Check `GDS` R stack

# ## Library imports
# Each library() call below will error if the package is missing, so simply
# running this cell verifies the whole geographic-data-science stack.

library(arm)
library(feather)
library(geojsonio)
library(ggmap)
library(gstat)
library(hexbin)
library(igraph)
library(kableExtra)
library(knitr)
library(lidR)
library(lme4)
library(mapdata)
library(maptools)
library(mapview)
library(ncdf4)
library(nlme)
library(plyr)
library(proj4)
library(RColorBrewer)
library(RandomFields)
library(RNetCDF)
library(randomForest)
library(raster)
library(RCurl)
library(reshape2)
library(rgdal)
library(rgeos)
library(rmarkdown)
library(RODBC)
library(RSQLite)
library(sf)
library(shiny)
library(sp)
library(spacetime)
library(spatstat)
library(spdep)
library(splancs)
# Tidyverse
library(ggplot2)
library(tibble)
library(tidyr)
library(readr)
library(purrr)
library(stringr)
library(forcats)
library(tmap)
library(TraMineR)
library(tufte)

# ## Test

# Smoke-test sf: read a remote layer (Figshare download) and plot its geometry.
db = st_read('https://ndownloader.figshare.com/files/20232174')

plot(db$geom)

# +
# Smoke-test tmap with its bundled example data: a world income choropleth
# overlaid with metro-population bubbles coloured by growth rate.
data(World, metro)
metro$growth <- (metro$pop2020 - metro$pop2010) / (metro$pop2010 * 10) * 100

mapWorld <- tm_shape(World) +
    tm_polygons("income_grp", palette="-Blues", contrast=.7, id="name", title="Income group") +
    tm_shape(metro) +
    tm_bubbles("pop2010", col = "growth",
               border.col = "black", border.alpha = .5,
               style="fixed", breaks=c(-Inf, seq(0, 6, by=2), Inf),
               palette="-RdYlBu", contrast=1,
               title.size="Metro population",
               title.col="Growth rate (%)",
               id="name") +
    tm_style_gray() +
    tm_format_World()

mapWorld
check_r_stack.ipynb