repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
massimo-nocentini/PhD
notebooks/binomial-transform-applied-to-fibonacci-numbers.ipynb
apache-2.0
import sympy from sympy import * from sympy.abc import x, n, z, t, k init_printing() # for nice printing, a-la' TeX %run "sums.py" # duplicated code, put it into "sums.py" def expand_sum_in_eq(eq_term): lhs, rhs = eq_term.lhs, eq_term.rhs return Eq(lhs, expand_Sum(rhs)) f = IndexedBase('f') fibs = {f[i]:fibonacci(i) for i in range(100)} transforming_matrix = Matrix([ [1,0,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0,0], [1,1,1,0,0,0,0,0,0], [-1,0,1,1,0,0,0,0,0], [1,0,0,1,1,0,0,0,0], [-1,0,0,0,1,1,0,0,0], [1,0,0,0,0,1,1,0,0], [-1,0,0,0,0,0,1,1,0], [1,0,0,0,0,0,0,1,1]]) transforming_matrix transforming_matrix**(-1) def gf(t): return t + 1/(1+t) gf(t).series(n=10) def h(t): return t*(1+2*t+t**2)/(1+t+t**2) (gf(t)*h(t)**2).series(n=10) pascal_matrix = Matrix([ [1,0,0,0,0,0,0,0,0], [1,1,0,0,0,0,0,0,0], [1,2,1,0,0,0,0,0,0], [1,3,3,1,0,0,0,0,0], [1,4,6,4,1,0,0,0,0], [1,5,10,10,5,1,0,0,0], [1,6,15,20,15,6,1,0,0], [1,7,21,35,35,21,7,1,0], [1,8,28,56,70,56,28,8,1]]) pascal_matrix catalan_matrix = Matrix([ [1,0,0,0,0,0,0,0,0], [1,1,0,0,0,0,0,0,0], [2,2,1,0,0,0,0,0,0], [5,5,3,1,0,0,0,0,0], [14,14,9,4,1,0,0,0,0], [42,42,28,14,5,1,0,0,0], [132,132,90,48,20,6,1,0,0], [429,429,297,165,75,27,7,1,0], [1430,1430,1001,572,275,110,35,8,1]]) catalan_matrix catalan_inverse_matrix = Matrix([ [1,0,0,0,0,0,0,0,0], [1,0,0,0,0,0,0,0,0], [0,1,0,0,0,0,0,0,0], [0,-1,1,0,0,0,0,0,0], [0,0,-2,1,0,0,0,0,0], [0,0,1,-3,1,0,0,0,0], [0,0,0,3,-4,1,0,0,0], [0,0,0,-1,6,-5,1,0,0], [0,0,0,0,-4,10,-6,1,0]]) catalan_inverse_matrix odd_transformed_matrix = pascal_matrix * transforming_matrix odd_transformed_matrix transforming_matrix * pascal_matrix (catalan_matrix**(-1) )*odd_transformed_matrix catalan_inverse_matrix * odd_transformed_matrix fib_matrix = Matrix([fibonacci(i) for i in range(9)]) fib_matrix_sym = Matrix([f[i] for i in range(9)]) fib_matrix, fib_matrix_sym a = Wild('a') std_prod = transforming_matrix * fib_matrix sym_prod = transforming_matrix * fib_matrix_sym std_prod, sym_prod, sym_prod.subs({f[0]:0}), 
sym_prod.subs({f[0]:0}).replace(f[a]+f[a+1],f[a+2]) rhs=pascal_matrix * transforming_matrix * fib_matrix rhs_sym = pascal_matrix * sym_prod rhs, rhs_sym a_range = range(1,18,2) lhs=Matrix([fibonacci(i) for i in a_range]) - Matrix([1 for i in range(9)]) lhs_sym=Matrix([f[i] for i in a_range]) - Matrix([1 for i in range(9)]) lhs, lhs_sym Eq(lhs,rhs) eq_sym = Eq(lhs_sym, rhs_sym) eq_sym f_minus1_vector = Matrix([f[-1] for i in range(9)]) one_plus_eq = eq_sym#.subs(-Integer(1),f[-1]) one_plus_eq = Eq(one_plus_eq.lhs + f_minus1_vector, one_plus_eq.rhs + f_minus1_vector) one_plus_eq = Eq(one_plus_eq.lhs.subs(f[-1],1), one_plus_eq.rhs) Eq(one_plus_eq.lhs, one_plus_eq.rhs.subs(fibs)) fib0_term = f[0] eq_sym.subs(fib0_term, fibs[fib0_term]) """ Explanation: An application of Binomial transform In this notebook we apply the Binomial transform to the Fibonacci numbers in order to substain the main notebook where these numbers are studied in depth. End of explanation """ gen_odd_fibs = Eq(f[2*n+1]-1, Sum(binomial(n+1, k+1)*f[k], (k,1,n))) Eq(gen_odd_fibs, Sum(binomial(n+1, n-k)*f[k], (k,1,n))) expand_sum_in_eq(gen_odd_fibs.subs(n, 8)) eq_sym.subs(fibs) eq_17 = Eq(f[17],f[-1] + rhs_sym[-1]) eq_18_shift = Eq(f[n], f[n-18]+8*f[n-17]+36*f[n-16]+84*f[n-15]+126*f[n-14]+126*f[n-13]+84*f[n-12]+36*f[n-11]+9*f[n-10]+f[n-9]) eq_17, eq_18_shift [eq_18_shift.subs(n,i).lhs.subs(fibs) - eq_18_shift.subs(n,i).rhs.subs(fibs) for i in range(18,32)] """ Explanation: Since coefficient in the triangle on the rhs are a part of Pascal triangle, namely A104712, the following is a generalization: $$ f_{2n+1} - 1 = \sum_{k=1}^{n}{{{n+1}\choose{k+1}}f_{k}} $$ End of explanation """ from itertools import accumulate to_accumulate = rhs_sym + ones(9,1)*f[-1] even_rhs = Matrix(list(accumulate(to_accumulate, lambda folded, current_row: Add(folded, current_row) ))) even_lhs = Matrix([f[i] for i in range(2,19,2)]) even_fibs_matrix_eq = Eq(even_lhs, even_rhs) even_fibs_matrix_eq even_transformed_matrix = 
Matrix([ [1,0,0,0,0,0,0,0,0], [2,1,0,0,0,0,0,0,0], [4,4,1,0,0,0,0,0,0], [7,10,5,1,0,0,0,0,0], [11,20,15,6,1,0,0,0,0], [16,35,35,21,7,1,0,0,0], [22,56,70,56,28,8,1,0,0], [29,84,126,126,84,36,9,1,0], [37,120,210,252,210,120,45,10,1]]) even_transformed_matrix even_transforming_matrix = (pascal_matrix**(-1))*even_transformed_matrix even_transforming_matrix (catalan_matrix**(-1) )*even_transformed_matrix catalan_inverse_matrix * even_transformed_matrix even_transforming_matrix * fib_matrix_sym even_vector_eq_sym = Eq(even_lhs - Matrix(list(range(1,10))), pascal_matrix * even_transforming_matrix * fib_matrix_sym) even_vector_eq_sym even_vector_eq_sym.subs(fib0_term, fibs[fib0_term]) """ Explanation: again, fibonacci numbers, A000045. End of explanation """ gen_even_fibs = Eq(f[2*n]-n, Sum(binomial(n+1, k+2)*f[k], (k,1,n-1))) Eq(gen_even_fibs, Sum(binomial(n+1, n-k-1)*f[k], (k,1,n-1))) expand_sum_in_eq(gen_even_fibs.subs(n, 9)) even_fibs_matrix_eq_minus1_appear = even_fibs_matrix_eq.subs(fibs) Eq(even_fibs_matrix_eq.lhs, even_fibs_matrix_eq_minus1_appear, evaluate=False) """ Explanation: Since coefficient in the triangle on the rhs are a part of Pascal triangle, namely A104713, the following is a generalization: $$ f_{2n} - n = \sum_{k=1}^{n-1}{{{n+1}\choose{k+2}}f_{k}} $$ End of explanation """ list(accumulate([fibonacci(2*i+1)-1 for i in range(21)])) def n_gf(t): return t/(1-t)**2 n_gf(t).series(n=20) def odd_fib_gf(t): return t**2/((1-t)**2*(1-3*t+t**2)) odd_fib_gf(t).series(n=20) composite_odd_fibs_gf = n_gf(t)+odd_fib_gf(t) composite_odd_fibs_gf.factor(), composite_odd_fibs_gf.series(n=20) def odd_integers_gf(t): return ((n_gf(t)+n_gf(-t))/2).simplify() odd_integers_gf(t).series(n=20) # here is the error: we should use the generating function of F(2n+1) instead of F(n) as done here! 
def fib_gf(t): return t/(1-t-t**2) fib_gf(odd_integers_gf(t)).series(n=20) def even_fibs_gf(t): return n_gf(t) + fib_gf(t)/(1-t) even_fibs_gf(t).series(n=10) even_fibs_matrix_eq_minus1_appear.subs(f[-1],1) eq_17 = Eq(f[n], 8*f[n-17]+29*f[n-16]+84*f[n-15]+126*f[n-14]+126*f[n-13]+84*f[n-12]+36*f[n-11]+9*f[n-10]+f[n-9]) eq_17 [eq_17.subs(n,i).lhs.subs(fibs) - eq_17.subs(n,i).rhs.subs(fibs) for i in range(17,31)] """ Explanation: summands on the rhs form a known sequence A054452. End of explanation """
ocefpaf/intro_python_notebooks
01-Jupyter-Basics.ipynb
mit
print("Olá alunos") """ Explanation: Aula 01 - IPython (e o notebook) Objetivos Introdução ao IPython e Jupyter Notebook Navegação básica Comandos "mágicos" O que é IPython (e ~~IPython~~ Jupyter Notebook)? Um ambiente para interagir com código O notebook é uma ferramenta para literate computing Combina narrativa, código e resultados IPython no terminal O terminal básico do IPython digite: ipython ```ipython Python 3.4.1 (default, May 23 2014, 17:48:28) [GCC] Type "copyright", "credits" or "license" for more information. IPython 3.0.0-dev -- An enhanced Interactive Python. ? -> Introduction and overview of IPython's features. %quickref -> Quick reference. help -> Python's own help system. object? -> Details about 'object', use 'object??' for extra details. ``` Rodar código (Acompanhem pelo terminal) End of explanation """ float? """ Explanation: Pedir ajuda End of explanation """ import seawater as sw sw.f?? *int*? """ Explanation: Ajuda com ?? End of explanation """ enumerate() """ Explanation: Ajuda com tab End of explanation """ %quickref """ Explanation: IPython reference card: End of explanation """ import seawater as sw sw. """ Explanation: Tab completion É uma forma conveniente, especialmente para atributos, de explorar a estrutura dos objetos que você está lidando. 
End of explanation """ 2+10 _ + 10 """ Explanation: Interactiviade: input, output, history End of explanation """ 10+20; _ """ Explanation: Output Você pode suprimir a saída e o cache usando ; no final do comando: End of explanation """ Out _25 == Out[25] """ Explanation: Histórico do output A saída é guardada em _N e Out[N]: End of explanation """ print(u'último: {}'.format(_)) print(u'penúltimo: {}'.format(__)) print(u'antepenúltimo: {}'.format(___)) """ Explanation: Os últimos 3 podem ser acessados rapidamente com: End of explanation """ In[11] print(u'último: {}'.format(_i)) print(u'penúltimo: {}'.format(_ii)) print(u'antepenúltimo: {}'.format(_iii)) %history """ Explanation: Histórico do input End of explanation """ !pwd files = !ls print(u"Conteúdo do diretório:") print(files) !echo $files !echo {files[0].upper()} """ Explanation: Acesando o sistema operacional Nota: os comandos abaixo funcionam em Linux e Macs, mas não em Windows. End of explanation """ from IPython.nbformat import current with open('01-Jupyter-Basics.ipynb') as f: nb = current.read(f, 'json') nb.worksheets[0].cells[0] """ Explanation: Notebook Interface Dashboard Notebook e células Menu Toolbar Típos de células Código Markdown Raw text Heading $\LaTeX$ equations $$ \frac{D\mathbf{v}}{Dt} = -\frac{1}{\rho}\nabla p - 2\Omega \times \mathbf{v} + \mathbf{g} + \mathbf{R_r} $$ Atalhos de teclado Ctrl-Enter para rodar Shift-Enter para rodar e ir para a próxima Os outros atalhos começam com: Ctrl-m ? Examplo Ctrl-m h Trabalhando com Notebooks IPython Notebooks são arquivos .ipynb Tudo dentro do arquivo são dados JSONs O servidor dos Notebooks abre o que está no diretório Para começar digite: ipython notebook End of explanation """ %magic %lsmagic """ Explanation: Funções mágicas As magic functions são um conjunto de comandos chamados através do um ou dois sinas de %. Alguns ainda implementam opções extras através de argumento --, parêntesis ou vírgulas. 
As duas razões por trás das funções magic são: Fornecer um namespace ortogonal para controlar o próprio IPython e expor funcionalidades do sistema. Para fornecer um mode de comandos que requer menos digitação. End of explanation """ %timeit range(10) %%timeit range(10) range(100) """ Explanation: magics de linha e de célula: End of explanation """ for i in range(5): size = i*100 print('size: {}'.format(size)) %timeit range(size) """ Explanation: magics de linha podem ser usadas em loops: End of explanation """ %%bash echo "My shell is:" $SHELL echo "User:" $USER """ Explanation: Magics podem ativar outra sintaxe que não necessariamente é python: End of explanation """ %%file ctd.dat S,T,P 35.5,12.5,0 34.8,10.6,100 33.45,9,1000 !cat ctd.dat from pandas import read_csv ctd = read_csv('ctd.dat', index_col='P') ctd 1/0 %debug %whos """ Explanation: Outra magic útil: criar um arquivo diretamente do notebook: End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.plot(ctd['S'], ctd['T'], 'ro') """ Explanation: Plotando no notebook End of explanation """
mmckerns/tutmom
solutions.ipynb
bsd-3-clause
import cvxopt as cvx from cvxopt import solvers as cvx_solvers Q = cvx.matrix([[0.,0.],[0.,0.]]) p = cvx.matrix([-1., 4.]) G = cvx.matrix([[-3., 1., 0.],[1., 2., -1.]]) h = cvx.matrix([6., 4., 3.]) sol = cvx_solvers.qp(Q, p, G, h) print(sol['x']) """ Explanation: Solutions to exercises EXERCISE: Solve the constrained programming problem by any of the means above. Minimize: f = -1x[0] + 4x[1] Subject to: <br> -3x[0] + 1x[1] <= 6 <br> 1x[0] + 2x[1] <= 4 <br> x[1] >= -3 <br> where: -inf <= x[0] <= inf End of explanation """ import scipy.optimize as opt import mystic.models result = opt.minimize(mystic.models.zimmermann, [10., 1.], method='powell') print(result.x) """ Explanation: EXERCISE: Use any of the solvers we've seen thus far to find the minimum of the zimmermann function (i.e. use mystic.models.zimmermann as the objective). Use the bounds suggested below, if your choice of solver allows it. End of explanation """ import scipy.optimize as opt import mystic.models result = opt.minimize(mystic.models.fosc3d, [-5., 0.5], method='powell') print(result.x) """ Explanation: EXERCISE: Do the same for the fosc3d function found at mystic.models.fosc3d, using the bounds suggested by the documentation, if your chosen solver accepts bounds or constraints. End of explanation """ import mystic import mystic.models result = mystic.solvers.fmin_powell(mystic.models.peaks, [0., -2.], bounds=[(-5.,5.)]*2) print(result) """ Explanation: EXERCISE: Use mystic to find the minimum for the peaks test function, with the bound specified by the mystic.models.peaks documentation. End of explanation """ import numpy as np import scipy.stats as stats from mystic.solvers import fmin_powell from mystic import reduced # Define the function to fit. 
def function(coeffs, x): a,b,f,phi = coeffs return a * np.exp(-b * np.sin(f * x + phi)) # Create a noisy data set around the actual parameters true_params = [3, 2, 1, np.pi/4] print("target parameters: {}".format(true_params)) x = np.linspace(0, 2*np.pi, 25) exact = function(true_params, x) noisy = exact + 0.3*stats.norm.rvs(size=len(x)) # Define an objective that fits against the noisy data @reduced(lambda x,y: abs(x)+abs(y)) def objective(coeffs, x, y): return function(coeffs, x) - y # Use curve_fit to estimate the function parameters from the noisy data. initial_guess = [1,1,1,1] args = (x, noisy) estimated_params = fmin_powell(objective, initial_guess, args=args) print("solved parameters: {}".format(estimated_params)) """ Explanation: EXERCISE: Use mystic to do a fit to the noisy data in the scipy.optimize.curve_fit example (the least squares fit). End of explanation """ # Differential Evolution solver from mystic.solvers import DifferentialEvolutionSolver2 # Chebyshev polynomial and cost function from mystic.models.poly import chebyshev8, chebyshev8cost from mystic.models.poly import chebyshev8coeffs # tools from mystic.termination import VTR, CollapseAt, Or from mystic.strategy import Best1Exp from mystic.monitors import VerboseMonitor from mystic.tools import random_seed from mystic.math import poly1d import numpy as np if __name__ == '__main__': print("Differential Evolution") print("======================") ndim = 9 random_seed(123) # configure monitor stepmon = VerboseMonitor(50,50) # build a constraints function def constraints(x): x[-1] = 1. 
return np.round(x) stop = Or(VTR(0.0001), CollapseAt(0.0, generations=2)) # use DE to solve 8th-order Chebyshev coefficients npop = 10*ndim solver = DifferentialEvolutionSolver2(ndim,npop) solver.SetRandomInitialPoints(min=[-100]*ndim, max=[100]*ndim) solver.SetGenerationMonitor(stepmon) solver.SetConstraints(constraints) solver.enable_signal_handler() solver.Solve(chebyshev8cost, termination=stop, strategy=Best1Exp, \ CrossProbability=1.0, ScalingFactor=0.9) solution = solver.Solution() # use monitor to retrieve results information iterations = len(stepmon) cost = stepmon.y[-1] print("Generation %d has best Chi-Squared: %f" % (iterations, cost)) # use pretty print for polynomials print(poly1d(solution)) # compare solution with actual 8th-order Chebyshev coefficients print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs)) """ Explanation: EXERCISE: Solve the chebyshev8.cost example exactly, by applying the knowledge that the last term in the chebyshev polynomial will always be be one. Use numpy.round or mystic.constraints.integers or to constrain solutions to the set of integers. Does using mystic.suppressed to supress small numbers accelerate the solution? 
End of explanation """ "Pressure Vessel Design" def objective(x): x0,x1,x2,x3 = x return 0.6224*x0*x2*x3 + 1.7781*x1*x2**2 + 3.1661*x0**2*x3 + 19.84*x0**2*x2 bounds = [(0,1e6)]*4 # with penalty='penalty' applied, solution is: xs = [0.72759093, 0.35964857, 37.69901188, 240.0] ys = 5804.3762083 from mystic.constraints import as_constraint from mystic.penalty import quadratic_inequality def penalty1(x): # <= 0.0 return -x[0] + 0.0193*x[2] def penalty2(x): # <= 0.0 return -x[1] + 0.00954*x[2] def penalty3(x): # <= 0.0 from math import pi return -pi*x[2]**2*x[3] - (4/3.)*pi*x[2]**3 + 1296000.0 def penalty4(x): # <= 0.0 return x[3] - 240.0 @quadratic_inequality(penalty1, k=1e12) @quadratic_inequality(penalty2, k=1e12) @quadratic_inequality(penalty3, k=1e12) @quadratic_inequality(penalty4, k=1e12) def penalty(x): return 0.0 if __name__ == '__main__': from mystic.solvers import diffev2 from mystic.math import almostEqual result = diffev2(objective, x0=bounds, bounds=bounds, penalty=penalty, npop=40, gtol=500, disp=True, full_output=True) print(result[0]) """ Explanation: EXERCISE: Replace the symbolic constraints in the following "Pressure Vessel Design" code with explicit penalty functions (i.e. use a compound penalty built with mystic.penalty.quadratic_inequality). End of explanation """ def objective(x): x0,x1 = x return 2*x0**2 + x1**2 + x0*x1 + x0 + x1 bounds = [(0.0, None),(0.0, None)] # with penalty='penalty' applied, solution is: xs = [0.25, 0.75] ys = 1.875 from mystic.math.measures import normalize def constraint(x): # impose exactly return normalize(x, 1.0) if __name__ == '__main__': from mystic.solvers import diffev2, fmin_powell result = diffev2(objective, x0=bounds, bounds=bounds, npop=40, constraints=constraint, disp=False, full_output=True) print(result[0]) """ Explanation: EXERCISE: Solve the cvxopt "qp" example with mystic. Use symbolic constaints, penalty functions, or constraints operators. If you get it quickly, do all three methods. 
End of explanation """ from mystic.termination import VTR, ChangeOverGeneration, And, Or stop = Or(And(VTR(), ChangeOverGeneration()), VTR(1e-8)) from mystic.models import rosen from mystic.monitors import VerboseMonitor from mystic.solvers import DifferentialEvolutionSolver2 from pathos.pools import ThreadPool if __name__ == '__main__': solver = DifferentialEvolutionSolver2(3,40) solver.SetRandomInitialPoints([-10,-10,-10],[10,10,10]) solver.SetGenerationMonitor(VerboseMonitor(10)) solver.SetMapper(ThreadPool().map) #NOTE: evaluation of objective in parallel solver.SetTermination(stop) solver.SetObjective(rosen) solver.SetStrictRanges([-10,-10,-10],[10,10,10]) solver.SetEvaluationLimits(generations=600) solver.Solve() print(solver.bestSolution) """ Explanation: EXERCISE: Convert one of our previous mystic examples to use parallel computing. Note that if the solver has a SetMapper method, it can take a parallel map. End of explanation """
edeno/Jadhav-2016-Data-Analysis
notebooks/2017_06_19_Test_Spectral_Multiple_Sessions.ipynb
gpl-3.0
import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import xarray as xr from src.analysis import (decode_ripple_clusterless, detect_epoch_ripples, ripple_triggered_connectivity, connectivity_by_ripple_type) from src.data_processing import (get_LFP_dataframe, make_tetrode_dataframe, save_ripple_info, save_tetrode_info) from src.parameters import (ANIMALS, SAMPLING_FREQUENCY, MULTITAPER_PARAMETERS, FREQUENCY_BANDS, RIPPLE_COVARIATES) epoch_keys = [('HPa', 6, 2), ('HPa', 6, 4)] def estimate_ripple_coherence(epoch_key): ripple_times = detect_epoch_ripples( epoch_key, ANIMALS, sampling_frequency=SAMPLING_FREQUENCY) tetrode_info = make_tetrode_dataframe(ANIMALS)[epoch_key] tetrode_info = tetrode_info[ ~tetrode_info.descrip.str.endswith('Ref').fillna(False)] lfps = {tetrode_key: get_LFP_dataframe(tetrode_key, ANIMALS) for tetrode_key in tetrode_info.index} # Compare all ripples for parameters_name, parameters in MULTITAPER_PARAMETERS.items(): ripple_triggered_connectivity( lfps, epoch_key, tetrode_info, ripple_times, parameters, FREQUENCY_BANDS, multitaper_parameter_name=parameters_name, group_name='all_ripples') # save_tetrode_info(epoch_key, tetrode_info) """ Explanation: Purpose The purpose of this notebook is to work out the code for how to combine and average tetrode pairs over brain areas over multiple sessions End of explanation """ for epoch_key in epoch_keys: estimate_ripple_coherence(epoch_key) """ Explanation: Make sure we can get the ripple-triggered connectivity for two epochs End of explanation """ from glob import glob def read_netcdfs(files, dim, transform_func=None, group=None): def process_one_path(path): # use a context manager, to ensure the file gets closed after use with xr.open_dataset(path, group=group) as ds: # transform_func should do some sort of selection or # aggregation if transform_func is not None: ds = transform_func(ds) # load all data from the transformed dataset, to ensure we can # use it after 
closing each original file ds.load() return ds paths = sorted(glob(files)) datasets = [process_one_path(p) for p in paths] return xr.concat(datasets, dim) combined = read_netcdfs('../Processed-Data/*.nc', dim='session', group='4Hz_Resolution/all_ripples/coherence', transform_func=None) """ Explanation: Now figure out how to combine the epochs End of explanation """ tetrode_info = pd.concat(make_tetrode_dataframe(ANIMALS).values()) tetrode_info = tetrode_info[ ~tetrode_info.descrip.str.endswith('Ref').fillna(False)] tetrode_info = tetrode_info.loc[ (tetrode_info.animal=='HPa') & (tetrode_info.day == 6) & (tetrode_info.epoch.isin((2, 4)))] """ Explanation: Use tetrode info to index into the epoch arrays to select out the relevant brain areas End of explanation """ coh = ( combined .sel( tetrode1=tetrode_info.query('area == "CA1"').tetrode_id.values, tetrode2=tetrode_info.query('area == "PFC"').tetrode_id.values) .coherence_magnitude .mean(dim=['tetrode1', 'tetrode2', 'session'])) fig, axes = plt.subplots(2, 1, figsize=(12, 9)) coh.sel(frequency=slice(0, 30)).plot(x='time', y='frequency', ax=axes[0]); coh.sel(frequency=slice(30, 125)).plot(x='time', y='frequency', ax=axes[1]); coh = ( combined .sel( tetrode1=tetrode_info.query('area == "iCA1"').tetrode_id.values, tetrode2=tetrode_info.query('area == "PFC"').tetrode_id.values) .coherence_magnitude .mean(dim=['tetrode1', 'tetrode2', 'session'])) fig, axes = plt.subplots(2, 1, figsize=(12, 9)) coh.sel(frequency=slice(0, 30)).plot(x='time', y='frequency', ax=axes[0]); coh.sel(frequency=slice(30, 125)).plot(x='time', y='frequency', ax=axes[1]); coh_diff = ((combined - combined.isel(time=0)) .sel( tetrode1=tetrode_info.query('area == "iCA1"').tetrode_id.values, tetrode2=tetrode_info.query('area == "PFC"').tetrode_id.values) .coherence_magnitude .mean(dim=['tetrode1', 'tetrode2', 'session'])) fig, axes = plt.subplots(2, 1, figsize=(12, 9)) coh_diff.sel(frequency=slice(0, 30)).plot(x='time', y='frequency', ax=axes[0]); 
coh_diff.sel(frequency=slice(30, 125)).plot(x='time', y='frequency', ax=axes[1]); """ Explanation: Show that the indexing works by getting all the CA1-PFC tetrode pairs and averaging the coherence over the two epochs to get the average CA1-PFC coherence. In this case we show two plots of two different frequency bands to show the flexibility of using the xarray package. End of explanation """ from functools import partial def select_brain_areas(dataset, area1='', area2=''): if 'tetrode1' in dataset.coords: return dataset.sel( tetrode1=dataset.tetrode1[dataset.brain_area1==area1], tetrode2=dataset.tetrode2[dataset.brain_area2==area2] ) else: # The dataset is power return dataset.sel( tetrode=dataset.tetrode[dataset.brain_area==area1], ) CA1_PFC = partial(select_brain_areas, area1='CA1', area2='PFC') combined = read_netcdfs('../Processed-Data/*.nc', dim='session', group='4Hz_Resolution/all_ripples/coherence', transform_func=CA1_PFC) combined print(combined.brain_area1) print(combined.brain_area2) coh = combined.mean(['tetrode1', 'tetrode2', 'session']).coherence_magnitude fig, axes = plt.subplots(2, 1, figsize=(12, 9)) coh.sel(frequency=slice(0, 30)).plot(x='time', y='frequency', ax=axes[0]); coh.sel(frequency=slice(30, 125)).plot(x='time', y='frequency', ax=axes[1]); """ Explanation: Alternatively we can use the transform function in the read_netcdf function to select brain areas End of explanation """
robblack007/clase-dinamica-robot
Practicas/.ipynb_checkpoints/Practica 4 - Movimientos de cuerpos rigidos-checkpoint.ipynb
mit
from math import pi, sin, cos from numpy import matrix from matplotlib.pyplot import figure, plot, style style.use("ggplot") %matplotlib inline τ = 2*pi """ Explanation: Matrices de Transformación Las matrices de rotación y traslación nos sirven para transformar una coordenada entre diferentes sistemas coordenados, pero tambien lo podemos ver, como la transformación que le hace cada eslabon a nuestro punto de ubicación. Empecemos con la rotación: $$ R_z = \begin{pmatrix} \cos{\theta} & -\sin{\theta} & 0 & 0 \ \sin{\theta} & \cos{\theta} & 0 & 0 \ 0 & 0 & 1 & 0 \ 0 & 0 & 0 & 1 \end{pmatrix} $$ La matriz que escribimos, girará nuestro de eje de coordenadas con respecto al eje $z$ un angulo $\theta$. Por cierto, las funciones trigonometricas toman como argumento el angulo en radianes, por lo que tomaré la convencion de llamar a $\tau = 2 \pi$, para definir los angulos como fracciones de la vuelta completa. End of explanation """ pos_1 = matrix([[1], [0], [0], [1]]) """ Explanation: Para empezar definiremos nuestra posición de inicio, como la coordenada: $$ P_1 = \begin{pmatrix} 1 \ 0 \ 0 \end{pmatrix} $$ End of explanation """ f1 = figure(figsize=(8, 8)) a1 = f1.gca() a1.plot([0, pos_1[0]], [0, pos_1[1]], "-o") a1.set_xlim(-0.1, 1.1) a1.set_ylim(-0.1, 1.1); """ Explanation: Agregamos un $1$ al final, debido a que las matrices de transformación homogenea son de dimensión $\Re^{4\times 4}$ y de otra manera no concordarian las dimensiones. Ahora podemos graficar en el plano $XY$ de la siguiente manera: End of explanation """ rot_1 = matrix([[cos(τ/12), -sin(τ/12), 0, 0], [sin(τ/12), cos(τ/12), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) rot_1*pos_1 """ Explanation: Podemos definir matrices de la siguiente manera, y ver que el resultado es lo que esperariamos si quisieramos rotar el vector unitario $\hat{i}$ , con $30^o$ es decir $\frac{\tau}{12}$. 
End of explanation """ def rotacion_z(θ): A = matrix([[cos(θ), -sin(θ), 0, 0], [sin(θ), cos(θ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) return A """ Explanation: Pero podemos hacer algo mejor, podemos definir una función que nos devuelva una matriz de rotación, dandole como argumento el angulo de rotación. End of explanation """ rot_2 = rotacion_z(τ/12) p = rot_2*pos_1 p f1 = figure(figsize=(8, 8)) a1 = f1.gca() a1.plot([0, p[0]], [0, p[1]], "-o") a1.set_xlim(-0.1, 1.1) a1.set_ylim(-0.1, 1.1); """ Explanation: Entonces, tendremos el mismo resultado, con un codigo mas limpio. End of explanation """
orbitfold/tardis
docs/notebooks/to_hdf.ipynb
bsd-3-clause
from tardis.io.config_reader import Configuration from tardis.model import Radial1DModel from tardis.simulation import Simulation # Must have the tardis_example folder in the working directory. config_fname = 'tardis_example/tardis_example.yml' tardis_config = Configuration.from_yaml(config_fname) model = Radial1DModel(tardis_config) simulation = Simulation(tardis_config) """ Explanation: Example to_hdf calls Initialize the simulation with the tardis_example.yml configuration file. End of explanation """ simulation.legacy_run_simulation(model, '/tmp/full_example.hdf', 'full', hdf_last_only=False) """ Explanation: Run the simulation while storing all its iterations to an HDF file. The first parameter is the path where the HDF file should be stored. The second parameter determines which properties will be stored. When its value is 'input', only Input plasma properties will be stored. The third parameter, hdf_last_only, if True will only store the last iteration of the simulation, otherwise every iteration will be stored. End of explanation """ import pandas as pd data = pd.HDFStore('/tmp/full_example.hdf') print data """ Explanation: Open the stored HDF file with pandas and print its structure. End of explanation """ print data['/simulation9/model/plasma/density'] """ Explanation: Access model.plasma.density of the 9th simulation, which is a one-dimensional array End of explanation """ print data['/simulation9/model/scalars']['t_inner'] """ Explanation: Scalars are stored in a scalars pandas.Series for every module. For example to access model.t_inner of the 9th iteration of the simulation, one would need to do the following. Note: Quantities are always stored as their SI values. 
End of explanation """ model.plasma.to_hdf('/tmp/plasma_output.hdf', path='parent') import pandas with pandas.HDFStore('/tmp/plasma_output.hdf') as data: print data """ Explanation: Breakdown of the various to_hdf methods Every module in TARDIS has its own to_hdf method responsible to store its own data to an HDF file. Plasma The following call will store every plasma property to /tmp/plasma_output.hdf under /parent/plasma End of explanation """ from tardis.plasma.properties.base import Input model.plasma.to_hdf('/tmp/plasma_input_output.hdf', collection=[Input]) import pandas with pandas.HDFStore('/tmp/plasma_input_output.hdf') as data: print data """ Explanation: Plasma's to_hdf method can also accept a collection parameter which can specify which types of plasma properties will be stored. For example if we wanted to only store Input plasma properties, we would do the following: End of explanation """ model.to_hdf('/tmp/model_output.hdf') """ Explanation: Model The following call will store properties of the Radial1DModel to /tmp/model_output.hdf under /model. Additionally, it will automatically call model.plasma.to_hdf, since plasma is also a property of the model. End of explanation """ simulation.runner.to_hdf('/tmp/runner_output.hdf') import pandas with pandas.HDFStore('/tmp/runner_output.hdf') as data: print data """ Explanation: MontecarloRunner The following call will store properties of the MontecarloRunner to /tmp/runner_output.hdf under /runner End of explanation """
gprMax/gprMax
tools/Jupyter_notebooks/example_Bscan_2D.ipynb
gpl-3.0
%%writefile ../../user_models/cylinder_Bscan_2D.in #title: B-scan from a metal cylinder buried in a dielectric half-space #domain: 0.240 0.210 0.002 #dx_dy_dz: 0.002 0.002 0.002 #time_window: 3e-9 #material: 6 0 1 0 half_space #waveform: ricker 1 1.5e9 my_ricker #hertzian_dipole: z 0.040 0.170 0 my_ricker #rx: 0.080 0.170 0 #src_steps: 0.002 0 0 #rx_steps: 0.002 0 0 #box: 0 0 0 0.240 0.170 0.002 half_space #cylinder: 0.120 0.080 0 0.120 0.080 0.002 0.010 pec """ Explanation: B-scan from a metal cylinder (2D) This example uses the same geometry as the previous (A-scan) example but this time a B-scan is created. A B-scan is composed of multiple traces (A-scans) recorded as the source and receiver are moved over the target, in this case the metal cylinder. The input needed to create the model is: my_cylinder_Bscan_2D.in End of explanation """ import os from gprMax.gprMax import api filename = os.path.join(os.pardir, os.pardir, 'user_models', 'cylinder_Bscan_2D.in') api(filename, n=60, geometry_only=False) """ Explanation: The differences between this input file and the one from the A-scan are the x coordinates of the source and receiver, and the commands needed to move the source and receiver. As before, the source and receiver are offset by 40mm from each other as before but they are now shifted to a starting position for the scan. The #src_steps command is used to move every source in the model by specified steps each time the model is run. Similarly, the #rx_steps command is used to move every receiver in the model by specified steps each time the model is run. Note, the same functionality can be achieved by using a block of Python code in the input file to move the source and receiver individually (for further details see the Python section of the User Guide). Run the model To run the model to create a B-scan you must pass an optional argument to specify the number of times the model should be run. 
In this case this is the number of A-scans (traces) that will comprise the B-scan. For a B-scan over a distance of 120mm with a step of 2mm that is 60 A-scans. You can now run the model using: python -m gprMax user_models/cylinder_Bscan_2D.in -n 60 End of explanation """ %run -m tools.outputfiles_merge user_models/cylinder_Bscan_2D """ Explanation: View the results Merge A-scans into B-scan You should have produced 60 output files, one for each A-scan, with names my_cylinder_Bscan_2D1.out, my_cylinder_Bscan_2D2.out etc... These can be combined into a single file using the command: python -m tools.outputfiles_merge user_models/cylinder_Bscan_2D End of explanation """ %matplotlib inline import os from tools.plot_Bscan import get_output_data, mpl_plot filename = os.path.join(os.pardir, os.pardir, 'user_models', 'cylinder_Bscan_2D_merged.out') rxnumber = 1 rxcomponent = 'Ez' outputdata, dt = get_output_data(filename, rxnumber, rxcomponent) plt = mpl_plot(outputdata, dt, rxnumber, rxcomponent) # Change from the default 'seismic' colormap #plt.set_cmap('gray') """ Explanation: You should see a combined output file cylinder_Bscan_2D_merged.out. The tool will ask you if you want to delete the original single A-scan output files or keep them. Plot the B-scan You can plot the B-scan using: python -m tools.plot_Bscan user_models/cylinder_Bscan_2D_merged.out Ez End of explanation """
dsquareindia/gensim
docs/notebooks/sklearn_wrapper.ipynb
lgpl-2.1
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldaModel import SklearnWrapperLdaModel
""" Explanation: Using wrappers for Scikit learn API This tutorial is about using gensim models as a part of your scikit learn workflow with the help of wrappers found at gensim.sklearn_integration.sklearn_wrapper_gensim_ldaModel The wrappers available (as of now) are : * LdaModel (gensim.sklearn_integration.sklearn_wrapper_gensim_ldaModel.SklearnWrapperLdaModel),which implements gensim's LdaModel in a scikit-learn interface LdaModel To use LdaModel begin with importing LdaModel wrapper End of explanation """
# Build a toy corpus: a token-to-id Dictionary plus the bag-of-words
# representation of each short "document".
from gensim.corpora import Dictionary
texts = [['complier', 'system', 'computer'],
 ['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
 ['graph', 'flow', 'network', 'graph'],
 ['loading', 'computer', 'system'],
 ['user', 'server', 'system'],
 ['tree','hamiltonian'],
 ['graph', 'trees'],
 ['computer', 'kernel', 'malfunction','computer'],
 ['server','system','computer']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
""" Explanation: Next we will create a dummy set of texts and convert it into a corpus End of explanation """
# Fit the sklearn-style LDA wrapper on the toy corpus and show the topics.
model=SklearnWrapperLdaModel(num_topics=2,id2word=dictionary,iterations=20, random_state=1)
model.fit(corpus)
model.print_topics(2)
""" Explanation: Then to run the LdaModel on it End of explanation """
import numpy as np
from gensim import matutils
from gensim.models.ldamodel import LdaModel
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldaModel import SklearnWrapperLdaModel
rand = np.random.mtrand.RandomState(1) # set seed for getting same result
cats = ['rec.sport.baseball', 'sci.crypt']
data = fetch_20newsgroups(subset='train', categories=cats, shuffle=True)
""" Explanation: Integration with Sklearn To provide a better example of how it can be used with Sklearn, Let's use CountVectorizer method of sklearn. For this example we will use 20 Newsgroups data set. We will only use the categories rec.sport.baseball and sci.crypt and use it to generate topics. End of explanation """
vec = CountVectorizer(min_df=10, stop_words='english')
X = vec.fit_transform(data.data)
vocab = vec.get_feature_names() #vocab to be converted to id2word
id2word=dict([(i, s) for i, s in enumerate(vocab)])
""" Explanation: Next, we use countvectorizer to convert the collection of text documents to a matrix of token counts. End of explanation """
obj=SklearnWrapperLdaModel(id2word=id2word,num_topics=5,passes=20)
lda=obj.fit(X)
lda.print_topics()
""" Explanation: Next, we just need to fit X and id2word to our Lda wrapper. End of explanation """
from sklearn import linear_model
def print_features(clf, vocab, n=10):
    ''' Print the top-n positive and top-n negative features of a fitted
    linear classifier, looking each coefficient index up in `vocab`. '''
    coef = clf.coef_[0]
    print 'Positive features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[::-1][:n] if coef[j] > 0]))
    print 'Negative features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[:n] if coef[j] < 0]))
clf=linear_model.LogisticRegression(penalty='l1', C=0.1) #l1 penalty used
clf.fit(X,data.target)
print_features(clf,vocab)
""" Explanation: Using together with Scikit learn's Logistic Regression Now let's try Sklearn's logistic classifier to classify the given categories into two types. Ideally we should get positive weights when cryptography is talked about and negative when baseball is talked about. End of explanation """
estnltk/episode-miner
docs/Winepi.ipynb
gpl-2.0
from episode_miner import Event, EventSequence, EventSequences, Episode, Episodes
from pprint import pprint
# A toy sequence of timestamped events: mine the serial episodes that are
# frequent within a sliding window of width 5.
sequence_of_events = (Event('a', 1), Event('b', 2), Event('a', 3), Event('a', 5), Event('b', 8))
event_sequences = EventSequences(sequence_of_events=sequence_of_events, start=0, end=9)
frequent_episodes = event_sequences.find_serial_episodes(window_width=5, min_frequency=0.2, only_full_windows=False, allow_intermediate_events=True)
frequent_episodes
""" Explanation: Winepi A partial implementation of the Winepi algorithm described by Mannila, Toivonen and Verkamo in Discovery of Frequent Episodes in Event Sequences, 1997. End of explanation """
# Each mined episode carries its support statistics as attributes.
frequent_episodes[3].abs_support, frequent_episodes[3].rel_support, frequent_episodes[3].allow_intermediate_events
""" Explanation: All episodes in output of find_serial_episodes are equipped with abs_support, rel_support and allow_intermediate_events attributes. End of explanation """
# Support can also be computed for explicitly constructed episodes.
episodes = Episodes((Episode(('a',)), Episode(('a', 'b'))))
episodes = event_sequences.support(episodes=episodes, window_width=5, only_full_windows=False, allow_intermediate_events=True)
episodes[0], episodes[0].abs_support, episodes[0].rel_support, episodes[0].allow_intermediate_events
""" Explanation: Support Equip episodes with support (frequency) information. End of explanation """
episodes.abs_support()
""" Explanation: The defaults are python only_full_windows = False allow_intermediate_events = episodes[0].allow_intermediate_events It is more efficient to find the supports for a list of episodes than for each episode separately. Get the absolute support of episodes. End of explanation """
episodes.rel_support()
""" Explanation: Get relative support of episodes. End of explanation """
# Contrast "full windows" (every window lies inside [start, end]) with
# "all windows" (windows may overhang the ends of the event sequence).
sequence_of_events = (Event('a', 0), Event('b', 4), Event('c', 7))
event_sequences = EventSequences(sequence_of_events=sequence_of_events, start=0, end=9)
frequent_episodes = event_sequences.find_serial_episodes(window_width=5, min_frequency=0.2, only_full_windows=True)
print('Full windows:', frequent_episodes)
frequent_episodes = event_sequences.find_serial_episodes(window_width=5, min_frequency=0.2, only_full_windows=False)
print('All windows: ', frequent_episodes)
""" Explanation: Only full windows vs all windows The default is python only_full_windows=False If True, the start of the first window is at the start of the sequence of events and the end of the last window is at the end of the sequence of events. If False, the end of the first window is at the start of the sequence of events and the start of the last window is at the end of the sequence of events. End of explanation """
# Intermediate events: here the 2nd and 3rd events share a timestamp, so the
# only episode with a true intermediate event is ('a', 'd').
sequence_of_events = (Event('a', 0), Event('b', 2), Event('c', 2), Event('d', 3))
event_sequences = EventSequences(sequence_of_events=sequence_of_events, start=0, end=6)
print('No intermediate events:')
frequent_episodes = event_sequences.find_serial_episodes(window_width=4, min_frequency=0.1, allow_intermediate_events=False)
pprint(frequent_episodes)
print('Allow intermediate events:')
frequent_episodes = event_sequences.find_serial_episodes(window_width=4, min_frequency=0.1, allow_intermediate_events=True)
pprint(frequent_episodes)
""" Explanation: In case of full windows the Winepi frequency of episodes near the start or end of the event sequence is reduced, but the number of all windows is smaller and therefore the relative frequency of episodes is increased. Allow intermediate events vs no intermediate events An event B is an intermediate event for events A and C if time of A < time of B < time of C. The default is python allow_intermediate_events = True In the next example the 2nd and 3rd events have the same time, and the only episode which has an intermediate event is ('a', 'd'). End of explanation """
# With intermediate events forbidden only singleton episodes survive;
# allowing them lets the miner discover the interleaved pattern.
sequence_of_events = (Event('a', 1), Event('c', 2), Event('b', 3), Event('c', 4), Event('a', 5), Event('d', 6), Event('b', 7), Event('d', 8), Event('a', 9), Event('e', 10), Event('b', 11), Event('e', 12))
event_sequences = EventSequences(sequence_of_events=sequence_of_events, start=0, end=13)
frequent_episodes = event_sequences.find_serial_episodes(window_width=4, min_frequency=0.2, allow_intermediate_events=False)
print('No intermediate events:')
pprint(frequent_episodes)
frequent_episodes = event_sequences.find_serial_episodes(window_width=4, min_frequency=0.2, allow_intermediate_events=True)
print('\nAllow intermediate events:')
pprint(frequent_episodes)
""" Explanation: In the next example "no intermediate events" version finds only episodes of length one, but "allow intermediate events" version discovers the pattern. End of explanation """
ivannz/study_notes
year_14_15/spring_2015/netwrok_analysis/notebooks/assignments/networks_ha1.ipynb
mit
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la
from scipy.stats import rankdata
%matplotlib inline

## Construct a regression model
def lm_model( X, Y, intercept = True ) :
    """Assemble the design matrix and target vector of a linear model.

    Returns the triple (M, T, intercept) where T is Y as a float array and
    M is X as a float array with a leading column of ones prepended
    (stacked and transposed) when an intercept is requested.
    """
    T = np.array( Y, dtype = float )
    M = np.array( X, dtype = float )
    if intercept is True :
        M = np.vstack( [ np.ones( len( Y ) ), M ] ).T
    return (M,T, intercept)

## Define the OLS regression routine:
def lm_fit( model ) :
    """Ordinary least squares fit of a model built by lm_model().

    Returns (coef, RSS, dof, MMinv): the OLS coefficients, the residual
    sum of squares, the residual degrees of freedom and (X'X)^{-1}.
    NOTE(review): forming the explicit inverse of X'X is numerically
    fragile for ill-conditioned designs; np.linalg.lstsq would be safer.
    """
    M, T, intercept = model
    MMinv = la.inv( ## implement (X'X)^{-1} (X'Y)
        np.dot( M.T, M ) )
    coef = np.dot( MMinv, np.dot( M.T, T ) )
    ## Estimate the residual standard deviation
    resid = T - np.dot(M, coef)
    dof = len( T ) - len( coef )
    RSS = np.dot( resid.T, resid )
    return (coef, RSS, dof, MMinv )
""" Explanation: <center>Structural Analysis and Visualization of Networks</center> <center>Home Assignment #1: Power law</center> <center>Student: Nazarov Ivan</center> <hr /> General Information Due Date: 28.01.2015 23:59 <br > Late submission policy: -0.2 points per day <br > Please send your reports to leonid.e.zhukov@gmail.com and shestakoffandrey@gmail.com with message subject of the following structure:<br > [HSE Networks 2015] Nazarov Ivan HA1 Support your computations with figures and comments. <br > If you are using IPython Notebook you may use this file as a starting point of your report.<br > <br > <hr \> Preamble Let's start by defining several routines, which would become helpful later on in the assignment. End of explanation """
## ML estimator of the power law in the "tail" (x≥u):
##  x_k \sim C x^{-\alpha} 1_{[u,+∞)}(x).
def mle_alpha( data, threshold ) :
    """Closed-form ML (Hill) estimate of a continuous power-law exponent.

    Fits p(x) = (alpha-1)/u * (x/u)^{-alpha} on the observations with
    x >= threshold (= u) and returns the pair (alpha, se), where `se` is
    the delta-method standard error (alpha - 1)/sqrt(n_tail).
    """
    ## Keep the data observations, that we consider to be in the tail
    tail = np.array( [ v for v in data if v >= threshold ] )
    ## Estimate the mean log of the peaks over threshold
    sum_log = np.sum( np.log( tail ) ) / ( len( tail ) + 0.0 )
    ## Use the closed form expression for the value of the power at an optimum
    alpha = 1.0 + 1.0 / ( sum_log - np.log( threshold ) )
    ## Using the delta-method compute the s.e of the estimate.
    return alpha, ( alpha - 1 ) / np.sqrt( len( tail ) )

## The function below implements the same functionality as the previous one
##  but instead of the continuous version it works with the discrete power law.
from scipy.special import zeta
from scipy.optimize import minimize
## The discrete power law gives marginally different results
##  \Pr(N=n) \defn \frac{1}{\zeta(\gamma)} n^{-\gamma}, n -- positive integer
def mle_alpha_d( data, threshold ) :
    """Numerical ML estimate of a discrete (zeta) power-law exponent.

    The tail observations are shifted to k - threshold + 1 so the support
    starts at 1, and the exponent is found by minimising the negative
    log-likelihood log zeta(a) + a * mean(log k), seeded with the
    continuous closed-form estimate.  Returns (alpha, nan): no closed-form
    standard error is available for the discrete law.
    """
    ## Keep the data observations, that we consider to be in the tail
    tail = np.array( [ v for v in data if v >= threshold ] )
    ## Estimate the mean log of the (shifted) peaks over threshold
    sum_log = np.sum( np.log( tail - threshold + 1 ) ) / ( len( tail ) + 0.0 )
    ## Define minus log-likelihood of the discrete power law
    loglik = lambda alpha : np.log( zeta( alpha ) ) + alpha * sum_log
    ## Compute the ML estimate of the exponent, with a view to using it as the
    ##  initial seed for the numerical minimizer for better convergence.
    res = minimize( loglik, ( 1.0 + 1.0 / sum_log, ), method = 'Nelder-Mead', options = { 'disp': False } )
    ## Return the "optimal" argument, regardless of its quality. Potentially DANGEROUS!
    return res.x[ 0 ], float( 'nan' )
""" Explanation: A continuous random variable $X$ is distributed according to the power law (also known as Pareto distribution) if its probability density function is $$p(x) = \frac{\alpha-1}{u} {\bigg (\frac{x}{u} \bigg )}^{-\alpha} 1_{[u,+\infty)} (x)$$ The maximum likelihood estimate of the exponent $\alpha$ is given by $$\hat{\alpha} = 1 + {\bigg( \frac{\sum_{k=1}^n \ln x_k}{n} - \ln u \bigg) }^{-1}$$ provided all the observed sample values are not less than the threshold $u$. A random variable $N$ with discrete power law distribution exceeding a certain threshold $u$ has the following probabilities: $$\mathbb{P}(N=k) = C \frac{1}{{(k-u+1)}^{\,\gamma}}$$ where $k\geq u$ is the value over the threshold $u$, $\gamma > 1$ and the constant $C$ is given by the reciprocal of Riemann's zeta function $$\zeta(\gamma) = \sum_{n\geq 0} \frac{1}{n^{\,\gamma}}$$. The MLE estimate of the power parameter of discrete power law involves the derivative of the Zeta function, which forbids a closed algebraic form of the solution to the first order conditions on the maximum of log-likelihood: $$\frac{\partial }{\partial \gamma} \mathcal{L}\quad:\quad \frac{-\zeta'(\gamma)}{\zeta(\gamma)} = \sum_{i=1}^n \ln (k_i-u+1)$$ In practice it is necessary to resort to numerical optimization in order to find the MLE under this distributional assumption. The routines below implement exactly the MLE of $\alpha$ and $\gamma$. End of explanation """

## Define a convenience function for estimating the power parameter
##  of the continuous power law
from scipy.stats import kstest
def ks_dist( data, threshold ) :
    """Kolmogorov-Smirnov distance of the tail data to the fitted power law.

    Returns ((D, p-value), (alpha, se)) where alpha is the ML estimate of
    the continuous power-law exponent for the given threshold.
    """
    ## Estimate the power given the current threshold
    alpha, sd = mle_alpha( data, threshold )
    ## Construct the CDF in the current environment.  The "+ 0.0" forces
    ##  floating-point division: with integer data (e.g. node degrees) and an
    ##  integer threshold, Python 2 / NumPy integer division would otherwise
    ##  silently corrupt the hypothesised CDF.
    cdf = lambda x : 1.0 - ( x / ( threshold + 0.0 ) ) ** ( 1.0 - alpha )
    ## Return the output of the out-of-the box Kolmogorov-Smirnov test:
    ##  the infinity norm of the difference between the distribution functions.
    d, pv = kstest( [ v for v in data if v >= threshold ], cdf )
    return (d, pv), (alpha, sd)

def ks_dist_d( data, threshold ) :
    """Discrete-law analogue of ks_dist(): KS distance to the fitted zeta law."""
    ## Estimate the power given the current threshold
    alpha, sd = mle_alpha_d( data, threshold )
    ## Construct the CDF in the current environment (Hurwitz zeta tail ratio)
    cdf = lambda k : 1.0 - zeta( alpha, k-threshold+1 ) / zeta( alpha )
    ## Return the output of the out-of-the box Kolmogorov-Smirnov test:
    ##  the infinity norm of the difference between the distribution functions.
    d, pv = kstest( [ v for v in data if v >= threshold ], cdf )
    return (d, pv), (alpha, sd)
""" Explanation: Selecting an optimal threshold, beyond which the power-law like tail behaviour is expected, which adequately balances between the bias and the variance, is very important. As suggested in ... this task is performed well by employing the statistic in the Kolmogorov-Smirnov goodness-of-fit test. The statistic itself is the $L^\infty$ norm of the difference between the hypothesised distribution function and the observed (empirical) CDF. Routines below implement this functionality. End of explanation """

def values( data, frequency = False ) :
    """Map each distinct value to the list of its positions in sorted(data).

    The `frequency` argument is accepted for interface compatibility but is
    currently unused.
    """
    bins = dict( )
    ## For each value in the given array, add the index of each occurrence
    ##  into the bin dedicated to the encountered value.
    for i, x in enumerate( sorted( data ) ) :
        ## Append the current occurrence of a value, unless it has never been
        ##  seen before, in which case initialise the list of indices for it.
        bins[ x ] = bins.get( x, [] ) + [ i ]
    return bins

## It was brought to my attention, that numpy.unique() does the same trick...
def counts( data ) :
    """Return (value, multiplicity) pairs for the distinct values of `data`.

    The result is materialised as a list so the behaviour is identical under
    Python 2 and Python 3 (where dict.items() is a lazy view) and the pairs
    can be fed directly to np.array().
    """
    ## Count the number of times a value occurs in the array.
    counts = dict( )
    for x in data :
        ## If the value has not been seen yet, then initialize it to
        ##  a single occurrence, otherwise increment its counter.
        counts[ x ] = counts.get( x, 0 ) + 1
    return list( counts.items( ) )
""" Explanation: These helper functions invert an array and count the number of occurrences of distinct values in an array. End of explanation """

## Construct the complementary cumulative distribution function for
##  the data exceeding the given tail threshold.
def ccdf( data, threshold ) :
    ## Empirical complementary CDF of the data at or above the threshold,
    ## returned as a two-column array of (value, tail probability) rows.
    ## Count the occurrences of values over some threshold in the array
    freq = np.array( counts( [ v for v in data if v >= threshold ] ), dtype = float )
    ## Sort the counts along the growing values they correspond to
    freq = freq[ freq[ :, 0 ].argsort( ), : ]
    ## ... and compute the fraction of data with values lower than the current
    freq[:,1] = 1.0 - np.cumsum( freq[ :,1 ], dtype = float ) / sum( freq[ :,1 ] )
    return freq
""" Explanation: Mean excess plot is a visual tool that helps determine the tail-type behaviour from the sample data. Basically it is just the plot of the sample mean of values exceeding some threshold. If $X$ is some random variable with $\mathbb{E}X^+ < +\infty$, then the function $M(u)$, also known as the mean residual lifetime, or mean excess over threshold, is defined as $$ M(u) \overset{\Delta}{=} \mathbb{E}{\Big ( {\big. X-u\,\big \rvert}\, X\geq u \Big)} = \mathbb{E}{\Big ( {\big. X\,\big \rvert}\, X\geq u \Big)} - u$$ Its empirical analog is provided by the following expression: $$\hat{M}(u) \overset{\Delta}{=} \frac{1}{\sum_{i=1}^n 1_{[u, \infty)}(x_i)} \sum_{i=1}^n (x_i - u)1_{[u, \infty)}(x_i) $$ Heavy-tailed behaviour reveals itself as an upwards trend in the graph above some threshold. A downward trend shows thin-tailed behaviour whereas an almost flat line shows an exponential tail. Mean excesses for higher thresholds are averages of a handful of extreme excesses, which implies that in this region the plot is unstable. Indeed, if $X\sim \text{Pwr}(\alpha,x_0)$ then $$M(u) = \frac{ (\alpha-1)\,x_0^{\,\alpha-1}\,\int\limits_u^\infty s^{1-\alpha} ds }{{\big (\tfrac{u}{x_0}\big )}^{1-\alpha}} - u= \frac{\alpha-1}{\alpha-2} u - u = \frac{1}{\alpha-2} u$$ If, however, $X\sim \text{Exp}(\lambda)$ then $$M(u) = \frac{ \int\limits_u^\infty s \lambda e^{-\lambda s} ds }{e^{-\lambda u}} - u = \frac{ u e^{-\lambda u}+ e^{-\lambda u}\lambda^{-1} }{e^{-\lambda u}} - u = \frac{1}{\lambda}$$ End of explanation """
def mean_excess( data ) :
    ## Empirical mean excess function: for every attainable threshold u the
    ## average of (x - u) over the observations exceeding u, returned as a
    ## two-column array of (threshold, mean excess) rows.
    ## NOTE(review): relies on Python 2 semantics -- zip() must yield a list
    ## and np.int / np.float are builtin aliases; all three change under
    ## Python 3 / recent NumPy.
    data = np.array( sorted( data, reverse = True ) )
    ## Compute the last positions in the sorted array of each repeated observation
    ranks = rankdata( data, method = 'max' )
    ## Since the array is sorted, the number of observations exceeding the current
    ##  is given by the difference between the length of the array and the max-rank.
    excesses = np.array( np.unique( len( data ) - ranks ), dtype = np.int )
    ## Get the last values in each group -- the thresholds
    thresholds = data[ excesses ]
    ## Get the sum of all values greater than the current threshold
    mean_excess = np.cumsum( data )[ excesses ] / ( excesses + 0.0 ) - thresholds
    return np.array( zip( thresholds, mean_excess ), dtype = np.float )
""" Explanation: Problems Task 1. Load wordcounts dataset. 1. Check that Zipf's Law holds 2. Assuming that the data is distributed according to the Power Law, find * $\alpha$ of the distribution * mean sample variance $\sigma^2$ 3. Produce summary of the frequencies: min, max, mean, median End of explanation """
#####################################################################
#+ 0. Load the data (yes, it is a milestone!)
## Load the word count dataset: each line is "<count> <word>" with the
## word capped at 32 characters.
wordcount = np.fromregex( './data/wordcounts.txt', r"(\d+)\s+(.{,32})", [ ( 'freq', np.int64 ), ( 'word', 'S32' ) ] )

#####################################################################
##+ 1. Check that Zipf's Law holds
## Pre-sort the frequencies: in ascending order of frequencies
wordcount.sort( order = 'freq' )
freqs = wordcount[ 'freq' ]
## Produce ranks: from 1 up to |W|
ranks = np.arange( 1, len( wordcount ) + 1, dtype = float )[::-1]
## The probability of a word frequency being not less than the
##  frequency of a given word w is exactly the ratio of the w's rank
##  to the total number of words.
probs = ranks / len( wordcount ) ## estimate f_k\sim C k^{-\gamma} model mdl = lm_model( np.log( ranks ), np.log( freqs ), True ) coef, rss, dof, XX = lm_fit( mdl ) ## Define the fitted Zipf's law # zipf = lambda r : np.exp( coef.dot( ( 1, np.log( r ) ) ) ) zipf = lambda r : np.exp( coef[0] + coef[1] * np.log( r ) ) ## Show how well is was estimated. plt.loglog( freqs, probs, "xr" ) plt.plot( zipf( ranks ), probs, "-b" ) plt.xlabel( "frequency" ) ; plt.ylabel( "ranks" ) plt.title( "Wordcount data" ) plt.show( ) ###################################################################### ##+ 2. Assuming that the data is distributed according to the Power Law, find ## * $\alpha$ of the distribution ## * mean sample variance $\sigma^2$ ## Get the ML estimate alpha_ml, alpha_ml_sd = mle_alpha( freqs, freqs.min( ) ) ## Let's suppose that the rank is proportional to the complementary CDF ## of a power law: $\bar{F}(x) = {\left(\frac{x}{u}\right)}^{1-\alpha}$ ## Thus the following econometric model is to be estimated: ## $\log \text{rank} \sim C + (1-\alpha) \log \text{freq} + \epsilon$ mdl = lm_model( np.log( freqs ), np.log( ranks ), True ) beta, rss, dof, XX = lm_fit( mdl ) ## Transform the coefficient alpha_ls = 1 - beta[ 1 ] ## The regression estimate of the power should be close ## to the ML estimate print "the OLS estimate of alpha is %f\n" % alpha_ls print "Whereas the ML estimate is %f (%f) \n" % ( alpha_ml, alpha_ml_sd ) print "Since ML is more theoretically sound, the relative error is %f%%\n" % ( 100 * np.abs( 1.0 - alpha_ls / alpha_ml ), ) ## The mean and the sample variance of the sample ## frequency distribution: print "The average frequency over the sample is ", freqs.mean(), "\n" print "The sample variance is ", freqs.var(), "\n" ## Theoretical mean and variance of the power law distribution ## significantly depend on the power parameter. 
## Indeed for $x\sim \frac{\alpha-1}{u} {\left( \frac{x}{u} \right)}^\alpha$ one has the following: ## $E(x) = \frac{\alpha-1}{\alpha-2} u$ if $\alpha>2$ ## $E(x^2) = \frac{\alpha-1}{\alpha-3} u^2$ if $\alpha>3$ ## The estimated parameter is less than 2, implying that the frequency ## distribution is unlikely to have even a finite mean under the ## assumed distribution. ##################################################################### ##+ 3. Produce summary of the frequencies: min, max, mean, median ## Does it make sense to compute these summaries? What does the mean frequency tell us? print "The minimum frequency is ", freqs.min(), "\n" print "The mean frequency is ", freqs.mean(), "\n" print "The median frequency is ", np.median( freqs ), "\n" print "The maximum frequency is ", freqs.max(), "\n" """ Explanation: Problems Task 1. Load wordcounts dataset. 1. Check that Zipf's Law holds 2. Assuming that the data is distributed according to the Power Law, find * $\alpha$ of the distribution * mean sample variance $\sigma^2$ 3. Produce summary of the frequencies: min, max, mean, median End of explanation """ ##################################################################### ## + 0. Read the graph ## Load the network routing graph first as it is the smallest. It is ## an undirected graph. import networkx as nx G = nx.read_edgelist( "./data/network.txt", create_using = nx.Graph( ) ); node_degree = G.degree( ) deg = np.array( node_degree.values( ), dtype = np.int ) ##################################################################### ##+ 1. Are they correspondent to power law? ## First let's draw the frequency plot of the node degree distribution. 
degree_freq = np.array( counts( deg ) ) deg_me = mean_excess( deg ) plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( "Node degree frequency" ) plt.loglog( degree_freq[:,0], degree_freq[:,1], "bo" ) plt.xlabel( "degree" ) ; plt.ylabel( "frequency" ) plt.subplot(122) ## An upward trend in plot shows heavy-tailed behaviour, but the ## values for high thresholds are unreliably estimated. plt.title( "Mean excess plot" ) plt.loglog( deg_me[:,0], deg_me[:,1], "bo-", linewidth = 2 ) plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" ) plt.show( ) """ Explanation: <hr /> Task 2. Find and plot PDF and CDF for the following networks: * Routing network * Web graph * Facebook network Are they correspondent to power law? Find max and mean values of incoming and outcoming node degrees Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$ Determine $x_{min}$ via Kolmogorov-Smirnov test The routing network graph End of explanation """ ## The empirical degree distribution may not correspond to a power ## law per se, but it definitely has some heavy tailed behaviour, ## which exhibits itself, when the only data exceeding same truncated ## is considered. cc = ccdf( deg, 0 ) plt.title( "Degree cCDF" ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.loglog( cc[:,0], cc[:,1], "bo-", linewidth = 2 ) plt.show( ) ## Clearly the chances of an extremely high node degree decay proportional ## to the value of the degree on a log-log scale. ##################################################################### ##+ 2. Find max and mean values of incoming and outcoming node degrees ## Since the network graph is undirected it does not make sense to ## distinguish in- and out- nodes. Thus let's check the range of the ## general (two-way) degree. 
print "The degrees range from %d to %d" % ( min( deg ), max( deg ) ) #, "\n" print "The average degree over the sample is %.3f" % ( G.size( ) / G.order( ) ) #, "\n" print "The degree standard deviation is %.3f" % ( np.sqrt( np.var( deg ) ) ) #, "\n" print "The median degree is %d" % ( np.median( deg ) ) #, "\n" ##################################################################### ##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$ ##+ 4. Determine $x_{min}$ via Kolmogorov-Smirnov test ## We have reasons to believe there are some power law-like effects in ## the behaviour of the node degree (treated as a random variable). ## Let's pursue this lead and estimate the exponent in the power law ## and select the most likely breakpoint, beyond which the degree ## is heavy tailed. ##################################################################### ## Get the ML estimate of the exponent parameter. alpha_ml, alpha_ml_se = mle_alpha( deg, min( deg ) ) print "The Maximum likelihood estimate of the exponent of the node degree distribution is %.3f (%.4f)\n" % ( alpha_ml, alpha_ml_se ) ##################################################################### ## Run the KS threshold selection routine thresholds = np.unique( deg ) ## The ks_dist() function returns a tuple of the following parameters: ## * ( KS-distance, PV of the KS-test ), ( MLE of alpha, the standard error of the MLE ) ks_min = np.array( [ ks_dist( deg, u ) for u in thresholds ] ) ## Select the x_min that brings the KS metric to its minimum on the given ## degree data. Note the first threshold is removed, since it is likely ## to yield very biased estimate. i_min = np.argmin( ks_min[1:,0,0] )+1 x_min = thresholds[ i_min ] alpha_ml, alpha_ml_se = ks_min[ i_min, 1, : ] ## Produce a dataset for cCDF plotting. 
x = np.arange( x_min, 2 * np.max( deg ) ) deg_ccdf = ccdf( deg, x_min ) pwr_ccdf = lambda x : ( x / ( x_min + 0.0 ) ) ** ( 1.0 - alpha_ml ) """ Explanation: The mean excess plot has an unmistakeable upward trend throughout the whole set of thresholds. This is strong heuristic evidence for a heavy tail in the node degree distribution. End of explanation """ ## Produce the hill plot: the correspondence between the threshold ## and the estimated exponent. plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot( 121 ) plt.title( 'The Hill plot of the degree distribution' ) plt.ylabel( 'alpha' ) ; plt.ylabel( 'threshold' ) plt.axhline( y = alpha_ml, linewidth = 1, color = 'b' ) plt.axvline( x = x_min, linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( thresholds, ks_min[:,1,0], "r-") ## In fact the KS-metric is the $L^\infty$ norm on the set of distribution ## functions. plt.subplot( 122 ) plt.title( 'KS metric distance' ) plt.ylabel( 'max distance' ) ; plt.ylabel( 'threshold' ) plt.axhline( y = ks_min[ i_min, 0, 0 ], linewidth = 1, color = 'b' ) plt.axvline( x = x_min, linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( thresholds, ks_min[:,0,0], "r-") plt.show( ) """ Explanation: Visualize the dependence of $\alpha$ and the KS statistic on the threshold $u$. End of explanation """ print "The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold\n" % ( x_min) print "'Optimal' exponent is %.3f (%.3f)\n" % ( alpha_ml, alpha_ml_se ) plt.title( "Degree cCDF" ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.plot( x, pwr_ccdf( x ), "b-", linewidth = 2 ) plt.plot( deg_ccdf[:,0], deg_ccdf[:,1], "r-", linewidth = 2 ) plt.axvline( x = x_min, linewidth = 2, color = 'k', linestyle = '-' ) plt.show( ) """ Explanation: The estimated theoretical and the empirical CDFs are quite well aligned with each other for the chosen threshold. 
The tail of the hypothesised node degree law appears to have a higher tail decay rate, but that is due to the common severe undersampling of the tails. End of explanation """ ##################################################################### ## + 0. Read the graph ## Load the network routing graph first as it is the smallest. It is ## an undirected graph. import networkx as nx G = nx.read_edgelist( "./data/fb_Princeton.txt", create_using = nx.DiGraph( ) ); node_in_degree = G.in_degree( ) node_out_degree = G.out_degree( ) in_deg = np.array( node_in_degree.values( ), dtype = np.int ) out_deg = np.array( node_out_degree.values( ), dtype = np.int ) ##################################################################### ##+ 1. Are they correspondent to power law? ## First let's draw the frequency plot of the node degree distribution. degree_in_freq = np.array( counts( in_deg ) ) degree_out_freq = np.array( counts( out_deg ) ) plt.title( "Node degree frequency" ) plt.xlabel( "degree" ) ; plt.ylabel( "frequency" ) plt.loglog( degree_out_freq[:,0], degree_out_freq[:,1], "bo" ) plt.loglog( degree_in_freq[:,0], degree_in_freq[:,1], "r<" ) plt.show( ) plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( "Degree cCDF-loglog" ) out_cc = ccdf( out_deg, 0 ) plt.loglog( out_cc[:,0], out_cc[:,1], "bo-", linewidth = 2 ) in_cc = ccdf( in_deg, 0 ) plt.loglog( in_cc[:,0], in_cc[:,1], "r<-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.subplot(122) ## An upward trend in plot shows heavy-tailed behaviour, but the ## values for high thresholds are unreliably estimated. 
plt.title( "Mean excess plot" ) out_me = mean_excess( out_deg ) plt.loglog( out_me[:,0], out_me[:,1], "bo-", linewidth = 2 ) in_me = mean_excess( in_deg ) plt.loglog( in_me[:,0], in_me[:,1], "r<-", linewidth = 2 ) plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" ) plt.show( ) """ Explanation: Facebook graph End of explanation """ ##################################################################### ##+ 2. Find max and mean values of incoming and outcoming node degrees print "The degrees range from %d to %d for inward direction and from %d to %d for outward edges" % ( min( in_deg ), max( in_deg ), min( out_deg ), max( out_deg ) ) #, "\n" print "The average degree over the sample is %.3f (IN) and %.3f (OUT)" % ( np.sum( in_deg ) / ( G.order( ) + 0.0 ), np.sum( out_deg ) / ( G.order( ) + 0.0 ) ) #, "\n" print "The degree standard deviation is %.3f for the in-degree and %.3f -- out-degree" % ( np.sqrt( np.var( in_deg ) ), np.sqrt( np.var( out_deg ) ) ) #, "\n" print "The median in- and out-degree is %d and %d respectively" % ( np.median( in_deg ), np.median( out_deg ) ) #, "\n" ##################################################################### ##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$ ##+ 4. Determine $x_{min}$ via Kolmogorov-Smirnov test ##################################################################### ## Get the ML estimate of the exponent parameter. There are some isolated ## nodes in the provided graph, which means that it is necessary ## to omit these nodes from the analysis using a simple power law. ## One of course could try to fit a model with an explicit atom at zero, ## but that should wait for a better time. 
in_alpha_ml, in_alpha_ml_se = mle_alpha( in_deg, min( in_deg )+1 ) out_alpha_ml, out_alpha_ml_se = mle_alpha( out_deg, min( out_deg )+1 ) ##################################################################### in_thresholds = np.unique( in_deg ) out_thresholds = np.unique( out_deg ) ## Run the KS threshold selection routine in_ks_min = np.array( [ ks_dist( in_deg, u ) for u in in_thresholds ] ) out_ks_min = np.array( [ ks_dist( out_deg, u ) for u in out_thresholds ] ) ## Select the x_min that brings the KS metric to its minimum on the given ## degree data. Note the first threshold is removed, since it is likely ## to yield very biased estimate. in_i_min = np.argmin( in_ks_min[1:,0,0] )+1 out_i_min = np.argmin( out_ks_min[1:,0,0] )+1 ## Produce a dataset for cCDF plotting. in_x = np.arange( in_thresholds[ in_i_min ], 2 * np.max( in_deg ) ) out_x = np.arange( out_thresholds[ out_i_min ], 2 * np.max( out_deg ) ) ## Get the empirical complementary distribution fuction. in_deg_ccdf = ccdf( in_deg, in_thresholds[ in_i_min ] ) out_deg_ccdf = ccdf( out_deg, out_thresholds[ out_i_min ] ) ## ... and the fitted power law. in_pwr_ccdf = lambda x : ( x / ( in_thresholds[ in_i_min ] + 0.0 ) ) ** ( 1.0 - in_ks_min[ in_i_min, 1, 0 ] ) out_pwr_ccdf = lambda x : ( x / ( out_thresholds[ out_i_min ] + 0.0 ) ) ** ( 1.0 - out_ks_min[ out_i_min, 1, 0 ] ) print "The MLE of the exponent of the inward and outward degree distribution is %.3f (%.4f) and %.3f (%.4f) respectively\n" % ( in_alpha_ml, in_alpha_ml_se, out_alpha_ml, out_alpha_ml_se ) ## Produce the hill plot: the correspondence between the threshold ## and the estimated exponent. 
plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( 'The Hill plot of the degree distribution' ) plt.axhline( y = in_ks_min[ in_i_min, 1, 0 ], linewidth = 1, color = 'r' ) plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' ) plt.loglog( in_thresholds, in_ks_min[:,1,0], "r<-") plt.axhline( y = out_ks_min[ out_i_min, 1, 0 ], linewidth = 1, color = 'b' ) plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( out_thresholds, out_ks_min[:,1,0], "bo-") plt.ylabel( 'alpha' ) ; plt.ylabel( 'threshold' ) ## In fact the KS-metric is the $L^\infty$ norm on the set of distribution ## functions. plt.subplot(122) plt.title( 'The KS metric distance' ) plt.axhline( y = in_ks_min[ in_i_min,0, 0 ], linewidth = 1, color = 'r' ) plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' ) plt.loglog( in_thresholds, in_ks_min[:,0,0], "r<-") plt.axhline( y = out_ks_min[ out_i_min,0, 0 ], linewidth = 1, color = 'b' ) plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( out_thresholds, out_ks_min[:,0,0], "bo-") plt.ylabel( 'max distance' ) ; plt.ylabel( 'threshold' ) plt.show( ) """ Explanation: The ME plots for both the in- and the out-degrees possess significantly long flat regions, and only at the high end of the thresholds do they "explode" into a singularity. Such behaviour hints at the possibilty of an exponential tail in the distributions of both inward and outward vertex degrees. 
End of explanation """ print "OUT-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( out_thresholds[ out_i_min ], out_ks_min[ out_i_min, 1, 0 ], out_ks_min[ out_i_min, 1, 1 ] ) print "IN-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( out_thresholds[ in_i_min ], in_ks_min[ in_i_min, 1, 0 ], in_ks_min[ in_i_min, 1, 1 ] ) plt.figure( 3, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( "Out degree cCDF" ) plt.plot( out_x, out_pwr_ccdf( out_x ), "k-", linewidth = 2 ) plt.plot( out_deg_ccdf[:,0], out_deg_ccdf[:,1], "bo-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.subplot(122) plt.title( "In degree cCDF" ) plt.plot( in_x, in_pwr_ccdf( in_x ), "k-", linewidth = 2 ) plt.plot( in_deg_ccdf[:,0], in_deg_ccdf[:,1], "r<-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.show( ) """ Explanation: Hill plots (the estimated exponent $\hat{\alpha}_u$ against the employed threshold $u$) have a distinct upward curving trend, which can only be a result of an exponential behaviour in the tail of both the in- and the out-degreee distributions. End of explanation """ ##################################################################### ## + 0. Read the graph import networkx as nx G = nx.read_edgelist( "./data/web_Stanford.txt", create_using = nx.DiGraph( ) ); node_in_degree = G.in_degree( ) node_out_degree = G.out_degree( ) in_deg = np.array( node_in_degree.values( ), dtype = np.int ) out_deg = np.array( node_out_degree.values( ), dtype = np.int ) ##################################################################### ##+ 1. Are they correspondent to power law? 
degree_in_freq = np.array( counts( in_deg ) ) degree_out_freq = np.array( counts( out_deg ) ) plt.title( "Node degree frequency" ) plt.xlabel( "degree" ) ; plt.ylabel( "frequency" ) plt.loglog( degree_out_freq[:,0], degree_out_freq[:,1], "bo" ) plt.loglog( degree_in_freq[:,0], degree_in_freq[:,1], "r<" ) plt.show( ) plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( "Degree cCDF-loglog" ) out_cc = ccdf( out_deg, 0 ) plt.loglog( out_cc[:,0], out_cc[:,1], "bo-", linewidth = 2 ) in_cc = ccdf( in_deg, 0 ) plt.loglog( in_cc[:,0], in_cc[:,1], "r<-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.subplot(122) plt.title( "Mean excess plot" ) out_me = mean_excess( out_deg ) plt.loglog( out_me[:,0], out_me[:,1], "bo-", linewidth = 2 ) in_me = mean_excess( in_deg ) plt.loglog( in_me[:,0], in_me[:,1], "r<-", linewidth = 2 ) plt.ylabel( "mean excess" ) ; plt.xlabel( "threshold" ) plt.show( ) """ Explanation: Indeed, both complimetary CDFs show decay rates faster than the power law. WEB Graph End of explanation """ ##################################################################### ##+ 2. Find max and mean values of incoming and outcoming node degrees print "The degrees range from %d to %d for inward direction and from %d to %d for outward edges" % ( min( in_deg ), max( in_deg ), min( out_deg ), max( out_deg ) ) #, "\n" print "The average degree over the sample is %.3f (IN) and %.3f (OUT)" % ( np.sum( in_deg ) / ( G.order( ) + 0.0 ), np.sum( out_deg ) / ( G.order( ) + 0.0 ) ) #, "\n" print "The degree standard deviation is %.3f for the in-degree and %.3f -- out-degree" % ( np.sqrt( np.var( in_deg ) ), np.sqrt( np.var( out_deg ) ) ) #, "\n" print "The median in- and out-degree is %d and %d respectively" % ( np.median( in_deg ), np.median( out_deg ) ) #, "\n" ##################################################################### ##+ 3. Find $\alpha$ via Maximum Likelihood and calculate $\sigma^2$ ##+ 4. 
Determine $x_{min}$ via Kolmogorov-Smirnov test in_alpha_ml, in_alpha_ml_se = mle_alpha( in_deg, min( in_deg )+1 ) out_alpha_ml, out_alpha_ml_se = mle_alpha( out_deg, min( out_deg )+1 ) in_thresholds = np.unique( in_deg ) out_thresholds = np.unique( out_deg ) ## Run the KS threshold selection routine in_ks_min = np.array( [ ks_dist( in_deg, u ) for u in in_thresholds ] ) out_ks_min = np.array( [ ks_dist( out_deg, u ) for u in out_thresholds ] ) ## Select the x_min that brings the KS metric to its minimum on the given ## degree data. in_i_min = np.argmin( in_ks_min[1:,0,0] )+1 out_i_min = np.argmin( out_ks_min[1:,0,0] )+1 ## Produce a dataset for cCDF plotting. in_x = np.arange( in_thresholds[ in_i_min ], 2 * np.max( in_deg ) ) out_x = np.arange( out_thresholds[ out_i_min ], 2 * np.max( out_deg ) ) ## Get the empirical complementary distribution fuction. in_deg_ccdf = ccdf( in_deg, in_thresholds[ in_i_min ] ) out_deg_ccdf = ccdf( out_deg, out_thresholds[ out_i_min ] ) ## ... and the fitted power law. in_pwr_ccdf = lambda x : ( x / ( in_thresholds[ in_i_min ] + 0.0 ) ) ** ( 1.0 - in_ks_min[ in_i_min, 1, 0 ] ) out_pwr_ccdf = lambda x : ( x / ( out_thresholds[ out_i_min ] + 0.0 ) ) ** ( 1.0 - out_ks_min[ out_i_min, 1, 0 ] ) print "The MLE of the exponent of the inward and outward degree distribution is %.3f (%.4f) and %.3f (%.4f) respectively\n" % ( in_alpha_ml, in_alpha_ml_se, out_alpha_ml, out_alpha_ml_se ) ## Produce the hill plot: the correspondence between the threshold ## and the estimated exponent. 
plt.figure( 1, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( 'The Hill plot of the degree distribution' ) plt.axhline( y = in_ks_min[ in_i_min, 1, 0 ], linewidth = 1, color = 'r' ) plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' ) plt.loglog( in_thresholds, in_ks_min[:,1,0], "r<-") plt.axhline( y = out_ks_min[ out_i_min, 1, 0 ], linewidth = 1, color = 'b' ) plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( out_thresholds, out_ks_min[:,1,0], "bo-") plt.ylabel( 'alpha' ) ; plt.ylabel( 'threshold' ) plt.subplot(122) plt.title( 'The KS metric distance' ) plt.axhline( y = in_ks_min[ in_i_min,0, 0 ], linewidth = 1, color = 'r' ) plt.axvline( x = in_thresholds[ in_i_min ], linewidth = 1, color = 'r', linestyle = '--' ) plt.loglog( in_thresholds, in_ks_min[:,0,0], "r<-") plt.axhline( y = out_ks_min[ out_i_min,0, 0 ], linewidth = 1, color = 'b' ) plt.axvline( x = out_thresholds[ out_i_min ], linewidth = 1, color = 'b', linestyle = '--' ) plt.loglog( out_thresholds, out_ks_min[:,0,0], "bo-") plt.ylabel( 'max distance' ) ; plt.ylabel( 'threshold' ) plt.show( ) print "OUT-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( out_thresholds[ out_i_min ], out_ks_min[ out_i_min, 1, 0 ], out_ks_min[ out_i_min, 1, 1 ] ) print "IN-degree: The Kolmogorov-Smirnov metric yielded %.1f as the optimal threshold and %.3f (%.3f) as 'optimal' exponent\n" % ( in_thresholds[ in_i_min ], in_ks_min[ in_i_min, 1, 0 ], in_ks_min[ in_i_min, 1, 1 ] ) plt.figure( 3, figsize = ( 10, 5 ) ) plt.subplot(121) plt.title( "Out degree cCDF" ) plt.plot( out_x, out_pwr_ccdf( out_x ), "k-", linewidth = 2 ) plt.plot( out_deg_ccdf[:,0], out_deg_ccdf[:,1], "bo-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.subplot(122) plt.title( "In degree cCDF" ) plt.plot( in_x, in_pwr_ccdf( in_x ), "k-", linewidth = 2 ) plt.plot( 
in_deg_ccdf[:,0], in_deg_ccdf[:,1], "r<-", linewidth = 2 ) plt.xlabel( "degree" ) ; plt.ylabel( "probability" ) plt.show( ) """ Explanation: The ME plot a very common case, when it is not quite clear whether mean excesses have an upward trand or not. Neglecting the upper thresholds, both distributions behave concictently with a heavy tailed distiribution. However, the tail of a distribution by definition the asymptotic behaviour for increasing threshold. This means that one must needs look at the unstable estimates of the conditional mean at the right end of the threshold range. In the case of the WEB graph the $\hat{M}(u)$ for extremely high thresholds shows clear oscillations around some constant level. This sugests an exponential tail, but still further investigation is required. End of explanation """
aidiary/notebooks
pytorch/180209-dogs-vs-cats.ipynb
mit
mkdir %matplotlib inline """ Explanation: Dogs vs Cats https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition http://aidiary.hatenablog.com/entry/20170108/1483876657 http://aidiary.hatenablog.com/entry/20170603/1496493646 End of explanation """ !ls data/ import os current_dir = os.getcwd() data_dir = os.path.join(current_dir, 'data', 'dogscats') train_dir = os.path.join(data_dir, 'train') valid_dir = os.path.join(data_dir, 'valid') test_dir = os.path.join(data_dir, 'test') !mkdir $data_dir !unzip train.zip -d $data_dir !unzip test.zip -d $data_dir !ls -1 $train_dir | wc -l !ls -1 $test_dir | wc -l """ Explanation: データ整形 https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition train.zipとtest.zipをカレントディレクトリにダウンロードしておく End of explanation """ !mkdir $valid_dir %cd $train_dir import os from glob import glob import numpy as np g = glob('*.jpg') shuf = np.random.permutation(g) for i in range(2000): os.rename(shuf[i], os.path.join(valid_dir, shuf[i])) !ls -1 $valid_dir | wc -l """ Explanation: 訓練データからランダムに選んだ2000画像をvalidationデータとする End of explanation """ # train %cd $train_dir %mkdir cats dogs %mv cat.*.jpg cats/ %mv dog.*.jpg dogs/ # valid %cd $valid_dir %mkdir cats dogs %mv cat.*.jpg cats/ %mv dog.*.jpg dogs/ # test %cd $test_dir %mkdir unknown %mv *.jpg unknown """ Explanation: PyTorchで読み込みやすいようにクラスごとにサブディレクトリを作成する Kaggleのテストデータは正解ラベルがついていないため unknown というサブディレクトリにいれる End of explanation """ vgg16 = models.vgg16(pretrained=True) vgg16.eval() # eval mode! 
""" Explanation: VGG16 の出力層のみ置き換える 分類層を除いたネットワークのパラメータを固定する 分類層のパラメータのみ学習対象 End of explanation """ # 全層のパラメータを固定 for param in vgg16.parameters(): param.requires_grad = False vgg16.classifier = nn.Sequential( nn.Linear(25088, 4096), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Dropout(0.5), nn.Linear(4096, 2) ) use_gpu = torch.cuda.is_available() if use_gpu: vgg16 = vgg16.cuda() print(vgg16) """ Explanation: 層の置き換え 下のように (classifier) の (6) だけを置き換えることはできないみたい ``` 最後のfc層のみ2クラス分類できるように置き換える num_features = vgg16.classifier[6].in_features vgg16.classifier[6] = nn.Linear(num_features, 2) # <= この代入はできない! ``` classifierをまるごと置き換える必要がある End of explanation """ train_preprocess = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) test_preprocess = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) """ Explanation: VGG用のデータ変換を定義 訓練もテストも (224, 224) にサイズ変更のみ 正方形の画像でないので Resize(224) は動作しない 最初はデータ拡張は使わないで試す End of explanation """ train_dataset = datasets.ImageFolder(train_dir, train_preprocess) valid_dataset = datasets.ImageFolder(valid_dir, test_preprocess) test_dataset = datasets.ImageFolder(test_dir, test_preprocess) # DataSetのlenはサンプル数 print(len(train_dataset)) print(len(valid_dataset)) print(len(test_dataset)) """ Explanation: データをロード End of explanation """ classes = train_dataset.classes print(train_dataset.classes) print(valid_dataset.classes) print(test_dataset.classes) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=128, shuffle=False) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False) # DataLoaderのlenはミニバッチ数 print(len(train_loader)) print(len(valid_loader)) print(len(test_loader)) def 
imshow(images, title=None): images = images.numpy().transpose((1, 2, 0)) # (h, w, c) # denormalize mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) images = std * images + mean images = np.clip(images, 0, 1) plt.imshow(images) if title is not None: plt.title(title) images, classes = next(iter(train_loader)) print(images.size(), classes.size()) images = torchvision.utils.make_grid(images[:25], nrow=5) imshow(images) """ Explanation: クラスはアルファベット順? End of explanation """ if use_gpu: vgg16 = vgg16.cuda() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(vgg16.classifier.parameters(), lr=0.001, momentum=0.9) def train(model, criterion, optimizer, train_loader): model.train() running_loss = 0 for batch_idx, (images, labels) in enumerate(train_loader): if use_gpu: images = Variable(images.cuda()) labels = Variable(labels.cuda()) else: images = Variable(images) labels = Variable(labels) optimizer.zero_grad() outputs = model(images) loss = criterion(outputs, labels) running_loss += loss.data[0] loss.backward() optimizer.step() train_loss = running_loss / len(train_loader) return train_loss def valid(model, criterion, valid_loader): model.eval() running_loss = 0 correct = 0 total = 0 for batch_idx, (images, labels) in enumerate(valid_loader): if use_gpu: images = Variable(images.cuda()) labels = Variable(labels.cuda()) else: images = Variable(images) labels = Variable(labels) outputs = model(images) loss = criterion(outputs, labels) running_loss += loss.data[0] _, predicted = torch.max(outputs.data, 1) correct += (predicted == labels.data).sum() total += labels.size(0) val_loss = running_loss / len(valid_loader) val_acc = correct / total return val_loss, val_acc %mkdir logs num_epochs = 5 log_dir = './logs' best_acc = 0 loss_list = [] val_loss_list = [] val_acc_list = [] for epoch in range(num_epochs): loss = train(vgg16, criterion, optimizer, train_loader) val_loss, val_acc = valid(vgg16, criterion, valid_loader) print('epoch %d, loss: %.4f 
val_loss: %.4f val_acc: %.4f' % (epoch, loss, val_loss, val_acc)) if val_acc > best_acc: print('val_acc improved from %.5f to %.5f!' % (best_acc, val_acc)) best_acc = val_acc model_file = 'epoch%03d-%.3f-%.3f.pth' % (epoch, val_loss, val_acc) torch.save(vgg16.state_dict(), os.path.join(log_dir, model_file)) # logging loss_list.append(loss) val_loss_list.append(val_loss) val_acc_list.append(val_acc) """ Explanation: モデル訓練 optimizerには更新対象のパラメータのみ渡す必要がある! requires_grad = False している vgg16.parameters() を指定するとエラーになる End of explanation """
wavelets/pydata_ninja
PyData Ninja.ipynb
mit
3 * 4 """ Explanation: <center> <h1>Introduction to Data Analysis with Python</h1> <br> <h3>Dr. Thomas Wiecki</h3> <br> <h3>Lead Data Scientist</h3> <img width=40% src="http://i2.wp.com/stuffled.com/wp-content/uploads/2014/09/Quantopian-Logo-EPS-vector-image.png?resize=1020%2C680"> </center> <img src="http://cdn.nutanix.com/wp-content/uploads/2013/09/5530553658_cf0a5dd64d_z.jpg"> Source: http://www.nutanix.com/2013/09/16/the-cup-has-been-flipped/ <center> <h1><strike>Introduction to Data Analysis with Python</strike></h1> <h1>The Path of the PyData Ninja</h1> <br> <h3>Dr. Thomas Wiecki</h3> <br> <h3>Lead Data Scientist</h3> <img width=40% src="http://i2.wp.com/stuffled.com/wp-content/uploads/2014/09/Quantopian-Logo-EPS-vector-image.png?resize=1020%2C680"> </center> About me Lead Data Scientist at Quantopian Inc: Building a crowd sourced hedge fund. PhD from Brown University -- research on computational neuroscience and machine learning using Bayesian modeling. Twitter: @twiecki GitHub: @twiecki Blog: http://twiecki.github.io Developer of PyMC3. <a href="https://quantopian.com"><img width=40% src="http://i2.wp.com/stuffled.com/wp-content/uploads/2014/09/Quantopian-Logo-EPS-vector-image.png?resize=1020%2C680"></a> We back the best investment algorithms with investor capital, trading operations, and technology. Do your research in our hosted IPython environment using stock price history, corporate fundamental data, and other data sets. Write your algorithm in your browser. Then backtest it, for free, over 13 years of minute-level data. When you enter the contest, your algorithm will also be considered for our hedge fund. We're hiring in Düsseldorf: Operations Engineer! Why use Python for data analysis? Python is a general purpose language -> No hodge-podge of perl, bash, matlab, fortran. Very easy to learn. Quality and quantity of data analysis libraries is very high and growing at a rapid pace. What are the alternatives? 
R: "The best thing about R is that it was written by statisticians. The worst thing about R is that it was written by statisticians." Bow Cogwill Matlab: $$$, not open Jobs! <img src="http://www.indeed.com/trendgraph/jobgraph.png?q=R++and+%28%22big+data%22+or+%22statistical+analysis%22+or+%22data+mining%22+or+%22data+analytics%22+or+%22machine+learning%22+or+%22quantitative+analysis%22+or+%22business+analytics%22+or+%22statistical+software%22+or+%22predictive+modeling%22%29+%21%22R+D%22+%21%22A+R%22+%21%22H+R%22+%21%22R+N%22++%21toys+%21kids+%21%22+R+Walgreen%22+%21walmart+%21%22HVAC+R%22+%21%22R+Bard%22++%2C+python+and+%28%22big+data%22+or+%22statistical+analysis%22+or+%22data+mining%22+or+%22data+analytics%22+or+%22machine+learning%22+or+%22quantitative+analysis%22+or+%22business+analytics%22+or+%22statistical+software%22+or+%22predictive+modeling%22%29"> <center> <h2>The PyData Stack</h2> Source: Jake VanderPlas: State of the Tools <center><img src='pydata_stack-0.jpg' width=50%></center> <center> <h2>The PyData Stack</h2> <center><img src='pydata_stack-1.jpg' width=50%></center> <center> <h2>The PyData Stack</h2> <center><img src='pydata_stack-2.jpg' width=50%></center> <center> <h2>The PyData Stack</h2> <center><img src='pydata_stack-3.jpg' width=50%></center> <center> <h2>The PyData Stack</h2> <center><img src='pydata_stack-4.jpg' width=50%></center> Level 0: n00b <img src="beginner.png"> How to get started Start by installing the Anaconda Python distribution (use Python 3.4) Install the jupyter notebook (former IPython) Do a basic Python tutorial to get a handle on the syntax, e.g. 
Learn Python the Hard Way Python basics Interpreted and interactive End of explanation """ x = [1, 2, 3] print(x) x.append(4) print(x) """ Explanation: Lists End of explanation """ measurements = {'height': [1.70, 1.80, 1.50], 'weight': [60, 120, 50]} measurements measurements['height'] """ Explanation: Dictionaries End of explanation """ x = [1, 2, 3, 4] [i**2 for i in x] def calc_bmi(weight, height): return weight / height**2 [calc_bmi(w, h) for w, h in zip(measurements['weight'], measurements['height'])] """ Explanation: Comprehensions End of explanation """ import pandas as pd import numpy as np s = pd.Series([1,3,5,np.nan,6,8]) s dates = pd.date_range('20130101', periods=6) df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD')) df df[df.A > 0] df.mean() df.mean(axis='columns') """ Explanation: Level 1: "The Pandas Wrangler" <img src="amateur.png"> How to become a "Pandas Wrangler" Learn Pandas (data wrangling): http://pandas.pydata.org/pandas-docs/stable/tutorials.html Learn Seaborn (data visualization): http://stanford.edu/~mwaskom/software/seaborn/ Why not start with NumPy and Matplotlib? These libraries have become core libraries. Better results can be achieved starting with Pandas and Seaborn. 
For more motivation, see http://twiecki.github.io/blog/2014/11/18/python-for-data-science/ Pandas End of explanation """ df2 = pd.DataFrame({ 'A' : 1., 'B' : pd.Timestamp('20130102'), 'C' : pd.Series(1,index=list(range(4)),dtype='float32'), 'D' : np.array([3] * 4,dtype='int32'), 'E' : pd.Categorical(["test","train","test","train"]), 'F' : 'foo' }) df2 df2.dtypes """ Explanation: Mixed types End of explanation """ df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B' : ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C' : np.random.randn(8), 'D' : np.random.randn(8)}) df df.groupby('A').sum() df.groupby(['A', 'B']).sum() """ Explanation: Grouping End of explanation """ %matplotlib inline import seaborn as sns x = np.random.normal(size=100) sns.distplot(x); """ Explanation: Seaborn: Generating statistical plots End of explanation """ mean, cov = [0, 1], [(1, .5), (.5, 1)] data = np.random.multivariate_normal(mean, cov, 200) df = pd.DataFrame(data, columns=["x", "y"]) df sns.jointplot(x="x", y="y", data=df, kind="kde"); """ Explanation: 2D distributions End of explanation """ iris = sns.load_dataset("iris") sns.pairplot(iris); """ Explanation: All pairwise combinations End of explanation """ tips = sns.load_dataset("tips") tips.head() sns.lmplot(x="total_bill", y="tip", hue="smoker", data=tips); sns.lmplot(x="total_bill", y="tip", col="day", data=tips, col_wrap=2, size=3); sns.factorplot(x="time", y="total_bill", hue="smoker", col="day", data=tips, kind="box", size=4, aspect=.5); """ Explanation: Seaborn: Regressions End of explanation """ from sklearn import svm X = [[0, 0], [1, 1]] y = [0, 1] clf = svm.SVC() clf.fit(X, y) clf.predict([[0, .5]]) """ Explanation: Level 2: "The Kaggle top scorer" <img src="semi-pro.png"> Lots of machine learning and stats libraries SciPy: comprehensive library of numerical routines like optimizers, integrators, FFT. 
scikit-learn: The ML library out there statsmodels: Frequentist statistics SymPy: Symbolic Math PyMC3: Probabilistic programming in Python scikit-learn Taken from http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html End of explanation """ from sklearn import datasets from sklearn.cross_validation import train_test_split from sklearn.grid_search import GridSearchCV from sklearn.metrics import confusion_matrix from sklearn.svm import SVC digits = datasets.load_digits() import matplotlib.pyplot as plt #Display the first digit plt.figure(1, figsize=(3, 3)) plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest') plt.grid('off') n_samples = len(digits.images) X = digits.images.reshape((n_samples, -1)) y = digits.target # Split the dataset in two equal parts X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=0) # Set the parameters by cross-validation tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}] clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5) clf.fit(X_train, y_train) print(clf.best_params_) y_true, y_pred = y_test, clf.predict(X_test) ax = sns.heatmap(confusion_matrix(y_true, y_pred)) ax.set(xlabel='true label', ylabel='predicted label'); """ Explanation: Advanced example: Grid Search with Cross-Validation to find hyper parameters Taken from http://scikit-learn.org/stable/auto_examples/grid_search_digits.html and http://scikit-learn.org/stable/auto_examples/datasets/plot_digits_last_image.html End of explanation """ import numpy as np X = np.random.random((1000, 3)) def pairwise_python(X): M = X.shape[0] N = X.shape[1] D = np.empty((M, M), dtype=np.float) for i in range(M): for j in range(M): d = 0.0 for k in range(N): tmp = X[i, k] - X[j, k] d += tmp * tmp D[i, j] = np.sqrt(d) return D %timeit pairwise_python(X) """ Explanation: Level 3: "Lord of Speed" <img src="pro.png"> Python is slow! 
The interpreted language is indeed quite slow (just like matlab and R are slow) Vectorize computations (i.e. the matlab way): leads to unreadable code. Great tools to generate C-code Cython: Write Python-like syntax that can be translated to fast C-code and called from Python. Numba: Directly write Python and auto-translate to LLVM. Theano: Write numerical expressions in a NumPy-like syntax to build up compute-graph that can be compiled. PyCUDA: GPU programming. Comparing Python, Cython and Numba Taken from https://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/ End of explanation """ %load_ext cython %%cython import numpy as np cimport cython from libc.math cimport sqrt @cython.boundscheck(False) @cython.wraparound(False) def pairwise_cython(double[:, ::1] X): cdef int M = X.shape[0] cdef int N = X.shape[1] cdef double tmp, d cdef double[:, ::1] D = np.empty((M, M), dtype=np.float64) for i in range(M): for j in range(M): d = 0.0 for k in range(N): tmp = X[i, k] - X[j, k] d += tmp * tmp D[i, j] = sqrt(d) return np.asarray(D) %timeit pairwise_cython(X) """ Explanation: Cython End of explanation """ from numba.decorators import jit pairwise_numba = jit(pairwise_python) # Run once to compile before timing pairwise_numba(X) %timeit pairwise_numba(X) """ Explanation: Numba End of explanation """ !ls -lahL POIWorld.csv from dask import dataframe as dd columns = ["name", "amenity", "Longitude", "Latitude"] data = dd.read_csv('POIWorld.csv', usecols=columns) data with_name = data[data.name.notnull()] is_starbucks = with_name.name.str.contains('[Ss]tarbucks') is_dunkin = with_name.name.str.contains('[Dd]unkin') starbucks = with_name[is_starbucks] dunkin = with_name[is_dunkin] from dask.diagnostics import ProgressBar with ProgressBar(): starbucks_count, dunkin_count = dd.compute(starbucks.name.count(), dunkin.name.count()) starbucks_count, dunkin_count locs = dd.compute(starbucks.Longitude, starbucks.Latitude, dunkin.Longitude, dunkin.Latitude) # extract arrays of 
values fro the series: lon_s, lat_s, lon_d, lat_d = [loc.values for loc in locs] import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap def draw_USA(): """initialize a basemap centered on the continental USA""" plt.figure(figsize=(14, 10)) return Basemap(projection='lcc', resolution='l', llcrnrlon=-119, urcrnrlon=-64, llcrnrlat=22, urcrnrlat=49, lat_1=33, lat_2=45, lon_0=-95, area_thresh=10000) m = draw_USA() # Draw map background m.fillcontinents(color='white', lake_color='#eeeeee') m.drawstates(color='lightgray') m.drawcoastlines(color='lightgray') m.drawcountries(color='lightgray') m.drawmapboundary(fill_color='#eeeeee') # Plot the values in Starbucks Green and Dunkin Donuts Orange style = dict(s=5, marker='o', alpha=0.5, zorder=2) m.scatter(lon_s, lat_s, latlon=True, label="Starbucks", color='#00592D', **style) m.scatter(lon_d, lat_d, latlon=True, label="Dunkin' Donuts", color='#FC772A', **style) plt.legend(loc='lower left', frameon=False); """ Explanation: Level 4: "High Priest of Big Data" <img src="master.png"> Lots of things happening! 
Big Data Blaze + Dask Ibis PySpark bcolz Interactive data visualization Bokeh Plotly pyxley Work interactively on Big Data with Dask Taken from https://jakevdp.github.io/blog/2015/08/14/out-of-core-dataframes-in-python/ End of explanation """ from bokeh.io import output_notebook from bokeh.resources import CDN from bokeh.plotting import figure, show output_notebook(resources=CDN) from __future__ import print_function from math import pi from bokeh.browserlib import view from bokeh.document import Document from bokeh.embed import file_html from bokeh.models.glyphs import Circle, Text from bokeh.models import ( BasicTicker, ColumnDataSource, Grid, GridPlot, LinearAxis, DataRange1d, PanTool, Plot, WheelZoomTool ) from bokeh.resources import INLINE from bokeh.sampledata.iris import flowers from bokeh.plotting import show colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'} flowers['color'] = flowers['species'].map(lambda x: colormap[x]) source = ColumnDataSource( data=dict( petal_length=flowers['petal_length'], petal_width=flowers['petal_width'], sepal_length=flowers['sepal_length'], sepal_width=flowers['sepal_width'], color=flowers['color'] ) ) text_source = ColumnDataSource( data=dict(xcenter=[125], ycenter=[135]) ) xdr = DataRange1d() ydr = DataRange1d() def make_plot(xname, yname, xax=False, yax=False, text=None): plot = Plot( x_range=xdr, y_range=ydr, background_fill="#efe8e2", border_fill='white', title="", min_border=2, h_symmetry=False, v_symmetry=False, plot_width=150, plot_height=150) circle = Circle(x=xname, y=yname, fill_color="color", fill_alpha=0.2, size=4, line_color="color") r = plot.add_glyph(source, circle) xdr.renderers.append(r) ydr.renderers.append(r) xticker = BasicTicker() if xax: xaxis = LinearAxis() plot.add_layout(xaxis, 'below') xticker = xaxis.ticker plot.add_layout(Grid(dimension=0, ticker=xticker)) yticker = BasicTicker() if yax: yaxis = LinearAxis() plot.add_layout(yaxis, 'left') yticker = yaxis.ticker 
plot.add_layout(Grid(dimension=1, ticker=yticker)) plot.add_tools(PanTool(), WheelZoomTool()) if text: text = " ".join(text.split('_')) text = Text( x={'field':'xcenter', 'units':'screen'}, y={'field':'ycenter', 'units':'screen'}, text=[text], angle=pi/4, text_font_style="bold", text_baseline="top", text_color="#ffaaaa", text_alpha=0.7, text_align="center", text_font_size="28pt" ) plot.add_glyph(text_source, text) return plot xattrs = ["petal_length", "petal_width", "sepal_width", "sepal_length"] yattrs = list(reversed(xattrs)) plots = [] for y in yattrs: row = [] for x in xattrs: xax = (y == yattrs[-1]) yax = (x == xattrs[0]) text = x if (x==y) else None plot = make_plot(x, y, xax, yax, text) row.append(plot) plots.append(row) grid = GridPlot(children=plots, title="iris_splom") show(grid) """ Explanation: Interactive data visualization with Bokeh End of explanation """
jegibbs/phys202-2015-work
assignments/assignment03/NumpyEx01.ipynb
mit
import numpy as np %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import antipackage import github.ellisonbg.misc.vizarray as va """ Explanation: Numpy Exercise 1 Imports End of explanation """ def checkerboard(size): a = np.zeros((size,size)) a[0::2,::2] = 1 a[1::2,1::2] = 1 return a a = checkerboard(4) assert a[0,0]==1.0 assert a.sum()==8.0 assert a.dtype==np.dtype(float) assert np.all(a[0,0:5:2]==1.0) assert np.all(a[1,0:5:2]==0.0) b = checkerboard(5) assert b[0,0]==1.0 assert b.sum()==13.0 assert np.all(b.ravel()[0:26:2]==1.0) assert np.all(b.ravel()[1:25:2]==0.0) """ Explanation: Checkerboard Write a Python function that creates a square (size,size) 2d Numpy array with the values 0.0 and 1.0: Your function should work for both odd and even size. The 0,0 element should be 1.0. The dtype should be float. End of explanation """ va.set_block_size(10) va.enable() va.vizarray(checkerboard(20)) assert True """ Explanation: Use vizarray to visualize a checkerboard of size=20 with a block size of 10px. End of explanation """ va.set_block_size(5) va.vizarray(checkerboard(27)) assert True """ Explanation: Use vizarray to visualize a checkerboard of size=27 with a block size of 5px. End of explanation """
jtwhite79/pyemu
examples/errvarexample_freyberg.ipynb
bsd-3-clause
import flopy # load the model model_ws = os.path.join("Freyberg","extra_crispy") ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws) # Because this model is old -- it predates flopy's modelgrid implementation. # And because modelgrid has been implemented without backward compatability # the modelgrid object is not constructed properly. # - We will use some sneaky pyemu to get things to how they should be import pyemu sr = pyemu.helpers.SpatialReference.from_namfile( os.path.join(model_ws, ml.namefile), delc=ml.dis.delc, delr=ml.dis.delr ) ml.modelgrid.set_coord_info( xoff=sr.xll, yoff=sr.yll, angrot=sr.rotation, proj4=sr.proj4_str, merge_coord_info=True, ) # plot some model attributes fig = plt.figure(figsize=(10,10)) ax = plt.subplot(111,aspect="equal") ml.upw.hk.plot(axes=ax,colorbar="K m/d",alpha=0.3) ml.wel.plot(axes=ax) # flopy possibly now only plots BCs in black ml.riv.plot(axes=ax) # plot obs locations obs = pd.read_csv(os.path.join("Freyberg","misc","obs_rowcol.dat"), delim_whitespace=True) obs_x = [ml.modelgrid.xcellcenters[r-1,c-1] for r,c in obs.loc[:,["row","col"]].values] obs_y = [ml.modelgrid.ycellcenters[r-1,c-1] for r,c in obs.loc[:,["row","col"]].values] ax.scatter(obs_x,obs_y,marker='.',label="obs" ) #plot names on the pumping well locations wel_data = ml.wel.stress_period_data[0] wel_x = ml.modelgrid.xcellcenters[wel_data["i"],wel_data["j"]] wel_y = ml.modelgrid.ycellcenters[wel_data["i"],wel_data["j"]] for i,(x,y) in enumerate(zip(wel_x,wel_y)): ax.text(x,y,"{0} ".format(i+1),ha="right",va="center", font=dict(size=15), color='r') ax.set_ylabel("y") ax.set_xlabel("x") ax.add_patch(rect((0,0),0,0,label="well",ec="none",fc="r")) ax.add_patch(rect((0,0),0,0,label="river",ec="none",fc="g")) ax.legend(bbox_to_anchor=(1.5,1.0),frameon=False) plt.savefig("domain.pdf") """ Explanation: Model background Here is an example based on the model of Freyberg, 1988. 
The synthetic model is a 2-dimensional MODFLOW model with 1 layer, 40 rows, and 20 columns. The model has 2 stress periods: an initial steady-state stress period used for calibration, and a 5-year transient stress period. The calibration period uses the recharge and well flux of Freyberg, 1988; the last stress period uses 25% less recharge and 25% more pumping. The inverse problem has 761 parameters: hydraulic conductivity of each active model cell, calibration and forecast period recharge multipliers, storage and specific yield, calibration and forecast well flux for each of the six wells, and river bed conductance for each of the 40 cells with river-type boundary conditions. The inverse problem has 12 head observations, measured at the end of the steady-state calibration period. The forecasts of interest include the sw-gw exchange flux during both stress periods (observations named sw_gw_0 and sw_gw_1), and the water level in well cell 6 located at row 28 column 5 at the end of the stress periods (observations named or28c05_0 and or28c05_1). The forecasts are included in the Jacobian matrix as zero-weight observations. The model files, pest control file and previously-calculated jacobian matrix are in the freyberg/ folder
Freyberg, David L. "AN EXERCISE IN GROUND‐WATER MODEL CALIBRATION AND PREDICTION." Groundwater 26.3 (1988): 350-360.
End of explanation
"""
import pyemu
"""
Explanation: Using pyemu
End of explanation
"""
# get the list of forecast names from the pest++ argument
# in the pest control file
jco = os.path.join("Freyberg","freyberg.jcb")
pst = pyemu.Pst(jco.replace("jcb","pst"))
omitted = [pname for pname in pst.par_names if \
           pname.startswith("wf") or pname.startswith("rch")]
forecasts = pst.pestpp_options["forecasts"].split(',')
la = pyemu.ErrVar(jco=jco,verbose="errvar_freyberg.log",
                  omitted_parameters=omitted)
print("observations, parameters found in jacobian:",la.jco.shape)
"""
Explanation: First create a linear_analysis object. 
We will use ErrVar derived type, which replicates the behavior of the PREDVAR suite of PEST as well as ident_par utility. We pass it the name of the jacobian matrix file. Since we don't pass an explicit argument for parcov or obscov, pyemu attempts to build them from the parameter bounds and observation weights in a pest control file (.pst) with the same base case name as the jacobian. Since we are interested in forecast uncertainty as well as parameter uncertainty, we also pass the names of the forecast sensitivity vectors we are interested in, which are stored in the jacobian as well. Note that the forecasts argument can be a mixed list of observation names, other jacobian files or PEST-compatible ASCII matrix files. Remember you can pass a filename to the verbose argument to write log file. Since most groundwater model history-matching analyses focus on adjusting hetergeneous hydraulic properties and not boundary condition elements, let's identify the well flux and recharge parameters as omitted in the error variance analysis. We can conceptually think of this action as excluding these parameters from the history-matching process. Later we will explicitly calculate the penalty for not adjusting these parameters. End of explanation """ s = la.qhalfx.s import pylab as plt figure = plt.figure(figsize=(10, 5)) ax = plt.subplot(111) ax.plot(s.x) ax.set_title("singular spectrum") ax.set_ylabel("power") ax.set_xlabel("singular value") ax.set_xlim(0,20) plt.show() """ Explanation: Parameter identifiability The errvar dervied type exposes a method to get a pandas dataframe of parameter identifiability information. Recall that parameter identifiability is expressed as $d_i = \Sigma(\mathbf{V}_{1i})^2$, where $d_i$ is the parameter identifiability, which ranges from 0 (not identified by the data) to 1 (full identified by the data), and $\mathbf{V}_1$ are the right singular vectors corresonding to non-(numerically) zero singular values. 
First let's look at the singular spectrum of $\mathbf{Q}^{\frac{1}{2}}\mathbf{J}$, where $\mathbf{Q}$ is the cofactor matrix and $\mathbf{J}$ is the jacobian:
End of explanation
"""
# the method is passed the number of singular vectors to include in V_1
ident_df = la.get_identifiability_dataframe(12)
ident_df.sort_values(by="ident",ascending=False).iloc[0:10]
"""
Explanation: We see that the singular spectrum decays rapidly (not uncommon) and that we can really only support about 12 right singular vectors even though we have 700+ parameters in the inverse problem. Let's get the identifiability dataframe at 12 singular vectors:
End of explanation
"""
ax = ident_df.sort_values(by="ident",ascending=False).iloc[0:20].\
    loc[:,"ident"].plot(kind="bar",figsize=(10,10))
ax.set_ylabel("identifiability")
"""
Explanation: Plot the identifiability:
End of explanation
"""
sing_vals = np.arange(13)
"""
Explanation: Forecast error variance
Now let's explore the error variance of the forecasts we are interested in. We will use an extended version of the forecast error variance equation:
$\sigma_{s - \hat{s}}^2 = \underbrace{\textbf{y}_i^T(\textbf{I} - \textbf{R})\boldsymbol{\Sigma}_{\boldsymbol{\theta}_i}(\textbf{I} - \textbf{R})^T\textbf{y}_i}_{1} + \underbrace{\textbf{y}_i^T\textbf{G}\boldsymbol{\Sigma}_{\boldsymbol{\epsilon}}\textbf{G}^T\textbf{y}_i}_{2} + \underbrace{\textbf{p}\boldsymbol{\Sigma}_{\boldsymbol{\theta}_o}\textbf{p}^T}_{3}$
Where term 1 is the null-space contribution, term 2 is the solution space contribution and term 3 is the model error term (the penalty for not adjusting uncertain parameters). Remember the well flux and recharge parameters that we marked as omitted? The consequences of that action can now be explicitly evaluated. See Moore and Doherty (2005) and White and others (2014) for more explanation of these terms. 
Note that if you don't have any omitted_parameters, the only terms 1 and 2 contribute to error variance First we need to create a list (or numpy ndarray) of the singular values we want to test. Since we have 12 data, we only need to test up to $13$ singular values because that is where the action is: End of explanation """ errvar_df = la.get_errvar_dataframe(sing_vals) errvar_df.iloc[0:10] errvar_df[["first"]].to_latex("sw_gw_0.tex") """ Explanation: The ErrVar derived type exposes a method to get a multi-index pandas dataframe with each of the terms of the error variance equation: End of explanation """ colors = {"first": 'g', "second": 'b', "third": 'c'} max_idx = 19 idx = sing_vals[:max_idx] for ipred, pred in enumerate(forecasts): pred = pred.lower() fig = plt.figure(figsize=(10, 10)) ax = plt.subplot(111) ax.set_title(pred) first = errvar_df[("first", pred)][:max_idx] second = errvar_df[("second", pred)][:max_idx] third = errvar_df[("third", pred)][:max_idx] ax.bar(idx, first, width=1.0, edgecolor="none", facecolor=colors["first"], label="first",bottom=0.0) ax.bar(idx, second, width=1.0, edgecolor="none", facecolor=colors["second"], label="second", bottom=first) ax.bar(idx, third, width=1.0, edgecolor="none", facecolor=colors["third"], label="third", bottom=second+first) ax.set_xlim(-1,max_idx+1) ax.set_xticks(idx+0.5) ax.set_xticklabels(idx) #if ipred == 2: ax.set_xlabel("singular value") ax.set_ylabel("error variance") ax.legend(loc="upper right") plt.show() """ Explanation: plot the error variance components for each forecast: End of explanation """ schur = la.get(astype=pyemu.Schur) schur_prior = schur.prior_forecast schur_post = schur.posterior_forecast print("{0:10s} {1:>12s} {2:>12s} {3:>12s} {4:>12s}" .format("forecast","errvar prior","errvar min", "schur prior", "schur post")) for ipred, pred in enumerate(forecasts): first = errvar_df[("first", pred)][:max_idx] second = errvar_df[("second", pred)][:max_idx] min_ev = np.min(first + second) prior_ev = 
first[0] + second[0] prior_sh = schur_prior[pred] post_sh = schur_post[pred] print("{0:12s} {1:12.6f} {2:12.6f} {3:12.6} {4:12.6f}" .format(pred,prior_ev,min_ev,prior_sh,post_sh)) """ Explanation: Here we see the trade off between getting a good fit to push down the null-space (1st) term and the penalty for overfitting (the rise of the solution space (2nd) term)). The sum of the first two terms in the "appearent" error variance (e.g. the uncertainty that standard analyses would yield) without considering the contribution from the omitted parameters. You can verify this by checking prior uncertainty from the Schur's complement notebook against the zero singular value result using only terms 1 and 2. Note that the top of the green bar is the limit of traditional uncertainty/error variance analysis: accounting for parameter and observation We also see the added penalty for not adjusting the well flux and recharge parameters. For the water level at the end of the calibration period forecast (or28c05_0), the fact the we have left parameters out doesn't matter - the parameter compensation associated with fixing uncertain model inputs can be "calibrated out" beyond 2 singular values. For the water level forecast during forecast period (or28c05_1), the penalty for fixed parameters persists -it s nearly constant over the range of singular values. For sw_gw_0, the situation is much worse: not only are we greatly underestimating uncertainty by omitting parameters, worse, calibration increases the uncertainty for this forecast because the adjustable parametres are compensating for the omitted, uncertaint parameters in ways that are damanaging to the forecast. For the forecast period sw-gw exchange (sw_gw_1), calibration doesn't help or hurt - this forecast depend entirely on null space parameter components. But treating the recharge and well pumpage as "fixed" (omitted) results in greatly underestimated uncertainty. 
Let's check the errvar results against the results from schur. This is simple with pyemu, we simply cast the errvar type to a schur type: End of explanation """
qkitgroup/qkit
qkit/doc/notebooks/spectroscopy_measurement_basics.ipynb
gpl-2.0
# start qkit and import the needed modules. we here assume an already configured qkit measurement environment import qkit qkit.start() from qkit.measure.spectroscopy import spectroscopy import qkit.measure.samples_class as sc import numpy as np # initialize instruments; as an example we here work with a Keysight VNA, a Yokogawa current source, # and an Anritsu MW source vna = qkit.instruments.create('vna', 'Keysight_VNA_E5071C', address='TCPIP::XXX.XXX.XXX.XXX') yoko = qkit.instruments.create('yoko', 'Yokogawa_GS820', address='TCPIP::XXX.XXX.XXX.XXX') mw_src = qkit.instruments.create('mw_src', 'Anritsu_MG37022', address='TCPIP::XXX.XXX.XXX.XXX') # create/load sample object; (optional), for more information see the example notebook on the sample class. sample_filepath = r'\some\path\sample_1.sample' smpl = sc.Sample(sample_filepath) """ Explanation: Basic spectroscopy measurements This notebook is indended as a starting point into spectroscopic (frequency domain) measurements with qkit. We focus on measurements with vector network analyzers (VNAs) in the GHz regime. The notebook briefly covers the qkit startup, initialization of measurement devices and the use of a sample object. For a detailed view on these topic you will find more information in their respective example notebooks. After creating of an object of the spectrum class and parsing the device information to the object we can start measuring. The spectrum class offers frequency domain VNA measurements with up to two additional external sweep parameters. The data is stored in a hdf5 file and can be view and further analyzed. End of explanation """ s = spectroscopy.spectrum(vna=vna, sample = smpl) """ Explanation: Creating a spectrum object. 
The init takes<br> * vna: instrument object (mandatory)<br> * exp_name: string (optional) for a brief name of the experiment<br> * sample: sample object (optional) End of explanation """ vna.set_centerfreq(smpl.fr) vna.set_span(200e6) vna.set_averages(10) vna.set_Average(True) """ Explanation: The general VNA parameters (probing frequencies, power, etc.) can be either set at the device or use the qkit instrument command. Using sample object attributes is possible as well. End of explanation """ comment = """ * -30dB attenuator @ VNA """ s.comment = comment s.measure_1D() """ Explanation: Next we want to record the VNA trace. In the spectrum object the parameter comment can be used for all non-digital information. Any digitally available instrument settings are saved automatically. End of explanation """ comment = """ * measure resonance in reflection * -30dB attenuator @ VNA """ s.set_resonator_fit(fit_function='circle_fit_reflection') s.comment = comment s.measure_1D() """ Explanation: For resonator measurements it is also possible to fit the probed resonance live while measureing. For the fits to converge the VNA parameters need to adjusted properly. End of explanation """ comment = """ * resonator vs applied current * -30dB step attenuator @ VNA """ s.comment = comment s.set_resonator_fit(fit_resonator=False) # x_func gets called for every value of i. Here it would be not necessary to define a function for only the ramp fct, # it will be useful in the next example. def x_func(i): return yoko.ramp_current(i, 1e-3) s.set_x_parameters(x_vec = np.arange(0, 1, 1e-3), x_coordname = 'current', x_set_obj = x_func, x_unit = 'A') s.measure_2D() yoko.ramp_current(0, 1e-3) """ Explanation: A sweep parameter can be added to the spectum object and attributed to a sweep axis. In the example below we measure a resonator agains a applied current (ie for creating a magnetic field bias). The current is swept from 0 to 1A in 1mA steps. 
After the measurement the current is ramped down again. For one value of the x-parameter all VNA frequencies are probed before the next value of x is set. End of explanation """ # the x/y-loops are interleaved, y changes "faster" than x: # 1) each VNA freq at y0 and x0 # 2) each VNA freq at y1 and x0 # ... # 3) each VNA freq at yN and x0 # 4) each VNA freq at y0 and x1 # ... # 5) each VNA freq at yN and xM comment = """ * resonator vs applied current at different power levels * -30dB step attenuator @ VNA """ s.comment = comment s.set_resonator_fit(fit_resonator=False) def x_func(i): return yoko.ramp_current(i, 1e-3) # Here the called function features more commends, i.e. change the number of averages at different powers to shorten the # measurement time. def y_func(i): if i < -25: vna.set_averages(10) else: vna.set_averages(5) return vna.set_power(i) s.set_x_parameters(x_vec = np.arange(0, 1.001, 1e-3), x_coordname = 'current', x_set_obj = x_func, x_unit = 'A') s.set_y_parameters(y_vec = np.arange(-35, 11, 5), y_coordname = 'power', y_set_obj = y_func, y_unit = 'dBm') s.measure_3D() yoko.ramp_current(0, 1e-3) """ Explanation: In the next example the VNA power is changed in addition to the current. Depending on the applied power, the number of averages is changed. End of explanation """
mne-tools/mne-tools.github.io
stable/_downloads/98d9662291626be9c938eee7a8fcc9bd/sensor_noise_level.ipynb
bsd-3-clause
# Author: Eric Larson <larson.eric.d@gmail.com> # # License: BSD-3-Clause import os.path as op import mne data_path = mne.datasets.sample.data_path() raw_erm = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample', 'ernoise_raw.fif'), preload=True) """ Explanation: Show noise levels from empty room data This shows how to use :meth:mne.io.Raw.plot_psd to examine noise levels of systems. See :footcite:KhanCohen2013 for an example. End of explanation """ raw_erm.plot_psd(tmax=10., average=True, spatial_colors=False, dB=False, xscale='log') """ Explanation: We can plot the absolute noise levels: End of explanation """
karlstroetmann/Artificial-Intelligence
Python/5 Linear Regression/Corona.ipynb
gpl-2.0
num_cases = [16, 19, 24, 53, 66, 117, 150, 188, 240, 349, 534, 684, 847, 1112, 1565, 1966, 2745, 3675] """ Explanation: Predicting the Spread of Covid-19 with Linear Regression The array num_cases contains the number of cases on successive days in the time period from February, the 25th up to the 13th of March 2020, i.e. num_cases[0] is the number of Covid-19 cases on the 25th of February and num_cases[i]is the number of Covid-19 cases i days after the 25th of February. I have taken these data form the website <a href="http://interaktiv.morgenpost.de/corona-virus-karte-infektionen-deutschland-weltweit/">http://interaktiv.morgenpost.de/corona-virus-karte-infektionen-deutschland-weltweit/</a>. End of explanation """ m = len(num_cases) m """ Explanation: m is the number of data points. End of explanation """ X = range(m) list(X) """ Explanation: We take the number of days after the 25th of February as x-values. End of explanation """ import numpy as np import matplotlib.pyplot as plt import seaborn as sns plt.figure(figsize=(12, 10)) sns.set(style='darkgrid') plt.xticks(ticks=X) plt.scatter(X, num_cases, c='r') plt.xlabel('date in number of days since the 25th of Febuary') plt.ylabel('number of Covid-19 cases') plt.title('Number of Covid-19 cases versus date.') """ Explanation: To begin with, we plot the data. To this end we have to load some libraries. End of explanation """ import math Y = [math.log(y) for y in num_cases] plt.figure(figsize=(12, 10)) sns.set(style='whitegrid') plt.xticks(ticks=X) plt.scatter(X, Y, c='r') plt.xlabel('date in number of days since the 25th of Febuary') plt.ylabel('log of the number of Covid-19 cases') plt.title('Log of the Number of Covid-19 cases versus date.') """ Explanation: This does not look exactly linear. Rather, it might be exponential. Hence, we take the logarithm of the number of cases and plot these logarithms with respect to the date. 
End of explanation """ X = np.array(X[5:]) Y = np.array(Y[5:]) X """ Explanation: This looks better. There seem to be a jump at x = 3 and x = 5. These correspond to the date of Friday, the 28th of February and Sunday, the 1st of March. These dates coincide with the German Carnival in 2020. This period might have enhanced the normal spreading of the disease. Therefore, let us just take the data starting from the 1st of March. To ease the computation, we transform our data into numpy vectors. Note that we throw away the first 5 data. End of explanation """ xMean = np.mean(X) xMean """ Explanation: We compute the average value of X according to the formula: $$ \bar{\mathbf{x}} = \frac{1}{m} \cdot \sum\limits_{i=1}^m x_i $$ End of explanation """ yMean = np.mean(Y) yMean """ Explanation: We compute the average number of the logarithm of Covid-19 cases according to the formula: $$ \bar{\mathbf{y}} = \frac{1}{m} \cdot \sum\limits_{i=1}^m y_i $$ End of explanation """ ϑ1 = np.sum((X - xMean) * (Y - yMean)) / np.sum((X - xMean) ** 2) ϑ1 """ Explanation: The coefficient $\vartheta_1$ is computed according to the formula: $$ \vartheta_1 = \frac{\sum\limits_{i=1}^m \bigl(x_i - \bar{\mathbf{x}}\bigr) \cdot \bigl(y_i - \bar{\mathbf{y}}\bigr)}{ \sum\limits_{i=1}^m \bigl(x_i - \bar{\mathbf{x}}\bigr)^2} $$ End of explanation """ ϑ0 = yMean - ϑ1 * xMean ϑ0 """ Explanation: The coefficient $\vartheta_0$ is computed according to the formula: $$ \vartheta_0 = \bar{\mathbf{y}} - \vartheta_1 \cdot \bar{\mathbf{x}} $$ End of explanation """ xMax = max(X) + 0.2 plt.figure(figsize=(12, 10)) sns.set(style='darkgrid') plt.xticks(ticks=X) plt.scatter(X, Y, c='r') plt.plot([0, xMax], [ϑ0, ϑ0 + ϑ1 * xMax], c='b') plt.xlabel('date in number of days since the 1st of March') plt.ylabel('log of the number of Covid-19 cases') plt.title('Log of the Number of Covid-19 cases versus date.') """ Explanation: Let us plot the line $y(x) = ϑ0 + ϑ1 \cdot x$ together with our data: End of explanation """ TSS 
= np.sum((Y - yMean) ** 2) """ Explanation: The blue line is not too far of the data points. In order to judge the quality of our model we compute both the <em style="color:blue">total sum of squares</em> and the <em style="color:blue">residual sum of squares</em>. End of explanation """ RSS = np.sum((ϑ1 * X + ϑ0 - Y) ** 2) """ Explanation: Next, we compute the residual sum of squares RSS as follows: $$ \mathtt{RSS} := \sum\limits_{i=1}^m \bigl(\vartheta_1 \cdot x_i + \vartheta_0 - y_i\bigr)^2 $$ End of explanation """ R2 = 1 - RSS/TSS R2 """ Explanation: Now $R^2$ is calculated via the formula: $$ R^2 = 1 - \frac{\mathtt{RSS}}{\mathtt{TSS}}$$ End of explanation """ round(math.exp(ϑ1 * 20 + ϑ0)) """ Explanation: It seems that our model is a good approximation of the data. Finally, let us make a prediction for the 16th of March. The logarithm of the number of cases $n$ on the 16th of March which is $20$ days after the 25th of February is predicted according to the formula: $$ \ln(n) = \vartheta_1 \cdot 20 + \vartheta_0 $$ Hence, the number of cases $n$ for that day is predicted as $$ n = \exp\bigl(\vartheta_1 \cdot 20 + \vartheta_0\bigr). $$ End of explanation """
mjcollin/ml_ocr
results/analize_results.ipynb
mit
# Restrict to the held-out rows and attach the model's predictions.
# NOTE: this file is Python 2 (print statements).
df_test = df[(df["is_test"] == True)]
df_test["prediction"] = predictions
#print df_test.head()

# Compare the percent correct to the results from earlier to make sure things are lined up right
print "Calculated accuracy:", sum(df_test["label"] == df_test["prediction"]) / float(len(df_test))
print "Model accuracy:", best_score

# Split the test rows by whether the prediction matched the true label.
df_correct = df_test[(df_test["label"] == df_test["prediction"])]
df_incorrect = df_test[(df_test["label"] != df_test["prediction"])]
#df_correct.describe()
#df_test.describe()
#plt.hist(correct_labels)
#print df.describe()

# Per-label counts of correct and incorrect predictions.
print "Correct predictions:", df_correct.groupby(["label"])["prediction"].count()
print "Incorrect predictions:", df_incorrect.groupby(["label"])["prediction"].count()
"""
Explanation: Build a histogram with percentages correct for each category
End of explanation
"""
print df_correct.describe()
print df_incorrect.describe()

#print model_results
# Reshape the grid-search results into per-feature lists of C, gamma and score
# for the 3-D scatter plots below.  Each entry of m["grid_scores"] appears to
# be a (params_dict, score, ...) tuple -- TODO confirm against the producer.
d3_data = {}
for m in model_results:
    d3_data[m["feat_name"]] = {}
    d3_data[m["feat_name"]]["C"] = []
    d3_data[m["feat_name"]]["G"] = []
    d3_data[m["feat_name"]]["S"] = []
    #print m["feat_name"], m["model_params"], m["model_score"]
    for s in m["grid_scores"]:
        d3_data[m["feat_name"]]["C"].append(s[0]["C"])
        d3_data[m["feat_name"]]["G"].append(s[0]["gamma"])
        d3_data[m["feat_name"]]["S"].append(s[1])
#print d3_data

from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)

def d3_plot(X, Y, Z):
    # Scatter grid-search accuracy (Z) against the C (X) and gamma (Y)
    # hyper-parameters in a 3-D figure with fixed, comparable axis limits.
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_xlabel("C", weight="bold", size="xx-large")
    ax.set_xticks([0, 5000, 10000, 15000])
    ax.set_xlim(0, max(X))
    ax.set_ylabel("gamma", weight="bold", size="xx-large")
    ax.set_yticks([0, 1.5, 3, 4.5])
    ax.set_ylim(0, max(Y))
    ax.set_zlabel("Accuracy", weight="bold", size="xx-large")
    #ax.set_zticks([0.5, 0.6, 0.70])
    ax.set_zlim(0.5, 0.75)
    ax.scatter(X, Y, Z, c='b', marker='o')
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    plt.show()

# One hyper-parameter surface plot per feature-extraction strategy.
d3_plot(np.array(d3_data["area"]["C"]), np.array(d3_data["area"]["G"]), np.array(d3_data["area"]["S"]))
d3_plot(np.array(d3_data["line"]["C"]), np.array(d3_data["line"]["G"]), np.array(d3_data["line"]["S"]))
d3_plot(np.array(d3_data["word"]["C"]), np.array(d3_data["word"]["G"]), np.array(d3_data["word"]["S"]))
"""
Explanation: Stats of text length for correct and incorrect
End of explanation
"""
matmodlab/matmodlab2
notebooks/UserMaterials.ipynb
bsd-3-clause
%pycat ../matmodlab2/materials/elastic3.py %pylab inline from matmodlab2 import * """ Explanation: User Defined Materials Overview Materials are implemented by subclassing the matmodlab.core.Material base class. The user material is called at each frame of every step. It is provided with the material state at the start of the increment (stress, solution-dependent state variables, temperature, etc) and with the increments in temperature, deformation, and time. The implementation of a material model will be demonstrated with a standard isotropic linear elastic model. <a name='contents'></a> Contents <a href='#linelast'>Isotropic Linear Elasticity</a> <a href='#umat.std'>Model Implementation</a> <a href='#umat.compare'>Model Comparison</a> <a href='#conc'>Conclusion</a> <a name='linelast'></a> Isotropic Linear Elasticity The mechanical response of a linear elastic material is defined by $$ \boldsymbol{\sigma} = \mathbb{C}{:}\boldsymbol{\epsilon} = 3K\boldsymbol{\epsilon}^{\rm iso} + 2G\boldsymbol{\epsilon}^{\rm dev} $$ where $K$ is the bulk modulus and $G$ is the shear modulus. The strain $\boldsymbol{\epsilon}$ can be determined from the deformation gradient $\pmb{F}$ as $$ \boldsymbol{\epsilon} = \frac{1}{2\kappa}\left[\left(\boldsymbol{F}^{\rm T}{\cdot}\boldsymbol{F}\right)^{2\kappa} - \boldsymbol{I}\right] $$ where $\kappa$ is the generalized Seth-Hill strain parameter. Defined as such, several well known finite strain measures are emitted: $\kappa=1$: Green-Lagrange reference strain $\kappa=-1$: Alamansi spatial strain $\kappa=0$: Logarithmic, or true, strain The implementations of linear elasticity to follow will take as input Young's modulus E, Poisson's ratio Nu, and the Seth-Hill parameter k for changing the strain definition. 
<a name='umat.std'></a> Model Implementation The easiest way to implement a material model is to subclass the matmodlab2.core.material.Material class and define: name: class attribute Used for referencing the material model in the MaterialPointSimulator. eval: instance method Updates the material stress, stiffness (optional), and state dependent variables. If the stiffness is returned as None, Matmodlab will determine it numerically. Other optional attributes and methods include: num_sdv: instance attribute The number of state dependent variables. Default is None. sdv_names: instance attribute List of state dependent variable names. Default is SDV_N for the N$^{\rm th}$ state dependent variable. sdvini: instance method [optional] Initializes solution dependent state variables (otherwise assumed to be 0). In the example below, in addition to some standard functions imported from numpy, several helper functions are imported from various locations in Matmodlab: matmodlab2.core.tensor logm, powm: computes the matrix logarithm and power array_rep: converts a symmetric tensor stored as a 3x3 matrix to an array of length 6 polar_decomp: computes the polar decomposition of the deformation gradient $\pmb{F}$ isotropic_part, deviatoric_part: computes the isotropic and deviatoric parts of a second-order symmetric tensor stored as an array of length 6 VOIGT: mulitplier for converting tensor strain components to engineering strain components The relevant input parameters to the material's eval method from Matmodlab are: F: the deformation gradient at the end of the step The isotropic elastic material described above is implemented as ElasticMaterialTotal in the file matmodlab/materials/elastic3.py. The implementation can be viewed by executing the following cell. End of explanation """ mps1 = MaterialPointSimulator('uelastic-std') mps1.material = ElasticMaterialTotal(E=10e6, Nu=.333) mps1.run_step('ESS', (.1, 0, 0), frames=50) i = where(mps1.df['E.XX'] > 0.) 
E = mps1.df['S.XX'].iloc[i] / mps1.df['E.XX'].iloc[i] assert allclose(E, 10e6, atol=1e-3, rtol=1e-3) """ Explanation: Verification Test Exercising the elastic model through a path of uniaxial stress should result in the slope of axial stress vs. axial strain being equal to the input parameter E. Note: it is the responsibility of the model developer to define the material's instantiation. In the case of ElasticMaterialTotal, the interface takes the elastic parameters as keywords. Parameters not specified are initialized to a value of zero. End of explanation """
ALEXKIRNAS/DataScience
Coursera/Machine-learning-data-analysis/Course 1/Central-Limit-Theorem.ipynb
mit
def build_plot(n, subsets_num): values = np.random.triangular(0, 0.5, 1, size = (subsets_num,n)) means = np.sort(np.sum(values, axis = 1) / n) fit = norm.pdf(means, 0.5, np.sqrt(1./(24 * n))) # <=========== Theoretical distribution plt.xlabel('x') plt.ylabel('f(x)') plt.plot(means, fit,'-') plt.hist(means, bins = 7, normed=True) plt.xlim((0.3, 0.7)) """ Explanation: Distribution: Triangular distribution See: https://en.wikipedia.org/wiki/Triangular_distribution End of explanation """ build_plot(n = 100, subsets_num = 1000) build_plot(n = 500, subsets_num = 1000) build_plot(n = 1000, subsets_num = 1000) """ Explanation: For Triangular distribution with parameters a = 0, b = 1, c = 0.5 => mean = 0.5, variance = 1/24. So mean ~N(0.5, 1/(24 * n)), where n - number of experiments End of explanation """
tanghaibao/goatools
notebooks/parent_go_terms.ipynb
bsd-2-clause
from goatools.base import get_godag godag = get_godag('go-basic.obo', optional_attrs='relationship') """ Explanation: How to extract information from parent GO terms 1) Load the GO DAG 2) Pick a GO term and visualize 2a) Print GO information 2b) Plot GO term 3. Find GO parents for numerous GO IDs 3a. Find GO parents up all relationships 3b. Find GO parents up is_a relationships only 4. Find GO parents by traversing explicit relationships <a id='section1'></a> 1. Load the GO DAG Required relationship between GO terms, is_a, is always loaded. Load optional relationships, like part_of, so we have the option of finding GO parents with or without relationships. End of explanation """ goid = 'GO:0050807' """ Explanation: <a id='section2'></a> 2. Pick a GO term and visualize End of explanation """ def prt_flds(gosubdag): """Print the available printing fields""" print('Print fields:') for fld in sorted(gosubdag.prt_attr['flds']): print(' {F}'.format(F=fld)) from goatools.gosubdag.gosubdag import GoSubDag # Create a subset of the GO DAG which contains: # * The selected GO term and # * All the GO terms above it gosubdag = GoSubDag(goid, godag, relationships=True, prt=False) # Get additional information for chosen GO ntgo = gosubdag.go2nt[goid] # Choose fields and custom printing format # prt_flds(gosubdag) # Uncomment to see the available print fields prtfmt = '{NS} {GO} D{depth:02} {GO_name}' # Print detailed information for GO print(prtfmt.format(**ntgo._asdict())) """ Explanation: <a id='section2a'></a> 2a. Print GO information End of explanation """ from goatools.gosubdag.plot.gosubdag_plot import GoSubDagPlot GoSubDagPlot(gosubdag).plt_dag('reg_synapse_org.png') """ Explanation: <a id='section2b'></a> 2b. 
Plot GO term End of explanation """ from goatools.godag.go_tasks import get_go2parents go2parents = get_go2parents(gosubdag.go2obj, gosubdag.relationships) for goid_parent in go2parents[goid]: print(prtfmt.format(**gosubdag.go2nt[goid_parent]._asdict())) """ Explanation: <a id='section3'></a> 3. Find GO parents for numerous GO IDs 3a. Find GO parents up all relationships End of explanation """ from goatools.godag.go_tasks import get_go2parents_isa go2parents = get_go2parents_isa(gosubdag.go2obj) for goid_parent in go2parents[goid]: print(prtfmt.format(**gosubdag.go2nt[goid_parent]._asdict())) """ Explanation: <a id='section3b'></a> 3b. Find GO parents up is_a relationships only End of explanation """ goterm = godag[goid] print('Parents up "is_a": required relationship') for p_term in goterm.parents: print(prtfmt.format(**gosubdag.go2nt[p_term.item_id]._asdict())) if 'part_of' in goterm.relationship: print('\nParents up "part_of" optional relationship:') for p_go in goterm.relationship['part_of']: print(prtfmt.format(**gosubdag.go2nt[p_go.item_id]._asdict())) if 'regulates' in goterm.relationship: print('\nParents up "regulates" optional relationship:') for p_go in goterm.relationship['regulates']: print(prtfmt.format(**gosubdag.go2nt[p_go.item_id]._asdict())) # godag must be loaded with: optional_attrs='relationship' # gosubdag must be loaded with: relationships=True print('\nAncestors up all loaded relationships:') for p_go in gosubdag.rcntobj.go2ancestors[goid]: print(prtfmt.format(**gosubdag.go2nt[p_go]._asdict())) """ Explanation: <a id='section4'></a> 4. Find GO parents by traversing explicit relationships End of explanation """
mathcoding/Programmazione2
Introduzione a Python - Prima parte.ipynb
mit
x=1 print(x) type(x) print(x, type(x)) y=2 z=(x+y)**2 * 3 print("x =", x, ", y =", y, ", z =", z) """ Explanation: NOTA: si consiglia di eseguire una riga alla volta di questo notebook, come fatto a lezione, cercando di capire sia cosa fa ciascuna funzione, sia soprattutto cercando di capire gli eventuali messaggi di errore. Numeri Interi End of explanation """ y=1.0 print(y) type(x), type(y) type(x+y) """ Explanation: Numeri Reali (?) End of explanation """ a = "3.0" type(a) b = int(a) b = float(a) type(b) x = float(1)/3 y = 1/3 z = 1.0//3.0 print(x+y+z) 2.4//2.5 """ Explanation: Type conversion o type cast Sono usati per convertire un tipo in un altro, in maniera dinamica. Per convertire un oggetto in un dato tipo, basta usare il nome del tipo come se fosse una funzione. Esempi: End of explanation """ x = 1.0/3 y = 1.0/3 z = 1/3 print(x+y+z) """ Explanation: ATTENZIONE: La divisione tra numeri interi non considera il resto End of explanation """ 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 == 1.0 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 + 1/10 """ Explanation: NOTA: In Python 2.7 questa espressione viene valutata a 0.666666 in quanto z vienve valutata con una divisione tra interi End of explanation """ x = 1+3j y = 4+2j z = 2+1j print(type(x)) print(x+y*z) print(z.real, z.imag, z.conjugate) z.conjugate() z.real = 3 """ Explanation: ATTENZIONE: Ricordarsi di come vengono rappresentati i numeri al calcolatori con la rappresentazione in virgola mobile (floating points). 
Numeri Complessi End of explanation """ who """ Explanation: Comandi utili nel workspace Per controllare le variabili in memoria (nel workspace): End of explanation """ del x who print(x+y+z) help(complex) """ Explanation: Per rimuovere una variabile dal workspace: End of explanation """ # Notare l'identazione nella funzione seguente def f(x): """Funzione che il doppio di x""" return x*2 print(type(f)) f(27) f(1.2) f(1+3j) f("Ciao") 2*"domanda" help(f) """ Explanation: Funzioni Supponiamo di voler scrivere una funzione che calcola il doppio di un numero dato ovvero la funzione: $f : \mathbb{C} \rightarrow \mathbb{C}$ con: $f(x) = 2 x$ In python dobbiamo scrivere (ATTENZIONE AGLI SPAZI DEL TAB): End of explanation """ def Potenza(base, esponente): """ Calcola la potenza (base)^(esponente). """ return base**esponente Potenza(2, 3) def Potenza2(base, esponente=2): """ Calcola la potenza (base)^(esponente). Se il secondo parametro non viene passato, calcola la potenza quadrata, ovvero (esponente=2). """ return base**esponente print(Potenza2(14)) print(Potenza2(14,3)) Potenza2(esponente=4, base=10) """ Explanation: Vogliamo ora scrivere la funzione: $$f : \mathbb{C} \times \mathbb{C} \rightarrow \mathbb{C}$$ con $$f(x,y) = x^y$$ In python scriviamo: End of explanation """ As = [5,3,2,8,7,13] print(As) print(type(As)) # help(list) """ Explanation: Python supporta la possibilità di usare i nomi di variabili usati nella definizione di una funzione per passare gli argomenti (chiamati keyword arguments). Questo si differenza dalla tipica notazione posizionale di molti linguaggi di programmazione, tipo il C, e permette di rendere il codice più leggibile e di evitare ambiguità nel passare gli argomenti ad una funzione. Si noti infine che nella definizione della funzione Potenza2 è stato definito un parametro di default (default parameter values) per definire il valore dell'esponente. 
Liste End of explanation """ # Aggiungere un elemento ad una lista As.append(27) print(As) As.remove(7) print(As) As.insert(4, 12) print(As) # Invertire l'ordine di una lista As.reverse() print(As) # Ordinamento come funzione Bs = sorted(As) print(As, Bs) # Ordinamento "IN_PLACE" As.sort() print(As) """ Explanation: IMPORTANTE: Abituarsi ad usare il tasto tab per avere il completamento automatico End of explanation """ print(As) for x in As: print(x) # Equivalente in ANSI-C # for (int i=0; i<n; i++) # printf("%f\n", As[i]); # Iterare gli elementi di una lista (esempio di commento in python) for n in As: print("x=", str(n)," \t -> f(x^3) =",str(Potenza(n,3))) list(map(lambda x: Potenza(x,3), As)) # Iterare una lista usando una variabile # per l'indice dell'elemento nella lista for i,n in enumerate(As): print("indice:", i, " -> As["+str(i)+"] = " + str(n)) # Iterare una lista usando una variabile # per l'indice dell'elemento nella lista for i,n in enumerate(reversed(As)): print("indice:", i, " -> As["+str(i)+"] = " + str(n)) """ Explanation: COMMENTO: Ricordarsi la differenza che alcune funzioni operano IN PLACE End of explanation """ As As[3:5] As[-1] head, tail = As[0], As[1:] print(head, tail) """ Explanation: Operazioni di Slicing Per ottenere una sotto lista, si può usare un espressione di Slicing. Per esempio, l'espressione Lista[start:end], restituisce la sottolista che inizia in posizione "start" e finisce in posizione "end". Esempio: End of explanation """
JoseGuzman/myIPythonNotebooks
MachineLearning/KMC.ipynb
gpl-2.0
%pylab inline import matplotlib #matplotlib.rc('xtick', labelsize=20) #matplotlib.rc('ytick', labelsize=20) from scipy.spatial import distance """ Explanation: <H1>K-means clustering (KMC) algorithm </H1> <P> Given a set $X$ of $n$ observations; $X = \{x_1, x_2, \cdots, x_n\}$, where every $i$ observation is a vector of measurements ($x_i \in R^d$). The K-means clustering aims to classify the observations into a set of $k$ clusters $C =\{c_1, c_2, \cdots, c_k\}$ that minimizes the following expression: </P> $ \displaystyle{\operatorname{arg\,min}} \sum_{\mathbf x \in C_i} \left\| \mathbf x - \boldsymbol\mu_i \right\|^2 $ End of explanation """ x = np.loadtxt("data.txt", comments='//') x.shape print(x.shape) # Plot 2 measurements #for i in x: # plt.plot(i[0],i[1], 'ko'); plt.scatter(x[:,0], x[:,1], color='black') plt.xlim(-0.5, 10); plt.ylim(-0.5, 10); plt.xlabel('Measurement 1'); plt.ylabel('Measurement 2'); """ Explanation: Consider the following data set consisting of the scores of two variables on each of 17 experiments End of explanation """ centroid1 = x[0] # first experiment centroid2 = x[3] # fourth experiment print(centroid1, centroid2) # calculate Euclidean distances from centroid 1 sample1= list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid1) sample1.append(dist) print("%2d -> %6f" %(i, dist)) # calculate Euclidean distances from centroid 2 sample2 = list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid2) sample2.append(dist) print("%2d -> %6f" %(i, dist)) group1 = x[np.array(sample1)<=np.array(sample2)] print(group1) group2 = x[np.array(sample2)<np.array(sample1)] print(group2) # Replot first categorization # Plot 2 measurements for i in group1: plt.plot(i[0],i[1], 'o',color='magenta'); for i in group2: plt.plot(i[0],i[1], 'o', color='cyan'); plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12) plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12) plt.xlim(-0.5, 10); plt.ylim(-0.5, 10); 
plt.xlabel('Measurement 1'); plt.ylabel('Measurement 2'); # recalculate centroids centroid1 = np.average(group1, axis=0) centroid2 = np.average(group2, axis=0) print(centroid1, centroid2) # calculate Euclidean distances from new centroid 1 sample1= list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid1) sample1.append(dist) print("%2d -> %6f" %(i, dist)) sample2= list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid2) sample2.append(dist) print("%2d -> %6f" %(i, dist)) # reasign the groups group1 = x[np.array(sample1)<=np.array(sample2)] print(group1) group2 = x[np.array(sample2)<np.array(sample1)] print(group2) # Replot first categorization with new centrodis # Plot 2 measurements for i in group1: plt.plot(i[0],i[1], 'o', color='magenta'); for i in group2: plt.plot(i[0],i[1], 'o', color='cyan'); plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12) plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12) plt.xlim(-0.5, 10); plt.ylim(-0.5, 10); plt.xlabel('Measurement 1'); plt.ylabel('Measurement 2'); # recalculate centroids centroid1 = np.average(group1, axis=0) centroid2 = np.average(group2, axis=0) print(centroid1, centroid2) # calculate Euclidean distances from new centroid 1 sample1= list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid1) sample1.append(dist) # calculate Euclidean distances from centroid 2 sample2 = list() for i, obs in enumerate(x): dist = distance.euclidean(obs, centroid2) sample2.append(dist) # reasign the groups group1 = x[np.array(sample1)<=np.array(sample2)] group2 = x[np.array(sample2)<np.array(sample1)] # Replot first categorization with new centrodis # Plot 2 measurements for i in group1: plt.plot(i[0],i[1], 'o', color='magenta'); for i in group2: plt.plot(i[0],i[1], 'o', color='cyan'); plt.plot(centroid1[0], centroid1[1], '*', color='magenta', ms=12) plt.plot(centroid2[0], centroid2[1], '*', color='cyan', ms=12) plt.xlim(-0.5, 10); plt.ylim(-0.5, 10); 
plt.xlabel('Measurement 1'); plt.ylabel('Measurement 2'); # recalculate centroids centroid1 = np.average(group1, axis=0) centroid2 = np.average(group2, axis=0) print(centroid1, centroid2) """ Explanation: This data set is to be grouped into two clusters. As a first step in finding a sensible initial partition, let the values of the measurements 1 and 2 of the two individuals End of explanation """
arnau/blog
notes/sqlite-python/sqlite-python-basics.ipynb
unlicense
import sqlite3 import os conn = sqlite3.connect("sqlite-python-basics.sqlite") cur = conn.cursor() """ Explanation: SQLite with Python (Basics) The standard Python distribution ships with a basic SQLite3 inteface. Connect to a database Import the sqlite3 module, create a connection and open a cursor to operate on the database. End of explanation """ conn.close() os.remove("sqlite-python-basics.sqlite") """ Explanation: After using the database make sure you close the connection to avoid locking yourself out of the database. End of explanation """ conn = sqlite3.connect(":memory:") cur = conn.cursor() """ Explanation: Create a database As soon as you connect to a database, if it doesn't exist it will create it. End of explanation """ cur.execute(""" CREATE TABLE IF NOT EXISTS projects ( id INTEGER PRIMARY KEY, created DATE NOT NULL, name VARCHAR(50) NOT NULL UNIQUE ) """) cur.execute(""" CREATE TABLE IF NOT EXISTS events ( id INTEGER PRIMARY KEY, date DATE, project_id INTEGER, comments TEXT, FOREIGN KEY (project_id) REFERENCES projects(id) ) """); """ Explanation: Let's create two tables representing an event log (events) and the project (projects) they belong to. End of explanation """ cur.execute("SELECT name FROM sqlite_master WHERE type='table'") cur.fetchall() """ Explanation: We can verify they exist by querying the sqlite_master table. End of explanation """ cur.execute("ALTER TABLE events ADD COLUMN effort REAL"); """ Explanation: To add a column you use ALTER TABLE. Documentation: https://www.sqlite.org/lang_altertable.html End of explanation """ cur.execute("INSERT INTO projects (created, name) VALUES (date('now'), 'project A')"); cur.execute("INSERT INTO projects (created, name) VALUES (date('now'), 'project B')"); cur.execute("SELECT * FROM projects") cur.fetchall() """ Explanation: Note: Check the Helpers section to avoid attempting to add a column that already exists. 
Operate on a database Documentation: https://www.sqlite.org/fullsql.html SQLite implements most of the SQL standard and extends it with a few optional features like JSON. Most operations are done as part of a transaction and will be kept in a journal until they are commited with conn.commit(). This means that if you insert a row into a table, you will not see it in a query until you commit it. Let's insert a couple of projects: End of explanation """ events = [("2018-03-12", 1, 1.5, "A stuff"), ("2018-03-13", 1, 0.5, "More A stuff"), ("2018-03-13", 2, 1.0, "B stuff")] for event in events: cur.execute("INSERT INTO events (date, project_id, effort, comments) VALUES (?, ?, ?, ?)", event) cur.execute("SELECT * FROM events") cur.fetchall() cur.execute(""" SELECT e.id, e.date, p.name AS project, e.effort FROM events AS e INNER JOIN projects AS p ON e.project_id = p.id """) cur.fetchall() """ Explanation: And a few events: End of explanation """ import csv with open('events.csv', 'r') as events: rows = [(r['date'], r['project_id'], r['comments'], r['effort']) for r in csv.DictReader(events)] cur.executemany(""" INSERT INTO events (date, project_id, comments, effort) VALUES (?, ?, ?, ?) """, rows) conn.commit() cur.execute(""" SELECT e.id, e.date, p.name AS project, e.effort FROM events AS e INNER JOIN projects AS p ON e.project_id = p.id """) cur.fetchall() """ Explanation: Let's insert a few more records, this time in CSV. 
End of explanation """ cur.execute("UPDATE projects SET name = ('project C') WHERE id = 2") cur.execute("SELECT * FROM projects") cur.fetchall() """ Explanation: And let's change the name of project B to C: End of explanation """ cur.execute("CREATE INDEX IF NOT EXISTS event_dates ON events (date)"); cur.execute("SELECT date, effort FROM events GROUP BY date") cur.fetchall() cur.execute("DROP INDEX IF EXISTS event_dates"); """ Explanation: Indexes End of explanation """ def column_exists(table, column): res = cur.execute("PRAGMA table_info({})".format(table)) return len ([x for (_, x, _, _, _, _) in res if x == column]) != 0 column_exists('events', 'effort') """ Explanation: Types Documentation: https://www.sqlite.org/datatype3.html NULL. The value is a NULL value. INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value. REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number. TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE). BLOB. The value is a blob of data, stored exactly as it was input. SQLite does not have a separate Boolean storage class. Boolean values are stored as integers 0 and 1. It doesn't have a date or datetime type. Instead, you use date and time functions to store them as TEXT, REAL or INTEGER. Helpers To check for the table info, you can use the PRAGMA command. Documentation: http://www.sqlite.org/pragma.html#pragma_table_info The following function could be used instead of the try/except block back in the first example of altering the events table. End of explanation """ conn.commit() conn.close() """ Explanation: SQLite references Core functions Aggregate functions Date and Time functions Disconnect from a database End of explanation """
NathanYee/ThinkBayes2
bayesianLinearRegression/Final Report.ipynb
gpl-2.0
from __future__ import print_function, division % matplotlib inline import warnings warnings.filterwarnings('ignore') import math import numpy as np from thinkbayes2 import Pmf, Cdf, Suite, Joint, EvalNormalPdf import thinkplot import pandas as pd import matplotlib.pyplot as plt """ Explanation: Computational Bayes Final Project Nathan Yee Uma Desai To understand Bayesian Linear Regression, we read through and used many ideas from Cypress Frankenfeld. http://allendowney.blogspot.com/2015/04/two-hour-marathon-by-2041-probably.html Height of Children in Kalama, Egypt From: http://lib.stat.cmu.edu/DASL/Datafiles/Ageandheight.html The height of a child is not stable but increases over time. Since the pattern of growth varies from child to child, one way to understand the general growth pattern is by using the average of several children's heights, as presented in this data set. The scatterplot of height versus age is almost a straight line, showing a linear growth pattern. The straightforward relationship between height and age provides a simple illustration of linear relationships, correlation, and simple regression. Description: Mean heights of a group of children in Kalama, an Egyptian village that is the site of a study of nutrition in developing countries. The data were obtained by measuring the heights of all 161 children in the village each month over several years. Age: Age in months Height: Mean height in centimeters for children at this age What is the probability that a child between the ages of 18 - 29 is less than 76.1 cm (the lowest height in our dataset)? End of explanation """ df = pd.read_csv('ageVsHeight.csv', skiprows=0, delimiter='\t') df """ Explanation: Let's start by loading our data into a Pandas dataframe to see what we're working with. End of explanation """ ages = np.array(df['age']) heights = np.array(df['height']) """ Explanation: Next, let's create vectors of our ages and heights. 
End of explanation """ plt.plot(ages, heights, 'o', label='Original data', markersize=10) plt.xlabel('Age (months)') plt.ylabel('Height (cm)') """ Explanation: Now let's visualize our data to make sure that linear regression is appropriate for predicting its distributions. End of explanation """ def leastSquares(x, y): """ leastSquares takes in two arrays of values. Then it returns the slope and intercept of the least squares of the two. Args: x (numpy array): numpy array of values. y (numpy array): numpy array of values. Returns: slope, intercept (tuple): returns a tuple of floats. """ A = np.vstack([x, np.ones(len(x))]).T slope, intercept = np.linalg.lstsq(A, y)[0] return slope, intercept """ Explanation: Our data looks pretty linear. We can now calculate the slope and intercept of the line of least squares. We abstract numpy's least squares function using a function of our own. End of explanation """ slope, intercept = leastSquares(ages, heights) print(slope, intercept) intercept_range = .03 * intercept slope_range = .05 * slope """ Explanation: To use our leastSquares function, we input our age and height vectors as our x and y arguments. Next, let's call leastSquares to get the slope and intercept, and use the slope and intercept to calculate the size of our intercept and slope ranges. End of explanation """ plt.plot(ages, heights, 'o', label='Original data', markersize=10) plt.plot(ages, slope*ages + intercept, 'r', label='Fitted line') plt.legend() plt.xlabel('Age (months)') plt.ylabel('Height (cm)') plt.show() """ Explanation: Now, we can visualize the slope and intercept on the same plot as the data to make sure it is working correctly. 
End of explanation """ intercepts = np.linspace(intercept - intercept_range, intercept + intercept_range, 20) slopes = np.linspace(slope - slope_range, slope + slope_range, 20) sigmas = np.linspace(2, 4, 15) hypos = ((intercept, slope, sigma) for intercept in intercepts for slope in slopes for sigma in sigmas) """ Explanation: Looks great! Based on the plot above, we are confident that Bayesian linear regression will give us reasonable distributions for predicting future values. Now we need to create our hypotheses. Each hypothesis will consist of a range of intercepts, slopes and sigmas. End of explanation """ data = [(age, height) for age in ages for height in heights] """ Explanation: Loop through ages and heights to create tuples as data for our hypotheses. End of explanation """ class leastSquaresHypos(Suite, Joint): def Likelihood(self, data, hypo): """ Likelihood calculates the probability of a particular line (hypo) based on data (ages Vs height) of our original dataset. This is done with a normal pmf as each hypo also contains a sigma. Args: data (tuple): tuple that contains ages (float), heights (float) hypo (tuple): intercept (float), slope (float), sigma (float) Returns: P(data|hypo) """ intercept, slope, sigma = hypo total_likelihood = 1 for age, measured_height in data: hypothesized_height = slope * age + intercept error = measured_height - hypothesized_height total_likelihood *= EvalNormalPdf(error, mu=0, sigma=sigma) return total_likelihood """ Explanation: Next make a least squares class that inherits from Suite and Joint where likelihood is calculated based on error from data. The likelihood function will depend on the data and normal distributions for each hypothesis. End of explanation """ LeastSquaresHypos = leastSquaresHypos(hypos) """ Explanation: Now instantiate a LeastSquaresHypos suite with our hypos. End of explanation """ for item in data: LeastSquaresHypos.Update([item]) """ Explanation: And update the suite with our data. 
We choose to update a single item at a time to normalize after every item in data. End of explanation """ marginal_intercepts = LeastSquaresHypos.Marginal(0) thinkplot.hist(marginal_intercepts) thinkplot.Config(xlabel='intercept (cm)', ylabel='prob') """ Explanation: We can now plot marginal distributions to visualize the probability distribution for each of our hypotheses for intercept, slope, and sigma values. Our hypotheses were carefully picked based on ranges that we found worked well, which is why all the intercepts, slopes, and sigmas that are important to this dataset are included in our hypotheses. End of explanation """ marginal_slopes = LeastSquaresHypos.Marginal(1) thinkplot.hist(marginal_slopes) thinkplot.Config(xlabel='slope (cm / month)', ylabel='prob') marginal_sigmas = LeastSquaresHypos.Marginal(2) thinkplot.hist(marginal_sigmas) thinkplot.Config(xlabel='sigma', ylabel='prob') """ Explanation: Since our slopes are relatively close to our original slope, we are relatively confdient that these produce reasonable distributions even though it does get cut off. 
End of explanation """ def getHeights(hypo_samples, random_months): """ getHeights takes in random hypos and random months and returns the coorisponding random height Args: hypo_samples (sequence): Sampled hypotheses from the LeastSquaredHypos suite random_months (sequence): Randomly sampled months """ random_heights = np.zeros(len(random_months)) for i in range(len(random_heights)): intercept = hypo_samples[i][0] slope = hypo_samples[i][1] sigma = hypo_samples[i][2] month = random_months[i] random_heights[i] = np.random.normal((slope * month + intercept), sigma, 1) return random_heights def getRandomData(start_month, end_month, n, LeastSquaresHypos): """ getRadomData computes and returns two sequences Args: start_month: (number): Starting month end_month: (number): Ending month n (int): Number of samples LeastSquaresHypos (Suite): Contains the hypos we want to sample Returns: Sequence of random_months and random_heights """ random_hypos = LeastSquaresHypos.Sample(n) random_months = np.random.uniform(start_month, end_month, n) random_heights = getHeights(random_hypos, random_months) return random_months, random_heights """ Explanation: Next, we want to sample random data from our hypotheses. To do this, we will make two functions, getHeights and getRandomData. getRandomData calls getHeights to obtain random height values. End of explanation """ num_samples = 100000 random_months, random_heights = getRandomData(18, 29, num_samples, LeastSquaresHypos) """ Explanation: Now we take 100000 random samples of pairs of months and heights. Here we want at least 100000 items so that we can get very smooth sampling. 
End of explanation """ plt.plot(random_months, random_heights, 'o', label='Random Sampling') plt.plot(ages, heights, 'o', label='Original data', markersize=10) plt.plot(ages, slope*ages + intercept, 'r', label='Fitted line') plt.xlabel('Age (months)') plt.ylabel('Height (cm)') plt.legend() plt.show() """ Explanation: Now plot the data to see what the randomly sampled data looks like beneath the original data and line of least squares. We notice that we have trouble seeing the exact size of our distribution as points overlap each other. To see what the distribution actually looks like, we must make a density plot. End of explanation """ num_buckets = 70 # num_buckets^2 is actual number # create horizontal and vertical linearly spaced ranges as buckets. hori_range, hori_step = np.linspace(18, 29 , num_buckets, retstep=True) vert_range, vert_step = np.linspace(65, 100, num_buckets, retstep=True) hori_step = hori_step / 2 vert_step = vert_step / 2 # store each bucket as a tuple in a the buckets dictionary. buckets = dict() keys = [(hori, vert) for hori in hori_range for vert in vert_range] # set each bucket as empty for key in keys: buckets[key] = 0 # loop through the randomly sampled data for month, height in zip(random_months, random_heights): # check each bucket and see if randomly sampled data for key in buckets: if month > key[0] - hori_step and month < key[0] + hori_step: if height > key[1] - vert_step and height < key[1] + vert_step: buckets[key] += 1 break # can only fit in a single bucket """ Explanation: Next, we want to get the intensity of the data at locations. We do that by adding the randomly sampled values to buckets. This gives us intensity values for a grid of pixels in our sample range. 
End of explanation """ pcolor_months = [] pcolor_heights = [] pcolor_densities = [] for key in buckets: pcolor_months.append(key[0]) pcolor_heights.append(key[1]) pcolor_densities.append(buckets[key]) """ Explanation: Next, we unpack the buckets into three vectors, pcolor_months, pcolor_heights, and pcolor_densities End of explanation """ def append_to_file(path, data): """ append_to_file appends a line of data to specified file. Then adds new line Args: path (string): the file path Return: VOID """ with open(path, 'a') as file: file.write(data + '\n') def delete_file_contents(path): """ delete_file_contents deletes the contents of a file Args: path: (string): the file path Return: VOID """ with open(path, 'w'): pass def threeSequenceCSV(x, y, z): """ Writes the x, y, z arrays to a CSV Args: x (sequence): x data y (sequence): y data z (sequence): z data """ file_name = 'intensityData.csv' delete_file_contents(file_name) for xi, yi, zi in zip(x, y, z): append_to_file(file_name, "{}, {}, {}".format(xi, yi, zi)) def twoSequenceCSV(x, y): """ Writes the x, y arrays to a CSV Args: x (sequence): x data y (sequence): y data """ file_name = 'monthsHeights.csv' delete_file_contents(file_name) for xi, yi in zip(x, y): append_to_file(file_name, "{}, {}".format(xi, yi)) def fittedLineCSV(x, slope, intercept): """ Writes line data to a CSV Args: x (sequence): x data slope (float): slope of line intercept (float): intercept of line """ file_name = 'fittedLineCSV.csv' delete_file_contents(file_name) for xi in x: append_to_file(file_name, "{}, {}".format(xi, slope*xi + intercept)) def makeCSVData(random_x, random_y, intensities, original_x, original_y, slope, intercept): """ Calls the 3 csv making functions with appropriate parameters. 
""" threeSequenceCSV(random_x, random_y, intensities) twoSequenceCSV(original_x, original_y) fittedLineCSV(original_x, slope, intercept) makeCSVData(pcolor_months, pcolor_heights, pcolor_densities, ages, heights, slope, intercept) """ Explanation: Since density plotting is much simpler in Mathematica, we are going to export all our data to csv files and plot them in Mathematica End of explanation """ def probHeightRange(heights, low, high): """ probHeightRange returns the probability that height is within a particular range Args: height (sequence): sequence of heights low (number): the bottom of the range high (number): the top of the range Returns: prob (float): the probability of being in the height range """ successes = 0 total = len(heights) for height in heights: if low < height and height < high: successes += 1 return successes / total probHeightRange(random_heights, 0, 76.1) """ Explanation: Now, we have a figure that contains superimposed density plot, original data scatter plot, and least squares line plot. While the density plot is not strictly necessary, it gives us a much better idea as to how large of a distribution we have. Note that the x axis is ages (months) and the y axis is height (cm). <img src="ageHeightAllPlots5.png" alt="Density Plot with orignal data/fit" height="400" width="400"> So, now we can go back to our original question. What is the probability that a child between the ages of 18 - 29 is less than 76.1 cm (the lowest height in our dataset)? To answer this question, we sample our random data and see whether or not it is less than 76.1 cm. We will do this using a function. End of explanation """
metpy/MetPy
v0.7/_downloads/Inverse_Distance_Verification.ipynb
bsd-3-clause
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist

from metpy.gridding.gridding_functions import calc_kappa
from metpy.gridding.interpolation import barnes_point, cressman_point
from metpy.gridding.triangles import dist_2

plt.rcParams['figure.figsize'] = (15, 10)


def draw_circle(x, y, r, m, label):
    # Draw a circle of radius r centred on (x, y), sampled at 360
    # one-degree steps, using matplotlib format string `m` and legend
    # entry `label`.
    nx = x + r * np.cos(np.deg2rad(list(range(360))))
    ny = y + r * np.sin(np.deg2rad(list(range(360))))
    plt.plot(nx, ny, m, label=label)

"""
Explanation: Inverse Distance Verification: Cressman and Barnes Compare inverse distance interpolation methods Two popular interpolation schemes that use inverse distance weighting of observations are the Barnes and Cressman analyses. The Cressman analysis is relatively straightforward and uses the ratio between distance of an observation from a grid cell and the maximum allowable distance to calculate the relative importance of an observation for calculating an interpolation value. Barnes uses the inverse exponential ratio of each distance between an observation and a grid cell and the average spacing of the observations over the domain. Algorithmically: A KDTree data structure is built using the locations of each observation. All observations within a maximum allowable distance of a particular grid cell are found in O(log n) time. Using the weighting rules for Cressman or Barnes analyses, the observations are given a proportional value, primarily based on their distance from the grid cell. The sum of these proportional values is calculated and this value is used as the interpolated value. Steps 2 through 4 are repeated for each grid cell.
End of explanation
"""
# Reproducible synthetic observations: 10 random integer (x, y) sites in
# [0, 100) with observation values zp = x**2 / 1000.
# NOTE(review): the narration below says the values are "proportional to
# x * y", but the code computes x*x -- confirm which is intended.
np.random.seed(100)

pts = np.random.randint(0, 100, (10, 2))

xp = pts[:, 0]
yp = pts[:, 1]

zp = xp * xp / 1000

# The two grid locations that will be interpolated: (30, 30) and (60, 60).
sim_gridx = [30, 60]
sim_gridy = [30, 60]

"""
Explanation: Generate random x and y coordinates, and observation values proportional to x * y.
Set up two test grid locations at (30, 30) and (60, 60).
End of explanation
"""
# Search structure: a KD-tree over the observation sites, queried for all
# observations within `radius` of each grid point.
grid_points = np.array(list(zip(sim_gridx, sim_gridy)))

radius = 40

obs_tree = cKDTree(list(zip(xp, yp)))

indices = obs_tree.query_ball_point(grid_points, r=radius)

"""
Explanation: Set up a cKDTree object and query all of the observations within "radius" of each grid point. The variable indices represents the index of each matched coordinate within the cKDTree's data list.
End of explanation
"""
# Grid 0: Cressman interpolation from the observations matched above.
x1, y1 = obs_tree.data[indices[0]].T
cress_dist = dist_2(sim_gridx[0], sim_gridy[0], x1, y1)
cress_obs = zp[indices[0]]

cress_val = cressman_point(cress_dist, cress_obs, radius)

"""
Explanation: For grid 0, we will use Cressman to interpolate its value.
End of explanation
"""
# Grid 1: Barnes interpolation; kappa is derived from the average
# observation spacing over the domain.
x2, y2 = obs_tree.data[indices[1]].T
barnes_dist = dist_2(sim_gridx[1], sim_gridy[1], x2, y2)
barnes_obs = zp[indices[1]]

# NOTE(review): cdist of the point set against itself includes the zero
# self-distances in the mean, biasing ave_spacing (and hence kappa) low --
# confirm this matches the intended definition of "average spacing".
ave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))
kappa = calc_kappa(ave_spacing)

barnes_val = barnes_point(barnes_dist, barnes_obs, kappa)

"""
Explanation: For grid 1, we will use barnes to interpolate its value. We need to calculate kappa--the average distance between observations over the domain.
End of explanation """ for i, zval in enumerate(zp): plt.plot(pts[i, 0], pts[i, 1], '.') plt.annotate(str(zval) + ' F', xy=(pts[i, 0] + 2, pts[i, 1])) plt.plot(sim_gridx, sim_gridy, '+', markersize=10) plt.plot(x1, y1, 'ko', fillstyle='none', markersize=10, label='grid 0 matches') plt.plot(x2, y2, 'ks', fillstyle='none', markersize=10, label='grid 1 matches') draw_circle(sim_gridx[0], sim_gridy[0], m='k-', r=radius, label='grid 0 radius') draw_circle(sim_gridx[1], sim_gridy[1], m='b-', r=radius, label='grid 1 radius') plt.annotate('grid 0: cressman {:.3f}'.format(cress_val), xy=(sim_gridx[0] + 2, sim_gridy[0])) plt.annotate('grid 1: barnes {:.3f}'.format(barnes_val), xy=(sim_gridx[1] + 2, sim_gridy[1])) plt.gca().set_aspect('equal', 'datalim') plt.legend() """ Explanation: Plot all of the affiliated information and interpolation values. End of explanation """ plt.annotate('grid 0: ({}, {})'.format(sim_gridx[0], sim_gridy[0]), xy=(sim_gridx[0] + 2, sim_gridy[0])) plt.plot(sim_gridx[0], sim_gridy[0], '+', markersize=10) mx, my = obs_tree.data[indices[0]].T mz = zp[indices[0]] for x, y, z in zip(mx, my, mz): d = np.sqrt((sim_gridx[0] - x)**2 + (y - sim_gridy[0])**2) plt.plot([sim_gridx[0], x], [sim_gridy[0], y], '--') xave = np.mean([sim_gridx[0], x]) yave = np.mean([sim_gridy[0], y]) plt.annotate('distance: {}'.format(d), xy=(xave, yave)) plt.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y)) plt.xlim(0, 80) plt.ylim(0, 80) plt.gca().set_aspect('equal', 'datalim') """ Explanation: For each point, we will do a manual check of the interpolation values by doing a step by step and visual breakdown. Plot the grid point, observations within radius of the grid point, their locations, and their distances from the grid point. 
End of explanation """ dists = np.array([22.803508502, 7.21110255093, 31.304951685, 33.5410196625]) values = np.array([0.064, 1.156, 3.364, 0.225]) cres_weights = (radius * radius - dists * dists) / (radius * radius + dists * dists) total_weights = np.sum(cres_weights) proportion = cres_weights / total_weights value = values * proportion val = cressman_point(cress_dist, cress_obs, radius) print('Manual cressman value for grid 1:\t', np.sum(value)) print('Metpy cressman value for grid 1:\t', val) """ Explanation: Step through the cressman calculations. End of explanation """ plt.annotate('grid 1: ({}, {})'.format(sim_gridx[1], sim_gridy[1]), xy=(sim_gridx[1] + 2, sim_gridy[1])) plt.plot(sim_gridx[1], sim_gridy[1], '+', markersize=10) mx, my = obs_tree.data[indices[1]].T mz = zp[indices[1]] for x, y, z in zip(mx, my, mz): d = np.sqrt((sim_gridx[1] - x)**2 + (y - sim_gridy[1])**2) plt.plot([sim_gridx[1], x], [sim_gridy[1], y], '--') xave = np.mean([sim_gridx[1], x]) yave = np.mean([sim_gridy[1], y]) plt.annotate('distance: {}'.format(d), xy=(xave, yave)) plt.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y)) plt.xlim(40, 80) plt.ylim(40, 100) plt.gca().set_aspect('equal', 'datalim') """ Explanation: Now repeat for grid 1, except use barnes interpolation. End of explanation """ dists = np.array([9.21954445729, 22.4722050542, 27.892651362, 38.8329756779]) values = np.array([2.809, 6.241, 4.489, 2.704]) weights = np.exp(-dists**2 / kappa) total_weights = np.sum(weights) value = np.sum(values * (weights / total_weights)) print('Manual barnes value:\t', value) print('Metpy barnes value:\t', barnes_point(barnes_dist, barnes_obs, kappa)) """ Explanation: Step through barnes calculations. End of explanation """
stevetjoa/stanford-mir
dp.ipynb
mit
def min_coin_sum(val, coins=None):
    # Naive recursive solution: exponential time, because overlapping
    # subproblems are recomputed from scratch (see the discussion below).
    # NOTE: the recursive call does not forward `coins`, so recursion
    # always uses the default denominations [1, 5, 10, 25] regardless of
    # the caller's list.
    if coins is None:
        coins = [1, 5, 10, 25]
    if val == 0:
        return 0
    return 1 + min(min_coin_sum(val-coin) for coin in coins if val-coin >= 0)
"""
Explanation: &larr; Back to Index Dynamic Programming Dynamic programming (Wikipedia; FMP, p. 137) is a method for solving problems by breaking them into simpler subproblems, each solved only once, and storing their solutions for future reference. The act of storing solutions to subproblems is known as memoization (Wikipedia). Example: min coin sum Given a positive value and a list of possible coin values, write a function that determines the minimum number of coins needed to achieve the input value. Example: coins = [1, 5, 10, 25] min_coin_sum(1) -&gt; 1 min_coin_sum(6) -&gt; 2 # 1*5 + 1*1 min_coin_sum(49) -&gt; 7 # 1*25 + 2*10 + 4*1 min_coin_sum(52) -&gt; 4 # 2*25 + 2*1 Suppose that we use the coin values [1, 5, 10, 25]. The recursive solution to this uses the observation: min_coin_sum(val) = 1 + min( min_coin_sum(val-1), min_coin_sum(val-5), min_coin_sum(val-10), min_coin_sum(val-25), ) Suppose val = 49. Notice that 49 = 48 + 1 49 = 44 + 5 49 = 39 + 10 49 = 24 + 25 In other words, 49 is achieved by adding one coin to the minimum coin total used to achieve 48, 44, 39, or 24. Let's create a recursive solution: End of explanation
"""
# Exercise the (slow) recursive version on a few sample values.
coins = [1, 5, 10, 25]

print('val num_coins')
for val in (1, 6, 42, 49):
    print('%3d %d' % (val, min_coin_sum(val, coins)))
"""
Explanation: Test: End of explanation
"""
def min_coin_sum(val, coins=None):
    # Bottom-up dynamic-programming version: table[v] holds the minimum
    # number of coins needed to reach value v, so each subproblem is
    # solved exactly once -- O(val * len(coins)) time.
    # NOTE: this redefinition shadows the recursive min_coin_sum above.
    if coins is None:
        coins = [1, 5, 10, 25]

    # Initialize table.
    table = [0 for _ in range(val+1)]

    # Dynamic programming.
    for cur_val in range(1, val+1):
        table[cur_val] = 1 + min([table[cur_val-coin] for coin in coins if cur_val-coin >= 0])
    return table[val]

# Same sample values as before (plus 52); this version answers instantly.
coins = [1, 5, 10, 25]

print('val num_coins')
for val in (1, 6, 42, 49, 52):
    print('%3d %d' % (val, min_coin_sum(val, coins)))
"""
Explanation: Notice how it takes a little while to compute the answer to 49?
That's because the recursive solution is computing the solution to subproblems multiple times. For example, the solution to 49 uses the solutions to 44 and 39. However, the solution to 44 itself needs the solution to 39 which it needlessly computes again from scratch. Memoization A better solution is to use memoization by storing the answer to previous subproblems and referring to them later: End of explanation """
cfjhallgren/shogun
doc/ipython-notebooks/classification/SupportVectorMachines.ipynb
gpl-3.0
import matplotlib.pyplot as plt %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') import matplotlib.patches as patches #To import all shogun classes import shogun as sg import numpy as np #Generate some random data X = 2 * np.random.randn(10,2) traindata=np.r_[X + 3, X + 7].T feats_train=sg.RealFeatures(traindata) trainlab=np.concatenate((np.ones(10),-np.ones(10))) labels=sg.BinaryLabels(trainlab) # Plot the training data plt.figure(figsize=(6,6)) plt.gray() _=plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.title("Training Data") plt.xlabel('attribute1') plt.ylabel('attribute2') p1 = patches.Rectangle((0, 0), 1, 1, fc="k") p2 = patches.Rectangle((0, 0), 1, 1, fc="w") plt.legend((p1, p2), ["Class 1", "Class 2"], loc=2) plt.gray() """ Explanation: Classification with Support Vector Machines by Soeren Sonnenburg | Saurabh Mahindre - <a href=\"https://github.com/Saurabh7\">github.com/Saurabh7</a> as a part of <a href=\"http://www.google-melange.com/gsoc/project/details/google/gsoc2014/saurabh7/5750085036015616\">Google Summer of Code 2014 project</a> mentored by - Heiko Strathmann - <a href=\"https://github.com/karlnapf\">github.com/karlnapf</a> - <a href=\"http://herrstrathmann.de/\">herrstrathmann.de</a> This notebook illustrates how to train a <a href="http://en.wikipedia.org/wiki/Support_vector_machine">Support Vector Machine</a> (SVM) <a href="http://en.wikipedia.org/wiki/Statistical_classification">classifier</a> using Shogun. The <a href="http://www.shogun-toolbox.org/doc/en/3.0.0/classshogun_1_1CLibSVM.html">CLibSVM</a> class of Shogun is used to do binary classification. Multiclass classification is also demonstrated using CGMNPSVM. 
Introduction Linear Support Vector Machines Prediction using Linear SVM SVMs using kernels Kernels in Shogun Prediction using kernel based SVM Probabilistic Outputs using SVM Soft margins and slack variables Binary classification using different kernels Kernel Normalizers Multiclass classification using SVM Introduction Support Vector Machines (SVM's) are a learning method used for binary classification. The basic idea is to find a hyperplane which separates the data into its two classes. However, since example data is often not linearly separable, SVMs operate in a kernel induced feature space, i.e., data is embedded into a higher dimensional space where it is linearly separable. Linear Support Vector Machines In a supervised learning problem, we are given a labeled set of input-output pairs $\mathcal{D}=(x_i,y_i)^N_{i=1}\subseteq \mathcal{X} \times \mathcal{Y}$ where $x\in\mathcal{X}$ and $y\in{-1,+1}$. SVM is a binary classifier that tries to separate objects of different classes by finding a (hyper-)plane such that the margin between the two classes is maximized. A hyperplane in $\mathcal{R}^D$ can be parameterized by a vector $\bf{w}$ and a constant $\text b$ expressed in the equation:$${\bf w}\cdot{\bf x} + \text{b} = 0$$ Given such a hyperplane ($\bf w$,b) that separates the data, the discriminating function is: $$f(x) = \text {sign} ({\bf w}\cdot{\bf x} + {\text b})$$ If the training data are linearly separable, we can select two hyperplanes in a way that they separate the data and there are no points between them, and then try to maximize their distance. The region bounded by them is called "the margin". These hyperplanes can be described by the equations $$({\bf w}\cdot{\bf x} + {\text b}) = 1$$ $$({\bf w}\cdot{\bf x} + {\text b}) = -1$$ the distance between these two hyperplanes is $\frac{2}{\|\mathbf{w}\|}$, so we want to minimize $\|\mathbf{w}\|$. 
$$ \arg\min_{(\mathbf{w},b)}\frac{1}{2}\|\mathbf{w}\|^2 \qquad\qquad(1)$$ This gives us a hyperplane that maximizes the geometric distance to the closest data points. As we also have to prevent data points from falling into the margin, we add the following constraint: for each ${i}$ either $$({\bf w}\cdot{x}_i + {\text b}) \geq 1$$ or $$({\bf w}\cdot{x}_i + {\text b}) \leq -1$$ which is similar to $${y_i}({\bf w}\cdot{x}_i + {\text b}) \geq 1 \forall i$$ Lagrange multipliers are used to modify equation $(1)$ and the corresponding dual of the problem can be shown to be: \begin{eqnarray} \max_{\bf \alpha} && \sum_{i=1}^{N} \alpha_i - \sum_{i=1}^{N}\sum_{j=1}^{N} \alpha_i y_i \alpha_j y_j {\bf x_i} \cdot {\bf x_j}\ \mbox{s.t.} && \alpha_i\geq 0\ && \sum_{i}^{N} \alpha_i y_i=0\ \end{eqnarray} From the derivation of these equations, it was seen that the optimal hyperplane can be written as: $$\mathbf{w} = \sum_i \alpha_i y_i \mathbf{x}_i. $$ here most $\alpha_i$ turn out to be zero, which means that the solution is a sparse linear combination of the training data. Prediction using Linear SVM Now let us see how one can train a linear Support Vector Machine with Shogun. Two dimensional data (having 2 attributes say: attribute1 and attribute2) is now sampled to demonstrate the classification. End of explanation """ #prameters to svm #parameter C is described in a later section. C=1 epsilon=1e-3 svm=sg.LibLinear(C, feats_train, labels) svm.set_liblinear_solver_type(sg.L2R_L2LOSS_SVC) svm.set_epsilon(epsilon) #train svm.train() w=svm.get_w() b=svm.get_bias() """ Explanation: Liblinear, a library for large- scale linear learning focusing on SVM, is used to do the classification. It supports different solver types. 
End of explanation """ #solve for w.x+b=0 x1=np.linspace(-1.0, 11.0, 100) def solve (x1): return -( ( (w[0])*x1 + b )/w[1] ) x2=map(solve, x1) #plot plt.figure(figsize=(6,6)) plt.gray() plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.plot(x1,x2, linewidth=2) plt.title("Separating hyperplane") plt.xlabel('attribute1') plt.ylabel('attribute2') plt.gray() """ Explanation: We solve ${\bf w}\cdot{\bf x} + \text{b} = 0$ to visualise the separating hyperplane. The methods get_w() and get_bias() are used to get the necessary values. End of explanation """ size=100 x1_=np.linspace(-5, 15, size) x2_=np.linspace(-5, 15, size) x, y=np.meshgrid(x1_, x2_) #Generate X-Y grid test data grid=sg.RealFeatures(np.array((np.ravel(x), np.ravel(y)))) #apply on test grid predictions = svm.apply(grid) #Distance from hyperplane z=predictions.get_values().reshape((size, size)) #plot plt.jet() plt.figure(figsize=(16,6)) plt.subplot(121) plt.title("Classification") c=plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.gray() plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.xlabel('attribute1') plt.ylabel('attribute2') plt.jet() #Class predictions z=predictions.get_labels().reshape((size, size)) #plot plt.subplot(122) plt.title("Separating hyperplane") c=plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.gray() plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.xlabel('attribute1') plt.ylabel('attribute2') plt.gray() """ Explanation: The classifier is now applied on a X-Y grid of points to get predictions. 
End of explanation """ gaussian_kernel=sg.GaussianKernel(feats_train, feats_train, 100) #Polynomial kernel of degree 2 poly_kernel=sg.PolyKernel(feats_train, feats_train, 2, True) linear_kernel=sg.LinearKernel(feats_train, feats_train) kernels=[linear_kernel, poly_kernel, gaussian_kernel] """ Explanation: SVMs using kernels If the data set is not linearly separable, a non-linear mapping $\Phi:{\bf x} \rightarrow \Phi({\bf x}) \in \mathcal{F} $ is used. This maps the data into a higher dimensional space where it is linearly separable. Our equation requires only the inner dot products ${\bf x_i}\cdot{\bf x_j}$. The equation can be defined in terms of inner products $\Phi({\bf x_i}) \cdot \Phi({\bf x_j})$ instead. Since $\Phi({\bf x_i})$ occurs only in dot products with $ \Phi({\bf x_j})$ it is sufficient to know the formula (kernel function) : $$K({\bf x_i, x_j} ) = \Phi({\bf x_i}) \cdot \Phi({\bf x_j})$$ without dealing with the maping directly. The transformed optimisation problem is: \begin{eqnarray} \max_{\bf \alpha} && \sum_{i=1}^{N} \alpha_i - \sum_{i=1}^{N}\sum_{j=1}^{N} \alpha_i y_i \alpha_j y_j k({\bf x_i}, {\bf x_j})\ \mbox{s.t.} && \alpha_i\geq 0\ && \sum_{i=1}^{N} \alpha_i y_i=0 \qquad\qquad(2)\ \end{eqnarray} Kernels in Shogun Shogun provides many options for the above mentioned kernel functions. CKernel is the base class for kernels. Some commonly used kernels : Gaussian kernel : Popular Gaussian kernel computed as $k({\bf x},{\bf x'})= exp(-\frac{||{\bf x}-{\bf x'}||^2}{\tau})$ Linear kernel : Computes $k({\bf x},{\bf x'})= {\bf x}\cdot {\bf x'}$ Polynomial kernel : Polynomial kernel computed as $k({\bf x},{\bf x'})= ({\bf x}\cdot {\bf x'}+c)^d$ Simgmoid Kernel : Computes $k({\bf x},{\bf x'})=\mbox{tanh}(\gamma {\bf x}\cdot{\bf x'}+c)$ Some of these kernels are initialised below. 
End of explanation """ plt.jet() def display_km(kernels, svm): plt.figure(figsize=(20,6)) plt.suptitle('Kernel matrices for different kernels', fontsize=12) for i, kernel in enumerate(kernels): plt.subplot(1, len(kernels), i+1) plt.title(kernel.get_name()) km=kernel.get_kernel_matrix() plt.imshow(km, interpolation="nearest") plt.colorbar() display_km(kernels, svm) """ Explanation: Just for fun we compute the kernel matrix and display it. There are clusters visible that are smooth for the gaussian and polynomial kernel and block-wise for the linear one. The gaussian one also smoothly decays from some cluster centre while the polynomial one oscillates within the clusters. End of explanation """ C=1 epsilon=1e-3 svm=sg.LibSVM(C, gaussian_kernel, labels) _=svm.train() """ Explanation: Prediction using kernel based SVM Now we train an SVM with a Gaussian Kernel. We use LibSVM but we could use any of the other SVM from Shogun. They all utilize the same kernel framework and so are drop-in replacements. End of explanation """ libsvm_obj=svm.get_objective() primal_obj, dual_obj=svm.compute_svm_primal_objective(), svm.compute_svm_dual_objective() print libsvm_obj, primal_obj, dual_obj """ Explanation: We could now check a number of properties like what the value of the objective function returned by the particular SVM learning algorithm or the explictly computed primal and dual objective function is End of explanation """ print "duality_gap", dual_obj-primal_obj """ Explanation: and based on the objectives we can compute the duality gap (have a look at reference [2]), a measure of convergence quality of the svm training algorithm . In theory it is 0 at the optimum and in reality at least close to 0. 
End of explanation """ out=svm.apply(sg.RealFeatures(grid)) z=out.get_values().reshape((size, size)) #plot plt.jet() plt.figure(figsize=(16,6)) plt.subplot(121) plt.title("Classification") c=plt.pcolor(x1_, x2_, z) plt.contour(x1_ , x2_, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.gray() plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.xlabel('attribute1') plt.ylabel('attribute2') plt.jet() z=out.get_labels().reshape((size, size)) plt.subplot(122) plt.title("Decision boundary") c=plt.pcolor(x1_, x2_, z) plt.contour(x1_ , x2_, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=50) plt.xlabel('attribute1') plt.ylabel('attribute2') plt.gray() """ Explanation: Let's now apply on the X-Y grid data and plot the results. End of explanation """ n=10 x1t_=np.linspace(-5, 15, n) x2t_=np.linspace(-5, 15, n) xt, yt=np.meshgrid(x1t_, x2t_) #Generate X-Y grid test data test_grid=sg.RealFeatures(np.array((np.ravel(xt), np.ravel(yt)))) labels_out=svm.apply(sg.RealFeatures(test_grid)) #Get values (Distance from hyperplane) values=labels_out.get_values() #Get probabilities labels_out.scores_to_probabilities() prob=labels_out.get_values() #plot plt.gray() plt.figure(figsize=(10,6)) p1=plt.scatter(values, prob) plt.title('Probabilistic outputs') plt.xlabel('Distance from hyperplane') plt.ylabel('Probability') plt.legend([p1], ["Test samples"], loc=2) """ Explanation: Probabilistic Outputs Calibrated probabilities can be generated in addition to class predictions using scores_to_probabilities() method of BinaryLabels, which implements the method described in [3]. This should only be used in conjunction with SVM. A parameteric form of a sigmoid function $$\frac{1}{{1+}exp(af(x) + b)}$$ is used to fit the outputs. Here $f(x)$ is the signed distance of a sample from the hyperplane, $a$ and $b$ are parameters to the sigmoid. This gives us the posterier probabilities $p(y=1|f(x))$. 
Let's try this out on the above example. The familiar "S" shape of the sigmoid should be visible. End of explanation """ def plot_sv(C_values): plt.figure(figsize=(20,6)) plt.suptitle('Soft and hard margins with varying C', fontsize=12) for i in range(len(C_values)): plt.subplot(1, len(C_values), i+1) linear_kernel=sg.LinearKernel(feats_train, feats_train) svm1=sg.LibSVM(C_values[i], linear_kernel, labels) svm1.train() vec1=svm1.get_support_vectors() X_=[] Y_=[] new_labels=[] for j in vec1: X_.append(traindata[0][j]) Y_.append(traindata[1][j]) new_labels.append(trainlab[j]) out1=svm1.apply(sg.RealFeatures(grid)) z1=out1.get_labels().reshape((size, size)) plt.jet() c=plt.pcolor(x1_, x2_, z1) plt.contour(x1_ , x2_, z1, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.gray() plt.scatter(X_, Y_, c=new_labels, s=150) plt.scatter(traindata[0, :], traindata[1,:], c=labels, s=20) plt.title('Support vectors for C=%.2f'%C_values[i]) plt.xlabel('attribute1') plt.ylabel('attribute2') C_values=[0.1, 1000] plot_sv(C_values) """ Explanation: Soft margins and slack variables If there is no clear classification possible using a hyperplane, we need to classify the data as nicely as possible while incorporating the misclassified samples. To do this a concept of soft margin is used. The method introduces non-negative slack variables, $\xi_i$, which measure the degree of misclassification of the data $x_i$. $$ y_i(\mathbf{w}\cdot\mathbf{x_i} + b) \ge 1 - \xi_i \quad 1 \le i \le N $$ Introducing a linear penalty function leads to $$\arg\min_{\mathbf{w},\mathbf{\xi}, b } ({\frac{1}{2} \|\mathbf{w}\|^2 +C \sum_{i=1}^n \xi_i) }$$ This in its dual form is leads to a slightly modified equation $\qquad(2)$. 
\begin{eqnarray} \max_{\bf \alpha} && \sum_{i=1}^{N} \alpha_i - \sum_{i=1}^{N}\sum_{j=1}^{N} \alpha_i y_i \alpha_j y_j k({\bf x_i}, {\bf x_j})\ \mbox{s.t.} && 0\leq\alpha_i\leq C\ && \sum_{i=1}^{N} \alpha_i y_i=0 \ \end{eqnarray} The result is that soft-margin SVM could choose decision boundary that has non-zero training error even if dataset is linearly separable but is less likely to overfit. Here's an example using LibSVM on the above used data set. Highlighted points show support vectors. This should visually show the impact of C and how the amount of outliers on the wrong side of hyperplane is controlled using it. End of explanation """ num=50; dist=1.0; gmm=sg.GMM(2) gmm.set_nth_mean(np.array([-dist,-dist]),0) gmm.set_nth_mean(np.array([dist,dist]),1) gmm.set_nth_cov(np.array([[1.0,0.0],[0.0,1.0]]),0) gmm.set_nth_cov(np.array([[1.0,0.0],[0.0,1.0]]),1) gmm.set_coef(np.array([1.0,0.0])) xntr=np.array([gmm.sample() for i in xrange(num)]).T gmm.set_coef(np.array([0.0,1.0])) xptr=np.array([gmm.sample() for i in xrange(num)]).T traindata=np.concatenate((xntr,xptr), axis=1) trainlab=np.concatenate((-np.ones(num), np.ones(num))) #shogun format features feats_train=sg.RealFeatures(traindata) labels=sg.BinaryLabels(trainlab) gaussian_kernel=sg.GaussianKernel(feats_train, feats_train, 10) #Polynomial kernel of degree 2 poly_kernel=sg.PolyKernel(feats_train, feats_train, 2, True) linear_kernel=sg.LinearKernel(feats_train, feats_train) kernels=[gaussian_kernel, poly_kernel, linear_kernel] #train machine C=1 svm=sg.LibSVM(C, gaussian_kernel, labels) _=svm.train() """ Explanation: You can see that lower value of C causes classifier to sacrifice linear separability in order to gain stability, in a sense that influence of any single datapoint is now bounded by C. For hard margin SVM, support vectors are the points which are "on the margin". 
In the picture above, C=1000 is pretty close to hard-margin SVM, and you can see the highlighted points are the ones that will touch the margin. In high dimensions this might lead to overfitting. For soft-margin SVM, with a lower value of C, it's easier to explain them in terms of dual (equation $(2)$) variables. Support vectors are datapoints from training set which are are included in the predictor, ie, the ones with non-zero $\alpha_i$ parameter. This includes margin errors and points on the margin of the hyperplane. Binary classification using different kernels Two-dimensional Gaussians are generated as data for this section. $x_-\sim{\cal N_2}(0,1)-d$ $x_+\sim{\cal N_2}(0,1)+d$ and corresponding positive and negative labels. We create traindata and testdata with num of them being negatively and positively labelled in traindata,trainlab and testdata, testlab. For that we utilize Shogun's Gaussian Mixture Model class (GMM) from which we sample the data points and plot them. End of explanation """ size=100 x1=np.linspace(-5, 5, size) x2=np.linspace(-5, 5, size) x, y=np.meshgrid(x1, x2) grid=sg.RealFeatures(np.array((np.ravel(x), np.ravel(y)))) grid_out=svm.apply(grid) z=grid_out.get_labels().reshape((size, size)) plt.jet() plt.figure(figsize=(16,5)) z=grid_out.get_values().reshape((size, size)) plt.subplot(121) plt.title('Classification') c=plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.subplot(122) plt.title('Original distribution') gmm.set_coef(np.array([1.0,0.0])) gmm.set_features(grid) grid_out=gmm.get_likelihood_for_all_examples() zn=grid_out.reshape((size, size)) gmm.set_coef(np.array([0.0,1.0])) grid_out=gmm.get_likelihood_for_all_examples() zp=grid_out.reshape((size, size)) z=zp-zn c=plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) """ Explanation: Now lets plot the contour output on a $-5...+5$ grid for The Support Vector Machines decision function 
$\mbox{sign}(f(x))$ The Support Vector Machines raw output $f(x)$ The Original Gaussian Mixture Model Distribution End of explanation """ def plot_outputs(kernels): plt.figure(figsize=(20,5)) plt.suptitle('Binary Classification using different kernels', fontsize=12) for i in range(len(kernels)): plt.subplot(1,len(kernels),i+1) plt.title(kernels[i].get_name()) svm.set_kernel(kernels[i]) svm.train() grid_out=svm.apply(grid) z=grid_out.get_values().reshape((size, size)) c=plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.scatter(traindata[0,:], traindata[1,:], c=trainlab, s=35) plot_outputs(kernels) """ Explanation: And voila! The SVM decision rule reasonably distinguishes the red from the blue points. Despite being optimized for learning the discriminative function maximizing the margin, the SVM output quality wise remotely resembles the original distribution of the gaussian mixture model. Let us visualise the output using different kernels. 
End of explanation """ f = open(os.path.join(SHOGUN_DATA_DIR, 'uci/ionosphere/ionosphere.data')) mat = [] labels = [] # read data from file for line in f: words = line.rstrip().split(',') mat.append([float(i) for i in words[0:-1]]) if str(words[-1])=='g': labels.append(1) else: labels.append(-1) f.close() mat_train=mat[:30] mat_test=mat[30:110] lab_train=sg.BinaryLabels(np.array(labels[:30]).reshape((30,))) lab_test=sg.BinaryLabels(np.array(labels[30:110]).reshape((len(labels[30:110]),))) feats_train = sg.RealFeatures(np.array(mat_train).T) feats_test = sg.RealFeatures(np.array(mat_test).T) #without normalization gaussian_kernel=sg.GaussianKernel() gaussian_kernel.init(feats_train, feats_train) gaussian_kernel.set_width(0.1) C=1 svm=sg.LibSVM(C, gaussian_kernel, lab_train) _=svm.train() output=svm.apply(feats_test) Err=sg.ErrorRateMeasure() error=Err.evaluate(output, lab_test) print 'Error:', error #set normalization gaussian_kernel=sg.GaussianKernel() # TODO: currently there is a bug that makes it impossible to use Gaussian kernels and kernel normalisers # See github issue #3504 #gaussian_kernel.set_normalizer(sg.SqrtDiagKernelNormalizer()) gaussian_kernel.init(feats_train, feats_train) gaussian_kernel.set_width(0.1) svm.set_kernel(gaussian_kernel) svm.train() output=svm.apply(feats_test) Err=sg.ErrorRateMeasure() error=Err.evaluate(output, lab_test) print 'Error with normalization:', error """ Explanation: Kernel Normalizers Kernel normalizers post-process kernel values by carrying out normalization in feature space. Since kernel based SVMs use a non-linear mapping, in most cases any normalization in input space is lost in feature space. Kernel normalizers are a possible solution to this. Kernel Normalization is not strictly-speaking a form of preprocessing since it is not applied directly on the input vectors but can be seen as a kernel interpretation of the preprocessing. The CKernelNormalizer class provides tools for kernel normalization. 
Some of the kernel normalizers in Shogun: SqrtDiagKernelNormalizer : This normalization in the feature space amounts to defining a new kernel $k'({\bf x},{\bf x'}) = \frac{k({\bf x},{\bf x'})}{\sqrt{k({\bf x},{\bf x})k({\bf x'},{\bf x'})}}$ AvgDiagKernelNormalizer : Scaling with a constant $k({\bf x},{\bf x'})= \frac{1}{c}\cdot k({\bf x},{\bf x'})$ ZeroMeanCenterKernelNormalizer : Centers the kernel in feature space and ensures each feature must have zero mean after centering. The set_normalizer() method of CKernel is used to add a normalizer. Let us try it out on the ionosphere dataset where we use a small training set of 30 samples to train our SVM. Gaussian kernel with and without normalization is used. See reference [1] for details. End of explanation """ num=30; num_components=4 means=np.zeros((num_components, 2)) means[0]=[-1.5,1.5] means[1]=[1.5,-1.5] means[2]=[-1.5,-1.5] means[3]=[1.5,1.5] covs=np.array([[1.0,0.0],[0.0,1.0]]) gmm=sg.GMM(num_components) [gmm.set_nth_mean(means[i], i) for i in range(num_components)] [gmm.set_nth_cov(covs,i) for i in range(num_components)] gmm.set_coef(np.array([1.0,0.0,0.0,0.0])) xntr=np.array([gmm.sample() for i in xrange(num)]).T xnte=np.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(np.array([0.0,1.0,0.0,0.0])) xntr1=np.array([gmm.sample() for i in xrange(num)]).T xnte1=np.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(np.array([0.0,0.0,1.0,0.0])) xptr=np.array([gmm.sample() for i in xrange(num)]).T xpte=np.array([gmm.sample() for i in xrange(5000)]).T gmm.set_coef(np.array([0.0,0.0,0.0,1.0])) xptr1=np.array([gmm.sample() for i in xrange(num)]).T xpte1=np.array([gmm.sample() for i in xrange(5000)]).T traindata=np.concatenate((xntr,xntr1,xptr,xptr1), axis=1) testdata=np.concatenate((xnte,xnte1,xpte,xpte1), axis=1) l0 = np.array([0.0 for i in xrange(num)]) l1 = np.array([1.0 for i in xrange(num)]) l2 = np.array([2.0 for i in xrange(num)]) l3 = np.array([3.0 for i in xrange(num)]) 
trainlab=np.concatenate((l0,l1,l2,l3)) testlab=np.concatenate((l0,l1,l2,l3)) plt.title('Toy data for multiclass classification') plt.jet() plt.scatter(traindata[0,:], traindata[1,:], c=trainlab, s=75) feats_train=sg.RealFeatures(traindata) labels=sg.MulticlassLabels(trainlab) """ Explanation: Multiclass classification Multiclass classification can be done using SVM by reducing the problem to binary classification. More on multiclass reductions in this notebook. CGMNPSVM class provides a built in one vs rest multiclass classification using GMNPlib. Let us see classification using it on four classes. CGMM class is used to sample the data. End of explanation """ gaussian_kernel=sg.GaussianKernel(feats_train, feats_train, 2) poly_kernel=sg.PolyKernel(feats_train, feats_train, 4, True) linear_kernel=sg.LinearKernel(feats_train, feats_train) kernels=[gaussian_kernel, poly_kernel, linear_kernel] svm=sg.GMNPSVM(1, gaussian_kernel, labels) _=svm.train(feats_train) size=100 x1=np.linspace(-6, 6, size) x2=np.linspace(-6, 6, size) x, y=np.meshgrid(x1, x2) grid=sg.RealFeatures(np.array((np.ravel(x), np.ravel(y)))) def plot_outputs(kernels): plt.figure(figsize=(20,5)) plt.suptitle('Multiclass Classification using different kernels', fontsize=12) for i in range(len(kernels)): plt.subplot(1,len(kernels),i+1) plt.title(kernels[i].get_name()) svm.set_kernel(kernels[i]) svm.train(feats_train) grid_out=svm.apply(grid) z=grid_out.get_labels().reshape((size, size)) plt.pcolor(x, y, z) plt.contour(x, y, z, linewidths=1, colors='black', hold=True) plt.colorbar(c) plt.scatter(traindata[0,:], traindata[1,:], c=trainlab, s=35) plot_outputs(kernels) """ Explanation: Let us try the multiclass classification for different kernels. End of explanation """
darioizzo/d-CGP
doc/sphinx/notebooks/learning_constants.ipynb
gpl-3.0
from dcgpy import expression_gdual_vdouble as expression from dcgpy import kernel_set_gdual_vdouble as kernel_set from pyaudi import gdual_vdouble as gdual import pyaudi from matplotlib import pyplot as plt import numpy as np from random import randint %matplotlib inline """ Explanation: Learning constants in a symbolic regression task (deprecated) One of the long standing "skeletons in the closet" of GP techniques is the constant finding problem. It is widely acknowledged that the ephemeral random constant approach, de facto the main solution proposed to this problem, is far from being satisfactory. Using dCGP, we are here able to successfully learn constants as well as expressions during evolution thanks to the hybridization of the evolutionary strategy with a, second order, gradient descent approach that learns the exact value of ephemeral constants, thus avoiding to build such a value by applying kernel functions to the constants. NOTE: since v1.4 symbolic regression is performed via dedicated classes and not manipulating directly the dcgpy.expression Lets first import dcgpy and pyaudi and set up things as to compute our CGP using the type "gdual" and thus get for free all derivatives End of explanation """ # note he use of the protected division "pdiv" (not necessary here) # note the call operator (returns the list of kernels) kernels = kernel_set(["sum", "mul", "diff","pdiv"])() """ Explanation: The kernel functions End of explanation """ def run_experiment(dCGP, offsprings, max_gen, x, yt, screen_output): # The offsprings chromosome, fitness and constant chromosome = [1] * offsprings fitness = [1] *offsprings constant = [1]*offsprings # Init the best as the initial random dCGP best_chromosome = dCGP.get() best_constant = 1. 
fit, _ = err2(dCGP, x, yt, best_constant) best_fitness = sum(fit.constant_cf) # Main loop over generations for g in range(max_gen): for i in range(offsprings): dCGP.set(best_chromosome) cumsum=0 dCGP.mutate_active(i+1) fit, constant[i] = err2(dCGP, x, yt, best_constant) fitness[i] = sum(fit.constant_cf ) chromosome[i] = dCGP.get() for i in range(offsprings): if fitness[i] <= best_fitness: if (fitness[i] != best_fitness): best_chromosome = chromosome[i] best_fitness = fitness[i] best_constant = constant[i] dCGP.set(best_chromosome) if screen_output: print("New best found: gen: ", g, " value: ", fitness[i], dCGP.simplify(["x","c"]), "c =", best_constant) if best_fitness < 1e-14: break return g, best_chromosome, best_constant """ Explanation: The ES-(1+$\lambda$) algorithm End of explanation """ # The following functions create the target values for a gridded input x for different test problems def data_P1(x): return x**5 - np.pi*x**3 + x def data_P2(x): return x**5 - np.pi*x**3 + 2*np.pi / x def data_P3(x): return (np.e*x**5 + x**3)/(x + 1) def data_P4(x): return pyaudi.sin(np.pi * x) + 1./x def data_P5(x): return np.e * x**5 - np.pi*x**3 + np.sqrt(2) * x """ Explanation: The test problems As target functions, we define three different problems of increasing complexity: P1: $x^5 - \pi x^3 + x$ P2: $x^5 - \pi x^3 + \frac{2\pi}x$ P3: $\frac{e x^5 + x^3}{x + 1}$ P4: $\sin(\pi x) + \frac 1x$ note how $\pi$ and $e$ are present in the expressions. End of explanation """ # This is the quadratic error of a dCGP expression when the constant value is cin. The error is computed # over the input points xin (of type gdual, order 0 as we are not interested in expanding the program w.r.t. these) # The target values are contained in yt (of type gdual, order 0 as we are not interested in expanding the program w.r.t. 
these) def err(dCGP, xin, yt, cin): c = gdual([cin], "c", 2) y = dCGP([xin,c])[0] return (y-yt)**2 # This is the quadratic error of the expression when the constant value is learned using a, one step, # second order method. def err2(dCGP, xin, yt,cin): c = gdual([cin], "c", 2) y = dCGP([xin,c])[0] error = err(dCGP,xin,yt,cin) dc = sum(error.get_derivative({"dc":1})) dc2 = sum(error.get_derivative({"dc":2})) if dc2 != 0: learned_constant = c - dc/dc2 y = dCGP([xin, learned_constant])[0] else: learned_constant = c return (y-yt)**2, learned_constant.constant_cf[0] """ Explanation: The error functions End of explanation """ x = np.linspace(1,3,10) x = gdual(x) yt = data_P1(x) # We run nexp experiments and accumulate statistic for the ERT nexp = 100 offsprings = 4 max_gen=1000 res = [] kernels = kernel_set(["sum", "mul", "diff","pdiv"])() print("restart: \t gen: \t expression:") for i in range(nexp): dCGP = expression(inputs=2, outputs=1, rows=1, cols=15, levels_back=16, arity=2, kernels=kernels, seed = randint(0,1233456)) g, best_chromosome, best_constant = run_experiment(dCGP, offsprings,max_gen,x,yt, screen_output=False) res.append(g) dCGP.set(best_chromosome) if g < (max_gen-1): print(i, "\t\t", res[i], "\t", dCGP(["x","c"]), " a.k.a ", dCGP.simplify(["x","c"]), "c = ", best_constant) res = np.array(res) mean_gen = sum(res) / sum(res<(max_gen-1)) print("ERT Expected run time = avg. 
number of dCGP evaluations needed: ", mean_gen * offsprings) """ Explanation: Problem P1: $x^5 - \pi x^3 + x$ End of explanation """ x = np.linspace(0.1,5,10) # we include points close to zero here to favour learning of 1/x x = gdual(x) yt = data_P2(x) # We run nexp experiments and accumulate statistic for the ERT nexp = 100 offsprings = 4 max_gen=5000 res = [] kernels = kernel_set(["sum", "mul", "diff","pdiv"])() print("restart: \t gen: \t expression:") for i in range(nexp): dCGP = expression(inputs=2, outputs=1, rows=1, cols=15, levels_back=16, arity=2, kernels=kernels, seed = randint(0,1233456)) g, best_chromosome, best_constant = run_experiment(dCGP, offsprings,max_gen,x,yt, screen_output=False) res.append(g) dCGP.set(best_chromosome) if g < (max_gen-1): print(i, "\t\t", res[i], "\t", dCGP(["x","c"]), " a.k.a ", dCGP.simplify(["x","c"]), "c = ", best_constant) res = np.array(res) mean_gen = sum(res) / sum(res<(max_gen-1)) print("ERT Expected run time = avg. number of dCGP evaluations needed: ", mean_gen * offsprings) """ Explanation: Problem P2 - $x^5 - \pi x^3 + \frac{2\pi}x$ End of explanation """ x = np.linspace(-0.9,1,10) x = gdual(x) yt = data_P3(x) # We run nexp experiments and accumulate statistic for the ERT nexp = 100 offsprings = 4 max_gen=5000 res = [] kernels = kernel_set(["sum", "mul", "diff","pdiv"])() print("restart: \t gen: \t expression:") for i in range(nexp): dCGP = expression(inputs=2, outputs=1, rows=1, cols=15, levels_back=16, arity=2, kernels=kernels, seed = randint(0,1233456)) g, best_chromosome, best_constant = run_experiment(dCGP, offsprings,max_gen,x,yt, screen_output=False) res.append(g) dCGP.set(best_chromosome) if g < (max_gen-1): print(i, "\t\t", res[i], "\t", dCGP(["x","c"]), " a.k.a ", dCGP.simplify(["x","c"]), "c = ", best_constant) res = np.array(res) mean_gen = sum(res) / sum(res<(max_gen-1)) print("ERT Expected run time = avg. 
number of dCGP evaluations needed: ", mean_gen * offsprings) """ Explanation: Problem P3 - $\frac{e x^5 + x^3}{x + 1}$ End of explanation """ x = np.linspace(-1,1,10) x = gdual(x) yt = data_P4(x) # We run nexp experiments and accumulate statistic for the ERT nexp = 100 offsprings = 4 max_gen=5000 res = [] kernels = kernel_set(["sum", "mul", "diff","pdiv","sin"])() print("restart: \t gen: \t expression:") for i in range(nexp): dCGP = expression(inputs=2, outputs=1, rows=1, cols=15, levels_back=16, arity=2, kernels=kernels, seed = randint(0,1233456)) g, best_chromosome, best_constant = run_experiment(dCGP, offsprings,max_gen,x,yt, screen_output=False) res.append(g) dCGP.set(best_chromosome) if g < (max_gen-1): print(i, "\t\t", res[i], "\t", dCGP(["x","c"]), " a.k.a ", dCGP.simplify(["x","c"]), "c = ", best_constant) res = np.array(res) mean_gen = sum(res) / sum(res<(max_gen-1)) print("ERT Expected run time = avg. number of dCGP evaluations needed: ", mean_gen * offsprings) """ Explanation: Problem P4: $\sin(\pi x) + \frac 1x$ End of explanation """
joandamerow/lit-mining-occurrencedb
notebooks/classifiers_01_preprocess.ipynb
isc
import os
from os.path import join, basename, splitext
import subprocess
from glob import glob
# BUGFIX: move() is used in move_duplicates() below but was never imported.
from shutil import copy, move
from random import shuffle, seed
from pyzotero import zotero
from lib.secrets import CORRECTED_PAPERS_DATASET, USER_KEY
output_dir = join('data', 'pdf')
"""
Explanation: Get files from Zotero
End of explanation
"""
def get_pdfs(output_dir, collection_name, tag):
    """Download every PDF attachment tagged `tag` from the Zotero
    collection `collection_name` into output_dir/collection_name/tag."""
    # Create the output directory
    path = join(output_dir, collection_name, tag)
    os.makedirs(path, exist_ok=True)
    # Connect to Zotero
    zot = zotero.Zotero(CORRECTED_PAPERS_DATASET, 'group', USER_KEY)
    # Get the collection of interest and its key
    collections = {c['data']['name']: c for c in zot.collections()}
    collection = collections[collection_name]
    key = collection['key']
    # Now get the items in the collection that have the given tag
    items = [d for d in zot.everything(zot.collection_items(key, tag=tag))]
    # items = [d for d in zot.collection_items(key, tag=tag, limit=3)]
    # Get the PDF attachment for each item and save it to the category directory
    for item in items:
        # An item's attachments
        children = [c for c in zot.children(item['key'])]
        # Just get the PDFs
        pdfs = [c for c in children if c['data'].get('contentType') == 'application/pdf']
        # Handle when there are no attachments
        if not children:
            print('\nMISSING DOCUMENTS {}\n'.format(item['key']))
        # Handle when there are no PDF attachments
        elif not pdfs:
            print('\nNO PDFs {}\n'.format(item['key']))
        # Handle when there is more than one PDF attachment
        elif len(pdfs) != 1:
            print('\nTOO MANY PDFs {}\n'.format(item['key']))
        # Save the PDF to the category directory
        else:
            doc = pdfs[0]
            print(doc['data']['filename'])
            zot.dump(doc['key'], '{}.pdf'.format(doc['key']), path)
get_pdfs(output_dir, 'RSet_N1', 'Rel-Yes')
get_pdfs(output_dir, 'RSet_N1', 'Rel-No')
get_pdfs(output_dir, 'RSet_N2', 'Rel-Yes')
get_pdfs(output_dir, 'RSet_N2', 'Rel-No')
"""
Explanation: This is the function that does the actual download of the PDFs using Zotero's API.
First, we need to get all of the collections in the Zotero Library. Collections are like sub-folders in the library. We will be looking for a collection with the given name. Next, we will get all of the items in a collection with a given tag. We have been tagging items with a "Rel-Yes" or "Rel-No" when we determine if the item is relevant to the study or not. Finally, we can get the PDF attachment associated with the item. An item may have more than one attachment (PDF, HTML, etc.) underneath it. However, for our current purpose we are only concerned with the PDF.
End of explanation
"""
def file_names(root, category):
    """Return the base names of all PDFs under data/pdf/<root>/<category>."""
    pattern = join('data', 'pdf', root, category, '*.pdf')
    paths = glob(pattern)
    return [basename(p) for p in paths]
"""
Explanation: Handle duplicate files
It turns out that some files have both labels (Rel-Yes and Rel-No). We need to remove these files from the data set.
Get all PDF file names for a particular category.
End of explanation
"""
def move_duplicates(root):
    """Files present in both Rel-Yes and Rel-No are ambiguous: park one
    copy in data/pdf/duplicates and delete the other."""
    rel_yes = set(file_names(root, 'Rel-Yes'))
    rel_no = set(file_names(root, 'Rel-No'))
    duplicates = rel_yes & rel_no
    dup_root = join('data', 'pdf', 'duplicates')
    os.makedirs(dup_root, exist_ok=True)
    for duplicate in duplicates:
        print(duplicate)
        src = join('data', 'pdf', root, 'Rel-Yes', duplicate)
        dst = join(dup_root, duplicate)
        move(src, dst)
        src = join('data', 'pdf', root, 'Rel-No', duplicate)
        os.remove(src)
move_duplicates('RSet_N1')
move_duplicates('RSet_N2')
"""
Explanation: We move one copy of the file out of the way and delete the extra copy.
End of explanation
"""
def pdf_to_text(output_dir, pdf_path):
    """Extract the text of one PDF into output_dir/<name>.txt via pdftotext."""
    txt_name = basename(pdf_path)
    txt_name = splitext(txt_name)[0] + '.txt'
    txt_path = join(output_dir, txt_name)
    try:
        # Pass the paths as an argument list instead of a quoted shell
        # string: no shell is involved, so filenames containing quotes or
        # other shell metacharacters are handled correctly.
        subprocess.check_call(['pdftotext', pdf_path, txt_path])
    except Exception:
        # Best effort: a single unconvertible PDF should not abort the batch,
        # but the failure is at least reported now instead of being silent.
        print('Failed to convert:', pdf_path)
"""
Explanation: Convert PDF files to text
Convert the PDF files to text. They will be placed into the given output directory.
This utility depends on the external program "xpdf" specifically "pdftotext". Extract the text from the PDF and write it to a file.
End of explanation
"""
def convert_pdfs(input_dir, output_dir):
    """Convert every PDF in input_dir to a .txt file in output_dir."""
    os.makedirs(output_dir, exist_ok=True)
    pattern = join(input_dir, '*.pdf')
    pdf_paths = glob(pattern)
    for i, pdf_path in enumerate(pdf_paths, 1):
        print('Converting:', pdf_path)
        pdf_to_text(output_dir, pdf_path)
convert_pdfs('data/pdf/RSet_N1/Rel-Yes', 'data/Rel-Yes')
convert_pdfs('data/pdf/RSet_N1/Rel-No', 'data/Rel-No')
convert_pdfs('data/pdf/RSet_N2/Rel-Yes', 'data/Rel-Yes')
convert_pdfs('data/pdf/RSet_N2/Rel-No', 'data/Rel-No')
"""
Explanation: Loop through all of the PDFs and convert them
End of explanation
"""
dsevilla/bdge
hbase/sesion5.ipynb
mit
from pprint import pprint as pp import pandas as pd import matplotlib.pyplot as plt import matplotlib %matplotlib inline matplotlib.style.use('ggplot') """ Explanation: NoSQL (HBase) (sesión 5) Esta hoja muestra cómo acceder a bases de datos HBase y también a conectar la salida con Jupyter. Se puede utilizar el shell propio de HBase en el contenedor. Con HBase vamos a simular un clúster de varias máquinas con varios contenedores conectados. En el directorio hbase del repositorio git hay un script para ejecutar la instalación con docker-compose. Para conectarse al clúster con un shell de hbase, hay que ejecutar, desde una terminal el siguiente comando de docker: ```bash $ docker exec -ti hbase-regionserver hbase shell Base Shell; enter 'help<RETURN>' for list of supported commands. Type "exit<RETURN>" to leave the HBase Shell Version 1.2.7, rac57c51f7ad25e312b4275665d62b34a5945422f, Fri Sep 7 16:11:05 CDT 2018 hbase(main):001:0> ``` End of explanation """ import os import os.path as path from urllib.request import urlretrieve def download_file_upper_dir(baseurl, filename): file = path.abspath(path.join(os.getcwd(),os.pardir,filename)) if not os.path.isfile(file): urlretrieve(baseurl + '/' + filename, file) baseurl = 'http://neuromancer.inf.um.es:8080/es.stackoverflow/' download_file_upper_dir(baseurl, 'Posts.csv') download_file_upper_dir(baseurl, 'Users.csv') download_file_upper_dir(baseurl, 'Tags.csv') download_file_upper_dir(baseurl, 'Comments.csv') download_file_upper_dir(baseurl, 'Votes.csv') !pip install happybase import happybase host = 'hbase-thriftserver' pool = happybase.ConnectionPool(size=5, host=host) with pool.connection() as connection: print(connection.tables()) """ Explanation: Usaremos la librería happybase para python. La cargamos a continuación y hacemos la conexión. 
End of explanation """ # Create tables tables = ['posts', 'votes', 'users', 'tags', 'comments'] for t in tables: try: with pool.connection() as connection: connection.create_table( t, { 'rawdata': dict(max_versions=1,compression='GZ') }) except Exception as e: print("Database already exists: {0}. {1}".format(t, e)) pass with pool.connection() as connection: print(connection.tables()) """ Explanation: Para la carga inicial, vamos a crear todas las tablas con una única familia de columnas, rawdata, donde meteremos toda la información raw comprimida. Después podremos hacer reorganizaciones de los datos para hacer el acceso más eficiente. Es una de las muchas ventajas de no tener un esquema. End of explanation """ import csv def csv_to_hbase(file, tablename, cf): with pool.connection() as connection, open(file) as f: table = connection.table(tablename) # La llamada csv.reader() crea un iterador sobre un fichero CSV reader = csv.reader(f, dialect='excel') # Se leen las columnas. Sus nombres se usarán para crear las diferentes columnas en la familia columns = next(reader) columns = [cf + ':' + c for c in columns] with table.batch(batch_size=500) as b: for row in reader: # La primera columna se usará como Row Key b.put(row[0], dict(zip(columns[1:], row[1:]))) for t in tables: print("Importando tabla {0}...".format(t)) %time csv_to_hbase('../'+t.capitalize() + '.csv', t, 'rawdata') """ Explanation: El código de importación es siempre el mismo, ya que se coge la primera fila del CSV que contiene el nombre de las columnas y se utiliza para generar nombres de columnas dentro de la familia de columnas dada como parámetro. La función csv_to_hbase() acepta un fichero CSV a abrir, un nombre de tabla y una familia de columnas donde agregar las columnas del fichero CSV. En nuestro caso siempre va a ser rawdata. 
End of explanation """ with pool.connection() as connection: posts = connection.table('posts') """ Explanation: Consultas sencillas desde Python A continuación veremos algunas consultas sencillas desde python usando el API de happybase. End of explanation """ posts.row(b'5',columns=[b'rawdata:Body']) """ Explanation: Obtener el Post con Id 5. La orden más sencilla e inmediata de HBase es obtener una fila, opcionalmente limitando las columnas a mostrar: End of explanation """ # http://stackoverflow.com/a/30525061/62365 class DictTable(dict): # Overridden dict class which takes a dict in the form {'a': 2, 'b': 3}, # and renders an HTML Table in IPython Notebook. def _repr_html_(self): htmltext = ["<table width=100%>"] for key, value in self.items(): htmltext.append("<tr>") htmltext.append("<td>{0}</td>".format(key.decode('utf-8'))) htmltext.append("<td>{0}</td>".format(value.decode('utf-8'))) htmltext.append("</tr>") htmltext.append("</table>") return ''.join(htmltext) # Muestra cómo queda la fila del Id del Post 9997 DictTable(posts.row(b'5')) DictTable(posts.row(b'5',columns=[b'rawdata:AnswerCount',b'rawdata:AcceptedAnswerId'])) """ Explanation: El siguiente código permite mostrar de forma amigable las tablas extraídas de la base de datos en forma de diccionario: End of explanation """ row = posts.row(b'5') for key, value in row.items(): print("Key = '%s', Value = '%s'" % (key, value.decode('utf-8')[:40])) """ Explanation: Y también se puede recorrer como un diccionario normal (el decode se utiliza para convertir los valores binarios de la base de datos a una codificación UTF-8): End of explanation """ max_len = 0 for key, data in posts.scan(): cur_len = len(data[b'rawdata:Body'].decode('utf-8')) if cur_len > max_len: max_len = cur_len print("Máxima longitud: %s caracteres." % (max_len)) """ Explanation: Finalmente, también se puede recorrer toda la tabla estableciendo filtros, que se estudiarán después. Se utiliza la función scan. 
Se puede iterar con los parámetros key y data. Por ejemplo, calcular el tamaño máximo de la longitud del texto de los posts: (OJO, es un ejemplo, no se debería hacer así) End of explanation """ with pool.connection() as connection: comments = connection.table('comments') posts = connection.table('posts') with posts.batch(batch_size=500) as bp: # Hacer un scan de la tabla for key, data in comments.scan(): comment = {'comments:' + d.decode('utf-8').split(':')[1] + "_" + key.decode('utf-8') : data[d].decode('utf-8') for d in data.keys()} bp.put(data[b'rawdata:PostId'], comment) DictTable(posts.row(b'7251')) %timeit q = posts.row(b'7251') from functools import reduce def doit(): q = posts.row(b'7251') (s,n) = reduce(lambda res, e: (res[0]+len(e[1].decode('utf-8')), res[1]+1) if e[0].decode('utf-8').startswith('comments:Text') else res , q.items(), (0,0)) return (s/n) %timeit doit() # MySQL -> 1.12 ms # HBase -> 1.47 ms """ Explanation: Construcción de estructuras anidadas Al igual que pasaba con MongoDB, las bases de datos NoSQL como en este caso HBase permiten almacenar estructuras de datos complejas. En nuestro caso vamos a agregar los comentarios de cada pregunta o respuesta (post) en columnas del mismo. Para ello, creamos una nueva familia de columnas comments. HBase es bueno para añadir columnas sencillas, por ejemplo que contengan un valor. Sin embargo, si queremos añadir objetos complejos, tenemos que jugar con la codificación de la familia de columnas y columna. Usaremos el shell porque happybase no permite alterar tablas ya creadas. Para acceder al shell de HBase, tenemos que contactar al contenedor hbase-regionserver, de esta forma: bash $ docker exec -ti hbase-regionserver hbase shell En el shell de HBase pondremos lo siguiente: disable 'posts' alter 'posts', {NAME =&gt; 'comments', VERSIONS =&gt; 1} enable 'posts' Cada comentario que añadimos contiene, al menos: un id único un texto un autor etc. ¿Cómo se consigue meterlo en una única familia de columnas? 
Hay varias formas. La que usaremos aquí, añadiremos el id de cada comentario como parte del nombre de la columna. Por ejemplo, el comentario con Id 2000, generará las columnas: Id_2000 (valor 2000) UserId_2000 PostId_2000 Text_2000 con sus correspondientes valores. Así, todos los datos relativos al comentario con Id original 2000, estarán almacenados en todas las columnas que terminen en "_2000". La base de datos permite implementar filtros que nos permiten buscar esto de forma muy sencilla. Los veremos después. End of explanation """ download_file_upper_dir('http://neuromancer.inf.um.es:8080/wikipedia/','eswiki.xml.gz') """ Explanation: EJERCICIO: ¿Cómo sería el código para saber qué usuarios han comentado un post en particular? Wikipedia Como otro ejemplo de carga de datos y de organización en HBase, veremos de manera simplificada el ejemplo de la wikipedia visto en teoría. A continuación se descarga una pequeña parte del fichero de la wikipedia en XML: End of explanation """ import xml.sax import re class WikiHandler(xml.sax.handler.ContentHandler): def __init__(self): self._charBuffer = '' self.document = {} def _getCharacterData(self): data = self._charBuffer self._charBuffer = '' return data def parse(self, f, callback): self.callback = callback xml.sax.parse(f, self) def characters(self, data): self._charBuffer = self._charBuffer + data def startElement(self, name, attrs): if name == 'page': # print 'Start of page' self.document = {} if re.match(r'title|timestamp|username|comment|text', name): self._charBuffer = '' def endElement(self, name): if re.match(r'title|timestamp|username|comment|text', name): self.document[name] = self._getCharacterData() # print(name, ': ', self.document[name][:20]) if 'revision' == name: self.callback(self.document) """ Explanation: Se crea la tabla para albergar la wikipedia. Igual que la vista en teoría, pero aquí se usa wikipedia en vez de wiki para que no colisionen la versión completa con la reducida. 
De nuevo en el shell de HBase: create 'wikipedia' , 'text', 'revision' disable 'wikipedia' # Para evitar su uso temporal alter 'wikipedia' , { NAME =&gt; 'text', VERSIONS =&gt; org.apache.hadoop.hbase.HConstants::ALL_VERSIONS } alter 'wikipedia' , { NAME =&gt; 'revision', VERSIONS =&gt; org.apache.hadoop.hbase.HConstants::ALL_VERSIONS } alter 'wikipedia' , { NAME =&gt; 'text', COMPRESSION =&gt; 'GZ', BLOOMFILTER =&gt; 'ROW'} enable 'wikipedia' Este código, visto en teoría, recorre el árbol XML construyendo documentos y llamando a la función callback con cada uno. Los documentos son diccionarios con las claves encontradas dentro de los tags &lt;page&gt;...&lt;/page&gt;. End of explanation """ import time import os import gzip class FillWikiTable(): """Llena la tabla Wiki""" def __init__(self,connection): # Conectar a la base de datos a través de Thrift self.table = connection.table('wikipedia') def run(_s): def processdoc(d): print("Callback called with {0}".format(d['title'])) tuple_time = time.strptime(d['timestamp'], "%Y-%m-%dT%H:%M:%SZ") timestamp = int(time.mktime(tuple_time)) _s.table.put(d['title'], {'text:': d.get('text',''), 'revision:author': d.get('username',''), 'revision:comment': d.get('comment','')}, timestamp=timestamp) with gzip.open(os.path.join(os.pardir,'eswiki.xml.gz'),'r') as f: start = time.time() WikiHandler().parse(f, processdoc) end = time.time() print ("End adding documents. Time: %.5f" % (end - start)) with pool.connection() as connection: FillWikiTable(connection).run() """ Explanation: El codigo a continuación, cada vez que el código anterior llama a la función processdoc() se añade un documento a la base de datos. 
End of explanation """ import sys class BuildLinks(): """Llena la tabla de Links""" def __init__(self,connection): # Create table try: connection.create_table( "wikilinks", { 'from': dict(bloom_filter_type='ROW',max_versions=1), 'to' : dict(bloom_filter_type='ROW',max_versions=1) }) except: print ("Database wikilinks already exists.") pass self.table = connection.table('wikilinks') self.wikitable = connection.table('wikipedia') def run(self): print("run") linkpattern = r'\[\[([^\[\]\|\:\#][^\[\]\|:]*)(?:\|([^\[\]\|]+))?\]\]' # target, label with self.table.batch(batch_size=500) as b: for key, data in self.wikitable.scan(): to_dict = {} doc = key.strip().decode('utf-8') print("\n{0}:".format(doc)) for mo in re.finditer(linkpattern, data[b'text:'].decode('utf-8')): (target, label) = mo.groups() target = target.strip() if target == '': continue label = '' if not label else label label = label.strip() to_dict['to:' + target] = label sys.stdout.write(".") b.put(target, {'from:' + doc : label}) if bool(to_dict): b.put(doc, to_dict) with pool.connection() as connection: BuildLinks(connection).run() """ Explanation: El código a continuación permite ver las diferentes versiones de una revisión. Como la versión reducida es muy pequeña no da lugar a que haya ninguna revisión, pero con este código se vería. Hace uso del shell de HBase: get 'wikipedia', 'Commodore Amiga', {COLUMN =&gt; 'revision',VERSIONS=&gt;10} Enlazado de documentos en la wikipedia Los artículos de la wikipedia llevan enlaces entre sí, incluyendo referencias del tipo [[artículo referenciado]]. Se pueden extraer estos enlaces y se puede construir un grafo de conexiones. Para cada artículo, se anotarán qué enlaces hay que salen de él y hacia qué otros artículos enlazan y también qué enlaces llegan a él. Esto se hará con dos familias de columnas, from y to. En cada momento, se añadirá una columna from:artículo cuando un artículo nos apunte, y otras columnas to:articulo con los artículos que nosotros enlazamos. 
End of explanation """
wangg12/caffe
examples/03-fine-tuning.ipynb
bsd-2-clause
import os os.chdir('..') import sys sys.path.insert(0, './python') import caffe import numpy as np from pylab import * %matplotlib inline # This downloads the ilsvrc auxiliary data (mean file, etc), # and a subset of 2000 images for the style recognition task. !data/ilsvrc12/get_ilsvrc_aux.sh !scripts/download_model_binary.py models/bvlc_reference_caffenet !python examples/finetune_flickr_style/assemble_data.py \ --workers=-1 --images=2000 --seed=1701 --label=5 """ Explanation: Fine-tuning a Pretrained Network for Style Recognition In this example, we'll explore a common approach that is particularly useful in real-world applications: take a pre-trained Caffe network and fine-tune the parameters on your custom data. The upside of such approach is that, since pre-trained networks are learned on a large set of images, the intermediate layers capture the "semantics" of the general visual appearance. Think of it as a very powerful feature that you can treat as a black box. On top of that, only a few layers will be needed to obtain a very good performance of the data. First, we will need to prepare the data. This involves the following parts: (1) Get the ImageNet ilsvrc pretrained model with the provided shell scripts. (2) Download a subset of the overall Flickr style dataset for this demo. (3) Compile the downloaded Flickr dataset into a database that Caffe can then consume. End of explanation """ !diff models/bvlc_reference_caffenet/train_val.prototxt models/finetune_flickr_style/train_val.prototxt """ Explanation: Let's show what is the difference between the fine-tuning network and the original caffe model. End of explanation """ niter = 200 # losses will also be stored in the log train_loss = np.zeros(niter) scratch_train_loss = np.zeros(niter) caffe.set_device(0) caffe.set_mode_gpu() # We create a solver that fine-tunes from a previously trained network. 
solver = caffe.SGDSolver('models/finetune_flickr_style/solver.prototxt')
solver.net.copy_from('models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
# For reference, we also create a solver that does no finetuning.
scratch_solver = caffe.SGDSolver('models/finetune_flickr_style/solver.prototxt')
# We run the solver for niter times, and record the training loss.
for it in range(niter):
    solver.step(1)  # SGD by Caffe
    scratch_solver.step(1)
    # store the train loss
    train_loss[it] = solver.net.blobs['loss'].data
    scratch_train_loss[it] = scratch_solver.net.blobs['loss'].data
    if it % 10 == 0:
        # BUGFIX: converted the Python 2 print statement to the print()
        # function, consistent with the Python 3 code elsewhere in this file;
        # the %-formatted output is byte-identical.
        print('iter %d, finetune_loss=%f, scratch_loss=%f' % (it, train_loss[it], scratch_train_loss[it]))
print('done')
"""
Explanation: For your record, if you want to train the network in pure C++ tools, here is the command:
<code>
build/tools/caffe train \
    -solver models/finetune_flickr_style/solver.prototxt \
    -weights models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel \
    -gpu 0
</code>
However, we will train using Python in this example.
End of explanation
"""
A closer look at small values, clipping to avoid showing too large loss during training: End of explanation """ test_iters = 10 accuracy = 0 scratch_accuracy = 0 for it in arange(test_iters): solver.test_nets[0].forward() accuracy += solver.test_nets[0].blobs['accuracy'].data scratch_solver.test_nets[0].forward() scratch_accuracy += scratch_solver.test_nets[0].blobs['accuracy'].data accuracy /= test_iters scratch_accuracy /= test_iters print 'Accuracy for fine-tuning:', accuracy print 'Accuracy for training from scratch:', scratch_accuracy """ Explanation: Let's take a look at the testing accuracy after running 200 iterations. Note that we are running a classification task of 5 classes, thus a chance accuracy is 20%. As we will reasonably expect, the finetuning result will be much better than the one from training from scratch. Let's see. End of explanation """
matthewfeickert/Behnke-Data-Analysis-in-HEP
Notebooks/Chapter01/Exercise-1.5-py.ipynb
mit
import math import numpy as np from scipy import special as special %matplotlib inline import matplotlib.pyplot as plt import matplotlib.mlab as mlab from prettytable import PrettyTable """ Explanation: Data Analysis in High Energy Physics: Exercise 1.5 $p$-values Find the number of standard deviations corresponding to $p$-values of 10%, 5%, and 1% for a Gaussian distribution. Consider both one-sided and two-sided $p$-values. Reminder: The error function is defined as the symmetric integral over the range of the standard Gaussian, $\displaystyle \text{erf}(x) = \frac{1}{\sqrt{\pi}} \int\limits_{-x}^{x}e^{-t^2}\,dt = \frac{2}{\sqrt{\pi}} \int\limits_{0}^{x}e^{-t^2}\,dt\,,$ and so the probability for Gaussian distributed data to lie within $y$ of the mean is $\displaystyle P(\mu - y \leq x \leq \mu + y) = \int\limits_{\mu - y}^{\mu + y} \frac{1}{\sqrt{2\pi} \sigma} e^{-(x-\mu)^2/2\sigma^2}\,dx = \frac{2}{\sqrt{\pi}} \int\limits_{0}^{y/\sqrt{2}\sigma} e^{-t^2}\,dt = \text{erf}\left(\frac{y}{\sqrt{2}\sigma}\right)\,.$ End of explanation """ mean = 0 sigma = 1 nsigma = 1 x = np.linspace(-5,5,100) plt.plot(x,mlab.normpdf(x,mean,sigma),color='black') xlTail = np.linspace(-5,-nsigma) xrTail = np.linspace(nsigma,5) plt.fill_between(xlTail,0,mlab.normpdf(xlTail,mean,sigma),facecolor='red') plt.fill_between(xrTail,0,mlab.normpdf(xrTail,mean,sigma),facecolor='red') plt.show() """ Explanation: Two-tailed $p$-value As for the two-tailed Gaussian, $\displaystyle p(x) = P(\left|X\right| \geq x) = 1-\text{erf}\left(\frac{x}{\sqrt{2}\sigma}\right) \equiv \text{erfc}\left(\frac{x}{\sqrt{2}\sigma}\right)$, it is seen that for $x=n \sigma$, then $\displaystyle p(n \sigma) = P(\left|X\right| \geq n \sigma) = 1-\text{erf}\left(\frac{n}{\sqrt{2}}\right)$, thus, $\displaystyle \text{erf}\left(\frac{n}{\sqrt{2}}\right) = 1 - p(n \sigma)$. 
End of explanation """ pvalues = [0.10, 0.05, 0.01] for p in pvalues: print("{} standard deviations corresponds to a p-value of {}".format(math.sqrt(2.)*special.erfcinv(p),p)) """ Explanation: However, at this point we are at an impass analytically, as the integral of a Gaussian function over a finite range has no analytical solution, and must be evaluated numerically. So using erfc, End of explanation """ for p in pvalues: print("{} standard deviations corresponds to a p-value of {}".format(math.sqrt(2.)*special.erfinv(1-p),p)) """ Explanation: and using erf, End of explanation """ plt.plot(x,mlab.normpdf(x,mean,sigma),color='black') plt.fill_between(xrTail,0,mlab.normpdf(xrTail,mean,sigma),facecolor='red') plt.show() """ Explanation: the same output is found (as required by the defintion of the functions). One-tailed $p$-value A one-sided p-value considers the probability for the data to have produced a value as extreme or grearer than the observed value on only one side of the distribution. For example, the p-value for the right tail of a Gaussian is $p(x) = \displaystyle P\left(X \geq x\right) = 1-\Phi(x)$, and the p-value for the left tail of a Gaussian is $p(-x) = \displaystyle P\left(X \leq -x\right) = \Phi(-x)$. It is seen by symmetry $p(x) = p(-x)$ and that for a normalized Gaussian a one-tailed p-vaule is 1/2 that of a two-tailed p-value. 
\begin{split} p(x) = P\left(X \geq \left|x\right|\right)&= 1 - \frac{1}{\sqrt{2\pi}}\int\limits_{-\infty}^{x} e^{-t^2/2}\,dt = 1 - \frac{1}{2}\left(1+\text{erf}\left(\frac{x}{\sqrt{2}}\right)\right)\ &= 1-\Phi(x)\ &= \frac{1}{2}\left(1-\text{erf}\left(\frac{x}{\sqrt{2}}\right)\right) = \frac{1}{2}\text{erfc}\left(\frac{x}{\sqrt{2}}\right) \end{split} End of explanation """ for p in pvalues: print("{} standard deviations corresponds to a p-value of {}".format((math.sqrt(2.)/sigma)*special.erfcinv(2*p),p)) print("") for p in pvalues: print("{} standard deviations corresponds to a p-value of {}".format((math.sqrt(2.)/sigma)*special.erfinv(1-2*p),p)) """ Explanation: thus for $x = n \sigma$, $\displaystyle \text{erf}\left(\frac{n\sigma}{\sqrt{2}}\right) = 1 - 2\,p(n \sigma)$. End of explanation """ def nSigmaTwoTailed(p): return math.sqrt(2.)*special.erfcinv(p) def nSigmaOneTailed(p, sigma): return (math.sqrt(2.)/sigma)*special.erfcinv(2*p) # this needs to be turned into a loop of some sort t = PrettyTable() t.field_names = ["p-values", "n sigma 2-tailed", "n sigma 1-tailed"] t.add_row([pvalues[0], nSigmaTwoTailed(pvalues[0]), nSigmaOneTailed(pvalues[0],sigma)]) t.add_row([pvalues[1], nSigmaTwoTailed(pvalues[1]), nSigmaOneTailed(pvalues[1],sigma)]) t.add_row([pvalues[2], nSigmaTwoTailed(pvalues[2]), nSigmaOneTailed(pvalues[2],sigma)]) print(t) """ Explanation: Summary End of explanation """ checkvalues = [0.317310507863, 0.045500263896, 0.002699796063, 0.000063342484, 0.000000573303] for p in checkvalues: print("{:0.3f} standard deviations corresponds to a p-value of {}".format(nSigmaTwoTailed(p),p)) """ Explanation: Sanity Check End of explanation """
qutip/qutip-notebooks
examples/qip-processor-DJ-algorithm.ipynb
lgpl-3.0
import numpy as np from qutip_qip.device import OptPulseProcessor, LinearSpinChain, SpinChainModel, SCQubits from qutip_qip.circuit import QubitCircuit from qutip import sigmaz, sigmax, identity, tensor, basis, ptrace qc = QubitCircuit(N=3) qc.add_gate("X", targets=2) qc.add_gate("SNOT", targets=0) qc.add_gate("SNOT", targets=1) qc.add_gate("SNOT", targets=2) # function f(x) qc.add_gate("CNOT", controls=0, targets=2) qc.add_gate("CNOT", controls=1, targets=2) qc.add_gate("SNOT", targets=0) qc.add_gate("SNOT", targets=1) qc """ Explanation: Simulating the Deutsch–Jozsa algorithm at the pulse level Author: Boxi Li (etamin1201@gmail.com) In this example, we demonstrate how to simulate simple quantum algorithms on a qauntum hardware with QuTiP. The simulators are defined in the class Processor(and its sub-classes). Processor represents a general quantum device. The interaction of the quantum systems such as qubits is defined by the control Hamiltonian. For a general introduction of pulse-level simulation, please refer to the user guide. In the following, we compile a simple three-qubit quantum circuit into control pulses on different Hamiltonian model. The Deutsch–Jozsa algorithm The Deutsch–Jozsa algorithm is the simplest quantum algorithm that offers an exponential speed-up compared to the classical one. It assumes that we have a function $f:{0,1}^n \rightarrow {0,1}$ which is either balanced or constant. Constant means that $f(x)$ is either 1 or 0 for all inputs while balanced means that $f(x)$ is 1 for half of the input domain and 0 for the other half. A more rigorous definition can be found at https://en.wikipedia.org/wiki/Deutsch-Jozsa_algorithm. The implementation of the Deutsch–Jozsa algorithm includes $n$ input qubits and 1 ancilla initialised in state $1$. At the end of the algorithm, the first $n$ qubits are measured on the computational basis. If the function is constant, the result will be $0$ for all $n$ qubits. 
If balanced, $\left|00...0\right\rangle$ will never be measured. The following example is implemented for the balanced function $f:{00,01,10,11} \rightarrow {0,1}$, where $f(00)=f(11)=0$ and $f(01)=f(10)=1$. This function is balanced, so the probability of measuring state $\left|00\right\rangle$ should be 0. End of explanation """ processor = LinearSpinChain(3) processor.load_circuit(qc); """ Explanation: Using the spin chain model First, we simulate the quantum circuit using the Hamiltonian model LinearSpinChain. The control Hamiltonians are defined in SpinChainModel. End of explanation """ processor.plot_pulses(title="Control pulse of Spin chain", figsize=(8, 4), dpi=100); """ Explanation: To quickly visualize the pulse, Processor has a method called plot_pulses. In the figure bellow, each colour represents the pulse sequence of one control Hamiltonian in the system as a function of time. In each time interval, the pulse remains constant. End of explanation """ basis00 = basis([2,2], [0,0]) psi0 = basis([2,2,2], [0,0,0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) """ Explanation: Because for the spin chain model interaction only exists between neighbouring qubits, SWAP gates are added between and after the first CNOT gate, swapping the first two qubits. The SWAP gate is decomposed into three iSWAP gates, while the CNOT is decomposed into two iSWAP gates plus additional single-qubit corrections. Both the Hadamard gate and the two-qubit gates need to be decomposed to native gates (iSWAP and rotation on the $x$ and $z$ axes). The compiled coefficients are square pulses and the control coefficients on $\sigma_z$ and $\sigma_x$ are also different, resulting in different gate times. 
Without decoherence End of explanation """ processor.t1 = 100 processor.t2 = 30 psi0 = basis([2,2,2], [0,0,0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) """ Explanation: With decoherence End of explanation """ setting_args = {"SNOT": {"num_tslots": 6, "evo_time": 2}, "X": {"num_tslots": 1, "evo_time": 0.5}, "CNOT": {"num_tslots": 12, "evo_time": 5}} processor = OptPulseProcessor( # Use the control Hamiltonians of the spin chain model. num_qubits=3, model=SpinChainModel(3, setup="linear")) processor.load_circuit( # Provide parameters for the algorithm qc, setting_args=setting_args, merge_gates=False, verbose=True, amp_ubound=5, amp_lbound=0); processor.plot_pulses(title="Control pulse of OptPulseProcessor", figsize=(8, 4), dpi=100); """ Explanation: Using the optimal control module This feature integrated into the sub-class OptPulseProcessor which use methods in the optimal control module to find the optimal pulse sequence for the desired gates. It can find the optimal pulse either for the whole unitary evolution or for each gate. Here we choose the second option. End of explanation """ basis00 = basis([2,2], [0,0]) psi0 = basis([2,2,2], [0,0,0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) """ Explanation: For the optimal control model, we use the GRAPE algorithm, where control pulses are piece-wise constant functions. We provide the algorithm with the same control Hamiltonian model used for the spin chain model. In the compiled optimal signals, all controls are active (non-zero pulse amplitude) during most of the execution time. 
We note that for identical gates on different qubits (e.g., Hadamard), each optimized pulse is different, demonstrating that the optimized solution is not unique, and there are further constraints one could apply, such as adaptions for the specific hardware. Without decoherence End of explanation """ processor.t1 = 100 processor.t2 = 30 psi0 = basis([2,2,2], [0,0,0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) """ Explanation: With decoherence End of explanation """ processor = SCQubits(num_qubits=3) processor.load_circuit(qc); processor.plot_pulses(title="Control pulse of SCQubits", figsize=(8, 4), dpi=100); """ Explanation: We can see that under noisy evolution their is a none zero probability of measuring state 00. Using the superconducting qubits model Below, we simulate the same quantum circuit using one sub-class LinearSpinChain. It will find the pulse based on the Hamiltonian available on a quantum computer of the linear spin chain system. Please refer to the notebook of the spin chain model for more details. End of explanation """ basis00 = basis([3, 3], [0, 0]) psi0 = basis([3, 3, 3], [0, 0, 0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) """ Explanation: For the superconducting-qubit processor, the compiled pulses have a Gaussian shape. This is crucial for superconducting qubits because the second excited level is only slightly detuned from the qubit transition energy. A smooth pulse usually prevents leakage to the non-computational subspace. Similar to the spin chain, SWAP gates are added to switch the zeroth and first qubit and one SWAP gate is compiled to three CNOT gates. The control $ZX^{21}$ is not used because there is no CNOT gate that is controlled by the second qubit and acts on the first one. 
Without decoherence End of explanation """ processor.t1 = 50.e3 processor.t2 = 20.e3 psi0 = basis([3, 3, 3], [0, 0, 0]) result = processor.run_state(init_state=psi0) print("Probability of measuring state 00:") print(np.real((basis00.dag() * ptrace(result.states[-1], [0,1]) * basis00)[0,0])) import qutip_qip print("qutip-qip version:", qutip_qip.version.version) from qutip.ipynbtools import version_table version_table() """ Explanation: With decoherence End of explanation """
TomTranter/OpenPNM
examples/simulations/Capillary Pressure Curves.ipynb
mit
import numpy as np import openpnm as op np.random.seed(10) ws = op.Workspace() ws.settings["loglevel"] = 40 np.set_printoptions(precision=5) """ Explanation: Simulating capillary pressure curves using Porosimetry Start by importing OpenPNM. End of explanation """ pn = op.network.Cubic(shape=[20, 20, 20], spacing=0.00005) """ Explanation: Next, create a simple cubic network with 20 pores per side and a spacing of 50 um End of explanation """ geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts) """ Explanation: The network object only contains topological and spatial information. We need to assign some pore and throat sizes, which can be conveniently done by creating a StickAndBall geometry object. End of explanation """ hg = op.phases.Mercury(network=pn, name='mercury') """ Explanation: The StickAndBall class assigns pores as random values between 0.2 and 0.7 of the lattice spacing, then finds all other geometric information assuming the pores are spheres and the throats are cylinders. Next we need to create a phase object which contains the thermo-physical properties of the invading fluid, such as surface tension: End of explanation """ phys = op.physics.GenericPhysics(network=pn, phase=hg, geometry=geo) phys.add_model(propname='throat.entry_pressure', model=op.models.physics.capillary_pressure.washburn) """ Explanation: Lastly, we need to compute the capillary entry pressure of the throats in the network. 
The OpenPNM models library has a few common capillary pressure models, including the Washburn equation: $$ P_C = \frac{-2\sigma cos(\theta)}{R_T} $$ To use this model we should create a physics object, and use the add_model method as follows: End of explanation """ print(pn.project.grid) """ Explanation: Note that we can inspect our project to see the interrelationships between all the object with: End of explanation """ mip = op.algorithms.Porosimetry(network=pn, phase=hg) mip.set_inlets(pores=pn.pores('left')) mip.run() """ Explanation: The grid shows us that phys_01 is associated with the mercury phase, and geo_01. This means that when calculating the throat entry pressure using the Washburn equation above, it will fetch the throat radius ($R_T$) from geo_01 and the surface tension and contact angle from mercury. Now that all the required objects are setup, we can perform the capillary pressure curve simulation. OpenPNM contains both InvasionPercolation and OrdinaryPercolation classes. The porosimetry experiment is ordinary percolation with access limitations. This means that a fixed pressure is applied to the invading fluid and all pores and throat that can be penetrated at that pressure are set as possibly invaded, then pores and throats are set to invaded only if they are physically connected to the source of invading fluid directly or though a path of already invading pores and throats. Since this process is simulated very frequently, OpenPNM includes a class called Porosimetry that is a subclass of OrdinaryPercolation, with several useful methods added. 
It's use is illustrated below: End of explanation """ #NBVAL_IGNORE_OUTPUT fig = mip.plot_intrusion_curve() Pc, Snwp = mip.get_intrusion_data() print(Pc, Snwp) """ Explanation: The meaning of this warning message will be analyzed below, but first let's take a quick look at the result using the built-in plotting method: End of explanation """ #NBVAL_IGNORE_OUTPUT import matplotlib.pyplot as plt fig = plt.plot(Pc, Snwp, 'r*-') """ Explanation: With the above data in the form of arrays it's possible to cut&paste into Excel, or to use a Python plotting package such as Matplotlib to make plots with your desired style: End of explanation """
benkoo/fast_ai_coursenotes
deeplearning1/nbs/statefarm.ipynb
apache-2.0
from theano.sandbox import cuda cuda.use('gpu0') %matplotlib inline from __future__ import print_function, division path = "data/state/" #path = "data/state/sample/" import utils; reload(utils) from utils import * from IPython.display import FileLink batch_size=64 """ Explanation: Enter State Farm End of explanation """ batches = get_batches(path+'train', batch_size=batch_size) val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False) (val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames, test_filenames) = get_classes(path) """ Explanation: Setup batches End of explanation """ trn = get_data(path+'train') val = get_data(path+'valid') save_array(path+'results/val.dat', val) save_array(path+'results/trn.dat', trn) val = load_array(path+'results/val.dat') trn = load_array(path+'results/trn.dat') """ Explanation: Rather than using batches, we could just import all the data into an array to save some processing time. (In most examples I'm using the batches, however - just because that's how I happened to start out.) 
End of explanation """ def conv1(batches): model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Convolution2D(32,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D((3,3)), Convolution2D(64,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D((3,3)), Flatten(), Dense(200, activation='relu'), BatchNormalization(), Dense(10, activation='softmax') ]) model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) model.optimizer.lr = 0.001 model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) return model model = conv1(batches) """ Explanation: Re-run sample experiments on full dataset We should find that everything that worked on the sample (see statefarm-sample.ipynb), works on the full dataset too. Only better! Because now we have more data. So let's see how they go - the models in this section are exact copies of the sample notebook models. Single conv layer End of explanation """ gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05, shear_range=0.1, channel_shift_range=20, width_shift_range=0.1) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = conv1(batches) model.optimizer.lr = 0.0001 model.fit_generator(batches, batches.nb_sample, nb_epoch=15, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) """ Explanation: Interestingly, with no regularization or augmentation we're getting some reasonable results from our simple convolutional model. So with augmentation, we hopefully will see some very good results. 
Data augmentation End of explanation """ gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05, shear_range=0.1, channel_shift_range=20, width_shift_range=0.1) batches = get_batches(path+'train', gen_t, batch_size=batch_size) model = Sequential([ BatchNormalization(axis=1, input_shape=(3,224,224)), Convolution2D(32,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D(), Convolution2D(64,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D(), Convolution2D(128,3,3, activation='relu'), BatchNormalization(axis=1), MaxPooling2D(), Flatten(), Dense(200, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(200, activation='relu'), BatchNormalization(), Dropout(0.5), Dense(10, activation='softmax') ]) model.compile(Adam(lr=10e-5), loss='categorical_crossentropy', metrics=['accuracy']) model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) model.optimizer.lr=0.001 model.fit_generator(batches, batches.nb_sample, nb_epoch=10, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) model.optimizer.lr=0.00001 model.fit_generator(batches, batches.nb_sample, nb_epoch=10, validation_data=val_batches, nb_val_samples=val_batches.nb_sample) """ Explanation: I'm shocked by how good these results are! We're regularly seeing 75-80% accuracy on the validation set, which puts us into the top third or better of the competition. With such a simple model and no dropout or semi-supervised learning, this really speaks to the power of this approach to data augmentation. Four conv/pooling pairs + dropout Unfortunately, the results are still very unstable - the validation accuracy jumps from epoch to epoch. Perhaps a deeper model with some dropout would help. 
End of explanation """ vgg = Vgg16() model=vgg.model last_conv_idx = [i for i,l in enumerate(model.layers) if type(l) is Convolution2D][-1] conv_layers = model.layers[:last_conv_idx+1] conv_model = Sequential(conv_layers) (val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames, test_filenames) = get_classes(path) conv_feat = conv_model.predict_generator(batches, batches.nb_sample) conv_val_feat = conv_model.predict_generator(val_batches, val_batches.nb_sample) conv_test_feat = conv_model.predict_generator(test_batches, test_batches.nb_sample) save_array(path+'results/conv_val_feat.dat', conv_val_feat) save_array(path+'results/conv_test_feat.dat', conv_test_feat) save_array(path+'results/conv_feat.dat', conv_feat) conv_feat = load_array(path+'results/conv_feat.dat') conv_val_feat = load_array(path+'results/conv_val_feat.dat') conv_val_feat.shape """ Explanation: This is looking quite a bit better - the accuracy is similar, but the stability is higher. There's still some way to go however... Imagenet conv features Since we have so little data, and it is similar to imagenet images (full color photos), using pre-trained VGG weights is likely to be helpful - in fact it seems likely that we won't need to fine-tune the convolutional layer weights much, if at all. So we can pre-compute the output of the last convolutional layer, as we did in lesson 3 when we experimented with dropout. (However this means that we can't use full data augmentation, since we can't pre-compute something that changes every image.) 
End of explanation """ def get_bn_layers(p): return [ MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]), Flatten(), Dropout(p/2), Dense(128, activation='relu'), BatchNormalization(), Dropout(p/2), Dense(128, activation='relu'), BatchNormalization(), Dropout(p), Dense(10, activation='softmax') ] p=0.8 bn_model = Sequential(get_bn_layers(p)) bn_model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, nb_epoch=1, validation_data=(conv_val_feat, val_labels)) bn_model.optimizer.lr=0.01 bn_model.fit(conv_feat, trn_labels, batch_size=batch_size, nb_epoch=2, validation_data=(conv_val_feat, val_labels)) bn_model.save_weights(path+'models/conv8.h5') """ Explanation: Batchnorm dense layers on pretrained conv layers Since we've pre-computed the output of the last convolutional layer, we need to create a network that takes that as input, and predicts our 10 classes. Let's try using a simplified version of VGG's dense layers. End of explanation """ gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05, shear_range=0.1, channel_shift_range=20, width_shift_range=0.1) da_batches = get_batches(path+'train', gen_t, batch_size=batch_size, shuffle=False) """ Explanation: Looking good! Let's try pre-computing 5 epochs worth of augmented data, so we can experiment with combining dropout and augmentation on the pre-trained model. Pre-computed data augmentation + dropout We'll use our usual data augmentation parameters: End of explanation """ da_conv_feat = conv_model.predict_generator(da_batches, da_batches.nb_sample*5) save_array(path+'results/da_conv_feat2.dat', da_conv_feat) da_conv_feat = load_array(path+'results/da_conv_feat2.dat') """ Explanation: We use those to create a dataset of convolutional features 5x bigger than the training set. 
End of explanation """ da_conv_feat = np.concatenate([da_conv_feat, conv_feat]) """ Explanation: Let's include the real training data as well in its non-augmented form. End of explanation """ da_trn_labels = np.concatenate([trn_labels]*6) """ Explanation: Since we've now got a dataset 6x bigger than before, we'll need to copy our labels 6 times too. End of explanation """ def get_bn_da_layers(p): return [ MaxPooling2D(input_shape=conv_layers[-1].output_shape[1:]), Flatten(), Dropout(p), Dense(256, activation='relu'), BatchNormalization(), Dropout(p), Dense(256, activation='relu'), BatchNormalization(), Dropout(p), Dense(10, activation='softmax') ] p=0.8 bn_model = Sequential(get_bn_da_layers(p)) bn_model.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) """ Explanation: Based on some experiments the previous model works well, with bigger dense layers. End of explanation """ bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, nb_epoch=1, validation_data=(conv_val_feat, val_labels)) bn_model.optimizer.lr=0.01 bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, nb_epoch=4, validation_data=(conv_val_feat, val_labels)) bn_model.optimizer.lr=0.0001 bn_model.fit(da_conv_feat, da_trn_labels, batch_size=batch_size, nb_epoch=4, validation_data=(conv_val_feat, val_labels)) """ Explanation: Now we can train the model as usual, with pre-computed augmented data. End of explanation """ bn_model.save_weights(path+'models/da_conv8_1.h5') """ Explanation: Looks good - let's save those weights. End of explanation """ val_pseudo = bn_model.predict(conv_val_feat, batch_size=batch_size) """ Explanation: Pseudo labeling We're going to try using a combination of pseudo labeling and knowledge distillation to allow us to use unlabeled data (i.e. do semi-supervised learning). For our initial experiment we'll use the validation set as the unlabeled data, so that we can see that it is working without using the test set. 
At a later date we'll try using the test set. To do this, we simply calculate the predictions of our model... End of explanation """ comb_pseudo = np.concatenate([da_trn_labels, val_pseudo]) comb_feat = np.concatenate([da_conv_feat, conv_val_feat]) """ Explanation: ...concatenate them with our training labels... End of explanation """ bn_model.load_weights(path+'models/da_conv8_1.h5') bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, nb_epoch=1, validation_data=(conv_val_feat, val_labels)) bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, nb_epoch=4, validation_data=(conv_val_feat, val_labels)) bn_model.optimizer.lr=0.00001 bn_model.fit(comb_feat, comb_pseudo, batch_size=batch_size, nb_epoch=4, validation_data=(conv_val_feat, val_labels)) """ Explanation: ...and fine-tune our model using that data. End of explanation """ bn_model.save_weights(path+'models/bn-ps8.h5') """ Explanation: That's a distinct improvement - even although the validation set isn't very big. This looks encouraging for when we try this on the test set. End of explanation """ def do_clip(arr, mx): return np.clip(arr, (1-mx)/9, mx) keras.metrics.categorical_crossentropy(val_labels, do_clip(val_preds, 0.93)).eval() conv_test_feat = load_array(path+'results/conv_test_feat.dat') preds = bn_model.predict(conv_test_feat, batch_size=batch_size*2) subm = do_clip(preds,0.93) subm_name = path+'results/subm.gz' classes = sorted(batches.class_indices, key=batches.class_indices.get) submission = pd.DataFrame(subm, columns=classes) submission.insert(0, 'img', [a[4:] for a in test_filenames]) submission.head() submission.to_csv(subm_name, index=False, compression='gzip') FileLink(subm_name) """ Explanation: Submit We'll find a good clipping amount using the validation set, prior to submitting. 
End of explanation """ for l in get_bn_layers(p): conv_model.add(l) for l1,l2 in zip(bn_model.layers, conv_model.layers[last_conv_idx+1:]): l2.set_weights(l1.get_weights()) for l in conv_model.layers: l.trainable =False for l in conv_model.layers[last_conv_idx+1:]: l.trainable =True comb = np.concatenate([trn, val]) gen_t = image.ImageDataGenerator(rotation_range=8, height_shift_range=0.04, shear_range=0.03, channel_shift_range=10, width_shift_range=0.08) batches = gen_t.flow(comb, comb_pseudo, batch_size=batch_size) val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False) conv_model.compile(Adam(lr=0.00001), loss='categorical_crossentropy', metrics=['accuracy']) conv_model.fit_generator(batches, batches.N, nb_epoch=1, validation_data=val_batches, nb_val_samples=val_batches.N) conv_model.optimizer.lr = 0.0001 conv_model.fit_generator(batches, batches.N, nb_epoch=3, validation_data=val_batches, nb_val_samples=val_batches.N) for l in conv_model.layers[16:]: l.trainable =True conv_model.optimizer.lr = 0.00001 conv_model.fit_generator(batches, batches.N, nb_epoch=8, validation_data=val_batches, nb_val_samples=val_batches.N) conv_model.save_weights(path+'models/conv8_ps.h5') conv_model.load_weights(path+'models/conv8_da.h5') val_pseudo = conv_model.predict(val, batch_size=batch_size*2) save_array(path+'models/pseudo8_da.dat', val_pseudo) """ Explanation: This gets 0.534 on the leaderboard. The "things that didn't really work" section You can safely ignore everything from here on, because they didn't really help. 
Finetune some conv layers too End of explanation """ drivers_ds = pd.read_csv(path+'driver_imgs_list.csv') drivers_ds.head() img2driver = drivers_ds.set_index('img')['subject'].to_dict() driver2imgs = {k: g["img"].tolist() for k,g in drivers_ds[['subject', 'img']].groupby("subject")} def get_idx(driver_list): return [i for i,f in enumerate(filenames) if img2driver[f[3:]] in driver_list] drivers = driver2imgs.keys() rnd_drivers = np.random.permutation(drivers) ds1 = rnd_drivers[:len(rnd_drivers)//2] ds2 = rnd_drivers[len(rnd_drivers)//2:] models=[fit_conv([d]) for d in drivers] models=[m for m in models if m is not None] all_preds = np.stack([m.predict(conv_test_feat, batch_size=128) for m in models]) avg_preds = all_preds.mean(axis=0) avg_preds = avg_preds/np.expand_dims(avg_preds.sum(axis=1), 1) keras.metrics.categorical_crossentropy(val_labels, np.clip(avg_val_preds,0.01,0.99)).eval() keras.metrics.categorical_accuracy(val_labels, np.clip(avg_val_preds,0.01,0.99)).eval() """ Explanation: Ensembling End of explanation """
agmarrugo/sensors-actuators
notebooks/Ex6_4_evaluation_force_sensor.ipynb
mit
import matplotlib.pyplot as plt import numpy as np %matplotlib inline F = np.array([50,100,150,200,250,300,350,400,450,500,550,600,650]) R = np.array([500,256.4,169.5,144.9,125,100,95.2,78.1,71.4,65.8,59.9,60,55.9]) plt.plot(R,F,'*') plt.ylabel('R [Omega]') plt.xlabel('Force [N]') plt.show() """ Explanation: Evaluation of a force sensor Andrés Marrugo, PhD Universidad Tecnológica de Bolívar A force sensor (FSR) is evaluated experimentally. To do so, the resistance of the sensor is measured for a range of forces as follows: Calculate the sensitivity of the sensor throughout its range. | | | | | | | | | | | | | | | | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | | F [N] |50|100|150|200|250|300|350|400|450|500|550|600|650| | R [$\Omega$] |500|256.4|169.5|144.9|125|100|95.2|78.1|71.4|65.8|59.9|60|55.9| | | | | | | | | | | | | | | | End of explanation """ C = 1/R plt.plot(F,C,'*') plt.ylabel('C [Siemens]') plt.xlabel('Force [N]') plt.show() # polyfit computes the coefficients a and b of degree=1 a,b = np.polyfit(F,C,1) print('The coefficients are a =',a,'b =',b) C1 = a*F+b plt.plot(C1,F,':b',label='Fitted line') plt.plot(C,F,'*') plt.ylabel('C [Siemens]') plt.xlabel('Force [N]') plt.show() """ Explanation: Sensitivity is the slope of the resistance versus force curve and is clearly a nonlinear quantity. However, we recall that force resistive sensors have a linear relation between force ($F$) and conductance ($1/R$). Therefore it is simpler to first calculate the conductance $C$. End of explanation """
DaveBackus/Data_Bootcamp
Code/Lab/Regression_statsmodels_LeBlanc.ipynb
mit
import pandas as pd #This is Pandas, we'll call it 'pd' for short import statsmodels.formula.api as smf #This is the linear regression program """ The following reads in the .csv file and saves it as a dataframe we call 'df'. You can read in other files besides .csv, too. For example, .xls can be read in using pd.read_excel and Stata files as pd.read_stata, etc. """ df = pd.read_csv('http://people.stern.nyu.edu/wgreene/Econometrics/productivity.csv') df = df.sort_values('YR') #Ignore this, I'm just re-sorting the dataframe by year df.index = range(1,len(df) + 1) # This prints the first 10 rows of the matrix df.head(10) """ Explanation: Linear Regression in Python There are two main packages you can use to run basic linear regression models in Python: statsmodels and scikit-learn. I'll focus here on statsmodels. We'll also use the data management package Pandas. It allows the user to organize the data into matrices (called "dataframes") that mesh well with the statsmodels package. There are two different versions of statsmodels you can import: a. statsmodels.formula.api b. statsmodels.api I'll focus on statsmodels.formulas.api, which was designed to be similar in style to the regression programs in R. It also works nicely with Pandas. Design I'll use some random panel dataset I found on Professor Greene's website. It has both cross-sectional (U.S. states) and time-series (years 1970-1986) dimensions. 
Here are the details he provides: Statewide Capital Productivity Data, lower 48 states, 17 years Variables in the file are STATE = state name ST_ABB = state abbreviation YR = year, 1970,...,1986 P_CAP = public capital HWY = highway capital WATER = water utility capital UTIL = utility capital PC = private capital GSP = gross state product EMP = employment UNEMP = unemployment rate I'll haphazardly run the following regression: $$\text{GSP}{i,t} = \alpha + \beta_1 \text{UNEMP}{i,t} + \beta_2 \text{P_CAP}{i,t} +\beta_3 \text{PC}{i,t} + \epsilon_{i,t}, \quad i = 1, ..., 48 \quad t = 1970, ..., 1986 $$ For more info on the basics of Python, definitely take a look at Professor Backus's Data Bootcamp book and course End of explanation """ model = smf.ols(formula = 'GSP ~ UNEMP + P_CAP + PC', data = df).fit() #Fits the model print(model.summary()) #Prints off a nice summary """ Explanation: Above is just a snippet of our dataframe, df. It organizes the data into column vectors corresponding to the variable in the dataset. We can also select different subsets of the matrix, for example: df['P_CAP'] returns just the variable P_CAP, while df[df['YR']==1970] returns only rows corresponding to the year 1970. Simple OLS over all observations in our dataframe: Here's the basic idea behind linear regression code in statsmodels: model = smf.ols(formula = 'Y ~ Var1 + Var2 + Var3', data = dataframe).fit() Key things to notice about the above is that The 'data = ' command tells statsmodels which Pandas dataframe to pull the variables Y, Var1, Var2, Var3 (etc.) from. The 'formula = ' command specifies the linear regression model you are trying to run. Y in this case is the dependent variable and the dependent variables are Var1, Var2, and Var3. Notice that the dependent and independent variables are seperated by a '~' and not an equal sign. 
The variables 'Y', 'Var1', 'Var2', and 'Var3' correspond to the column names of the variables in our dataframe So, in our case, we will have formula = 'GSP ~ UNEMP + P_CAP + PC' and data = df End of explanation """ model1970 = smf.ols(formula='GSP ~ UNEMP + P_CAP + PC', data=df[df['YR']==1970]).fit() #Fits the model """ Notice that we've isolated a subset of the original dataframe (in this case the obs. corresponding to 1970) by specificying 'data = df[df['YR']==1970]]' in the regression forumula above. """ print(model1970.summary()) #Prints off a nice summary """ Explanation: Simple OLS over a subset of the rows in our dataframe: We can run the regression only over the observations corresponding to the year 1970 (for example) by specifying data = df[df['YR']==1970] in the code above. 'df[df['YR']==1970]' simply takes our original dataframe 'df' and returns only the rows that correspond to the year 1970. We could also do this by first specifying a whole new dataframe, i.e. df1970 = df[df['YR']==1970] and then calling this new dataframe in the regression code, i.e. data = df1970 End of explanation """ """ Below are a few examples of calling speficic elements of our regression """ model.params #Produces all coefficient estimates model.params['P_CAP'] #Produces coefficient estimate for the regressor 'P_CAP' model.bse #Standard Errors for all regressors model.bse['P_CAP'] #Standard Errors for regressor 'P_CAP' model.pvalues #P-values for all regressors model.pvalues['P_CAP'] #P-values for regressor 'P_CAP' r_sqr = model.rsquared #R-squared print('The R^2 for our regression is',r_sqr) """ Explanation: Calling only select elements of the model (coefficents, standard errors, etc.): If you type in, model. and then hit the 'tab' key, a box will pop up showing all the different items you can call from your regression. (Remember that 'model' is what we saved our regression as, so it will different depending on what name you give it. i.e. 
if we saved our regression as 'regression_results' then we would type 'regression_results'.) End of explanation """ # Method 1: [action(x) for x in range(a,b)] fitted_models = [smf.ols(formula='GSP ~ UNEMP + P_CAP + PC', data=df[df['YR']==x]).fit() for x in range(1970,1987)] # Method 2: for item in range(a,b): fitted_models = list() for item in range(1970,1987): fitted_models.append(smf.ols(formula='GSP ~ UNEMP + P_CAP + PC', data=df[df['YR']==item]).fit()) #We can then call/store the coefficients/standard errors/etc in the same way: P_CAPcoefs = [fitted_models[x].params['P_CAP'] for x in range(17)] P_CAPpval = [fitted_models[x].pvalues['P_CAP'] for x in range(17)] P_CAPse = [fitted_models[x].bse['P_CAP'] for x in range(17)] df_PCAPResults = pd.DataFrame({'P_Cap':P_CAPcoefs,'Std. Errors':P_CAPse}, index = list(range(1970,1987))) df_PCAPResults """ Explanation: Iterating over multiple regression models and storing the results: Loops in Python are pretty standard if you're familiar with running loop arguments in other programming languages. Below are two examples of different ways you can loop over multiple regression models. End of explanation """
MaxYousif/Data-Science-MSc-Projects
SVM Binary Classification.ipynb
mit
#Import Relevant Modules and Packages import pandas as pd import numpy as np from sklearn.svm import SVC from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn import preprocessing from sklearn.model_selection import GridSearchCV from sklearn.decomposition import PCA from scipy import stats from sklearn.feature_selection import VarianceThreshold #see all rows of dataframe #pd.set_option('display.max_rows', 500) #Load the complete training data set training_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Training Data Set.csv", header=0, index_col=0) #Observe the original training data training_data.head() #quantify class counts of original training data training_data.prediction.value_counts() """ Explanation: Introduction MSc Machine Learning Assignment - Classification task. Private Kaggle Competition: "Are you sure Brighton's seagull is not a man-made object?" The aim of the assignment was to build a classifier able to distinguish between man-made and not man-made objects. Each data instance was represented by a 4608 dimensional feature vector. This vector was a concatenation of 4096 dimensional deep Convolutional Neural Networks (CNNs) features extracted from the fc7 activation layer of CaffeNet and 512 dimensional GIST features. Three Additional pieces of information were granted: confidence label for each training instance, the test data class proportions, and additional training data containing missing values. This Notebook contains the final workflow employed, to produce the model used to make the final predictions. The original Notebook contained a lot of trial and error methods, such as fine tuning the range of parameters fitted in the model. Some of these details from the original, and rather messy Notebook have been excluded here. 
Thus, this Notebook only intends to show the code for the key processes leading up to the development of the final model. In addition, this notebook contains a report/commentary documenting the theory concerning the steps of the workflow. The theory is adopted from the Literature, and is referenced appropriately. Report also available in PDF, contact me for request. 1. Approach 1.1) Introduction of SVM The approach of choice here was the Support Vector Machine (SVM). SVMs were pioneered in the late seventies [1]. SVMs are supervised learning models, which are extensively used for classification [2] and regression tasks [3]. In this context, SVM was employed for a binary classification task. In layman’s terms, the basic premise of SVM for classification tasks is to find the optimal separating hyperplane (also called the decision boundary) between classes, through maximizing the margin between the data points closest to the decision boundary and the decision boundary itself. The points closest to the decision boundary are termed support vectors. The reason why the margin is maximized is to improve generalisation of the decision boundary; it is likely that many decision boundaries exist, but the decision boundary that maximizes the margin increases the likelihood that future outliers will be correctly classified [4]. This intuition seems relatively simple, but is complicated by ‘soft’ and ‘hard’ margins. A hard margin is only applicable when the data set is linearly separable. A soft margin is applicable when the data set is not linearly separable. Essentially, a soft margin is aware of future misclassifications due to the data not being linearly separable, thus tolerates misclassifications using a penalty term. On the contrary, a hard margin does not tolerate misclassifications. For a hard margin, misclassifications are dealt with by minimizing the margin. These concepts will be formalized below. 
Assume the problem of binary classification on a dataset ${(x_1, y_1), (x_2, y_2), ..., (x_n, y_n)}$ , where $x_i$ $\in$ $R^d$, i.e. $x_i$ is a data point represented as a d-dimensional vector, and $y_i$ $\in$ ${-1, 1}$ , which represents the class label of that data point, for $i= 1, 2, ..., n$ . A better optimal separation can be found by first transforming the data into a higher dimensional feature space by a non-linear mapping function $\phi$ [2]. This $\phi$ is also referred to as the ‘kernel’. A possible decision boundary can then be represented by $w \cdot \phi(x) + b = 0$ , where $w$ is the weight vector orthogonal to the decision boundary and $b$ is an intercept term. It follows that, if the data set is linearly separable, then the decision boundary that maximizes the margin can be found by solving the following optimization: $\min (\frac{1}{2} w \cdot w) $ under the constraint $y_i (w \cdot \phi(x_i) + b) \ge 1 $ where $i = 1, 2, ..., n$ . This encapsulates the concept of a ‘hard’ margin. However, in the case of non-linearly separable data, the above constraint has to be relaxed by the introduction of a slack variable $\varepsilon$ . The optimization problem then becomes: $\min(\frac{1}{2} w \cdot w + C \sum_{i=1}^n \varepsilon_i$) such that $y_i (w \cdot \phi(x_i) + b) \ge 1 - \varepsilon_i $ where $i = 1, 2, ..., n$ and $\varepsilon_i \ge 0$. The $\sum_{i=1}^n \varepsilon_i$ term can be interpreted as the misclassification cost. This new objective function comprises two aims. The first aim still remains to maximize the margin, and the second aim is to reduce the number of misclassifications. The trade-off between these two aims is controlled by the parameter $C$. This encapsulates the concept of a ‘soft’ margin. $C$ is coined the regularization parameter. A high value of $C$ increases the penalty for misclassifications, thus places more emphasis on the second goal. 
A large misclassification penalty enforces the model to reduce the number of misclassifications. Hence, a high enough value of $C$ could induce over-fitting. A small $C$ decreases the penalty for misclassifications, thus places more emphasis on the first goal. A small classification penalty enforces the model to tolerate classifications more readily. Hence, a small enough value of $C$ could induce under-fitting. The SVM classifier is trained using the hinge-loss as the loss function [5]. 1.2) Suitability of SVM SVM is a popular technique because of its solid mathematical background, high generalisation capability, ability to find global solutions and ability to find solutions that are non-linear [6]. However, SVMs can be impacted by data sets that do not have equal class balances. The methods for dealing with this will be discussed in Section 2.1 of this report. Thus, SVMs are still applicable to data sets with class imbalances, such as the data set provided here. It has been argued that SVMs show superior performance than other techniques when analysis is conducted on high- dimensional data [7]. The dataset here, even after pre- processing, has many dimensions. Thus, the use of SVM in this context is justified. Another downfall of SVM is the dependency on feature scaling; the performance of an SVM can be highly impacted by the selection of the feature scaling method. However, feature scaling is an important pre-processing technique. One encouraging reason for employing feature scaling is that the gradient descent algorithm converges much faster with feature scaling than without feature scaling. In particular, feature scaling reduces the time it takes for the SVM to find support vectors [8]. 2. Data Preparation Before Pre-Processing This section will cover how the training data for the final model was prepared. Several additonal pieces of information were provided in the assignment outline. 
This section will demonstrate how these strands of information were incorporated, if they were incorporated at all. End of explanation """ #Load additional training data add_training_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Additional Training Data Set .csv", header=0, index_col=0) #observe additional training data add_training_data #quantify class counts of additional training data add_training_data.prediction.value_counts() #find number of NAs for each column for additional training data add_training_data.isnull().sum() #concatenate original training data with additional training data full_training_data_inc = pd.concat([training_data, add_training_data]) #observe concatenated training data full_training_data_inc """ Explanation: 2.1) Dealing with Missing Values – Imputation Imputation is the act of replacing missing data values in a data set with meaningful values. Simply removing rows with missing feature values is bad practice if the data is scarce, as a lot of information could be lost. In addition, deletion methods can introduce bias [9]. The incomplete additional training data was combined with the complete original training data because the complete original data was scarce in number. However, the incomplete additional training data was missing values, therefore imputation was appropriate, if not required. Two methods of imputation were employed. The first method of imputation employed was imputation via feature means. However, this method has been heavily criticized. In particular, it has been hypothesized that imputation via mean introduces bias and underestimates variability [10]. The second method of imputation employed was k- Nearest-Neighbours (kNN) [11]. This is a technique, which is part of hot-deck imputation techniques [12], where missing feature values are filled from data points that are similar, or geometrically speaking, points that are closest in distance. 
This method is more appropriate than using the mean imputation, given the flaws of feature mean imputation. Therefore, kNN was the imputation method used to build the final model. The kNN implementation was found in the ‘fancyimpute’ package [13]. The k of kNN can be considered a parameter that needs to be chosen carefully. Fortunately, the literature provides some direction on this. The work of [14] suggests that kNN with 3 nearest-neighbours is the best for the trade-off between imputation error and preservation of data structure. In summary, kNN was employed for imputation, and k was set to 3. This section will cover how the incomplete additional training data set was incorporated to develop a larger training data set. In particular, the additional training data was combined with the original training data. The additonal training data was incomplete, with several NaN entries. Thus, imputation was performed to replace NaN entries with meaningful values. End of explanation """ #imputation via KNN from fancyimpute import KNN knn_trial = full_training_data_inc knn_trial complete_knn = KNN(k=3).complete(knn_trial) #convert imputed matrix back to dataframe for visualisation and convert 'prediction' dtype to int complete_knn_df = pd.DataFrame(complete_knn, index=full_training_data_inc.index, columns=full_training_data_inc.columns) full_training_data = complete_knn_df full_training_data.prediction = full_training_data.prediction.astype('int') full_training_data #quantify class counts for full training data full_training_data.prediction.value_counts() """ Explanation: A couple of imputation methods were tried in the original Notebook: Imputation using the column (feature) mean Imputation with K-Nearest Neighbours The most effective and theoretically advocated method producing the best results was the second method; imputation using the K-Nearest Neighbours. Note: the fancyimpute package may need installing before running. 
K was set to 3 here, see report for justification. End of explanation """ #Load confidence annotations confidence_labels = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Annotation Confidence .csv", header=0, index_col=0) #quantify confidence labels (how many are 1, how many are 0.66) print(confidence_labels.confidence.value_counts()) #observe confidence annotations confidence_labels #adding confidence of label column to imputed full training data set full_train_wcl = pd.merge(full_training_data, confidence_labels, left_index=True, right_index=True) full_train_wcl """ Explanation: 2.2) Dealing with Confidence Labels One approach employed to incorporate the confidence labels was to use the confidence label of each instance as the corresponding sample weight for the instance. Theoretically, a confidence label of smaller than 1 would reduce the C parameter, which results in a lower penalty for misclassification of an instance whose label is not known with certainty. However, the implementation of this did not follow the theory; introducing the sample weights reduced the overall accuracy of the model. This matter was complicated more by the fact that samples generated from over-sampling via SMOTE had to be assigned a confidence label, which is difficult to determine objectively. Thus, it was decided that only data instances with a confidence label of 1 should be retained in the training data. This obviously leads to a massive loss of information. However, after removing instances, which do not have a confidence label of 1, 1922 training instances remained, which can be assumed to be a reasonable training data size. After truncating the data set, the procedure described in Section 2.2 was repeated for the truncated training data. In summary, the training data was truncated to only include instances that have a confidence label of 1. 
The minority class of the training data was over-sampled using SMOTE to balance the class split. Class weights were then applied during the training of the SVM to ensure that the model was more sensitive to correctly classifying the majority class of the test data. This section will cover how the confidence labels, one of the additional pieces of information provided in the assignment outline, were incorporated into the final training data set. End of explanation """ #only keep data instance with confidence label = 1 conf_full_train = full_train_wcl.loc[full_train_wcl['confidence'] == 1] conf_full_train #quantify class counts conf_full_train.prediction.value_counts() #convert full training data dataframe with confidence instances only to matrix conf_ft_matrix = conf_full_train.as_matrix(columns=None) conf_ft_matrix conf_ft_matrix.shape #splitting full training data with confidence into inputs and outputs conf_ft_inputs = conf_ft_matrix[:,0:4608] print(conf_ft_inputs.shape) conf_ft_outputs = conf_ft_matrix[:,4608] print(conf_ft_outputs.shape) """ Explanation: The original Notebook tried a couple of methods of incorporating the confidence labels into the model: Use all data samples, irrespective of confidence labels. However, the confidence label of each instance was set to be sample weight of each instance in the training phase. Only use instances that have a confidence label of 1. The best model was based on Method 2. Thus, only method 2 will be shown for this section here. 
End of explanation """ from imblearn.over_sampling import SMOTE from collections import Counter #fit over-sampling to training data inputs and putputs over_sampler = SMOTE(ratio='auto', k_neighbors=5, kind='regular', random_state=0) over_sampler.fit(conf_ft_inputs, conf_ft_outputs) #create new inputs and outputs with correct class proportions resampled_x, resampled_y = over_sampler.fit_sample(conf_ft_inputs, conf_ft_outputs) #quantify original class proportions prior to over-sampling Counter(conf_ft_outputs) #quantify class proportions after over-sampling Counter(resampled_y) #assign newly sampled input and outputs to old variable name used for inputs and outputs before #over-sampling conf_ft_inputs = resampled_x conf_ft_outputs = resampled_y print(Counter(conf_ft_outputs)) """ Explanation: 2.3) Dealing with Class Imbalance Binary classification tasks suffer from imbalanced class splits. Training a model on a data set with more instances for one class than the other class can result in biases towards the majority class, as sensitivity will be lost in detecting the minority class [17]. This is pertinent because the training data (additional and original data included) has an unbalanced class split, with more instances of Class 1 than Class 0. Thus, training the model on this data would result in a model that is biased towards Class 1 detections. To exacerbate this issue, the test data is also unbalanced, but the majority class for the test data is Class 0. Some researchers have already attempted to tackle this problem. There are two primary methods of dealing with class imbalances: balancing or further unbalancing the data set as needs fit, or introducing class weights, where the underlying algorithm applies disparate misclassification penalties to different classes [15]. Both approaches will be combined here, to first balance the data set, and then train the model to be bias towards Class 0 instances, as Class 0 is the majority class in the test data. 
The ‘imbalanced-learn’ API [16] has implementations of class balancing strategies found in the literature, such as SMOTE [17]. The premise of SMOTE is over-sampling of the minority class until a balance in the data set is reached. The sampling method is based on sampling via kNN. Unlike kNN for imputation, the best k was suggested to be 5 here. Once the data set was balanced through SMOTE, class weights were introduced. Considering the test data has more instances belonging to Class 0, the class weights were adjusted so that misclassification of Class 0 is penalized more heavily than misclassification of Class 1. The ratio of class weights for training were adjusted to match the class proportions of the test data, i.e. Class 0 weight = 1.33 and Class 1 weight = 1. The reason over-sampling of the minority class was preferred over under-sampling of the majority class is because the data quantity was already scarce (evident from Section 2.3). Furthermore, over-sampling to reach a class balance permits the use of ‘accuracy’ as the accuracy metric, as opposed to using AUC, which is more complex. In summary, as well as balancing the train data class-split, the model itself was adjusted to place more emphasis on correct Class 0 classifications. This section will cover how the class imbalance of the training data was addressed. The best approach for this was Over-Sampling using SMOTE. This technique over-samples the minority class until the data set is completely balanced. Note: may need to install imblearn package first. End of explanation """ #standardise the full training data with confidence labels 1 only scaler_2 = preprocessing.StandardScaler().fit(conf_ft_inputs) std_conf_ft_in = scaler_2.transform(conf_ft_inputs) std_conf_ft_in """ Explanation: 3. Pre-Processing The pre-processing of the data consisted of several steps. First, the features were rescaled appropriately. 
Secondly, Feature Extraction was performed to reduce the unwieldy dimensionality of the training data set, concomitantly increasing the signal-to-noise ratio and decreasing time complexity. This section will cover the Pre-Processing conducted that produced the model capable of producing the best predictions. Feature Scaling was achieved via several methods. The best method was standardisation. Feature Extraction was achieved via PCA. 3.1) Feature Scaling Feature scaling is important because it ensures that features have values plotted on the same scale, irrespective of the original units used to describe the original features. Feature scaling can be in the form of standardization, normalization or rescaling. The correct choice of feature scaling method is arbitrary and highly dependent on context. Thus, all three approaches were tried. The optimal results were obtained for standardization. End of explanation """ import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline #preprocessing: PCA (feature construction). High number of pcs chosen to plot a graph #showing how much more variance is explained as pc number increases pca_2 = PCA(n_components=700, random_state=0) std_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in) #quantify amount of variance explained by principal components print("Total Variance Explained by PCs (%): ", np.sum(pca_2.explained_variance_ratio_)) """ Explanation: 3.2) Principal Component Analysis (PCA) High-dimensionality should be reduced because it is likely to contain noisy features and because high-dimensionality increases computational time complexity [18]. Dimensionality reduction can be achieved via feature selection methods, such as filters and wrappers [19], or via feature extraction methods, such as PCA [20]. Here, the dimensionality reduction was conducted via feature extraction, vicariously through PCA. The rationale behind this is that the relative importance of GIST and CNN features is undetermined. 
Furthermore, feature selection methods may require some domain expertise to be effective. PCA uses the covariance matrix, its eigenvectors and eigenvalues to engineer principal components, which are uncorrelated eigenvectors that explain some proportion of the variance found in the dataset. The optimal number of principal components to engineer is arbitrary. Thus, the optimal number of principal components can be configured experimentally. This can be achieved by plotting the change in variance explained as a function of the number of principal components included, and by calculating the test score during cross validation for data transformed using different numbers of principal components. End of explanation """ #calculate a list of cumulative sums for amount of variance explained cumulative_variance = np.cumsum(pca_2.explained_variance_ratio_) len(cumulative_variance) #add 0 to the beginning of the list, otherwise list starts with variance explained by 1 pc cumulative_variance = np.insert(cumulative_variance, 0, 0) #define range of pcs pcs_4_var_exp = np.arange(0,701,1) len(pcs_4_var_exp) fig_1 = plt.figure(figsize=(7,4)) plt.title('Number of PCs and Change In Variance Explained') plt.xlabel('Number of PCs') plt.ylabel('Variance Explained (%)') plt.plot(pcs_4_var_exp, cumulative_variance, 'x-', color="r") plt.show() """ Explanation: The cell below will plot how much more of the variance in the data set is explained as the number of principal components included is increased. 
End of explanation """ #preprocessing: PCA (feature construction) pca_2 = PCA(n_components=230, random_state=0) std_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in) #quantify ratio of variance explain by principal components print("Total Variance Explained by PCs (%): ", np.sum(pca_2.explained_variance_ratio_)) """ Explanation: The graph above suggests that the maximum number of principal components should not exceed 300, as less and less variance is explained as the number of principal components included increases beyond 300. For the optimisation, the optimal number of principal components was initially assumed to be 230. End of explanation """ #this cell takes around 7 minutes to run #parameter optimisation with Exhaustive Grid Search, with class weight original_c_range = np.arange(0.85, 1.01, 0.01) gamma_range = np.arange(0.00001, 0.00023, 0.00002) #define parameter ranges to test param_grid = [{'C': original_c_range, 'gamma': gamma_range, 'kernel': ['rbf'], 'class_weight':[{0:1.33, 1:1}]}] #define model to do parameter search on svr = SVC() clf = GridSearchCV(svr, param_grid, scoring='accuracy', cv=5,) clf.fit(std_conf_ft_in_pca, conf_ft_outputs) #create dictionary of results results_dict = clf.cv_results_ #convert the results into a dataframe df_results = pd.DataFrame.from_dict(results_dict) df_results """ Explanation: 4. Model Selection The optimization was conducted through the use of a Grid search. In addition, the optimization was conducted for two kernels: the polynomial kernel and the RBF kernel. The initial search for optimal parameters was conducted on a logarithmic scale to explore as much of the parameter space as possible. From the results, the parameter ranges were refined and pruned to only include the potential best candidates. The choice of parameters was purely based on accuracy metrics, not on any other practical factors such as memory consumption or time complexity of predictions. The best model was determined on the following merits: 1. 
Good generalisation - achieving a high testing 356 score during cross-validation. 2. Avoidance of over-fitting - restriction on the magnitude of training scores during cross-validation. In particular, a training score beyond 360 an arbitrary limit is indicative of over-fitting. 361 Thus, a balance had to be struck to ensure that good 362 generalisation can be assumed. This section covers how the best model was selected. Two kernels were tried and tested: RBF and polynomial. RBF outperformed polynomial, therefore only the optimisation results of RBF will be presented here. Furthermore, the parameter ranges to try have already been pruned at this point, so only the final ranges will be used to perform a Grid Search. 4.1) Parameter Optimisation End of explanation """ #Draw heatmap of the validation accuracy as a function of gamma and C fig = plt.figure(figsize=(10, 10)) ix=fig.add_subplot(1,2,1) val_scores = clf.cv_results_['mean_test_score'].reshape(len(original_c_range),len(gamma_range)) val_scores ax = sns.heatmap(val_scores, linewidths=0.5, square=True, cmap='PuBuGn', xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5}) ax.invert_yaxis() plt.yticks(rotation=0, fontsize=10) plt.xticks(rotation= 70,fontsize=10) plt.xlabel('Gamma', fontsize=15) plt.ylabel('C', fontsize=15) plt.title('Validation Accuracy', fontsize=15) #Draw heatmap of the validation accuracy as a function of gamma and C ix=fig.add_subplot(1,2,2) train_scores = clf.cv_results_['mean_train_score'].reshape(len(original_c_range),len(gamma_range)) train_scores #plt.figure(figsize=(6, 6)) ax_1 = sns.heatmap(train_scores, linewidths=0.5, square=True, cmap='PuBuGn', xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5}) ax_1.invert_yaxis() plt.yticks(rotation=0, fontsize=10) plt.xticks(rotation= 70,fontsize=10) plt.xlabel('Gamma', fontsize=15) plt.ylabel('C', fontsize=15) plt.title('Training Accuracy', fontsize=15) plt.show() """ Explanation: The cell below 
will plot two heat-maps side by side: one for showing how the training accuracy changes during cross-validation for different combinations of parameters, and one for showing how the testing accuracy changes during cross-validation for different combinations of parameters. End of explanation """ #import module/library from sklearn.model_selection import validation_curve import matplotlib.pyplot as plt %matplotlib inline #specifying gamma parameter range to plot for validation curve param_range = gamma_range param_range #calculating train and validation scores train_scores, valid_scores = validation_curve(SVC(C=0.92, kernel='rbf', class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, param_name='gamma',param_range=param_range,scoring='accuracy') train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) valid_scores_mean = np.mean(valid_scores, axis=1) valid_scores_std = np.std(valid_scores, axis=1) #plotting validation curve plt.title('Gamma Validation Curve for SVM With RBF Kernel | C=0.92') plt.xlabel('Gamma') plt.ylabel('Score') plt.xticks(rotation=70) plt.ylim(0.8,1.0) plt.xlim(0.0001,0.00021) plt.xticks(param_range) lw=2 plt.plot(param_range, train_scores_mean, 'o-',label="Training Score", color='darkorange', lw=lw) plt.fill_between(param_range, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='darkorange', lw=lw) plt.plot(param_range, valid_scores_mean, 'o-',label="Testing Score", color='navy', lw=lw) plt.fill_between(param_range, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='navy', lw=lw) plt.legend(loc='best') plt.show() """ Explanation: The cells below will plot a Validation Curves for Gamma. 
End of explanation """ #import module/library from sklearn.model_selection import learning_curve #define training data size increments td_size = np.arange(0.1, 1.1, 0.1) #calculating train and validation scores train_sizes, train_scores, valid_scores = learning_curve(SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, train_sizes=td_size ,scoring='accuracy') train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) valid_scores_mean = np.mean(valid_scores, axis=1) valid_scores_std = np.std(valid_scores, axis=1) #plotting learning curve fig = plt.figure(figsize=(5,5)) plt.title('Learning Curve with SVM with RBF Kernel| C=0.92 & Gamma = 0.00011', fontsize=9) plt.xlabel('Train Data Size') plt.ylabel('Score') plt.ylim(0.8,1) lw=2 plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Score") plt.fill_between(train_sizes, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='red', lw=lw) plt.plot(train_sizes, valid_scores_mean, 'o-', color="g",label="Testing Score") plt.fill_between(train_sizes, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='green', lw=lw) plt.legend(loc='best') plt.show() """ Explanation: The cells below will plot the Learning Curve. 
End of explanation """ #this cell may take several minutes to run #plot how the number of PC's changes the test accuracy no_pcs = np.arange(20, 310, 10) compute_average_of_5 = [] for t in range(0,5): pcs_accuracy_change = [] for i in no_pcs: dummy_inputs = std_conf_ft_in dummy_outputs = conf_ft_outputs pca_dummy = PCA(n_components=i,) pca_dummy.fit(dummy_inputs) dummy_inputs_pca = pca_dummy.transform(dummy_inputs) dummy_model = SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1}) dummy_model.fit(dummy_inputs_pca, dummy_outputs,) dummy_scores = cross_val_score(dummy_model, dummy_inputs_pca, dummy_outputs, cv=3, scoring='accuracy') mean_cv = dummy_scores.mean() pcs_accuracy_change.append(mean_cv) print (len(pcs_accuracy_change)) compute_average_of_5.append(pcs_accuracy_change) #calculate position specific average for the five trials from __future__ import division average_acc_4_pcs = [sum(e)/len(e) for e in zip(*compute_average_of_5)] plt.title('Number of PCs and Change In Accuracy') plt.xlabel('Number of PCs') plt.ylabel('Accuracy (%)') plt.plot(no_pcs, average_acc_4_pcs, 'o-', color="r") plt.show() """ Explanation: Finding Best Number of Principal Components The cells below will show the optimisation for the number of principal components to include. This is done by doing using a range of principal components, conducting PCA for each specified number in the interval and calculating the average of the test score over 3-fold cross-validation. This procedure is repeated 5 times to combat the randomness of PCA. The average test accuracy over the 5 runs is then plotted against the number of principal components included. 
End of explanation """ #Load the complete training data set test_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Testing Data Set.csv", header=0, index_col=0) ##Observe the test data test_data #turn test dataframe into matrix test_data_matrix = test_data.as_matrix(columns=None) test_data_matrix.shape """ Explanation: Making Predictions The following cells will prepare the test data by getting it into the right format. End of explanation """ #pre-process test data in same way as train data scaled_test = scaler_2.transform(test_data_matrix) transformed_test = pca_2.transform(scaled_test) transformed_test.shape """ Explanation: The following cell will apply the same pre-processing applied to the training data to the test data. End of explanation """ #define and fit final model with best parameters from grid search final_model = SVC(C=0.92, cache_size=1000, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1}) final_model.fit(std_conf_ft_in_pca, conf_ft_outputs) #make test data predictions predictions = final_model.predict(transformed_test) #create dictionary for outputs matched with ID to_export = {'ID': np.arange(1, 4201, 1), 'prediction': predictions} to_export #convert to dataframe final_predictions = pd.DataFrame.from_dict(to_export) final_predictions #convert prediction column float type entries to integers final_predictions = final_predictions.astype('int') final_predictions #check properties of predictions: class balance should be 42.86(1):57.14(0) #i.e. should predict 2400 Class 0 instances, and 1800 Class 1 instances final_predictions.prediction.value_counts() """ Explanation: The following cells will produce predictions on the test data using the final model. End of explanation """
NervanaSystems/neon_course
06 Deep Residual Network.ipynb
apache-2.0
# Start by generating the backend: from neon.backends import gen_backend be = gen_backend(backend='gpu', batch_size=128) """ Explanation: Tutorial: Classifying tiny images with a Convolutional Neural Network Outline This interactive notebook shows how to do image classification with a Convnet. You can edit code in the code cells, and run it with Shift+Return. The notebook is read-only, so feel free to hack the code, and reload the page if something breaks. The tutorial covers how to: * Build a small convNet in neon. * Train it on the Cifar10 dataset. * Upload a new image, and classify it into one of the 10 categories. <img src="https://kaggle2.blob.core.windows.net/competitions/kaggle/3649/media/cifar-10.png"> Setting up a model The pieces we need to set up a model are described in the neon user guide: * The CIFAR10 dataset. * layer configuration and a model. * a compute backend. * an optimizer to train the model. * callbacks to keep us updated about the progress of training. End of explanation """ from neon.data.aeon_shim import AeonDataLoader from neon.data.dataloader_transformers import OneHot, TypeCast, BGRMeanSubtract import numpy as np # define configuration file for CIFAR-10 dataset config = { 'manifest_filename': 'data/cifar10/train-index.csv', # CSV manifest of data 'manifest_root': 'data/cifar10', # root data directory 'image': {'height': 32, 'width': 32, # output image size 'scale': [0.8, 0.8], # random scaling of image before cropping 'flip_enable': True}, # randomly flip image 'type': 'image,label', # type of data 'minibatch_size': be.bsz # batch size } from neon.data.aeon_shim import AeonDataLoader # build train_set train_set = AeonDataLoader(config, be) train_set = OneHot(train_set, index=1, nclasses=10) # perform onehot on the labels train_set = TypeCast(train_set, index=0, dtype=np.float32) # cast the image to float32 train_set = BGRMeanSubtract(train_set, index=0) # subtract image color means (based on default values) # build test set 
config['manifest_filename'] = 'data/cifar10/val-index.csv' test_set = AeonDataLoader(config, be) test_set = OneHot(test_set, index=1, nclasses=10) # perform onehot on the labels test_set = TypeCast(test_set, index=0, dtype=np.float32) # cast the image to float32 test_set = BGRMeanSubtract(test_set, index=0) # subtract image color means (based on default values) """ Explanation: Loading a dataset We use the aeon dataloader to present the data to the model. Note: This assumes the data has already been downloaded and ingested. If that is not the case, follow the instructions in the 02 VGG Fine-tuning notebook to process the CIFAR-10 dataset End of explanation """ from neon.initializers import Uniform from neon.transforms import Rectlin, Softmax from neon.layers import Activation, Conv, Pooling, Affine, MergeSum # This is a simple convnet with a one conv layer, # max-pooling, and a fully connected layer. # # input - Conv - ReLu - Pooling - Affine - ReLu - Affine - Softmax # layers = [Conv((5, 5, 16), init=Uniform(-0.1, 0.1), activation=Rectlin()), Pooling((2, 2)), Affine(nout=500, init=Uniform(-0.1, 0.1), activation=Rectlin()), Affine(nout=10, init=Uniform(-0.1, 0.1), activation=Softmax())] # We can use a MergeSum layer to combine differnt layers in parallel # # - Conv3 - ReLu - # / \ # input - Sum - ReLu - ... # \ / # - Conv5 - ReLu - # conv3 = Conv((3, 3, 16), init=Uniform(-0.1, 0.1), activation=Rectlin()) conv5 = Conv((5, 5, 16), padding=1, init=Uniform(-0.1, 0.1), activation=Rectlin()) layers = [MergeSum([conv3, conv5]), Activation(Rectlin()), Pooling((2, 2)), Affine(nout=500, init=Uniform(-0.1, 0.1), activation=Rectlin()), Affine(nout=10, init=Uniform(-0.1, 0.1), activation=Softmax())] """ Explanation: Generating layers The core of the model is the layers. This can be as simple as a list, but merging and branching makes it easy to specify complex topologies. 
End of explanation """ from neon.initializers import Kaiming, IdentityInit from neon.layers import SkipNode from neon.models import Model # helper functions simplify init params for conv and identity layers def conv_params(fsize, nfm, stride=1, relu=True, batch_norm=True): return dict(fshape=(fsize, fsize, nfm), strides=stride, padding=(1 if fsize > 1 else 0), activation=(Rectlin() if relu else None), init=Kaiming(local=True), batch_norm=batch_norm) def id_params(nfm): return dict(fshape=(1, 1, nfm), strides=2, padding=0, activation=None, init=IdentityInit()) # A resnet module # # - Conv - Conv - # / \ # input - Sum - Relu - output # \ / # - Identity - # def module_factory(nfm, stride=1): mainpath = [Conv(**conv_params(3, nfm, stride=stride)), Conv(**conv_params(3, nfm, relu=False))] sidepath = [SkipNode() if stride == 1 else Conv(**id_params(nfm))] module = [MergeSum([mainpath, sidepath]), Activation(Rectlin())] return module """ Explanation: The Deep Residual Network A resnet module is a MergeSum layer containing a main path with conv layers, and a side path with a SkipNode() configured as the identity function. This allows earlier layer activations to bypass a series of layers. We use some helper functions to succinclty define the deep network: End of explanation """ # Set depth = 3 for quick results # or depth = 9 to reach 6.7% top1 error in 150 epochs depth = 3 nfms = [2**(stage + 4) for stage in sorted(range(3) * depth)] strides = [1] + [1 if cur == prev else 2 for cur, prev in zip(nfms[1:], nfms[:-1])] layers = [Conv(**conv_params(3, 16))] for nfm, stride in zip(nfms, strides): layers.append(module_factory(nfm, stride)) layers.append(Pooling('all', op='avg')) layers.append(Affine(10, init=Kaiming(local=False), batch_norm=True, activation=Softmax())) model = Model(layers=layers) """ Explanation: The model is a collection of resnet modules between an input conv and output pooling and affine layer. 
End of explanation """ from neon.transforms import CrossEntropyMulti from neon.layers import GeneralizedCost cost = GeneralizedCost(costfunc=CrossEntropyMulti()) """ Explanation: Cost function The cost function compares network outputs with ground truth labels, and produces and error that we can backpropagate through the layers of the network. For our binary classification task, we use a cross entropy cost function. End of explanation """ from neon.optimizers import GradientDescentMomentum, Schedule opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001, schedule=Schedule([90, 135], 0.1)) """ Explanation: Optimizer We now have a cost function to minimize by gradient descent. We do this iteratively over small batches of the data set, making it stochastic gradient decesent (SGD). There are other optimizers such as RMSProp and AdaDelta that are supported in neon, but often simple gradient descent works well. End of explanation """ # set up callbacks. By default sets up a progress bar from neon.transforms import Misclassification from neon.callbacks.callbacks import Callbacks valmetric = Misclassification() callbacks = Callbacks(model, eval_set=test_set, metric=valmetric) """ Explanation: Callbacks To provide feedback while the model is training, neon lets the user specify a set of callbacks that get evaluated at the end of every iteration (minibatch) or pass through the dataset (epoch). Callbacks include evaluating the model on a validation set or computing missclassification percentage. There are also callbacks for saving to disk and for generating visualizations. Here we will set up a progress bar to monitor training. End of explanation """ # And run the model epochs = 10 model.fit(train_set, optimizer=opt, num_epochs=epochs, cost=cost, callbacks=callbacks) """ Explanation: Training the model Now all the pieces are in place to run the network. We use the fit function and pass it a dataset, cost, optmizer, and the callbacks we set up. 
End of explanation """ # Check the performance on the supplied test set from neon.transforms import Misclassification error_pct = 100 * model.eval(test_set, metric=Misclassification()) print 'Misclassification error = %.1f%%' % error_pct """ Explanation: Congrats! If you made it this far you have trained a convolutional network in neon. Evaluating the model We can now compute the misclassification on the test set to see how well we did. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt import urllib from PIL import Image import numpy as np # download images from the web imgs = { 'frog': "https://upload.wikimedia.org/wikipedia/commons/thumb/5/55/Atelopus_zeteki1.jpg/440px-Atelopus_zeteki1.jpg", 'airplane': "https://img0.etsystatic.com/016/0/5185796/il_570xN.433414910_p5n3.jpg", 'cat': "https://s-media-cache-ak0.pinimg.com/236x/8e/d7/41/8ed7410285f101ba5892ff723c91fa75.jpg", 'car': "http://static01.nyt.com/images/2012/09/09/automobiles/09REFI2/09REFI2-articleLarge.jpg", } # empty buffer to use for inference dataset # dims [minibatch, imgsize] x_new = np.zeros((128, 32*32*3), dtype=np.float32) # crop/resize images and assign them to slots in x_new # also display with true labels plt.figure(1) for i, name in enumerate(imgs): imgs[name] = urllib.urlretrieve(imgs[name], filename="data/{}.jpg".format(name)) plt.subplot(100 + (10 * len(imgs)) + 1 + i) img = Image.open("data/{}.jpg".format(name)) crop = img.crop((0,0,min(img.size),min(img.size))) crop.thumbnail((32, 32)) plt.imshow(crop, interpolation="nearest") plt.title(name) plt.axis('off') x_new[i,:] = np.asarray(crop, dtype=np.float32)[:,:,(2,0,1)].transpose(2,0,1).reshape(1,3072) -127 """ Explanation: By increasing the depth of the network and the number of epochs, we can improve the performance to match state of the art. This was quite a lot of code! Generally, to set up a new model from scratch it is best to follow one of the examples from the neon/examples directory. 
It's easy to mix and match parts! Inference Now we want to grab a few new images from the internet and classify them through our network. End of explanation """ from neon.data import ArrayIterator # create a minibatch with the new image inference_set = ArrayIterator(x_new, None, nclass=10, lshape=(3, 32, 32)) # inference_set = ArrayIterator(x_train, None, nclass=10, # lshape=(3, 32, 32)) classes =["airplane", "auto", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"] out = model.get_outputs(inference_set) """ Explanation: Create a dataset with this image for inference End of explanation """ plt.figure(2) for i, name in enumerate(imgs): plt.subplot(100 + (10 * len(imgs)) + 1 + i) img = Image.open("data/{}.jpg".format(name)) crop = img.crop((0,0,min(img.size),min(img.size))) crop.thumbnail((32, 32)) title = "{} ({:.2})".format(classes[out[i].argmax()], out[i].max()) plt.imshow(crop, interpolation="nearest") plt.title(title) plt.axis('off') """ Explanation: Get model outputs on the inference data End of explanation """
mdiaz236/DeepLearningFoundations
first-neural-network/dlnd-your-first-neural-network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. 
End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. End of explanation """ # Save the last 21 days test_data = data[-21*24:] data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. 
End of explanation """ # Hold out the last 60 days of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.input_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, (self.output_nodes, self.hidden_nodes)) self.lr = learning_rate #### Set this to your implemented sigmoid function #### # Activation function is the sigmoid function self.activation_function = lambda x: 1 / (1 + np.exp(-x)) def train(self, inputs_list, targets_list): # Convert inputs list to 2d array inputs = np.array(inputs_list, ndmin=2).T targets = np.array(targets_list, ndmin=2).T #### Implement the forward pass here #### ### Forward pass ### # Hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer # TODO: Output layer final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs )# signals into final output layer final_outputs = self.activation_function(final_inputs)# signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # Output error output_errors = (targets - final_inputs) # Output layer error is the difference between desired 
target and actual output. # Backpropagated error hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer hidden_grad = hidden_outputs * (1.0 - hidden_outputs) # hidden layer gradients # TUpdate the weights self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T) # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += np.dot(hidden_grad * hidden_errors, inputs.T) * self.lr# update input-to-hidden weights with gradient descent step def run(self, inputs_list): # Run a forward pass through the network inputs = np.array(inputs_list, ndmin=2).T #### Implement the forward pass here #### # Hidden layer hidden_inputs = np.dot(inputs.T, self.weights_input_to_hidden.T )# signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer # TODO: Output layer final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output.T )# signals into final output layer final_outputs = final_inputs# signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. 
All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. End of explanation """ import sys ### Set the hyperparameters here ### epochs = 600 learning_rate = 0.001 hidden_nodes = 24 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for e in range(epochs): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) for record, target in zip(train_features.ix[batch].values, train_targets.ix[batch]['cnt']): network.train(record, target) # Printing out the training progress train_loss = MSE(network.run(train_features), train_targets['cnt'].values) val_loss = MSE(network.run(val_features), val_targets['cnt'].values) sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... 
Validation loss: " + str(val_loss)[:5]) losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() # plt.ylim(ymax=0.5) """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of epochs This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. 
Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features)*std + mean ax.plot(predictions, label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. 
End of explanation """ import unittest inputs = [0.5, -0.2, 0.1] targets = [0.4] test_w_i_h = np.array([[0.1, 0.4, -0.3], [-0.2, 0.5, 0.2]]) test_w_h_o = np.array([[0.3, -0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328, -0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, 0.39775194, -0.29887597], [-0.20185996, 0.50074398, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Thinking about your results Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does? Note: You can edit the text in this cell by double clicking on it. 
When you want to render the text, press control + enter Your answer below The model predicts the first half of the test set fairly well (Dec 11 - 19). There is some underfitting with the current hyper parameters. The second half of the month isn't predicted as well, particularly Dec 24 - 25. This could be due to holidays observed on these days. More years of data or a holiday feature could help with predicting days like this. Unit tests Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project. End of explanation """
karlstroetmann/Algorithms
Python/Chapter-09/Huffman.ipynb
gpl-2.0
import graphviz as gv
"""
Explanation: Huffman's Algorithm for Lossless Data Compression
End of explanation
"""

class CodingTree:
    """Abstract base class for Huffman coding trees.

    Subclasses Leaf and Node override count() and cost().  Every instance
    gets a unique integer id mID; the id exists solely so the graphviz
    rendering code can name its nodes unambiguously.
    """
    sNodeCount = 0  # class-wide counter used to hand out unique node ids

    def __init__(self):
        # mutate class-level state so ids are unique across *all*
        # CodingTree instances ever created in this session
        CodingTree.sNodeCount += 1
        self.mID = CodingTree.sNodeCount

    def count(self):
        "compute the number of characters"
        pass  # abstract — implemented by Leaf and Node

    def cost(self):
        "compute the number of bits used by this coding tree"
        pass  # abstract — implemented by Leaf and Node

    def getID(self):
        return self.mID # used only by graphviz

"""
Explanation: This notebook presents <em style="color:blue;">coding trees</em>. Given an alphabet $\Sigma$ of characters, we define the set $\mathcal{K}$ of coding trees by induction:
$\texttt{Leaf}(c,f) \in \mathcal{K} $ if $c \in \Sigma$ and $f \in \mathbb{N}$
An expression of the form $\texttt{Leaf}(c,f)$ represent a leaf in a coding tree.  $c$ is a letter from the alphabet $\Sigma$ and * $f$ is the frequency* of the letter $c$ in the string $s$ that is to be encoded.
$\texttt{Node}(l,r) \in \mathcal{K}$ if $l \in\mathcal{K}$ and $r \in \mathcal{K}$
The expressions $\texttt{Node}(l,r)$ represent the inner nodes of the coding-tree.
The class CodingTree is a superclass for constructing coding trees.  It has one static variable sNodeCount.  This variable is used to equip all nodes with a unique identifier.  This identifier is used to draw the trees using graphviz.
Every object of class CodingTree has a uniques identifier mID that is stored as a member variable.  This identifier is only used by graphviz.
End of explanation
"""
def _make_string(self, Attributes):
    """Format self as 'ClassName(attr1, attr2, ...)' from named attributes."""
    # map the function __str__ to all attributes and join them with a comma
    name = self.__class__.__name__
    return f"{name}({', '.join(map(str, [getattr(self, at) for at in Attributes]))})"

# Monkey-patch the helper onto the base class so Leaf and Node __str__
# implementations can reuse it.
CodingTree._make_string = _make_string
"""
Explanation: The function make_string is a helper function that is used to simplify the implementation of __str__.
- self is the object that is to be rendered as a string
- attributes is a list of those member variables that are used to produce the string
End of explanation
"""
def toDot(self):
    """Render this coding tree as a graphviz digraph.

    Leaves become record nodes showing "character | frequency", inner
    nodes show their character count, and every edge carries the code bit
    it contributes (0 = left child, 1 = right child).
    """
    dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
    nodeDict = {}
    self._collectIDs(nodeDict)  # map: node id -> node, for the whole tree
    for n, t in nodeDict.items():
        if isinstance(t, Leaf):
            # Whitespace characters are special-cased so the dot record
            # label stays both valid and human-readable; note the double
            # escaping needed for graphviz label syntax.
            if t.mCharacter == ' ':
                dot.node(str(n), label='{ \' \' |' + "{:,}".format(t.mFrequency) + '}')
            elif t.mCharacter == '\t':
                dot.node(str(n), label='{ \'\\\\t\' |' + "{:,}".format(t.mFrequency) + '}')
            elif t.mCharacter == '\n':
                dot.node(str(n), label='{ \'\\\\n\' |' + "{:,}".format(t.mFrequency) + '}')
            elif t.mCharacter == '\v':
                dot.node(str(n), label='{ \'\\\\v\' |' + "{:,}".format(t.mFrequency) + '}')
            else:
                dot.node(str(n), label='{' + str(t.mCharacter) + '|' + "{:,}".format(t.mFrequency) + '}')
        elif isinstance(t, Node):
            # inner nodes are labelled with the total character count below them
            dot.node(str(n), label="{:,}".format(t.count()))
        else:
            assert False, f'Unknown node {t}'
    # second pass: emit the edges, labelled with the encoded bit
    for n, t in nodeDict.items():
        if isinstance(t, Node):
            dot.edge(str(n), str(t.mLeft .getID()), label='0')
            dot.edge(str(n), str(t.mRight.getID()), label='1')
    return dot

CodingTree.toDot = toDot
"""
Explanation: The method $t.\texttt{toDot}()$ takes a binary trie $t$ and returns a graph that depicts the tree $t$.
End of explanation
"""
def _collectIDs(self, nodeDict):
    """Recursively fill nodeDict with id -> node for every node in the tree."""
    nodeDict[self.getID()] = self
    if isinstance(self, Node):
        self.mLeft ._collectIDs(nodeDict)
        self.mRight._collectIDs(nodeDict)

CodingTree._collectIDs = _collectIDs
"""
Explanation: The method $t.\texttt{collectIDs}(d)$ takes a coding tree $t$ and a dictionary $d$ and updates the dictionary so that the following holds:
$$ d[\texttt{id}] = n \quad \mbox{for every node $n$ in $t$.} $$
Here, $\texttt{id}$ is the unique identifier of the node $n$, i.e. $d$ associates the identifiers with the corresponding nodes.
End of explanation
"""
class Leaf(CodingTree):
    """A leaf Leaf(c, f): character c occurring f times in the input string."""

    def __init__(self, c, f):
        CodingTree.__init__(self)
        self.mCharacter = c  # the encoded character
        self.mFrequency = f  # number of occurrences of c in the source text

    def count(self):
        # a leaf contributes exactly its character frequency
        return self.mFrequency

    def cost(self):
        # a bare leaf encodes nothing yet, so it costs 0 bits
        return 0

    def __str__(self):
        return _make_string(self, ['mCharacter', 'mFrequency'])

    def __lt__(self, other):
        # Leaves sort before inner nodes; among leaves, order by character.
        # This ordering is what breaks frequency ties inside the heapq
        # tuples used by coding_tree.
        if isinstance(other, Node):
            return True
        return self.mCharacter < other.mCharacter

"""
Explanation: The class Leaf represents a leaf of the form $\texttt{Leaf}(c, f)$.  It maintains two member variables.
- $c$ represents the character that is encoded.  This character is stored in the member variable mCharacter.
- $f$ represents the number of occurrences of the character $c$ in the string $s$ that is to be encoded and is stored in the member variable mFrequency.
The class Leaf implements the method __lt__.  This way, nodes in a coding tree can be ordered.  The ordering between nodes is given as follows:
* $\texttt{Leaf}(c, f) < Node(l, r)$, i.e. leaf nodes are smaller than inner nodes.
* $\texttt{Leaf}(c_1, f_1) < \texttt{Leaf}(c_2, f_2) \Leftrightarrow c_1 < c_2$, i.e. leaf nodes are compared by comparing their characters lexicographically.
End of explanation
"""
class Node(CodingTree):
    """An inner node Node(l, r) with left subtree l and right subtree r."""

    def __init__(self, l, r):
        CodingTree.__init__(self)
        self.mLeft  = l
        self.mRight = r

    def count(self):
        # total number of characters encoded below this node
        return self.mLeft.count() + self.mRight.count()

    def cost(self):
        # every character below this node needs one extra bit at this level,
        # hence the "+ self.count()" term on top of the subtree costs
        return self.mLeft.cost() + self.mRight.cost() + self.count()

    def __str__(self):
        return _make_string(self, ['mLeft', 'mRight'])

    def __lt__(self, other):
        # Inner nodes sort after leaves; among inner nodes, compare the
        # left subtrees recursively.
        if isinstance(other, Leaf):
            return False
        return self.mLeft < other.mLeft

"""
Explanation: The class Node represents an inner node of the form $\texttt{Node}(l, r)$.  It maintains two member variables:
- self.mLeft is the left subtree $l$,
- self.mRight is the right subtree $r$.
The class Node implements the method __lt__.  This way, nodes in a coding tree can be ordered.  The ordering between nodes is given as follows:
* $\texttt{Leaf}(c, f) < Node(l, r)$, i.e.
leaf nodes are smaller than inner nodes.
* $\texttt{Node}(l_1, r_1) < \texttt{Node}(l_2, r_2) \Leftrightarrow l_1 < l_2$, i.e. inner nodes are compared by comparing their left subtrees.
End of explanation
"""
import heapq

# Quick demonstration of the heapq API: pushes keep the underlying list
# heap-ordered; pops always remove the smallest element.
H = []
heapq.heappush(H, 7)
heapq.heappush(H, 1)
heapq.heappush(H, 0)
heapq.heappush(H, 6)
H
a = heapq.heappop(H)
print('a = ', a)
H
"""
Explanation: Building a Coding Tree
The module heapq provides priority queues.  The api is given at https://docs.python.org/3/library/heapq.html.
We will use two methods:
* heapq.heappush(H, p) pushes the priority p onto the heap.  For this to be possible, p has to be an object of a class that provides the method __lt__.  The expression p.__lt__(q) is True if p is smaller than q.
* heapq.heappop(H) removes and returns the element from the heap H that has the highest priority, i.e. the smallest element.
The implementation of this module represents heaps as arrays.  Therefore,
* to test whether a heap H is empty we can write H == [] since [] is the empty heap.
* to create an empty heap we write H = [],
* to get the element with the highest priority in the heap H we write H[0],
* to get the number of elements in H we can write len(H).
End of explanation
"""
def coding_tree(M):
    """Build an optimal Huffman coding tree for the (character, frequency) set M.

    Greedy strategy: repeatedly merge the two trees with the smallest
    character counts, since merging the cheapest pair adds the least to
    the total cost.  Returns the final CodingTree.

    NOTE(review): assumes M is non-empty — an empty M makes the final
    H[0] access raise IndexError.
    """
    H = [] # empty priority queue
    for c, f in M:
        # heap entries are (count, tree) pairs; count ties fall back to
        # the __lt__ ordering defined on Leaf and Node
        heapq.heappush(H, (f, Leaf(c, f)))
    while len(H) > 1:
        # pop the two cheapest trees and merge them under a new inner node
        ac, a = heapq.heappop(H)
        bc, b = heapq.heappop(H)
        heapq.heappush(H, (ac + bc, Node(a, b)))
    return H[0][1]  # drop the count, return the remaining tree
"""
Explanation: The function coding_tree implements Huffman's algorithm for data compression.
The input $M$ is a set of pairs of the form
$$ \bigl{ (c_1, f_1), \cdots, (c_k, f_k)\bigr} $$
where $c_i$ is a character and $f_i$ is the number of times this character occurs in the string $s$ that is to be encoded.
Huffman's algorithm is <em style="color:blue;">greedy</em>: It always combines those coding trees that have the least character count so far as this results in the smallest cost increase.
The heap H that is maintained by this function is a priority queue which is represented by an array that is structured as a heap.  The items in this priority queue are pairs of the form
$$ \bigl( t.\texttt{count}(), t \bigr) $$
where $t$ is a coding tree and $t.\texttt{count}()$ is the count of this coding tree.
End of explanation
"""
import math
"""
Explanation: Let us test this with a trivial example.
End of explanation
"""
def log2(n):
    """Return the base-2 logarithm of n (change-of-base via natural log)."""
    return math.log(n) / math.log(2)

log2(8)
"""
Explanation: The function log2(n) computes $\log_2(n)$.
End of explanation
"""
def demo(M):
    """Build, draw and cost the Huffman tree for the (char, freq) set M.

    Compares the Huffman encoding cost against a fixed-width encoding of
    ceil(log2(|M|)) bits per character, prints both costs plus the
    percentage saved, and returns the fractional saving.

    NOTE(review): display() is the IPython rich-display builtin, so this
    only works inside a notebook session.
    """
    K = coding_tree(M)
    display(K.toDot())
    # bits per character a naive fixed-width code would need
    n = math.ceil(log2(len(M)))
    cost_huffman = K.cost()
    cost_constant = n * K.count()
    savings = (cost_constant - cost_huffman) / cost_constant
    print(f'cost of encoding with Huffman coding tree : {"{:,}".format(cost_huffman)} bits')
    print(f'cost of encoding with {n} bits : {"{:,}".format(cost_constant)} bits')
    print(f'savings: {100 * savings}%')
    return savings

# Example alphabets: highly skewed, quadratic, linear and Fibonacci-like
# frequency distributions.
demo({ ('a', 990), ('b', 8), ('c', 1), ('d', 1) })
demo({ ('a', 4), ('b', 9), ('c', 16), ('d', 25), ('e', 36), ('f', 49), ('g', 64) })
demo({ ('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7), ('h', 8), ('i', 9), ('j', 10) })
demo({ ('a', 1), ('b', 1), ('c', 2), ('d', 3), ('e', 5), ('f', 8), ('g', 13) })
"""
Explanation: The function demo(M)takes one argument M that is a set of pairs of the form
$$ M = \bigl{ \langle c_1, f_1 \rangle, \cdots, \langle c_n, f_n \rangle \bigr} $$
Here, $c_1$, $\cdots$, $c_n$ are characters and $f_1, \cdots, f_n$ are the frequencies with which these characters occur in a given string that is to be encoded.  The function builds the Huffman tree that is optimal for these characters.
Additionally the function prints the percentage of memory savings that can be achieved with a Huffman tree.  This number is also returned.
End of explanation """ def demo_file(fn): with open(fn, 'r') as file: s = file.read() # read file as string s Frequencies = {} for c in s: f = Frequencies.get(c, 0) f += 1 Frequencies[c] = f M = { (c, f) for (c, f) in Frequencies.items() } print(M) return demo(M) !cat alice.txt demo_file('alice.txt') demo_file('moby-dick.txt') """ Explanation: The function demo_file(fn) reads the file with name fn and calculates the frequency of all characters occurring in fn. Using these frequencies it computes the Huffman coding tree. End of explanation """
xmnlab/skdata
notebooks/SkDataWidget.ipynb
mit
from IPython.display import Image from skdata.widgets import SkDataWidget from skdata import SkData """ Explanation: # Table of Contents <div class="toc" style="margin-top: 1em;"><ul class="toc-item" id="toc-level0"><li><span><a href="http://localhost:8888/notebooks/SkDataWidget.ipynb#Load-data-to-the-analysis-and-visualization" data-toc-modified-id="Load-data-to-the-analysis-and-visualization-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Load data to the analysis and visualization</a></span><ul class="toc-item"><li><span><a href="http://localhost:8888/notebooks/SkDataWidget.ipynb#Variables-description:" data-toc-modified-id="Variables-description:-1.1"><span class="toc-item-num">1.1&nbsp;&nbsp;</span>Variables description:</a></span></li></ul></li><li><span><a href="http://localhost:8888/notebooks/SkDataWidget.ipynb#Widget" data-toc-modified-id="Widget-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Widget</a></span></li><li><span><a href="http://localhost:8888/notebooks/SkDataWidget.ipynb#Conclusion" data-toc-modified-id="Conclusion-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Conclusion</a></span></li></ul></div> Scikit-Data Widget Introduction Scikit-Data library offers a set of functionalities to help the Data Analysts in their work. Initially is just a small set of simple functionalities like convert a dataframe in a crostab dataframe using some specifics fields. Other interesting functionality is offer a jupyter widget to offer interactive options to handle the data with graphical and tabular outputs. To import the Scikit-Data Jupyter Widget just use the following code: python from skdata.widgets import SkDataWidget End of explanation """ sd = SkData('/tmp/titanic.h5') sd.import_from( source='../data/train.csv', index_col='PassengerId', target_col='Survived' ) """ Explanation: Load data to the analysis and visualization The data used in this example was extracted from Kaggle Titanic challenge. 
Variables description: survival Survival (0 = No; 1 = Yes) pclass Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd) name Name sex Sex age Age sibsp Number of Siblings/Spouses Aboard parch Number of Parents/Children Aboard ticket Ticket Number fare Passenger Fare cabin Cabin embarked Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton) SPECIAL NOTES: Pclass is a proxy for socio-economic status (SES) 1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower Age is in Years; Fractional if Age less than One (1) If the Age is Estimated, it is in the form xx.5 With respect to the family relation variables (i.e. sibsp and parch) some relations were ignored. The following are the definitions used for sibsp and parch. Sibling: Brother, Sister, Stepbrother, or Stepsister of Passenger Aboard Titanic Spouse: Husband or Wife of Passenger Aboard Titanic (Mistresses and Fiances Ignored) Parent: Mother or Father of Passenger Aboard Titanic Child: Son, Daughter, Stepson, or Stepdaughter of Passenger Aboard Titanic Other family relatives excluded from this study include cousins, nephews/nieces, aunts/uncles, and in-laws. Some children travelled only with a nanny, therefore parch=0 for them. As well, some travelled with very close friends or neighbors in a village, however, the definitions do not support such relations." End of explanation """ sd['train'].summary() w = SkDataWidget(sd) w.display(dset_id='train') """ Explanation: Widget To use SkDataWidget class, you need some SkData loaded: python w = SkDataWidget(sd) You can use the show_chart method to change some parameters of the chart that show information of a cross tab of the fields selected: python w.display(dset_id='dset_id') This method will use the parameters informed and create and show a chart and a data table. 
End of explanation
"""
Image(filename='../data/img/initial_screen.png')
"""
Explanation: This should display the following screen:
End of explanation
"""
Image(filename='../data/img/chart_screen.png')
"""
Explanation: If you want to see the chart, just click the Chart option and you will see something like this:
End of explanation
"""
magwenelab/mini-term-2016
Bio311-ODE-modeling-network-motifs.ipynb
cc0-1.0
# import statements to make numeric and plotting functions available %matplotlib inline from numpy import * from matplotlib.pyplot import * def hill_activating(X, B, K, n): """ Hill function for an activator""" return (B * X**n)/(K**n + X**n) """ Explanation: Modeling Gene Networks Using Ordinary Differential Equations Author: Paul M. Magwene <br> Date: April 2016 Background readings Shen-Orr, S. S., et al. 2002. Network motifs in the transcriptional regulation network of Escherichia coli. Nat Genet 31(1):64-8. http://dx.doi.org/10.1038/ng881 Alon, U. 2007. Network motifs: theory and experimental approaches. Nat Rev Genet, 8(6):450–61. http://dx.doi.org/10.1038/nrg2102. To gain some intuition for how systems biologists build mathematical models of gene networks we're going to use computer simulations to explore the dynamical behavior of simple transcriptional networks. In each of our simulations we will keep track of the the concentration of a different genes of interest as they change over time. The basic approach we will use to calculate changes in the quantity of different molecules are differential equations, which are simply a way of describing the instanteous change in a quantity of interest. All of our differential equations will be of this form: \begin{eqnarray} \frac{dY}{dt} = \mbox{rate of production} - \mbox{rate of decay} \end{eqnarray} To state this in words -- the amount of gene $Y$ changes over time is a function of two things: 1) a growth term which represents the rate at which the gene is being transcribed and translated; and 2) a decay term which gives the rate at which $Y$ trascsripts and protein are being degraded. In general we will assume that the "rate of production" is a function of the concentration of the genes that regulate $Y$(i.e. it's inputs in the transcriptional network), while the "rate of decay" is a proportional to the amount of $Y$ that is present. 
So the above formula will take the following structure: $$ \frac{dY}{dt} = f(X_1, X_2, \ldots) - \alpha Y $$ The $f(X_1, X_2, \ldots)$ term represents the growth term and is a function of the transcription factors that regulate $Y$. The term, $\alpha Y$ represents the rate at which $Y$ is being broken down or diluted. Notice that the decay rate is a proportional to the amount of $Y$ that is present. If $\frac{dy}{dt}$ is positive than the concentration of gene $Y$ is increasing, if $\frac{dy}{dt}$ is negative the concentration of $Y$ is decreasing, and if $\frac{dy}{dt} = 0$ than $Y$ is at steady state. Modeling the rate of production term with the Hill Function An appropriate approach for modeling the rate of production of a protein, $Y$, as a function of it's inputs, $X_1, X_2,..$, is a with the "Hill Function". The Hill Function for a single transcriptional activator is: $$ f(X) = \frac{\beta X^n}{K^n + X^n} $$ $X$ represents the concentration of a transcriptional activator and $f(X)$ represents the the combined transcription and translation of the gene $Y$ that is regulated by $X$. 
Modeling transcriptional activation Here a Python function to represent transcriptional activation based on the Hill function given above: End of explanation """ ## generate a plot using the hill_activating function defined above # setup paramters for our simulation # CHANGE THESE VALUES TO EXPLORE THE EFFECT OF THESE PARAMTERS # (see questions below) n = 1 B = 5 K = 10 # generate a range of x values, representing a range of concentrations of our # transcription factor X x = linspace(0,30,200) # generate 200 evenly spaced points between 0 and 30 # calculating corresponding rates of production of Y y = hill_activating(x, B, K, n) # plot the hill fxn with the user set parameters plot(x, y, label='B = {}, K = {}, n={}\n(user specified)'.format(B, K, n)) # plot the hill fxn with a set of reference parameters to facilitate comparison plot(x, hill_activating(x, 5, 10, 1), label='B = 5, K = 10, n=1\n(reference)', alpha=0.75) xlabel('Concentration of X') ylabel('Rate of production of Y') legend(loc='best') pass # suppress further output """ Explanation: Visualizing the activating Hill function Now we'll setup a plot to visualize what this function looks like. 
End of explanation """ ## Python implementation of repressive Hill function def hill_repressing(X, B, K, n): return B/(1.0 + (X/K)**n) ## generate a plot using the hill_repressing function defined above # CHANGE THESE VALUES TO EXPLORE THE EFFECT OF THESE PARAMTERS # (see questions below) n = 1 B = 5 K = 10 # generate a range of x values, representing a range of concentrations of our # transcription factor X x = linspace(0,30,200) # generate 200 evenly spaced points between 0 and 30 # calculating corresponding rates of production of Y y = hill_repressing(x, B, K, n) # plot the hill fxn with the user set parameters plot(x, y, label='B = {}, K = {}, n={}\n(user specified)'.format(B, K, n)) # plot the hill fxn with a set of reference parameters to facilitate comparison plot(x, hill_repressing(x, 5, 10, 1), label='B = 5, K = 10, n=1\n(reference)') xlabel('Concentration of X') ylabel('Rate of production of Y') legend(loc='best') pass # suppress further output """ Explanation: <h2> <font color='firebrick'> Question Set 1 (3 pts) </font> </h2> What happens to the shape of the Hill function curve when you vary $n$ between 1 and 8? What happens to the curve when you vary $\beta$ between 2 and 20? What happens to the curve when you vary $K$ between 2 and 20? Modeling transcriptional repression If rather than stimulating the production of $Y$, $X$ "represses" $Y$, we can write the corresponding Hill function as: $$ f(X) = \frac{\beta}{1 + (X/K)^n} $$ Remember that both of these Hill functions (activating and repressing) describe the production of $Y$ as a function of the levels of $X$, not the temporal dynamics of $Y$ which we'll look at after developing a few more ideas. 
End of explanation """ def logic_activating(X, B, K): if X > K: theta = 1 else: theta = 0 return B*theta def logic_repressing(X, B, K): if X < K: theta = 1 else: theta = 0 return B*theta """ Explanation: <h2> <font color='firebrick'> Question Set 2 (3 pts) </font> </h2> As before change the values of the simulation to answer these questions What happens to the shape of the Hill repressive function curve when you vary $n$ between 1 and 8? What happens to the curve when you vary $B$ between 2 and 20? What happens to the curve when you vary $K$ between 2 and 20? Simplifying Models using Logic Approximations To simplify analysis it's often convenient to approximate step-like sigmoidal functions like those produced by the Hill equation with functions using logic approximations. We'll assume that when the transcription factor, $X$, is above a threshold, $K$, then gene $Y$ is transcribed at a rate, $\beta$. When $X$ is below the threshold, $K$, gene $Y$ is not be transcribed. To represent this situation, we can rewrite the formula for $Y$ as: $$ f(X) = \beta\ \Theta(X > K) $$ where the function $\Theta = 0$ if the statement inside the parentheses is false and $\Theta = 1$ if the statement is true. 
An alternate way to write this is: $$ f(X) = \begin{cases} 0, &\text{if $X > K$;} \ \beta, &\text{otherwise.} \end{cases} $$ When $X$ is a repressor we can write: $$ f(X) = \beta\ \Theta(X < K) $$ Python functions for the logic approximation We can write Python functions to represent the logic approximations for activation and repression as follows: End of explanation """ ## generate plots using your hill_activating and logic_activating functions defined above ## For X values range from 0 to 30 # CHANGE THESE VALUES TO EXPLORE THE EFFECT OF THESE PARAMTERS n = 4 B = 5 K = 10 x = linspace(0, 30, 200) plot(x, hill_activating(x, B, K, n), label='B = {}, K = {}, n={}'.format(B, K, n)) logicx = [logic_activating(i, B, K) for i in x] plot(x, logicx, label='logic approximation') xlabel('Concentration of X') ylabel('Rate of production of Y') legend(loc='best') ylim(-0.5, B*1.1) pass """ Explanation: And now we can generate some plot to compare the logic approximation to the Hill function, for the activating case: End of explanation """ ## write a function to represent the simple differential equation above def dYdt(B, K, a, X, Y): production = logic_activating(X, B, K) decay = a*Y return production - decay ## generate a plot of conc of Y over time using your dY function defined above ## Evaluated over 200 time units B = 5 K = 10 X = K + 1 Y = [0] # initial value of Y a = 0.05 nsteps = 200 for i in range(nsteps): yprevious = Y[-1] deltay = dYdt(B, K, a, X, yprevious) ynew = Y[-1] + deltay Y.append(ynew) plot(Y) xlabel('Time units') ylabel('Concentration of Y') ylim(0, (B/a)*1.1) pass """ Explanation: <h2> <font color='firebrick'> Question Set 3 (3 pts) </font> </h2> As before change the values of the simulation to answer these questions How does the parameter K in the Hill function relate to the location of the "step" of the logic approximation? How does the parameter $\beta$ relate to the rate of production of $Y$? 
For waht values of $n$ is the logic approximation most like the hill function? Logic approximations for multi-input functions What if a gene needs two or more activator proteins to be transcribed? We can describe the amount of $Z$ transcribed as a function of active forms of $X$ and $Y$ with a function like: $$ f(X,Y) = \beta\ \Theta(X > K_x \land Y > K_y) $$ The above equation describes "AND" logic (i.e. both X and Y have to be above their threshold levels, $K_x$ and $K_y$, for Z to be transcribed). In a similar manner we can define "OR" logic: $$ f(X,Y) = \beta\ \Theta(X > K_x \lor Y > K_y) $$ A SUM function would be defined like this: $$ f(X,Y) = \beta_x \Theta(X > K_x) + \beta_y \Theta (Y > K_y) $$ Modeling changes in network components over time Up until this point we've been considering how the rate of production of a protein $Y$ changes with the concentration of a transcriptional activator/repressor that regulates $Y$. Now we want to turn to the question of how the absolute amount of $Y$ changes over time. As we discussed at the beginning of this notebook, how the amount of $Y$ changes over time is a function of two things: 1) a growth term which represents the rate of production of $Y$; and 2) a decay term which gives the rate at which $Y$ is degraded. A differential equation describing this as follows: $$ \frac{dY}{dt} = f(X_1, X_2, \ldots) - \alpha Y $$ The $f(X_1, X_2, \ldots)$ term represents the growth term and is a function of the transcription factors that regulate $Y$. We've already seen a couple of ways to model the rate of producting -- using the Hill function or its logic approximation. For the sake of simplicity we'll use the logic approximation to model the growth term. For example, in the case $Y$ is regulated by a single input we might use $f(X) = \beta \theta(X > K_1)$. 
For the equivalent function where $Y$ was regulated by two transcription factor, $X_1$ and $X_2$, and both are required to be above the respective threshold, we could use the function $f(X_1, X_2) = \beta \theta (X_1 > K_1 \land X_2 > K_2)$. The second term, $\alpha Y$ represents the rate at which $Y$ is being broken down or diluted. Notice that the decay rate is a proportional to the amount of $Y$ that is present. Change in concentration under constant activation Now let's explore a simple model of regulation for the two gene network, $X \longrightarrow Y$. Here we assume that at time 0 the activator, $X$, rises above the threshold, $K$, necessary to induce transcription of $Y$ at the rate $\beta$. $X$ remains above this threshold for the entire simulation. Therefore, we can write $dY/dt$ as: $$ \frac{dY}{dt} = \beta - \alpha Y $$ Write a Python function to represent the change in $Y$ in a given time increment, under this assumption of constant activation: End of explanation """ B = 5 K = 10 a = 0.05 # setup pulse of X # off (0) for first 50 steps, on for next 100 steps, off again for last 100 steps X = [0]*50 + [3*K]*100 + [0]*100 Y = [0] # initial value of Y nsteps = 250 for i in range(1, nsteps): xnow = X[i] yprevious = Y[-1] deltay = dYdt(B, K, a, xnow, yprevious) ynew = yprevious + deltay Y.append(ynew) plot(X, color='red', linestyle='dashed', label="X") plot(Y, color='blue', label="Y") ylim(0, max(max(X)*1.1, (B/a)*1.1)) xlabel('Time units') ylabel('Concentration') legend(loc="best") pass """ Explanation: <h2> <font color='firebrick'> Question Set 4 (3 pts) </font> </h2> The concentration of $Y$ eventually reaches a steady state, $Y_{st}$. How does $Y_{st}$ relate to $\beta$ and $\alpha$? The response time of a dynamical system, $T_{1/2}$is defined as the time it takes for it to go half-way between it's initial and final value. a) How does the response time change as you vary $\beta$? b) How does the response time change as you vary $\alpha$? 
Estimate the response time as you vary the parameter $\alpha$ and see if you can create a plot (in Python, or R, or Excel) showing the relationship between $\alpha$ and response time, for $0.01 \leq \alpha \leq 0.1$. Toggling the activator X In the proceeding example the activator $X$ was on at the beginning of the simulation and just stayed on. Let's see what happens when $X$ has pulsatile dynamics. This would be akin to toggling $X$ on then off, and asking what happens to $Y$. End of explanation """ ## We'll specify the behavior of X as a series of pulse of different length ## so we'll define a function to generate pulses def pulse(ontime, offtime, ntimes, onval=1): if ontime >= offtime: raise Exception("Invalid on/off times.") signal = np.zeros(ntimes) signal[ontime:offtime] = onval return signal nsteps = 150 short_pulse = pulse(20, 23, nsteps) # 5 sec pulse long_pulse = pulse(50, 100, nsteps) # 50 sec pulse X = short_pulse + long_pulse # we can then add the pulses to create # a single time trace plot(X, color='black') xlabel('Time units') ylabel('Amount of Gene Product') ylim(0, 1.5) pass """ Explanation: Feed Forward Loops We're now going to use some of these tools to look at a class of network motifs (small network topologies), called Feed Forward Loops (FFLs), found in signaling and regulatory networks. FFLs involve interactions between three components, with the basic topology illustrated below. Depending on the signs of the edges (whether activating or repressing) we can classify FFLs as "coherent" or "incoherent." We'll take a look at an example of each class. A Coherent FFL The most common type of coherent FFL is illustrated in the figure below. In this system $X$ is an activator of $Y$ and both $X$ and $Y$ regulate the production of $Z$ with AND logic (i.e. both $X$ and $Y$ must be above particular thresholds in order to trigger the production of $Z$). 
Using our logic approximation framework we will model the coherent FFL network illustrated above as follows. Gene Y: \begin{eqnarray} Y = f(X) = \beta_y\ \Theta(X > K_{xy}) \ \ \frac{dY}{dt} = \beta_y\ \Theta(X > K_{xy}) - \alpha_{y}Y \end{eqnarray} Gene Z: \begin{eqnarray} Z = g(X,Y) = \beta_z\ \Theta(X > K_{xz})\Theta(Y > K_{yz}) \ \ \frac{dZ}{dt} = \beta_z\ \Theta(X > K_{xz})\Theta(Y > K_{yz}) - \alpha_{z}Z \end{eqnarray} End of explanation """ def dYdt(B, K, a, X, Y): if X > K: theta = 1 else: theta = 0 return B * theta - a * Y def dZdt(B, Kx, Ky, a, X, Y, Z): theta = 0 if (X > Kx) and (Y > Ky): theta = 1 return B * theta - a * Z ## Plot X, Y, and Z on the same time scale nsteps = 150 short_pulse = pulse(20, 23, nsteps) # 5 sec pulse long_pulse = pulse(50, 100, nsteps) # 50 sec pulse X = short_pulse + long_pulse # setup parameters for Y and Z Y = [0] betay, alphay = 0.2, 0.1 Kxy = 0.5 Z = [0] betaz, alphaz = 0.2, 0.1 Kxz = 0.5 Kyz = 1 for i in range(nsteps): xnow = X[i] ynow, znow = Y[-1], Z[-1] ynew = ynow + dYdt(betay, Kxy, alphay, xnow, ynow) znew = znow + dZdt(betaz, Kxz, Kyz, alphaz, xnow, ynow, znow) Y.append(ynew) Z.append(znew) plot(X, 'k--', label='X', linewidth=1.5) plot(Y, 'b', label='Y') plot(Z, 'r', label='Z') ylim(-0.1, 2.5) xlabel("Time") ylabel("Concentration") legend() pass """ Explanation: Define Python functions for dY/dt and dZ/dt Recall from above that \begin{eqnarray} \frac{dY}{dt} & = & \beta_y\ \Theta(X > K_{xy}) - \alpha_{y}Y \ \ \frac{dZ}{dt} & = & \beta_z\ \Theta(X > K_{xz})\Theta(Y > K_{yz}) - \alpha_{z}Z \end{eqnarray} End of explanation """ nsteps = 150 p1start = 10 p1duration = 5 p2start = 50 p2duration = 50 short_pulse = pulse(p1start, p1start + p1duration, nsteps) # short pulse long_pulse = pulse(p2start, p2start + p2duration, nsteps) # long pulse X = short_pulse + long_pulse # change this `scale` argument to increase/decrease noise noise = np.random.normal(loc=0, scale=0.2, size=nsteps) # mean=0, sd=0.2 X = X + noise # setup 
parameters for Y and Z Y = [0] betay, alphay = 0.2, 0.1 Kxy = 0.5 Z = [0] betaz, alphaz = 0.2, 0.1 Kxz = 0.5 Kyz = 1 for i in range(nsteps): xnow = X[i] ynow, znow = Y[-1], Z[-1] ynew = ynow + dYdt(betay, Kxy, alphay, xnow, ynow) znew = znow + dZdt(betaz, Kxz, Kyz, alphaz, xnow, ynow, znow) Y.append(ynew) Z.append(znew) # draw each trace as a subfigure # subfigures stacked in a vertical grid subplot2grid((3,1),(0,0)) plot(X, 'k', label='X', linewidth=1) legend() subplot2grid((3,1),(1,0)) plot(Y, 'b', label='Y', linewidth=2) legend() subplot2grid((3,1),(2,0)) plot(Z, 'r', label='Z', linewidth=2) vlines(p1start, min(Z),max(Z)*1.1,color='black',linestyle='dashed') annotate("pulse 1 on", xy=(p1start,1),xytext=(40,20), textcoords='offset points', horizontalalignment="center", verticalalignment="bottom", arrowprops=dict(arrowstyle="->",color='black', connectionstyle='arc3,rad=0.5', linewidth=1)) vlines(p2start, min(Z),max(Z)*1.1,color='black',linestyle='dashed') annotate("pulse 2 on", xy=(p2start,1),xytext=(-40,0), textcoords='offset points', horizontalalignment="center", verticalalignment="bottom", arrowprops=dict(arrowstyle="->",color='black', connectionstyle='arc3,rad=0.5', linewidth=1)) legend() pass """ Explanation: <h2> <font color='firebrick'>Question Set 5 (2 pts)</font> </h2> How do the dynamics of $Y$ and $Z$ differ in the simulation above? Try varying the length of the first (short) pulse. How does changing the length of the pulse affect the dynamics of $Y$ and $Z$? Performance of the Coherent FFL under noisy inputs Let's further explore the behavior of the coherent FFL defined given noisy inputs. As before we're going to define an input signal, $X$, that has a short and long pulse, but now we're going to pollute $X$ with random noise. 
End of explanation """ def Ton(alpha, KYratio): return (1.0/alpha) * log(1.0/(1.0-KYratio)) ## Create a contour plot for a range of alpha and Kyz/Yst x = alpha = linspace(0.01, 0.2, 100) y = KYratio = linspace(0.01, 0.99, 100) X,Y = meshgrid(x, y) Z = Ton(X,Y) levels = MaxNLocator(nbins=20).tick_values(Z.min(), Z.max()) im = contourf(X,Y,Z, cmap=cm.afmhot_r, levels=levels) contour(X, Y, Z, levels, colors=('k',), linewidths=(0.5,)) colorbar(im) xlabel('alpha') ylabel("Kyz/Yst") pass """ Explanation: <h2> <font color='firebrick'>Question Set 6 (2 pts) </font> </h2> In the code cell above, try changing the duration of the first pulse and the scale of the noise (see comments in code) to get a sense of how good a filter the FFL is. Is there a bias to the filtering with respect to Z turning on versus Z turning off? Dynamics of Y and Z in the Coherent FFL As before we can solve for Y as a function of time and calculate what its steady state value will be: $$ Y(t) = Y_{st}(1-e^{-\alpha_{y}t}) $$ and $$ Y_{st}=\frac{\beta_y}{\alpha_y} $$ How about $Z$? Since $Z$ is governed by an AND function it needs both $X$ and $Y$ to be above their respective thresholds, $K_{xz}$ and $K_{yz}$. For the sake of simplicity let's assume that both $Y$ and $Z$ have the same threshold with respect to $X$, i.e. $K_{xy} = K_{xz}$. This allows us just to consider how long it takes for $Y$ to reach the threshold value $K_{yz}$. Given this we can calculate the delay before $Z$ turns on, $T_{\mathrm{on}}$ as follows. $$ Y(T_{\mathrm{on}}) = Y_{st}(1-e^{-\alpha_y T_{\mathrm{on}}}) = K_{yz} $$ and solving for $T_{\mathrm{on}}$ we find: $$ T_{\mathrm{on}} = \frac{1}{\alpha_y} \log\left[\frac{1}{(1-K_{yz}/Y_{st})}\right] $$ Thus we see that the delay before $Z$ turns on is a function of the degradation rate of $Y$ and the ratio between $Y_{st}$ and $K_{yz}$. 
Exploring the Parameter space of $Z$'s turn-on time From the above formula, we see that there are two parameters that affect the turn-on time of $Z$ -- $\alpha_y$ (the scaling factor for the decay rate of $Y$) and the compound parameter $K_{yz}/Y_{st}$ (the threshold concentration where $Y$ activate $Z$ relative to the steady state of $Y$). To explore the two-dimensional parameter space of $Z's$ $T_on$ we can create a contour plot. End of explanation """ ## A Python function that represents dZ/dt for the Incoherent FFL ## our dY function previously defined stays the same def dZ_incoh(B1,B2,Kx,Ky,a,X,Y,Z): pass # define the function here def dZ_incoh(B1,B2,Kx,Ky,a,X,Y,Z): theta = 0 B = 0 if (X > Kx) and (Y < Ky): theta = 1 B = B1 elif (X > Kx) and (Y >= Ky): theta = 1 B = B2 return B * theta - a * Z nsteps = 150 short_pulse = pulse(20, 25, nsteps) # 5 sec pulse long_pulse = pulse(50, 100, nsteps) # 50 sec pulse X = short_pulse + long_pulse # setup parameters for Y and Z Y = [0] betay, alphay = 0.2, 0.1 Kxy = 0.5 Z = [0] betaz1, betaz2 = 0.2, 0.001 alphaz = 0.1 Kxz = 0.5 Kyz = 0.5 for i in range(nsteps): xnow = X[i] ynow, znow = Y[-1], Z[-1] ynew = ynow + dYdt(betay, Kxy, alphay, xnow, ynow) znew = znow + dZ_incoh(betaz1, betaz2, Kxz, Kyz, alphaz, xnow, ynow, znow) Y.append(ynew) Z.append(znew) # draw each trace as a subfigure # subfigures stacked in a vertical grid subplot2grid((3,1),(0,0)) plot(X, 'k', label='X', linewidth=1) legend() ylim(0,1.1) subplot2grid((3,1),(1,0)) plot(Y, 'b', label='Y', linewidth=2) legend() ylim(0,2.1) subplot2grid((3,1),(2,0)) plot(Z, 'r', label='Z', linewidth=2) legend() ylim(0,0.7) pass """ Explanation: Type 1 Coherent FFLs can act as a Sign-Sensitive Delays As discussed in the article by Shen-Orr et al. 
a feed forward loop of the type we've just discussed can act as a type of filter -- a sign-sensitive delay that keeps $Z$ from firing in response to transient noisy signals from $X$, but shuts down $Z$ immediately once the signal from $X$ is removed. An Incoherent FFL Consider the FFL illustrated in the figure below. In this incoherent FFL, the logic function that regulates $Z$ is "X AND NOT Y". That is $Z$ turns on once $X$ is above a given threshold, but only stays on fully as long as $Y$ is below another threshold. Again for simplicity we assume $K_{xy} = K_{yz}$. Dynamics of Y As before, the dynamics of $Y$ are described by: $$ \frac{dY}{dt} = \beta_y\ \Theta(X > K_{xy}) - \alpha_{y}Y $$ and $$ Y(t) = Y_{st}(1-e^{-\alpha_{y}t}) $$ Dynamics of Z To describe $Z$ we consider two phases - 1) while $Y < K_{yz}$ and 2) while $Y > K_{yz}$. Z, Phase 1 For the first phase: $$ \frac{dZ}{dt} = \beta_z\ \Theta(X > K_{xz}) - \alpha_{z}Z $$ and $$ Z(t) = Z_{m}(1-e^{-\alpha_{z}t}) $$ As we did in the case of the coherent FFL, we can calculate the time until $Y$ reaches the treshold $K_{yz}$. We'll call this $T_{\mathrm{rep}}$ and it is the same formula we found for $T_{\mathrm{on}}$ previously. $$ T_{\mathrm{rep}} = \frac{1}{\alpha_y \log[\frac{1}{1-K_{yz}/Y_{st}}]} $$ Z, Phase 2 After a delay, $T_{\mathrm{rep}}$, $Y$ starts to repress the transcription of $Z$ and $Z$ decays to a new lower steady state, $Z_{st} = \beta_{z}^{'}/\alpha$. The value of $\beta_{z}^{'}$ depends on how leaky the repression of $Z$ is by $Y$. The dynamics of $Z$ in Phase 2 is given by: $$ Z(t) = Z_{st} + (Z_0 - Z_{st})e^{-\alpha_{z}(t-T_{\mathrm{rep}})} $$ where $$ Z_0 = Z_{m}(1-e^{-\alpha_{z}T_{\mathrm{rep}}}) $$ Combining the two phases of Z We can combine the two phases of $Z$ into a single function: $$ f(X,Y) = \beta_z\Theta(X > K_{xz} \land Y < K_{yz}) + \beta_{z}^{'}\Theta(Y \geq K_{yz}) - \alpha_z Z $$ End of explanation """
keras-team/keras-io
guides/ipynb/sequential_model.ipynb
apache-2.0
import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers """ Explanation: The Sequential model Author: fchollet<br> Date created: 2020/04/12<br> Last modified: 2020/04/12<br> Description: Complete guide to the Sequential model. Setup End of explanation """ # Define Sequential model with 3 layers model = keras.Sequential( [ layers.Dense(2, activation="relu", name="layer1"), layers.Dense(3, activation="relu", name="layer2"), layers.Dense(4, name="layer3"), ] ) # Call model on a test input x = tf.ones((3, 3)) y = model(x) """ Explanation: When to use a Sequential model A Sequential model is appropriate for a plain stack of layers where each layer has exactly one input tensor and one output tensor. Schematically, the following Sequential model: End of explanation """ # Create 3 layers layer1 = layers.Dense(2, activation="relu", name="layer1") layer2 = layers.Dense(3, activation="relu", name="layer2") layer3 = layers.Dense(4, name="layer3") # Call layers on a test input x = tf.ones((3, 3)) y = layer3(layer2(layer1(x))) """ Explanation: is equivalent to this function: End of explanation """ model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] ) """ Explanation: A Sequential model is not appropriate when: Your model has multiple inputs or multiple outputs Any of your layers has multiple inputs or multiple outputs You need to do layer sharing You want non-linear topology (e.g. 
a residual connection, a multi-branch model) Creating a Sequential model You can create a Sequential model by passing a list of layers to the Sequential constructor: End of explanation """ model.layers """ Explanation: Its layers are accessible via the layers attribute: End of explanation """ model = keras.Sequential() model.add(layers.Dense(2, activation="relu")) model.add(layers.Dense(3, activation="relu")) model.add(layers.Dense(4)) """ Explanation: You can also create a Sequential model incrementally via the add() method: End of explanation """ model.pop() print(len(model.layers)) # 2 """ Explanation: Note that there's also a corresponding pop() method to remove layers: a Sequential model behaves very much like a list of layers. End of explanation """ model = keras.Sequential(name="my_sequential") model.add(layers.Dense(2, activation="relu", name="layer1")) model.add(layers.Dense(3, activation="relu", name="layer2")) model.add(layers.Dense(4, name="layer3")) """ Explanation: Also note that the Sequential constructor accepts a name argument, just like any layer or model in Keras. This is useful to annotate TensorBoard graphs with semantically meaningful names. End of explanation """ layer = layers.Dense(3) layer.weights # Empty """ Explanation: Specifying the input shape in advance Generally, all layers in Keras need to know the shape of their inputs in order to be able to create their weights. So when you create a layer like this, initially, it has no weights: End of explanation """ # Call layer on a test input x = tf.ones((1, 4)) y = layer(x) layer.weights # Now it has weights, of shape (4, 3) and (3,) """ Explanation: It creates its weights the first time it is called on an input, since the shape of the weights depends on the shape of the inputs: End of explanation """ model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] ) # No weights at this stage! 
# At this point, you can't do this: # model.weights # You also can't do this: # model.summary() # Call the model on a test input x = tf.ones((1, 4)) y = model(x) print("Number of weights after calling the model:", len(model.weights)) # 6 """ Explanation: Naturally, this also applies to Sequential models. When you instantiate a Sequential model without an input shape, it isn't "built": it has no weights (and calling model.weights results in an error stating just this). The weights are created when the model first sees some input data: End of explanation """ model.summary() """ Explanation: Once a model is "built", you can call its summary() method to display its contents: End of explanation """ model = keras.Sequential() model.add(keras.Input(shape=(4,))) model.add(layers.Dense(2, activation="relu")) model.summary() """ Explanation: However, it can be very useful when building a Sequential model incrementally to be able to display the summary of the model so far, including the current output shape. In this case, you should start your model by passing an Input object to your model, so that it knows its input shape from the start: End of explanation """ model.layers """ Explanation: Note that the Input object is not displayed as part of model.layers, since it isn't a layer: End of explanation """ model = keras.Sequential() model.add(layers.Dense(2, activation="relu", input_shape=(4,))) model.summary() """ Explanation: A simple alternative is to just pass an input_shape argument to your first layer: End of explanation """ model = keras.Sequential() model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images model.add(layers.Conv2D(32, 5, strides=2, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) # Can you guess what the current output shape is at this point? Probably not. # Let's just print it: model.summary() # The answer was: (40, 40, 32), so we can keep downsampling... 
model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(2)) # And now? model.summary() # Now that we have 4x4 feature maps, time to apply global max pooling. model.add(layers.GlobalMaxPooling2D()) # Finally, we add a classification layer. model.add(layers.Dense(10)) """ Explanation: Models built with a predefined input shape like this always have weights (even before seeing any data) and always have a defined output shape. In general, it's a recommended best practice to always specify the input shape of a Sequential model in advance if you know what it is. A common debugging workflow: add() + summary() When building a new Sequential architecture, it's useful to incrementally stack layers with add() and frequently print model summaries. For instance, this enables you to monitor how a stack of Conv2D and MaxPooling2D layers is downsampling image feature maps: End of explanation """ initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=[layer.output for layer in initial_model.layers], ) # Call feature extractor on test input. x = tf.ones((1, 250, 250, 3)) features = feature_extractor(x) """ Explanation: Very practical, right? What to do once you have a model Once your model architecture is ready, you will want to: Train your model, evaluate it, and run inference. See our guide to training & evaluation with the built-in loops Save your model to disk and restore it. See our guide to serialization & saving. Speed up model training by leveraging multiple GPUs. See our guide to multi-GPU and distributed training. 
Feature extraction with a Sequential model Once a Sequential model has been built, it behaves like a Functional API model. This means that every layer has an input and output attribute. These attributes can be used to do neat things, like quickly creating a model that extracts the outputs of all intermediate layers in a Sequential model: End of explanation """ initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=initial_model.get_layer(name="my_intermediate_layer").output, ) # Call feature extractor on test input. x = tf.ones((1, 250, 250, 3)) features = feature_extractor(x) """ Explanation: Here's a similar example that only extract features from one layer: End of explanation """
mne-tools/mne-tools.github.io
stable/_downloads/27d6cff3f645408158cdf4f3f05a21b6/30_eeg_erp.ipynb
bsd-3-clause
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import mne sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(sample_data_raw_file, preload=False) sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_filt-0-40_raw-eve.fif') events = mne.read_events(sample_data_events_file) raw.crop(tmax=90) # in seconds; happens in-place # discard events >90 seconds (not strictly necessary: avoids some warnings) events = events[events[:, 0] <= raw.last_samp] """ Explanation: EEG processing and Event Related Potentials (ERPs) This tutorial shows how to perform standard ERP analyses in MNE-Python. Most of the material here is covered in other tutorials too, but for convenience the functions and methods most useful for ERP analyses are collected here, with links to other tutorials where more detailed information is given. As usual we'll start by importing the modules we need and loading some example data. Instead of parsing the events from the raw data's :term:stim channel (like we do in this tutorial &lt;tut-events-vs-annotations&gt;), we'll load the events from an external events file. Finally, to speed up computations so our documentation server can handle them, we'll crop the raw data from ~4.5 minutes down to 90 seconds. End of explanation """ raw.pick(['eeg', 'eog']).load_data() raw.info """ Explanation: The file that we loaded has already been partially processed: 3D sensor locations have been saved as part of the .fif file, the data have been low-pass filtered at 40 Hz, and a common average reference is set for the EEG channels, stored as a projector (see section-avg-ref-proj in the tut-set-eeg-ref tutorial for more info about when you may want to do this). We'll discuss how to do each of these below. 
Since this is a combined EEG+MEG dataset, let's start by restricting the data to just the EEG and EOG channels. This will cause the other projectors saved in the file (which apply only to magnetometer channels) to be removed. By looking at the measurement info we can see that we now have 59 EEG channels and 1 EOG channel. End of explanation """ channel_renaming_dict = {name: name.replace(' 0', '').lower() for name in raw.ch_names} _ = raw.rename_channels(channel_renaming_dict) # happens in-place """ Explanation: Channel names and types In practice it's quite common to have some channels labelled as EEG that are actually EOG channels. ~mne.io.Raw objects have a ~mne.io.Raw.set_channel_types method that you can use to change a channel that is labeled as eeg into an eog type. You can also rename channels using the ~mne.io.Raw.rename_channels method. Detailed examples of both of these methods can be found in the tutorial tut-raw-class. In this data the channel types are all correct already, so for now we'll just rename the channels to remove a space and a leading zero in the channel names, and convert to lowercase: End of explanation """ raw.plot_sensors(show_names=True) fig = raw.plot_sensors('3d') """ Explanation: Channel locations The tutorial tut-sensor-locations describes MNE-Python's handling of sensor positions in great detail. To briefly summarize: MNE-Python distinguishes :term:montages &lt;montage&gt; (which contain sensor positions in 3D: x, y, z, in meters) from :term:layouts &lt;layout&gt; (which define 2D arrangements of sensors for plotting approximate overhead diagrams of sensor positions). Additionally, montages may specify idealized sensor positions (based on, e.g., an idealized spherical headshape model) or they may contain realistic sensor positions obtained by digitizing the 3D locations of the sensors when placed on the actual subject's head. 
This dataset has realistic digitized 3D sensor locations saved as part of the .fif file, so we can view the sensor locations in 2D or 3D using the ~mne.io.Raw.plot_sensors method: End of explanation """ for proj in (False, True): with mne.viz.use_browser_backend('matplotlib'): fig = raw.plot(n_channels=5, proj=proj, scalings=dict(eeg=50e-6)) fig.subplots_adjust(top=0.9) # make room for title ref = 'Average' if proj else 'No' fig.suptitle(f'{ref} reference', size='xx-large', weight='bold') """ Explanation: If you're working with a standard montage like the 10-20 system, you can add sensor locations to the data like this: raw.set_montage('standard_1020'). See tut-sensor-locations for info on what other standard montages are built-in to MNE-Python. If you have digitized realistic sensor locations, there are dedicated functions for loading those digitization files into MNE-Python; see reading-dig-montages for discussion and dig-formats for a list of supported formats. Once loaded, the digitized sensor locations can be added to the data by passing the loaded montage object to raw.set_montage(). Setting the EEG reference As mentioned above, this data already has an EEG common average reference added as a :term:projector. We can view the effect of this on the raw data by plotting with and without the projector applied: End of explanation """ raw.filter(l_freq=0.1, h_freq=None) """ Explanation: The referencing scheme can be changed with the function mne.set_eeg_reference (which by default operates on a copy of the data) or the raw.set_eeg_reference() &lt;mne.io.Raw.set_eeg_reference&gt; method (which always modifies the data in-place). The tutorial tut-set-eeg-ref shows several examples of this. Filtering MNE-Python has extensive support for different ways of filtering data. For a general discussion of filter characteristics and MNE-Python defaults, see disc-filtering. For practical examples of how to apply filters to your data, see tut-filter-resample. 
Here, we'll apply a simple high-pass filter for illustration: End of explanation """ np.unique(events[:, -1]) """ Explanation: Evoked responses: epoching and averaging The general process for extracting evoked responses from continuous data is to use the ~mne.Epochs constructor, and then average the resulting epochs to create an ~mne.Evoked object. In MNE-Python, events are represented as a :class:NumPy array &lt;numpy.ndarray&gt; of sample numbers and integer event codes. The event codes are stored in the last column of the events array: End of explanation """ event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3, 'visual/right': 4, 'face': 5, 'buttonpress': 32} """ Explanation: The tut-event-arrays tutorial discusses event arrays in more detail. Integer event codes are mapped to more descriptive text using a Python :class:dictionary &lt;dict&gt; usually called event_id. This mapping is determined by your experiment code (i.e., it reflects which event codes you chose to use to represent different experimental events or conditions). For the sample-dataset data has the following mapping: End of explanation """ epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.3, tmax=0.7, preload=True) fig = epochs.plot(events=events) """ Explanation: Now we can extract epochs from the continuous data. An interactive plot allows you to click on epochs to mark them as "bad" and drop them from the analysis (it is not interactive on the documentation website, but will be when you run epochs.plot() &lt;mne.Epochs.plot&gt; in a Python console). End of explanation """ reject_criteria = dict(eeg=100e-6, # 100 µV eog=200e-6) # 200 µV _ = epochs.drop_bad(reject=reject_criteria) """ Explanation: It is also possible to automatically drop epochs, when first creating them or later on, by providing maximum peak-to-peak signal value thresholds (pass to the ~mne.Epochs constructor as the reject parameter; see tut-reject-epochs-section for details). 
You can also do this after the epochs are already created, using the ~mne.Epochs.drop_bad method: End of explanation """ epochs.plot_drop_log() """ Explanation: Next we generate a barplot of which channels contributed most to epochs getting rejected. If one channel is responsible for lots of epoch rejections, it may be worthwhile to mark that channel as "bad" in the ~mne.io.Raw object and then re-run epoching (fewer channels w/ more good epochs may be preferable to keeping all channels but losing many epochs). See tut-bad-channels for more info. End of explanation """ l_aud = epochs['auditory/left'].average() l_vis = epochs['visual/left'].average() """ Explanation: Another way in which epochs can be automatically dropped is if the event around which the epoch is formed is too close to the start or end of the ~mne.io.Raw object (e.g., if the epoch's tmax would be past the end of the file; this is the cause of the "TOO_SHORT" entry in the ~mne.Epochs.plot_drop_log plot above). Epochs may also be automatically dropped if the ~mne.io.Raw object contains :term:annotations that begin with either bad or edge ("edge" annotations are automatically inserted when concatenating two separate ~mne.io.Raw objects together). See tut-reject-data-spans for more information about annotation-based epoch rejection. Now that we've dropped the bad epochs, let's look at our evoked responses for some conditions we care about. Here the ~mne.Epochs.average method will create an ~mne.Evoked object, which we can then plot. 
Notice that we select which condition we want to average using the square-bracket indexing (like a :class:dictionary &lt;dict&gt;); that returns a smaller epochs object containing just the epochs from that condition, to which we then apply the ~mne.Epochs.average method: End of explanation """ fig1 = l_aud.plot() fig2 = l_vis.plot(spatial_colors=True) """ Explanation: These ~mne.Evoked objects have their own interactive plotting method (though again, it won't be interactive on the documentation website): click-dragging a span of time will generate a scalp field topography for that time span. Here we also demonstrate built-in color-coding the channel traces by location: End of explanation """ l_aud.plot_topomap(times=[-0.2, 0.1, 0.4], average=0.05) """ Explanation: Scalp topographies can also be obtained non-interactively with the ~mne.Evoked.plot_topomap method. Here we display topomaps of the average field in 50 ms time windows centered at -200 ms, 100 ms, and 400 ms. End of explanation """ l_aud.plot_joint() """ Explanation: Considerable customization of these plots is possible, see the docstring of ~mne.Evoked.plot_topomap for details. There is also a built-in method for combining "butterfly" plots of the signals with scalp topographies, called ~mne.Evoked.plot_joint. Like ~mne.Evoked.plot_topomap you can specify times for the scalp topographies or you can let the method choose times automatically, as is done here: End of explanation """ for evk in (l_aud, l_vis): evk.plot(gfp=True, spatial_colors=True, ylim=dict(eeg=[-12, 12])) """ Explanation: Global field power (GFP) Global field power :footcite:Lehmann1980,Lehmann1984,Murray2008 is, generally speaking, a measure of agreement of the signals picked up by all sensors across the entire scalp: if all sensors have the same value at a given time point, the GFP will be zero at that time point; if the signals differ, the GFP will be non-zero at that time point. 
GFP peaks may reflect "interesting" brain activity, warranting further investigation. Mathematically, the GFP is the population standard deviation across all sensors, calculated separately for every time point. You can plot the GFP using evoked.plot(gfp=True) &lt;mne.Evoked.plot&gt;. The GFP trace will be black if spatial_colors=True and green otherwise. The EEG reference does not affect the GFP: End of explanation """ l_aud.plot(gfp='only') """ Explanation: To plot the GFP by itself you can pass gfp='only' (this makes it easier to read off the GFP data values, because the scale is aligned): End of explanation """ gfp = l_aud.data.std(axis=0, ddof=0) # Reproducing the MNE-Python plot style seen above fig, ax = plt.subplots() ax.plot(l_aud.times, gfp * 1e6, color='lime') ax.fill_between(l_aud.times, gfp * 1e6, color='lime', alpha=0.2) ax.set(xlabel='Time (s)', ylabel='GFP (µV)', title='EEG') """ Explanation: As stated above, the GFP is the population standard deviation of the signal across channels. To compute it manually, we can leverage the fact that evoked.data &lt;mne.Evoked.data&gt; is a :class:NumPy array &lt;numpy.ndarray&gt;, and verify by plotting it using matplotlib commands: End of explanation """ left = ['eeg17', 'eeg18', 'eeg25', 'eeg26'] right = ['eeg23', 'eeg24', 'eeg34', 'eeg35'] left_ix = mne.pick_channels(l_aud.info['ch_names'], include=left) right_ix = mne.pick_channels(l_aud.info['ch_names'], include=right) """ Explanation: Analyzing regions of interest (ROIs): averaging across channels Since our sample data is responses to left and right auditory and visual stimuli, we may want to compare left versus right ROIs. To average across channels in a region of interest, we first find the channel indices we want. 
Looking back at the 2D sensor plot above, we might choose the following for left and right ROIs: End of explanation """ roi_dict = dict(left_ROI=left_ix, right_ROI=right_ix) roi_evoked = mne.channels.combine_channels(l_aud, roi_dict, method='mean') print(roi_evoked.info['ch_names']) roi_evoked.plot() """ Explanation: Now we can create a new Evoked with 2 virtual channels (one for each ROI): End of explanation """ evokeds = dict(auditory=l_aud, visual=l_vis) picks = [f'eeg{n}' for n in range(10, 15)] mne.viz.plot_compare_evokeds(evokeds, picks=picks, combine='mean') """ Explanation: Comparing conditions If we wanted to compare our auditory and visual stimuli, a useful function is mne.viz.plot_compare_evokeds. By default this will combine all channels in each evoked object using global field power (or RMS for MEG channels); here instead we specify to combine by averaging, and restrict it to a subset of channels by passing picks: End of explanation """ evokeds = dict(auditory=list(epochs['auditory/left'].iter_evoked()), visual=list(epochs['visual/left'].iter_evoked())) mne.viz.plot_compare_evokeds(evokeds, combine='mean', picks=picks) """ Explanation: We can also easily get confidence intervals by treating each epoch as a separate observation using the ~mne.Epochs.iter_evoked method. A confidence interval across subjects could also be obtained, by passing a list of ~mne.Evoked objects (one per subject) to the ~mne.viz.plot_compare_evokeds function. End of explanation """ aud_minus_vis = mne.combine_evoked([l_aud, l_vis], weights=[1, -1]) aud_minus_vis.plot_joint() """ Explanation: We can also compare conditions by subtracting one ~mne.Evoked object from another using the mne.combine_evoked function (this function also allows pooling of epochs without subtraction). 
End of explanation """ grand_average = mne.grand_average([l_aud, l_vis]) print(grand_average) """ Explanation: <div class="alert alert-danger"><h4>Warning</h4><p>The code above yields an **equal-weighted difference**. If you have imbalanced trial numbers, you might want to equalize the number of events per condition first by using `epochs.equalize_event_counts() <mne.Epochs.equalize_event_counts>` before averaging.</p></div> Grand averages To compute grand averages across conditions (or subjects), you can pass a list of ~mne.Evoked objects to mne.grand_average. The result is another ~mne.Evoked object. End of explanation """ list(event_dict) """ Explanation: For combining conditions it is also possible to make use of :term:HED tags in the condition names when selecting which epochs to average. For example, we have the condition names: End of explanation """ epochs['auditory'].average() """ Explanation: We can select the auditory conditions (left and right together) by passing: End of explanation """ # Define a function to print out the channel (ch) containing the # peak latency (lat; in msec) and amplitude (amp, in µV), with the # time range (tmin and tmax) that were searched. # This function will be used throughout the remainder of the tutorial def print_peak_measures(ch, tmin, tmax, lat, amp): print(f'Channel: {ch}') print(f'Time Window: {tmin * 1e3:.3f} - {tmax * 1e3:.3f} ms') print(f'Peak Latency: {lat * 1e3:.3f} ms') print(f'Peak Amplitude: {amp * 1e6:.3f} µV') # Get peak amplitude and latency from a good time window that contains the peak good_tmin, good_tmax = .08, .12 ch, lat, amp = l_vis.get_peak(ch_type='eeg', tmin=good_tmin, tmax=good_tmax, mode='pos', return_amplitude=True) # Print output from the good time window that contains the peak print('** PEAK MEASURES FROM A GOOD TIME WINDOW **') print_peak_measures(ch, good_tmin, good_tmax, lat, amp) """ Explanation: see tut-section-subselect-epochs for details. 
The tutorials tut-epochs-class and tut-evoked-class have many more details about working with the ~mne.Epochs and ~mne.Evoked classes. Amplitude and latency measures It is common in ERP research to extract measures of amplitude or latency to compare across different conditions. There are many measures that can be extracted from ERPs, and many of these are detailed (including the respective strengths and weaknesses) in chapter 9 of Luck :footcite:Luck2014 (also see the Measurement Tool in the ERPLAB Toolbox :footcite:Lopez-CalderonLuck2014). This part of the tutorial will demonstrate how to extract three common measures: Peak latency Peak amplitude Mean amplitude Peak latency and amplitude The most common measures of amplitude and latency are peak measures. Peak measures are basically the maximum amplitude of the signal in a specified time window and the time point (or latency) at which the peak amplitude occurred. Peak measures can be obtained using the :meth:~mne.Evoked.get_peak method. There are two important things to point out about :meth:~mne.Evoked.get_peak method. First, it finds the strongest peak looking across all channels of the selected type that are available in the :class:~mne.Evoked object. As a consequence, if you want to restrict the search for the peak to a group of channels or a single channel, you should first use the :meth:~mne.Evoked.pick or :meth:~mne.Evoked.pick_channels methods. Second, the :meth:~mne.Evoked.get_peak method can find different types of peaks using the mode argument. There are three options: mode='pos': finds the peak with a positive voltage (ignores negative voltages) mode='neg': finds the peak with a negative voltage (ignores positive voltages) mode='abs': finds the peak with the largest absolute voltage regardless of sign (positive or negative) The following example demonstrates how to find the first positive peak in the ERP (i.e., the P100) for the left visual condition (i.e., the l_vis :class:~mne.Evoked object). 
The time window used to search for the peak ranges from .08 to .12 s. This time window was selected because it is when P100 typically occurs. Note that all 'eeg' channels are submitted to the :meth:`~mne.Evoked.get_peak` method.
End of explanation
"""

# First, return a copy of l_vis to select the channel from
# (picking on a copy leaves the original Evoked object untouched)
l_vis_roi = l_vis.copy().pick('eeg59')

# Get the peak and latency measure from the selected channel
# within the same "good" window used above (.08 to .12 s)
ch_roi, lat_roi, amp_roi = l_vis_roi.get_peak(
    tmin=good_tmin, tmax=good_tmax, mode='pos', return_amplitude=True)

# Print output
print('** PEAK MEASURES FOR ONE CHANNEL FROM A GOOD TIME WINDOW **')
print_peak_measures(ch_roi, good_tmin, good_tmax, lat_roi, amp_roi)

"""
Explanation: The output shows that channel eeg55 had the maximum positive peak in
the chosen time window from all of the 'eeg' channels searched.
In practice, one might want to pull out the peak for
an a priori region of interest or a single channel depending on the study.
This can be done by combining the :meth:`~mne.Evoked.pick`
or :meth:`~mne.Evoked.pick_channels` methods with the
:meth:`~mne.Evoked.get_peak` method.
Here, let's assume we believe the effects of interest will occur
at eeg59.
End of explanation
"""

# Get BAD peak measures
# (deliberately mis-placed window to show get_peak always returns *something*)
bad_tmin, bad_tmax = .095, .135
ch_roi, bad_lat_roi, bad_amp_roi = l_vis_roi.get_peak(
    mode='pos', tmin=bad_tmin, tmax=bad_tmax, return_amplitude=True)

# Print output
print('** PEAK MEASURES FOR ONE CHANNEL FROM A BAD TIME WINDOW **')
print_peak_measures(ch_roi, bad_tmin, bad_tmax, bad_lat_roi, bad_amp_roi)

"""
Explanation: While the peak latencies are the same in channels eeg55 and eeg59, the peak amplitudes differ. This approach can also be applied to virtual channels created with the :func:`~mne.channels.combine_channels` function and difference waves created with the :func:`mne.combine_evoked` function (see aud_minus_vis in section Comparing conditions_ above).
Peak measures are very susceptible to high frequency noise in the signal (for discussion, see :footcite:Luck2014). Specifically, high frequency noise positively biases peak amplitude measures. This bias can confound comparisons across conditions where ERPs differ in the level of high frequency noise, such as when the conditions differ in the number of trials contributing to the ERP. One way to avoid this is to apply a non-causal low-pass filter to the ERP. Low-pass filters reduce the contribution of high frequency noise by smoothing out fast (i.e., high frequency) fluctuations in the signal (see disc-filtering). While this can reduce the positive bias in peak amplitude measures caused by high frequency noise, low-pass filtering the ERP can introduce challenges in interpreting peak latency measures for effects of interest :footcite:Rousselet2012,VanRullen2011. If using peak measures, it is critical to visually inspect the data to make sure the selected time window actually contains a peak (:meth:~mne.Evoked.get_peak will always identify a peak). Visual inspection allows one to easily verify whether the automatically found peak is correct. The :meth:~mne.Evoked.get_peak method detects the maximum or minimum voltage in the specified time range and returns the latency and amplitude of this peak. There is no guarantee that this method will return an actual peak. Instead, it may return a value on the rising or falling edge of the peak we are trying to find. The following example demonstrates why visual inspection is crucial. Below, we use a known bad time window (.095 to .135 s) to search for a peak in channel eeg59.
End of explanation """ fig, axs = plt.subplots(nrows=2, ncols=1) words = (('Bad', 'missing'), ('Good', 'finding')) times = (np.array([bad_tmin, bad_tmax]), np.array([good_tmin, good_tmax])) colors = ('C1', 'C0') for ix, ax in enumerate(axs): title = '{} time window {} peak'.format(*words[ix]) l_vis_roi.plot(axes=ax, time_unit='ms', show=False, titles=title) ax.plot(lat_roi * 1e3, amp_roi * 1e6, marker='*', color='C6') ax.axvspan(*(times[ix] * 1e3), facecolor=colors[ix], alpha=0.3) ax.set_xlim(-50, 150) # Show zoomed in around peak """ Explanation: If all we had were the above values, it would be unclear if they are truly identifying a peak or just a the falling or rising edge of one. However, it becomes clear that the .095 to .135 s time window is misses the peak on eeg59. This is shown in the bottom panel where we see the bad time window (highlighted in orange) misses the peak (the pink star). In contrast, the time window defined initially (.08 to .12 s; highlighted in blue) returns an actual peak instead of a just a maximal or minimal value in the searched time window. Visual inspection will always help you to convince yourself the data returned are actual peaks. End of explanation """ # Select all of the channels and crop to the time window channels = ['eeg54', 'eeg57', 'eeg55', 'eeg59'] hemisphere = ['left', 'left', 'right', 'right'] l_vis_mean_roi = l_vis.copy().pick(channels).crop( tmin=good_tmin, tmax=good_tmax) # Extract mean amplitude in µV over time mean_amp_roi = l_vis_mean_roi.data.mean(axis=1) * 1e6 # Store the data in a data frame mean_amp_roi_df = pd.DataFrame({ 'ch_name': l_vis_mean_roi.ch_names, 'hemisphere': ['left', 'left', 'right', 'right'], 'mean_amp': mean_amp_roi }) # Print the data frame print(mean_amp_roi_df.groupby('hemisphere').mean()) """ Explanation: Mean Amplitude Another common practice in ERP studies is to define a component (or effect) as the mean amplitude within a specified time window. 
One advantage of this approach is that it is less sensitive to high frequency noise (compared to peak amplitude measures) because averaging over a time window acts like a low-pass filter (see discussion in the above section Peak latency and amplitude_). When using mean amplitude measures, selecting the time window based on the effect of interest (e.g., the difference between two conditions) can inflate the likelihood of finding false positives in your results because this approach is circular :footcite:LuckGaspelin2017. There are other, and better, ways to identify a time window to use for extracting mean amplitude measures. First, you can use an a priori time window based on prior research. A second way is to define a time window from an independent condition or set of trials not used in the analysis (e.g., a "localizer"). A third approach is to define a time window using the across-condition grand average. This latter approach is not circular because the across-condition mean and condition difference are independent of one another. The issues discussed above also apply to selecting channels used for analysis. The following example demonstrates how to pull out the mean amplitude from the left visual condition (i.e., the l_vis :class:~mne.Evoked object) using selected channels and time windows. Stimulating the left visual field increases neural activity in the visual cortex of the contralateral (i.e., right) hemisphere. We can test this by examining the amplitude of the ERP for left visual field stimulation over right (contralateral) and left (ipsilateral) channels. The channels used for this analysis are eeg54 and eeg57 (left hemisphere), and eeg59 and eeg55 (right hemisphere). The time window used is .08 (good_tmin) to .12 s (good_tmax) as it corresponds to when P100 typically occurs. The P100 is sensitive to left and right visual field stimulation. The mean amplitude is extracted from the above four channels and stored in a :class:pandas.DataFrame.
End of explanation """ # Extract mean amplitude for all channels in l_vis (including `eog`) l_vis_cropped = l_vis.copy().crop(tmin=good_tmin, tmax=good_tmax) mean_amp_all = l_vis_cropped.data.mean(axis=1) * 1e6 mean_amp_all_df = pd.DataFrame({ 'ch_name': l_vis_cropped.info['ch_names'], 'mean_amp': mean_amp_all }) mean_amp_all_df['tmin'] = good_tmin mean_amp_all_df['tmax'] = good_tmax mean_amp_all_df['condition'] = 'Left/Visual' print(mean_amp_all_df.head()) print(mean_amp_all_df.tail()) """ Explanation: As demonstrated in the above example, the mean amplitude was higher and positive in right compared to left hemisphere channels. It should be reiterated that both that spatial and temporal window you use should be determined in an independent manner (e.g., defined a priori from prior research, a "localizer" or another independent condition) and not based on the data you will use to test your hypotheses. The above example can be modified to extract the the mean amplitude from all channels and store the resulting output in :class:pandas.DataFrame. This can be useful for statistical analyses conducted in other programming languages. End of explanation """
gammapy/PyGamma15
tutorials/analysis-stats/TutorialSolutions.ipynb
bsd-3-clause
%matplotlib inline import numpy as np import matplotlib.pyplot as plt """ Explanation: Tutorial about statistical methods The following contains a sequence of simple exercises, designed to get familiar with using Minuit for maximum likelihood fits and emcee to determine parameters by MCMC. Commands are generally commented, i.e. in order to activate them, simply uncomment them. A few functions are still to be defined... which is part of the exercise. Have fun! End of explanation """ np.random.seed(12345) y = np.random.random(10000) x = 1./np.sqrt(y) plt.hist(x, bins=100, range=(1,10), histtype='stepfilled',color='blue') plt.yscale('log') """ Explanation: Generate a dataset to be fitted End of explanation """ def nllp(a): return np.sum(a*np.log(x) - np.log(a-1)) """ Explanation: Maximum likelihood fit of a simple power law First define the negative-log likelihood function for a density proportional to x**(-a) the range 1 < x < infinity End of explanation """ import iminuit minp = iminuit.Minuit(nllp, a=3, error_a=0.1, errordef=0.5) minp.migrad(); """ Explanation: Then minimize it using iminuit End of explanation """ minp.hesse(); minp.minos(); minp.draw_profile('a'); """ Explanation: Error analysis First determine the parabolic errors using hesse() and then do a parameter scan using minos() to determine the 68% confidence level errors. End of explanation """ from scipy.integrate import quad def pdfpn(x, a): return x**(-a) def pdfpn_norm(a): return quad(pdfpn, 1, np.inf, args=(a))[0] def nllpn(a): return -np.sum(np.log(pdfpn(x,a))) + np.log(pdfpn_norm(a))*len(x) """ Explanation: Use of an un-normalised PDF The above example shall be modified such that the normalisation of the likelihood function, which so far was determined analytically, now is determined numerically in the fit. This is the more realistic case, since in many case no (simple) analytical normalisation exists. As a first step, this requires to load the integration package. 
End of explanation """ minpn = iminuit.Minuit(nllpn, a=3, error_a=0.1, errordef=0.5) minpn.migrad(); """ Explanation: Then do the same minimization steps as before. End of explanation """ def pdfcn(x, a, b): return x**(-a)*np.exp(-b*b*x) def pdfcn_norm(a, b): return quad(pdfcn, 1, np.inf, args=(a, b))[0] def nllcn(a, b): return -np.sum(np.log(pdfcn(x,a,b))) + np.log(pdfcn_norm(a,b))*len(x) """ Explanation: Extend the fit model by an exponential cutoff The exponential cutoff is implemented by exp(-bbx), i.e. exponential growth is not allowed for real valued parameters b. The implications of this ansatz shall be discussed when looking at the solution. After that, the example can be modified to use exp(-b*x). Here the likelihood function has no (simple) analytical normalisation anymore, i.e. we directly do the numerical approach. End of explanation """ mincn = iminuit.Minuit(nllcn, a=3, b=1, error_a=0.1, error_b=0.1, errordef=0.5) mincn.migrad(); mincn.hesse(); mincn.minos(); mincn.draw_profile('a'); mincn.draw_profile('b'); mincn.draw_contour('a','b'); """ Explanation: As before, use Minuit for minimisation and error analysis, but now in two dimensions. Study parabolic errors and minos errors, the latter both for the single variables and for both together. End of explanation """ import emcee """ Explanation: Do the same analysis by MCMC End of explanation """ def log_prior(theta): a, b = theta if b < 0: return -np.inf # log(0) else: return 0. def log_likelihood(theta, x): a, b = theta return np.sum(-a*np.log(x) - b*b*x) def log_posterior(theta, x): a , b = theta return log_prior(theta) + log_likelihood(theta, x) - np.log(pdfcn_norm(a,b))*len(x) """ Explanation: emcee requires as input the log-likelihood of the posterior in the parameters a and b. In the following it is composed of the log-of the prior and the log-likelihood of the data. Initially use a simple uniform prior in a and b with the constraint b>0. 
Afterwards one can play with the prior to see how strongly it affects the result. End of explanation """ ndim = 2 # number of parameters in the model nwalkers = 50 # number of MCMC walkers nburn = 100 # "burn-in" period to let chains stabilize nsteps = 1000 # number of MCMC steps to take # random starting point np.random.seed(0) starting_guesses = np.random.random((nwalkers, ndim)) """ Explanation: Here we'll set up the computation. emcee combines multiple "walkers", each of which is its own MCMC chain. The number of trace results will be nwalkers * nsteps End of explanation """ sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[x]) %time sampler.run_mcmc(starting_guesses, nsteps) print("done") """ Explanation: run the MCMC (and time it using IPython's %time magic End of explanation """ emcee_trace = sampler.chain[:, nburn:, :].reshape(-1, ndim).T len(emcee_trace[0]) """ Explanation: sampler.chain is of shape (nwalkers, nsteps, ndim). Before analysis throw-out the burn-in points and reshape. End of explanation """ plt.hist(emcee_trace[0], 100, range=(2.5,3.5) , histtype='stepfilled', color='cyan'); plt.hist(emcee_trace[1], 100, range=(0.,0.5) , histtype='stepfilled', color='cyan'); plt.plot(emcee_trace[0],emcee_trace[1],',k'); """ Explanation: Analyse the results. Plot the projected (marginalized) posteriors for the parameters a and b and also the joinyt density as sampled by the MCMC. 
End of explanation """ def compute_sigma_level(trace1, trace2, nbins=20): """From a set of traces, bin by number of standard deviations""" L, xbins, ybins = np.histogram2d(trace1, trace2, nbins) L[L == 0] = 1E-16 logL = np.log(L) shape = L.shape L = L.ravel() # obtain the indices to sort and unsort the flattened array i_sort = np.argsort(L)[::-1] i_unsort = np.argsort(i_sort) L_cumsum = L[i_sort].cumsum() L_cumsum /= L_cumsum[-1] xbins = 0.5 * (xbins[1:] + xbins[:-1]) ybins = 0.5 * (ybins[1:] + ybins[:-1]) return xbins, ybins, L_cumsum[i_unsort].reshape(shape) xbins, ybins, sigma = compute_sigma_level(emcee_trace[0], emcee_trace[1]) plt.contour(xbins, ybins, sigma.T, levels=[0.683, 0.955]) plt.plot(emcee_trace[0], emcee_trace[1], ',k', alpha=0.1) """ Explanation: As a final step, generate 2-dim bayesian confidence level contours containing 68.3% and 95.5% probability content. For that define a convenient plot functions and use them. Overlay the contours with the scatter plot. End of explanation """
drericstrong/Blog
20161228_PointBuyVsRandomRolls.ipynb
agpl-3.0
from tabulate import tabulate # We will use the value mapping later as a lookup dictionary vmap = {3:-16, 4:-12, 5:-9, 6:-6, 7:-4, 8:-2, 9:-1, 10:0, 11:1, 12:2, 13:3, 14:5, 15:7, 16:10, 17:13, 18:17} # However, we want to actually display the mapping above, so let's # convert the dictionary to a list using a listcomp, for better display value_list = [[sum_, value] for sum_, value in vmap.items()] print('Sum Value\n',tabulate(value_list)) """ Explanation: Imagine that you have a choice between the following algorithms involving rolls of multiple six-sided dice, and you are interested in which algorithm most consistently rolls the best results: Roll 3 six-sided dice and take the sum. Repeat 6 times. Roll 3 six-sided dice and take the sum. Repeat 7 times, drop the lowest of the 7 times. Roll 3 six-sided dice and take the sum. Repeat 9 times, keep the best six rolls. Roll 4 six-sided dice, drop the lowest, and take the sum. Repeat 6 times. Roll 4 six-sided dice, drop the lowest, and take the sum. Repeat 7 times, drop the lowest of the 7 times. Roll 2 six-sided dice, add a third dice roll always equal to 6, and take the sum. Repeat 6 times. Roll 5 six-sided dice, drop the two lowest, and take the sum. Repeat 6 times. Notice that each algorithm has the following basic structure: [A.] Number of dice to roll (e.g. "roll 3 six-sided dice") [B.] (Optional) Method to modify the roll (e.g. "drop the lowest" or "add a third dice roll always equal to 6") [C.] Summary statistic (e.g. "take the sum") [D.] Repetition (e.g. "repeat 6 times") [E.] (Optional) Method to modify the repetition (e.g. "drop the lowest of the 7 times") Complications First and foremost, "best results" is extremely vague. Do we simply want the highest average roll (expected value of each algorithm? How should we interpret high vs. low values? Are high values much better than average, and low values much worse than average? 
Let's imagine that we were given the following "value mapping" that gives a rough idea of how we should "score" the results of parts A through C from the algorithm structure above: End of explanation """ import numpy as np import seaborn as sns from random import randint from scipy.stats import norm from heapq import nlargest import matplotlib.pyplot as plt % matplotlib inline def mc_solution(num_hist, num_dice, num_reps, add_six=False, drop_low_dice=False, drop_low_rep=False): # raw_bins will contain each raw result; val_bins is after #Point Buy remapping raw_res = [] val_res = [] # Perform Monte Carlo sampling for the requested # number of histories for _ in range(num_hist): roll_sums = [] # Repeat the dice rolling [num_reps] time (Part D) for _ in range(num_reps): # Part A- Roll [num_dice] dice rolls = [randint(1,6) for _ in range(num_dice)] # Part B- Modify the dice roll if add_six: rolls.append(6) if drop_low_dice: rolls = nlargest(3,rolls) # Part C- Sum the values roll_sums.append(sum(rolls)) # Part E- Modify the repetitions if drop_low_rep: roll_sums = nlargest(6,roll_sums) # Part Fa- Find the average raw value and save to the raw_res avg_roll_sums = np.median(roll_sums) raw_res.append(avg_roll_sums) # Part Fb- Find the Point Buy value and save to the val_res point_buy_sum = np.sum([vmap[value] for value in roll_sums]) val_res.append(point_buy_sum) return raw_res, val_res def plot_hist(raw_data, val_data, title, bins): f,(ax1,ax2) = plt.subplots(1, 2, figsize=(8,5)) props = dict(boxstyle='round', facecolor='wheat', alpha=0.5) plt.suptitle(title, fontsize=14) # Plot the raw results sns.distplot(raw_data, kde=False, bins=bins, fit=norm, ax=ax1) ax1.text(0.05, 0.95, build_text(raw_data), transform=ax1.transAxes, fontsize=12, verticalalignment='top', bbox=props) ax1.set_xlabel('Raw Results') ax1.set_ylabel('Probability') ax1.set_xlim([3,18]) # Plot the values from the value mapping sns.distplot(val_data, kde=False, fit=norm, ax=ax2) ax2.text(0.05, 0.95, 
build_text(val_data), transform=ax2.transAxes, fontsize=12, verticalalignment='top', bbox=props) ax2.set_xlabel('Equivalent Point Buy') def build_text(data): ret_string = '$\mu$={:.1f}\n$\sigma$={:.2f}\n95%={:.0f}\n5%={:.0f}'.format( np.mean(data), np.std(data), np.percentile(data,95), np.percentile(data,5)) return ret_string """ Explanation: All of the algorithms have no less than a minimum of 3 and no greater than a maximum of 18, so this mapping covers all possible scenarios. Note, however, that one algorithm has a minimum possible value of 8 ("roll 2 six-sided dice, take the sum, and add six"). You can think of this "value mapping" in the following way. If instead of randomly rolling dice, we were able to choose exactly what value we wanted each dice to be, what "price" would we assign to each roll to ensure that every high roll is eventually balanced out by a low roll? We might call this value system "Point Buy", referring to the amount of "points" that each roll might "cost" if we were able to "buy" it. To ensure this mapping is fair, the average of the Point Buy system should be near the statistical mean of Algorithm 1 (i.e. each "bought" high roll must be roughly balanced out by a corresponding low roll), but not exactly. Notice that the distribution of the value mapping is not symmetrical- higher values cost more than lower values give, so there's a slight penalty to using the Point Buy system. The "consistency" requirement means that we also want to minimize the variance of the results. Finally, the "repeat X times" part of the algorithm will be a little tricky. The number of repetitions and methods to modify the repetitions will change the shape of the histogram as well. Let's add a part [F.a.] to each algorithm, which is the median value of all repetitions. We will also visualize the results as the sum of all the repetitions after the value re-mapping (the "Point Buy" system) [F.b.]. 
This seems complicated, but it will probably make more sense as we go through each algorithm below. General Solution There are several different ways to solve this problem, including pure statistics, simulation of all possible results (for instance, using list comprehensions), and Monte Carlo sampling. For this problem, I chose Monte Carlo sampling, because I wanted to visualize the histograms, and simulating all possible results was too computationally expensive (for example, Algorithm 1 has 216^6 possible combinations). The mc_solution function below has been divided into six parts, with comments indicating where the code belongs within the basic structure defined above. The accuracy of the Monte Carlo algorithm depends on the number of histories that are used during simulation; more histories are preferred, but the execution time of the algorithm increases. Refer to the previous post for more information about Monte Carlo simulation. End of explanation """ alg1_raw, alg1_val = mc_solution(10**6, 3, 6) plot_hist(alg1_raw, alg1_val, "Algorithm 1- Sum of 3 Six-Sided Dice, " + "Repeat 6 Times", 25) """ Explanation: Algorithm 1- Sum of 3 Six-Sided Dice, Repeat 6 Times The first algorithm is "Roll 3 six-sided dice and take the sum. Repeat 6 times." At first glance, it seems that this is the worst of all the algorithms, since there is no dropping of lowest values anywhere. Moving forward, this algorithm will be considered as the baseline case, and all other algorithms can be compared to these results. The code below will visualize both the raw results of the algorithm (what do we actually get when we roll the 3 dice, then sum them together?) and the "Equivalent Point Buy" results (what do we get when we map the "raw" results using the value mapping from earlier?). 
End of explanation """ alg2_raw, alg2_val = mc_solution(10**6, 3, 7, drop_low_rep=True) plot_hist(alg2_raw, alg2_val, "Algorithm 2- Sum of 3 Six-Sided Dice, " + "Repeat 7 Times, Drop Lowest", 25) """ Explanation: The mean of the raw results is approximately 10.5, which is exactly what is expected from a statistical solution, since each dice roll has an average of 3.5 (3.5 * 3 = 10.5). The 5% and 95% percentiles are also given, and can be interpreted as "90% of the values of the distribution fall between these two numbers". The Equivalent Point Buy is not exactly equal to 0, because the value mapping is not symmetric (high rolls "cost" more than low rolls "give"). The mean Point Buy (approximately 3) can be interpreted as the average "cost" to "buy" a distribution that is similar to Algorithm 1 (i.e. actually randomly rolling the dice). Notice, though, that the variance seems extremely high. 90% of the time, we expect the Equivalent Point Buy to fall between -14 and 21. Algorithm 2- Sum of 3 Six-Sided Dice, Repeat 7 Times, Drop Lowest The second algorithm is "Roll 3 six-sided dice and take the sum. Repeat 7 times, drop the lowest of the 7 times." Intuitively, this algorithm should have a higher expected value than algorithm 1, because there is an extra chance to roll 3 dice compared to Algorithm 1, and we are discarding the worst one. End of explanation """ alg3_raw, alg3_val = mc_solution(10**6, 3, 9, drop_low_rep=True) plot_hist(alg3_raw, alg3_val, "Algorithm 3- Sum of 3 Six-Sided Dice, " + "Repeat 9 Times, Keep Best 6", 24) """ Explanation: Notice that for the raw results, both the mean and 5% confidence interval increased. The Equivalent Point Buy distribution changed significantly, increasing to a mean of 9 with 90% of the distribution between -6 and 26. Note- all these algorithms will be compared in tabular form in the Summary section. 
Algorithm 3- Sum of 3 Six-Sided Dice, Repeat 9 Times, Keep Best 6 The third algorithm is "Roll 3 six-sided dice and take the sum. Repeat 9 times, keep the best six rolls." We should expect similar results as for Algorithm 2, but slightly improved. End of explanation """ alg4_raw, alg4_val = mc_solution(10**6, 4, 6, drop_low_dice=True) plot_hist(alg4_raw, alg4_val, "Algorithm 4- Sum of 4 Six-Sided Dice, " + "Drop Lowest, Repeat 6 Times", 25) """ Explanation: In fact, the results improved more than expected. The mean Equivalent Point Buy is now approximately 15, increased from 9 for Algorithm 2. This is because there are now 3 chances to drop the lowest value, compared to 1. Algorithm 4- Sum of 4 Six-Sided Dice, Drop Lowest, Repeat 6 Times The fourth algorithm is "Roll 4 six-sided dice, drop the lowest, and take the sum. Repeat 6 times." Before we run the code, let's think about the algorithm and how we might expect the results to compare to Algorithm 1. We should qualitatively expect that the mean should increase more than it did for Algorithm 2, because there are more chances to drop a low result (one chance for Algorithm 2, six chances for Algorithm 3). Let's see if we were right: End of explanation """ alg5_raw, alg5_val = mc_solution(10**6, 4, 7, drop_low_dice=True, drop_low_rep=True) plot_hist(alg5_raw, alg5_val, "Algorithm 5- Sum of 4 Six-Sided Dice, " + "Drop Lowest, Repeat 7 Times, Drop Lowest", 25) """ Explanation: As expected, the mean of the raw results distribution increased significantly more than for Algorithm 2, with 90% of the distribution falling between 10 and 14.5. The mean Equivalent Point Buy is now roughly 19, which is double that of Algorithm 2. Algorithm 5- Sum of 4 Six-Sided Dice, Drop Lowest, Repeat 7 Times, Drop Lowest The fifth algorithm is "Roll 4 six-sided dice, drop the lowest, and take the sum. Repeat 7 times, drop the lowest of the 7 times." Essentially, this is the "best of both worlds" from Algorithms 2 and 4. 
Intuitively, we should expect the distribution to shift to the right somewhat compared to Algorithm 4. End of explanation """ alg6_raw, alg6_val = mc_solution(10**6, 2, 6, add_six=True) plot_hist(alg6_raw, alg6_val, "Algorithm 6- Sum of 2 Six-Sided Dice, " + "Add 6, Repeat 6 Times", 25) """ Explanation: Algorithm 6- Sum of 2 Six-Sided Dice, Add 6, Repeat 6 Times The sixth algorithm is "Roll 2 six-sided dice, add a third dice roll always equal to 6, and take the sum. Repeat 6 times." It's a little unclear how this algorithm will compare to the others. Essentially, we are always replacing the lowest value with the highest possible value, so the expected value should definitely be higher than for Algorithm 4, which only dropped the minimum of 3 rolls. Let's try it out: End of explanation """ alg7_raw, alg7_val = mc_solution(10**6, 5, 6, drop_low_dice=True) plot_hist(alg7_raw, alg7_val, "Algorithm 7- Sum of 5 Six-Sided Dice, " + "Keep 3, Repeat 6 Times", 25) """ Explanation: In fact, this algorithm achieved the highest expected value so far. Notice that the variance is also the lowest of any algorithm so far, since we are replacing one "random" dice with a dice that always gives a value of 6. Algorithm 7- Sum of 5 Six-Sided Dice, Drop the 2 Lowest, Repeat 6 Times The seventh algorithm is "Roll 5 six-sided dice, drop the two lowest, and take the sum. Repeat 6 times." Intuitively, we might expect this algorithm to give the "best" results of all the algorithms, since we are dropping the lowest value 12 times, compared to 6 times for Algorithm 5, 3 times for Algorithm 3, and 1 time for Algorithm 2. End of explanation """ def br(data, description): return [description, round(np.mean(data),1), round(np.std(data),2), int(np.percentile(data,5)), int(np.percentile(data,95))] raw_res = [["Description","Mean","Std","5%","95%"], br(alg1_raw, "1. Sum of 3d6, Repeat 6"), br(alg2_raw, "2. Sum of 3d6, Repeat 7, Drop Lowest"), br(alg3_raw, "3. 
Sum of 3d6, Repeat 9, Keep Best 6"), br(alg4_raw, "4. Sum of 4d6, Drop Lowest, Repeat 6"), br(alg5_raw, "5. Sum of 4d6, Drop Lowest, Repeat 7, " + "Drop Lowest"), br(alg6_raw, "6. Sum of 2d6, Add 6, Repeat 6"), br(alg7_raw, "7. Sum of 5d6, Drop 2, Repeat 6 Times")] print("Raw Results") print(tabulate(raw_res)) """ Explanation: As expected, we were correct. This algorithm has an Equivalent Point Buy of approximately 30, which is massive compared to Algorithm 1. Summary of Results The following table summarizes the raw results from each of the seven algorithms: End of explanation """ val_res = [["Description","Mean","Std","5%","95%"], br(alg1_val, "1. Sum of 3d6, Repeat 6"), br(alg2_val, "2. Sum of 3d6, Repeat 7, Drop Lowest"), br(alg3_val, "3. Sum of 3d6, Repeat 9, Keep Best 6"), br(alg4_val, "4. Sum of 4d6, Drop Lowest, Repeat 6"), br(alg5_val, "5. Sum of 4d6, Drop Lowest, Repeat 7, " + "Drop Lowest"), br(alg6_val, "6. Sum of 2d6, Add 6, Repeat 6"), br(alg7_val, "7. Sum of 5d6, Drop 2, Repeat 6 Times")] print("Equivalent Point Buy") print(tabulate(val_res)) """ Explanation: The following table summarizes the Equivalent Point Buy results: End of explanation """
statsmodels/statsmodels.github.io
v0.13.2/examples/notebooks/generated/statespace_varmax.ipynb
bsd-3-clause
%matplotlib inline import numpy as np import pandas as pd import statsmodels.api as sm import matplotlib.pyplot as plt dta = sm.datasets.webuse('lutkepohl2', 'https://www.stata-press.com/data/r12/') dta.index = dta.qtr dta.index.freq = dta.index.inferred_freq endog = dta.loc['1960-04-01':'1978-10-01', ['dln_inv', 'dln_inc', 'dln_consump']] """ Explanation: VARMAX models This is a brief introduction notebook to VARMAX models in statsmodels. The VARMAX model is generically specified as: $$ y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t + M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q} $$ where $y_t$ is a $\text{k_endog} \times 1$ vector. End of explanation """ exog = endog['dln_consump'] mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(2,0), trend='n', exog=exog) res = mod.fit(maxiter=1000, disp=False) print(res.summary()) """ Explanation: Model specification The VARMAX class in statsmodels allows estimation of VAR, VMA, and VARMA models (through the order argument), optionally with a constant term (via the trend argument). Exogenous regressors may also be included (as usual in statsmodels, by the exog argument), and in this way a time trend may be added. Finally, the class allows measurement error (via the measurement_error argument) and allows specifying either a diagonal or unstructured innovation covariance matrix (via the error_cov_type argument). Example 1: VAR Below is a simple VARX(2) model in two endogenous variables and an exogenous series, but no constant term. Notice that we needed to allow for more iterations than the default (which is maxiter=50) in order for the likelihood estimation to converge. This is not unusual in VAR models which have to estimate a large number of parameters, often on a relatively small number of time series: this model, for example, estimates 27 parameters off of 75 observations of 3 variables. 
End of explanation """ ax = res.impulse_responses(10, orthogonalized=True, impulse=[1, 0]).plot(figsize=(13,3)) ax.set(xlabel='t', title='Responses to a shock to `dln_inv`'); """ Explanation: From the estimated VAR model, we can plot the impulse response functions of the endogenous variables. End of explanation """ mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(0,2), error_cov_type='diagonal') res = mod.fit(maxiter=1000, disp=False) print(res.summary()) """ Explanation: Example 2: VMA A vector moving average model can also be formulated. Below we show a VMA(2) on the same data, but where the innovations to the process are uncorrelated. In this example we leave out the exogenous regressor but now include the constant term. End of explanation """ mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(1,1)) res = mod.fit(maxiter=1000, disp=False) print(res.summary()) """ Explanation: Caution: VARMA(p,q) specifications Although the model allows estimating VARMA(p,q) specifications, these models are not identified without additional restrictions on the representation matrices, which are not built-in. For this reason, it is recommended that the user proceed with error (and indeed a warning is issued when these models are specified). Nonetheless, they may in some circumstances provide useful information. End of explanation """
aliakbars/uai-ai
scripts/tugas3.ipynb
mit
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns plt.rcParams = plt.rcParamsOrig """ Explanation: Artificial Intelligence & Machine Learning Tugas 3: Search & Reinforcement Learning Mekanisme Anda hanya diwajibkan untuk mengumpulkan file ini saja ke uploader yang disediakan di https://elearning.uai.ac.id/. Ganti nama file ini saat pengumpulan menjadi tugas3_NIM.ipynb. Keterlambatan: Pengumpulan tugas yang melebihi tenggat yang telah ditentukan tidak akan diterima. Keterlambatan akan berakibat pada nilai nol untuk tugas ini. Kolaborasi: Anda diperbolehkan untuk berdiskusi dengan teman Anda, tetapi dilarang keras menyalin kode maupun tulisan dari teman Anda. Kecurangan akan berakibat pada nilai nol untuk tugas ini. Petunjuk Anda diperbolehkan (jika dirasa perlu) untuk mengimpor modul tambahan untuk tugas ini. Namun, seharusnya modul yang tersedia sudah cukup untuk memenuhi kebutuhan Anda. Untuk kode yang Anda ambil dari sumber lain, cantumkan URL menuju referensi tersebut jika diambil dari internet! Perhatikan poin untuk tiap soal! Semakin kecil poinnya, berarti kode yang diperlukan untuk menjawab soal tersebut seharusnya semakin sedikit! End of explanation """ def dynamic_prog_mcnuggets(total_mcnuggets: int, packs: list): pass # Kode Anda di sini dynamic_prog_mcnuggets(200, [3, 6, 10, 24]) """ Explanation: 1. Dynamic Programming (5 poin) Seorang pria di Australia pada tahun 2017 memesan 200 McNuggets melalui drive-through hingga diliput oleh media setempat. Asumsikan bahwa McDonald's bersedia memenuhi permintaan tersebut dan dalam menu terdapat kombinasi paket McNuggets berisi 3, 6, 10, dan 24. Buatlah program dinamis untuk menghitung berapa jumlah paket McNuggets minimum yang dapat diberikan kepada pria tersebut! 
End of explanation """ import networkx as nx import urllib locs = pd.read_csv('https://raw.githubusercontent.com/aliakbars/uai-ai/master/datasets/uk-coordinates.csv') heuristics = pd.read_csv('https://raw.githubusercontent.com/aliakbars/uai-ai/master/datasets/uk-heuristics.csv') G = nx.read_gpickle(urllib.request.urlopen("https://raw.githubusercontent.com/aliakbars/uai-ai/master/datasets/uk.pickle")) def draw_map(G, locs): pos = locs.set_index('city_name').apply( lambda x: (x['longitude'], x['latitude']), axis=1 ).to_dict() fig, ax = plt.subplots(figsize=(7, 7)) nx.draw( G, pos, with_labels=True, edge_color='#DDDDDD', node_color='#A0CBE2', node_size=300, font_size=10, ax=ax ) labels = nx.get_edge_attributes(G, 'weight') labels = {k: np.round(v).astype(int) for k, v in labels.items()} nx.draw_networkx_edge_labels( G, pos, edge_labels=labels, ax=ax ) draw_map(G, locs) """ Explanation: 2. Search (10 poin) Diberikan peta UK sebagai berikut. End of explanation """ # Kode Anda di sini """ Explanation: Soal 2.1 (2 poin) Gunakan algoritma UCS dari networkx untuk mencari jalan dari London ke Edinburgh. End of explanation """ def heuristic(source, target): pass # Kode Anda di sini # Kode Anda di sini """ Explanation: Soal 2.2 (4 poin) Gunakan algoritma A* dari networkx untuk mencari jalan dari London ke Edinburgh. Implementasikan fungsi heuristik berdasarkan variabel heuristics yang diberikan. End of explanation """ # Kode Anda di sini """ Explanation: Soal 2.3 (2 poin) Berapa jarak tempuh dari jalur terpendek London ke Edinburgh dari soal 2.2? 
End of explanation """ from tqdm.notebook import trange import gym class Agent: def __init__(self, env, algo="random", eps=0.2, eta=0.1, gamma=1): self.env = env self.s = env.reset() # inisialisasi state self.q = np.zeros((env.observation_space.n, env.action_space.n)) # inisialisasi semua nilai pada matriks Q = 0 self.eps = eps # probabilitas eksplorasi self.eta = eta # learning rate self.gamma = gamma # discount factor self.algo = algo def update_q(self, s, a, r, s_, a_): # Implementasikan SARSA pada bagian ini self.q[s, a] = ... # Kode Anda di sini def choose_action(self, s): if self.algo == "random": return self.env.action_space.sample() elif self.algo == "sarsa": ... # Kode Anda di sini else: raise NotImplementedError() def play(self): a = self.choose_action(self.s) state, reward, done, _ = self.env.step(a) action = self.choose_action(state) # melihat aksi selanjutnya self.update_q(self.s, a, reward, state, action) self.s = state # state saat ini diperbarui return done, reward def reset(self): self.s = self.env.reset() """ Explanation: Soal 2.4 (2 poin) Apakah hasil pada soal 2.1 dan 2.2 sama? Mengapa? Jawaban Anda di sini 3. Reinforcement Learning (10 poin) Game yang akan dimainkan kali ini adalah Frozen Lake. Terjemahan bebas dari dokumentasi: Musim dingin telah tiba. Anda dan teman Anda sedang bermain frisbee dan tanpa sengaja Anda melempar cakram frisbee ke tengah danau. Sebagian besar danau sudah beku, tetapi ada beebrapa lubang karena esnya telah mencair. Jika Anda terjatuh ke lubang, maka Anda akan tercebur ke air yang sangat dingin. Anda harus mengambil cakram tersebut dengan menyeberangi danau. Namun, es yang Anda pijak licin, sehingga Anda tidak selalu bisa berjalan ke arah yang Anda tuju. Anda diminta mencari strategi (policy) berupa jalan yang aman menuju ke ubin tujuan. Peta danau: SFFF FHFH FFFH HFFG Soal 3.1 (4 poin) Di dalam kelas yang sudah didefinisikan di bawah ini: 1. Implementasikan SARSA untuk memperbarui nilai Q. (2 poin) 2. 
Implementasikan algoritma $\epsilon$-greedy untuk memilih aksi. Petunjuk: Manfaatkan np.random.random(). (2 poin) End of explanation """ def simulate(algo, num_episodes=10000): np.random.seed(101) env = gym.make("FrozenLake-v0") agent = Agent(env, algo) utilities = [] for i in trange(num_episodes): while True: done, reward = agent.play() if done: utilities.append(reward) agent.reset() break env.close() return agent, utilities # Kode Anda di sini """ Explanation: Soal 3.2.1 (2 poin) Simulasikan permainan ini dengan algoritma random dan SARSA. Bandingkan rata-rata utilities yang didapatkan. End of explanation """ # Kode Anda di sini """ Explanation: Soal 3.2.2 (2 poin) Gambarkan perubahan nilai utilities dari algoritma random dan SARSA dengan rolling mean 100 episodes. Apa yang dapat Anda amati? Petunjuk: Cari tentang "pandas rolling mean". End of explanation """ # Kode Anda di sini """ Explanation: Soal 3.3 (2 poin) Tampilkan optimal policy untuk setiap state dari algoritma SARSA. End of explanation """
dipanjanS/BerkeleyX-CS100.1x-Big-Data-with-Apache-Spark
Week 2 - Introduction to Apache Spark/lab1_word_count_student.ipynb
mit
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat'] wordsRDD = sc.parallelize(wordsList, 4) # Print out the type of wordsRDD print type(wordsRDD) """ Explanation: + Word Count Lab: Building a word count application This lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. In this lab, we will write code that calculates the most common words in the Complete Works of William Shakespeare retrieved from Project Gutenberg. This could also be scaled to find the most common words on the Internet. During this lab we will cover: Part 1: Creating a base RDD and pair RDDs Part 2: Counting with pair RDDs Part 3: Finding unique words and a mean value Part 4: Apply word count to a file Note that, for reference, you can look up the details of the relevant methods in Spark's Python API Part 1: Creating a base RDD and pair RDDs In this part of the lab, we will explore creating a base RDD with parallelize and using pair RDDs to count words. (1a) Create a base RDD We'll start by generating a base RDD by using a Python list and the sc.parallelize method. Then we'll print out the type of the base RDD. End of explanation """ # TODO: Replace <FILL IN> with appropriate code def makePlural(word): """Adds an 's' to `word`. Note: This is a simple function that only adds an 's'. No attempt is made to follow proper pluralization rules. Args: word (str): A string. Returns: str: A string with 's' added to it. 
""" return word + 's' print makePlural('cat') # One way of completing the function def makePlural(word): return word + 's' print makePlural('cat') # Load in the testing code and check to see if your answer is correct # If incorrect it will report back '1 test failed' for each failed test # Make sure to rerun any cell you change before trying the test again from test_helper import Test # TEST Pluralize and test (1b) Test.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s') """ Explanation: (1b) Pluralize and test Let's use a map() transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace &lt;FILL IN&gt; with your solution. If you have trouble, the next cell has the solution. After you have defined makePlural you can run the third cell which contains a test. If you implementation is correct it will print 1 test passed. This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more &lt;FILL IN&gt; sections. The cell that needs to be modified will have # TODO: Replace &lt;FILL IN&gt; with appropriate code on its first line. Once the &lt;FILL IN&gt; sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests. 
End of explanation """ # TODO: Replace <FILL IN> with appropriate code pluralRDD = wordsRDD.map(makePlural) print pluralRDD.collect() # TEST Apply makePlural to the base RDD(1c) Test.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'], 'incorrect values for pluralRDD') """ Explanation: (1c) Apply makePlural to the base RDD Now pass each item in the base RDD into a map() transformation that applies the makePlural() function to each element. And then call the collect() action to see the transformed RDD. End of explanation """ # TODO: Replace <FILL IN> with appropriate code pluralLambdaRDD = wordsRDD.map(lambda word: word + 's') print pluralLambdaRDD.collect() # TEST Pass a lambda function to map (1d) Test.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'], 'incorrect values for pluralLambdaRDD (1d)') """ Explanation: (1d) Pass a lambda function to map Let's create the same RDD using a lambda function. End of explanation """ # TODO: Replace <FILL IN> with appropriate code pluralLengths = (pluralRDD .map(lambda word: len(word)) .collect()) print pluralLengths # TEST Length of each word (1e) Test.assertEquals(pluralLengths, [4, 9, 4, 4, 4], 'incorrect values for pluralLengths') """ Explanation: (1e) Length of each word Now use map() and a lambda function to return the number of characters in each word. We'll collect this result directly into a variable. End of explanation """ # TODO: Replace <FILL IN> with appropriate code wordPairs = wordsRDD.map(lambda word: (word, 1)) print wordPairs.collect() # TEST Pair RDDs (1f) Test.assertEquals(wordPairs.collect(), [('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)], 'incorrect value for wordPairs') """ Explanation: (1f) Pair RDDs The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple (k, v) where k is the key and v is the value. 
In this example, we will create a pair consisting of ('&lt;word&gt;', 1) for each word element in the RDD. We can create the pair RDD using the map() transformation with a lambda() function to create a new RDD. End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Note that groupByKey requires no parameters wordsGrouped = wordPairs.groupByKey() for key, value in wordsGrouped.collect(): print '{0}: {1}'.format(key, list(value)) # TEST groupByKey() approach (2a) Test.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()), [('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])], 'incorrect value for wordsGrouped') """ Explanation: Part 2: Counting with pair RDDs Now, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others. A naive approach would be to collect() all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations. (2a) groupByKey() approach An approach you might first consider (we'll see shortly that there are better ways) is based on using the groupByKey() transformation. As the name implies, the groupByKey() transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using groupByKey(): The operation requires a lot of data movement to move all the values into the appropriate partitions. The lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) would be huge and could exhaust the available memory in a worker. 
Use groupByKey() to generate a pair RDD of type ('word', iterator). End of explanation """ # TODO: Replace <FILL IN> with appropriate code wordCountsGrouped = wordsGrouped.map(lambda (k,v): (k, sum(v))) print wordCountsGrouped.collect() # TEST Use groupByKey() to obtain the counts (2b) Test.assertEquals(sorted(wordCountsGrouped.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCountsGrouped') """ Explanation: (2b) Use groupByKey() to obtain the counts Using the groupByKey() transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator. Now sum the iterator using a map() transformation. The result should be a pair RDD consisting of (word, count) pairs. End of explanation """ # TODO: Replace <FILL IN> with appropriate code # Note that reduceByKey takes in a function that accepts two values and returns a single value wordCounts = wordPairs.reduceByKey(lambda a,b: a+b) print wordCounts.collect() # TEST Counting using reduceByKey (2c) Test.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCounts') """ Explanation: (2c) Counting using reduceByKey A better approach is to start from the pair RDD and then use the reduceByKey() transformation to create a new pair RDD. The reduceByKey() transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. reduceByKey() operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets. 
End of explanation """ # TODO: Replace <FILL IN> with appropriate code wordCountsCollected = (wordsRDD .map(lambda word: (word, 1)) .reduceByKey(lambda a,b: a+b) .collect()) print wordCountsCollected # TEST All together (2d) Test.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCountsCollected') """ Explanation: (2d) All together The expert version of the code performs the map() to pair RDD, reduceByKey() transformation, and collect in one statement. End of explanation """ # TODO: Replace <FILL IN> with appropriate code uniqueWords = wordsRDD.map(lambda word: (word, 1)).distinct().count() print uniqueWords # TEST Unique words (3a) Test.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords') """ Explanation: Part 3: Finding unique words and a mean value (3a) Unique words Calculate the number of unique words in wordsRDD. You can use other RDDs that you have already created to make this easier. End of explanation """ # TODO: Replace <FILL IN> with appropriate code from operator import add totalCount = (wordCounts .map(lambda (a,b): b) .reduce(add)) average = totalCount / float(wordCounts.distinct().count()) print totalCount print round(average, 2) # TEST Mean using reduce (3b) Test.assertEquals(round(average, 2), 1.67, 'incorrect value of average') """ Explanation: (3b) Mean using reduce Find the mean number of words per unique word in wordCounts. Use a reduce() action to sum the counts in wordCounts and then divide by the number of unique words. First map() the pair RDD wordCounts, which consists of (key, value) pairs, to an RDD of values. End of explanation """ # TODO: Replace <FILL IN> with appropriate code def wordCount(wordListRDD): """Creates a pair RDD with word counts from an RDD of words. Args: wordListRDD (RDD of str): An RDD consisting of words. Returns: RDD of (str, int): An RDD consisting of (word, count) tuples. 
""" return (wordListRDD .map(lambda a : (a,1)) .reduceByKey(lambda a,b: a+b)) print wordCount(wordsRDD).collect() # TEST wordCount function (4a) Test.assertEquals(sorted(wordCount(wordsRDD).collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect definition for wordCount function') """ Explanation: Part 4: Apply word count to a file In this section we will finish developing our word count application. We'll have to build the wordCount function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data. (4a) wordCount function First, define a function for word counting. You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like wordsRDD and return a pair RDD that has all of the words and their associated counts. End of explanation """ # TODO: Replace <FILL IN> with appropriate code import re def removePunctuation(text): """Removes punctuation, changes to lower case, and strips leading and trailing spaces. Note: Only spaces, letters, and numbers should be retained. Other characters should should be eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after punctuation is removed. Args: text (str): A string. Returns: str: The cleaned up string. """ return re.sub("[^a-zA-Z0-9 ]", "", text.strip(" ").lower()) print removePunctuation('Hi, you!') print removePunctuation(' No under_score!') # TEST Capitalization and punctuation (4b) Test.assertEquals(removePunctuation(" The Elephant's 4 cats. "), 'the elephants 4 cats', 'incorrect definition for removePunctuation function') """ Explanation: (4b) Capitalization and punctuation Real world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are: Words should be counted independent of their capitialization (e.g., Spark and spark should be counted as the same word). 
All punctuation should be removed. Any leading or trailing spaces on a line should be removed. Define the function removePunctuation that converts all text to lower case, removes any punctuation, and removes leading and trailing spaces. Use the Python re module to remove any text that is not a letter, number, or space. Reading help(re.sub) might be useful. End of explanation """ # Just run this code import os.path baseDir = os.path.join('data') inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt') fileName = os.path.join(baseDir, inputPath) shakespeareRDD = (sc .textFile(fileName, 8) .map(removePunctuation)) print '\n'.join(shakespeareRDD .zipWithIndex() # to (line, lineNum) .map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line' .take(15)) """ Explanation: (4c) Load a text file For the next part of this lab, we will use the Complete Works of William Shakespeare from Project Gutenberg. To convert a text file into an RDD, we use the SparkContext.textFile() method. We also apply the recently defined removePunctuation() function using a map() transformation to strip out the punctuation and change all text to lowercase. Since the file is large we use take(15), so that we only print 15 lines. End of explanation """ # TODO: Replace <FILL IN> with appropriate code shakespeareWordsRDD = shakespeareRDD.flatMap(lambda a: a.split(" ")) shakespeareWordCount = shakespeareWordsRDD.count() print shakespeareWordsRDD.top(5) print shakespeareWordCount # TEST Words from lines (4d) # This test allows for leading spaces to be removed either before or after # punctuation is removed. 
Test.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908, 'incorrect value for shakespeareWordCount') Test.assertEquals(shakespeareWordsRDD.top(5), [u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'], 'incorrect value for shakespeareWordsRDD') """ Explanation: (4d) Words from lines Before we can use the wordcount() function, we have to address two issues with the format of the RDD: The first issue is that that we need to split each line by its spaces. The second issue is we need to filter out empty lines. Apply a transformation that will split each element of the RDD by its spaces. For each element of the RDD, you should apply Python's string split() function. You might think that a map() transformation is the way to do this, but think about what the result of the split() function will be. End of explanation """ # TODO: Replace <FILL IN> with appropriate code shakeWordsRDD = shakespeareWordsRDD.filter(lambda word: len(word) > 0) shakeWordCount = shakeWordsRDD.count() print shakeWordCount # TEST Remove empty elements (4e) Test.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount') """ Explanation: (4e) Remove empty elements The next step is to filter out the empty elements. Remove all entries where the word is ''. End of explanation """ # TODO: Replace <FILL IN> with appropriate code top15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15, lambda (a,b): -b) print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts)) # TEST Count the words (4f) Test.assertEquals(top15WordsAndCounts, [(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463), (u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890), (u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)], 'incorrect value for top15WordsAndCounts') """ Explanation: (4f) Count the words We now have an RDD that is only words. 
Next, let's apply the wordCount() function to produce a list of word counts. We can view the top 15 words by using the takeOrdered() action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair. You'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results. Use the wordCount() function and takeOrdered() to obtain the fifteen most common words and their counts. End of explanation """
badlands-model/BayesLands
Examples/regridInput.ipynb
gpl-3.0
import sys print(sys.version) print(sys.executable) %matplotlib inline # Import badlands grid generation toolbox import pybadlands_companion.resizeInput as resize """ Explanation: Regridding input data to higher resolution The initial resolution of the input file is used as the higher resolution that Badlands model can used. If one started with a given resolution and want to work with an higher one, it is required to regrid the input file to match at least the requested resolution. End of explanation """ #help(resize.resizeInput.__init__) newRes = resize.resizeInput(requestedSpacing = 40) """ Explanation: 1. Load python class and set required resolution End of explanation """ #help(newRes.regridDEM) newRes.regridDEM(inDEM='mountain/data/nodes.csv',outDEM='mountain/data/newnodes.csv') """ Explanation: 2. Regrid DEM file End of explanation """ #help(newRes.regridRain) newRes.regridRain(inRain='data/rain.csv',outRain='newrain.csv') """ Explanation: 3. Regrid Rain file End of explanation """ #help(newRes.regridTecto) newRes.regridTecto(inTec='data/disp.csv', outTec='newdisp.csv') """ Explanation: 4. Regrid Tectonic files Here you have the choice between vertical only displacements file and 3D ones. In cases where you have several files you might create a loop to automatically regrid the files! Vertical only file End of explanation """ #help(newRes.regridDisp) newRes.regridDisp(inDisp='data/disp.csv', outDisp='newdisp.csv') """ Explanation: 3D displacements file End of explanation """
karlstroetmann/Algorithms
Python/Chapter-07/Binary-Tries-Frame.ipynb
gpl-2.0
import graphviz as gv """ Explanation: Binary Tries End of explanation """ class BinaryTrie: sNodeCount = 0 def __init__(self): BinaryTrie.sNodeCount += 1 self.mID = BinaryTrie.sNodeCount def getID(self): return self.mID # used only by graphviz """ Explanation: This notebook presents <em style="color:blue;">binary tries</em>. We define the set $\texttt{BT}$ of binary tries by induction: $\texttt{Nil} \in \texttt{BT}$. $\texttt{Bin}(v,l,r) \in \texttt{BT}$ provided that $v \in \texttt{Value} \cup {\Omega}$ and $l,r \in \texttt{BT}$. The class BinaryTrie is a superclass for constructing binary tries. It has one static variable sNodeCount. This variable is used to equip all nodes with a unique identifier. This identifier is used to draw the trees using graphviz. Every object of class BinaryTrie has a unique identifier mID that is stored as a member variable. End of explanation """ def _make_string(self, attributes): # map the function __str__ to all attributes and join them with a comma name = self.__class__.__name__ return f"{name}({', '.join(map(str, [getattr(self, at) for at in attributes]))})" BinaryTrie._make_string = _make_string del _make_string """ Explanation: The function make_string is a helper function that is used to simplify the implementation of __str__. 
- self is the object that is to be rendered as a string - attributes is a list of those member variables that are used to produce the string End of explanation """ def toDot(self): dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'}) nodeDict = {} self._collectIDs(nodeDict) for n, t in nodeDict.items(): if isinstance(t, Nil): dot.node(str(n), label='', shape='point') elif isinstance(t, Bin): if t.mValue != None: dot.node(str(n), label='{' + str(t.mDigit) + '|' + str(t.mValue) + '}') else: dot.node(str(n), label='{' + str(t.mDigit) + '|' + '}') else: assert False, f'Unknown node {t}' for n, t in nodeDict.items(): if isinstance(t, Bin): dot.edge(str(n), str(t.mLeft .getID())) dot.edge(str(n), str(t.mRight.getID())) return dot BinaryTrie.toDot = toDot del toDot """ Explanation: The method $t.\texttt{toDot}()$ takes a binary trie $t$ and returns a graph that depicts the tree $t$. End of explanation """ def _collectIDs(self, nodeDict): nodeDict[self.getID()] = self if isinstance(self, Bin): self.mLeft ._collectIDs(nodeDict) self.mRight._collectIDs(nodeDict) self.mLeft .mDigit = '0' self.mRight.mDigit = '1' BinaryTrie._collectIDs = _collectIDs del _collectIDs """ Explanation: The method $t.\texttt{collectIDs}(d)$ takes a binary trie $t$ and a dictionary $d$ and updates the dictionary so that the following holds: $$ d[\texttt{id}] = n \quad \mbox{for every node $n$ in $t$.} $$ Here, $\texttt{id}$ is the unique identifier of the node $n$, i.e. $d$ associates the identifiers with the corresponding nodes. End of explanation """ class Nil(BinaryTrie): def __init__(self): BinaryTrie.__init__(self) def __str__(self): return 'Nil()' """ Explanation: The class Nil represents an empty binary trie. It has no member variables of its own. 
End of explanation """ class Bin(BinaryTrie): def __init__(self, value, left, right): BinaryTrie.__init__(self) self.mValue = value self.mLeft = left self.mRight = right self.mDigit = '' # only used by graphviz def __str__(self): return _make_string(self, ['mValue', 'mLeft', 'mRight']) """ Explanation: The class Bin represents a binary trie of the form $\texttt{Bin}(v,l,r)$. It has three member variables: * mValue is the value that is stored at this node. * mLeft is the left subtree. * mRight is the right subtree. * mDigit is a string that is used by graphviz to display the node. End of explanation """ def find(self, n): "your code here" Nil.find = find del find def find(self, n): "your code here" Bin.find = find del find """ Explanation: Implementing the Method find Given a binary trie $b$ and a natural number $n$, the expression $$ b.\texttt{find}(n) $$ returns the value in $b$ that is associated with the number $n$. If there is no value associated with $b$, then the expression evaluates to $\Omega$. Formally, the value of the expression $b.\texttt{find}(n)$ is defined by induction on both $b$ and $n$: - $\texttt{Nil}.\texttt{find}(n) = \Omega$, since the empty trie doesn't store any values. $\texttt{Bin}(v,l,r).\texttt{find}(0) = v$, because $0$ is interpreted as the empty string $\varepsilon$. - $n \not= 0 \rightarrow \texttt{Bin}(v,l,r).\texttt{find}(2\cdot n) = l.\texttt{find}(n)$, because if a number is represented in binary, then the last bit of every even number is zero and zero chooses the left subtree. - $\texttt{Bin}(v,l,r).\texttt{find}(2 \cdot n + 1) = r.\texttt{find}(n)$, because if a number is represented in binary, then the last bit of every odd number is 1 and 1 is associated with the right subtree. 
End of explanation """ def insert(self, n, v): "your code here" Nil.insert = insert del insert def insert(self, n, v): "your code here" Bin.insert = insert del insert """ Explanation: Implementing the Method insert Given a binary trie $b$, a natural number $n$ and a value $v$, the expression $$ b.\texttt{insert}(n, v) $$ is defined by induction on $b$ and $n$: Your equations here! E = m * c ** 2 End of explanation """ def simplify(self): "your code here" Bin.simplify = simplify del simplify """ Explanation: Implementing the Method delete First we have to implement a method simplify. Given a binary trie $b$, the expression $b.\texttt{simplify}()$ returns Nil iff $b$ does not contain any key. Otherwise, $b$ is left unchanged. The method simplify is specified by the following equations: Your equations here! End of explanation """ def delete(self, n): "your code here" Nil.delete = delete del delete def delete(self, n): "your code here" Bin.delete = delete del delete """ Explanation: Your equations here! End of explanation """ b = Nil() b.toDot() b = b.insert(0, 'a') b.toDot() b = b.insert(1, 'b') b.toDot() b = b.insert(2, 'c') b.toDot() b = b.delete(0) b.toDot() b = b.delete(1) b.toDot() b = b.delete(2) b.toDot() """ Explanation: Testing End of explanation """ Primes = Nil() for i in range(2, 101): Primes = Primes.insert(i, True) Primes.toDot() for i in range(2, 51): for j in range(i, 100 // i + 1): Primes = Primes.delete(i * j) display(Primes.toDot()) for i in range(2, 101): if Primes.find(i): print(i, end=' ') """ Explanation: Let us compute the prime numbers next. End of explanation """
drericstrong/Blog
20170204_FuzzyLogicLinearGaussian.ipynb
agpl-3.0
import numpy as np import skfuzzy as fuzz import matplotlib.pyplot as plt %matplotlib inline x = np.arange(30, 100, 0.1) ## LINEAR # Create the membership functions x_cold_lin = fuzz.trimf(x, [30, 30, 50]) x_mild_lin = fuzz.trimf(x, [30, 50, 70]) x_warm_lin = fuzz.trimf(x, [50, 70, 100]) x_hot_lin = fuzz.trimf(x, [70, 100, 100]) # Plot the results of the linear fuzzy membership plt.figure() plt.plot(x, x_cold_lin, 'b', linewidth=1.5, label='Cold') plt.plot(x, x_mild_lin, 'k', linewidth=1.5, label='Mild') plt.plot(x, x_warm_lin, 'm', linewidth=1.5, label='Warm') plt.plot(x, x_hot_lin, 'r', linewidth=1.5, label='Hot') plt.title('Temperature, Linear Fuzzy') plt.ylabel('Membership') plt.xlabel('Temperature (Fahrenheit)') plt.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1, fancybox=True, shadow=True); """ Explanation: The term "fuzzy logic" refers to a special type of logic where propositions are not merely TRUE or FALSE but instead can take any truth value between 0 ("pure" FALSE) and 1 ("pure" TRUE). For instance, a truth value of 0.5 is indeterminate; it is just as likely to be TRUE as FALSE. This concept may seem counter-intuitive. If presented with a statement, such as "John is wearing a green shirt", most people would think that the statement must be either TRUE or FALSE. Clearly, there's no in-between, right? However, even in this simple case, there's some ambiguity about the color green. What if John is wearing a sea-green shirt? Does that count as green or blue? What if it leans much more towards the blue end of the spectrum? Or, even more confusingly, what if the person who made the statement is color-blind? The blue/white gold/black dress phenomenon taught us that color can sometimes be very ambiguous. As another practical example, people's feelings about temperature can be difficult to quantify in terms of absolute limits. Is 75 degrees (F) "hot"? Is it "warm"? 
Maybe it depends on where you live, or perhaps it could be a little bit of both "warm" and "hot"? Fuzzy logic is a great choice for modeling this situation: at 75 degrees, you can assign some membership to both the "warm" and "hot" states. For instance, you might say 75 degrees is 70% "warm" and 30% "hot". Practically no one, though, would think that 75 degrees (F) is "cold". You can confidently assign a membership of 0% to "cold" at 75 degrees. How do we want to specify these membership probabilities at different temperatures? Well, it would take a very long time to go through each temperature and assign probabilities for "cold", "warm", and "hot". There's a shortcut we can use to make this easier. 76 degrees is clearly more "hot" than 75 degrees, so we wouldn't expect the "hot" membership of 76 degrees to be less than the "hot" membership at 75 degrees. We can assign functions to "cold", "warm", and "hot" that take these intuitions into account. One of the more common membership functions is a triangle, given its relatively easy-to-define shape. 3 values are defined: lower, full, and upper. The lower and upper values have a membership of 0%, and the full value has a membership of 100%. All values in between are linearly interpolated. For example, if we defined the triangular membership function for "warm" as [50, 70, 100], the membership would be 0% at 50 degrees, linearly increase to 100% at 70 degrees, and linearly decrease to 0% at 100 degrees. Generalizing to four different judgements about temperature, the following code will demonstrate linear, triangular fuzzy logic membership functions for each. 
End of explanation """ ## GAUSSIAN # Create the membership functions x_cold_gauss = fuzz.gaussmf(x, 30, 8) x_mild_gauss = fuzz.gaussmf(x, 50, 8) x_warm_gauss = fuzz.gaussmf(x, 70, 12) x_hot_gauss = fuzz.gaussmf(x, 100, 8) # Plot the results of the gaussian fuzzy membership plt.figure() plt.plot(x, x_cold_gauss, 'b', linewidth=1.5, label='Cold') plt.plot(x, x_mild_gauss, 'k', linewidth=1.5, label='Mild') plt.plot(x, x_warm_gauss, 'm', linewidth=1.5, label='Warm') plt.plot(x, x_hot_gauss, 'r', linewidth=1.5, label='Hot') plt.title('Temperature, Gaussian Fuzzy') plt.ylabel('Membership') plt.xlabel('Temperature') plt.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1, fancybox=True, shadow=True); """ Explanation: As can be seen in the figure above, each state ("cold", "mild", "warm", and "hot") has a membership value defined at all temperatures between 30 and 100 degrees (F). This was accomplished easily by defining the following triangular functions for each state: Cold - [30, 30, 50] Mild - [30, 50, 70] Warm - [50, 70, 100] Hot - [70, 100, 100] Where the first value in the array is the "low" (0%) value, the second is the "full" (100%) value, and the third is the "high" (0%) value. Below 30 degrees, we can assume that "cold" has 100% membership and all other judgements have 0% membership. Likewise, above 100 degrees, we can assume that "hot" has 100% membership and all other judgements have 0% membership. Notice that "warm" has a wider distribution than the other states. This occurred because warm is an even more "fuzzy" word than the other words- people generally tend to disagree strongly about exactly what the word "warm" means. Fuzzy logic allows us to quantify this disagreement as a function with a higher standard deviation. Let's examine a more complicated Gaussian distribution as a membership function. In this case, we will be specifying a mean and standard deviation for each Gaussian distribution. 
End of explanation """ # Plot to show the sum is not always 1 x_sum = x_cold_gauss + x_mild_gauss + \ x_warm_gauss + x_hot_gauss plt.figure() plt.plot(x, x_sum, 'y', linewidth=1.5, label='Total') plt.title('Temperature, Gaussian Fuzzy Sum') plt.ylabel('Membership') plt.xlabel('Temperature') plt.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1, fancybox=True, shadow=True); """ Explanation: Looking at the figure above, values tend to be more heavily concentrated around the "full" membership values, dropping off more steeply than the triangular membership function. This might be preferential behavior, depending on the system that's being modeled. You might also notice that the membership functions no longer sum to 100%. Let's examine this further by plotting the membership sum at each temperature in the following code. End of explanation """ ## NORMALIZED GAUSSIAN # rescale x_sum = x_cold_gauss + x_mild_gauss + \ x_warm_gauss + x_hot_gauss x_cold_rescale = x_cold_gauss / x_sum x_mild_rescale = x_mild_gauss / x_sum x_warm_rescale = x_warm_gauss / x_sum x_hot_rescale = x_hot_gauss / x_sum # Plot the results of the rescaled gaussian fuzzy membership plt.figure() plt.plot(x, x_cold_rescale, 'b', linewidth=1.5, label='Cold') plt.plot(x, x_mild_rescale, 'k', linewidth=1.5, label='Mild') plt.plot(x, x_warm_rescale, 'm', linewidth=1.5, label='Warm') plt.plot(x, x_hot_rescale, 'r', linewidth=1.5, label='Hot') plt.title('Temperature, Rescaled Gaussian Fuzzy') plt.ylabel('Membership') plt.xlabel('Temperature') plt.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1, fancybox=True, shadow=True); """ Explanation: As suspected, the membership function does not sum to 100% at each temperature value. There may be situations where this doesn't matter. For instance, if you're using fuzzy logic to determine the most likely descriptor at each temperature value, that the membership doesn't need to sum to 1, since only the maximum value is required. 
Generally, though, if you are going to be treating the membership function as a probability or using it in a summation somewhere down the line, you want to normalize your membership functions so that they always equal 1. A quick, easy way to rescale the membership functions so that they sum to 1 is simply to divide by the sum. The following code will normalize the Gaussian membership functions. End of explanation """ # Plot to show the sum is not always 1 x_sum = x_cold_rescale + x_mild_rescale + \ x_warm_rescale + x_hot_rescale plt.figure() plt.plot(x, x_sum, 'y', linewidth=1.5, label='Total') plt.title('Temperature, Rescaled Gaussian Fuzzy Sum') plt.ylabel('Membership') plt.xlabel('Temperature') plt.legend(loc='center right', bbox_to_anchor=(1.25, 0.5), ncol=1, fancybox=True, shadow=True); """ Explanation: After normalization, notice that the membership functions no longer look Gaussian. Let's make sure that the membership does sum to 1 at each temperature: End of explanation """
ToqueWillot/M2DAC
FDMS/TME5/sklearn_t-SNE.ipynb
gpl-2.0
from sklearn.manifold import TSNE help(TSNE) """ Explanation: t-Distributed Stochastic Neighbor Embedding (t-SNE) in sklearn t-SNE is a tool for data visualization. It reduces the dimensionality of data to 2 or 3 dimensions so that it can be plotted easily. Local similarities are preserved by this embedding. t-SNE converts distances between data in the original space to probabilities. First, we compute conditional probabilites $$p_{j|i} = \frac{\exp{(-d(\boldsymbol{x}i, \boldsymbol{x}_j) / (2 \sigma_i^2)})}{\sum{i \neq k} \exp{(-d(\boldsymbol{x}i, \boldsymbol{x}_k) / (2 \sigma_i^2)})}, \quad p{i|i} = 0,$$ which will be used to generate joint probabilities $$p_{ij} = \frac{p_{j|i} + p_{i|j}}{2N}.$$ The $\sigma_i$ will be determined automatically. This procedure can be influenced by setting the perplexity of the algorithm. A heavy-tailed distribution will be used to measure the similarities in the embedded space $$q_{ij} = \frac{(1 + ||\boldsymbol{y}i - \boldsymbol{y}_j)||^2)^{-1}}{\sum{k \neq l} (1 + ||\boldsymbol{y}_k - \boldsymbol{y}_l)||^2)^{-1}},$$ and then we minimize the Kullback-Leibler divergence $$KL(P|Q) = \sum_{i \neq j} p_{ij} \log \frac{p_{ij}}{q_{ij}}$$ between both distributions with gradient descent (and some tricks). Note that the cost function is not convex and multiple runs might yield different results. 
More information can be found in these resources and in the documentation from t-SNE: Website (Implementations, FAQ, etc.): t-Distributed Stochastic Neighbor Embedding Original paper: Visualizing High-Dimensional Data Using t-SNE End of explanation """ from sklearn.datasets import load_iris from sklearn.decomposition import PCA iris = load_iris() X_tsne = TSNE(learning_rate=100).fit_transform(iris.data) X_pca = PCA().fit_transform(iris.data) """ Explanation: A simple example: the Iris dataset End of explanation """ figure(figsize=(10, 5)) subplot(121) scatter(X_tsne[:, 0], X_tsne[:, 1], c=iris.target) subplot(122) scatter(X_pca[:, 0], X_pca[:, 1], c=iris.target) """ Explanation: t-SNE can help us to decide whether classes are separable in some linear or nonlinear representation. Here we can see that the 3 classes of the Iris dataset can be separated quite easily. They can even be separated linearly which we can conclude from the low-dimensional embedding of the PCA. End of explanation """ from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer categories = ['alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space'] newsgroups = fetch_20newsgroups(subset="train", categories=categories) vectors = TfidfVectorizer().fit_transform(newsgroups.data) print(repr(vectors)) """ Explanation: High-dimensional sparse data: the 20 newsgroups dataset In high-dimensional and nonlinear domains, PCA is not applicable any more and many other manifold learning algorithms do not yield good visualizations either because they try to preserve the global data structure. 
End of explanation """ from sklearn.decomposition import TruncatedSVD X_reduced = TruncatedSVD(n_components=50, random_state=0).fit_transform(vectors) X_embedded = TSNE(n_components=2, perplexity=40, verbose=2).fit_transform(X_reduced) fig = figure(figsize=(10, 10)) ax = axes(frameon=False) setp(ax, xticks=(), yticks=()) subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=0.9, wspace=0.0, hspace=0.0) scatter(X_embedded[:, 0], X_embedded[:, 1], c=newsgroups.target, marker="x") """ Explanation: For high-dimensional sparse data it is helpful to first reduce the dimensions to 50 dimensions with TruncatedSVD and then perform t-SNE. This will usually improve the visualization. End of explanation """ from sklearn.datasets import fetch_mldata # Load MNIST dataset mnist = fetch_mldata("MNIST original") X, y = mnist.data / 255.0, mnist.target # Create subset and reduce to first 50 dimensions indices = arange(X.shape[0]) random.shuffle(indices) n_train_samples = 5000 X_pca = PCA(n_components=50).fit_transform(X) X_train = X_pca[indices[:n_train_samples]] y_train = y[indices[:n_train_samples]] # Plotting function matplotlib.rc('font', **{'family' : 'sans-serif', 'weight' : 'bold', 'size' : 18}) matplotlib.rc('text', **{'usetex' : True}) def plot_mnist(X, y, X_embedded, name, min_dist=10.0): fig = figure(figsize=(10, 10)) ax = axes(frameon=False) title("\\textbf{MNIST dataset} -- Two-dimensional " "embedding of 70,000 handwritten digits with %s" % name) setp(ax, xticks=(), yticks=()) subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=0.9, wspace=0.0, hspace=0.0) scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, marker="x") if min_dist is not None: from matplotlib import offsetbox shown_images = np.array([[15., 15.]]) indices = arange(X_embedded.shape[0]) random.shuffle(indices) for i in indices[:5000]: dist = np.sum((X_embedded[i] - shown_images) ** 2, 1) if np.min(dist) < min_dist: continue shown_images = np.r_[shown_images, [X_embedded[i]]] imagebox = 
offsetbox.AnnotationBbox( offsetbox.OffsetImage(X[i].reshape(28, 28), cmap=cm.gray_r), X_embedded[i]) ax.add_artist(imagebox) X_train_embedded = TSNE(n_components=2, perplexity=40, verbose=2).fit_transform(X_train) plot_mnist(X[indices[:n_train_samples]], y_train, X_train_embedded, "t-SNE", min_dist=20.0) """ Explanation: MNIST dataset End of explanation """
chetan51/nupic.research
projects/dynamic_sparse/notebooks/ExperimentAnalysis-Neurips-debug-hebbianGrowth.ipynb
gpl-3.0
%load_ext autoreload %autoreload 2 import sys sys.path.append("../../") from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import glob import tabulate import pprint import click import numpy as np import pandas as pd from ray.tune.commands import * from dynamic_sparse.common.browser import * """ Explanation: Experiment: Evaluate hebbian growth Motivation: Control all other variables and evaluate hebbian growth alone Conclusions: Lower accuracy, about 0.2%, but it converges faster (18 vs 24) compared to random growth. Results are consistent with expectation: it accelerates learning, but focusing early on few specific units might prevent the neural network from finding a different set of connections that can lead to higher performance End of explanation """ exps = ['neurips_debug_test13', ] paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps] df = load_many(paths) df.head(5) # replace hebbian prine df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True) df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True) df.columns df.shape df.iloc[1] df.groupby('model')['model'].count() """ Explanation: Load and check data End of explanation """ # Did any trials failed? df[df["epochs"]<30]["epochs"].count() # Removing failed or incomplete trials df_origin = df.copy() df = df_origin[df_origin["epochs"]>=30] df.shape # which ones failed? # failed, or still ongoing? 
df_origin['failed'] = df_origin["epochs"]<30 df_origin[df_origin['failed']]['epochs'] # helper functions def mean_and_std(s): return "{:.3f} ± {:.3f}".format(s.mean(), s.std()) def round_mean(s): return "{:.0f}".format(round(s.mean())) stats = ['min', 'max', 'mean', 'std'] def agg(columns, filter=None, round=3): if filter is None: return (df.groupby(columns) .agg({'val_acc_max_epoch': round_mean, 'val_acc_max': stats, 'val_acc_last': stats, 'model': ['count']})).round(round) else: return (df[filter].groupby(columns) .agg({'val_acc_max_epoch': round_mean, 'val_acc_max': stats, 'val_acc_last': stats, 'model': ['count']})).round(round) """ Explanation: ## Analysis Experiment Details End of explanation """ agg(['hebbian_grow']) """ Explanation: What are optimal levels of hebbian and weight pruning End of explanation """
mathLab/RBniCS
tutorials/12_stokes/tutorial_stokes_2_pod.ipynb
lgpl-3.0
from dolfin import * from rbnics import * from sampling import LinearlyDependentUniformDistribution """ Explanation: TUTORIAL 12 - Stokes Equations Keywords: geometrical parametrization, POD-Galerkin method, mixed formulation, inf sup condition 1. Introduction This tutorial addresses geometrical parametrization and the POD-Galerkin method applied to the steady Stokes equations in a domain $\Omega_o \subset \mathbb{R}^2$ divided into 4 parts with boundary $\Gamma_o$ shown below: <img src="data/t_bypass.png" width="50%"/> The problem is characterized by six parameters. We introduce a vector of parameters $\boldsymbol{\mu} = {t,D,L,S,H,\theta }$ that control the shape of the subdomains. The ranges of the six parameters are the following: The parameter vector $\boldsymbol{\mu}$ is thus given by $$\boldsymbol{\mu}=(\mu_0,\mu_1,\mu_2,\mu_3,\mu_4,\mu_5)$$ which corresponds to $\boldsymbol{\mu} = {t,D,L,S,H,\theta }$, respectively, on the parameter domain $$\mathbb{P}=[0.5,1.5]\times[0.5,1.5]\times[0.5,1.5]\times[0.5,1.5]\times[0.5,1.5]\times[0,\pi/6]$$ In this program, we apply the following conditions on the boundaries: * Zero velocity on the left boundary $\Gamma_{o,w}$ * Constant inflow on the right boundary $\Gamma_{o,in}$ * Stress free Neumann condition on the bottom boundary $\Gamma_{o,out}$ In order to obtain a faster approximation of the problem we pursue a model reduction by means of a POD-Galerkin reduced order method from a fixed reference domain. 2. Parametrized formulation Let $\boldsymbol{u_o}(\boldsymbol{\mu})$ be the velocity vector and $p_o(\boldsymbol{\mu})$ be the pressure in the domain $\Omega_o(\boldsymbol{\mu})$. 
We will directly provide a weak formulation for this problem: for a given parameter $\boldsymbol{\mu} \in\mathbb{P}$, find $\boldsymbol{u_o}(\boldsymbol{\mu}) \in\mathbb{V_o}(\boldsymbol{\mu})$, $p_o \in\mathbb{M_o}$ such that <center> $ \begin{cases} \nu \int_{\Omega_o} \nabla \boldsymbol{u_o} : \nabla \boldsymbol{v_o} \ d\Omega - \int_{\Omega_o} p_o \nabla \cdot \boldsymbol{v_o} \ d\Omega = \int_{\Omega_o} \boldsymbol{f_o} \cdot \boldsymbol{v_o} \ d\Omega, \quad \forall \boldsymbol{v_o} \in\mathbb{V_o}, \ \int_{\Omega_o} q_o \nabla \cdot \boldsymbol{u_o} \ d\Omega = 0, \quad \forall q_o \in\mathbb{M_o} \end{cases} $ </center> where $\nu$ represents kinematic viscosity the functional space $\mathbb{V_o}(\boldsymbol{\mu})$ is defined as $$\mathbb{V_o}(\boldsymbol{\mu}) = [H_{\Gamma_{o,w}}^{1}(\Omega_o)]^2$$ the functional space $\mathbb{M_o}(\boldsymbol{\mu})$ is defined as $$\mathbb{M_o}(\boldsymbol{\mu}) = L^2(\Omega_o)$$ Note that the functional spaces are parameter dependent due to the shape variation Since this problem utilizes mixed finite element discretization with the velocity and pressure as solution variables, the inf-sup condition is necessary for the well posedness of this problem. Thus, the supremizer operator $T^{\mu}: \mathbb{M_o}_h \rightarrow \mathbb{V_o}_h$ will be used. End of explanation """ @PullBackFormsToReferenceDomain() @AffineShapeParametrization("data/t_bypass_vertices_mapping.vmp") class Stokes(StokesProblem): # Default initialization of members def __init__(self, V, **kwargs): # Call the standard initialization StokesProblem.__init__(self, V, **kwargs) # ... 
and also store FEniCS data structures for assembly assert "subdomains" in kwargs assert "boundaries" in kwargs self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"] up = TrialFunction(V) (self.u, self.p) = split(up) vq = TestFunction(V) (self.v, self.q) = split(vq) self.dx = Measure("dx")(subdomain_data=self.subdomains) self.ds = Measure("ds")(subdomain_data=self.boundaries) # ... as well as forcing terms and inlet velocity self.inlet = Expression(("- 1./0.25*(x[1] - 1)*(2 - x[1])", "0."), degree=2) self.f = Constant((0.0, 0.0)) self.g = Constant(0.0) # Return custom problem name def name(self): return "Stokes2POD" # Return theta multiplicative terms of the affine expansion of the problem. @compute_theta_for_supremizers def compute_theta(self, term): if term == "a": theta_a0 = 1.0 return (theta_a0, ) elif term in ("b", "bt"): theta_b0 = 1.0 return (theta_b0, ) elif term == "f": theta_f0 = 1.0 return (theta_f0, ) elif term == "g": theta_g0 = 1.0 return (theta_g0, ) elif term == "dirichlet_bc_u": theta_bc0 = 1. return (theta_bc0, ) else: raise ValueError("Invalid term for compute_theta().") # Return forms resulting from the discretization of the affine expansion of the problem operators. 
@assemble_operator_for_supremizers def assemble_operator(self, term): dx = self.dx if term == "a": u = self.u v = self.v a0 = inner(grad(u), grad(v)) * dx return (a0, ) elif term == "b": u = self.u q = self.q b0 = - q * div(u) * dx return (b0, ) elif term == "bt": p = self.p v = self.v bt0 = - p * div(v) * dx return (bt0, ) elif term == "f": v = self.v f0 = inner(self.f, v) * dx return (f0, ) elif term == "g": q = self.q g0 = self.g * q * dx return (g0, ) elif term == "dirichlet_bc_u": bc0 = [DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1), DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 3)] return (bc0, ) elif term == "inner_product_u": u = self.u v = self.v x0 = inner(grad(u), grad(v)) * dx return (x0, ) elif term == "inner_product_p": p = self.p q = self.q x0 = inner(p, q) * dx return (x0, ) else: raise ValueError("Invalid term for assemble_operator().") """ Explanation: 3. Affine decomposition In order to obtain an affine decomposition, we recast the problem on a fixed, parameter independent, reference domain $\Omega$. We choose one characterized by $\mu_0=\mu_1=\mu_2=\mu_3=\mu_4=1$ and $\mu_5=0$, which we generate through the generate_mesh notebook provided in the data folder. End of explanation """ mesh = Mesh("data/t_bypass.xml") subdomains = MeshFunction("size_t", mesh, "data/t_bypass_physical_region.xml") boundaries = MeshFunction("size_t", mesh, "data/t_bypass_facet_region.xml") """ Explanation: 4. Main program 4.1. Read the mesh for this problem The mesh was generated by the data/generate_mesh.ipynb notebook. End of explanation """ element_u = VectorElement("Lagrange", mesh.ufl_cell(), 2) element_p = FiniteElement("Lagrange", mesh.ufl_cell(), 1) element = MixedElement(element_u, element_p) V = FunctionSpace(mesh, element, components=[["u", "s"], "p"]) """ Explanation: 4.2. 
Create Finite Element space (Taylor-Hood P2-P1) End of explanation """ problem = Stokes(V, subdomains=subdomains, boundaries=boundaries) mu_range = [ (0.5, 1.5), (0.5, 1.5), (0.5, 1.5), (0.5, 1.5), (0.5, 1.5), (0., pi / 6.) ] problem.set_mu_range(mu_range) """ Explanation: 4.3. Allocate an object of the Stokes class End of explanation """ reduction_method = PODGalerkin(problem) reduction_method.set_Nmax(25) reduction_method.set_tolerance(1e-6) """ Explanation: 4.4. Prepare reduction with a POD-Galerkin method End of explanation """ lifting_mu = (1.0, 1.0, 1.0, 1.0, 1.0, 0.0) problem.set_mu(lifting_mu) reduction_method.initialize_training_set(100, sampling=LinearlyDependentUniformDistribution()) reduced_problem = reduction_method.offline() """ Explanation: 4.5. Perform the offline phase End of explanation """ online_mu = (1.0, 1.0, 1.0, 1.0, 1.0, pi / 6.) reduced_problem.set_mu(online_mu) reduced_solution = reduced_problem.solve() plot(reduced_solution, reduced_problem=reduced_problem, component="u") plot(reduced_solution, reduced_problem=reduced_problem, component="p") """ Explanation: 4.6. Perform an online solve End of explanation """ reduction_method.initialize_testing_set(100, sampling=LinearlyDependentUniformDistribution()) reduction_method.error_analysis() """ Explanation: 4.7. Perform an error analysis End of explanation """ reduction_method.speedup_analysis() """ Explanation: 4.8. Perform a speedup analysis End of explanation """
irockafe/revo_healthcare
notebooks/HMDB/hmdb_isomers.ipynb
mit
# namespace - at the top of file. fucks with every tag. # very annoying, so name all tags ns + tag ns = '{http://www.hmdb.ca}' nsmap = {None : ns} # If you're within a metabolite tag count = 0 seen_mass = 0 d = {} for event, element in etree.iterparse(xml_file, tag=ns+'metabolite'): tree = etree.ElementTree(element) # Aggregate info into a dictionary of # {HMDB_ID: iso_mass} accession = [] # Get accession number and masses for each metabolite # Could be multiple accessions. Grab all of them, # sort to make unique identifier for elem in tree.iter(): if elem.tag == ns+'accession': accession.append(elem.text) # If you just saw a 'mono_mass' entry, # get the mass value and reset, saying you # havent seen 'mono_mass' in the text of next metabolite if (elem.tag == ns+'value') & (seen_mass == 1): mass = float(elem.text) seen_mass = 0 if elem.text == 'mono_mass': seen_mass = 1 elem.clear() # sort accession numbers and join with '_' accession_key = '_'.join(sorted(accession)) # add to dictionary if mass: d[accession_key] = mass # reset mass - only add feature if mass listed mass = None # reset accession numbers accession = [] element.clear() count += 1 if count % 1000 == 0: print('Made it through ' + str(count) + ' metabolites') #pickle.dump(d, open('serumdb_dict.p', 'wb')) print 'Number of metabolites: %s' % len(d.keys()) # write to file pickle.dump(d, open(pickle_path, 'wb')) hmdb_dict = pickle.load(open(pickle_path, 'rb')) # masses are entries of dict, yes? 
hmdb_masses = pd.Series(hmdb_dict, dtype='float32') """ Explanation: TODO: make all this so that it checks if these files are present - Claire sent an email about a fxn that helps do this in the past End of explanation """ def plot_mz_hist(series, ppm): worst_mz_bin = (ppm * series.max() * 10**-6) #print 'Worst bin size: %s for %s mass' % (worst_mz_bin, # series.max()) median_mz_bin = (ppm*series.median() * 10**-6) #print 'Median bin size: %s for %s mass' % (median_mz_bin, # series.median()) median_bins = np.arange(series.min(), series.max(), median_mz_bin) worst_bins = np.arange(series.min(), series.max(), worst_mz_bin) print 'median bins:', median_bins.shape print 'worst bins:', worst_bins.shape #sns.distplot(series, kde=False) #plt.show() #plt.hist(series, bins=median_mz_bin) sns.distplot(series, kde=False, norm_hist=False, bins=worst_bins) plt.ylim([0,10]) plt.title('mz overlaps at 30ppm, worst possible binsize (deltaX at highest m/z)') plt.show() # norm_hist=False, kde=False) #plt.show() return plt ppm = 30 import copy my_plot = plot_mz_hist(hmdb_masses, 30) 30*hmdb_masses.max() * 10**-6 hmdb_masses[0:5] ppm_matrix = combine_mz.ppm_matrix(hmdb_masses, hmdb_masses) # write to file np_path = local+path+'/hmdb_serumdb_20170813_ppm_matrix.npy' np.save(np_path, ppm_matrix) # reload it ppm_matrix = np.load(np_path) # Convert to upper triangular matrix idx_ppm = np.tril_indices(ppm_matrix.shape[0]) ppm_matrix[idx_ppm] = np.nan # get indices whose ppm falls below cutoff # Ignore runtime warning - means we're ignoring NaN values isomer_indices = np.argwhere(ppm_matrix < ppm) isomer_indices.shape print isomer_indices[0:10] # write isomer indices to file np.save(local+path+'/hmdb_serumdb_20170813_isomer_indices_%s_ppm.npy' % ppm, isomer_indices) isomer_indices = np.load(local+path+'/hmdb_serumdb_20170813_isomer_indices_%s_ppm.npy' % ppm) # TODO - fix this - it takes too long # 7 seconds for 25,000 molecules ends up being # 48 hours of run-time def 
isomers_from_ppm_matrix(ppm_matrix, ppm): ''' Only tested on square matrices for now INPUT - numpy array of ppm values OUTPUT - list of arrays - position in list is same as row in matrix, values in each list-entry are indices along column of array ''' bool_idx = ppm_matrix < ppm # Get indices where you have an isomer # for each row iso_indices = [np.argwhere(x) for x in bool_idx] return iso_indices toy_ppm = np.array([ [0, 20, 15, 50], [100, 0, 90, 10 ], [15, 90, 0, 10]], ) # not additive ppms print 'Input:\n', toy_ppm isomers_from_ppm_matrix(toy_ppm, 30) ppm = 30 isomers = isomers_from_ppm_matrix(ppm_matrix, ppm) np.save(local+path+'isomer_index_per_feature.npy', isomers) num_isomers = [len(x) for x in isomers] sns.distplot(num_isomers,) plt.title('Overlapping features at 30ppm') plt.xlabel('Number of isomers') plt.show() plt.hist(num_isomers, bins=1000) plt.xlabel('Number of isomers') plt.title('Overlapping features at 30ppm') plt.show() single_isomers = sum([i <= 1 for i in num_isomers]) print ("Number of metabolites from HMDB with no " + 'isomers: {num} out of {hmdb}: {per:.2f} percent'.format( num=single_isomers, hmdb=ppm_matrix.shape[0], per = float(single_isomers) / ppm_matrix.shape[0]*100)) """ Explanation: Plot histogram of metabolites at m/z give bin size of x ppm End of explanation """ ppm = 3 isomers = isomers_from_ppm_matrix(ppm_matrix, ppm) np.save(local+path+'isomer_index_per_feature_%s_ppm.npy' % ppm, isomers) num_isomers = [len(x) for x in isomers] sns.distplot(num_isomers,) plt.title('Overlapping features at %sppm' % ppm) plt.xlabel('Number of isomers') plt.show() plt.hist(num_isomers, bins=1000) plt.xlabel('Number of isomers') plt.title('Overlapping features at %sppm' % ppm) plt.show() ppm = 1 isomers = isomers_from_ppm_matrix(ppm_matrix, ppm) np.save(local+path+'isomer_index_per_feature_%s_ppm.npy' % ppm, isomers) num_isomers = [len(x) for x in isomers] sns.distplot(num_isomers,) plt.title('Overlapping features at %sppm' % ppm) 
plt.xlabel('Number of isomers') plt.show() plt.hist(num_isomers, bins=1000) plt.xlabel('Number of isomers') plt.title('Overlapping features at %sppm' % ppm) plt.show() """ Explanation: Repeat at 5ppm End of explanation """ # First make isomer indices as before # Use 30, because it's the worst ppms = [30,3] isomers = isomers_from_ppm_matrix(ppm_matrix, ppm) # get the number of isomers at that position num_isomers = [len(x) for x in isomers] print isomers[0:5] print num_isomers[0:5] # Next get y'values (mz) corresponding to those indices plt.scatter(x=hmdb_masses, y=num_isomers, s=1) plt.ylim([0,400]) plt.show() """ Explanation: Let's try to plot histogram # of isomers vs. m/z End of explanation """
petersaints/YanuX-Cruncher
YanuXCalculatorUserStudy.ipynb
gpl-3.0
import numpy as np from scipy import stats import statsmodels.stats.proportion as smp import pandas as pd import matplotlib.pyplot as plt """ Explanation: Yanux Calculator Imports End of explanation """ def print_stats(data, hist_bins=10, hist_size=(8,4)): print('--- Statistics ----') display(data.describe()) print('\n') print('--- Counting Unique Values ----') display(data.value_counts()) print('\n') print('--- Basic Histogram ----') data.hist(bins=hist_bins, figsize=hist_size) plt.show() def calculate_sus(data): for i in range(len(data.columns)): if i % 2: data.iloc[:,i] = 7 - data.iloc[:,i] else: data.iloc[:,i] = data.iloc[:,i] - 1 data['Score'] = data.iloc[:,0:10].sum(axis=1) data['Score 100'] = data['Score'] * (100/60) return data def confidence_interval_t(data, confidence_level=0.95): return stats.t.interval(confidence_level, data.count()-1, data.mean(), data.sem()) """ Explanation: Helper Functions End of explanation """ data = pd.read_excel('data/userstudies/yanux-calculator-questionnaire-responses.xlsx', sheet_name=None) data_basic = data['Basic'] #display(data_basic) """ Explanation: Load Excel End of explanation """ data_basic_age = data_basic['Age'] print_stats(data_basic_age, hist_bins=20) print('Population Standard Deviation:') print(data_basic_age.std(ddof=0)) """ Explanation: Age End of explanation """ data_basic_genre = data_basic['Genre'] print_stats(data_basic_genre) """ Explanation: Genre End of explanation """ data_basic_education = data_basic['Education'] print_stats(data_basic_education) """ Explanation: Education End of explanation """ data_basic_type_of_devices = data_basic['Type of Devices'].map(lambda x: [i.strip() for i in x.split(",")]) print('--- Statistics ----') display(data_basic_type_of_devices.describe()) #print('--- Basic Histogram ----') #data_basic_type_of_devices.hist() data_number_of_devices = data_basic_type_of_devices.apply(len) print_stats(data_number_of_devices) data_basic_multiple_devices = data_basic['Number of 
Devices'] print_stats(data_basic_multiple_devices) print('Population Standard Deviation:') print(data_basic_multiple_devices.std(ddof=0)) """ Explanation: Type of Devices End of explanation """ data_sus = calculate_sus(data['SUS'].copy().dropna()) data_sus """ Explanation: SUS Questions I think that I would like to use this system frequently. I found the system unnecessarily complex. I thought the system was easy to use. I think that I would need the support of a technical person to be able to use this system. I found the various functions in this system were well integrated. I thought there was too much inconsistency in this system. I would imagine that most people would learn to use this system very quickly. I found the system very cumbersome to use. I felt very confident using the system. I needed to learn a lot of things before I could get going with this system. End of explanation """ print(data_sus['Score 100'].mean()) """ Explanation: Mean Score 100 End of explanation """ data_sus.describe() #print_stats(data_sus) """ Explanation: General Stats End of explanation """ confidence_interval_t(data_sus['Score 100']) """ Explanation: Confidence Interval End of explanation """ ##Uncomment if you need these stats #for column in data_sus: #print_stats(data_sus[column]) """ Explanation: Per-question Stats End of explanation """ data_domain_specific = data['Domain Specific'] #data_domain_specific print_stats(data_domain_specific) """ Explanation: Domain Specific Questions End of explanation """ data_domain_specific_freq_res = (data_domain_specific.apply(pd.value_counts).fillna(0)/data_domain_specific.count()*100).transpose() data_domain_specific_freq_res data_domain_specific_freq_res.iloc[::-1].plot.barh(stacked=True).legend(loc='center left',bbox_to_anchor=(1.0, 0.5)) plt.show() for question in data_domain_specific: print(question, 'Median:', data_domain_specific[question].median(), 'Mean:', data_domain_specific[question].mean(), 'Standard Deviation:', 
data_domain_specific[question].std(), 'Confidence Interval:', confidence_interval_t(data_domain_specific[question])) """ Explanation: Response Frequency End of explanation """ data_basic_usage_multiple_devices = data_domain_specific['DS1'] (data_basic_usage_multiple_devices[data_basic_usage_multiple_devices >= 5].count()/data_basic_usage_multiple_devices.count())*100 """ Explanation: Percentage of Users that User Multiple Devices Sometimes or More Often End of explanation """ res = np.array([7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6]) res.size np.median(res) res.mean() res.std() """ Explanation: Auxiliary Calculations for JuxtBoard End of explanation """
ZwickyTransientFacility/ztf_sim
notebooks/plot_simulator_inputs.ipynb
bsd-3-clause
# hack to get the path right import sys sys.path.append('..') import ztf_sim from astropy.time import Time import pandas as pd import numpy as np import astropy.units as u import pylab as plt import seaborn as sns %matplotlib inline sns.set_style('ticks') sns.set_context('talk') """ Explanation: plot_simulator_inputs This notebook gives visualizations of some of the inputs to the simulator. End of explanation """ df = ztf_sim.utils.df_read_from_sqlite('weather_blocks') df.head() plt.hist(df['nexps']) plt.xlabel('Number of exposures in block') plt.ylabel('Number of blocks') sns.despine() """ Explanation: weather We have binned PTF observing into 20 minute intervals to use as a proxy for weather, daylight, and other downtime. End of explanation """ 12*100*u.second.to(u.min) blocks_per_day = np.round((1.*u.day).to(u.min)/ztf_sim.constants.TIME_BLOCK_SIZE).astype(np.int) fig = plt.figure(figsize=(18,24)) # cut off 2017, which is partial df = df[df['year'] < 2017] years = np.sort(list(set(df['year']))) nyears = len(years) for i, year in enumerate(years): ax = plt.subplot(nyears,1,i+1) w = df['year'] == year # make an array to hold these, including zeros for times without observations # buggy but sufficient accounting for leap years ndays = 365 if year % 4 == 0: ndays += 1 nexps = np.zeros([ndays,blocks_per_day]) for block, n in zip(df[w]['block'],df[w]['nexps']): nexps.flat[block] = n sns.heatmap(nexps.T,xticklabels=15,yticklabels=False,vmin=0,vmax=12) ax.set_ylim([25,70]) # this was guess and check... 
ax.set_ylabel(np.int(year)) ax.set_xlabel('Day of Year') plt.savefig('fig/PTF_exposure_blocks.png',bbox_inches='tight') """ Explanation: We see a peak near 12 images per block, which at ~100 seconds average time per exposure matches our 20 minute block size: End of explanation """ df = pd.read_table('../data/mjd.txt.gz', sep='|', names=['expMJD'], skipfooter=1) # let's just look at complete iPTF years df = df[(df['expMJD'] >= Time('2013-01-01').mjd) & (df['expMJD'] < Time('2016-01-01').mjd)] t = Time(df['expMJD'], format='mjd', location=ztf_sim.utils.P48_loc) df['datetime'] = t.datetime df = df.set_index('datetime') df['month'] = df.index.month #df['day'] = df.index.day df['night'] = np.floor(t.mjd).astype(np.int) #df['night'] = t.iso #df['night'] = df['night'].apply(lambda x: x.split(' ')[0]) #df['year'] = np.floor(t.decimalyear).astype(np.int) # these are slow, but faster than me figuring out how to speed them up #df['month'] = [ti.datetime.month for ti in t] #df['day'] = [ti.datetime.day for ti in t] """ Explanation: Let's look at the raw image times (all filters, including H-alpha) to understand the available observing time. End of explanation """ grp = df.groupby(['night']) nexps = grp['expMJD'].agg(len) nexps.name = 'nexps' # make a month dataframe dfm = grp.agg(lambda x:x) df = df.join(nexps,on='night') """ Explanation: Now count exposures by night End of explanation """ df['obstime'] = df['nexps'] * 100./3600. df.head() """ Explanation: Convert to observing time in hours using our average time between exposures. This means our observing time estimate is conservative, because it excludes long slews. 
End of explanation """ grp = df.groupby('month') # now fill missing dates with zeros--nights with no exposures due to weather or # other downtime # (df.asfreq('D') doesn't let me set the fill value) df = df.reindex(pandas.date_range(df.index.min(),df.index.max()),fill_value=0) pd.date_range('2013-01-01','2015-12-31') """ Explanation: Aggregate by month: End of explanation """
PyladiesMx/Empezando-con-Python
4. Lops/.ipynb_checkpoints/For Loops-checkpoint.ipynb
mit
#Obtén el cuadrado de 1 #Obtén el cuadrado de 2 #Obtén el cuadrado de 3 #Obtén el cuadrado de 4 #Obtén el cuadrado de 5 #Obtén el cuadrado de 6 #Obtén el cuadrado de 7 #Obtén el cuadrado de 8 #Obtén el cuadrado de 9 #Obtén el cuadrado de 10 """ Explanation: Bienvenida a otra reunión de pyladies!! Yo sé que después de las vacaciones lo que ya habías aprendido en python tal vez no esté tan fresco. Así que vamos a enumerar (y explicar) brevemente lo que hemos estado viendo en python. Operaciones básicas o cómo usar python como calculadora. Python se puede usar básicamente como cualquier calculadora operando directamente sobre objetos como números enteros (integers) o decimales (floats) y series de caracteres (strings) Asignación de variables.Si quieres guardar los resultados de operaciones, floats, integers, strings en la memoria de python lo que tenemos que hacer es asignarlos a unas variables. Para hacer esto tienes que inventar un nombre (que empiece con letras del alfabeto) poner un signo igual y después de este el valor u operación que desees guardar como en el siguiente ejemplo: variable = 5 + 2.5 variable_string = "String" Listas, el álbum coleccionador de python. Si lo que quieres es una colección de elementos en python, una de las estructuras de datos que te permite hacer esto son las listas, para estas tienes que poner entre corchetes los elementos que quieras guardar (todos los tipos de datos incluyendo listas!) separados por comas. Ejemplo: lista = [variable, 5, 2.5, "Hola"] Control de flujo. Decisiones con "if" y "else". En algún punto tendrás que hacer un programa el cual deba seguir dos caminos distintos dependiendo de una condición. Por ejemplo para decidir si usar un paraguas o no un programa puede ser: Si llueve entonces uso un paraguas, de lo contrario no se usa. 
Esto en python se representa de la siguiente forma: if lluvia == True: paraguas = True else: paraguas = False Espero que este repaso te haya ayudado a refrescar tu memoria, pero lo que hoy veremos es un concepto muy útil en la programación y éste es la iteració. Iteraciones en python Las iteraciones son la repetición de una misma secuencia de paso determinado número de veces, esta repetición iteración se va a llevar a cabo hasta que se cumpla una condición. Para hacerlo más claro imagina que tu quieres obtener el cuadrado de todos los número del 1 al 20, lo que tendrías que hacer en python (si no hubiera iteraciones) es escribir la misma operación 20 veces. Como ejercicio obtén los cuadrados manualmente End of explanation """ for numero in range(1,21): cuadrado = numero**2 print(cuadrado) """ Explanation: Yo creo que el punto está entendido... Es tedioso estar escribiendo lo mismo 20 veces. Ahora imagina que no tienes que hacer esto 20 veces, sino 10 000!!! Suena a mucho trabajo no? Sin embargo en python hay varias estrategias para resolverlo. Hoy veremos el for loop (o froot loop como yo le digo jejeje). El for loop es una clase de iteración a la cual tu le vas a dar una lista o colección de objetos para iterar (llamados iterables) y sobre cada elemento va a ejecutar la serie de instrucciones que le diste hasta que se acabe la lista o iterble. Veamos un ejemplo para clarificarlo... Hagamos lo mismo que queríamos hacer en el ejemplo anterior. End of explanation """ lista = [5.9, 3.0, 2, 25.5, 14.2] """ Explanation: Yeiii!!! viste lo que se puede hacer con loops. Ahora te toca a ti. Ejercicio 1 Crea un programa que convierta todos los elementos de la siguiente lista a integers (usando por supuesto el froot loop) End of explanation """ lista_anidada = [['Perro', 'Gato'], ['Joven', 'Viejo'], [1, 2]] """ Explanation: Ejercicio 2 Crea un programa que te de como resultado una nueva lista con los promedios de la lista creada anteriormente. 
Ejercicio 3 crea un programa que imprima "hola" el número de veces que el usuario escoja. Ejemplo. "Escoge un número del 1 al 100": 3 "hola" "hola" "hola" Loops anidados Algo curioso en python es que puedes generar un loop for, dentro de otro loop. INCEPTION... Veamos un ejemplo End of explanation """ for elemento in lista_anidada: print (elemento) """ Explanation: Observa lo que para cuando le pedimos a python que nos imprima cada elemento de la lista anidada End of explanation """ for elemento in lista_anidada: for objeto in elemento: print(objeto) """ Explanation: Y que pasa si queremos obtener cada elemento de todas las listas End of explanation """
vkuznet/rep
howto/04-howto-folding.ipynb
apache-2.0
%pylab inline """ Explanation: About This notebook demonstrates stacking machine learning algorithm - folding, which physicists use in their analysis. End of explanation """ !cd toy_datasets; wget -O MiniBooNE_PID.txt -nc MiniBooNE_PID.txt https://archive.ics.uci.edu/ml/machine-learning-databases/00199/MiniBooNE_PID.txt import numpy, pandas from rep.utils import train_test_split from sklearn.metrics import roc_auc_score data = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep='\s*', skiprows=[0], header=None, engine='python') labels = pandas.read_csv('toy_datasets/MiniBooNE_PID.txt', sep=' ', nrows=1, header=None) labels = [1] * labels[1].values[0] + [0] * labels[2].values[0] data.columns = ['feature_{}'.format(key) for key in data.columns] train_data, test_data, train_labels, test_labels = train_test_split(data, labels, train_size=0.5) """ Explanation: Loading data download particle identification Data Set from UCI End of explanation """ variables = list(data.columns) """ Explanation: Training variables End of explanation """ from rep.estimators import SklearnClassifier from sklearn.ensemble import GradientBoostingClassifier """ Explanation: Folding strategy - stacking algorithm It implements the same interface as all classifiers, but with some difference: all prediction methods have additional parameter "vote_function" (example folder.predict(X, vote_function=None)), which is used to combine all classifiers' predictions. 
By default "mean" is used as "vote_function" End of explanation """ from rep.metaml import FoldingClassifier n_folds = 4 folder = FoldingClassifier(GradientBoostingClassifier(), n_folds=n_folds, features=variables) folder.fit(train_data, train_labels) """ Explanation: Define folding model End of explanation """ folder.predict_proba(train_data) """ Explanation: Default prediction (predict i_th_ fold by i_th_ classifier) End of explanation """ # definition of mean function, which combines all predictions def mean_vote(x): return numpy.mean(x, axis=0) folder.predict_proba(test_data, vote_function=mean_vote) """ Explanation: Voting prediction (predict i-fold by all classifiers and take value, which is calculated by vote_function) End of explanation """ from rep.data.storage import LabeledDataStorage from rep.report import ClassificationReport # add folds_column to dataset to use mask train_data["FOLDS"] = folder._get_folds_column(len(train_data)) lds = LabeledDataStorage(train_data, train_labels) report = ClassificationReport({'folding': folder}, lds) """ Explanation: Comparison of folds Again use ClassificationReport class to compare different results. For folding classifier this report uses only default prediction. 
Report training dataset End of explanation """ for fold_num in range(n_folds): report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={1: 'sig fold %d' % fold_num}).plot() """ Explanation: Signal distribution for each fold Use mask parameter to plot distribution for the specific fold End of explanation """ for fold_num in range(n_folds): report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={0: 'bck fold %d' % fold_num}).plot() """ Explanation: Background distribution for each fold End of explanation """ for fold_num in range(n_folds): report.roc(mask="FOLDS == %d" % fold_num).plot() """ Explanation: ROCs (each fold used as test dataset) End of explanation """ lds = LabeledDataStorage(test_data, test_labels) report = ClassificationReport({'folding': folder}, lds) report.prediction_pdf().plot(new_plot=True, figsize = (9, 4)) report.roc().plot(xlim=(0.5, 1)) """ Explanation: Report for test dataset NOTE: Here vote function is None, so default prediction is used End of explanation """
dsacademybr/PythonFundamentos
Cap08/DesafioDSA_Solucao/Missao4/missao4.ipynb
gpl-3.0
class SelectionSort(object): def sort(self, data): # Implemente aqui sua solução """ Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 7</font> Download: http://github.com/dsacademybr Missão 2: Implementar o Algoritmo de Ordenação "Selection sort". Nível de Dificuldade: Alto Premissas As duplicatas são permitidas?      * Sim Podemos assumir que a entrada é válida?      * Não Podemos supor que isso se encaixa na memória?      * Sim Teste Cases None -> Exception [] -> [] One element -> [element] Two or more elements Algoritmo Animação do Wikipedia: Podemos fazer isso de forma recursiva ou iterativa. Iterativamente será mais eficiente, pois não requer sobrecarga de espaço extra com as chamadas recursivas. Para cada elemento      * Verifique cada elemento à direita para encontrar o min      * Se min < elemento atual, swap Solução End of explanation """ %%writefile missao4.py from nose.tools import assert_equal, assert_raises class TestSelectionSort(object): def test_selection_sort(self, func): print('None input') assert_raises(TypeError, func, None) print('Input vazio') assert_equal(func([]), []) print('Um elemento') assert_equal(func([5]), [5]) print('Dois ou mais elementos') data = [5, 1, 7, 2, 6, -3, 5, 7, -10] assert_equal(func(data), sorted(data)) print('Sua solução foi executada com sucesso! Parabéns!') def main(): test = TestSelectionSort() try: selection_sort = SelectionSort() test.test_selection_sort(selection_sort.sort) except NameError: pass if __name__ == '__main__': main() %run -i missao4.py """ Explanation: Teste da Solução End of explanation """
the-deep-learners/nyc-ds-academy
notebooks/point_by_point_intro_to_tensorflow.ipynb
mit
import numpy as np np.random.seed(42) import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import tensorflow as tf tf.set_random_seed(42) """ Explanation: Introduction to TensorFlow, fitting point by point In this notebook, we introduce TensorFlow by fitting a line of the form y=m*x+b point by point. This is a derivation of Jared Ostmeyer's Naked Tensor code. Load dependencies and set seeds for reproducibility End of explanation """ xs = [0., 1., 2., 3., 4., 5., 6., 7.] # feature (independent variable) ys = [-.82, -.94, -.12, .26, .39, .64, 1.02, 1.] # labels (dependent variable) fig, ax = plt.subplots() _ = ax.scatter(xs, ys) """ Explanation: Create a very small data set End of explanation """ m = tf.Variable(-0.5) b = tf.Variable(1.0) """ Explanation: Define variables -- the model parameters we'll learn -- and initialize them with "random" values End of explanation """ total_error = 0.0 for x,y in zip(xs, ys): y_model = m*x + b total_error += (y-y_model)**2 """ Explanation: One single point at a time, define the error between the true label and the model's prediction of the label End of explanation """ optimizer_operation = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(total_error) """ Explanation: Define optimizer as SSE-minimizing gradient descent End of explanation """ initializer_op = tf.global_variables_initializer() """ Explanation: Define an operator that will initialize the graph with all available global variables End of explanation """ with tf.Session() as sess: sess.run(initializer_op) n_epochs = 10 for iteration in range(n_epochs): sess.run(optimizer_operation) slope, intercept = sess.run([m, b]) slope intercept """ Explanation: With the computational graph designed, we initialize a session to execute it End of explanation """ y_hat = slope*np.array(xs) + intercept pd.DataFrame(list(zip(ys, y_hat)), columns=['y', 'y_hat']) fig, ax = plt.subplots() ax.scatter(xs, ys) x_min, x_max = ax.get_xlim() y_min, y_max = 
intercept, intercept + slope*(x_max-x_min) ax.plot([x_min, x_max], [y_min, y_max]) _ = ax.set_xlim([x_min, x_max]) """ Explanation: Calculate the predicted model outputs given the inputs xs End of explanation """
BinRoot/TensorFlow-Book
ch02_basics/Concept09_queue.ipynb
mit
import tensorflow as tf import numpy as np """ Explanation: Ch 02: Concept 09 Using Queues If you have a lot of training data, you probably don't want to load it all into memory at once. The QueueRunner in TensorFlow is a tool to efficiently employ a queue data-structure in a multi-threaded way. End of explanation """ import multiprocessing NUM_THREADS = multiprocessing.cpu_count() """ Explanation: We will be running multiple threads, so let's figure out the number of CPUs: End of explanation """ xs = np.random.randn(100, 3) ys = np.random.randint(0, 2, size=100) """ Explanation: Generate some fake data to work with: End of explanation """ xs_and_ys = zip(xs, ys) for _ in range(5): x, y = next(xs_and_ys) print('Input {} ---> Output {}'.format(x, y)) """ Explanation: Here's a couple concrete examples of our data: End of explanation """ queue = tf.FIFOQueue(capacity=1000, dtypes=[tf.float32, tf.int32]) """ Explanation: Define a queue: End of explanation """ enqueue_op = queue.enqueue_many([xs, ys]) x_op, y_op = queue.dequeue() """ Explanation: Set up the enqueue and dequeue ops: End of explanation """ qr = tf.train.QueueRunner(queue, [enqueue_op] * 4) """ Explanation: Define a QueueRunner: End of explanation """ sess = tf.InteractiveSession() """ Explanation: Now that all variables and ops have been defined, let's get started with a session: End of explanation """ coord = tf.train.Coordinator() enqueue_threads = qr.create_threads(sess, coord=coord, start=True) """ Explanation: Create threads for the QueueRunner: End of explanation """ for _ in range(100): if coord.should_stop(): break x, y = sess.run([x_op, y_op]) print(x, y) coord.request_stop() coord.join(enqueue_threads) """ Explanation: Test out dequeueing: End of explanation """
statkclee/ThinkStats2
code/chap01soln-kor.ipynb
gpl-3.0
import nsfg df = nsfg.ReadFemPreg() df """ Explanation: 통계적 사고 (2판) 연습문제 (thinkstats2.com, think-stat.xwmooc.org)<br> Allen Downey / 이광춘(xwMOOC) End of explanation """ df.birthord.value_counts().sort_index() """ Explanation: <tt>birthord</tt>에 대한 빈도수를 출력하고 codebook 게시된 결과값과 비교하시오. End of explanation """ df.prglngth.value_counts().sort_index() """ Explanation: <tt>prglngth</tt>에 대한 빈도수를 출력하고 codebook 게시된 결과값과 비교하시오. End of explanation """ df.agepreg.value_counts().sort_index() """ Explanation: <tt>agepreg</tt>에 대한 빈도수를 출력하고 codebook에 게시된 결과값과 비교하시오. 이 데이터를 살펴보고, 응답자에 대한 존경과 맥락을 고려해서 데이터에 접근하는에 필요한 의무에 관해서 저자가 언급한 논평을 기억하라. End of explanation """ df.totalwgt_lb.mean() """ Explanation: 평균 출생체중(birthweight)을 계산하시오. End of explanation """ df['totalwgt_kg'] = df.totalwgt_lb / 2.2 df.totalwgt_kg.mean() """ Explanation: 킬로그램으로 출생체중 정보를 담는 <tt>totalwgt_kg</tt>로 불리는 새로운 칼럼을 생성하라. 평균도 계산하시오. 새로운 칼럼을 생성할 때, 점표기법이 아닌 딕셔너리 구문을 사용하는 것을 기억하라. End of explanation """ df.columns """ Explanation: 코드북(codebook)을 살펴보고 책에서 언급된 것이 아닌 본인이 관심있는 변수를 찾아내라. 그리고 그 변수의 빈도수, 평균, 다른 통계량을 계산하시오. End of explanation """ print('Count:', df.npostsmk.value_counts().sort_index()) ## 임신기간동안 흡연 print('Mean:', df.npostsmk.mean()) """ Explanation: 부울 시리즈(boolean Series)를 생성하시오. End of explanation """ live = df[df.outcome == 1] len(live) """ Explanation: 부울 시리즈를 사용해서 정상출산 임신에 대한 레코드를 선택하시오. End of explanation """ len(live[(live.birthwgt_lb >= 0) & (live.birthwgt_lb <= 5)]) """ Explanation: <tt>birthwgt_lb</tt> 변수에 0 에서 5 파운드(0과 5도 모두 포함) 사이 정상출산 빈도수를 계산하시오. 결과는 1125 가 되어야만 된다. End of explanation """ len(live[(live.birthwgt_lb >=9)&(live.birthwgt_lb <=95)]) """ Explanation: <tt>birthwgt_lb</tt> 변수에 9 에서 95 파운드(9과 95도 모두 포함) 사이 정상출산 빈도수를 계산하시오. 결과는 798 가 되어야만 된다. End of explanation """ firsts = df[df.birthord==1] others = df[df.birthord>1] len(firsts), len(others) """ Explanation: <tt>birthord</tt> 변수를 사용해서, 첫번째 아이와 첫째가 아닌 아이에 대한 레코드를 선택하시오. 첫번째 아이와 첫째가 아닌 아이는 얼마나 되는가? 
End of explanation """ firsts.totalwgt_lb.mean() others.totalwgt_lb.mean() """ Explanation: 첫번째 아이와 첫째가 아닌 아이에 대한 평균 체중을 계산하시오. End of explanation """ print('Firsts Mean: ', firsts.prglngth.mean()) print('Others Mean: ', others.prglngth.mean()) print('Diff: ', firsts.prglngth.mean()-others.prglngth.mean()) """ Explanation: 변수 <tt>prglngth</tt>으로 첫째 아이와 첫째가 아닌 아이에 대한 평균임신기간을 계산하시오. 시간으로 표시된, 평균에 차이를 계산하시오. End of explanation """
pablovicente/python-tutorials
regular_expressions.ipynb
mit
import re """ Explanation: Regular Expressions End of explanation """ # re.match(pattern, string, flags=0) line = "Cats are smarter than dogs" matchObj = re.match( r'(.*) are (.*?) .*', line, re.M|re.I) if matchObj: print "matchObj.group() : ", matchObj.group() #or matchObj.group(0) print "matchObj.group(1) : ", matchObj.group(1) print "matchObj.group(2) : ", matchObj.group(2) else: print "No match!!" """ Explanation: Match This function attempts to match RE pattern to string with optional flags. End of explanation """ # re.search(pattern, string, flags=0) line = "Cats are smarter than dogs" searchObj = re.search( r'(.*) are (.*?) .*', line, re.M|re.I) if searchObj: print "searchObj.group() : ", searchObj.group() #or matchObj.group(0) print "searchObj.group(1) : ", searchObj.group(1) print "searchObj.group(2) : ", searchObj.group(2) else: print "No match!!" """ Explanation: Search This function attempts to match RE pattern to string with optional flags. End of explanation """ # findall(pattern, string, flags=0) line = "Cats are smarter than dogs" re.findall( r'(.*) are (.*?) .*', line, re.M|re.I) """ Explanation: Match vs Search Python offers two different primitive operations based on regular expressions: match checks for a <br> match only at the beginning of the string, while search checks for a match anywhere in the string. <br> Findall Two pattern methods return all of the matches for a pattern. findall() returns a list of matching strings End of explanation """ # re.split(pattern, string, maxsplit=0, flags=0) print re.split('\W+', 'Words, words, words.') print re.split('(\W+)', 'Words, words, words.') print re.split('\W+', 'Words, words, words.', 1) print re.split('[a-f]+', '0a3B9', flags=re.IGNORECASE) """ Explanation: Split Split string by the occurrences of pattern. If capturing parentheses are used in pattern, <br> then the text of all groups in the pattern are also returned as part of the resulting list. 
End of explanation """ # re.sub(pattern, repl, string, max=0) re.sub('(blue|white|red)', 'colour', 'blue socks and red shoes') re.subn('(blue|white|red)', 'colour', 'blue socks and red shoes') """ Explanation: Sub Returns the string obtained by replacing the leftmost non-overlapping occurrences of the RE in string by the replacement replacement. If the pattern isn’t found, string is returned unchanged. End of explanation """ # compile(pattern, flags=0) prog = re.compile('(blue|white|red)') prog.sub('colour', 'blue socks and red shoes') """ Explanation: Compile Compile a regular expression pattern, returning a pattern object End of explanation """
Vvkmnn/books
UdacityTensorflow/1_notmnist.ipynb
gpl-3.0
# These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import matplotlib.pyplot as plt import numpy as np import os import sys import tarfile from IPython.display import display, Image from scipy import ndimage from sklearn.linear_model import LogisticRegression from six.moves.urllib.request import urlretrieve from six.moves import cPickle as pickle # Config the matplotlib backend as plotting inline in IPython %matplotlib inline """ Explanation: Deep Learning Assignment 1 The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later. This notebook uses the notMNIST dataset to be used with python experiments. This dataset is designed to look like the classic MNIST dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST. End of explanation """ url = 'http://commondatastorage.googleapis.com/books1000/' last_percent_reported = None def download_progress_hook(count, blockSize, totalSize): """A hook to report the progress of a download. This is mostly intended for users with slow internet connections. Reports every 1% change in download progress. 
""" global last_percent_reported percent = int(count * blockSize * 100 / totalSize) if last_percent_reported != percent: if percent % 5 == 0: sys.stdout.write("%s%%" % percent) sys.stdout.flush() else: sys.stdout.write(".") sys.stdout.flush() last_percent_reported = percent def maybe_download(filename, expected_bytes, force=False): """Download a file if not present, and make sure it's the right size.""" if force or not os.path.exists(filename): print('Attempting to download:', filename) filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook) print('\nDownload Complete!') statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename train_filename = maybe_download('notMNIST_large.tar.gz', 247336696) test_filename = maybe_download('notMNIST_small.tar.gz', 8458043) """ Explanation: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine. End of explanation """ num_classes = 10 np.random.seed(133) def maybe_extract(filename, force=False): root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz if os.path.isdir(root) and not force: # You may override by setting force=True. print('%s already present - Skipping extraction of %s.' % (root, filename)) else: print('Extracting data for %s. This may take a while. Please wait.' 
% root) tar = tarfile.open(filename) sys.stdout.flush() tar.extractall() tar.close() data_folders = [ os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))] if len(data_folders) != num_classes: raise Exception( 'Expected %d folders, one per class. Found %d instead.' % ( num_classes, len(data_folders))) print(data_folders) return data_folders train_folders = maybe_extract(train_filename) test_folders = maybe_extract(test_filename) """ Explanation: Extract the dataset from the compressed .tar.gz file. This should give you a set of directories, labelled A through J. End of explanation """ image_size = 28 # Pixel width and height. pixel_depth = 255.0 # Number of levels per pixel. def load_letter(folder, min_num_images): """Load the data for a single letter label.""" image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32) print(folder) num_images = 0 for image in image_files: image_file = os.path.join(folder, image) try: image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth if image_data.shape != (image_size, image_size): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] if num_images < min_num_images: raise Exception('Many fewer images than expected: %d < %d' % (num_images, min_num_images)) print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset def maybe_pickle(data_folders, min_num_images_per_class, force=False): dataset_names = [] for folder in data_folders: set_filename = folder + '.pickle' dataset_names.append(set_filename) if os.path.exists(set_filename) and not force: # You may override by setting force=True. 
print('%s already present - Skipping pickling.' % set_filename) else: print('Pickling %s.' % set_filename) dataset = load_letter(folder, min_num_images_per_class) try: with open(set_filename, 'wb') as f: pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', set_filename, ':', e) return dataset_names train_datasets = maybe_pickle(train_folders, 45000) test_datasets = maybe_pickle(test_folders, 1800) """ Explanation: Problem 1 Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display. Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size. We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road. A few images might not be readable, we'll just skip them. 
End of explanation """ def make_arrays(nb_rows, img_size): if nb_rows: dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32) labels = np.ndarray(nb_rows, dtype=np.int32) else: dataset, labels = None, None return dataset, labels def merge_datasets(pickle_files, train_size, valid_size=0): num_classes = len(pickle_files) valid_dataset, valid_labels = make_arrays(valid_size, image_size) train_dataset, train_labels = make_arrays(train_size, image_size) vsize_per_class = valid_size // num_classes tsize_per_class = train_size // num_classes start_v, start_t = 0, 0 end_v, end_t = vsize_per_class, tsize_per_class end_l = vsize_per_class+tsize_per_class for label, pickle_file in enumerate(pickle_files): try: with open(pickle_file, 'rb') as f: letter_set = pickle.load(f) # let's shuffle the letters to have random validation and training set np.random.shuffle(letter_set) if valid_dataset is not None: valid_letter = letter_set[:vsize_per_class, :, :] valid_dataset[start_v:end_v, :, :] = valid_letter valid_labels[start_v:end_v] = label start_v += vsize_per_class end_v += vsize_per_class train_letter = letter_set[vsize_per_class:end_l, :, :] train_dataset[start_t:end_t, :, :] = train_letter train_labels[start_t:end_t] = label start_t += tsize_per_class end_t += tsize_per_class except Exception as e: print('Unable to process data from', pickle_file, ':', e) raise return valid_dataset, valid_labels, train_dataset, train_labels train_size = 200000 valid_size = 10000 test_size = 10000 valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets( train_datasets, train_size, valid_size) _, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size) print('Training:', train_dataset.shape, train_labels.shape) print('Validation:', valid_dataset.shape, valid_labels.shape) print('Testing:', test_dataset.shape, test_labels.shape) """ Explanation: Problem 2 Let's verify that the data still looks good. 
Displaying a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot. Problem 3 Another check: we expect the data to be balanced across classes. Verify that. Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9. Also create a validation dataset for hyperparameter tuning. End of explanation """ def randomize(dataset, labels): permutation = np.random.permutation(labels.shape[0]) shuffled_dataset = dataset[permutation,:,:] shuffled_labels = labels[permutation] return shuffled_dataset, shuffled_labels train_dataset, train_labels = randomize(train_dataset, train_labels) test_dataset, test_labels = randomize(test_dataset, test_labels) valid_dataset, valid_labels = randomize(valid_dataset, valid_labels) """ Explanation: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match. End of explanation """ pickle_file = 'notMNIST.pickle' try: f = open(pickle_file, 'wb') save = { 'train_dataset': train_dataset, 'train_labels': train_labels, 'valid_dataset': valid_dataset, 'valid_labels': valid_labels, 'test_dataset': test_dataset, 'test_labels': test_labels, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise statinfo = os.stat(pickle_file) print('Compressed pickle size:', statinfo.st_size) """ Explanation: Problem 4 Convince yourself that the data is still good after shuffling! Finally, let's save the data for later reuse: End of explanation """
ScoffM/ITESO-Word2Vec
Doc2Vec_PopCorn.ipynb
gpl-3.0
import re import random import nltk.data import numpy as np import pandas as pd from bs4 import BeautifulSoup from nltk.corpus import stopwords from gensim.models import Doc2Vec from gensim.models.doc2vec import LabeledSentence from sklearn.ensemble import RandomForestClassifier #Loading the differents sets of data. train = pd.read_csv( "labeledTrainData.tsv", header=0, delimiter="\t", quoting=3, encoding="utf-8" ) test = pd.read_csv( "testData.tsv", header=0, delimiter="\t", quoting=3, encoding="utf-8" ) unlabeled_train = pd.read_csv( "unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3, encoding="utf-8" ) #Getting the reviews for each set of data. x_train = train['review'] x_test = test['review'] x_unlabeled = unlabeled_train['review'] #Getting the sentiment just for train. In unlabeled_train we don't #have sentiment column as same in test. y_train = train['sentiment'] """ Explanation: Doc2Vec End of explanation """ def review_to_wordlist( review, remove_stopwords=False ): # Function to convert a document to a sequence of words, # optionally removing stop words. Returns a list of words. # clean_reviews = [] for i in range(len(review)): # 1. Remove HTML review_text = BeautifulSoup(review[i]).get_text() # # 2. Remove non-letters review_text = re.sub("[^a-zA-Z]"," ", review_text) # # 3. Convert words to lower case and split them words = review_text.lower().split() # # 4. Optionally remove stop words (false by default) if remove_stopwords: stops = set(stopwords.words("english")) words = [w for w in words if not w in stops] # clean_reviews.append(words) # 5. Return a list of reviews return(clean_reviews) #Cleaning the data with the reviews in each set of data x_train = review_to_wordlist(x_train) x_test = review_to_wordlist(x_test) x_unlabeled = review_to_wordlist(x_unlabeled) """ Explanation: Function to clean the data. 
Removes html, special characters like punctuation and put all the words in lower case End of explanation """ def labelizeReviews(reviews, label_type): labelized = [] for i,v in enumerate(reviews): label = '%s_%s'%(label_type,i) labelized.append(LabeledSentence(v, [label])) return labelized x_concatenate = np.concatenate((x_train, x_unlabeled)) #Doing labilize procces to each data set, specifying in the parameter "label_type" #what kind of data is the given data. x_train2 = labelizeReviews(x_concatenate, 'TRAIN') x_test2 = labelizeReviews(x_test, 'TEST') size = 50 model= Doc2Vec(min_count=1, window=5, size=size, sample=1e-4, negative=5, workers=8) #build vocab over all reviews model.build_vocab(x_train2) import random as ra ra.seed(12345) alpha = 0.025 min_alpha = 0.001 num_epochs = 5 alpha_delta = (alpha - min_alpha) / num_epochs for epoch in range(num_epochs): print(epoch) ra.shuffle(x_train2) model.alpha = alpha model.min_alpha = alpha model.train(x_train2) alpha -= alpha_delta #Infering over the test reviews in base the previous model built test_vectors = np.zeros((len(x_test),size)) for i in range(len(x_test)): test_vectors[i] = model.infer_vector(x_test[i]) #Getting the train vectors from the model with the function docvecs. #Remember that when we concatenate train and unlabeled sets, the first #25,000 rows are from train set. The rest 50,000 rows are from the unlabeled set. train_vectors = np.zeros((len(x_train), size)) for i in range(len(x_train)): train_vectors[i] = model.docvecs[i] #model.save('my_model.doc2vec') """ Explanation: Function that labelize each review into 'TRAIN_i' , 'TEST_i' and 'UNSUP_i' where i is an index for each observation. This procces is needed because in Doc2Vec it's necesary that each document, in this case review, needs to have a label associated with it. 
End of explanation """ # Fit a random forest and extract predictions forest = RandomForestClassifier(n_estimators = 100) # Fitting the forest may take a few minutes print "Fitting a random forest to labeled training data..." forest = forest.fit(train_vectors,train["sentiment"]) results = forest.predict(test_vectors) output = pd.DataFrame(data={"id":test["id"], "sentiment":results}) output.to_csv( "Doc2Vec.csv", index=False, quoting=3 ) """ Explanation: Training and prediciting sentimient with Random Forest End of explanation """ from sklearn.linear_model import LogisticRegression lgr = LogisticRegression() lgr = lgr.fit(train_vectors, train["sentiment"]) results = lgr.predict(test_vectors) output = pd.DataFrame(data={"id":test["id"], "sentiment":results}) output.to_csv( "Doc2Vec_lgr.csv", index=False, quoting=3 ) """ Explanation: Training and predicting sentiment with Logistic Regression End of explanation """
diging/tethne-notebooks
7. A Closer Look at Corpora.ipynb
gpl-3.0
from tethne.readers import wos datapath = '/Users/erickpeirson/Downloads/datasets/wos' corpus = wos.read(datapath) """ Explanation: 7. A Closer Look at Corpora A Corpus is a collection of Papers with superpowers. Most importantly, it provides a consistent way of indexing bibliographic records. Indexing is important, because it sets the stage for all of the subsequent analyses that we may wish to do with our bibliographic data. In 1. Loading Data, part 1 we used the read function in tethne.readers.wos to parse a collection of Web of Science field-tagged data files and build a Corpus. End of explanation """ print 'The primary index field for the Papers in my Corpus is "%s"' % corpus.index_by """ Explanation: In this notebook, we'll dive deeper into the guts of the Corpus, focusing on indexing and and features. Setting the primary indexing field: index_by The primary indexing field is the field that Tethne uses to identify each of the Papers in your dataset. Ideally, each one of the records in your bibliographic dataset will have this field. Good candidates include DOIs, URIs, or other unique identifiers. Depending on which module you use, read will make assumptions about which field to use as the primary index for the Papers in your dataset. The default for Web of Science data, for example, is 'wosid' (the value of the UT field-tag). End of explanation """ corpus.indexed_papers.items()[0:10] # We'll just show the first ten Papers, for the sake of space. """ Explanation: The primary index for your Corpus can be found in the indexed_papers attribute. indexed_papers is a dictionary that maps the value of the indexing field for each Paper onto that Paper itself. 
End of explanation """ corpus.indexed_papers['WOS:000321911200011'] """ Explanation: So if you know (in this case) the wosid of a Paper, you can retrieve that Paper by passing the wosid to indexed_papers: End of explanation """ otherCorpus = wos.read(datapath, index_by='doi') print 'The primary index field for the Papers in this other Corpus is "%s"' % otherCorpus.index_by """ Explanation: If you'd prefer to index by a different field, you can pass the index_by parameter to read. End of explanation """ i = 0 for doi, paper in otherCorpus.indexed_papers.items()[0:10]: print '(%i) DOI: %s \t ---> \t Paper: %s' % (i, doi.ljust(30), paper) i += 1 """ Explanation: If some of the Papers lack the indexing field that you specified with the index_by parameter, Tethne will automatically generate a unique identifier for each of those Papers. For example, in our otherCorpus that we indexed by doi, most of the papers have valid DOIs, but a few (#1, below) did not -- a nonsensical-looking sequence of alphanumeric characters was used instead. End of explanation """ print 'The following Paper fields have been indexed: \n\n\t%s' % '\n\t'.join(corpus.indices.keys()) """ Explanation: Other indexing fields In addition to the primary index, you can index the Papers in your Corpus using any other fields that you like. By default, the Web of Science read method will index 'citations' and 'authors': End of explanation """ for citation, papers in corpus.indices['citations'].items()[7:10]: # Show the first three, for space's sake. print 'The following Papers cite %s: \n\n\t%s \n' % (citation, '\n\t'.join(papers)) """ Explanation: The 'citations' index, for example, allows us to look up all of the Papers that contain a particular bibliographic reference: End of explanation """ papers = corpus.indices['citations']['CARLSON SM 2004 EVOL ECOL RES'] # Who cited Carlson 2004? 
print papers for paper in papers: print corpus.indexed_papers[paper] """ Explanation: Notice that the values above are not Papers themselves, but identifiers. These are the same identifiers used in the primary index, so we can use them to look up Papers: End of explanation """ corpus.index('authorKeywords') for keyword, papers in corpus.indices['authorKeywords'].items()[6:10]: # Show the first three, for space's sake. print 'The following Papers contain the keyword %s: \n\n\t%s \n' % (keyword, '\n\t'.join(papers)) """ Explanation: We can create new indices using the index method. For example, to index our Corpus using the authorKeywords field: End of explanation """ corpus.index('date') for date, papers in corpus.indices['date'].items()[-11:-1]: # Last ten years. print 'There are %i Papers from %i' % (len(papers), date) """ Explanation: Since we're interested in historical trends in our Corpus, we probably also want to index the date field: End of explanation """ corpus.distribution()[-11:-1] # Last ten years. plt.figure(figsize=(10, 3)) start = min(corpus.indices['date'].keys()) end = max(corpus.indices['date'].keys()) X = range(start, end + 1) plt.plot(X, corpus.distribution(), lw=2) plt.ylabel('Number of Papers') plt.xlim(start, end) plt.show() """ Explanation: We can examine the distribution of Papers over time using the distribution method: End of explanation """ corpus['WOS:000309391500014'] """ Explanation: Selecting Papers from the Corpus In previous examples, we selected a Paper from our Corpus using the primary index, indexed_papers. In fact, there is a much simpler way! Corpus allows us to "select" Papers using its built-in get method: End of explanation """ corpus[('authorKeywords', 'LIFE')] """ Explanation: Whoa! But it gets better. We can select Papers using any of the indices in the Corpus. 
For example, we can select all of the papers with the authorKeyword LIFE: End of explanation """ corpus[['WOS:000309391500014', 'WOS:000306532900015']] """ Explanation: We can also select Papers using several values. For example, with the primary index field: End of explanation """ corpus[('authorKeywords', ['LIFE', 'ENZYME GENOTYPE', 'POLAR AUXIN'])] """ Explanation: ...and with other indexed fields (think of this as an OR search): End of explanation """ papers = corpus[('date', range(2002, 2013))] # range() excludes the "last" value. print 'There are %i Papers published between %i and %i' % (len(papers), 2002, 2012) """ Explanation: Since we indexed 'date' earlier, we could select any Papers published between 2011 and 2012: End of explanation """ corpus.features.items() """ Explanation: Features Earlier we used specific fields in our Papers to create indices. The inverse of an index is what we call a FeatureSet. A FeatureSet contains data about the occurrence of specific features across all of the Papers in our Corpus. The read method generates a few FeatureSets by default. All of the available FeatureSets are stored in a dictionary, the features attribute. End of explanation """ featureset = corpus.features['authors'] for k, author in featureset.index.items()[0:10]: print '%i --> "%s"' % (k, ', '.join(author)) # Author names are stored as (LAST, FIRST M). """ Explanation: Each FeatureSet has several properties: FeatureSet.index maps integer identifiers to specific features. 
For example, for author names: End of explanation """ featureset = corpus.features['authors'] for author, k in featureset.lookup.items()[0:10]: print '%s --> %i' % (', '.join(author).ljust(25), k) """ Explanation: FeatureSet.lookup is the reverse of index: it maps features onto their integer IDs: End of explanation """ featureset = corpus.features['authors'] for k, count in featureset.documentCounts.items()[0:10]: print 'Feature %i (which identifies author "%s") is found in %i documents' % (k, ', '.join(featureset.index[k]), count) """ Explanation: FeatureSet.documentCounts shows how many Papers in our Corpus have a specific feature: End of explanation """ featureset.features.items()[0] """ Explanation: FeatureSet.features shows how many times each feature occurs in each Paper. End of explanation """ corpus.index_feature('authorKeywords') corpus.features.keys() featureset = corpus.features['authorKeywords'] for k, count in featureset.documentCounts.items()[0:10]: print 'Keyword %s is found in %i documents' % (featureset.index[k], count) featureset.features['WOS:000324532900018'] # Feature for a specific Paper. plt.figure(figsize=(10, 3)) years, values = corpus.feature_distribution('authorKeywords', 'DIVERSITY') start = min(years) end = max(years) X = range(start, end + 1) plt.plot(years, values, lw=2) plt.ylabel('Papers with DIVERSITY in authorKeywords') plt.xlim(start, end) plt.show() """ Explanation: We can create a new FeatureSet from just about any field in our Corpus, using the index_feature method. For example, suppose that we were interested in the distribution of authorKeywords across the whole corpus: End of explanation """
interedition/paceofchange
defactoring-pace-of-change.ipynb
mit
### DEFACTORING IMPORT import os import csv import random from collections import Counter import numpy as np import pandas as pd #from multiprocessing import Pool ### Defactoring Import from multiprocess import Pool import matplotlib.pyplot as plt %matplotlib inline from sklearn.linear_model import LogisticRegression """ Explanation: Defactoring Pace of Change At a practical level, we have copied the code from Underwood and Sellers' Pace of Change git repository and restructured their code into a Jupyter Notebook. Transforming Underwood and Seller's code from a set of Python scripts into the notebook involved a series of transformations to their code. The provenance of this effort has been preserved because we did all of our work in a fork of their version repository, visible via the git history command. Underwood and Sellers's wrote their code to make it easy to replicate their results by running a single command. One of our first tasks was to trace their code following the path of execution from the initial command to the output data files. By reading the code that spanned multiple python scripts, we could obtain a rough idea of how Underwood and Sellers's analysis of poetry volumes was performed in practice. The first step of reading their code involved translating their python scripts into a Jupyter Notebook. Plain text files leave much to be desired with trying to critically interrogate code, the Jupyter Notebook format and platform enables a much richer reading experience. Translating Underwood and Sellers's scripts involved copying snippets of code from multiple Python files into a single executable notebook. For code that was not already authored in a notebook, this requires significant effort curating and restructuring the code to fit the linear structure of the notebook while preserving the functionality of Underwood and Sellers's scripts. Not all of the code in the git repository was copied into the notebook for two main reasons. 
First, not all of the code included in the files was actually needed to replicate the analysis. The repository includes additional code, one could call it "cruft", from Underwood and Seller's exploratory analysis or earlier iterations of their analytical process. For example, the file SonicScrewdiver.py is never called upon although, based upon the name, we might hypothesize it is an important catch-all module for a variety of different functions. Other sections of the code are commented out and thus never executed (such as binormal_select discussed below). As part of the defactoring process, we opted to not include unused functions or code blocks. Another reason that the code of Sellers and Underwood was not copied verbatim is because we do not represent all possible executions paths of the code. Their code included execution paths that trained multiple models of the data broken into twenty-year periods, but their final analysis trained a model on the entire dataset. In this notebook we follow the default option, "full", which models the entire dataset. Underwood and Seller's code is stored within four Python files, replicate.py, parallel_crossvalidate.py, metafilter.py, and modelingprocess.py. Through the process of defactoring we have copied some, but not all, code from the Python text files to a Jupyter Notebook. This process has transformed the structure and flow of the code, but not the functionality. Like refactoring, the inputs and outputs and overall functionality remain the same, but our process has restructured the code to be much more readable as a narrative with our own expository text intermixed with executable blocks of code. In order for the code to execute seamlessly within the notebook, we had to make minor changes and tweaks to the code. These changes fall into 4 categories: defactoring functions - This is the most significant of the changes. When we defactor a function we take the function's code and move it to the global namespace. 
This has the effect of eliminating the function and just making it part of the main execution path. defactoring definitions - Not all functions can be fully defactored. Functions that are called more than once or those that are short have been kept as re-usable functions. Defactoring Definition cells define the functions above the code cells that use them (preventing errors). defactoring namespace - Because we have defactored some of the functions and their function calls some of the variables in the namespace need to be mapped to each other. This happens, for instance, when the return value of a defactored function needs to be stored in a differently named variable so the rest of the code can access it (resolving the fact we have moved a function's internal variables into a global namespace). defactoring inspections - When we want to inspect the state of the process, we insert an inspection cell that prints the values of the variables of interest. These allow us the ability to inspect the state of the process as it is running; essentially debugging intermediate states. defactoring import - Because the code is reliant upon external and third party functions, we need to import that code into the global namespace. This cell contains all of those imports. How to Read this Notebook One of the advantages to defactoring a function is that it affords us the ability to insert critical commentary into the code constituting the function itself. An unfortunate side-effect is that this makes keeping track of one's place in the code a little more difficult. We have restructured the code into an executable narrative (when read as a Jupyter Notebook) for both human and machine readers. Jupyter Notebooks are divided into a series of cells, code cells and text cells. In our defactoring, the code cells contain Python code mostly written by Underwood and Sellers, but with a few modifications by us. 
Code cells can be executed when reading this document using the Jupyter Platform (as opposed to static document). We have broken up the code of Underwood and Sellers and inserted text cells, written by us, which discuss and explain what is happening in the code cells and underlying computational process. In most cases the expository text is below the code cell. In the critical examination of the code we will be using a fair bit of coding language jargon. Although a very deep grasp of these coding concepts will not be needed, a certain level of coding literacy (Vee 2013) will facilitate a speedier comprehension of our narrative. Program Structure The code expressed below has nine steps: * Importing Libraries - Loads the necessary Python libraries needed for the analysis. * Setting Parameters - Specifies parameters for the loading, cleaning, and labeling of data as well as sets conditions for the logistic regression. * Preparing Metadata - Generates a list of *.tsv files from the poems/ directory. * Cleaning Metadata - Loads the metadata file, poemetadata.csv and performs some cleaning of the metadata to make labeling easier. * Sorting Training Data - Sort the volumes into two bins, reviewed and not reviewed using the cleaned metadata. * Transforming Words into Features - Identifies the 3,200 most common words in the corpus. Those most common words will be the features for the regression. * Filtering Authors - Removes poems by authors who have been reviewed. * Filtering Words - Remove any words from the poem data that are not in the most-common feature list. * Training Predictive Models - Run a separate logistic regression for each volume, using a single volume as held-out data and measure each model's predictive power. * Modeling Coefficients - Run a single logistic regression over all the data to inspect the salient coefficients. * Plotting Results - Generate a plot showing the accuracy of the predictive models. 
Importing Libraries This section loads a series of libraries used in the Pace of Change analysis. End of explanation """ ## PATHS. sourcefolder = 'poems/' extension = '.poe.tsv' classpath = 'poemeta.csv' outputpath = 'mainmodelpredictions.csv' ## EXCLUSIONS. excludeif = dict() excludeif['pubname'] = 'TEM' # We're not using reviews from Tait's. excludeif['recept'] = 'addcanon' # We don't ordinarily include canonical volumes that were not in either sample. # These are included only if we're testing the canon specifically. excludeifnot = dict() excludeabove = dict() excludebelow = dict() excludebelow['firstpub'] = 1700 excludeabove['firstpub'] = 1950 sizecap = 360 # For more historically-interesting kinds of questions, we can limit the part # of the dataset that gets TRAINED on, while permitting the whole dataset to # be PREDICTED. (Note that we always exclude authors from their own training # set; this is in addition to that.) The variables futurethreshold and # pastthreshold set the chronological limits of the training set, inclusive # of the threshold itself. ## THRESHOLDS futurethreshold = 1925 pastthreshold = 1800 # CLASSIFY CONDITIONS positive_class = 'rev' category2sorton = 'reviewed' datetype = 'firstpub' numfeatures = 3200 regularization = .00007 paths = (sourcefolder, extension, classpath, outputpath) exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap) thresholds = (pastthreshold, futurethreshold) classifyconditions = (category2sorton, positive_class, datetype, numfeatures, regularization) """ Explanation: We begin examination of the code by importing a series of Python libraries into working memory. This is the boundary between the layers of bespoke code and existing general purpose (os, csv, random) and scientific computing libraries (numpy, pandas, sklearn). Following from Hinsen's model of layers of scientific software, what is missing is the inclusion of libraries from the disciplinary layer. 
The most specific library in terms of use in the Hinsen model is the LogisticRegression model from Scikit-Learn, but we'd argue this lives in layer two, scientific software, because it is broadly applicable across a variety of disciplines. This begs the question, what or where are the disciplinary Python libraries for literary history or digital humanities? What functions would they perform? What domain specific tasks or methods need to encoded into a disciplinary library? Perhaps it is too early for such libraries to exist as the practices of computational and data intensive research are still new (in literary history). Setting Parameters The first section of the code sets a series of parameters specifying what data to process, where data are located, and parameters for the logistic regression. While there is no complex logic or work being done in this section, many assumptions and important distinctions that shape the execution of subsequent code are defined here. End of explanation """ ### DEFACTORING FUNCTION ### def create_model(paths, exclusions, thresholds, classifyconditions): ''' This is the main function in the module. It can be called externally; it's also called if the module is run directly. ''' verbose = False if not sourcefolder.endswith('/'): sourcefolder = sourcefolder + '/' # This just makes things easier. # Get a list of files. allthefiles = os.listdir(sourcefolder) # random.shuffle(allthefiles) volumeIDs = list() volumepaths = list() for filename in allthefiles: if filename.endswith(extension): volID = filename.replace(extension, "") # The volume ID is basically the filename minus its extension. # Extensions are likely to be long enough that there is little # danger of accidental occurrence inside a filename. E.g. 
# '.fic.tsv' path = sourcefolder + filename volumeIDs.append(volID) volumepaths.append(path) """ Explanation: The parameters defined in the code cell above are a set of knobs and switches used to tweak the performance and execution of the computational modeling process. Underwood and Sellers have collected the parameters into four categories: paths, exclusions, thresholds, and classifyconditions. These categories are simultaneously distinguished discursively through the code comments (the lines beginning with a #) and technologically through four variable assignments, exclusions, thresholds, and classifyconditions. Because technically speaking the grouping of parameters is not strictly necessary, each of these four variables embody stylistic choices of the authors as a means of organizing and structuring the information they are encoding in Python. The variables in paths specify the location of the data and metadata files as well as where to write the output files at the completion of the analysis. The variables in exclusions specify data and types of data to be excluded from the analysis, such as reviews from Tait's Endinburgh Magazine (https://en.wikipedia.org/wiki/Tait%27s_Edinburgh_Magazine), which we infer from the author's comments. Additional exclusions specify temporal boundaries from 1700 to 1950. A further set of two variables in thresholds also articulates a temporal boundary from 1800 to 1925. The comments indicate this distinguishes the temporal window for datasets used in training versus those used during prediction. The variables in classifyconditions are important parameters for the logistic regression, specifying the number of variables to train the model upon as well as setting the regularization parameter (regularization) for the logistic regression. What is not well documented here, is why the value .00007 was chosen over other values. 
Preparing Metadata With the preparation of metadata we begin to see some logical work of Pace of Change being conducted. The code in this section has two subsections, one to clean the metadata and another to sort the training data. All of the work in this section focuses on preparing the metadata, identified in the classpath variable and the filenames of the individual data files in the sourcefolder. The main task of this section is to organize the metadata of the volumes and their associated labels (positive or negative) for training the logistic regression. All of the code in this section attends to the cleanliness of the metadata; we will not start digging into the data itself until the next section. End of explanation """ ### DEFACTORING INSPECTION ### Inspect the two variables defined in the codecell above. ### We know they are lists so lets just look at the first item. print("The first item in volumeIDs is: ", volumeIDs[0]) print("The first item in volumepaths is: ",volumepaths[0]) """ Explanation: This code assembles a list of volume identifiers (volumeIDs) and file paths (volumepaths) by readings the directory listing of files in the poems/ directory (sourcefolder). The filenames are in and of themselves a source of metadata, but as we will see in the code below, they need to be reconciled with the metadata stored separately from the data files. We are curious about the contents of the volumeIDs and volumepaths variables. End of explanation """ ### DEFACTORING FUNCTION DEFINITION ### we need these helper functions for execute the next code cell def dirty_pairtree(htid): period = htid.find('.') prefix = htid[0:period] postfix = htid[(period+1): ] if '=' in postfix: postfix = postfix.replace('+',':') postfix = postfix.replace('=','/') dirtyname = prefix + "." 
+ postfix return dirtyname def forceint(astring): try: intval = int(astring) except: intval = 0 return intval """ Explanation: The code has created an alignment between identifiers in the metadata records and the filename identifiers of the TSV data files themselves (located in the poems/ folder). These identifiers, dul1.ark+=13960=t5fb5xg2z, are the threads that stitch together the various representations of the (meta)data. Cleaning Metadata End of explanation """ ### DEFACTORING INSPECTION metadata_file = pd.read_csv(classpath) #print(metadata_file.shape) print("The metadata files has {} rows and {} columns.".format( *metadata_file.shape)) metadata_file.iloc[0:5,0:9] # display first 5 rows and 10 columns """ Explanation: This code cell defines two functions used in the code below. The first is dirty_pairtree(), which cleans up the identifiers in the data. This issue arises from the fact that the HathiTrust (where Underwood and Sellers got their data) uses IDs that cannot be expressed on the filesystem and Underwood and Sellers encoded ID metadata in filenames. The / and : characters in the IDs cannot be part of a file name. So, because the volumes are stored as individual files they have a + and an = instead. However, the IDs are stored in the original format in the metadata file so the IDS have to be transformed back into the original HathiTrust format. The second function is called forceint() and transforms numbers expressed as Python strings into the python integer data type with a bit of error handling in the case values that throw and error when being cast as an integer. What does the metadata look like? We can inspect the beginning of file to get a sense of the material conditions of the metadata. End of explanation """ ### DEFACTORING FUNCTION ### def get_metadata(classpath, volumeIDs, excludeif, excludeifnot, excludebelow, excludeabove): ''' As the name would imply, this gets metadata matching a given set of volume IDs. 
It returns a dictionary containing only those volumes that were present both in metadata and in the data folder. It also accepts four dictionaries containing criteria that will exclude volumes from the modeling process. ''' print(classpath) metadict = dict() with open(classpath, encoding = 'utf-8') as f: reader = csv.DictReader(f) anonctr = 0 for row in reader: volid = dirty_pairtree(row['docid']) theclass = row['recept'].strip() # I've put 'remove' in the reception column for certain # things that are anomalous. if theclass == 'remove': continue bail = False for key, value in excludeif.items(): if row[key] == value: bail = True for key, value in excludeifnot.items(): if row[key] != value: bail = True for key, value in excludebelow.items(): if forceint(row[key]) < value: bail = True for key, value in excludeabove.items(): if forceint(row[key]) > value: bail = True if bail: print("DEFACTORING: Excluding volume with id "+volid) ### DEFACTORING CODE continue birthdate = forceint(row['birth']) pubdate = forceint(row['inferreddate']) gender = row['gender'].rstrip() nation = row['nationality'].rstrip() #if pubdate >= 1880: #continue if nation == 'ca': nation = 'us' elif nation == 'ir': nation = 'uk' # I hope none of my Canadian or Irish friends notice this. notes = row['notes'].lower() author = row['author'] if len(author) < 1 or author == '<blank>': author = "anonymous" + str(anonctr) anonctr += 1 title = row['title'] canon = row['canon'] # I'm creating two distinct columns to indicate kinds of # literary distinction. The reviewed column is based purely # on the question of whether this work was in fact in our # sample of contemporaneous reviews. The obscure column incorporates # information from post-hoc biographies, which trumps # the question of reviewing when they conflict. 
if theclass == 'random': obscure = 'obscure' reviewed = 'not' elif theclass == 'reviewed': obscure = 'known' reviewed = 'rev' elif theclass == 'addcanon': print("DEFACTORING: adding volume") ### DEFACTORING CODE obscure = 'known' reviewed = 'addedbecausecanon' else: print("Missing class" + theclass) if notes == 'well-known': obscure = 'known' if notes == 'obscure': obscure = 'obscure' if canon == 'y': if theclass == 'addcanon': actually = 'Norton, added' else: actually = 'Norton, in-set' elif reviewed == 'rev': actually = 'reviewed' else: actually = 'random' metadict[volid] = dict() metadict[volid]['reviewed'] = reviewed metadict[volid]['obscure'] = obscure metadict[volid]['pubdate'] = pubdate metadict[volid]['birthdate'] = birthdate metadict[volid]['gender'] = gender metadict[volid]['nation'] = nation metadict[volid]['author'] = author metadict[volid]['title'] = title metadict[volid]['canonicity'] = actually metadict[volid]['pubname'] = row['pubname'] metadict[volid]['firstpub'] = forceint(row['firstpub']) # These come in as dirty pairtree; we need to make them clean. cleanmetadict = dict() allidsinmeta = set([x for x in metadict.keys()]) allidsindir = set([dirty_pairtree(x) for x in volumeIDs]) missinginmeta = len(allidsindir - allidsinmeta) missingindir = len(allidsinmeta - allidsindir) print("We have " + str(missinginmeta) + " volumes in missing in metadata, and") print(str(missingindir) + " volumes missing in the directory.") print(allidsinmeta - allidsindir) for anid in volumeIDs: dirtyid = dirty_pairtree(anid) if dirtyid in metadict: cleanmetadict[anid] = metadict[dirtyid] # Now that we have a list of volumes with metadata, we can select the groups of IDs # that we actually intend to contrast. If we want to us more or less everything, # this may not be necessary. But in some cases we want to use randomly sampled subsets. 
# The default condition here is # category2sorton = 'reviewed' # positive_class = 'rev' # sizecap = 350 # A sizecap less than one means, no sizecap. ### DEFACTORING FUNCTION CALL ### IDsToUse, classdictionary = metafilter.label_classes(metadict, category2sorton, positive_class, sizecap) ### DEFACTORING NAMESPACE metadict = cleanmetadict # put the data into the global namespace so execution can continue. """ Explanation: Using defactoring inspection we can actually look at the metadata file and inspect the first five rows of metadata. By blending the original code with our inspection code and narrative the metadata becomes less of a conceptual abstraction and more of a tangible, material object that we can interrogate. Here we can see the file has 728 rows and 22 columns as well as the contents of the first five rows. We worked with the HathiTrust Digital Library, which contains the aggregated collections of large public and university libraries: for 1820-1919, that gave us a collection of roughly 758,400 books in English, of which 53,200 include significant amounts of poetry...We gathered 360 reviewed and 360 random volumes, distributed in a similar way over the timeline (Underwood & Sellers 2016). This collection we have loaded into memory contains the metadata for the 720 poetry volumes out of the total of 53,200 identified poetry volumes in the HathiTrust Digital Library (Underwood 2014). What appears as a series of distance numbers in the MLQ article (Underwood & Sellers 2016, 325) are a tangible Python data structure capable of being directly inspected. 
End of explanation
"""

### DEFACTORING INSPECTION
# Examine the original metadata file
defactoring_volume_id = 'wu.89099921512'
the_croakers_metadata = (
    metadata_file
    .loc[metadata_file['docid'] == defactoring_volume_id]
    .squeeze()
    .sort_index()
)
the_croakers_metadata

### DEFACTORING INSPECTION
# Examine the cleaned metadata
cleanmetadict[defactoring_volume_id]

"""
Explanation: The code cell above is large due to a long for loop processing each row of the metadata file. At a high level, the code in this cell loads the metadata and determines which volumes to exclude in the analysis. It does this by loading the poemeta.csv file and excluding rows based upon the parameters specified in the excludeif, excludeifnot, excludeabove, and excludebelow variables. This process removed 8 volume designations from the (meta)data, which explains why there were 728 volumes in the metadata, but 720 volumes mentioned in the MLQ article. The resulting output immediately above is a mixture of the author's code and our own DEFACTORING inspection statements (marked with the comment ### DEFACTORING). We have added a print statement so we can see the IDs of the volumes being excluded in the code. Beyond filtering out excluded (meta)data, this code also makes a series of normalizing decisions, that is, there are more conceptual distinctions being made (or unmade) in this code. First is the normalization of nationality, which is a clinical way of saying Underwood and Sellers lump Canada with the United States and Ireland with the UK. Nationality was briefly explored in the MLQ analysis, and it is interesting to see the residue code here. However, the main thrust of the MLQ article focuses on word frequencies as features and leaves extant features such as gender and nationality as future directions to be explored more fully (Underwood & Sellers 2016). More importantly, this code cell splits the recept column of the metadata file into two columns, obscure and reviewed.
These are important distinctions for the analysis because this is where the labels for training the computational model are specified. From the code and the comments, there are poems that were reviewed, and there are poems that were obscure. Lastly, the logic of the code indicates there are poems that are not in the set of reviewed poems but are nevertheless part of the canon. In the latter case the poems are set to be "known" (obscure = \'known\'). According to the author's comment this trumps the conflict when the author is known but not explicitly in the reviewed set. To confirm that a given book had never been reviewed in any of [our selected] publications would have been a tedious task. It was more straightforward simply to select works at random from a very large collection while excluding authors already in our reviewed sample: in practice, this turned up mostly books that were rarely reviewed (Underwood & Sellers 2016). This is not the code that selects the sample of unreviewed works, that work had been done previously and was encoded in the metadata. Rather, this code is loading and preparing the data based upon that previous work while also "removing authors already in our reviewed sample." Here we can see a connection between the prosaic description of methodology in the MLQ article with that same method made material in the code. What might be important to note here is how "great debates" in literary history about literary prestige, obscurity, and the canon are being ascribed in the code without much fanfare. There is a hard decision being made (in the blink of an eye) about the status of particular literary works. Most of these are, we suspect, fairly uncontroversial distinctions that accord with the broader community, but the code and computation enforce clear and unambiguous decisions for each and every volume. These hard decisions are pragmatically executed to get to more interesting analyses. 
End of explanation
"""

### DEFACTORING FUNCTION ###
def label_classes(metadict, category2sorton, positive_class, sizecap):
    ''' This takes as input the metadata dictionary generated
    by get_metadata. It subsets that dictionary into a
    positive class and a negative class.
    Instances that belong to neither class get ignored.
    '''

    all_instances = set([x for x in metadict.keys()])

    # The first stage is to find positive instances.

    all_positives = set()

    for key, value in metadict.items():
        if value[category2sorton] == positive_class:
            all_positives.add(key)

"""
Explanation: The inspection above shows the data expressed in the CSV file has been transformed into a Python dictionary, cleanmetadict, with additional columns for expressing more granularity about the categorizations for each poetry volume. We also observe the raw metadata csv file has additional columns that are not reflected in the Python dictionary. What we see reflected in cleanmetadict is only the metadata necessary for the analysis with any dirty or unnecessary information removed.
Sorting Training Data
End of explanation
"""

all_negatives = all_instances - all_positives

iterator = list(all_negatives)
for item in iterator:
    if metadict[item]['reviewed'] == 'addedbecausecanon':
        all_negatives.remove(item)

"""
Explanation: This block of code reads the metadata properties and puts a subset of all entries into a variable, all_positives, which will contain all of the volume ids for reviewed poems. If the reviewed column has a value of 'rev', then it is selected for inclusion. The name and value of the property are parameterized however, so technically it is more correct, but more opaque as well, to state: if a poem's metadata has the value 'rev' (specified by the positive_class variable) for the reviewed property (specified by the category2sorton variable) then it is labeled as a positive.
Having thus collected all the reviewed poems into the set named all_positives, the next cell populates the variable all_negatives with all the instances not in the positive set by subtracting the set of positives from the set of all instances by applying a basic mathematical set operation (-).
End of explanation
"""

if sizecap > 0 and len(all_positives) > sizecap:
    positives = random.sample(all_positives, sizecap)
else:
    positives = list(all_positives)

print(len(all_positives))

# If there's a sizecap we also want to ensure classes have
# matching sizes and roughly equal distributions over time.

numpositives = len(all_positives)

if sizecap > 0 and len(all_negatives) > numpositives:
    if not 'date' in category2sorton:
        available_negatives = list(all_negatives)
        negatives = list()

        for anid in positives:
            date = metadict[anid]['pubdate']

            available_negatives = sort_by_proximity(available_negatives, metadict, date)
            selected_id = available_negatives.pop(0)
            negatives.append(selected_id)

    else:
        # if we're dividing classes by date, we obvs don't want to
        # ensure equal distributions over time.

        negatives = random.sample(all_negatives, sizecap)

else:
    negatives = list(all_negatives)

"""
Explanation: The negative labels are assigned to all instances that are not in the set of positive instances. This section includes additional code that filters out any item with addedbecausecanon set for the reviewed property, but this code should never execute because, as we have seen above, the volumes in the canon have already been removed.
End of explanation
"""

# Now we have two lists of ids.
IDsToUse = set()
classdictionary = dict()

print()
print("We have " + str(len(positives)) + " positive, and")
print(str(len(negatives)) + " negative instances.")

for anid in positives:
    IDsToUse.add(anid)
    classdictionary[anid] = 1

for anid in negatives:
    IDsToUse.add(anid)
    classdictionary[anid] = 0

for key, value in metadict.items():
    if value['reviewed'] == 'addedbecausecanon':
        print("DEFACTORING: Adding cannon supplement") ### DEFACTORING CODE
        IDsToUse.add(key)
        classdictionary[key] = 0
        # We add the canon supplement, but don't train on it.

"""
Explanation: Most of the code in the cell above does not actually execute because the number of entries in the all_positives and all_negatives lists is not greater than sizecap. The conditional statements on line 1 and line 12 will not be true so the accompanying blocks of code never execute. If the sizecap variable was smaller, or the number of entries larger, this code would use random sampling to select a smaller number of entries from the positives entries. Looking at the block of code for the negative entries is a bit more interesting. This block of code makes an unexecuted reference to a function sort_by_proximity() that samples from the negative elements with an equal distribution based upon some function of proximity. Because this code is not executing we are not going to spend more time and analytical attention to exactly how this function operates. Furthermore, we have not included the code for sort_by_proximity() in the notebook because it is not part of the execution path we are tracing. In the code's garden of forking paths, this is a path not taken. These issues of bespoke code that is not executed and functions that are not called point to properties of code that make it complex and therefore difficult to review or critique. Code has a textual and a processual dimension (Hiller 2015, Van Zundert 2016).
The code-as-text is what we can see and read in the source code, the processual dimension of code is tied to its execution as a software program. Code critique moves in between these two modes of existence of code. We are here, as code critics, not simply looking at code-as-text. That is, we are in this case reviewing a live execution of the code. This is extremely significant and, we'd argue, distinguishes defactoring as more than analysing code-as-text; we are analyzing the code, the data, and their interaction in the computation. Leveraging the affordances of the Jupyter platform allows us the ability to interact with the methodology inscribed in the code. At each step of the incremental process we can ask questions by inspecting the state of variables (or even change them). This is more than simply treating the code as a text, the code is but one part of a complex assemblage we have been executing and inspecting. However, it is also not a complete inspection of all the ways in which the code can be possibly executed. As we defactor the Underwood and Sellers's code, we make choices about how much of the code to include for the argument we are trying to make (and for the sake of our time and attention). Thus we are dealing with a code-criticism conundrum: What is the required or adequate breadth and depth of the computational process and subsequently of the critique? The decision to include or not include sort_by_proximity() is a breadth issue. How broad should we be in including code that does not execute? Note that we are including code from a conditional block that does not execute, but are not going out the additional step to include non-executed functions defined elsewhere in the code. The decision to include or not include code from the standard library, code not written by the authors, is a depth issue. 
While there are many functions we are stepping over, like len, list, append, pop, random.sample, we argue there is no need to step into these functions because, following Hinsen's model, they are not part of the bespoke code of Pace of Change. Again, this raises the problematic issue of our decision to step over sort_by_proximity() even though it was written by the authors' for this particular project. Obviously the 'rules of the game' for defactoring are not yet clear established. Therefore we are more or less 'feeling' our way through an emerging methodology for code criticism. As we see vestiges of the authors' evolution in thinking in their code, this notebook is capturing the evolution of our thinking about defactoring as a practice. End of explanation """ ### DEFACTORING FUNCTION DEFINITIONS ### We need to define the infer_date function def infer_date(metadictentry, datetype): if datetype == 'pubdate': return metadictentry[datetype] elif datetype == 'firstpub': firstpub = metadictentry['firstpub'] if firstpub > 1700 and firstpub < 1950: return firstpub else: return metadictentry['pubdate'] else: sys.exit(0) """ Explanation: In this cell we are seeing yet another instance of metadata being shaped and transformed in preparation for analysis. The code first prints out the number of positive and negative instances by checking the length (using len()) of the volume ids stored in the positives and negatives variables. Two loops iterate over these lists and populate two more variables, IDsToUse and classdictionary. The first, IDsToUse contains a master list of all the volume identifiers to be used in the analysis. It is of the Python set datatype, meaning there will be no duplicate identifiers in the set list. The second, classdictionary is a dictionary that allows a simple lookup to see if a volume ID is in the positive or negative class–as indicated by a 0 or a 1. 
There is a final loop whose logic checks to see if any volumes have a specific metadata flag reviewed with a value of addedbecausecanon. We have added a defactoring statement to see if this logic is ever triggered. The output indicates the if statement's conditions were never satisfied, no volumes were added because of cannon. We have come to the end of the preparing metadata section. All of the code up to this point has focused on loading, normalizing, and transforming the metadata--namely the identifiers of the volumes to be analyzed. Based upon the values in the metadata fields and assumptions built into the logic of the code, the authors have assembled the list of volume ids and their associated binary labels: reviewed or random, 0 or 1. Because this is a supervised machine learning exercise, the authors need labeled data to train the model. All of the work in this section of the code was dedicated to assigning a class label (positive or negative) to the identifiers of the data files. The next section dives into the actual data itself. Transforming Words into Features Now that we know exactly which volumes of poetry the code will be analyzing, we can venture into the datafiles and begin the work of transforming the volume data files into a data structure suitable for analysis. The logistic regression requires the data to be in a specific shape, a matrix of binary features. This section does the work of getting the data into shape. 
End of explanation """ # make a vocabulary list and a volsize dict wordcounts = Counter() volspresent = list() orderedIDs = list() positivecounts = dict() negativecounts = dict() for volid, volpath in zip(volumeIDs, volumepaths): if volid not in IDsToUse: continue else: volspresent.append((volid, volpath)) orderedIDs.append(volid) date = infer_date(metadict[volid], datetype) if date < pastthreshold or date > futurethreshold: continue else: with open(volpath, encoding = 'utf-8') as f: for line in f: fields = line.strip().split('\t') if len(fields) > 2 or len(fields) < 2: # print(line) continue word = fields[0] if len(word) > 0 and word[0].isalpha(): count = int(fields[1]) wordcounts[word] += 1 # for initial feature selection we use the number of # *documents* that contain a given word, # so it's just +=1. vocablist = [x[0] for x in wordcounts.most_common(numfeatures)] """ Explanation: This code cell defines a helper function, infer_date(), which is used in the code below to deal with differences in the pubdate and firstpub columns in the metadata. When firstpub falls between 1700 and 1950 the codes uses that as the date, otherwise it returns the value in pubdate (or it exists the script in the case of bad data). End of explanation """ ### DEFACTORING INSPECTION data_file = pd.read_csv(volumepaths[0], delimiter='\t', names=["Word", "Count"]) data_file.head(10) """ Explanation: This is an important section because it contains the code that opens the data files and selects the word-features used in the logistic regression. The main block of code in the cell above loops over each data file (representing a poem) and counts the number of instances of each word. Like the metadata file, we can use defactoring to insThis is an important section because it contains the code that opens the data files and selects the word-features used in the logistic regression. 
The main block of code in the cell above loops over each data file (representing a poem) and counts the number of instances of each word. This code performs the extremely important work of identifying the 3,200 word features used in the analysis. Underwood & Sellers only briefly mention this choice:
The model we have trained uses thirty-two hundred variables---the frequencies of the thirty-two hundred words most common in the collection. A model this complex can encode a lot of information about a social boundary. But the complexity also creates room for a definition of literary prestige that can be about a lot of intersecting things at once; it is not required to map onto any single idea. Moreover, we may not be able to characterize the effect of any single word with great precision, since variables interact with each other in tricky ways. A list of the top ten words that individually have the largest effect on the model's predictions might not tell us very much (Underwood & Sellers 2016, 330).
The decision to use 3,200 word features is never discussed in the MLQ article, but is rather situated in a broader discussion about the significance of individual words and how they can be marshaled as evidence of distinctions found via a statistical model. The 3,200 words selected here will eventually be transformed into a set of coefficients below with which the boundaries of statistical significance can be observed. The lofty philosophical discussion of the MLQ article is rooted in the mundane materiality of a Pythonic function call. We can inspect a datafile to get a sense of what these data look like before they are transformed by the code.
End of explanation
"""

### DEFACTORING INSPECTION
data_file = pd.read_csv(volumepaths[0], delimiter='\t', names=["Word", "Count"])
data_file.head(10)

"""
Explanation: The output above shows the first 10 lines of one of the poem data files.
As we can plainly see, the volume has already been pre-processed into a list of words and their frequencies. This particular volume has 2,745 commas and 1445 instances of the word “the.” The authors’ code parses each of these files and assembles a vocabulary list of the 3200 most common words (as specified by the numfeatures variable) End of explanation """ ### DEFACTORING INSPECTION plt.style.use('ggplot') ax = pd.Series([x[1] for x in wordcounts.most_common(n=3200)]).hist( bins=72, figsize=(8,5)) ax.set_xlabel("Words documents.") ax.set_ylabel("Count") plt.show() """ Explanation: At first glance it might seem strange that the count is 720 for all of the top words in the corpus. However, when we dig deeper into the code we can see that the authors are not tabulating the total word frequencies across all volumes in the corpus, rather they are associating words and the number of volumes; a distinction not explicitly expressed in the MLQ article. The code loops over each file, opening it, and parses each line by splitting on the tab character ("\t"). What is interesting is that Underwood and Sellers are only paying attention to the word and ignoring the frequency within the volume. They check to see if the word is longer than zero and use the isalpha() function to make sure the characters are alphabetic as opposed to punctuation. The comments in the code explain that the authors are just using the "number of documents that contain a given word". They are selecting their list of features (stored in the vocablist variable) by selecting words ranked by the number of documents in which they appear. The total number of documents we are working with is 720, so the table we generated above tells us that the top ten words appear in all of the documents. If we look at more than just the top ten, we can start to see the distribution of words in documents. 
End of explanation """ # vocablist = binormal_select(vocablist, positivecounts, negativecounts, totalposvols, totalnegvols, 3000) # Feature selection is deprecated. There are cool things # we could do with feature selection, # but they'd improve accuracy by 1% at the cost of complicating our explanatory task. # The tradeoff isn't worth it. Explanation is more important. # So we just take the most common words (by number of documents containing them) # in the whole corpus. Technically, I suppose, we could crossvalidate that as well, # but *eyeroll*. """ Explanation: The plot above shows a histogram of the top 3,200 words and how they are expressed across corpus. The spike on the right end of this chart shows there are nearly 60 words that appear in all 720 documents (as we can see in the text table above). As a whole, the higher bars on the left side of the chart indicate most of the words appear in a smaller number of documents. Here we use defactoring as a technique to investigate and even generate intermediate representations of the data, representations implicit in the data structures created by Underwood and Sellers, but not explicitly visualized in their MLQ narrative. For our purposes, this image is an interesting chapter in the story of the data precisely because it is in the middle of Underwood and Seller's analysis. These middle states are often glossed over in the hurried rush for analysis to generate a meaningful result. Defactoring is an effort to slow down, take a breather, and reflect upon the data-work that has happened up until this point in the code. The meandering step-by-step journey through the code sometimes reveals very interesting paths not taken, such as the commented out code block below. End of explanation """ donttrainon = list() # Here we create a list of volumed IDs not to be used for training. # For instance, we have supplemented the dataset with volumes that # are in the Norton but that did not actually occur in random # sampling. 
We want to make predictions for these, but never use # them for training. for idx1, anid in enumerate(orderedIDs): reviewedstatus = metadict[anid]['reviewed'] date = infer_date(metadict[anid], datetype) if reviewedstatus == 'addedbecausecanon': donttrainon.append(idx1) elif date < pastthreshold or date > futurethreshold: donttrainon.append(idx1) """ Explanation: Underwood and Seller's code above does not actually perform any work as each line has been commented out, however we include it because it points towards an execution path not taken and an interesting rationale for why it was not followed. In the "production" code the heuristic for feature selection is to simply select the 3200 most common words by their appearance in the 720 documents. This is a simple and easy technique to implement and--more importantly--explain to a literary history and digital humanities audience. Selecting the top words is a well established practice in text analysis and it has a high degree of face validity. It is a good mechanism for removing features that have diminishing returns. However, the commented code above tells a different, and methodologically significant, story. The comment discusses an alternative technique for feature selection using binormal selection. Because this function is commented out and not used in the analysis, we have opted to not include it as part of the defactoring. Instead, we have decided to focus on the more interesting rationale about why binormal selection is not being used in the analysis as indicated in the comments: There are cool things we could do with feature selection, but they'd improve accuracy by 1% at the cost of complicating our explanatory task. The tradeoff isn't worth it. Explanation is more important. This comment reveals much about the reasoning, the effort, and energy focused on the important, but in the humanities oft neglected, work of discussing methodology. 
As Underwood argued in The literary uses of high-dimensional space (Underwood 2015b), while there is enormous potential for the application of statistical methods in humanistic fields like literary history there is resistance to these methods because there is a resistance to methodology. Underwood has described the humanities disciplines relationship to methodology as an "insistence on staging methodology as ethical struggle" (Underwood 2013). In this commented code we can see the material manifestation of Underwood's methodological sentiment, in this case embodied by self-censorship in the decision to not use more statistically robust techniques for feature selection. We do not argue this choice compromises the analysis or final conclusions, rather we want to highlight the practical and material ways research methods are not a metaphysical abstraction, but rather have an tangible and observable reality. By focusing on a close reading of the code and execution environment, by defactoring, we illuminate methodology and its relation to the omnipresent explanatory task commensurate with the use of computational research methods in the humanities. In an algorithmic, data driven analysis, the selection of features is a crucial step because it affects the accuracy of the algorithm. In the digital humanities, feature selection is deeply embedded in the theory of the analysis and the context of the data. Claims made in and through this kind of analysis must attend to the representational configuration of the data. That is to say, we cannot take for granted how we have transformed data and what data are included or excluded from the analysis. Care, in the form of thorough documentation and thoughtful reflection, must be taken--especially at this unique moment in the development of digital humanities as we are still learning how algorithmic, data-driven techniques can be leveraged to better understand our objects of study. 
Filtering Authors End of explanation """ ### DEFACTORING INSPECTION print("The variable donttrainon contains {} volume IDs".format( len(donttrainon))) """ Explanation: As the comments describe, this block of code creates a list of volume IDs not to be used in the training. What that means in code is that any volume with the metadata label addedbecauseofcanon or with a date outside of the thresholds defined by pastthreshold and futurethreshold will be ignored. If we inspect the donttrainon variable we can see how many volumes satisfy these criteria. End of explanation """ authormatches = [list(donttrainon) for x in range(len(orderedIDs))] # For every index in authormatches, identify a set of indexes that have # the same author. Obvs, there will always be at least one. # Since we are going to use these indexes to exclude rows, we also add # all the ids in donttrainon to every volume for idx1, anid in enumerate(orderedIDs): thisauthor = metadict[anid]['author'] for idx2, anotherid in enumerate(orderedIDs): otherauthor = metadict[anotherid]['author'] if thisauthor == otherauthor and not idx2 in authormatches[idx1]: authormatches[idx1].append(idx2) for alist in authormatches: alist.sort(reverse = True) # I am reversing the order of indexes so that I can delete them from # back to front, without changing indexes yet to be deleted. # This will become important in the modelingprocess module. """ Explanation: It would appear there are no volumes to be filtered out by these criteria. End of explanation """ ### DEFACTORING INSPECTION # Tabular view of shared authorship pd.Series([len(x) for x in authormatches]).value_counts() ### DEFACTORING INSPECTION # Barchart of shared authorship ax = (pd.Series([len(x) for x in authormatches]) .value_counts() .plot(kind="barh", figsize=(8,5))) ax.set_xlabel("count") ax.set_ylabel("Number of volumes with same author") plt.tight_layout() """ Explanation: In this block of code Underwood and Sellers group the volumes by the same author. 
The list authormatches is a list of lists for each volume. Each sub-list contains the identifiers for all the volumes by the same author. Essentially this data structure represents the potential relations of each volume to other volumes, with that relation being "other volumes by the same author." This raises the question, how many volumes share the same author in this corpus. End of explanation """ ### DEFACTORING DEFINITIONS usedate = False # Leave this flag false unless you plan major # surgery to reactivate the currently-deprecated # option to use "date" as a predictive feature. def get_features(wordcounts, wordlist): numwords = len(wordlist) wordvec = np.zeros(numwords) for idx, word in enumerate(wordlist): if word in wordcounts: wordvec[idx] = wordcounts[word] return wordvec # In an earlier version of this script, we sometimes used # "publication date" as a feature, to see what would happen. # In the current version, we don't. Some of the functions # and features remain, but they are deprecated. E.g.: def get_features_with_date(wordcounts, wordlist, date, totalcount): numwords = len(wordlist) wordvec = np.zeros(numwords + 1) for idx, word in enumerate(wordlist): if word in wordcounts: wordvec[idx] = wordcounts[word] wordvec = wordvec / (totalcount + 0.0001) wordvec[numwords] = date return wordvec """ Explanation: This histogram tells us a majority of volumes are written by unique authors but that there are some authors who have written up to six volumes in the corpus. Note, we are generating this graph by counting the length of the list containing the volume IDs of other volumes by the same author. This means volumes written by the same author are counted twice. This is not an issue for the purposes of our inspection, just that the sum total number of volumes represented by this histogram is greater than 720. 
Filtering Words End of explanation """ volsizes = dict() voldata = list() classvector = list() for volid, volpath in volspresent: with open(volpath, encoding = 'utf-8') as f: voldict = dict() totalcount = 0 for line in f: fields = line.strip().split('\t') if len(fields) > 2 or len(fields) < 2: continue word = fields[0] count = int(fields[1]) voldict[word] = count totalcount += count date = infer_date(metadict[volid], datetype) date = date - 1700 if date < 0: date = 0 if usedate: features = get_features_with_date(voldict, vocablist, date, totalcount) voldata.append(features) else: features = get_features(voldict, vocablist) voldata.append(features / (totalcount + 0.001)) volsizes[volid] = totalcount classflag = classdictionary[volid] classvector.append(classflag) data = pd.DataFrame(voldata) """ Explanation: This code cell defines two functions to be used below when opening and parsing the raw data files (in the poems/ directory). The function get_features() simply takes the word counts from the parsed volume and filters out any words that are not part of wordlist, which contains the list of word features that had been selected for this analysis. We have also included a second function, get_features_with_date(), even though it is not executed. This residual code points to yet another path not taken, one that uses the volume's publication date as a feature. As Underwood and Seller's comment indicates, this was an experiment from an "earlier version of this script...to see what would happen." 
End of explanation """ ### DEFACTORING INSPECTION print("The vector representation of {} by {}".format( metadict[defactoring_volume_id]['title'], metadict[defactoring_volume_id]['author'])) print("The vector has a length of {}.".format( len(features))) print("The first 100 elements of the vector:") print(features[0:100]) """ Explanation: This is an important code block because we are now pulling the raw data files from the poems/ directory into memory, filtering out the unselected word features, and putting the data into a vectorized data structure. The code loops over the volspresent variable and parses each individual volume into the voldict dictionary. At this stage the code is reading in all the words of a volume including their frequencies, and it is tabulating the total number of words in that volume. Once all of the data for the volume has been read into memory, the code calls the get_features function that throws out the words not part of the selected word features stored in the vocablist variable. This is where the top 3200 words are foregrounded for each volume and the remaining, less commonly used words, are discarded. At this point, any prosaic resemblance left in the data is gone and now we are dealing entirely with textual data in a numeric form. End of explanation """ ### DEFACTORING INSPECTION # Normalized perspective of the data data.iloc[:,0:10]# display first and last 5 rows and 10 columns """ Explanation: The inspection above shows us the last volume processed by the loop, The Croakers by Joseph Rodman Drake. As we can see, the words for this volume of poetry are now represented as a list of numbers (representing word frequencies). However, this list of numbers still requires additional transformation in order to be consumable by logistic regression. The word frequencies need to be normalized so they are comparable across volumes. 
To do this Underwood and Sellers divide the frequency of each individual word (each number in the list above) by the total number of words in that volume (the totalcount variable. This makes volumes of different lengths comparable by turning absolute frequencies into relative frequencies. One thing we initially did not understand is why the value of 0.001 has been added to the totalcount variable. When we asked, it turned out this is a "lazy" way to prevent divide-by-zero errors. The end result of the code we have executed thus far in the notebook is a very neat and tidy table of numbers between zero and 1. End of explanation """ sextuplets = list() for i, volid in enumerate(orderedIDs): listtoexclude = authormatches[i] asixtuple = (data, classvector, listtoexclude, i, usedate, regularization) sextuplets.append(asixtuple) """ Explanation: The last row in that table, 719, is the volume we have been tracking, The Croakers by Joseph Rodman Drake. It is just one of 720 relatively indistinguishable rows of numbers in this representation of 19th century poetry. This is a radical transformation of the original, prosaic representation literary historians are probably used to seeing (if the poem was of interest in the first place): End of explanation """ ### DEFACTORING DEFINITION def sliceframe(dataframe, yvals, excludedrows, testrow): numrows = len(dataframe) newyvals = list(yvals) for i in excludedrows: del newyvals[i] # NB: This only works if we assume that excluded rows # has already been sorted in descending order !!!!!!! # otherwise indexes will slide around as you delete trainingset = dataframe.drop(dataframe.index[excludedrows]) newyvals = np.array(newyvals) testset = dataframe.iloc[testrow] return trainingset, newyvals, testset """ Explanation: This is the last step before Underwood and Sellers's code moves away from the transformation of features and into the actual analysis of the data. 
This bit of code gathers all of the relevant data and metadata that has been cleaned and normalized in a structure suitable for performing the statistical analysis. The sextuplets variable is a list of 720 tuples containing six elements. Each item in the sextuplets list contains the necessary data structures to model each poem. The contents of each item in the list is as follows: data: a normalized feature matrix. Word features are the columns and volumes are the rows with dimensions of 720 x 3200. classvector: the classification or labels of volumes as either 'reviewed' (1) or 'random' (0). listtoexclude: the list of poems to ignore because they are the same author. ``i: the index of the volume usedate: a flag indicating if date is a feature. It is false in this analysis. regularization: a parameter for the logistic regression. This value was hardcoded at the beginning of the code in the Setting Parameters section. With all of the data assembled and in the right shape, a process we call data fitness, we can now venture into the algorithmic territory and perform the statistical analysis. As we can see, the fitted representation of features has traveled a great distance from the original poetry. One of the most important aspects of distant reading is the work of cleaning, preparing, and normalizing texts to be "read" by an algorithm. When considering distance, we should think not only of the perspective that we, the analyst, are reading from, but also the distance traveled in terms of successive transformations and representations of the data. If computational literary history is a triathalon, we have only completed the first endurance test. Training Predictive Models We are now about to dive into the very heart of the analysis, training predictive models on each volume. 
In their MLQ article, Underwood and Sellers summarize this effort: At the grittiest mathematical level, the predictive model we create is just an equation that translates word frequencies into a probability that a particular volume came from the reviewed sample. Although we may say that it models a boundary between the samples, probability is a continuum, and a probabilistic model allows us to treat social boundaries as fuzzy gradients. We "train" the model by showing a regression algorithm the volumes from all authors (except one) in both samples; the algorithm assigns each word a positive or negative weight in an effort to separate the samples. When we show the model a new volume, by the author it hasn't yet seen, it uses the weights assigned to different words to estimate the probability that this volume was reviewed. To avoid circularity, the model never makes predictions about one of an author's books by using information about others (Sculley and Pasanek 2008). So we actually train 636 slightly different models, each one excluding books by a different author. However, since each pair of models shares more than 99 percent of their evidence, we can describe them collectively as one model of the literary field (Underwood & Sellers 2016, 326). We can see a more Pythonic expression of this work below. End of explanation """ ### DEFACTORING DEFINITION def normalizearray(featurearray, usedate): '''Normalizes an array by centering on means and scaling by standard deviations. Also returns the means and standard deviations for features. ''' numinstances, numfeatures = featurearray.shape means = list() stdevs = list() lastcolumn = numfeatures - 1 for featureidx in range(numfeatures): thiscolumn = featurearray.iloc[ : , featureidx] thismean = np.mean(thiscolumn) thisstdev = np.std(thiscolumn) if (not usedate) or featureidx != lastcolumn: # If we're using date we don't normalize the last column. 
means.append(thismean) stdevs.append(thisstdev) featurearray.iloc[ : , featureidx] = \ (thiscolumn - thismean) / thisstdev else: print('FLAG') means.append(thismean) thisstdev = 0.1 stdevs.append(thisstdev) featurearray.iloc[ : , featureidx] = \ (thiscolumn - thismean) / thisstdev # We set a small stdev for date. return featurearray, means, stdevs """ Explanation: This function prepares the data for training one model by separating volumes used for training from a single volume held out to test the predictive power of the model. The function takes a dataframe containing the feature vectors, a list of the classifications for each volume, a list of volumes to exclude (because of shared authorship), and the the index of the specific volume to be held out. It returns the dataframe with the held out volume removed (trainingset), a list of the known classifications (newyvals) corresponding to the training set, and the held-out volume that will be classified once the model has been trained (testset). End of explanation """ ### DEFACTORING DEFINITION def model_one_volume(data5tuple): data, classvector, listtoexclude, i, usedate, regularization = \ data5tuple trainingset, yvals, testset = sliceframe(data, classvector, listtoexclude, i) newmodel = LogisticRegression(C = regularization) trainingset, means, stdevs = normalizearray(trainingset, usedate) newmodel.fit(trainingset, yvals) testset = (testset - means) / stdevs prediction = newmodel.predict_proba(testset.values.reshape(1, -1))[0][1] if i % 50 == 0: print(i) # print(str(i) + " - " + str(len(listtoexclude))) return prediction """ Explanation: This function standardizes the features by computing the z-score for the feature vectors. That is, it loops over each column of the data, subtracts the column mean from each value, and then divides that value by the standard deviation. This is an important step in the data preparation pipeline because it ensures all of the data values are on the same scale. 
End of explanation """ # Now do leave-one-out predictions. print('Beginning multiprocessing.') pool = Pool(processes = 4) res = pool.map_async(model_one_volume, sextuplets) # After all files are processed, write metadata, errorlog, and counts of phrases. res.wait() resultlist = res.get() assert len(resultlist) == len(orderedIDs) logisticpredictions = dict() for i, volid in enumerate(orderedIDs): logisticpredictions[volid] = resultlist[i] pool.close() pool.join() print('Multiprocessing concluded.') """ Explanation: In many respects, this is the most salient block of code in the entire document. The code above actually runs the logistic regression and does the computational work that generates a prediction about each individual volume. This function builds upon the two previous functions to assemble a normalized set of training data (trainingset) distinct from the single volume to be predicted (testset). There are three lines of code involved in the computational modeling of the data. First, Underwood and Sellers instantiate a model object with the regularization parameter (more on that below): newmodel = LogisticRegression(C = regularization) Then they fit the model using the normalized training data: newmodel.fit(trainingset, yvals) Once a model has been fit they can use that model to make predictions about unseen or held-out data. This is what they do with the predict_proba() function: prediction = newmodel.predict_proba(testset.reshape(1, -1))[0][1] Those three lines are all it takes to do the computational part of of the analysis, the rest of the code up until this point has all been data preparation, cleaning, normalization, and re-shaping. This ratio of standardized function calls to bespoke data management code indicates that claims of machines are eradicating scholars' jobs are greatly exaggerated (Basken 2017). Regularization and Logistic Regression The three lines of code above hide a significant amount of intellectual and computational work. 
The call to the newmodel.fit() function is a crucial step in the analytical process. Underwood and Sellers are using an implementation of logistic regression from the 3rd party Python library scikit-learn. At a very high level, logistic regression is a machine learning algorithm for performing classification. Logistic regression works by estimating the parameters of a function, the hypothesis representation, that divides a multidimensional space into two parts (note, in this case we are talking about binomial or binary logistic regression, which classifies things into one of two bins). The hypothesis representation describes a line that winds its way through the space creating what is called the decision boundary. Every data point that lands on one side the boundary gets one label and every data point on the other side of the boundary gets the other label. Similar to linear regression, the goal is to find the best hypothesis representation, that is, the function that best draws a line dividing the space given the known data points. Once you have a good hypothesis representation, an appropriately fit model, you can begin to classify new data by dropping data points into the multidimensional space and seeing on which side of the decision boundary they land. The key to logistic regression is estimating the parameters of the hypothesis representation-the parameters to the function that draws a line through the multidimensional space. We can derive the parameters by using the features of existing data combined with their known labels; this is called training data. The modeling process is executed by the function call to newmodel.fit(trainingset, yvals). In Underwood and Sellers’s c'de the model uses the training data---the matrix of word features in the data variable and known labels ('reviewed' or 'random') in the classvector variable---to "learn" the parameters through a process called coordinate descent. 
How so-called optimization functions work is well beyond the scope of the discussion. Overfitting One of the problems when fitting a logistic regression model is a tendency towards overfitting. Crudely this means the model, the function with the learned parameters, that you estimated have tailored themselves such that they are overly optimized to the particular training data you provided. As such, the model becomes less useful for prediction or classifying new data because they are outside the fitness of the model. An overfit model is like a snug pair of jeans, once you put on a few pounds (add new data) they don't fit. In Underwood and Sellers's case, they are fitting models on all volumes except one, which is held out. Then they test the predictive performance of the model by seeing if it correctly classifies the held-out volume. If they overfit the models, the model will to a terrible job guessing the status of the held out volumes. When Underwood and Sellers instantiated the model (newmodel = LogisticRegression(C = regularization)), they set a regularization parameter on the model. Regularization is a technique for logistic regression (and other machine learning algorithms) that smooths out the tendency toward overfitting with some more mathematical gymnastics. The diagram below shows how regularization can help with the fitness of the model: On the left side is a linear regression which doesn't quite fit the data. In the middle is an overfit logistic regression. On right side is a regularized logistic regression. As the diagrams show, the regularized logistic expression (the right side) does have a bit of error, there are pink and blue dots on the wrong sides of the decision boundary, but as more data get added it will generally be more right than the overfitted model as represented by the middle diagram (the squiggly decision boundary). 
Running the Regression End of explanation """ ### DEFACTORING INSPECTION print("There are {} predictions.".format(len(logisticpredictions))) """ Explanation: This code automates the training of 720 volumes, holding out one volume, training the model on the remaining volumes, and then making a prediction for the held-out volume. As a very computation intensive process training or fitting a logistic regression model takes time, and training 720 different models obviously takes 720 times longer. Fortunately, this is a so called embarrassingly parallel computational task and so we can train the models using parallel processing instead of one after the other. Using Python's built in parallel processing modules, this code can speed up the process. Still, this block of code takes a bit of time to execute, around twenty minutes on a quad core MacBook Pro (late 2013 model). End of explanation """ truepositives = 0 truenegatives = 0 falsepositives = 0 falsenegatives = 0 allvolumes = list() with open(outputpath, mode = 'w', encoding = 'utf-8') as f: writer = csv.writer(f) header = ['volid', 'reviewed', 'obscure', 'pubdate', 'birthdate', 'gender', 'nation', 'allwords', 'logistic', 'author', 'title', 'pubname', 'actually', 'realclass'] writer.writerow(header) for volid in IDsToUse: metadata = metadict[volid] reviewed = metadata['reviewed'] obscure = metadata['obscure'] pubdate = infer_date(metadata, datetype) birthdate = metadata['birthdate'] gender = metadata['gender'] nation = metadata['nation'] author = metadata['author'] title = metadata['title'] canonicity = metadata['canonicity'] pubname = metadata['pubname'] allwords = volsizes[volid] logistic = logisticpredictions[volid] realclass = classdictionary[volid] outrow = [volid, reviewed, obscure, pubdate, birthdate, gender, nation, allwords, logistic, author, title, pubname, canonicity, realclass] writer.writerow(outrow) allvolumes.append(outrow) if logistic > 0.5 and classdictionary[volid] > 0.5: truepositives += 1 elif 
logistic <= 0.5 and classdictionary[volid] < 0.5: truenegatives += 1 elif logistic <= 0.5 and classdictionary[volid] > 0.5: falsenegatives += 1 elif logistic > 0.5 and classdictionary[volid] < 0.5: falsepositives += 1 """ Explanation: What emerges from the other side of this computationally intensive task are a series of predictions, 720 to be specific, one for each of the modeled volumes. These predictions, stored in the logisticpredictions variable, are the model's assertions of each volume's reviewed status. Additionally, because we already know the status of the modeled volumes we can compare the performance of the predictive model to the "ground truth" and see if the algorithm was able to detect patterns. End of explanation """ donttrainon.sort(reverse = True) trainingset, yvals, testset = sliceframe(data, classvector, donttrainon, 0) newmodel = LogisticRegression(C = regularization) trainingset, means, stdevs = normalizearray(trainingset, usedate) newmodel.fit(trainingset, yvals) coefficients = newmodel.coef_[0] * 100 coefficientuples = list(zip(coefficients, (coefficients / np.array(stdevs)), vocablist + ['pub.date'])) coefficientuples.sort() if verbose: for coefficient, normalizedcoef, word in coefficientuples: print(word + " : " + str(coefficient)) print() accuracy = (truepositives + truenegatives) / len(IDsToUse) """ Explanation: This code is a bit simpler than its predecessors. This block writes a CSV file to disk containing 720 rows of volume metadata, the predicted classification, and the actual classification. Modeling Coefficients The code below generates a single logistic regression model, trained on all of the data with nothing held-out. The properties of this model, the coefficients of the hypothesis representation, are interrogated to better understand the influence of individual features, words, on reviewed or unreviewed volumes. 
Thus this individual model is not using computational modeling to predict a phenomena, it is using the computational model to explore and explain patterns and features of the phenomena. End of explanation """ ### DEFACTORING INSPECTION print("There are {} coefficients in this model.".format(len(coefficientuples))) ### DEFACTORING INSPECTION print("First ten items in the list of coefficients.") coefficientuples[:10] ### DEFACTORING INSPECTION print("Last ten items in the list of coefficients.") coefficientuples[-10:] """ Explanation: This code functions much like the code in the model_one_volume() function except it only trains a single model for the purposes of investigating the impact of particular words on the prediction. By inspecting the magnitude of the coefficients Underwood and Sellers can see how particular words influenced a positive or negative prediction. Looking at the code, specifically the call to sliceframe() reveals this model actually does have some hold-out data, the first volume at index zero. We suspect the cost of excluding a single volume is less than the effort of re-implementing the sliceframe() function. The code to instantiate and train the model is identical to the code above, but instead of predicting the status of the held-out data the code extracts the coefficients from the model and puts them in the coefficients variable. End of explanation """ coefficientpath = outputpath.replace('.csv', '.coefs.csv') with open(coefficientpath, mode = 'w', encoding = 'utf-8') as f: writer = csv.writer(f) for triple in coefficientuples: coef, normalizedcoef, word = triple writer.writerow([word, coef, normalizedcoef]) ### DEFACTORING NAMESPACE rawaccuracy = accuracy """ Explanation: The coefficients are a list of numbers, one per word feature, that determine the shape of the line through the multidimensional space. These results tell us the influence of particular words in the classification of volumes in the corpus. 
Looking at the normalized values helps us understand the degree to which particular words are more or less associated with reviewed or unreviewed poetry. End of explanation """ ### DEFACTORING FUNCTION PARAMETERS modeltype = 'linear' datelimits = [] ''' Takes a set of predictions produced by a model that knows nothing about date, and divides it along a line with a diachronic tilt. We need to do this in a way that doesn't violate crossvalidation. I.e., we shouldn't "know" anything that the model didn't know. We tried a couple of different ways to do this, but the simplest and actually most reliable is to divide the whole dataset along a linear central trend line for the data! ''' listofrows = list() classvector = list() # DEPRECATED # if modeltype == 'logistic' and len(datelimits) == 2: # # In this case we construct a subset of data to model on. # tomodeldata = list() # tomodelclasses = list() # pastthreshold, futurethreshold = datelimits for volume in allvolumes: date = volume[3] logistic = volume[8] realclass = volume[13] listofrows.append([logistic, date]) classvector.append(realclass) # DEPRECATED # if modeltype == 'logistic' and len(datelimits) == 2: # if date >= pastthreshold and date <= futurethreshold: # tomodeldata.append([logistic, date]) # tomodelclasses.append(realclass) y, x = [a for a in zip(*listofrows)] plt.axis([min(x) - 2, max(x) + 2, min(y) - 0.02, max(y) + 0.02]) reviewedx = list() reviewedy = list() randomx = list() randomy = list() for idx, reviewcode in enumerate(classvector): if reviewcode == 1: reviewedx.append(x[idx]) reviewedy.append(y[idx]) else: randomx.append(x[idx]) randomy.append(y[idx]) plt.plot(reviewedx, reviewedy, 'ro') plt.plot(randomx, randomy, 'k+') if modeltype == 'logistic': # all this is DEPRECATED print("Hey, you're attempting to use the logistic-tilt option") print("that we deactivated. 
Go in and uncomment the code.") # if len(datelimits) == 2: # data = pd.DataFrame(tomodeldata) # responsevariable = tomodelclasses # else: # data = pd.DataFrame(listofrows) # responsevariable = classvector # newmodel = LogisticRegression(C = 100000) # newmodel.fit(data, responsevariable) # coefficients = newmodel.coef_[0] # intercept = newmodel.intercept_[0] / (-coefficients[0]) # slope = coefficients[1] / (-coefficients[0]) # p = np.poly1d([slope, intercept]) elif modeltype == 'linear': # what we actually do z = np.polyfit(x, y, 1) p = np.poly1d(z) slope = z[0] intercept = z[1] plt.plot(x,p(x),"b-") plt.show(block = False) x = np.array(x, dtype='float64') y = np.array(y, dtype='float64') classvector = np.array(classvector) dividingline = intercept + (x * slope) predicted_as_reviewed = (y > dividingline) really_reviewed = (classvector == 1) accuracy = sum(predicted_as_reviewed == really_reviewed) / len(classvector) ### DEFACTORING NAMESPACE tiltaccuracy = accuracy print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy)) print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy)) """ Explanation: This code generates the mainmodelcoefficients.csv output file, which contains the word, its coefficient, and its normalized coefficients. Plotting Results The final function of the analysis is to test the accuracy of the model(s). This code produces a plot giving a sense of how the model performed compared to the known classifications. End of explanation """
migueldiascosta/pymatgen
examples/Plotting a Pourbaix Diagram.ipynb
mit
from pymatgen.matproj.rest import MPRester from pymatgen.core.ion import Ion from pymatgen import Element from pymatgen.phasediagram.pdmaker import PhaseDiagram from pymatgen.analysis.pourbaix.entry import PourbaixEntry, IonEntry from pymatgen.analysis.pourbaix.maker import PourbaixDiagram from pymatgen.analysis.pourbaix.plotter import PourbaixPlotter from pymatgen.entries.compatibility import MaterialsProjectCompatibility, AqueousCorrection %matplotlib inline """ Explanation: This notebook provides an example of how to generate a Pourbaix Diagram using the Materials API and pymatgen. Currently, the process is a bit involved. But we are working to simplify the usage in the near future. Author: Sai Jayaratnam End of explanation """ def contains_entry(entry_list, entry): for e in entry_list: if e.entry_id == entry.entry_id or \ (abs(entry.energy_per_atom - e.energy_per_atom) < 1e-6 and entry.composition.reduced_formula == e.composition.reduced_formula): return True """ Explanation: Let's first define a useful function for filtering duplicate entries. End of explanation """ #This initializes the REST adaptor. Put your own API key in. a = MPRester() #Entries are the basic unit for thermodynamic and other analyses in pymatgen. #This gets all entries belonging to the Fe-O-H system. entries = a.get_entries_in_chemsys(['Fe', 'O', 'H']) """ Explanation: Using the Materials API, we obtain the entries for the relevant chemical system we are interested in. End of explanation """ #Dictionary of ion:energy, where the energy is the formation energy of ions from #the NBS tables. (Source: NBS Thermochemical Tables; FeO4[2-]: Misawa T., Corr. 
Sci., 13(9), 659-676 (1973)) ion_dict = {"Fe[2+]":-0.817471, "Fe[3+]":-0.0478, "FeO2[2-]":-3.06055, "FeOH[+]":-2.8738, "FeOH[2+]":-2.37954, "HFeO2[-]":-3.91578, "Fe(OH)2[+]":-4.54022, "Fe2(OH)2[4+]":-4.84285, "FeO2[-]":-3.81653, "FeO4[2-]":-3.33946, "Fe(OH)3(aq)":-6.83418, "Fe(OH)2[+]":-4.54022} #Dictionary of reference state:experimental formation energy (from O. Kubaschewski) for reference state. ref_dict = {"Fe2O3": -7.685050670886141} ref_state = "Fe2O3" """ Explanation: To construct a Pourbaix diagram, we also need the reference experimental energies for the relevant aqueous ions. This process is done manually here. We will provide a means to obtain these more easily via a programmatic interface in future. End of explanation """ # Run aqueouscorrection on the entries aqcompat = AqueousCorrection("MP") entries_aqcorr = list() for entry in entries: aq_corrected_entry = aqcompat.correct_entry(entry) if not contains_entry(entries_aqcorr, aq_corrected_entry): entries_aqcorr.append(aq_corrected_entry) # Generate a phase diagram to consider only solid entries stable in water. 
pd = PhaseDiagram(entries_aqcorr)  # NOTE: `pd` here is a PhaseDiagram, not the usual pandas alias
stable_solids = pd.stable_entries
# Drop the pure H2/O2/water phases; the Pourbaix construction handles water itself.
stable_solids_minus_h2o = [entry for entry in stable_solids if
                           entry.composition.reduced_formula not in ["H2", "O2", "H2O", "H2O2"]]
pbx_solid_entries = []
for entry in stable_solids_minus_h2o:
    pbx_entry = PourbaixEntry(entry)
    # Substitute the phase-diagram formation energy for the entry's g0.
    pbx_entry.g0_replace(pd.get_form_energy(entry))
    pbx_entry.reduced_entry()
    pbx_solid_entries.append(pbx_entry)

# Calculate DFT reference energy for ions (See Persson et al, PRB (2012))
ref_entry = [entry for entry in stable_solids_minus_h2o if entry.composition.reduced_formula == ref_state][0]
# Offset between the DFT formation energy per formula unit and the experimental value.
ion_correction = pd.get_form_energy(ref_entry)/ref_entry.composition.get_reduced_composition_and_factor()[1] - ref_dict[ref_state]
el = Element("Fe")
pbx_ion_entries = []

# Get PourbaixEntry corresponding to each ion
for key in ion_dict:
    comp = Ion.from_formula(key)
    # Scale the correction by the ion's Fe content relative to the reference solid.
    factor = comp.composition[el] / (ref_entry.composition[el] / ref_entry.composition.get_reduced_composition_and_factor()[1])
    energy = ion_dict[key] + ion_correction * factor
    pbx_entry_ion = PourbaixEntry(IonEntry(comp, energy))
    pbx_entry_ion.name = key
    pbx_ion_entries.append(pbx_entry_ion)

all_entries = pbx_solid_entries + pbx_ion_entries

# Generate and plot Pourbaix diagram
pourbaix = PourbaixDiagram(all_entries)
plotter = PourbaixPlotter(pourbaix)
# limits: presumably [[pH min, pH max], [potential min, potential max]] per
# plot_pourbaix's convention -- confirm against the pymatgen plotter docs.
plotter.plot_pourbaix(limits=[[-2, 16],[-3, 3]])

"""
Explanation: We will now construct the Pourbaix diagram, which requires the application of the AqueousCorrection, obtaining the stable entries, followed by generating a list of Pourbaix entries.
End of explanation
"""
massimo-nocentini/on-python
UniFiCourseSpring2020/numpy.ipynb
mit
__AUTHORS__ = {'am': ("Andrea Marino", "andrea.marino@unifi.it",), 'mn': ("Massimo Nocentini", "massimo.nocentini@unifi.it", "https://github.com/massimo-nocentini/",)} __KEYWORDS__ = ['Python', 'numpy', 'numerical', 'data',] """ Explanation: <p> <img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg" alt="UniFI logo" style="float: left; width: 20%; height: 20%;"> <div align="right"> <small> Massimo Nocentini, PhD. <br><br> February 26, 2020: init </small> </div> </p> <br> <br> <div align="center"> <b>Abstract</b><br> These slides outline techniques for effectively loading, storing, and manipulating in-memory data in Python. </div> End of explanation """ import numpy numpy.__version__ """ Explanation: <center><img src="https://upload.wikimedia.org/wikipedia/commons/c/c3/Python-logo-notext.svg"></center> Introduction to NumPy The topic is very broad: datasets can come from a wide range of sources and a wide range of formats, including be collections of documents, collections of images, collections of sound clips, collections of numerical measurements, or nearly anything else. Despite this apparent heterogeneity, it will help us to think of all data fundamentally as arrays of numbers. For this reason, efficient storage and manipulation of numerical arrays is absolutely fundamental to the process of doing data science. NumPy (short for Numerical Python) provides an efficient interface to store and operate on dense data buffers. In some ways, NumPy arrays are like Python's built-in list type, but NumPy arrays provide much more efficient storage and data operations as the arrays grow larger in size. NumPy arrays form the core of nearly the entire ecosystem of data science tools in Python, so time spent learning to use NumPy effectively will be valuable no matter what aspect of data science interests you. 
End of explanation """ import numpy as np """ Explanation: By convention, you'll find that most people in the SciPy/PyData world will import NumPy using np as an alias: End of explanation """ import array L = list(range(10)) A = array.array('i', L) A """ Explanation: Throughout this chapter, and indeed the rest of the book, you'll find that this is the way we will import and use NumPy. Understanding Data Types in Python Effective data-driven science and computation requires understanding how data is stored and manipulated. Here we outlines and contrasts how arrays of data are handled in the Python language itself, and how NumPy improves on this. Python offers several different options for storing data in efficient, fixed-type data buffers. The built-in array module (available since Python 3.3) can be used to create dense arrays of a uniform type: End of explanation """ np.array([1, 4, 2, 5, 3]) """ Explanation: Here 'i' is a type code indicating the contents are integers. Much more useful, however, is the ndarray object of the NumPy package. While Python's array object provides efficient storage of array-based data, NumPy adds to this efficient operations on that data. Creating Arrays from Python Lists First, we can use np.array to create arrays from Python lists: End of explanation """ np.array([3.14, 4, 2, 3]) """ Explanation: Remember that unlike Python lists, NumPy is constrained to arrays that all contain the same type. 
If types do not match, NumPy will upcast if possible (here, integers are up-cast to floating point): End of explanation """ np.array([1, 2, 3, 4], dtype='float32') """ Explanation: If we want to explicitly set the data type of the resulting array, we can use the dtype keyword: End of explanation """ np.zeros(10, dtype=int) np.ones((3, 5), dtype=float) np.full((3, 5), 3.14) np.arange(0, 20, 2) np.linspace(0, 1, 5) np.random.random((3, 3)) np.random.normal(0, 1, (3, 3)) np.eye(3) """ Explanation: Creating Arrays from Scratch Especially for larger arrays, it is more efficient to create arrays from scratch using routines built into NumPy: End of explanation """ np.random.seed(0) # seed for reproducibility x1 = np.random.randint(10, size=6) # One-dimensional array x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array """ Explanation: NumPy Standard Data Types NumPy arrays contain values of a single type, so have a look at those types and their bounds: | Data type | Description | |---------------|-------------| | bool_ | Boolean (True or False) stored as a byte | | int_ | Default integer type (same as C long; normally either int64 or int32)| | intc | Identical to C int (normally int32 or int64)| | intp | Integer used for indexing (same as C ssize_t; normally either int32 or int64)| | int8 | Byte (-128 to 127)| | int16 | Integer (-32768 to 32767)| | int32 | Integer (-2147483648 to 2147483647)| | int64 | Integer (-9223372036854775808 to 9223372036854775807)| | uint8 | Unsigned integer (0 to 255)| | uint16 | Unsigned integer (0 to 65535)| | uint32 | Unsigned integer (0 to 4294967295)| | uint64 | Unsigned integer (0 to 18446744073709551615)| | float_ | Shorthand for float64.| | float16 | Half precision float: sign bit, 5 bits exponent, 10 bits mantissa| | float32 | Single precision float: sign bit, 8 bits exponent, 23 bits mantissa| | float64 | Double precision float: sign bit, 11 bits exponent, 
52 bits mantissa| | complex_ | Shorthand for complex128.| | complex64 | Complex number, represented by two 32-bit floats| | complex128| Complex number, represented by two 64-bit floats| The Basics of NumPy Arrays Data manipulation in Python is nearly synonymous with NumPy array manipulation: even newer tools like Pandas are built around the NumPy array. Attributes of arrays: Determining the size, shape, memory consumption, and data types of arrays Indexing of arrays: Getting and setting the value of individual array elements Slicing of arrays: Getting and setting smaller subarrays within a larger array Reshaping of arrays: Changing the shape of a given array Joining and splitting of arrays: Combining multiple arrays into one, and splitting one array into many NumPy Array Attributes First let's discuss some useful array attributes. We'll start by defining three random arrays, a one-dimensional, two-dimensional, and three-dimensional array: End of explanation """ print("x3 ndim: ", x3.ndim) print("x3 shape:", x3.shape) print("x3 size: ", x3.size) print("dtype:", x3.dtype) """ Explanation: Each array has attributes ndim (the number of dimensions), shape (the size of each dimension), size (the total size of the array) and dtype (the data type of the array): End of explanation """ x1 x1[0] x1[-1] # To index from the end of the array, you can use negative indices. """ Explanation: Array Indexing: Accessing Single Elements In a one-dimensional array, the $i^{th}$ value (counting from zero) can be accessed by specifying the desired index in square brackets, just as with Python lists: End of explanation """ x2 x2[0, 0] x2[2, -1] """ Explanation: In a multi-dimensional array, items can be accessed using a comma-separated tuple of indices: End of explanation """ x2[0, 0] = 12 x2 """ Explanation: Values can also be modified using any of the above index notation: End of explanation """ x1[0] = 3.14159 # this will be truncated! 
x1 """ Explanation: Keep in mind that, unlike Python lists, NumPy arrays have a fixed type. End of explanation """ x = np.arange(10) x x[:5] # first five elements x[5:] # elements after index 5 x[4:7] # middle sub-array x[::2] # every other element x[1::2] # every other element, starting at index 1 """ Explanation: Array Slicing: Accessing Subarrays Just as we can use square brackets to access individual array elements, we can also use them to access subarrays with the slice notation, marked by the colon (:) character. The NumPy slicing syntax follows that of the standard Python list; to access a slice of an array x, use this: python x[start:stop:step] If any of these are unspecified, they default to the values start=0, stop=size of dimension, step=1. One-dimensional subarrays End of explanation """ x[::-1] # all elements, reversed x[5::-2] # reversed every other from index 5 """ Explanation: A potentially confusing case is when the step value is negative. In this case, the defaults for start and stop are swapped. This becomes a convenient way to reverse an array: End of explanation """ x2 x2[:2, :3] # two rows, three columns x2[:3, ::2] # all rows, every other column x2[::-1, ::-1] """ Explanation: Multi-dimensional subarrays Multi-dimensional slices work in the same way, with multiple slices separated by commas: End of explanation """ print(x2[:, 0]) # first column of x2 print(x2[0, :]) # first row of x2 print(x2[0]) # equivalent to x2[0, :] """ Explanation: Accessing array rows and columns One commonly needed routine is accessing of single rows or columns of an array: End of explanation """ x2 x2_sub = x2[:2, :2] x2_sub x2_sub[0, 0] = 99 # if we modify this subarray, the original array is changed too x2 """ Explanation: Subarrays as no-copy views One important–and extremely useful–thing to know about array slices is that they return views rather than copies of the array data. 
This is one area in which NumPy array slicing differs from Python list slicing: in lists, slices will be copies. End of explanation """ np.arange(1, 10).reshape((3, 3)) x = np.array([1, 2, 3]) x.reshape((1, 3)) # row vector via reshape x[np.newaxis, :] # row vector via newaxis x.reshape((3, 1)) # column vector via reshape x[:, np.newaxis] # column vector via newaxis """ Explanation: It is sometimes useful to instead explicitly copy the data within an array or a subarray. This can be most easily done with the copy() method. Reshaping of Arrays If you want to put the numbers 1 through 9 in a $3 \times 3$ grid: End of explanation """ x = np.array([1, 2, 3]) y = np.array([3, 2, 1]) np.concatenate([x, y]) z = [99, 99, 99] np.concatenate([x, y, z]) grid = np.array([[1, 2, 3], [4, 5, 6]]) np.concatenate([grid, grid]) # concatenate along the first axis np.concatenate([grid, grid], axis=1) # concatenate along the second axis (zero-indexed) """ Explanation: Concatenation of arrays np.concatenate takes a tuple or list of arrays as its first argument: End of explanation """ x = np.array([1, 2, 3]) grid = np.array([[9, 8, 7], [6, 5, 4]]) np.vstack([x, grid]) # vertically stack the arrays y = np.array([[99], [99]]) np.hstack([grid, y]) # horizontally stack the arrays """ Explanation: For working with arrays of mixed dimensions, it can be clearer to use the np.vstack (vertical stack) and np.hstack (horizontal stack) functions: End of explanation """ x = [1, 2, 3, 99, 99, 3, 2, 1] x1, x2, x3 = np.split(x, [3, 5]) print(x1, x2, x3) grid = np.arange(16).reshape((4, 4)) grid np.vsplit(grid, [2]) np.hsplit(grid, [2]) """ Explanation: Splitting of arrays The opposite of concatenation is splitting, we can pass a list of indices giving the split points: End of explanation """ np.random.seed(0) def compute_reciprocals(values): output = np.empty(len(values)) for i in range(len(values)): output[i] = 1.0 / values[i] return output values = np.random.randint(1, 10, size=5) 
compute_reciprocals(values) """ Explanation: Computation on NumPy Arrays: Universal Functions Numpy provides an easy and flexible interface to optimized computation with arrays of data. The key to making it fast is to use vectorized operations, generally implemented through NumPy's universal functions (ufuncs). The Slowness of Loops Python's default implementation (known as CPython) does some operations very slowly, this is in part due to the dynamic, interpreted nature of the language. The relative sluggishness of Python generally manifests itself in situations where many small operations are being repeated – for instance looping over arrays to operate on each element. For example, pretend to compute the reciprocal of values contained in a array: End of explanation """ big_array = np.random.randint(1, 100, size=1000000) %timeit compute_reciprocals(big_array) """ Explanation: If we measure the execution time of this code for a large input, we see that this operation is very slow, perhaps surprisingly so! End of explanation """ %timeit (1.0 / big_array) """ Explanation: It takes $2.63$ seconds to compute these million operations and to store the result. It turns out that the bottleneck here is not the operations themselves, but the type-checking and function dispatches that CPython must do at each cycle of the loop. If we were working in compiled code instead, this type specification would be known before the code executes and the result could be computed much more efficiently. Introducing UFuncs For many types of operations, NumPy provides a convenient interface into just this kind of compiled routine. This is known as a vectorized operation. This can be accomplished by performing an operation on the array, which will then be applied to each element. End of explanation """ np.arange(5) / np.arange(1, 6) """ Explanation: Vectorized operations in NumPy are implemented via ufuncs, whose main purpose is to quickly execute repeated operations on values in NumPy arrays. 
Ufuncs are extremely flexible – before we saw an operation between a scalar and an array, but we can also operate between two arrays: End of explanation """ x = np.arange(9).reshape((3, 3)) 2 ** x """ Explanation: And ufunc operations are not limited to one-dimensional arrays–they can also act on multi-dimensional arrays as well: End of explanation """ x = np.arange(4) print("x =", x) print("x + 5 =", x + 5) print("x - 5 =", x - 5) print("x * 2 =", x * 2) print("x / 2 =", x / 2) print("x // 2 =", x // 2) # floor division print("-x = ", -x) print("x ** 2 = ", x ** 2) print("x % 2 = ", x % 2) -(0.5*x + 1) ** 2 # can be strung together also """ Explanation: Any time you see such a loop in a Python script, you should consider whether it can be replaced with a vectorized expression. Array arithmetic NumPy's ufuncs feel very natural to use because they make use of Python's native arithmetic operators: End of explanation """ theta = np.linspace(0, np.pi, 3) print("theta = ", theta) print("sin(theta) = ", np.sin(theta)) print("cos(theta) = ", np.cos(theta)) print("tan(theta) = ", np.tan(theta)) """ Explanation: Trigonometric functions NumPy provides a large number of useful ufuncs, we'll start by defining an array of angles: End of explanation """ x = [1, 2, 3] print("x =", x) print("e^x =", np.exp(x)) print("2^x =", np.exp2(x)) print("3^x =", np.power(3, x)) x = [1, 2, 4, 10] print("x =", x) print("ln(x) =", np.log(x)) print("log2(x) =", np.log2(x)) print("log10(x) =", np.log10(x)) """ Explanation: Exponents and logarithms Another common NumPy ufunc are the exponentials (that are useful for maintaining precision with very small inputs) End of explanation """ x = np.arange(5) y = np.empty(5) np.multiply(x, 10, out=y) print(y) y = np.zeros(10) np.power(2, x, out=y[::2]) print(y) """ Explanation: Specifying output For large calculations, it is sometimes useful to be able to specify the array where the result of the calculation will be stored: End of explanation """ x = 
np.arange(1, 6) np.multiply.outer(x, x) """ Explanation: Outer products Finally, any ufunc can compute the output of all pairs of two different inputs using the outer method: End of explanation """ L = np.random.random(100) sum(L) np.sum(L) big_array = np.random.rand(1000000) %timeit sum(big_array) %timeit np.sum(big_array) """ Explanation: Aggregations: Min, Max, and Everything In Between Summing the Values in an Array As a quick example, consider computing the sum of all values in an array. Python itself can do this using the built-in sum function: End of explanation """ min(big_array), max(big_array) np.min(big_array), np.max(big_array) %timeit min(big_array) %timeit np.min(big_array) big_array.min(), big_array.max(), big_array.sum() """ Explanation: Minimum and Maximum Similarly, Python has built-in min and max functions: End of explanation """ M = np.random.random((3, 4)) M M.sum() # By default, each NumPy aggregation function works on the whole array M.min(axis=0) # specifying the axis along which the aggregate is computed M.max(axis=1) # find the maximum value within each row """ Explanation: Multi dimensional aggregates One common type of aggregation operation is an aggregate along a row or column: End of explanation """ a = np.array([0, 1, 2]) b = np.array([5, 5, 5]) a + b """ Explanation: Other aggregation functions Additionally, most aggregates have a NaN-safe counterpart that computes the result while ignoring missing values, which are marked by the special IEEE floating-point NaN value |Function Name | NaN-safe Version | Description | |-------------------|---------------------|-----------------------------------------------| | np.sum | np.nansum | Compute sum of elements | | np.prod | np.nanprod | Compute product of elements | | np.mean | np.nanmean | Compute mean of elements | | np.std | np.nanstd | Compute standard deviation | | np.var | np.nanvar | Compute variance | | np.min | np.nanmin | Find minimum value | | np.max | np.nanmax | Find maximum 
value | | np.argmin | np.nanargmin | Find index of minimum value | | np.argmax | np.nanargmax | Find index of maximum value | | np.median | np.nanmedian | Compute median of elements | | np.percentile | np.nanpercentile| Compute rank-based statistics of elements | | np.any | N/A | Evaluate whether any elements are true | | np.all | N/A | Evaluate whether all elements are true | Computation on Arrays: Broadcasting Another means of vectorizing operations is to use NumPy's broadcasting functionality. Broadcasting is simply a set of rules for applying binary ufuncs (e.g., addition, subtraction, multiplication, etc.) on arrays of different sizes. Introducing Broadcasting Recall that for arrays of the same size, binary operations are performed on an element-by-element basis: End of explanation """ a + 5 """ Explanation: Broadcasting allows these types of binary operations to be performed on arrays of different sizes: End of explanation """ M = np.ones((3, 3)) M M + a """ Explanation: We can think of this as an operation that stretches or duplicates the value 5 into the array [5, 5, 5], and adds the results; the advantage of NumPy's broadcasting is that this duplication of values does not actually take place. We can similarly extend this to arrays of higher dimensions: End of explanation """ a = np.arange(3) b = np.arange(3)[:, np.newaxis] a, b a + b """ Explanation: Here the one-dimensional array a is stretched, or broadcast across the second dimension in order to match the shape of M. More complicated cases can involve broadcasting of both arrays: End of explanation """ X = np.random.random((10, 3)) Xmean = X.mean(0) Xmean X_centered = X - Xmean X_centered.mean(0) # To double-check, we can check that the centered array has near 0 means. 
""" Explanation: Rules of Broadcasting Broadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays: Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is padded with ones on its leading (left) side. Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape. Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised. Centering an array Imagine you have an array of 10 observations, each of which consists of 3 values, we'll store this in a $10 \times 3$ array: End of explanation """ steps = 500 x = np.linspace(0, 5, steps) # # x and y have 500 steps from 0 to 5 y = np.linspace(0, 5, steps)[:, np.newaxis] z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) %matplotlib inline import matplotlib.pyplot as plt plt.imshow(z, origin='lower', extent=[0, 5, 0, 5], cmap='viridis') plt.colorbar(); """ Explanation: Plotting a two-dimensional function One place that broadcasting is very useful is in displaying images based on two-dimensional functions. If we want to define a function $z = f(x, y)$, broadcasting can be used to compute the function across the grid: End of explanation """ x = np.array([1, 2, 3, 4, 5]) x < 3 # less than x > 3 # greater than x != 3 # not equal (2 * x) == (x ** 2) """ Explanation: Comparisons, Masks, and Boolean Logic Masking comes up when you want to extract, modify, count, or otherwise manipulate values in an array based on some criterion: for example, you might wish to count all values greater than a certain value, or perhaps remove all outliers that are above some threshold. In NumPy, Boolean masking is often the most efficient way to accomplish these types of tasks. 
Comparison Operators as ufuncs End of explanation """ rng = np.random.RandomState(0) x = rng.randint(10, size=(3, 4)) x x < 6 """ Explanation: Just as in the case of arithmetic ufuncs, these will work on arrays of any size and shape: End of explanation """ np.count_nonzero(x < 6) # how many values less than 6? np.sum(x < 6) np.sum(x < 6, axis=1) # how many values less than 6 in each row? np.any(x > 8) # are there any values greater than 8? np.any(x < 0) # are there any values less than zero? np.all(x < 10) # are all values less than 10? np.all(x < 8, axis=1) # are all values in each row less than 8? """ Explanation: Counting entries To count the number of True entries in a Boolean array, np.count_nonzero is useful: End of explanation """ x x < 5 x[x < 5] """ Explanation: Boolean Arrays as Masks A more powerful pattern is to use Boolean arrays as masks, to select particular subsets of the data themselves: End of explanation """ rand = np.random.RandomState(42) x = rand.randint(100, size=10) x [x[3], x[7], x[2]] # Suppose we want to access three different elements. ind = [3, 7, 4] x[ind] # Alternatively, we can pass a single list or array of indices """ Explanation: What is returned is a one-dimensional array filled with all the values that meet this condition; in other words, all the values in positions at which the mask array is True. Fancy Indexing We saw how to access and modify portions of arrays using simple indices (e.g., arr[0]), slices (e.g., arr[:5]), and Boolean masks (e.g., arr[arr &gt; 0]). We'll look at another style of array indexing, known as fancy indexing, that is like the simple indexing we've already seen, but we pass arrays of indices in place of single scalars. 
Fancy indexing is conceptually simple: it means passing an array of indices to access multiple array elements at once: End of explanation """ ind = np.array([[3, 7], [4, 5]]) x[ind] """ Explanation: When using fancy indexing, the shape of the result reflects the shape of the index arrays rather than the shape of the array being indexed: End of explanation """ X = np.arange(12).reshape((3, 4)) X """ Explanation: Fancy indexing also works in multiple dimensions: End of explanation """ row = np.array([0, 1, 2]) col = np.array([2, 1, 3]) X[row, col] """ Explanation: Like with standard indexing, the first index refers to the row, and the second to the column: End of explanation """ X[row[:, np.newaxis], col] """ Explanation: The pairing of indices in fancy indexing follows all the broadcasting rules that we've already seen: End of explanation """ row[:, np.newaxis] * col """ Explanation: each row value is matched with each column vector, exactly as we saw in broadcasting of arithmetic operations End of explanation """ X X[2, [2, 0, 1]] # combine fancy and simple indices X[1:, [2, 0, 1]] # combine fancy indexing with slicing mask = np.array([1, 0, 1, 0], dtype=bool) X[row[:, np.newaxis], mask] # combine fancy indexing with masking """ Explanation: Remember: with fancy indexing that the return value reflects the broadcasted shape of the indices, rather than the shape of the array being indexed. Combined Indexing For even more powerful operations, fancy indexing can be combined with the other indexing schemes we've seen: End of explanation """ mean = [0, 0] cov = [[1, 2], [2, 5]] X = rand.multivariate_normal(mean, cov, 100) X.shape plt.scatter(X[:, 0], X[:, 1]); """ Explanation: Example: Selecting Random Points One common use of fancy indexing is the selection of subsets of rows from a matrix. 
For example, we might have an $N$ by $D$ matrix representing $N$ points in $D$ dimensions, such as the following points drawn from a two-dimensional normal distribution:
End of explanation
"""

mean = [0, 0]
cov = [[1, 2],
       [2, 5]]
X = rand.multivariate_normal(mean, cov, 100)
X.shape

plt.scatter(X[:, 0], X[:, 1]);

"""
Explanation: Let's use fancy indexing to select 20 random points. We'll do this by first choosing 20 random indices with no repeats, and use these indices to select a portion of the original array:
End of explanation
"""

indices = np.random.choice(X.shape[0], 20, replace=False)
indices

selection = X[indices]  # fancy indexing here
selection.shape

"""
Explanation: Now to see which points were selected, let's over-plot large circles at the locations of the selected points:
End of explanation
"""

plt.scatter(X[:, 0], X[:, 1], alpha=0.3);

"""
Explanation: Modifying Values with Fancy Indexing
Fancy indexing can also be used to modify parts of an array:
End of explanation
"""

x = np.arange(10)
i = np.array([2, 1, 8, 4])
x[i] = 99
x

x[i] -= 10  # use any assignment-type operator for this
x

"""
Explanation: Notice, though, that repeated indices with these operations can cause some potentially unexpected results:
End of explanation
"""

x = np.zeros(10)
x[[0, 0]] = [4, 6]
x

"""
Explanation: Where did the 4 go? The result of this operation is to first assign x[0] = 4, followed by x[0] = 6.
The result, of course, is that x[0] contains the value 6.
End of explanation
"""

i = [2, 3, 3, 4, 4, 4]
x[i] += 1
x

"""
Explanation: You might expect that x[3] would contain the value 2, and x[4] would contain the value 3, as this is how many times each index is repeated.
Why is this not the case?
Conceptually, this is because x[i] += 1 is meant as a shorthand of x[i] = x[i] + 1. x[i] + 1 is evaluated, and then the result is assigned to the indices in x.
With this in mind, it is not the augmentation that happens multiple times, but the assignment, which leads to the rather nonintuitive results.
End of explanation """ np.random.seed(42) x = np.random.randn(100) # compute a histogram by hand bins = np.linspace(-5, 5, 20) counts = np.zeros_like(bins) # find the appropriate bin for each x i = np.searchsorted(bins, x) # add 1 to each of these bins np.add.at(counts, i, 1) # The counts now reflect the number of points # within each bin–in other words, a histogram: line, = plt.plot(bins, counts); line.set_drawstyle("steps") print("NumPy routine:") %timeit counts, edges = np.histogram(x, bins) print("Custom routine:") %timeit np.add.at(counts, np.searchsorted(bins, x), 1) """ Explanation: The at() method does an in-place application of the given operator at the specified indices (here, i) with the specified value (here, 1). Another method that is similar in spirit is the reduceat() method of ufuncs, which you can read about in the NumPy documentation. Example: Binning Data You can use these ideas to efficiently bin data to create a histogram by hand. For example, imagine we have 1,000 values and would like to quickly find where they fall within an array of bins. We could compute it using ufunc.at like this: End of explanation """ x = np.random.randn(1000000) print("NumPy routine:") %timeit counts, edges = np.histogram(x, bins) print("Custom routine:") %timeit np.add.at(counts, np.searchsorted(bins, x), 1) """ Explanation: Our own one-line algorithm is several times faster than the optimized algorithm in NumPy! How can this be? If you dig into the np.histogram source code (you can do this in IPython by typing np.histogram??), you'll see that it's quite a bit more involved than the simple search-and-count that we've done; this is because NumPy's algorithm is more flexible, and particularly is designed for better performance when the number of data points becomes large... End of explanation """ x = np.array([2, 1, 4, 3, 5]) np.sort(x) x """ Explanation: What this comparison shows is that algorithmic efficiency is almost never a simple question. 
An algorithm efficient for large datasets will not always be the best choice for small datasets, and vice versa. The key to efficiently using Python in data-intensive applications is knowing about general convenience routines like np.histogram and when they're appropriate, but also knowing how to make use of lower-level functionality when you need more pointed behavior. Sorting Arrays Up to this point we have been concerned mainly with tools to access and operate on array data with NumPy. This section covers algorithms related to sorting values in NumPy arrays. Fast Sorting in NumPy: np.sort and np.argsort Although Python has built-in sort and sorted functions to work with lists, NumPy's np.sort function turns out to be much more efficient and useful. To return a sorted version of the array without modifying the input, you can use np.sort: End of explanation """ i = np.argsort(x) i """ Explanation: A related function is argsort, which instead returns the indices of the sorted elements: End of explanation """ x[i] """ Explanation: The first element of this result gives the index of the smallest element, the second value gives the index of the second smallest, and so on. These indices can then be used (via fancy indexing) to construct the sorted array if desired: End of explanation """ rand = np.random.RandomState(42) X = rand.randint(0, 10, (4, 6)) X np.sort(X, axis=0) # sort each column of X np.sort(X, axis=1) # sort each row of X """ Explanation: Sorting along rows or columns End of explanation """ x = np.array([7, 2, 3, 1, 6, 5, 4]) np.partition(x, 3) """ Explanation: Keep in mind that this treats each row or column as an independent array, and any relationships between the row or column values will be lost! Partial Sorts: Partitioning Sometimes we're not interested in sorting the entire array, but simply want to find the k smallest values in the array. 
np.partition takes an array and a number K; the result is a new array with the smallest K values to the left of the partition, and the remaining values to the right, in arbitrary order: End of explanation """ np.partition(X, 2, axis=1) """ Explanation: Note that the first three values in the resulting array are the three smallest in the array, and the remaining array positions contain the remaining values. Within the two partitions, the elements have arbitrary order. Similarly to sorting, we can partition along an arbitrary axis of a multidimensional array: End of explanation """ X = rand.rand(50, 2) plt.scatter(X[:, 0], X[:, 1], s=100); # compute the distance between each pair of points dist_sq = np.sum((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2, axis=-1) dist_sq.shape, np.all(dist_sq.diagonal() == 0) """ Explanation: The result is an array where the first two slots in each row contain the smallest values from that row, with the remaining values filling the remaining slots. Finally, just as there is a np.argsort that computes indices of the sort, there is a np.argpartition that computes indices of the partition. Example: k-Nearest Neighbors Let's quickly see how we might use this argsort function along multiple axes to find the nearest neighbors of each point in a set. We'll start by creating a random set of 10 points on a two-dimensional plane: End of explanation """ nearest = np.argsort(dist_sq, axis=1) nearest[:,0] """ Explanation: With the pairwise square-distances converted, we can now use np.argsort to sort along each row. 
The leftmost columns will then give the indices of the nearest neighbors: End of explanation """ K = 2 nearest_partition = np.argpartition(dist_sq, K + 1, axis=1) plt.scatter(X[:, 0], X[:, 1], s=100) K = 2 # draw lines from each point to its two nearest neighbors for i in range(X.shape[0]): for j in nearest_partition[i, :K+1]: plt.plot(*zip(X[j], X[i]), color='black') """ Explanation: Notice that the first column is order because each point's closest neighbor is itself. If we're simply interested in the nearest $k$ neighbors, all we need is to partition each row so that the smallest $k + 1$ squared distances come first, with larger distances filling the remaining positions of the array: End of explanation """
rasbt/pattern_classification
data_viz/model-evaluation-articles/iris-random-dist.ipynb
gpl-3.0
%matplotlib inline """ Explanation: This Jupyter notebook contains the code to create the data visualizations for the article "Model evaluation, model selection, and algorithm selection in machine learning - Part I" at http://sebastianraschka.com/blog/2016/model-evaluation-selection-part1.html. End of explanation """ import matplotlib.pyplot as plt import numpy as np import pandas as pd from mlxtend.data import iris_data from mlxtend.preprocessing import shuffle_arrays_unison X, y = iris_data() X, y = shuffle_arrays_unison([X, y], random_seed=123) X_train, X_test = X[:100], X[100:150] y_train, y_test = y[:100], y[100:150] np.bincount(y) np.bincount(y_train) np.bincount(y_test) df = pd.DataFrame(X) df['class'] = y df_train = pd.DataFrame(X_train) df_train['class'] = y_train df_test = pd.DataFrame(X_test) df_test['class'] = y_test def stackhist(x, y, **kws): grouped = pd.groupby(x, y) data = [d for _, d in grouped] labels = [l for l, _ in grouped] plt.hist(data, histtype="barstacked", label=labels, alpha=0.8, normed=True, bins=np.arange(4.0, 8.1, 0.25)) plt.ylim([0, 0.6]) plt.xlim([4, 8]) plt.xlabel('Sepal Width [cm]') plt.ylabel('Frequency') with plt.style.context('fivethirtyeight'): stackhist(df[0], df['class']) #plt.legend(['Setosa', 'Virginica', 'Versicolor'], fontsize=12) plt.title('All') plt.tight_layout() plt.savefig('./all.svg') plt.show() stackhist(df_train[0], df_train['class']) plt.title('Train') plt.tight_layout() #plt.savefig('./train.svg') plt.show() stackhist(df_test[0], df_test['class']) plt.title('Test') plt.tight_layout() #plt.savefig('./test.svg') plt.show() """ fig = plt.figure() figlegend = plt.figure(figsize=(3,4)) ax = fig.add_subplot(111) lines = ax.plot(range(10), np.random.randn(10), range(10), np.random.randn(10), np.random.randn(10), range(10),) figlegend.legend(lines, ['Setosa', 'Virginica', 'Versicolor'], 'center') fig.show() figlegend.show() figlegend.savefig('legend.svg') """ """ Explanation: Iris Feature by Class Distribution in 
Random Subsampling End of explanation """
pacoqueen/ginn
extra/install/ipython2/ipython-5.10.0/examples/IPython Kernel/Animations Using clear_output.ipynb
gpl-2.0
import sys import time from IPython.display import display, clear_output for i in range(10): time.sleep(0.25) clear_output(wait=True) print(i) sys.stdout.flush() """ Explanation: Simple Animations Using clear_output Sometimes you want to clear the output area in the middle of a calculation. This can be useful for doing simple animations. In terminals, there is the carriage-return ('\r') for overwriting a single line, but the notebook frontend can clear the whole output area, not just a single line. To clear output in the Notebook you can use the clear_output() function. If you are clearing the output every frame of an animation, calling clear_output() will create noticeable flickering. You can use clear_output(wait=True) to add the clear_output call to a queue. When data becomes available to replace the existing output, the clear_output will be called immediately before the new data is added. This avoids the flickering by not rendering the cleared output to the screen. Simple example Here we show our progress iterating through a list: End of explanation """ from IPython import parallel rc = parallel.Client() view = rc.load_balanced_view() amr = view.map_async(time.sleep, [0.5]*100) amr.wait_interactive() """ Explanation: AsyncResult.wait_interactive The AsyncResult object has a special wait_interactive() method, which prints its progress interactively, so you can watch as your parallel computation completes. 
This example assumes you have an IPython cluster running, which you can start from the cluster panel End of explanation """ %matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy.special import jn x = np.linspace(0,5) f, ax = plt.subplots() ax.set_title("Bessel functions") for n in range(1,10): time.sleep(1) ax.plot(x, jn(x,n)) clear_output(wait=True) display(f) # close the figure at the end, so we don't get a duplicate # of the last plot plt.close() """ Explanation: Matplotlib example You can also use clear_output() to clear figures and plots. End of explanation """
unnati-xyz/intro-python-data-science
hard-disk/Explore.ipynb
mit
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np %matplotlib inline plt.style.use('ggplot') plt.rcParams['figure.figsize']=15,10 df = pd.read_csv('data/data.csv') """ Explanation: <img style="float:center" src="img/explore.jpg" width=300/> Exploring the data When we look at spreadsheets or large amounts of data, its hard for us to understand what is really happening. But when we visually interpret the data then everything starts making sense. <img style="float::left" src="img/explore-reason.png" /> Question 1. Find the total number of hard disks for a given model Question 2. Find total failures for a hard disk models Question 3. How do you compute failure rate for a model Question 4. Given a model and capacity bytes, what does failure count look like Question 5. Let us count how many days each hard disk ran Question 6. Find the average running time for failed hard disks and average running time for hard disks that have not failed Question 7. How about using hours (SMART_9) column now and co-relate it with failure Question 8. Given the data , identify the model and capacity of the hard disk to buy based on how long it runs Step by step approach First let us look at our data End of explanation """ df.head() """ Explanation: Let us take a sneak peek at the data End of explanation """ df.shape """ Explanation: What is the size of the dataset? 
End of explanation """ df_model = pd.DataFrame(df.model.unique(),columns=['model']) df_model.head() df_model.count()[0] """ Explanation: Now we see that there are different models of hard disks, let us list them <img style="float:center" src="img/distinct.gif" /> End of explanation """ print "Total number of distinct models : "+ str(df_model.count()[0]) # Exerice 1: Find the distinct number of serial numbers # Exercise 2: Find the distinct number of capacity bytes """ Explanation: let us see how many models are there in total End of explanation """ df_model_serial = pd.DataFrame(df.groupby(['model']).serial.nunique()) df_model_serial.head() df_model_serial = df_model_serial.reset_index() df_model_serial.head() df_model_serial.columns = ['model','total_HD'] df_model_serial.head(39) df_model_serial.plot(kind="barh",x="model",y="total_HD") """ Explanation: <img style="float:center" src="img/group-by.gif" /> Question 1. Find the total number of hard disks for a given model Now let us see how many hard disks are there for each model and visualize it. We see that serial number represents the hard disk and they are related to a model i.e multiple serial numbers belongs to one type of model End of explanation """ df_model_serial.sort_values(by='total_HD',inplace=True) df_model_serial.plot(kind="barh",x="model",y="total_HD") #Exercise 3: Find the count of different capacity bytes for a model and plot with and without sorting """ Explanation: Sort and plot End of explanation """ df_fail = pd.DataFrame(df.groupby('model').failure.sum()) df_fail.head() df_fail = df_fail.reset_index() df_fail.head() df_fail.plot(kind="barh",x="model",y="failure",figsize=(18,10)) # Exercise 4 : sort the above data frame and plot it """ Explanation: Question 2. Find failures for a hard disk models End of explanation """ merged_df = df_model_serial.merge(df_fail,how='inner',on='model') merged_df.head() """ Explanation: Question 3. 
How do you compute failure rate for a model Now let us express the failure / total number of hard disks as ratio. This will give us an understanding of models and their failure behavior To get that data, instead of computing again, we can join the 2 data frames that were previously computed and compute the ratio End of explanation """ merged_df['success'] = merged_df.total_HD - merged_df.failure merged_df.head() merged_df.plot(kind="bar",x="model",y=["failure","success"],subplots=True) """ Explanation: let us see in total hard disks for a model, how many failed and how many did not End of explanation """ merged_df['ratio_failure'] = merged_df.failure / merged_df.total_HD merged_df.head(25) merged_df.sort_values(by="ratio_failure",ascending=False,inplace=True) merged_df.head() merged_df.plot(kind="bar",x="model",y="ratio_failure") """ Explanation: now let us compute the ratio of failure number/total_hard_disk of hard disk End of explanation """ #Exercise: Find ratio of success and plot it #Exercise : Plot multiple bar charts comparing ratio of success and failure """ Explanation: The higher the ratio value is , the model is prone to failure End of explanation """ df_capacity = pd.DataFrame(df.capacity.unique(),columns=['capacity']) df_capacity.head() df_capacity.shape #Exercise : For a given capacity bytes, find the total number of failures and plot it """ Explanation: Now we know which models fail the most, let us introduce a new feature in our analysis, capacity. We are going feature by feature the reason being, the more features we add that add value to the outcome, we see how our understanding of the data starts to change. Let us look at the capacity End of explanation """ df_fail_mod_cap = pd.DataFrame(df.groupby(['model','capacity']).failure.sum()) df_fail_mod_cap.head() df_fail_mod_cap = df_fail_mod_cap.reset_index() df_fail_mod_cap.head(25) df_fail_mod_cap.plot(x="capacity",y="failure",kind="bar",figsize=(20,5)) """ Explanation: Question 4. 
Given a model and capacity bytes, what does failure count look like End of explanation """ df_fail_mod_cap.head() df_fail_mod_cap_pivot = df_fail_mod_cap.pivot("model","capacity","failure") df_fail_mod_cap_pivot.head() """ Explanation: Looking at this chart can you tell what is not being represented right? We are having repeated entries for the same capacity and this really does not give us insights on the relation between capacity data and the models. End of explanation """ df_fail_mod_cap.fillna(0,inplace=True) df_fail_mod_cap.head() sns.heatmap(df_fail_mod_cap_pivot) """ Explanation: we see that for some models and their respective capacitys we do not have a fail count, lets fill it with 0 End of explanation """ #Exercise : Find count of success for a model with different capacities and plot it """ Explanation: This heat map gives us a better understanding of model, capacity vs failure End of explanation """ df_days = pd.DataFrame(df.groupby(['capacity','serial']).date.count()) df_days = df_days.reset_index() df_days.head() df_days.columns = ['capacity','serial','total_days'] df_days.head() df_days.capacity.value_counts() df_days.shape df_days_pivot = df_days.pivot('capacity','serial','total_days') df_days_pivot.head() df_days_pivot.fillna(0,inplace=True) df_days_pivot.head() # Exercise : Visualize the above dataframe """ Explanation: The above charts give us an explanation of which models failed the most, which models had the most number of hard disks running , the ratio of hard disk : failure rate and hard disk and for a given capacity of a model what the failure count looks like <img style="float:center" src="img/explore-clock.png" width=150/> Hard disk data is time series data, so let us start using time Question 5. Let us count how many days each hard disk ran End of explanation """ df_fail_days = pd.DataFrame(df[['capacity','serial','failure']].loc[df['failure'] == 1 ]) df_fail_days.head() """ Explanation: Question 6. 
Find the average running time for failed hard disks and average running time for hard disks that have not failed End of explanation """ df_fail_count = df_days.merge(df_fail_days,how="left",on=['capacity','serial']) df_fail_count.head() df_fail_count.fillna(0,inplace=True) df_fail_count.head() df_fail_count.dtypes g = sns.FacetGrid(df_fail_count, col="failure",hue='failure',size=5,aspect=1.5) g.map_dataframe(plt.scatter,x='capacity',y='total_days') """ Explanation: <img style="float:center" src="img/sql-joins.jpg"/> now let us merge the previous data frame which had serial number and count of days End of explanation """ df_fail_count_avg = pd.DataFrame(df_fail_count.groupby(['capacity','failure']).total_days.mean()) df_fail_count_avg.head() df_fail_count_avg = df_fail_count_avg.reset_index() df_fail_count_avg.head() df_fail_count_avg_pivot = df_fail_count_avg.pivot('capacity','failure','total_days') df_fail_count_avg_pivot.head() df_fail_count_avg_pivot.plot(kind="bar") """ Explanation: Now what can we do with this data? Is this useful? What can I generate from the above data that gives me a little more insight ? We can generate what is the average time of failure and average success time for capacity End of explanation """ df_hours = df[['serial','capacity','failure','smart_9']] df_hours.head() df_hours.shape """ Explanation: Question 7. 
How about using hours (SMART_9) column now and co-relate it with failure End of explanation """ df_hours_max = pd.DataFrame(df_hours.groupby(['serial','capacity']).smart_9.max()) df_hours_max.head() df_hours_max.shape df_hours_max = df_hours_max.reset_index() df_hours_max_merge = df_hours_max.merge(df_hours,on=['serial','capacity','smart_9'],how='inner') df_hours_max_merge.head() df_hours_max_merge_pivot = pd.pivot_table(df_hours_max_merge,index='capacity',columns='failure',values='smart_9' ,aggfunc='mean') df_hours_max_merge_pivot.head() df_hours_max_merge_pivot.plot(kind='bar') """ Explanation: Now we want to know upto when for a given hard disk and capacity , how long the hard disk ran End of explanation """ df_model_capacity_hours = df[['model','capacity','failure','smart_9']] df_model_capacity_hours.head() """ Explanation: Question 8. Given the data , identify the model and capacity of the hard disk to buy based on how long it runs End of explanation """ df_model_capacity_hours.capacity = df_model_capacity_hours.capacity / 1024 ** 3 df_model_capacity_hours.head() df_model_capacity_hours.capacity = df_model_capacity_hours.capacity.astype(np.int64) df_model_capacity_hours.head() df_model_capacity_hours_pivot = pd.pivot_table(data=df_model_capacity_hours,index='model',columns=['failure','capacity'], values='smart_9',aggfunc='mean') df_model_capacity_hours_pivot.head() df_model_capacity_hours_pivot.fillna(0,inplace=True) df_model_capacity_hours_pivot.head() df_model_capacity_hours_pivot.plot(kind="barh") """ Explanation: Let us convert bytes to gigabytes and round it to the nearest number End of explanation """ sns.heatmap(df_model_capacity_hours_pivot) """ Explanation: The above visualization is confusing as the bars reflect combination of failure and hours count End of explanation """
sdpython/pyquickhelper
_unittests/ut_helpgen/data/TD_2A_Eco_Web_Scraping.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: Web-Scraping Sous ce nom se cache une pratique très utile pour toute personne souhaitant travailler sur des informations disponibles en ligne, mais n'existant pas forcément sous la forme d'un tableau Excel ... Le webscraping est une technique d'extraction du contenu des sites internet, via un programme informatique : nous allons aujourd'hui vous présenter comme créer et exécuter ces robots afin de recupérer rapidement des informations utiles à vos projets actuels ou futurs. End of explanation """ import urllib import bs4 #help(bs4) """ Explanation: Un détour par le Web : comment fonctionne un site ? Même si nous n'allons pas aujourd'hui faire un cours de web, il vous faut néanmoins certaines bases pour comprendre comment un site internet fonctionne et comment sont structurées les informations sur une page. Un site Web est un ensemble de pages codées en HTML qui permet de décrire à la fois le contenu et la forme d'une page Web. HTML Les balises Sur une page web, vous trouverez toujours à coup sûr des éléments comme < head>, < title>, etc. Il s'agit des codes qui vous permettent de structurer le contenu d'une page HTML et qui s'appellent des balises. Citons, par exemple, les balises < p>, < h1>, < h2>, < h3>, < strong> ou < em>. Le symbole < > est une balise : il sert à indiquer le début d'une partie. Le symbole <\ > indique la fin de cette partie. La plupart des balises vont par paires, avec une «balise ouvrante» et une «balise fermante». (par exemple < p> et < /p>). 
Exemple : les balise des tableaux $$\begin{array}{rr} \hline Balise & \text{Description} \ \hline < table> & \text{Tableau} \ < caption>& \text{Titre du tableau} \ < tr> & \text{Ligne de tableau} \ < th> & \text{Cellule d'en-tête}\ < td> & \text{Cellule} \ < thead> & \text{Section de l'en-tête du tableau} \ < tbody> & \text{Section du corps du tableau} \ < tfoot> & \text{Section du pied du tableau} \ \end{array}$$ Application : un tableau en HTML Le code HTML du tableau suivant Donnera dans le navigateur $$\begin{array}{rrr} Prénom & Mike & Mister \ Nom & Stuntman & Pink \ Profession & Cascadeur & Gangster \ \end{array}$$ Parent et enfant Dans le cadre du langage HTML, les termes de parents (parent) et enfants (child) servent à désigner des élements emboîtés les uns dans les autres. Dans la construction suivante, par exemple : On dira que l'élément < div> est le parent de l'élément < p> tandis que l'élément < p> est l'enfant de l'élément < div>. Mais pourquoi apprendre ça pour scraper me direz-vous ? Pour bien récupérer les informations d'un site internet, il faut pouvoir comprendre sa structure et donc son code HTML. Les fonctions python qui servent au scrapping sont principalement construites pour vous permettre de naviguer entre les balises Optionnel - CSS - le style de la page WEB Quand le bout de code html est écrit, il apaprait sous la forme d'un texte noir sur un fond blanc. Une manière simple de rendre la page plus belle, c'est d'y ajouter de la couleur. La feuille de style qui permet de rendre la page plus belle correspond au(x) fichier(s) CSS. Toutes les pages HTML qui font référence à cette feuille de style externe hériteront de toutes ses définitions. Nous y reviendrons plus en détail dans le TD sur Flask (module Python de création de site internet). Scrapper avec python Nous allons essentiellement utiliser le package BeautifulSoup4 pour ce cours, mais d'autres packages existent (Selenium, Scrapy...). 
BeautifulSoup sera suffisant quand vous voudrez travailler sur des pages HTML statiques, dès que les informations que vous recherchez sont générées via l'exécution de scripts Javascipt, il vous faudra passer par des outils comme Selenium. De même, si vous ne connaissez pas l'URL, il faudra passer par un framework comme Scrapy, qui passe facilement d'une page à une autre ("crawl"). Scrapy est plus complexe à manipuler que BeautifulSoup : si vous voulez plus de détails, rendez-vous sur la page du tutorial https://doc.scrapy.org/en/latest/intro/tutorial.html. Utiliser BeautifulSoup Les packages pour scrapper des pages HTML : - BeautifulSoup (pip install bs4) - urllib End of explanation """ # Etape 1 : se connecter à la page wikipedia et obtenir le code source url_ligue_1 = "https://fr.wikipedia.org/wiki/Championnat_de_France_de_football_2016-2017" from urllib import request request_text = request.urlopen(url_ligue_1).read() print(request_text[:1000]) # Etape 2 : utiliser le package BeautifulSoup # qui "comprend" les balises contenues dans la chaine de caractères renvoyée par la fonction request page = bs4.BeautifulSoup(request_text, "lxml") #print(page) """ Explanation: 1ere page HTML On va commencer facilement, prenons une page wikipedia, par exemple celle de la Ligue 1 de football : https://fr.wikipedia.org/wiki/Championnat_de_France_de_football_2016-2017 On va souhaiter récupérer la liste des équipes, ainsi que les url des pages Wikipedia de ces équipes. End of explanation """ print(page.find("title")) """ Explanation: Si on print l'objet, page créée avec BeautifulSoup, on voit que ce n'est plus une chaine de caractères mais bien une page HTML avec des balises. On peut à présenter chercher des élements à l'intérieur de ces balises. 
par exemple, si on veut connaire le titre de la page, on utilise la méthode .find et on lui demande "title" End of explanation """ print(page.find("table")) """ Explanation: la methode .find ne renvoie que la première occurence de l'élément End of explanation """ print("Il y a", len(page.findAll("table")), "éléments dans la page qui sont des <table>") print(" Le 2eme tableau de la page : Hiérarchie \n", page.findAll("table")[1]) print("--------------------------------------------------------") print("Le 3eme tableau de la page : Palmarès \n",page.findAll("table")[2]) """ Explanation: Pour trouver toutes les occurences, on utilise .findAll() End of explanation """ for item in page.find('table', {'class' : 'DebutCarte'}).findAll({'a'})[0:5] : print(item, "\n-------") """ Explanation: Exercice guidé : obtenir la liste des équipes de Ligue 1 La liste des équipes est dans le tableau "Participants" : dans le code source, on voit que ce tableau est celui qui a class = "DebutCarte" On voit également que les balises qui encerclent les noms et les urls des clubs sont de la forme suivante End of explanation """ ### condition sur la place dans la liste >>>> MAUVAIS for e, item in enumerate(page.find('table', {'class' : 'DebutCarte'}).findAll({'a'})[0:5]) : if e == 0: pass else : print(item) #### condition sur les éléments que doit avoir la ligne >>>> BIEN for item in page.find('table', {'class' : 'DebutCarte'}).findAll({'a'})[0:5] : if item.get("title") : print(item) """ Explanation: On n'a pas envie de prendre le premier élément qui ne correspond pas à un club mais à une image. Or cet élément est le seul qui n'ait pas de title = "". 
Il est conseillé d'exclure les élements qui ne nous intéressent pas en indiquant les éléments que la ligne doit avoir au lieu de les exclure en fonction de leur place dans la liste End of explanation """ for item in page.find('table', {'class' : 'DebutCarte'}).findAll({'a'})[0:5] : if item.get("title") : print(item.get("href")) print(item.getText()) # pour avoir le nom officiel, on aurait utiliser l'élément <title> for item in page.find('table', {'class' : 'DebutCarte'}).findAll({'a'})[0:5] : if item.get("title") : print(item.get("title")) """ Explanation: Enfin la dernière étape, consiste à obtenir les informations souhaitées, c'est à dire dans notre cas, le nom et l'url des 20 clubs. Pour cela, nous allons utiliser deux méthodes de l'élement item : - getText() qui permet d'obtenir le texte qui est sur la page web et dans la balise < a> - get('xxxx') qui permet d'obtenir l'élément qui est égal à xxxx Dans notre cas, nous allons vouloir le nom du club ainsi que l'url : on va donc utiliser getText et get("href") End of explanation """ import pandas liste_noms = [] liste_urls = [] for item in page.find('table', {'class' : 'DebutCarte'}).findAll({'a'}) : if item.get("title") : liste_urls.append(item.get("href")) liste_noms.append(item.getText()) df = pandas.DataFrame.from_dict( {"clubs" : liste_noms, 'url' : liste_urls}) df.head() """ Explanation: Toutes ces informations, on souhaite les conserver dans un tableau Excel pour pouvoir les réuitiliser à l'envie : pour cela, rien de plus simple, on va passer par pandas, parce qu'on le maitrise parfaitement à ce stade de la formation. 
End of explanation """ import selenium #pip install selenium # télécharger le chrome driver http://chromedriver.storage.googleapis.com/index.html?path=2.24/ path_to_web_driver = "./chromedriver" import time from selenium import webdriver from selenium.webdriver.common.keys import Keys browser = webdriver.Chrome(path_to_web_driver) browser.get('https://news.google.com/') # on cherche l'endroit où on peut remplir un formulaire en utilisant les outils du navigateur > inspecter les éléments de la page # on voit que la barre de recherche est un élement du code appelé 'q' comme query # on lui demande de chercher cet élément search = browser.find_element_by_name('q') # on envoie à cet endroit le mot qu'on aurait tapé dans la barre de recherche search.send_keys("alstom") # on appuie sur le bouton "Entrée" Return en anglais search.send_keys(Keys.RETURN) links = browser.find_elements_by_xpath("//h3[@class='r _U6c']/a[@href]") results = [] for link in links: url = link.get_attribute('href') results.append(url) ### on a une pause de 10 secondes pour aller voir ce qui se passe sur la page internet time.sleep(10) # on demande de quitter le navigateur quand tout est fini browser.quit() print(results) """ Explanation: Exercice de web scraping avec BeautifulSoup Pour cet exercice, nous vous demandons d'obtenir 1) les informations personnelles des 721 pokemons sur le site internet http://pokemondb.net/pokedex/national Les informations que nous aimerions obtenir au final pour les pokemons sont celles contenues dans 4 tableaux : - Pokédex data - Training - Breeding - Base stats Pour exemple : http://pokemondb.net/pokedex/nincada 2) Nous aimerions que vous récupériez également les images de chacun des pokémons et que vous les enregistriez dans un dossier (indice : utilisez les modules request et shutil) pour cette question ci, il faut que vous cherchiez de vous même certains éléments, tout n'est pas présent dans le TD Aller sur internet avec Selenium L'avantage du package Selenium est 
d'obtenir des informations du site qui ne sont pas dans le code html mais qui apparaissent uniquement à la suite de l'exécution de script javascript en arrière plan. Selenium se comporte comme un utilisateur qui surfe sur internet : il clique sur des liens, il remplit des formulaires etc. Dans cet exemple, nous allons essayer de aller sur le site de Google Actualités et entrer dans la barre de recherche un sujet donné. End of explanation """ from selenium import webdriver from selenium.webdriver.common.keys import Keys browser = webdriver.Chrome(path_to_web_driver) browser.get('https://news.google.com/') search = browser.find_element_by_name('q') # on envoie à cet endroit le mot qu'on aurait tapé dans la barre de recherche search.send_keys("alstom") # on appuie sur le bouton "Rechercher" search.send_keys(Keys.RETURN) #pour obtenir le lien vers les articles d'il y a moins d'une heure : # on utilise ce qu'on a trouvé dans le code source à savoir l'url pour les articles de moins d'une heure link = browser.find_element_by_xpath("//li[@id='qdr_h']/a[@href]").get_attribute('href') print(link) browser.get(link) links = browser.find_elements_by_xpath("//h3[@class='r _U6c']/a[@href]") results = [] for link in links: url = link.get_attribute('href') results.append(url) #################################" #print(results) #time.sleep(5) browser.quit() print(results) """ Explanation: Obtenir des informations datant de moins d'une heure sur Google News End of explanation """ import time from selenium import webdriver def get_news_specific_dates (beg_date, end_date, subject, hl = "fr", gl = "fr", tbm = "nws", authuser = "0") : '''Permet d obtenir pour une requete donnée et un intervalle temporel précis les 10 premiers résultats d articles de presse parus sur le sujet''' get_string = 'https://www.google.com/search?hl={}&gl={}&tbm={}&authuser={}&q={}&tbs=cdr%3A1%2Ccd_min%3A{}%2Ccd_max%3A{}&tbm={}'.format(hl,gl,tbm,authuser,subject,beg_date,end_date,tbm) browser.get(get_string) links 
= browser.find_elements_by_xpath("//h3[@class='r _U6c']/a[@href]") results = [] for link in links: url = link.get_attribute('href') results.append(url) browser.quit() return results ### On appelle la fonction créée à l'instant browser = webdriver.Chrome(path_to_web_driver) articles_mai_2015 = get_news_specific_dates("01/05/2015","31/05/2015","société générale jerome kerviel",hl="fr") print(articles_mai_2015) """ Explanation: Obtenir des nouvelles sur un sujet entre deux dates données En réalité, l'exemple de Google News aurait pu se passer de Selenium et être utilisé directement avec BeautifulSoup et les url qu'on réussit à deviner de Google. Ici, on utilise l'url de Google News pour créer une petite fonction qui donne pour chaque ensemble de (sujet, debut d'une période, fin d'une période) des liens pertinents issus de la recherche Google. End of explanation """ from selenium import webdriver from selenium.webdriver.common.keys import Keys # on ouvre la page internet du jeu 2048 browser = webdriver.Chrome(path_to_web_driver) browser.get('https://gabrielecirulli.github.io/2048/') # Ce qu'on va faire : une boucle qui répète inlassablement la même chose : haut / droite / bas / gauche # on commence par cliquer sur la page pour que les touches sachent browser.find_element_by_class_name('grid-container').click() grid = browser.find_element_by_tag_name('body') # pour savoir quels coups faire à quel moment, on crée un dictionnaire direction = {0: Keys.UP, 1: Keys.RIGHT, 2: Keys.DOWN, 3: Keys.LEFT} count = 0 while True: try: # on vérifie que le bouton "Try again" n'est pas là - sinon ça veut dire que le jeu est fini retryButton = browser.find_element_by_link_text('Try again') scoreElem = browser.find_element_by_class_name('score-container') break except: #Do nothing. 
Game is not over yet pass # on continue le jeu - on appuie sur la touche suivante pour le coup d'après count += 1 grid.send_keys(direction[count % 4]) time.sleep(0.1) print('Score final : {} en {} coups'.format(scoreElem.text, count)) browser.quit() """ Explanation: Utiliser selenium pour jouer à 2048 Dans cet exemple, on utilise le module pour que python appuie lui même sur les touches du clavier afin de jouer à 2048. Note : ce bout de code ne donne pas une solution à 2048, il permet juste de voir ce qu'on peut faire avec selenium End of explanation """
ndanielsen/dc_parking_violations_data
notebooks/Top 15 Violations by Revenue And Total for VA.ipynb
mit
dc_df = df[(df.rp_plate_state.isin(['VA']))] dc_fines = dc_df.groupby(['violation_code']).fine.sum().reset_index('violation_code') fine_codes_15 = dc_fines.sort_values(by='fine', ascending=False)[:15] top_codes = dc_df[dc_df.violation_code.isin(fine_codes_15.violation_code)] top_violation_by_state = top_codes.groupby(['violation_description']).fine.sum() ax = top_violation_by_state.plot.barh() ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) plt.draw() top_violation_by_state = top_codes.groupby(['violation_description']).counter.sum() ax = top_violation_by_state.plot.barh() ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) plt.draw() """ Explanation: VA Top 15 violations by total revenue (revenue and total) End of explanation """ dc_df = df[(df.rp_plate_state.isin(['VA']))] dc_fines = dc_df.groupby(['violation_code']).counter.sum().reset_index('violation_code') fine_codes_15 = dc_fines.sort_values(by='counter', ascending=False)[:15] top_codes = dc_df[dc_df.violation_code.isin(fine_codes_15.violation_code)] top_violation_by_state = top_codes.groupby(['violation_description']).fine.sum() ax = top_violation_by_state.plot.barh() ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) plt.draw() top_violation_by_state = top_codes.groupby(['violation_description']).counter.sum() ax = top_violation_by_state.plot.barh() ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.0f')) plt.draw() """ Explanation: VA Top 15 violations by total tickets (revenue and total) End of explanation """
akutuzov/gensim
docs/notebooks/Word2Vec_FastText_Comparison.ipynb
lgpl-2.1
import nltk nltk.download('brown') # Only the brown corpus is needed in case you don't have it. # Generate brown corpus text file with open('brown_corp.txt', 'w+') as f: for word in nltk.corpus.brown.words(): f.write('{word} '.format(word=word)) # Make sure you set FT_HOME to your fastText directory root FT_HOME = 'fastText/' # download the text8 corpus (a 100 MB sample of cleaned wikipedia text) import os.path if not os.path.isfile('text8'): !wget -c http://mattmahoney.net/dc/text8.zip !unzip text8.zip # download and preprocess the text9 corpus if not os.path.isfile('text9'): !wget -c http://mattmahoney.net/dc/enwik9.zip !unzip enwik9.zip !perl {FT_HOME}wikifil.pl enwik9 > text9 """ Explanation: Comparison of FastText and Word2Vec Facebook Research open sourced a great project recently - fastText, a fast (no surprise) and effective method to learn word representations and perform text classification. I was curious about comparing these embeddings to other commonly used embeddings, so word2vec seemed like the obvious choice, especially considering fastText embeddings are an extension of word2vec. I've used gensim to train the word2vec models, and the analogical reasoning task (described in Section 4.1 of [2]) for comparing the word2vec and fastText models. I've compared embeddings trained using the skipgram architecture. 
Download data End of explanation """ MODELS_DIR = 'models/' !mkdir -p {MODELS_DIR} lr = 0.05 dim = 100 ws = 5 epoch = 5 minCount = 5 neg = 5 loss = 'ns' t = 1e-4 from gensim.models import Word2Vec from gensim.models.word2vec import Text8Corpus # Same values as used for fastText training above params = { 'alpha': lr, 'size': dim, 'window': ws, 'iter': epoch, 'min_count': minCount, 'sample': t, 'sg': 1, 'hs': 0, 'negative': neg } def train_models(corpus_file, output_name): output_file = '{:s}_ft'.format(output_name) if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))): print('Training fasttext on {:s} corpus..'.format(corpus_file)) %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t} else: print('\nUsing existing model file {:s}.vec'.format(output_file)) output_file = '{:s}_ft_no_ng'.format(output_name) if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))): print('\nTraining fasttext on {:s} corpus (without char n-grams)..'.format(corpus_file)) %time !{FT_HOME}fasttext skipgram -input {corpus_file} -output {MODELS_DIR+output_file} -lr {lr} -dim {dim} -ws {ws} -epoch {epoch} -minCount {minCount} -neg {neg} -loss {loss} -t {t} -maxn 0 else: print('\nUsing existing model file {:s}.vec'.format(output_file)) output_file = '{:s}_gs'.format(output_name) if not os.path.isfile(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))): print('\nTraining word2vec on {:s} corpus..'.format(corpus_file)) # Text8Corpus class for reading space-separated words file %time gs_model = Word2Vec(Text8Corpus(corpus_file), **params); gs_model # Direct local variable lookup doesn't work properly with magic statements (%time) locals()['gs_model'].save_word2vec_format(os.path.join(MODELS_DIR, '{:s}.vec'.format(output_file))) print('\nSaved gensim model as {:s}.vec'.format(output_file)) else: print('\nUsing 
existing model file {:s}.vec'.format(output_file)) evaluation_data = {} train_models('brown_corp.txt', 'brown') train_models(corpus_file='text8', output_name='text8') train_models(corpus_file='text9', output_name='text9') """ Explanation: Train models For training the models yourself, you'll need to have both Gensim and FastText set up on your machine. End of explanation """ # download the file questions-words.txt to be used for comparing word embeddings !wget https://raw.githubusercontent.com/tmikolov/word2vec/master/questions-words.txt """ Explanation: Comparisons End of explanation """ import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) # Training times in seconds evaluation_data['brown'] = [(18, 54.3, 32.5)] evaluation_data['text8'] = [(402, 942, 496)] evaluation_data['text9'] = [(3218, 6589, 3550)] def print_accuracy(model, questions_file): print('Evaluating...\n') acc = model.accuracy(questions_file) sem_correct = sum((len(acc[i]['correct']) for i in range(5))) sem_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5)) sem_acc = 100*float(sem_correct)/sem_total print('\nSemantic: {:d}/{:d}, Accuracy: {:.2f}%'.format(sem_correct, sem_total, sem_acc)) syn_correct = sum((len(acc[i]['correct']) for i in range(5, len(acc)-1))) syn_total = sum((len(acc[i]['correct']) + len(acc[i]['incorrect'])) for i in range(5,len(acc)-1)) syn_acc = 100*float(syn_correct)/syn_total print('Syntactic: {:d}/{:d}, Accuracy: {:.2f}%\n'.format(syn_correct, syn_total, syn_acc)) return (sem_acc, syn_acc) word_analogies_file = 'questions-words.txt' accuracies = [] print('\nLoading Gensim embeddings') brown_gs = Word2Vec.load_word2vec_format(MODELS_DIR + 'brown_gs.vec') print('Accuracy for Word2Vec:') accuracies.append(print_accuracy(brown_gs, word_analogies_file)) print('\nLoading FastText embeddings') brown_ft = Word2Vec.load_word2vec_format(MODELS_DIR + 'brown_ft.vec') print('Accuracy for FastText (with 
n-grams):') accuracies.append(print_accuracy(brown_ft, word_analogies_file)) """ Explanation: Once you have downloaded or trained the models and downloaded questions-words.txt, you're ready to run the comparison. End of explanation """ print('Loading FastText embeddings') brown_ft_no_ng = Word2Vec.load_word2vec_format(MODELS_DIR + 'brown_ft_no_ng.vec') print('Accuracy for FastText (without n-grams):') accuracies.append(print_accuracy(brown_ft_no_ng, word_analogies_file)) evaluation_data['brown'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]] """ Explanation: The accuracy takes an optional parameter restrict_vocab, which limits the vocabulary of model considered for fast approximate evaluation (default is 30000). Word2Vec embeddings seem to be slightly better than fastText embeddings at the semantic tasks, while the fastText embeddings do significantly better on the syntactic analogies. Makes sense, since fastText embeddings are trained for understanding morphological nuances, and most of the syntactic analogies are morphology based. Let me explain that better. According to the paper [1], embeddings for words are represented by the sum of their n-gram embeddings. This is meant to be useful for morphologically rich languages - so theoretically, the embedding for apparently would include information from both character n-grams apparent and ly (as well as other n-grams), and the n-grams would combine in a simple, linear manner. This is very similar to what most of our syntactic tasks look like. Example analogy: amazing amazingly calm calmly This analogy is marked correct if: embedding(amazing) - embedding(amazingly) = embedding(calm) - embedding(calmly) Both these subtractions would result in a very similar set of remaining ngrams. No surprise the fastText embeddings do extremely well on this. 
Let's do a small test to validate this hypothesis - fastText differs from word2vec only in that it uses char n-gram embeddings as well as the actual word embedding in the scoring function to calculate scores and then likelihoods for each word, given a context word. In case char n-gram embeddings are not present, this reduces (atleast theoretically) to the original word2vec model. This can be implemented by setting 0 for the max length of char n-grams for fastText. End of explanation """ accuracies = [] print('Loading Gensim embeddings') text8_gs = Word2Vec.load_word2vec_format(MODELS_DIR + 'text8_gs.vec') print('Accuracy for word2vec:') accuracies.append(print_accuracy(text8_gs, word_analogies_file)) print('Loading FastText embeddings (with n-grams)') text8_ft = Word2Vec.load_word2vec_format(MODELS_DIR + 'text8_ft.vec') print('Accuracy for FastText (with n-grams):') accuracies.append(print_accuracy(text8_ft, word_analogies_file)) print('Loading FastText embeddings') text8_ft_no_ng = Word2Vec.load_word2vec_format(MODELS_DIR + 'text8_ft_no_ng.vec') print('Accuracy for FastText (without n-grams):') accuracies.append(print_accuracy(text8_ft_no_ng, word_analogies_file)) evaluation_data['text8'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]] """ Explanation: A-ha! The results for FastText with no n-grams and Word2Vec look a lot more similar (as they should) - the differences could easily result from differences in implementation between fastText and Gensim, and randomization. Especially telling is that the semantic accuracy for FastText has improved slightly after removing n-grams, while the syntactic accuracy has taken a giant dive. Our hypothesis that the char n-grams result in better performance on syntactic analogies seems fair. It also seems possible that char n-grams hurt semantic accuracy a little. However, the brown corpus is too small to be able to draw any definite conclusions - the accuracies seem to vary significantly over different runs. 
Let's try with a larger corpus now - text8 (collection of wiki articles). I'm also curious about the impact on semantic accuracy - for models trained on the brown corpus, the difference in the semantic accuracy and the accuracy values themselves are too small to be conclusive. Hopefully a larger corpus helps, and the text8 corpus likely has a lot more information about capitals, currencies, cities etc, which should be relevant to the semantic tasks. End of explanation """ accuracies = [] print('Loading Gensim embeddings') text9_gs = Word2Vec.load_word2vec_format(MODELS_DIR + 'text9_gs.vec') print('Accuracy for word2vec:') accuracies.append(print_accuracy(text9_gs, word_analogies_file)) print('Loading FastText embeddings (with n-grams)') text9_ft = Word2Vec.load_word2vec_format(MODELS_DIR + 'text9_ft.vec') print('Accuracy for FastText (with n-grams):') accuracies.append(print_accuracy(text9_ft, word_analogies_file)) print('Loading FastText embeddings') text9_ft_no_ng = Word2Vec.load_word2vec_format(MODELS_DIR + 'text9_ft_no_ng.vec') print('Accuracy for FastText (without n-grams):') accuracies.append(print_accuracy(text9_ft_no_ng, word_analogies_file)) evaluation_data['text9'] += [[acc[0] for acc in accuracies], [acc[1] for acc in accuracies]] %matplotlib inline import matplotlib.pyplot as plt def plot(ax, data, corpus_name='brown'): width = 0.25 pos = [(i, i + width, i + 2*width) for i in range(len(data))] colors = ['#EE3224', '#F78F1E', '#FFC222'] acc_ax = ax.twinx() # Training time ax.bar(pos[0], data[0], width, alpha=0.5, color=colors ) # Semantic accuracy acc_ax.bar(pos[1], data[1], width, alpha=0.5, color=colors ) # Syntactic accuracy acc_ax.bar(pos[2], data[2], width, alpha=0.5, color=colors ) ax.set_ylabel('Training time (s)') acc_ax.set_ylabel('Accuracy (%)') ax.set_title(corpus_name) acc_ax.set_xticks([p[0] + 1.5 * width for p in pos]) acc_ax.set_xticklabels(['Training Time', 'Semantic Accuracy', 'Syntactic Accuracy']) # Proxy plots for adding legend 
correctly proxies = [ax.bar([0], [0], width=0, color=c, alpha=0.5)[0] for c in colors] models = ('Gensim', 'FastText', 'FastText (no-ngrams)') ax.legend((proxies), models, loc='upper left') ax.set_xlim(pos[0][0]-width, pos[-1][0]+width*4) ax.set_ylim([0, max(data[0])*1.1] ) acc_ax.set_ylim([0, max(data[1] + data[2])*1.1] ) plt.grid() # Plotting the bars fig = plt.figure(figsize=(10,15)) for corpus, subplot in zip(sorted(evaluation_data.keys()), [311, 312, 313]): ax = fig.add_subplot(subplot) plot(ax, evaluation_data[corpus], corpus) plt.show() """ Explanation: With the text8 corpus, we observe a similar pattern. Semantic accuracy falls by a small but significant amount when n-grams are included in FastText, while FastText with n-grams performs far better on the syntactic analogies. FastText without n-grams are largely similar to Word2Vec. My hypothesis for semantic accuracy being lower for the FastText-with-ngrams model is that most of the words in the semantic analogies are standalone words and are unrelated to their morphemes (eg: father, mother, France, Paris), hence inclusion of the char n-grams into the scoring function actually makes the embeddings worse. This trend is observed in the original paper too where the performance of embeddings with n-grams is worse on semantic tasks than both word2vec cbow and skipgram models. Let's do a quick comparison on an even larger corpus - text9 End of explanation """
MadsJensen/intro_to_scientific_computing
src/00-Solutions-to-exercises.ipynb
bsd-3-clause
def my_power_func(base, pwr=2): return(base**pwr) """ Explanation: Solutions to exercises Building blocks Function arguments End of explanation """ import html # part of the Python 3 standard library with open('nobel-prize-winners.csv', 'rt') as fp: orig = fp.read() # read the entire file as a single hunk of text orig[727:780] # show some characters, note the '\n' print(orig[727:780]) # see how the '\n' gets converted to a newline """ Explanation: File I/O Fixing encoding in CSV file The file nobel-prize-winners.csv contains some odd-looking characters in the name-column, such as '&egrave;'. These are the HTML codes for characters outside the limited ASCII set. Python is very capable at Unicode/UTF-8, so let's convert the characters to something more pleasant to the eye End of explanation """ html.unescape? fixed = html.unescape(orig) # one line, less than a second... print(fixed[727:780]) # much better with open('nobel-prize-winners-fixed.csv', 'wt') as fp: fp.write(fixed) # write back to disk, and we're done! """ Explanation: With some Googling, we find this candidate function to fix the character End of explanation """
quiltdata/quilt-compiler
docs/Walkthrough/Editing a Package.ipynb
apache-2.0
import quilt3 p = quilt3.Package() """ Explanation: Data in Quilt is organized in terms of data packages. A data package is a logical group of files, directories, and metadata. Initializing a package To edit a new empty package, use the package constructor: End of explanation """ quilt3.Package.install( "examples/hurdat", "s3://quilt-example", ) """ Explanation: To edit a preexisting package, we need to first make sure to install the package: End of explanation """ p = quilt3.Package.browse('examples/hurdat') """ Explanation: Use browse to edit the package: End of explanation """ # add entries individually using `set` # ie p.set("foo.csv", "/local/path/foo.csv"), # p.set("bar.csv", "s3://bucket/path/bar.csv") # create test data with open("data.csv", "w") as f: f.write("id, value\na, 42") p = quilt3.Package() p.set("data.csv", "data.csv") p.set("banner.png", "s3://quilt-example/imgs/banner.png") # or grab everything in a directory at once using `set_dir` # ie p.set_dir("stuff/", "/path/to/stuff/"), # p.set_dir("things/", "s3://path/to/things/") # create test directory import os os.mkdir("data") p.set_dir("stuff/", "./data/") p.set_dir("imgs/", "s3://quilt-example/imgs/") """ Explanation: For more information on accessing existing packages see the section "Installing a Package". Adding data to a package Use the set and set_dir commands to add individual files and whole directories, respectively, to a Package: End of explanation """ p """ Explanation: The first parameter to these functions is the logical key, which will determine where the file lives within the package. So after running the commands above our package will look like this: End of explanation """ # assuming data.csv is in the current directory p = quilt3.Package() p.set("data.csv") """ Explanation: The second parameter is the physical key, which states the file's actual location. The physical key may point to either a local file or a remote object (with an s3:// path). 
If the physical key and the logical key are the same, you may omit the second argument: End of explanation """ # switch to a test directory and create some test files import os %cd data/ os.mkdir("stuff") with open("new_data.csv", "w") as f: f.write("id, value\na, 42") # set the contents of the package to that of the current directory p.set_dir(".", ".") """ Explanation: Another useful trick. Use "." to set the contents of the package to that of the current directory: End of explanation """ p.delete("data.csv") """ Explanation: Deleting data in a package Use delete to remove entries from a package: End of explanation """ p = quilt3.Package() p.set("data.csv", "new_data.csv", meta={"type": "csv"}) p.set_dir("stuff/", "stuff/", meta={"origin": "unknown"}) """ Explanation: Note that this will only remove this piece of data from the package. It will not delete the actual data itself. Adding metadata to a package Packages support metadata anywhere in the package. To set metadata on package entries or directories, use the meta argument: End of explanation """ # set metadata on a package p.set_meta({"package-type": "demo"}) """ Explanation: You can also set metadata on the package as a whole using set_meta. End of explanation """
sot/aimpoint_mon
fit_aimpoint_drift-2018-11.ipynb
bsd-2-clause
import re import tables import matplotlib.pyplot as plt import numpy as np from astropy.time import Time from astropy.table import Table import Ska.engarchive.fetch_eng as fetch from Ska.engarchive import fetch_sci from Chandra.Time import DateTime from Ska.Numpy import interpolate from kadi import events from sherpa import ui from Ska.Matplotlib import plot_cxctime %matplotlib inline SIM_MM_TO_ARCSEC = 20.493 # Discrete jumps after 2012:001. Note also jumps at: # '2008:293', # IU-reset # '2010:151', # IU-reset # '2011:190', # Safe mode JUMPS = ['2015:006', # IU-reset '2015:265', # Safe mode 6 '2016:064', # Safe mode 7 '2017:066', # NSM '2018:285', # Safe mode 8 ] ltt_bads = events.ltt_bads(pad=(0, 200000)) normal_suns = events.normal_suns(pad=(0, 100000)) safe_suns = events.safe_suns(pad=(0, 86400 * 7)) # Aspect camera CCD temperature trend since 2010 t_ccd = fetch.Msid('aacccdpt', start='2010:001', stat='5min') t_ccd.remove_intervals(ltt_bads | normal_suns | safe_suns) plt.figure(figsize=(12, 4.5)) t_ccd.plot() plt.ylabel('T_ccd (degF)') plt.title('ACA CCD temperature') plt.ylim(None, 20) plt.grid() # Get aspect solution DY and DZ (apparent SIM offsets via fid light positions) # which are sampled at 1 ksec intervals and updated daily. if 'adat' not in globals(): h5 = tables.open_file('/proj/sot/ska/data/aimpoint_mon/aimpoint_asol_values.h5') adat = h5.root.data[:] h5.close() adat.sort(order=['time']) # Filter bad data when asol DY and DZ are both exactly 0.0 (doesn't happen normally) bad = (adat['dy'] == 0.0) & (adat['dz'] == 0.0) adat = adat[~bad] class AcaDriftModel(object): """ Class to encapsulate necessary data and compute the model of ACA alignment drift. The object created from this class is called by Sherpa as a function during fitting. This gets directed to the __call__() method. 
""" YEAR0 = 2016.0 # Reference year for linear offset def __init__(self, adat, start='2012:001', stop=None): """ adat is the raw data array containing aspect solution data sampled at 1 ksec intervals. """ # Get the ACA CCD temperature telemetry t_ccd = fetch.Msid('aacccdpt', stat='5min', start=start, stop=stop) # Slice the ASOL data corresponding to available ACA CCD temps i0, i1 = np.searchsorted(adat['time'], [t_ccd.times[0], t_ccd.times[-1]]) self.asol = adat[i0:i1].copy() # Convert from mm to arcsec for convenience self.asol['dy'] *= SIM_MM_TO_ARCSEC self.asol['dz'] *= SIM_MM_TO_ARCSEC self.times = self.asol['time'] self.years = Time(self.times, format='cxcsec').decimalyear self.years_0 = self.years - self.YEAR0 # Resample CCD temp. data to the 1 ksec ASOL time stamps self.t_ccd = interpolate(t_ccd.vals, t_ccd.times, self.asol['time'], method='linear') # Get indices corresponding to jump times for later model computation self.jump_times = Time(JUMPS).cxcsec self.jump_idxs = np.searchsorted(self.times, self.jump_times) def __call__(self, pars, years=None, t_ccd=None): """ Calculate model prediction for DY or DZ. 
Params are: scale : scaling in arcsec / degF offset : ACA CCD temperature corresponding to DY/Z = 0.0 arcsec trend : Trend in DY/Z (arcsec / year) jumpYYYYDDD : discrete jump in arcsec at date YYYY:DDD """ # Sherpa passes the parameters as a list scale, offset, trend = pars[0:3] jumps = pars[3:] # Allow for passing in a different value for ACA CCD temperature if t_ccd is None: t_ccd = self.t_ccd # Compute linear part of model out = (t_ccd - offset) * scale + self.years_0 * trend # Put in the step function jumps for jump_idx, jump in zip(self.jump_idxs, jumps): if jump_idx > 10 and jump_idx < len(out) - 10: out[jump_idx:] += jump return out def fit_aimpoint_aca_temp(axis='dy', start='2012:180', stop=None): """ Use Sherpa to fit the model parameters """ # Create the object used to define the Sherpa user model, then # load as a model and create parameters aca_drift = AcaDriftModel(adat, start, stop) ui.load_user_model(aca_drift, 'aca_drift_model') parnames = ['scale', 'offset', 'trend'] parnames += ['jump{}'.format(re.sub(':', '', x)) for x in JUMPS] ui.add_user_pars('aca_drift_model', parnames) # Sherpa automatically puts 'aca_drift_model' into globals, but # make this explicit so code linters don't complain. 
aca_drift_model = globals()['aca_drift_model'] # Get the DY or DZ values and load as Sherpa data dyz = aca_drift.asol[axis] ui.load_arrays(1, aca_drift.years, dyz) # Set the model and fit using Simplex (Nelder-Mead) minimization ui.set_model(1, aca_drift_model) ui.set_method('simplex') ui.fit(1) return aca_drift, ui.get_fit_results() def plot_aimpoint_drift(axis, aca_drift, fit_results, start='2010:001', stop=None, plot_t_ccd=False): """ Plot our results """ y_start = DateTime(start).frac_year y_stop = DateTime(stop).frac_year years = aca_drift.years ok = (years > y_start) & (years < y_stop) years = aca_drift.years[ok] times = aca_drift.times[ok] # Call model directly with best-fit parameters to get model values dyz_fit = aca_drift(fit_results.parvals)[ok] # DY or DZ values from aspect solution dyz = aca_drift.asol[axis][ok] dyz_resid = dyz - dyz_fit if plot_t_ccd: plt.figure(figsize=(12, 4.5)) plt.subplot(1, 2, 1) plot_cxctime(times, dyz, label='Data') plot_cxctime(times, dyz_fit, 'r-', alpha=0.5, label='Fit') plot_cxctime(times, dyz_resid, 'r-', label='Residual') plt.title('Fit aspect solution {} to scaled ACA CCD temperature' .format(axis.upper())) plt.ylabel('{} (arcsec)'.format(axis.upper())) plt.grid() plt.legend(loc='upper left', framealpha=1.0) if plot_t_ccd: dat = fetch_sci.Msid('aacccdpt', start, stop, stat='5min') plt.subplot(1, 2, 2) dat.plot() plt.grid() plt.ylabel('AACCCDPT (degC)') if isinstance(plot_t_ccd, tuple): plt.ylim(*plot_t_ccd) std = dyz_resid.std() p1, p99 = np.percentile(dyz_resid, [1, 99]) print('Fit residual stddev = {:.2f} arcsec'.format(std)) print('Fit residual 99th - 1st percentile = {:.2f}'.format(p99 - p1)) """ Explanation: Model for aimpoint drift (aka ACA alignment drift) 2018-11 This notebook documents and computes fit coefficients for a simple model that gives the relative ACA alignment as a linear function of the ACA CCD temperature. 
It also includes validation of the implementation of the new model in the chandra_aca.drift package. This is based on the origin fit_aimpoint_drift notebook but updated through 2018:310 to include data from after the 2018:283 safe mode normal sun dwell. Two jumps were added to the model, one corresponding to the 2017:066 NSM and one from the 2018:283 safe mode. The ACA alignment is measured accurately for each science observation via the apparent positions of the fid lights. These are referred to by their CXC aspect solution designation as the SIM DY and DZ offsets. This is actually a misnomer based on the pre-launch understanding of what physical mechanism would generate such offsets. We now know via HRMA optical axis measurements that a temperature-dependent change in the ACA boresight alignment is responsible. The HRMA to SIM alignment is quite stable. The ACA alignment relates directly to the X-ray detector aimpoint that is used in observation planning and analysis. With this model it will be possible to improve the aimpoint accuracy by introducing a dynamic pointing offset based on the predicted ACA CCD temperature for each observation. The model is DY/Z = (t_ccd - offset) * scale + (year - 2016.0) * trend + JUMPS where t_ccd : ACA CCD temperature (degF) scale : scaling in arcsec / degF offset : ACA CCD temperature corresponding to DY/Z = 0.0 arcsec trend : Trend in DY/Z (arcsec / year) year : decimal year jumpYYYYDDD : step function from 0.0 to jumpYYYYDDD (arcsec) for date &gt; YYYY:DDD The jumps are persistent step function changes in alignment that have been observed following extended dwells at normal sun where the ACA gets substantially hotter than during normal operations. The exact mechanism is not understood, but could be due to a non-linear stiction release of a stress point that impacts alignment. Note that the ACA alignment has a direct linear correlation to the ACA housing temperature (AACH1T). 
However, in this model we use the ACA CCD temperature as the model dependent variable because it is linearly related to housing temperature (AACCDPT = m * AACH1T + b) as long as the TEC is at max drive current. Since there is already an existing Xija model to predict ACA CCD temperature this reduces duplication. This model was fitted to data from 2012:180 to 2018:310 using Sherpa. The key fit results are: ``` DY scale = 2.1 arcsec / degF = 3.9 arcsec / degC trend = -1.11 arcsec / year jumps ~ -2 to -13 arcsec model error = +/- 1.9 arcsec (1st to 99th percentile range) DZ scale = 1.0 arcsec / degF = 1.8 arcsec / degC trend = -0.16 arcsec / year jumps ~ -0.4 to -6.1 arcsec model error = +/- 2.8 arcsec (1st to 99th percentile range) ``` The model accuracy will be degraded somewhat when ACA CCD temperature is taken from a predictive Xija model instead of from telemetry. This notebook lives in the aimpoint_mon project repository Code End of explanation """ aca_drift_dy, fit_dy = fit_aimpoint_aca_temp('dy') plot_aimpoint_drift('dy', aca_drift_dy, fit_dy) """ Explanation: Fit model coefficients for DY and plot results End of explanation """ start = '2018:260' stop = '2018:310' plot_aimpoint_drift('dy', aca_drift_dy, fit_dy, start=start, stop=stop, plot_t_ccd=(-16, -8)) """ Explanation: Zoom in around the 2018:283 safe mode time End of explanation """ dyz_fit = aca_drift_dy(fit_dy.parvals, t_ccd=14) # degF = -10 C plot_cxctime(aca_drift_dy.times, dyz_fit) plt.title('DY drift model assuming constant ACA temperature') plt.grid(); """ Explanation: Fid light commanded vs observed angles Temperatures just after the safe mode gap are cool, around -12.5 C. In this plot the offset gets more positive with lower temperature (i.e. anti-correlated with T_ccd, about -4 arcsec / degC). Implications: At T_ccd = -13.5 C the caution yellow limit would be hit At T_ccd = -16.0 the warning red limit would be hit. Somewhere in there the fids will not be acquired. 
So effectively right now we have a new constraint that the ACA CCD needs to be warmer than -13.5 C. Probably not a problem in practice. Raw fid drift data vs. archaic fid drift model This just shows in raw form the 13 arcsec jump. The sign is the same, so increasing temperature means more negative Y offset. Focus on the downwarn spikesaround Y offset = -46 for the blue model, looking before and after safe mode. The raw fid locations have shifted by about 13 arcsec. Pre-safemode these have mean Y offset around -22 arcsec. Post-safemode this was around 2018:299 and the mean Y offset is around -8 arcsec. Illustrate model behavior by assuming a constant ACA CCD temperature End of explanation """ aca_drift_dz, fit_dz = fit_aimpoint_aca_temp('dz') plot_aimpoint_drift('dz', aca_drift_dz, fit_dz) start = '2018:260' stop = '2018:310' plot_aimpoint_drift('dz', aca_drift_dz, fit_dz, start=start, stop=stop, plot_t_ccd=(-16, -8)) """ Explanation: Fit model coefficients for DZ and plot results End of explanation """ dyz_fit = aca_drift_dz(fit_dz.parvals, t_ccd=14) # degF = -10 C plot_cxctime(aca_drift_dz.times, dyz_fit) plt.title('DZ drift model assuming constant ACA temperature') plt.grid(); """ Explanation: Illustrate model behavior by assuming a constant ACA CCD temperature End of explanation """ text = """ obsid detector chipx chipy chip_id aca_offset_y aca_offset_z mean_t_ccd mean_date ----- -------- ------- ------- ------- ------------ ------------ ---------- --------------------- 21152 ACIS-S 210.0 520.0 7 -0.9 -22.67 -11.72 2018:307:18:07:54.816 20332 ACIS-I 970.0 975.0 3 -14.27 -21.89 -11.88 2018:308:04:03:46.816 21718 HRC-I 7590.0 7745.0 0 -13.39 -22.8 -11.53 2018:313:03:14:10.816 21955 HRC-S 2195.0 8915.0 2 -12.50 -22.57 -11.53 2018:305:16:28:34.816 """ obss = Table.read(text, format='ascii.fixed_width_two_line') import sys import os sys.path.insert(0, os.path.join(os.environ['HOME'], 'git', 'chandra_aca')) import chandra_aca from chandra_aca import drift from kadi 
import events
chandra_aca.test(get_version=True)

# For each observation in the `obss` table, recompute the dynamical ACA
# offsets with the chandra_aca.drift model, using the observed mean ACA CCD
# temperature over that observation's own dwell, then print the delta from
# the flight aca_offset_y/z values recorded in the table.
for obs in obss:
    # Query the dwell for THIS obsid.  (Bug fix: the obsid was hard-coded to
    # 21152, so every row reused the first observation's dwell and hence the
    # same temperature for all four obsids.)
    dwell = events.dwells.filter(obsid=obs['obsid'])[0]
    t_ccd = fetch_sci.Msid('aacccdpt', dwell.start, dwell.stop, stat='5min')
    mean_t_ccd = np.mean(t_ccd.vals)
    offsets = drift.get_aca_offsets(obs['detector'],
                                    chip_id=obs['chip_id'],
                                    chipx=obs['chipx'],
                                    chipy=obs['chipy'],
                                    time=obs['mean_date'],
                                    t_ccd=mean_t_ccd)
    print(obs)
    print('T_ccd:', mean_t_ccd,
          ' Delta offsets Y Z:',
          '%.2f' % (obs['aca_offset_y'] - offsets[0]),
          '%.2f' % (obs['aca_offset_z'] - offsets[1]))
    print()
""" Explanation: Comparison to current flight model for NOV0518B Compare the actual flight aca_offset_y/z from the *_dynamical_offsets.txt files to predictions with the new chandra_aca.drift module. A key point is to use the observed mean T_ccd with the new model to be able to reproduce the observed aimpoint shift of about 8 arcsec. The jump was 13 arcsec but we did not see that directly because of the ~1.4 C error in the temperatures being used to predict the aimpoint offset. End of explanation """

# Side-by-side comparison of the locally fitted drift model (left panel)
# against the implementation shipped in chandra_aca (right panel),
# first for DY and then for DZ.
from chandra_aca.tests.test_all import simple_test_aca_drift
dy, dz, times = simple_test_aca_drift()

plt.figure(figsize=(12, 4.5))
plt.subplot(1, 2, 1)
dy_fit = aca_drift_dy(fit_dy.parvals, t_ccd=14)  # degF = -10 C
plot_cxctime(aca_drift_dy.times, dy_fit)
plt.title('DY drift model assuming constant ACA temperature')
plt.grid();
plt.subplot(1, 2, 2)
plot_cxctime(times, dy);
plt.grid()
plt.ylabel('DY (arcsec)');
plt.title('DY drift model from chandra_aca');

plt.figure(figsize=(12, 4.5))
plt.subplot(1, 2, 1)
dz_fit = aca_drift_dz(fit_dz.parvals, t_ccd=14)  # degF = -10 C
plot_cxctime(aca_drift_dz.times, dz_fit)
plt.title('DZ drift model assuming constant ACA temperature')
plt.grid();
plt.subplot(1, 2, 2)
plot_cxctime(times, dz);
plt.grid()
plt.ylabel('DZ (arcsec)');
plt.title('DZ drift model from chandra_aca');
""" Explanation: Comparison of local model prediction to implementation in chandra_aca End of explanation """
ML4DS/ML4all
U2.SpectralClustering/SpecClustering_professor.ipynb
mit
%matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy import stats # use seaborn plotting defaults import seaborn as sns; sns.set() from sklearn.cluster import KMeans from sklearn.datasets.samples_generator import make_blobs, make_circles from sklearn.utils import shuffle from sklearn.metrics.pairwise import rbf_kernel from sklearn.cluster import SpectralClustering # For the graph representation import networkx as nx """ Explanation: Spectral Clustering Algorithms Notebook version: 1.1 (Nov 17, 2017) Author: Jesús Cid Sueiro (jcid@tsc.uc3m.es) Jerónimo Arenas García (jarenas@tsc.uc3m.es) Changes: v.1.0 - First complete version. v.1.1 - Python 3 version End of explanation """ N = 300 nc = 4 Xs, ys = make_blobs(n_samples=N, centers=nc, random_state=6, cluster_std=0.60, shuffle = False) X, y = shuffle(Xs, ys, random_state=0) plt.scatter(X[:, 0], X[:, 1], s=30); plt.axis('equal') plt.show() """ Explanation: 1. Introduction The key idea of spectral clustering algorithms is to search for groups of connected data. I.e, rather than pursuing compact clusters, spectral clustering allows for arbitrary shape clusters. This can be illustrated with two artifitial datasets that we will use along this notebook. 1.1. Gaussian clusters: The first one consists of 4 compact clusters generated from a Gaussian distribution. This is the kind of dataset that are best suited to centroid-based clustering algorithms like $K$-means. If the goal of the clustering algorithm is to minimize the intra-cluster distances and find a representative prototype or centroid for each cluster, $K$-means may be a good option. 
End of explanation """ X2s, y2s = make_circles(n_samples=N, factor=.5, noise=.05, shuffle=False) X2, y2 = shuffle(X2s, y2s, random_state=0) plt.scatter(X2[:, 0], X2[:, 1], s=30) plt.axis('equal') plt.show() """ Explanation: Note that we have computed two data matrices: ${\bf X}$, which contains the data points in an arbitray ordering ${\bf X}_s$, where samples are ordered by clusters, according to the cluster id array, ${\bf y}$. Note that both matrices contain the same data (rows) but in different order. The sorted matrix will be useful later for illustration purposes, but keep in mind that, in a real clustering application, vector ${\bf y}$ is unknown (learning is not supervised), and only a data matrix with an arbitrary ordering (like ${\bf X}$) will be available. 1.2. Concentric rings The second dataset contains two concentric rings. One could expect from a clustering algorithm to identify two different clusters, one per each ring of points. If this is the case, $K$-means or any other algorithm focused on minimizing distances to some cluster centroids is not a good choice. End of explanation """ # <SOL> est = KMeans(n_clusters=4) clusters = est.fit_predict(X) plt.scatter(X[:, 0], X[:, 1], c=clusters, s=30, cmap='rainbow') plt.axis('equal') clusters = est.fit_predict(X2) plt.figure() plt.scatter(X2[:, 0], X2[:, 1], c=clusters, s=30, cmap='rainbow') plt.axis('equal') plt.show() # </SOL> """ Explanation: Note, again, that we have computed both the sorted (${\bf X}_{2s}$) and the shuffled (${\bf X}_2$) versions of the dataset in the code above. Exercise 1: Using the code of the previous notebook, run the $K$-means algorithm with 4 centroids for the two datasets. In the light of your results, why do you think $K$-means does not work well for the second dataset? 
End of explanation """ gamma = 0.5 K = rbf_kernel(X, X, gamma=gamma) """ Explanation: Spectral clustering algorithms are focused on connectivity: clusters are determined by maximizing some measure of intra-cluster connectivity and maximizing some form of inter-cluster connectivity. 2. The affinity matrix 2.1. Similarity function To implement a spectral clustering algorithm we must specify a similarity measure between data points. In this session, we will use the rbf kernel, that computes the similarity between ${\bf x}$ and ${\bf y}$ as: $$\kappa({\bf x},{\bf y}) = \exp(-\gamma \|{\bf x}-{\bf y}\|^2)$$ Other similarity functions can be used, like the kernel functions implemented in Scikit-learn (see the <a href=http://scikit-learn.org/stable/modules/metrics.html> metrics </a> module). 2.2. Affinity matrix For a dataset ${\cal S} = {{\bf x}0,\ldots,{\bf x}{N-1}}$, the $N\times N$ affinity matrix ${\bf K}$ contains the similarity measure between each pair of samples. Thus, its components are $$K_{ij} = \kappa\left({\bf x}_i, {\bf x}_j\right)$$ The following fragment of code illustrates all pairs of distances between any two points in the dataset. End of explanation """ plt.imshow(K, cmap='hot') plt.colorbar() plt.title('RBF Affinity Matrix for gamma = ' + str(gamma)) plt.grid('off') plt.show() """ Explanation: 2.3. Visualization We can visualize the affinity matrix as an image, by translating component values into pixel colors or intensities. End of explanation """ Ks = rbf_kernel(Xs, Xs, gamma=gamma) plt.imshow(Ks, cmap='hot') plt.colorbar() plt.title('RBF Affinity Matrix for gamma = ' + str(gamma)) plt.grid('off') plt.show() """ Explanation: Despite the apparent randomness of the affinity matrix, it contains some hidden structure, that we can uncover by visualizing the affinity matrix computed with the sorted data matrix, ${\bf X}_s$. 
End of explanation """ t = 0.001 # Kt = <FILL IN> # Truncated affinity matrix Kt = K*(K>t) # Truncated affinity matrix # Kst = <FILL IN> # Truncated and sorted affinity matrix Kst = Ks*(Ks>t) # Truncated and sorted affinity matrix """ Explanation: Note that, despite their completely different appearance, both affinity matrices contain the same values, but with a different order of rows and columns. For this dataset, the sorted affinity matrix is almost block diagonal. Note, also, that the block-wise form of this matrix depends on parameter $\gamma$. Exercise 2: Modify the selection of $\gamma$, and check the effect of this in the appearance of the sorted similarity matrix. Write down the values for which you consider that the structure of the matrix better resembles the number of clusters in the datasets. Out from the diagonal block, similarities are close to zero. We can enforze a block diagonal structure be setting to zero the small similarity values. For instance, by thresholding ${\bf K}s$ with threshold $t$, we get the truncated (and sorted) affinity matrix $$ \overline{K}{s,ij} = K_{s,ij} \cdot \text{u}(K_{s,ij} - t) $$ (where $\text{u}()$ is the step function) which is block diagonal. Exercise 3: Compute the truncated and sorted affinity matrix with $t=0.001$ End of explanation """ G = nx.from_numpy_matrix(Kt) graphplot = nx.draw(G, X, node_size=40, width=0.5,) plt.axis('equal') plt.show() """ Explanation: 3. Affinity matrix and data graph Any similarity matrix defines a weighted graph in such a way that the weight of the edge linking ${\bf x}i$ and ${\bf x}_j$ is $K{ij}$. If $K$ is a full matrix, the graph is fully connected (there is and edge connecting every pair of nodes). But we can get a more interesting sparse graph by setting to zero the edges with a small weights. For instance, let us visualize the graph for the truncated affinity matrix $\overline{\bf K}$ with threshold $t$. You can also check the effect of increasing or decreasing $t$. 
End of explanation """ Dst = np.diag(np.sum(Kst, axis=1)) Lst = Dst - Kst # Next, we compute the eigenvalues of the matrix w = np.linalg.eigvalsh(Lst) plt.figure() plt.plot(w, marker='.'); plt.title('Eigenvalues of the matrix') plt.show() """ Explanation: Note that, for this dataset, the graph connects edges from the same cluster only. Therefore, the number of diagonal blocks in $\overline{\bf K}_s$ is equal to the number of connected components in the graph. Note, also, the graph does not depend on the sample ordering in the data matrix: the graphs for any matrix ${\bf K}$ and its sorted version ${\bf K}_s$ are the same. 4. The Laplacian matrix The <a href = https://en.wikipedia.org/wiki/Laplacian_matrix>Laplacian matrix</a> of a given affinity matrix ${\bf K}$ is given by $${\bf L} = {\bf D} - {\bf K}$$ where ${\bf D}$ is the diagonal degree matrix given by $$D_{ii}=\sum^{n}{j} K{ij}$$ 4.1. Properties of the Laplacian matrix The Laplacian matrix of any symmetric matrix ${\bf K}$ has several interesting properties: P1. ${\bf L}$ is symmetric and positive semidefinite. Therefore, all its eigenvalues $\lambda_0,\ldots, \lambda_{N-1}$ are non-negative. Remind that each eigenvector ${\bf v}$ with eigenvalue $\lambda$ satisfies $${\bf L} \cdot {\bf v} = \lambda {\bf v}$$ P2. ${\bf L}$ has at least one eigenvector with zero eigenvalue: indeed, for ${\bf v} = {\bf 1}_N = (1, 1, \ldots, 1)^\intercal$ we get $${\bf L} \cdot {\bf 1}_N = {\bf 0}_N$$ where ${\bf 0}_N$ is the $N$ dimensional all-zero vector. P3. If ${\bf K}$ is block diagonal, its Laplacian is block diagonal. P4. 
If ${\bf L}$ is a block diagonal with blocks ${\bf L}0, {\bf L}_1, \ldots, {\bf L}{c-1}$, then it has at least $c$ orthogonal eigenvectors with zero eigenvalue: indeed, each block ${\bf L}_i$ is the Laplacian matrix of the graph containing the samples in the $i$ connected component, therefore, according to property P2, $${\bf L}i \cdot {\bf 1}{N_i} = {\bf 0}_{N_i}$$ where $N_i$ is the number of samples in the $i$-th connected component. Therefore, if $${\bf v}i = \left(\begin{array}{l} {\bf 0}{N_0} \ \vdots \ {\bf 0}{N{i-1}} \ {\bf 1}{N_i} \ {\bf 0}{N_{i+1}} \ \vdots \ {\bf 0}{N{c-1}} \end{array} \right) $$ then $${\bf L} \cdot {\bf v}{i} = {\bf 0}{N}$$ We can compute the Laplacian matrix for the given dataset and visualize the eigenvalues: End of explanation """ # <SOL> print(np.linalg.norm(Lst.dot(np.ones((N,1))))) for i in range(nc): vi = (ys==i) print(np.linalg.norm(Lst.dot(vi))) # </SOL> """ Explanation: Exercise 4: Verify that ${\bf 1}N$ is an eigenvector with zero eigenvalues. To do so, compute ${\bf L}{st} \cdot {\bf 1}_N$ and verify that its <a href= https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html>euclidean norm</a> is close to zero (it may be not exactly zero due to finite precission errors). Verify that vectors ${\bf v}_i$ defined above (that you can compute using vi = (ys==i)) also have zero eigenvalue. End of explanation """ # <SOL> Dt = np.diag(np.sum(Kt, axis=1)) Lt = Dt - Kt print(np.linalg.norm(Lt.dot(np.ones((N,1))))) for i in range(nc): vi = (y==i) print(np.linalg.norm(Lt.dot(vi))) # </SOL> """ Explanation: Exercise 5: Verify that the spectral properties of the Laplacian matrix computed from ${\bf K}{st}$ still apply using the unsorted matrix, ${\bf K}_t$: compute ${\bf L}{t} \cdot {\bf v}'_{i}$, where ${\bf v}'_i$ is a binary vector with components equal to 1 at the positions corresponding to samples in cluster $i$ (that you can compute using vi = (y==i))), and verify that its euclidean norm is close to zero. 
End of explanation """ wst, vst = np.linalg.eigh(Lst) for n in range(nc): plt.plot(vst[:,n], '.-') plt.imshow(vst[:,:nc], aspect='auto') plt.grid(False) plt.title('Display of first 4 eigenvectors of Ks') """ Explanation: Note that the position of 1's in eigenvectors ${\bf v}_i$ points out the samples in the $i$-th connected component. This suggest the following tentative clustering algorithm: Compute the affinity matrix Compute the laplacian matrix Compute $c$ orthogonal eigenvectors with zero eigenvalue If $v_{in}=1$, assign ${\bf x}_n$ to cluster $i$. This is the grounding idea of some spectral clustering algorithms. In this precise form, this algorithm does not usually work, for several reasons that we will discuss next, but with some modifications it becomes a powerfull method. 4.2. Computing eigenvectors of the Laplacian Matrix One of the reasons why the algorithm above may not work is that vectors ${\bf v}'0, \ldots,{\bf v}'{c-1}$ are not the only zero eigenvectors or ${\bf L}_t$: any linear combination of them is also a zero eigenvector. Eigenvector computation algorithms may return a different set of orthogonal eigenvectors. However, one can expect that eigenvector should have similar component in the positions corresponding to samples in the same connected component. End of explanation """ # <SOL> D = np.diag(np.sum(K, axis=1)) L = D - K w, v = np.linalg.eigh(L) for n in range(nc): plt.plot(v[:,n], '.-') # </SOL> """ Explanation: 4.3. Non block diagonal matrices. Another reason to modify our tentative algorithm is that, in more realistic cases, the affinity matrix may have an imperfect block diagonal structure. In such cases, the smallest eigenvalues may be nonzero and eigenvectors may be not exactly piecewise constant. Exercise 6 Plot the eigenvector profile for the shuffled and not thresholded affinity matrix, ${\bf K}$. 
End of explanation """ # <SOL> g = 20 t = 0.1 K2 = rbf_kernel(X2, X2, gamma=g) K2t = K2*(K2>t) G2 = nx.from_numpy_matrix(K2t) graphplot = nx.draw(G2, X2, node_size=40, width=0.5) plt.axis('equal') plt.show() # </SOL> """ Explanation: Note that, despite the eigenvector components can not be used as a straighforward cluster indicator, they are strongly informative of the clustering structure. All points in the same cluster have similar values of the corresponding eigenvector components $(v_{n0}, \ldots, v_{n,c-1})$. Points from different clusters have different values of the corresponding eigenvector components $(v_{n0}, \ldots, v_{n,c-1})$. Therefore we can define vectors ${\bf z}n = (v{n0}, \ldots, v_{n,c-1})$ and apply a centroid based algorithm (like $K$-means) to identify all points with similar eigenvector components. The corresponding samples in ${\bf X}$ become the final clusters of the spectral clustering algorithm. One possible way to identify the cluster structure is to apply a $K$-means algorithm over the eigenvector coordinates. The steps of the spectral clustering algorithm become the following 5. A spectral clustering (graph cutting) algorithm 5.1. The steps of the spectral clustering algorithm. Summarizing, the steps of the spectral clustering algorithm for a data matrix ${\bf X}$ are the following: Compute the affinity matrix, ${\bf K}$. Optionally, truncate the smallest components to zero. Compute the laplacian matrix, ${\bf L}$ Compute the $c$ orthogonal eigenvectors with smallest eigenvalues, ${\bf v}0,\ldots,{\bf v}{c-1}$ Construct the sample set ${\bf Z}$ with rows ${\bf z}n = (v{0n}, \ldots, v_{c-1,n})$ Apply the $K$-means algorithms over ${\bf Z}$ with $K=c$ centroids. Assign samples in ${\bf X}$ to clusters: if ${\bf z}_n$ is assigned by $K$-means to cluster $i$, assign sample ${\bf x}_n$ in ${\bf X}$ to cluster $i$. 
Exercise 7: In this exercise we will apply the spectral clustering algorithm to the two-rings dataset ${\bf X}_2$, using $\gamma = 20$, $t=0.1$ and $c = 2$ clusters. Complete step 1, and plot the graph induced by ${\bf K}$ End of explanation """ # <SOL> D2t = np.diag(np.sum(K2t, axis=1)) L2t = D2t - K2t w2t, v2t = np.linalg.eigh(L2t) Z2t = v2t[:,0:2] plt.scatter(Z2t[:,0], Z2t[:,1], s=20) plt.show() # </SOL> """ Explanation: Complete step 2, 3 and 4, and draw a scatter plot of the samples in ${\bf Z}$ End of explanation """ est = KMeans(n_clusters=2) clusters = est.fit_predict(Z2t) """ Explanation: Complete step 5 End of explanation """ plt.scatter(X2[:, 0], X2[:, 1], c=clusters, s=50, cmap='rainbow') plt.axis('equal') plt.show() """ Explanation: Finally, complete step 6 and show, in a scatter plot, the result of the clustering algorithm End of explanation """ n_clusters = 4 gamma = .1 # Warning do not exceed gamma=100 SpClus = SpectralClustering(n_clusters=n_clusters,affinity='rbf', gamma=gamma) SpClus.fit(X) plt.scatter(X[:, 0], X[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow') plt.axis('equal') plt.show() nc = 2 gamma = 50 #Warning do not exceed gamma=300 SpClus = SpectralClustering(n_clusters=nc, affinity='rbf', gamma=gamma) SpClus.fit(X2) plt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow') plt.axis('equal') plt.show() nc = 5 SpClus = SpectralClustering(n_clusters=nc, affinity='nearest_neighbors') SpClus.fit(X2) plt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow') plt.axis('equal') plt.show() """ Explanation: 5.2. Scikit-learn implementation. The <a href=http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html> spectral clustering algorithm </a> in Scikit-learn requires the number of clusters to be specified. It works well for a small number of clusters but is not advised when using many clusters and/or data. 
Finally, we are going to run spectral clustering on both datasets. Spend a few minutes figuring out the meaning of the parameters of the Spectral Clustering implementation of Scikit-learn: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html Note that there is no equivalent parameter to our threshold $t$, which has been useful for the graph representations. However, playing with $\gamma$ should be enough to get a good clustering. The following piece of code executes the algorithm with an 'rbf' kernel. You can manually adjust the number of clusters and the parameter of the kernel to study the behavior of the algorithm. When you are done, you can also: Modify the code to allow for kernels different from the 'rbf' Repeat the analysis for the second dataset (two_rings) End of explanation """
carltoews/tennis
results/DI_plot2.ipynb
gpl-3.0
from IPython.display import display, HTML display(HTML('''<img src="image2.png",width=800,height=500">''')) """ Explanation: Plot 1: Rate on investment under different betting strategies End of explanation """ import numpy as np # numerical libraries import pandas as pd # for data analysis import matplotlib as mpl # a big library with plotting functionality import matplotlib.pyplot as plt # a subset of matplotlib with most of the useful tools import IPython as IP %matplotlib inline import pdb from sklearn import linear_model as lm """ Explanation: Description: The natural metric for predictive success in tennis is whether or not the prediction algorithm can make money against the betting markets. This clearly depends on a betting strategy. The plot below sketches results under three different betting regimes: Strategy 1: Bet a fixed amount $q$ on the higher ranked player, regardless of the difference in ranking points. In this simulation, $q$ is taken to be $\$1$. Strategy 2: Bet a fixed amount $q$ on the higher ranked player, but only if our assessed probability, say $p_1$, exceeds the implied probability of the odds market, say $p_2$. Strategy 3: Bet an amount that grows or shrinks depending on by how much our probability exceeds that of the implied probabilty. We use the Kelly criterion, where the amount staked is given by $$ q\frac{p \cdot (b+1)-1}{b} $$ for some parameter $b$. To generate this plot, we first run logistic regression on a training set, and use the resulting probability curve, together with the implied probabilities from the betting markets, to implement the above three betting strategies on the validation set. While odds are published from a variety of betting houses, we use the ones from Pinnacle, whihc tend to be highest. In all these simulations, we assume that the bettor starts with enough money to bankroll the entire series of matches. 
Code for producing the plot: End of explanation """ odds= pd.read_pickle('../pickle_files/odds.pkl') matches= pd.read_pickle('../pickle_files/matches.pkl') data = pd.merge(matches,odds[['PSW','PSL','key']].dropna(axis=0,subset=["PSW"]),how='inner',on='key') data = data[~data.winner_rank_points.isnull() & ~data.loser_rank_points.isnull()] data['year'] = data['tourney_date'].map(lambda x: x.year) training = data[data.year.isin([2010,2011,2012])] validation = data[data.year.isin([2013,2014])] test = data[data.year.isin([2015,2016])] # consider rank difference to be positive if winner higher ranked, otherwise negative rank_diff = (training['winner_rank_points'] - training['loser_rank_points']).values # if higher ranked player won, raw rank was a successful predictor y = (rank_diff > 0)*1 # predictions done *before* the match, so algorithm operates on absolute value of rank difference X = np.abs(rank_diff) # for numerical well-behavedness, we need to scale and center the data X=(X/np.std(X,axis=0)) """ Explanation: Load and preprocess the data. End of explanation """ lr = lm.LogisticRegression(C=1., solver='lbfgs') lr.fit(X.reshape(len(X),-1),y*1) r = lr.coef_[0] """ Explanation: Perform the logistic regression. 
End of explanation """ investment = len(validation) good_call_idx1 = (validation['loser_rank_points']-validation['winner_rank_points']<0).values winner_odds = validation["PSW"].values daily_gains1 = winner_odds*good_call_idx1 daily_cost1 = -np.ones(len(validation)) daily_movement1 = daily_cost1 + daily_gains1 daily_balances1=np.zeros(len(validation)) daily_balances1[0]=investment for i in np.arange(1,len(validation)): daily_balances1[i]+=daily_balances1[i-1]+daily_movement1[i] plot_bnd = len(validation) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(111) ax.plot(daily_balances1[0:plot_bnd]) x = np.arange(len(validation)) tick = np.ones(len(validation)) ax.plot(x[0:plot_bnd],investment*tick[0:plot_bnd],'--r') %qtconsole """ Explanation: Strategy 1: bet fixed amount on predicted winner. End of explanation """ # normalize the data rank_diff = (validation['winner_rank_points'] - validation['loser_rank_points']).values y = (rank_diff > 0) X = np.abs(rank_diff) X=(X/np.std(X,axis=0)) # figure out on whom you would have bet, and calculate implied odds ProbW = 1/validation.PSW ProbL = 1/validation.PSL compProbs = np.where(y,ProbW,ProbL) # bet if your odds are higher myProbs = 1/(1+np.exp(-r*X)) bet_placed_idx = (myProbs>compProbs) good_call_idx2 = (validation['loser_rank_points']-validation['winner_rank_points']<0).values daily_cost2 = np.zeros(len(validation)) daily_cost2[bet_placed_idx]=-1 winner_odds = validation["PSW"].values daily_gains2 = winner_odds*(bet_placed_idx*good_call_idx2) daily_movement2 = daily_cost2 + daily_gains2 daily_balances2=np.zeros(len(validation)) daily_balances2[0]=investment for i in np.arange(1,len(validation)): daily_balances2[i]+=daily_balances2[i-1]+daily_movement2[i] plot_bnd = len(validation) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(111) ax.plot(daily_balances2[0:plot_bnd]) x = np.arange(len(validation)) tick = np.ones(len(validation)) ax.plot(x[0:plot_bnd],investment*tick[0:plot_bnd],'--r') """ Explanation: Strategy 2: 
bet fixed amount if predicted probability exceeds implied probability End of explanation """ # modify bet if your odds are higher b = 1.5 # arbitrary--futz with this myProbs = 1/(1+np.exp(-r*X)) myBets = (myProbs*(b+1)-1)/b bet_placed_idx = (myProbs>compProbs) good_call_idx3 = (validation['loser_rank_points']-validation['winner_rank_points']<0).values daily_cost3 = np.zeros(len(validation)) daily_cost3[bet_placed_idx]= -myBets[bet_placed_idx] winner_odds = validation["PSW"].values daily_gains3 = (winner_odds*(bet_placed_idx*good_call_idx3))*myBets daily_movement3 = daily_cost3 + daily_gains3 daily_balances3=np.zeros(len(validation)) daily_balances3[0]=investment for i in np.arange(1,len(validation)): daily_balances3[i]+=daily_balances3[i-1]+daily_movement3[i] plot_bnd = len(validation) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(111) ax.plot(daily_balances3[0:plot_bnd]) x = np.arange(len(validation)) tick = np.ones(len(validation)) ax.plot(x[0:plot_bnd],investment*tick[0:plot_bnd],'--r') """ Explanation: Strategy 3: Kelly criterion End of explanation """ plot_bnd = len(validation) fig = plt.figure(figsize=(15,5)) ax = fig.add_subplot(111) ax.plot(daily_balances1[0:plot_bnd]) ax.plot(daily_balances2[0:plot_bnd]) ax.plot(daily_balances3[0:plot_bnd]) x = np.arange(len(validation)) tick = np.ones(len(validation)) ax.plot(x[0:plot_bnd],investment*tick[0:plot_bnd],'--r') ax.set_xlabel('Time-ordered individual matches in the validation set',fontsize=16) ax.set_ylabel('Net holdings',fontsize=16) ax.set_title('Daily holdings under three different betting strategies, using logistic prediction',fontsize=16) ax.legend(['Strategy 1','Strategy 2','Strategy 3']) """ Explanation: Put the three plots together and polish things up. End of explanation """
y2ee201/Deep-Learning-Nanodegree
sentiment_network/Sentiment Classification - How to Best Frame a Problem for a Neural Network (Project 2).ipynb
mit
def pretty_print_review_and_label(i): print(labels[i] + "\t:\t" + reviews[i][:80] + "...") g = open('reviews.txt','r') # What we know! reviews = list(map(lambda x:x[:-1],g.readlines())) g.close() g = open('labels.txt','r') # What we WANT to know! labels = list(map(lambda x:x[:-1].upper(),g.readlines())) g.close() len(reviews) reviews[0] labels[0] """ Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network by Andrew Trask Twitter: @iamtrask Blog: http://iamtrask.github.io What You Should Already Know neural networks, forward and back-propagation stochastic gradient descent mean squared error and train/test splits Where to Get Help if You Need it Re-watch previous Udacity Lectures Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17) Shoot me a tweet @iamtrask Tutorial Outline: Intro: The Importance of "Framing a Problem" Curate a Dataset Developing a "Predictive Theory" PROJECT 1: Quick Theory Validation Transforming Text to Numbers PROJECT 2: Creating the Input/Output Data Putting it all together in a Neural Network PROJECT 3: Building our Neural Network Understanding Neural Noise PROJECT 4: Making Learning Faster by Reducing Noise Analyzing Inefficiencies in our Network PROJECT 5: Making our Network Train and Run Faster Further Noise Reduction PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary Analysis: What's going on in the weights? 
Lesson: Curate a Dataset End of explanation """ print("labels.txt \t : \t reviews.txt\n") pretty_print_review_and_label(2137) pretty_print_review_and_label(12816) pretty_print_review_and_label(6267) pretty_print_review_and_label(21934) pretty_print_review_and_label(5297) pretty_print_review_and_label(4998) """ Explanation: Lesson: Develop a Predictive Theory End of explanation """ from collections import Counter import numpy as np positive_counts = Counter() negative_counts = Counter() total_counts = Counter() for i in range(len(reviews)): if(labels[i] == 'POSITIVE'): for word in reviews[i].split(" "): positive_counts[word] += 1 total_counts[word] += 1 else: for word in reviews[i].split(" "): negative_counts[word] += 1 total_counts[word] += 1 positive_counts.most_common() pos_neg_ratios = Counter() for term,cnt in list(total_counts.most_common()): if(cnt > 100): pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1) pos_neg_ratios[term] = pos_neg_ratio for word,ratio in pos_neg_ratios.most_common(): if(ratio > 1): pos_neg_ratios[word] = np.log(ratio) else: pos_neg_ratios[word] = -np.log((1 / (ratio+0.01))) # words most frequently seen in a review with a "POSITIVE" label pos_neg_ratios.most_common() # words most frequently seen in a review with a "NEGATIVE" label list(reversed(pos_neg_ratios.most_common()))[0:30] """ Explanation: Project 1: Quick Theory Validation End of explanation """ from IPython.display import Image review = "This was a horrible, terrible movie." 
Image(filename='sentiment_network.png') review = "The movie was excellent" Image(filename='sentiment_network_pos.png') """ Explanation: Transforming Text into Numbers End of explanation """ vocab = set(total_counts.keys()) vocab_size = len(vocab) print(vocab_size) list(vocab) import numpy as np layer_0 = np.zeros((1,vocab_size)) layer_0 from IPython.display import Image Image(filename='sentiment_network.png') word2index = {} for i,word in enumerate(vocab): word2index[word] = i word2index def update_input_layer(review): global layer_0 # clear out previous state, reset the layer to be all 0s layer_0 *= 0 for word in review.split(" "): layer_0[0][word2index[word]] += 1 update_input_layer(reviews[0]) layer_0 def get_target_for_label(label): if(label == 'POSITIVE'): return 1 else: return 0 labels[0] get_target_for_label(labels[0]) labels[1] get_target_for_label(labels[1]) """ Explanation: Project 2: Creating the Input/Output Data End of explanation """
transcranial/keras-js
notebooks/layers/pooling/AveragePooling3D.ipynb
mit
data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(290) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.0'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: AveragePooling3D [pooling.AveragePooling3D.0] input 4x4x4x2, pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(291) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.1'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.1] input 
4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='valid', data_format='channels_last' End of explanation """ data_in_shape = (4, 5, 2, 3) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(282) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.2'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.2] input 4x5x2x3, pool_size=(2, 2, 2), strides=(2, 1, 1), padding='valid', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(283) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.3'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': 
data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.3] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='valid', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(284) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.4'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.4] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='valid', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(285) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) 
DATA['pooling.AveragePooling3D.5'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.5] input 4x4x4x2, pool_size=(2, 2, 2), strides=None, padding='same', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(286) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.6'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.6] input 4x4x4x2, pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same', data_format='channels_last' End of explanation """ data_in_shape = (4, 5, 4, 2) L = AveragePooling3D(pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(287) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) 
print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.7'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.7] input 4x5x4x2, pool_size=(2, 2, 2), strides=(1, 2, 1), padding='same', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(288) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.8'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.8] input 4x4x4x2, pool_size=(3, 3, 3), strides=None, padding='same', data_format='channels_last' End of explanation """ data_in_shape = (4, 4, 4, 2) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(289) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) 
data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.9'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.9] input 4x4x4x2, pool_size=(3, 3, 3), strides=(3, 3, 3), padding='same', data_format='channels_last' End of explanation """ data_in_shape = (2, 3, 3, 4) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(290) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.10'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.10] input 2x3x3x4, pool_size=(3, 3, 3), strides=(2, 2, 2), padding='valid', data_format='channels_first' End of explanation """ data_in_shape = (2, 3, 3, 4) L = AveragePooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(291) data_in = 2 * np.random.random(data_in_shape) - 1 result = 
model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.11'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.11] input 2x3x3x4, pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same', data_format='channels_first' End of explanation """ data_in_shape = (3, 4, 4, 3) L = AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first') layer_0 = Input(shape=data_in_shape) layer_1 = L(layer_0) model = Model(inputs=layer_0, outputs=layer_1) # set weights to random (use seed for reproducibility) np.random.seed(292) data_in = 2 * np.random.random(data_in_shape) - 1 result = model.predict(np.array([data_in])) data_out_shape = result[0].shape data_in_formatted = format_decimal(data_in.ravel().tolist()) data_out_formatted = format_decimal(result[0].ravel().tolist()) print('') print('in shape:', data_in_shape) print('in:', data_in_formatted) print('out shape:', data_out_shape) print('out:', data_out_formatted) DATA['pooling.AveragePooling3D.12'] = { 'input': {'data': data_in_formatted, 'shape': data_in_shape}, 'expected': {'data': data_out_formatted, 'shape': data_out_shape} } """ Explanation: [pooling.AveragePooling3D.12] input 3x4x4x3, pool_size=(2, 2, 2), strides=None, padding='valid', data_format='channels_first' End of explanation """ import os filename = '../../../test/data/layers/pooling/AveragePooling3D.json' if not os.path.exists(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) with open(filename, 'w') as f: json.dump(DATA, f) print(json.dumps(DATA)) """ Explanation: export for 
Keras.js tests End of explanation """
dschick/udkm1Dsimpy
docs/source/examples/phonons.ipynb
gpl-3.0
import udkm1Dsim as ud u = ud.u # import the pint unit registry from udkm1Dsim import scipy.constants as constants import numpy as np import matplotlib.pyplot as plt %matplotlib inline u.setup_matplotlib() # use matplotlib with pint units """ Explanation: Phonons In this example coherent acoustic phonon dynamics are calculated according to the results of the heat simulations. Setup Do all necessary imports and settings. End of explanation """ O = ud.Atom('O') Ti = ud.Atom('Ti') Sr = ud.Atom('Sr') Ru = ud.Atom('Ru') Pb = ud.Atom('Pb') Zr = ud.Atom('Zr') # c-axis lattice constants of the two layers c_STO_sub = 3.905*u.angstrom c_SRO = 3.94897*u.angstrom # sound velocities [nm/ps] of the two layers sv_SRO = 6.312*u.nm/u.ps sv_STO = 7.800*u.nm/u.ps # SRO layer prop_SRO = {} prop_SRO['a_axis'] = c_STO_sub # aAxis prop_SRO['b_axis'] = c_STO_sub # bAxis prop_SRO['deb_Wal_Fac'] = 0 # Debye-Waller factor prop_SRO['sound_vel'] = sv_SRO # sound velocity prop_SRO['opt_ref_index'] = 2.44+4.32j prop_SRO['therm_cond'] = 5.72*u.W/(u.m *u.K) # heat conductivity prop_SRO['lin_therm_exp'] = 1.03e-5 # linear thermal expansion prop_SRO['heat_capacity'] = '455.2 + 0.112*T - 2.1935e6/T**2' # heat capacity [J/kg K] SRO = ud.UnitCell('SRO', 'Strontium Ruthenate', c_SRO, **prop_SRO) SRO.add_atom(O, 0) SRO.add_atom(Sr, 0) SRO.add_atom(O, 0.5) SRO.add_atom(O, 0.5) SRO.add_atom(Ru, 0.5) # STO substrate prop_STO_sub = {} prop_STO_sub['a_axis'] = c_STO_sub # aAxis prop_STO_sub['b_axis'] = c_STO_sub # bAxis prop_STO_sub['deb_Wal_Fac'] = 0 # Debye-Waller factor prop_STO_sub['sound_vel'] = sv_STO # sound velocity prop_STO_sub['opt_ref_index'] = 2.1+0j prop_STO_sub['therm_cond'] = 12*u.W/(u.m *u.K) # heat conductivity prop_STO_sub['lin_therm_exp'] = 1e-5 # linear thermal expansion prop_STO_sub['heat_capacity'] = '733.73 + 0.0248*T - 6.531e6/T**2' # heat capacity [J/kg K] STO_sub = ud.UnitCell('STOsub', 'Strontium Titanate Substrate', c_STO_sub, **prop_STO_sub) STO_sub.add_atom(O, 0) 
STO_sub.add_atom(Sr, 0) STO_sub.add_atom(O, 0.5) STO_sub.add_atom(O, 0.5) STO_sub.add_atom(Ti, 0.5) S = ud.Structure('Single Layer') S.add_sub_structure(SRO, 100) # add 100 layers of SRO to sample S.add_sub_structure(STO_sub, 2000) # add 1000 layers of STO substrate """ Explanation: Structure Refer to the structure-example for more details. End of explanation """ h = ud.Heat(S, True) h.save_data = False h.disp_messages = True h.excitation = {'fluence': [5]*u.mJ/u.cm**2, 'delay_pump': [0]*u.ps, 'pulse_width': [0]*u.ps, 'multilayer_absorption': True, 'wavelength': 800*u.nm, 'theta': 45*u.deg} # temporal and spatial grid delays = np.r_[-10:90:0.1]*u.ps _, _, distances = S.get_distances_of_layers() temp_map, delta_temp_map = h.get_temp_map(delays, 300*u.K) plt.figure(figsize=[6, 8]) plt.subplot(2, 1, 1) plt.plot(distances.to('nm').magnitude, temp_map[101, :]) plt.xlim([0, distances.to('nm').magnitude[-1]]) plt.xlabel('Distance [nm]') plt.ylabel('Temperature [K]') plt.title('Temperature Profile') plt.subplot(2, 1, 2) plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map, shading='auto') plt.colorbar() plt.xlabel('Distance [nm]') plt.ylabel('Delay [ps]') plt.title('Temperature Map') plt.tight_layout() plt.show() """ Explanation: Heat Refer to the heat-example for more details. 
End of explanation """ pana = ud.PhononAna(S, True) pana.save_data = False pana.disp_messages = True strain_map, A, B = pana.get_strain_map(delays, temp_map, delta_temp_map) plt.figure(figsize=[6, 8]) plt.subplot(2, 1, 1) plt.plot(distances.to('nm').magnitude, strain_map[130, :], label=np.round(delays[130])) plt.plot(distances.to('nm').magnitude, strain_map[350, :], label=np.round(delays[350])) plt.xlim([0, distances.to('nm').magnitude[-1]]) plt.xlabel('Distance [nm]') plt.ylabel('Strain') plt.legend() plt.title('Analytical Strain Profile') plt.subplot(2, 1, 2) plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, strain_map, cmap='RdBu', vmin=-np.max(strain_map), vmax=np.max(strain_map), shading='auto') plt.colorbar() plt.xlabel('Distance [nm]') plt.ylabel('Delay [ps]') plt.title('Analytical Strain Map') plt.tight_layout() plt.show() """ Explanation: Analytical Phonons The PhononAna class requires a Structure object and a boolean force_recalc in order overwrite previous simulation results. These results are saved in the cache_dir when save_data is enabled. Printing simulation messages can be en-/disabled using disp_messages and progress bars can using the boolean switch progress_bar. End of explanation """ omega, E = pana.get_energy_per_eigenmode(A, B) plt.figure() plt.plot(omega, E[-1, :]) plt.xlim(omega[0], omega[-1]) plt.xscale('log') plt.xlabel('Frequency [Hz]') plt.ylabel('Energy [J]') plt.title('Analytical Energy Spectrum') plt.show() """ Explanation: Energy Spectrum The analytical phonon model easily allows for calculating the energy per eigenmode of the coherent acoustic phonon spectrum for every delay of the simulation. End of explanation """ pnum = ud.PhononNum(S, True) pnum.save_data = False pnum.disp_messages = True """ Explanation: Numerical Phonons The PhononNum class requires a Structure object and a boolean force_recalc in order overwrite previous simulation results. 
These results are saved in the cache_dir when save_data is enabled. Printing simulation messages can be en-/disabled using disp_messages and progress bars can using the boolean switch progress_bar. End of explanation """ strain_map = pnum.get_strain_map(delays, temp_map, delta_temp_map) plt.figure(figsize=[6, 8]) plt.subplot(2, 1, 1) plt.plot(distances.to('nm').magnitude, strain_map[130, :], label=np.round(delays[130])) plt.plot(distances.to('nm').magnitude, strain_map[350, :], label=np.round(delays[350])) plt.xlim([0, distances.to('nm').magnitude[-1]]) plt.xlabel('Distance [nm]') plt.ylabel('Strain') plt.legend() plt.title('Numerical Strain Profile') plt.subplot(2, 1, 2) plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, strain_map, cmap='RdBu', vmin=-np.max(strain_map), vmax=np.max(strain_map), shading='auto') plt.colorbar() plt.xlabel('Distance [nm]') plt.ylabel('Delay [ps]') plt.title('Numerical Strain Map') plt.tight_layout() plt.show() """ Explanation: The actual calculation is done in one line: End of explanation """ STO_sub.phonon_damping = -1e10*u.kg/u.s STO_sub.set_ho_spring_constants([-7e11]) """ Explanation: Anharmonic Phonon Propagation The numerical phonon dynamic calculations also allow for phonon damping and non-linear phonon propagation. This can be achieved by setting the phonon_damping property and using the set_ho_spring_constants() method of the according layers. 
End of explanation """ strain_map = pnum.get_strain_map(delays, temp_map, delta_temp_map) plt.figure(figsize=[6, 8]) plt.subplot(2, 1, 1) plt.plot(distances.to('nm').magnitude, strain_map[130, :], label=np.round(delays[130])) plt.plot(distances.to('nm').magnitude, strain_map[350, :], label=np.round(delays[350])) plt.plot(distances.to('nm').magnitude, strain_map[-1, :], label=np.round(delays[-1])) plt.xlim([0, distances.to('nm').magnitude[-1]]) plt.xlabel('Distance [nm]') plt.ylabel('Strain') plt.legend() plt.title('Anharmonic Strain Profile') plt.subplot(2, 1, 2) plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, strain_map, cmap='RdBu', vmin=-np.max(strain_map), vmax=np.max(strain_map), shading='auto') plt.colorbar() plt.xlabel('Distance [nm]') plt.ylabel('Delay [ps]') plt.title('Anharmonic Strain Map') plt.tight_layout() plt.show() """ Explanation: Recalculate the coherent phonon dynamics: End of explanation """
edeno/Jadhav-2016-Data-Analysis
notebooks/2017_06_14_Test_Spectral_Single_Session.ipynb
gpl-3.0
import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import xarray as xr from src.data_processing import (get_LFP_dataframe, make_tetrode_dataframe, make_tetrode_pair_info, reshape_to_segments) from src.parameters import (ANIMALS, SAMPLING_FREQUENCY, MULTITAPER_PARAMETERS, FREQUENCY_BANDS, RIPPLE_COVARIATES, ALPHA) from src.analysis import (decode_ripple_clusterless, detect_epoch_ripples, is_overlap, _subtract_event_related_potential) """ Explanation: Purpose The purpose of this notebook is to work out the data structure for saving the computed results for a single session. Here we are using the xarray package to structure the data, because: It is built to handle large multi-dimensional data (orginally for earth sciences data). It allows you to call dimensions by name (time, frequency, etc). The plotting functions are convenient for multi-dimensional data (it has convenient heatmap plotting). It can output to HDF5 (via the netcdf format, a geosciences data format), which is built for handling large data in a descriptive (i.e. can label units, add information about how data was constructed, etc.). Lazily loads data so large datasets that are too big for memory can be handled (via dask). Previously, I was using the pandas package in python and this wasn't handling the loading and combining of time-frequency data. In particular, the size of the data was problematic even on the cluster and this was frustrating to debug. pandas now recommends the usage of xarray for multi-dimesional data. 
End of explanation """ epoch_key = ('HPa', 6, 2) ripple_times = detect_epoch_ripples( epoch_key, ANIMALS, sampling_frequency=SAMPLING_FREQUENCY) tetrode_info = make_tetrode_dataframe(ANIMALS)[epoch_key] tetrode_info = tetrode_info[ ~tetrode_info.descrip.str.endswith('Ref').fillna(False)] tetrode_pair_info = make_tetrode_pair_info(tetrode_info) lfps = {tetrode_key: get_LFP_dataframe(tetrode_key, ANIMALS) for tetrode_key in tetrode_info.index} from copy import deepcopy from functools import partial, wraps multitaper_parameter_name = '4Hz_Resolution' multitaper_params = MULTITAPER_PARAMETERS[multitaper_parameter_name] num_lfps = len(lfps) num_pairs = int(num_lfps * (num_lfps - 1) / 2) params = deepcopy(multitaper_params) window_of_interest = params.pop('window_of_interest') reshape_to_trials = partial( reshape_to_segments, sampling_frequency=params['sampling_frequency'], window_offset=window_of_interest, concat_axis=1) ripple_locked_lfps = pd.Panel({ lfp_name: _subtract_event_related_potential( reshape_to_trials(lfps[lfp_name], ripple_times)) for lfp_name in lfps}) from src.spectral.connectivity import Connectivity from src.spectral.transforms import Multitaper m = Multitaper( np.rollaxis(ripple_locked_lfps.values, 0, 3), **params, start_time=ripple_locked_lfps.major_axis.min()) c = Connectivity( fourier_coefficients=m.fft(), frequencies=m.frequencies, time=m.time) """ Explanation: Go through the steps to get the ripple triggered connectivity End of explanation """ n_lfps = len(lfps) ds = xr.Dataset( {'coherence_magnitude': (['time', 'frequency', 'tetrode1', 'tetrode2'], c.coherence_magnitude()), 'pairwise_spectral_granger_prediction': (['time', 'frequency', 'tetrode1', 'tetrode2'], c.pairwise_spectral_granger_prediction())}, coords={'time': c.time + np.diff(c.time)[0] / 2, 'frequency': c.frequencies + np.diff(c.frequencies)[0] / 2, 'tetrode1': tetrode_info.tetrode_id.values, 'tetrode2': tetrode_info.tetrode_id.values, 'brain_area1': ('tetrode1', 
tetrode_info.area.tolist()), 'brain_area2': ('tetrode2', tetrode_info.area.tolist()), 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]), } ) ds """ Explanation: Make an xarray dataset for coherence and pairwise spectral granger End of explanation """ ds.sel( tetrode1='HPa621', tetrode2='HPa624', frequency=slice(0, 30)).coherence_magnitude.plot(x='time', y='frequency'); """ Explanation: Show that it is easy to select two individual tetrodes and plot a subset of their frequency for coherence. End of explanation """ ds.sel( tetrode1='HPa621', tetrode2='HPa6220', frequency=slice(0, 30) ).pairwise_spectral_granger_prediction.plot(x='time', y='frequency'); """ Explanation: Show the same thing for spectral granger. End of explanation """ ds['pairwise_spectral_granger_prediction'].sel( frequency=slice(0, 30)).plot(x='time', y='frequency', col='tetrode1', row='tetrode2', robust=True); ds['coherence_magnitude'].sel( frequency=slice(0, 30)).plot(x='time', y='frequency', col='tetrode1', row='tetrode2'); """ Explanation: Now show that we can plot all tetrodes pairs in a dataset End of explanation """ (ds.sel( tetrode1=ds.tetrode1[ds.brain_area1=='CA1'], tetrode2=ds.tetrode2[ds.brain_area2=='PFC'], frequency=slice(0, 30)) .coherence_magnitude .plot(x='time', y='frequency', col='tetrode1', row='tetrode2')); """ Explanation: It is also easy to select a subset of tetrode pairs (in this case all CA1-PFC tetrode pairs). 
End of explanation """ ((ds - ds.isel(time=0)).sel( tetrode1=ds.tetrode1[ds.brain_area1=='CA1'], tetrode2=ds.tetrode2[ds.brain_area2=='PFC'], frequency=slice(0, 30)) .coherence_magnitude .plot(x='time', y='frequency', col='tetrode1', row='tetrode2')); """ Explanation: xarray also makes it easy to compare the difference of a connectivity measure from its baseline (in this case, the baseline is the first time bin) End of explanation """ (ds.sel( tetrode1=ds.tetrode1[ds.brain_area1=='CA1'], tetrode2=ds.tetrode2[ds.brain_area2=='PFC'], frequency=slice(0, 30)) .coherence_magnitude.mean(['tetrode1', 'tetrode2']) .plot(x='time', y='frequency')); """ Explanation: It is also easy to average over the tetrode pairs End of explanation """ ((ds - ds.isel(time=0)).sel( tetrode1=ds.tetrode1[ds.brain_area1=='CA1'], tetrode2=ds.tetrode2[ds.brain_area2=='PFC'], frequency=slice(0, 30)) .coherence_magnitude.mean(['tetrode1', 'tetrode2']) .plot(x='time', y='frequency')); """ Explanation: And also average over the difference End of explanation """ import os path = '{0}_{1:02d}_{2:02d}.nc'.format(*epoch_key) group = '{0}/'.format(multitaper_parameter_name) write_mode = 'a' if os.path.isfile(path) else 'w' ds.to_netcdf(path=path, group=group, mode=write_mode) """ Explanation: Test saving as netcdf file End of explanation """ with xr.open_dataset(path, group=group) as da: da.load() print(da) """ Explanation: Show that we can open the saved dataset and recover the data End of explanation """ n_bands = len(FREQUENCY_BANDS) delay, slope, r_value = (np.zeros((c.time.size, n_bands, m.n_signals, m.n_signals)),) * 3 for band_ind, frequency_band in enumerate(FREQUENCY_BANDS): (delay[:, band_ind, ...], slope[:, band_ind, ...], r_value[:, band_ind, ...]) = c.group_delay( FREQUENCY_BANDS[frequency_band], frequency_resolution=m.frequency_resolution) coordinate_names = ['time', 'frequency_band', 'tetrode1', 'tetrode2'] ds = xr.Dataset( {'delay': (coordinate_names, delay), 'slope': (coordinate_names, 
slope), 'r_value': (coordinate_names, r_value)}, coords={'time': c.time + np.diff(c.time)[0] / 2, 'frequency_band': list(FREQUENCY_BANDS.keys()), 'tetrode1': tetrode_info.tetrode_id.values, 'tetrode2': tetrode_info.tetrode_id.values, 'brain_area1': ('tetrode1', tetrode_info.area.tolist()), 'brain_area2': ('tetrode2', tetrode_info.area.tolist()), 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]), } ) ds['delay'].sel(frequency_band='beta', tetrode1='HPa621', tetrode2='HPa622').plot(); """ Explanation: Make data structure for group delay End of explanation """ canonical_coherence, area_labels = c.canonical_coherence(tetrode_info.area.tolist()) dimension_names = ['time', 'frequency', 'brain_area1', 'brain_area2'] data_vars = {'canonical_coherence': (dimension_names, canonical_coherence)} coordinates = { 'time': c.time + np.diff(c.time)[0] / 2, 'frequency': c.frequencies + np.diff(c.frequencies)[0] / 2, 'brain_area1': area_labels, 'brain_area2': area_labels, 'session': np.array(['{0}_{1:02d}_{2:02d}'.format(*epoch_key)]), } ds = xr.Dataset(data_vars, coords=coordinates) ds.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 30)).canonical_coherence.plot(x='time', y='frequency') """ Explanation: Make data structure for canonical coherence End of explanation """ from src.analysis import ripple_triggered_connectivity for parameters_name, parameters in MULTITAPER_PARAMETERS.items(): ripple_triggered_connectivity( lfps, epoch_key, tetrode_info, ripple_times, parameters, FREQUENCY_BANDS, multitaper_parameter_name=parameters_name, group_name='all_ripples') with xr.open_dataset(path, group='2Hz_Resolution/all_ripples/canonical_coherence') as da: da.load() print(da) da.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 30)).canonical_coherence.plot(x='time', y='frequency') with xr.open_dataset(path, group='10Hz_Resolution/all_ripples/canonical_coherence') as da: da.load() print(da) da.sel(brain_area1='CA1', brain_area2='PFC', frequency=slice(0, 
30)).canonical_coherence.plot(x='time', y='frequency') """ Explanation: Now after adding this code into the code base, test if we can compute, save, and load End of explanation """
mcocdawc/chemcoord
Tutorial/Advanced_customisation.ipynb
lgpl-3.0
cc.configuration.settings """ Explanation: Settings Settings can be seen here: End of explanation """ cc.configuration.write_configuration_file('./example_configuration_file', overwrite=True) %less example_configuration_file """ Explanation: A configuration file can be written with: End of explanation """ !rm example_configuration_file """ Explanation: It is read automatically during startup from '~/.chemcoordrc'. Otherwise it is possible to explicitly call cc.configuration.read_configuration_file(...) End of explanation """ class my_tailored_class(cc.Cartesian): def my_number_one_method(self): return 1 molecule = cc.Cartesian.read_xyz('MIL53_small.xyz') type(molecule) """ Explanation: Inheritance You can safely inherit from the classes in this module End of explanation """ my_molecule = my_tailored_class.read_xyz('MIL53_small.xyz') type(my_molecule) type(my_molecule.get_inertia()['transformed_Cartesian']) my_molecule.get_inertia()['transformed_Cartesian'].my_number_one_method() """ Explanation: Notice how all old methods from Cartesian return an object of your tailored class End of explanation """
albahnsen/CostSensitiveClassification
doc/tutorials/slides_edcs_fraud_detection.ipynb
bsd-3-clause
import pandas as pd import numpy as np from costcla import datasets from costcla.datasets.base import Bunch def load_fraud(cost_mat_parameters=dict(Ca=10)): # data_ = pd.read_pickle("trx_fraud_data.pk") data_ = pd.read_pickle("/home/al/DriveAl/EasySol/Projects/DetectTA/Tests/trx_fraud_data_v3_agg.pk") target = data_['fraud'].values data = data_.drop('fraud', 1) n_samples = data.shape[0] cost_mat = np.zeros((n_samples, 4)) cost_mat[:, 0] = cost_mat_parameters['Ca'] cost_mat[:, 1] = data['amount'] cost_mat[:, 2] = cost_mat_parameters['Ca'] cost_mat[:, 3] = 0.0 return Bunch(data=data.values, target=target, cost_mat=cost_mat, target_names=['Legitimate Trx', 'Fraudulent Trx'], DESCR='', feature_names=data.columns.values, name='FraudDetection') datasets.load_fraud = load_fraud data = datasets.load_fraud() """ Explanation: <h1 class="title">Example-Dependent Cost-Sensitive Fraud Detection using CostCla</h1> <center> <h2>Alejandro Correa Bahnsen, PhD</h2> <p> <h2>Data Scientist</h2> <p> <div> <img img class="logo" src="https://raw.githubusercontent.com/albahnsen/CostSensitiveClassification/master/doc/tutorials/files/logo_easysol.jpg" style="width: 400px;"> </div> <h3>PyCaribbean, Santo Domingo, Dominican Republic, Feb 2016</h3> </center> <h1 class="bigtitle">About Me</h1> %%html <style> table,td,tr,th {border:none!important} </style> ### A brief bio: * PhD in **Machine Learning** at Luxembourg University * Data Scientist at Easy Solutions * Worked for +8 years as a data scientist at GE Money, Scotiabank and SIX Financial Services * Bachelor in Industrial Engineering and Master in Financial Engineering * Organizer of Big Data & Data Science Bogota Meetup * Sport addict, love to swim, play tennis, squash, and volleyball, among others. 
<p> <table style="border-collapse: collapse; border-top-color: rgb(255, 255, 255); border-right-color: rgb(255, 255, 255); border-bottom-color: rgb(255, 255, 255); border-left-color: rgb(255, 255, 255); border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; " border="0" bordercolor="#888" cellspacing="0" align="left"> <tr> <td> <a href="mailto: al.bahnsen@gmail.com"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.224580688,30 C0.224580688,13.4314567 13.454941,0 29.7754193,0 C46.0958976,0 59.3262579,13.4314567 59.3262579,30 C59.3262579,46.5685433 46.0958976,60 29.7754193,60 C13.454941,60 0.224580688,46.5685433 0.224580688,30 Z M0.224580688,30" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M35.0384324,31.6384006 L47.2131148,40.5764264 L47.2131148,20 L35.0384324,31.6384006 Z M13.7704918,20 L13.7704918,40.5764264 L25.9449129,31.6371491 L13.7704918,20 Z M30.4918033,35.9844891 L27.5851037,33.2065217 L13.7704918,42 L47.2131148,42 L33.3981762,33.2065217 L30.4918033,35.9844891 Z M46.2098361,20 L14.7737705,20 L30.4918033,32.4549304 L46.2098361,20 Z M46.2098361,20" id="Shape" fill="#333333" sketch:type="MSShapeGroup"></path> <path d="M59.3262579,30 C59.3262579,46.5685433 46.0958976,60 29.7754193,60 C23.7225405,60 18.0947051,58.1525134 13.4093244,54.9827754 L47.2695458,5.81941103 C54.5814438,11.2806503 59.3262579,20.0777973 59.3262579,30 Z M59.3262579,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="mailto: al.bahnsen@gmail.com" target="_blank">al.bahnsen@gmail.com</a> </td> </tr><tr> <td> <a href="http://github.com/albahnsen"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.336871032,30 C0.336871032,13.4314567 13.5672313,0 29.8877097,0 C46.208188,0 59.4385483,13.4314567 59.4385483,30 C59.4385483,46.5685433 46.208188,60 29.8877097,60 C13.5672313,60 0.336871032,46.5685433 0.336871032,30 Z M0.336871032,30" id="Github" fill="#333333" sketch:type="MSShapeGroup"></path> <path d="M18.2184245,31.9355566 C19.6068506,34.4507902 22.2845295,36.0156764 26.8007287,36.4485173 C26.1561023,36.9365335 25.3817877,37.8630984 25.2749857,38.9342607 C24.4644348,39.4574749 22.8347506,39.62966 21.5674303,39.2310659 C19.7918469,38.6717023 19.1119377,35.1642642 16.4533306,35.6636959 C15.8773626,35.772144 15.9917933,36.1507609 16.489567,36.4722998 C17.3001179,36.9955141 18.0629894,37.6500075 18.6513541,39.04366 C19.1033554,40.113871 20.0531304,42.0259813 23.0569369,42.0259813 C24.2489236,42.0259813 25.0842679,41.8832865 25.0842679,41.8832865 C25.0842679,41.8832865 25.107154,44.6144649 25.107154,45.6761142 C25.107154,46.9004355 23.4507693,47.2457569 23.4507693,47.8346108 C23.4507693,48.067679 23.9990832,48.0895588 24.4396415,48.0895588 C25.3102685,48.0895588 27.1220883,47.3646693 27.1220883,46.0918317 C27.1220883,45.0806012 27.1382993,41.6806599 27.1382993,41.0860982 C27.1382993,39.785673 27.8372803,39.3737607 27.8372803,39.3737607 C27.8372803,39.3737607 27.924057,46.3153869 27.6704022,47.2457569 C27.3728823,48.3397504 26.8360115,48.1846887 26.8360115,48.6727049 C26.8360115,49.3985458 29.0168704,48.8505978 29.7396911,47.2571725 C30.2984945,46.0166791 30.0543756,39.2072834 30.0543756,39.2072834 L30.650369,39.1949165 C30.650369,39.1949165 30.6837446,42.3123222 30.6637192,43.7373675 C30.6427402,45.2128317 30.5426134,47.0792797 31.4208692,47.9592309 C31.9977907,48.5376205 33.868733,49.5526562 33.868733,48.62514 C33.868733,48.0857536 32.8436245,47.6424485 32.8436245,46.1831564 L32.8436245,39.4688905 C33.6618042,39.4688905 33.5387911,41.6768547 33.5387911,41.6768547 L33.5988673,45.7788544 
C33.5988673,45.7788544 33.4186389,47.2733446 35.2190156,47.8992991 C35.8541061,48.1209517 37.2139245,48.1808835 37.277815,47.8089257 C37.3417055,47.4360167 35.6405021,46.8814096 35.6252446,45.7236791 C35.6157088,45.0178155 35.6567131,44.6059032 35.6567131,41.5379651 C35.6567131,38.470027 35.2438089,37.336079 33.8048426,36.4323453 C38.2457082,35.9766732 40.9939527,34.880682 42.3337458,31.9450695 C42.4383619,31.9484966 42.8791491,30.5737742 42.8219835,30.5742482 C43.1223642,29.4659853 43.2844744,28.1550957 43.3168964,26.6025764 C43.3092677,22.3930799 41.2895654,20.9042975 40.9014546,20.205093 C41.4736082,17.0182425 40.8060956,15.5675121 40.4961791,15.0699829 C39.3518719,14.6637784 36.5149435,16.1145088 34.9653608,17.1371548 C32.438349,16.3998984 27.0982486,16.4712458 25.0957109,17.3274146 C21.4005522,14.6875608 19.445694,15.0918628 19.445694,15.0918628 C19.445694,15.0918628 18.1821881,17.351197 19.1119377,20.6569598 C17.8961113,22.2028201 16.9902014,23.2968136 16.9902014,26.1963718 C16.9902014,27.8297516 17.1828264,29.2918976 17.6176632,30.5685404 C17.5643577,30.5684093 18.2008493,31.9359777 18.2184245,31.9355566 Z M18.2184245,31.9355566" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M59.4385483,30 C59.4385483,46.5685433 46.208188,60 29.8877097,60 C23.8348308,60 18.2069954,58.1525134 13.5216148,54.9827754 L47.3818361,5.81941103 C54.6937341,11.2806503 59.4385483,20.0777973 59.4385483,30 Z M59.4385483,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td><td> <a href="http://github.com/albahnsen" target="_blank">http://github.com/albahnsen</a> </td> </tr><tr> <td> <a href="http://linkedin.com/in/albahnsen"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.449161376,30 C0.449161376,13.4314567 13.6795217,0 30,0 C46.3204783,0 
59.5508386,13.4314567 59.5508386,30 C59.5508386,46.5685433 46.3204783,60 30,60 C13.6795217,60 0.449161376,46.5685433 0.449161376,30 Z M0.449161376,30" fill="#007BB6" sketch:type="MSShapeGroup"></path> <path d="M22.4680392,23.7098144 L15.7808366,23.7098144 L15.7808366,44.1369537 L22.4680392,44.1369537 L22.4680392,23.7098144 Z M22.4680392,23.7098144" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M22.9084753,17.3908761 C22.8650727,15.3880081 21.4562917,13.862504 19.1686418,13.862504 C16.8809918,13.862504 15.3854057,15.3880081 15.3854057,17.3908761 C15.3854057,19.3522579 16.836788,20.9216886 19.0818366,20.9216886 L19.1245714,20.9216886 C21.4562917,20.9216886 22.9084753,19.3522579 22.9084753,17.3908761 Z M22.9084753,17.3908761" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M46.5846502,32.4246563 C46.5846502,26.1503226 43.2856534,23.2301456 38.8851658,23.2301456 C35.3347011,23.2301456 33.7450983,25.2128128 32.8575489,26.6036896 L32.8575489,23.7103567 L26.1695449,23.7103567 C26.2576856,25.6271338 26.1695449,44.137496 26.1695449,44.137496 L32.8575489,44.137496 L32.8575489,32.7292961 C32.8575489,32.1187963 32.9009514,31.5097877 33.0777669,31.0726898 C33.5610713,29.8530458 34.6614937,28.5902885 36.5089747,28.5902885 C38.9297703,28.5902885 39.8974476,30.4634101 39.8974476,33.2084226 L39.8974476,44.1369537 L46.5843832,44.1369537 L46.5846502,32.4246563 Z M46.5846502,32.4246563" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M59.5508386,30 C59.5508386,46.5685433 46.3204783,60 30,60 C23.9471212,60 18.3192858,58.1525134 13.6339051,54.9827754 L47.4941264,5.81941103 C54.8060245,11.2806503 59.5508386,20.0777973 59.5508386,30 Z M59.5508386,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="http://linkedin.com/in/albahnsen" target="_blank">http://linkedin.com/in/albahnsen</a> </td> </tr><tr> <td> <a href="http://twitter.com/albahnsen"><svg width="40px" 
height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0,30 C0,13.4314567 13.4508663,0 30.0433526,0 C46.6358389,0 60.0867052,13.4314567 60.0867052,30 C60.0867052,46.5685433 46.6358389,60 30.0433526,60 C13.4508663,60 0,46.5685433 0,30 Z M0,30" fill="#4099FF" sketch:type="MSShapeGroup"></path> <path d="M29.2997675,23.8879776 L29.3627206,24.9260453 L28.3135016,24.798935 C24.4943445,24.3116787 21.1578281,22.6592444 18.3249368,19.8840023 L16.9399677,18.5069737 L16.5832333,19.5238563 C15.8277956,21.7906572 16.3104363,24.1845684 17.8842648,25.7946325 C18.72364,26.6844048 18.5347806,26.8115152 17.0868584,26.2818888 C16.5832333,26.1124083 16.1425613,25.985298 16.1005925,26.0488532 C15.9537019,26.1971486 16.457327,28.1249885 16.8560302,28.8876505 C17.4016241,29.9469033 18.5137962,30.9849709 19.7308902,31.5993375 L20.7591248,32.0865938 L19.5420308,32.1077788 C18.3669055,32.1077788 18.3249368,32.1289639 18.4508431,32.57385 C18.8705307,33.9508786 20.5282967,35.4126474 22.3749221,36.048199 L23.6759536,36.4930852 L22.5427971,37.1710069 C20.8640467,38.1455194 18.891515,38.6963309 16.9189833,38.738701 C15.9746862,38.759886 15.1982642,38.8446262 15.1982642,38.9081814 C15.1982642,39.1200319 17.7583585,40.306395 19.2482495,40.7724662 C23.7179224,42.1494948 29.0269705,41.5563132 33.0140027,39.2047722 C35.846894,37.5311528 38.6797853,34.2050993 40.0018012,30.9849709 C40.7152701,29.2689815 41.428739,26.1335934 41.428739,24.6294545 C41.428739,23.654942 41.4916922,23.5278317 42.6668174,22.3626537 C43.359302,21.6847319 44.0098178,20.943255 44.135724,20.7314044 C44.3455678,20.3288884 44.3245835,20.3288884 43.2543801,20.6890343 C41.4707078,21.324586 41.2188952,21.2398458 42.1002392,20.2865183 C42.750755,19.6085965 43.527177,18.3798634 43.527177,18.0197174 C43.527177,17.9561623 43.2124113,18.0620876 42.8556769,18.252753 C42.477958,18.4646036 
41.6385828,18.7823794 41.0090514,18.9730449 L39.8758949,19.3331908 L38.8476603,18.634084 C38.281082,18.252753 37.4836756,17.829052 37.063988,17.7019416 C35.9937846,17.4053509 34.357003,17.447721 33.3917215,17.7866818 C30.768674,18.7400093 29.110908,21.1974757 29.2997675,23.8879776 Z M29.2997675,23.8879776" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M60.0867052,30 C60.0867052,46.5685433 46.6358389,60 30.0433526,60 C23.8895925,60 18.1679598,58.1525134 13.4044895,54.9827754 L47.8290478,5.81941103 C55.2628108,11.2806503 60.0867052,20.0777973 60.0867052,30 Z M60.0867052,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="http://twitter.com/albahnsen" target="_blank">@albahnsen</a> </td> </tr> </table> # Agenda * Quick Intro to Fraud Detection * Financial Evaluation of a Fraud Detection Model * Example-Dependent Classification * CostCla Library * Conclusion and Future Work # Fraud Detection Estimate the **probability** of a transaction being **fraud** based on analyzing customer patterns and recent fraudulent behavior <center> <div> <img img class="logo" src="https://raw.githubusercontent.com/albahnsen/CostSensitiveClassification/master/doc/tutorials/files/trx_flow.png" style="width: 800px;"> </div> </center> # Fraud Detection Issues when constructing a fraud detection system: * Skewness of the data * **Cost-sensitivity** * Short time response of the system * Dimensionality of the search space * Feature preprocessing * Model selection Different machine learning methods are used in practice, and in the literature: logistic regression, neural networks, discriminant analysis, genetic programing, decision trees, random forests among others # Fraud Detection Formally, a fraud detection is a statistical model that allows the estimation of the probability of transaction $i$ being a fraud ($y_i=1$) $$\hat p_i=P(y_i=1|\mathbf{x}_i)$$ <h1 class="bigtitle">Data!</h1> <center> <img img 
class="logo" src="http://www.sei-security.com/wp-content/uploads/2015/12/shutterstock_144683186.jpg" style="width: 400px;"> </center> # Load dataset from CostCla package End of explanation """ print(data.keys()) print('Number of examples ', data.target.shape[0]) """ Explanation: Data file End of explanation """ target = pd.DataFrame(pd.Series(data.target).value_counts(), columns=('Frequency',)) target['Percentage'] = (target['Frequency'] / target['Frequency'].sum()) * 100 target.index = ['Negative (Legitimate Trx)', 'Positive (Fraud Trx)'] target.loc['Total Trx'] = [data.target.shape[0], 1.] print(target) """ Explanation: Class Label End of explanation """ pd.DataFrame(data.feature_names[:4], columns=('Features',)) """ Explanation: Features End of explanation """ df = pd.DataFrame(data.data[:, :4], columns=data.feature_names[:4]) df.head(10) """ Explanation: Features End of explanation """ df = pd.DataFrame(data.data[:, 4:], columns=data.feature_names[4:]) df.head(10) """ Explanation: Aggregated Features End of explanation """ from sklearn.cross_validation import train_test_split X = data.data[:, [2, 3] + list(range(4, data.data.shape[1]))].astype(np.float) X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = \ train_test_split(X, data.target, data.cost_mat, test_size=0.33, random_state=10) """ Explanation: Fraud Detection as a classification problem Split in training and testing End of explanation """ from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier classifiers = {"RF": {"f": RandomForestClassifier()}, "DT": {"f": DecisionTreeClassifier()}} ci_models = ['DT', 'RF'] # Fit the classifiers using the training dataset for model in classifiers.keys(): classifiers[model]["f"].fit(X_train, y_train) classifiers[model]["c"] = classifiers[model]["f"].predict(X_test) classifiers[model]["p"] = classifiers[model]["f"].predict_proba(X_test) classifiers[model]["p_train"] = 
classifiers[model]["f"].predict_proba(X_train) """ Explanation: Fraud Detection as a classification problem Fit models End of explanation """ import warnings warnings.filterwarnings('ignore') %matplotlib inline import matplotlib.pyplot as plt from IPython.core.pylabtools import figsize import seaborn as sns colors = sns.color_palette() figsize(12, 8) from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score measures = {"F1Score": f1_score, "Precision": precision_score, "Recall": recall_score, "Accuracy": accuracy_score} results = pd.DataFrame(columns=measures.keys()) for model in ci_models: results.loc[model] = [measures[measure](y_test, classifiers[model]["c"]) for measure in measures.keys()] """ Explanation: Models performance Evaluate metrics and plot results End of explanation """ def fig_acc(): plt.bar(np.arange(results.shape[0])-0.3, results['Accuracy'], 0.6, label='Accuracy', color=colors[0]) plt.xticks(range(results.shape[0]), results.index) plt.tick_params(labelsize=22); plt.title('Accuracy', size=30) plt.show() fig_acc() """ Explanation: Models performance End of explanation """ def fig_f1(): plt.bar(np.arange(results.shape[0])-0.3, results['Precision'], 0.2, label='Precision', color=colors[0]) plt.bar(np.arange(results.shape[0])-0.3+0.2, results['Recall'], 0.2, label='Recall', color=colors[1]) plt.bar(np.arange(results.shape[0])-0.3+0.4, results['F1Score'], 0.2, label='F1Score', color=colors[2]) plt.xticks(range(results.shape[0]), results.index) plt.tick_params(labelsize=22) plt.ylim([0, 1]) plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize=22) plt.show() fig_f1() """ Explanation: Models performance End of explanation """ # The cost matrix is already calculated for the dataset # cost_mat[C_FP,C_FN,C_TP,C_TN] print(data.cost_mat[[10, 17, 50]]) """ Explanation: Models performance None of these measures takes into account the business and economical realities that take place in fraud detection. 
Losses due to fraud or customer satisfaction costs, are not considered in the evaluation of the different models. <h1 class="bigtitle">Financial Evaluation of a Fraud Detection Model</h1> Motivation Typically, a fraud model is evaluated using standard cost-insensitive measures. However, in practice, the cost associated with approving a fraudulent transaction (False Negative) is quite different from the cost associated with declining a legitimate transaction (False Positive). Furthermore, the costs are not constant among transactions. Cost Matrix | | Actual Positive ($y_i=1$) | Actual Negative ($y_i=0$)| |--- |:-: |:-: | | Pred. Positive ($c_i=1$) | $C_{TP_i}=C_a$ | $C_{FP_i}=C_a$ | | Pred. Negative ($c_i=0$) | $C_{FN_i}=Amt_i$ | $C_{TN_i}=0$ | Where: $C_{FN_i}$ = Amount of the transaction $i$ $C_a$ is the administrative cost of dealing with an alert For more info see <a href="http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf" target="_blank">[Correa Bahnsen et al., 2014]</a> End of explanation """ # Calculation of the cost and savings from costcla.metrics import savings_score, cost_loss # Evaluate the savings for each model results["Savings"] = np.zeros(results.shape[0]) for model in ci_models: results["Savings"].loc[model] = savings_score(y_test, classifiers[model]["c"], cost_mat_test) # Plot the results def fig_sav(): plt.bar(np.arange(results.shape[0])-0.4, results['Precision'], 0.2, label='Precision', color=colors[0]) plt.bar(np.arange(results.shape[0])-0.4+0.2, results['Recall'], 0.2, label='Recall', color=colors[1]) plt.bar(np.arange(results.shape[0])-0.4+0.4, results['F1Score'], 0.2, label='F1Score', color=colors[2]) plt.bar(np.arange(results.shape[0])-0.4+0.6, results['Savings'], 0.2, label='Savings', color=colors[3]) plt.xticks(range(results.shape[0]), results.index) plt.tick_params(labelsize=22) plt.ylim([0, 1]) plt.xlim([-0.5, results.shape[0] -1 + .5]) 
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),fontsize=22) plt.show() """ Explanation: Financial savings The financial cost of using a classifier $f$ on $\mathcal{S}$ is calculated by $$ Cost(f(\mathcal{S})) = \sum_{i=1}^N y_i(1-c_i)C_{FN_i} + (1-y_i)c_i C_{FP_i}.$$ Then the financial savings are defined as the cost of the algorithm versus the cost of using no algorithm at all. $$ Savings(f(\mathcal{S})) = \frac{ Cost_l(\mathcal{S}) - Cost(f(\mathcal{S}))} {Cost_l(\mathcal{S})},$$ where $Cost_l(\mathcal{S})$ is the cost of the costless class Models Savings costcla.metrics.savings_score(y_true, y_pred, cost_mat) End of explanation """ fig_sav() """ Explanation: Models Savings End of explanation """ from costcla.models import ThresholdingOptimization for model in ci_models: classifiers[model+"-TO"] = {"f": ThresholdingOptimization()} # Fit classifiers[model+"-TO"]["f"].fit(classifiers[model]["p_train"], cost_mat_train, y_train) # Predict classifiers[model+"-TO"]["c"] = classifiers[model+"-TO"]["f"].predict(classifiers[model]["p"]) print('New thresholds') for model in ci_models: print(model + '-TO - ' + str(classifiers[model+'-TO']['f'].threshold_)) for model in ci_models: # Evaluate results.loc[model+"-TO"] = 0 results.loc[model+"-TO", measures.keys()] = \ [measures[measure](y_test, classifiers[model+"-TO"]["c"]) for measure in measures.keys()] results["Savings"].loc[model+"-TO"] = savings_score(y_test, classifiers[model+"-TO"]["c"], cost_mat_test) """ Explanation: Threshold Optimization Convert a classifier cost-sensitive by selecting a proper threshold from training instances according to the savings $$ t \quad = \quad argmax_t \: Savings(c(t), y) $$ Threshold Optimization - Code costcla.models.ThresholdingOptimization(calibration=True) fit(y_prob_train=None, cost_mat, y_true_train) - Parameters - y_prob_train : Predicted probabilities of the training set - cost_mat : Cost matrix of the classification problem. 
- y_true_cal : True class predict(y_prob) - Parameters - y_prob : Predicted probabilities Returns y_pred : Predicted class Threshold Optimization End of explanation """ fig_sav() """ Explanation: Threshold Optimization End of explanation """ from costcla.models import BayesMinimumRiskClassifier for model in ci_models: classifiers[model+"-BMR"] = {"f": BayesMinimumRiskClassifier()} # Fit classifiers[model+"-BMR"]["f"].fit(y_test, classifiers[model]["p"]) # Calibration must be made in a validation set # Predict classifiers[model+"-BMR"]["c"] = classifiers[model+"-BMR"]["f"].predict(classifiers[model]["p"], cost_mat_test) for model in ci_models: # Evaluate results.loc[model+"-BMR"] = 0 results.loc[model+"-BMR", measures.keys()] = \ [measures[measure](y_test, classifiers[model+"-BMR"]["c"]) for measure in measures.keys()] results["Savings"].loc[model+"-BMR"] = savings_score(y_test, classifiers[model+"-BMR"]["c"], cost_mat_test) """ Explanation: Models Savings There are significant differences in the results when evaluating a model using a traditional cost-insensitive measures Train models that take into account the different financial costs <h1 class="bigtitle">Example-Dependent Cost-Sensitive Classification</h1> *Why "Example-Dependent" Cost-sensitive classification ussualy refers to class-dependent costs, where the cost dependends on the class but is assumed constant accross examples. In fraud detection, different transactions have different amounts, which implies that the costs are not constant Bayes Minimum Risk (BMR) The BMR classifier is a decision model based on quantifying tradeoffs between various decisions using probabilities and the costs that accompany such decisions. 
In particular: $$ R(c_i=0|\mathbf{x}i)=C{TN_i}(1-\hat p_i)+C_{FN_i} \cdot \hat p_i, $$ and $$ R(c_i=1|\mathbf{x}i)=C{TP_i} \cdot \hat p_i + C_{FP_i}(1- \hat p_i), $$ BMR Code costcla.models.BayesMinimumRiskClassifier(calibration=True) fit(y_true_cal=None, y_prob_cal=None) - Parameters - y_true_cal : True class - y_prob_cal : Predicted probabilities predict(y_prob,cost_mat) - Parameters - y_prob : Predicted probabilities - cost_mat : Cost matrix of the classification problem. Returns y_pred : Predicted class BMR Code End of explanation """ fig_sav() """ Explanation: BMR Results End of explanation """ print(data.data[data.target == 1, 2].mean()) """ Explanation: BMR Results Why so important focusing on the Recall Average cost of a False Negative End of explanation """ print(data.cost_mat[:,0].mean()) """ Explanation: Average cost of a False Positive End of explanation """ from costcla.models import CostSensitiveDecisionTreeClassifier from costcla.models import CostSensitiveRandomForestClassifier classifiers = {"CSDT": {"f": CostSensitiveDecisionTreeClassifier()}, "CSRF": {"f": CostSensitiveRandomForestClassifier(combination='majority_bmr')}} # Fit the classifiers using the training dataset for model in classifiers.keys(): classifiers[model]["f"].fit(X_train, y_train, cost_mat_train) if model == "CSRF": classifiers[model]["c"] = classifiers[model]["f"].predict(X_test, cost_mat_test) else: classifiers[model]["c"] = classifiers[model]["f"].predict(X_test) for model in ['CSDT', 'CSRF']: # Evaluate results.loc[model] = 0 results.loc[model, measures.keys()] = \ [measures[measure](y_test, classifiers[model]["c"]) for measure in measures.keys()] results["Savings"].loc[model] = savings_score(y_test, classifiers[model]["c"], cost_mat_test) """ Explanation: BMR Results Bayes Minimum Risk increases the savings by using a cost-insensitive method and then introducing the costs Why not introduce the costs during the estimation of the methods? 
Cost-Sensitive Decision Trees (CSDT) A a new cost-based impurity measure taking into account the costs when all the examples in a leaf costcla.models.CostSensitiveDecisionTreeClassifier(criterion='direct_cost', criterion_weight=False, pruned=True) Cost-Sensitive Random Forest (CSRF) Ensemble of CSDT costcla.models.CostSensitiveRandomForestClassifier(n_estimators=10, max_samples=0.5, max_features=0.5,combination='majority_voting) CSDT & CSRF Code End of explanation """ fig_sav() """ Explanation: CSDT & CSRF Results End of explanation """ #Format from https://github.com/ellisonbg/talk-2013-scipy from IPython.display import display, HTML s = """ <style> .rendered_html { font-family: "proxima-nova", helvetica; font-size: 100%; line-height: 1.3; } .rendered_html h1 { margin: 0.25em 0em 0.5em; color: #015C9C; text-align: center; line-height: 1.2; page-break-before: always; } .rendered_html h2 { margin: 1.1em 0em 0.5em; color: #26465D; line-height: 1.2; } .rendered_html h3 { margin: 1.1em 0em 0.5em; color: #002845; line-height: 1.2; } .rendered_html li { line-height: 1.5; } .prompt { font-size: 120%; } .CodeMirror-lines { font-size: 120%; } .output_area { font-size: 120%; } #notebook { background-image: url('files/images/witewall_3.png'); } h1.bigtitle { margin: 4cm 1cm 4cm 1cm; font-size: 300%; } h3.point { font-size: 200%; text-align: center; margin: 2em 0em 2em 0em; #26465D } .logo { margin: 20px 0 20px 0; } a.anchor-link { display: none; } h1.title { font-size: 250%; } </style> """ display(HTML(s)) """ Explanation: Lessons Learned (so far ...) 
Selecting models based on traditional statistics does not give the best results in terms of cost Models should be evaluated taking into account real financial costs of the application Algorithms should be developed to incorporate those financial costs <center> <img src="https://raw.githubusercontent.com/albahnsen/CostSensitiveClassification/master/logo.png" style="width: 600px;" align="middle"> </center> CostCla Library CostCla is a Python open source cost-sensitive classification library built on top of Scikit-learn, Pandas and Numpy. Source code, binaries and documentation are distributed under 3-Clause BSD license in the website http://albahnsen.com/CostSensitiveClassification/ CostCla Algorithms Cost-proportionate over-sampling <a href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.29.514" target="_blank">[Elkan, 2001]</a> SMOTE <a href="http://arxiv.org/abs/1106.1813" target="_blank">[Chawla et al., 2002]</a> Cost-proportionate rejection-sampling <a href="http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1250950" target="_blank">[Zadrozny et al., 2003]</a> Thresholding optimization <a href="http://www.aaai.org/Papers/AAAI/2006/AAAI06-076.pdf" target="_blank">[Sheng and Ling, 2006]</a> Bayes minimum risk <a href="http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf" target="_blank">[Correa Bahnsen et al., 2014a]</a> Cost-sensitive logistic regression <a href="http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Logistic%20Regression%20for%20Credit%20Scoring_publish.pdf" target="_blank">[Correa Bahnsen et al., 2014b]</a> Cost-sensitive decision trees <a href="http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Decision%20Trees.pdf" target="_blank">[Correa Bahnsen et al., 2015a]</a> Cost-sensitive ensemble methods: cost-sensitive bagging, cost-sensitive pasting, cost-sensitive random forest and cost-sensitive random patches <a 
href="http://arxiv.org/abs/1505.04637" target="_blank">[Correa Bahnsen et al., 2015c]</a> CostCla Databases Credit Scoring1 - Kaggle credit competition <a href="https://www.kaggle.com/c/GiveMeSomeCredit" target="_blank">[Data]</a>, cost matrix: <a href="http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Logistic%20Regression%20for%20Credit%20Scoring_publish.pdf" target="_blank">[Correa Bahnsen et al., 2014]</a> Credit Scoring 2 - PAKDD2009 Credit <a href="http://sede.neurotech.com.br/PAKDD2009/" target="_blank">[Data]</a>, cost matrix: <a href="http://albahnsen.com/files/Example-Dependent%20Cost-Sensitive%20Logistic%20Regression%20for%20Credit%20Scoring_publish.pdf" target="_blank">[Correa Bahnsen et al., 2014a]</a> Direct Marketing - PAKDD2009 Credit <a href="https://archive.ics.uci.edu/ml/datasets/Bank+Marketing" target="_blank">[Data]</a>, cost matrix: <a href="http://albahnsen.com/files/%20Improving%20Credit%20Card%20Fraud%20Detection%20by%20using%20Calibrated%20Probabilities%20-%20Publish.pdf" target="_blank">[Correa Bahnsen et al., 2014b]</a> Churn Modeling, soon Fraud Detection, soon Future Work CSDT in Cython Cost-sensitive class-dependent algorithms Sampling algorithms Probability calibration (Only ROCCH) Other algorithms More databases You find the presentation and the IPython Notebook here: <a href="http://nbviewer.ipython.org/format/slides/github/albahnsen/CostSensitiveClassification/blob/master/doc/tutorials/slides_edcs_fraud_detection.ipynb#/" target="_blank">http://nbviewer.ipython.org/format/slides/github/ albahnsen/CostSensitiveClassification/blob/ master/doc/tutorials/slides_edcs_fraud_detection.ipynb#/</a> <a href="https://github.com/albahnsen/CostSensitiveClassification/blob/master/doc/tutorials/slides_edcs_fraud_detection.ipynb" target="_blank">https://github.com/albahnsen/CostSensitiveClassification/ blob/master/doc/tutorials/slides_edcs_fraud_detection.ipynb</a> <h1 class="bigtitle">Thanks!</h1> <center> <table 
style="border-collapse: collapse; border-top-color: rgb(255, 255, 255); border-right-color: rgb(255, 255, 255); border-bottom-color: rgb(255, 255, 255); border-left-color: rgb(255, 255, 255); border-top-width: 1px; border-right-width: 1px; border-bottom-width: 1px; border-left-width: 1px; " border="0" bordercolor="#888" cellspacing="0" align="left"> <tr> <td> <a href="mailto: al.bahnsen@gmail.com"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.224580688,30 C0.224580688,13.4314567 13.454941,0 29.7754193,0 C46.0958976,0 59.3262579,13.4314567 59.3262579,30 C59.3262579,46.5685433 46.0958976,60 29.7754193,60 C13.454941,60 0.224580688,46.5685433 0.224580688,30 Z M0.224580688,30" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M35.0384324,31.6384006 L47.2131148,40.5764264 L47.2131148,20 L35.0384324,31.6384006 Z M13.7704918,20 L13.7704918,40.5764264 L25.9449129,31.6371491 L13.7704918,20 Z M30.4918033,35.9844891 L27.5851037,33.2065217 L13.7704918,42 L47.2131148,42 L33.3981762,33.2065217 L30.4918033,35.9844891 Z M46.2098361,20 L14.7737705,20 L30.4918033,32.4549304 L46.2098361,20 Z M46.2098361,20" id="Shape" fill="#333333" sketch:type="MSShapeGroup"></path> <path d="M59.3262579,30 C59.3262579,46.5685433 46.0958976,60 29.7754193,60 C23.7225405,60 18.0947051,58.1525134 13.4093244,54.9827754 L47.2695458,5.81941103 C54.5814438,11.2806503 59.3262579,20.0777973 59.3262579,30 Z M59.3262579,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="mailto: al.bahnsen@gmail.com" target="_blank">al.bahnsen@gmail.com</a> </td> </tr><tr> <td> <a href="http://github.com/albahnsen"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" 
xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.336871032,30 C0.336871032,13.4314567 13.5672313,0 29.8877097,0 C46.208188,0 59.4385483,13.4314567 59.4385483,30 C59.4385483,46.5685433 46.208188,60 29.8877097,60 C13.5672313,60 0.336871032,46.5685433 0.336871032,30 Z M0.336871032,30" id="Github" fill="#333333" sketch:type="MSShapeGroup"></path> <path d="M18.2184245,31.9355566 C19.6068506,34.4507902 22.2845295,36.0156764 26.8007287,36.4485173 C26.1561023,36.9365335 25.3817877,37.8630984 25.2749857,38.9342607 C24.4644348,39.4574749 22.8347506,39.62966 21.5674303,39.2310659 C19.7918469,38.6717023 19.1119377,35.1642642 16.4533306,35.6636959 C15.8773626,35.772144 15.9917933,36.1507609 16.489567,36.4722998 C17.3001179,36.9955141 18.0629894,37.6500075 18.6513541,39.04366 C19.1033554,40.113871 20.0531304,42.0259813 23.0569369,42.0259813 C24.2489236,42.0259813 25.0842679,41.8832865 25.0842679,41.8832865 C25.0842679,41.8832865 25.107154,44.6144649 25.107154,45.6761142 C25.107154,46.9004355 23.4507693,47.2457569 23.4507693,47.8346108 C23.4507693,48.067679 23.9990832,48.0895588 24.4396415,48.0895588 C25.3102685,48.0895588 27.1220883,47.3646693 27.1220883,46.0918317 C27.1220883,45.0806012 27.1382993,41.6806599 27.1382993,41.0860982 C27.1382993,39.785673 27.8372803,39.3737607 27.8372803,39.3737607 C27.8372803,39.3737607 27.924057,46.3153869 27.6704022,47.2457569 C27.3728823,48.3397504 26.8360115,48.1846887 26.8360115,48.6727049 C26.8360115,49.3985458 29.0168704,48.8505978 29.7396911,47.2571725 C30.2984945,46.0166791 30.0543756,39.2072834 30.0543756,39.2072834 L30.650369,39.1949165 C30.650369,39.1949165 30.6837446,42.3123222 30.6637192,43.7373675 C30.6427402,45.2128317 30.5426134,47.0792797 31.4208692,47.9592309 C31.9977907,48.5376205 33.868733,49.5526562 33.868733,48.62514 C33.868733,48.0857536 32.8436245,47.6424485 32.8436245,46.1831564 L32.8436245,39.4688905 C33.6618042,39.4688905 33.5387911,41.6768547 33.5387911,41.6768547 L33.5988673,45.7788544 
C33.5988673,45.7788544 33.4186389,47.2733446 35.2190156,47.8992991 C35.8541061,48.1209517 37.2139245,48.1808835 37.277815,47.8089257 C37.3417055,47.4360167 35.6405021,46.8814096 35.6252446,45.7236791 C35.6157088,45.0178155 35.6567131,44.6059032 35.6567131,41.5379651 C35.6567131,38.470027 35.2438089,37.336079 33.8048426,36.4323453 C38.2457082,35.9766732 40.9939527,34.880682 42.3337458,31.9450695 C42.4383619,31.9484966 42.8791491,30.5737742 42.8219835,30.5742482 C43.1223642,29.4659853 43.2844744,28.1550957 43.3168964,26.6025764 C43.3092677,22.3930799 41.2895654,20.9042975 40.9014546,20.205093 C41.4736082,17.0182425 40.8060956,15.5675121 40.4961791,15.0699829 C39.3518719,14.6637784 36.5149435,16.1145088 34.9653608,17.1371548 C32.438349,16.3998984 27.0982486,16.4712458 25.0957109,17.3274146 C21.4005522,14.6875608 19.445694,15.0918628 19.445694,15.0918628 C19.445694,15.0918628 18.1821881,17.351197 19.1119377,20.6569598 C17.8961113,22.2028201 16.9902014,23.2968136 16.9902014,26.1963718 C16.9902014,27.8297516 17.1828264,29.2918976 17.6176632,30.5685404 C17.5643577,30.5684093 18.2008493,31.9359777 18.2184245,31.9355566 Z M18.2184245,31.9355566" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M59.4385483,30 C59.4385483,46.5685433 46.208188,60 29.8877097,60 C23.8348308,60 18.2069954,58.1525134 13.5216148,54.9827754 L47.3818361,5.81941103 C54.6937341,11.2806503 59.4385483,20.0777973 59.4385483,30 Z M59.4385483,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td><td> <a href="http://github.com/albahnsen" target="_blank">http://github.com/albahnsen</a> </td> </tr><tr> <td> <a href="http://linkedin.com/in/albahnsen"><svg width="40px" height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0.449161376,30 C0.449161376,13.4314567 13.6795217,0 30,0 C46.3204783,0 
59.5508386,13.4314567 59.5508386,30 C59.5508386,46.5685433 46.3204783,60 30,60 C13.6795217,60 0.449161376,46.5685433 0.449161376,30 Z M0.449161376,30" fill="#007BB6" sketch:type="MSShapeGroup"></path> <path d="M22.4680392,23.7098144 L15.7808366,23.7098144 L15.7808366,44.1369537 L22.4680392,44.1369537 L22.4680392,23.7098144 Z M22.4680392,23.7098144" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M22.9084753,17.3908761 C22.8650727,15.3880081 21.4562917,13.862504 19.1686418,13.862504 C16.8809918,13.862504 15.3854057,15.3880081 15.3854057,17.3908761 C15.3854057,19.3522579 16.836788,20.9216886 19.0818366,20.9216886 L19.1245714,20.9216886 C21.4562917,20.9216886 22.9084753,19.3522579 22.9084753,17.3908761 Z M22.9084753,17.3908761" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M46.5846502,32.4246563 C46.5846502,26.1503226 43.2856534,23.2301456 38.8851658,23.2301456 C35.3347011,23.2301456 33.7450983,25.2128128 32.8575489,26.6036896 L32.8575489,23.7103567 L26.1695449,23.7103567 C26.2576856,25.6271338 26.1695449,44.137496 26.1695449,44.137496 L32.8575489,44.137496 L32.8575489,32.7292961 C32.8575489,32.1187963 32.9009514,31.5097877 33.0777669,31.0726898 C33.5610713,29.8530458 34.6614937,28.5902885 36.5089747,28.5902885 C38.9297703,28.5902885 39.8974476,30.4634101 39.8974476,33.2084226 L39.8974476,44.1369537 L46.5843832,44.1369537 L46.5846502,32.4246563 Z M46.5846502,32.4246563" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M59.5508386,30 C59.5508386,46.5685433 46.3204783,60 30,60 C23.9471212,60 18.3192858,58.1525134 13.6339051,54.9827754 L47.4941264,5.81941103 C54.8060245,11.2806503 59.5508386,20.0777973 59.5508386,30 Z M59.5508386,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="http://linkedin.com/in/albahnsen" target="_blank">http://linkedin.com/in/albahnsen</a> </td> </tr><tr> <td> <a href="http://twitter.com/albahnsen"><svg width="40px" 
height="40px" viewBox="0 0 60 60" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:sketch="http://www.bohemiancoding.com/sketch/ns"> <path d="M0,30 C0,13.4314567 13.4508663,0 30.0433526,0 C46.6358389,0 60.0867052,13.4314567 60.0867052,30 C60.0867052,46.5685433 46.6358389,60 30.0433526,60 C13.4508663,60 0,46.5685433 0,30 Z M0,30" fill="#4099FF" sketch:type="MSShapeGroup"></path> <path d="M29.2997675,23.8879776 L29.3627206,24.9260453 L28.3135016,24.798935 C24.4943445,24.3116787 21.1578281,22.6592444 18.3249368,19.8840023 L16.9399677,18.5069737 L16.5832333,19.5238563 C15.8277956,21.7906572 16.3104363,24.1845684 17.8842648,25.7946325 C18.72364,26.6844048 18.5347806,26.8115152 17.0868584,26.2818888 C16.5832333,26.1124083 16.1425613,25.985298 16.1005925,26.0488532 C15.9537019,26.1971486 16.457327,28.1249885 16.8560302,28.8876505 C17.4016241,29.9469033 18.5137962,30.9849709 19.7308902,31.5993375 L20.7591248,32.0865938 L19.5420308,32.1077788 C18.3669055,32.1077788 18.3249368,32.1289639 18.4508431,32.57385 C18.8705307,33.9508786 20.5282967,35.4126474 22.3749221,36.048199 L23.6759536,36.4930852 L22.5427971,37.1710069 C20.8640467,38.1455194 18.891515,38.6963309 16.9189833,38.738701 C15.9746862,38.759886 15.1982642,38.8446262 15.1982642,38.9081814 C15.1982642,39.1200319 17.7583585,40.306395 19.2482495,40.7724662 C23.7179224,42.1494948 29.0269705,41.5563132 33.0140027,39.2047722 C35.846894,37.5311528 38.6797853,34.2050993 40.0018012,30.9849709 C40.7152701,29.2689815 41.428739,26.1335934 41.428739,24.6294545 C41.428739,23.654942 41.4916922,23.5278317 42.6668174,22.3626537 C43.359302,21.6847319 44.0098178,20.943255 44.135724,20.7314044 C44.3455678,20.3288884 44.3245835,20.3288884 43.2543801,20.6890343 C41.4707078,21.324586 41.2188952,21.2398458 42.1002392,20.2865183 C42.750755,19.6085965 43.527177,18.3798634 43.527177,18.0197174 C43.527177,17.9561623 43.2124113,18.0620876 42.8556769,18.252753 C42.477958,18.4646036 
41.6385828,18.7823794 41.0090514,18.9730449 L39.8758949,19.3331908 L38.8476603,18.634084 C38.281082,18.252753 37.4836756,17.829052 37.063988,17.7019416 C35.9937846,17.4053509 34.357003,17.447721 33.3917215,17.7866818 C30.768674,18.7400093 29.110908,21.1974757 29.2997675,23.8879776 Z M29.2997675,23.8879776" id="Path" fill="#FFFFFF" sketch:type="MSShapeGroup"></path> <path d="M60.0867052,30 C60.0867052,46.5685433 46.6358389,60 30.0433526,60 C23.8895925,60 18.1679598,58.1525134 13.4044895,54.9827754 L47.8290478,5.81941103 C55.2628108,11.2806503 60.0867052,20.0777973 60.0867052,30 Z M60.0867052,30" id="reflec" fill-opacity="0.08" fill="#000000" sketch:type="MSShapeGroup"></path> </svg></a> </td> <td> <a href="http://twitter.com/albahnsen" target="_blank">@albahnsen</a> </td> </tr> </table> </center> End of explanation """
intel-analytics/analytics-zoo
apps/recommendation-wide-n-deep/wide_n_deep.ipynb
apache-2.0
from zoo.models.recommendation import * from zoo.models.recommendation.utils import * from zoo.common.nncontext import init_nncontext import os import sys import datetime as dt import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt %pylab inline """ Explanation: Wide & Deep Recommender Demo Wide and Deep Learning Model, proposed by Google in 2016, is a DNN-Linear mixed model. Wide and deep learning has been used for Google App Store for their app recommendation. In this tutorial, we use Recommender API of Analytics Zoo to build a wide linear model and a deep neural network, which is called Wide&Deep model, and use optimizer of BigDL to train the neural network. Wide&Deep model combines the strength of memorization and generalization. It's useful for generic large-scale regression and classification problems with sparse input features (e.g., categorical features with a large number of possible feature values). Intialization import necessary libraries End of explanation """ sc = init_nncontext("WideAndDeep Example") """ Explanation: Initilaize NN context, it will get a SparkContext with optimized configuration for BigDL performance. End of explanation """ from bigdl.dataset import movielens movielens_data = movielens.get_id_ratings("/tmp/movielens/") min_user_id = np.min(movielens_data[:,0]) max_user_id = np.max(movielens_data[:,0]) min_movie_id = np.min(movielens_data[:,1]) max_movie_id = np.max(movielens_data[:,1]) rating_labels= np.unique(movielens_data[:,2]) print(movielens_data.shape) print(min_user_id, max_user_id, min_movie_id, max_movie_id, rating_labels) """ Explanation: Data Preparation Download and read movielens 1M rating data, understand the dimension. 
End of explanation """ sqlContext = SQLContext(sc) from pyspark.sql.types import * from pyspark.sql import Row Rating = Row("userId", "itemId", "label") User = Row("userId", "gender", "age" ,"occupation") Item = Row("itemId", "title" ,"genres") ratings = sc.parallelize(movielens_data)\ .map(lambda l: (int(l[0]), int(l[1]), int(l[2])-1))\ .map(lambda r: Rating(*r)) ratingDF = sqlContext.createDataFrame(ratings) users= sc.textFile("/tmp/movielens/ml-1m/users.dat")\ .map(lambda l: l.split("::")[0:4])\ .map(lambda l: (int(l[0]), l[1], int(l[2]), int(l[3])))\ .map(lambda r: User(*r)) userDF = sqlContext.createDataFrame(users) items = sc.textFile("/tmp/movielens/ml-1m/movies.dat")\ .map(lambda l: l.split("::")[0:3])\ .map(lambda l: (int(l[0]), l[1], l[2].split('|')[0]))\ .map(lambda r: Item(*r)) itemDF = sqlContext.createDataFrame(items) """ Explanation: Transform ratings into dataframe, read user and item data into dataframes. Transform labels to zero-based since the original labels start from 1. End of explanation """ from pyspark.sql.functions import col, udf gender_udf = udf(lambda gender: categorical_from_vocab_list(gender, ["F", "M"], start=1)) bucket_cross_udf = udf(lambda feature1, feature2: hash_bucket(str(feature1) + "_" + str(feature2), bucket_size=100)) genres_list = ["Crime", "Romance", "Thriller", "Adventure", "Drama", "Children's", "War", "Documentary", "Fantasy", "Mystery", "Musical", "Animation", "Film-Noir", "Horror", "Western", "Comedy", "Action", "Sci-Fi"] genres_udf = udf(lambda genres: categorical_from_vocab_list(genres, genres_list, start=1)) allDF = ratingDF.join(userDF, ["userId"]).join(itemDF, ["itemId"]) \ .withColumn("gender", gender_udf(col("gender")).cast("int")) \ .withColumn("age-gender", bucket_cross_udf(col("age"), col("gender")).cast("int")) \ .withColumn("genres", genres_udf(col("genres")).cast("int")) allDF.show(5) """ Explanation: Join data together, and transform data. 
For example, gender is going be used as categorical feature, occupation and gender will be used as crossed features. End of explanation """ bucket_size = 100 column_info = ColumnFeatureInfo( wide_base_cols=["occupation", "gender"], wide_base_dims=[21, 3], wide_cross_cols=["age-gender"], wide_cross_dims=[bucket_size], indicator_cols=["genres", "gender"], indicator_dims=[19, 3], embed_cols=["userId", "itemId"], embed_in_dims=[max_user_id, max_movie_id], embed_out_dims=[64, 64], continuous_cols=["age"]) """ Explanation: Speficy data feature information shared by the WideAndDeep model and its feature generation. Here, we use occupation gender for wide base part, age and gender crossed as wide cross part, genres and gender as indicators, userid and itemid for embedding. End of explanation """ rdds = allDF.rdd.map(lambda row: to_user_item_feature(row, column_info)) trainPairFeatureRdds, valPairFeatureRdds = rdds.randomSplit([0.8, 0.2], seed= 1) valPairFeatureRdds.persist() train_data= trainPairFeatureRdds.map(lambda pair_feature: pair_feature.sample) test_data= valPairFeatureRdds.map(lambda pair_feature: pair_feature.sample) """ Explanation: Transform data to RDD of Sample. We use optimizer of BigDL directly to train the model, it requires data to be provided in format of RDD(Sample). A Sample is a BigDL data structure which can be constructed using 2 numpy arrays, feature and label respectively. The API interface is Sample.from_ndarray(feature, label). Wide&Deep model need two input tensors, one is SparseTensor for the Wide model, another is a DenseTensor for the Deep model. End of explanation """ wide_n_deep = WideAndDeep(5, column_info, "wide_n_deep") """ Explanation: Create the Wide&Deep model. In Analytics Zoo, it is simple to build Wide&Deep model by calling WideAndDeep API. You need specify model type, and class number, as well as column information of features according to your data. You can also change other default parameters in the network, like hidden layers. 
The model could be fed into an Optimizer of BigDL or NNClassifier of analytics-zoo. Please refer to the document for more details. In this example, we demostrate how to use optimizer of BigDL. End of explanation """ wide_n_deep.compile(optimizer = "adam", loss= "sparse_categorical_crossentropy", metrics=['accuracy']) tmp_log_dir = create_tmp_path() wide_n_deep.set_tensorboard(tmp_log_dir, "training_wideanddeep") """ Explanation: Create optimizer and train the model End of explanation """ %%time # Boot training process wide_n_deep.fit(train_data, batch_size = 8000, nb_epoch = 10, validation_data = test_data) print("Optimization Done.") """ Explanation: Train the network. Wait some time till it finished.. Voila! You've got a trained model End of explanation """ results = wide_n_deep.predict(test_data) results.take(5) results_class = wide_n_deep.predict_class(test_data) results_class.take(5) """ Explanation: Prediction and recommendation Zoo models make inferences based on the given data using model.predict(val_rdd) API. A result of RDD is returned. predict_class returns the predicted label. End of explanation """ userItemPairPrediction = wide_n_deep.predict_user_item_pair(valPairFeatureRdds) for result in userItemPairPrediction.take(5): print(result) """ Explanation: In the Analytics Zoo, Recommender has provied 3 unique APIs to predict user-item pairs and make recommendations for users or items given candidates. 
Predict for user item pairs End of explanation """ userRecs = wide_n_deep.recommend_for_user(valPairFeatureRdds, 3) for result in userRecs.take(5): print(result) """ Explanation: Recommend 3 items for each user given candidates in the feature RDDs End of explanation """ itemRecs = wide_n_deep.recommend_for_item(valPairFeatureRdds, 3) for result in itemRecs.take(5): print(result) """ Explanation: Recommend 3 users for each item given candidates in the feature RDDs End of explanation """ #retrieve train and validation summary object and read the loss data into ndarray's. train_loss = np.array(wide_n_deep.get_train_summary("Loss")) val_loss = np.array(wide_n_deep.get_validation_summary("Loss")) #plot the train and validation curves # each event data is a tuple in form of (iteration_count, value, timestamp) plt.figure(figsize = (12,6)) plt.plot(train_loss[:,0],train_loss[:,1],label='train loss') plt.plot(val_loss[:,0],val_loss[:,1],label='val loss',color='green') plt.scatter(val_loss[:,0],val_loss[:,1],color='green') plt.legend(); plt.xlim(0,train_loss.shape[0]+10) plt.grid(True) plt.title("loss") """ Explanation: Draw the convergence curve End of explanation """ plt.figure(figsize = (12,6)) top1 = np.array(wide_n_deep.get_validation_summary("Top1Accuracy")) plt.plot(top1[:,0],top1[:,1],label='top1') plt.title("top1 accuracy") plt.grid(True) plt.legend(); plt.xlim(0,train_loss.shape[0]+10) valPairFeatureRdds.unpersist() sc.stop() """ Explanation: plot accuracy End of explanation """
nick-youngblut/SIPSim
ipynb/bac_genome/OTU-level_variability/p5_NCBI_comp-gen_OTU-ampFrag-GC.ipynb
mit
import os workDir = '/var/seq_data/ncbi_db/genome/Jan2016/ampFragsGC/' ampFragFile = '/var/seq_data/ncbi_db/genome/Jan2016/ampFrags_KDE.pkl' otuFile = '/var/seq_data/ncbi_db/genome/Jan2016/rnammer_aln/otusn_map_nonSingle.txt' """ Explanation: Goal simulating amplicon fragments for genomes in non-singleton OTUs Setting variables End of explanation """ import dill import numpy as np import pandas as pd %load_ext rpy2.ipython %load_ext pushnote %%R library(dplyr) library(tidyr) library(ggplot2) if not os.path.isdir(workDir): os.makedirs(workDir) %cd $workDir """ Explanation: Init End of explanation """ # max 13C shift max_13C_shift_in_BD = 0.036 # min BD (that we care about) min_GC = 13.5 min_BD = min_GC/100.0 * 0.098 + 1.66 # max BD (that we care about) max_GC = 80 max_BD = max_GC / 100.0 * 0.098 + 1.66 # 80.0% G+C max_BD = max_BD + max_13C_shift_in_BD ## BD range of values BD_vals = np.arange(min_BD, max_BD, 0.001) """ Explanation: gradient params End of explanation """ infoFile = os.path.splitext(ampFragFile)[0] + '_info.txt' infoFile = os.path.join(workDir, os.path.split(infoFile)[1]) !SIPSim KDE_info -s $ampFragFile > $infoFile !wc -l $infoFile !head -n 4 $infoFile %%R -i infoFile df.info = read.delim(infoFile, sep='\t') %>% mutate(genus_ID = gsub('_.+', '', taxon_ID), species_ID = gsub('^([^_]+_[^_]+).+', '\\1', taxon_ID)) df.info %>% head(n=3) """ Explanation: Get GC distribution info End of explanation """ %%R -i otuFile df.OTU = read.delim(otuFile, sep='\t', header=FALSE) %>% mutate(genome_ID = gsub('\\.fna', '', V13)) %>% select(genome_ID, V2) %>% rename('OTU_ID' = V2) df.info.j = inner_join(df.info, df.OTU, c('taxon_ID' = 'genome_ID')) df.OTU = NULL df.info.j %>% head(n=3) %%R df.info.j.f1 = df.info.j %>% filter(KDE_ID == 1) %>% distinct(taxon_ID, OTU_ID) %>% group_by(OTU_ID) %>% mutate(n_taxa = n()) %>% ungroup() %>% filter(n_taxa > 1) df.info.j.f1 %>% nrow %>% print df.info.j.f1 %>% head(n=3) %>% as.data.frame %%R -h 4000 -w 800 df.info.j.f1$taxon_ID = 
reorder(df.info.j.f1$taxon_ID, df.info.j.f1$genus_ID) df.info.j.f1$OTU_ID = reorder(df.info.j.f1$OTU_ID, -df.info.j.f1$n_taxa) ggplot(df.info.j.f1, aes(x=taxon_ID, y=median, ymin=percentile_25, ymax=percentile_75, color=species_ID)) + geom_linerange() + geom_point(size=1) + facet_wrap(~ OTU_ID, scales='free_x', ncol=8) + theme_bw() + theme( axis.text.x = element_blank(), legend.position='none' ) """ Explanation: Combining info table with OTU tabel End of explanation """
AlJohri/DAT-DC-12
notebooks/kobe.ipynb
mit
kobe = pd.read_csv('../data/kobe.csv') """ Explanation: Read in the Kobe Bryant shooting data [https://www.kaggle.com/c/kobe-bryant-shot-selection] End of explanation """ [(col, dtype) for col, dtype in zip(kobe.columns, kobe.dtypes) if dtype != 'object'] num_columns = [col for col, dtype in zip(kobe.columns, kobe.dtypes) if dtype != 'object'] num_columns """ Explanation: For now, use just the numerical datatypes. They are below as num_columns End of explanation """ kobe = kobe """ Explanation: The shot_made_flag is the result (0 or 1) of the shot that Kobe took. Some of the values are missing (e.g. NaN). Drop them. End of explanation """ import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline sns.set(font_scale=1.5) """ Explanation: Use the num_columns, the kobe dataframe to fit() the models. Choose one or more of the entries in num_columns as features. These models are used to predict whether Kobe will make or miss a shot given the certain input parameters provided. Get the accuracy of each model with respect to the data used to fit the model. End of explanation """ # fit a linear regression model and store the predictions example = pd.DataFrame({'a':[1,2,3,4,5,6], 'b':[1,1,0,0,0,1]}) feature_cols = ['a'] X = example[feature_cols] y = example.b from sklearn.linear_model import LinearRegression linreg = LinearRegression() linreg.fit(X, y) example['pred'] = linreg.predict(X) # scatter plot that includes the regression line plt.scatter(example.a, example.b) plt.plot(example.a, example.pred, color='red') plt.xlabel('a') plt.ylabel('b') from sklearn.metrics import accuracy_score accuracy_score(example.b, example.pred.astype(int)) """ Explanation: The following is a reminder of how the SciKit-Learn Models can be interfaced End of explanation """
tpin3694/tpin3694.github.io
regex/match_email_addresses.ipynb
mit
# Load regex package
import re
""" Explanation: Title: Match Email Addresses Slug: match_email_addresses Summary: Match Email Addresses Date: 2016-05-01 12:00 Category: Regex Tags: Basics Authors: Chris Albon Based on: StackOverflow Preliminaries End of explanation """
# Create a variable containing a text string
text = 'My email is chris@hotmail.com, thanks! No, I am at bob@data.ninja.'
""" Explanation: Create some text End of explanation """
# Find all email addresses
re.findall(r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9]+', text)

# Explanation:
# This regex has three parts
# [a-zA-Z0-9_.+-]+  Matches the username: one or more letters, digits, or _ . + -
# @[a-zA-Z0-9-]+    Matches '@' followed by the domain name (letters, digits, hyphens)
# \.[a-zA-Z0-9]+    Matches '.' followed by the TLD (letters and digits only)
#                   (the comment previously said [a-zA-Z0-9-.]+, which did not
#                   match the character class actually used in the pattern)
""" Explanation: Apply regex End of explanation """
google/jax-md
notebooks/athermal_linear_elasticity.ipynb
apache-2.0
#@title Imports and utility code !pip install jax-md import numpy as onp import jax.numpy as jnp from jax.config import config config.update('jax_enable_x64', True) from jax import random from jax import jit, lax, grad, vmap import jax.scipy as jsp from jax_md import space, energy, smap, minimize, util, elasticity, quantity from jax_md.colab_tools import renderer f32 = jnp.float32 f64 = jnp.float64 from functools import partial import matplotlib import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 16}) def format_plot(x, y): plt.grid(True) plt.xlabel(x, fontsize=20) plt.ylabel(y, fontsize=20) def finalize_plot(shape=(1, 0.7)): plt.gcf().set_size_inches( shape[0] * 1.5 * plt.gcf().get_size_inches()[1], shape[1] * 1.5 * plt.gcf().get_size_inches()[1]) def run_minimization_while(energy_fn, R_init, shift, max_grad_thresh = 1e-12, max_num_steps=1000000, **kwargs): init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs) apply = jit(apply) @jit def get_maxgrad(state): return jnp.amax(jnp.abs(state.force)) @jit def cond_fn(val): state, i = val return jnp.logical_and(get_maxgrad(state) > max_grad_thresh, i<max_num_steps) @jit def body_fn(val): state, i = val return apply(state), i+1 state = init(R_init) state, num_iterations = lax.while_loop(cond_fn, body_fn, (state, 0)) return state.position, get_maxgrad(state), num_iterations def run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift, max_grad_thresh = 1e-12, max_num_steps = 1000000, step_inc = 1000, verbose = False, **kwargs): nbrs = neighbor_fn.allocate(R_init) init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs) apply = jit(apply) @jit def get_maxgrad(state): return jnp.amax(jnp.abs(state.force)) @jit def body_fn(state_nbrs, t): state, nbrs = state_nbrs nbrs = neighbor_fn.update(state.position, nbrs) state = apply(state, neighbor=nbrs) return (state, nbrs), 0 state = init(R_init, neighbor=nbrs) step = 0 while step < max_num_steps: if verbose: print('minimization 
step {}'.format(step)) rtn_state, _ = lax.scan(body_fn, (state, nbrs), step + jnp.arange(step_inc)) new_state, nbrs = rtn_state # If the neighbor list overflowed, rebuild it and repeat part of # the simulation. if nbrs.did_buffer_overflow: print('Buffer overflow.') nbrs = neighbor_fn.allocate(state.position) else: state = new_state step += step_inc if get_maxgrad(state) <= max_grad_thresh: break if verbose: print('successfully finished {} steps.'.format(step*step_inc)) return state.position, get_maxgrad(state), nbrs, step def run_minimization_scan(energy_fn, R_init, shift, num_steps=5000, **kwargs): init,apply=minimize.fire_descent(jit(energy_fn), shift, **kwargs) apply = jit(apply) @jit def scan_fn(state, i): return apply(state), 0. state = init(R_init) state, _ = lax.scan(scan_fn,state,jnp.arange(num_steps)) return state.position, jnp.amax(jnp.abs(state.force)) key = random.PRNGKey(0) """ Explanation: <a href="https://colab.research.google.com/github/google/jax-md/blob/main/notebooks/athermal_linear_elasticity.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> End of explanation """ N = 128 dimension = 3 box_size = quantity.box_size_at_number_density(N, 1.4, dimension) displacement, shift = space.periodic(box_size) energy_fn = energy.soft_sphere_pair(displacement) key, split = random.split(key) R_init = random.uniform(split, (N,dimension), minval=0.0, maxval=box_size, dtype=f64) R, max_grad, niters = run_minimization_while(energy_fn, R_init, shift) print('Minimized the energy in {} minimization steps and reached a final \ maximum gradient of {}'.format(niters, max_grad)) """ Explanation: Linear elasticity in athermal systems The elastic modulus tensor An global affine deformation is given to lowest order by a symmetric strain tensor $\epsilon$, which transforms any vector $r$ according to \begin{equation} r \rightarrow (1 + \epsilon) \cdot r. 
\end{equation} Note that in $d$ dimensions, the strain tensor has $d(d + 1)/2$ independent elements. Now, when a mechanically stable system (i.e. a system at a local energy minimum where there is zero net force on every particle) is subject to an affine deformation, it usually does not remain in mechanical equilibrium. Therefore, there is a secondary, nonaffine response that returns the system to mechanical equilibrium, though usually at a different energy than the undeformed state. The change of energy can be written to quadratic order as \begin{equation} \frac{ \Delta U}{V^0} = \sigma^0_{ij}\epsilon_{ji} + \frac 12 C_{ijkl} \epsilon_{ij} \epsilon_{kl} + O\left( \epsilon^3 \right) \end{equation} where $C_{ijkl}$ is the $d \times d \times d \times d$ elastic modulus tensor, $\sigma^0$ is the $d \times d$ symmetric stress tensor describing residual stresses in the initial state, and $V^0$ is the volume of the initial state. The symmetries of $\epsilon_{ij}$ imply the following: \begin{equation} C_{ijkl} = C_{jikl} = C_{ijlk} = C_{klij} \end{equation} When no further symmetries are assumed, the number of independent elastic constants becomes $\frac 18 d(d + 1)(d^2 + d + 2)$, which is 6 in two dimensions and 21 in three dimensions. Linear response to an external force Consider a set of $N$ particles in $d$ dimensions with positions $R_0$. Using $u \equiv R - R_0$ and assuming fixed boundary conditions, we can expand the energy about $R_0$: \begin{equation} U = U^0 - F^0 u + \frac 12 u H^0 u + O(u^3), \end{equation} where $U^0$ is the energy at $R_0$, $F^0$ is the force, $F^0_\mu \equiv \left. \frac{\partial U}{\partial u_\mu} \right|_{u=0}$, and $H^0$ is the Hessian, $H^0 \equiv \left. \frac{\partial^2 U}{\partial u_\mu \partial u_\nu}\right|_{u=0}$. Note that here we are expanding in terms of the particle positions, whereas above we were expanding in the global strain degrees of freedom. If we assume that $R_0$ corresponds to a local energy minimum, then $F^0=0$.
Dropping higher order terms, we have a system of coupled harmonic oscillators given by \begin{equation} \Delta U \equiv U - U^0 = \frac 12 u H^0 u. \end{equation} This is independent of the form or details of $U$. Hooke's law for this system gives the net force $f$ as a result of displacing the particles by $u$: \begin{equation} f = -H^0 u. \end{equation} Thus, if an external force $f_\mathrm{ext}$ is applied, the particles will respond so that the total force is zero, i.e. $f = -f_\mathrm{ext}$. This response is obtained by solving for $u$: \begin{equation} u = (H^0)^{-1} f_\mathrm{ext}. \end{equation} Response to an affine strain Now consider a strain tensor $\epsilon = \tilde \epsilon \gamma$, where $\gamma$ is a scalar and will be used to explicitly take the limit of small strain for fixed $\tilde \epsilon$. Importantly, the strain tensor represents a deformation of the underlying space that the particles live in and thus is a degree of freedom that is independent of the $Nd$ particle degrees of freedom. Therefore, knowing the particle positions $R$ is not sufficient to describe the energy; we also need to know $\gamma$ to specify the correct boundary conditions: \begin{equation} U = U(R, \gamma). \end{equation} We now have a system with $Nd+1$ variables $\{R, \gamma\}$ that, like before, form a set of coupled harmonic oscillators. We can describe this using the so-called "generalized Hessian" matrix of second derivatives of the energy with respect to both $R$ and $\gamma$.
Specifically, Hooke's law reads \begin{equation} \left( \begin{array}{ ccccc|c} &&&&&\ &&H^0 &&& -\Xi \ &&&&& \ \hline &&-\Xi^T &&&\frac{\partial ^2U}{\partial \gamma^2} \end{array}\right) \left( \begin{array}{ c} \ u \ \ \hline \gamma \end{array}\right) = \left( \begin{array}{ c} \ 0 \ \ \hline \tilde \sigma \end{array}\right), \end{equation} where $u = R - R_0$ is the displacement of every particle, $\Xi = -\frac{ \partial^2 U}{\partial R \partial \gamma}$, and $\tilde \sigma$ is the induced stress caused by the deformation. (If there is prestress in the system, i.e. $\sigma^0 = \frac{\partial U}{\partial \gamma} \neq 0$, the total stress is $\sigma = \sigma^0 + \tilde \sigma$.) In this equation, $\gamma$ is held fixed and the zero in the top of the right-hand-side imposes force balance after the deformation and resulting non-affine displacement of every particle. The non-affine displacement itself, $u$, and the induced stress $\sigma$, are both unknown but can be solved for. First, the non-affine response is \begin{equation} u = (H^0)^{-1} \Xi \; \gamma, \end{equation} where we note that in the limit of small $\gamma$, the force induced on every particle due to the affine deformation is $\Xi \; \gamma$. Second, the induced stress is \begin{equation} \tilde \sigma = \frac{\partial ^2U}{\partial \gamma^2} \gamma - \Xi^T u = \left(\frac{\partial ^2U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right) \gamma. \end{equation} Similarly, the change in energy is \begin{equation} \frac{\Delta U}{V^0} = \sigma^0 \gamma + \frac 1{2V^0} \left(\frac{\partial ^2U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right) \gamma^2, \end{equation} where $\sigma^0$ is the prestress in the system per unit volume. 
Comparing this to the above definition of the the elastic modulus tensor, we see that the elastic constant associated with the deformation $\tilde \epsilon$ is \begin{equation} C(\tilde \epsilon) = \frac 1{V^0} \left( \frac{\partial^2 U}{\partial \gamma^2} - \Xi^T (H^0)^{-1} \Xi \right). \end{equation} $C(\tilde \epsilon)$ is related to $C_{ijkl}$ by summing $C(\tilde \epsilon) = C_{ijkl}\tilde \epsilon_{ij} \tilde \epsilon_{kl}$. So, if $\tilde \epsilon_{ij} = \delta_{0i}\delta_{0j}$, then $C_{0000} = C(\tilde \epsilon)$. The internal code in jax_md.elasticity repeats this calculation for different $\tilde \epsilon$ to back out the different independent elastic constants. First example As a first example, let's consider a 3d system of 128 soft spheres. The elastic modulus tensor is only defined for systems that are at a local energy minimum, so we start by minimizing the energy. End of explanation """ emt_fn = jit(elasticity.athermal_moduli(energy_fn, check_convergence=True)) C, converged = emt_fn(R,box_size) print(converged) """ Explanation: We can now calculate the elastic modulus tensor End of explanation """ key, split = random.split(key) #Pick a random (symmetric) strain tensor strain_tensor = random.uniform(split, (dimension,dimension), minval=-1, maxval=1, dtype=f64) strain_tensor = (strain_tensor + strain_tensor.T) / 2.0 #Define a function to calculate the energy at a given strain def get_energy_at_strain(gamma, strain_tensor, R_init, box): R_init = space.transform(space.inverse(box),R_init) new_box = jnp.matmul(jnp.eye(strain_tensor.shape[0]) + gamma * strain_tensor, box) displacement, shift = space.periodic_general(new_box, fractional_coordinates=True) energy_fn = energy.soft_sphere_pair(displacement, sigma=1.0) R_final, _, _ = run_minimization_while(energy_fn, R_init, shift) return energy_fn(R_final) gammas = jnp.logspace(-7,-4,50) Us = vmap(get_energy_at_strain, in_axes=(0,None,None,None))(gammas, strain_tensor, R, box_size * jnp.eye(dimension)) """ 
Explanation: The elastic modulus tensor gives a quantitative prediction for how the energy should change if we deform the system according to a strain tensor \begin{equation} \frac{ \Delta U}{V^0} = \sigma^0\epsilon + \frac 12 \epsilon C \epsilon + O\left(\epsilon^3\right) \end{equation} To test this, we define $\epsilon = \tilde \epsilon \gamma$ for a randomly chosen strain tensor $\tilde \epsilon$ and for $\gamma << 1$. Ignoring terms of order $\gamma^3$ and higher, we have \begin{equation} \frac{ \Delta U}{V^0} - \sigma^0\epsilon = \left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2 \end{equation} Thus, we can test our calculation of $C$ by plotting $\frac{ \Delta U}{V^0} - \sigma^0\epsilon$ as a function of $\gamma$ for our randomly chosen $\tilde \epsilon$ and comparing it to the line $\left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2$. First, generate a random $\tilde \epsilon$ and calculate $U$ for different $\gamma$. End of explanation """ U_0 = energy_fn(R) stress_0 = -quantity.stress(energy_fn, R, box_size) V_0 = quantity.volume(dimension, box_size) #Plot \Delta E/V - sigma*epsilon y1 = (Us - U_0)/V_0 - gammas * jnp.einsum('ij,ji->',stress_0,strain_tensor) plt.plot(jnp.abs(gammas), y1, lw=3, label=r'$\Delta U/V^0 - \sigma^0 \epsilon$') #Plot 0.5 * epsilon*C*epsilon y2 = 0.5 * jnp.einsum('ij,ijkl,kl->',strain_tensor, C, strain_tensor) * gammas**2 plt.plot(jnp.abs(gammas), y2, ls='--', lw=3, label=r'$(1/2) \epsilon C \epsilon$') plt.xscale('log') plt.yscale('log') plt.legend() format_plot('$\gamma$','') finalize_plot() """ Explanation: Plot $\frac{ \Delta U}{V^0} - \sigma^0\epsilon$ and $\left[\frac 12 \tilde \epsilon C \tilde \epsilon \right] \gamma^2$ as functinos of $\gamma$. While there may be disagreements for very small $\gamma$ due to numerical precision or at large $\gamma$ due to higher-order terms becoming relevant, there should be a region of quantitative agreement. 
End of explanation """ #Plot the difference, which should scales as gamma**3 plt.plot(jnp.abs(gammas), jnp.abs(y1-y2), label=r'$T(\gamma)$') #Plot gamma**3 for reference plt.plot(jnp.abs(gammas), jnp.abs(gammas**3), 'black', label=r'slope = $\gamma^3$ (for reference)') plt.xscale('log') plt.yscale('log') plt.legend() format_plot('$\gamma$','') finalize_plot() """ Explanation: To test the accuracy of this agreement, we first define: \begin{equation} T(\gamma) = \frac{ \Delta U}{V^0} - \sigma^0\epsilon - \frac 12 \epsilon C \epsilon \sim O\left(\gamma^3\right) \end{equation} which should be proportional to $\gamma^3$ for small $\gamma$ (note that this expected scaling should break down when the y-axis approaches machine precision). This is a prediction of scaling only, so we plot a line proportional to $\gamma^3$ to compare the slopes. End of explanation """ C_3d = C """ Explanation: Save C for later testing. End of explanation """ N = 5000 dimension = 2 box_size = quantity.box_size_at_number_density(N, 1.3, dimension) box = box_size * jnp.eye(dimension) displacement, shift = space.periodic_general(box, fractional_coordinates=True) sigma = jnp.array([[1.0, 1.2], [1.2, 1.4]]) N_2 = int(N / 2) species = jnp.where(jnp.arange(N) < N_2, 0, 1) neighbor_fn, energy_fn = energy.soft_sphere_neighbor_list( displacement, box_size, species=species, sigma=sigma, dr_threshold = 0.1, fractional_coordinates = True) key, split = random.split(key) R_init = random.uniform(split, (N,dimension), minval=0.0, maxval=1.0, dtype=f64) R, max_grad, nbrs, niters = run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift) print('Minimized the energy in {} minimization steps and reached a final \ maximum gradient of {}'.format(niters, max_grad)) """ Explanation: Example with neighbor lists As a second example, consider a much larger systems that is implemented using neighbor lists. 
End of explanation """ emt_fn = jit(elasticity.athermal_moduli(energy_fn, check_convergence=True)) C, converged = emt_fn(R,box,neighbor=nbrs) print(converged) """ Explanation: We have to pass the neighbor list to emt_fn. End of explanation """ %timeit emt_fn(R,box,neighbor=nbrs) """ Explanation: We can time the calculation of the compiled function. End of explanation """ key, split = random.split(key) #Pick a random (symmetric) strain tensor strain_tensor = random.uniform(split, (dimension,dimension), minval=-1, maxval=1, dtype=f64) strain_tensor = (strain_tensor + strain_tensor.T) / 2.0 def get_energy_at_strain(gamma, strain_tensor, R_init, box): new_box = jnp.matmul(jnp.eye(strain_tensor.shape[0]) + gamma * strain_tensor, box) displacement, shift = space.periodic_general(new_box, fractional_coordinates=True) neighbor_fn, energy_fn = energy.soft_sphere_neighbor_list( displacement, box_size, species=species, sigma=sigma, dr_threshold = 0.1, fractional_coordinates = True, capacity_multiplier = 1.5) R_final, _, nbrs, _ = run_minimization_while_neighbor_list(energy_fn, neighbor_fn, R_init, shift) return energy_fn(R_final, neighbor=nbrs) gammas = jnp.logspace(-7,-3,20) Us = jnp.array([ get_energy_at_strain(gamma, strain_tensor, R, box) for gamma in gammas]) U_0 = energy_fn(R, neighbor=nbrs) stress_0 = -quantity.stress(energy_fn, R, box, neighbor=nbrs) V_0 = quantity.volume(dimension, box) #Plot \Delta E/V - sigma*epsilon y1 = (Us - U_0)/V_0 - gammas * jnp.einsum('ij,ji->',stress_0,strain_tensor) plt.plot(jnp.abs(gammas), y1, lw=3, label=r'$\Delta U/V^0 - \sigma^0 \epsilon$') #Plot 0.5 * epsilon*C*epsilon y2 = 0.5 * jnp.einsum('ij,ijkl,kl->',strain_tensor, C, strain_tensor) * gammas**2 plt.plot(jnp.abs(gammas), y2, ls='--', lw=3, label=r'$(1/2) \epsilon C \epsilon$') plt.xscale('log') plt.yscale('log') plt.legend() format_plot('$\gamma$','') finalize_plot() #Plot the difference, which should scales as gamma**3 plt.plot(jnp.abs(gammas), jnp.abs(y1-y2), 
label=r'$T(\gamma)$') #Plot gamma**3 for reference plt.plot(jnp.abs(gammas), jnp.abs(gammas**3), 'black', label=r'slope = $\gamma^3$ (for reference)') plt.xscale('log') plt.yscale('log') plt.legend() format_plot('$\gamma$','') finalize_plot() """ Explanation: Repeat the same tests as above. NOTE: this may take a few minutes. End of explanation """ C_2d = C """ Explanation: Save C for later testing. End of explanation """ #This can be 2 or 3 depending on which of the above solutions has been calculated dimension = 3 if dimension == 2: C = C_2d else: C = C_3d key, split = random.split(key) e = random.uniform(key, (dimension,dimension), minval=-1, maxval=1, dtype=f64) e = (e + e.T)/2. """ Explanation: Mandel notation Mandel notation is a way to represent symmetric second-rank tensors and fourth-rank tensors with so-called "minor symmetries", i.e. $T_{ijkl} = T_{ijlk} = T_{jilk}$. The idea is to map pairs of indices so that $(i,i) \rightarrow i$ and $(i,j) \rightarrow K - i - j$ for $i\neq j$, where $K = d(d+1)/2$ is the number of independent pairs $(i,j)$ for tensors with $d$ elements along each axis. Thus, second-rank tensors become first-rank tensors, and fourth-rank tensors become second-rank tensors, according to: \begin{align} M_{m(i,j)} &= T_{ij} w(i,j) \ M_{m(i,j),m(k,l)} &= T_{ijkl} w(i,j) w(k,l). \end{align} Here, $m(i,j)$ is the mapping function described above, and w(i,j) is a weight that preserves summation rules and is given by \begin{align} w(i,j) = \delta_{ij} + \sqrt{2} (\delta_{ij}-1). \end{align} We can convert strain tensors, stress tensors, and elastic modulus tensors to and from Mandel notation using the functions elasticity.tensor_to_mandel and elasticity.mandel_to_tensor. First, lets copy one of the previously calculated elastic modulus tensors and define a random strain tensor. 
End of explanation """ e_m = jit(elasticity.tensor_to_mandel)(e) C_m = jit(elasticity.tensor_to_mandel)(C) print(e_m) print(C_m) """ Explanation: Convert e and C to Mental notation End of explanation """ sum_m = jnp.einsum('i,ij,j->',e_m, C_m, e_m) sum_t = jnp.einsum('ij,ijkl,kl->',e, C, e) print('Relative error is {}, which should be very close to 0'.format((sum_t-sum_m)/sum_t)) """ Explanation: Using "bar" notation to represent Mandel vectors and matrices, we have \begin{equation} \frac{ \Delta U}{V^0} = \bar \sigma_i^0 \bar\epsilon_i + \frac 12 \bar \epsilon_i \bar C_{ij} \bar\epsilon_j + O\left(\bar \epsilon^3\right) \end{equation} We can explicity test that the sums are equivalent to the sums involving the original tensors End of explanation """ C_new = jit(elasticity.mandel_to_tensor)(C_m) print('Max error in C is {}, which should be very close to 0.'.format(jnp.max(jnp.abs(C-C_new)))) e_new = jit(elasticity.mandel_to_tensor)(e_m) print('Max error in e is {}, which should be very close to 0.'.format(jnp.max(jnp.abs(e-e_new)))) """ Explanation: Finally, we can convert back to the full tensors and check that they are unchanged. End of explanation """ elasticity.extract_isotropic_moduli(C) """ Explanation: Isotropic elastic constants The calculation of the elastic modulus tensor does not make any assumptions about the underlying symmetries in the material. However, for isotropic systems, only two constants are needed to completely describe the elastic behavior. These are often taken to be the bulk modulus, $B$, and the shear modulus, $G$, or the Young's modulus, $E$, and the Poisson's ratio, $\nu$. The function elasticity.extract_isotropic_moduli extracts these values, as well as the longitudinal modulus, $M$, from an elastic modulus tensor. Importantly, since there is not guarantee that C is calculated from a truely isotropic systems, these are "orientation-averaged" values. 
For example, there are many directions in which you can shear a system, and the shear modulus that is returned represents and average over all these orientations. This can be an effective way to average over small fluctuations in an "almost isotropic" system, but the values lose their typical meaning when the systems is highly anisotropic. End of explanation """ def setup(N,dimension,key): box_size = quantity.box_size_at_number_density(N, 1.4, dimension) box = box_size * jnp.eye(dimension) displacement, shift = space.periodic_general(box, fractional_coordinates=True) R_init = random.uniform(key, (N,dimension), minval=0.0, maxval=1.0, dtype=f64) def run(sigma): energy_fn = energy.soft_sphere_pair(displacement, sigma=sigma) R, max_grad = run_minimization_scan(energy_fn, R_init, shift, num_steps=1000) emt_fn = jit(elasticity.athermal_moduli(energy_fn)) C = emt_fn(R,box) return elasticity.extract_isotropic_moduli(C)['G'] return run key, split = random.split(key) N = 50 dimension = 2 run = setup(N, dimension, split) sigma = jnp.linspace(1.0,1.4,N) print(run(sigma)) print(grad(run)(sigma)) """ Explanation: Gradients The calculation of the elastic modulus tensor is fully differentiable: End of explanation """
tensorflow/docs-l10n
site/zh-cn/quantum/tutorials/mnist.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ !pip install tensorflow==2.4.1 """ Explanation: MNIST 分类 <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://tensorflow.google.cn/quantum/tutorials/mnist"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png">在 TensorFlow.org 上查看</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/quantum/tutorials/mnist.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png">在 Google Colab 中运行</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/quantum/tutorials/mnist.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png">在 GitHub 上查看源代码</a></td> <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/quantum/tutorials/mnist.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png">下载笔记本</a></td> </table> 本教程会构建一个量子神经网络 (QNN) 来分类 MNIST 的简单版本,这与在 <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>的论文中使用的方式类似。我们会比较量子神经网络与经典神经网络解决一个经典数据问题的性能。 设置 End of explanation """ !pip install tensorflow-quantum # Update package resources to account for version changes. 
import importlib, pkg_resources importlib.reload(pkg_resources) """ Explanation: 安装 TensorFlow Quantum: End of explanation """ import tensorflow as tf import tensorflow_quantum as tfq import cirq import sympy import numpy as np import seaborn as sns import collections # visualization tools %matplotlib inline import matplotlib.pyplot as plt from cirq.contrib.svg import SVGCircuit """ Explanation: 现在,导入 TensorFlow 和模块依赖项: End of explanation """ (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() # Rescale the images from [0,255] to the [0.0,1.0] range. x_train, x_test = x_train[..., np.newaxis]/255.0, x_test[..., np.newaxis]/255.0 print("Number of original training examples:", len(x_train)) print("Number of original test examples:", len(x_test)) """ Explanation: 1. 加载数据 在本教程中,根据 <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>的论文,您将构建一个二元分类器来区分 3 位数和 6 位数。本部分介绍了以下操作的数据处理: 从 Keras 加载原始数据。 筛选数据集中的 3 位数和 6 位数。 缩小图像,使其适合量子计算机。 移除所有矛盾样本。 将二值图像转换为 Cirq 电路。 将 Cirq 电路转换为 TensorFlow Quantum 电路。 1.1 加载原始数据 加载通过 Keras 分布的 MNIST 数据集。 End of explanation """ def filter_36(x, y): keep = (y == 3) | (y == 6) x, y = x[keep], y[keep] y = y == 3 return x,y x_train, y_train = filter_36(x_train, y_train) x_test, y_test = filter_36(x_test, y_test) print("Number of filtered training examples:", len(x_train)) print("Number of filtered test examples:", len(x_test)) """ Explanation: 筛选数据集,仅保留 3 位数和 6 位数,移除其他类。同时,将标签 y 转换为布尔值:3 为 True,6 为 False。 End of explanation """ print(y_train[0]) plt.imshow(x_train[0, :, :, 0]) plt.colorbar() """ Explanation: 显示第一个样本: End of explanation """ x_train_small = tf.image.resize(x_train, (4,4)).numpy() x_test_small = tf.image.resize(x_test, (4,4)).numpy() """ Explanation: 1.2 缩小图像 对目前的量子计算机来说,28x28 的图像太大。将图像大小调整至 4x4: End of explanation """ print(y_train[0]) plt.imshow(x_train_small[0,:,:,0], vmin=0, vmax=1) plt.colorbar() """ Explanation: 调整大小后,重新显示上面的第一个训练样本: End of explanation """ def remove_contradicting(xs, 
ys): mapping = collections.defaultdict(set) orig_x = {} # Determine the set of labels for each unique image: for x,y in zip(xs,ys): orig_x[tuple(x.flatten())] = x mapping[tuple(x.flatten())].add(y) new_x = [] new_y = [] for flatten_x in mapping: x = orig_x[flatten_x] labels = mapping[flatten_x] if len(labels) == 1: new_x.append(x) new_y.append(next(iter(labels))) else: # Throw out images that match more than one label. pass num_uniq_3 = sum(1 for value in mapping.values() if len(value) == 1 and True in value) num_uniq_6 = sum(1 for value in mapping.values() if len(value) == 1 and False in value) num_uniq_both = sum(1 for value in mapping.values() if len(value) == 2) print("Number of unique images:", len(mapping.values())) print("Number of unique 3s: ", num_uniq_3) print("Number of unique 6s: ", num_uniq_6) print("Number of unique contradicting labels (both 3 and 6): ", num_uniq_both) print() print("Initial number of images: ", len(xs)) print("Remaining non-contradicting unique images: ", len(new_x)) return np.array(new_x), np.array(new_y) """ Explanation: 1.3 移除矛盾样本 根据 <a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>论文的 3.3 学习区分数据部分,筛选数据集以移除同时标记为两个类的图像。 这不是标准机器学习步骤,但是为了便于继续学习该论文,我们包括了这一部分。 End of explanation """ x_train_nocon, y_train_nocon = remove_contradicting(x_train_small, y_train) """ Explanation: 结果计数与报告值不完全相符,但并未指定具体步骤。 这里还要注意的一点是,此时应用矛盾样本筛选并不能完全阻止模型收到矛盾的训练样本:下一步会对数据进行二值化,因而会产生更多冲突样本。 End of explanation """ THRESHOLD = 0.5 x_train_bin = np.array(x_train_nocon > THRESHOLD, dtype=np.float32) x_test_bin = np.array(x_test_small > THRESHOLD, dtype=np.float32) """ Explanation: 1.4 将数据编码为量子电路 为了使用量子计算机处理图像,<a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>提出使用量子位表示每个像素,这样,量子位的状态就取决于像素值。第一步是转换为二进制编码。 End of explanation """ _ = remove_contradicting(x_train_bin, y_train_nocon) """ Explanation: 如果您这时移除矛盾图像,可能只剩 193 个图像,很可能无法进行有效的训练。 End of explanation """ def convert_to_circuit(image): """Encode truncated classical 
image into quantum datapoint.""" values = np.ndarray.flatten(image) qubits = cirq.GridQubit.rect(4, 4) circuit = cirq.Circuit() for i, value in enumerate(values): if value: circuit.append(cirq.X(qubits[i])) return circuit x_train_circ = [convert_to_circuit(x) for x in x_train_bin] x_test_circ = [convert_to_circuit(x) for x in x_test_bin] """ Explanation: 对于值超过阈值的像素索引处的量子位,将通过 $X$ 门进行旋转。 End of explanation """ SVGCircuit(x_train_circ[0]) """ Explanation: 下面是为第一个样本创建的电路(电路图没有显示带零个门的量子位): End of explanation """ bin_img = x_train_bin[0,:,:,0] indices = np.array(np.where(bin_img)).T indices """ Explanation: 将此电路与图像值超过阈值的索引进行比较: End of explanation """ x_train_tfcirc = tfq.convert_to_tensor(x_train_circ) x_test_tfcirc = tfq.convert_to_tensor(x_test_circ) """ Explanation: 将这些 Cirq 电路转换为 tfq 的张量: End of explanation """ class CircuitLayerBuilder(): def __init__(self, data_qubits, readout): self.data_qubits = data_qubits self.readout = readout def add_layer(self, circuit, gate, prefix): for i, qubit in enumerate(self.data_qubits): symbol = sympy.Symbol(prefix + '-' + str(i)) circuit.append(gate(qubit, self.readout)**symbol) """ Explanation: 2. 量子神经网络 有关分类图像的量子电路结构的指导很少。由于分类基于对量子位读数的期望,因此,<a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>提出使用两个量子位门,从而始终根据量子位读数进行响应。这在一定程度上与在像素中运行一个小的<a href="https://arxiv.org/abs/1511.06464" class="external">酉 RNN</a> 类似。 2.1 构建模型电路 下面的示例介绍了这种分层方式。每个层使用同一个门的 <em>n</em> 个实例,其中每个数据量子位根据量子位读数进行响应。 首先,我们来看一个将这些门的层添加到电路的简单类: End of explanation """ demo_builder = CircuitLayerBuilder(data_qubits = cirq.GridQubit.rect(4,1), readout=cirq.GridQubit(-1,-1)) circuit = cirq.Circuit() demo_builder.add_layer(circuit, gate = cirq.XX, prefix='xx') SVGCircuit(circuit) """ Explanation: 构建一个示例电路层,了解其结构: End of explanation """ def create_quantum_model(): """Create a QNN model circuit and readout operation to go along with it.""" data_qubits = cirq.GridQubit.rect(4, 4) # a 4x4 grid. 
readout = cirq.GridQubit(-1, -1) # a single qubit at [-1,-1] circuit = cirq.Circuit() # Prepare the readout qubit. circuit.append(cirq.X(readout)) circuit.append(cirq.H(readout)) builder = CircuitLayerBuilder( data_qubits = data_qubits, readout=readout) # Then add layers (experiment by adding more). builder.add_layer(circuit, cirq.XX, "xx1") builder.add_layer(circuit, cirq.ZZ, "zz1") # Finally, prepare the readout qubit. circuit.append(cirq.H(readout)) return circuit, cirq.Z(readout) model_circuit, model_readout = create_quantum_model() """ Explanation: 现在,构建一个匹配数据电路大小的两层模型,并包括准备和读数操作。 End of explanation """ # Build the Keras model. model = tf.keras.Sequential([ # The input is the data-circuit, encoded as a tf.string tf.keras.layers.Input(shape=(), dtype=tf.string), # The PQC layer returns the expected value of the readout gate, range [-1,1]. tfq.layers.PQC(model_circuit, model_readout), ]) """ Explanation: 2.2 在 tfq-keras 模型中封装模型电路 使用量子组件构建 Keras 模型。从 x_train_circ(对经典数据进行编码)向此模型馈送“量子数据”。它使用参数化量子电路层 tfq.layers.PQC,在量子数据上训练模型电路。 为了对这些图像进行分类,<a href="https://arxiv.org/pdf/1802.06002.pdf" class="external">Farhi 等人</a>提出获取参数化电路中的量子位读数期望。该期望会返回一个 1 到 -1 之间的值。 End of explanation """ y_train_hinge = 2.0*y_train_nocon-1.0 y_test_hinge = 2.0*y_test-1.0 """ Explanation: 下面,我们介绍使用 compile 方法训练模型的步骤。 由于预期读数在 [-1,1] 的范围内,因此,优化铰链损失是很自然的选择。 注:另一种有效方式可能是将输出范围转换为 [0,1],并将其视为模型分配给类 3 的几率。这可以与标准的 tf.losses.BinaryCrossentropy 损失一起使用。 要在此处使用铰链损失,您需要对两处稍作调整。其一是转换标签 y_train_nocon,将其从布尔值转换为 [-1,1] 的范围,使其符合铰链损失的预期。 End of explanation """ def hinge_accuracy(y_true, y_pred): y_true = tf.squeeze(y_true) > 0.0 y_pred = tf.squeeze(y_pred) > 0.0 result = tf.cast(y_true == y_pred, tf.float32) return tf.reduce_mean(result) model.compile( loss=tf.keras.losses.Hinge(), optimizer=tf.keras.optimizers.Adam(), metrics=[hinge_accuracy]) print(model.summary()) """ Explanation: 其二,使用可将 [-1, 1] 作为 y_true 标签参数正确处理的自定义 hinge_accuracy 指标。tf.losses.BinaryAccuracy(threshold=0.0) 预期的 y_true 是一个布尔值,因此,不能与铰链损失一起使用。 
End of explanation """ EPOCHS = 3 BATCH_SIZE = 32 NUM_EXAMPLES = len(x_train_tfcirc) x_train_tfcirc_sub = x_train_tfcirc[:NUM_EXAMPLES] y_train_hinge_sub = y_train_hinge[:NUM_EXAMPLES] """ Explanation: 训练量子模型 现在,开始训练模型,这个过程大约需要 45 分钟。如果您不想等待太长时间,请使用一小部分数据(按如下设置 NUM_EXAMPLES=500)。这不会对模型在训练期间的进展造成实际影响(它仅包含 32 个参数,不需要太多数据来约束)。使用较少的样本只会让训练更快结束(5 分钟),但是运行时间已经足以在验证日志中表明取得进展。 End of explanation """ qnn_history = model.fit( x_train_tfcirc_sub, y_train_hinge_sub, batch_size=32, epochs=EPOCHS, verbose=1, validation_data=(x_test_tfcirc, y_test_hinge)) qnn_results = model.evaluate(x_test_tfcirc, y_test) """ Explanation: 将此模型训练至收敛,可以在测试集上达到 85% 以上的准确率。 End of explanation """ def create_classical_model(): # A simple model based off LeNet from https://keras.io/examples/mnist_cnn/ model = tf.keras.Sequential() model.add(tf.keras.layers.Conv2D(32, [3, 3], activation='relu', input_shape=(28,28,1))) model.add(tf.keras.layers.Conv2D(64, [3, 3], activation='relu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(128, activation='relu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(1)) return model model = create_classical_model() model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.summary() model.fit(x_train, y_train, batch_size=128, epochs=1, verbose=1, validation_data=(x_test, y_test)) cnn_results = model.evaluate(x_test, y_test) """ Explanation: 注:训练准确率可报告整个周期的平均值。验证准确率在每个周期结束时进行评估。 3. 
经典神经网络 虽然量子神经网络可以解决这种简单的 MNIST 问题,但是,对于这种任务,基本的经典神经网络的效果明显更优。经过一个训练周期后,经典神经网络在保留集上可以达到 98% 以上的准确率。 在以下示例中,经典神经网络使用 28x28 的全尺寸图像(而不是对图像进行下采样)解决 3-6 分类问题。这在测试集上可以轻松收敛至接近 100% 的准确率。 End of explanation """ def create_fair_classical_model(): # A simple model based off LeNet from https://keras.io/examples/mnist_cnn/ model = tf.keras.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(4,4,1))) model.add(tf.keras.layers.Dense(2, activation='relu')) model.add(tf.keras.layers.Dense(1)) return model model = create_fair_classical_model() model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.summary() model.fit(x_train_bin, y_train_nocon, batch_size=128, epochs=20, verbose=2, validation_data=(x_test_bin, y_test)) fair_nn_results = model.evaluate(x_test_bin, y_test) """ Explanation: 上面的模型包含接近 120 万个参数。为了进行更公平的比较,请尝试使用一个包含 37 个参数的模型,在下采样的图像上进行训练: End of explanation """ qnn_accuracy = qnn_results[1] cnn_accuracy = cnn_results[1] fair_nn_accuracy = fair_nn_results[1] sns.barplot(["Quantum", "Classical, full", "Classical, fair"], [qnn_accuracy, cnn_accuracy, fair_nn_accuracy]) """ Explanation: 4. 比较 输入的分辨率越高,模型越强大,CNN 解决此问题越轻松。但是,能力相近(约 32 个参数)的经典模型只需少量时间就可以达到同等准确率。不管怎样,经典神经网络都明显优于量子神经网络。对于经典数据,很难找到比经典神经网络更好的方案。 End of explanation """
BadWizard/Inflation
Market-Based-Expectations/get-raw-data.ipynb
mit
df_raw.tail() def getForward(v,t1=1,t2=2): return (np.power(np.power(1+v[1]/100,t2)/np.power(1+v[0]/100,t1),1/(t2-t1))-1)*100 ind1 = 0 ind2 = 1 v2 = df_raw.iloc[-1,ind2] v1 = df_raw.iloc[-1,ind1] t1 = int(df_raw.columns[ind1].strip('y')) t2 = int(df_raw.columns[ind2].strip('y')) print('v1 is {}, v2 is {}'.format(v1,v2)) v = [v1,v2] f = getForward(v,t1,t2) f df_raw['1yf1y'] = pd.Series('',index = df_raw.index) df_raw.tail() df_raw['1yf1y'] = df_raw[['1y','2y']].apply(getForward,axis=1) df_raw['1yf1y'] = df_raw[['1y','2y']].apply(lambda x: getForward(x,t1=1,t2=2),axis=1) df_raw.tail() def AddForward(df,t1=1,t2=2): #add an empty column df[str(t2-t1)+'yf'+str(t1)+'y'] = pd.Series('',index = df.index) df[str(t2-t1)+'yf'+str(t1)+'y'] = df[[str(t1)+'y',str(t2)+'y']].apply(lambda x: getForward(x,t1,t2),axis=1) AddForward(df_raw,t1=3,t2=4) df_raw.tail() t1,t2=1,3 #df[str(t2-t1)+'yf'+str(t1)+'y'] = pd.Series('',index = df.index) df.tail() """ Explanation: tt = [cname.split()[0].split('EUSWI')[1] for cname in df_raw.columns if 'CMPN' in cname ] len(tt) df_raw.columns = [c.split()[0].split('EUSWI')[1] + 'y' for c in df_raw.columns] End of explanation """ df_raw[['1y','3y']] df[str(t2-t1)+'yf'+str(t1)+'y'] = df[[str(t1)+'y',str(t2)+'y']].apply(lambda x: getForward(x,t1,t2),axis=1) """ Explanation: df_raw['2yf1y'] = df_raw[['1y','3y']].apply(lambda x: getForward(x,t1=1,t2=3),axis=1) End of explanation """
sdpython/ensae_teaching_cs
_doc/notebooks/td2a_ml/td2a_sentiment_analysis.ipynb
mit
%matplotlib inline from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: 2A.ml - Analyse de sentiments C'est désormais un problème classique de machine learning. D'un côté, du texte, de l'autre une appréciation, le plus souvent binaire, positive ou négative mais qui pourrait être graduelle. End of explanation """ from ensae_teaching_cs.data import load_sentiment_dataset df = load_sentiment_dataset() df.head() """ Explanation: Les données On récupère les données depuis le site UCI Sentiment Labelled Sentences Data Set où on utilise la fonction load_sentiment_dataset. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/nerc/cmip6/models/sandbox-1/aerosol.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'nerc', 'sandbox-1', 'aerosol') """ Explanation: ES-DOC CMIP6 Model Properties - Aerosol MIP Era: CMIP6 Institute: NERC Source ID: SANDBOX-1 Topic: Aerosol Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model. Properties: 69 (37 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:27 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Meteorological Forcings 5. Key Properties --&gt; Resolution 6. Key Properties --&gt; Tuning Applied 7. Transport 8. Emissions 9. Concentrations 10. Optical Radiative Properties 11. Optical Radiative Properties --&gt; Absorption 12. Optical Radiative Properties --&gt; Mixtures 13. Optical Radiative Properties --&gt; Impact Of H2o 14. Optical Radiative Properties --&gt; Radiative Scheme 15. Optical Radiative Properties --&gt; Cloud Interactions 16. Model 1. 
Key Properties Key properties of the aerosol model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of aerosol model code End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/volume ratio for aerosols" # "3D number concenttration for aerosols" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. 
Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Prognostic variables in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of tracers in the aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are aerosol calculations generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses atmospheric chemistry time stepping" # "Specific timestepping (operator splitting)" # "Specific timestepping (integrated)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Physical properties of seawater in ocean 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the time evolution of the prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for aerosol physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the aerosol model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.5. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Meteorological Forcings ** 4.1. Variables 3D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Variables 2D Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Two dimensionsal forcing variables, e.g. land-sea mask definition End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Frequency Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Frequency with which meteological forcings are applied (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Resolution Resolution in the aersosol model grid 5.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 5.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for aerosol model 6.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. 
Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Transport Aerosol transport 7.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of transport in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Specific transport scheme (eulerian)" # "Specific transport scheme (semi-lagrangian)" # "Specific transport scheme (eulerian and semi-lagrangian)" # "Specific transport scheme (lagrangian)" # TODO - please enter value(s) """ Explanation: 7.2. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for aerosol transport modeling End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Mass adjustment" # "Concentrations positivity" # "Gradients monotonicity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.3. Mass Conservation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to ensure mass conservation. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.transport.convention') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Uses Atmospheric chemistry transport scheme" # "Convective fluxes connected to tracers" # "Vertical velocities connected to tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7.4. Convention Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Transport by convention End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Emissions Atmospheric aerosol emissions 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of emissions in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Prescribed (climatology)" # "Prescribed CMIP6" # "Prescribed above surface" # "Interactive" # "Interactive above surface" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Method used to define aerosol species (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Volcanos" # "Bare ground" # "Sea surface" # "Lightning" # "Fires" # "Aircraft" # "Anthropogenic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the aerosol species are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Interannual" # "Annual" # "Monthly" # "Daily" # TODO - please enter value(s) """ Explanation: 8.4. Prescribed Climatology Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Specify the climatology type for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed via a climatology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of aerosol species emitted and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Other Method Characteristics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Characteristics of the &quot;other method&quot; used for aerosol emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Concentrations Atmospheric aerosol concentrations 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of concentrations in atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as mass mixing ratios. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Prescribed Fields Mmr Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed as AOD plus CCNs. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Optical Radiative Properties Aerosol optical and radiative properties 10.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of optical and radiative properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11. Optical Radiative Properties --&gt; Absorption Absortion properties in aerosol scheme 11.1. Black Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Dust Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.3. Organics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12. Optical Radiative Properties --&gt; Mixtures ** 12.1. External Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there external mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 12.2. Internal Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there internal mixing with respect to chemical composition? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Mixing Rule Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If there is internal mixing with respect to chemical composition then indicate the mixinrg rule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13. Optical Radiative Properties --&gt; Impact Of H2o ** 13.1. Size Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact size? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.2. Internal Mixture Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does H2O impact internal mixture? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Optical Radiative Properties --&gt; Radiative Scheme Radiative scheme for aerosol 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Shortwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of shortwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Optical Radiative Properties --&gt; Cloud Interactions Aerosol-cloud interactions 15.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of aerosol-cloud interactions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.2. Twomey Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the Twomey effect included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Twomey Minimum Ccn Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the Twomey effect is included, then what is the minimum CCN number? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.4. Drizzle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect drizzle? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Cloud Lifetime Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the scheme affect cloud lifetime? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.6. Longwave Bands Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of longwave bands End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Model Aerosol model 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosperic aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dry deposition" # "Sedimentation" # "Wet deposition (impaction scavenging)" # "Wet deposition (nucleation scavenging)" # "Coagulation" # "Oxidation (gas phase)" # "Oxidation (in cloud)" # "Condensation" # "Ageing" # "Advection (horizontal)" # "Advection (vertical)" # "Heterogeneous chemistry" # "Nucleation" # TODO - please enter value(s) """ Explanation: 16.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the Aerosol model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Radiation" # "Land surface" # "Heterogeneous chemistry" # "Clouds" # "Ocean" # "Cryosphere" # "Gas phase chemistry" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other model components coupled to the Aerosol model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "DMS" # "SO2" # "Ammonia" # "Iodine" # "Terpene" # "Isoprene" # "VOC" # "NOx" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.4. Gas Phase Precursors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of gas phase aerosol precursors. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Bulk" # "Modal" # "Bin" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.5. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.aerosol.model.bulk_scheme_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon / soot" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.6. Bulk Scheme Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of species covered by the bulk scheme. End of explanation """
mne-tools/mne-tools.github.io
0.16/_downloads/plot_sensor_connectivity.ipynb
bsd-3-clause
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu> # # License: BSD (3-clause) import numpy as np from scipy import linalg import mne from mne import io from mne.connectivity import spectral_connectivity from mne.datasets import sample print(__doc__) """ Explanation: Compute all-to-all connectivity in sensor space Computes the Phase Lag Index (PLI) between all gradiometers and shows the connectivity in 3D using the helmet geometry. The left visual stimulation data are used which produces strong connectvitiy in the right occipital sensors. End of explanation """ data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Setup for reading the raw data raw = io.read_raw_fif(raw_fname) events = mne.read_events(event_fname) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG gradiometers picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True, exclude='bads') # Create epochs for the visual condition event_id, tmin, tmax = 3, -0.2, 1.5 # need a long enough epoch for 5 cycles epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6)) # Compute connectivity for band containing the evoked response. # We exclude the baseline period fmin, fmax = 3., 9. sfreq = raw.info['sfreq'] # the sampling frequency tmin = 0.0 # exclude the baseline period con, freqs, times, n_epochs, n_tapers = spectral_connectivity( epochs, method='pli', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, tmin=tmin, mt_adaptive=False, n_jobs=1) # the epochs contain an EOG channel, which we remove now ch_names = epochs.ch_names idx = [ch_names.index(name) for name in ch_names if name.startswith('MEG')] con = con[idx][:, idx] # con is a 3D array where the last dimension is size one since we averaged # over frequencies in a single band. 
Here we make it 2D con = con[:, :, 0] # Now, visualize the connectivity in 3D from mayavi import mlab # noqa mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5)) # Plot the sensor locations sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx] sens_loc = np.array(sens_loc) pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2], color=(1, 1, 1), opacity=1, scale_factor=0.005) # Get the strongest connections n_con = 20 # show up to 20 connections min_dist = 0.05 # exclude sensors that are less than 5cm apart threshold = np.sort(con, axis=None)[-n_con] ii, jj = np.where(con >= threshold) # Remove close connections con_nodes = list() con_val = list() for i, j in zip(ii, jj): if linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist: con_nodes.append((i, j)) con_val.append(con[i, j]) con_val = np.array(con_val) # Show the connections as tubes between sensors vmax = np.max(con_val) vmin = np.min(con_val) for val, nodes in zip(con_val, con_nodes): x1, y1, z1 = sens_loc[nodes[0]] x2, y2, z2 = sens_loc[nodes[1]] points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val], vmin=vmin, vmax=vmax, tube_radius=0.001, colormap='RdBu') points.module_manager.scalar_lut_manager.reverse_lut = True mlab.scalarbar(title='Phase Lag Index (PLI)', nb_labels=4) # Add the sensor names for the connections shown nodes_shown = list(set([n[0] for n in con_nodes] + [n[1] for n in con_nodes])) for node in nodes_shown: x, y, z = sens_loc[node] mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=0.005, color=(0, 0, 0)) view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2])) mlab.view(*view) """ Explanation: Set parameters End of explanation """