code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Autoregressive Moving Average (ARMA): Sunspots data # This notebook replicates the existing ARMA notebook using the `statsmodels.tsa.statespace.SARIMAX` class rather than the `statsmodels.tsa.ARMA` class. # %matplotlib inline # + import numpy as np from scipy import stats import pandas as pd import matplotlib.pyplot as plt import statsmodels.api as sm # - from statsmodels.graphics.api import qqplot # ## Sunspots Data print(sm.datasets.sunspots.NOTE) dta = sm.datasets.sunspots.load_pandas().data dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008')) del dta["YEAR"] dta.plot(figsize=(12,4)); fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2) arma_mod20 = sm.tsa.statespace.SARIMAX(dta, order=(2,0,0), trend='c').fit(disp=False) print(arma_mod20.params) arma_mod30 = sm.tsa.statespace.SARIMAX(dta, order=(3,0,0), trend='c').fit(disp=False) print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic) print(arma_mod30.params) print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic) # * Does our model obey the theory? 
sm.stats.durbin_watson(arma_mod30.resid) fig = plt.figure(figsize=(12,4)) ax = fig.add_subplot(111) ax = plt.plot(arma_mod30.resid) resid = arma_mod30.resid stats.normaltest(resid) fig = plt.figure(figsize=(12,4)) ax = fig.add_subplot(111) fig = qqplot(resid, line='q', ax=ax, fit=True) fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(resid, lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2) r,q,p = sm.tsa.acf(resid, fft=True, qstat=True) data = np.c_[range(1,41), r[1:], q, p] table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"]) print(table.set_index('lag')) # * This indicates a lack of fit. # * In-sample dynamic prediction. How good does our model do? predict_sunspots = arma_mod30.predict(start='1990', end='2012', dynamic=True) fig, ax = plt.subplots(figsize=(12, 8)) dta.loc['1950':].plot(ax=ax) predict_sunspots.plot(ax=ax, style='r'); def mean_forecast_err(y, yhat): return y.sub(yhat).mean() mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)
examples/notebooks/statespace_arma_0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- a = [True, False] if all(a): print(2) from sortedcontainers import SortedDict a = SortedDict() a['d'] = 30 a['c'] = 20 a # + from sortedcontainers import SortedDict, SortedList class ExamRoom: def __init__(self, N: int): self.seats = SortedList() # 已经被坐的位置有哪些 self.N = N def seat(self) -> int: if not self.seats: self.seats.add(0) return 0 else: n = len(self.seats) dist = {} # 需要计算已经坐的椅子距离 for i in range(1, n): gap = self.seats[i] - self.seats[i-1] if gap == 1: continue d = gap // 2 idx = self.seats[i-1] + d dist[idx] = d dist[0] = self.seats[0] dist[self.N-1] = (self.N-1) - self.seats[-1] dist = sorted(dist.items(), key=lambda x: (x[1], -x[0])) self.seats.add(dist[-1][0]) return dist[-1][0] def leave(self, p: int) -> None: self.seats.remove(p) # + from sortedcontainers import SortedDict, SortedList import bisect class ExamRoom: def __init__(self, N: int): self.seats = [] # 已经被坐的位置有哪些 self.N = N def seat(self) -> int: if not self.seats: seat = 0 else: left = self.seats[0] preMaxGap = left - 0 # 这种写法能处理后去0号位置leave之后没人的情况 seat = 0 for right in self.seats[1:]: gap = (right - left) // 2 if gap > preMaxGap: seat = left + gap preMaxGap = gap left = right if self.N - 1 - self.seats[-1] > preMaxGap: seat = self.N - 1 bisect.insort(self.seats, seat) return seat def leave(self, p: int) -> None: self.seats.remove(p) # - obj = ExamRoom(10) param_1 = obj.seat() param_1 = obj.seat() param_1 = obj.seat() param_1 = obj.seat() param_1 = obj.seat() param_1 = obj.seat() # obj.leave(1)
Ordered Map/0105/855. Exam Room.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # COMBINE 2020 SBOL 3 Tutorial # # October, 2020 # # This tutorial code goes with the slides at: # # https://github.com/SynBioDex/Community-Media/blob/master/2020/IWBDA20/SBOL3-IWBDA-2020.pptx # Import the module from sbol3 import * # Set the default namespace for new objects and create a document set_namespace('https://synbiohub.org/public/igem/') doc = Document() # # Slide 26: GFP expression cassette # Construct a simple part and add it to the Document. # # Component # identity: iGEM#I13504 # name: "iGEM 2016 interlab reporter" # description: "GFP expression cassette used for 2016 iGEM interlab" # type: SBO:0000251 (DNA) # role: SO:0000804 (Engineered Region) # # Which properties are required? Which properties behave as lists? i13504 = Component('i13504', SBO_DNA) i13504.name = 'iGEM 2016 interlab reporter' i13504.description = 'GFP expression cassette used for 2016 iGEM interlab study' i13504.roles.append(SO_NS + '0000804') # Add the GFP expression cassette to the document doc.add(i13504) # # Slide 28: expression cassette parts # Here we will create a part-subpart hierarchy. # # First, create the RBS component... 
# # Component # identity: B0034 # name: RBS (Elowitz 1999) # type: SBO:0000251 (DNA) # role: SO:0000139 (Ribosome Entry Site) # b0034 = Component('B0034', SBO_DNA) b0034.name = 'RBS (Elowitz 1999)' b0034.roles = [SO_NS + '0000139'] # Next, create the GFP component # # identity: E0040 # name: GFP # type: SBO:0000251 (DNA) # role: SO:0000316 (CDS) e0040 = Component('E0040', SBO_DNA) e0040.name = 'GFP' e0040.roles = [SO_NS + '0000316'] # Finally, create the terminator # # identity: B0015 # name: double terminator # type: SBO:0000251 (DNA) # role: SO:0000141 (Terminator) b0015 = Component('B0015', SBO_DNA) b0015.name = 'double terminator' b0015.roles = [SO_NS + '0000141'] # Now construct the part-subpart hierarchy doc.add(b0034) doc.add(e0040) doc.add(b0015) i13504.features.append(SubComponent(b0034)) i13504.features.append(SubComponent(e0040)) i13504.features.append(SubComponent(b0015)) # Slide 30: Location of a SubComponent # # Here we add base coordinates to SubCompnents. # # But first, assign a sequence to the BBa_I13504 device # # See http://parts.igem.org/Part:BBa_I13504 i13504_seq = Sequence('i13504_seq') i13504_seq.elements = 
'aaagaggagaaatactagatgcgtaaaggagaagaacttttcactggagttgtcccaattcttgttgaattagatggtgatgttaatgggcacaaattttctgtcagtggagagggtgaaggtgatgcaacatacggaaaacttacccttaaatttatttgcactactggaaaactacctgttccatggccaacacttgtcactactttcggttatggtgttcaatgctttgcgagatacccagatcatatgaaacagcatgactttttcaagagtgccatgcccgaaggttatgtacaggaaagaactatatttttcaaagatgacgggaactacaagacacgtgctgaagtcaagtttgaaggtgatacccttgttaatagaatcgagttaaaaggtattgattttaaagaagatggaaacattcttggacacaaattggaatacaactataactcacacaatgtatacatcatggcagacaaacaaaagaatggaatcaaagttaacttcaaaattagacacaacattgaagatggaagcgttcaactagcagaccattatcaacaaaatactccaattggcgatggccctgtccttttaccagacaaccattacctgtccacacaatctgccctttcgaaagatcccaacgaaaagagagaccacatggtccttcttgagtttgtaacagctgctgggattacacatggcatggatgaactatacaaataataatactagagccaggcatcaaataaaacgaaaggctcagtcgaaagactgggcctttcgttttatctgttgtttgtcggtgaacgctctctactagagtcacactggctcaccttcgggtgggcctttctgcgtttata' i13504_seq.encoding = SBOL_IUPAC_DNA i13504.sequences = [i13504_seq] # Add a Range to the B0015 SubComponent. The base coordinates for B0015 are as follows: # # Range # start: 746 # end: 875 # # pySBOL3 does not yet have an easy way to locate features based on arbitrary criteria so we have to loop over the list to find the B0015 SubComponent we are looking for for f in i13504.features: if f.instance_of == b0015.identity: print(f) f.locations.append(Range(i13504_seq, 746, 875)) # # Slide 32: GFP production from expression cassette # In this example, we will create a system representation that includes DNA, proteins, and interactions. # # First, create the system representation. We use the SBO term "functional entity" to represent a system. # # Component # identity: i13504_system # type: SBO:0000236 (functional entity) i13504_system = Component('i13504_system', SBO_NS + '0000236' ) doc.add(i13504_system) # The system has two physical subcomponents, the expression construct and the expressed GFP protein. We already created the expression construct. Now create the GFP protein. 
gfp_protein = Component('GFP', SBO_PROTEIN) doc.add(gfp_protein) # Now create the part-subpart hierarchy. i13504_subcomponent = SubComponent(i13504) gfp_subcomponent = SubComponent(gfp_protein) i13504_system.features.append(i13504_subcomponent) i13504_system.features.append(gfp_subcomponent) # Use a ComponentReference to link SubComponents in a multi-level hierarchy # + e0040_subcomponent = None for f in i13504.features: if f.instance_of == e0040.identity: e0040_subcomponent = f if e0040_subcomponent is None: raise Exception() e0040_reference = ComponentReference(i13504_subcomponent, e0040_subcomponent) i13504_system.features.append(e0040_reference) # - # Make the Interaction. # # Interaction: # type: SBO:0000589 (genetic production) # # Participation: # role: SBO:0000645 (template) # participant: e0040_reference # # Participation: # role: SBO:0000011 (product) # participant: gfp_subcomponent genetic_production = Interaction([SBO_NS + '0000589']) template = Participation([SBO_NS + '0000645'], e0040_reference) product = Participation([SBO_NS + '0000011'], gfp_subcomponent) i13504_system.interactions.append(genetic_production) # # Finally, write the data out to a file doc.write('i13504.nt', file_format='nt')
2020/COMBINE20/tutorial_answer_key.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # 读取数据 # 今天要做的事情:<br> # &emsp;&emsp; 1 先直接用LR,GBDT 出一版baseline结果。<br> # &emsp;&emsp; 2 分别采用bagging, boosting, stacking 出结果<br> # # 下一步<br> # &emsp;&emsp; 1 研究每个字段数值特征:是否需要归一化,中心化,标准化 <br> # &emsp;&emsp; 2 尝试特征筛选<br> # &emsp;&emsp; 3 特征工程PCA、LDA、SVD 降维<br> # &emsp;&emsp; 4 尝试特征升维??神经网络??<br> # &emsp;&emsp; 5 加入深度学习方法<br> # &emsp;&emsp; 6 考虑时序怎么用、、<br> import pandas as pd train_df = pd.read_csv('./train.csv', index_col='ID') train_df.head() # + train_feature = train_df[['V1','V2','V3','V4','V5','V6','V7','V8','V9','V10','V11','V12','V13','V14','V15','V16','V17','V18','V19','V20','V21','V22','V23','V24','V25','V26','V27','V28','V29','V30']] train_label = train_df['Label'] print train_feature.head() print train_label.head() print train_label.value_counts() # - pred_df = pd.read_csv('./pred.csv', index_col='ID') del pred_df['V_Time'] pred_df.head() # # LR # ## 模型 # + from sklearn import linear_model import time start = time.time() # fit到RandomForestRegressor之中 clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6) clf.fit(train_feature, train_label) print "运行时间:", time.time()-start # - # ## 评估 from sklearn.metrics import accuracy_score lr_pred_label = pd.Series(clf.predict(pred_df)) lr_pred_label.value_counts() # # RF # + from sklearn.ensemble import RandomForestClassifier import time start = time.time() aliRC_RFC = RandomForestClassifier() aliRC_RFC = aliRC_RFC.fit(train_feature, train_label) print "训练时间:", time.time()-start # - rf_pred_label = pd.Series(aliRC_RFC.predict(pred_df)) rf_pred_label.value_counts() # # GBDT # + from sklearn.ensemble import GradientBoostingClassifier import time start = time.time() aliRC_gdbt = GradientBoostingClassifier() aliRC_gdbt = aliRC_gdbt.fit(train_feature, train_label) print "训练时间:", 
time.time()-start # - gbdt_pred_label = pd.Series(aliRC_gdbt.predict(pred_df)) gbdt_pred_label.value_counts() # # GXBOOST # + from xgboost import XGBClassifier import time start = time.time() xgbc = XGBClassifier() xgbc.fit(train_feature, train_label) print "训练时间:", time.time()-start # - xgbc_pred_lable = pd.Series(xgbc.predict(pred_df)) gbdt_pred_label.value_counts()
jupyter_file/risk_control/AliRiskControl.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Scala // language: scala // name: scala // --- // # Machine Learning with Scala Logistic Regression // // // > "Logistic regression with Scala" // // - toc:true // - branch: master // - badges: true // - comments: false // - author: <NAME> // - categories: [machine-learning, Scala, logistic-regression] // ## Overview // In the post <a href="https://pockerman.github.io/qubit_opus/machine-learning/scala/linear-regression/2021/06/27/ml-with-scala-linear-regression.html">Machine Learning with Scala Linear Regression</a> we saw how to develop a simple linear regressor with the aide of the <a href="https://github.com/scalanlp/breeze">Breeze</a> library. In this post, we see how to develop a logistic regressor classifier for two class classification. // ## Machine Learning with Scala Logistic Regression // Logistic regression is a linear classifier that is the decision boundary is a line or a hyperplane. The logistic regression algorithm is to a large extent similar to linear regression with two notable differences // - We filter the result of the linear regression so that it is mapped in the range $[0, 1]$. Thus, the immediate output of logistic regression can be interpreted as a probability // - The loss function that we minimize is not the MSE // Other than that the algorithm is the same. Hence, we use a linear model of the form // $$\hat{y}_i = a x_i + b$$ // and we filter it via function so that the ouput is mapped bewteen $[0, 1]$. The <a href="https://en.wikipedia.org/wiki/Sigmoid_function">sigmoid</a> function // $$\phi(x) = \frac{1}{1 + e^{-x}}$$ // can be used for such a filtering. 
// The loss function has the following form // $$L(\mathbf{w}) = \sum_{i}^N -y_i log(\hat{y}_i) + (1 - y_i)(1 - log(\hat{y}_i))$$ // where $\mathbf{w}$ is the parameters coefficients with $\mathbf{w} = [a, b]$. // We first import some useful packages // ``` // import breeze.linalg.{DenseMatrix, DenseVector} // import breeze.linalg._ // import breeze.numerics.{exp, log1p, sigmoid} // import breeze.optimize.{DiffFunction, minimize} // ``` // We wrap the loss function and its gradient calculation into an ```object``` class // ``` // object LogisticRegression{ // // def L(x: DenseMatrix[Double], y: DenseVector[Double], // parameters: DenseVector[Double]): Double = { // // val xBeta = x * parameters // val expXBeta = exp(xBeta) // val targets_time = y *:* xBeta // -sum(targets_time - log1p(expXBeta)) // } // // // def gradL(x: DenseMatrix[Double], y: DenseVector[Double], // parameters: DenseVector[Double]): DenseVector[Double]={ // // val xBeta = x * parameters // val probs = sigmoid(xBeta) // x.t * (probs - y) // } // // } // ``` // This is the class that wraps the linear regression model. 
// ``` // // class LogisticRegression { // // // The model parameters // var parameters: DenseVector[Double] = null // // // Flag indicating if the interception term is used // var useIntecept: Boolean=true; // // // auxiliary constructor // def this(numFeatures: Int, useIntercept: Boolean=true){ // this() // init(numFeatures = numFeatures, useIntercept = useIntercept) // } // // // initialize the underlying data // def init(numFeatures: Int, useIntercept: Boolean=true): Unit = { // // val totalFeatures = if(useIntercept) numFeatures + 1 else numFeatures // this.parameters = DenseVector.zeros[Double](totalFeatures) // this.useIntecept = useIntercept // } // // // train the model // def train(x: DenseMatrix[Double], y: DenseVector[Double])={ // // // set up the optimization // val f = new DiffFunction[DenseVector[Double]] { // def calculate(parameters: DenseVector[Double]) = (LogisticRegression.L(x, y, parameters=parameters), // LogisticRegression.gradL(x, y, parameters = parameters)) // } // // this.parameters = minimize(f, this.parameters) // } // // // predict the class of the given point // def predict(x: DenseVector[Double]): Double = { // // require(parameters != null) // // if(!useIntecept){ // require(x.size == parameters.size) // sum(parameters * x) // } // else{ // require(x.size == parameters.size -1 ) // sum(parameters.slice(0, x.size) * x) + parameters(0) // } // } // } // // ``` // Let's put this into action with a simple example. 
// ``` // import breeze.linalg._ // import breeze.numerics._ // import breeze.optimize._ // import breeze.stats._ // import engine.models.LogisticRegression // import engine.utils.{CSVDataSetLoader, VectorUtils} // import spire.algebra.NormedVectorSpace.InnerProductSpaceIsNormedVectorSpace // import spire.implicits.rightModuleOps // // object LogisticRegression_Exe extends App{ // // println(s"Starting application: ${LogisticRegression_Exe.getClass.getName}") // // // load the data // val data = CSVDataSetLoader.loadRepHeightWeightsFullData // val recaledHeights = VectorUtils.standardize(data.heights); // val rescaledWeights = VectorUtils.standardize(data.weights); // val rescaledHeightsAsMatrix = recaledHeights.toDenseMatrix.t // val rescaledWeightsAsMatrix = rescaledWeights.toDenseMatrix.t // // val featureMatrix = DenseMatrix.horzcat(DenseMatrix.ones[Double](rescaledHeightsAsMatrix.rows, 1), // rescaledHeightsAsMatrix, rescaledWeightsAsMatrix) // // println(s"Feature matrix shape (${featureMatrix.rows}, ${featureMatrix.cols})") // // val targets = data.genders.values.map{gender => if(gender == 'M') 1.0 else 0.0} // // println(s"Targets vector shape (${targets.size}, )") // // // logistic regression model // val lr = new LogisticRegression; // // // initialize the model // lr.init(numFeatures=2) // lr.train(x=featureMatrix, y=targets) // // val optimalParams = lr.parameters // println(s"Optimal parameters ${optimalParams}") // println("Done...") // // } // ``` // You can find the complete example in this <a href="https://github.com/pockerman/scala_ml">repo</a>. // ## Summary // In this post we looked into how to develop a simple linear regression model with Scala. The Scala numerics library <a href="https://github.com/scalanlp/breeze">Breeze</a> greatly simplifies the development. // ## References // 1. 
<a href="https://en.wikipedia.org/wiki/Logistic_regression#:~:text=Logistic%20regression%20is%20a%20statistical,a%20form%20of%20binary%20regression).">Logistic regression</a> // 2. <NAME>, <NAME>, <NAME>, ```Scala: Applied Machine Learning```
_notebooks/2021-06-28-ml-with-scala-logistic-regression.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.0.0 # language: julia # name: julia-1.0 # --- # # Load data file # # ### Target: Fix comments with `#!` # + using DelimitedFiles, Test, BenchmarkTools, Statistics """General Annealing Problem""" abstract type AnnealingProblem end """ SpinAnnealingProblem{T<:Real} <: AnnealingProblem Annealing problem defined by coupling matrix of spins. """ struct SpinAnnealingProblem{T<:Real} <: AnnealingProblem # immutable, with type parameter T (a subtype of Real). num_spin::Int coupling::Matrix{T} function SpinAnnealingProblem(coupling::Matrix{T}) where T size(coupling, 1) == size(coupling, 2) || throw(DimensionMismatch("input must be square matrix.")) new{T}(size(coupling, 1), coupling) end end """ load_coupling(filename::String) -> SpinAnnealingProblem Load the data file into symmtric coupling matrix. """ function load_coupling(filename::String) data = readdlm(filename) is = @. Int(view(data, :, 1)) + 1 #! @. means broadcast for the following functions, is here used correctly? js = @. Int(view(data, :, 2)) + 1 weights = data[:,3] num_spin = max(maximum(is), maximum(js)) J = similar(weights, num_spin, num_spin) @inbounds for (i, j, weight) = zip(is, js, weights) J[i,j] = weight/2 J[j,i] = weight/2 end SpinAnnealingProblem(J) end # - @testset "loading" begin sap = load_coupling("programs/example.txt") @test size(sap.coupling) == (300, 300) end # + abstract type AnnealingConfig end struct SpinConfig{Ts, Tf} <: AnnealingConfig config::Vector{Ts} field::Vector{Tf} end """ random_config(prblm::AnnealingProblem) -> SpinConfig Random spin configuration. """ function random_config end # where to put the docstring of a multiple-dispatch function is a problem. Using `abstract function` is proper. 
function random_config(prblm::SpinAnnealingProblem) config = rand([-1,1], prblm.num_spin) SpinConfig(config, prblm.coupling*config) end # - @testset "random config" begin sap = load_coupling("programs/example.txt") initial_config = random_config(sap) @test initial_config.config |> length == 300 @test eltype(initial_config.config) == Int end # # Main Program for Annealing # + """ anneal_singlerun!(config::AnnealingConfig, prblm, tempscales::Vector{Float64}, num_update_each_temp::Int) Perform Simulated Annealing using Metropolis updates for the single run. * configuration that can be updated. * prblm: problem with `get_cost`, `flip!` and `random_config` interfaces. * tempscales: temperature scales, which should be a decreasing array. * num_update_each_temp: the number of update in each temprature scale. Returns (minimum cost, optimal configuration). """ function anneal_singlerun!(config, prblm, tempscales::Vector{Float64}, num_update_each_temp::Int) cost = get_cost(config, prblm) opt_config = config opt_cost = cost for beta = 1 / tempscales #! fix this line @simd for m = 1:num_update_each_temp # single instriuction multiple data, see julia performance tips. proposal, ΔE = propose(config, prblm) if exp(-beta*ΔE) > rand() #accept flip!(config, proposal, prblm) cost += ΔE if cost < opt_cost opt_cost = cost opt_config = config end end end end opt_cost, opt_config end """ anneal(nrun::Int, prblm, tempscales::Vector{Float64}, num_update_each_temp::Int) Perform Simulated Annealing with multiple runs. """ function anneal(nrun::Int, prblm, tempscales::Vector{Float64}, num_update_each_temp::Int) opt_cost=999999 #! here, this initialization of opt_cost will cause allocation, how to fix? 
local opt_config for r = 1:nrun initial_config = random_config(prblm) cost, config = anneal_singlerun!(initial_config, prblm, tempscales, num_update_each_temp) if r == 1 || cost < opt_cost opt_cost = cost opt_config = config end end opt_cost, opt_config end # - # # Annealing Problem Interfaces # + """ get_cost(config::AnnealingConfig, ap::AnnealingProblem) -> Real Get the cost of specific configuration. """ get_cost(config::SpinConfig, sap::SpinAnnealingProblem) = sum(config.config'*sap.coupling*config.config) """ propose(config::AnnealingConfig, ap::AnnealingProblem) -> (Proposal, Real) Propose a change, as well as the energy change. """ @inline function propose(config::SpinConfig, ::SpinAnnealingProblem) # ommit the name of argument, since not used. ispin = rand(1:length(config.config)) @inbounds ΔE = -config.field[ispin] * config.config[ispin] * 4 # 2 for spin change, 2 for mutual energy. ispin, ΔE end """ flip!(config::AnnealingConfig, ispin::Proposal, ap::AnnealingProblem) -> SpinConfig Apply the change to the configuration. """ @inline function flip!(config::SpinConfig, ispin::Int, sap::SpinAnnealingProblem) @inbounds config.config[ispin] = -config.config[ispin] # @inbounds can remove boundary check, and improve performance config.field .+= 2 .* config.config[ispin] .* sap.coupling[:,ispin] #! this line can be super inefficient! try to improve it config end # - # ### **Challege!** # Make your program correct and type is stable! 
using Random Random.seed!(2) const tempscales = 10 .- (1:64 .- 1) .* 0.15 |> collect const sap = load_coupling("programs/example.txt") @testset "anneal" begin opt_cost, opt_config = anneal(30, sap, tempscales, 4000) @test anneal(30, sap, tempscales, 4000)[1] == -3858 anneal(30, sap, tempscales, 4000) res = median(@benchmark anneal(30, $sap, $tempscales, 4000)) @test res.time/1e9 < 1 @test res.allocs < 500 end @benchmark anneal(30, $sap, $tempscales, 4000) # # Tips for optimization: Find the bottleneck of your program using Profile Profile.clear() @profile anneal(100, sap, tempscales, 4000) Profile.print() # # Calling a Fortran program # * https://docs.julialang.org/en/v1/manual/calling-c-and-fortran-code/index.html # * https://craftofcoding.wordpress.com/2017/02/26/calling-fortran-from-julia-i/ # * https://craftofcoding.wordpress.com/2017/03/01/calling-fortran-from-julia-ii/ ;cd programs ;gfortran -shared -fPIC problem.f90 fsa.f90 -o fsa.so ;nm fsa.so @benchmark ccall((:test_, "fsa.so"), Int32, ()) # # What if I can not live without Python? # We can use [PyCall](https://github.com/JuliaPy/PyCall.jl) to call python programs! # # ### **Challenge!** # 1. use Python package [viznet](https://github.com/GiggleLiu/viznet) and [matplotlib](https://matplotlib.org/) for visualization # 2. benchmark pure python version of simulated annealing, show the time # + # pip install viznet using PyCall @pyimport viznet @pyimport matplotlib.pyplot as plt brush = viznet.NodeBrush("nn.input") brush >> (0, 0) plt.axis([-1, 1, -1, 1]) plt.axis("equal") plt.axis("off") plt.show() # - # now please import `test_codec` function in file `testsa.py` pushfirst!(PyVector(pyimport("sys")["path"]), "") # add current folder into path @pyimport testsa @benchmark testsa.test_codec()
simulated_annealing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # New Object? # # Solidifying understanding of when lists are re-created x = [1, 2, 3, 4] # ## Recreating creates new list (duh :)) y = list(x) y y is x y == x # ## Slicing creates new obj s = x[:2] s s[0] = 5 s # original list unmodified x # By extrapolation, works for slicing that returns full list y = x[:] y y is x y == x # ## __iadd__ doesn't create new obj l = [1, 2, 3] old_id = id(l) l += [4] id(l) == old_id
lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: learning_py38 # language: python # name: learning_py38 # --- # <figure> # <IMG SRC="https://raw.githubusercontent.com/mbakker7/exploratory_computing_with_python/master/tudelft_logo.png" WIDTH=250 ALIGN="right"> # </figure> # # # Exploratory Computing with Python # *Developed by <NAME>* # ## Notebook 1: Basics and Plotting # ### First Python steps # "Portable, powerful, and a breeze to use", Python is a popular, open-source programming language used for both scripting applications and standalone programs (see "Learning Python" by <NAME>). Python can be used to do pretty much anything. For example, you can use Python as a calculator. Position your cursor in the code cell below and hit [shift][enter]. The output should be 12 (-: 6 * 2 # Note that the extra spaces are added to make the code more readable. # `2 * 3` works just as well as `2*3`. And it is considered good style. Use the extra spaces in all your Notebooks. # # When you are programming, you want to store your values in variables a = 6 b = 2 a * b # Both `a` and `b` are now variables. Each variable has a type. In this case, they are both integers (whole numbers). To write the value of a variable to the screen, use the `print` function (the last statement of a code cell is automatically printed to the screen if it is not stored in a variable, as was shown above). Note that multiplication of two integers results in an integer, but division of two integers results in a float (a number with decimal places). 
print(a) print(b) print(a * b) print(a / b) # You can add some text to the `print` function by putting the text string between quotes (either single or double quotes work as long as you use the same at the beginning and end), and separate the text string and the variable by a comma print('the value of a is', a) # A variable can be raised to a power by using `**` # (a hat `^`, as used in some other languages, doesn't work). a ** b # ### <a name="ex1a"></a> Exercise 1a, First Python code # Compute the value of the polynomial $y=ax^2+bx+c$ at $x=-2$, $x=0$, and $x=2.1$ using $a=1$, $b=1$, $c=-6$ and print the results to the screen. # <a href="#ex1aanswer">Answer to Exercise 1a</a> # ### Division # Division works as well print('1/3 gives', 1 / 3) # (Note for Python 2 users (you should really change to Python 3!): `1/3` gives zero in Python 2, as the division of two integers returned an integer in Python 2). The above print statement looks pretty ugly with 16 values of 3 in a row. A better and more readable way to print both text and the value of a variable to the screen is to use what are called f-strings. f-strings allow you to insert the value of a variable anywhere in the text by surrounding it with braces `{}`. The entire text string needs to be between quotes and be preceded by the letter `f` a = 1 b = 3 c = a / b print(f'{a} divided by {b} gives {c}') # The complete syntax between braces is `{variable:width.precision}`. When `width` and `precision` are not specified, Python will use all digits and figure out the width for you. If you want a floating point number with 3 decimals, you specify the number of digits, `3`, followed by the letter `f` for floating point (you can still let Python figure out the width by not specifying it). If you prefer exponent (scientific) notation, replace the `f` by an `e`. The text after the `#` is a comment in the code. Any text on the line after the `#` is ignored by Python. 
print(f'{a} divided by {b} gives {c:.3f}') # three decimal places print(f'{a} divided by {b} gives {c:10.3f}') # width 10 and three decimal places print(f'{a} divided by {b} gives {c:.3e}') # three decimal places scientific notation # ### <a name="ex1b"></a> Exercise 1b, First Python code using f-strings # Compute the value of the polynomial $y=ax^2+bx+c$ at $x=-2$, $x=0$, and $x=2.1$ using $a=1$, $b=1$, $c=-6$ and print the results to the screen using f-strings and 2 decimal places. # <a href="#ex1banswer">Answer to Exercise 1b</a> # ### More on variables # Once you have created a variable in a Python session, it will remain in memory, so you can use it in other cells as well. For example, the variables `a` and `b`, which were defined two code cells above in this Notebook, still exist. print(f'the value of a is: {a}') print(f'the value of b is: {b}') # The user (in this case: you!) decides the order in which code blocks are executed. For example, `In [6]` means that it is the sixth execution of a code block. If you change the same code block and run it again, it will get number 7. If you define the variable `a` in code block 7, it will overwrite the value of `a` defined in a previous code block. # # Variable names may be as long as you like (you gotta do the typing though). Selecting descriptive names aids in understanding the code. Variable names cannot have spaces, nor can they start with a number. And variable names are case sensitive. So the variable `myvariable` is not the same as the variable `MyVariable`. The name of a variable may be anything you want, except for reserved words in the Python language. For example, it is not possible to create a variable `for = 7`, as `for` is a reserved word. You will learn many of the reserved words when we continue; they are colored bold green when you type them in the Notebook. 
# ### Basic plotting and a first array # Plotting is not part of standard Python, but a nice package exists to create pretty graphics (and ugly ones, if you want). A package is a library of functions for a specific set of tasks. There are many Python packages and we will use several of them. The graphics package we use is called `matplotlib`. To be able to use the plotting functions in `matplotlib`, we have to import it. We will learn several different ways of importing packages. For now, we import the plotting part of `matplotlib` and call it `plt`. Before we import `matplotlib`, we tell the Jupyter Notebook to show any graphs inside this Notebook and not in a separate window using the `%matplotlib inline` command (more on these commands later). # %matplotlib inline import matplotlib.pyplot as plt # Packages only have to be imported once in a Python session. After the above import statement, any plotting function may be called from any code cell as `plt.function`. For example plt.plot([1, 2, 4, 2]) # Let's try to plot $y$ vs $x$ for $x$ going from $-4$ to $+4$ for the polynomial # $y=ax^2+bx+c$ with $a=1$, $b=1$, $c=-6$. # To do that, we need to evaluate $y$ at a bunch of points. A sequence of values of the same type is called an array (for example an array of integers or floats). Array functionality is available in the package `numpy`. Let's import `numpy` and call it `np`, so that any function in the `numpy` package may be called as `np.function`. import numpy as np # To create an array `x` consisting of, for example, 5 equally spaced points between `-4` and `4`, use the `linspace` command x = np.linspace(-4, 4, 5) print(x) # In the above cell, `x` is an array of 5 floats (`-4.` is a float, `-4` is an integer). # If you type `np.linspace` and then an opening parenthesis like: # # `np.linspace(` # # and then hit [shift-tab] a little help box pops up to explain the input arguments of the function. 
When you click on the + sign, you can scroll through all the documentation of the `linspace` function. Click on the x sign to remove the help box. Let's plot $y$ using 100 $x$ values from # $-4$ to $+4$. a = 1 b = 1 c = -6 x = np.linspace(-4, 4, 100) y = a * x ** 2 + b * x + c # Compute y for all x values plt.plot(x, y) # Note that *one hundred* `y` values are computed in the simple line `y = a * x ** 2 + b * x + c`. Python treats arrays in the same fashion as it treats regular variables when you perform mathematical operations. The math is simply applied to every value in the array (and it runs much faster than when you would do every calculation separately). # # You may wonder what the statement like `[<matplotlib.lines.Line2D at 0x30990b0>]` is (the numbers above on your machine may look different). This is actually a handle to the line that is created with the last command in the code block (in this case `plt.plot(x, y)`). Remember: the result of the last line in a code cell is printed to the screen, unless it is stored in a variable. You can tell the Notebook not to print this to the screen by putting a semicolon after the last command in the code block (so type `plot(x, y);`). We will learn later on that it may also be useful to store this handle in a variable. # The `plot` function can take many arguments. Looking at the help box of the `plot` function, by typing `plt.plot(` and then shift-tab, gives you a lot of help. Typing `plt.plot?` gives a new scrollable subwindow at the bottom of the notebook, showing the documentation on `plot`. Click the x in the upper right hand corner to close the subwindow again. # In short, `plot` can be used with one argument as `plot(y)`, which plots `y` values along the vertical axis and enumerates the horizontal axis starting at 0. `plot(x, y)` plots `y` vs `x`, and `plot(x, y, formatstring)` plots `y` vs `x` using colors and markers defined in `formatstring`, which can be a lot of things. 
It can be used to define the color, for example `'b'` for blue, `'r'` for red, and `'g'` for green. Or it can be used to define the linetype `'-'` for line, `'--'` for dashed, `':'` for dots. Or you can define markers, for example `'o'` for circles and `'s'` for squares. You can even combine them: `'r--'` gives a red dashed line, while `'go'` gives green circular markers. # If that isn't enough, `plot` takes a large number of keyword arguments. A keyword argument is an optional argument that may be added to a function. The syntax is `function(keyword1=value1, keyword2=value2)`, etc. For example, to plot a line with width 6 (the default is 1), type plt.plot([1, 2, 3], [2, 4, 3], linewidth=6); # Keyword arguments should come after regular arguments. `plot(linewidth=6, [1, 2, 3], [2, 4, 3])` gives an error. # Names may be added along the axes with the `xlabel` and `ylabel` functions, e.g., `plt.xlabel('this is the x-axis')`. Note that both functions take a string as argument. A title can be added to the figure with the `plt.title` command. Multiple curves can be added to the same figure by giving multiple plotting commands in the same code cell. They are automatically added to the same figure. # ### New figure and figure size # # Whenever you give a plotting statement in a code cell, a figure with a default size is automatically created, and all subsequent plotting statements in the code cell are added to the same figure. If you want a different size of the figure, you can create a figure first with the desired figure size using the `plt.figure(figsize=(width, height))` syntax. Any subsequent plotting statement in the code cell is then added to the figure. You can even create a second figure (or third or fourth...). 
plt.figure(figsize=(10, 3)) plt.plot([1, 2, 3], [2, 4, 3], linewidth=6) plt.title('very wide figure') plt.figure() # new figure of default size plt.plot([1, 2, 3], [1, 3, 1], 'r') plt.title('second figure'); # ### <a name="ex2"></a> Exercise 2, First graph # Plot $y=(x+2)(x-1)(x-2)$ for $x$ going from $-3$ to $+3$ using a dashed red line. On the same figure, plot a blue circle for every point where $y$ equals zero. Set the size of the markers to 10 (you may need to read the help of `plt.plot` to find out how to do that). Label the axes as 'x-axis' and 'y-axis'. Add the title 'First nice Python figure of Your Name', where you enter your own name. # <a href="#ex2answer">Answer to Exercise 2</a> # ### Style # # As was already mentioned above, good coding style is important. It makes the code easier to read so that it is much easier to find errors and bugs. For example, consider the code below, which recreates the graph we produced earlier (with a wider line), but now there are no additional spaces inserted a=1 b=1 c=-6 x=np.linspace(-4,4,100) y=a*x**2+b*x+c#Compute y for all x values plt.plot(x,y,linewidth=3) # The code in the previous code cell is difficult to read. Good style includes at least the following: # * spaces around every mathematical symbol (`=`, `+`, `-`, `*`, `/`), but not needed around `**` # * spaces between arguments of a function # * no spaces around an equal sign for a keyword argument (so `linewidth=3` is correct) # * one space after every comma # * one space after each `#` # * two spaces before a `#` when it follows a Python statement # * no space between the function name and the list of arguments. So `plt.plot(x, y)` is good style, and `plt.plot (x, y)` is not good style. # # These rules are (a very small part of) the official Python style guide called PEP8. 
When these rules are applied, the code is *much* easier to read, as you can see below: a = 1 b = 1 c = -6 x = np.linspace(-4, 4, 100) y = a * x**2 + b * x + c # Compute y for all x values plt.plot(x, y, linewidth=3); # Use correct style in all other exercises and all Notebooks to come. # ### Exercise 2b. First graph revisited # Go back to your Exercise 2 and apply correct style. # ### Loading data files # # Numerical data can be loaded from a data file using the `loadtxt` function of `numpy`; i.e., the command is `np.loadtxt`. You need to make sure the file is in the same directory as your notebook, or provide the full path. The filename (or path plus filename) needs to be between quotes. # ### <a name="ex3"></a> Exercise 3, Loading data and adding a legend # You are provided with the data files containing the mean montly temperature of Holland, New York City, and Beijing. The Dutch data is stored in `holland_temperature.dat`, and the other filenames are similar. Plot the temperature for each location against the number of the month (starting with 1 for January) all in a single graph. Add a legend by using the function `plt.legend(['line1','line2'])`, etc., but then with more descriptive names. Find out about the `legend` command using `plt.legend?`. Place the legend in an appropriate spot (the upper left-hand corner may be nice, or let Python figure out the best place). # <a href="#ex3answer">Answer to Exercise 3</a> # ### <a name="ex4"></a> Exercise 4, Subplots and fancy tick markers # Load the average monthly air temperature and seawater temperature for Holland. Create one plot with two graphs above each other using the `subplot` command (use `plt.subplot?` to find out how). On the top graph, plot the air and sea temperature. Label the ticks on the horizontal axis as 'jan', 'feb', 'mar', etc., rather than numbers. Use `plt.xticks?` to find out how. In the bottom graph, plot the difference between the air and seawater temperature. 
Add legends, axes labels, the whole shebang. # <a href="#ex4answer">Answer to Exercise 4</a> # ### Colors # If you don't specify a color for a plotting statement, `matplotlib` will use its default colors. The first three default colors are special shades of blue, orange and green. The names of the default colors are a capital `C` followed by the number, starting with number `0`. For example plt.plot([0, 1], [0, 1], 'C0') plt.plot([0, 1], [1, 2], 'C1') plt.plot([0, 1], [2, 3], 'C2') plt.legend(['default blue', 'default orange', 'default green']); # There are five different ways to specify your own colors in matplotlib plotting; you may read about them [here](http://matplotlib.org/examples/pylab_examples/color_demo.html). A useful way is to use the html color names. The html codes may be found, for example, [here](http://en.wikipedia.org/wiki/Web_colors). color1 = 'fuchsia' color2 = 'lime' color3 = 'DodgerBlue' plt.plot([0, 1], [0, 1], color1) plt.plot([0, 1], [1, 2], color2) plt.plot([0, 1], [2, 3], color3) plt.legend([color1, color2, color3]); # The coolest (and nerdiest) way is probably to use the xkcd names, which need to be prefaced by the `xkcd:`. The xkcd list of color names is given by [xkcd](https://xkcd.com/color/rgb/) and includes favorites such as 'baby puke green' and a number of brown colors varying from `poo` to `poop brown` and `baby poop brown`. Try it out: plt.plot([1, 2, 3], [4, 5, 2], 'xkcd:baby puke green'); plt.title('xkcd color baby puke green'); # ### Gallery of graphs # The plotting package `matplotlib` allows you to make very fancy graphs. Check out the <A href="http://matplotlib.org/gallery.html" target=_blank>matplotlib gallery</A> to get an overview of many of the options. The following exercises use several of the matplotlib options. 
# ### <a name="ex5"></a> Exercise 5, Pie Chart
# At the 2012 London Olympics, the top ten countries (plus the rest) receiving gold medals were `['USA', 'CHN', 'GBR', 'RUS', 'KOR', 'GER', 'FRA', 'ITA', 'HUN', 'AUS', 'OTHER']`. They received `[46, 38, 29, 24, 13, 11, 11, 8, 8, 7, 107]` gold medals, respectively. Make a pie chart (use `plt.pie?` or go to the pie charts in the matplotlib gallery) of the top 10 gold medal winners plus the others at the London Olympics. Try some of the keyword arguments to make the plot look nice. You may want to give the command `plt.axis('equal')` to make the scales along the horizontal and vertical axes equal so that the pie actually looks like a circle rather than an ellipse. Use the `colors` keyword in your pie chart to specify a sequence of colors. The sequence must be between square brackets, each color must be between quotes preserving upper and lower cases, and they must be separated by commas like `['MediumBlue','SpringGreen','BlueViolet']`; the sequence is repeated if it is not long enough.
# <a href="#ex5answer">Answer to Exercise 5</a>

# ### <a name="ex6"></a> Exercise 6, Fill between
# Load the air and sea temperature, as used in Exercise 4, but this time make one plot of temperature vs the number of the month and use the `plt.fill_between` command to fill the space between the curve and the horizontal axis. Specify the `alpha` keyword, which defines the transparency. Some experimentation will give you a good value for alpha (stay between 0 and 1). Note that you need to specify the color using the `color` keyword argument.
# <a href="#ex6answer">Answer to Exercise 6</a> # ### Answers for the exercises # <a name="ex1aanswer">Answer to Exercise 1</a> a = 1 b = 1 c = -6 x = -2 y = a * x ** 2 + b * x + c print('y evaluated at x = -2 is', y) x = 0 y = a * x ** 2 + b * x + c print('y evaluated at x = 0 is', y) x = 2.1 y = a * x ** 2 + b * x + c print('y evaluated at x = 2 is', y) # <a href="#ex1a">Back to Exercise 1a</a> # # <a name="ex1banswer">Answer to Exercise 1b</a> a = 1 b = 1 c = -6 x = -2 y = a * x ** 2 + b * x + c print(f'y evaluated at x = {x} is {y}') x = 0 y = a * x ** 2 + b * x + c print(f'y evaluated at x = {x} is {y}') x = 2.1 y = a * x ** 2 + b * x + c print(f'y evaluated at x = {x} is {y:.2f}') # <a href="#ex1b">Back to Exercise 1b</a> # # <a name="ex2answer">Answer to Exercise 2</a> x = np.linspace(-3, 3, 100) y = (x + 2) * (x - 1) * (x - 2) plt.plot(x, y, 'r--') plt.plot([-2, 1, 2], [0, 0, 0], 'bo', markersize=10) plt.xlabel('x-axis') plt.ylabel('y-axis') plt.title('First Python Figure of <NAME>'); # <a href="#ex2">Back to Exercise 2</a> # # <a name="ex3answer">Answer to Exercise 3</a> holland = np.loadtxt('holland_temperature.dat') newyork= np.loadtxt('newyork_temperature.dat') beijing = np.loadtxt('beijing_temperature.dat') plt.plot(np.linspace(1, 12, 12), holland) plt.plot(np.linspace(1, 12, 12), newyork) plt.plot(np.linspace(1, 12, 12), beijing) plt.xlabel('Number of the month') plt.ylabel('Mean monthly temperature (Celcius)') plt.xticks(np.linspace(1, 12, 12)) plt.legend(['Holland','New York','Beijing'], loc='best'); # <a href="#ex3">Back to Exercise 3</a> # # <a name="ex4answer">Answer to Exercise 4</a> air = np.loadtxt('holland_temperature.dat') sea = np.loadtxt('holland_seawater.dat') plt.subplot(211) plt.plot(air, 'b', label='air temp') plt.plot(sea, 'r', label='sea temp') plt.legend(loc='best') plt.ylabel('temp (Celcius)') plt.xlim(0, 11) plt.xticks([]) plt.subplot(212) plt.plot(air-sea, 'ko') plt.xticks(np.linspace(0, 11, 12), 
['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']) plt.xlim(0, 11) plt.ylabel('air - sea temp (Celcius)'); # <a href="#ex4">Back to Exercise 4</a> # # <a name="ex5answer">Answer to Exercise 5</a> gold = [46, 38, 29, 24, 13, 11, 11, 8, 8, 7, 107] countries = ['USA', 'CHN', 'GBR', 'RUS', 'KOR', 'GER', 'FRA', 'ITA', 'HUN', 'AUS', 'OTHER'] plt.pie(gold, labels = countries, colors = ['Gold', 'MediumBlue', 'SpringGreen', 'BlueViolet']) plt.axis('equal'); # <a href="#ex5">Back to Exercise 5</a> # # <a name="ex6answer">Answer to Exercise 6</a> air = np.loadtxt('holland_temperature.dat') sea = np.loadtxt('holland_seawater.dat') plt.fill_between(range(1, 13), air, color='b', alpha=0.3) plt.fill_between(range(1, 13), sea, color='r', alpha=0.3) plt.xticks(np.arange(1, 13), ['jan', 'feb', 'mar', 'apr',\ 'may', 'jun', 'jul', 'aug', 'sep', ' oct', 'nov', 'dec']) plt.xlabel('Month') plt.ylabel('Temperature (Celcius)'); # <a href="#ex6">Back to Exercise 6</a>
src/exploratory_py/notebook1_basics_plotting/py_exploratory_comp_1_sol.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="_ZDCTs685mzt"
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)

# + [markdown] id="sQFJ6aT4nfhw"
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/23.Drug_Normalizer.ipynb)

# + [markdown] id="qRExAXlX5mzz"
# # 23.Clinical Drug Normalizer

# + [markdown] id="AHgUNvnN5mz0"
# ### New Annotator that transforms text to the format used in the RxNorm and SNOMED standards

# + [markdown] id="X-BSrjva5mz0"
# It takes in input annotated documents of type Array\[AnnotatorType\](DOCUMENT) and gives as output annotated document of type AnnotatorType.DOCUMENT .
#
# Parameters are:
# - inputCol: input column name string which targets a column of type Array(AnnotatorType.DOCUMENT).
# - outputCol: output column name string which targets a column of type AnnotatorType.DOCUMENT.
# - lowercase: whether to convert strings to lowercase. Default is False.
# - policy: rule to remove patterns from text. Valid policy values are:
#   + **"all"**,
#   + **"abbreviations"**,
#   + **"dosages"**
#
# Default is "all". The "abbreviations" policy is used to expand common drug abbreviations, and the "dosages" policy is used to convert drug dosages and values to the standard form (see examples below).
# + [markdown] id="lmtcfKIz5mz0" pycharm={"name": "#%% md\n"} # #### Examples of transformation: # # 1) "Sodium Chloride/Potassium Chloride 13bag" >>> "Sodium Chloride / Potassium Chloride **13 bag**" : add extra spaces in the form entity # # 2) "interferon alfa-2b 10 million unit ( 1 ml ) injec" >>> "interferon alfa - 2b 10000000 unt ( 1 ml ) injection " : convert **10 million unit** to the **10000000 unt**, replace **injec** with **injection** # # 3) "aspirin 10 meq/ 5 ml oral sol" >>> "aspirin 2 meq/ml oral solution" : normalize **10 meq/ 5 ml** to the **2 meq/ml**, extend abbreviation **oral sol** to the **oral solution** # # 4) "adalimumab 54.5 + 43.2 gm" >>> "adalimumab 97700 mg" : combine **54.5 + 43.2** and normalize **gm** to **mg** # # 5) "Agnogenic one half cup" >>> "Agnogenic 0.5 oral solution" : replace **one half** to the **0.5**, normalize **cup** to the **oral solution** # + id="MdE588BiY3z1" import json, os from google.colab import files if 'spark_jsl.json' not in os.listdir(): license_keys = files.upload() os.rename(list(license_keys.keys())[0], 'spark_jsl.json') with open('spark_jsl.json') as f: license_keys = json.load(f) # Defining license key-value pairs as local variables locals().update(license_keys) os.environ.update(license_keys) # + id="F7BN6q-8UNc7" # Installing pyspark and spark-nlp # ! pip install --upgrade -q pyspark==3.1.2 spark-nlp==$PUBLIC_VERSION # Installing Spark NLP Healthcare # ! 
pip install --upgrade -q spark-nlp-jsl==$JSL_VERSION --extra-index-url https://pypi.johnsnowlabs.com/$SECRET # + id="7Gg3kybJtJbW" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="9b92ef9f-6881-432e-e4c5-8d5f1a40f849" executionInfo={"status": "ok", "timestamp": 1649496541979, "user_tz": -120, "elapsed": 67746, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} import json import os from pyspark.ml import Pipeline,PipelineModel from pyspark.sql import SparkSession import sparknlp_jsl import sparknlp from sparknlp.annotator import * from sparknlp_jsl.annotator import * from sparknlp.base import * params = {"spark.driver.memory":"16G", "spark.kryoserializer.buffer.max":"2000M", "spark.driver.maxResultSize":"2000M"} spark = sparknlp_jsl.start(license_keys['SECRET'],params=params) print("Spark NLP Version :", sparknlp.version()) print("Spark NLP_JSL Version :", sparknlp_jsl.version()) spark # + id="hx2jxxCaVlOV" # if you want to start the session with custom params as in start function above def start(secret): builder = SparkSession.builder \ .appName("Spark NLP Licensed") \ .master("local[*]") \ .config("spark.driver.memory", "16G") \ .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \ .config("spark.kryoserializer.buffer.max", "2000M") \ .config("spark.jars.packages", "com.johnsnowlabs.nlp:spark-nlp_2.11:"+version) \ .config("spark.jars", "https://pypi.johnsnowlabs.com/"+secret+"/spark-nlp-jsl-"+jsl_version+".jar") return builder.getOrCreate() # spark = start(secret) # + colab={"base_uri": "https://localhost:8080/"} id="1zgsiTxjaiMd" outputId="cad846ca-16fa-40ff-8a16-31dd2002a0b5" executionInfo={"status": "ok", "timestamp": 1649496549675, "user_tz": -120, "elapsed": 6155, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} # Sample data data_to_normalize = spark.createDataFrame([ ("A", "Sodium Chloride/Potassium Chloride 13bag", "Sodium Chloride / Potassium Chloride 13 bag"), ("B", 
"interferon alfa-2b 10 million unit ( 1 ml ) injec", "interferon alfa - 2b 10000000 unt ( 1 ml ) injection"), ("C", "aspirin 10 meq/ 5 ml oral sol", "aspirin 2 meq/ml oral solution") ]).toDF("cuid", "text", "target_normalized_text") data_to_normalize.show(truncate=100) # + colab={"base_uri": "https://localhost:8080/"} id="r2Yr96wrWPUH" outputId="c00829b5-5d9f-4408-f1e3-a8f77108c555" executionInfo={"status": "ok", "timestamp": 1649496562314, "user_tz": -120, "elapsed": 4149, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} # Annotator that transforms a text column from dataframe into normalized text (with all policy) document_assembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") policy = "all" drug_normalizer = DrugNormalizer() \ .setInputCols("document") \ .setOutputCol("document_normalized") \ .setPolicy(policy) drug_normalizer_pipeline = Pipeline(stages=[ document_assembler, drug_normalizer ]) ds = drug_normalizer_pipeline.fit(data_to_normalize).transform(data_to_normalize) ds = ds.selectExpr("document", "target_normalized_text", "explode(document_normalized.result) as all_normalized_text") ds.show(truncate = False) # + colab={"base_uri": "https://localhost:8080/"} id="wgQw1ZnV5mz5" outputId="f948f5a8-c0d1-4ba3-af3c-887b7f6117c4" executionInfo={"status": "ok", "timestamp": 1649496576526, "user_tz": -120, "elapsed": 2042, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} # Annotator that transforms a text column from dataframe into normalized text (with abbreviations only policy) policy = "abbreviations" drug_normalizer_abb = DrugNormalizer() \ .setInputCols("document") \ .setOutputCol("document_normalized_abbreviations") \ .setPolicy(policy) ds = drug_normalizer_abb.transform(ds) ds = ds.selectExpr("document", "target_normalized_text", "all_normalized_text", "explode(document_normalized_abbreviations.result) as abbr_normalized_text") ds.select("target_normalized_text", "all_normalized_text", 
"abbr_normalized_text").show(truncate=1000) # + colab={"base_uri": "https://localhost:8080/"} id="JtsRZL_ybwhb" outputId="c984511a-6210-4c4f-b534-5d05c3963edb" executionInfo={"status": "ok", "timestamp": 1649496596572, "user_tz": -120, "elapsed": 1465, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} # Transform a text column from dataframe into normalized text (with dosages only policy) policy = "dosages" drug_normalizer_abb = DrugNormalizer() \ .setInputCols("document") \ .setOutputCol("document_normalized_dosages") \ .setPolicy(policy) ds = drug_normalizer_abb.transform(ds) ds.selectExpr("target_normalized_text", "all_normalized_text", "explode(document_normalized_dosages.result) as dos_normalized_text").show(truncate=1000) # + [markdown] id="RbIOEhtZ5mz6" # #### Apply normalizer only on NER chunks # + colab={"base_uri": "https://localhost:8080/"} id="Dg1aE3WR5mz6" outputId="30ea55f4-5c3d-4625-8e3e-4a2b2dabe951" executionInfo={"status": "ok", "timestamp": 1649496762953, "user_tz": -120, "elapsed": 156473, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} documentAssembler = DocumentAssembler()\ .setInputCol("text")\ .setOutputCol("document") # Sentence Detector annotator, processes various sentences per line sentenceDetector = SentenceDetector()\ .setInputCols(["document"])\ .setOutputCol("sentence") # Tokenizer splits words in a relevant format for NLP tokenizer = Tokenizer()\ .setInputCols(["sentence"])\ .setOutputCol("token")\ .addSplitChars(";") # Clinical word embeddings trained on PubMED dataset word_embeddings = WordEmbeddingsModel.pretrained("embeddings_clinical", "en", "clinical/models")\ .setInputCols(["sentence", "token"])\ .setOutputCol("embeddings") # Extract entities with NER model posology posology_ner = MedicalNerModel.pretrained("ner_posology_large", "en", "clinical/models") \ .setInputCols(["sentence", "token", "embeddings"]) \ .setOutputCol("ner_posology") # Group extracted entities into the chunks 
ner_converter = NerConverter()\ .setInputCols(["sentence", "token", "ner_posology"])\ .setOutputCol("ner_chunk_posology") # Convert extracted entities to the doc with chunks in metadata c2doc = Chunk2Doc()\ .setInputCols("ner_chunk_posology")\ .setOutputCol("chunk_doc") # Transform a chunk document into normalized text drug_normalizer = DrugNormalizer() \ .setInputCols("chunk_doc") \ .setOutputCol("document_normalized_dosages")\ .setPolicy("all") nlpPipeline = Pipeline(stages=[ documentAssembler, sentenceDetector, tokenizer, word_embeddings, posology_ner, ner_converter, c2doc, drug_normalizer]) empty_data = spark.createDataFrame([[""]]).toDF("text") model = nlpPipeline.fit(empty_data) # + id="Zvg4Qe5Z5mz7" executionInfo={"status": "ok", "timestamp": 1649496790493, "user_tz": -120, "elapsed": 3400, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} # ! wget -q https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/pubmed/pubmed_sample_text_small.csv # + colab={"base_uri": "https://localhost:8080/"} id="SKDWIpue5mz7" outputId="ba5d993d-d360-4691-8816-da0bfc422d9d" executionInfo={"status": "ok", "timestamp": 1649496794925, "user_tz": -120, "elapsed": 1668, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} import pyspark.sql.functions as F pubMedDF = spark.read\ .option("header", "true")\ .csv("pubmed_sample_text_small.csv")\ pubMedDF.show(truncate=50) # + id="2UCCH6Wc5mz7" executionInfo={"status": "ok", "timestamp": 1649496798948, "user_tz": -120, "elapsed": 1031, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} result = model.transform(pubMedDF.limit(100)) # + colab={"base_uri": "https://localhost:8080/"} id="2jB11A4K-Idt" outputId="1255f4c8-e82c-4721-cda2-48dd005488bc" executionInfo={"status": "ok", "timestamp": 1649496811082, "user_tz": -120, "elapsed": 10967, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} result.show(2) # + colab={"base_uri": "https://localhost:8080/"} 
id="dEYrLo945mz8" outputId="51fe649e-7ae3-4575-bde3-c19fa1d71442" executionInfo={"status": "ok", "timestamp": 1649496829053, "user_tz": -120, "elapsed": 8902, "user": {"displayName": "Damla", "userId": "03285166568766987047"}} import pyspark.sql.functions as F result.select(F.explode('document_normalized_dosages.result')).show(truncate=100)
tutorials/Certification_Trainings/Healthcare/23.Drug_Normalizer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # TensorFlow Tutorial #02 # # Convolutional Neural Network # # by [<NAME>](http://www.hvass-labs.org/) # / [GitHub](https://github.com/Hvass-Labs/TensorFlow-Tutorials) / [Videos on YouTube](https://www.youtube.com/playlist?list=PL9Hr9sNUjfsmEu1ZniY0XpHSzl5uihcXZ) # ## Introduction # # The previous tutorial showed that a simple linear model had about 91% classification accuracy for recognizing hand-written digits in the MNIST data-set. # # In this tutorial we will implement a simple Convolutional Neural Network in TensorFlow which has a classification accuracy of about 99%, or more if you make some of the suggested exercises. # # Convolutional Networks work by moving small filters across the input image. This means the filters are re-used for recognizing patterns throughout the entire input image. This makes the Convolutional Networks much more powerful than Fully-Connected networks with the same number of variables. This in turn makes the Convolutional Networks faster to train. # # You should be familiar with basic linear algebra, Python and the Jupyter Notebook editor. Beginners to TensorFlow may also want to study the first tutorial before proceeding to this one. # ## Flowchart # The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below. # # ![Flowchart](images/02_network_flowchart.png) # The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled so the image resolution is decreased from 28x28 to 14x14. # # These 16 smaller images are then processed in the second convolutional layer. 
We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are down-sampled again to 7x7 pixels. # # The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image. # # The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low. # # These particular filter-weights and intermediate images are the results of one optimization run and may look different if you re-run this Notebook. # # Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow. # ## Convolutional Layer # The following chart shows the basic idea of processing an image in the first convolutional layer. The input image depicts the number 7 and four copies of the image are shown here, so we can see more clearly how the filter is being moved to different positions of the image. 
For each position of the filter, the dot-product is being calculated between the filter and the image pixels under the filter, which results in a single pixel in the output image. So moving the filter across the entire input image results in a new image being generated. # # The red filter-weights means that the filter has a positive reaction to black pixels in the input image, while blue pixels means the filter has a negative reaction to black pixels. # # In this case it appears that the filter recognizes the horizontal line of the 7-digit, as can be seen from its stronger reaction to that line in the output image. # # ![Convolution example](images/02_convolution.png) # The step-size for moving the filter across the input is called the stride. There is a stride for moving the filter horizontally (x-axis) and another stride for moving vertically (y-axis). # # In the source-code below, the stride is set to 1 in both directions, which means the filter starts in the upper left corner of the input image and is being moved 1 pixel to the right in each step. When the filter reaches the end of the image to the right, then the filter is moved back to the left side and 1 pixel down the image. This continues until the filter has reached the lower right corner of the input image and the entire output image has been generated. # # When the filter reaches the end of the right-side as well as the bottom of the input image, then it can be padded with zeroes (white pixels). This causes the output image to be of the exact same dimension as the input image. # # Furthermore, the output of the convolution may be passed through a so-called Rectified Linear Unit (ReLU), which merely ensures that the output is positive because negative values are set to zero. The output may also be down-sampled by so-called max-pooling, which considers small windows of 2x2 pixels and only keeps the largest of those pixels. This halves the resolution of the input image e.g. from 28x28 to 14x14 pixels. 
# # Note that the second convolutional layer is more complicated because it takes 16 input channels. We want a separate filter for each input channel, so we need 16 filters instead of just one. Furthermore, we want 36 output channels from the second convolutional layer, so in total we need 16 x 36 = 576 filters for the second convolutional layer. It can be a bit challenging to understand how this works. # ## Imports # %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import numpy as np from sklearn.metrics import confusion_matrix import time from datetime import timedelta import math # This was developed using Python 3.6.1 (Anaconda) and TensorFlow version: tf.__version__ # ## Configuration of Neural Network # # The configuration of the Convolutional Neural Network is defined here for convenience, so you can easily find and change these numbers and re-run the Notebook. # + # Convolutional Layer 1. filter_size1 = 5 # Convolution filters are 5 x 5 pixels. num_filters1 = 16 # There are 16 of these filters. # Convolutional Layer 2. filter_size2 = 5 # Convolution filters are 5 x 5 pixels. num_filters2 = 36 # There are 36 of these filters. # Fully-connected layer. fc_size = 128 # Number of neurons in fully-connected layer. # - # ## Load Data # The MNIST data-set is about 12 MB and will be downloaded automatically if it is not located in the given path. from tensorflow.examples.tutorials.mnist import input_data data = input_data.read_data_sets('data/MNIST/', one_hot=True) # The MNIST data-set has now been loaded and consists of 70,000 images and associated labels (i.e. classifications of the images). The data-set is split into 3 mutually exclusive sub-sets. We will only use the training and test-sets in this tutorial. 
print("Size of:")
print("- Training-set:\t\t{}".format(len(data.train.labels)))
print("- Test-set:\t\t{}".format(len(data.test.labels)))
print("- Validation-set:\t{}".format(len(data.validation.labels)))

# The class-labels are One-Hot encoded, which means that each label is a vector with 10 elements, all of which are zero except for one element. The index of this one element is the class-number, that is, the digit shown in the associated image. We also need the class-numbers as integers for the test-set, so we calculate it now.

# Integer class-numbers for the test-set, derived from the One-Hot labels.
data.test.cls = np.argmax(data.test.labels, axis=1)

# ## Data Dimensions

# The data dimensions are used in several places in the source-code below. They are defined once so we can use these variables instead of numbers throughout the source-code below.

# +
# We know that MNIST images are 28 pixels in each dimension.
img_size = 28

# Images are stored in one-dimensional arrays of this length.
img_size_flat = img_size * img_size

# Tuple with height and width of images used to reshape arrays.
img_shape = (img_size, img_size)

# Number of colour channels for the images: 1 channel for gray-scale.
num_channels = 1

# Number of classes, one class for each of 10 digits.
num_classes = 10
# -

# ### Helper-function for plotting images

# Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.

def plot_images(images, cls_true, cls_pred=None):
    """Plot 9 flat images in a 3x3 grid, labelling each with its true
    class and, when given, the predicted class."""
    assert len(images) == len(cls_true) == 9

    # Create figure with 3x3 sub-plots.
    fig, axes = plt.subplots(3, 3)
    fig.subplots_adjust(hspace=0.3, wspace=0.3)

    for i, ax in enumerate(axes.flat):
        # Plot image.
        ax.imshow(images[i].reshape(img_shape), cmap='binary')

        # Show true and predicted classes.
        if cls_pred is None:
            xlabel = "True: {0}".format(cls_true[i])
        else:
            xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])

        # Show the classes as the label on the x-axis.
        ax.set_xlabel(xlabel)

        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# ### Plot a few images to see if data is correct

# +
# Get the first images from the test-set.
images = data.test.images[0:9]

# Get the true classes for those images.
cls_true = data.test.cls[0:9]

# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true)
# -

# ## TensorFlow Graph
#
# The entire purpose of TensorFlow is to have a so-called computational graph that can be executed much more efficiently than if the same calculations were to be performed directly in Python. TensorFlow can be more efficient than NumPy because TensorFlow knows the entire computation graph that must be executed, while NumPy only knows the computation of a single mathematical operation at a time.
#
# TensorFlow can also automatically calculate the gradients that are needed to optimize the variables of the graph so as to make the model perform better. This is because the graph is a combination of simple mathematical expressions so the gradient of the entire graph can be calculated using the chain-rule for derivatives.
#
# TensorFlow can also take advantage of multi-core CPUs as well as GPUs - and Google has even built special chips just for TensorFlow which are called TPUs (Tensor Processing Units) and are even faster than GPUs.
#
# A TensorFlow graph consists of the following parts which will be detailed below:
#
# * Placeholder variables used for inputting data to the graph.
# * Variables that are going to be optimized so as to make the convolutional network perform better.
# * The mathematical formulas for the convolutional network.
# * A cost measure that can be used to guide the optimization of the variables.
# * An optimization method which updates the variables.
#
# In addition, the TensorFlow graph may also contain various debugging statements e.g.
# for logging data to be displayed using TensorBoard, which is not covered in this tutorial.

# ### Helper-functions for creating new variables

# Functions for creating new TensorFlow variables in the given shape and initializing them with random values. Note that the initialization is not actually done at this point, it is merely being defined in the TensorFlow graph.

def new_weights(shape):
    """Create a weight variable of the given shape, initialized from a
    truncated normal distribution (stddev 0.05 keeps initial outputs small)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))

def new_biases(length):
    """Create a bias variable of the given length, initialized to a small
    positive constant so ReLU units are not dead at the start of training."""
    return tf.Variable(tf.constant(0.05, shape=[length]))

# ### Helper-function for creating a new Convolutional Layer

# This function creates a new convolutional layer in the computational graph for TensorFlow. Nothing is actually calculated here, we are just adding the mathematical formulas to the TensorFlow graph.
#
# It is assumed that the input is a 4-dim tensor with the following dimensions:
#
# 1. Image number.
# 2. Y-axis of each image.
# 3. X-axis of each image.
# 4. Channels of each image.
#
# Note that the input channels may either be colour-channels, or it may be filter-channels if the input is produced from a previous convolutional layer.
#
# The output is another 4-dim tensor with the following dimensions:
#
# 1. Image number, same as input.
# 2. Y-axis of each image. If 2x2 pooling is used, then the height and width of the input images is divided by 2.
# 3. X-axis of each image. Ditto.
# 4. Channels produced by the convolutional filters.

def new_conv_layer(input,              # The previous layer.
                   num_input_channels, # Num. channels in prev. layer.
                   filter_size,        # Width and height of each filter.
                   num_filters,        # Number of filters.
                   use_pooling=True):  # Use 2x2 max-pooling.
    """Add a convolutional layer (conv -> optional 2x2 max-pool -> ReLU)
    to the graph.

    Returns the output tensor and the filter-weights variable; the
    weights are returned so they can be plotted later.
    """

    # Shape of the filter-weights for the convolution.
    # This format is determined by the TensorFlow API.
    shape = [filter_size, filter_size, num_input_channels, num_filters]

    # Create new weights aka. filters with the given shape.
    weights = new_weights(shape=shape)

    # Create new biases, one for each filter.
    biases = new_biases(length=num_filters)

    # Create the TensorFlow operation for convolution.
    # Note the strides are set to 1 in all dimensions.
    # The first and last stride must always be 1,
    # because the first is for the image-number and
    # the last is for the input-channel.
    # But e.g. strides=[1, 2, 2, 1] would mean that the filter
    # is moved 2 pixels across the x- and y-axis of the image.
    # The padding is set to 'SAME' which means the input image
    # is padded with zeroes so the size of the output is the same.
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')

    # Add the biases to the results of the convolution.
    # A bias-value is added to each filter-channel.
    layer += biases

    # Use pooling to down-sample the image resolution?
    if use_pooling:
        # This is 2x2 max-pooling, which means that we
        # consider 2x2 windows and select the largest value
        # in each window. Then we move 2 pixels to the next window.
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

    # Rectified Linear Unit (ReLU).
    # It calculates max(x, 0) for each input pixel x.
    # This adds some non-linearity to the formula and allows us
    # to learn more complicated functions.
    layer = tf.nn.relu(layer)

    # Note that ReLU is normally executed before the pooling,
    # but since relu(max_pool(x)) == max_pool(relu(x)) we can
    # save 75% of the relu-operations by max-pooling first.

    # We return both the resulting layer and the filter-weights
    # because we will plot the weights later.
    return layer, weights

# ### Helper-function for flattening a layer
#
# A convolutional layer produces an output tensor with 4 dimensions. We will add fully-connected layers after the convolution layers, so we need to reduce the 4-dim tensor to 2-dim which can be used as input to the fully-connected layer.

def flatten_layer(layer):
    """Reshape a 4-dim conv-layer output to 2-dim [num_images, num_features].

    Returns the flattened tensor and the number of features per image.
    """

    # Get the shape of the input layer.
    layer_shape = layer.get_shape()

    # The shape of the input layer is assumed to be:
    # layer_shape == [num_images, img_height, img_width, num_channels]

    # The number of features is: img_height * img_width * num_channels
    # We can use a function from TensorFlow to calculate this.
    num_features = layer_shape[1:4].num_elements()

    # Reshape the layer to [num_images, num_features].
    # Note that we just set the size of the second dimension
    # to num_features and the size of the first dimension to -1
    # which means the size in that dimension is calculated
    # so the total size of the tensor is unchanged from the reshaping.
    layer_flat = tf.reshape(layer, [-1, num_features])

    # The shape of the flattened layer is now:
    # [num_images, img_height * img_width * num_channels]

    # Return both the flattened layer and the number of features.
    return layer_flat, num_features

# ### Helper-function for creating a new Fully-Connected Layer

# This function creates a new fully-connected layer in the computational graph for TensorFlow. Nothing is actually calculated here, we are just adding the mathematical formulas to the TensorFlow graph.
#
# It is assumed that the input is a 2-dim tensor of shape `[num_images, num_inputs]`. The output is a 2-dim tensor of shape `[num_images, num_outputs]`.

# +
# Placeholder for the dropout rate (fraction of units to drop).
# Feed 0.5 during training and 0.0 during evaluation.
dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')

def new_fc_layer(input,               # The previous layer.
                 num_inputs,          # Num. inputs from prev. layer.
                 num_outputs,         # Num. outputs.
                 use_relu=True,       # Use Rectified Linear Unit (ReLU)?
                 user_drop_out=True): # Apply dropout after the activation?
    """Add a fully-connected layer (matmul + bias, optional ReLU and
    dropout) to the graph and return its output tensor."""

    # Create new weights and biases.
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    # Calculate the layer as the matrix multiplication of
    # the input and weights, and then add the bias-values.
    layer = tf.matmul(input, weights) + biases

    # Use ReLU?
    if use_relu:
        layer = tf.nn.relu(layer)

    if user_drop_out:
        # BUG FIX: tf.layers.dropout() defaults to training=False, which
        # makes the op a pass-through no matter what rate is fed in, so
        # the dropout_prob fed during training previously had no effect.
        # With training=True the layer actually drops units; feeding
        # rate 0.0 at evaluation time (as the feed-dicts below do)
        # disables dropout again.
        layer = tf.layers.dropout(layer, rate=dropout_prob, training=True)

    return layer
# -

# ### Placeholder variables

# Placeholder variables serve as the input to the TensorFlow computational graph that we may change each time we execute the graph. We call this feeding the placeholder variables and it is demonstrated further below.
#
# First we define the placeholder variable for the input images. This allows us to change the images that are input to the TensorFlow graph. This is a so-called tensor, which just means that it is a multi-dimensional vector or matrix. The data-type is set to `float32` and the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.

x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')

# The convolutional layers expect `x` to be encoded as a 4-dim tensor so we have to reshape it so its shape is instead `[num_images, img_height, img_width, num_channels]`. Note that `img_height == img_width == img_size` and `num_images` can be inferred automatically by using -1 for the size of the first dimension. So the reshape operation is:

print(x.shape)
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
print(x_image.shape)

# Next we have the placeholder variable for the true labels associated with the images that were input in the placeholder variable `x`. The shape of this placeholder variable is `[None, num_classes]` which means it may hold an arbitrary number of labels and each label is a vector of length `num_classes` which is 10 in this case.

y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')

# We could also have a placeholder variable for the class-number, but we will instead calculate it using argmax. Note that this is a TensorFlow operator so nothing is calculated at this point.
# The true class-number is the index of the 1 in the One-Hot label vector.
y_true_cls = tf.argmax(y_true, axis=1)

# ### Convolutional Layer 1
#
# Create the first convolutional layer. It takes `x_image` as input and creates `num_filters1` different filters, each having width and height equal to `filter_size1`. Finally we wish to down-sample the image so it is half the size by using 2x2 max-pooling.

layer_conv1, weights_conv1 = \
    new_conv_layer(input=x_image,
                   num_input_channels=num_channels,
                   filter_size=filter_size1,
                   num_filters=num_filters1,
                   use_pooling=True)

# Check the shape of the tensor that will be output by the convolutional layer. It is (?, 14, 14, 16) which means that there is an arbitrary number of images (this is the ?), each image is 14 pixels wide and 14 pixels high, and there are 16 different channels, one channel for each of the filters.

layer_conv1

# ### Convolutional Layer 2
#
# Create the second convolutional layer, which takes as input the output from the first convolutional layer. The number of input channels corresponds to the number of filters in the first convolutional layer.

layer_conv2, weights_conv2 = \
    new_conv_layer(input=layer_conv1,
                   num_input_channels=num_filters1,
                   filter_size=filter_size2,
                   num_filters=num_filters2,
                   use_pooling=True)

# Check the shape of the tensor that will be output from this convolutional layer. The shape is (?, 7, 7, 36) where the ? again means that there is an arbitrary number of images, with each image having width and height of 7 pixels, and there are 36 channels, one for each filter.

layer_conv2

# ### Flatten Layer
#
# The convolutional layers output 4-dim tensors. We now wish to use these as input in a fully-connected network, which requires for the tensors to be reshaped or flattened to 2-dim tensors.

layer_flat, num_features = flatten_layer(layer_conv2)

# Check that the tensors now have shape (?, 1764) which means there's an arbitrary number of images which have been flattened to vectors of length 1764 each. Note that 1764 = 7 x 7 x 36.

layer_flat

num_features

# ### Fully-Connected Layer 1
#
# Add a fully-connected layer to the network. The input is the flattened layer from the previous convolution. The number of neurons or nodes in the fully-connected layer is `fc_size`. ReLU is used so we can learn non-linear relations.

layer_fc1 = new_fc_layer(input=layer_flat,
                         num_inputs=num_features,
                         num_outputs=fc_size,
                         use_relu=True)

# Check that the output of the fully-connected layer is a tensor with shape (?, 128) where the ? means there is an arbitrary number of images and `fc_size` == 128.

layer_fc1

# ### Fully-Connected Layer 2
#
# Add another fully-connected layer that outputs vectors of length 10 for determining which of the 10 classes the input image belongs to. Note that ReLU is not used in this layer.

layer_fc2 = new_fc_layer(input=layer_fc1,
                         num_inputs=fc_size,
                         num_outputs=num_classes,
                         use_relu=False)

layer_fc2

# ### Predicted Class

# The second fully-connected layer estimates how likely it is that the input image belongs to each of the 10 classes. However, these estimates are a bit rough and difficult to interpret because the numbers may be very small or large, so we want to normalize them so that each element is limited between zero and one and the 10 elements sum to one. This is calculated using the so-called softmax function and the result is stored in `y_pred`.

y_pred = tf.nn.softmax(layer_fc2)

# The class-number is the index of the largest element.

y_pred_cls = tf.argmax(y_pred, axis=1)

# ### Cost-function to be optimized

# To make the model better at classifying the input images, we must somehow change the variables for all the network layers. To do this we first need to know how well the model currently performs by comparing the predicted output of the model `y_pred` to the desired output `y_true`.
#
# The cross-entropy is a performance measure used in classification. The cross-entropy is a continuous function that is always positive and if the predicted output of the model exactly matches the desired output then the cross-entropy equals zero. The goal of optimization is therefore to minimize the cross-entropy so it gets as close to zero as possible by changing the variables of the network layers.
#
# TensorFlow has a built-in function for calculating the cross-entropy. Note that the function calculates the softmax internally so we must use the output of `layer_fc2` directly rather than `y_pred` which has already had the softmax applied.

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                        labels=y_true)

# We have now calculated the cross-entropy for each of the image classifications so we have a measure of how well the model performs on each image individually. But in order to use the cross-entropy to guide the optimization of the model's variables we need a single scalar value, so we simply take the average of the cross-entropy for all the image classifications.

cost = tf.reduce_mean(cross_entropy)

# ### Optimization Method

# Now that we have a cost measure that must be minimized, we can then create an optimizer. In this case it is the `AdamOptimizer` which is an advanced form of Gradient Descent.
#
# Note that optimization is not performed at this point. In fact, nothing is calculated at all, we just add the optimizer-object to the TensorFlow graph for later execution.

optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)

# ### Performance Measures

# We need a few more performance measures to display the progress to the user.
#
# This is a vector of booleans whether the predicted class equals the true class of each image.

correct_prediction = tf.equal(y_pred_cls, y_true_cls)

# This calculates the classification accuracy by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then calculating the average of these numbers.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# ## TensorFlow Run

# ### Create TensorFlow session
#
# Once the TensorFlow graph has been created, we have to create a TensorFlow session which is used to execute the graph.

session = tf.Session()

# ### Initialize variables
#
# The variables for `weights` and `biases` must be initialized before we start optimizing them.

session.run(tf.global_variables_initializer())

# ### Helper-function to perform optimization iterations

# There are 55,000 images in the training-set. It takes a long time to calculate the gradient of the model using all these images. We therefore only use a small batch of images in each iteration of the optimizer.
#
# If your computer crashes or becomes very slow because you run out of RAM, then you may try and lower this number, but you may then need to perform more optimization iterations.

train_batch_size = 64

# Function for performing a number of optimization iterations so as to gradually improve the variables of the network layers. In each iteration, a new batch of data is selected from the training-set and then TensorFlow executes the optimizer using those training samples. The progress is printed every 100 iterations.

# +
# Counter for total number of iterations performed so far.
total_iterations = 0

def optimize(num_iterations):
    """Run num_iterations training steps, printing progress every 100 steps.

    Updates the module-level total_iterations counter so that repeated
    calls continue the iteration numbering where the previous call ended.
    """
    # Ensure we update the global variable rather than a local copy.
    global total_iterations

    # Start-time used for printing time-usage below.
    start_time = time.time()

    # NOTE: a stray debugging next_batch() call + shape print that used
    # to sit here has been removed; it silently consumed one batch of
    # training data before the loop started.
    for i in range(total_iterations,
                   total_iterations + num_iterations):

        # Get a batch of training examples.
        # x_batch now holds a batch of images and
        # y_true_batch are the true labels for those images.
        x_batch, y_true_batch = data.train.next_batch(train_batch_size)

        # Put the batch into a dict with the proper names
        # for placeholder variables in the TensorFlow graph.
        # dropout_prob=0.5 enables dropout during training.
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch,
                           dropout_prob: 0.5}

        # Run the optimizer using this batch of training data.
        # TensorFlow assigns the variables in feed_dict_train
        # to the placeholder variables and then runs the optimizer.
        session.run(optimizer, feed_dict=feed_dict_train)

        # Print status every 100 iterations.
        if i % 100 == 0:
            # Calculate the accuracy on the training-set.
            acc = session.run(accuracy, feed_dict=feed_dict_train)

            # Message for printing.
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"

            # Print it.
            print(msg.format(i + 1, acc))

    # Update the total number of iterations performed.
    total_iterations += num_iterations

    # Ending time.
    end_time = time.time()

    # Difference between start and end-times.
    time_dif = end_time - start_time

    # Print the time-usage.
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
# -

# ### Helper-function to plot example errors

# Function for plotting examples of images from the test-set that have been mis-classified.

def plot_example_errors(cls_pred, correct):
    """Plot the first 9 mis-classified test-set images.

    cls_pred: array of predicted class-numbers for all test images.
    correct: boolean array, True where the prediction equals the true class.
    """
    # This function is called from print_test_accuracy() below.

    # Negate the boolean array (clearer than comparing against False).
    incorrect = np.logical_not(correct)

    # Get the images from the test-set that have been
    # incorrectly classified.
    images = data.test.images[incorrect]

    # Get the predicted classes for those images.
    cls_pred = cls_pred[incorrect]

    # Get the true classes for those images.
    cls_true = data.test.cls[incorrect]

    # Plot the first 9 images.
    plot_images(images=images[0:9],
                cls_true=cls_true[0:9],
                cls_pred=cls_pred[0:9])

# ### Helper-function to plot confusion matrix

def plot_confusion_matrix(cls_pred):
    """Print and plot the confusion matrix for the given test-set predictions."""
    # This is called from print_test_accuracy() below.
    # cls_pred is an array of the predicted class-number for
    # all images in the test-set.

    # Get the true classifications for the test-set.
    cls_true = data.test.cls

    # Get the confusion matrix using sklearn.
    cm = confusion_matrix(y_true=cls_true,
                          y_pred=cls_pred)

    # Print the confusion matrix as text.
    print(cm)

    # Plot the confusion matrix as an image.
    plt.matshow(cm)

    # Make various adjustments to the plot.
    plt.colorbar()
    tick_marks = np.arange(num_classes)
    plt.xticks(tick_marks, range(num_classes))
    plt.yticks(tick_marks, range(num_classes))
    plt.xlabel('Predicted')
    plt.ylabel('True')

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# ### Helper-function for showing the performance

# Function for printing the classification accuracy on the test-set.
#
# It takes a while to compute the classification for all the images in the test-set, that's why the results are re-used by calling the above functions directly from this function, so the classifications don't have to be recalculated by each function.
#
# Note that this function can use a lot of computer memory, which is why the test-set is split into smaller batches. If you have little RAM in your computer and it crashes, then you can try and lower the batch-size.

# +
# Split the test-set into smaller batches of this size.
test_batch_size = 256

def print_test_accuracy(show_example_errors=False,
                        show_confusion_matrix=False):
    """Print the classification accuracy on the test-set, computed in batches.

    Optionally plots example mis-classifications and/or the confusion
    matrix, re-using the predictions so they are only computed once.
    """

    # Number of images in the test-set.
    num_test = len(data.test.images)

    # Allocate an array for the predicted classes which
    # will be calculated in batches and filled into this array.
    # Use the builtin int: np.int is a deprecated alias that has been
    # removed in recent NumPy versions.
    cls_pred = np.zeros(shape=num_test, dtype=int)

    # Now calculate the predicted classes for the batches.
    # We will just iterate through all the batches.
    # There might be a more clever and Pythonic way of doing this.

    # The starting index for the next batch is denoted i.
    i = 0

    while i < num_test:
        # The ending index for the next batch is denoted j.
        j = min(i + test_batch_size, num_test)

        # Get the images from the test-set between index i and j.
        images = data.test.images[i:j, :]

        # Get the associated labels.
        labels = data.test.labels[i:j, :]

        # Create a feed-dict with these images and labels.
        # dropout_prob=0 disables dropout during evaluation.
        feed_dict = {x: images,
                     y_true: labels,
                     dropout_prob: 0}

        # Calculate the predicted class using TensorFlow.
        cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)

        # Set the start-index for the next batch to the
        # end-index of the current batch.
        i = j

    # Convenience variable for the true class-numbers of the test-set.
    cls_true = data.test.cls

    # Create a boolean array whether each image is correctly classified.
    correct = (cls_true == cls_pred)

    # Calculate the number of correctly classified images.
    # When summing a boolean array, False means 0 and True means 1.
    correct_sum = correct.sum()

    # Classification accuracy is the number of correctly classified
    # images divided by the total number of images in the test-set.
    acc = float(correct_sum) / num_test

    # Print the accuracy.
    msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
    print(msg.format(acc, correct_sum, num_test))

    # Plot some examples of mis-classifications, if desired.
    if show_example_errors:
        print("Example errors:")
        plot_example_errors(cls_pred=cls_pred, correct=correct)

    # Plot the confusion matrix, if desired.
    if show_confusion_matrix:
        print("Confusion Matrix:")
        plot_confusion_matrix(cls_pred=cls_pred)
# -

# ## Performance before any optimization
#
# The accuracy on the test-set is very low because the model variables have only been initialized and not optimized at all, so it just classifies the images randomly.

print_test_accuracy()

# ## Performance after 1 optimization iteration
#
# The classification accuracy does not improve much from just 1 optimization iteration, because the learning-rate for the optimizer is set very low.

optimize(num_iterations=1)

print_test_accuracy()

# ## Performance after 100 optimization iterations
#
# After 100 optimization iterations, the model has significantly improved its classification accuracy.
optimize(num_iterations=99) # We already performed 1 iteration above.

print_test_accuracy(show_example_errors=True)

# ## Performance after 1000 optimization iterations
#
# After 1000 optimization iterations, the model has greatly increased its accuracy on the test-set to more than 90%.

optimize(num_iterations=900) # We performed 100 iterations above.

print_test_accuracy(show_example_errors=True)

# ## Performance after 10,000 optimization iterations
#
# After 10,000 optimization iterations, the model has a classification accuracy on the test-set of about 99%.

optimize(num_iterations=9000) # We performed 1000 iterations above.

print_test_accuracy(show_example_errors=True,
                    show_confusion_matrix=True)

# ## Visualization of Weights and Layers
#
# In trying to understand why the convolutional neural network can recognize handwritten digits, we will now visualize the weights of the convolutional filters and the resulting output images.

# ### Helper-function for plotting convolutional weights

def plot_conv_weights(weights, input_channel=0):
    """Plot the filter-weights of a conv-layer for one input channel,
    on a shared colour scale so the filters can be compared."""
    # Assume weights are TensorFlow ops for 4-dim variables
    # e.g. weights_conv1 or weights_conv2.

    # Retrieve the values of the weight-variables from TensorFlow.
    # A feed-dict is not necessary because nothing is calculated.
    w = session.run(weights)

    # Get the lowest and highest values for the weights.
    # This is used to correct the colour intensity across
    # the images so they can be compared with each other.
    w_min = np.min(w)
    w_max = np.max(w)

    # Number of filters used in the conv. layer.
    num_filters = w.shape[3]

    # Number of grids to plot.
    # Rounded-up, square-root of the number of filters.
    num_grids = math.ceil(math.sqrt(num_filters))

    # Create figure with a grid of sub-plots.
    fig, axes = plt.subplots(num_grids, num_grids)

    # Plot all the filter-weights.
    for i, ax in enumerate(axes.flat):
        # Only plot the valid filter-weights.
        if i<num_filters:
            # Get the weights for the i'th filter of the input channel.
            # See new_conv_layer() for details on the format
            # of this 4-dim tensor.
            img = w[:, :, input_channel, i]

            # Plot image.
            ax.imshow(img, vmin=w_min, vmax=w_max,
                      interpolation='nearest', cmap='seismic')

        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# ### Helper-function for plotting the output of a convolutional layer

def plot_conv_layer(layer, image):
    """Plot the output image of every filter in a conv-layer for one
    input image."""
    # Assume layer is a TensorFlow op that outputs a 4-dim tensor
    # which is the output of a convolutional layer,
    # e.g. layer_conv1 or layer_conv2.

    # Create a feed-dict containing just one image.
    # Note that we don't need to feed y_true because it is
    # not used in this calculation.
    feed_dict = {x: [image]}

    # Calculate and retrieve the output values of the layer
    # when inputting that image.
    values = session.run(layer, feed_dict=feed_dict)

    # Number of filters used in the conv. layer.
    num_filters = values.shape[3]

    # Number of grids to plot.
    # Rounded-up, square-root of the number of filters.
    num_grids = math.ceil(math.sqrt(num_filters))

    # Create figure with a grid of sub-plots.
    fig, axes = plt.subplots(num_grids, num_grids)

    # Plot the output images of all the filters.
    for i, ax in enumerate(axes.flat):
        # Only plot the images for valid filters.
        if i<num_filters:
            # Get the output image of using the i'th filter.
            # See new_conv_layer() for details on the format
            # of this 4-dim tensor.
            img = values[0, :, :, i]

            # Plot image.
            ax.imshow(img, interpolation='nearest', cmap='binary')

        # Remove ticks from the plot.
        ax.set_xticks([])
        ax.set_yticks([])

    # Ensure the plot is shown correctly with multiple plots
    # in a single Notebook cell.
    plt.show()

# ### Input Images

# Helper-function for plotting an image.

def plot_image(image):
    """Plot a single flat MNIST image, reshaped to 2-dim."""
    plt.imshow(image.reshape(img_shape),
               interpolation='nearest',
               cmap='binary')

    plt.show()

# Plot an image from the test-set which will be used as an example below.

image1 = data.test.images[0]
plot_image(image1)

# Plot another example image from the test-set.

image2 = data.test.images[13]
plot_image(image2)

# ### Convolution Layer 1

# Now plot the filter-weights for the first convolutional layer.
#
# Note that positive weights are red and negative weights are blue.

plot_conv_weights(weights=weights_conv1)

# Applying each of these convolutional filters to the first input image gives the following output images, which are then used as input to the second convolutional layer. Note that these images are down-sampled to 14 x 14 pixels which is half the resolution of the original input image.

plot_conv_layer(layer=layer_conv1, image=image1)

# The following images are the results of applying the convolutional filters to the second image.

plot_conv_layer(layer=layer_conv1, image=image2)

# It is difficult to see from these images what the purpose of the convolutional filters might be. It appears that they have merely created several variations of the input image, as if light was shining from different angles and casting shadows in the image.

# ### Convolution Layer 2

# Now plot the filter-weights for the second convolutional layer.
#
# There are 16 output channels from the first conv-layer, which means there are 16 input channels to the second conv-layer. The second conv-layer has a set of filter-weights for each of its input channels. We start by plotting the filter-weights for the first channel.
#
# Note again that positive weights are red and negative weights are blue.

plot_conv_weights(weights=weights_conv2, input_channel=0)

# There are 16 input channels to the second convolutional layer, so we can make another 15 plots of filter-weights like this. We just make one more with the filter-weights for the second channel.

plot_conv_weights(weights=weights_conv2, input_channel=1)

# It can be difficult to understand and keep track of how these filters are applied because of the high dimensionality.
# # Applying these convolutional filters to the images that were ouput from the first conv-layer gives the following images. # # Note that these are down-sampled yet again to 7 x 7 pixels which is half the resolution of the images from the first conv-layer. plot_conv_layer(layer=layer_conv2, image=image1) # And these are the results of applying the filter-weights to the second image. plot_conv_layer(layer=layer_conv2, image=image2) # From these images, it looks like the second convolutional layer might detect lines and patterns in the input images, which are less sensitive to local variations in the original input images. # # These images are then flattened and input to the fully-connected layer, but that is not shown here. # ### Close TensorFlow Session # We are now done using TensorFlow, so we close the session to release its resources. # + # This has been commented out in case you want to modify and experiment # with the Notebook without having to restart it. # session.close() # - # ## Conclusion # # We have seen that a Convolutional Neural Network works much better at recognizing hand-written digits than the simple linear model in Tutorial #01. The Convolutional Network gets a classification accuracy of about 99%, or even more if you make some adjustments, compared to only 91% for the simple linear model. # # However, the Convolutional Network is also much more complicated to implement, and it is not obvious from looking at the filter-weights why it works and why it sometimes fails. # # So we would like an easier way to program Convolutional Neural Networks and we would also like a better way of visualizing their inner workings. # ## Exercises # # These are a few suggestions for exercises that may help improve your skills with TensorFlow. It is important to get hands-on experience with TensorFlow in order to learn how to use it properly. # # You may want to backup this Notebook before making any changes. 
# # * Do you get the exact same results if you run the Notebook multiple times without changing any parameters? What are the sources of randomness? # * Run another 10,000 optimization iterations. Are the results better? # * Change the learning-rate for the optimizer. # * Change the configuration of the layers, such as the number of convolutional filters, the size of those filters, the number of neurons in the fully-connected layer, etc. # * Add a so-called drop-out layer after the fully-connected layer. Note that the drop-out probability should be zero when calculating the classification accuracy, so you will need a placeholder variable for this probability. # * Change the order of ReLU and max-pooling in the convolutional layer. Does it calculate the same thing? What is the fastest way of computing it? How many calculations are saved? Does it also work for Sigmoid-functions and average-pooling? # * Add one or more convolutional and fully-connected layers. Does it help performance? # * What is the smallest possible configuration that still gives good results? # * Try using ReLU in the last fully-connected layer. Does the performance change? Why? # * Try not using pooling in the convolutional layers. Does it change the classification accuracy and training time? # * Try using a 2x2 stride in the convolution instead of max-pooling? What is the difference? # * Remake the program yourself without looking too much at this source-code. # * Explain to a friend how the program works. 
# ## License (MIT) # # Copyright (c) 2016 by [<NAME>](http://www.hvass-labs.org/) # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
02_Convolutional_Neural_Network.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: cellpy
#     language: python
#     name: cellpy
# ---

# # copy data

# +
from pathlib import Path
import shutil

import tqdm

import cellpy
from cellpy.utils import batch
from cellpy.readers import dbreader

# +
# Folder to copy to
new_folder = Path(r"I:\Org\MPT-BAT-Students\data")

# Parameters for the batch
project = "SilcRoad"
name = "SilcRoad"
batch_col = "project"
overwrite = False
# -

# Create the batch object
b = batch.init(name, project, batch_col=batch_col)
b.create_journal()

# +
# Dry run (originally "for trying to find out how to best use tqdm"):
# print one status marker per raw file without copying anything.
#   [c] -> would be copied (missing at destination)
#   [u] -> would be updated (exists and overwrite=True)
#   [-] -> would be skipped (exists and overwrite=False)
for raw_files in b.pages.raw_file_names:
    for raw_file in raw_files:
        old_file = Path(raw_file)
        new_file = new_folder / old_file.name
        if not new_file.is_file():
            print("[c]", end="")
        else:
            if overwrite:
                # BUG FIX: was print("[u]") — the stray newline broke the
                # single-line progress output used by every other marker.
                print("[u]", end="")
            else:
                print("[-]", end="")

# +
# Copy the files, emitting the same markers as the dry run above.
for raw_files in b.pages.raw_file_names:
    for raw_file in raw_files:
        old_file = Path(raw_file)
        new_file = new_folder / old_file.name
        if not new_file.is_file():
            print("[c]", end="")
            shutil.copy(old_file, new_file)
        else:
            # BUG FIX: a stray print("[", end="") here produced malformed
            # markers such as "[[u]" / "[[-]"; removed so the output matches
            # the dry-run loop.
            if overwrite:
                new_file.unlink()
                print("[u]", end="")
                shutil.copy(old_file, new_file)
            else:
                print("[-]", end="")
# -
dev_utils/helpers/copy_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] school_cell_uuid="b23676b399394033b936e8ceed1fb9c6" # # 회귀 분석용 가상 데이터 생성 방법 # + [markdown] school_cell_uuid="7e3b37e1c4ad49369af170c7d5334139" # Scikit-learn 의 datasets 서브 패키지에는 회귀 분석 시험용 가상 데이터를 생성하는 명령어인 `make_regression()` 이 있다. # # * http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html # # + [markdown] school_cell_uuid="80b0eec78c304562ae4d0cf3eec1f80a" # ## 입출력 요소 # + [markdown] school_cell_uuid="2b687230a47643cfaffb4e9e75651b32" # `make_regression()`는 다음과 같은 입출력 요소를 가진다. # # * 입력 # * `n_samples` : 정수 (옵션, 디폴트 100) # * 표본의 갯수 # * `n_features` : 정수 (옵션, 디폴트 100) # * 독립 변수(feature)의 수(차원) # * `n_targets` : 정수 (옵션, 디폴트 1) # * 종속 변수(target)의 수(차원) # * `n_informative` : 정수 (옵션, 디폴트 10) # * 독립 변수(feature) 중 실제로 종속 변수와 상관 관계가 있는 독립 변수의 수(차원) # * `effective_rank`: 정수 또는 None (옵션, 디폴트 None) # * 독립 변수(feature) 중 서로 독립인 독립 변수의 수. 만약 None이면 모두 독립 # * `tail_strength` : 0부터 1사이의 실수 (옵션, 디폴트 0.5) # * `effective_rank`가 None이 아닌 경우 독립 변수간의 상관 관계 형태를 결정하는 변수 # * `bias` : 실수 (옵션, 디폴트 0.0) # * 절편 # * `noise` : 실수 (옵션, 디폴트 0.0) # * 출력 즉, 종속 변수에 더해지는 정규 분포의 표준 편차 # * `coef` : 불리언 (옵션, 디폴트 False) # * True 이면 선형 모형의 계수도 출력 # * `random_state` : 정수 (옵션, 디폴트 None) # * 난수 발생용 시작값 # # # * 출력 # * `X` : [`n_samples`, `n_features`] 형상의 2차원 배열 # * 독립 변수의 표본 데이터 # * `y` : [`n_samples`] 형상의 1차원 배열 또는 [`n_samples`, `n_targets`] 형상의 2차원 배열 # * 종속 변수의 표본 데이터 # * `coef` : [`n_features`] 형상의 1차원 배열 또는 [`n_features`, `n_targets`] 형상의 2차원 배열 (옵션) # * 선형 모형의 계수, 입력 인수 `coef`가 True 인 경우에만 출력됨 # + [markdown] school_cell_uuid="6dd965634a1f4ea784a1ae7720fe1754" # 예를 들어 독립 변수가 1개, 종속 변수가 1개 즉, 선형 모형이 다음과 같은 수식은 경우 # # $$ y = C_0 + C_1 x + e $$ # # 이러한 관계를 만족하는 표본 데이터는 다음과 같이 생성한다. 
# + school_cell_uuid="6d8816a958bc49789a92609f641dbe0f"
# BUG FIX: matplotlib is used below (plt.scatter / plt.show) but was never
# imported anywhere in this notebook.
import matplotlib.pyplot as plt

from sklearn.datasets import make_regression

# + school_cell_uuid="136431a3951a46adaa01064c1ffc2c3e"
# One feature, no intercept, no noise: y is an exact linear function of x.
X, y, c = make_regression(n_samples=10, n_features=1, bias=0, noise=0,
                          coef=True, random_state=0)
print("X\n", X)
print("y\n", y)
print("c\n", c)
plt.scatter(X, y, s=100)
plt.show()

# + [markdown] school_cell_uuid="cd350757072144b184817e7a64c3669d"
# The linear model above is
#
# $$
# y = 79.1725 x
# $$
#
# NOTE(review): the original text stated $y = 100 + 79.1725x$, but this cell
# was generated with `bias=0`, so there is no intercept.

# + [markdown] school_cell_uuid="306d4320e3134ba7ba5e4ec4748469bf"
# Increasing the `noise` argument increases $\text{Var}[e]$, while increasing
# the `bias` argument raises the y-intercept.

# + school_cell_uuid="8d3015c2e41042be946516277534dfe3"
X, y, c = make_regression(n_samples=50, n_features=1, bias=100, noise=10,
                          coef=True, random_state=0)
plt.scatter(X, y, s=100)
plt.show()

# + [markdown] school_cell_uuid="4fb1c23b142c4248b167f72c32be7f64"
# Next, generate sample data with `n_features=2`, i.e. two independent
# variables, and draw a scatter plot. The value of the dependent variable is
# shown as the shade of each point.

# + school_cell_uuid="db53f8f09d90458d8af2689ba56d82f8"
X, y, c = make_regression(n_samples=300, n_features=2, noise=10,
                          coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()

# + [markdown] school_cell_uuid="8bd5be1ad4ed41d2b6e6f17ece5845e7"
# If only one of the independent variables actually influences y, use
# `n_informative=1` as follows.

# + school_cell_uuid="550ba0bcc28b4e57bc7537b7fb89ee10"
X, y, c = make_regression(n_samples=300, n_features=2, n_informative=1,
                          noise=0, coef=True, random_state=0)
plt.scatter(X[:,0], X[:,1], c=y, s=100)
plt.xlabel("x1")
plt.ylabel("x2")
plt.axis("equal")
plt.show()

# + [markdown] school_cell_uuid="3fc7318b3a704ff18d1636be190cb772"
# If the two independent variables are correlated, they can be generated as
# follows; the correlation is also visible in the scatter plot.
# + school_cell_uuid="c2a3f89043794da3834e872575c020e7" X, y, c = make_regression(n_samples=300, n_features=2, effective_rank=1, noise=0, tail_strength=0, coef=True, random_state=0) plt.scatter(X[:,0], X[:,1], c=y, s=100) plt.xlabel("x1") plt.ylabel("x2") plt.axis("equal") plt.show() # + school_cell_uuid="42ac2e45e692446d982ae79222327fe2" X, y, c = make_regression(n_samples=300, n_features=2, effective_rank=1, noise=0, tail_strength=1, coef=True, random_state=0) plt.scatter(X[:,0], X[:,1], c=y, s=100) plt.xlabel("x1") plt.ylabel("x2") plt.axis("equal") plt.show()
15. 선형 회귀 분석/01. 회귀 분석용 가상 데이터 생성 방법.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Logistic regression con discesa del gradiente # + from IPython.display import Image import warnings warnings.filterwarnings('ignore') # %matplotlib inline # - import pandas as pd import numpy as np import scipy.special as sp # + import matplotlib.pyplot as plt import matplotlib.colors as mcolors plt.style.use('fivethirtyeight') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.serif'] = 'Ubuntu' plt.rcParams['font.monospace'] = 'Ubuntu Mono' plt.rcParams['font.size'] = 10 plt.rcParams['axes.labelsize'] = 10 plt.rcParams['axes.labelweight'] = 'bold' plt.rcParams['axes.titlesize'] = 10 plt.rcParams['xtick.labelsize'] = 8 plt.rcParams['ytick.labelsize'] = 8 plt.rcParams['legend.fontsize'] = 10 plt.rcParams['figure.titlesize'] = 12 plt.rcParams['image.cmap'] = 'jet' plt.rcParams['image.interpolation'] = 'none' plt.rcParams['figure.figsize'] = (16, 8) plt.rcParams['lines.linewidth'] = 2 plt.rcParams['lines.markersize'] = 8 colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09'] cmap = mcolors.LinearSegmentedColormap.from_list("", ["#82cafc", "#069af3", "#0485d1", colors[0], colors[8]]) # + import urllib.request filepath = "../dataset/" url = "https://tvml.github.io/ml1920/dataset/" def get_file(filename,local): if local: return filepath+filename else: urllib.request.urlretrieve (url+filename, filename) return filename # - # Funzione che effettua la normalizzazione del dataset. 
# Rescales the data of each feature to zero mean and unit variance by
# applying the transformation $$x'=\frac{x-\mu}{\sigma}$$

def normalizza(X):
    """Standardize the columns of X (zero mean, unit sample variance).

    X: (n, d) array of features
    returns: array of the same shape
    """
    mu = np.mean(X, axis=0)
    # ddof=1: sample (unbiased) standard deviation
    sigma = np.std(X, axis=0, ddof=1)
    return (X-mu)/sigma

# Classification function. For every element of the dataset it checks on
# which side of the separating hyperplane (whose parameters were determined
# during training) the point lies. The check looks at the sign of
# $$\theta\cdot\overline x=\theta_0+\sum_{i=1}^d \theta_ix_i$$

def classify(theta, X):
    """Return 1 for samples on the positive side of the hyperplane, else 0."""
    y = np.dot(X, theta)
    return np.where(y > 0, 1, 0)

# Function that computes and prints classification statistics.

def statistics(theta, X, t):
    """Print confusion counts, precision/recall and accuracy; return the
    2x2 confusion matrix (rows: predicted class, columns: true class).

    theta: hyperplane parameter vector
    X: design matrix
    t: true labels (0/1)
    """
    # confusion matrix
    y = classify(theta, X)
    confmat = np.zeros((2, 2))
    for i in range(2):
        for j in range(2):
            confmat[i, j] = np.sum(np.where(y == i, 1, 0)*np.where(t == j, 1, 0))
    print('Veri negativi: {0:d}'.format(int(confmat[0, 0])))
    print('Falsi negativi: {0:d}'.format(int(confmat[0, 1])))
    print('Falsi positivi: {0:d}'.format(int(confmat[1, 0])))
    print('Veri positivi: {0:d}'.format(int(confmat[1, 1])))
    print('Precision insieme 1: {0:5.2f}'.format(confmat[1, 1]/(confmat[1, 1]+confmat[1, 0]), 2))
    print('Recall insieme 1: {0:5.2f}'.format(confmat[1, 1]/(confmat[1, 1]+confmat[0, 1]), 2))
    print('Precision insieme 2: {0:5.2f}'.format(confmat[0, 0]/(confmat[0, 0]+confmat[0, 1]), 2))
    print('Recall insieme 2: {0:5.2f}'.format(confmat[0, 0]/(confmat[0, 0]+confmat[1, 0]), 2))
    # BUG FIX: the accuracy divided by a global variable `n`; use the number
    # of samples actually passed in, so the function is self-contained.
    print('Accuracy: {0:5.2f}'.format(np.trace(confmat)/len(t), 2))
    return confmat

# Function that computes the logistic function for all the values of an
# array: $$\sigma(z)=\frac{1}{1+e^{-z}}\quad\text{with } z=\theta\cdot\overline x$$
# (the original comment was missing the minus sign in the exponent).

def h(theta, X):
    return sp.expit(np.dot(X, theta))

# Gradient function: array of the derivatives of the cost with respect to
# $\theta_0,\ldots,\theta_d$.
# It equals $$-\sum (t_i-y_i)x_i$$ divided by $n$.

def gradient(theta, X, t):
    # Vectorized form of the sum above; len(X) is the number of samples n.
    return -np.dot(X.T, (t-h(theta, X))) / len(X)

# Cost function, defined as the negative log-likelihood
# $$-\sum t_i\log(y_i)-\sum (1-t_i)\log(1-y_i)$$
# where $y_i=\sigma(\theta\cdot\overline x_i)$. The result is divided by $n$
# to avoid excessively large values.

def cost(theta, X, t):
    term1 = np.dot(np.log(h(theta,X)).T,t)
    term2 = np.dot(np.log(1.0 - h(theta,X)).T,1-t)
    # [0]: the dot products yield 1x1 arrays; extract the scalar.
    return ((-term1 - term2) / len(X))[0]

# Read the data from a csv file into a pandas dataframe. Each record has 3
# values: the first two are the features, assigned to columns x1 and x2 of
# the dataframe; the third is the target value, assigned to column t. A
# feature matrix X and a target vector t are then created.

# +
# read the data into a pandas dataframe
# NOTE(review): delim_whitespace is deprecated in recent pandas; the modern
# equivalent is sep=r'\s+' — confirm before upgrading.
data = pd.read_csv(get_file("testSet.txt", False), delim_whitespace=True,
                   header=None, names=['x1','x2','t'])
# number of samples
n = len(data)
# dimensionality of the features (last column is the target)
nfeatures = len(data.columns)-1

X = np.array(data[['x1','x2']])
t = np.array(data['t']).reshape(-1,1)
# -

# Visualize the dataset

fig = plt.figure(figsize=(16,8))
ax = fig.gca()
ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, color=colors[0], alpha=.7)
ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, color=colors[1], alpha=.7)
plt.xlabel('$x_1$', fontsize=12)
plt.ylabel('$x_2$', fontsize=12)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.title('Dataset', fontsize=12)
plt.show()

# To apply the coefficient vector to the dataset elements, prepend a unit
# column (the intercept term).

X = np.column_stack((np.ones(n), X))

# Fix a value for the gradient-descent step parameter $\alpha$ and for the
# number of iterations of the method to run.

alpha = 0.1
niterations = 10000

# Initialize the coefficient vector.

theta_init = np.zeros(nfeatures+1).reshape(-1,1)

# Compute the error array for the initial value of the
coefficienti e = h(theta_init,X)-t # Applica la discesa del gradiente, mantenendo in $\theta$ i coefficienti attuali e memorizzando la storia dei valori dei coefficienti e dei relativi costi theta_history = [] cost_history = [] theta = np.copy(theta_init) for k in range(niterations): theta = theta - alpha * gradient(theta,X,t) theta_history.append(theta) cost_history.append(cost(theta, X, t)) # Traforma le liste theta_history e cost_history in array theta_history = np.array(theta_history).reshape(-1,3) cost_history = np.array(cost_history).reshape(-1,1) cost_history # Calcola i valori dei due coefficienti indipendenti t1=-theta_history[:,1]/theta_history[:,2] t0=-theta_history[:,0]/theta_history[:,2] # Visualizza serie storica dei valori dei coefficienti indipendenti fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(121) ax.plot(range(niterations), t1, c=colors[0], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_1$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) ax = fig.add_subplot(122) ax.plot(range(niterations), t0, c=colors[1], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_2$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.show() # + # visualizza andamento coefficienti e costo fig = plt.figure(figsize=(16,8)) ax = fig.gca() ax.plot(range(niterations), cost_history, c=colors[0],alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel('Costo', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.show() # - # Definiamo la griglia su cui calcolare i valori della logistica delta1=max(X[:,1])-min(X[:,1]) delta2=max(X[:,2])-min(X[:,2]) min1=min(X[:,1])-delta1/10 max1=max(X[:,1])+delta1/10 min2=min(X[:,2])-delta2/10 max2=max(X[:,2])+delta2/10 u = np.linspace(min1, max1, 100) v = np.linspace(min2, max2, 100) u, v = np.meshgrid(u, v) z=h(theta,np.column_stack((np.ones(u.shape[0]*u.shape[1]), np.c_[u.ravel(), v.ravel()]))) z = z.reshape(u.shape) # Visualizziamo il valore della 
logistica e l'iperpiano di separazione fig = plt.figure(figsize=(8,8)) ax = fig.gca() imshow_handle = plt.imshow(z, origin='lower', aspect='auto', extent=(min1, max1, min2, max2), alpha=.3) ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, color=colors[0], alpha=.7) ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, color=colors[1], alpha=.7) plt.contour(u, v, z, [0.5], colors=[colors[7]], linewidths=[1.5]) plt.xlabel('Punteggio esame 1', fontsize=10) plt.ylabel('Punteggio esame 2', fontsize=10) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.xlim(min1, max1) plt.ylim(min2, max2) plt.show() # Calcola statistiche cm=statistics(theta,X,t) # Calcola la curva ROC per diversi valori della soglia y = h(theta,X) # + values = 1000 thresholds = np.linspace(1,0,values+1) roc = np.zeros((values+1,3)) for i in range(values+1): thr = thresholds[i] roc[i,0] = thr # conta true positive, true negative, false positive e false negative per la soglia attuale tp = np.logical_and( y > thr, t==1 ).sum() tn = np.logical_and( y <=thr, t==0 ).sum() fp = np.logical_and( y > thr, t==0 ).sum() fn = np.logical_and( y <=thr, t==1 ).sum() # calcola false positive rate e true positive rate per la soglia attuale e li inserisce nell'array fpr = fp/float(fp+tn) roc[i,1] = fpr tpr = tp/float(tp+fn) roc[i,2] = tpr # - auc = 0. 
for i in range(values): auc += (roc[i+1,1]-roc[i,1]) * (roc[i+1,2]+roc[i,2]) auc *= 0.5 fig = plt.figure(figsize=(16,8)) ax = fig.gca() plt.plot(roc[:,1], roc[:,2], color=colors[0],lw=2) plt.xlim(-0.01,1.01) plt.ylim(0,1.01) plt.xlabel('$FPR$', fontsize=12) plt.ylabel('$TPR$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.title('ROC: AUC={0:5.4f}'.format(auc), fontsize=12) plt.show() # Applicazione del metodo della discesa del gradiente on line # + alpha = 0.01 niterations=200 theta = np.zeros(nfeatures+1) theta_history = [] cost_history = [] # Calcola l'array degli errori e il costo quadratico medio e = h(theta_init,X)-t for j in range(niterations): for i in range(n): e = t[i] - h(theta, X[i]) theta = theta + alpha * e * X[i] theta_history.append(theta) cost_history.append(cost(theta, X, t)) theta_history = np.array(theta_history).reshape(-1,3) cost_history = np.array(cost_history).reshape(-1,1) # - cost_history[:20] fig = plt.figure(figsize=(16,8)) ax = fig.gca() ax.plot(range(niterations*n), cost_history, c=colors[0],alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel('Costo', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.title('On line gradient descent') plt.show() # + t1=-theta_history[:,1]/theta_history[:,2] t0=-theta_history[:,0]/theta_history[:,2] z=h(theta,np.column_stack((np.ones(u.shape[0]*u.shape[1]), np.c_[u.ravel(), v.ravel()]))) z = z.reshape(u.shape) # - fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(121) ax = fig.gca() ax.plot(range(n*niterations), t1, lw=1.5, c=colors[0], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_1$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) ax = fig.add_subplot(122) ax.plot(range(n*niterations), t0, lw=1.5, c=colors[1], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_2$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.show() fig = plt.figure(figsize=(8,8)) ax = fig.gca() imshow_handle = 
plt.imshow(z, origin='lower', aspect='auto', extent=(min1, max1, min2, max2), alpha=.3) ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c = colors[0], alpha=.7) ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, c = colors[1], alpha=.7) plt.contour(u, v, z, [0.5], colors=[colors[7]],linewidths=[1.5]) plt.xlabel('$x_1$', fontsize=12) plt.ylabel('$x_2$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.title('Dataset', fontsize=12) plt.show() cm=statistics(theta.reshape(-1,1),X,t) # Gradiente on line con scelta random dell'elemento da considerare (estrazione senza replacement) e smorzamento del parametro # + alpha = 0.01 niterations=200 theta = np.zeros(nfeatures+1) theta_history = [] cost_history = [] # Calcola l'array degli errori e il costo quadratico medio e = t-h(theta_init,X) for j in range(niterations): dataIndex = list(range(n)) for i in range(n): alpha = 1/(1.0+j+i)+0.0001 #alpha diminuisce r = int(np.random.uniform(0,len(dataIndex))) e = t[r] - h(theta, X[r]) theta = theta + alpha * e * X[r] del(dataIndex[r]) theta_history.append(theta) cost_history.append(cost(theta, X, t)) theta_history = np.array(theta_history).reshape(-1,3) cost_history = np.array(cost_history).reshape(-1,1) # - fig = plt.figure(figsize=(16,8)) ax = fig.add_subplot(121) ax = fig.gca() ax.plot(range(n*niterations), t1, c=colors[0], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_1$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) ax = fig.add_subplot(122) ax.plot(range(n*niterations), t0, c=colors[1], alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel(r'$\theta_2$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.show() fig = plt.figure(figsize=(16,8)) ax = fig.gca() ax.plot(range(niterations*n), cost_history, c=colors[0],alpha=1) plt.xlabel('Iterazioni', fontsize=12) plt.ylabel('Costo', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.title('On line gradient descent') plt.show() # + 
t1=-theta_history[:,1]/theta_history[:,2] t0=-theta_history[:,0]/theta_history[:,2] z=h(theta,np.column_stack((np.ones(u.shape[0]*u.shape[1]), np.c_[u.ravel(), v.ravel()]))) z = z.reshape(u.shape) # - fig = plt.figure(figsize=(8,8)) ax = fig.gca() imshow_handle = plt.imshow(z, origin='lower', aspect='auto', extent=(min1, max1, min2, max2), alpha=.3) ax.scatter(data[data.t==0].x1, data[data.t==0].x2, s=40, c=colors[0], alpha=.7) ax.scatter(data[data.t==1].x1, data[data.t==1].x2, s=40, c=colors[1], alpha=.7) plt.contour(u, v, z, [0.5], colors=[colors[7]],linewidths=[1.5]) plt.xlabel('$x_1$', fontsize=12) plt.ylabel('$x_2$', fontsize=12) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.title('Dataset', fontsize=12) plt.show() cm=statistics(theta.reshape(-1,1),X,t) # + jupyter={"outputs_hidden": true} # + jupyter={"outputs_hidden": true}
codici/logreg.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Think Bayes: Chapter 7 # # This notebook presents code and exercises from Think Bayes, second edition. # # Copyright 2016 <NAME> # # MIT License: https://opensource.org/licenses/MIT # + from __future__ import print_function, division import matplotlib.pyplot as plt # %matplotlib inline import warnings warnings.filterwarnings('ignore') import math import numpy as np from thinkbayes2 import Pmf, Cdf, Suite, Joint import thinkbayes2 import thinkplot # - # ## Warm-up exercises # **Exercise:** Suppose that goal scoring in hockey is well modeled by a # Poisson process, and that the long-run goal-scoring rate of the # Boston Bruins against the Vancouver Canucks is 2.9 goals per game. # In their next game, what is the probability # that the Bruins score exactly 3 goals? Plot the PMF of `k`, the number # of goals they score in a game. # **Solution:** since $\lambda$ of the poisson process, we simply have to evaluate it for the desired number of goals $k$: P(k) = Poisson($k$, $\lambda$) print('probability of scoring exactly 3 goals:', thinkbayes2.EvalPoissonPmf(3, 2.9)) pmf = thinkbayes2.MakePoissonPmf(2.9, 10) pmf.Prob(3) # + thinkplot.Clf() thinkplot.PrePlot(num=1) thinkplot.Hist(pmf) thinkplot.Config(xlabel='Number of goals', ylabel='PMF', xlim=[-0.5, 10.5]) # - # **Exercise:** Assuming again that the goal scoring rate is 2.9, what is the probability of scoring a total of 9 goals in three games? Answer this question two ways: # # 1. Compute the distribution of goals scored in one game and then add it to itself twice to find the distribution of goals scored in 3 games. # # 2. Use the Poisson PMF with parameter $\lambda t$, where $\lambda$ is the rate in goals per game and $t$ is the duration in games. 
# **Solution 1**

# +
## single game
pmf = thinkbayes2.MakePoissonPmf(2.9,30)

## add the one-game PMF to itself twice to get the 3-game total
total = pmf + pmf + pmf

thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Hist(total)
thinkplot.Config(xlabel='# of goals in 3 games', ylabel='PMF', xlim=[-0.5,22.5])

print('probability to score 9 goals in 3 games:', total.Prob(9) )
# -

# **Solution 2:** Since we are considering 3 games, the new Poisson process will have parameter $3\lambda$

# +
pmf = thinkbayes2.MakePoissonPmf(3*2.9, 50)

thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Hist(pmf)
thinkplot.Config(xlabel='# of goals', ylabel='PMF', xlim=[-0.5,22.5])

# BUG FIX: the message said "in e games" — this is the 3-game probability.
print('probability of 9 goals in 3 games:', pmf.Prob(9))
# -

# **Exercise:** Suppose that the long-run goal-scoring rate of the
# Canucks against the Bruins is 2.6 goals per game. Plot the distribution
# of `t`, the time until the Canucks score their first goal.
# In their next game, what is the probability that the Canucks score
# during the first period (that is, the first third of the game)?
#
# Hint: `thinkbayes2` provides `MakeExponentialPmf` and `EvalExponentialCdf`.

# **Solution:** The Poisson and the Exponential distributions are linked, as written in the paper.

# +
t_pmf = thinkbayes2.MakeExponentialPmf(2.6, 3)

thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pdf(t_pmf)
thinkplot.Config(xlabel='time to the first goal', ylabel='PMF')
# -

# Since the time is expressed in units of games, the probability of the Canucks scoring in the first period is P(t < 1/3).

t_pmf.ProbLess(1./3)

# The value obtained here is affected by the discrete nature of the PMF. In the proposed solution the probability is computed in a more elegant way:

thinkbayes2.EvalExponentialCdf(x=1/3, lam=2.6)

# **Exercise:** Assuming again that the goal scoring rate is 2.6, what is the probability that the Canucks get shut out (that is, don't score for an entire game)?
Answer this question two ways, using the CDF of the exponential distribution and the PMF of the Poisson distribution. # **Solution 1:** using the exponential distribution, the probability can be computed as P(t > 1), i.e. 1-P(t<1) 1-thinkbayes2.EvalExponentialCdf(x=1, lam=2.6) # **Solution 2**: this is simply P(0 goal) thinkbayes2.EvalPoissonPmf(0, lam=2.6) # ## The Boston Bruins problem # # The `Hockey` suite contains hypotheses about the goal scoring rate for one team against the other. The prior is Gaussian, with mean and variance based on previous games in the league. # # The Likelihood function takes as data the number of goals scored in a game. # + from thinkbayes2 import MakeNormalPmf from thinkbayes2 import EvalPoissonPmf class Hockey(thinkbayes2.Suite): """Represents hypotheses about the scoring rate for a team.""" def __init__(self, label=None): """Initializes the Hockey object. label: string """ mu = 2.8 sigma = 0.3 pmf = MakeNormalPmf(mu, sigma, num_sigmas=4, n=101) thinkbayes2.Suite.__init__(self, pmf, label=label) def Likelihood(self, data, hypo): """Computes the likelihood of the data under the hypothesis. Evaluates the Poisson PMF for lambda k. hypo: goal scoring rate in goals per game data: goals scored in one game """ lam = hypo k = data like = EvalPoissonPmf(k, lam) return like # - # Now we can initialize a suite for each team: suite1 = Hockey('bruins') suite2 = Hockey('canucks') # Here's what the priors look like: thinkplot.PrePlot(num=2) thinkplot.Pdf(suite1) thinkplot.Pdf(suite2) thinkplot.Config(xlabel='Goals per game', ylabel='Probability') # And we can update each suite with the scores from the first 4 games. 
# + suite1.UpdateSet([0, 2, 8, 4]) suite2.UpdateSet([1, 3, 1, 0]) thinkplot.PrePlot(num=2) thinkplot.Pdf(suite1) thinkplot.Pdf(suite2) thinkplot.Config(xlabel='Goals per game', ylabel='Probability') suite1.Mean(), suite2.Mean() # - # To predict the number of goals scored in the next game we can compute, for each hypothetical value of $\lambda$, a Poisson distribution of goals scored, then make a weighted mixture of Poissons: # + from thinkbayes2 import MakeMixture from thinkbayes2 import MakePoissonPmf def MakeGoalPmf(suite, high=10): """Makes the distribution of goals scored, given distribution of lam. suite: distribution of goal-scoring rate high: upper bound returns: Pmf of goals per game """ metapmf = Pmf() for lam, prob in suite.Items(): pmf = MakePoissonPmf(lam, high) metapmf.Set(pmf, prob) mix = MakeMixture(metapmf, label=suite.label) return mix # - # Here's what the results look like. # + goal_dist1 = MakeGoalPmf(suite1) goal_dist2 = MakeGoalPmf(suite2) thinkplot.PrePlot(num=2) thinkplot.Pmf(goal_dist1) thinkplot.Pmf(goal_dist2) thinkplot.Config(xlabel='Goals', ylabel='Probability', xlim=[-0.7, 11.5]) goal_dist1.Mean(), goal_dist2.Mean() # - # Now we can compute the probability that the Bruins win, lose, or tie in regulation time. # + diff = goal_dist1 - goal_dist2 p_win = diff.ProbGreater(0) p_loss = diff.ProbLess(0) p_tie = diff.Prob(0) print('Prob win, loss, tie:', p_win, p_loss, p_tie) # - # If the game goes into overtime, we have to compute the distribution of `t`, the time until the first goal, for each team. For each hypothetical value of $\lambda$, the distribution of `t` is exponential, so the predictive distribution is a mixture of exponentials. # + from thinkbayes2 import MakeExponentialPmf def MakeGoalTimePmf(suite): """Makes the distribution of time til first goal. 
suite: distribution of goal-scoring rate returns: Pmf of goals per game """ metapmf = Pmf() for lam, prob in suite.Items(): pmf = MakeExponentialPmf(lam, high=2.5, n=1001) metapmf.Set(pmf, prob) mix = MakeMixture(metapmf, label=suite.label) return mix # - # Here's what the predictive distributions for `t` look like. # + time_dist1 = MakeGoalTimePmf(suite1) time_dist2 = MakeGoalTimePmf(suite2) thinkplot.PrePlot(num=2) thinkplot.Pmf(time_dist1) thinkplot.Pmf(time_dist2) thinkplot.Config(xlabel='Games until goal', ylabel='Probability') time_dist1.Mean(), time_dist2.Mean() # - # **Comment:** be careful! The above curves are not exponentials, even if it looks like they are! As explained through the exercise, they are a mixture of exponentials! # In overtime the first team to score wins, so the probability of winning is the probability of generating a smaller value of `t`: p_win_in_overtime = time_dist1.ProbLess(time_dist2) p_adjust = time_dist1.ProbEqual(time_dist2) p_win_in_overtime += p_adjust / 2 print('p_win_in_overtime', p_win_in_overtime) # Finally, we can compute the overall chance that the Bruins win, either in regulation or overtime. p_win_overall = p_win + p_tie * p_win_in_overtime print('p_win_overall', p_win_overall) # ## Exercises # **Exercise:** To make the model of overtime more correct, we could update both suites with 0 goals in one game, before computing the predictive distribution of `t`. Make this change and see what effect it has on the results. 
# **Solution:** ### update the suites with 0 goals in the game suite1.UpdateSet([0]) suite2.UpdateSet([0]) time_dist1_up = MakeGoalTimePmf(suite1) time_dist2_up = MakeGoalTimePmf(suite2) # + thinkplot.PrePlot(num=2) thinkplot.Pmf(time_dist1_up) thinkplot.Pmf(time_dist2_up) thinkplot.Config(xlabel='Games until goal (after having considered 0 goals in the last game)', ylabel='Probability') time_dist1_up.Mean(), time_dist2_up.Mean() # - p_win_in_overtime_up = time_dist1_up.ProbLess(time_dist2_up) p_adjust_up = time_dist1_up.ProbEqual(time_dist2_up) p_win_in_overtime_up += p_adjust_up / 2 print('p_win_in_overtime', p_win_in_overtime_up) p_win_overall = p_win + p_tie * p_win_in_overtime_up print('p_win_overall', p_win_overall) # **Comment:** this is a good way of reasoning, but it's highly simplified as a tie is not only 0-0, but every result with the two teams scoring the same number of goals (of course). # **Exercise:** In the final match of the 2014 FIFA World Cup, Germany defeated Argentina 1-0. What is the probability that Germany had the better team? What is the probability that Germany would win a rematch? # # For a prior distribution on the goal-scoring rate for each team, use a gamma distribution with parameter 1.3. # + from thinkbayes2 import MakeGammaPmf xs = np.linspace(0, 8, 101) pmf = MakeGammaPmf(xs, 1.3) thinkplot.Pdf(pmf) thinkplot.Config(xlabel='Goals per game') pmf.Mean() # - # **Solution**: Write a class equivalent to `Hockey` but with the given data, i.e. the new prior. The rest is modelled the same way using a Poisson model. class Soccer(thinkbayes2.Suite): ''' model soccer teams scoring goals ''' def __init__(self, label=None): xs = np.linspace(0, 8, 101) pmf = thinkbayes2.MakeGammaPmf(xs, 1.3) thinkbayes2.Suite.__init__(self, pmf, label=label) def Likelihood(self, data, hypo): """ Computes the likelihood of the data under the hypothesis. Evaluates the Poisson PMF for lambda and k. 
hypo: goal scoring rate in goals per game data: goals scored in one game """ lam = hypo k = data like = EvalPoissonPmf(k, lam) return like s_arg = Soccer('Argentina') s_ger = Soccer('Germany') thinkplot.Clf() thinkplot.PrePlot(num=2) thinkplot.Pdfs([s_arg, s_ger]) thinkplot.Config(xlabel='lambda', ylabel='PMF') plt.legend(loc='best'); # Update with the data of the match: Germany won 1-0 s_arg.Update(0) s_ger.Update(1) # Check the posteriors on $\lambda$ thinkplot.Clf() thinkplot.PrePlot(num=2) thinkplot.Pdfs([s_arg, s_ger]) thinkplot.Config(xlabel='lambda', ylabel='PMF') # We can now compute the probability of Germany being a better team than Argentina: print('probability of Germany > Argentina:', s_ger.ProbGreater(s_arg)) # **Comment:** it is important to note that this is not the probability of Germany scoring more goals than Argentina in a rematch, but it's the probability of $\lambda_{\rm Germany}$ being higer than $\lambda_{\rm Argentina}$!!!! # Let's go ahead to check the odds and compute the Bayes factor. # The prior odds where 1:1 as the same identical prior was used for the two teams. Posterior odds: p = s_ger.ProbGreater(s_arg) posterior_odds = p / (1-p) print('posterior odds of a victory of Germany in a rematch:', posterior_odds) # Since the prior odds are 1, the Bayes factor (`posterior_odds` / `prior_odds`) has the same value as the posterior odds. # What about the number of goals each team would score in a rematch? Starting from the posterior distribution of $\lambda$ we have to compute the distribution of the goals, which is the mixture of the Poisson processes for each value of $\lambda$ (weighted with P($\lambda$)). 
def MakeGoalDistribution(pmf, high=15, label=None): meta_pmf = Pmf() for lam, prob in pmf.Items(): meta_pmf.Set( MakePoissonPmf(lam, high), prob ) meta_pmf.Normalize() return thinkbayes2.MakeMixture(meta_pmf,label=label) g_ger = MakeGoalDistribution(s_ger, label='Germany') g_arg = MakeGoalDistribution(s_arg, label='Argentina') thinkplot.Clf() thinkplot.PrePlot(num=2) thinkplot.Pmfs([g_ger, g_arg]) thinkplot.Config(xlabel='# of goals', ylabel='PMF') # Now we can compare the probability of Germany winning a rematch (or in general all the other outcomes): p_win = g_ger.ProbGreater(g_arg) p_loose = g_ger.ProbLess(g_arg) p_tie = 1 - (p_win + p_loose) print('Germany wins:', p_win) print('Argentina wins:', p_loose) print('Tie game:', p_tie) # Another way to do this is to: # 1) extract a random value for $\lambda$ # 2) extract a value for the goals scored, given $\lambda$ # 3) repeat 1) and 2) a lot of times for each team # 4) compare the means of the scored goals from scipy.stats import poisson n = 1000 gs_ger = poisson.rvs(s_ger.Sample(n)) gs_arg = poisson.rvs(s_arg.Sample(n)) # In the above expression (let's focus on Germany, the same happens for Argentina): # * `s_ger.Sample(n)` extracts `n` random numbers from the distribution of `s_ger`, i.e. `n` random values for $\lambda$ # * `poisson.rvs(...)` extracts a random number of goals following a Poisson distribution for each value of $\lambda$ print('Germany wins:', np.mean(gs_ger > gs_arg)) print('Argentina wins:', np.mean(gs_ger < gs_arg)) print('Tie game:', np.mean(gs_ger == gs_arg)) # **Exercise:** In the 2014 FIFA World Cup, Germany played Brazil in a semifinal match. Germany scored after 11 minutes and again at the 23 minute mark. At that point in the match, how many goals would you expect Germany to score after 90 minutes? What was the probability that they would score 5 more goals (as, in fact, they did)? 
# # Note: for this one you will need a new suite that provides a Likelihood function that takes as data the time between goals, rather than the number of goals in a game. # **Solution:** the idea here is to get an estimate of the time between goals: start from a prior, and update it with the data of the first two goals. Once we get the posterior we can make estimates of how many goals are scored in 90 minutes. # Proceed with the same procedure as before: create a class to handle the problem, but this time it has to be "time-oriented" rather than "goal-oriented". Given that we are modelling goals scored with a Poisson($\lambda$), the time between goals is modelled with Exponential($\lambda$). The prior on $\lambda$ is the same as in the goals problem. # # Here we are always assuming the unit of time to be a full match, i.e. 90 minutes. To properly handle the data, we have to scale the time between goals accordingly. class Soccer2(thinkbayes2.Suite): ''' model time between goals for a soccer team ''' def __init__(self, label=None): xs = np.linspace(0, 15, 101) pmf = MakeGammaPmf(xs, 1.3) thinkbayes2.Suite.__init__(self, pmf, label=label) def Likelihood(self, data, hypo): ''' evaluates the likelihood of data under the hypothesis Evaluate the Exponential PMF for lambda and t * data: time between goals * hypo: goal scoring rate ''' t = data/90 lam = hypo like = thinkbayes2.EvalExponentialPdf(t, lam) return like # Start istantiating the class, i.e. setting up the prior: prior = Soccer2('prior') thinkplot.Clf() thinkplot.PrePlot(num=2) thinkplot.Pdf(prior) thinkplot.Config(xlabel='goal scoring rate', ylabel='PMF') # Update with the data of the time between goals. Let's update for one goal at the time and check the evolution of the Pdf. 
posterior1 = prior.Copy('posterior1') posterior1.Update(11) thinkplot.Clf() thinkplot.PrePlot(num=2) thinkplot.Pdfs([prior, posterior1]) thinkplot.Config(title='posterior after the first goal', xlabel='goal scoring rate', ylabel='PMF') posterior2 = posterior1.Copy('posterior2') posterior2.Update(12) thinkplot.Clf() thinkplot.PrePlot(num=3) thinkplot.Pdfs([prior, posterior1, posterior2]) thinkplot.Config(title='posterior after the first goal', xlabel='goal scoring rate', ylabel='PMF') # Now that we have the posterior distribution for $\lambda$, we can answer the question. As usual, pay attention to the question: we are asked to find how many goals do we expect Germany to score in the rest of the match and what is the probability to score ≥5 goals in the rest of the match. The crucial part here is "in the rest of the match). As in the previous case we have to make a mixture of the Poisson processes for each value of $\lambda$, but also we have to take into account that some time has already passed in the match. def MakeGoalDistrInTime(pmf, remainingTime, high=15, label=None): timeRatio = remainingTime / 90 meta_pmf = thinkbayes2.Pmf() for lam, prob in pmf.Items(): lam_eff = lam * timeRatio meta_pmf.Set( MakePoissonPmf(lam_eff, high), prob ) return thinkbayes2.MakeMixture(meta_pmf, label=label) goalsInMatch = MakeGoalDistrInTime(posterior2, 90-23) thinkplot.Clf() thinkplot.PrePlot(num=1) thinkplot.Hist(goalsInMatch) thinkplot.Config(xlabel='number of goals', ylabel='PMF') print('predicted number of goals in 90 minutes:', goalsInMatch.Mean()) print('probabilitt of scoring 5 additional goals:', goalsInMatch.ProbGreater(4)) # **Exercise:** Which is a better way to break a tie: overtime or penalty shots? # **Solution:** Here the idea is to compare the outcome of a shorter match (i.e. the 30 mins of overtime) to the outcome of penalties. 
Here we assume that the original match has already ended in a tie, so only the outcomes of the overtime and penalties have to be computed/evaluated. # # Since, as far as I understand, here we are only talking in general, the prior distribution will be used to evaulate the probability of a non-tie overtime. We need to generate probability distributions for 2 teams and compare them. # # To break a tie, the overtime has to finish not in a tie. team1 = MakeGoalDistrInTime(prior, 30) team2 = MakeGoalDistrInTime(prior, 30) p_t1w = team1.ProbGreater(team2) p_t1l = team1.ProbLess(team2) print('prob non-tie in OT:', p_t1w + p_t1l) # This is a first estimate, things can be refined updating the prior with the data that the regular match has ended in a tie, i.e. team1 and team2 scored the same number of goals. # A quick google search says that penalties are scored 75% of the times. The simplest approach is to consider this a Binomial process and compute the probability that after 5 penalties from each team, the number of scored penalties is different, which would finish the match. p = 0.75 team1_penalties = thinkbayes2.MakeBinomialPmf(5, p) team2_penalties = thinkbayes2.MakeBinomialPmf(5, p) p_t1w_p = team1_penalties.ProbGreater(team2_penalties) p_t1l_p = team1_penalties.ProbLess(team2_penalties) print('prob non-tie penalties:', p_t1w_p + p_t1l_p) # With this very rough calculation, penalties are much better at breaking a tie. The calculation can be improved a lot by: # * updating the scoring probability in the overtime with the data of the tie in the regular time # * having more realistic model for penalties # * using a full prior distribution (e.g. Beta) and make a mixture of expected penalties scored # * maybe modifying the distribution of `p` after every penalty, e.g. 
see [here](https://fivethirtyeight.com/features/a-chart-for-predicting-penalty-shootout-odds-in-real-time/) # **Exercise:** Suppose that you are an ecologist sampling the insect population in a new environment. You deploy 100 traps in a test area and come back the next day to check on them. You find that 37 traps have been triggered, trapping an insect inside. Once a trap triggers, it cannot trap another insect until it has been reset. # If you reset the traps and come back in two days, how many traps do you expect to find triggered? Compute a posterior predictive distribution for the number of traps. # **Solution:** Let's consider the number of traps that triggered to be a Binomial process with probability $p$. We build a prior distribution of $p$, update it with the data of 37/100 traps triggered in 1 day. Then we can compute the posterior distribution of the number of traps we expect to have triggered after 2 days. # As usual each hypothesis is represented by the value of $p$. # # For the prior on $p$ let's use a Gamma distribution, since it is limited between 0 and 1. # + xs = np.linspace(0,1,201) beta = thinkbayes2.Beta(2,2) plt.plot(xs, beta.EvalPdf(xs)); # - # Beta(2,2) seems a reasonable starting point, as we would expect none and all traps to trigger with a very lo probability. class Traps(thinkbayes2.Suite): def __init__(self, n, label=None): self.n = n beta = thinkbayes2.Beta(2,2) thinkbayes2.Suite.__init__(self, beta.MakePmf(), label=label) def Likelihood(self, data, hypo): ''' compute the likelihood of a binomial process * data: number of successes * hypo: probability of the binomial process ''' k = data p = hypo like = thinkbayes2.EvalBinomialPmf(k, self.n, p) return like prior = Traps(100, 'prior') thinkplot.Clf() thinkplot.PrePlot(num=1) thinkplot.Pdf(prior) thinkplot.Config(xlabel='probability of triggering a trap', ylabel='PMF') # Now update with the data of 37 triggered traps in one night. 
posterior = prior.Copy(label='posterior')
posterior.Update(37)

thinkplot.Clf()
thinkplot.PrePlot(num=2)
thinkplot.Pdfs([prior, posterior])
thinkplot.Config(xlabel='probability of triggering a trap', ylabel='PMF')

# Now comes the complex part: we have to compute the posterior distribution of the number of triggered traps in 2 days, considering that once the trap has triggered it cannot trigger again. This makes it way more complex than the exercise at the beginning of the notebook, in which it was OK to sum the pmfs.

# Let's start with the posterior distribution of the number of triggered traps after 1 day of observation.

def MakeTriggeredTrapsTest(pmf, label='traps in 1 day'):
    """Posterior predictive distribution of triggered traps after one day.

    pmf: posterior Suite over the trigger probability p; must expose pmf.n,
         the total number of traps.
    label: label for the returned mixture. (Previously this parameter was
           accepted but silently ignored in favour of a hard-coded string;
           it is now honoured, with the old string as the default so the
           existing call below behaves exactly as before.)

    returns: Pmf over the number of triggered traps
    """
    meta_pmf = thinkbayes2.Pmf()
    for p, prob in pmf.Items():
        # For a fixed p, the triggered-trap count is Binomial(n, p);
        # weight each such Pmf by the posterior probability of p.
        meta_pmf.Set( thinkbayes2.MakeBinomialPmf(pmf.n, p), prob )
    return thinkbayes2.MakeMixture(meta_pmf, label=label)

nTrapsTest = MakeTriggeredTrapsTest(posterior)
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Hist(nTrapsTest)

# Is it enough to consider 2$\times$`p` as done above when considering 3 matches? Let's implement this as a starting point. --> tested, doesn't work as doubling `p` can get it above 1.
# It should be a mixture of mixtures: # * the first mixture is on the possible values of `p`, as above # * the second mixture takes into account the combination of the two days of observation: since we are considering 2 days, it happens that on the first day `k1` traps are triggered (with a probability `pk1`) and on the second day we have again a binomial process with `n`-`k1` trials in which `k2` traps trigger with probability `k2` def MakeTriggeredTraps(pmf, label=None): #meta_pmf = thinkbayes2.Pmf() mix = thinkbayes2.Pmf(label=label) nTraps = pmf.n for p, prob in pmf.Items(): ### k1 traps triggered on day1 pmf_traps_d1 = thinkbayes2.MakeBinomialPmf(nTraps, p) for k1, pk1 in pmf_traps_d1.Items(): ### k2 traps triggered on day 2 (using the same probability p for the sake of simplicity) pmf_traps_d2 = thinkbayes2.MakeBinomialPmf(nTraps - k1, p) #meta_pmf.Set( pmf_traps_d1 * pmf_traps_d2, prob*pk1 ) for k2, pk2 in pmf_traps_d2.Items(): mix.Incr( k1 + k2, prob*pk1*pk2) #return thinkbayes2.MakeMixture(meta_pmf, label=label) mix.Normalize() return mix # For sure there is a way more efficient way to code it (e.g. take the number of days as a parameter) thinkplot.Clf() thinkplot.PrePlot(num=1) thinkplot.Pmf(nTrapsTest) nTriggeredTraps = MakeTriggeredTraps(posterior, label='2 days') thinkplot.Clf() thinkplot.PrePlot(num=1) thinkplot.Pmf(nTriggeredTraps) thinkplot.Config(xlabel='# of triggered traps', ylabel='probability', title='posterior distribution')
notebooks/world_cup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: "Python (Intel\xAE oneAPI)" # language: python # name: c009-intel_distribution_of_python_3_oneapi-beta05-python # --- # # OpenMP* Device Parallelism (C/C++) # #### Sections # - [Learning Objectives](#Learning-Objectives) # - [Device Parallelism](#Device-Parallelism) # - [GPU Architecture](#GPU-Architecture) # - ["Normal" OpenMP constructs](#"Normal"-OpenMP-constructs) # - [League of Teams](#League-of-Teams) # - [Worksharing with Teams](#Worksharing-with-Teams) # - [Host Device Concurrency](#Host-Device-Concurrency) # - _Code:_ [Lab Exercise: OpenMP Device Parallelism](#Lab-Exercise:-OpenMP-Device-Parallelism) # # ## Learning Objectives # # * Explain basic GPU Architecture # * Be able to use OpenMP offload worksharing constructs to fully utilize the GPU # # ### Prerequisites # Basic understanding of OpenMP constructs are assumed for this module. You also should have already went through the [Introduction to OpenMP Offload module](../intro/intro.ipynb) and [Managing Device Data module](../datatransfer/datatransfer.ipynb), where the basics of using the Jupyter notebooks with the Intel® DevCloud and an introduction to the OpenMP `target` and `target data` constructs were discussed. # *** # ## Device Parallelism # As we've discussed in the previous modules, the OpenMP `target` construct transfers the control flow to the target device. However, the transfer of control is sequential and synchronous. # # In OpenMP, offload and parallelism are separate, so programmers need to explicitly create parallel regions on the target device. In theory, constructs that create parallelism on offload devices can be combined with any OpenMP construct, but in practice, only a subset of OpenMP constructs are useful for the target device. 
# ## GPU Architecture
# Before diving into OpenMP parallelism constructs for target devices, let's first examine Intel® GPU architecture.
#
# <img src="Assets/GPU_Arch.png">
#
# Intel® GPUs contain 1 or more slices. Each slice is composed of several Subslices. Each subslice contains multiple EUs (likely 8 or more), and has its own thread dispatcher unit, instruction cache, shared local memory, and other resources. EUs are compute processors that drive the SIMD ALUs.
#
# The following table displays how the OpenMP concepts of League, Team, Thread, and SIMD are mapped to GPU hardware.
#
# |OpenMP | GPU Hardware |
# |:----:|:----|
# |SIMD | SIMD Lane (Channel)|
# |Thread | SIMD Thread mapped to an EU |
# |Team | Group of threads mapped to a Subslice |
# |League | Multiple Teams mapped to a GPU |

# ## "Normal" OpenMP constructs
# OpenMP GPU offload supports all "normal" OpenMP constructs such as `parallel`, `for`, `barrier`, `sections`, `tasks`, etc. However, not every construct will be useful for the GPU. When using these constructs, the full threading model is only supported within a subslice; this is because there's no synchronization among subslices, and there's no coherence and memory fence among subslices' L1 caches.
#
# Let's examine the following example.
# ```c
# void saxpy(float a, float* x, float* y, int sz) {
#   #pragma omp target map(to:x[0:sz]) map(tofrom:y[0:sz])
#   #pragma omp parallel for simd
#   for (int i=0; i< sz; i++) {
#     y[i] = a * x[i] + y[i];
#   }
# }
# ```
# Here, we use the `target` pragma to offload the execution to the GPU. We then use `parallel` to create a team of threads, `for` to distribute loop iterations to those threads, and `simd` to request iteration vectorization with SIMD instructions. However, due to the restrictions aforementioned, only one GPU subslice is utilized here, so the GPU would be significantly underutilized. In some cases, the compiler may deduce `teams distribute` from `parallel for` and still use the entire GPU.
# ## League of Teams # To take advantage of multiple subslices, use the `teams` pragma to create multiple **master** threads for execution. When combined with the `parallel` pragma, these master threads become a league of thread teams. Becuase there's no synchronization across teams of threads, the teams could then be assigned to different GPU subslices. # # <img src="Assets/teams.JPG"> # # When using the `teams` construct, the number of teams created is implementation defined. Although, you may optionally specify an upper limit with the **num_teams** clause. The **thread_limit** clause of the `teams` pragma can be optionally used to limit the number of threads in each team. # # Example: `#pragma omp teams num_teams(8) thread_limit(16)` # ## Worksharing with Teams # After a league of teams is created by `teams`, use the `distribute` construct to distribute chunks of iterations of a loop across the different teams in the league. This is analogous to what the `for` construct does for `parallel` regions. The `distribute` pragma is associated with a loop nest inside a teams region. # # For nested loops, the **collapse** clause can be used to specify how many loops are associated with the `distribute` pragma. You may specify a **collapse** clause with a parameter value greater than 1 to collapse associated loops into one large loop. # # You can also use **dist_schedule** clause on the `distribute` construct to manually specify the chunk size that are distributed to master threads of each team. For example, `#pragma omp distribute dist_schedule(static, 512)` would create chunks of 512 iterations. # # ### Example with Combined Constructs # For convenience, OpenMP supports combined constructs for OpenMP offload. The code below shows how a single line can encompass all of the pragmas that we've discussed. 
# ```c
# void saxpy (float a, float *x, float *y, int sz) {
#   #pragma omp target teams distribute parallel for simd \
#     map(to:x[0:sz]) map(tofrom:y[0:sz])
#   for (int i=0; i<sz; i++) {
#     y[i] = a*x[i] + y[i];
#   }
# }
# ```
# When these constructs are used without additional clauses, the number of teams created, the number of threads created per team, and how loop iterations are distributed are all implementation defined.

# The following diagram breaks down the effects of each pragma in the previous example. Here, we assume that there are a total of 128 loop iterations and that 4 teams, and 4 threads per team are created by the implementation.
#
# 1. The `omp target` pragma offloads the execution to the device.
# 2. The `omp teams` pragma creates multiple master threads, 4 thread teams in this diagram.
# 3. The `omp distribute` pragma distributes loop iterations to those 4 thread teams, 32 iterations for each team shown.
# 4. The `omp parallel` pragma creates a team of threads for each master thread (team), 4 threads created for each team shown.
# 5. The `omp for` pragma distributes each team's 32 iterations among its 4 threads.
# 6. The `omp simd` pragma specifies that multiple iterations of the loop can be executed using SIMD instructions.
#
# <img src="Assets/distribute.JPG">

# ## Host Device Concurrency
#
# When a target region is encountered, a host task is generated, which synchronizes the CPU and target device. OpenMP uses tasking to manage execution and dependencies. Add the `nowait` clause so the host does not need to wait for the target region to complete.
#
# ```c
# #pragma omp target nowait
# ```
#
# Using a `nowait` clause with a `target` construct allows for asynchronous offloading, allowing the host device to continue execution. One way to synchronize a target region back with the host device is by using the `taskwait` construct, which will wait until all tasks complete.
# # In the following example, the for loop is offloaded to the target device, while the host device continues exectution and performs other work. After both the device and host complete finish, the host device will continue execution. # # ```c # #pragma omp target map(to:b,c,d) map(from:a) nowait # { # #pragma omp teams distribute parallel for simd # for (i=0; i<500; i++) { # a[i] = b[i] * c + d; # } # } # # #pragma omp task # other_work(); # # #pragma omp taskwait //Synchronization # a0 = a[0]; # ``` # ## Lab Exercise: OpenMP Device Parallelism # In this exercise, we will practice using the offload worksharing constructs on the saxpy function that we've already worked with in the previous modules. #Optional, see the contents of main.cpp # %pycat main.cpp # In the cell below, add OpenMP pragmas at the locations indicated to perform the following tasks. # 1. For the outer loop, use a **combined** construct to # 1. Create NUM_BLOCKS of **master** threads, use the clause *num_teams(NUM_BLOCKS)* # 2. Distribute the outer loop iterations to the varoius master threads. # 2. For the inner loop, use a combined construct to # 1. Create a team of threads for each master thread. # 2. Distribute inner loop iterations to those threads. # 3. Signal that multiple loop iterations can be executed concurrently with SIMD instructions. # %%writefile lab/saxpy_func_parallel.cpp #pragma omp target map(from: is_cpu) map(from:num_teams) map(to:x[0:ARRAY_SIZE]) map(tofrom:y[0:ARRAY_SIZE]) { // 1. Add pragma to create multiple master threads use clause num_teams(NUM_BLOCKS) // and distribute loop iterations to the various master threads. for (ib = 0; ib < ARRAY_SIZE; ib += NUM_BLOCKS) { if (ib == 0) { // Test if target is the CPU Host or the GPU Device is_cpu = omp_is_initial_device(); // Query number of teams created num_teams = omp_get_num_teams(); } // 2. 
Place the combined pragma here to create a team of threads for each master thread // Distribute iterations to those threads, and vectorize for (i = ib; i < ib + NUM_BLOCKS; i++) { y[i] = a * x[i] + y[i]; } } } # Next, compile the code using the script *compile_c.sh*. If you would like to see the contents of _compile_c.sh_ execute the following cell. # %pycat compile_c.sh #Execute this cell to compile the code # ! chmod 755 compile_c.sh; ./compile_c.sh; # Once the code has been successfully compiled, run the code by executing the _run.sh_ script. #Optionally examine the run script by executing this cell. # %pycat run.sh # Execute the following cell to run the program. Make sure you see the "Passed!" message. # ! chmod 755 q; chmod 755 run.sh;if [ -x "$(command -v qsub)" ]; then ./q run.sh; else ./run.sh; fi # _If the Jupyter cells are not responsive or if they error out when you compile the samples, please restart the Kernel and compile the samples again_ # Execute the following cell to see the solution. # %pycat saxpy_func_parallel_solution.cpp # # Summary # In this module, you have learned the following: # * High-level overview of GPU architecture and how OpenMP constructs map to it. # * Create multiple master threads that can be assigned to GPU subslices using the `teams` construct. # * Distribute loop iterations to those master threads using the `distribute` construct. # * Use the `teams` and `distribute` constructs combined with other OpenMP constructs for better performance. # <html><body><span style="color:green"><h1>Survey</h1></span></body></html> # # [Tell us how we did in this module with a short survey. We will use your feedback to improve the quality and impact of these learning materials. Thanks!](https://intel.az1.qualtrics.com/jfe/form/SV_e3yrkDaDE7ZnKmN) # <html><body><span style="color:Red"><h1>Reset Notebook</h1></span></body></html> # # ##### Should you be experiencing any issues with your notebook or just want to start fresh run the below cell. 
from IPython.display import display, Markdown, clear_output import ipywidgets as widgets button = widgets.Button( description='Reset Notebook', disabled=False, button_style='', # 'success', 'info', 'warning', 'danger' or '' tooltip='This will update this notebook, overwriting any changes.', icon='check' # (FontAwesome names without the `fa-` prefix) ) out = widgets.Output() def on_button_clicked(_): # "linking function with output" with out: # what happens when we press the button clear_output() # !rsync -a --size-only /data/oneapi_workshop/OpenMP_Offload/parallelism/ ~/OpenMP_Offload/parallelism print('Notebook reset -- now click reload on browser.') # linking button and function together using a button's method button.on_click(on_button_clicked) # displaying button and its output together widgets.VBox([button,out]) # *** # # @Intel Corporation | [\*Trademark](https://www.intel.com/content/www/us/en/legal/trademarks.html)
DirectProgramming/C++/Jupyter/OpenMP-offload-training/parallelism/parallelism.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # I.BA_MOVK_MM – Kryptographie (SW10) # ## Übung: Homomorphe Verschlüsselung # ## Aufgabe 1: Homomorphe Verschlüsselung # # 1. Welches der drei Verschlüsselungsverfahren (RSA, EL-GAMAL, Paillier) ist eher # geeignet für die Verwendung in homomorpher Verschlüsselung eingesetzt werden? # Begründen bitte Ihre Antwort? # 2. Geben Sie ein Beispiel, in welchem Bereich das Paillier-Verfahren eingesetzt werden # könnte? Begründen Sie Ihre Antwort? # **Solution** # # <img src="img/algoverview.png" style="height:175px"> # # 1. RSA ist semantisch nicht korrekt, da durch Polluting (rauschen) das Ver+Entschlüsseln nicht immer dasselbe ergeben. Konkret wenn das Rechenergebnis grösser als der Modulus ist, wird das Resultat "abgeschnitten". # Elgamal und Paillier sind besser geeignet da sie semantisch sind. # Jedoch ist Elgamal nur für die Multiplikation und Paillier nur für die Addition geeignet (kommt halt auf den Einsatzzweck an). Paillier ist ausserdem sehr komplex und rechenintensiv. # # 2. Paillier könnte z.B. für E-Voting genutzt werden. So könnte man alle abgegebenen Stimmen zählen (addieren) ohne zu sehen wer konkret wen gewählt hat # ## Aufgabe 2: Homomorphie-Eigenschaft von EL-GAMAL # # Berechnen Sie bitte jeden Schritt im unteren Teil des Beispiels (s. unten). Die Berechnung # für den oberen Teil finden Sie auf den Folien SW10 # # <img src="img/elgamal.png" style="height:300px"> # **Solution** # # <img src="img/helg1.jpg" style="height:600px"> # # <img src="img/helg2.jpg" style="height:600px"> # ## Aufgabe 3: Paillier-Verfahren # # Gegeben sind zwei Primzahlen **p**=3 und **q**=5. Sei **g**=16 aus $Z^{*}_{225}$ zufällig gewählt. # 1. Berechnen Sie jeweils den öffentlichen und privaten Schlüssel. # 2. Verschlüssen Sie den Klartext m=13. # 3. 
Entschlüssen Sie den Ciphertext c=71. # # *Hinweis: Das Ergebnis durch Codierung wird akzeptiert.* # # **Solution** # # <img src="img/pail1.jpg" style="height:600px"> # # <img src="img/pail2.jpg" style="height:600px">
SW10/Exercises_sol_sw10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Jupyter Notebook - Ransomware report use cases 2 # # Copyright © 2021 Google import vt import nest_asyncio import pandas as pd # + #@markdown Please, insert your VT API Key*: API_KEY = '' #@param {type: "string"} #@markdown **The API key should have Premium permissions, otherwise some of the use cases might not provide the expected results.* #@markdown # - nest_asyncio.apply() # # 1. Collecting hashes nh = 0 with vt.Client(API_KEY) as client: it = client.iterator('/intelligence/search', params={'query': 'engines:ryuk fs:2021-01-01+ (type:peexe or type:pedll) p:10+'}) with open('hashes.log','w') as f: for obj in it: if obj.id: f.write(f'{obj.id}\n') nh += 1 f.close() print(f'{nh} hashes have been written to the file hashes.log\n') # # 2. Malicious contacted IOCs # + def get_malicious_IPs(file_hash): contacted_IPs = set() for ip in client.iterator('/files/{}/contacted_ips', file_hash, limit=20): stats = ip.get('last_analysis_stats') if stats and stats['malicious'] >= min_positives: contacted_IPs.add(ip.id) return contacted_IPs def get_malicious_Domains(file_hash): contacted_domains = set() for domain in client.iterator('/files/{}/contacted_domains', file_hash, limit=20): stats = domain.get('last_analysis_stats') if stats and stats['malicious'] >= min_positives: contacted_domains.add(domain.id) return contacted_domains # + malIOCs = set() min_positives = 5 # Minimun number of positives for every IOC nIOCs = 0 with vt.Client(API_KEY) as client: it = client.iterator('/intelligence/search', params={'query': 'engines:babuk fs:2021-07-01+ (have:contacted_domains or have:contacted_ips)'}) for obj in it: ips = get_malicious_IPs(obj.id) domains = get_malicious_Domains(obj.id) malIOCs = malIOCs | ips | domains with open('iocs.log','w') as f: 
for ioc in malIOCs: f.write(f'{ioc}\n') nIOCs += 1 print(f'{nIOCs} IOCs have been written to the file iocs.log\n') f.close() # - # # 3. Searching for a text in domains and URLs # + def search_domains(file_hash, text): domains = set() for domain in client.iterator('/files/{}/embedded_domains', file_hash, limit=20): if text in domain.id: domains.add(domain.id) for domain in client.iterator('/files/{}/itw_domains', file_hash, limit=20): if text in domain.id: domains.add(domain.id) return domains def search_urls(file_hash, text): urls = set() for url in client.iterator('/files/{}/embedded_urls', file_hash, limit=20): if text in url.id: urls.add(url.id) for url in client.iterator('/files/{}/itw_urls', file_hash, limit=20): if text in url.id: urls.add(url.id) return urls # + stxt="google" # Write here any text you want to search for query_string = 'engines:cerber fs:2021-06-01+ (embedded_domains: %s OR embedded_urls:%s OR itw: %s)' % (stxt, stxt, stxt) with vt.Client(API_KEY) as client: it = client.iterator('/intelligence/search', params={'query': query_string}) for obj in it: results = search_domains(obj.id, search_text) | search_urls(obj.id, search_text) if results: print(f'** Results found for file hash: {obj.id} (%s)\n' %(obj.type_tag) ) for result in results: print(f' - {result}\n')
examples/jupyter/ransomware_report_usecases2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # # # Introduction # # This report presents the various methods applied for the resolution of two problems of classification in a first part then for a problem of regression in a second. For our two three problems, a partitioning of the data into a set of training data and a set of test data is carried out and a creation of cross-validation functions as this allows the objective comparison of the models built. We have chosen to create our classification and regression models with non-standardized data. The format of the expected . Moreover, standardizing the data of the training file and the test file independently showed a tendency to accentuate the misclassification rate or the MSE. # # # Classification problem # # The problem treated in this first part is a classification problem. The variable to be explained, y, is nominal qualitative with 5 modalities {aa,ao,dcl,iy,sh}. To predict this variable, we have 256 quantitative explanatory variables whose values seem to be centered and reduced. We have 2250 complete statements. # # We will note n, the number of readings and p, the number of explanatory variables. Here we therefore have large n (n=2250) and large p (p=256). 
# # + colab={"base_uri": "https://localhost:8080/", "height": 594} id="rl-P-xDcej08" outputId="80e12dd3-a7cb-42a9-da69-86ec867cae9c" import pandas as pd from pandas import DataFrame as df # Python ≥3.5 is required import sys # Scikit-Learn ≥0.20 is required import sklearn # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures # %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt # - pip install pandas # + [markdown] id="EgTjdIxpuy4Y" # #Preprocessing basique # Ici, on divise le jeu de données en différentes parties pour la valdiation croisée. # + [markdown] id="iWWmplcbjD8Z" # # Take a Quick Look at the Data Structure # + filename = 'data/parole_train.txt' # import numpy as np # data = np.loadtxt(filename, delimiter=',', skiprows=1, dtype=str) # print(data) data = pd.read_csv(filename, sep=" ") data.head() # - data.shape # + # reassign index of data data.sort_index(axis = 0) data.index = range(0, data.shape[0]) data # - data.info() data['y'].value_counts() data.describe() # + colab={"base_uri": "https://localhost:8080/", "height": 280} id="CHZN4WConF5j" outputId="7a35d97b-3a01-4775-906a-8df0b2d0511d" # Plot a histogram for each numerical attribute # %matplotlib inline data[data.columns[0:12]].hist(figsize = (20,15)) # + [markdown] id="Y0J6R9knOH47" # these attributes have same scale and they are distributed around 0 in interval [-3,3] # + from sklearn.model_selection import train_test_split # X = data # + def separate_features_target(data): X_cols = [] for col in data.columns: if(col != 'y'): X_cols.append(col) return data[X_cols] , data['y'] X_data, y_data = separate_features_target(data) # - y_data # ## PCA Analyse # First, we performed a principal component analysis to reduce the number of explanatory variables in order to visualize the data given their high dimensionality. Then, we created the sets that we will use later for cross-validation. 
# PCA is affected by scale, but scaling is not necessary here because the data attributes already share the same scale.

# +
from sklearn.decomposition import PCA

# Project the 256-dimensional feature space onto its two leading principal axes.
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(X_data)
principalDf = pd.DataFrame(data = principalComponents, columns = ['principal component 1', 'principal component 2'])
# -

principalDf

finalDf = pd.concat([principalDf, y_data], axis = 1)

# Scatter plot of the individuals projected on the first two principal axes,
# one color per phoneme class.
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title('2 component PCA', fontsize = 20)
targets = ['iy','ao','sh','dcl','aa']
# BUG FIX: the original colors list only had three entries ('r', 'g', 'b');
# zip() stops at the shortest sequence, so the 'dcl' and 'aa' classes were
# silently dropped from the plot (and the legend labels were shifted).
# One color per target class fixes that.
colors = ['r', 'g', 'b', 'c', 'm']
for target, color in zip(targets,colors):
    indicesToKeep = finalDf['y'] == target
    ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']
               , finalDf.loc[indicesToKeep, 'principal component 2']
               , c = color
               , s = 50)
ax.legend(targets)
ax.grid()

pca.explained_variance_ratio_

# This chart plots the percentage of variance explained against the number of principal components. We notice that PC1 explains 55% of the total variance and PC2 15%. Therefore, with these two components we can represent 70% of the information contained by the 256 variables. And as a result, obtain a rather faithful representation of individuals in 2 dimensions. The projection on the factorial axes reveals three clusters, two of which can easily be assimilated to distinct classes of phonemes.
# ## Prepare the Data for Machine Learning Algorithms

# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 33} id="7Sx0F85BCC_4" outputId="17675ce3-aab6-464b-9dc9-078b32b435d6"
# ### Create a train/test Set

# + id="EykJ7V92GETA"
def split_train_test(data, test_ratio):
    """Randomly split *data* into a train set and a test set.

    Parameters
    ----------
    data : pandas.DataFrame to split.
    test_ratio : float in [0, 1], fraction of rows assigned to the test set.

    Returns
    -------
    (train_set, test_set)
        Two DataFrames; the split is reproducible because the NumPy seed is
        fixed before shuffling.
    """
    np.random.seed(42)
    shuffled_indices = np.random.permutation(len(data))
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]

# +
# BUG FIX: the original call ended with a stray line-continuation backslash
# (`split_train_test(data , 0.1)\`), which glued the assignment onto the next
# print() statement and made the cell a SyntaxError.
train_set, test_set = split_train_test(data, 0.1)
print("size of train data : ", len(train_set))
print("size of test data : ", len(test_set ))

# +
X_train, y_train = separate_features_target(train_set)
X_test, y_test = separate_features_target(test_set)
# -

# ### Training and Evaluating on the Training Set

from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from statistics import mean

# ## Choosing model
#
# ### Model Comparison Methodology:
#
# To begin with, we created a 10-CV cross-validation function to compare the results of different classification models. The purpose of this is to evaluate the accuracy of our model on several test subsets and then to average them and thus obtain more reliable results. We have adapted it to the different models tested.
def display_scores(scores, model, kernel):
    """Print the mean and standard deviation of cross-validation accuracies."""
    print("Mean accuracy of ",model, " ",kernel, ":", round(scores.mean(),3), "Standard deviation : ", round(scores.std(),3) )


def cross_validation(X_train=X_train, y_train=y_train, model="", kernel="", folds=1):
    """Run repeated stratified k-fold CV for one model family and print scores.

    Parameters
    ----------
    X_train, y_train : training features / labels (default to the module-level
        train split computed earlier; bound at definition time).
    model : one of "LDA", "QDA", "NB", "KNN", "SVM", "RF".
    kernel : SVM kernel name; ignored by the other model families.
    folds : number of CV folds. NOTE(review): RepeatedStratifiedKFold requires
        n_splits >= 2, so the historical default of 1 is unusable as-is; every
        call site in this notebook passes folds=10 explicitly. Kept for
        signature compatibility.
    """
    cv = RepeatedStratifiedKFold(n_splits=folds, n_repeats=3, random_state=1)
    # Dispatch table instead of the original if/elif chain.
    # BUG FIX: an unrecognized model name used to fall through the chain and
    # crash with a NameError on the unbound `scores`; it now raises a clear
    # ValueError instead.
    scorers = {
        "LDA": lambda: LDA_cross_validiation_accuracy(X_train, y_train, cv),
        "QDA": lambda: QDA_cross_validiation_accuracy(X_train, y_train, cv),
        "NB": lambda: NB_cross_validiation_accuracy(X_train, y_train, cv),
        "KNN": lambda: KNN_cross_validiation_accuracy(X_train, y_train, cv),
        "SVM": lambda: SVM_cross_validiation_accuracy(X_train, y_train, cv, kernel),
        "RF": lambda: RF_cross_validiation_accuracy(X_train, y_train, cv),
    }
    try:
        scores = scorers[model]()
    except KeyError:
        raise ValueError("unknown model name: %r" % (model,)) from None
    display_scores(scores, model, kernel)


# ### Discriminant analysis methods
#
# We first looked at discriminating models. Here we find ourselves with a high p and n.
#
# Discriminant analysis encompasses methods that can be used for both classification and dimensionality reduction. Linear discriminant analysis (LDA) is particularly popular because it is both a classifier and a dimensionality reduction technique. Quadratic discriminant analysis (QDA) is a variant of LDA that allows nonlinear separation of data. The QDA may not be efficient in our case because of its large number of parameters. The results presented are the mean accuracies obtained for the different models.
# +
import sklearn.discriminant_analysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis


### LDA ###
def LDA_cross_validiation_accuracy(X_train, y_train, cv):
    """Return cross-validated accuracy scores for Linear Discriminant Analysis."""
    model = LinearDiscriminantAnalysis()
    # evaluate model
    scores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
    return scores


### QDA ###
def QDA_cross_validiation_accuracy(X_train, y_train, cv):
    """Return cross-validated accuracy scores for Quadratic Discriminant Analysis."""
    model = QuadraticDiscriminantAnalysis()
    # evaluate model
    scores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
    return scores


# +
X, y = separate_features_target(data)

cross_validation(X_train, y_train, "LDA","",10)
cross_validation(X_train, y_train, "QDA","", 10)
# -

# We notice that the LDA makes it possible to obtain a good performance.

# ### Naive Bayes

# +
from sklearn.naive_bayes import GaussianNB


def NB_cross_validiation_accuracy(X_train, y_train, cv):
    """Return cross-validated accuracy scores for Gaussian Naive Bayes."""
    model = GaussianNB()
    # evaluate model
    scores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
    return scores
# -

cross_validation(X_train, y_train, "NB","" , 10)

# Find best k by testing different possible values of k, then choosing the optimal k that minimizes the cross-validation ("cv") error and fits the final best KNN model that best explains our data

# ### K-Nearest Neighbors

# +
from sklearn.neighbors import KNeighborsClassifier

error_rate = []

for i in range(1,20):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(X_train,y_train)
    pred_i = knn.predict(X_test)
    error_rate.append(np.mean(pred_i != y_test))

plt.figure(figsize=(10,6))
plt.plot(range(1,20),error_rate,color='blue', linestyle='dashed', marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# BUG FIX: error_rate[j] corresponds to K = j + 1 (the loop starts at K = 1),
# so the original `error_rate.index(min(error_rate))` reported K off by one.
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)) + 1)

# +
acc = []
# Will take some time
from sklearn import metrics
for i in range(1,40):
    neigh = KNeighborsClassifier(n_neighbors = i).fit(X_train,y_train)
    yhat = neigh.predict(X_test)
    acc.append(metrics.accuracy_score(y_test, yhat))

plt.figure(figsize=(10,6))
plt.plot(range(1,40),acc,color = 'blue',linestyle='dashed', marker='o',markerfacecolor='red', markersize=10)
plt.title('accuracy vs. K Value')
plt.xlabel('K')
plt.ylabel('Accuracy')
# BUG FIX: same off-by-one as above -- acc[j] is the accuracy for K = j + 1.
print("Maximum accuracy:-",max(acc),"at K =",acc.index(max(acc)) + 1)


# +
def KNN_cross_validiation_accuracy(X_train, y_train, cv):
    """Return cross-validated accuracy scores for K-Nearest Neighbors (K = 7)."""
    model = KNeighborsClassifier(n_neighbors=7)
    # evaluate model
    scores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
    return scores
# -

cross_validation(X_train, y_train,"KNN","", 10)

# ### SVM (Support vector machine)
#
# We are now interested in linear and radial SVMs. SVMs methods can be applied in the case of multi-class classification, they then decompose the problem into several binary classification problems. The method applied here is the one-on-one method. The difference between the two methods presented below is that one finds its linear boundaries in the prediction space while the other, more flexible, allows to widen the prediction space in order to find a better linear boundary in a new space.

# +
from sklearn.svm import SVC


def SVM_cross_validiation_accuracy(X_train, y_train, cv, kernel):
    """Return cross-validated accuracy scores for an SVM with the given kernel.

    A degree-3 polynomial is used when kernel == "poly".
    """
    # Build the model once; the original constructed a throwaway SVC first
    # and then replaced it for the polynomial kernel.
    if (kernel == "poly"):
        model = SVC(kernel=kernel, degree=3)
    else:
        model = SVC(kernel=kernel)
    scores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
    return scores
# -

cross_validation(X_train, y_train, "SVM", "linear", 10)

cross_validation(X_train, y_train, "SVM", "rbf" , 10)

cross_validation(X_train, y_train, "SVM", "poly" ,10)

# We obtain a better result with the radial SVM.
# We then looked at other methods known for their performance on high-dimensional datasets.

# ### Random forests
#
# The random forest method is based on the bagging system and is composed of several decision trees, working independently on a vision of a problem. Each produces an estimate, and it is the assembly of the decision trees and their analyzes that will give an overall estimate. The most frequent response category is chosen. Rather than using all the results obtained, a selection is made by looking for the forecast that comes up most often. This allows for better results than with a single decision tree.

# +
from sklearn.ensemble import RandomForestClassifier


def RF_cross_validiation_accuracy(X_train, y_train, cv):
    """Return the cross-validated accuracy scores of a 500-tree random forest.

    Each tree is capped at 16 leaf nodes; fitting and scoring both use all
    available CPU cores.
    """
    forest = RandomForestClassifier(n_estimators = 500,max_leaf_nodes=16, n_jobs=-1)
    return cross_val_score(forest, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1)
# -

cross_validation(X_train, y_train, "RF","", 10)

# ## Best results
#
# We obtain the best precision with the radial SVM model. This can be explained by the fact that these two methods work very well in the case of high-dimensional spaces, which is the case here.
Phoneme Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build, for every ordered pair of ATP players, head-to-head style features
# (shared wins/losses against common opponents) and a derived match-up
# parameter, then write them to CSV for downstream use.

import pandas as pd
import numpy as np
from itertools import combinations, product
from ast import literal_eval

# +
# NOTE(review): the data is now the 2017 season ("atp_2017.csv") but the
# variable names still say atp_2015; kept to avoid touching every reference.
##atp_2015 is now atp_2017 but the variable name has not changed
##atp_2015 is now atp_2017 but the variable name has not changed
##atp_2015 is now atp_2017 but the variable name has not changed
# -

atp_2015 = pd.read_csv("atp_2017.csv")

frames = [atp_2015["winner_name"],atp_2015["loser_name"]]

results = pd.concat(frames)

players = results.unique()

atp_2015_win_lose = atp_2015[['winner_name','loser_name']]

atp_2015_win_lose = atp_2015_win_lose.dropna(axis=0,how="any")

# For each player, collect the list of opponents they beat / lost to.
atp_2015_opponents_win = atp_2015_win_lose.groupby("winner_name")["loser_name"].apply(list)

atp_2015_opponents_loss = atp_2015_win_lose.groupby("loser_name")["winner_name"].apply(list)

atp_2015_opponents_loss = pd.DataFrame(atp_2015_opponents_loss)

atp_2015_opponents_loss = atp_2015_opponents_loss.reset_index()

atp_2015_opponents_win = pd.DataFrame(atp_2015_opponents_win)

atp_2015_opponents_win = atp_2015_opponents_win.reset_index()

atp_2015_opponents_win = atp_2015_opponents_win.rename(index=str, columns={"winner_name": "name","loser_name":"wins"})

atp_2015_opponents_loss = atp_2015_opponents_loss.rename(index=str, columns={"loser_name": "name","winner_name":"losses"})

merge_table = pd.merge(atp_2015_opponents_loss, atp_2015_opponents_win, on="name", how="outer")

merge_table.to_csv("merge_table_4.csv")

# +
# NOTE(review): a manual edit happens outside this script -- merge_table_4.csv
# is hand-corrected and re-read below as merge_table_3.csv. Players with no
# recorded wins are given the placeholder win list ["bye"].
##i manually changed the csv..dont judge :P
##the players with no wins need to win ["bye"]
# -

merge_table = pd.read_csv("merge_table_3.csv")

del merge_table["Unnamed: 0"]

merge_table.head()

# The round-trip through CSV turned the win/loss lists into their string
# representations; literal_eval parses them back into real Python lists
# (needed for the set intersections below).
merge_table.loc[:,'losses'] = merge_table.loc[:,'losses'].apply(lambda x: literal_eval(x))
merge_table.loc[:,'wins'] = merge_table.loc[:,'wins'].apply(lambda x: literal_eval(x))

merge_table.head()

# Cartesian product of all players with themselves: every ordered pair.
prod = list(product(atp_2015_win_lose['loser_name'].unique(), atp_2015_win_lose['loser_name'].unique()))

len(prod)

all_possible_matches = pd.DataFrame(prod)

all_possible_matches.head()

all_possible_matches = all_possible_matches.rename(index=str, columns={0: "player_1",1:"player_2"})

# Drop self-pairings (a player cannot face themselves).
all_possible_matches = all_possible_matches.drop(all_possible_matches[all_possible_matches.player_1 == all_possible_matches.player_2].index)

all_possible_matches.head()

# +
# all_possible_matches["player_1_wins"] = merge_table.loc([merge_table['name'] == all_possible_matches["player_1"], ['wins']])
# -

# Attach player_1's win/loss lists to every pair...
merge_table_test = merge_table.rename(index=str, columns = {"name":"player_1"})

merge_table_test.head()

matching_merge = pd.merge(all_possible_matches,merge_table_test, on = "player_1", how = "outer")

matching_merge.head()

matching_merge = matching_merge.rename(index = str, columns = {"losses": "player_1_losses","wins": "player_1_wins"})

matching_merge.head()

# ...and the same for player_2.
merge_table_test = merge_table.rename(index=str, columns = {"name":"player_2"})

merge_table_test.head()

matching_merge_1 = pd.merge(all_possible_matches,merge_table_test, on = "player_2", how = "outer")

matching_merge_1 = matching_merge_1.rename(index = str, columns = {"losses": "player_2_losses","wins": "player_2_wins"})

matching_merge_1.head()

attempt_merge = pd.merge(matching_merge, matching_merge_1, how='left', left_on=['player_1','player_2'], right_on = ['player_1','player_2'])

# +
# Rows with NAs come from players present on one side of the outer merges
# only; drop them so the set operations below always see real lists.
##i dont know why there are NAs
attempt_merge1 = attempt_merge.dropna(axis=0,how="any")

# +
# del attempt_merge1["player_1_losses_in_player_2_wins"]
# del attempt_merge1["player_2_losses_in_player_1_wins"]
# del attempt_merge1["player_1_wins_in_player_2_wins"]
# del attempt_merge1["player_1_losses_in_player_2_losses"]
# -

attempt_merge1.head()

# First pass: store the actual intersections (lists of common opponents) so
# they can be inspected with head(); the second pass below replaces each
# column with its length.
attempt_merge1['player_1_losses_in_player_2_wins'] = [(list(set(a).intersection(set(b)))) for a, b in zip(attempt_merge1.player_1_losses,attempt_merge1.player_2_wins)]
attempt_merge1['player_2_losses_in_player_1_wins'] = [list(set(a).intersection(set(b))) for a, b in zip(attempt_merge1.player_2_losses,attempt_merge1.player_1_wins)]
attempt_merge1['player_1_wins_in_player_2_wins'] = [list(set(a).intersection(set(b))) for a, b in zip(attempt_merge1.player_1_wins,attempt_merge1.player_2_wins)]
attempt_merge1['player_1_losses_in_player_2_losses'] = [list(set(a).intersection(set(b))) for a, b in zip(attempt_merge1.player_1_losses,attempt_merge1.player_2_losses)]

attempt_merge1.head()

# +
#[list(len(a+b)) for a, b in zip(attempt_merge1.player_1_losses_in_player_2_wins,attempt_merge1.player_2_losses_in_player_1_wins)]
# -

# Second pass: replace each intersection list with its count.
##for the numbers
attempt_merge1['player_1_losses_in_player_2_wins'] = [len((list(set(a).intersection(set(b))))) for a, b in zip(attempt_merge1.player_1_losses,attempt_merge1.player_2_wins)]
attempt_merge1['player_2_losses_in_player_1_wins'] = [len(list(set(a).intersection(set(b)))) for a, b in zip(attempt_merge1.player_2_losses,attempt_merge1.player_1_wins)]
attempt_merge1['player_1_wins_in_player_2_wins'] = [len(list(set(a).intersection(set(b)))) for a, b in zip(attempt_merge1.player_1_wins,attempt_merge1.player_2_wins)]
attempt_merge1['player_1_losses_in_player_2_losses'] = [len(list(set(a).intersection(set(b)))) for a, b in zip(attempt_merge1.player_1_losses,attempt_merge1.player_2_losses)]

attempt_merge1.head()

# Match-up parameter: fraction of decided common-opponent results that favor
# player_2, scaled by 1/10.
# BUG FIX / robustness: when a pair shares no decided match-ups, a + b == 0
# and the original expression raised ZeroDivisionError. Emit NaN instead; the
# fillna(0) below then maps those pairs to 0, matching the pipeline's
# existing handling of missing values.
attempt_merge1["parameters_value"] = [float('nan') if (a + b) == 0 else (a/(a+b))*(1/10) for a, b in zip(attempt_merge1.player_1_losses_in_player_2_wins,attempt_merge1.player_2_losses_in_player_1_wins)]

attempt_merge1.head()

final_merge_thing_help = attempt_merge1.fillna(0)

final_merge_thing_help.to_csv("matchups_parameters_4.csv")

final_merge_thing_help.head()
matchups_par_from_2017.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!-- dom:TITLE: Week 34: Introduction to the course, Logistics and Practicalities --> # # Week 34: Introduction to the course, Logistics and Practicalities # <!-- dom:AUTHOR: <NAME> at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University --> # <!-- Author: --> # **<NAME>**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University # # Date: **Sep 16, 2020** # # Copyright 1999-2020, <NAME>. Released under CC Attribution-NonCommercial 4.0 license # # # # # # ## Overview of first week # # * Thursday August 20: First lecture: Presentation of the course, aims and content # # * Thursday: Second Lecture: Start with simple linear regression and repetition of linear algebra and elements of statistics # # * Friday August 21: Linear regression # # * Computer lab: Wednesdays, 8am-6pm. First time: Wednesday August 26. # # # # # ## Thursday August 20 # # [Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/forelesningsvideoer/zoom_0.mp4?vrtx=view-as-webpage). # # # ## Lectures and ComputerLab # # * Lectures: Thursday (12.15pm-2pm and Friday (12.15pm-2pm). Due to the present COVID-19 situation all lectures will be online. They will be recorded and posted online at the official UiO [website](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h20/index.html). # # * Weekly reading assignments and videos needed to solve projects and exercises. # # * Weekly exercises when not working on projects. You can hand in exercises if you want. 
# # * Detailed lecture notes, exercises, all programs presented, projects etc can be found at the homepage of the course. # # * Weekly plans and all other information are on the official webpage. # # * No final exam, three projects that are graded and have to be approved. # # # # # # # ## Course Format # # * Three compulsory projects. Electronic reports only using [Canvas](https://www.uio.no/english/services/it/education/canvas/) to hand in projects and [git](https://git-scm.com/) as version control software and [GitHub](https://github.com/) for repository (or [GitLab](https://about.gitlab.com/)) of all your material. # # * Evaluation and grading: The three projects are graded and each counts 1/3 of the final mark. No final written or oral exam. # # a. For the last project each group/participant submits a proposal or works with suggested (by us) proposals for the project. # # b. If possible, we would like to organize the last project as a workshop where each group makes a poster and presents this to all other participants of the course # # c. Poster session where all participants can study and discuss the other proposals. # # d. Based on feedback etc, each group finalizes the report and submits for grading. # # # * Python is the default programming language, but feel free to use C/C++ and/or Fortran or other programming languages. All source codes discussed during the lectures can be found at the webpage and [github address](https://github.com/CompPhysics/MachineLearning/tree/master/doc/Programs) of the course. # # # # # # # ## Teachers # # # **Teachers :** # * <NAME>, <EMAIL> # # * **Phone**: +47-48257387 # # * **Office**: Department of Physics, University of Oslo, Eastern wing, room FØ470 # # * **Office hours**: *Anytime*! In Fall Semester 2020 (FS20), as a rule of thumb office hours are planned via computer or telephone. Individual or group office hours will be performed via zoom. Feel free to send an email for planning. 
In person meetings may also be possible if allowed by the University of Oslo's COVID-19 instructions. # # # * <NAME>, <EMAIL> # # * **Office**: Department of Physics, University of Oslo, Eastern wing, room FØ452 # # # * <NAME>, <EMAIL> # # * <NAME>, <EMAIL> # # * <NAME>, <EMAIL> # # * <NAME>, <EMAIL> # # # # ## Deadlines for projects (tentative) # # # 1. Project 1: September 28 (graded with feedback) # # 2. Project 2: November 2 (graded with feedback) # # 3. Project 3: December 7 (graded with feedback) # # Projects are handed in using **Canvas**. We use Github as repository for codes, benchmark calculations etc. Comments and feedback on projects only via **Canvas**. # # # # # ## Recommended textbooks # # * [<NAME>, <NAME>, <NAME>, The Elements of Statistical Learning, Springer](https://www.springer.com/gp/book/9780387848570) # # * [<NAME>, Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow, 2nd Edition](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/) # # ## Prerequisites # # Basic knowledge in programming and mathematics, with an emphasis on # linear algebra. Knowledge of Python or/and C++ as programming # languages is strongly recommended and experience with Jupiter notebook # is recommended. Required courses are the equivalents to the University # of Oslo mathematics courses MAT1100, MAT1110, MAT1120 and at least one # of the corresponding computing and programming courses INF1000/INF1110 # or MAT-INF1100/MAT-INF1100L/BIOS1100/KJM-INF1100. Most universities # offer nowadays a basic programming course (often compulsory) where # Python is the recurring programming language. # # # # ## Learning outcomes # # # # This course aims at giving you insights and knowledge about many of the central algorithms used in Data Analysis and Machine Learning. 
The course is project based and through various numerical projects, normally three, you will be exposed to fundamental research problems in these fields, with the aim to reproduce state of the art scientific results. Both supervised and unsupervised methods will be covered. The emphasis is on a frequentist approach, although we will try to link it with a Bayesian approach as well. You will learn to develop and structure large codes for studying different cases where Machine Learning is applied to, get acquainted with computing facilities and learn to handle large scientific projects. A good scientific and ethical conduct is emphasized throughout the course. More specifically, after this course you will # # * Learn about basic data analysis, statistical analysis, Bayesian statistics, Monte Carlo sampling, data optimization and machine learning; # # * Be capable of extending the acquired knowledge to other systems and cases; # # * Have an understanding of central algorithms used in data analysis and machine learning; # # * Understand linear methods for regression and classification, from ordinary least squares, via Lasso and Ridge to Logistic regression; # # * Learn about neural networks and deep learning methods for supervised and unsupervised learning. Emphasis on feed forward neural networks, convolutional and recurrent neural networks; # # * Learn about about decision trees, random forests, bagging and boosting methods; # # * Learn about support vector machines and kernel transformations; # # * Reduction of data sets, from PCA to clustering; # # * Autoencoders and Reinforcement Learning; # # * Work on numerical projects to illustrate the theory. The projects play a central role and you are expected to know modern programming languages like Python or C++ and/or Fortran (Fortran2003 or later). # # # # ## Topics covered in this course: Statistical analysis and optimization of data # # The course has two central parts # # 1. 
Statistical analysis and optimization of data # # 2. Machine learning # # These topics will be scattered thorughout the course and may not necessarily be taught separately. Rather, we will often take an approach (during the lectures and project/exercise sessions) where say elements from statistical data analysis are mixed with specific Machine Learning algorithms # # **Statistical analysis and optimization of data.** # # # The following topics will be covered # * Basic concepts, expectation values, variance, covariance, correlation functions and errors; # # * Simpler models, binomial distribution, the Poisson distribution, simple and multivariate normal distributions; # # * Central elements of Bayesian statistics and modeling; # # * Gradient methods for data optimization, # # * Monte Carlo methods, Markov chains, Gibbs sampling and Metropolis-Hastings sampling; # # * Estimation of errors and resampling techniques such as the cross-validation, blocking, bootstrapping and jackknife methods; # # * Principal Component Analysis (PCA) and its mathematical foundation # # # # # ## Topics covered in this course: Machine Learning # # The following topics will be covered # * Linear Regression and Logistic Regression; # # * Neural networks and deep learning, including convolutional and recurrent neural networks # # * Decisions trees, Random Forests, Bagging and Boosting # # * Support vector machines # # * Bayesian linear and logistic regression # # * Boltzmann Machines # # * Unsupervised learning Dimensionality reduction, from PCA to cluster models # # Hands-on demonstrations, exercises and projects aim at deepening your understanding of these topics. # # # # # ## Extremely useful tools, strongly recommended # # **and discussed at the lab sessions.** # # * GIT for version control, and GitHub or GitLab as repositories, highly recommended. 
This will be discussed during the first exercise session # # * Anaconda and other Python environments, see intro slides and first exercise session # # # # # # # # ## Other courses on Data science and Machine Learning at UiO # # The link here <https://www.mn.uio.no/english/research/about/centre-focus/innovation/data-science/studies/> gives an excellent overview of courses on Machine learning at UiO. # # 1. [STK2100 Machine learning and statistical methods for prediction and classification](http://www.uio.no/studier/emner/matnat/math/STK2100/index-eng.html). # # 2. [IN3050 Introduction to Artificial Intelligence and Machine Learning](https://www.uio.no/studier/emner/matnat/ifi/IN3050/index-eng.html). Introductory course in machine learning and AI with an algorithmic approach. # # 3. [STK-INF3000/4000 Selected Topics in Data Science](http://www.uio.no/studier/emner/matnat/math/STK-INF3000/index-eng.html). The course provides insight into selected contemporary relevant topics within Data Science. # # 4. [IN4080 Natural Language Processing](https://www.uio.no/studier/emner/matnat/ifi/IN4080/index.html). Probabilistic and machine learning techniques applied to natural language processing. # # 5. [STK-IN4300 Statistical learning methods in Data Science](https://www.uio.no/studier/emner/matnat/math/STK-IN4300/index-eng.html). An advanced introduction to statistical and machine learning. For students with a good mathematics and statistics background. # # 6. [INF4490 Biologically Inspired Computing](http://www.uio.no/studier/emner/matnat/ifi/INF4490/). An introduction to self-adapting methods also called artificial intelligence or machine learning. # # 7. [IN-STK5000 Adaptive Methods for Data-Based Decision Making](https://www.uio.no/studier/emner/matnat/ifi/IN-STK5000/index-eng.html). Methods for adaptive collection and processing of data based on machine learning techniques. # # 8. 
[IN5400/INF5860 Machine Learning for Image Analysis](https://www.uio.no/studier/emner/matnat/ifi/IN5400/). An introduction to deep learning with particular emphasis on applications within Image analysis, but useful for other application areas too. # # 9. [TEK5040 Deep learning for autonomous systems](https://www.uio.no/studier/emner/matnat/its/TEK5040/). The course addresses advanced algorithms and architectures for deep learning with neural networks. The course provides an introduction to how deep-learning techniques can be used in the construction of key parts of advanced autonomous systems that exist in physical environments and cyber environments. # # 10. [STK4051 Computational Statistics](https://www.uio.no/studier/emner/matnat/math/STK4051/index-eng.html) # # 11. [STK4021 Applied Bayesian Analysis and Numerical Methods](https://www.uio.no/studier/emner/matnat/math/STK4021/index-eng.html) # # ## Introduction # # Our emphasis throughout this series of lectures # is on understanding the mathematical aspects of # different algorithms used in the fields of data analysis and machine learning. # # However, where possible we will emphasize the # importance of using available software. We start thus with a hands-on # and top-down approach to machine learning. The aim is thus to start with # relevant data or data we have produced # and use these to introduce statistical data analysis # concepts and machine learning algorithms before we delve into the # algorithms themselves. The examples we will use in the beginning, start with simple # polynomials with random noise added. We will use the Python # software package [Scikit-Learn](http://scikit-learn.org/stable/) and # introduce various machine learning algorithms to make fits of # the data and predictions. We move thereafter to more interesting # cases such as data from say experiments (below we will look at experimental nuclear binding energies as an example). 
# These are examples where we can easily set up the data and # then use machine learning algorithms included in for example # **Scikit-Learn**. # # These examples will serve us the purpose of getting # started. Furthermore, they allow us to catch more than two birds with # a stone. They will allow us to bring in some programming specific # topics and tools as well as showing the power of various Python # libraries for machine learning and statistical data analysis. # # Here, we will mainly focus on two # specific Python packages for Machine Learning, Scikit-Learn and # Tensorflow (see below for links etc). Moreover, the examples we # introduce will serve as inputs to many of our discussions later, as # well as allowing you to set up models and produce your own data and # get started with programming. # # # ## What is Machine Learning? # # Statistics, data science and machine learning form important fields of # research in modern science. They describe how to learn and make # predictions from data, as well as allowing us to extract important # correlations about physical process and the underlying laws of motion # in large data sets. The latter, big data sets, appear frequently in # essentially all disciplines, from the traditional Science, Technology, # Mathematics and Engineering fields to Life Science, Law, education # research, the Humanities and the Social Sciences. # # It has become more # and more common to see research projects on big data in for example # the Social Sciences where extracting patterns from complicated survey # data is one of many research directions. Having a solid grasp of data # analysis and machine learning is thus becoming central to scientific # computing in many fields, and competences and skills within the fields # of machine learning and scientific computing are nowadays strongly # requested by many potential employers. 
The latter cannot be # overstated, familiarity with machine learning has almost become a # prerequisite for many of the most exciting employment opportunities, # whether they are in bioinformatics, life science, physics or finance, # in the private or the public sector. This author has had several # students or met students who have been hired recently based on their # skills and competences in scientific computing and data science, often # with marginal knowledge of machine learning. # # Machine learning is a subfield of computer science, and is closely # related to computational statistics. It evolved from the study of # pattern recognition in artificial intelligence (AI) research, and has # made contributions to AI tasks like computer vision, natural language # processing and speech recognition. Many of the methods we will study are also # strongly rooted in basic mathematics and physics research. # # Ideally, machine learning represents the science of giving computers # the ability to learn without being explicitly programmed. The idea is # that there exist generic algorithms which can be used to find patterns # in a broad class of data sets without having to write code # specifically for each problem. The algorithm will build its own logic # based on the data. You should however always keep in mind that # machines and algorithms are to a large extent developed by humans. The # insights and knowledge we have about a specific system, play a central # role when we develop a specific machine learning algorithm. # # Machine learning is an extremely rich field, in spite of its young # age. The increases we have seen during the last three decades in # computational capabilities have been followed by developments of # methods and techniques for analyzing and handling large date sets, # relying heavily on statistics, computer science and mathematics. The # field is rather new and developing rapidly. 
Popular software packages # written in Python for machine learning like # [Scikit-learn](http://scikit-learn.org/stable/), # [Tensorflow](https://www.tensorflow.org/), # [PyTorch](http://pytorch.org/) and [Keras](https://keras.io/), all # freely available at their respective GitHub sites, encompass # communities of developers in the thousands or more. And the number of # code developers and contributors keeps increasing. Not all the # algorithms and methods can be given a rigorous mathematical # justification, opening up thereby large rooms for experimenting and # trial and error and thereby exciting new developments. However, a # solid command of linear algebra, multivariate theory, probability # theory, statistical data analysis, understanding errors and Monte # Carlo methods are central elements in a proper understanding of many # of algorithms and methods we will discuss. # # # ## Types of Machine Learning # # # The approaches to machine learning are many, but are often split into # two main categories. In *supervised learning* we know the answer to a # problem, and let the computer deduce the logic behind it. On the other # hand, *unsupervised learning* is a method for finding patterns and # relationship in data sets without any prior knowledge of the system. # Some authours also operate with a third category, namely # *reinforcement learning*. This is a paradigm of learning inspired by # behavioral psychology, where learning is achieved by trial-and-error, # solely from rewards and punishment. # # Another way to categorize machine learning tasks is to consider the # desired output of a system. Some of the most common tasks are: # # * Classification: Outputs are divided into two or more classes. The goal is to produce a model that assigns inputs into one of these classes. An example is to identify digits based on pictures of hand-written ones. Classification is typically supervised learning. 
# # * Regression: Finding a functional relationship between an input data set and a reference data set. The goal is to construct a function that maps input data to continuous output values. # # * Clustering: Data are divided into groups with certain common traits, without knowing the different groups beforehand. It is thus a form of unsupervised learning. # # The methods we cover have three main topics in common, irrespective of # whether we deal with supervised or unsupervised learning. The first # ingredient is normally our data set (which can be subdivided into # training and test data), the second item is a model which is normally a # function of some parameters. The model reflects our knowledge of the system (or lack thereof). As an example, if we know that our data show a behavior similar to what would be predicted by a polynomial, fitting our data to a polynomial of some degree would then determin our model. # # The last ingredient is a so-called **cost** # function which allows us to present an estimate on how good our model # is in reproducing the data it is supposed to train. # At the heart of basically all ML algorithms there are so-called minimization algorithms, often we end up with various variants of **gradient** methods. # # # # # # # ## Software and needed installations # # We will make extensive use of Python as programming language and its # myriad of available libraries. You will find # Jupyter notebooks invaluable in your work. You can run **R** # codes in the Jupyter/IPython notebooks, with the immediate benefit of # visualizing your data. You can also use compiled languages like C++, # Rust, Julia, Fortran etc if you prefer. The focus in these lectures will be # on Python. # # # If you have Python installed (we strongly recommend Python3) and you feel # pretty familiar with installing different packages, we recommend that # you install the following Python packages via **pip** as # # 1. 
pip install numpy scipy matplotlib ipython scikit-learn mglearn sympy pandas pillow # # For Python3, replace **pip** with **pip3**. # # For OSX users we recommend, after having installed Xcode, to # install **brew**. Brew allows for a seamless installation of additional # software via for example # # 1. brew install python3 # # For Linux users, with its variety of distributions like for example the widely popular Ubuntu distribution, # you can use **pip** as well and simply install Python as # # 1. sudo apt-get install python3 (or python for pyhton2.7) # # etc etc. # # # ## Python installers # # If you don't want to perform these operations separately and venture # into the hassle of exploring how to set up dependencies and paths, we # recommend two widely used distrubutions which set up all relevant # dependencies for Python, namely # # * [Anaconda](https://docs.anaconda.com/), # # which is an open source # distribution of the Python and R programming languages for large-scale # data processing, predictive analytics, and scientific computing, that # aims to simplify package management and deployment. Package versions # are managed by the package management system **conda**. # # * [Enthought canopy](https://www.enthought.com/product/canopy/) # # is a Python # distribution for scientific and analytic computing distribution and # analysis environment, available for free and under a commercial # license. # # Furthermore, [Google's Colab](https://colab.research.google.com/notebooks/welcome.ipynb) is a free Jupyter notebook environment that requires # no setup and runs entirely in the cloud. Try it out! 
# # ## Useful Python libraries # Here we list several useful Python libraries we strongly recommend (if you use anaconda many of these are already there) # # * [NumPy](https://www.numpy.org/) is a highly popular library for large, multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate on these arrays # # * [The pandas](https://pandas.pydata.org/) library provides high-performance, easy-to-use data structures and data analysis tools # # * [Xarray](http://xarray.pydata.org/en/stable/) is a Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! # # * [Scipy](https://www.scipy.org/) (pronounced “Sigh Pie”) is a Python-based ecosystem of open-source software for mathematics, science, and engineering. # # * [Matplotlib](https://matplotlib.org/) is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. # # * [Autograd](https://github.com/HIPS/autograd) can automatically differentiate native Python and Numpy code. It can handle a large subset of Python's features, including loops, ifs, recursion and closures, and it can even take derivatives of derivatives of derivatives # # * [SymPy](https://www.sympy.org/en/index.html) is a Python library for symbolic mathematics. 
# # * [scikit-learn](https://scikit-learn.org/stable/) has simple and efficient tools for machine learning, data mining and data analysis # # * [TensorFlow](https://www.tensorflow.org/) is a Python library for fast numerical computing created and released by Google # # * [Keras](https://keras.io/) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano # # * And many more such as [pytorch](https://pytorch.org/), [Theano](https://pypi.org/project/Theano/) etc # # ## Installing R, C++, cython or Julia # # You will also find it convenient to utilize **R**. We will mainly # use Python during our lectures and in various projects and exercises. # Those of you # already familiar with **R** should feel free to continue using **R**, keeping # however an eye on the parallel Python set ups. Similarly, if you are a # Python afecionado, feel free to explore **R** as well. Jupyter/Ipython # notebook allows you to run **R** codes interactively in your # browser. The software library **R** is really tailored for statistical data analysis # and allows for an easy usage of the tools and algorithms we will discuss in these # lectures. # # To install **R** with Jupyter notebook # [follow the link here](https://mpacer.org/maths/r-kernel-for-ipython-notebook) # # # # ## Installing R, C++, cython, Numba etc # # # For the C++ aficionados, Jupyter/IPython notebook allows you also to # install C++ and run codes written in this language interactively in # the browser. Since we will emphasize writing many of the algorithms # yourself, you can thus opt for either Python or C++ (or Fortran or other compiled languages) as programming # languages. # # To add more entropy, **cython** can also be used when running your # notebooks. It means that Python with the jupyter notebook # setup allows you to integrate widely popular softwares and tools for # scientific computing. 
Similarly, the # [Numba Python package](https://numba.pydata.org/) delivers increased performance # capabilities with minimal rewrites of your codes. With its # versatility, including symbolic operations, Python offers a unique # computational environment. Your jupyter notebook can easily be # converted into a nicely rendered **PDF** file or a Latex file for # further processing. For example, convert to latex as # pycod jupyter nbconvert filename.ipynb --to latex # # And to add more versatility, the Python package [SymPy](http://www.sympy.org/en/index.html) is a Python library for symbolic mathematics. It aims to become a full-featured computer algebra system (CAS) and is entirely written in Python. # # Finally, if you wish to use the light mark-up language # [doconce](https://github.com/hplgit/doconce) you can convert a standard ascii text file into various HTML # formats, ipython notebooks, latex files, pdf files etc with minimal edits. These lectures were generated using **doconce**. # # # ## Numpy examples and Important Matrix and vector handling packages # # There are several central software libraries for linear algebra and eigenvalue problems. Several of the more # popular ones have been wrapped into ofter software packages like those from the widely used text **Numerical Recipes**. The original source codes in many of the available packages are often taken from the widely used # software package LAPACK, which follows two other popular packages # developed in the 1970s, namely EISPACK and LINPACK. We describe them shortly here. # # * LINPACK: package for linear equations and least square problems. # # * LAPACK:package for solving symmetric, unsymmetric and generalized eigenvalue problems. From LAPACK's website <http://www.netlib.org> it is possible to download for free all source codes from this library. Both C/C++ and Fortran versions are available. 
# # * BLAS (I, II and III): (Basic Linear Algebra Subprograms) are routines that provide standard building blocks for performing basic vector and matrix operations. Blas I is vector operations, II vector-matrix operations and III matrix-matrix operations. Highly parallelized and efficient codes, all available for download from <http://www.netlib.org>. # # ## Basic Matrix Features # # **Matrix properties reminder.** # $$ # \mathbf{A} = # \begin{bmatrix} a_{11} & a_{12} & a_{13} & a_{14} \\ # a_{21} & a_{22} & a_{23} & a_{24} \\ # a_{31} & a_{32} & a_{33} & a_{34} \\ # a_{41} & a_{42} & a_{43} & a_{44} # \end{bmatrix}\qquad # \mathbf{I} = # \begin{bmatrix} 1 & 0 & 0 & 0 \\ # 0 & 1 & 0 & 0 \\ # 0 & 0 & 1 & 0 \\ # 0 & 0 & 0 & 1 # \end{bmatrix} # $$ # The inverse of a matrix is defined by # $$ # \mathbf{A}^{-1} \cdot \mathbf{A} = I # $$ # <table border="1"> # <thead> # <tr><th align="center"> Relations </th> <th align="center"> Name </th> <th align="center"> matrix elements </th> </tr> # </thead> # <tbody> # <tr><td align="center"> $A = A^{T}$ </td> <td align="center"> symmetric </td> <td align="center"> $a_{ij} = a_{ji}$ </td> </tr> # <tr><td align="center"> $A = \left (A^{T} \right )^{-1}$ </td> <td align="center"> real orthogonal </td> <td align="center"> $\sum_k a_{ik} a_{jk} = \sum_k a_{ki} a_{kj} = \delta_{ij}$ </td> </tr> # <tr><td align="center"> $A = A^{ * }$ </td> <td align="center"> real matrix </td> <td align="center"> $a_{ij} = a_{ij}^{ * }$ </td> </tr> # <tr><td align="center"> $A = A^{\dagger}$ </td> <td align="center"> hermitian </td> <td align="center"> $a_{ij} = a_{ji}^{ * }$ </td> </tr> # <tr><td align="center"> $A = \left (A^{\dagger} \right )^{-1}$ </td> <td align="center"> unitary </td> <td align="center"> $\sum_k a_{ik} a_{jk}^{ * } = \sum_k a_{ki}^{ * } a_{kj} = \delta_{ij}$ </td> </tr> # </tbody> # </table> # # # # ### Some famous Matrices # # * Diagonal if $a_{ij}=0$ for $i\ne j$ # # * Upper triangular if $a_{ij}=0$ for $i > j$ # # * Lower 
triangular if $a_{ij}=0$ for $i < j$
#
# * Upper Hessenberg if $a_{ij}=0$ for $i > j+1$
#
# * Lower Hessenberg if $a_{ij}=0$ for $i < j+1$
#
# * Tridiagonal if $a_{ij}=0$ for $|i -j| > 1$
#
# * Lower banded with bandwidth $p$: $a_{ij}=0$ for $i > j+p$
#
# * Upper banded with bandwidth $p$: $a_{ij}=0$ for $i < j+p$
#
# * Banded, block upper triangular, block lower triangular....
#
# ### More Basic Matrix Features
#
# **Some Equivalent Statements.**
#
# For an $N\times N$ matrix $\mathbf{A}$ the following properties are all equivalent
#
# * If the inverse of $\mathbf{A}$ exists, $\mathbf{A}$ is nonsingular.
#
# * The equation $\mathbf{Ax}=0$ implies $\mathbf{x}=0$.
#
# * The rows of $\mathbf{A}$ form a basis of $R^N$.
#
# * The columns of $\mathbf{A}$ form a basis of $R^N$.
#
# * $\mathbf{A}$ is a product of elementary matrices.
#
# * $0$ is not an eigenvalue of $\mathbf{A}$.
#
#
#
# ## Numpy and arrays
# [Numpy](http://www.numpy.org/) provides an easy way to handle arrays in Python. The standard way to import this library is as

import numpy as np

# Here follows a simple example where we set up an array of ten elements, all determined by random numbers drawn according to the normal distribution,

# n random draws from the standard normal distribution N(0, 1)
n = 10
x = np.random.normal(size=n)
print(x)

# We defined a vector $x$ with $n=10$ elements with its values given by the Normal distribution $N(0,1)$.
# Another alternative is to declare a vector as follows

import numpy as np
x = np.array([1, 2, 3])
print(x)

# Here we have defined a vector with three elements, with $x_0=1$, $x_1=2$ and $x_2=3$. Note that both Python and C++
# start numbering array elements from $0$ and on. This means that a vector with $n$ elements has a sequence of entities $x_0, x_1, x_2, \dots, x_{n-1}$. We could also (as recommended) let Numpy compute the logarithms of a specific array as

import numpy as np
# np.log is applied elementwise to the whole array in one call
x = np.log(np.array([4, 7, 8]))
print(x)

# In the last example we used Numpy's unary function $np.log$.
This function is # highly tuned to compute array elements since the code is vectorized # and does not require looping. We normaly recommend that you use the # Numpy intrinsic functions instead of the corresponding **log** function # from Python's **math** module. The looping is done explicitely by the # **np.log** function. The alternative, and slower way to compute the # logarithms of a vector would be to write import numpy as np from math import log x = np.array([4, 7, 8]) for i in range(0, len(x)): x[i] = log(x[i]) print(x) # We note that our code is much longer already and we need to import the **log** function from the **math** module. # The attentive reader will also notice that the output is $[1, 1, 2]$. Python interprets automagically our numbers as integers (like the **automatic** keyword in C++). To change this we could define our array elements to be double precision numbers as import numpy as np x = np.log(np.array([4, 7, 8], dtype = np.float64)) print(x) # or simply write them as double precision numbers (Python uses 64 bits as default for floating point type variables), that is import numpy as np x = np.log(np.array([4.0, 7.0, 8.0]) print(x) # To check the number of bytes (remember that one byte contains eight bits for double precision variables), you can use simple use the **itemsize** functionality (the array $x$ is actually an object which inherits the functionalities defined in Numpy) as import numpy as np x = np.log(np.array([4.0, 7.0, 8.0]) print(x.itemsize) # ## Matrices in Python # # Having defined vectors, we are now ready to try out matrices. We can # define a $3 \times 3 $ real matrix $\hat{A}$ as (recall that we user # lowercase letters for vectors and uppercase letters for matrices) import numpy as np A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ])) print(A) # If we use the **shape** function we would get $(3, 3)$ as output, that is verifying that our matrix is a $3\times 3$ matrix. 
We can slice the matrix and print for example the first column (Python organizes matrix elements in a row-major order, see below) as

import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the first column, row-major order and elements start with 0
print(A[:,0])

# We can continue this way by printing out other columns or rows. The example here prints out the second row

import numpy as np
A = np.log(np.array([ [4.0, 7.0, 8.0], [3.0, 10.0, 11.0], [4.0, 5.0, 7.0] ]))
# print the second row, row-major order and elements start with 0
print(A[1,:])

# Numpy contains many other functionalities that allow us to slice, subdivide etc etc arrays. We strongly recommend that you look up the [Numpy website for more details](http://www.numpy.org/). Useful functions when defining a matrix are the **np.zeros** function which declares a matrix of a given dimension and sets all elements to zero

import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to zero
A = np.zeros( (n, n) )
print(A)

# or initializing all elements to one

import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to one
A = np.ones( (n, n) )
print(A)

# or as uniformly distributed random numbers (see the material on random number generators in the statistics part)

import numpy as np
n = 10
# define a matrix of dimension 10 x 10 and set all elements to random numbers with x \in [0, 1]
A = np.random.rand(n, n)
print(A)

# As we will see throughout these lectures, there are several extremely useful functionalities in Numpy.
# As an example, consider the discussion of the covariance matrix. Suppose we have defined three vectors
# $\hat{x}, \hat{y}, \hat{z}$ with $n$ elements each.
The covariance matrix is defined as # $$ # \hat{\Sigma} = \begin{bmatrix} \sigma_{xx} & \sigma_{xy} & \sigma_{xz} \\ # \sigma_{yx} & \sigma_{yy} & \sigma_{yz} \\ # \sigma_{zx} & \sigma_{zy} & \sigma_{zz} # \end{bmatrix}, # $$ # where for example # $$ # \sigma_{xy} =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}). # $$ # The Numpy function **np.cov** calculates the covariance elements using the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have the exact mean values. # The following simple function uses the **np.vstack** function which takes each vector of dimension $1\times n$ and produces a $3\times n$ matrix $\hat{W}$ # $$ # \hat{W} = \begin{bmatrix} x_0 & y_0 & z_0 \\ # x_1 & y_1 & z_1 \\ # x_2 & y_2 & z_2 \\ # \dots & \dots & \dots \\ # x_{n-2} & y_{n-2} & z_{n-2} \\ # x_{n-1} & y_{n-1} & z_{n-1} # \end{bmatrix}, # $$ # which in turn is converted into into the $3\times 3$ covariance matrix # $\hat{\Sigma}$ via the Numpy function **np.cov()**. We note that we can also calculate # the mean value of each set of samples $\hat{x}$ etc using the Numpy # function **np.mean(x)**. We can also extract the eigenvalues of the # covariance matrix through the **np.linalg.eig()** function. 
# + # Importing various packages import numpy as np n = 100 x = np.random.normal(size=n) print(np.mean(x)) y = 4+3*x+np.random.normal(size=n) print(np.mean(y)) z = x**3+np.random.normal(size=n) print(np.mean(z)) W = np.vstack((x, y, z)) Sigma = np.cov(W) print(Sigma) Eigvals, Eigvecs = np.linalg.eig(Sigma) print(Eigvals) # + # %matplotlib inline import numpy as np import matplotlib.pyplot as plt from scipy import sparse eye = np.eye(4) print(eye) sparse_mtx = sparse.csr_matrix(eye) print(sparse_mtx) x = np.linspace(-10,10,100) y = np.sin(x) plt.plot(x,y,marker='x') plt.show() # - # ## Meet the Pandas # # # <!-- dom:FIGURE: [fig/pandas.jpg, width=600 frac=0.8] --> # <!-- begin figure --> # # <p></p> # <img src="fig/pandas.jpg" width=600> # # <!-- end figure --> # # # Another useful Python package is # [pandas](https://pandas.pydata.org/), which is an open source library # providing high-performance, easy-to-use data structures and data # analysis tools for Python. **pandas** stands for panel data, a term borrowed from econometrics and is an efficient library for data analysis with an emphasis on tabular data. # **pandas** has two major classes, the **DataFrame** class with two-dimensional data objects and tabular data organized in columns and the class **Series** with a focus on one-dimensional data objects. Both classes allow you to index data easily as we will see in the examples below. # **pandas** allows you also to perform mathematical operations on the data, spanning from simple reshapings of vectors and matrices to statistical operations. # # The following simple example shows how we can, in an easy way make tables of our data. Here we define a data set which includes names, place of birth and date of birth, and displays the data in an easy to read way. We will see repeated use of **pandas**, in particular in connection with classification of data. 
import pandas as pd
from IPython.display import display

# A small table of four characters: name, birthplace and date of birth.
data = {'First Name': ["Frodo", "Bilbo", "Aragorn", "Samwise"],
        'Last Name': ["Baggins", "Baggins", "Elessar", "Gamgee"],
        'Place of birth': ["Shire", "Shire", "Eriador", "Shire"],
        'Date of Birth T.A.': [2968, 2890, 2931, 2980]
        }
data_pandas = pd.DataFrame(data)
display(data_pandas)

# In the above we have imported **pandas** with the shorthand **pd**, the latter has become the standard way we import **pandas**. We then make a list of various variables
# and reorganize the above lists into a **DataFrame** and then print out a neat table with specific column labels as *Name*, *place of birth* and *date of birth*.
# Displaying these results, we see that the indices are given by the default numbers from zero to three.
# **pandas** is extremely flexible and we can easily change the above indices by defining a new type of indexing as

data_pandas = pd.DataFrame(data,index=['Frodo','Bilbo','Aragorn','Sam'])
display(data_pandas)

# Thereafter we display the content of the row which begins with the index **Aragorn**

display(data_pandas.loc['Aragorn'])

# We can easily append data to this, for example

new_hobbit = {'First Name': ["Peregrin"],
              'Last Name': ["Took"],
              'Place of birth': ["Shire"],
              'Date of Birth T.A.': [2990]
              }
# DataFrame.append was removed in pandas 2.0; pd.concat is the supported way
# to stack two DataFrames on top of each other.
data_pandas = pd.concat([data_pandas, pd.DataFrame(new_hobbit, index=['Pippin'])])
display(data_pandas)

# Here are other examples where we use the **DataFrame** functionality to handle arrays, now with more interesting features for us, namely numbers. We set up a matrix
# of dimensionality $10\times 5$ and compute the mean value and standard deviation of each column. Similarly, we can perform mathematical operations like squaring the matrix elements and many other operations.
import numpy as np
import pandas as pd
from IPython.display import display
np.random.seed(100)
# setting up a 10 x 5 matrix of standard normal random numbers
rows = 10
cols = 5
a = np.random.randn(rows,cols)
df = pd.DataFrame(a)
display(df)
# column-wise mean and standard deviation
print(df.mean())
print(df.std())
# element-wise squaring
display(df**2)

# Thereafter we can select specific columns only and plot final results

# +
df.columns = ['First', 'Second', 'Third', 'Fourth', 'Fifth']
df.index = np.arange(10)
display(df)
print(df['Second'].mean())
print(df.info())
print(df.describe())

from pylab import plt, mpl
# The plain 'seaborn' style sheet was removed in Matplotlib 3.8; use the
# renamed 'seaborn-v0_8' when available and fall back on older versions.
plt.style.use('seaborn-v0_8' if 'seaborn-v0_8' in plt.style.available else 'seaborn')
mpl.rcParams['font.family'] = 'serif'
df.cumsum().plot(lw=2.0, figsize=(10,6))
plt.show()
df.plot.bar(figsize=(10,6), rot=15)
plt.show()
# -

# We can produce a $4\times 4$ matrix

b = np.arange(16).reshape((4,4))
print(b)
df1 = pd.DataFrame(b)
print(df1)

# and many other operations.
#
# The **Series** class is another important class included in
# **pandas**. You can view it as a specialization of **DataFrame** but where
# we have just a single column of data. It shares many of the same features as **DataFrame**. As with **DataFrame**,
# most operations are vectorized, achieving thereby a high performance when dealing with computations of arrays, in particular labeled arrays.
# As we will see below it leads also to a very concise code close to the mathematical operations we may be interested in.
# For multidimensional arrays, we recommend strongly [xarray](http://xarray.pydata.org/en/stable/). **xarray** has much of the same flexibility as **pandas**, but allows for the extension to higher dimensions than two. We will see examples later of the usage of both **pandas** and **xarray**.
# # # ## Friday August 21 # # [Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK3155/h20/forelesningsvideoer/LectureAug21.mp4?vrtx=view-as-webpage) and [Handwritten notes](https://github.com/CompPhysics/MachineLearning/blob/master/doc/HandWrittenNotes/NotesAugust21.pdf) # # # # # ## Reading Data and fitting # # In order to study various Machine Learning algorithms, we need to # access data. Acccessing data is an essential step in all machine # learning algorithms. In particular, setting up the so-called **design # matrix** (to be defined below) is often the first element we need in # order to perform our calculations. To set up the design matrix means # reading (and later, when the calculations are done, writing) data # in various formats, The formats span from reading files from disk, # loading data from databases and interacting with online sources # like web application programming interfaces (APIs). # # In handling various input formats, as discussed above, we will mainly stay with **pandas**, # a Python package which allows us, in a seamless and painless way, to # deal with a multitude of formats, from standard **csv** (comma separated # values) files, via **excel**, **html** to **hdf5** formats. With **pandas** # and the **DataFrame** and **Series** functionalities we are able to convert text data # into the calculational formats we need for a specific algorithm. And our code is going to be # pretty close the basic mathematical expressions. # # Our first data set is going to be a classic from nuclear physics, namely all # available data on binding energies. Don't be intimidated if you are not familiar with nuclear physics. It serves simply as an example here of a data set. # # We will show some of the # strengths of packages like **Scikit-Learn** in fitting nuclear binding energies to # specific functions using linear regression first. 
Then, as a teaser, we will show you how # you can easily implement other algorithms like decision trees and random forests and neural networks. # # But before we really start with nuclear physics data, let's just look at some simpler polynomial fitting cases, such as, # (don't be offended) fitting straight lines! # # ## Friday August 21 # # ### Simple linear regression model using **scikit-learn** # # We start with perhaps our simplest possible example, using **Scikit-Learn** to perform linear regression analysis on a data set produced by us. # # What follows is a simple Python code where we have defined a function # $y$ in terms of the variable $x$. Both are defined as vectors with $100$ entries. # The numbers in the vector $\hat{x}$ are given # by random numbers generated with a uniform distribution with entries # $x_i \in [0,1]$ (more about probability distribution functions # later). These values are then used to define a function $y(x)$ # (tabulated again as a vector) with a linear dependence on $x$ plus a # random noise added via the normal distribution. # # # The Numpy functions are imported used the **import numpy as np** # statement and the random number generator for the uniform distribution # is called using the function **np.random.rand()**, where we specificy # that we want $100$ random variables. Using Numpy we define # automatically an array with the specified number of elements, $100$ in # our case. With the Numpy function **randn()** we can compute random # numbers with the normal distribution (mean value $\mu$ equal to zero and # variance $\sigma^2$ set to one) and produce the values of $y$ assuming a linear # dependence as function of $x$ # $$ # y = 2x+N(0,1), # $$ # where $N(0,1)$ represents random numbers generated by the normal # distribution. From **Scikit-Learn** we import then the # **LinearRegression** functionality and make a prediction $\tilde{y} = # \alpha + \beta x$ using the function **fit(x,y)**. 
We call the set of
# data $(\hat{x},\hat{y})$ our training data. The Python package
# **scikit-learn** has also a functionality which extracts the above
# fitting parameters $\alpha$ and $\beta$ (see below). Later we will
# distinguish between training data and test data.
#
# For plotting we use the Python package
# [matplotlib](https://matplotlib.org/) which produces publication
# quality figures. Feel free to explore the extensive
# [gallery](https://matplotlib.org/gallery/index.html) of examples. In
# this example we plot our original values of $x$ and $y$ as well as the
# prediction **ypredict** ($\tilde{y}$), which attempts at fitting our
# data with a straight line.
#
# The Python code follows here.

# +
# Importing various packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

# training data: x uniform in [0, 1], y linear in x plus N(0, 1) noise
x = np.random.rand(100,1)
y = 2*x+np.random.randn(100,1)
# fit the straight line ytilde = alpha + beta * x
linreg = LinearRegression()
linreg.fit(x,y)
# evaluate the fitted line at the end points x = 0 and x = 1
xnew = np.array([[0],[1]])
ypredict = linreg.predict(xnew)

plt.plot(xnew, ypredict, "r-")
plt.plot(x, y ,'ro')
plt.axis([0,1.0,0, 5.0])
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'Simple Linear Regression')
plt.show()
# -

# This example serves several aims. It allows us to demonstrate several
# aspects of data analysis and later machine learning algorithms. The
# immediate visualization shows that our linear fit is not
# impressive. It goes through the data points, but there are many
# outliers which are not reproduced by our linear regression. We could
# now play around with this small program and change for example the
# factor in front of $x$ and the normal distribution. Try to change the
# function $y$ to
# $$
# y = 10x+0.01 \times N(0,1),
# $$
# where $x$ is defined as before. Does the fit look better? Indeed, by
# reducing the role of the noise given by the normal distribution we see immediately that
# our linear prediction seemingly reproduces better the training
# set.
However, this testing 'by the eye' is obviouly not satisfactory in the # long run. Here we have only defined the training data and our model, and # have not discussed a more rigorous approach to the **cost** function. # # We need more rigorous criteria in defining whether we have succeeded or # not in modeling our training data. You will be surprised to see that # many scientists seldomly venture beyond this 'by the eye' approach. A # standard approach for the *cost* function is the so-called $\chi^2$ # function (a variant of the mean-squared error (MSE)) # $$ # \chi^2 = \frac{1}{n} # \sum_{i=0}^{n-1}\frac{(y_i-\tilde{y}_i)^2}{\sigma_i^2}, # $$ # where $\sigma_i^2$ is the variance (to be defined later) of the entry # $y_i$. We may not know the explicit value of $\sigma_i^2$, it serves # however the aim of scaling the equations and make the cost function # dimensionless. # # Minimizing the cost function is a central aspect of # our discussions to come. Finding its minima as function of the model # parameters ($\alpha$ and $\beta$ in our case) will be a recurring # theme in these series of lectures. Essentially all machine learning # algorithms we will discuss center around the minimization of the # chosen cost function. This depends in turn on our specific # model for describing the data, a typical situation in supervised # learning. Automatizing the search for the minima of the cost function is a # central ingredient in all algorithms. Typical methods which are # employed are various variants of **gradient** methods. These will be # discussed in more detail later. Again, you'll be surprised to hear that # many practitioners minimize the above function ''by the eye', popularly dubbed as # 'chi by the eye'. That is, change a parameter and see (visually and numerically) that # the $\chi^2$ function becomes smaller. # # There are many ways to define the cost function. 
A simpler approach is to look at the relative difference between the training data and the predicted data, that is we define # the relative error (why would we prefer the MSE instead of the relative error?) as # $$ # \epsilon_{\mathrm{relative}}= \frac{\vert \hat{y} -\hat{\tilde{y}}\vert}{\vert \hat{y}\vert}. # $$ # The squared cost function results in an arithmetic mean-unbiased # estimator, and the absolute-value cost function results in a # median-unbiased estimator (in the one-dimensional case, and a # geometric median-unbiased estimator for the multi-dimensional # case). The squared cost function has the disadvantage that it has the tendency # to be dominated by outliers. # # We can modify easily the above Python code and plot the relative error instead # + import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression x = np.random.rand(100,1) y = 5*x+0.01*np.random.randn(100,1) linreg = LinearRegression() linreg.fit(x,y) ypredict = linreg.predict(x) plt.plot(x, np.abs(ypredict-y)/abs(y), "ro") plt.axis([0,1.0,0.0, 0.5]) plt.xlabel(r'$x$') plt.ylabel(r'$\epsilon_{\mathrm{relative}}$') plt.title(r'Relative error') plt.show() # - # Depending on the parameter in front of the normal distribution, we may # have a small or larger relative error. Try to play around with # different training data sets and study (graphically) the value of the # relative error. # # As mentioned above, **Scikit-Learn** has an impressive functionality. # We can for example extract the values of $\alpha$ and $\beta$ and # their error estimates, or the variance and standard deviation and many # other properties from the statistical data analysis. # # Here we show an # example of the functionality of **Scikit-Learn**. 
# + import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score, mean_squared_log_error, mean_absolute_error x = np.random.rand(100,1) y = 2.0+ 5*x+0.5*np.random.randn(100,1) linreg = LinearRegression() linreg.fit(x,y) ypredict = linreg.predict(x) print('The intercept alpha: \n', linreg.intercept_) print('Coefficient beta : \n', linreg.coef_) # The mean squared error print("Mean squared error: %.2f" % mean_squared_error(y, ypredict)) # Explained variance score: 1 is perfect prediction print('Variance score: %.2f' % r2_score(y, ypredict)) # Mean squared log error print('Mean squared log error: %.2f' % mean_squared_log_error(y, ypredict) ) # Mean absolute error print('Mean absolute error: %.2f' % mean_absolute_error(y, ypredict)) plt.plot(x, ypredict, "r-") plt.plot(x, y ,'ro') plt.axis([0.0,1.0,1.5, 7.0]) plt.xlabel(r'$x$') plt.ylabel(r'$y$') plt.title(r'Linear Regression fit ') plt.show() # - # The function **coef** gives us the parameter $\beta$ of our fit while **intercept** yields # $\alpha$. Depending on the constant in front of the normal distribution, we get values near or far from $alpha =2$ and $\beta =5$. Try to play around with different parameters in front of the normal distribution. The function **meansquarederror** gives us the mean square error, a risk metric corresponding to the expected value of the squared (quadratic) error or loss defined as # $$ # MSE(\hat{y},\hat{\tilde{y}}) = \frac{1}{n} # \sum_{i=0}^{n-1}(y_i-\tilde{y}_i)^2, # $$ # The smaller the value, the better the fit. Ideally we would like to # have an MSE equal zero. The attentive reader has probably recognized # this function as being similar to the $\chi^2$ function defined above. # # The **r2score** function computes $R^2$, the coefficient of # determination. It provides a measure of how well future samples are # likely to be predicted by the model. 
Best possible score is 1.0 and it # can be negative (because the model can be arbitrarily worse). A # constant model that always predicts the expected value of $\hat{y}$, # disregarding the input features, would get a $R^2$ score of $0.0$. # # If $\tilde{\hat{y}}_i$ is the predicted value of the $i-th$ sample and $y_i$ is the corresponding true value, then the score $R^2$ is defined as # $$ # R^2(\hat{y}, \tilde{\hat{y}}) = 1 - \frac{\sum_{i=0}^{n - 1} (y_i - \tilde{y}_i)^2}{\sum_{i=0}^{n - 1} (y_i - \bar{y})^2}, # $$ # where we have defined the mean value of $\hat{y}$ as # $$ # \bar{y} = \frac{1}{n} \sum_{i=0}^{n - 1} y_i. # $$ # Another quantity taht we will meet again in our discussions of regression analysis is # the mean absolute error (MAE), a risk metric corresponding to the expected value of the absolute error loss or what we call the $l1$-norm loss. In our discussion above we presented the relative error. # The MAE is defined as follows # $$ # \text{MAE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n-1} \left| y_i - \tilde{y}_i \right|. # $$ # We present the # squared logarithmic (quadratic) error # $$ # \text{MSLE}(\hat{y}, \hat{\tilde{y}}) = \frac{1}{n} \sum_{i=0}^{n - 1} (\log_e (1 + y_i) - \log_e (1 + \tilde{y}_i) )^2, # $$ # where $\log_e (x)$ stands for the natural logarithm of $x$. This error # estimate is best to use when targets having exponential growth, such # as population counts, average sales of a commodity over a span of # years etc. # # # Finally, another cost function is the Huber cost function used in robust regression. # # The rationale behind this possible cost function is its reduced # sensitivity to outliers in the data set. In our discussions on # dimensionality reduction and normalization of data we will meet other # ways of dealing with outliers. 
#
# The Huber cost function is defined as
# $$
# H_{\delta}(a)={\begin{cases}{\frac {1}{2}}{a^{2}}&{\text{for }}|a|\leq \delta ,\\\delta (|a|-{\frac {1}{2}}\delta ),&{\text{otherwise.}}\end{cases}}.
# $$
# Here $a=\boldsymbol{y} - \boldsymbol{\tilde{y}}$.
# We will discuss in more
# detail these and other functions in the various lectures. We conclude this part with another example. Instead of
# a linear $x$-dependence we study now a cubic polynomial and use the polynomial regression analysis tools of scikit-learn.

# +
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression

x=np.linspace(0.02,0.98,200)
noise = np.asarray(random.sample((range(200)),200))
y=x**3*noise
yn=x**3*100
poly3 = PolynomialFeatures(degree=3)
X = poly3.fit_transform(x[:,np.newaxis])
clf3 = LinearRegression()
clf3.fit(X,y)

Xplot=poly3.fit_transform(x[:,np.newaxis])
poly3_plot=plt.plot(x, clf3.predict(Xplot), label='Cubic Fit')
plt.plot(x,yn, color='red', label="True Cubic")
plt.scatter(x, y, label='Data', color='orange', s=15)
plt.legend()
plt.show()

def error(a):
    """Return the absolute value of the mean signed relative deviation of *a*
    from the noise-free cubic ``yn`` (a module-level array).

    Note: this sums *signed* relative errors before taking the absolute
    value, so positive and negative deviations can cancel.
    """
    # The original version looped over `y` needlessly (recomputing the same
    # array and returning on the first iteration) and ignored its argument.
    err = (a - yn) / yn
    return abs(np.sum(err)) / len(err)

print (error(y))
# -

# ### To our real data: nuclear binding energies. Brief reminder on masses and binding energies
#
# Let us now dive into nuclear physics and remind ourselves briefly about some basic features about binding
# energies. A basic quantity which can be measured for the ground
# states of nuclei is the atomic mass $M(N, Z)$ of the neutral atom with
# atomic mass number $A$ and charge $Z$. The number of neutrons is $N$. There are indeed several sophisticated experiments worldwide which allow us to measure this quantity to high precision (parts per million even).
# # Atomic masses are usually tabulated in terms of the mass excess defined by # $$ # \Delta M(N, Z) = M(N, Z) - uA, # $$ # where $u$ is the Atomic Mass Unit # $$ # u = M(^{12}\mathrm{C})/12 = 931.4940954(57) \hspace{0.1cm} \mathrm{MeV}/c^2. # $$ # The nucleon masses are # $$ # m_p = 1.00727646693(9)u, # $$ # and # $$ # m_n = 939.56536(8)\hspace{0.1cm} \mathrm{MeV}/c^2 = 1.0086649156(6)u. # $$ # In the [2016 mass evaluation of by W.J.Huang, G.Audi, M.Wang, F.G.Kondev, S.Naimi and X.Xu](http://nuclearmasses.org/resources_folder/Wang_2017_Chinese_Phys_C_41_030003.pdf) # there are data on masses and decays of 3437 nuclei. # # The nuclear binding energy is defined as the energy required to break # up a given nucleus into its constituent parts of $N$ neutrons and $Z$ # protons. In terms of the atomic masses $M(N, Z)$ the binding energy is # defined by # $$ # BE(N, Z) = ZM_H c^2 + Nm_n c^2 - M(N, Z)c^2 , # $$ # where $M_H$ is the mass of the hydrogen atom and $m_n$ is the mass of the neutron. # In terms of the mass excess the binding energy is given by # $$ # BE(N, Z) = Z\Delta_H c^2 + N\Delta_n c^2 -\Delta(N, Z)c^2 , # $$ # where $\Delta_H c^2 = 7.2890$ MeV and $\Delta_n c^2 = 8.0713$ MeV. # # # A popular and physically intuitive model which can be used to parametrize # the experimental binding energies as function of $A$, is the so-called # **liquid drop model**. The ansatz is based on the following expression # $$ # BE(N,Z) = a_1A-a_2A^{2/3}-a_3\frac{Z^2}{A^{1/3}}-a_4\frac{(N-Z)^2}{A}, # $$ # where $A$ stands for the number of nucleons and the $a_i$s are parameters which are determined by a fit # to the experimental data. # # # # # To arrive at the above expression we have assumed that we can make the following assumptions: # # * There is a volume term $a_1A$ proportional with the number of nucleons (the energy is also an extensive quantity). 
When an assembly of nucleons of the same size is packed together into the smallest volume, each interior nucleon has a certain number of other nucleons in contact with it. This contribution is proportional to the volume. # # * There is a surface energy term $a_2A^{2/3}$. The assumption here is that a nucleon at the surface of a nucleus interacts with fewer other nucleons than one in the interior of the nucleus and hence its binding energy is less. This surface energy term takes that into account and is therefore negative and is proportional to the surface area. # # * There is a Coulomb energy term $a_3\frac{Z^2}{A^{1/3}}$. The electric repulsion between each pair of protons in a nucleus yields less binding. # # * There is an asymmetry term $a_4\frac{(N-Z)^2}{A}$. This term is associated with the Pauli exclusion principle and reflects the fact that the proton-neutron interaction is more attractive on the average than the neutron-neutron and proton-proton interactions. # # We could also add a so-called pairing term, which is a correction term that # arises from the tendency of proton pairs and neutron pairs to # occur. An even number of particles is more stable than an odd number. # # # ### Organizing our data # # Let us start with reading and organizing our data. # We start with the compilation of masses and binding energies from 2016. # After having downloaded this file to our own computer, we are now ready to read the file and start structuring our data. # # # We start with preparing folders for storing our calculations and the data file over masses and binding energies. We import also various modules that we will find useful in order to present various Machine Learning methods. Here we focus mainly on the functionality of **scikit-learn**. 
# + # Common imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import sklearn.linear_model as skl from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error import os # Where to save the figures and data files PROJECT_ROOT_DIR = "Results" FIGURE_ID = "Results/FigureFiles" DATA_ID = "DataFiles/" if not os.path.exists(PROJECT_ROOT_DIR): os.mkdir(PROJECT_ROOT_DIR) if not os.path.exists(FIGURE_ID): os.makedirs(FIGURE_ID) if not os.path.exists(DATA_ID): os.makedirs(DATA_ID) def image_path(fig_id): return os.path.join(FIGURE_ID, fig_id) def data_path(dat_id): return os.path.join(DATA_ID, dat_id) def save_fig(fig_id): plt.savefig(image_path(fig_id) + ".png", format='png') infile = open(data_path("MassEval2016.dat"),'r') # - # Before we proceed, we define also a function for making our plots. You can obviously avoid this and simply set up various **matplotlib** commands every time you need them. You may however find it convenient to collect all such commands in one function and simply call this function. # + from pylab import plt, mpl plt.style.use('seaborn') mpl.rcParams['font.family'] = 'serif' def MakePlot(x,y, styles, labels, axlabels): plt.figure(figsize=(10,6)) for i in range(len(x)): plt.plot(x[i], y[i], styles[i], label = labels[i]) plt.xlabel(axlabels[0]) plt.ylabel(axlabels[1]) plt.legend(loc=0) # - # Our next step is to read the data on experimental binding energies and # reorganize them as functions of the mass number $A$, the number of # protons $Z$ and neutrons $N$ using **pandas**. Before we do this it is # always useful (unless you have a binary file or other types of compressed # data) to actually open the file and simply take a look at it! # # # In particular, the program that outputs the final nuclear masses is written in Fortran with a specific format. It means that we need to figure out the format and which columns contain the data we are interested in. 
Pandas comes with a function that reads formatted output. After having admired the file, we are now ready to start massaging it with **pandas**. The file begins with some basic format information. """ This is taken from the data file of the mass 2016 evaluation. All files are 3436 lines long with 124 character per line. Headers are 39 lines long. col 1 : Fortran character control: 1 = page feed 0 = line feed format : a1,i3,i5,i5,i5,1x,a3,a4,1x,f13.5,f11.5,f11.3,f9.3,1x,a2,f11.3,f9.3,1x,i3,1x,f12.5,f11.5 These formats are reflected in the pandas widths variable below, see the statement widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1), Pandas has also a variable header, with length 39 in this case. """ # The data we are interested in are in columns 2, 3, 4 and 11, giving us # the number of neutrons, protons, mass numbers and binding energies, # respectively. We add also for the sake of completeness the element name. The data are in fixed-width formatted lines and we will # covert them into the **pandas** DataFrame structure. # + # Read the experimental data with Pandas Masses = pd.read_fwf(infile, usecols=(2,3,4,6,11), names=('N', 'Z', 'A', 'Element', 'Ebinding'), widths=(1,3,5,5,5,1,3,4,1,13,11,11,9,1,2,11,9,1,3,1,12,11,1), header=39, index_col=False) # Extrapolated values are indicated by '#' in place of the decimal place, so # the Ebinding column won't be numeric. Coerce to float and drop these entries. Masses['Ebinding'] = pd.to_numeric(Masses['Ebinding'], errors='coerce') Masses = Masses.dropna() # Convert from keV to MeV. Masses['Ebinding'] /= 1000 # Group the DataFrame by nucleon number, A. Masses = Masses.groupby('A') # Find the rows of the grouped DataFrame with the maximum binding energy. Masses = Masses.apply(lambda t: t[t.Ebinding==t.Ebinding.max()]) # - # We have now read in the data, grouped them according to the variables we are interested in. # We see how easy it is to reorganize the data using **pandas**. 
If we # were to do these operations in C/C++ or Fortran, we would have had to # write various functions/subroutines which perform the above # reorganizations for us. Having reorganized the data, we can now start # to make some simple fits using both the functionalities in **numpy** and # **Scikit-Learn** afterwards. # # Now we define five variables which contain # the number of nucleons $A$, the number of protons $Z$ and the number of neutrons $N$, the element name and finally the energies themselves. A = Masses['A'] Z = Masses['Z'] N = Masses['N'] Element = Masses['Element'] Energies = Masses['Ebinding'] print(Masses) # The next step, and we will define this mathematically later, is to set up the so-called **design matrix**. We will throughout call this matrix $\boldsymbol{X}$. # It has dimensionality $p\times n$, where $n$ is the number of data points and $p$ are the so-called predictors. In our case here they are given by the number of polynomials in $A$ we wish to include in the fit. # Now we set up the design matrix X X = np.zeros((len(A),5)) X[:,0] = 1 X[:,1] = A X[:,2] = A**(2.0/3.0) X[:,3] = A**(-1.0/3.0) X[:,4] = A**(-1.0) # With **scikitlearn** we are now ready to use linear regression and fit our data. clf = skl.LinearRegression().fit(X, Energies) fity = clf.predict(X) # Pretty simple! # Now we can print measures of how our fit is doing, the coefficients from the fits and plot the final fit together with our data. # + # The mean squared error print("Mean squared error: %.2f" % mean_squared_error(Energies, fity)) # Explained variance score: 1 is perfect prediction print('Variance score: %.2f' % r2_score(Energies, fity)) # Mean absolute error print('Mean absolute error: %.2f' % mean_absolute_error(Energies, fity)) print(clf.coef_, clf.intercept_) Masses['Eapprox'] = fity # Generate a plot comparing the experimental with the fitted values values. 
fig, ax = plt.subplots()
ax.set_xlabel(r'$A = N + Z$')
ax.set_ylabel(r'$E_\mathrm{bind}\,/\mathrm{MeV}$')
ax.plot(Masses['A'], Masses['Ebinding'], alpha=0.7, lw=2, label='Ame2016')
ax.plot(Masses['A'], Masses['Eapprox'], alpha=0.7, lw=2, c='m', label='Fit')
ax.legend()
save_fig("Masses2016")
plt.show()
# -

# ### Seeing the wood for the trees
#
# As a teaser, let us now see how we can do this with decision trees using **scikit-learn**. Later we will switch to so-called **random forests**!

# +
# Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor

# Fit trees of increasing depth; deeper trees can follow the data more closely.
regr_1=DecisionTreeRegressor(max_depth=5)
regr_2=DecisionTreeRegressor(max_depth=7)
regr_3=DecisionTreeRegressor(max_depth=11)
regr_1.fit(X, Energies)
regr_2.fit(X, Energies)
regr_3.fit(X, Energies)
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
y_3=regr_3.predict(X)
Masses['Eapprox'] = y_3
# Plot the results
plt.figure()
plt.plot(A, Energies, color="blue", label="Data", linewidth=2)
plt.plot(A, y_1, color="red", label="max_depth=5", linewidth=2)
plt.plot(A, y_2, color="green", label="max_depth=7", linewidth=2)
# Label fixed: regr_3 was fitted with max_depth=11, not 9.
plt.plot(A, y_3, color="m", label="max_depth=11", linewidth=2)
plt.xlabel("$A$")
plt.ylabel("$E$[MeV]")
plt.title("Decision Tree Regression")
plt.legend()
save_fig("Masses2016Trees")
plt.show()
print(Masses)
print(np.mean( (Energies-y_1)**2))
# -

# ### And what about using neural networks?
#
# The **seaborn** package allows us to visualize data in an efficient way. Note that we use **scikit-learn**'s multi-layer perceptron (or feed forward neural network)
# functionality.
# + from sklearn.neural_network import MLPRegressor from sklearn.metrics import accuracy_score import seaborn as sns X_train = X Y_train = Energies n_hidden_neurons = 100 epochs = 100 # store models for later use eta_vals = np.logspace(-5, 1, 7) lmbd_vals = np.logspace(-5, 1, 7) # store the models for later use DNN_scikit = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object) train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals))) sns.set() for i, eta in enumerate(eta_vals): for j, lmbd in enumerate(lmbd_vals): dnn = MLPRegressor(hidden_layer_sizes=(n_hidden_neurons), activation='logistic', alpha=lmbd, learning_rate_init=eta, max_iter=epochs) dnn.fit(X_train, Y_train) DNN_scikit[i][j] = dnn train_accuracy[i][j] = dnn.score(X_train, Y_train) fig, ax = plt.subplots(figsize = (10, 10)) sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis") ax.set_title("Training Accuracy") ax.set_ylabel("$\eta$") ax.set_xlabel("$\lambda$") plt.show() # - # ## A first summary # # The aim behind these introductory words was to present to you various # Python libraries and their functionalities, in particular libraries like # **numpy**, **pandas**, **xarray** and **matplotlib** and other that make our life much easier # in handling various data sets and visualizing data. # # Furthermore, # **Scikit-Learn** allows us with few lines of code to implement popular # Machine Learning algorithms for supervised learning. Later we will meet **Tensorflow**, a powerful library for deep learning. # Now it is time to dive more into the details of various methods. We will start with linear regression and try to take a deeper look at what it entails.
doc/pub/week34/ipynb/week34.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" # # OCR model for reading Captchas # # **Author:** [A_K_Nain](https://twitter.com/A_K_Nain)<br> # **Date created:** 2020/06/14<br> # **Last modified:** 2020/06/26<br> # **Description:** How to implement an OCR model using CNNs, RNNs and CTC loss. # + [markdown] colab_type="text" # ## Introduction # # This example demonstrates a simple OCR model built with the Functional API. Apart from # combining CNN and RNN, it also illustrates how you can instantiate a new layer # and use it as an "Endpoint layer" for implementing CTC loss. For a detailed # guide to layer subclassing, please check out # [this page](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) # in the developer guides. # + [markdown] colab_type="text" # ## Setup # + colab_type="code" import os import numpy as np import matplotlib.pyplot as plt from pathlib import Path from collections import Counter import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers # + [markdown] colab_type="text" # ## Load the data: [Captcha Images](https://www.kaggle.com/fournierp/captcha-version-2-images) # Let's download the data. # + colab_type="code" # !curl -LO https://github.com/AakashKumarNain/CaptchaCracker/raw/master/captcha_images_v2.zip # !unzip -qq captcha_images_v2.zip # + [markdown] colab_type="text" # The dataset contains 1040 captcha files as `png` images. The label for each sample is a string, # the name of the file (minus the file extension). # We will map each character in the string to an integer for training the model. Similary, # we will need to map the predictions of the model back to strings. 
For this purpose # we will maintain two dictionaries, mapping characters to integers, and integers to characters, # respectively. # + colab_type="code" # Path to the data directory data_dir = Path("./captcha_images_v2/") # Get list of all the images images = sorted(list(map(str, list(data_dir.glob("*.png"))))) labels = [img.split(os.path.sep)[-1].split(".png")[0] for img in images] characters = set(char for label in labels for char in label) print("Number of images found: ", len(images)) print("Number of labels found: ", len(labels)) print("Number of unique characters: ", len(characters)) print("Characters present: ", characters) # Batch size for training and validation batch_size = 16 # Desired image dimensions img_width = 200 img_height = 50 # Factor by which the image is going to be downsampled # by the convolutional blocks. We will be using two # convolution blocks and each block will have # a pooling layer which downsample the features by a factor of 2. # Hence total downsampling factor would be 4. downsample_factor = 4 # Maximum length of any captcha in the dataset max_length = max([len(label) for label in labels]) # + [markdown] colab_type="text" # ## Preprocessing # + colab_type="code" # Mapping characters to integers char_to_num = layers.experimental.preprocessing.StringLookup( vocabulary=list(characters), num_oov_indices=0, mask_token=None ) # Mapping integers back to original characters num_to_char = layers.experimental.preprocessing.StringLookup( vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True ) def split_data(images, labels, train_size=0.9, shuffle=True): # 1. Get the total size of the dataset size = len(images) # 2. Make an indices array and shuffle it, if required indices = np.arange(size) if shuffle: np.random.shuffle(indices) # 3. Get the size of training samples train_samples = int(size * train_size) # 4. 
Split data into training and validation sets x_train, y_train = images[indices[:train_samples]], labels[indices[:train_samples]] x_valid, y_valid = images[indices[train_samples:]], labels[indices[train_samples:]] return x_train, x_valid, y_train, y_valid # Splitting data into training and validation sets x_train, x_valid, y_train, y_valid = split_data(np.array(images), np.array(labels)) def encode_single_sample(img_path, label): # 1. Read image img = tf.io.read_file(img_path) # 2. Decode and convert to grayscale img = tf.io.decode_png(img, channels=1) # 3. Convert to float32 in [0, 1] range img = tf.image.convert_image_dtype(img, tf.float32) # 4. Resize to the desired size img = tf.image.resize(img, [img_height, img_width]) # 5. Transpose the image because we want the time # dimension to correspond to the width of the image. img = tf.transpose(img, perm=[1, 0, 2]) # 6. Map the characters in label to numbers label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8")) # 7. 
Return a dict as our model is expecting two inputs return {"image": img, "label": label} # + [markdown] colab_type="text" # ## Create `Dataset` objects # + colab_type="code" train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = ( train_dataset.map( encode_single_sample, num_parallel_calls=tf.data.experimental.AUTOTUNE ) .batch(batch_size) .prefetch(buffer_size=tf.data.experimental.AUTOTUNE) ) validation_dataset = tf.data.Dataset.from_tensor_slices((x_valid, y_valid)) validation_dataset = ( validation_dataset.map( encode_single_sample, num_parallel_calls=tf.data.experimental.AUTOTUNE ) .batch(batch_size) .prefetch(buffer_size=tf.data.experimental.AUTOTUNE) ) # + [markdown] colab_type="text" # ## Visualize the data # + colab_type="code" _, ax = plt.subplots(4, 4, figsize=(10, 5)) for batch in train_dataset.take(1): images = batch["image"] labels = batch["label"] for i in range(16): img = (images[i] * 255).numpy().astype("uint8") label = tf.strings.reduce_join(num_to_char(labels[i])).numpy().decode("utf-8") ax[i // 4, i % 4].imshow(img[:, :, 0].T, cmap="gray") ax[i // 4, i % 4].set_title(label) ax[i // 4, i % 4].axis("off") plt.show() # + [markdown] colab_type="text" # ## Model # + colab_type="code" class CTCLayer(layers.Layer): def __init__(self, name=None): super().__init__(name=name) self.loss_fn = keras.backend.ctc_batch_cost def call(self, y_true, y_pred): # Compute the training-time loss value and add it # to the layer using `self.add_loss()`. 
batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64") input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64") label_length = tf.cast(tf.shape(y_true)[1], dtype="int64") input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64") label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64") loss = self.loss_fn(y_true, y_pred, input_length, label_length) self.add_loss(loss) # At test time, just return the computed predictions return y_pred def build_model(): # Inputs to the model input_img = layers.Input( shape=(img_width, img_height, 1), name="image", dtype="float32" ) labels = layers.Input(name="label", shape=(None,), dtype="float32") # First conv block x = layers.Conv2D( 32, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same", name="Conv1", )(input_img) x = layers.MaxPooling2D((2, 2), name="pool1")(x) # Second conv block x = layers.Conv2D( 64, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same", name="Conv2", )(x) x = layers.MaxPooling2D((2, 2), name="pool2")(x) # We have used two max pool with pool size and strides 2. # Hence, downsampled feature maps are 4x smaller. The number of # filters in the last layer is 64. 
Reshape accordingly before # passing the output to the RNN part of the model new_shape = ((img_width // 4), (img_height // 4) * 64) x = layers.Reshape(target_shape=new_shape, name="reshape")(x) x = layers.Dense(64, activation="relu", name="dense1")(x) x = layers.Dropout(0.2)(x) # RNNs x = layers.Bidirectional(layers.LSTM(128, return_sequences=True, dropout=0.25))(x) x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, dropout=0.25))(x) # Output layer x = layers.Dense(len(characters) + 1, activation="softmax", name="dense2")(x) # Add CTC layer for calculating CTC loss at each step output = CTCLayer(name="ctc_loss")(labels, x) # Define the model model = keras.models.Model( inputs=[input_img, labels], outputs=output, name="ocr_model_v1" ) # Optimizer opt = keras.optimizers.Adam() # Compile the model and return model.compile(optimizer=opt) return model # Get the model model = build_model() model.summary() # + [markdown] colab_type="text" # ## Training # + colab_type="code" epochs = 100 early_stopping_patience = 10 # Add early stopping early_stopping = keras.callbacks.EarlyStopping( monitor="val_loss", patience=early_stopping_patience, restore_best_weights=True ) # Train the model history = model.fit( train_dataset, validation_data=validation_dataset, epochs=epochs, callbacks=[early_stopping], ) # + [markdown] colab_type="text" # ## Inference # + colab_type="code" # Get the prediction model by extracting layers till the output layer prediction_model = keras.models.Model( model.get_layer(name="image").input, model.get_layer(name="dense2").output ) prediction_model.summary() # A utility function to decode the output of the network def decode_batch_predictions(pred): input_len = np.ones(pred.shape[0]) * pred.shape[1] # Use greedy search. 
For complex tasks, you can use beam search results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][ :, :max_length ] # Iterate over the results and get back the text output_text = [] for res in results: res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8") output_text.append(res) return output_text # Let's check results on some validation samples for batch in validation_dataset.take(1): batch_images = batch["image"] batch_labels = batch["label"] preds = prediction_model.predict(batch_images) pred_texts = decode_batch_predictions(preds) orig_texts = [] for label in batch_labels: label = tf.strings.reduce_join(num_to_char(label)).numpy().decode("utf-8") orig_texts.append(label) _, ax = plt.subplots(4, 4, figsize=(15, 5)) for i in range(len(pred_texts)): img = (batch_images[i, :, :, 0] * 255).numpy().astype(np.uint8) img = img.T title = f"Prediction: {pred_texts[i]}" ax[i // 4, i % 4].imshow(img, cmap="gray") ax[i // 4, i % 4].set_title(title) ax[i // 4, i % 4].axis("off") plt.show()
examples/vision/ipynb/captcha_ocr.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="1YRGLD1pOOrJ" # ##### Copyright 2021 The TensorFlow Federated Authors. # + cellView="form" id="koW3R4ntOgLS" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="grBmytrShbUE" # # Random noise generation in TFF # # This tutorial will discuss the recommendation for random noise generation in TFF. Random noise generation is an important component of many privacy protection techniques in federated learning algorithms, e.g., differential privacy. 
# # + [markdown] id="coAumH42q9nz" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/federated/tutorials/random_noise_generation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/master/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/federated/blob/master/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="yiq_MY4LopET" # ## Before we begin # # First, let us make sure the notebook is connected to a backend that has the relevant components compiled. # + id="ke7EyuvG0Zyn" #@test {"skip": true} # !pip install --quiet --upgrade tensorflow_federated_nightly # !pip install --quiet --upgrade nest_asyncio import nest_asyncio nest_asyncio.apply() # + id="rtgStTrNIId-" import numpy as np import tensorflow as tf import tensorflow_federated as tff # + [markdown] id="X6eWsahmQpmi" # Run the following "Hello World" # example to make sure the TFF environment is correctly setup. If it doesn't work, # please refer to the [Installation](../install.md) guide for instructions. # + id="wjX3wmC-P1aE" @tff.federated_computation def hello_world(): return 'Hello, World!' 
hello_world() # + [markdown] id="C2-BdlyAId1_" # ## Discouraged usage: directly using `tf.random.normal` # # TF1.x like APIs `tf.random.normal` for random noise generation are strongly discouraged in TF2 according to the [random noise generation tutorial in TF](https://www.tensorflow.org/guide/random_numbers). Surprising behavior may happen when these APIs are used together with `tf.function` and `tf.random.set_seed`. For example, the following code will generate the same value with each call. This surprising behavior is expected for TF, and explanation can be found in the [documentation of `tf.random.set_seed`](https://www.tensorflow.org/api_docs/python/tf/random/set_seed). # + id="0S7t0-3hHCWc" tf.random.set_seed(1) @tf.function def return_one_noise(_): return tf.random.normal([]) n1=return_one_noise(1) n2=return_one_noise(2) assert n1 == n2 print(n1.numpy(), n2.numpy()) # + [markdown] id="6vmWv0ALKvqh" # In TFF, things are slightly different. If we wrap the noise generation as `tff.tf_computation` instead of `tf.function`, non-deterministic random noise will be generated. However, if we run this code snippet multiple times, different set of `(n1, n2)` will be generated each time. There is no easy way to set a global random seed for TFF. # + id="D_5T0UzHKtde" tf.random.set_seed(1) @tff.tf_computation def return_one_noise(_): return tf.random.normal([]) n1=return_one_noise(1) n2=return_one_noise(2) assert n1 != n2 print(n1, n2) # + [markdown] id="GJMdUjhxWPcR" # Moreover, deterministic noise can be generated in TFF without explicitly setting a seed. The function `return_two_noise` in the following code snippet returns two identical noise values. This is expected behavior because TFF will build computation graph in advance before execution. However, this suggests users have to pay attention on the usage of `tf.random.normal` in TFF. 
# + id="k0jtUXzCSTCN" @tff.tf_computation def tff_return_one_noise(): return tf.random.normal([]) @tff.federated_computation def return_two_noise(): return (tff_return_one_noise(), tff_return_one_noise()) n1, n2=return_two_noise() assert n1 == n2 print(n1, n2) # + [markdown] id="Wk0UhmhuYtr8" # ## Usage with care: `tf.random.Generator` # # We can use `tf.random.Generator` as suggested in the [TF tutorial](https://www.tensorflow.org/guide/random_numbers). # + id="SuYiH7n5ZTej" @tff.tf_computation def tff_return_one_noise(i): g=tf.random.Generator.from_seed(i) @tf.function def tf_return_one_noise(): return g.normal([]) return tf_return_one_noise() @tff.federated_computation def return_two_noise(): return (tff_return_one_noise(1), tff_return_one_noise(2)) n1, n2 = return_two_noise() assert n1 != n2 print(n1, n2) # + [markdown] id="HU8tKbmvqN_w" # However, users may have to be careful on its usage # # # * `tf.random.Generator` uses `tf.Variable` to maintain the states for RNG algorithms. In TFF, it is recommended to contruct the generator inside a `tff.tf_computation`; and it is difficult to pass the generator and its state between `tff.tf_computation` functions. # * the previous code snippet also relies on carefully setting seeds in generators. We will get expected but surprising results (deterministic `n1==n2`) if we use `tf.random.Generator.from_non_deterministic_state()` instead. # # In general, TFF prefers functional operations and we will showcase the usage of `tf.random.stateless_*` functions in the following sections. # + [markdown] id="pImReFSuIaCq" # In TFF for federated learning, we often work with nested structures instead of scalars and the previous code snippet can be naturally extended to nested structures. 
class RandomSeedGenerator():
  """Functional, state-in-state-out seed manager for `tf.random.stateless_*`.

  The pseudo state is a shape-(2,) int64 tensor. `tf.random.stateless_*`
  scrambles the seed internally, so a simple counter produces statistically
  uncorrelated noise across calls.
  """

  def initialize(self, seed=None):
    """Return an initial shape-(2,) int64 seed state.

    If `seed` is None, derive a seed from the wall clock; otherwise use the
    caller-supplied value.
    """
    if seed is None:
      # Two loosely related clock-derived values form the stateless seed pair.
      return tf.cast(tf.stack(
          [tf.math.floor(tf.timestamp() * 1e6),
           tf.math.floor(tf.math.log(tf.timestamp() * 1e6))]),
          dtype=tf.int64)
    else:
      # BUG FIX: the original passed `self.seed`, an attribute that is never
      # assigned, which raised AttributeError for any explicit seed. The
      # `seed` argument is what must be broadcast into the (2,) constant.
      return tf.constant(seed, dtype=tf.int64, shape=(2,))

  def next(self, state):
    """Advance the pseudo state by one."""
    return state + 1

  def structure_next(self, state, nest_structure):
    """Return seeds in a nested structure and the next state seed.

    Each leaf of `nest_structure` gets a consecutive seed starting at
    `state`; the returned next state continues after the last one used.
    """
    flat_structure = tf.nest.flatten(nest_structure)
    flat_seeds = [state + i for i in range(len(flat_structure))]
    nest_seeds = tf.nest.pack_sequence_as(nest_structure, flat_seeds)
    return nest_seeds, flat_seeds[-1] + 1
docs/tutorials/random_noise_generation.ipynb
# Hand-crafted two-layer perceptron that solves XOR.
# Hidden layer: n1 = NAND(x1, x2), n2 = OR(x1, x2); output: AND(n1, n2).
w11 = np.array([-2, -2])  # hidden node n1 weights (W(1)11, W(1)21) -> NAND
w12 = np.array([2, 2])    # hidden node n2 weights (W(1)12, W(1)22) -> OR
w2 = np.array([1, 1])     # output node weights (W(2)) -> AND
b1 = 3                    # bias of n1
b2 = -1                   # bias of n2
b3 = -1                   # bias of the output node


def MLP(x, weight, biasing):
    """One perceptron unit: weighted sum plus bias, step-thresholded at 0."""
    weighted_sum = np.sum(weight * x) + biasing
    return 1 if weighted_sum > 0 else 0


def NAND(x1, x2):
    """Hidden node n1."""
    return MLP(np.array([x1, x2]), w11, b1)


def OR(x1, x2):
    """Hidden node n2."""
    return MLP(np.array([x1, x2]), w12, b2)


def AND(x1, x2):
    """Output node combining the two hidden activations."""
    return MLP(np.array([x1, x2]), w2, b3)


def XOR(x1, x2):
    """XOR via the classic NAND/OR -> AND two-layer construction."""
    return AND(NAND(x1, x2), OR(x1, x2))


# Print the full truth table.
for x1, x2 in ((0, 0), (0, 1), (1, 0), (1, 1)):
    y = XOR(x1, x2)
    print("x1: %d, x2: %d, y: %d" % (x1, x2, y))
notebook/ch.07 Multi-Layer Perceptron.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import absolute_import, division, print_function import os import matplotlib.pyplot as plt import tensorflow as tf import tensorflow.contrib.eager as tfe tf.enable_eager_execution() print("TensorFlow version: {}".format(tf.VERSION)) print("Eager execution: {}".format(tf.executing_eagerly())) # + ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6]) # CSV 파일을 생성합니다. import tempfile _, filename = tempfile.mkstemp() with open(filename, 'w') as f: f.write("""Line 1 Line 2 Line 3 """) ds_file = tf.data.TextLineDataset(filename) # - ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2) ds_file = ds_file.batch(2) # + print('ds_tensors의 원소들:') for x in tfe.Iterator(ds_tensors): print(x) print('\nds_file의 원소들:') for x in tfe.Iterator(ds_file): print(x) # -
examples/notebooks/3_datasets.ipynb
## Module trapezoid
def trapezoid(f, a, b, Iold, k):
    '''
    Inew = trapezoid(f, a, b, Iold, k)
    Recursive trapezoidal rule:
    Iold = Integral of f(x) from x = a to b computed by
    trapezoidal rule with 2^(k-1) panels.
    Inew = Same integral computed with 2^k panels.
    '''
    if k == 1:
        # Base case: one panel over the whole interval.
        Inew = (f(a) + f(b)) * (b - a) / 2.0
    else:
        n = 2**(k - 2)        # number of new points
        h = (b - a) / n       # spacing of new points
        x = a + h / 2.0       # first new point
        # Renamed from `sum` so the builtin is not shadowed.
        total = 0.0
        for i in range(n):
            total = total + f(x)
            x = x + h
        # Average the previous estimate with the new-point contribution.
        Inew = (Iold + h * total) / 2.0
    return Inew
## Rootsearch
from numpy import sign

def rootsearch(f, a, b, dx):
    '''
    x1, x2 = rootsearch(f, a, b, dx)
    Searches the interval (a,b) in increments dx for
    the bounds (x1, x2) of the smallest root of f(x).
    Returns x1 = x2 = None if no roots were detected
    '''
    x1, f1 = a, f(a)
    x2 = a + dx
    f2 = f(x2)
    # March across [a, b] one increment at a time until f changes sign.
    while sign(f1) == sign(f2):
        if x1 >= b:
            # Right end reached without a sign change: no root in (a, b).
            return None, None
        x1, f1 = x2, f2
        x2 = x1 + dx
        f2 = f(x2)
    # The loop exited on a sign change, so (x1, x2) brackets the root.
    return x1, x2
## Ridder's method
import math
from numpy import sign

def ridder(f, a, b, tol=1.0e-9):
    '''
    root = ridder(f, a, b, tol = 1.0e-9).
    Finds a root of f(x) = 0 with Ridder's method.
    The root must be bracketed in (a,b).
    Returns None if the exponential fit degenerates or the
    iteration limit is reached.
    '''
    fa = f(a)
    if fa == 0.0:
        return a
    fb = f(b)
    if fb == 0.0:
        return b
    for i in range(30):
        # Compute the improved root x from Ridder's formula.
        c = 0.5 * (a + b)
        fc = f(c)
        s = math.sqrt(fc**2 - fa * fb)
        if s == 0.0:
            return None
        dx = (c - a) * fc / s
        if (fa - fb) < 0.0:
            dx = -dx
        x = c + dx
        fx = f(x)
        # Test for convergence against the previous iterate.
        if i > 0:
            if abs(x - xOld) < tol * max(abs(x), 1.0):
                return x
        xOld = x
        # Re-bracket the root as tightly as possible.
        if sign(fc) == sign(fx):
            if sign(fa) != sign(fx):
                b, fb = x, fx
            else:
                a, fa = x, fx
        else:
            a, b = c, x
            fa, fb = fc, fx
    # BUG FIX: in the original the warning followed `return None` and was
    # unreachable; warn first, then signal failure.
    print('Too many iterations')
    return None
## Unrefined Newton-Raphson (no bisection safeguard)
def RawNewtonRaphson(f, df, x, m=1, tol=1.0e-9):
    '''
    root, nIter = RawNewtonRaphson(f, df, x, m = 1, tol = 1.0e-9)
    Plain Newton-Raphson iteration starting from x.
    m = number of roots closeby (multiplicity factor that restores
    quadratic convergence near a multiple root).
    '''
    for iteration in range(30):
        # Modified Newton step scaled by the multiplicity m.
        step = -m * (f(x) / df(x))
        x = x + step
        # Converged once the correction is below the tolerance.
        if abs(step) < tol:
            return x, iteration
    print('Too many iterations')
''' def newtonRaphson2(f, x, tol=1.0e-9): def jacobian(f, x): h = 1.0e-4 n = len(x) jac = np.zeros((n, n)) f0 = f(x) for i in range(n): temp = x[i] x[i] = temp + h f1 = f(x) x[i] = temp jac[:,i] = (f1 - f0)/h return jac, f0 for i in range(30): jac, f0 = jacobian(f, x) if math.sqrt(np.dot(f0, f0)/len(x)) < tol: return x dx = gaussPivot(jac, -f0) x = x + dx if math.sqrt(np.dot(dx, dx)) < tol*max(max(abs(x)), 1.0): return x print('Too many iterations') # - # ## 4.7 EvalPoly & Deflation of polynomials # # Zero's of polynomials can be calculated using this function # # + ## Module Evalpoly ''' p, dp, ddp = evalPoly(a, x) Evaluate the polynomial p = a[0] + a[1]*x + a[2]*x**2 + ... + a[n]*x**n with its derivatives dp = p' and ddp = p'' ''' def evalpoly(a, x): n = len(a) - 1 p = a[n] dp = 0.0 + 0.0j ddp = 0.0 + 0.0j for i in range(1, n+1): ddp = ddp*x + 2.0*dp dp = dp*x + p p = p*x + a[n-i] return p, dp , ddp # + ## Module polyRoots ''' roots = polyRoots(a) Uses Laguerre's method to compute all the roots of a[0] + a[1]*x + a[2]*x**2 + ... + a[n]*x**n = 0. The roots are returned in the array 'roots'. 
## Module polyRoots
'''
roots = polyRoots(a).
Uses Laguerre's method to compute all the roots of
a[0] + a[1]*x + a[2]*x**2 + ... + a[n]*x**n = 0.
The roots are returned in the array 'roots'.
'''
import numpy as np
import cmath
from random import random

def polyRoots(a, tol=1.0e-12):

    def laguerre(a, tol):
        # Laguerre's method converges from almost any starting value,
        # so a random point in [0, 1) is good enough.
        x = random()
        n = len(a) - 1
        for _ in range(30):
            p, dp, ddp = evalpoly(a, x)
            if abs(p) < tol:
                return x
            g = dp / p
            h = g * g - ddp / p
            f = cmath.sqrt((n - 1) * (n * h - g * g))
            # Pick the larger-magnitude denominator for numerical stability.
            denom = g + f if abs(g + f) > abs(g - f) else g - f
            dx = n / denom
            x = x - dx
            if abs(dx) < tol:
                return x
        print('Too many iterations')

    def deflPoly(a, root):
        # Divide the polynomial by (x - root) via synthetic division.
        n = len(a) - 1
        b = [(0.0 + 0.0j)] * n
        b[n - 1] = a[n]
        for i in range(n - 2, -1, -1):
            b[i] = a[i + 1] + root * b[i + 1]
        return b

    n = len(a) - 1
    roots = np.zeros((n), dtype='complex')
    for i in range(n):
        x = laguerre(a, tol)
        # Snap numerically-real roots onto the real axis.
        if abs(x.imag) < tol:
            x = x.real
        roots[i] = x
        # Deflate so the next pass finds a different root.
        a = deflPoly(a, x)
    return roots
## module goldSearch
'''
a, b = bracket(f, xStart, h)
Finds the brackets (a,b) of a minimum point of the
user-supplied scalar function f(x).
The search starts downhill from xStart with a step length h.

x, fMin = search(f, a, b, tol = 1.0e-6)
Golden section method for determining x that minimizes the
user-supplied scalar function f(x).
The minimum must be bracketed in (a, b).
'''
import math

def bracket(f, x1, h):
    c = 1.618033989  # golden-ratio expansion factor
    f1 = f(x1)
    x2 = x1 + h
    f2 = f(x2)
    # Determine the downhill direction; flip the sign of h if needed.
    if f2 > f1:
        h = -h
        x2 = x1 + h
        f2 = f(x2)
        # Uphill on both sides: the minimum lies between x1-h and x1+h.
        if f2 > f1:
            return x2, x1 - h
    # Expand the step geometrically until the function turns uphill.
    for _ in range(100):
        h = c * h
        x3 = x2 + h
        f3 = f(x3)
        if f3 > f2:
            return x1, x3
        x1, x2 = x2, x3
        f1, f2 = f2, f3
    print('Bracket did not find a minimum')

def search(f, a, b, tol=1.0e-9):
    # Number of telescoping steps required to shrink (a,b) below tol.
    nIter = int(math.ceil(-2.078087 * math.log(tol / abs(b - a))))
    R = 0.618033989  # golden ratio
    C = 1.0 - R
    # First telescoping: two interior probe points.
    x1 = R * a + C * b
    x2 = C * a + R * b
    f1, f2 = f(x1), f(x2)
    # Main loop: discard the sub-interval that cannot contain the minimum.
    for _ in range(nIter):
        if f1 > f2:
            a, x1, f1 = x1, x2, f2
            x2 = C * a + R * b
            f2 = f(x2)
        else:
            b, x2, f2 = x2, x1, f1
            x1 = R * a + C * b
            f1 = f(x1)
    return (x1, f1) if f1 < f2 else (x2, f2)
## module printSoln
'''
printSoln(X, Y, freq).
Prints X and Y returned from the differential
equation solvers using printout frequency 'freq'.
freq = n prints every nth step.
freq = 0 prints initial and final values only.
'''
def printSoln(X, Y, freq):

    def printHead(n):
        # BUG FIX: the original wrapped print() inside another print(),
        # which emitted a spurious 'None' after every header column.
        print("\n x ", end=' ')
        for i in range(n):
            print(" y[", i, "] ", end=' ')
        print()

    def printLine(x, y, n):
        # One row: x followed by each solution component in 13.4e format.
        print(f"{x:13.4e}", end=' ')
        for i in range(n):
            print(f"{y[i]:13.4e}", end=' ')
        print()

    m = len(Y)
    try:
        n = len(Y[0])
    except TypeError:
        # Y holds scalars rather than vectors.
        n = 1
    if freq == 0:
        freq = m
    printHead(n)
    for i in range(0, m, freq):
        printLine(X[i], Y[i], n)
    # Always show the final step when the stride skipped it.
    if i != m - 1:
        printLine(X[m - 1], Y[m - 1], n)
## module run_kut_4
'''
X, Y = integrate(F, x, y, xStop, h).
4th-order Runge-Kutta method for solving the
initial value problem {y}' = {F(x, {y})}, where
{y} = {y[0], y[1],...,y[n-1]}.
x, y  = initial conditions
xStop = terminal value of x
h     = increment of x used in integration
F     = user-supplied function that returns the
        array F(x, y) = {y'[0], y'[1],...,y'[n-1]}.
'''
import numpy as np

def integrate(F, x, y, xStop, h):

    def run_kut4(F, x, y, h):
        # Classic RK4 stages; the weighted average gives O(h^5)
        # local truncation error per step.
        K0 = h * F(x, y)
        K1 = h * F(x + h / 2.0, y + K0 / 2.0)
        K2 = h * F(x + h / 2.0, y + K1 / 2.0)
        K3 = h * F(x + h, y + K2)
        return (K0 + 2.0 * K1 + 2.0 * K2 + K3) / 6.0

    xs = [x]
    ys = [y]
    while x < xStop:
        # Shrink the final step so integration lands exactly on xStop.
        h = min(h, xStop - x)
        y = y + run_kut4(F, x, y, h)
        x = x + h
        xs.append(x)
        ys.append(y)
    return np.array(xs), np.array(ys)
Numerical_analysis/Lessons/functions_from_the_book.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Configure a Seamless outgoing port at localhost:8222 # + website="http://localhost:5813" websocketserver="ws://localhost:5138" import os os.environ["SEAMLESS_COMMUNION_ID"] = "docking-clustering" os.environ["SEAMLESS_COMMUNION_OUTGOING"] = "8222" import json import seamless seamless.communionserver.configure_master( transformer_job=True, transformer_result=True, transformer_result_level2=True ) # - # ### Use [Redis](https://redis.io/) as a cache, and as a store for new results redis_sink = seamless.RedisSink() redis_cache = seamless.RedisCache() from seamless.highlevel import Context, Cell, Transformer ctx = Context() # ### Load Seamless graph, previously auto-generated from the [Snakemake file](../../../edit/tests/docking/Snakefile) graph = json.load(open("snakegraph.seamless")) ctx = seamless.highlevel.load_graph(graph) # ### Load docking input files and bind them to the graph's virtual file system # + # HACK: Keep the large pairwise lrmsd file out of the virtual file system ctx.pw_lrmsd = Cell() file = "docking-result-pairwise-lrmsd.txt" print(file) data = open(file).read() ctx.pw_lrmsd = data ctx.jobs.cluster_struc.inputfile_pairwise_lrmsd = ctx.pw_lrmsd inputs = ( "receptor.pdb", "ligand.pdb", "receptor-bound.pdb", "ligand-bound.pdb", "docking-result.dat", #"docking-result-pairwise-lrmsd.txt" ) for file in inputs: print(file) data = open(file).read() ctx.filesystem[file] = data # - # ### Set up initial values. 
This will start the computation ctx.clustering_cutoff = 10 ctx.clustering_cutoff.celltype = "text" ctx.selected_cluster = 6 ctx.selected_cluster.celltype = "text" ctx.filesystem["params/cluster-cutoff"] = ctx.clustering_cutoff ctx.filesystem["params/selected-cluster"] = ctx.selected_cluster # ### Set up a read-out of the results ctx.pdb_backbone = ctx.filesystem["outputs/selected-cluster-aligned-backbone.pdb"] ctx.pdb_sidechain = ctx.filesystem["outputs/selected-cluster-aligned-sidechains.pdb"] ctx.lrmsd = ctx.filesystem["outputs/selected-cluster.lrmsd"] ctx.irmsd = ctx.filesystem["outputs/selected-cluster.irmsd"] ctx.fnat = ctx.filesystem["outputs/selected-cluster.fnat"] ctx.capri_stars = ctx.filesystem["outputs/selected-cluster.stars"] ctx.docking_score = ctx.filesystem["outputs/selected-cluster.ene"] ctx.rank = ctx.filesystem["outputs/selected-cluster.rank"] ctx.cluster_size = ctx.filesystem["outputs/selected-cluster.size"] # ### Wait until execution has finished ctx.equilibrate() # ### Share the inputs and the results over the web cells = ( ctx.clustering_cutoff, ctx.selected_cluster, ctx.pdb_backbone, ctx.pdb_sidechain, ctx.lrmsd, ctx.irmsd, ctx.fnat, ctx.capri_stars, ctx.docking_score, ctx.rank, ctx.cluster_size ) for cell in cells: cell.celltype = "text" cell.share() ctx.equilibrate() from IPython.display import Markdown Markdown(""" ### The inputs and results are now interactively shared over the web. 
# Some helper functions
import ipywidgets as widgets
import traitlets
from collections import OrderedDict
from functools import partial

def connect(cell, widget):
    """Bind a Seamless cell's traitlet to an ipywidgets widget.

    Labels are read-only displays, so they get a one-way link that renders
    the value as text; every other widget is linked bidirectionally.
    """
    trait = cell.traitlet()
    if not isinstance(widget, widgets.Label):
        # Two-way binding: seed the widget, then keep both sides in sync.
        widget.value = trait.value
        traitlets.link((trait, "value"), (widget, "value"))
    else:
        # One-way binding: render None as the empty string.
        traitlets.dlink(
            (trait, "value"),
            (widget, "value"),
            lambda v: "" if v is None else str(v),
        )
accordion.set_title(len(accordion.children)-1, ww.description) ww.description = "" tab.children = tab.children + (accordion,) tab.set_title(len(tab.children)-1, k) # - # ### Create viewer / representation / selection widgets for the PDBs # + ctx.pdbs0 = {} ctx.pdbs0.bound = open("receptor-bound.pdb").read() + open("ligand-bound.pdb").read() ctx.pdbs0.pdb_backbone = ctx.pdb_backbone ctx.pdbs0.pdb_sidechain = ctx.pdb_sidechain ctx.pdbs = ctx.pdbs0 ctx.pdbs.celltype = "plain" ctx.pdbs.share() ctx.code = Context() ctx.structurestate_class = Transformer() #Until Seamless supports modules at the high level #ctx.structurestate_class.code.mount("../struclib/StructureState.py") # bug in Seamless ctx.code.structurestate_class >> ctx.structurestate_class.code # KLUDGE ctx.code.structurestate_class.mount("../struclib/StructureState.py", authority="file") ctx.structurestate_schema = ctx.structurestate_class ctx.load_pdbs = lambda structurestate_schema, pdbs: None ctx.load_pdbs.structurestate_schema = ctx.structurestate_schema ctx.load_pdbs.pdbs = ctx.pdbs ctx.code.load_pdbs >> ctx.load_pdbs.code ctx.code.load_pdbs.mount("../load_pdbs.py", authority="file") ctx.struc_data = ctx.load_pdbs ctx.equilibrate() # - ctx.visualization = "" ctx.visualization.celltype = "text" ctx.visualization.mount("visualization.txt", authority="file") ctx.equilibrate() ctx.visualize = Transformer() ctx.visualize.with_result = True ctx.visualize.structurestate_schema = ctx.structurestate_schema ctx.visualize.struc_data = ctx.struc_data ctx.visualize.visualization = ctx.visualization ctx.code.visualize >> ctx.visualize.code ctx.code.visualize.mount("../visualize.py", authority="file") ctx.visualize_result = ctx.visualize ctx.equilibrate() # + ctx.ngl_representations = Cell() ctx.ngl_representations = ctx.visualize_result.ngl_representations ctx.ngl_representations.celltype = "plain" ctx.selected = Cell() ctx.selected = ctx.visualize_result.table ctx.selected.celltype = "text" ctx.selected.mimetype = 
"html" ctx.equilibrate() # - ctx.visualization.share() ctx.pdbs.share() ctx.ngl_representations.share() ctx.selected.share() ctx.equilibrate() # + import nglview as nv import functools view = nv.NGLWidget() view.components = {} def ngl_set_representations(representations): if representations is None: return for code, representation in representations.items(): if code not in view.components: continue comp = view.components[code] comp.set_representations(representation) def ngl_load_pdbs(pdbs): import seamless checksum = seamless.get_dict_hash(pdbs) if getattr(ngl_load_pdbs, "_checksum", None) == checksum: return ngl_load_pdbs._checksum = checksum for code, comp in view.components.items(): try: view.remove_component(comp) except IndexError: #some bug in NGLWidget? pass view.components.clear() view.clear() if pdbs is None: return for code, pdb in pdbs.items(): struc = nv.TextStructure(pdb,ext="pdb") view.components[code] = view.add_component(struc) ngl_set_representations(ctx.ngl_representations.value) t=ctx.pdbs.traitlet() observer1 = t.observe(lambda change: ngl_load_pdbs(change["new"]), "value") t=ctx.ngl_representations.traitlet() observer2 = t.observe(lambda change: ngl_set_representations(change["new"]), "value") # - import ipywidgets, traitlets selected = ipywidgets.HTML() t = ctx.selected.traitlet() _ = traitlets.directional_link((t, "value"), (selected, "value"), transform=lambda v: v if v is not None else "") t = ctx.visualization.traitlet() visualization = ipywidgets.Textarea() _ = traitlets.directional_link( (t, "value"), (visualization, "value"), transform=lambda v: v if v is not None else "" ) visualization.layout.min_width = "800px" visualization.rows = 10 vis_button = ipywidgets.Button(description="Update visualization") def on_click(_): t.value = visualization.value vis_button.on_click(on_click) # ### Display the widgets from IPython.display import display display(tab) view display(visualization) vis_button selected Markdown(""" # Observable Notebook 
### The PDB can also be visualized (and its visualization edited) using Observable Notebook https://observablehq.com/@sjdv1982/struclib-viewer-gui?RESTSERVER=%{website}&WEBSOCKETSERVER={websocketserver} """.format(website=website,websocketserver=websocketserver))
tests/docking/docking.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Link(object):
    """A singly linked list.

    A Link is either empty (``Link.empty``) or holds a ``first`` value and a
    ``rest`` that is itself a Link (or ``Link.empty``).
    """

    empty = ()

    def __init__(self, first, rest=empty):
        # rest must be another Link or the empty sentinel, never an arbitrary value
        assert rest is Link.empty or isinstance(rest, Link)
        self.first = first
        self.rest = rest

    def __repr__(self):
        """Return a constructor-style representation of the list.

        >>> Link(None)
        Link(None)
        >>> Link(1, Link(2, Link(3)))
        Link(1, Link(2, Link(3)))
        >>> Link(Link(1, Link(2)), Link(3, Link(4)))
        Link(Link(1, Link(2)), Link(3, Link(4)))
        >>> Link(Link(Link(Link('Wow'))))
        Link(Link(Link(Link('Wow'))))
        """
        # Bug fix: the original rendered a nested Link with no rest as
        # "Link(%s)" % self.first.first, which both skipped a level of
        # structure and used str() instead of repr().  Recursing with repr()
        # on both fields handles every case in the doctests, including
        # Link(None) (repr(None) == 'None') and string values.
        if self.rest is Link.empty:
            return 'Link(%s)' % repr(self.first)
        return 'Link(%s, %s)' % (repr(self.first), repr(self.rest))

    def __str__(self):
        """Return an arrow-separated rendering; nested Links get parentheses.

        >>> str(Link('Hello'))
        'Hello'
        >>> str(Link(1, Link(2)))
        '1 -> 2'
        >>> print(Link(1 / 2, Link(1 // 2)))
        0.5 -> 0
        >>> str(Link(Link(1, Link(2, Link(3))), Link(4, Link(5))))
        '(1 -> 2 -> 3) -> 4 -> 5'
        >>> print(Link(Link(Link(Link('Wow')))))
        (((Wow)))
        >>> print(Link(Link('a'), Link(Link('b'), Link(Link('c')))))
        (a) -> (b) -> (c)
        """
        # Render this cell's value (parenthesized if it is itself a Link),
        # then recurse on the rest.  Bug fix: the original's while-loop
        # branch forgot the parentheses when a non-Link head was followed
        # by a Link element later in the list.
        head = '(%s)' % self.first if isinstance(self.first, Link) else str(self.first)
        if self.rest is Link.empty:
            return head
        return '%s -> %s' % (head, str(self.rest))


# Demonstration cells.  The original trailing cells contained bare
# doctest-style ``>>>`` lines (a SyntaxError at module level) and a
# profane variable name; both are fixed here as executable statements.
demo = Link(Link(Link(Link('Wow'))))
demo.first

repr(Link(Link(1, Link(2)), Link(3, Link(4))))

str(Link(Link(1, Link(2)), Link(3, Link(4))))

Link(Link(Link(Link('Wow'))))
Untitled1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd #importing the clean data set file_path = 'clean_df.csv' df = pd.read_csv(file_path) df.head() #importing seaborn to analyise indvidual features using visulaization import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline #lets sea the correlation between variables df.corr() #use the scatter plot to visualize the engine-size against the price sns.regplot(x = 'engine-size', y = 'price', data = df) plt.ylim(0,) #look like a good predictor #let's see the correlation between them df[['engine-size', 'price']].corr() #lets check the hightway-mpg variable sns.regplot(x = 'highway-mpg', y = 'price', data = df) #alos look like a good predictor df[['highway-mpg', 'price']].corr() #let's examine the peak-rpm variable sns.regplot(x = 'peak-rpm', y = 'price', data = df) #we cant use that variable to predict the price as obvious df[['peak-rpm', 'price']].corr() #there is week correlation as we see #what about the stroke variable sns.regplot(x = 'stroke', y = 'price', data = df) #also this is week predictor #lets examine the categorical variable like the body-style sns.boxplot(x = 'body-style', y = 'price', data = df) #the body style overlabs significntly over the price so this's not a good predictor #what about the engine location sns.boxplot(x = 'engine-location', y = 'price', data = df) #hear the destribution of the front and rear are destinct enough to take the varible as a predictor df.describe(include = ['object']) #lets take the value counts of the drive-wheels to a dataframe to examin them drive_wheels_counts = df['drive-wheels'].value_counts().to_frame() drive_wheels_counts.rename(columns = {'drive-wheels':'value-counts'}) drive_wheels_counts.index.name= 'drive-wheels' drive_wheels_counts #this look like it can be a 
good predictor because the values are destibuted among the fwd,rwd,and 4wd #lets check the engine location variable the same way engine_loc_counts = df['engine-location'].value_counts().to_frame() engine_loc_counts.index.name = 'engine-location' engine_loc_counts.rename(columns = {'engine-location':'value-counts'}) engine_loc_counts #we cant use this as a predictor because there is only three cars with rear engine location #lets group by some variable with the price variable to examine them df_group_one = df[['drive-wheels', 'body-style', 'price']] df_group_one #We can then calculate the average price for each of the different categories of data. df_group_one.groupby(['drive-wheels'], as_index = False).mean() #look like the rwd is the most expensive on average #grouping the result df_gptest = df[['drive-wheels','body-style','price']] grouped_test1 = df_gptest.groupby(['drive-wheels','body-style'],as_index=False).mean() grouped_test1 grouped_pivot = grouped_test1.pivot(index='drive-wheels',columns='body-style') grouped_pivot grouped_pivot = grouped_pivot.fillna(0) #fill missing values with 0 grouped_pivot #lets use the heatmap to visualzie the relationship between the price and the body-style #use the grouped results plt.pcolor(grouped_pivot, cmap='RdBu') plt.colorbar() plt.show() # + import numpy as np fig, ax = plt.subplots() im = ax.pcolor(grouped_pivot, cmap='RdBu') #label names row_labels = grouped_pivot.columns.levels[1] col_labels = grouped_pivot.index #move ticks and labels to the center ax.set_xticks(np.arange(grouped_pivot.shape[1]) + 0.5, minor=False) ax.set_yticks(np.arange(grouped_pivot.shape[0]) + 0.5, minor=False) #insert labels ax.set_xticklabels(row_labels, minor=False) ax.set_yticklabels(col_labels, minor=False) #rotate label if too long plt.xticks(rotation=90) fig.colorbar(im) plt.show() # - #wheel base vs price from scipy import stats pearson_coef, p_value = stats.pearsonr(df['wheel-base'], df['price'],) print('the pearson correlation is ', 
pearson_coef , ' and the p value is ', p_value) #horsepower vs price pearson_coef, p_value = stats.pearsonr(df['horsepower'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #lenth vs price pearson_coef, p_value = stats.pearsonr(df['length'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #width vs price pearson_coef, p_value = stats.pearsonr(df['width'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #curb-weight vs price pearson_coef, p_value = stats.pearsonr(df['curb-weight'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #bore vs price pearson_coef, p_value = stats.pearsonr(df['bore'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #city-mpg vs price pearson_coef, p_value = stats.pearsonr(df['city-mpg'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #cighway-mpg vs price pearson_coef, p_value = stats.pearsonr(df['highway-mpg'], df['price']) print('the pearson correlation is ', pearson_coef , ' and the p value is ', p_value) #Let's see if different types 'drive-wheels impact the price grouped_test2 = df_gptest[['drive-wheels', 'price']].groupby(['drive-wheels']) grouped_test2.head() #obtain the values of the method group grouped_test2.get_group('4wd')['price'] #ANOVA f_val , p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price'], grouped_test2.get_group('4wd')['price']) print( "ANOVA results: F=", f_val, ", P =", p_val) #Separately: fwd and rwd f_val , p_val = stats.f_oneway(grouped_test2.get_group('fwd')['price'], grouped_test2.get_group('rwd')['price']) print( "ANOVA results: F=", f_val, ", P =", p_val) #4wd and rwd f_val , p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], 
grouped_test2.get_group('rwd')['price']) print( "ANOVA results: F=", f_val, ", P =", p_val) # + #4wd and fwd f_val , p_val = stats.f_oneway(grouped_test2.get_group('4wd')['price'], grouped_test2.get_group('fwd')['price']) print( "ANOVA results: F=", f_val, ", P =", p_val) # - ''' Conclusion: We now have a better idea of what our data looks like and which variables are important to take into account when predicting the car price. We have narrowed it down to the following variables: Continuous numerical variables: Length Width Curb-weight Engine-size Horsepower City-mpg Highway-mpg Wheel-base Bore Categorical variables: Drive-wheels '''
exploring the data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Brainwave Service Upload # This Notebook is intended to simplify the training / upload process by splitting the two steps into two separate notebooks. In particular, this Notebook is for uploading a previously-trained model to the cloud, and doesn't contain any training code. It does have some sanity check code to ensure you're loading in the right model, before actually uploading it. # Even though these first few cells are repeated in the training Notebooks, it is necessary here since we still must set up the environment to load the model into memory. import os,sys os.environ['KERAS_BACKEND'] = 'tensorflow' os.environ['CUDA_VISIBLE_DEVICES'] = '' import tensorflow as tf import numpy as np from keras import backend as K import tables from tensorflow.python.client import device_lib device_lib.list_local_devices() # %load_ext autoreload # %autoreload 2 # These directories were chosen because they write the data to local disk, which will have the fastest access time # of our various storage options. custom_weights_dir = os.path.expanduser("../weights-floatingpoint-224x224-fixval-best/") custom_weights_dir_q = os.path.expanduser("../weights-quantized-224x224-fixval-best/") saved_model_dir = os.path.expanduser("../machinelearningnotebooks/models/") results_dir = os.path.expanduser("../results-quantized-224x224-fixval/") # ## Prepare Data # Load the files we are going to use for training and testing. The public Top dataset consists of image formatted data, but our data has been preprocessed into a raw form. You will need to edit the paths as necessary. 
from utils import normalize_and_rgb, image_with_label, count_events # + import glob # for 64x64: #datadir = "../data/" # for 224x224: datadir = "../../converted/rotation_224_v1/" data_size = 224 #image width/height n_train_file = 122 n_test_file = 41 n_val_file = 41 train_files = glob.glob(os.path.join(datadir, 'train_file_*')) test_files = glob.glob(os.path.join(datadir, 'test_file_*')) val_files = glob.glob(os.path.join(datadir, 'val_file_*')) #train_files = train_files[:n_train_file] #test_files = test_files[:n_test_file] #val_files = val_files[:n_val_file] n_train_events = count_events(train_files) n_test_events = count_events(test_files) n_val_events = count_events(val_files) print("n_train_events =", n_train_events) print("n_test_events =", n_test_events) print("n_val_events =", n_val_events) # - # ## Construct Model # We use ResNet50 for the featuirzer and build our own classifier using Keras layers. We train the featurizer and the classifier as one model. The weights trained on ImageNet are used as the starting point for the retraining of our featurizer. The weights are loaded from tensorflow checkpoint files. # # Before passing image dataset to the ResNet50 featurizer, we need to preprocess the input file to get it into the form expected by ResNet50. ResNet50 expects float tensors representing the images in BGR, channel last order. Given that our images are greyscale, this isn't relevant to us, as we will simply be copying the data in place. from utils import preprocess_images # We use Keras layer APIs to construct the classifier. Because we're using the tensorflow backend, we can train this classifier in one session with our Resnet50 model. from utils import construct_classifier # Now every component of the model is defined, we can construct the model. Constructing the model with the project brainwave models is two steps - first we import the graph definition, then we restore the weights of the model into a tensorflow session. 
Because the quantized graph definition and the float32 graph definition share the same node names in the graph definitions, we can initially train the weights in float32, and then reload them with the quantized operations (which take longer) to fine-tune the model.
# + tf.reset_default_graph() sess = tf.Session(graph=tf.get_default_graph()) with sess.as_default(): print("Loading quantized model") in_images, image_tensors, features, preds, quantized_featurizer, classifier = construct_model(quantized=True, saved_model_dir=saved_model_dir, starting_weights_directory=custom_weights_dir_q, is_training=False, size=data_size) # - # ## Service Definition # Like in the QuickStart notebook our service definition pipeline consists of three stages. # + from azureml.contrib.brainwave.pipeline import ModelDefinition, TensorflowStage, BrainWaveStage model_def_path = os.path.join(saved_model_dir, 'model_def.zip') model_def = ModelDefinition() #model_def.pipeline.append(TensorflowStage(sess, in_images, image_tensors)) # don't need this for 224x224 (no preprocessing) model_def.pipeline.append(BrainWaveStage(sess, quantized_featurizer)) #model_def.pipeline.append(TensorflowStage(sess, features, preds)) # comment this out to get the features model_def.save(model_def_path) print(model_def_path) # - # ## Deploy # Go to our GitHub repo "docs" folder to learn how to create a Model Management Account and find the required information below. # + import os subscription_id = os.environ.get("SUBSCRIPTION_ID", "80defacd-509e-410c-9812-6e52ed6a0016") resource_group = os.environ.get("RESOURCE_GROUP", "CMS_FPGA_Resources") workspace_name = os.environ.get("WORKSPACE_NAME", "Fermilab") from azureml.core import Workspace try: ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name) ws.write_config() print('Workspace configuration succeeded. You are all set!') except: print('Workspace not found. Run the cells below.') # + from azureml.core import Workspace ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') # - # The first time the code below runs it will create a new service running your model. 
If you want to change the model you can make changes above in this notebook and save a new service definition. Then this code will update the running service in place to run the new model. # + from azureml.core.model import Model from azureml.core.image import Image from azureml.core.webservice import Webservice from azureml.contrib.brainwave import BrainwaveWebservice, BrainwaveImage from azureml.exceptions import WebserviceException model_name = "top-transfer-resnet50-model" image_name = "top-transfer-resnet50-image" service_name = "modelbuild-service-8" registered_model = Model.register(ws, model_def_path, model_name) image_config = BrainwaveImage.image_configuration() deployment_config = BrainwaveWebservice.deploy_configuration() try: service = Webservice(ws, service_name) service.delete() service = Webservice.deploy_from_model(ws, service_name, [registered_model], image_config, deployment_config) service.wait_for_deployment(True) except WebserviceException: service = Webservice.deploy_from_model(ws, service_name, [registered_model], image_config, deployment_config) service.wait_for_deployment(True) # - # The service is now running in Azure and ready to serve requests. We can check the address and port. from azureml.core.webservice import Webservice service_name = "modelbuild-service-8" service = Webservice(ws, service_name) print(service.ip_address + ':' + str(service.port)) # ## Client # There is a simple test client at amlrealtimeai.PredictionClient which can be used for testing. We'll use this client to score an image with our new service. from azureml.contrib.brainwave.client import PredictionClient client = PredictionClient(service.ip_address, service.port) # ## Request # Let's see how our service does on a few images. It may get a few wrong. 
# + from sklearn.metrics import roc_auc_score, accuracy_score, roc_curve from tqdm import tqdm from utils import chunks chunk_size = 1 # Brainwave only processes one request at a time n_test_events = count_events(test_files) #n_test_events = count_events(val_files) #n_test_events = count_events(train_files[:41]) chunk_num = int(n_test_events/chunk_size)+1 y_true = np.zeros((n_test_events,2)) y_feat = np.zeros((n_test_events,1,1,2048)) y_pred = np.zeros((n_test_events,2)) i = 0 for img_chunk, label_chunk, real_chunk_size in tqdm(chunks(test_files, chunk_size, max_q_size=1, shuffle=False), total=chunk_num): results = client.score_numpy_array(img_chunk) y_feat[i,:] = results y_pred[i,:] = classifier.predict(results.reshape(1,1,1,2048))[0,:] y_true[i,:] = label_chunk i+=1 # + # Call the save results utility. from utils import save_results accuracy = accuracy_score(y_true[:,0], y_pred[:,0]>0.5) auc = roc_auc_score(y_true, y_pred) save_results(results_dir, 'b', accuracy, y_true, y_pred, y_feat) print("Accuracy:", accuracy, "AUC:", auc) # - # ## Plot results # If results exist for floating point training, quantization, fine tuning, and Brainwave, this should run without issues. If you have fewer, go to utils.py and comment out the irrelevant lines. from utils import plot_results # %matplotlib inline plot_results(results_dir) # ## Cleanup # Run the cell below to delete your service. service.delete()
project-brainwave/project-brainwave-custom-weights-service-upload.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="LqXsulG-foOX" # # BERTを用いたスロットフィリング # + [markdown] id="DelTRKIBQ9sR" # ## 準備 # + [markdown] id="PCjUXfeKQ_7p" # ### パッケージのインストール # + colab={"base_uri": "https://localhost:8080/"} id="4ZvmMaVVRBmA" outputId="583ef80c-7ad0-4adb-e452-159592383797" # !pip install -q transformers==4.10.2 seqeval==1.2.2 datasets==1.12.1 numpy==1.19.5 scikit-learn==0.24.2 # + [markdown] id="CMqpe5rnf1op" # ### インポート # + id="Gety72pIhW7j" import json from itertools import chain import numpy as np import torch from datasets import load_dataset, load_metric from seqeval.metrics import classification_report from sklearn.model_selection import train_test_split from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer from transformers import DataCollatorForTokenClassification # + [markdown] id="CuXoMTpfhcLu" # ### データのアップロード # # データセットをアップロードします。ノートブックと同じ階層にDataフォルダがあり、その下にsnipsフォルダがあるので、学習・検証用データセットをアップロードしましょう。Colabでない場合は、データセットを読み込むときに正しいパスを指定します。 # + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 112} id="QrdCfa9D7sDx" outputId="ddfb1a80-a731-4a20-f221-1f4c788f6dc1" from google.colab import files uploaded = files.upload() # + [markdown] id="9nJiVB2dhfP4" # ### データの読み込み # + id="2ejxRP-V7xE1" def load_data(filename): with open(filename, encoding="iso-8859-2") as f: datalist = json.load(f) x, y = [], [] for data in datalist["PlayMusic"]: sent = [] tags = [] for phrase in data["data"]: words = phrase["text"].strip().split() if "entity" in phrase: label = phrase["entity"] labels = [f"B-{label}"] + [f"I-{label}"] * (len(words) - 1) 
else: labels = ["O"] * len(words) sent.extend(words) tags.extend(labels) x.append(sent) y.append(tags) return x, y # + id="_W8nWKWMRmKq" train_file = "train_PlayMusic_full.json" test_file = "validate_PlayMusic.json" x_train, y_train = load_data(train_file) x_test, y_test = load_data(test_file) x_train, x_valid, y_train, y_valid = train_test_split( x_train, y_train, test_size=0.3, random_state=42 ) # + [markdown] id="ImDVF-qOhp-V" # ## 前処理 # + colab={"base_uri": "https://localhost:8080/", "height": 145, "referenced_widgets": ["b401a49b43284833aa1c110a9f689728", "0476b1dcb22c4df484da0e718e0dcb03", "8d6f96ef1f6c45ecad1cb95bf287d3ba", "5df7e5ab3348414eabb6f5097b2b717b", "e9875c12a41848ca8c17ad7b877565cb", "926b673fdcab4036abb393de9fe2c689", "34e1d9b2881a47a78f42232dafa364a0", "6ed4558ec9e04f42bdb8fcc2eeb67689", "34902106dfa1426ab268abc4a00356f9", "6615ab0cced4474ba9142411cc3361a4", "e4b1908469aa47959a8ff7faff5822da", "beeefbac03bb44d187780ba4ecde73c2", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0adb06a556d8425fa1bcf91443854c5e", "<KEY>", "<KEY>", "<KEY>", "4569c6e9d685478c9727224d5ca89d38", "<KEY>", "f7ad334167a04959bf325abc4b073e04", "eb8e5c439e214b9e84a9bdc72a013c92", "<KEY>", "<KEY>", "<KEY>", "25b520a026cf409e8d574151b26a126d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "3ded9b7073ad4017945f4b98971c4b79", "75095d9e680546b49cebef4e224f5670", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "d7bb583a0a704583aabe0845ae997821", "<KEY>", "03b1ec8fbbad4c70aab34c46d886ad14"]} id="fJ8HlEyr8bs5" outputId="01aba769-ca12-498f-f02c-a75a585ea932" from transformers import AutoTokenizer model_checkpoint = "bert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) # + id="ZsqMvJkcTmIt" unique_tags = list(set(chain(*y_train))) tag2id = {tag: id for id, tag in enumerate(unique_tags)} id2tag = {id: tag for tag, id in tag2id.items()} # + id="HqCPjDs35Qk5" train_encodings = tokenizer( x_train, is_split_into_words=True, return_offsets_mapping=True, padding=True, 
truncation=True ) valid_encodings = tokenizer( x_valid, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True ) test_encodings = tokenizer( x_test, is_split_into_words=True, return_offsets_mapping=True, padding=True, truncation=True ) # + id="ttRQlUAo5YFE" def encode_tags(tags, encodings): labels = [[tag2id[tag] for tag in doc] for doc in tags] encoded_labels = [] for doc_labels, doc_offset in zip(labels, encodings.offset_mapping): # create an empty array of -100 doc_enc_labels = np.ones(len(doc_offset), dtype=int) * -100 arr_offset = np.array(doc_offset) # set labels whose first offset position is 0 and the second is not 0 doc_enc_labels[(arr_offset[:,0] == 0) & (arr_offset[:,1] != 0)] = doc_labels encoded_labels.append(doc_enc_labels.tolist()) return encoded_labels train_labels = encode_tags(y_train, train_encodings) valid_labels = encode_tags(y_valid, valid_encodings) test_labels = encode_tags(y_test, test_encodings) # + id="zQuOGcICW6wx" class Dataset(torch.utils.data.Dataset): def __init__(self, encodings, labels): self.encodings = encodings self.labels = labels def __getitem__(self, idx): item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()} item['labels'] = self.labels[idx] return item def __len__(self): return len(self.labels) train_encodings.pop("offset_mapping") # we don't want to pass this to the model valid_encodings.pop("offset_mapping") test_encodings.pop("offset_mapping") train_dataset = Dataset(train_encodings, train_labels) valid_dataset = Dataset(valid_encodings, valid_labels) test_dataset = Dataset(test_encodings, test_labels) # + [markdown] id="Gw4JEHKCqmRP" # ## モデルの学習 # # 「BERTを用いた固有表現認識」のノートブックと同様に、モデルを学習していきましょう。 # + colab={"base_uri": "https://localhost:8080/", "height": 154, "referenced_widgets": ["948042711c384ffc98cb649f19413bd9", "02848fb46dbc4c0ab5add4d9ff48623d", "81744952e9e14f11a73243cb583190d8", "<KEY>", "<KEY>", "a7b031940a504f298e8b3e945e4e3cba", "<KEY>", "<KEY>", "<KEY>", 
"<KEY>", "<KEY>"]} id="tcMhH_XS5vtV" outputId="9eb7f65d-929e-4016-dd93-3824b2794277" model = AutoModelForTokenClassification.from_pretrained( model_checkpoint, num_labels=len(unique_tags) ) # + id="ZVHWho0o-OpR" batch_size = 16 args = TrainingArguments( "ner", evaluation_strategy="epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=10, weight_decay=0.01, ) # + id="EV8-Hx05qS6r" data_collator = DataCollatorForTokenClassification(tokenizer) # + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["44963e2054ba4e199f16f87edb48f198", "f8341d2ac6ea45949cc9807c548262a0", "ae01d4b57a304e6eb65af8a1d1725249", "b5938309b747423388ea15429f916835", "<KEY>", "<KEY>", "1ed02fe77b6148f5b056b2e852db455b", "e720b26ee3c542cea32ef6cdbe6ae069", "36d5664893404c0ca2e4891ebf21ef0c", "4cb82d6abe0a48ff8f11fe49752471a0", "ff72cee2fb4a40c9ac2d5b434eff19be"]} id="MrBKf92ZqVjg" outputId="08b7de35-503f-428a-b01e-0b307b5aa9f9" metric = load_metric("seqeval") # + id="ctESjw4pqXqD" def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [id2tag[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [id2tag[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # + id="8ZKCTYKcqa9g" trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=valid_dataset, data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics ) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="PrcWit9kqdxy" 
outputId="0886bcaa-d8d3-415f-d2d9-f32e1c2bdb9f" trainer.train() # + colab={"base_uri": "https://localhost:8080/", "height": 309} id="KtfQGYlbMvDc" outputId="dd728789-2193-4a8d-944d-789e4afa120e" trainer.evaluate(test_dataset) # + [markdown] id="5HUqMTGIq8RK" # ## Reference # # - [Fine-tuning with custom datasets](https://huggingface.co/transformers/custom_datasets.html) # + id="6V6jdhbnrBaj"
ch06/05_BERT_SNIPS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# ## Research Question
# ### Is there a relationship between the life ladder and GDP per capita from 2008 to 2018?
# -

import pandas as pd
import numpy as np

# ### Method chaining
# - Load the dataframe, drop all NA, reset the index, and remove columns not being used
# - Keep the years 2008-2018, reset the index, create a new column and rename it


def load_and_process(url_or_path_to_csv_file):
    """Load the World Happiness CSV at *url_or_path_to_csv_file*, drop rows
    with missing values and the unused columns, keep the years 2008-2018,
    and add a linear 'GDP per capita' column derived from the log column.

    Bug fix: the original body read the hard-coded file 'whreport.csv'
    and silently ignored its argument.
    """
    df1 = (
        pd.read_csv(url_or_path_to_csv_file)
        .dropna()
        .reset_index(drop=True)
        .drop(columns=['Social support', 'Healthy life expectancy at birth',
                       'Freedom to make life choices', 'Generosity',
                       'Perceptions of corruption', 'Positive affect',
                       'Negative affect'])
    )
    df2 = (
        df1
        .loc[df1.year >= 2008]
        .loc[df1.year <= 2018]
        .reset_index(drop=True)
        # 'Log GDP per capita' is natural-log valued, so exp() recovers the level.
        .assign(GDP_per_capita=lambda x: np.exp(x['Log GDP per capita']))
        .rename(columns={'GDP_per_capita': 'GDP per capita'})
    )
    return df2


sampledata = load_and_process('whreport.csv')


# ### Create another helper, load_year
# - to view a single year's rows more neatly and clearly for visualization


def load_year(dataframe, year):
    """Return the rows of *dataframe* whose 'year' equals *year*, reindexed from 0."""
    dfyear = dataframe[dataframe['year'] == year].reset_index(drop=True)
    return dfyear


load_year(sampledata, 2008)

# The cells below rebuild the same cleaned table step by step, without
# method chaining, to show the intermediate results.

df = pd.read_csv('whreport.csv')
df

# ### Remove all columns not being used
del df['Social support']
del df['Healthy life expectancy at birth']
del df['Freedom to make life choices']
del df['Generosity']
del df['Perceptions of corruption']
del df['Positive affect']
del df['Negative affect']

import math  # kept from the original notebook; np.exp is used below instead

# ### Create a column based on an existing one
GDPinValue = round(np.exp(df['Log GDP per capita']), 3)
GDPinValue

df['GDP per capita'] = GDPinValue
GDPinValue

# ### Drop all NA and set the timeframe for data analysis
df = df.dropna()
cleaned_df = df[(df['year'] >= 2008) & (df['year'] <= 2018)]
cleaned_df = cleaned_df.reset_index(drop=True)
cleaned_df

set(cleaned_df['year'])

max(cleaned_df['GDP per capita'])

min(cleaned_df['GDP per capita'])

cleaned_df['GDP per capita'].describe()

# Band GDP per capita into low / medium / high using the min, quartile
# and max values reported by describe() above.
lowgdp = cleaned_df[(cleaned_df['GDP per capita'] >= 761.279) & (cleaned_df['GDP per capita'] <= 4671.568250)]
mediumgdp = cleaned_df[(cleaned_df['GDP per capita'] >= 4671.568250) & (cleaned_df['GDP per capita'] <= 30622.899750)]
highgdp = cleaned_df[(cleaned_df['GDP per capita'] >= 30622.899750) & (cleaned_df['GDP per capita'] <= 114119.337000)]

# Bug fix: gdplist must be assigned before the `conditions` list that
# references it (the original defined it afterwards, raising NameError).
gdplist = cleaned_df['GDP per capita']
conditions = [(gdplist >= 761.279) & (gdplist <= 4671.568250),
              (gdplist >= 4671.568250) & (gdplist <= 30622.899750),
              (gdplist >= 30622.899750) & (gdplist <= 114119.337000)
              ]
values = ['L', 'M', 'H']

# np.select takes the first matching condition, so the shared boundary
# values fall into the lower band.
cleaned_df['sample1'] = np.select(conditions, values)
cleaned_df

# NOTE(review): removed from the original notebook, each of which failed at
# run time or did nothing: a cell rebuilding `df2` from the function-local
# name `df1` (NameError at module scope), a rename and later
# `del cleaned_df['Categories']` on a column that never exists (KeyError),
# a no-op `cleaned_df.assign()`, and a pasted seaborn tutorial cell that
# plotted the undefined `tips_df` with the unimported `sns`.
analysis/Eloise/milestone2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Analysis of Quasi-One-Dimensional Nozzle Flow with Analytical and Numerical Methods # # This notebook covers an in-depth analysis of an isotropic flow through a converging diverging nozzle for both the subsonic and the supersonic regime based on the problem addressed in chapter 7 of <NAME>'s book *Computational Fluid Dynamics: The Basics with Applications*. The inlet of the nozzle is fed from a reservoir and will have a constant pressure and temperature $(p_0, T_0)$. The flow will be subsonic in the converging portion of the nozzle, sonic at the throat (M=1) and supersonic at the exit of the nozzle. # ## Analytical Solution # This problem is solved analytically which gives us a great reference point to try out numerical methods. Let's first look at the analytical solution and then move on to numerical methods to solve the problem using CFD. The analytical solutions for continuity, energy and momentum are as follows. # # Continuity: $\rho_1 V_1 A_1 = \rho_2 V_2 A_2 $ # # Momentum: $p_1 A_1 + \rho_1 V_1^2 A_1 + \int_{A_1}^{A_2}pdA = p_2 A_2 + \rho_2 V_2^2 A_2$ # # Energy: $h_1 + \frac{v_1^2}{2} = h_2 + \frac{V_2^2}{2}$ # # The different subscripts denote the location of the flow inside of the nozzle. In addition to the flow equations, we also have the ideal gas equation: # # $$p = \rho R T$$ # # This flow consists of a calorically perfect gas: # # $$h = c_p T$$ # # # The Mach number in the nozzle is determined by the area ratio $A/A^*$ where $A^*$ is the area of the nozzle at the choke point (M=1). # # # $$\Big (\frac{A}{A^*} \Big)^2 = \frac{1}{M^2}\Big[\frac{2}{\gamma +1}\Big(1+\frac{\gamma-1}{2}M^2\Big)\Big]^{(\gamma+1)/(\gamma-1)}$$ # # Gamma ($\gamma$) is defined as the ratio of specific heats and for air, the standard value is 1.4. 
# # The rest of the parameters can be solved in terms of the Mach number:
# #
# # $$\frac{p}{p_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{(-\gamma)/(\gamma-1)}$$
# #
# #
# # $$\frac{\rho}{\rho_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{-1/(\gamma-1)}$$
# #
# #
# # $$\frac{T}{T_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{-1}$$
# #
# #
# #
# #
# # To view these relationships, we can plot the analytical solution.

import numpy as np #loading in numpy
from matplotlib import pyplot #load matplotlib
import math # load math in

#this makes matplotlib plots appear in the notebook (instead of a separate window)
# %matplotlib inline

# Mach number vs Area Ratio
M = np.linspace(0.4,2,100)
# The isentropic relation gives (A/A*)^2; take the square root so the plotted
# quantity really is the area ratio A/A* (the original plotted the square).
aRatio = np.sqrt((1/M**2)* ((2/(2.4))*(1+(.4/2)*M**2))**((2.4)/(0.4)))
pyplot.plot(aRatio,M)
pyplot.xlabel('Area Ratio')
pyplot.ylabel('Mach Number')

M = np.linspace(0,3,100)
# Pressure Ratio
pRatio = (1+ (0.4/2)*M**2)**(-1.4/0.4)
pyplot.plot(M,pRatio)
pyplot.xlabel('Mach Number')
pyplot.ylabel('Pressure Ratio p/p0')

# Density Ratio
rhoRatio = (1+ (0.4/2)*M**2)**(-1/0.4)
pyplot.plot(M,rhoRatio)
pyplot.xlabel('Mach Number')
pyplot.ylabel('Density Ratio rho/rho0')

# Temperature Ratio
TRatio = (1+ (0.4/2)*M**2)**(-1)
pyplot.plot(M,TRatio)
pyplot.xlabel('Mach Number')
pyplot.ylabel('Temperature Ratio T/T0')

# ## Numerical Solution
# The first step of any numerical solution is to first write down the governing equations for the problem. In this case, we will be looking for a continuity, momentum and energy equation that will describe the flow.

# ### Continuity Equation
# For the continuity equation, we will use the integral finite volume method.
#
# $$\frac{\partial}{\partial t} \iiint \limits_V \rho dV + \iint \limits_S \rho \textbf{V} \cdot \textbf{dS} = 0$$
#
# This equation means that the total rate of change of mass inside of the volume is equal to the amount of stuff crossing the surface of the volume. To relate the volume integral to our physical problem, we can write the following equation:
#
#
# $$\frac{\partial}{\partial t} \iiint \limits_V \rho dV = \frac{\partial}{\partial t} (\rho A dx)$$
#
#
# In this equation the value of dx*A represents an elemental volume inside of our nozzle. It is like a cross sectional area multiplied by some small length of dx. Now for the surface integral, an equation can be written as follows.
#
# $$\iint \limits_S \rho \textbf{V} \cdot \textbf{dS} = -\rho V A + (\rho + d \rho)(V+ dV)(A+dA)$$
#
#
# Expanding this equation and combining it with the previous one, we have a form of the continuity equation in terms of differential elements relating to our system.
#
#
# $$\frac{\partial (\rho A)}{\partial t} + \frac{\partial (\rho A V)}{\partial x} = 0$$

# ### Momentum Equation
# The next step in describing our system is to utilize the momentum equation. Neglecting viscous stress terms and body forces we can represent the momentum equation as shown below.
#
# $$\frac{\partial}{\partial t} \iiint \limits_V (\rho u) dV + \iint \limits_S (\rho u \textbf{V}) \cdot \textbf{dS} = \iint \limits_S (p dS)_x$$
#
# The first two terms can be transformed in a similar way as shown in the previous section but with an additional velocity component.
#
# $$\frac{\partial}{\partial t} \iiint \limits_V (\rho u) dV = \frac{\partial}{\partial t} (\rho V A dx)$$
#
#
# $$\iint \limits_S (\rho u \textbf{V}) \cdot \textbf{dS} = -\rho V^2 A + (\rho + d \rho)(V+ dV)^2(A+dA)$$
#
# The term on the right side of the equal sign represents the pressure around our element of fluid. This term can be represented with the following expression.
#
# $$\iint \limits_S (p dS)_x = -pA + (p+dp)(A+dA) - 2p\Big(\frac{dA}{2}\Big)$$
#
# Combining all of these terms we get the following equation.
# # $$\frac{\partial (\rho V A)}{\partial t} + \frac{\partial (\rho V^2 A)}{\partial x} = -A \frac{\partial p}{\partial x}$$ # # This is the conservation form of this equation, but we want a non-conservation form for our numerical analysis. WE can do this by multiplying the continuity equation by V resulting in the following equation. # # $$V\frac{\partial (\rho A)}{\partial t} + V\frac{\partial (\rho A V)}{\partial x} = 0$$ # # Now we can subtract this equation from the momentum equation we derived just before. With some manipulation, we are left with the final momentum equation. # # $$\rho \frac{\partial V}{\partial t} + \rho u \frac{\partial u}{\partial x} = -\frac{\partial p}{\partial x}$$ # # ### Energy Equation # The integral form of the energy equation with no energy source terms is as follows. # # $$\frac{\partial}{\partial t} \iiint \limits_V \rho \Big(e+\frac{V^2}{2}\Big) dV + \iint \limits_S \rho\Big(e+\frac{V^2}{2}\Big)\textbf{V} \cdot \textbf{dS} = \iint \limits_S (p \textbf{V}) \cdot \textbf{dS}$$ # # With some manipulation, we are able to transform the equation into an equation that represents our system and is in non-conservation form. # # $$\rho \frac{\partial e}{\partial t} + \rho V \frac{\partial e}{\partial x} = -p\frac{\partial V}{\partial x} - pV\frac{\partial(ln A)}{\partial x}$$ # # There is one thing left to consider, and that is the heat coefficient for the fluid. In this case, we are dealing with a calorically perfect gas. # # $$e = c_v T$$ # # Finally we have an energy equation that we can use for our numerical methods. # # # $$\rho c_v\frac{\partial T}{\partial t} + \rho V c_v \frac{\partial T}{\partial x} = -p\frac{\partial V}{\partial x} - pV\frac{\partial(ln A)}{\partial x}$$ # # ### Summary of Equations # # As we take a moment to examine our equations, we can see that the variables are $\rho$, V, p and T but with the use of the ideal gas equation, we can reduce the number of unknowns by one. 
# # $$p = \rho R T$$ # # $$\frac{\partial p}{\partial x} = R \Big( \rho \frac{\partial T}{\partial x} + T \frac{\partial \rho}{\partial x}\Big )$$ # # Now we can implement this equation and reduce the continuity, momentum, and energy equations to only have three unknown variables. The continuity equation is simply expanded and the other two are rearranged with the new definition. # # # Continuity: $\frac{\partial (\rho A)}{\partial t} + \rho A\frac{\partial V}{\partial x}+ \rho V \frac{\partial A}{\partial x}+ VA\frac{\partial \rho}{\partial x} = 0$ # # Momentum: $\rho \frac{\partial V}{\partial t} + \rho V \frac{\partial V}{\partial x} = -R\Big(\rho \frac{\partial T}{\partial x}+ T \frac{\partial \rho}{\partial x}\Big)$ # # Energy: $\rho c_v\frac{\partial T}{\partial t} + \rho V c_v \frac{\partial T}{\partial x} = -\rho R T\Big[\frac{\partial V}{\partial x} + V\frac{\partial(ln A)}{\partial x}\Big]$ # # # At this point, we have our flow equations and we only have a few more things to take care of before we can start writing some programs to solve the problem. # #### Nondimensional Numbers # # In order to simplify our problem, we will use non-dimensional number for our calculations. We can write our non-dimensional temperature and density as follows. # # $$T' = \frac{T}{T_0}$$ # # and # # $$\rho' = \frac{\rho}{\rho_0}$$ # # Additionally, we can define out non-dimensional length with the following equation. # # $$x' = \frac{x}{L}$$ # # Where L is the length of the nozzle. The speed of sound is defined as # # $$a_0 = \sqrt{\gamma R T_0}$$ # # and a nondimensional velocity of # # $$V' = \frac{V}{a_0}$$ # # Time can also be put in a nondimensional form as shown in the following equation. # # $$t' = \frac{t}{L/a_0}$$ # # Lastly, the area can be nondimensionalized by dividing by the throat area where the flow is at $M=1$. # # $$A' = \frac{A}{A^*}$$ # # With all of the relationships we need, we can nondimensionalize our continuity, momentum and state equations. 
# # # Continuity: $\frac{\partial \rho'}{\partial t'} = -\rho ' \frac{\partial V'}{\partial x'} -\rho 'V'\frac{\partial (ln A')}{\partial x'} - V'\frac{\partial \rho'}{\partial x'}$
# #
# # Momentum: $\frac{\partial V'}{\partial t'} = -V' \frac{\partial V'}{\partial x'} - \frac{1}{\gamma}\Big(\frac{\partial T'}{\partial x'} + \frac{T'}{\rho'}\frac{\partial \rho '} {\partial x'}\Big)$
# #
# # Energy: $\frac{\partial T'}{\partial t'} =-V' \frac{\partial T'}{\partial x'} - (\gamma -1)T'\Big[\frac{\partial V'}{\partial x'} +V'\frac{\partial (ln A')}{\partial x'}\Big]$
# #
# #
# # Yayy! We finally made it to the end of the math part (well kinda...). The next step is to take the equations that describe our flow and put it in a form that the computer can understand. In this case, we will be using a finite difference method with MacCormack's explicit technique which uses a predictor-corrector method to help us get second order accuracy without doing an insane amount of algebra and book keeping. Because this is a quasi-one-dimensional problem, we only need to make a grid that is one dimensional.

# Build and display the one-dimensional grid of nodes
numPts = 14   # number of grid points
length = 3    # nondimensional length of our nozzle
node_x = np.linspace(0, length, numPts)  # evenly spaced node locations
grid = np.zeros(numPts)                  # dummy y-values so the nodes can be drawn
pyplot.scatter(node_x, grid)             # plotting the nodes
pyplot.xlabel('Nondimensional Length (L\')')

# The plot above shows the uniform distribution of our points. Now we are ready to start developing the equations that we will use for the finite difference method. The first step is to execute the predictor step. To do this, we calculate the first order derivative at our point of interest at time $t$.
#
# $$\Big(\frac{\partial \rho}{\partial t}\Big)_i^t= -\rho_i^t \frac{V^t_{i+1} - V^t_i}{\Delta x} -\rho_i^t V^t_i\frac{ln A^t_{i+1} - ln A^t_i}{\Delta x} -V_i^t \frac{\rho^t_{i+1} - \rho^t_i}{\Delta x}$$
#
# The same is done for the velocity and temperature.
# # $$\Big(\frac{\partial V}{\partial t}\Big)_i^t= -V_i^t \frac{V^t_{i+1} - V^t_i}{\Delta x} -\frac{1}{\gamma}\Big(\frac{T^t_{i+1} - T^t_i}{\Delta x} +\frac{T^t_i}{\rho^t_i} \frac{\rho^t_{i+1} - \rho^t_i}{\Delta x}\Big)$$ # # # $$\Big(\frac{\partial T}{\partial t}\Big)_i^t= -V_i^t\frac{T^t_{i+1} - T^t_i}{\Delta x} - (\gamma -1)T^t_i\Big(\frac{V^t_{i+1} - V^t_i}{\Delta x} + V^t_i \frac{ln A^t_{i+1} - ln A^t_i}{\Delta x}\Big)$$ # # These values are then used to calculate the predicted step with the following equations # # # $$\bar{\rho}_i^{t+\Delta t} =\rho_i^t +\Big(\frac{\partial \rho}{\partial t}\Big)_i^t \Delta t$$ # # $$\bar{V}_i^{t+\Delta t} =V_i^t +\Big(\frac{\partial V}{\partial t}\Big)_i^t \Delta t$$ # # $$\bar{T}_i^{t+\Delta t} =T_i^t +\Big(\frac{\partial T}{\partial t}\Big)_i^t \Delta t$$ # # # The second part of this technique is the corrector step. The corrector step gets the slope of the predicted value which will be used in the final step to determine $(\partial /\partial t)_{av}$ # # $$\Big(\frac{\bar{\partial \rho}}{\partial t}\Big)_i^{t+\Delta t t}= -\bar{\rho}_i^{t+\Delta t} \frac{\bar{V}^{t+\Delta t}_{i} - \bar{V}^{t+\Delta t}_{i-1}}{\Delta x} -\bar{\rho}_i^{t+\Delta t} \bar{V}^{t+\Delta t}_i\frac{ln \bar{A}^{t+\Delta t}_{i} - ln \bar{A}^{t+\Delta t}_{i-1}}{\Delta x} -\bar{V}_i^{t+\Delta t} \frac{\bar{\rho}^{t+\Delta t}_{i} - \bar{\rho}^{t+\Delta t}_{i-1}}{\Delta x}$$ # # The same is done for the velocity and temperature. 
# # $$\Big(\frac{\bar{\partial V}}{\partial t}\Big)_i^{t+\Delta t}= -\bar{V}_i^{t+\Delta t} \frac{\bar{V}^{t+\Delta t}_{i} - \bar{V}^{t+\Delta t}_{i-1}}{\Delta x} -\frac{1}{\gamma}\Big(\frac{\bar{T}^{t+\Delta t}_{i} - \bar{T}^{t+\Delta t}_{i-1}}{\Delta x} +\frac{\bar{T}^{t+\Delta t}_i}{\bar{\rho}^{t+\Delta t}_i} \frac{\bar{\rho}^{t+\Delta t}_{i} - \bar{\rho}^{t+\Delta t}_{i-1}}{\Delta x}\Big)$$ # # # $$\Big(\frac{\bar{\partial T}}{\partial t}\Big)_i^{t+\Delta t}= -\bar{V}_i^{t+\Delta t}\frac{\bar{T}^{t+\Delta t}_{i} - \bar{T}^{t+\Delta t}_{i-1}}{\Delta x} - (\gamma -1)\bar{T}^{t+\Delta t}_i\Big(\frac{\bar{V}^{t+\Delta t}_{i} - \bar{V}^{t+\Delta t}_{i-1}}{\Delta x} + \bar{V}^{t+\Delta t}_i \frac{ln \bar{A}^{t+\Delta t}_{i} - ln \bar{A}^{t+\Delta t}_{i-1}}{\Delta x}\Big)$$ # # # The partial derivative average can be calculated using the following equation # # # $$\Big(\frac{\partial}{\partial t}\Big)_{av} = \frac{1}{2}\Big[\Big(\frac{\partial}{\partial t}\Big)_i^t+\Big(\frac{\bar{\partial}}{\partial t}\Big)_i^{t+\Delta t}\Big]$$ # # # Finally, the value we want can be calculated (with second order accuracy) using this equation # # # $$\rho_i^{t+\Delta t} = \rho_i^t + \Big(\frac{\partial \rho}{\partial t}\Big)_{av}\Delta t$$ # # $$V_i^{t+\Delta t} = V_i^t + \Big(\frac{\partial V}{\partial t}\Big)_{av}\Delta t$$ # # $$T_i^{t+\Delta t} = T_i^t + \Big(\frac{\partial T}{\partial t}\Big)_{av}\Delta t$$ # # Now that we know how to solve the equations of our flow, let's write a function that can take information from some given nodes and return the values after one time iteration. Doing this now will save us time and space when we write the main code to solve our problem. 
def calcStepSuper(rho,vel,temp,area,gamma,deltat,deltax):
    """Advance the quasi-1D nozzle flow one time step with MacCormack's
    predictor-corrector scheme, using a supersonic exit flow BC.

    rho    : nondimensional density array (updated in place)
    vel    : nondimensional velocity array (updated in place)
    temp   : nondimensional temperature array (updated in place)
    area   : nondimensional nozzle area at each node
    gamma  : ratio of specific heats
    deltat : time step for the elements
    deltax : space between each element

    Returns (rho, vel, temp, dRhoAv, dVAv); the last two are the averaged
    time derivatives, kept for residual monitoring.
    """
    #loop through each element starting at index 1 and ending at index numPts-1 (N-1)
    numElm = len(rho)
    # Work arrays: the fwd/bkwd arrays hold the forward- (predictor) and
    # backward-differenced (corrector) TIME derivatives; the *Bar arrays hold
    # the predicted intermediate field.
    #Density
    fwddRhodt = np.zeros(numElm)
    bkwddRhodt = np.zeros(numElm)
    rhoBar = np.zeros(numElm)
    dRhoAv = np.zeros(numElm)
    #Velocity (these are dV/dt values, despite the original's "dx" naming)
    fwddVdt = np.zeros(numElm)
    bkwddVdt = np.zeros(numElm)
    VBar = np.zeros(numElm)
    dVAv = np.zeros(numElm)
    #Temperature
    fwddTdt = np.zeros(numElm)
    bkwddTdt = np.zeros(numElm)
    TBar = np.zeros(numElm)
    dTAv = np.zeros(numElm)

    ##################
    ####Predictor Step (forward differences at the interior nodes)
    ##################
    for i in range(1,numElm-1):
        # Continuity: d(rho)/dt
        fwddRhodt[i] = -rho[i] * ( (vel[i+1]-vel[i])/deltax) -(rho[i]*vel[i]*(math.log(area[i+1])-math.log(area[i]))/deltax) - (vel[i]*(rho[i+1]-rho[i])/deltax)
        rhoBar[i] = rho[i]+ fwddRhodt[i] * deltat
        # Momentum: dV/dt
        fwddVdt[i] = -vel[i]*((vel[i+1]-vel[i])/deltax)-(1/gamma)*(((temp[i+1]-temp[i])/deltax)+(temp[i]/rho[i])*(rho[i+1]-rho[i])/deltax)
        VBar[i] = vel[i] + fwddVdt[i] * deltat
        # Energy: dT/dt, assembled from named pieces for readability
        a = -vel[i]*(temp[i+1]-temp[i])/deltax
        b = (gamma-1)*temp[i]
        c = (vel[i+1]-vel[i])/deltax
        d = vel[i]*((math.log(area[i+1])-math.log(area[i]))/deltax)
        fwddTdt[i] = a-b*(c+d)
        TBar[i] = temp[i] + (fwddTdt[i]) * deltat

    # BC's for the predictor step: reservoir density/temperature are held
    # fixed at the inlet; velocity floats via linear extrapolation.
    rhoBar[0] = 1
    VBar[0] = 2*VBar[1]-VBar[2]
    TBar[0] = 1

    # We have the values for the predicted values and the slopes
    ###################
    ####Corrector Step (backward differences on the predicted field)
    ###################
    for i in range(1,numElm-1):
        bkwddRhodt[i] = -rhoBar[i] * ( (VBar[i]-VBar[i-1])/deltax) - (rhoBar[i]*VBar[i]*(math.log(area[i])-math.log(area[i-1]))/deltax) -(VBar[i]*(rhoBar[i]-rhoBar[i-1])/deltax)
        bkwddVdt[i] = -VBar[i]*((VBar[i]-VBar[i-1])/deltax)-(1/gamma)*(((TBar[i]-TBar[i-1])/deltax)+(TBar[i]/rhoBar[i])*(rhoBar[i]-rhoBar[i-1])/deltax)
        a = -VBar[i]*(TBar[i]-TBar[i-1])/deltax
        b = (gamma-1)*TBar[i]
        c = (VBar[i]-VBar[i-1])/deltax
        d = VBar[i]*((math.log(area[i])-math.log(area[i-1]))/deltax)
        bkwddTdt[i] = a-b*(c+d)

        # Average predictor and corrector derivatives: second-order accuracy
        dRhoAv[i] = 0.500*(fwddRhodt[i]+bkwddRhodt[i])
        dVAv[i] = 0.500*(fwddVdt[i]+bkwddVdt[i])
        dTAv[i] = 0.500*(fwddTdt[i]+bkwddTdt[i])

        rho[i] = rho[i] + dRhoAv[i] * deltat
        vel[i] = vel[i] + dVAv[i] * deltat
        temp[i] = temp[i] + dTAv[i] * deltat

    #Set BC's for the final values
    #Far left BC: subsonic inflow — rho and T pinned, V extrapolated
    rho[0] = 1
    vel[0] = 2*vel[1]-vel[2]
    temp[0] = 1
    #Far right BC: supersonic outflow — everything extrapolated
    rho[numElm-1]= (2*rho[numElm-2])-rho[numElm-3]
    vel[numElm-1] = (2*vel[numElm-2])-vel[numElm-3]
    temp[numElm-1] = (2*temp[numElm-2])-temp[numElm-3]
    return rho,vel,temp,dRhoAv,dVAv #output new values and d/dt for residuals

# The code above implements the finite difference method we developed earlier. There were several lines of code in that function dealing with some material that we have yet to cover, boundary conditions. We need to establish the boundary conditions to anchor our mathematics to our physical problem. The left side of our grid represents the subsonic inflow from a reservoir; this is our very first node. From there we have our regular nodes governed by our flow equations and at the other end we have a supersonic outflow (we will address a subsonic exit flow later). The Nth node will be our outflow condition. The first step in setting our boundary conditions is to use the method of characteristics for an unsteady, one-dimensional flow. This type of flow is characterized by a hyperbolic PDE which means that the solution has two real characteristic lines. For our subsonic inlet, the left-running line propagates behind the inlet and the right-running characteristic line runs downstream toward the exit of the nozzle. One way to think about these lines is to consider how information travels in a flow. For the subsonic flow, information can travel upstream, because the velocity of the flow is less than the speed of sound (how fast waves propagate in a flow). The supersonic outlet is moving at speeds greater than the speed of sound, so information cannot propagate upstream. Because of this, we must allow one of the variables at the first node to float while the other two flow variables are set. As for the outflow boundary condition, all values are allowed to float due to the fact that the left and right-running characteristic lines are carried downstream.
#
#
# For inlet node:
#
# $$V_1 = 2V_2 - V_3$$
#
# $$\rho_1 =1$$ (Constant)
#
# $$T_1 = 1$$ (Constant)
#
# For outlet node:
#
# $$V_N = 2V_{N-1}-V_{N-2}$$
#
# $$\rho_N = 2\rho_{N-1}-\rho_{N-2}$$
#
# $$T_N = 2T_{N-1}-T_{N-2}$$

# ## Time Step
# For our time step, we must consider the Courant number
#
# $\Delta t = C\frac{\Delta x}{a+V}$
#
# Where a is the speed of sound locally and V is the local velocity. For every element, there will be a different value for a given Courant number and although we can use a local timestep, we will instead just use the minimum of the whole system at a given time step.
#
# ## Nozzle Shape
# For our nozzle shape, we will use the following equation for area
#
# $A = 1+2.2(x-1.5)^2$

# Draw the nozzle contour (radius from area, assuming a circular cross section)
x=np.linspace(0,3,100)
A=1+2.2*(x-1.5)**2
y=(A/3.1415)**0.5   # r = sqrt(A/pi); 3.1415 approximates pi
pyplot.plot(x,y,'black')
pyplot.plot(x,-y,'black')
pyplot.xlim([0,3])
pyplot.ylim([-1.5,1.5])

# ## Initial Conditions
#
#
# It is important to propose good initial conditions for a couple of reasons. One reason is that the closer you are to the final steady state solution, then the fewer iterations you will need to converge on a solution. Another reason is that if the guess is way off, then the simulation can blow up.
# # The initial conditions for this simulation will be
# #
# # $$\rho = 1-0.3146x$$
# #
# # $$T=1-0.2314x$$
# #
# # $$V=(0.1+1.09x)T^{1/2}$$

# ## Running the Simulation (Main Code)
# #
# # We have finally arrived at the spot where we can put it all together. The following code will be a culmination of all of the information we have gathered before. Let's rock and roll!

## Main Script for quasi-1D-nozzle flow
def main(totalIter,numPts): # number of iterations, number of grid points
    """Run `totalIter` MacCormack steps of the supersonic-exit nozzle flow on
    `numPts` uniformly spaced nodes.

    Returns the density, temperature and velocity histories (one column per
    iteration, including the initial condition in column 0), the density and
    velocity residual histories, and the nozzle area distribution.
    """
    #Initialize our grid
    length = 3 # nondimensional length of our nozzle
    gridLoc = np.linspace(0,length,numPts)
    area = 1+2.2*(gridLoc-1.5)**2 #initialize area

    #Initial conditions of flow
    gamma = 1.4
    rho = 1-0.3146*gridLoc
    temp = 1-0.2314*gridLoc
    vel = (0.1+1.09*gridLoc)*temp**0.5
    a = temp**0.5 #speed of sound

    #Residual arrays (start at 1 so the first stored column is non-trivial)
    rhoRes = np.ones(numPts)
    velRes = np.ones(numPts)

    #Calculate first time step: global minimum of the local CFL limits
    cNum = 0.3 # Courant number
    deltax = length/(numPts-1)
    dtLocal = cNum *(deltax/(a+vel))
    deltat = min(dtLocal)

    #Initialize matrices to store our output data
    # set up to be n point number of rows X total iterations
    rhoResults = np.zeros(shape=(numPts,totalIter+1))
    tempResults = np.zeros(shape=(numPts,totalIter+1))
    velResults = np.zeros(shape=(numPts,totalIter+1))
    rhoResidualResults = np.zeros(shape=(numPts,totalIter+1))
    velResidualResults = np.zeros(shape=(numPts,totalIter+1))
    rhoResults[:,0] = rho
    tempResults[:,0] = temp
    velResults[:,0] = vel
    rhoResidualResults[:,0] = rhoRes
    velResidualResults[:,0] = velRes

    #Iterate (renamed from `iter`, which shadowed the builtin)
    for step in range(1, totalIter+1):
        rho,vel,temp,rhoRes,velRes = calcStepSuper(rho,vel,temp,area,gamma,deltat,deltax)
        # Save info in matrix
        rhoResults[:,step] = rho
        tempResults[:,step] = temp
        velResults[:,step] = vel
        rhoResidualResults[:,step] = rhoRes
        velResidualResults[:,step] = velRes
        #Recalculate timestep from the updated field
        a = temp**0.5 #speed of sound
        dtLocal = cNum *(deltax/(a+vel))
        deltat = min(dtLocal)

    return rhoResults,tempResults,velResults,rhoResidualResults,velResidualResults,area

rhoResults,tempResults,velResults,rhoResidualResults,velResidualResults,area = main(1500,31)
print('Done.')

# ## Post Processing Initial Run
# Now that we have the results of our first simulation, let's look at the results and the iteration steps.

# +
#Lets plot our results
x = np.linspace(0,3,len(rhoResults[:,-1]))
fig, ax1 = pyplot.subplots()
ax1.plot(x,rhoResults[:,-1],label='Nondim density')
ax1.plot(x,tempResults[:,-1],label='Nondim Temp')
ax1.plot(x,tempResults[:,-1]*rhoResults[:,-1],label='Nondim Pressure')
ax1.set_xlabel('Nondimensional length (x)')
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('den,temp,pres')
ax1.tick_params('y')
ax1.legend(loc=6)

ax2 = ax1.twinx()
ax2.plot(x,velResults[:,-1]/tempResults[:,-1]**0.5,'y',label='Mach')
ax2.set_ylabel('Mach Number (M)')
ax2.tick_params('y')
ax2.legend(loc = 8)

fig.tight_layout()
pyplot.show()
# -

# Density vs iteration at throat of nozzle (the middle node)
pyplot.plot(rhoResults[int(len(rhoResidualResults)/2),:])
pyplot.xlabel('Iteration')
pyplot.ylabel('Nondimensional density')
pyplot.title('Nondimensional Density vs Iteration')

# Temp vs iteration at throat of nozzle
pyplot.plot(tempResults[int(len(rhoResidualResults)/2),:])
pyplot.xlabel('Iteration')
pyplot.ylabel('Nondimensional temperature')
pyplot.title('Nondimensional Temperature vs Iteration')

# Pressure vs iteration at throat of nozzle (p' = rho' * T')
pyplot.plot(rhoResults[int(len(rhoResidualResults)/2)]*tempResults[int(len(rhoResidualResults)/2),:])
pyplot.xlabel('Iteration')
pyplot.ylabel('Nondimensional pressure')
pyplot.title('Nondimensional Pressure vs Iteration')

# Mach vs iteration at throat of nozzle (M = V'/sqrt(T'))
pyplot.plot(velResults[int(len(rhoResidualResults)/2),:]/tempResults[int(len(rhoResidualResults)/2),:]**0.5)
pyplot.xlabel('Iteration')
pyplot.ylabel('Mach Number')
pyplot.title('Mach Number vs Iteration')

# As we can see above, the values begin to converge around 600 iterations. By looking at the mach number plot, we can see that the selected location is $A^*$ because the mach number converges at $M=1$. As shown in all of the parameters, the values begin with a great rate of change in value. As the iterations continue, the solution settles around a single value. These big oscillations in the beginning are due to unsteady compression and expansion waves that move within the nozzle.
#
# We can also look at the residuals for our simulation as a function of iterations. The residuals represent the change in solution from iteration to iteration. This value can be obtained by plotting the value of $(\partial \rho /\partial t)_{av}$ and $(\partial V /\partial t)_{av}$ against the iterations.

pyplot.plot(abs(rhoResidualResults[int(len(rhoResidualResults)/2),:]),label="Density Residual")
pyplot.plot(abs(velResidualResults[int(len(rhoResidualResults)/2),:]),label="Velocity Residual")
pyplot.yscale('log')
pyplot.xlabel('Iteration')
pyplot.ylabel('Residual')
pyplot.legend()
pyplot.title('Residuals')

# At 1500 iterations, the residual is still going down at a steady rate. If we were to increase the iterations to 5000 we would see the residual plateau. Another metric to look at is the mass flow parameter. We can calculate mass flux with the following equation.
#
#
# $$\dot{m} = \rho V A$$
#
#
# If our steady state solution represents physics well, then that value should be constant at any point in the nozzle.
# # $$\rho V A = const$$

# Rerun the simulation with more iterations
rhoResults,tempResults,velResults,rhoResidualResults,velResidualResults,area = main(2000,61)
# Mass flow at every node for every iteration: mdot' = rho' * V' * A'.
# area has shape (numPts,), the result matrices (numPts, iters+1), so
# broadcast area down the columns instead of the original triple transpose.
mdot = rhoResults*velResults*area[:,np.newaxis]
pyplot.figure(figsize=(5, 8))
pyplot.plot(mdot[:,0],label='iter=0')
pyplot.plot(mdot[:,50],label='iter=50')
pyplot.plot(mdot[:,100],label='iter=100')
pyplot.plot(mdot[:,500],label='iter=500')
pyplot.plot(mdot[:,1000],label='iter=1000')
pyplot.plot(mdot[:,2000],label='iter=2000')
pts = 10
pyplot.scatter(np.linspace(0,60,pts),np.ones(pts)*0.579,label = 'analytic solution')
pyplot.xlim([0,60])
pyplot.legend()

# This plot shows us that as the iterations continue, the numerical solution gets closer to a flat line and approaches the analytical solution which turns out to be 0.579.
#
# To further compare our results to the analytical solution, we can plot our steady state results against analytical data points.

x = np.linspace(0,3,len(rhoResults[:,-1]))
pyplot.plot(x,rhoResults[:,-1],label='Nondim density')
pyplot.scatter([0.0,.5,1,1.5,2,2.5,3],[0.995,0.983,0.920,0.634,0.258,0.105,0.052],color='r')
pyplot.xlim([0,3])
pyplot.xlabel('x/L')
pyplot.ylabel('Nondimensional density')
pyplot.title('Numerical vs Analytical Density')

pyplot.plot(x,velResults[:,-1]/tempResults[:,-1]**0.5,label='Mach')
pyplot.scatter([0.0,.5,1,1.5,2,2.5,3],[0.098,0.185,0.413,1,1.896,2.706,3.359],color='r')
pyplot.xlim([0,3])
pyplot.xlabel('x/L')
pyplot.ylabel('Mach Number')
pyplot.title('Numerical vs Analytical Mach Number')

# ## Solution of Purely Subsonic Isentropic Nozzle Flow
# The difference between this version of the problem and the one we just solved is that for purely subsonic flow, there are an infinite number of solutions depending on the pressure ratio between the inlet and outlet. For a subsonic flow, the fluid will speed up as it approaches the throat but because the flow has not reached the speed of sound, the flow will slow down as the nozzle expands. The following equations govern the purely subsonic flow.
#
# $$\frac{p_e}{p_0} = \Big(1+ \frac{\gamma-1}{2}M_e^2\Big)^{-\gamma/(\gamma-1)}$$
#
# Here, $M_e$ and $p_e$ represent the mach number and pressure at the exit respectively. $p_0$ is the pressure at the inlet.
#
# $$\Big(\frac{A_e}{A^*}\Big)^2 = \frac{1}{M_e^2}\Big[\frac{2}{\gamma+1}\Big(1+\frac{\gamma-1}{2}M_e^2\Big)\Big]^{(\gamma+1)/(\gamma-1)}$$
#
# $A^*$ is a reference value for the above equation. There is no physical $A^*$, because that is where the flow would theoretically go to $M=1$. This theoretical value will provide the value for the local mach number using the equation below.
#
#
# $$\Big (\frac{A}{A^*} \Big)^2 = \frac{1}{M^2}\Big[\frac{2}{\gamma +1}\Big(1+\frac{\gamma-1}{2}M^2\Big)\Big]^{(\gamma+1)/(\gamma-1)}$$
#
# The rest of the local parameters can be solved just as before:
#
# $$\frac{p}{p_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{(-\gamma)/(\gamma-1)}$$
#
#
# $$\frac{\rho}{\rho_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{-1/(\gamma-1)}$$
#
#
# $$\frac{T}{T_0} = \Big(1+\frac{\gamma-1}{2}M^2\Big)^{-1}$$
#
# For this simulation, we will model the nozzle with the following equations (the diverging-section coefficient is 0.2223, matching the code below).
#
# $$\frac{A}{A_t} = 1+2.2\Big(\frac{x}{L} - 1.5\Big)^2 \quad \text{for } 0\leq\frac{x}{L}\leq1.5$$
#
# $$\frac{A}{A_t} = 1+0.2223\Big(\frac{x}{L} - 1.5\Big)^2 \quad \text{for } 1.5\leq\frac{x}{L}\leq3.0$$

# +
x = np.linspace(0,3,100)
# Piecewise area distribution: steep converging section, shallow diverging
# section. np.where evaluates the whole grid at once (values identical to the
# original element-by-element loop; both branches give A=1 at x=1.5).
area = np.where(x < 1.5,
                1+2.2*(x-1.5)**2,
                1+0.2223*(x-1.5)**2)
y=(area/3.1415)**0.5
pyplot.figure(figsize=(8, 4))
pyplot.plot(x,y,'black')
pyplot.plot(x,-y,'black')
pyplot.xlim([0,3])
pyplot.ylim([-1.5,1.5])
# -

# The governing equations for the simulation are the same but the main difference will be the boundary conditions at the inlet and outlet of the nozzle. The inlet BC is handled the same way as before, but this time we must address the outflow boundary conditions. Because the flow is subsonic, information can travel upstream and therefore we need to let two variables float. For this reason, we will only set one variable, and that will be the exit pressure. As you may have noticed, we have been dealing with density, velocity and temperature, so to set the value of p we use the following equation.
#
# $$p = \rho RT$$
#
# In non-dimensional terms, it reduces to
#
# $$p'_e = \rho'_eT'_e = specified$$
#
# To apply this boundary condition to our flow, we first extrapolate temperature or density, and then use that extrapolated value with the relationship for pressure and get the other value. You can start with temperature and get density or vice versa, but the end result should ultimately be the same. Here is what it would look like if we first extrapolated temperature.
#
# First extrapolate to get the value of $T'$ at the outflow boundary.
#
# $$T'_N = 2T'_{N-1}-T'_{N-2}$$
#
# With the new value for T, calculate density with the set pressure boundary condition.
#
# $$\rho'_N = \frac{p'_N}{T'_N} = \frac{specified value}{T'_N}$$
#
#
# Lastly, we can extrapolate the value for velocity as such:
#
# $$V'_N = 2V'_{N-1} - V'_{N-2}$$
#
#
# Now let us modify the function we made before with the new boundary condition. The input to the function will be the same except for a set value for the exit pressure.
def calcStepSub(exitPres,rho,vel,temp,area,gamma,deltat,deltax):
    """
    Advance the quasi-1D nozzle flow by one MacCormack predictor-corrector
    time step with a SUBSONIC exit-flow BC (the exit pressure is specified).

    exitPres : specified non-dimensional exit pressure p'_e
    rho      : density array (one value per grid node)
    vel      : velocity array
    temp     : temperature array
    area     : nozzle area at each grid node
    gamma    : ratio of specific heats
    deltat   : time step
    deltax   : spacing between grid nodes

    Returns (rho, vel, temp, dRhoAv, dVAv): the fields updated in place, plus
    the averaged time derivatives of density and velocity, which the caller
    records as residuals.
    """
    # Interior nodes run from index 1 to numElm-2; indices 0 and numElm-1
    # are set by the boundary conditions at the end of the step.
    numElm = len(rho)

    # Work arrays.  NOTE: despite the "dx" in some of the names below,
    # fwddvdx/bkwddvdx and fwddTdx/bkwddTdx hold *time* derivatives
    # (dV/dt, dT/dt), exactly like fwddRhodt/bkwddRhodt.
    #Density
    fwddRhodt = np.zeros(numElm)
    bkwddRhodt = np.zeros(numElm)
    rhoBar = np.zeros(numElm)
    dRhoAv = np.zeros(numElm)
    #Velocity
    fwddvdx = np.zeros(numElm)
    bkwddvdx = np.zeros(numElm)
    VBar = np.zeros(numElm)
    dVAv = np.zeros(numElm)
    #Temperature
    fwddTdx = np.zeros(numElm)
    bkwddTdx = np.zeros(numElm)
    TBar = np.zeros(numElm)
    dTAv = np.zeros(numElm)

    ##################
    #### Predictor Step (forward differences)
    ##################
    for i in range(1,numElm-1):
        # Continuity: drho/dt (log-area form of the area-source term)
        fwddRhodt[i] = -rho[i] * ( (vel[i+1]-vel[i])/deltax) -(rho[i]*vel[i]*(math.log(area[i+1])-math.log(area[i]))/deltax) - (vel[i]*(rho[i+1]-rho[i])/deltax)
        rhoBar[i] = rho[i]+ fwddRhodt[i] * deltat
        # Momentum: dV/dt
        fwddvdx[i] = -vel[i]*((vel[i+1]-vel[i])/deltax)-(1/gamma)*(((temp[i+1]-temp[i])/deltax)+(temp[i]/rho[i])*(rho[i+1]-rho[i])/deltax)
        VBar[i] = vel[i] + fwddvdx[i] * deltat
        # Energy: dT/dt, assembled in pieces for readability
        a = -vel[i]*(temp[i+1]-temp[i])/deltax
        b = (gamma-1)*temp[i]
        c = (vel[i+1]-vel[i])/deltax
        d = vel[i]*((math.log(area[i+1])-math.log(area[i]))/deltax)
        fwddTdx[i] = a-b*(c+d)
        TBar[i] = temp[i] + (fwddTdx[i]) * deltat

    # Predicted inflow boundary: density and temperature held at reservoir
    # values, velocity extrapolated from the interior.
    rhoBar[0] = 1
    TBar[0] = 1
    VBar[0] = 2*vel[1]-vel[2]

    ###################
    #### Corrector Step (backward differences on the predicted values)
    ###################
    for i in range(1,numElm-1):
        bkwddRhodt[i] = -rhoBar[i] * ( (VBar[i]-VBar[i-1])/deltax) - (rhoBar[i]*VBar[i]*(math.log(area[i])-math.log(area[i-1]))/deltax) -(VBar[i]*(rhoBar[i]-rhoBar[i-1])/deltax)
        bkwddvdx[i] = -VBar[i]*((VBar[i]-VBar[i-1])/deltax)-(1/gamma)*(((TBar[i]-TBar[i-1])/deltax)+(TBar[i]/rhoBar[i])*(rhoBar[i]-rhoBar[i-1])/deltax)
        a = -VBar[i]*(TBar[i]-TBar[i-1])/deltax
        b = (gamma-1)*TBar[i]
        c = (VBar[i]-VBar[i-1])/deltax
        d = VBar[i]*((math.log(area[i])-math.log(area[i-1]))/deltax)
        bkwddTdx[i] = a-b*(c+d)

        # Average the forward (predictor) and backward (corrector) derivatives...
        dRhoAv[i] = 0.500*(fwddRhodt[i]+bkwddRhodt[i])
        dVAv[i] = 0.500*(fwddvdx[i]+bkwddvdx[i])
        dTAv[i] = 0.500*(fwddTdx[i]+bkwddTdx[i])

        # ...and advance the interior nodes in time (in place).
        rho[i] = rho[i] + dRhoAv[i] * deltat
        vel[i] = vel[i] + dVAv[i] * deltat
        temp[i] = temp[i] + dTAv[i] * deltat

    #Set BC's for the final values
    #Far left BC: subsonic inflow -- density and temperature fixed,
    #velocity floats (extrapolated from the interior)
    rho[0] = 1
    vel[0] = 2*vel[1]-vel[2]
    temp[0] = 1
    #Far right BC (DIFFERENT FOR SUBSONIC OUTFLOW): extrapolate density, then
    #back temperature out of the specified exit pressure (p' = rho'*T'),
    #and extrapolate velocity.
    rho[numElm-1] = 2*rho[numElm-2]-rho[numElm-3]
    temp[numElm-1] = exitPres/rho[numElm-1]
    vel[numElm-1] = 2*vel[numElm-2]-vel[numElm-3]

    return rho,vel,temp,dRhoAv,dVAv #output new values and d/dt for residuals

# ## Main Code for Purely Subsonic Flow

# +
## Main Script for quasi-1D-nozzle flow
def mainSub(totalIter,numPts,exitPres): # number of iterations, number of grid points, exit pressure
    """
    Run the purely subsonic nozzle simulation for totalIter time steps on a
    numPts-node grid with the given non-dimensional exit pressure.

    Returns (rhoResults, tempResults, velResults, rhoResidualResults,
    velResidualResults, area); each *Results matrix stores one column per
    iteration, with column 0 holding the initial condition.
    """
    #Initialize our grid
    length = 3 # nondimensional length of our nozzle
    gridLoc = np.linspace(0,length,numPts)
    #initialize area: A/A_t converges for x < 1.5 and diverges for x >= 1.5
    area=np.zeros(numPts)
    index = 0
    for i in gridLoc:
        if i<1.5:
            area[index] = 1+2.2*(i-1.5)**2
        else:
            area[index] = 1+0.2223*(i-1.5)**2
        index =index+1

    #Initial conditions of flow (linear profiles in x)
    gamma = 1.4
    rho = 1-0.023*gridLoc
    temp = 1-0.009333*gridLoc
    vel = 0.05+0.11*gridLoc
    a = temp**0.5 #speed of sound

    #Residual array allocation
    rhoRes = np.ones(numPts)
    velRes = np.ones(numPts)

    #Calculate first time step from the CFL condition
    cNum = 0.55 # Courant number
    deltax = length/(numPts-1)
    dtLocal = cNum *(deltax/(a+vel))
    deltat = min(dtLocal)

    #Initialize matrices to store our output data
    # set up to be numPts rows X (totalIter+1) columns
    rhoResults = np.zeros(shape=(numPts,totalIter+1))
    tempResults = np.zeros(shape=(numPts,totalIter+1))
    velResults = np.zeros(shape=(numPts,totalIter+1))
    rhoResidualResults = np.zeros(shape=(numPts,totalIter+1))
    velResidualResults = np.zeros(shape=(numPts,totalIter+1))

    rhoResults[:,0] = rho
    tempResults[:,0] = temp
    velResults[:,0] = vel
    rhoResidualResults[:,0] = rhoRes
    velResidualResults[:,0] = velRes

    step = 1 # renamed from `iter` to avoid shadowing the builtin
    #Iterate
    while step <= totalIter:
        rho,vel,temp,rhoRes,velRes = calcStepSub(exitPres,rho,vel,temp,area,gamma,deltat,deltax)
        # Save info in matrix
        rhoResults[:,step] = rho
        tempResults[:,step] = temp
        velResults[:,step] = vel
        rhoResidualResults[:,step] = rhoRes
        velResidualResults[:,step] = velRes
        #Recalculate timestep (the CFL limit moves with the solution)
        a = temp**0.5 #speed of sound
        dtLocal = cNum *(deltax/(a+vel))
        deltat = min(dtLocal)
        # Add 1 to iteration
        step = step+1
    return rhoResults,tempResults,velResults,rhoResidualResults,velResidualResults,area

# Run simulation
rhoResults,tempResults,velResults,rhoResidualResults,velResidualResults,area = mainSub(5000,61,0.93)
print('Done.')
# -

# Now with the new simulation complete, let's look at our residuals

# Residual history at the mid-grid node (len() of a 2D array is its row
# count, numPts, so this picks the middle of the nozzle).
pyplot.plot(abs(rhoResidualResults[int(len(rhoResidualResults)/2),:]),label="Density Residual")
pyplot.plot(abs(velResidualResults[int(len(rhoResidualResults)/2),:]),label="Velocity Residual")
pyplot.yscale('log')
pyplot.xlabel('Iteration')
pyplot.ylabel('Residual')
pyplot.legend()
pyplot.title('Residuals')

# For this simulation, the residuals showed a constant downward trend. This tells us that our simulation did not blow up, but is only converged to around E-3 accuracy. To understand more about how the solution converged let's take a look at mass flow rate as the iterations progressed.

# Non-dimensional mass flow rate rho'*V'*A at every node and iteration.
mdot = np.transpose(np.transpose(rhoResults)*np.transpose(velResults)*area)
pyplot.figure(figsize=(6, 6))
x = np.linspace(0,3,61)
pyplot.plot(x,mdot[:,0],label='iter=0')
pyplot.plot(x,mdot[:,500],label='iter=500')
pyplot.plot(x,mdot[:,5000],label='iter=5000')
pyplot.legend(loc=1)
pyplot.xlabel('x/L')
pyplot.ylabel('mass flow rate')

# As one can see from this plot, the values of mass flow rate converge on a constant value as expected. This value seems to match up well with the CFD results from Anderson's book. Additionally, we can examine how the pressure changed over the time steps.

# Non-dimensional pressure p' = rho'*T'.
pressure = rhoResults*tempResults
x = np.linspace(0,3,61)
# BUG FIX: this cell is labelled "Pressure" and computes `pressure`, but the
# original plotted rhoResults (density); plot the pressure it computed.
pyplot.plot(x,pressure[:,500],label='500 iter')
pyplot.plot(x,pressure[:,1000],label='1000 iter')
pyplot.plot(x,pressure[:,5000],label='5000 iter')
pyplot.xlim([0,3])
pyplot.legend()
pyplot.xlabel('x/L')
pyplot.ylabel('Pressure (nondimensionalized)')

# +
# Let's plot our results
x = np.linspace(0,3,len(rhoResults[:,-1]))
fig, ax1 = pyplot.subplots(figsize=(10, 6))
ax1.plot(x,rhoResults[:,-1],label='Nondim density')
ax1.plot(x,tempResults[:,-1],label='Nondim Temp')
ax1.plot(x,tempResults[:,-1]*rhoResults[:,-1],label='Nondim Pressure')
ax1.set_xlabel('Nondimensional length (x)')
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('den,temp,pres')
ax1.tick_params('y')
ax1.legend(loc=6)

# Second y-axis: local Mach number M = V'/sqrt(T') (non-dimensional speed of
# sound is sqrt(T')).
ax2 = ax1.twinx()
ax2.plot(x,velResults[:,-1]/tempResults[:,-1]**0.5,'y',label='Mach')
ax2.set_ylabel('Mach Number (M)')
ax2.tick_params('y')
ax2.legend(loc = 1)
fig.tight_layout()
pyplot.show()
# -
nozzleFlow/nozzleNonconservationForm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GRAPE calculation of control fields for iSWAP implementation # <NAME> (<EMAIL>) # %matplotlib inline import matplotlib.pyplot as plt import time import numpy as np from qutip import * from qutip.control import * T = 1 times = np.linspace(0, T, 100) # + U = iswap() R = 50 H_ops = [#tensor(sigmax(), identity(2)), #tensor(sigmay(), identity(2)), #tensor(sigmaz(), identity(2)), #tensor(identity(2), sigmax()), #tensor(identity(2), sigmay()), #tensor(identity(2), sigmaz()), tensor(sigmax(), sigmax()), tensor(sigmay(), sigmay()), tensor(sigmaz(), sigmaz())] H_labels = [#r'$u_{1x}$', #r'$u_{1y}$', #r'$u_{1z}$', #r'$u_{2x}$', #r'$u_{2y}$', #r'$u_{2z}$', r'$u_{xx}$', r'$u_{yy}$', r'$u_{zz}$', ] # - H0 = 0 * np.pi * (tensor(sigmaz(), identity(2)) + tensor(identity(2), sigmaz())) # # GRAPE from qutip.control.grape import plot_grape_control_fields, _overlap, grape_unitary_adaptive, cy_grape_unitary from scipy.interpolate import interp1d from qutip.ui.progressbar import TextProgressBar # + u0 = np.array([np.random.rand(len(times)) * (2 * np.pi / T) * 0.01 for _ in range(len(H_ops))]) u0 = [np.convolve(np.ones(10)/10, u0[idx, :], mode='same') for idx in range(len(H_ops))] # - result = cy_grape_unitary(U, H0, H_ops, R, times, u_start=u0, eps=2*np.pi/T, progress_bar=TextProgressBar()) # + #result = grape_unitary(U, H0, H_ops, R, times, u_start=u0, eps=2*np.pi/T, # progress_bar=TextProgressBar()) # - # ## Plot control fields for iSWAP gate in the presense of single-qubit tunnelling plot_grape_control_fields(times, result.u / (2 * np.pi), H_labels, uniform_axes=True); # compare to the analytical results np.mean(result.u[-1,0,:]), np.mean(result.u[-1,1,:]), np.pi/(4 * T) # ## Fidelity U result.U_f.tidyup(1e-2) _overlap(U, result.U_f).real # ## Test numerical 
integration of GRAPE pulse c_ops = [] U_f_numerical = propagator(result.H_t, times[-1], c_ops, args={}) U_f_numerical _overlap(U, U_f_numerical).real # # Process tomography # ## Ideal iSWAP gate op_basis = [[qeye(2), sigmax(), sigmay(), sigmaz()]] * 2 op_label = [["i", "x", "y", "z"]] * 2 # + fig = plt.figure(figsize=(8,6)) U_ideal = spre(U) * spost(U.dag()) chi = qpt(U_ideal, op_basis) fig = qpt_plot_combined(chi, op_label, fig=fig, threshold=0.001) # - # ## iSWAP gate calculated using GRAPE # + fig = plt.figure(figsize=(8,6)) U_ideal = to_super(result.U_f) chi = qpt(U_ideal, op_basis) fig = qpt_plot_combined(chi, op_label, fig=fig, threshold=0.001) # - # ## Versions # + from qutip.ipynbtools import version_table version_table()
qutip-notebooks-master/examples/control-grape-iswap.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/tensorflow/tpu/blob/master/tools/colab/fashion_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="N6ZDpd9XzFeN" # ##### Copyright 2018 The TensorFlow Hub Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + cellView="form" colab={} colab_type="code" id="KUu4vOt5zI9d" # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # + [markdown] colab_type="text" id="edfbxDDh2AEs" # ## Fashion MNIST with Keras and TPUs # + [markdown] colab_type="text" id="RNo1Vfghpa8j" # ## Overview # # In this example, you can try out using tf.keras and Cloud TPUs to train a model on the fashion MNIST dataset. The model trains for 10 epochs on Cloud TPU and takes approximately 2 minutes to run. # # This notebook is hosted on GitHub. To view it in its original repository, after opening the notebook, select **File > View on GitHub**. 
# + [markdown] colab_type="text" id="dgAHfQtuhddd"
# ## Learning objectives
#
# In this Colab, you will learn how to:
# * Code for a standard conv-net that has 3 layers with drop-out and batch normalization between each layer in Keras.
# * Create and compile the model under a distribution strategy in order to use TPUs.
# * Run a prediction to see how well the model can predict fashion categories and output the result.

# + [markdown] colab_type="text" id="QrprJD-R-410"
# ## Instructions

# + [markdown] colab_type="text" id="_I0RdnOSkNmi"
# <h3> &nbsp;&nbsp;Train on TPU&nbsp;&nbsp; <a href="https://cloud.google.com/tpu/"><img valign="middle" src="https://raw.githubusercontent.com/GoogleCloudPlatform/tensorflow-without-a-phd/master/tensorflow-rl-pong/images/tpu-hexagon.png" width="50"></a></h3>
#
# 1. On the main menu, click Runtime and select **Change runtime type**. Set "TPU" as the hardware accelerator.
# 1. Click Runtime again and select **Runtime > Run All**. You can also run the cells manually with Shift-ENTER.

# + [markdown] colab_type="text" id="5eEM-XOvURoU"
# TPUs are located in Google Cloud. For optimal performance, they read data directly from Google Cloud Storage (GCS).

# + [markdown] colab_type="text" id="Lvo0t7XVIkWZ"
# ## Data, model, and training

# + [markdown] colab_type="text" id="MICrRv8rmXVq"
# Begin by downloading the fashion MNIST dataset using `tf.keras.datasets`, as shown below.
# + colab={} colab_type="code" id="Zo-Yk6LFGfSf" import tensorflow as tf import numpy as np import os import distutils if distutils.version.LooseVersion(tf.__version__) <= '2.0': raise Exception('This notebook is compatible with TensorFlow 1.14 or higher, for TensorFlow 1.13 or lower please use the previous version at https://github.com/tensorflow/tpu/blob/r1.13/tools/colab/fashion_mnist.ipynb') (x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data() # add empty color dimension x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) # + [markdown] colab_type="text" id="Hgc2FZKVMx15" # ### Define the model # # The following example uses a standard conv-net that has 3 layers with drop-out and batch normalization between each layer. # + colab={} colab_type="code" id="W7gMbs70GxA7" def create_model(): model = tf.keras.models.Sequential() model.add(tf.keras.layers.BatchNormalization(input_shape=x_train.shape[1:])) model.add(tf.keras.layers.Conv2D(64, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=x_train.shape[1:])) model.add(tf.keras.layers.Conv2D(128, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.BatchNormalization(input_shape=x_train.shape[1:])) model.add(tf.keras.layers.Conv2D(256, (5, 5), padding='same', activation='elu')) model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2,2))) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(256)) model.add(tf.keras.layers.Activation('elu')) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation('softmax')) return model # + [markdown] colab_type="text" 
id="xLeZATVaNAnE" # ### Train on the TPU # # To begin training, construct the model on the TPU and then compile it. # + colab={} colab_type="code" id="pWEYmd_hIWg8" tf.keras.backend.clear_session() resolver = tf.distribute.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR']) tf.config.experimental_connect_to_cluster(resolver) # This is the TPU initialization code that has to be at the beginning. tf.tpu.experimental.initialize_tpu_system(resolver) print("All devices: ", tf.config.list_logical_devices('TPU')) strategy = tf.distribute.experimental.TPUStrategy(resolver) with strategy.scope(): model = create_model() model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ), loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy']) model.fit( x_train.astype(np.float32), y_train.astype(np.float32), epochs=17, steps_per_epoch=60, validation_data=(x_test.astype(np.float32), y_test.astype(np.float32)), validation_freq=17 ) model.save_weights('./fashion_mnist.h5', overwrite=True) # + [markdown] colab_type="text" id="ESL6ltQTMm05" # ### Check the results (inference) # # Now that you are done training, see how well the model can predict fashion categories! 
# + colab={} colab_type="code" id="SaYPv_aKId2d"
LABEL_NAMES = ['t_shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle_boots']

cpu_model = create_model()
cpu_model.load_weights('./fashion_mnist.h5')

from matplotlib import pyplot
# %matplotlib inline

def plot_predictions(images, predictions):
  """Show the images in a 4-column grid, titled with the top predicted label
  and its confidence.

  images      : array of shape (n, ...) -- images to display
  predictions : array of shape (n, num_classes) -- per-class scores
  """
  n = images.shape[0]
  nc = int(np.ceil(n / 4))
  # NOTE(review): below, y = i // 4 and x = i % 4 are used as axes[x, y],
  # which transposes rows and columns; it only works because the demo call
  # passes 16 images (a square 4x4 grid) -- confirm intended layout before
  # using a non-square grid.
  f, axes = pyplot.subplots(nc, 4)
  for i in range(nc * 4):
    y = i // 4
    x = i % 4
    axes[x, y].axis('off')
    # Skip grid cells past the last image.  This guard must run *before*
    # indexing `predictions`: the original indexed first and tested `i > n`,
    # which raised IndexError whenever n was not a multiple of 4.
    if i >= n:
      continue
    label = LABEL_NAMES[np.argmax(predictions[i])]
    confidence = np.max(predictions[i])
    axes[x, y].imshow(images[i])
    axes[x, y].text(0.5, 0.5, label + '\n%.3f' % confidence, fontsize=14)
  pyplot.gcf().set_size_inches(8, 8)

plot_predictions(np.squeeze(x_test[:16]), cpu_model.predict(x_test[:16]))

# + [markdown] colab_type="text" id="2a5cGsSTEBQD"
# ## What's next
#
# * Learn about [Cloud TPUs](https://cloud.google.com/tpu/docs) that Google designed and optimized specifically to speed up and scale up ML workloads for training and inference and to enable ML engineers and researchers to iterate more quickly.
# * Explore the range of [Cloud TPU tutorials and Colabs](https://cloud.google.com/tpu/docs/tutorials) to find other examples that can be used when implementing your ML project.
#
# On Google Cloud Platform, in addition to GPUs and TPUs available on pre-configured [deep learning VMs](https://cloud.google.com/deep-learning-vm/), you will find [AutoML](https://cloud.google.com/automl/)*(beta)* for training custom models without writing code and [Cloud ML Engine](https://cloud.google.com/ml-engine/docs/) which allows you to run parallel trainings and hyperparameter tuning of your custom models on powerful distributed hardware.
#
tools/colab/fashion_mnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
#Problem 1
#Use map to create a function which finds the
#length of each word in the phrase (broken by spaces) and
#return the values in a list.
#The function will have an input of a string, and output
#a list of integers.

def word_lengths(phrase):
    """Return the length of each whitespace-separated word in *phrase*."""
    # list() keeps the result a list on Python 3 too (map is lazy there).
    return list(map(len, phrase.split()))

#Input: word_lengths('How long are the words in this phrase')
#Output: [3, 4, 3, 3, 5, 2, 4, 6]
# -

# +
#Problem 2
#Use reduce to take a list of digits and return the
#number that they correspond to. Do not convert the integers to strings!

def digits_to_num(digits):
    """Fold a list of digits (most significant first) into the integer they spell."""
    # reduce is a builtin on Python 2 and lives in functools on Python 3;
    # importing from functools works on both.
    from functools import reduce
    return reduce(lambda acc, digit: acc * 10 + digit, digits)

#Input: digits_to_num([3,4,3,2,1])
#Output: 34321
# -

# +
#Problem 3
#Use filter to return the words from a list of words
#which start with a target letter.

def filter_words(word_list, letter):
    """Return the words in *word_list* that start with *letter*."""
    return list(filter(lambda word: word.startswith(letter), word_list))

#Input: l = ['hello','are','cat','dog','ham','hi','go','to','heart']
#filter_words(l,'h')
#Output: ['hello', 'ham', 'hi', 'heart']
# -

# +
#Problem 4
#Use zip and list comprehension to return a list of
#the same length where each value is the two strings
#from L1 and L2 concatenated together with connector between
#them. Look at the example output below:

def concatenate(L1, L2, connector):
    """Join corresponding elements of L1 and L2 with *connector* between them."""
    return [a + connector + b for a, b in zip(L1, L2)]

#Input: concatenate(['A','B'],['a','b'],'-')
#Output: ['A-a', 'B-b']
# -

# +
#Problem 5
#Use enumerate and other skills to return a dictionary
#which has the values of the list as keys and the index
#as the value. You may assume that a value will only appear
#once in the given list.

def d_list(L):
    """Map each value in *L* to its index."""
    return {value: index for index, value in enumerate(L)}

#Input: d_list(['a','b','c'])
#Output: {'a': 0, 'b': 1, 'c': 2}
# -

# +
#Problem 6
#Use enumerate and other skills from above to return the
#count of the number of items in the list whose value equals its index.

def count_match_index(L):
    """Count the items in *L* whose value equals its position."""
    return sum(1 for index, value in enumerate(L) if index == value)

#Input: count_match_index([0,2,2,1,5,5,6,10])
#Output: 4
# -
Test- Advanced Python Objects (After Section 11.6) Nick.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/seldoncode/Python_CoderDojo/blob/main/Python_CoderDojo02.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="0A284inMFZqg" # # Variables # Una variable es como una caja donde ponemos guardar valores: # * numeros enteros (int) # * números con decimales (float) # * booleanos (True, False) # * cadenas alfanuméricas (string) # * ... # # Ejemplo: x = 5 # # El sistema operativo reserva una dirección de memoria RAM donde guardar esa variable. # # Luego se puede cambiar el valor almacenado en la variable: # # Ejemplo: x = 7 # # Se dice que hemos **asignado** un nuevo valor a la variable. # + colab={"base_uri": "https://localhost:8080/"} id="IqeAqipMG7rD" outputId="dff36516-5bb5-4fae-b8dc-dbb108ea2e74" x = 5 print("x =",x) x = 7 print("x =",x) # + colab={"base_uri": "https://localhost:8080/"} id="BoZ3cNctIHOW" outputId="46cb4fcd-f5ea-4820-b4e1-c8ece6acd916" edad = 14 # int, número entero nota_examen = 7.3 # float, número flotnte (con decimales) ciudadNacimiento = "Madrid" # string, cadena alfanumérica avion_aterrizado = True # boobleano, variable de tipo boolean, True, False edad_hermanos = [9,12,14] # list, variable de tipo lista. Las listas van entre corchetes [] print(edad) print(nota_examen) print(ciudadNacimiento) print(avion_aterrizado) print(edad_hermanos) # + [markdown] id="gXIy9WmPmMVL" # ## Tipos de variables # Con la función ``````type()`````` podemos ver de qué tipo es una variable. 
# + colab={"base_uri": "https://localhost:8080/"} id="5NVB5d_QmXAD" outputId="0ed6ce6b-1333-400b-8f06-a54c424521a9" print("edad = ", edad) print(type(edad)) print("nota_examen = ", nota_examen) print(type(nota_examen)) print("ciudadNacimiento = ", ciudadNacimiento) print(type(ciudadNacimiento)) print("avion_aterrizado = ", avion_aterrizado) print(type(avion_aterrizado)) print("edad_hermanos = ", edad_hermanos) print(type(edad_hermanos)) # + [markdown] id="8SbGCIoBki-I" # ## Nombres de variables ilegales # * No pueden empezar por número # * No pueden llevar símbolos como $€@%&? (la barra baja si se admite) # * No pueden tener espacios en blanco # * No valen palabras reservadas del lenguaje (and, or, True, import, ...) # + id="sO40EHRZJ4YY" #100Montaditos = 2.5 # error, no se puede empezar por número #money$ = 20 # error, no llevar simbolos #Elon Musk = "Sudáfrica" # error, no valen espacios, por convenio se usan minúsculas #import = 100 # error, palabra reservada del lenguaje _clave = "Voyager" # si se puede comenzar por barra baja # + [markdown] id="KrgyE0wV3IC_" # ## Caracteres no recomendados # * No se recomienda usar letras acentuadas áéíóú ÁÉÍÓÚ üá # * No se recomenda usar ñ Ñ ç Ç # * No se recomienda usar mayúsculas # # + [markdown] id="f1blFCkLLkj6" # ### Reto 2.1. Creación y asignación de variables # Crea unas cuantas variable de diferente tipo: # * int # * float # * string # * boolean # * list # Inicializa las variables con algún valor. # Mustra en pantalla el valor de cada variable. # Cambia el valor de las variables. # Imprime el nuevo valor de cada variable. # + [markdown] id="6pTGUkhaMzTP" # ### Reto 2.2. Precio con descuento # - Crea una variable con el precio de un juguete que inicialmente es de 20 euros. 
# - Imprime su precio # - El juguete se pone de oferta un 35%, asigna a la variable el nuevo precio # - Imprime el nuevo precio # + [markdown] id="gsWJLj4tOyw4" # ## Operar con variables # Podemos operar con variables usando: # * los operadores aritméticos (+,-,*,/,//,%) # * los operadores lógicos (not, and, or) # * los comparadores (==, !=,<,>,<=,>=). # + colab={"base_uri": "https://localhost:8080/"} id="RX4u7Exwg3ku" outputId="1f8b253e-3d38-4de5-bb67-a1db8d484790" x = 5 y = 2 print("x+y =", x+y) print(f"La resta de {x}-{y} es {x-y}") # la f significa format, permite formatear lo que imprimimos print(f"Si elevas {x} a {y} el resultado es {x**y}") # El cuadrado de 5 es 5*5=25 # + colab={"base_uri": "https://localhost:8080/"} id="RkMS8ssQPXRk" outputId="e04bb52d-d587-4e65-ac23-e9957bea757a" # precio de los productos en la cesta de la compra leche = 4.6 lechuga = 1.2 pan = 0.8 huevos = 4.2 total = leche + lechuga + pan + huevos print(f"El importe todal de la cesta de la compra es {total} euros.") # + [markdown] id="uiLHQjoMpYIc" # ### Reto 2.3 Calcular el precio con IVA # Sabemos que el precio de un dron es de 200 € antes de aplicar el impuesto del IVA y que el impuesto aplicable es del 21%. Calcular el precio de venta al público que incluye ya el impuesto. # Usar variables. # + [markdown] id="bNvB5pW8rpqf" # ### Reto 2.4 Comprar unos refrescos # Me han dado un billete de 20 € para ira a comprar unas latas de refrescos. El precio de cada lata antes de aplicar el IVA es de 1.2 €. El IVA aplicable a las bebidas es del 10%. # ¿Cuántas latas podré comprar? # ¿Cuánto me ha sobrado? # Usar variables. # + [markdown] id="3x2W2FLitawf" # ## Contadores # Un contador es una variable que se va incrementando, habitualmente de uno en uno. # Por ejemplo, cuando cumplimos un año más nuestra edad se incrementa en un año. # + colab={"base_uri": "https://localhost:8080/"} id="JNaNOjvetz4M" outputId="221ed85b-8a0b-4536-a9e2-2c2d634e93db" edad = 15 edad = edad + 1 # contador. 
La variable edad es igual al antiguo valor de la variable edad más uno. print(edad) # + colab={"base_uri": "https://localhost:8080/"} id="ZjdupJ0iuITs" outputId="4b45f1e4-197b-4d95-95db-dc8939ef362f" vidas = 3 # comenzamos un juego con 3 vidas print(vidas) vidas = vidas + 1 # si en el juego ganamos una vida podemos incrementar su valor en uno print(vidas) vidas += 1 # esta es una forma abreviada de escribir lo mismo que antes. Incrementa las vidas en una más. print(vidas) vidas -= 1 # perdemos una vida. Es una forma abreviada de escribir vidas = vidas - 1 print(vidas) # + colab={"base_uri": "https://localhost:8080/"} id="B33qx3lbuzfE" outputId="0f37e5af-3e03-414b-a956-3f5ca7127856" # vamos a ir anotando la página de un libro por la que vamos y las que leemos cada día, para ver como se incrementa pagina = 100 # página por la que comenamos a anotar el valor, para saber por donde vamos en la lectura del libro print(pagina) pagina += 12 # hoy he leido 12 páginas print(pagina) pagina += 18 # hoy he leido 18 páginas más print(pagina) pagina -= 5 # he retrocedido 5 páginas ya que quiero volver a leerlas, porque ayer no me enteré bien de esa parte print(pagina) # + [markdown] id="6G_03q_fw9Wu" # ### Reto 2.5. Cobros y pagos # * Inicialmente tengo 20 €. # * Me dan 50 € por mi cumpleaños. # * Me gasto en ir al cine 15 € # * Me encuentro una moneda de 2 €. # * Compro un juego de 17 €. # * Compro un comic por 8 €. # ¿Cuánto dinero tengo al final? # Utiliza variables, haciendo los pasos intermedios. # + [markdown] id="OhVFpZ6b0kJ6" # ### Reto 2.6. Variables permitidas # Comprueba si la siguientes variable son o no válidas. 
# * IVA = 21 # * input = 100 # * myCity = "Madrid" # * _100_Km = "Repostar" # * velocidad luz = 300000 # * velocidad_luz = 300_000 # * 101Dalmatas = "Disney" # * ciudad_de_la_luz = "París" # * nombre@email ="<EMAIL>" # * puntos-totales = 117 # * global = "Viaje al mundo en 80 días" # * número_daño = 4 # no se recomienda el uso de la ñ ni acentos # + [markdown] id="OIjoTBLZ6LQl" # ## Variables booleanas # Una variable puede tomar el valor True o False. # + colab={"base_uri": "https://localhost:8080/"} id="yf5aiwAv6WYB" outputId="fab0acce-ee94-4273-efb7-3d68e8837b28" huevos = 100 docenas = huevos//12 sobran = huevos%12 justos = sobran==0 # justos es una variable booleana print(f"En {huevos} huevos hay {docenas} docenas y sobran {sobran} huevos.") print(f"¿Están justos? {justos}") # + [markdown] id="Tjayh3HH76JO" # ### Reto 2.7. Plazas de un autobús # * Un autobús tiene 37 plazas. # * Inicialmente viajan 20 personas. # * Crea una variable booleana que diga si quedan plazas libres. # * Luego suben al autobús 12 personas. # * En la siguiente parada bajan 4 y quieren subir 8, ¿será posible? # * En la siguiente parada bajan 3 y quieren subir 5, ¿será posible? # * Usa variables siguiendo el proceso paso a paso. # + [markdown] id="iqSp8fUu8fsX" # ### Reto 2.8. Usando variables booleanas # * Crea un caso con dos variables booleanas, por ejemplo en relación a si hemos pasado un nivel de un juego y si nos quedan recursos. # * Usa ambas variables booleanas y prueba a combinarlas con and, or, not. # + [markdown] id="Y1WEmFNH_20U" # ## Intercambio de valores de dos variables # Tenemos dos variables: # * x = 3 # * y = 5 # deseamos intercambiar sus valores. # Podemos resolverlo usando cualquiera de los siguientes métodos. # # ### Método 1 # Usando una variable auxiliar que podemos llamar aux, que recogerá el valor de x mientras y toma su valor. 
# + colab={"base_uri": "https://localhost:8080/"} id="friJc-pJAuVl" outputId="c76b5196-3adc-48a2-ec7b-46f4f109dc2f" x = 3 y = 5 aux = x x = y y = aux print("nuevo valor de x:", x) print("nuevo valor de y;", y) # + [markdown] id="rbZEJF3ABOMN" # ### Método 2 # Python tiene un método estupendo que permite intercambiar los valores de x e y sin usar una variable auxiliar. Muchos otros lenguajes no tienen este método. # + colab={"base_uri": "https://localhost:8080/"} id="E4pGtDLhBgBm" outputId="d9519eb5-424c-42fd-b4a0-c8c859c1b911" x = 3 y = 5 x,y = y,x # forma maravillosa de permutar valores entre variables print("nuevo valor de x:", x) print("nuevo valor de y;", y) # + [markdown] id="iuQoOJRkCIzB" # ## Asignaciones múltiples # + colab={"base_uri": "https://localhost:8080/"} id="ya5YgilKCOuk" outputId="f470af7e-036b-46eb-bffa-638e850030f3" # asignamos las edades de Ana y de Eva. ana, eva = 14, 16 print(f"La edad de Ana es {ana} y la edad de Eva es {eva}.") # + colab={"base_uri": "https://localhost:8080/"} id="Uav_N3qjCgZP" outputId="a7086336-79f6-43aa-a5b1-8a99a5dde089" # En un juego los tres coches parten con el mismo número de puntos rojo = verde = azul = 100 print(f"El rojo tiene {rojo}, el verde tiene {verde} y el azul tiene {azul} puntos.") # + [markdown] id="ZCld-pdaD63S" # ### Reto 2.9. Juego de cartas # En un juego de cartas cuando sale la carta "reverse" los jugadores deben intercambiar sus cartas, y por tanto sus puntos. # Hay dos jugadores que tienen estos puntos: # * luis = 80 # * raul = 45 # Crear una tirada de una carta donde se pueda sacar o no la carta "reverse", controlar este posible resultado con una variable booleana. # Si se da la situación de "reverse" intercambie los puntos de los jugadores. # Pruebe a realizar la permuta usando varios métodos.
Python_CoderDojo02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 71} colab_type="code" executionInfo={"elapsed": 5615, "status": "ok", "timestamp": 1525967882493, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "107995332831641667384"}, "user_tz": -540} id="NbpH46NhWXlE" outputId="0c9883ea-3ba2-4fb6-acd2-f44d0050b3bc" !pip install dynet # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 3106, "status": "ok", "timestamp": 1525967885881, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128", "userId": "107995332831641667384"}, "user_tz": -540} id="0Nh8HTurXHyZ" outputId="11cdc3b9-e10c-456a-aeec-ebc6b818b63d" !git clone https://github.com/neubig/nn4nlp-code.git # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="4UGDlQCGWxgs" from collections import defaultdict import math import time import random import dynet as dy import numpy as np # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="4RAMj1moWy9f" N = 2 # The length of the n-gram EMB_SIZE = 128 # The size of the embedding HID_SIZE = 128 # The size of the hidden layer # Functions to read in the corpus # NOTE: We are using data from the Penn Treebank, which is already converted # into an easy-to-use format with "<unk>" symbols. If we were using other # data we would have to do pre-processing and consider how to choose # unknown words, etc. 
w2i = defaultdict(lambda: len(w2i)) S = w2i["<s>"] UNK = w2i["<unk>"] def read_dataset(filename): with open(filename, "r") as f: for line in f: yield [w2i[x] for x in line.strip().split(" ")] # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="BS61BVrJW1Nx" # Read in the data train = list(read_dataset("nn4nlp-code/data/ptb/train.txt")) w2i = defaultdict(lambda: UNK, w2i) dev = list(read_dataset("nn4nlp-code/data/ptb/valid.txt")) i2w = {v: k for k, v in w2i.items()} nwords = len(w2i) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ZM4zb9bFW4Cy" # Start DyNet and define trainer model = dy.Model() # CHANGE 1: Use Adam instead of Simple SGD trainer = dy.AdamTrainer(model, alpha=0.001) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="MJNy19A0W8iS" # Define the model W_emb = model.add_lookup_parameters((nwords, EMB_SIZE)) # Word weights at each position W_h_p = model.add_parameters((HID_SIZE, EMB_SIZE * N)) # Weights of the softmax b_h_p = model.add_parameters((HID_SIZE)) # Weights of the softmax W_sm_p = model.add_parameters((nwords, HID_SIZE)) # Weights of the softmax b_sm_p = model.add_parameters((nwords)) # Softmax bias # A function to calculate scores for one value def calc_score_of_histories(words, dropout=0.0): # This will change from a list of histories, to a list of words in each history position words = np.transpose(words) # Lookup the embeddings and concatenate them emb = dy.concatenate([dy.lookup_batch(W_emb, x) for x in words]) # Create the hidden layer W_h = dy.parameter(W_h_p) b_h = dy.parameter(b_h_p) h = dy.tanh(dy.affine_transform([b_h, W_h, emb])) # Perform dropout if dropout != 0.0: h = dy.dropout(h, dropout) # Calculate the score and return W_sm = dy.parameter(W_sm_p) b_sm = dy.parameter(b_sm_p) return dy.affine_transform([b_sm, W_sm, h]) # Calculate the loss value for the entire sentence def calc_sent_loss(sent, dropout=0.0): # Create a 
computation graph dy.renew_cg() # The initial history is equal to end of sentence symbols hist = [S] * N # Step through the sentence, including the end of sentence token all_histories = [] all_targets = [] for next_word in sent + [S]: all_histories.append(list(hist)) all_targets.append(next_word) hist = hist[1:] + [next_word] s = calc_score_of_histories(all_histories, dropout=dropout) return dy.sum_batches(dy.pickneglogsoftmax_batch(s, all_targets)) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="qXOnrsHhW_j2" MAX_LEN = 100 # Generate a sentence def generate_sent(): dy.renew_cg() hist = [S] * N sent = [] while True: p = dy.softmax(calc_score_of_history(hist)).npvalue() next_word = np.random.choice(nwords, p=p/p.sum()) if next_word == S or len(sent) == MAX_LEN: break sent.append(next_word) hist = hist[1:] + [next_word] return sent # + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="0aljWYXRWa4A" outputId="d7c865c7-74ac-42fc-cb35-de7c4290e2f7" last_dev = 1e20 best_dev = 1e20 for ITER in range(100): # Perform training random.shuffle(train) train_words, train_loss = 0, 0.0 start = time.time() for sent_id, sent in enumerate(train): my_loss = calc_sent_loss(sent, dropout=0.2) train_loss += my_loss.value() train_words += len(sent) my_loss.backward() trainer.update() if (sent_id+1) % 5000 == 0: print("--finished %r sentences" % (sent_id+1)) print("iter %r: train loss/word=%.4f, ppl=%.4f, time=%.2fs" % (ITER, train_loss/train_words, math.exp(train_loss/train_words), time.time()-start)) # Evaluate on dev set dev_words, dev_loss = 0, 0.0 start = time.time() for sent_id, sent in enumerate(dev): my_loss = calc_sent_loss(sent) dev_loss += my_loss.value() dev_words += len(sent) trainer.update() # CHANGE 3: Keep track of the development accuracy and reduce the learning rate if it got worse if last_dev < dev_loss: trainer.learning_rate /= 2 last_dev = dev_loss 
# CHANGE 4: Keep track of the best development accuracy, and save the model only if it's the best one if best_dev > dev_loss: model.save("model.txt") best_dev = dev_loss # Save the model print("iter %r: dev loss/word=%.4f, ppl=%.4f, time=%.2fs" % (ITER, dev_loss/dev_words, math.exp(dev_loss/dev_words), time.time()-start)) # Generate a few sentences for _ in range(5): sent = generate_sent() print(" ".join([i2w[x] for x in sent])) # + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="2DcvvmV5WiRX"
02-lm/nn_lm_batch_dynet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## _*QISKit ACQUA Chemistry, H2O ground state computation*_ # # This notebook demonstrates how to use QISKit ACQUA Chemistry to compute the ground state energy of a water (H2O) molecule using VQE and UCCSD. # # This notebook has been written to use the PYSCF chemistry driver. See the PYSCF chemistry driver readme if you need to install the external PySCF library that this driver requires. # + from qiskit_acqua_chemistry import ACQUAChemistry # Input dictionary to configure QISKit ACQUA Chemistry for the chemistry problem. acqua_chemistry_dict = { 'problem': {'random_seed': 50}, 'driver': {'name': 'PYSCF'}, 'PYSCF': {'atom': 'O 0.0 0.0 0.0; H 0.757 0.586 0.0; H -0.757 0.586 0.0', 'basis': 'sto-3g'}, 'operator': {'name': 'hamiltonian', 'freeze_core': True}, 'algorithm': {'name': 'ExactEigensolver'} } # - # With the above input problem dictionary for water we now create an `ACQUAChemistry` object and call `run` on it passing in the dictionary to get a result. We use ExactEigensolver first as a reference. solver = ACQUAChemistry() result = solver.run(acqua_chemistry_dict) # The `run` method returns a result dictionary. Some notable fields include 'energy' which is the computed ground state energy. print('Ground state energy: {}'.format(result['energy'])) # There is also a 'printable' field containing a complete ready to print readable result for line in result['printable']: print(line) # We update the dictionary, for VQE with UCCSD, and run the computation again. 
# + Reconfigure the same water problem for VQE: a UCCSD variational form,
# a Hartree-Fock initial state and a COBYLA classical optimizer.
acqua_chemistry_dict.update({
    'algorithm': {'name': 'VQE'},
    'optimizer': {'name': 'COBYLA', 'maxiter': 25000},
    'variational_form': {'name': 'UCCSD'},
    'initial_state': {'name': 'HartreeFock'},
})

# Re-run the solver with the updated configuration and report the results.
solver = ACQUAChemistry()
result = solver.run(acqua_chemistry_dict)

print('Ground state energy: {}'.format(result['energy']))
for line in result['printable']:
    print(line)
# -

print('Actual VQE evaluations taken: {}'.format(result['algorithm_retvals']['eval_count']))
chemistry/h2o.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# <h4>Name: <NAME></h4>

# <h4>Student ID: 18127070</h4>

# + Load the weather dataset.
import pandas as pd

df = pd.read_csv('weatherPrediction2.csv')
df
# -

# Impute missing wind speeds with the column mean.
# FIX: `df['Wind speed'].fillna(..., inplace=True)` mutates through a column
# slice (chained assignment) — deprecated in modern pandas and unreliable under
# Copy-on-Write. Assigning the filled column back is the supported form and
# produces the same result.
df['Wind speed'] = df['Wind speed'].fillna(df['Wind speed'].mean())
df

# Impute missing temperatures with the column mode (mode() can return several
# values; .values[0] keeps the first, as the original did).
df['Temperature'] = df['Temperature'].fillna(df['Temperature'].mode().values[0])
df
HW1/18127070.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Notebook for Importing NYC TLC Taxi Data. # + import sys import os sys.path.append(os.path.abspath('../')) import pandas as pd from twitterinfrastructure.tools import connect_db import twitterinfrastructure.import_nyctlc as imp import importlib importlib.reload(imp) print(os.getcwd()) # - # # Import nyc tlc data for Sandy analysis (2012-10 and 2012-11) to nyctlc-triprecorddata database. # set up paths #url_path = 'data/raw/nyctlc-triprecorddata/raw_data_urls.txt' url_path = None #dl_dir = '/Volumes/My Passport/Work/projects/twitterinfrastructure/data/raw' \ # '/nyctlc-triprecorddata/data/' dl_dir = 'data/raw/nyctlc-triprecorddata/data-sandy/' db_path = 'data/processed/nyctlc-triprecorddata.db' # download taxi data dl_num = imp.dl_urls(url_path, dl_dir) # import yellow taxi data to trips table (~4.63 GB, 28,298,345 records) # takes ~4 mins to run (3.3 GHz Intel Core i7, 16 GB 2133 MHz LPDDR3) taxi_type = 'yellow' usecols = ['pickup_datetime', 'dropoff_datetime', 'passenger_count', 'trip_distance', 'pickup_longitude', 'pickup_latitude', 'pickup_location_id', 'dropoff_longitude', 'dropoff_latitude', 'dropoff_location_id'] dl_num, import_num = imp.import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=usecols, overwrite=True, verbose=1) # save interim small trips table conn = connect_db(db_path) sql = """ SELECT * FROM trips LIMIT 100 """ df = pd.read_sql_query(sql, conn) conn.close() df.to_csv('data/interim/nyctlc-triprecorddata/trips_short.csv') # # Import NYC TLC data for 2010-2012 to nyctlc20102012. 
# set up paths url_path = None dl_dir = '/Volumes/My Passport/Work/projects/twitterinfrastructure/data/raw' \ '/nyctlc-triprecorddata/data-20102012/' db_path = 'data/processed/nyctlc20102012.db' taxi_type = 'yellow' # import yellow taxi data to trips table (~86.27 GB, ??? records) # takes ~2 hours to run (3.3 GHz Intel Core i7, 16 GB 2133 MHz LPDDR3) usecols = ['pickup_datetime', 'dropoff_datetime', 'passenger_count', 'trip_distance', 'pickup_longitude', 'pickup_latitude', 'pickup_location_id', 'dropoff_longitude', 'dropoff_latitude', 'dropoff_location_id'] dl_num, import_num = imp.import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=usecols, overwrite=True, verbose=1) # # Import NYC TLC data for 2012 # set up paths url_path = None dl_dir = '/Volumes/My Passport/Work/projects/twitterinfrastructure/data/raw' \ '/nyctlc-triprecorddata/data-2012/' db_path = 'data/processed/nyctlc-2012.db' taxi_type = 'yellow' # import yellow taxi data to trips table (~29.28 GB, ??? records) # takes ~1 hour to run (3.3 GHz Intel Core i7, 16 GB 2133 MHz LPDDR3) usecols = ['pickup_datetime', 'dropoff_datetime', 'passenger_count', 'trip_distance', 'pickup_longitude', 'pickup_latitude', 'pickup_location_id', 'dropoff_longitude', 'dropoff_latitude', 'dropoff_location_id'] dl_num, import_num = imp.import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=usecols, overwrite=True, verbose=1)
notebooks/nyctlc-import.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Square Gradient Theory for Mixtures
#
# Square Gradient Theory allows studying the interfacial behavior of fluids. SGT proposes that the Helmholtz free energy density at the interface can be described by the sum of a homogeneous and a gradient contribution, in which the density profiles between the bulk phases are meant to minimize the energy of the system. This results in the following Euler-Lagrange system:
#
# $$ \sum_j c_{ij} \frac{d^2 \rho_j}{dz^2} = \mu_i - \mu_i^0 \qquad i = 1,...,c$$
# $$ \rho(z \rightarrow -\infty) = \rho^\alpha \qquad \rho(z \rightarrow \infty) = \rho^\beta $$
#
# Here, $z$ represents the interfacial position, $\mu$ the chemical potential and $\rho$ the density vector. The superscript 0 refers to the bulk phase value and $\alpha$, $\beta$ to the bulk phase indices. Finally, $c_{ij}$ represents the cross-influence parameter between molecules $i$ and $j$ and, in general, is computed as the geometric mean of the pure fluids' influence parameters and a correction factor:
#
# $$ c_{ij} = (1 - \beta_{ij}) \sqrt{c_{ii} c_{jj}} $$
#
# The solution procedure of SGT strongly depends on whether the influence parameter matrix is singular or not. If all $\beta_{ij} = 0$, the matrix is singular and the differential equation reduces to the following algebraic system of equations:
#
# $$ \sqrt{c_{rr}} \left[ \mu_i - \mu_i^0 \right] = \sqrt{c_{ii}} \left[ \mu_r - \mu_r^0 \right] \qquad i = 1,...,c, i \neq r $$
#
# Here $r$ refers to the reference component index.
# # Once the density profiles are solved the interfacial tension can be computed as: # # $$ \sigma = \int_{-\infty}^{\infty} \sum_i \sum_j c_{ij} \frac{d\rho_i}{dz} \frac{d\rho_j}{dz} dz $$ # # This notebook has the purpose of showing examples of computing interfacial tension of mixtures and $\beta_{ij} = 0$. # # First, it's needed to import the necessary modules import numpy as np from sgtpy import component, mixture, saftvrmie from sgtpy.equilibrium import bubblePy from sgtpy.sgt import sgt_mix_beta0 import matplotlib.pyplot as plt # Then a mixture and the EoS are created. # + water = component('water', ms = 1.7311, sigma = 2.4539 , eps = 110.85, lambda_r = 8.308, lambda_a = 6., eAB = 1991.07, rcAB = 0.5624, rdAB = 0.4, sites = [0,2,2], cii = 1.5371939421515455e-20) ethanol = component('ethanol2C', ms = 1.7728, sigma = 3.5592 , eps = 224.50, lambda_r = 11.319, lambda_a = 6., eAB = 3018.05, rcAB = 0.3547, rdAB = 0.4, sites = [1,0,1], cii= 5.3141080872882285e-20) mix = mixture(ethanol, water) # or mix = ethanol + water kij, lij = np.array([-0.0069751 , -0.01521566]) Kij = np.array([[0, kij], [kij, 0]]) Lij = np.array([[0., lij], [lij, 0]]) # setting interactions corrections mix.kij_saft(Kij) mix.lij_saft(Lij) # or by setting the kij interactions by pairs i=0 (ethanol), j=1 (water) mix.set_kijsaft(i=0, j=1, kij0=kij) mix.set_lijsaft(i=0, j=1, lij0=lij) # creating eos model eos = saftvrmie(mix) # - # With the ``eos`` object the equilibrium point is computed with the ``bubblePy`` function. For this example, the bubble point of a mixture of x1 = 0.2 at 298.15K is computed. 
# + T = 298.15 # K # liquid composition x = np.array([0.2, 0.8]) # initial guesses P0 = 1e4 # Pa y0 = np.array([0.8, 0.2]) sol = bubblePy(y0, P0, x, T, eos, full_output=True) y, P = sol.Y, sol.P vl, vv = sol.v1, sol.v2 #computing the density vector rhol = x/vl rhov = y/vv # - # If the ```full_output``` is set to false, the densities vector of the phases can be computed as follows: rhol = eos.density(x, T, P, 'L') * x rhov = eos.density(y, T, P, 'V') * y # With the computed equilibrium values (rhov, rhol, T, P) SGT can be used to study the interfacial behavior of the mixture. # # To solve the density profiles there are two available method: # - ``reference``: used the reference component method. The parameter ``s`` sets the index for the reference component. # - ``liang``: uses [Liang's path function](https://www.sciencedirect.com/science/article/pii/S0378381216300450): # # $$ h = \sum_i \sqrt{c_{ii}} \rho_i $$ # # This path function (``h``) is coupled to the algebraic system and allows solving the density profile even when there are multiple stationary points in the interface. Additionally, this method defines a parametric variable $\alpha$ which is zero at the equilibrium conditions. The values of $\alpha$ can be used to check if the geometric mixing rule for the influence parameter is suitable for the mixture. 
# + #if reference component is set to ethanol (index = 0) a lower value is obtained as the #full density profile was not calculated because of a stationary point in the interface solr1 = sgt_mix_beta0(rhov, rhol, T, P, eos, s=0, method='reference', full_output=True) #water doesnt show surface activity across the interface and the density profiles are fully calculated solr2 = sgt_mix_beta0(rhov, rhol, T, P, eos, s=1, method='reference', full_output=True) #Using Liang path function the density profiles are computed directly soll = sgt_mix_beta0(rhov, rhol, T, P, eos, n=300, method='liang', full_output=True) # - print('Reference component method (1) : ', solr1.tension, 'mN/m') print('Reference component method (2) : ', solr2.tension, 'mN/m') print('Liang path Function: ', soll.tension, 'mN/m') # The ```full_output``` options allow obtaining tension value, density profiles and grand thermodynamic potential. # Those values are accessed similarly as SciPy OptimizeResult. # # 1. sol.tension -> calculated IFT # 2. sol.rho -> density array # 3. sol.z -> interfacial lenght array # 4. sol.GPT -> grand thermodynamic potential # # # The results can be plotted and compared. As can be seen, when ethanol is used as a reference component this method is not able to compute the density profile from one bulk phase to another. # When studying the alpha variable from Liang's path function it can be seen that its value at the edge is zero as well as somewhere in the middle. This is expected when the geometric mean rule for the influence parameter with no correction can be used for the mixture. 
# + rhor1 = solr1.rho / 1000 # kmol/m3 rhor2 = solr2.rho / 1000 # kmol/m3 rholiang = soll.rho / 1000 # kmol/m3 alphas = soll.alphas path = soll.path fig = plt.figure(figsize = (10, 4)) fig.subplots_adjust( wspace=0.3) ax1 = fig.add_subplot(121) ax1.plot(rholiang[0], rholiang[1], color = 'red') ax1.plot(rhor2[0], rhor2[1], color = 'cyan') ax1.plot(rhor1[0], rhor1[1], color = 'black') ax1.plot(rhov[0]/1000, rhov[1]/1000, 'o', color = 'k') ax1.plot(rhol[0]/1000, rhol[1]/1000, 'o', color = 'k') ax1.set_xlabel(r'$\rho_1$ / kmol m$^{-3}$') ax1.set_ylabel(r'$\rho_2$ / kmol m$^{-3}$') ax2 = fig.add_subplot(122) ax2.plot(path/1000, alphas) ax2.axhline(y = 0, linestyle = '--',color = 'r') ax2.set_ylabel(r'$\alpha$') ax2.set_xlabel(r'path function / 1000') # - # A more challenging mixture to analyze is ethanol and hexane. This mixture has several stationary points across the interface making its calculations tricky. Similar to before, equilibrium has to be computed. # + ethanol = component('ethanol2C', ms = 1.7728, sigma = 3.5592 , eps = 224.50, lambda_r = 11.319, lambda_a = 6., eAB = 3018.05, rcAB = 0.3547, rdAB = 0.4, sites = [1,0,1], cii= 5.3141080872882285e-20) hexane = component('hexane', ms = 1.96720036, sigma = 4.54762477, eps = 377.60127994, lambda_r = 18.41193194, cii = 3.581510586936205e-19) mix = mixture(hexane, ethanol) # or mix = hexane + ethanol # setting up kij matrix kij = 0.011818492037463553 Kij = np.array([[0, kij], [kij, 0]]) mix.kij_saft(Kij) # or by setting the kij interactions by pairs i=0 (hexane), j=1 (ethanol) mix.set_kijsaft(i=0, j=1, kij0=kij) eos = saftvrmie(mix) # + # computing phase equilibria T = 298.15 # K x = np.array([0.3, 0.7]) y0 = 1.*x P0 = 8000. 
# Pa sol = bubblePy(y0, P0, x, T, eos, full_output=True) y, P = sol.Y, sol.P vl, vv = sol.v1, sol.v2 #computing the density vector rhox = x/vl rhoy = y/vv # - # Then SGT is used with the different methods available for beta = 0 solr1 = sgt_mix_beta0(rhoy, rhox, T, P, eos, s=0, method='reference', full_output=True) solr2 = sgt_mix_beta0(rhoy, rhox, T, P, eos, s=1, method='reference', full_output=True) soll = sgt_mix_beta0(rhoy, rhox, T, P, eos, n=300, method='liang', full_output=True) print('Reference component method (1) : ', solr1.tension, 'mN/m') print('Reference component method (2) : ', solr2.tension, 'mN/m') print('Liang path Function: ', soll.tension, 'mN/m') # Finally, the density profiles can be plotted. As can be seen, only Liang path function is able to compute the density profiles from one bulk phase to another correctly. # + rhor1 = solr1.rho / 1000 # kmol/m3 rhor2 = solr2.rho / 1000 # kmol/m3 rholiang = soll.rho / 1000 # kmol/m3 alphas = soll.alphas path = soll.path fig = plt.figure(figsize = (10, 4)) fig.subplots_adjust( wspace=0.3) ax1 = fig.add_subplot(121) ax1.plot(rholiang[0], rholiang[1], color = 'red') ax1.plot(rhor2[0], rhor2[1], color = 'cyan') ax1.plot(rhor1[0], rhor1[1], color = 'black') ax1.plot(rhoy[0]/1000, rhoy[1]/1000, 'o', color = 'k') ax1.plot(rhox[0]/1000, rhox[1]/1000, 'o', color = 'k') ax1.set_xlabel(r'$\rho_1$ / kmol m$^{-3}$') ax1.set_ylabel(r'$\rho_2$ / kmol m$^{-3}$') ax2 = fig.add_subplot(122) ax2.plot(path/1000, alphas) ax2.axhline(y = 0, linestyle = '--',color = 'r') ax2.set_ylabel(r'$\alpha$') ax2.set_xlabel(r'path function / 1000') # - # --- # For further information about the ``sgt_mix_beta0`` function check out the documentation running: ``sgt_mix_beta0?``
examples/10. Square Gradient Theory for mixtures (beta=0).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # **Complex prediction**: Decision trees (SOLUTIONS)

# Source: [https://github.com/d-insight/code-bank.git](https://github.com/d-insight/code-bank.git)
# License: [MIT License](https://opensource.org/licenses/MIT). See open source [license](LICENSE) in the Code Bank repository.

# -------------

# ## Overview

# We apply a classification tree and random forest model to the Boston dataset to predict crime per capita. We create a binary outcome feature, `CRIM_BIN`, that is equal to 1 if the crime rate contains a value above or equal to its median, and a 0 if the crime rate contains a value below its median.
#
# This dataset contains information collected by the U.S Census Service concerning housing in the area of Boston Mass. It based on Harrison and Rubinfeld (1978) data and a similar dataset is available [here](https://nowosad.github.io/spData/reference/boston.html).

# --------

# ## Part 0: Setup

# +
# Import packages
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from graphviz import Source

import matplotlib.pyplot as plt

# +
# Define constant(s)
SEED = 17
# -

# # **MAIN EXERCISE**

# ## Part 1: Load data and create train/test sets
#
# In the first part, we load the `housing.csv` dataset. This dataset includes the following columns:
#
# - `CRIM`: per capita crime rate
# - `ZN`: proportions of residential land zoned for lots over 25000 sq. ft per town (constant for all Boston tracts)
# - `INDUS`: proportions of non-retail business acres per town (constant for all Boston tracts)
# - `CHAS`: levels 1 if tract borders Charles River; 0 otherwise
# - `NOX`: nitric oxides concentration (parts per 10 million) per town
# - `RM`: average numbers of rooms per dwelling
# - `AGE`: proportions of owner-occupied units built prior to 1940
# - `DIS`: weighted distances to five Boston employment centres
# - `RAD`: index of accessibility to radial highways per town (constant for all Boston tracts)
# - `TAX`: full-value property-tax rate per USD 10,000 per town (constant for all Boston tracts)
# - `PTRATIO`: pupil-teacher ratios per town (constant for all Boston tracts)
# - `B`: proportion of blacks
# - `LSTAT`: percentage values of lower status population
# - `MEDV`: median values of owner-occupied housing in USD 1000
#
# **Q 1**: Load the data. What shape does it have?

# +
# Load the data set.
# FIX: `sep=r'\s+'` replaces `delim_whitespace=True`, which is deprecated in
# pandas 2.2 and removed in pandas 3.0; both split columns on any whitespace.
df = pd.read_csv('data/housing.csv', sep=r'\s+')
df.head()
# -

df.shape

# **Q 2**: Create the binary feature `CRIM_BIN` that contains a 1 if `CRIM` contains a value above or equal to its median, and a 0 if `CRIM` contains a value below its median. What % are 1s? Is the target variable balanced?

# +
# Compute median of CRIM.
# FIX: take the median of the single column of interest instead of computing
# df.median() over every column and indexing the result — same value, less
# work, and robust if non-numeric columns are ever added.
crim_med = df['CRIM'].median()

# OPTION 1: vectorized comparison against the median
CRIM_BIN = (df['CRIM'] >= crim_med).astype(int)

# OPTION 2 (explicit loop, kept for teaching purposes)
# # extract values of CRIM into list
# CRIM = df['CRIM'].values.tolist()
# # compute values of CRIM_BIN
# CRIM_BIN = []
# for crim_value in CRIM:
#     CRIM_BIN_value = int(crim_value >= crim_med)
#     CRIM_BIN.append(CRIM_BIN_value)

# Add CRIM_BIN column to dataframe
df['CRIM_BIN'] = CRIM_BIN

# Display head
df.head()
# -

# ## Part 2: Split data into train/test sets and look at the descriptive statistics
#
# Before modeling the data, we perform the usual train/test split and look at how the descriptive statistics between the two sets compare.
# **Q 1**: Divide data into a training set (80%) and testing set (20%) randomly with a seed (we defined the seed as a constant at the very top of the notebook). The seed ensures that the random process returns the same results when ran multiple times. Next, split the training and testing data into the explanatory variables and the outcome variable. How can you ensure that samples are randomly assigned to the training or testing set?

# Randomly split data into train set (80%) and test set (20%)
df_train, df_test = train_test_split(df, train_size = 0.8, test_size = 0.2, random_state = SEED)

# +
# For both train and test data, extract CRIM_BIN column and combine relevant explanatory variables
y_col = 'CRIM_BIN'
X_cols = ['ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']

# Prepare data for classifier model (converts data frame to a list of lists)
y_train = df_train[y_col]
X_train = df_train[X_cols]

y_test = df_test[y_col]
X_test = df_test[X_cols]
# -

# **Q 2**: Look at the descriptive statistics for train/test sets. Are the distributions similar? What can we do if the distributions of the outcome variable (`CRIM_BIN`) are different?

# +
# Compute descriptive statistics for the training set
df_train.describe().T
# -

df_test.describe().T

# If the distributions for the outcome variable (`CRIM_BIN`) are different, we can stratify according to this variable. More specifically, we can set the `stratify` parameter of the `train_test_split()` function.

# ## Part 3: Fit a decision tree classifier
#
# A decision tree classifier is a simple, non-linear tree model. You find the sklearn documentation [here](https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html). This model represents our baseline.

# **Q 1**: Fit a decision tree and tune the `max_depth` parameter. Hint: use `sklearn.model_selection.GridSearchCV()` for parameter tuning and tune the `max_depth` parameter in the range `[1,14]`. What's the optimal depth of trees?

# +
# Tune max_depth parameter in the range (1,14)
# NOTE(review): range(1,14) evaluates depths 1..13 only, so depth 14 from the
# exercise text is never tried — kept as-is to preserve the solution's numbers.
tuned_parameters = [{'max_depth': range(1,14)}]

clf = GridSearchCV(DecisionTreeClassifier(random_state = SEED), tuned_parameters, cv = 5, scoring='accuracy')
clf.fit(X_train, y_train)

# Look at the best parameters
clf.best_params_

# +
# Extract the optimal tree depth
best_tree_depth = clf.best_params_['max_depth']
best_tree_depth
# -

# **Q 2**: Assess the classifier on the test set. What accuracy do you achieve?

# +
# Assessing best performing classifier tree on test set:
# refit a single tree at the tuned depth, then score on held-out data
clf_tree = DecisionTreeClassifier(max_depth = best_tree_depth, random_state=SEED)
clf_tree.fit(X_train, y_train)
y_pred = clf_tree.predict(X_test)
acc_tree = accuracy_score(y_test, y_pred)
print('Accuracy: ' + str(acc_tree))
# -

# # **ADVANCED EXERCISE**
#
# *Optional.* If time permits and you feel comfortable with Python, continue with the advanced parts of this exercise below.

# ## Part 4: Plot the tree partitioning and feature importance
#
# This part tells us how the tree classifier partitions the feature space. In other words, we see which features are most informative (i.e. split at the root) and at what values.

# **Q 1**: Plot the tree patitioning. What's the most informative feature?
#
# Hint 1: use the `export_graphviz()` function from the `graphviz` package to plot the tree.
#
# Hint 2: use the `Source` function from the `graphviz` package to create a graph that can be displayed in the notebook.

# Plot best performing regression tree using graphviz
# this may not work on all computers - requires graphviz to be installed
# install graphviz on a Mac computer by running: brew install -v graphviz
# install graphviz on Linux computer by running: sudo apt-get install graphviz
Source(export_graphviz(clf_tree, out_file=None, feature_names=X_cols, max_depth=2))

# **Q 2**: Look at the feature importance on a histogram. Hint: use the `.feature_importances_` function in sklearn.

# +
# Extract and plot importance of explanatory features
feat_names = ['ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']
feat_importance = clf_tree.feature_importances_.tolist()

plt.bar(list(range(1, len(feat_names)+1)), feat_importance, tick_label = feat_names, align = 'center')
plt.xticks(rotation='vertical')
plt.title('Importance of Features for Classification Tree')
plt.xlabel('explanatory features')
plt.ylabel('importance')
plt.show()
# -

# Answer: NOX (nitric oxides concentration) is the best predictor. Does this make sense?

# ## Part 5: Compute the ROC curve
#
# In this part, we compute the false positive rate, true positive rate and thresholds defining ROC curve.

# **Q 1**: What are the false positive and true rates? What's the area under the curve (AUC)?
#
# Hint: generate predictions using the `predict_proba()` function in `sklearn`. We need probabilistic instead of binary predictions to compute the ROC thresholds.

# +
# probability of the positive class (column 1)
y_pred_proba = clf_tree.predict_proba(X_test)[:,1]

# Compute false positive rate, true positive rate and thresholds defining ROC curve
# (note: these values define the points at which the ROC curve has a kink)
fpr_tree, tpr_tree, thresholds_tree = roc_curve(y_test, y_pred_proba, pos_label = 1)

print('False positive rates: {}\n'.format(fpr_tree))
print('True positive rates: {}\n'.format(tpr_tree))
print('Thresholds: {}\n'.format(thresholds_tree))

# Accuracy
print('Accuracy:'.ljust(25) + str(acc_tree))

# Compute and show area under the ROC curve
roc_auc_tree = auc(fpr_tree, tpr_tree)
print ('Area under curve (AUC):'.ljust(25) + str(roc_auc_tree))
# -

# **Q 2**: Plot the ROC curve. Remember: the ROC curve has the false positive rate on the x-axis and the true positive rate on the y axis.

# +
# Plot the ROC curve
plt.plot(fpr_tree, tpr_tree, lw = 2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.title('ROC curve for Classification Tree')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
# -

# ## Part 6: Compare classification tree to random forest model
#
# Random Forests are a model frequently applied in data science applications in business. Hence, let's see how they perform for this example.

# **Q 1**: Fit a random forest model. What's the optimal number of trees/estimators?

# +
# Tune n_estimators parameter in the range(50, 251, 25)
tuned_parameters = [{'n_estimators': range(50, 251, 25)}]

clf_forest = GridSearchCV(RandomForestClassifier(random_state = SEED), tuned_parameters, cv = 5, scoring = 'accuracy')
clf_forest.fit(X_train, y_train)

# Look at the best parameters
clf_forest.best_params_

# +
# Extract the optimal number of trees
best_n_estimators = clf_forest.best_params_['n_estimators']
best_n_estimators
# -

# **Q 2**: Assess the model on the test set. What accuracy do you achieve?

# +
# Assessing best performing classifier tree on test set (baseline AUC is approx. 0.94)

# Fit with the best number of estimators
clf_randomForest = RandomForestClassifier(n_estimators = best_n_estimators, random_state=SEED)
clf_randomForest.fit(X_train, y_train)

# Compute accuracy on test set
y_pred = clf_randomForest.predict(X_test)
acc_rf = accuracy_score(y_test, y_pred)
print('Accuracy:'.ljust(25) + str(acc_rf))

# AUC ROC
y_predProba = clf_randomForest.predict_proba(X_test)[:, 1]
fpr_tree, tpr_tree, thresholds_tree = roc_curve(y_test, y_predProba, pos_label = 1)
roc_auc_rf = auc(fpr_tree, tpr_tree)
print ('Area under curve (AUC):'.ljust(25) + str(roc_auc_rf))
# -

# ## **SUMMARY OF ACCURACY AND AUC VALUES**

# print a small aligned table of the four scores collected above
width = 35
models = ['Decision Tree ACC', 'Random Forest ACC', 'Decision Tree AUC', 'Random Forest AUC']
results = [acc_tree, acc_rf, roc_auc_tree, roc_auc_rf]
print('', '=' * width, '\n', 'Summary of ACC and AUC Scores'.center(width), '\n', '=' * width)
for i in range(len(models)):
    if i == 2:
        print()
    print(models[i].center(width-8), '{0:.4f}'.format(results[i]))
exercises/complex-prediction/SOLUTION-complex-prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Build a pan-allele peptide–MHC binding model: load MHC protein sequences and
# measured binding affinities, one-hot encode both sequence inputs, and train a
# two-branch recurrent network (legacy Keras Graph API).

# +
import keras
import keras.preprocessing
import keras.preprocessing.text
import keras.preprocessing.sequence
import mhcflurry
import skbio

# FIX: numpy and pandas are used below (np.zeros, np.maximum, pd.read_csv)
# but were never imported.
import numpy as np
import pandas as pd
# -

# Keep, for every four-digit allele name, the longest sequence available.
mhc_sequences = {}
for name, seq in skbio.parse_fasta("mhc_seqs.fasta"):
    if name[-1].isalpha():
        # skip null/questionable alleles (names ending in a letter suffix)
        continue
    four_digit_name = ":".join(name.split(":")[:2])
    if four_digit_name in mhc_sequences:
        old_seq = mhc_sequences[four_digit_name]
        if len(old_seq) >= len(seq):
            continue
    mhc_sequences[four_digit_name] = seq
print("Loaded sequences of %d MHC alleles" % len(mhc_sequences))

df = pd.read_csv("combined_human_class1_dataset.csv")
df

# +
# filter input data down to MHC alleles for which we have sequences
input_mhc_seqs = []
input_peptides = []
target_values = []
skipped = set([])
for _, row in df.iterrows():
    if not row.mhc.startswith("HLA-"):
        continue
    allele = row.mhc.replace("HLA-", "")
    if allele in mhc_sequences:
        input_mhc_seqs.append(mhc_sequences[allele])
        input_peptides.append(row.peptide)
        target_values.append(row.meas)
    else:
        skipped.add(allele)
for allele in skipped:
    print("Skipped %s" % allele)
print("Kept %d/%d pMHC inputs" % (
        len(input_mhc_seqs),
        len(df)))
# -


def peptides_to_indices(peptides):
    """Convert peptide strings to lists of amino-acid indices (0-19).

    Peptides containing spaces are silently skipped, so the output may be
    shorter than the input.
    """
    from mhcflurry.data_helpers import amino_acid_letter_indices
    index_sequences = []
    for peptide in peptides:
        if " " not in peptide:
            index_sequences.append([amino_acid_letter_indices[aa] for aa in peptide])
    return index_sequences


def onehot(peptides):
    """One-hot encode sequences into a boolean (n, maxlen, 20) array.

    Sequences shorter than the longest one are zero-padded at the end;
    sequences containing spaces are left as all-zero rows.
    """
    from mhcflurry.data_helpers import amino_acid_letter_indices
    n = len(peptides)
    maxlen = max(len(peptide) for peptide in peptides)
    result = np.zeros((n, maxlen, 20), dtype=bool)
    for i, peptide in enumerate(peptides):
        if " " in peptide:
            continue
        for j, aa in enumerate(peptide):
            result[i, j, amino_acid_letter_indices[aa]] = 1
    return result


padded_peptides = onehot(input_peptides)
padded_mhc = onehot(input_mhc_seqs)
print(padded_peptides.shape)
print(padded_mhc.shape)

# +
# JZS1??

# +
from keras.models import Graph
from keras.layers.recurrent import JZS1
from keras.layers.core import Dense

RNN_OUTPUT_DIM = 32
DENSE_OUTPUT_DIM = 32
N_DISTINCT_AMINO_ACIDS = 20
max_peptide_length = padded_peptides.shape[1]
# FIX: this previously read padded_peptides.shape[1]; the MHC branch needs the
# MHC array's sequence length.
max_mhc_length = padded_mhc.shape[1]

# graph model with two inputs and one output (legacy Keras Graph API)
graph = Graph()
graph.add_input(name='peptide', ndim=3)
graph.add_input(name='mhc', ndim=3)

# RNN for peptide sequences
graph.add_node(
    JZS1(
        input_dim=N_DISTINCT_AMINO_ACIDS,
        output_dim=RNN_OUTPUT_DIM),
    name="peptide_rnn",
    input="peptide")

# RNN for MHC sequences
graph.add_node(
    JZS1(
        input_dim=N_DISTINCT_AMINO_ACIDS,
        output_dim=RNN_OUTPUT_DIM),
    name="mhc_rnn",
    input="mhc")

# concatenate last output of both RNNs and transform them into a lower dimensional space
graph.add_node(
    Dense(RNN_OUTPUT_DIM * 2, DENSE_OUTPUT_DIM, activation="relu"),
    name="hidden",
    merge_mode="concat",
    inputs=("peptide_rnn", "mhc_rnn"))

graph.add_node(
    Dense(DENSE_OUTPUT_DIM, 1, activation="sigmoid"),
    name="affinity",
    input="hidden")

graph.add_output(name='affinity_output', input='affinity')
graph.compile('rmsprop', {'affinity_output':'mse'})
print(graph.get_config())
# -

# Map measured affinities onto [0, 1]: 1 = strongest binder, 0 = meas >= 5000
# (presumably IC50 in nM — the usual 1 - log(ic50)/log(cutoff) transform;
# TODO confirm the units of the 'meas' column).
log_target_values = np.maximum(0, 1.0 - np.log(target_values) / np.log(5000))

history = graph.fit({'peptide':padded_peptides, 'mhc':padded_mhc, 'affinity_output':log_target_values}, nb_epoch=10)

predictions = graph.predict({'peptide':padded_peptides, 'mhc':padded_mhc})
Exploring Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exploratory Data Analysis notebook # + # %matplotlib inline import pandas as pd df = pd.read_csv('../data/img.csv') df.head() # - df.info() df.describe() df.iloc[:,1:].describe().transpose().describe() len(df), len(df.dropna(how='any')) print(df.depth.diff().unique()) # + from PIL import Image img = Image.fromarray(df.iloc[:,1:].values) img.show()
challenge-2/notebooks/eda.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Theory

# +
# PCA is an unsupervised statistical technique used to examine the interrelations among a set of variables in order to identify the underlying structure of those variables

# AKA General Factor Analysis

# While regressions determine lines of best fit, factor analysis determines several orthogonal lines of best fit to the data set.

# Orthogonal means "at right angles"

# The lines are perpendicular to each other in n-dimensional space

# N-Dimensional Space is the variable sample space -- there are as many dimensions as there are variables, so in a data set with 4 variables the sample space is 4-dimensional
# -

# +
# Data plotted across two features. Orthogonal line is right angle to line of best fit.

# Components are a linear transformation that chooses a variable system for the data set such that the greatest variance of the data set comes to lie on the first axis

# +
# The second greatest variance on the second axis, and so on...

# This process allows us to reduce the number of variables used in an analysis
# -

# The components are uncorrelated, since in the sample space they are orthogonal to each other

# Component 1, component 2, component 3.

# Can compress the amount of explained variation to just a few components,
# though it can be hard to interpret the components

# PCA with scikit learn

# # Python and Theory

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
# %matplotlib inline

from sklearn.datasets import load_breast_cancer

cancer = load_breast_cancer()

type(cancer)

cancer.keys()

print(cancer['DESCR'])

# Build a DataFrame of the feature columns.
df = pd.DataFrame(cancer['data'],columns=cancer['feature_names'])

df.head()

cancer['target']

from sklearn.preprocessing import StandardScaler

# Standardize features to zero mean / unit variance — needed before PCA so
# large-scale features do not dominate the components.
scaler = StandardScaler()

scaler.fit(df)

scaled_data = scaler.transform(df)

# PCA: project the standardized features onto the top 2 principal components.
from sklearn.decomposition import PCA

pca = PCA(n_components=2)

pca.fit(scaled_data)

x_pca = pca.transform(scaled_data)

scaled_data.shape

x_pca.shape

plt.figure(figsize=(8,6))
plt.scatter(x_pca[:,0],x_pca[:,1],c=cancer['target'])
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
# FIX: removed a stray `plt.legend` — without parentheses it was a no-op
# attribute access, and even when called there are no labeled artists for a
# legend to display.

pca.components_

# Each row of components_ expresses one principal component as weights on the
# original features.
df_comp = pd.DataFrame(pca.components_,columns=cancer['feature_names'])

# +
plt.figure(figsize=(12,6))
sns.heatmap(df_comp,cmap='plasma')

# this shows the correlation between the first and second components. This is a heatmap of the correlation between various features and principal component itself. The hotter the number, the more correlated. They are combinations of all these features. It shows which

# this logistic regression is easily separable with a straight line. Principal Component Analysis is confusing, but really powerful. Easy to get lost in it by memorizing a few lines of code. Transform to a few components.
# -
udemycoursework/Principal Component Analysis Notes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sympy import * import numpy as np import matplotlib.pyplot as plt x = Symbol('x')# x sada postaje simbolicka prom y = (x-3)**2 #funkcija dy = y.diff(x) #prvi izvod fje y dy def plotF(): space = np.linspace(-5,5,100) data = np.array([N(y.subs(x,value)) for value in space]) plt.plot(space, data) plt.show() # t1 = t0 - a*dy(t0) t0 = 84 #pocetna tacka t1 = 0 a = 0.01 iteracija = 0 provera = 0 preciznost = 1/1000000 plot = True iteracijaMaks = 10000 #najveci broj iteracija posle kojih treba da se odustane divergencijaMaks = 50 #parametar za sprecavanje divergencije # + while True: t1 = t0 - a*N(dy.subs(x, t0)).evalf() #dy.subs direktno menja t0 da bismo izracunali dy(t0) iteracija+=1 #povecaj broj iteracija #ako ima previse iteracija to znaci da verovatno param nisu ok if iteracija>iteracijaMaks: print("Previse iteracija") break #sada ide provera da li t0 > t1 ako nije onda dozvoljavamo #najvise 50 divergiranja if t0<t1: print("t0 divergira") provera+=1 if provera>divergencijaMaks: print("Previse iteracija (%s), t0 divergira"%divergencijaMaks) print("Manje a ili proveriti da li fja konvekna") plot = False break #sada ide uslov kojim mi zakljucujemo da t0 konvergira #to je zapravo ovo t0-t1< preciznosti i tako izlazimo iz petlje if abs(t0-t1)<preciznost: break #obnavlajmo vrednost za sledecu iteracijuw t0=t1 if plot: print("Broj iteracija",iteracija,"t1=",t1) plt.plot(t0,N(y.subs(x,t0)).evalf(),marker='o',color='r') plotF() # -
Exercise 03 - Find Minimum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python2 # --- # The [Containers](Containers.ipynb) Tutorial introduced the [HoloMap](Containers.ipynb#HoloMap), a core HoloViews data structure that allows easy exploration of parameter spaces. The essence of a HoloMap is that it contains a collection of [Elements](Elements.ipynb) (e.g. Images and Curves) that you can easily select and visualize. # # HoloMaps hold fully constructed Elements at specifically sampled points in a multidimensional space. Although HoloMaps are useful for exploring high-dimensional parameter spaces, they can very quickly consume huge amounts of memory to store all these Elements. For instance, a hundred samples along four orthogonal dimensions would need a HoloMap containing a hundred *million* Elements, each of which could be a substantial object that takes time to create and costs memory to store. Thus ``HoloMaps`` have some clear limitations: # # * HoloMaps may require the generation of millions of Elements before the first element can be viewed. # * HoloMaps can easily exhaust all the memory available to Python. # * HoloMaps can even more easily exhaust all the memory in the browser when displayed. # * Static export of a notebook containing HoloMaps can result in impractically large HTML files. # # The ``DynamicMap`` addresses these issues by computing and displaying elements dynamically, allowing exploration of much larger datasets: # # * DynamicMaps generate elements on the fly, allowing the process of exploration to begin immediately. # * DynamicMaps do not require fixed sampling, allowing exploration of parameters with arbitrary resolution. # * DynamicMaps are lazy in the sense they only compute only as much data as the user wishes to explore. 
# # Of course, these advantages come with some limitations: # # * DynamicMaps require a live notebook server and cannot be fully exported to static HTML. # * DynamicMaps store only a portion of the underlying data, in the form of an Element cache and their output is dependent on the particular version of the executed code. # * DynamicMaps (and particularly their element caches) are typically stateful (with values that depend on patterns of user interaction), which can make them more difficult to reason about. # # In addition to the different computational requirements of ``DynamicMaps``, they can be used to build sophisticated, interactive vizualisations that cannot be achieved using only ``HoloMaps``. This notebook demonstrates some basic examples and the [Streams](Streams.ipynb) notebook follows on by introducing the streams system. The [Linked Streams](Linked_Streams.ipynb) tutorial shows how you can directly interact with your plots when using the Bokeh backend. # # When DynamicMap was introduced in version 1.6, it support multiple different 'modes' which have now been deprecated. This notebook demonstrates the simpler, more flexible and more powerful DynamicMap introduced in version 1.7. Users who have been using the previous version of DynamicMap should be unaffected as backwards compatibility has been preserved for the most common cases. # # All this will make much more sense once we've tried out some ``DynamicMaps`` and showed how they work, so let's create one! 
# <center><div class="alert alert-info" role="alert">To visualize and use a <b>DynamicMap</b> you need to be running a live Jupyter server.<br>This tutorial assumes that it will be run in a live notebook environment.<br> # When viewed statically, DynamicMaps will only show the first available Element,<br> and will thus not have any slider widgets, making it difficult to follow the descriptions below.<br><br> # It's also best to run this notebook one cell at a time, not via "Run All",<br> so that subsequent cells can reflect your dynamic interaction with widgets in previous cells.</div></center> # ## ``DynamicMap`` <a id='DynamicMap'></a> # Let's start by importing HoloViews and loading the extension: import holoviews as hv import numpy as np hv.notebook_extension() # We will now create ``DynamicMap`` similar to the ``HoloMap`` introduced in the [Containers Tutorial](Containers.ipynb#HoloMap). The ``HoloMap`` in that tutorial consisted of ``Image`` elements defined by a function returning NumPy arrays called ``sine_array``. Here we will define a ``waves`` function that returns an array pattern parameterized by arbitrary ``alpha`` and ``beta`` parameters inside a HoloViews [Image](Elements.ipynb#Image) element: # + xvals = np.linspace(-4,0,202) yvals = np.linspace(4,0,202) xs,ys = np.meshgrid(xvals, yvals) def waves_image(alpha, beta): return hv.Image(np.sin(((ys/alpha)**alpha+beta)*xs)) waves_image(0,0) + waves_image(0,4) # - # Now we can demonstrate the possibilities for exploration enabled by the simplest declaration of a ``DynamicMap``. # ### Basic ``DynamicMap`` declaration<a id='BasicDeclaration'></a> # A simple ``DynamicMap`` declaration looks identical to that needed to declare a ``HoloMap``. 
Instead of supplying some initial data, we will supply the ``waves_image`` function with key dimensions simply declaring the arguments of that function:

dmap = hv.DynamicMap(waves_image, kdims=['alpha', 'beta'])
dmap

# This object is created instantly, but because it doesn't generate any `hv.Image` objects initially it only shows the printed representation of this object along with some information about how to display it. We will refer to a ``DynamicMap`` that doesn't have enough information to display itself as 'unbounded'.
#
# The textual representation of all ``DynamicMaps`` look similar, differing only in the listed dimensions until they have been evaluated at least once.

# #### Explicit indexing
#
# Unlike a corresponding ``HoloMap`` declaration, this simple unbounded ``DynamicMap`` cannot yet visualize itself. To view it, we can follow the advice in the warning message. First we will explicitly index into our ``DynamicMap`` in the same way you would access a key on a ``HoloMap``:

dmap[0,1] + dmap.select(alpha=1, beta=2)

# Note that the declared kdims are specifying the arguments *by position* as they do not match the argument names of the ``sine_image`` function. If you *do* match the argument names *exactly*, you can map a kdim position to any argument position of the callable. For instance, the declaration ``kdims=['freq', 'phase']`` would index first by frequency, then phase without mixing up the arguments to ``sine_image`` when indexing.

# #### Setting dimension ranges
#
# The second suggestion proposed by the warning was to supply dimension ranges using the ``redim.range`` method:

dmap.redim.range(alpha=(0,5.0), beta=(1,5.0))

# Here each `hv.Image` object visualizing a particular sine ring pattern with the given parameters is created dynamically, whenever the slider is set to that value. Any value in the allowable range can be requested by dragging the sliders or by tweaking the values using the left and right arrow keys.
# # Of course, we didn't have to use the ``redim.range`` method and we could have simply declared the ranges right away using explicit ``hv.Dimension`` objects. This would allow us to declare other dimension properties such as the step size used by the sliders: by default each slider can select around a thousand distinct values along its range but you can specify your own step value via the dimension ``step`` parameter. If you use integers in your range declarations, integer stepping will be assumed with a step size of one. # # It is important to note that whenever the ``redim`` method is used, a new ``DynamicMap`` is returned with the updated dimensions. In other words, the original ``dmap`` remains unbounded with default dimension objects. # # # #### Setting dimension values # # This ``DynamicMap`` above allows exploration of *any* phase and frequency within the declared range unlike an equivalent ``HoloMap`` which would have to be composed of a finite set of samples. We can achieve a similar discrete sampling using ``DynamicMap`` by setting the ``values`` parameter on the dimensions: dmap.redim.values(alpha=[0,1,2], beta=[0.1, 1.0, 2.5]) # The sliders now snap to the specified dimension values and if you are running this tutorial in a live notebook, the above cell should look like the ``HoloMap`` in the [Containers Tutorial](Containers.ipynb#HoloMap). ``DynamicMap`` is in fact a subclass of ``HoloMap`` with some crucial differences: # # * You can now pick as many values of **alpha** or **beta** as allowed by the slider. # * What you see in the cell above will not be exported in any HTML snapshot of the notebook # # # We will now explore how ``DynamicMaps`` relate to ``HoloMaps`` including conversion operations between the two types. As we will see, there are other ways to display a ``DynamicMap`` without using explict indexing or redim. 
# ## Interaction with ``HoloMap``s # To explore the relationship between ``DynamicMap`` and ``HoloMap``, let's declare another callable to draw some shapes we will use in a new ``DynamicMap``: def shapes(N, radius=0.5): # Positional keyword arguments are fine paths = [hv.Path([[(radius*np.sin(a), radius*np.cos(a)) for a in np.linspace(-np.pi, np.pi, n+2)]], extents=(-1,-1,1,1)) for n in range(N,N+3)] return hv.Overlay(paths) # #### Sampling ``DynamicMap`` from a ``HoloMap`` # When combining a ``HoloMap`` with a ``DynamicMap``, it would be very awkward to have to match the declared dimension ``values`` of the DynamicMap with the keys of the ``HoloMap``. Fortunately you don't have to: # %%opts Path (linewidth=1.5) holomap = hv.HoloMap({(N,r):shapes(N, r) for N in [3,4,5] for r in [0.5,0.75]}, kdims=['N', 'radius']) dmap = hv.DynamicMap(shapes, kdims=['N','radius']) holomap + dmap # Here we declared a ``DynamicMap`` without using ``redim``, but we can view its output because it is presented alongside a ``HoloMap`` which defines the available keys. This convenience is subject to three particular restrictions: # # # * You cannot display a layout consisting of unbounded ``DynamicMaps`` only, because at least one HoloMap is needed to define the samples. # * The HoloMaps provide the necessary information required to sample the DynamicMap. # # Note that there is one way ``DynamicMap`` is less restricted than ``HoloMap``: you can freely combine bounded ``DynamicMaps`` together in a ``Layout``, even if they don't share key dimensions. # Also notice that the ``%%opts`` cell magic allows you to style DynamicMaps can be styled in exactly the same way as HoloMaps. We will now use the ``%opts`` line magic to set the linewidths of all ``Path`` elements in the rest of the notebook: # %opts Path (linewidth=1.5) # #### Converting from ``DynamicMap`` to ``HoloMap`` # Above we mentioned that ``DynamicMap`` is an instance of ``HoloMap``. Does this mean it has a ``.data`` attribute? 
dtype = type(dmap.data).__name__ length = len(dmap.data) print("DynamicMap 'dmap' has an {dtype} .data attribute of length {length}".format(dtype=dtype, length=length)) # This is exactly the same sort of ``.data`` as the equivalent ``HoloMap``, except that its values will vary according to how much you explored the parameter space of ``dmap`` using the sliders above. In a ``HoloMap``, ``.data`` contains a defined sampling along the different dimensions, whereas in a ``DynamicMap``, the ``.data`` is simply the *cache*. # # The cache serves two purposes: # # * Avoids recomputation of an element should we revisit a particular point in the parameter space. This works well for categorical or integer dimensions, but doesn't help much when using continuous sliders for real-valued dimensions. # * Records the space that has been explored with the ``DynamicMap`` for any later conversion to a ``HoloMap`` up to the allowed cache size. # # We can always convert *any* ``DynamicMap`` directly to a ``HoloMap`` as follows: hv.HoloMap(dmap) # This is in fact equivalent to declaring a HoloMap with the same parameters (dimensions, etc.) using ``dmap.data`` as input, but is more convenient. Note that the slider positions reflect those we sampled from the ``HoloMap`` in the previous section. # # Although creating a HoloMap this way is easy, the result is poorly controlled, as the keys in the DynamicMap cache are usually defined by how you moved the sliders around. If you instead want to specify a specific set of samples, you can easily do so by using the same key-selection semantics as for a ``HoloMap`` to define exactly which elements are to be sampled and put into the cache: hv.HoloMap(dmap[{(2,0.3), (2,0.6), (3,0.3), (3,0.6)}]) # Here we index the ``dmap`` with specified keys to return a *new* DynamicMap with those keys in its cache, which we then cast to a ``HoloMap``. 
This allows us to export specific contents of ``DynamicMap`` to static HTML which will display the data at the sampled slider positions. # # The key selection above happens to define a Cartesian product, which is one of the most common way to sample across dimensions. Because the list of such dimension values can quickly get very large when enumerated as above, we provide a way to specify a Cartesian product directly, which also works with ``HoloMaps``. Here is an equivalent way of defining the same set of four points in that two-dimensional space: samples = hv.HoloMap(dmap[{2,3},{0.5,1.0}]) samples samples.data.keys() # The default cache size of 500 Elements is relatively high so that interactive exploration will work smoothly, but you can reduce it using the ``cache_size`` parameter if you find you are running into issues with memory consumption. A bounded ``DynamicMap`` with ``cache_size=1`` requires the least memory, but will recompute a new Element every time the sliders are moved, making it less responsive. # #### Converting from ``HoloMap`` to ``DynamicMap`` # We have now seen how to convert from a ``DynamicMap`` to a ``HoloMap`` for the purposes of static export, but why would you ever want to do the inverse? # # Although having a ``HoloMap`` to start with means it will not save you memory, converting to a ``DynamicMap`` does mean that the rendering process can be deferred until a new slider value requests an update. You can achieve this conversion using the ``Dynamic`` utility as demonstrated here by applying it to the previously defined ``HoloMap`` called ``samples``: from holoviews.util import Dynamic dynamic = Dynamic(samples) print('After apply Dynamic, the type is a {dtype}'.format(dtype=type(dynamic).__name__)) dynamic # In this particular example, there is no real need to use ``Dynamic`` as each frame renders quickly enough. For visualizations that are slow to render, using ``Dynamic`` can result in more responsive visualizations. 
# # The ``Dynamic`` utility is very versatile and is discussed in more detail in the [Dynamic Operations](Dynamic_Operations.ipynb) tutorial. # ### Slicing ``DynamicMaps`` # As we have seen we can either declare dimension ranges directly in the kdims or use the ``redim.range`` convenience method: dmap = hv.DynamicMap(shapes, kdims=['N','radius']).redim.range(N=(2,20), radius=(0.5,1.0)) # The declared dimension ranges define the absolute limits allowed for exploration in this continuous, bounded DynamicMap . That said, you can use the soft_range parameter to view subregions within that range. Setting the soft_range parameter on dimensions can be done conveniently using slicing: sliced = dmap[4:8, :] sliced # # Notice that N is now restricted to the range 4:8. Open slices are used to release any ``soft_range`` values, which resets the limits back to those defined by the full range: sliced[:, 0.8:1.0] # The ``[:]`` slice leaves the soft_range values alone and can be used as a convenient way to clone a ``DynamicMap``. Note that mixing slices with any other object type is not supported. In other words, once you use a single slice, you can only use slices in that indexing operation. # ## Using groupby to discretize a DynamicMap # # A DynamicMap also makes it easy to partially or completely discretize a function to evaluate in a complex plot. By grouping over specific dimensions that define a fixed sampling via the Dimension values parameter, the DynamicMap can be viewed as a ``GridSpace``, ``NdLayout``, or ``NdOverlay``. If a dimension specifies only a continuous range it can't be grouped over, but it may still be explored using the widgets. This means we can plot partial or completely discretized views of a parameter space easily. 
# # #### Partially discretize # # The implementation for all the groupby operations uses the ``.groupby`` method internally, but we also provide three higher-level convenience methods to group dimensions into an ``NdOverlay`` (``.overlay``), ``GridSpace`` (``.grid``), or ``NdLayout`` (``.layout``). # # Here we will evaluate a simple sine function with three dimensions, the phase, frequency, and amplitude. We assign the frequency and amplitude discrete samples, while defining a continuous range for the phase: # + xs = np.linspace(0, 2*np.pi) def sin(ph, f, amp): return hv.Curve((xs, np.sin(xs*f+ph)*amp)) kdims=[hv.Dimension('phase', range=(0, np.pi)), hv.Dimension('frequency', values=[0.1, 1, 2, 5, 10]), hv.Dimension('amplitude', values=[0.5, 5, 10])] sine_dmap = hv.DynamicMap(sin, kdims=kdims) # - # Next we define the amplitude dimension to be overlaid and the frequency dimension to be gridded: # %%opts GridSpace [show_legend=True fig_size=200] sine_dmap.overlay('amplitude').grid('frequency') # As you can see, instead of having three sliders (one per dimension), we've now laid out the frequency dimension as a discrete set of values in a grid, and the amplitude dimension as a discrete set of values in an overlay, leaving one slider for the remaining dimension (phase). This approach can help you visualize a large, multi-dimensional space efficiently, with full control over how each dimension is made visible. # # # #### Fully discretize # # Given a continuous function defined over a space, we could sample it manually, but here we'll look at an example of evaluating it using the groupby method. Let's look at a spiral function with a frequency and first- and second-order phase terms. 
Then we define the dimension values for all the parameters and declare the DynamicMap: # + # %opts Path (linewidth=1 color=Palette('Blues')) def spiral_equation(f, ph, ph2): r = np.arange(0, 1, 0.005) xs, ys = (r * fn(f*np.pi*np.sin(r+ph)+ph2) for fn in (np.cos, np.sin)) return hv.Path((xs, ys)) spiral_dmap = hv.DynamicMap(spiral_equation, kdims=['f','ph','ph2']).\ redim.values(f=np.linspace(1, 10, 10), ph=np.linspace(0, np.pi, 10), ph2=np.linspace(0, np.pi, 4)) # - # Now we can make use of the ``.groupby`` method to group over the frequency and phase dimensions, which we will display as part of a GridSpace by setting the ``container_type``. This leaves the second phase variable, which we assign to an NdOverlay by setting the ``group_type``: # %%opts GridSpace [xaxis=None yaxis=None] Path [bgcolor='w' xaxis=None yaxis=None] spiral_dmap.groupby(['f', 'ph'], group_type=hv.NdOverlay, container_type=hv.GridSpace) # This grid shows a range of frequencies `f` on the x axis, a range of the first phase variable `ph` on the `y` axis, and a range of different `ph2` phases as overlays within each location in the grid. As you can see, these techniques can help you visualize multidimensional parameter spaces compactly and conveniently. # # ## DynamicMaps and normalization # By default, a ``HoloMap`` normalizes the display of elements according the minimum and maximum values found across the ``HoloMap``. This automatic behavior is not possible in a ``DynamicMap``, where arbitrary new elements are being generated on the fly. 
Consider the following examples where the arrays contained within the returned ``Image`` objects are scaled with time: # + # %%opts Image {+axiswise} ls = np.linspace(0, 10, 200) xx, yy = np.meshgrid(ls, ls) def cells(time): return hv.Image(time*np.sin(xx+time)*np.cos(yy+time), vdims=['Intensity']) dmap = hv.DynamicMap(cells, kdims=['time']).redim.range(time=(1,20)) dmap + dmap.redim.range(Intensity=(0,10)) # - # Here we use ``+axiswise`` to see the behavior of the two cases independently. We see in **A** that when only the time dimension is given a range, no automatic normalization occurs (unlike a ``HoloMap``). In **B** we see that normalization is applied, but only when the value dimension ('Intensity') range has been specified. # # In other words, ``DynamicMaps`` cannot support automatic normalization across their elements, but do support the same explicit normalization behavior as ``HoloMaps``. Values that are generated outside this range are simply clipped according the usual semantics of explicit value dimension ranges. # # Note that we can always have the option of casting a ``DynamicMap`` to a ``HoloMap`` in order to automatically normalize across the cached values, without needing explicit value dimension ranges. # ## Using DynamicMaps in your code # # As you can see, ``DynamicMaps`` let you use HoloViews with a very wide range of dynamic data formats and sources, making it simple to visualize ongoing processes or very large data spaces. # # Given unlimited computational resources, the functionality covered in this tutorial would match that offered by ``HoloMap`` but with fewer normalization options. ``DynamicMap`` actually enables a vast range of new possibilities for dynamic, interactive visualizations as covered in the [Streams](Streams.ipynb) tutorial. Following on from that, the [Linked Streams](Linked_Streams.ipynb) tutorial shows how you can directly interact with your plots when using the Bokeh backend.
Recommender-Source-Code/JupyterNotebook/notebooks/Dynamic_Map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import networkx as nx
import seaborn as sns

# +
# Load the raw graph-feature table, downcast floats to save memory, and drop
# unusable columns/rows.
df = pd.read_pickle('./data/j1c-features.pkl', compression='gzip')

for col in df.columns:
    if np.issubdtype(df[col].dtype, np.floating):
        df[col] = df[col].astype(np.float32)

# FIX: use the explicit `columns=` keyword — the positional axis argument to
# DataFrame.drop is deprecated (removed in pandas 2.0); behavior is identical.
df = df.drop(columns='diameter')
df = df.drop(columns='degree_assortativity')
df = df.dropna()
# -

# The pre-cleaned table supersedes the ad-hoc cleaning above.
df = pd.read_pickle('./data/j1c-features-cleaned.pkl', compression='gzip')

graphs = nx.read_graph6('./data/graph10.g6')


def return_max_value(dataframe, col):
    """Return the modal (most frequent) value of ``dataframe[col]``."""
    uniques, counts = np.unique(dataframe[col], return_counts=True)
    return uniques[np.argmax(counts)]


# +
# Feature columns to display next to each graph, mapped to display labels.
LABELS = dict(
    # num_edges = "# Edges",
    modularity="Modularity",
    density="Density",
    # total_triangles = '# Triangles',
    triangle_ratio="Triangle Ratio",
    # is_planar="Is Planar Graph?",
    avg_shortest_path_length="Avg Shortest Path",
    global_clustering_coefficient="Global Clustering",
    avg_clustering_coefficient="Avg Clustering",
    # square_clustering="Square Clustering",
    global_efficiency="Global Efficiency",
    local_efficiency="Local Efficiency",
    # degree_assortativity="Degree Assortativity",
    # diameter = 'Diameter',
    node_connectivity="Node Connectivity",
)

# Fixed circular layout for the 10-node graphs so frames are comparable.
POSITION = nx.circular_layout(range(0, 10))
SPACING = 0.125
FONTDICT = {"family": "monospace", "weight": "normal", "size": 30}


# +
def make_frame(graph, data, ax):
    """Draw one animation frame: the graph plus its feature values.

    graph = nx.Graph
    data = pd.Series (one row of the feature table)
    """
    nx.draw(graph, pos=POSITION, ax=ax)

    # Lay the feature labels out in a right-hand column, vertically centred.
    x_pos = 1.2
    loc = (data.size * SPACING) / 2
    y_pos = np.linspace(loc, -loc, len(LABELS))

    max_char = max(len(name) for _, name in LABELS.items())

    for idx, (key, name) in enumerate(LABELS.items()):
        value = data[key]
        name = name.ljust(max_char) + ": "

        if not np.issubdtype(value.dtype, np.bool_):
            # Draw the full 9-decimal value faintly, then overlay the leading
            # digits opaquely — emphasizes the stable part of the number.
            text = name + "{: .9f}".format(value)
            ax.text(x_pos, y_pos[idx], text, fontdict=FONTDICT, alpha=0.3)
            ax.text(x_pos, y_pos[idx], text[:-7], fontdict=FONTDICT, alpha=1)
        else:
            text = f"{name} {value}"
            ax.text(x_pos, y_pos[idx], text, fontdict=FONTDICT, alpha=1)


def make_gif(graphs, df, name="visualization.gif"):
    """Animate every graph indexed by ``df`` and save the result as a GIF.

    graphs: sequence of nx.Graph, indexed by the integers in df.index
    df:     feature table whose rows correspond to the graphs to animate
    name:   output path (rendered with the imagemagick writer)
    """
    indices = df.index
    graphs_subset = [graphs[i] for i in indices]

    fig, ax = plt.subplots(figsize=(21, 10))

    def update(i):
        ax.clear()
        g = graphs_subset[i]
        # FIX: previously read the notebook-global `t` instead of the `df`
        # parameter, so the function only worked when called with `t` itself.
        data = df.loc[indices[i]]
        make_frame(g, data, ax)
        plt.tight_layout()

    ani = FuncAnimation(
        fig, update, interval=100, frames=range(df.shape[0]), repeat=True
    )
    ani.save(name, writer="imagemagick", savefig_kwargs={"facecolor": "white"}, fps=16)
    plt.close()


# +
# Filter the feature table down to graphs whose metrics all fall inside
# narrow bands — i.e. graphs that are nearly identical feature-wise.
# NOTE(review): boolean `inclusive=` arguments are deprecated in pandas >= 1.3
# (use 'both'/'neither'); kept as-is for compatibility with older pandas.
#t = df[df.global_clustering_coefficient.between(0.5, 0.51, inclusive=True)]
t = df[df.modularity.between(0.4, 0.5)]
t = t[t.global_clustering_coefficient.between(0.5, 0.5999, inclusive=True)]
t = t[t.global_efficiency.between(0.5, 0.6, inclusive=False)]
t = t[t.local_efficiency.between(0.5, 0.6, inclusive=False)]
t = t[t.avg_clustering_coefficient.between(0.5, 0.6, inclusive=False)]
t = t[t.avg_shortest_path_length.between(2.2, 2.3, inclusive=True)]
t = t[t.density.between(.3, .4)]
t.shape

# +
import warnings
warnings.filterwarnings("ignore")
# -

make_gif(graphs, t, './figures/gifs/j1c-n-33.gif')
experiments/experiment_6/j1c-same-feature-gif.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <img src="../Images/Level1Beginner.png" alt="Beginner" width="128" height="128" align="right"> # # # Funciones en Python # # En programación una **función** es un **bloque de sentencias** que **tiene nombre** y hace "algo". # # La potencia de una función es que se puede "invocar", "llamar" o "ejecutar" tantas veces como se quiera desde distintas partes de otro código, lo que facilita la **reutilización**, **evolución** y **mantenimiento** del código. # # Las funciones suelen tener "argumentos", que es la forma en la que desde otras partes del código se les "pasa" valores que la función puede utilizar como "variables locales" o **parámetros**. # # La mayoría de las funciones suelen **devolver algo**, en general se invoca una función justamente para obtener ese resultado. # # + # la función type(...) recibe un argumento y devuelve algo type("texto") # - type(78.5) # + # la función type(...) devuelve algo que se conoce como type type(type("texto")) # + # la función print(...) recibe argumentos pero no devuleve nada print("texto") print(78.5, True, [1, 2, 3]) print() print("otro texto después de print() sin argumentos") print("lo que devuelve print(...)", type(print())) # - # --- # # Python cuenta con una serie de funciones incluidas (Built-in functions), que siempre están disponibles. # # Algunas ya fueron utilizadas como ser: **type(...)**, **len(...)**, **print(...)**, **range(...)**. # # También están las que permiten convertir tipos de datos **int(...)**, **float(...)**, **complex(...)**, **str(...)**, **list(...)**, **tuple(...)**, **set(...)**, **dict(...)** se toman como funciones pero son constructores (método especial que que se tratará con el tema **objetos**). 
# # Otras como **abs(...)**, **max(...)**, **min(...)**, **sum(...)** están disponibles y se recomienda indagar qué hacen ... # # En [Funciones Built-in](https://docs.python.org/es/3/library/functions.html) se encuentra el listado oficial de funciones incluidas en el intérprete de Python. # # --- # # ## Declarando funciones # # La palabra reservada **def** se utiliza para indicar el inicio de la declaración de una función. # # Luego **se debe indicar** el nombre de la función, a continuación *entre parentesis* la lista de parámetros si es que los tiene, finalmente con los dos puntos "**:**" se comienza el bloque de sentencias. # # Es posible indicar el nombre de la función, la lista de parámetros en múltiples líneas (ayuda a la visibilidad) del código cuando una función tiene muchos parámetros. # # A continuación de la **firma de la función** debe estar el **texto de documentación** o **docstring** en el que se consigna información sobre lo que la función hace. # # Existe un convenio sobre cómo documentar el código; se trata del [PEP 257 - Docstring Conventions](https://www.python.org/dev/peps/pep-0257/). # # Al final de la función se puede poner la instrucción **return**, no es necesario a menos que se deba devolver un valor; sin embargo puede ser útil para indicar que ahí finaliza el código de la función. **Quién programa debe verificar que las siguientes sentencias estén en el nivel de indentación anterior**. # # En el código Python las funciones **deben definirse antes de invocarse o utilizarse**. # + # definción de una función def some_function(first_parameter, second_parameter) : u""" descripción de lo que hace esta función """ # ... print("\t...") print("\tfirst_parameter:", first_parameter) print("\tsecond_parameter:", second_parameter) print("\t...") # ... 
return some_function("hola", "que tal") some_function(15, True) some_function(('a', 'b'), [1, 2, 3]) # - # ### Argumentos y Parámetros # # El **argumento** puede ser un valor, una variable o una expresión que se **evalúa antes de invocar la función**, el resultado de esa evaluación es lo que se entrega a la función. # len("Hola, bienvenido") some_list = ["Hola", "bienvenido"] len(some_list) len("Hola, bienvenido") + len(["Hola", "bienvenido"]) # --- # # La función recibe cada **argumento** como un **parámetro**, para ello en la definición de la función es necesario indicar el nombre de cada parámetro. # # Dentro de la función, en el bloque de sentencias de la función se puede **utilizar los parámetros** como si fuesen variables cuya **visibilidad o ámbito** es el de esa función, ocultando la visibilidad o ámbito de otra variable que tenga el mismo nombre. # # En el cuerpo de la función el nombre de cada parámetro responde a la forma en que se identifican y utilizan las variables. # + # definción de una función def some_function(first_parameter, second_parameter) : u""" descripción de lo que hace esta función """ # ... print("\t...") print("\tfirst_parameter:", first_parameter) print("\tsecond_parameter:", second_parameter) print("\t... cambiando los valores ...") first_parameter = "Algun texto que se puede poner" second_parameter = second_parameter * 2 print("\tfirst_parameter:", first_parameter) print("\tsecond_parameter:", second_parameter) print("\t...") # ... return first_parameter = "hola" second_parameter = "que tal" print("... en el nivel anterior a la función") print("first_parameter:", first_parameter) print("second_parameter:", second_parameter) some_function(first_parameter, "Julio") some_function(15, True) some_function(('a', 'b'), [1, 2, 3]) print("... 
en el nivel anterior a la función") print("first_parameter:", first_parameter) print("second_parameter:", second_parameter) # - # # ### Argumentos nominales # # La asignación de argumentos a parámetros normalmente se hace por su posición. # # En una función que tiene varios parámetros se asume que el primer argumento coincide con el el primer parámetro, el segundo argumento con el segundo parámetro y así sucesivamente. # # Mediante el nombre del parámetro es posible indicar qué argumento se asigna a cada parámetro. # + # definción de una función def some_function(first_parameter, second_parameter) : u""" descripción de lo que hace esta función """ # ... print("\t...") print("\tfirst_parameter:", first_parameter) print("\tsecond_parameter:", second_parameter) # ... return some_function("esto va en el 1er argumento", "esto va como segundo argumento") some_function(second_parameter = "esto va como segundo argumento", first_parameter = "esto va en el 1er argumento") # - # # ### Argumentos opcionales # # Los valores de algunos parámetros en la función pueden ser opcionales, es decir no hace falta que al invocar la función se utilice un argumento para dicho parámetro. # # En la declaración de la función indicando el nombre del parámetro seguido de una asignación del valor por defecto hace que ese parámetro sea opcional. # + # definción de una función def some_function(first_parameter, second_parameter = "VALOR POR DEFECTO") : u""" descripción de lo que hace esta función """ # ... print("\t...") print("\tfirst_parameter:", first_parameter) print("\tsecond_parameter:", second_parameter) # ... 
return some_function("esto va en el 1er argumento", "esto va como segundo argumento") some_function("esto va en el 1er argumento") some_function(second_parameter = "esto va como segundo argumento", first_parameter = "esto va en el 1er argumento") # - # <img src="../Images/Level2Intermediate.png" alt="Intermediate" width="128" height="128" align="right"> # # ### Argumentos variables # # Existen situaciones en las que un función **puede recibier los valores de una cantidad variable de argumentos**. # # En la firma de la función es posible indicar una **tupla de parametros** # # + # definción de una función def some_function(*args) : u""" descripción de lo que hace esta función """ # ... print("\t... args") for arg in args : print("\targumento:", arg) # ... return some_function("1er argumento", "segundo argumento") some_function(78, ['a', 'b', 'c'], "otro argumento") # - # # ### Argumentos variables nominales # # En otras situaciones una función **puede recibier valores de una cantidad variable de argumentos indicando el nombre de estos argumentos**. # # En la firma de la función es posible indicar un **diccionario de parametros** # # + # definción de una función def some_function(**kwargs) : u""" descripción de lo que hace esta función """ # ... print("\t... kwargs") for arg_key, arg_value in kwargs.items() : print("\targumento:", arg_key, arg_value) # ... return some_function(first_parameter = "1er argumento", second_parameter = "segundo argumento") some_function(int_parameter = 78, list_parameter = ['a', 'b', 'c'], string_parameter = "otro argumento") # + # definción de una función def some_function(*args, **kwargs) : u""" descripción de lo que hace esta función """ # ... if args : print("\t... args") for arg in args : print("\targumento:", arg) if kwargs : print("\t... kwargs") for arg_key, arg_value in kwargs.items() : print("\targumento:", arg_key, arg_value) # ... 
return some_function("primer argumento", "segundo argumento", "tercer argumento") some_function(first_parameter = "1er argumento", second_parameter = "segundo argumento") some_function(int_parameter = 78, list_parameter = ['a', 'b', 'c'], string_parameter = "otro argumento") some_function("1er argumento", "segundo argumento", int_parameter = 78, list_parameter = ['a', 'b', 'c'], string_parameter = "otro argumento")
Modular/Functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# COPYRIGHT © 2018 <NAME> <<EMAIL>>

# ### Setup

# + install dependencies (Colab only)
# !rm -r Neural_Networks-101-demo
# !git clone -b explanations https://github.com/KiranArun/Neural_Networks-101-demo.git
# !python3 /content/Neural_Networks-101-demo/scripts/setup.py helper_funcs
# -

# # MNIST Handwritten Digits Classifier

# +
import numpy as np
import matplotlib.pyplot as plt
from math import ceil, floor

import helper_funcs as helper
# -

model_root_dir = '/content/'

image_dims = (28, 28)
input_size = 28 ** 2   # flattened 28x28 images
num_classes = 10


class classifier():
    """Fully connected MNIST classifier implemented in plain NumPy.

    ``layers`` lists the hidden-layer widths; a ``num_classes``-wide
    output layer is appended automatically.  Training is plain SGD on
    the softmax cross-entropy loss with ReLU hidden activations.
    """

    def __init__(self, layers):
        self.mnist = helper.MNIST_data(model_root_dir + 'MNIST_data/')
        # FIX: copy before appending -- the original `layers += [num_classes]`
        # mutated the caller's list in place.
        self.layers = list(layers) + [num_classes]
        # FIX: always defined, so plot_losses() works even before train().
        self.losses = np.array([])
        self._init_weights()
        self._init_biases()

    def _init_weights(self):
        # One weight matrix per layer; each layer's fan-in chains from the
        # previous layer's output width.
        self.Weights = []
        for width in self.layers:
            fan_in = input_size if not self.Weights else self.Weights[-1].shape[-1]
            self.Weights.append(np.random.randn(fan_in, width))

    def _init_biases(self):
        self.biases = [np.random.randn(width) for width in self.layers]

    def activation(self, x):
        """ReLU.  FIX: non-mutating -- the original zeroed `x` in place,
        which silently overwrote the arrays saved in
        ``inputs_to_activations`` during the forward pass."""
        return np.maximum(x, 0.0)

    def activation_prime(self, x):
        """Derivative of ReLU (0 for x <= 0), without mutating `x`."""
        return (x > 0).astype(x.dtype)

    def forward_pass(self, x):
        """Run `x` (batch, input_size) through the network.

        Returns (logits, inputs_to_layers, inputs_to_activations); the two
        lists are the caches needed by back_prop().
        """
        inputs_to_layers = []
        inputs_to_activations = []
        for layer in range(len(self.layers)):
            inputs_to_layers.append(x)
            x = np.matmul(x, self.Weights[layer]) + self.biases[layer]
            if layer + 1 != len(self.layers):  # no activation on the output layer
                inputs_to_activations.append(x)
                x = self.activation(x)
        return x, inputs_to_layers, inputs_to_activations

    def softmax(self, x):
        # Shift by the row max for numerical stability before exponentiating.
        x = x - np.max(x, axis=1, keepdims=True)
        return np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)

    def cross_entropy(self, labels, logits):
        """Mean cross-entropy between one-hot `labels` and softmax `logits`."""
        labels = labels.reshape(-1, num_classes)
        logits = logits.reshape(-1, num_classes)
        # Clip to avoid log(0).
        losses = -np.sum(labels * np.log(np.clip(logits, 1e-10, 1)), axis=1)
        return np.mean(losses)

    def back_prop(self, softmax_outputs, labels, learning_rate,
                  layer_inputs, activation_inputs):
        """One SGD step: propagate the softmax/cross-entropy gradient back
        through every layer, updating Weights and biases in place."""
        batch_size = labels.shape[0]
        grads = softmax_outputs - labels  # d(loss)/d(logits)
        for layer in range(len(self.layers) - 1, -1, -1):
            if layer != len(self.layers) - 1:
                grads = grads * self.activation_prime(activation_inputs[layer])
            W_deltas = np.matmul(layer_inputs[layer].transpose(), grads)
            b_deltas = np.sum(grads, axis=0)
            grads = np.matmul(grads, self.Weights[layer].transpose())
            self.Weights[layer] -= learning_rate * W_deltas / batch_size
            self.biases[layer] -= learning_rate * b_deltas / batch_size

    def calculate_accuracy(self, images, labels):
        """Fraction of `images` whose argmax prediction matches `labels`."""
        logits = self.forward_pass(images)[0]
        marking = np.equal(np.argmax(logits, axis=1), np.argmax(labels, axis=1))
        return np.mean(marking.astype(np.int32))

    def train(self, batch_size, learning_rate, epochs, reset):
        """Train for `epochs` passes over the MNIST training set,
        reporting validation accuracy ~5 times per epoch."""
        if reset:
            self._init_weights()
            self._init_biases()
            self.losses = np.array([])
        iterations = ceil(self.mnist.number_train_samples / batch_size)
        # FIX: max(1, ...) guards the modulo below against ZeroDivisionError
        # when there are fewer than 5 iterations per epoch.
        report_every = max(1, floor(iterations / 5))
        for epoch in range(epochs):
            print('New epoch', str(epoch + 1) + '/' + str(epochs))
            for iteration in range(iterations):
                X, Y = self.mnist.get_batch(iteration, batch_size)
                nn_out, layer_inputs, activation_inputs = self.forward_pass(X)
                soft_out = self.softmax(nn_out)
                ce_out = self.cross_entropy(Y, soft_out)
                self.back_prop(soft_out, Y, learning_rate,
                               layer_inputs, activation_inputs)
                if (iteration + 1) % report_every == 0:
                    accuracy = self.calculate_accuracy(
                        self.mnist.validation_images, self.mnist.validation_labels)
                    print('step', str(iteration + 1) + '/' + str(iterations),
                          'loss', ce_out,
                          'accuracy', str(round(100 * accuracy, 2)) + '%')
                    self.losses = np.append(self.losses, ce_out)

    def plot_losses(self):
        """Plot the loss recorded at each reporting step."""
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(self.losses)
        ax.grid(True)


Model = classifier(layers=[256])

Model.train(batch_size=100, learning_rate=0.2, epochs=2, reset=True)
Model.plot_losses()
print(100 * Model.calculate_accuracy(Model.mnist.test_images, Model.mnist.test_labels))
numpy_models/multi-layer_MNIST-np.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # First TensorFlow Neurons # #### Load dependencies import numpy as np np.random.seed(42) import tensorflow as tf tf.set_random_seed(42) import matplotlib.pyplot as plt # %matplotlib inline # #### Set number of neurons n_input = 784 n_dense = 128 # #### Define placeholder Tensor for simulated MNIST digits x = tf.placeholder(tf.float32, [None, n_input]) # #### Create Variable Tensors for neuron biases `b` and weight matrix `W` b = tf.Variable(tf.zeros([n_dense])) W = tf.Variable(tf.random_uniform([n_input, n_dense])) # 1. # W = tf.Variable(tf.random_normal([n_input, n_dense])) # 2. # W = tf.get_variable('W', [n_input, n_dense], # initializer=tf.contrib.layers.xavier_initializer()) # #### Design the computational graph z = tf.add(tf.matmul(x, W), b) a = tf.sigmoid(z) # first with tf.sigmoid(), then stick with tf.tanh() or tf.nn.relu() # #### Create op for variable initialization initializer_op = tf.global_variables_initializer() # #### Execute the graph in a session with tf.Session() as session: session.run(initializer_op) layer_output = session.run(a, {x: np.random.random([1, n_input])}) layer_output _ = plt.hist(np.transpose(layer_output))
notebooks/first_tensorflow_neurons.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.1
#     language: julia
#     name: julia-1.6
# ---

# # Adaptively Sampled MPC
#
# This is a simple demonstration of an adaptively sampled region quadtree
# used to approximate the solutions to a model-predictive control problem.

using Pkg
# pkg"activate ."
using RegionTrees
using StaticArrays: SVector
using Plots

# +
# This module implements our MPC code, which consists of a 1-dimensional
# double-integrator system, forward-euler time-stepping dynamics, and a
# 10 step prediction window.
module mpc

using JuMP
using Ipopt
# using Gurobi
using StaticArrays
using Interpolations
using RegionTrees
import RegionTrees: AbstractRefinery, needs_refinement, refine_data

# const env = Gurobi.Env()

# Solve the MPC problem from a given initial position and velocity.
# Returns the optimal (q, v, u) trajectories over the 10-step horizon;
# callers elsewhere in this notebook use only u[1] (receding horizon).
function run_mpc(q0, v0)
    model = Model(Ipopt.Optimizer) # GurobiSolver(env, OutputFlag=0)

    # Horizon, step size, actuation limit, and cost weights
    # (C_q: position, c_vfinal: terminal velocity, C_u: control effort).
    num_time_steps = 10
    dt = 0.1
    u_limit = 3
    C_q = 100
    c_vfinal = 100
    C_u = 1

    @variable model q[1:num_time_steps]
    @variable model v[1:num_time_steps]
    @variable model u[1:num_time_steps]

    # Forward-Euler double-integrator dynamics: q' = v, v' = u.
    @constraint model q[2:num_time_steps] .== q[1:num_time_steps-1] .+ v[1:num_time_steps-1] .* dt
    @constraint model v[2:num_time_steps] .== v[1:num_time_steps-1] .+ u[1:num_time_steps-1] .* dt

    # Symmetric actuation bounds.
    @constraint model u .<= u_limit
    @constraint model u .>= -u_limit

    # Pin the first state to the supplied initial condition.
    @constraint model q[1] == q0
    @constraint model v[1] == v0

    # Quadratic cost: track q = 0, end with small velocity, penalize effort.
    @objective model Min C_q * sum(q[i]^2 for i=1:num_time_steps) + c_vfinal * v[end]^2 + C_u * sum(u[i]^2 for i=1:num_time_steps)

    optimize!(model)
    value.(q), value.(v), value.(u)
end

# The MPCRefinery provides enough behavior to implement the
# RegionTrees AdaptiveSampling interface, which lets us generate
# a quadtree of initial states and their corresponding MPC solutions.
struct MPCRefinery <: AbstractRefinery end

# Evaluate the cell's stored interpolation at `point` (world coordinates),
# mapping into the interpolation's 1-based unit-grid coordinates.
function evaluate(cell, point)
    p = (point - cell.boundary.origin) ./ cell.boundary.widths
    cell.data(p[1] + 1, p[2] + 1)
end

# A cell in the quadtree needs refinement if its interpolated solution
# derived from its vertices is not a good fit for the true MPC solution
# at its center and the center of each of its faces
function needs_refinement(::MPCRefinery, cell)
    for x in body_and_face_centers(cell.boundary)
        value_interp = evaluate(cell, x)
        value_true = run_mpc(x[1], x[2])[3]
        if !isapprox(value_interp[1], value_true[1], rtol=1e-1, atol=1e-1)
            return true
        end
    end
    false
end

# The data element associated with a cell is a bilinear interpolation
# of the MPC function evaluated at the vertices of the cell.
function refine_data(r::MPCRefinery, cell::Cell, indices)
    refine_data(r, child_boundary(cell, indices))
end

function refine_data(::MPCRefinery, boundary::HyperRectangle)
    f = v -> run_mpc(v[1], v[2])[3]
    interpolate(f.(vertices(boundary)), BSpline(Linear()))
end

end
# -

# Simulate a given control function for the double integrator model using
# forward-Euler integration with step `dt` over `timespan` seconds.
# Returns (ts, qs, vs): times, positions, and velocities, with the initial
# state included at t = 0.
function simulate(controller, q0, v0, dt, timespan)
    # (Removed an unused `num_time_steps = timespan / dt` local.)
    qs = [q0]
    vs = [v0]
    ts = [0.0]
    q = q0
    v = v0
    for t in 0:dt:(timespan)
        u = controller(t, q, v)
        q += v * dt
        v += u * dt
        push!(qs, q)
        push!(vs, v)
        # BUG FIX: the state pushed above is the state *after* this step,
        # i.e. at time t + dt.  The original pushed `t`, so every recorded
        # time lagged its state by one step and t = 0 appeared twice.
        push!(ts, t + dt)
    end
    ts, qs, vs
end

# Receding-horizon controller: re-solve the MPC problem from the current
# state and apply only the first control input.
controller = (t, q, v) -> begin
    q, v, u = mpc.run_mpc(q, v)
    u[1]
end

t, q, v = simulate(controller, 10.0, 0.0, 0.1, 10)

# State space portrait of the solution, starting from q=10, v=0
plot(q, v, xlim=(-10, 10), ylim=(-10, 10), xlabel="q", ylabel="v", legend=nothing)

plot(t, q, xlabel="t", ylabel="q", legend=nothing)

plot(t, v, xlabel="t", ylabel="v", legend=nothing)

# # Approximating the solutions space
# Now that we've written down and tested the true MPC solution, we can try to approximate it.
# The AdaptiveSampling() function will generate a quadtree by iteratively
# refining each cell in the space until the solutions to the MPC problem
# within that cell are well-approximated by the interpolation.

# Cover initial states q, v in [-10, 10] x [-10, 10].
boundary = RegionTrees.HyperRectangle(SVector(-10., -10), SVector(20., 20))
refinery = mpc.MPCRefinery()
root = RegionTrees.Cell(boundary, mpc.refine_data(refinery, boundary))
adaptivesampling!(root, refinery)

# Now we can plot each region in the quadtree. Note that we end up with
# a lot of detail along the switching surface from maximum acceleration
# to maximum deceleration
plt = plot(xlim=(-10, 10), ylim=(-10, 10), legend=nothing, grid=false)
for cell in RegionTrees.allleaves(root)
    # 2xN matrix of corner coordinates; the column order below walks the
    # corners so the drawn rectangle closes back on its first vertex.
    v = hcat(collect(RegionTrees.vertices(cell.boundary))...)
    plot!(v[1,[1,2,4,3,1]], v[2,[1,2,4,3,1]])
end
plt

# # Using the Approximate Solution
# We can use the quadtree to produce an approximate controller for our
# system. Given the current state, we can look up the cell in the quadtree
# which encompasses that state, then use that cell's interpolation to find
# our control tape.

approx_controller = (t, q, v) -> begin
    x = [q, v]
    leaf = RegionTrees.findleaf(root, x)
    u = mpc.evaluate(leaf, x)
    u[1]
end

t, q, v = simulate(approx_controller, 10.0, 0.0, 0.1, 10)

# The results are similar to, but not quite as good as, the
# values obtained from the exact MPC solution.
plot(q, v, xlim=(-10, 10), ylim=(-10, 10), xlabel="q", ylabel="v", legend=nothing)
examples/adaptive_mpc/adaptive_mpc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt

# Accuracy (mAP, %) vs. inference cost (GFLOPs) for several video models.
# TSN/TRN/TSM each list three checkpoints at an identical 132.6 GFLOPs budget.
TSN_gflops = [132.6, 132.6, 132.6]
TSN_mAP = [49.61, 71.59, 71.93]

TRN_gflops = [132.6, 132.6, 132.6]
TRN_mAP = [54.86, 71.84, 69.80]

TSM_gflops = [132.6, 132.6, 132.6]
TSM_mAP = [79.89, 76.78, 82.24]

# I3D variants: per-clip GFLOPs times 30 evaluation clips.
I3D_gflops = [114.9 * 30]
I3D_mAP = [44.2]

I3DNL_gflops = [183.4 * 30]
I3DNL_mAP = [45.9]

# NOTE: removed dead template code copied from the matplotlib scatter demo
# (np.random.seed plus unused N/x/y/colors/area random arrays) -- none of
# it fed the plot below.

plt.scatter(TSN_gflops, TSN_mAP, c=[1, 2, 3], alpha=0.5, marker="o")
plt.plot(TSN_gflops, TSN_mAP, color='black', alpha=0.5)

#plt.scatter(TRN_gflops, TRN_mAP, c=[1,2,3], alpha=0.5, marker="x")
#plt.plot(TRN_gflops, TRN_mAP, color='black', alpha=0.5)

#plt.scatter(TSM_gflops, TSM_mAP, c=[1,2,3], alpha=0.5, marker="v")
#plt.plot(TSM_gflops, TSM_mAP, color='black', alpha=0.5)

plt.scatter(I3D_gflops, I3D_mAP, c=[1], alpha=0.5, marker="*")
plt.scatter(I3DNL_gflops, I3DNL_mAP, c=[1], alpha=0.5, marker="p")

plt.show()
# -
tools/visualisations/flop_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Debugging Performance Issues # # Most chapters of this book deal with _functional_ issues – that is, issues related to the _functionality_ (or its absence) of the code in question. However, debugging can also involve _nonfunctional_ issues, however – performance, usability, reliability, and more. In this chapter, we give a short introduction on how to debug such nonfunctional issues, notably _performance_ issues. # + slideshow={"slide_type": "skip"} from bookutils import YouTubeVideo YouTubeVideo("0tMeB9G0uUI") # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * This chapter leverages visualization capabilities from [the chapter on statistical debugging](StatisticalDebugger.ipynb) # * We also show how to debug nonfunctional issues using [delta debugging](DeltaDebugger.ipynb). # + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"} import bookutils # + slideshow={"slide_type": "skip"} import StatisticalDebugger import DeltaDebugger # + [markdown] slideshow={"slide_type": "skip"} # ## Synopsis # <!-- Automatically generated. Do not edit. --> # # To [use the code provided in this chapter](Importing.ipynb), write # # ```python # >>> from debuggingbook.PerformanceDebugger import <identifier> # ``` # # and then make use of the following features. # # # This chapter provides a class `PerformanceDebugger` that allows to measure and visualize the time taken per line in a function. 
# # ```python # >>> with PerformanceDebugger(TimeCollector) as debugger: # >>> for i in range(100): # >>> s = remove_html_markup('<b>foo</b>') # ``` # The distribution of executed time within each function can be obtained by printing out the debugger: # # ```python # >>> print(debugger) # 238 2% def remove_html_markup(s): # type: ignore # 239 1% tag = False # 240 1% quote = False # 241 1% out = "" # 242 0% # 243 16% for c in s: # 244 16% assert tag or not quote # 245 0% # 246 15% if c == '<' and not quote: # 247 3% tag = True # 248 11% elif c == '>' and not quote: # 249 2% tag = False # 250 9% elif (c == '"' or c == "'") and tag: # 251 0% quote = not quote # 252 9% elif not tag: # 253 4% out = out + c # 254 0% # 255 3% return out # # # ``` # The sum of all percentages in a function should always be 100%. # # These percentages can also be visualized, where darker shades represent higher percentage values: # # ```python # >>> debugger # ``` # <pre style="background-color:hsl(240, 100%, 96.88245453334457%)" # title="Line 238: 2% 0.00045646302169188857"> 238 def remove_html_markup(s): # type: ignore</pre> # <pre style="background-color:hsl(240, 100%, 97.2857112393764%)" # title="Line 239: 1% 0.0003974192077293992"> 239 tag = False</pre> # <pre style="background-color:hsl(240, 100%, 97.37162648661715%)" # title="Line 240: 1% 0.0003848397172987461"> 240 quote = False</pre> # <pre style="background-color:hsl(240, 100%, 97.48572933248805%)" # title="Line 241: 1% 0.0003681330708786845"> 241 out = &quot;&quot;</pre> # <pre style="background-color:hsl(240, 100%, 100.0%)" # title="Line 242: 0% 0.0"> 242 &nbsp;</pre> # <pre style="background-color:hsl(240, 100%, 75.0%)" # title="Line 243: 16% 0.0036604359629563987"> 243 for c in s:</pre> # <pre style="background-color:hsl(240, 100%, 76.189257939283%)" # title="Line 244: 16% 0.003486307861749083"> 244 assert tag or not quote</pre> # <pre style="background-color:hsl(240, 100%, 100.0%)" # title="Line 245: 0% 0.0"> 245 &nbsp;</pre> 
# <pre style="background-color:hsl(240, 100%, 77.85810538405873%)" # title="Line 246: 15% 0.0032419594936072826"> 246 if c == &#x27;&lt;&#x27; and not quote:</pre> # <pre style="background-color:hsl(240, 100%, 95.45092493122176%)" # title="Line 247: 3% 0.000666063919197768"> 247 tag = True</pre> # <pre style="background-color:hsl(240, 100%, 82.3393726289067%)" # title="Line 248: 11% 0.002585823822300881"> 248 elif c == &#x27;&gt;&#x27; and not quote:</pre> # <pre style="background-color:hsl(240, 100%, 95.63128318405562%)" # title="Line 249: 2% 0.000639656325802207"> 249 tag = False</pre> # <pre style="background-color:hsl(240, 100%, 86.11941611121148%)" # title="Line 250: 9% 0.0020323595381341875"> 250 elif (c == &#x27;&quot;&#x27; or c == &quot;&#x27;&quot;) and tag:</pre> # <pre style="background-color:hsl(240, 100%, 100.0%)" # title="Line 251: 0% 0.0"> 251 quote = not quote</pre> # <pre style="background-color:hsl(240, 100%, 85.90808505276334%)" # title="Line 252: 9% 0.0020633020903915167"> 252 elif not tag:</pre> # <pre style="background-color:hsl(240, 100%, 93.47209778371149%)" # title="Line 253: 4% 0.0009557987214066088"> 253 out = out + c</pre> # <pre style="background-color:hsl(240, 100%, 100.0%)" # title="Line 254: 0% 0.0"> 254 &nbsp;</pre> # <pre style="background-color:hsl(240, 100%, 95.52045850215718%)" # title="Line 255: 3% 0.0006558829918503761"> 255 return out</pre> # # # The abstract `MetricCollector` class allows subclassing to build more collectors, such as `HitCollector`. # # ![](PICS/PerformanceDebugger-synopsis-1.svg) # # # + [markdown] slideshow={"slide_type": "slide"} # ## Measuring Performance # # The solution to debugging performance issues fits in two simple rules: # # 1. _Measure_ performance # 2. _Break down_ how individual parts of your code contribute to performance. # # The first part, actually _measuring_ performance, is key here. 
Developers often take elaborated guesses on which aspects of their code impact performance, and think about all possible ways to optimize their code – and at the same time, making it harder to understand, harder to evolve, and harder to maintain. In most cases, such guesses are wrong. Instead, _measure_ performance of your program, _identify_ the very few parts that may need to get improved, and again _measure_ the impact of your changes. # + [markdown] slideshow={"slide_type": "subslide"} # Almost all programming languages offer a way to measure performance and breaking it down to individual parts of the code – a means also known as *profiling*. Profiling works by measuring the execution time for each function (or even more fine-grained location) in your program. This can be achieved by # # 1. _Instrumenting_ or _tracing_ code such that the current time at entry and exit of each function (or line), thus determining the time spent. In Python, this is achieved by profilers like [profile or cProfile](https://docs.python.org/3/library/profile.html) # # 2. _Sampling_ the current function call stack at regular intervals, and thus assessing which functions are most active (= take the most time) during execution. For Python, the [scalene](https://github.com/plasma-umass/scalene) profiler works this way. # # Pretty much all programming languages support profiling, either through measuring, sampling, or both. As a rule of thumb, _interpreted_ languages more frequently support measuring (as it is easy to implement in an interpreter), while _compiled_ languages more frequently support sampling (because instrumentation requires recompilation). Python is lucky to support both methods. # + [markdown] slideshow={"slide_type": "subslide"} # ### Tracing Execution Profiles # # Let us illustrate profiling in a simple example. 
The `ChangeCounter` class (which we will encounter in the [chapter on mining version histories](ChangeCounter.ipynb) reads in a version history from a git repository. Yet, it takes more than a minute to read in the debugging book change history: # + slideshow={"slide_type": "skip"} from ChangeCounter import ChangeCounter, debuggingbook_change_counter # minor dependency # + slideshow={"slide_type": "skip"} import Timer # + slideshow={"slide_type": "fragment"} with Timer.Timer() as t: change_counter = debuggingbook_change_counter(ChangeCounter) # + slideshow={"slide_type": "fragment"} t.elapsed_time() # + [markdown] slideshow={"slide_type": "subslide"} # The Python `profile` and `cProfile` modules offer a simple way to identify the most time-consuming functions. They are invoked using the `run()` function, whose argument is the command to be profiled. The output reports, for each function encountered: # # * How often it was called (`ncalls` column) # * How much time was spent in the given function, _excluding_ time spent in calls to sub-functions (`tottime` column) # * The fraction of `tottime` / `ncalls` (first `percall` column) # * How much time was spent in the given function, _including_ time spent in calls to sub-functions (`cumtime` column) # * The fraction of `cumtime` / `percall` (second `percall` column) # # Let us have a look at the profile we obtain: # + slideshow={"slide_type": "skip"} import cProfile # + slideshow={"slide_type": "subslide"} cProfile.run('debuggingbook_change_counter(ChangeCounter)', sort='cumulative') # + [markdown] slideshow={"slide_type": "subslide"} # Yes, that's an awful lot of functions, but we can quickly narrow things down. The `cumtime` column is sorted by largest values first. We see that the `debuggingbook_change_counter()` method at the top takes up all the time – but this is not surprising, since it it the method we called in the first place. This calls a method `mine()` in the `ChangeCounter` class, which does all the work. 
# + [markdown] slideshow={"slide_type": "fragment"} # The next places are more interesting: almost all time is spent in a single method, named `modifications()`. This method determines the difference between two versions, which is an expensive operation; this is also supported by the observation that half of the time is spent in a `diff()` method. # + [markdown] slideshow={"slide_type": "subslide"} # This profile thus already gets us a hint on how to improve performance: Rather than computing the diff between versions for _every_ version, we could do so _on demand_ (and possibly cache results so we don't have to compute them twice). Alas, this (slow) functionality is part of the # underlying [PyDriller](https://pydriller.readthedocs.io/) Python package, so we cannot fix this within the `ChangeCounter` class. But we could file a bug with the developers, suggesting a patch to improve performance. # + [markdown] slideshow={"slide_type": "subslide"} # ### Sampling Execution Profiles # # Instrumenting code is _precise_, but it is also _slow_. An alternate way to measure performance is to _sample_ in regular intervals which functions are currently active – for instance, by examining the current function call stack. The more frequently a function is sampled as active, the more time is spent in that function. # + [markdown] slideshow={"slide_type": "subslide"} # One profiler for Python that implements such sampling is [Scalene](https://github.com/plasma-umass/scalene) – a high-performance, high-precision CPU, GPU, and memory profiler for Python. 
We can invoke it on our example as follows: # # ```sh # $ scalene --html test.py > scalene-out.html # ``` # + [markdown] slideshow={"slide_type": "fragment"} # where `test.py` is a script that again invokes # # ```python # debuggingbook_change_counter(ChangeCounter) # ``` # + [markdown] slideshow={"slide_type": "subslide"} # The output of `scalene` is sent to a HTML file (here, `scalene-out.html`) which is organized by _lines_ – that is, for each line, we see how much it contributed to overall execution time. Opening the output `scalene-out.html` in a HTML browswer, we see these lines: # + [markdown] slideshow={"slide_type": "fragment"} # ![](PICS/scalene-out.png) # + [markdown] slideshow={"slide_type": "fragment"} # As with `cProfile`, above, we identify the `mine()` method in the `ChangeCounter` class as the main performance hog – and in the `mine()` method, it is the iteration over all modifications that takes all the time. Adding the option `--profile-all` to `scalene` would extend the profile to all executed code, including the `pydriller` third-party library. # + [markdown] slideshow={"slide_type": "fragment"} # Besides relying on sampling rather that tracing (which is more efficient) and breaking down execution time by line, `scalene` also provides additional information on memory usage and more. If `cProfile` is not sufficient, then `scalene` will bring profiling to the next level. # + [markdown] slideshow={"slide_type": "slide"} # ## Improving Performance # + [markdown] slideshow={"slide_type": "fragment"} # Identifying a culprit is not always that easy. Notably, when the first set of obvious performance hogs is fixed, it becomes more and more difficult to squeeze out additional performance – and, as stated above, such optimization may be in conflict with readability and maintainability of your code. Here are some simple ways to improve performance: # + [markdown] slideshow={"slide_type": "subslide"} # * **Efficient algorithms**. 
For many tasks, the simplest algorithm is not always the best performing one. Consider alternatives that may be more efficient, and _measure_ whether they pay off. # # * **Efficient data types**. Remember that certain operations, such as looking up whether an element is contained, may take different amounts of time depending on the data structure. In Python, a query like `x in xs` takes (mostly) constant time if `xs` is a set, but linear time if `xs` is a list; these differences become significant as the size of `xs` grows. # # * **Efficient modules**. In Python, most frequently used modules (or at least parts of) are implemented in C, which is way more efficient than plain Python. Rely on existing modules whenever possible. Or implement your own, _after_ having measured that this may pay off. # + [markdown] slideshow={"slide_type": "subslide"} # These are all things you can already use during programming – and also set up your code such that exchanging, say, one data type by another will still be possible later. This is best achieved by hiding implementation details (such as the used data types) behind an abstract interface used by your clients. # + [markdown] slideshow={"slide_type": "fragment"} # But beyond these points, remember the famous words by [<NAME>](https://en.wikipedia.org/wiki/Donald_Knuth): # + slideshow={"slide_type": "skip"} from bookutils import quiz # + slideshow={"slide_type": "fragment"} quiz('<NAME> said: "Premature optimization..."', [ "... is the root of all evil", "... requires lots of experience", "... should be left to assembly programmers", "... is the reason why TeX is so fast", ], 'len("METAFONT") - len("TeX") - len("CWEB")') # + [markdown] slideshow={"slide_type": "subslide"} # This quote should always remind us that after a good design, you should always _first_ measure and _then_ optimize. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Building a Profiler # # Having discussed profilers from a _user_ perspective, let us now dive into how they are actually implemented. It turns out we can use most of our existing infrastructure to implement a simple tracing profiler with only a few lines of code. # + [markdown] slideshow={"slide_type": "fragment"} # The program we will apply our profiler on is – surprise! – our ongoing example, `remove_html_markup()`. Our aim is to understand how much time is spent _in each line of the code_ (such that we have a new feature on top of Python `cProfile`). # + slideshow={"slide_type": "skip"} from Intro_Debugging import remove_html_markup # + slideshow={"slide_type": "fragment"} # ignore from typing import Any, Optional, Type, Dict, Tuple, List # + slideshow={"slide_type": "fragment"} # ignore from bookutils import print_content # + slideshow={"slide_type": "subslide"} # ignore import inspect # + slideshow={"slide_type": "subslide"} print_content(inspect.getsource(remove_html_markup), '.py', start_line_number=238) # + [markdown] slideshow={"slide_type": "subslide"} # We introduce a class `PerformanceTracer` that tracks, for each line in the code: # # * how _often_ it was executed (`hits`), and # * _how much time_ was spent during its execution (`time`). # # To this end, we make use of our `Timer` class, which measures time, and the `Tracer` class from [the chapter on tracing](Tracer.ipynb), which allows us to track every line of the program as it is being executed. # + slideshow={"slide_type": "skip"} from Tracer import Tracer # + [markdown] slideshow={"slide_type": "fragment"} # In `PerfomanceTracker`, the attributes `hits` and `time` are mappings indexed by unique locations – that is, pairs of function name and line number. 
# + slideshow={"slide_type": "fragment"} Location = Tuple[str, int] # + slideshow={"slide_type": "subslide"} class PerformanceTracer(Tracer): """Trace time and #hits for individual program lines""" def __init__(self) -> None: """Constructor.""" super().__init__() self.reset_timer() self.hits: Dict[Location, int] = {} self.time: Dict[Location, float] = {} def reset_timer(self) -> None: self.timer = Timer.Timer() # + [markdown] slideshow={"slide_type": "subslide"} # As common in this book, we want to use `PerformanceTracer` in a `with`-block around the function call(s) to be tracked: # # ```python # with PerformanceTracer() as perf_tracer: # function(...) # ``` # + [markdown] slideshow={"slide_type": "fragment"} # When entering the `with` block (`__enter__()`), we reset all timers. Also, coming from the `__enter__()` method of the superclass `Tracer`, we enable tracing through the `traceit()` method. # + slideshow={"slide_type": "skip"} from types import FrameType # + slideshow={"slide_type": "subslide"} class PerformanceTracer(PerformanceTracer): def __enter__(self) -> Any: """Enter a `with` block.""" super().__enter__() self.reset_timer() return self # + [markdown] slideshow={"slide_type": "fragment"} # The `traceit()` method extracts the current location. It increases the corresponding `hits` value by 1, and adds the elapsed time to the corresponding `time`. # + slideshow={"slide_type": "subslide"} class PerformanceTracer(PerformanceTracer): def traceit(self, frame: FrameType, event: str, arg: Any) -> None: """Tracing function; called for every line.""" t = self.timer.elapsed_time() location = (frame.f_code.co_name, frame.f_lineno) self.hits.setdefault(location, 0) self.time.setdefault(location, 0.0) self.hits[location] += 1 self.time[location] += t self.reset_timer() # + [markdown] slideshow={"slide_type": "fragment"} # This is it already. We can now determine where most time is spent in `remove_html_markup()`. 
We invoke it 10,000 times such that we can average over runs: # + slideshow={"slide_type": "subslide"} with PerformanceTracer() as perf_tracer: for i in range(10000): s = remove_html_markup('<b>foo</b>') # + [markdown] slideshow={"slide_type": "fragment"} # Here are the hits. For every line executed, we see how often it was executed. The most executed line is the `for` loop with 110,000 hits – once for each of the 10 characters in `<b>foo</b>`, once for the final check, and all of this 10,000 times. # + slideshow={"slide_type": "subslide"} perf_tracer.hits # + [markdown] slideshow={"slide_type": "subslide"} # The `time` attribute collects how much time was spent in each line. Within the loop, again, the `for` statement takes the most time. The other lines show some variability, though. # + slideshow={"slide_type": "subslide"} perf_tracer.time # + [markdown] slideshow={"slide_type": "subslide"} # For a full profiler, these numbers would now be sorted and printed in a table, much like `cProfile` does. However, we will borrow some material from previous chapters and annotate our code accordingly. # + [markdown] slideshow={"slide_type": "slide"} # ## Visualizing Performance Metrics # # In the [chapter on statistical debugging](StatisticalDebugger.ipynb), we have encountered the `CoverageCollector` class, which collects line and function coverage during execution, using a `collect()` method that is invoked for every line. We will repurpose this class to collect arbitrary _metrics_ on the lines executed, notably time taken. # + [markdown] slideshow={"slide_type": "subslide"} # ### Collecting Time Spent # + slideshow={"slide_type": "skip"} from StatisticalDebugger import CoverageCollector, SpectrumDebugger # + [markdown] slideshow={"slide_type": "fragment"} # The `MetricCollector` class is an abstract superclass that provides an interface to access a particular metric. 
# + slideshow={"slide_type": "fragment"} class MetricCollector(CoverageCollector): """Abstract superclass for collecting line-specific metrics""" def metric(self, event: Any) -> Optional[float]: """Return a metric for an event, or none.""" return None def all_metrics(self, func: str) -> List[float]: """Return all metric for a function `func`.""" return [] # + [markdown] slideshow={"slide_type": "fragment"} # Given these metrics, we can also compute sums and maxima for a single function. # + slideshow={"slide_type": "subslide"} class MetricCollector(MetricCollector): def total(self, func: str) -> float: return sum(self.all_metrics(func)) def maximum(self, func: str) -> float: return max(self.all_metrics(func)) # + [markdown] slideshow={"slide_type": "fragment"} # Let us instantiate this superclass into `TimeCollector` – a subclass that measures time. This is modeled after our `PerformanceTracer` class, above; notably, the `time` attribute serves the same role. # + slideshow={"slide_type": "subslide"} class TimeCollector(MetricCollector): """Collect time executed for each line""" def __init__(self) -> None: """Constructor""" super().__init__() self.reset_timer() self.time: Dict[Location, float] = {} self.add_items_to_ignore([Timer.Timer, Timer.clock]) def collect(self, frame: FrameType, event: str, arg: Any) -> None: """Invoked for every line executed. 
Accumulate time spent.""" t = self.timer.elapsed_time() super().collect(frame, event, arg) location = (frame.f_code.co_name, frame.f_lineno) self.time.setdefault(location, 0.0) self.time[location] += t self.reset_timer() def reset_timer(self) -> None: self.timer = Timer.Timer() def __enter__(self) -> Any: super().__enter__() self.reset_timer() return self # + [markdown] slideshow={"slide_type": "subslide"} # The `metric()` and `all_metrics()` methods accumulate the metric (time taken) for an individual function: # + slideshow={"slide_type": "fragment"} class TimeCollector(TimeCollector): def metric(self, location: Any) -> Optional[float]: if location in self.time: return self.time[location] else: return None def all_metrics(self, func: str) -> List[float]: return [time for (func_name, lineno), time in self.time.items() if func_name == func] # + [markdown] slideshow={"slide_type": "fragment"} # Here's how to use `TimeCollector()` – again, in a `with` block: # + slideshow={"slide_type": "subslide"} with TimeCollector() as collector: for i in range(100): s = remove_html_markup('<b>foo</b>') # + [markdown] slideshow={"slide_type": "fragment"} # The `time` attribute holds the time spent in each line: # + slideshow={"slide_type": "subslide"} for location, time_spent in collector.time.items(): print(location, time_spent) # + [markdown] slideshow={"slide_type": "subslide"} # And we can also create a total for an entire function: # + slideshow={"slide_type": "fragment"} collector.total('remove_html_markup') # + [markdown] slideshow={"slide_type": "subslide"} # ### Visualizing Time Spent # # Let us now go and visualize these numbers in a simple form. The idea is to assign each line a _color_ whose saturation indicates the time spent in that line relative to the time spent in the function overall – the higher the fraction, the darker the line. 
We create a `MetricDebugger` class built as a specialization of `SpectrumDebugger`, in which `suspiciousness()` and `color()` are repurposed to show these metrics. # + slideshow={"slide_type": "subslide"} class MetricDebugger(SpectrumDebugger): """Visualize a metric""" def metric(self, location: Location) -> float: sum = 0.0 for outcome in self.collectors: for collector in self.collectors[outcome]: assert isinstance(collector, MetricCollector) m = collector.metric(location) if m is not None: sum += m # type: ignore return sum def total(self, func_name: str) -> float: total = 0.0 for outcome in self.collectors: for collector in self.collectors[outcome]: assert isinstance(collector, MetricCollector) total += sum(collector.all_metrics(func_name)) return total def maximum(self, func_name: str) -> float: maximum = 0.0 for outcome in self.collectors: for collector in self.collectors[outcome]: assert isinstance(collector, MetricCollector) maximum = max(maximum, max(collector.all_metrics(func_name))) return maximum def suspiciousness(self, location: Location) -> float: func_name, _ = location return self.metric(location) / self.total(func_name) def color(self, location: Location) -> str: func_name, _ = location hue = 240 # blue saturation = 100 # fully saturated darkness = self.metric(location) / self.maximum(func_name) lightness = 100 - darkness * 25 return f"hsl({hue}, {saturation}%, {lightness}%)" def tooltip(self, location: Location) -> str: return f"{super().tooltip(location)} {self.metric(location)}" # + [markdown] slideshow={"slide_type": "subslide"} # We can now introduce `PerformanceDebugger` as a subclass of `MetricDebugger`, using an arbitrary `MetricCollector` (such as `TimeCollector`) to obtain the metric we want to visualize. 
# + slideshow={"slide_type": "fragment"} class PerformanceDebugger(MetricDebugger): """Collect and visualize a metric""" def __init__(self, collector_class: Type, log: bool = False): assert issubclass(collector_class, MetricCollector) super().__init__(collector_class, log=log) # + [markdown] slideshow={"slide_type": "fragment"} # With `PerformanceDebugger`, we inherit all the capabilities of `SpectrumDebugger`, such as showing the (relative) percentage of time spent in a table. We see that the `for` condition and the following `assert` take most of the time, followed by the first condition. # + slideshow={"slide_type": "subslide"} with PerformanceDebugger(TimeCollector) as debugger: for i in range(100): s = remove_html_markup('<b>foo</b>') # + slideshow={"slide_type": "subslide"} print(debugger) # + [markdown] slideshow={"slide_type": "subslide"} # However, we can also visualize these percentages, using shades of blue to indicate those lines most time spent in: # + slideshow={"slide_type": "subslide"} debugger # + [markdown] slideshow={"slide_type": "subslide"} # ### Other Metrics # # Our framework is flexible enough to collect (and visualize) arbitrary metrics. This `HitCollector` class, for instance, collects how often a line is being executed. 
# + slideshow={"slide_type": "subslide"} class HitCollector(MetricCollector): """Collect how often a line is executed""" def __init__(self) -> None: super().__init__() self.hits: Dict[Location, int] = {} def collect(self, frame: FrameType, event: str, arg: Any) -> None: super().collect(frame, event, arg) location = (frame.f_code.co_name, frame.f_lineno) self.hits.setdefault(location, 0) self.hits[location] += 1 def metric(self, location: Location) -> Optional[int]: if location in self.hits: return self.hits[location] else: return None def all_metrics(self, func: str) -> List[float]: return [hits for (func_name, lineno), hits in self.hits.items() if func_name == func] # + [markdown] slideshow={"slide_type": "subslide"} # We can plug in this class into `PerformanceDebugger` to obtain a distribution of lines executed: # + slideshow={"slide_type": "fragment"} with PerformanceDebugger(HitCollector) as debugger: for i in range(100): s = remove_html_markup('<b>foo</b>') # + [markdown] slideshow={"slide_type": "fragment"} # In total, during this call to `remove_html_markup()`, there are 6,400 lines executed: # + slideshow={"slide_type": "fragment"} debugger.total('remove_html_markup') # + [markdown] slideshow={"slide_type": "fragment"} # Again, we can visualize the distribution as a table and using colors. We can see how the shade gets lighter in the lower part of the loop as individual conditions have been met. # + slideshow={"slide_type": "subslide"} print(debugger) # + slideshow={"slide_type": "subslide"} debugger # + [markdown] slideshow={"slide_type": "slide"} # ## Integrating with Delta Debugging # # Besides identifying causes for performance issues in the code, one may also search for causes in the _input_, using [Delta Debugging](DeltaDebugger.ipynb). This can be useful if one does not immediately want to embark into investigating the code, but maybe first determine external influences that are related to performance issues. 
# + [markdown] slideshow={"slide_type": "fragment"} # Here is a variant of `remove_html_markup()` that introduces a (rather obvious) performance issue. # + slideshow={"slide_type": "skip"} import time # + slideshow={"slide_type": "subslide"} def remove_html_markup_ampersand(s: str) -> str: tag = False quote = False out = "" for c in s: assert tag or not quote if c == '&': time.sleep(0.1) # <-- the obvious performance issue if c == '<' and not quote: tag = True elif c == '>' and not quote: tag = False elif (c == '"' or c == "'") and tag: quote = not quote elif not tag: out = out + c return out # + [markdown] slideshow={"slide_type": "subslide"} # We can easily trigger this issue by measuring time taken: # + slideshow={"slide_type": "fragment"} with Timer.Timer() as t: remove_html_markup_ampersand('&&&') t.elapsed_time() # + [markdown] slideshow={"slide_type": "fragment"} # Let us set up a test that checks whether the performance issue is present. # + slideshow={"slide_type": "fragment"} def remove_html_test(s: str) -> None: with Timer.Timer() as t: remove_html_markup_ampersand(s) assert t.elapsed_time() < 0.1 # + [markdown] slideshow={"slide_type": "fragment"} # We can now apply delta debugging to determine a minimum input that causes the failure: # + slideshow={"slide_type": "fragment"} s_fail = '<b>foo&amp;</b>' # + slideshow={"slide_type": "subslide"} with DeltaDebugger.DeltaDebugger() as dd: remove_html_test(s_fail) # + slideshow={"slide_type": "fragment"} dd.min_args() # + [markdown] slideshow={"slide_type": "fragment"} # For performance issues, however, a minimal input is often not enough to highlight the failure cause. This is because short inputs tend to take less processing time than longer inputs, which increases the risks of a spurious diagnosis. 
A better alternative is to compute a _maximum_ input where the issue does not occur: # + slideshow={"slide_type": "fragment"} s_pass = dd.max_args() # + slideshow={"slide_type": "fragment"} s_pass # + [markdown] slideshow={"slide_type": "fragment"} # We see that the culprit character (the `&`) is removed. This tells us the failure-inducing difference – or, more precisely, the cause for the performance issue. # + [markdown] slideshow={"slide_type": "slide"} # ## Synopsis # + [markdown] slideshow={"slide_type": "fragment"} # This chapter provides a class `PerformanceDebugger` that allows to measure and visualize the time taken per line in a function. # + slideshow={"slide_type": "fragment"} with PerformanceDebugger(TimeCollector) as debugger: for i in range(100): s = remove_html_markup('<b>foo</b>') # + [markdown] slideshow={"slide_type": "fragment"} # The distribution of executed time within each function can be obtained by printing out the debugger: # + slideshow={"slide_type": "subslide"} print(debugger) # + [markdown] slideshow={"slide_type": "subslide"} # The sum of all percentages in a function should always be 100%. # + [markdown] slideshow={"slide_type": "fragment"} # These percentages can also be visualized, where darker shades represent higher percentage values: # + slideshow={"slide_type": "subslide"} debugger # + [markdown] slideshow={"slide_type": "subslide"} # The abstract `MetricCollector` class allows subclassing to build more collectors, such as `HitCollector`. 
# + slideshow={"slide_type": "fragment"} # ignore from ClassDiagram import display_class_hierarchy # + slideshow={"slide_type": "fragment"} # ignore display_class_hierarchy([PerformanceDebugger, TimeCollector, HitCollector], public_methods=[ PerformanceDebugger.__init__, ], project='debuggingbook') # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Lessons Learned # # * To measure performance, # * instrument the code such that the time taken per function (or line) is collected; or # * sample the execution that at regular intervals, the active call stack is collected. # * To make code performant, focus on efficient algorithms, efficient data types, and sufficient abstraction such that you can replace them by alternatives. # * Beyond efficient algorithms and data types, do _not_ optimize before measuring. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Next Steps # # This chapter concludes the part on abstracting failures. The next part will focus on # # * [repairing code automatically](Repairer.ipynb) # + [markdown] slideshow={"slide_type": "slide"} # ## Background # # [Scalene](https://github.com/plasma-umass/scalene) is a high-performance, high-precision CPU, GPU, and memory profiler for Python. In contrast to the standard Python `cProfile` profiler, it uses _sampling_ instead of instrumentation or relying on Python's tracing facilities; and it also supports line-by-line profiling. Scalene might be the tool of choice if you want to go beyond basic profiling. # # The Wikipedia articles on [profiling](https://en.wikipedia.org/wiki/Profiling_(computer_programming)) and [performance analysis tools](https://en.wikipedia.org/wiki/List_of_performance_analysis_tools) provide several additional resources on profiling tools and how to apply them in practice. 
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Exercises # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # ### Exercise 1: Profiling Memory Usage # # The Python [`tracemalloc` module](https://docs.python.org/3/library/tracemalloc.html) allows to track memory usage during execution. Between `tracemalloc.start()` and `tracemalloc.end()`, use `tracemalloc.get_traced_memory()` to obtain how much memory is currently being consumed: # + slideshow={"slide_type": "skip"} import tracemalloc # + slideshow={"slide_type": "fragment"} tracemalloc.start() # + slideshow={"slide_type": "fragment"} current_size, peak_size = tracemalloc.get_traced_memory() current_size # + slideshow={"slide_type": "fragment"} tracemalloc.stop() # + [markdown] slideshow={"slide_type": "subslide"} # Create a subclass of `MetricCollector` named `MemoryCollector`. Have it measure the memory consumption before and after each line executed (0 if negative), and visualize the impact of individual lines on memory. Create an appropriate test program that (temporarily) consumes larger amounts of memory. # + [markdown] slideshow={"slide_type": "slide"} # ## Exercise 2: Statistical Performance Debugging # # In a similar way as we integrated a binary "performance test" with delta debugging, we can also integrate such a test with other techniques. Combining a performance test with [Statistical Debugging](StatisticalDebugger.ipynb), for instance, will highlight those lines whose execution correlates with low performance. But then, the performance test need not be binary, as with functional pass/fail tests – you can also _weight_ individual lines by _how much_ they impact performance. Create a variant of `StatisticalDebugger` that reflects the impact of individual lines on an arbitrary (summarized) performance metric.
docs/notebooks/PerformanceDebugger.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="OQi3X7TNUl5Y" # # **Cheminformatics in Python: Predicting Solubility of Molecules** # # # ## Acknowledgment # # **The idea and most of the code is adapted from the the work of [<NAME>](https://github.com/dataprofessor/code/blob/master/python/cheminformatics_predicting_solubility.ipynb).** # # # # We will be reproducing a research article (by <NAME>$^1$) and use it in Lipophilicity dataset. # + [markdown] id="AQW_Ts66R4Ms" # ## **1. Install rdkit** # + id="-jNwdYoBR8ea" # ! wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.2-Linux-x86_64.sh # ! chmod +x Miniconda3-py37_4.8.2-Linux-x86_64.sh # ! bash ./Miniconda3-py37_4.8.2-Linux-x86_64.sh -b -f -p /usr/local # ! conda install -c rdkit rdkit -y import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') # + colab={"base_uri": "https://localhost:8080/"} id="mBP_uaJZ710V" outputId="3df2329c-cf3f-433d-97ec-b09902a0c5e6" # ! 
wget https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/Lipophilicity.csv # + [markdown] id="PJGp_xenNYKy" # ### **Read in the dataset** # + id="0ufiOpEbNooH" import pandas as pd # + colab={"base_uri": "https://localhost:8080/", "height": 404} id="nLS6bwiRNtuV" outputId="99e399e2-6b5c-4daf-dff7-b131810ea701" sol = pd.read_csv('Lipophilicity.csv') sol # + id="3ldrI87pqUko" sol.rename(columns={'CMPD_CHEMBLID':'CMPD_CHEMBLID', "exp": "logD","smiles": "SMILES"}, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 285} id="8k_9-K1VsKv4" outputId="e4fed672-85da-44cd-96b3-ffc785a4bde4" import pandas as pd import numpy as np from matplotlib import pyplot as plt import re import seaborn as sns sns.set() # %matplotlib inline sns.histplot(sol.logD,kde=True); # + id="05jP19U7ciwt" import pandas as pd import numpy as np from matplotlib import pyplot as plt import re import seaborn as sns sns.set() # %matplotlib inline # + colab={"base_uri": "https://localhost:8080/"} id="QhhHwKeJcoMV" outputId="023fb856-3332-4094-9dde-6eefeba5cbd6" from google.colab import drive drive.mount('/content/drive') # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="3S_JpaQTc35N" outputId="1caef766-472c-4e11-95b1-87799b34b8b2" df = pd.read_csv("/content/drive/My Drive/Colab Notebooks/Lipophi_canon.csv") df.head() # + id="6gxDVv8Nhls9" df.rename(columns={'CMPD_CHEMBLID':'CMPD_CHEMBLID', "exp": "logD", "smiles": "smiles","canonical_smiles":"SMILES"}, inplace=True) df = df.drop(['CMPD_CHEMBLID', 'smiles'], axis=1) # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="kRtGx0M8jK5M" outputId="562976b9-f667-4a94-ac77-24f931666cc3" df.head() # + colab={"base_uri": "https://localhost:8080/"} id="4-NmMcjXZsNw" outputId="a86291cf-4209-4e43-fb53-c9cdc682893b" df.shape # + [markdown] id="QBYISacEc1Yw" # # Calculate molecular descriptors** # # To predict **LogS** (log of the aqueous solubility), the study by Delaney makes use of 4 molecular descriptors: # 
1. **cLogP** *(Octanol-water partition coefficient)* # 2. **MW** *(Molecular weight)* # 3. **RB** *(Number of rotatable bonds)* # 4. **AP** *(Aromatic proportion = number of aromatic atoms / total number of heavy atoms)* # # Unfortunately, rdkit readily computes the first 3. As for the AP descriptor, we will calculate this by manually computing the ratio of the *number of aromatic atoms* to the *total number of heavy atoms* which rdkit can compute. # + id="ECbRNWACdJEj" # import numpy as np # import pandas as pd from rdkit import Chem from rdkit.Chem import Descriptors # + id="SbJh_cgUc5gd" def AromaticProportion(m): #source https://github.com/dataprofessor/code/blob/master/python/cheminformatics_predicting_solubility.ipynb aromatic_atoms = [m.GetAtomWithIdx(i).GetIsAromatic() for i in range(m.GetNumAtoms())] aa_count = [] for i in aromatic_atoms: if i==True: aa_count.append(1) AromaticAtom = sum(aa_count) # Number of aromatic atoms HeavyAtom = Descriptors.HeavyAtomCount(m) #Number of heavy atoms AR = AromaticAtom/HeavyAtom return AR # Inspired by: https://codeocean.com/explore/capsules?query=tag:data-curation def generate(smiles, verbose=False): moldata= [] for elem in smiles: mol=Chem.MolFromSmiles(elem) moldata.append(mol) baseData= np.arange(1,1) i=0 for mol in moldata: desc_MolLogP = Descriptors.MolLogP(mol) desc_MolWt = Descriptors.MolWt(mol) desc_NumRotatableBonds = Descriptors.NumRotatableBonds(mol) desc_AromaticProportion = AromaticProportion(mol) row = np.array([desc_MolLogP, desc_MolWt, desc_NumRotatableBonds, desc_AromaticProportion]) if(i==0): baseData=row else: baseData=np.vstack([baseData, row]) i=i+1 columnNames=["MolLogP","MolWt","NumRotatableBonds","AromaticProportion"] descriptors = pd.DataFrame(data=baseData,columns=columnNames) return descriptors # + id="v035WtEVnqan" sol = df # + id="CNTQ2WWGdWlK" X = generate(sol.SMILES) # + colab={"base_uri": "https://localhost:8080/", "height": 196} id="OazcNGkMHCu1" 
outputId="73e9de32-a4e0-4e87-a920-795ed26abe24" sol.head() # + [markdown] id="6m4Akv3rHG3E" # Assigning the second column (index 1) to the Y matrix # + colab={"base_uri": "https://localhost:8080/"} id="fcvXs7R7FrbC" outputId="c73c7723-5938-45e6-ed90-74aee735119a" Y = sol.iloc[:,0] Y # + [markdown] id="SzrfuUZNFg_X" # ## **Data split** # + id="dMRn8EVjFlrT" from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) # + [markdown] id="39nTAc3UFUMW" # ## **Random Forest Regressor Model** # + from sklearn.metrics import mean_squared_error, r2_score from sklearn.ensemble import RandomForestRegressor import time tic = time.perf_counter() model = RandomForestRegressor() model.fit(X_train, Y_train) # + [markdown] id="M6evZTPNRecd" # ### **Test on X_test** # + id="I_eFbrlaHhPU" Y_pred_test = model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} id="TQnDfyl5HkUr" outputId="8991376d-8f28-4e0b-bfae-11485e70278e" print('Mean squared error (MSE): %.2f' % mean_squared_error(Y_test, Y_pred_test)) # + colab={"base_uri": "https://localhost:8080/"} id="Ts2VeFtWvKwM" outputId="4cd262ea-1c1f-49b3-cefd-9f9f394c23a3" np.sqrt(mean_squared_error(Y_test, Y_pred_test)) # + id="PqkFbLhLp7i0" RMSElist = [] for i in range(10): model = RandomForestRegressor(random_state=i+5) model.fit(X_train, Y_train) Y_pred_test = model.predict(X_test) RMSElist.append(np.sqrt(mean_squared_error(Y_test, Y_pred_test))) # + colab={"base_uri": "https://localhost:8080/"} id="8t6GTQ-WCfUK" outputId="0247e271-e6a4-49d3-a2b6-069ff3f6aea0" np.mean(RMSElist),np.std(RMSElist) # + [markdown] id="NChkjAWqi0LI" # RMSE = $0.983 \pm 0.01$ # + colab={"base_uri": "https://localhost:8080/", "height": 287} id="NSCvVyVktzJs" outputId="b3e3354f-ef61-4418-c246-724cca45de50" p = sns.scatterplot(x=Y_test, y=Y_pred_test) p.set(xlabel='true value', ylabel='predicted') plt.show() # - # + id="GVcOT2Mvsh3T" df_res = pd.DataFrame({"RMSE":RMSElist, 
"model":'RF'}) df_res2 = pd.read_csv('/content/drive/My Drive/Colab Notebooks/RMSE_ULMFiT.csv') df_RMSE = pd.concat([df_res,df_res2]) # + colab={"base_uri": "https://localhost:8080/", "height": 339} id="StNlQEPOsByc" outputId="7186f287-80a3-4508-e48c-6c7515782503" plt.figure(figsize=(6,5)) ax = sns.boxplot(x="model", y="RMSE", data=df_RMSE,width=0.4); plt.show(); # + id="N5fUHm6ONUGC" # + [markdown] id="jwM1QHeLbxJl" # ## **Reference** # # [1] <NAME>. [ESOL:  Estimating Aqueous Solubility Directly from Molecular Structure](https://pubs.acs.org/doi/10.1021/ci034243x). ***J. Chem. Inf. Comput. Sci.*** 2004, 44, 3, 1000-1005. # # [2] <NAME>. [Predicting Aqueous Solubility - It's Harder Than It Looks](http://practicalcheminformatics.blogspot.com/2018/09/predicting-aqueous-solubility-its.html). ***Practical Cheminformatics Blog*** # # [3] <NAME>, <NAME>, <NAME>, and <NAME>. [Deep Learning for the Life Sciences: Applying Deep Learning to Genomics, Microscopy, Drug Discovery, and More](https://learning.oreilly.com/library/view/deep-learning-for/9781492039822/), O'Reilly, 2019. # # [4] [Supplementary file](https://pubs.acs.org/doi/10.1021/ci034243x) from Delaney's ESOL:  Estimating Aqueous Solubility Directly from Molecular Structure. # -
3_BaseLineModel.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Styling # # This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](https://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/user_guide/style.ipynb). # # You can apply **conditional formatting**, the visual styling of a DataFrame # depending on the data within, by using the ``DataFrame.style`` property. # This is a property that returns a ``Styler`` object, which has # useful methods for formatting and displaying DataFrames. # # The styling is accomplished using CSS. # You write "style functions" that take scalars, `DataFrame`s or `Series`, and return *like-indexed* DataFrames or Series with CSS `"attribute: value"` pairs for the values. # These functions can be incrementally passed to the `Styler` which collects the styles before rendering. # ## Building styles # # Pass your style functions into one of the following methods: # # - ``Styler.applymap``: elementwise # - ``Styler.apply``: column-/row-/table-wise # # Both of those methods take a function (and some other keyword arguments) and applies your function to the DataFrame in a certain way. # `Styler.applymap` works through the DataFrame elementwise. # `Styler.apply` passes each column or row into your DataFrame one-at-a-time or the entire table at once, depending on the `axis` keyword argument. # For columnwise use `axis=0`, rowwise use `axis=1`, and for the entire table at once use `axis=None`. # # For `Styler.applymap` your function should take a scalar and return a single string with the CSS attribute-value pair. # # For `Styler.apply` your function should take a Series or DataFrame (depending on the axis parameter), and return a Series or DataFrame with an identical shape where each value is a string with a CSS attribute-value pair. 
# # Let's see some examples. # + nbsphinx="hidden" import matplotlib.pyplot # We have this here to trigger matplotlib's font cache stuff. # This cell is hidden from the output # + import pandas as pd import numpy as np np.random.seed(24) df = pd.DataFrame({'A': np.linspace(1, 10, 10)}) df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))], axis=1) df.iloc[3, 3] = np.nan df.iloc[0, 2] = np.nan # - # Here's a boring example of rendering a DataFrame, without any (visible) styles: df.style # *Note*: The `DataFrame.style` attribute is a property that returns a `Styler` object. `Styler` has a `_repr_html_` method defined on it so they are rendered automatically. If you want the actual HTML back for further processing or for writing to file call the `.render()` method which returns a string. # # The above output looks very similar to the standard DataFrame HTML representation. But we've done some work behind the scenes to attach CSS classes to each cell. We can view these by calling the `.render` method. df.style.highlight_null().render().split('\n')[:10] # The `row0_col2` is the identifier for that particular cell. We've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page (you can set the `uuid` if you'd like to tie together the styling of two DataFrames). # # When writing style functions, you take care of producing the CSS attribute / value pairs you want. Pandas matches those up with the CSS classes that identify each cell. # Let's write a simple style function that will color negative numbers red and positive numbers black. def color_negative_red(val): """ Takes a scalar and returns a string with the css property `'color: red'` for negative strings, black otherwise. """ color = 'red' if val < 0 else 'black' return 'color: %s' % color # In this case, the cell's style depends only on its own value. 
# That means we should use the `Styler.applymap` method which works elementwise. s = df.style.applymap(color_negative_red) s # Notice the similarity with the standard `df.applymap`, which operates on DataFrames elementwise. We want you to be able to reuse your existing knowledge of how to interact with DataFrames. # # Notice also that our function returned a string containing the CSS attribute and value, separated by a colon just like in a `<style>` tag. This will be a common theme. # # Finally, the input shapes matched. `Styler.applymap` calls the function on each scalar input, and the function returns a scalar output. # Now suppose you wanted to highlight the maximum value in each column. # We can't use `.applymap` anymore since that operated elementwise. # Instead, we'll turn to `.apply` which operates columnwise (or rowwise using the `axis` keyword). Later on we'll see that something like `highlight_max` is already defined on `Styler` so you wouldn't need to write this yourself. def highlight_max(s): ''' highlight the maximum in a Series yellow. ''' is_max = s == s.max() return ['background-color: yellow' if v else '' for v in is_max] df.style.apply(highlight_max) # In this case the input is a `Series`, one column at a time. # Notice that the output shape of `highlight_max` matches the input shape, an array with `len(s)` items. # We encourage you to use method chains to build up a style piecewise, before finally rending at the end of the chain. df.style.\ applymap(color_negative_red).\ apply(highlight_max) # Above we used `Styler.apply` to pass in each column one at a time. # # <span style="background-color: #DEDEBE">*Debugging Tip*: If you're having trouble writing your style function, try just passing it into <code style="background-color: #DEDEBE">DataFrame.apply</code>. 
Internally, <code style="background-color: #DEDEBE">Styler.apply</code> uses <code style="background-color: #DEDEBE">DataFrame.apply</code> so the result should be the same.</span> # # What if you wanted to highlight just the maximum value in the entire table? # Use `.apply(function, axis=None)` to indicate that your function wants the entire table, not one column or row at a time. Let's try that next. # # We'll rewrite our `highlight-max` to handle either Series (from `.apply(axis=0 or 1)`) or DataFrames (from `.apply(axis=None)`). We'll also allow the color to be adjustable, to demonstrate that `.apply`, and `.applymap` pass along keyword arguments. def highlight_max(data, color='yellow'): ''' highlight the maximum in a Series or DataFrame ''' attr = 'background-color: {}'.format(color) if data.ndim == 1: # Series from .apply(axis=0) or axis=1 is_max = data == data.max() return [attr if v else '' for v in is_max] else: # from .apply(axis=None) is_max = data == data.max().max() return pd.DataFrame(np.where(is_max, attr, ''), index=data.index, columns=data.columns) # When using ``Styler.apply(func, axis=None)``, the function must return a DataFrame with the same index and column labels. df.style.apply(highlight_max, color='darkorange', axis=None) # ### Building Styles Summary # # Style functions should return strings with one or more CSS `attribute: value` delimited by semicolons. Use # # - `Styler.applymap(func)` for elementwise styles # - `Styler.apply(func, axis=0)` for columnwise styles # - `Styler.apply(func, axis=1)` for rowwise styles # - `Styler.apply(func, axis=None)` for tablewise styles # # And crucially the input and output shapes of `func` must match. If `x` is the input then ``func(x).shape == x.shape``. # ## Finer control: slicing # Both `Styler.apply`, and `Styler.applymap` accept a `subset` keyword. # This allows you to apply styles to specific rows or columns, without having to code that logic into your `style` function. 
# # The value passed to `subset` behaves similar to slicing a DataFrame. # # - A scalar is treated as a column label # - A list (or series or numpy array) # - A tuple is treated as `(row_indexer, column_indexer)` # # Consider using `pd.IndexSlice` to construct the tuple for the last one. df.style.apply(highlight_max, subset=['B', 'C', 'D']) # For row and column slicing, any valid indexer to `.loc` will work. df.style.applymap(color_negative_red, subset=pd.IndexSlice[2:5, ['B', 'D']]) # Only label-based slicing is supported right now, not positional. # # If your style function uses a `subset` or `axis` keyword argument, consider wrapping your function in a `functools.partial`, partialing out that keyword. # # ```python # my_func2 = functools.partial(my_func, subset=42) # ``` # ## Finer Control: Display Values # # We distinguish the *display* value from the *actual* value in `Styler`. # To control the display value, the text is printed in each cell, use `Styler.format`. Cells can be formatted according to a [format spec string](https://docs.python.org/3/library/string.html#format-specification-mini-language) or a callable that takes a single value and returns a string. df.style.format("{:.2%}") # Use a dictionary to format specific columns. df.style.format({'B': "{:0<4.0f}", 'D': '{:+.2f}'}) # Or pass in a callable (or dictionary of callables) for more flexible handling. df.style.format({"B": lambda x: "±{:.2f}".format(abs(x))}) # You can format the text displayed for missing values by `na_rep`. df.style.format("{:.2%}", na_rep="-") # These formatting techniques can be used in combination with styling. df.style.highlight_max().format(None, na_rep="-") # ## Builtin styles # Finally, we expect certain styling functions to be common enough that we've included a few "built-in" to the `Styler`, so you don't have to write them yourself. df.style.highlight_null(null_color='red') # You can create "heatmaps" with the `background_gradient` method. 
These require matplotlib, and we'll use [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap. # + import seaborn as sns cm = sns.light_palette("green", as_cmap=True) s = df.style.background_gradient(cmap=cm) s # - # `Styler.background_gradient` takes the keyword arguments `low` and `high`. Roughly speaking these extend the range of your data by `low` and `high` percent so that when we convert the colors, the colormap's entire range isn't used. This is useful so that you can actually read the text still. # Uses the full color range df.loc[:4].style.background_gradient(cmap='viridis') # Compress the color range (df.loc[:4] .style .background_gradient(cmap='viridis', low=.5, high=0) .highlight_null('red')) # There's also `.highlight_min` and `.highlight_max`. df.style.highlight_max(axis=0) # Use `Styler.set_properties` when the style doesn't actually depend on the values. df.style.set_properties(**{'background-color': 'black', 'color': 'lawngreen', 'border-color': 'white'}) # ### Bar charts # You can include "bar charts" in your DataFrame. df.style.bar(subset=['A', 'B'], color='#d65f5f') # New in version 0.20.0 is the ability to customize further the bar chart: You can now have the `df.style.bar` be centered on zero or midpoint value (in addition to the already existing way of having the min value at the left side of the cell), and you can pass a list of `[color_negative, color_positive]`. 
# # Here's how you can change the above with the new `align='mid'` option: df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d']) # The following example aims to give a highlight of the behavior of the new align options: # + import pandas as pd from IPython.display import HTML # Test series test1 = pd.Series([-100,-60,-30,-20], name='All Negative') test2 = pd.Series([10,20,50,100], name='All Positive') test3 = pd.Series([-10,-5,0,90], name='Both Pos and Neg') head = """ <table> <thead> <th>Align</th> <th>All Negative</th> <th>All Positive</th> <th>Both Neg and Pos</th> </thead> </tbody> """ aligns = ['left','zero','mid'] for align in aligns: row = "<tr><th>{}</th>".format(align) for series in [test1,test2,test3]: s = series.copy() s.name='' row += "<td>{}</td>".format(s.to_frame().style.bar(align=align, color=['#d65f5f', '#5fba7d'], width=100).render()) #testn['width'] row += '</tr>' head += row head+= """ </tbody> </table>""" HTML(head) # - # ## Sharing styles # Say you have a lovely style built up for a DataFrame, and now you want to apply the same style to a second DataFrame. Export the style with `df1.style.export`, and import it on the second DataFrame with `df1.style.set` df2 = -df style1 = df.style.applymap(color_negative_red) style1 style2 = df2.style style2.use(style1.export()) style2 # Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been `use`d upon. # ## Other Options # # You've seen a few methods for data-driven styling. # `Styler` also provides a few other options for styles that don't depend on the data. # # - precision # - captions # - table-wide styles # - missing values representation # - hiding the index or columns # # Each of these can be specified in two ways: # # - A keyword argument to `Styler.__init__` # - A call to one of the `.set_` or `.hide_` methods, e.g. 
`.set_caption` or `.hide_columns` # # The best method to use depends on the context. Use the `Styler` constructor when building many styled DataFrames that should all share the same properties. For interactive use, the`.set_` and `.hide_` methods are more convenient. # ### Precision # You can control the precision of floats using pandas' regular `display.precision` option. with pd.option_context('display.precision', 2): html = (df.style .applymap(color_negative_red) .apply(highlight_max)) html # Or through a `set_precision` method. df.style\ .applymap(color_negative_red)\ .apply(highlight_max)\ .set_precision(2) # Setting the precision only affects the printed number; the full-precision values are always passed to your style functions. You can always use `df.round(2).style` if you'd prefer to round from the start. # ### Captions # Regular table captions can be added in a few ways. df.style.set_caption('Colormaps, with a caption.')\ .background_gradient(cmap=cm) # ### Table styles # The next option you have are "table styles". # These are styles that apply to the table as a whole, but don't look at the data. # Certain stylings, including pseudo-selectors like `:hover` can only be used this way. # + from IPython.display import HTML def hover(hover_color="#ffff99"): return dict(selector="tr:hover", props=[("background-color", "%s" % hover_color)]) styles = [ hover(), dict(selector="th", props=[("font-size", "150%"), ("text-align", "center")]), dict(selector="caption", props=[("caption-side", "bottom")]) ] html = (df.style.set_table_styles(styles) .set_caption("Hover to highlight.")) html # - # `table_styles` should be a list of dictionaries. # Each dictionary should have the `selector` and `props` keys. # The value for `selector` should be a valid CSS selector. # Recall that all the styles are already attached to an `id`, unique to # each `Styler`. This selector is in addition to that `id`. 
# The value for `props` should be a list of tuples of `('attribute', 'value')`. # # `table_styles` are extremely flexible, but not as fun to type out by hand. # We hope to collect some useful ones either in pandas, or preferable in a new package that [builds on top](#Extensibility) the tools here. # ### Missing values # You can control the default missing values representation for the entire table through `set_na_rep` method. (df.style .set_na_rep("FAIL") .format(None, na_rep="PASS", subset=["D"]) .highlight_null("yellow")) # ### Hiding the Index or Columns # The index can be hidden from rendering by calling `Styler.hide_index`. Columns can be hidden from rendering by calling `Styler.hide_columns` and passing in the name of a column, or a slice of columns. df.style.hide_index() df.style.hide_columns(['C','D']) # ### CSS classes # # Certain CSS classes are attached to cells. # # - Index and Column names include `index_name` and `level<k>` where `k` is its level in a MultiIndex # - Index label cells include # + `row_heading` # + `row<n>` where `n` is the numeric position of the row # + `level<k>` where `k` is the level in a MultiIndex # - Column label cells include # + `col_heading` # + `col<n>` where `n` is the numeric position of the column # + `level<k>` where `k` is the level in a MultiIndex # - Blank cells include `blank` # - Data cells include `data` # ### Limitations # # - DataFrame only `(use Series.to_frame().style)` # - The index and columns must be unique # - No large repr, and performance isn't great; this is intended for summary DataFrames # - You can only style the *values*, not the index or columns # - You can only apply styles, you can't insert new HTML entities # # Some of these will be addressed in the future. 
# # ### Terms # # - Style function: a function that's passed into `Styler.apply` or `Styler.applymap` and returns values like `'css attribute: value'` # - Builtin style functions: style functions that are methods on `Styler` # - table style: a dictionary with the two keys `selector` and `props`. `selector` is the CSS selector that `props` will apply to. `props` is a list of `(attribute, value)` tuples. A list of table styles passed into `Styler`. # ## Fun stuff # # Here are a few interesting examples. # # `Styler` interacts pretty well with widgets. If you're viewing this online instead of running the notebook yourself, you're missing out on interactively adjusting the color palette. from IPython.html import widgets @widgets.interact def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)): return df.style.background_gradient( cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True) ) def magnify(): return [dict(selector="th", props=[("font-size", "4pt")]), dict(selector="td", props=[('padding', "0em 0em")]), dict(selector="th:hover", props=[("font-size", "12pt")]), dict(selector="tr:hover td:hover", props=[('max-width', '200px'), ('font-size', '12pt')]) ] # + np.random.seed(25) cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True) bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum() bigdf.style.background_gradient(cmap, axis=1)\ .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\ .set_caption("Hover to magnify")\ .set_precision(2)\ .set_table_styles(magnify()) # - # ## Export to Excel # # *New in version 0.20.0* # # <span style="color: red">*Experimental: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.*</span> # # Some support is available for exporting styled `DataFrames` to Excel worksheets using the `OpenPyXL` or `XlsxWriter` engines. 
CSS2.2 properties handled include: # # - `background-color` # - `border-style`, `border-width`, `border-color` and their {`top`, `right`, `bottom`, `left` variants} # - `color` # - `font-family` # - `font-style` # - `font-weight` # - `text-align` # - `text-decoration` # - `vertical-align` # - `white-space: nowrap` # # # - Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported. # - The following pseudo CSS properties are also available to set excel specific style properties: # - `number-format` # df.style.\ applymap(color_negative_red).\ apply(highlight_max).\ to_excel('styled.xlsx', engine='openpyxl') # A screenshot of the output: # # ![Excel spreadsheet with styled DataFrame](../_static/style-excel.png) # # ## Extensibility # # The core of pandas is, and will remain, its "high-performance, easy-to-use data structures". # With that in mind, we hope that `DataFrame.style` accomplishes two goals # # - Provide an API that is pleasing to use interactively and is "good enough" for many tasks # - Provide the foundations for dedicated libraries to build on # # If you build a great library on top of this, let us know and we'll [link](https://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it. # # ### Subclassing # # If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template. # We'll show an example of extending the default template to insert a custom header before each table. from jinja2 import Environment, ChoiceLoader, FileSystemLoader from IPython.display import HTML from pandas.io.formats.style import Styler # We'll use the following template: with open("templates/myhtml.tpl") as f: print(f.read()) # Now that we've created a template, we need to set up a subclass of ``Styler`` that # knows about it. 
class MyStyler(Styler): env = Environment( loader=ChoiceLoader([ FileSystemLoader("templates"), # contains ours Styler.loader, # the default ]) ) template = env.get_template("myhtml.tpl") # Notice that we include the original loader in our environment's loader. # That's because we extend the original template, so the Jinja environment needs # to be able to find it. # # Now we can use that custom styler. It's `__init__` takes a DataFrame. MyStyler(df) # Our custom template accepts a `table_title` keyword. We can provide the value in the `.render` method. HTML(MyStyler(df).render(table_title="Extending Example")) # For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass. EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl") EasyStyler(df) # Here's the template structure: # + with open("templates/template_structure.html") as f: structure = f.read() HTML(structure) # - # See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details. # + nbsphinx="hidden" # Hack to get the same style in the notebook as the # main site. This is hidden in the docs. from IPython.display import HTML with open("themes/nature_with_gtoc/static/nature.css_t") as f: css = f.read() HTML('<style>{}</style>'.format(css))
doc/source/user_guide/style.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## List of tables: # ### 1. [Table S1: Quality-quantity trade-off](#ols_quantity_quality) # ### 2. [Table 10: Instrumental variable estimation](#iv) # ## List of figures: # ### 1. [Figure 7: Scatterplot quality vs. quantity](#scatter_quantity_quality) # #### Imports libraries # + import matplotlib.pyplot as plt # Plotting import numpy as np # Matrix algebra import os # File system handling import pandas as pd # Dataframe handling import statsmodels.api as sm # Regression analysis import statsmodels.formula.api as smf # Regressions analysis from linearmodels.iv import IV2SLS, IVGMM from linearmodels.iv.results import IVModelComparison from matplotlib.ticker import FuncFormatter #Formating graphs from scipy import stats from statsmodels.iolib.summary2 import summary_col # Regression output table from statsmodels.stats.diagnostic import het_breuschpagan # Test for heteroscedasticity # - # #### Set project directory PROJECT_FOLDER = os.path.dirname(os.path.dirname(os.getcwd())) FINAL_DATA_FOLDER = os.path.join(PROJECT_FOLDER, 'data', 'final') TABLES_FOLDER = os.path.join(PROJECT_FOLDER, 'reports', 'tables') FIGURES_FOLDER = os.path.join(PROJECT_FOLDER, 'reports', 'figures') # #### Set display format pd.set_option("display.precision", 3) pd.set_option("display.expand_frame_repr", False) pd.set_option("display.max_rows", 40) # #### Set plotting style plt.style.use('classic') # #### Set plotting properties bar_kw = dict(kind='bar', color='0.4', alpha=0.8, lw=0.5, width=0.7) line_kw = dict(kind='line', lw=1, alpha=1, legend=True) font_kw = dict(fontsize=11, color='k') grid_kw = dict(linewidth=1, axis="y", zorder=2, antialiased=True) x_lab_kw = dict(fontsize=11, labelpad=3) y_lab_kw = dict(fontsize=11, labelpad=3) error_kw = dict(elinewidth=2, 
ecolor='0.15') legend_kw = dict(frameon=False) tick_kw = dict( size=5, which='both', direction='out', right=False, top=False, labelbottom=True ) # #### Retrieving dataframe # + DATA = os.path.join( FINAL_DATA_FOLDER, 'experiment_2', 'data_final.feather' ) df = pd.read_feather(DATA) df.info() # - # #### Define treatment index for ordering treat_index = ['Neutral', 'Charisma without goal', 'Goal', 'Full charisma'] # #### Reshape dataframe for panel analysis # + columns_to_keep = df.columns[[0, 1, 10, 13, 17, 22, 23, 27, 31, 45]] columns_to_melt = [f'Q{x}_editratio' for x in range(1, df['Counter'].max() + 1)] df_melted = pd.melt( df, id_vars=columns_to_keep, value_vars=columns_to_melt, var_name='Question_number', value_name='Edit_ratio' ) df_melted = df_melted.dropna(axis=0, how='any', subset=['Edit_ratio']) df_melted['Question_number'] = df_melted['Question_number'].str.extract(r'(\d+)').astype(int) df_panel = df_melted.set_index(['Id', 'Question_number']) df_panel = df_panel.sort_index() df.info() # - # #### Calculate the time averaged error score and merge resuts with cross-sectional dataframe avg_edit_ratio = df_panel['Edit_ratio'].mean(level=0).to_frame(name='Avg_edit_ratio') df = df.merge(avg_edit_ratio, left_on='Id', right_on='Id', validate='one_to_one') # #### Generate relative counter variable (competion rate) df['Rel_counter'] = df['Counter_real'] / 110.0 # #### Figure 7: Scatterplots for number of fragments submitted vs. 
mean error rate <a id='scatter_quantity_quality'></a> # + def get_fitted_values(df, x, y): y = df.loc[:,y] x = df.loc[:,x] x = sm.add_constant(x) ols = sm.OLS(y, x).fit() return ols.fittedvalues x_var, y_var = 'Rel_counter', 'Avg_edit_ratio' scatter_params = dict(xlim=[0, 0.7], ylim=[0, 0.16], color='0.1', alpha=0.5) fig, _ = plt.subplots(2, 2, figsize=(9, 9), dpi=100, facecolor='w') fig.subplots_adjust(hspace=0.3, wspace=0.3) for i, ax in enumerate(fig.axes): df_scatter = df[df['Treatment_str'] == treat_index[i]] df_scatter.plot.scatter(x=x_var, y=y_var, ax=ax, **scatter_params) ax.plot(df_scatter[x_var], get_fitted_values(df_scatter, x_var, y_var), c='0', lw=2) ax.set_title(treat_index[i], **font_kw) ax.tick_params(labelsize='small', **tick_kw) ax.xaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.0%}')) ax.yaxis.set_major_formatter(FuncFormatter(lambda y, _: f'{y:.0%}')) ax.set_xlabel("Share number submitted fragments", fontsize='small', labelpad=3) ax.set_ylabel("Average error rate", fontsize='small', labelpad=3) r, p = stats.pearsonr(df_scatter[x_var], df_scatter[y_var]) ax.text(0.37, 0.15 , f'$r={r:.3f}\:({p:.3f})$') path = os.path.join(FIGURES_FOLDER, 'experiment_2', 'corr_count_edit2.pdf') fig.savefig(path, bbox_inches='tight') # - # #### Table S1: Estimate slope and intercepts for quality quantity relationship across treatments <a id='ols_quantity_quality'></a> # + regs = {} regs['re0'] = 'Avg_edit_ratio ~ Rel_counter' regs['re1'] = 'Avg_edit_ratio ~ Rel_counter + C(Treatment_str)' regs['re2'] = 'Avg_edit_ratio ~ Rel_counter + Rel_counter:C(Treatment_str)' regs['re3'] = 'Avg_edit_ratio ~ Rel_counter * C(Treatment_str)' ols_results = [smf.ols(formula=v, data=df).fit(cov_type='HC1') for v in regs.values()] order = [ 'Rel_counter', 'Intercept', ] auxiliary = { 'N': lambda x: f'{x.nobs:.0f}', 'F': lambda x: f'{x.fvalue:.3f}', 'P(>F)': lambda x: f'{x.f_pvalue:.3f}', } ols_qual_quan = summary_col( ols_results, stars=True, info_dict=auxiliary, 
float_format='%.3f', regressor_order=order, drop_omitted=True ) ols_qual_quan = ols_qual_quan\ .tables[0]\ .rename(mapper=lambda x: x.replace(']', ''), axis=0)\ .rename(mapper=lambda x: x.split()[0].replace('_',' '), axis=1)\ .rename({'Intercept':'Constant'})\ .rename_axis('Dependent variable:', axis=1) path = os.path.join(TABLES_FOLDER, 'experiment_2', 'ols_quality_quantity.tex') ols_qual_quan.to_latex( path, bold_rows=False, float_format="%.3f" ) ols_qual_quan.loc['Intercepts'] = pd.Series(['No', 'Yes', 'No', 'Yes'], index=ols_qual_quan.columns) ols_qual_quan.loc['Slopes'] = pd.Series(['No', 'No', 'Yes', 'Yes'], index=ols_qual_quan.columns) display(ols_qual_quan) # - # #### Table 10: Instrumental variable estimation <a id=iv></a> # + dependent = 'Avg_edit_ratio' endogenous = 'Rel_counter' exogenous = 'Age + Female + Diverse + Education + Mobile_device' first_stage = f'{endogenous} ~ Charisma * Goal' second_stage = f'{dependent} ~ 1 + {exogenous} + [{first_stage}]' res_ols = IV2SLS.from_formula(f'{dependent} ~ 1 + {endogenous} + {exogenous}', df).fit() res_2sls = IV2SLS.from_formula(second_stage, df).fit() results = IVModelComparison({'OLS':res_ols, '2SLS':res_2sls}, precision='std_errors', stars=True) path = os.path.join( TABLES_FOLDER, 'experiment_2', 'iv2sls.tex' ) with open(path, "w") as latex: latex.write(results.summary.as_latex()) display(results) # - # #### Test for heteroscedasticity het_res = het_breuschpagan(res_ols.resids, res_ols.model.exog.pandas) print(f'LM={het_res[0]:.3f}, p={het_res[1]:.3f}\nF={het_res[2]:.3f}, p={het_res[3]:.3f}') # #### First stage diagnostic results for 2SLS res_2sls.first_stage # #### Test 2SLS for overidentification using Sargan's test res_2sls.sargan # #### Test 2SLS for exogeneity using Durbin res_2sls.durbin() # #### Test 2SLS for exogeneity using Wu–Hausman (WH) res_2sls.wu_hausman() # ### Covariance table # + df['Goal*Charisma'] = df['Goal'] * df['Charisma'] variables = [ 'Counter_real', 'Avg_edit_ratio', 'Goal', 
'Charisma', 'Goal*Charisma', 'Age', 'Female', 'Diverse', 'Education', 'Mobile_device', ] df_mean_std = df[variables].describe().T.loc[:, ['mean', 'std']].applymap(lambda x: round(x, 2)) df_rho = df[variables].corr() df_pval = df[variables].corr(method=lambda x, y: stats.pearsonr(x, y)[1]) df_pval = df_pval.applymap(lambda x: ''.join(['*' for t in [0.01, 0.05, 0.1] if x<=t])) df_rho = df_rho.round(2).astype(str) + df_pval df_all = df_mean_std\ .join(df_rho)\ .rename(lambda x: x.capitalize().replace('_', ' '), axis=0)\ .rename(lambda x: x.capitalize().replace('_', ' '), axis=1) path = os.path.join(TABLES_FOLDER, 'experiment_2', 'corr_variables.tex') df_all.to_latex(path, float_format="%.2f") display(df_all) # - # #### Convert to html # !jupyter nbconvert --output-dir='./docs' --to html 6_quantity_vs_quality.ipynb
notebook/experiment_2/6_quantity_vs_quality.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: virtualPython35
#     language: python
#     name: virtualpython35
# ---

# +
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -

# # Local development and docker image components
#
# - This section assumes that you have already created a program to perform the task required in a particular step of your ML workflow. This example uses an MNIST model training script.
#
# - Then, this example packages your program as a Docker container image.
#
# - Then, this example calls `kfp.dsl.ContainerOp` to convert it to a Kubeflow pipeline component.
# Note: Ensure that you have Docker installed, if you want to build the image locally, by running the following command:
#
# `which docker`
#
# The result should be something like:
#
# `/usr/bin/docker`

# +
import kfp
import kfp.gcp as gcp
import kfp.dsl as dsl
import kfp.compiler as compiler
import kfp.components as comp

import datetime
import kubernetes as k8s

# + tags=["parameter"]
# Required Parameters
PROJECT_ID='<ADD GCP PROJECT HERE>'
GCS_BUCKET='gs://<ADD STORAGE LOCATION HERE>'
# -

# ## Create client
#
# If you run this notebook **outside** of a Kubeflow cluster, run the following command:
# - `host`: The URL of your Kubeflow Pipelines instance, for example "https://`<your-deployment>`.endpoints.`<your-project>`.cloud.goog/pipeline"
# - `client_id`: The client ID used by Identity-Aware Proxy
# - `other_client_id`: The client ID used to obtain the auth codes and refresh tokens.
# - `other_client_secret`: The client secret used to obtain the auth codes and refresh tokens.
#
# ```python
# client = kfp.Client(host, client_id, other_client_id, other_client_secret)
# ```
#
# If you run this notebook **within** a Kubeflow cluster, run the following command:
# ```python
# client = kfp.Client()
# ```
#
# You'll need to create OAuth client ID credentials of type `Other` to get `other_client_id` and `other_client_secret`. Learn more about [creating OAuth credentials](
# https://cloud.google.com/iap/docs/authentication-howto#authenticating_from_a_desktop_app)

# Optional Parameters, but required for running outside Kubeflow cluster
HOST = '<ADD HOST NAME TO TALK TO KUBEFLOW PIPELINE HERE>'
CLIENT_ID = '<ADD OAuth CLIENT ID USED BY IAP HERE>'
OTHER_CLIENT_ID = '<ADD OAuth CLIENT ID USED TO OBTAIN AUTH CODES HERE>'
OTHER_CLIENT_SECRET = '<ADD OAuth CLIENT SECRET USED TO OBTAIN AUTH CODES HERE>'

# +
# Create kfp client.
# Probe for in-cluster service-account credentials; if they are not
# available we are running outside the cluster and must connect through
# the IAP-protected endpoint with the OAuth parameters above.
in_cluster = True
try:
    k8s.config.load_incluster_config()
except Exception:
    # load_incluster_config raises when no in-cluster config exists.
    # Was a bare `except:` — narrowed so Ctrl-C / SystemExit still propagate.
    in_cluster = False

if in_cluster:
    client = kfp.Client()
else:
    client = kfp.Client(host=HOST,
                        client_id=CLIENT_ID,
                        other_client_id=OTHER_CLIENT_ID,
                        other_client_secret=OTHER_CLIENT_SECRET)
# -

# ## Wrap an existing Docker container image using `ContainerOp`

# ### Writing the program code

# The following cell creates a file `app.py` that contains a Python script. The script downloads MNIST dataset, trains a Neural Network based classification model, writes the training log and exports the trained model to Google Cloud Storage.
#
# Your component can create outputs that the downstream components can use as inputs. Each output must be a string and the container image must write each output to a separate local text file. For example, if a training component needs to output the path of the trained model, the component writes the path into a local file, such as `/output.txt`.

# + language="bash"
#
# # Create folders if they don't exist.
# mkdir -p tmp/components/mnist_training
#
# # Create the Python file that lists GCS blobs.
# cat > ./tmp/components/mnist_training/app.py <<HERE # import argparse # from datetime import datetime # import tensorflow as tf # # parser = argparse.ArgumentParser() # parser.add_argument( # '--model_file', type=str, required=True, help='Name of the model file.') # parser.add_argument( # '--bucket', type=str, required=True, help='GCS bucket name.') # args = parser.parse_args() # # bucket=args.bucket # model_file=args.model_file # # model = tf.keras.models.Sequential([ # tf.keras.layers.Flatten(input_shape=(28, 28)), # tf.keras.layers.Dense(512, activation=tf.nn.relu), # tf.keras.layers.Dropout(0.2), # tf.keras.layers.Dense(10, activation=tf.nn.softmax) # ]) # # model.compile(optimizer='adam', # loss='sparse_categorical_crossentropy', # metrics=['accuracy']) # # print(model.summary()) # # mnist = tf.keras.datasets.mnist # (x_train, y_train),(x_test, y_test) = mnist.load_data() # x_train, x_test = x_train / 255.0, x_test / 255.0 # # callbacks = [ # tf.keras.callbacks.TensorBoard(log_dir=bucket + '/logs/' + datetime.now().date().__str__()), # # Interrupt training if val_loss stops improving for over 2 epochs # tf.keras.callbacks.EarlyStopping(patience=2, monitor='val_loss'), # ] # # model.fit(x_train, y_train, batch_size=32, epochs=5, callbacks=callbacks, # validation_data=(x_test, y_test)) # # # model.save(model_file) # # from tensorflow import gfile # # gcs_path = bucket + "/" + model_file # # if gfile.Exists(gcs_path): # gfile.Remove(gcs_path) # # gfile.Copy(model_file, gcs_path) # with open('/output.txt', 'w') as f: # f.write(gcs_path) # HERE # - # ### Creating a Dockerfile # Now create a container that runs the script. Start by creating a Dockerfile. A Dockerfile contains the instructions to assemble a Docker image. The `FROM` statement specifies the Base Image from which you are building. `WORKDIR` sets the working directory. 
When you assemble the Docker image, `COPY` copies the required files and directories (for example, `app.py`) to the file system of the container. `RUN` executes a command (for example, install the dependencies) and commits the results. # + language="bash" # # # Create Dockerfile. # cat > ./tmp/components/mnist_training/Dockerfile <<EOF # FROM tensorflow/tensorflow:1.15.0-py3 # WORKDIR /app # COPY . /app # EOF # - # ### Build docker image # Now that we have created our Dockerfile for creating our Docker image. Then we need to push the image to a registry to host the image. # - We are going to use the `kfp.containers.build_image_from_working_dir` to build the image and push to the Container Registry (GCR), which uses [kaniko](https://cloud.google.com/blog/products/gcp/introducing-kaniko-build-container-images-in-kubernetes-and-google-container-builder-even-without-root-access). # - It is possible to build the image locally using Docker and then to push it to GCR. # **Note**: # If you run this notebook **within Kubeflow cluster**, **with Kubeflow version >= 0.7**, you need to ensure that valid credentials are created within your notebook's namespace. # - With Kubeflow version >= 0.7, the credential is supposed to be copied automatically while creating notebook through `Configurations`, which doesn't work properly at the time of creating this notebook. # - You can also add credentials to the new namespace by either [copying credentials from an existing Kubeflow namespace, or by creating a new service account](https://www.kubeflow.org/docs/gke/authentication/#kubeflow-v0-6-and-before-gcp-service-account-key-as-secret). # - The following cell demonstrates how to copy the default secret to your own namespace. 
# # ```bash # # %%bash # # NAMESPACE=<your notebook name space> # SOURCE=kubeflow # NAME=user-gcp-sa # SECRET=$(kubectl get secrets \${NAME} -n \${SOURCE} -o jsonpath="{.data.\${NAME}\.json}" | base64 -D) # kubectl create -n \${NAMESPACE} secret generic \${NAME} --from-literal="\${NAME}.json=\${SECRET}" # ``` # + IMAGE_NAME="mnist_training_kf_pipeline" TAG="latest" # "v_$(date +%Y%m%d_%H%M%S)" GCR_IMAGE="gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}".format( PROJECT_ID=PROJECT_ID, IMAGE_NAME=IMAGE_NAME, TAG=TAG ) builder = kfp.containers._container_builder.ContainerBuilder( gcs_staging=GCS_BUCKET + "/kfp_container_build_staging") image_name = kfp.containers.build_image_from_working_dir( image_name=GCR_IMAGE, working_dir='./tmp/components/mnist_training/', builder=builder ) image_name # - # #### If you want to use docker to build the image # Run the following in a cell # ```bash # # %%bash -s "{PROJECT_ID}" # # IMAGE_NAME="mnist_training_kf_pipeline" # TAG="latest" # "v_$(date +%Y%m%d_%H%M%S)" # # # Create script to build docker image and push it. # # cat > ./tmp/components/mnist_training/build_image.sh <<HERE # PROJECT_ID="${1}" # IMAGE_NAME="${IMAGE_NAME}" # TAG="${TAG}" # GCR_IMAGE="gcr.io/\${PROJECT_ID}/\${IMAGE_NAME}:\${TAG}" # docker build -t \${IMAGE_NAME} . # docker tag \${IMAGE_NAME} \${GCR_IMAGE} # docker push \${GCR_IMAGE} # docker image rm \${IMAGE_NAME} # docker image rm \${GCR_IMAGE} # HERE # # # cd tmp/components/mnist_training # bash build_image.sh # ``` # ### Define each component # Define a component by creating an instance of `kfp.dsl.ContainerOp` that describes the interactions with the Docker container image created in the previous step. You need to specify # - component name # - the image to use # - the command to run after the container starts (If None, uses default CMD in defined in container.) # - the input arguments # - the file outputs (In the `app.py` above, the path of the trained model is written to `/output.txt`.) 
def mnist_train_op(model_file, bucket): return dsl.ContainerOp( name="mnist_training_container", image='gcr.io/{}/mnist_training_kf_pipeline:latest'.format(PROJECT_ID), command=['python', '/app/app.py'], file_outputs={'outputs': '/output.txt'}, arguments=['--bucket', bucket, '--model_file', model_file] ) # ### Create your workflow as a Python function # Define your pipeline as a Python function. ` @kfp.dsl.pipeline` is a required decoration including `name` and `description` properties. Then compile the pipeline function. After the compilation is completed, a pipeline file is created. # Define the pipeline @dsl.pipeline( name='Mnist pipeline', description='A toy pipeline that performs mnist model training.' ) def mnist_container_pipeline( model_file: str = 'mnist_model.h5', bucket: str = GCS_BUCKET ): mnist_train_op(model_file=model_file, bucket=bucket).apply(gcp.use_gcp_secret('user-gcp-sa')) # ### Submit a pipeline run pipeline_func = mnist_container_pipeline # + experiment_name = 'minist_kubeflow' arguments = {"model_file":"mnist_model.h5", "bucket":GCS_BUCKET} run_name = pipeline_func.__name__ + ' run' # Submit pipeline directly from pipeline function run_result = client.create_run_from_pipeline_func(pipeline_func, experiment_name=experiment_name, run_name=run_name, arguments=arguments) # - # **As an alternative, you can compile the pipeline into a package.** The compiled pipeline can be easily shared and reused by others to run the pipeline. # # ```python # pipeline_filename = pipeline_func.__name__ + '.pipeline.zip' # compiler.Compiler().compile(pipeline_func, pipeline_filename) # # experiment = client.create_experiment('python-functions-mnist') # # run_result = client.run_pipeline( # experiment_id=experiment.id, # job_name=run_name, # pipeline_package_path=pipeline_filename, # params=arguments) # ```
samples/tutorials/mnist/02_Local_Development_with_Docker_Image_Components.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Import statements
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np

# Load the dataset as a NumPy array.
data = np.asarray(pd.read_csv('SVM.txt', header=None))

# Features are the first two columns; labels are the third.
X = data[:, :2]
y = data[:, 2]

# An RBF kernel with a large gamma fits this dataset perfectly.
model = SVC(kernel='rbf', gamma=27)

# Fit the classifier on the full dataset.
model.fit(X, y)

# Predict on the same data and store the predictions.
y_pred = model.predict(X)

# Report the training accuracy.
acc = accuracy_score(y, y_pred)
print(acc)
Supervised Learning/SVM/SVM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

age = 20

# Old enough to vote but not yet old enough to play poker.
if 18 <= age < 21:
    print('At least you can vote.')
    print('Poker will have to wait.')

# Independent checks for each privilege.
if age >= 18:
    print('You can vote.')
if age >= 21:
    print('You can play poker.')
Chapter01/Exercise16/Exercise16.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from hypernetworks.core.Hypernetwork import Hypernetwork from hypernetworks.core.Hypersimplex import VERTEX, ALPHA, BETA, hstype_to_str from hypernetworks.utils.HTCompiler import load_parser, compile_hn from hypernetworks.utils.HTGraph import draw_hn from IPython.display import Image # + hn = Hypernetwork() parser = load_parser() compile_hn(hn, parser, """ x=<a1, b1, a2, b2; R_x> y=<a1, b1, a2, b2; R_y> z=<a1, b1, a2, b2; R_z> x -> (v_4 /above/ v_3) AND (v_3 /above/ v_2) AND (v_2 /above/ v_1); y -> (v_4 /above/ v_3) /next/ (v_2 /above/ v_1); z -> v_4 /above/ v_3 /above/ v_2 /above/ v_1; """) for key, rel in hn.relations.items(): print(key, ":", rel) # - draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # + hn = Hypernetwork() compile_hn(hn, parser, """ a=<x1, x2; R_1> b=<x1, x2, x3; R_1> """) draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # + hn = Hypernetwork() compile_hn(hn, parser, """ a={x1, x2; R_1} b=<x1, x2; R_1> """) draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # + hn = Hypernetwork() compile_hn(hn, parser, """ a=<x1, x2; R_1> b={x1, x2; R_1} """) draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # + hn = Hypernetwork() compile_hn(hn, parser, """ a={x1, x2; R_1} a={x1, x2, x3; R_1} """) draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # + hn = Hypernetwork() compile_hn(hn, parser, """ a={x1, x2; R_1} a={x1, x2, x3; R_2} """) draw_hn(hn, fname="/tmp/hn", view=False) Image(filename='/tmp/hn.png', width=300, height=300) # -
examples/R.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="pA9aKOr8fzuR"
import pandas as pd
import numpy as np

from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score

# + id="fgE76iISgPs0"
# Toy illustration of averaging several estimates.
y_true = 30
y_pred = 20

b1 = 10
b2 = 8
b3 = 15

(b1 + b2 + b3) / 3

# + id="Bq_QyIb5ggFT"
# ls

# + id="iTy5GYXckVMR"
# cd drive/My Drive/

# + id="wEELDylckesc"
# cd Colab\ Notebooks/dwmatrix

# + id="14S6IifGku4t"
# ls data

# + id="om-_H37UkxwI"
# low_memory=False avoids mixed-dtype chunked parsing warnings on this CSV.
df =pd.read_csv('data/men_shoes.csv', low_memory=False)

# + id="Bj4VFt8zk-wP"
df.columns

# + [markdown] id="L4QUiW-TlGXP"
# Baseline

# + id="CtnBuRMHlBcm"
# Baseline prediction: the mean price over the whole dataset.
mean_price =np.mean( df['prices_amountmin'])

# + id="g6FuUq5hlPlX"
mean_price

# + [markdown] id="i4iVLOPRlV8i"
# Always predicts the mean value

# + id="HLmwK8MplTq7"
[3] * 5

# + id="efY-MyX8lfRv"
# MAE of the constant-mean baseline.
y_true = df['prices_amountmin']
y_pred = [mean_price] * y_true.shape[0]

mean_absolute_error(y_true, y_pred)

# + id="EBtwjtASl06w"
df['prices_amountmin'].hist(bins=100)

# + id="23Pwoz_dmDFP"
np.log( df['prices_amountmin'] + 1 ).hist(bins=100)

# + id="UzB9cFucmlHQ"
# log1p is the numerically-stable equivalent of log(x + 1).
np.log1p( df['prices_amountmin'] ).hist(bins=100)

# + id="89-SmlUXmnez"
# MAE of the constant-median baseline.
y_true = df['prices_amountmin']
y_pred = [np.median(y_true)] * y_true.shape[0]

mean_absolute_error(y_true, y_pred)

# + [markdown] id="qCjEN3n3nGid"
# Log transformation

# + id="ACGjRnEZnEJP"
# Mean in log space, transformed back with expm1 (inverse of log1p).
y_true = df['prices_amountmin']
price_log_mean = np.expm1 (np.mean ( np.log1p(y_true) ) )
y_pred = [price_log_mean] * y_true.shape[0]

mean_absolute_error(y_true, y_pred)

# + [markdown] id="AnYyFZQkn2Io"
# Decision Tree

# + id="w-Z77tf2nT3T"
df.columns

# + id="QyciXDevn8SV"
df.brand.value_counts()

# + [markdown] id="0RmNfAgmoGz4"
# Converting names to IDs

# + id="RrjTeiGaoFF-"
# {'Nike' : 1, 'PUMA' : 2 ...}
df.brand.factorize()[0]

# + id="dCYI7Qi9odny"
df['brand_cat'] = df.brand.factorize()[0]

# + id="Oow_N17sokhd"
# Cross-validated decision tree on the brand feature alone; scores are
# negative MAE (sklearn convention: higher is better).
feats = ['brand_cat']

X = df[ feats ].values
y = df.prices_amountmin

model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)

# + [markdown] id="acSOqYhJpcnh"
# Helper function for the model

# + id="d3J6usbNpK_0"
def run_model(feats):
    """Cross-validate a depth-5 decision tree on the given feature columns.

    Returns (mean, std) of the negative-MAE cross-validation scores.
    Reads the module-level DataFrame `df`.
    """
    X = df[ feats ].values
    y = df.prices_amountmin
    model = DecisionTreeRegressor(max_depth=5)
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)

# + id="uzLUAg8Jps0k"
df.columns

# + id="QiFpLdIOp-bB"
df.manufacturer.value_counts()

# + id="nXodreHoqDri"
df['manufacturer_cat'] = df.manufacturer.factorize()[0]

# + id="7BT0F_wapnPA"
run_model( ['manufacturer_cat', 'brand_cat'] )

# + id="Djzm3eDqpxtb"
# NOTE(review): embedding a token in the remote URL can leak credentials
# via shell history / notebook output — prefer a credential helper.
# !git add matrix_one/day4.ipynb
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "kozolex"
# !git commit -m "day 4 done"
token = ''
repo = 'https://{0}@github.com/kozolex/dwmatrix.git'.format(token)
# !git push -u {repo} --force
matrix_one/day4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Andrew-Haney/DS-Unit-2-Linear-Models/blob/master/Andrew_Haney_Unit2_Project_Pred_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="I8qrR1D6vuEE" colab={"base_uri": "https://localhost:8080/"} outputId="288407ce-3991-466b-c3cc-3f5657631c88" import sys # !pip install category_encoders import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from category_encoders import OneHotEncoder, OrdinalEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.metrics import plot_confusion_matrix, classification_report from sklearn.metrics import roc_curve, roc_auc_score cols = ['ID', 'Age', 'Gender', 'Education', 'Country', 'Ethnicity', 'NScore_Neuroticism', 'EScore_Extraversion', 'OScore_Open_to_Exp', 'AScore_Agreeableness', 'CScore_Conscientiousness', 'Impulsiveness', 'Sensation_Seeing', 'Alcohol', 'Amphetamine', 'Amyl', 'Benzos', 'Caffeine', 'Cannabis', 'Chocolate', 'Cocaine', 'Crack', 'Ecstasy', 'Heroin', 'Ketamine', 'Legal_Highs', 'LSD', 'Meth', 'Mushrooms', 'Nicotine', 'Semeron', 'VSA'] df = pd.read_csv('/content/drug_consumption.data', header = 0, names= cols, index_col = 'ID', dtype= 'object') # + id="zrh0Iwdhv80h" def wrangle(df): cutoff = 500 hcc_cols = [col for col in df.select_dtypes('object').columns if df[col].nunique() > cutoff] df.drop(columns=hcc_cols, inplace=True) single_cols = [col for col in df.select_dtypes('object').columns if df[col].nunique() == 1] df.drop(columns= single_cols, inplace= True) return df df = 
wrangle(df)

# +
df.info()

# +
df.head()

# +
df['Age'].nunique()

# +
# Decode the survey's quantified category codes into human-readable labels.
# One exact-value mapping per column replaces the original wall of chained
# .str.replace() calls; Series.replace() matches whole cell values, which is
# both shorter and safer than (regex) substring replacement on codes that
# contain '.' and '-'.
AGE_MAP = {'-0.95197': '18-24', '-0.07854': '25-34', '0.49788': '35-44',
           '1.09449': '45-54', '1.82213': '55-64', '2.59171': '65+'}
GENDER_MAP = {'-0.48246': 'Female', '0.48246': 'Male'}
EDUCATION_MAP = {'-2.43591': 'Left_before_16', '-1.73790': 'Left_at_16',
                 '-1.43719': 'Left_at_17', '-1.22751': 'Left_at_18',
                 '-0.61113': 'Some_College', '-0.05921': 'Professional_Cert',
                 '0.45468': 'Univ_degree', '1.16365': 'Masters_Degree',
                 '1.98437': 'Dr_Degree'}
ETHNICITY_MAP = {'-0.50212': 'Asian', '-1.10702': 'Black', '1.90725': 'Bl_As',
                 '0.12600': 'Wh_As', '-0.22166': 'Wh_Bl', '0.11440': 'Other',
                 '-0.31685': 'White'}
COUNTRY_MAP = {'-0.09765': 'Australia', '0.24923': 'Canada',
               '-0.46841': 'New_Zealand', '-0.28519': 'Other',
               '0.21128': 'Ireland', '0.96082': 'UK', '-0.57009': 'USA'}
# Usage recency codes CL6 (last 24h) ... CL0 (never); shared by every
# substance column below.
USAGE_MAP = {'CL6': 'Used_24hrs', 'CL5': 'Used_7days', 'CL4': 'Used_Month',
             'CL3': 'Used_Year', 'CL2': 'Used_Decade', 'CL1': 'Used_Life',
             'CL0': 'Never_Used'}
DRUG_COLS = ['Cannabis', 'Alcohol', 'Amphetamine', 'Amyl', 'Benzos',
             'Caffeine', 'Chocolate', 'Cocaine', 'Crack', 'Ecstasy',
             'Heroin', 'Ketamine', 'Legal_Highs', 'LSD', 'Meth',
             'Mushrooms', 'Nicotine', 'Semeron', 'VSA']

df['Age'] = df['Age'].replace(AGE_MAP)
df['Gender'] = df['Gender'].replace(GENDER_MAP)
df['Education'] = df['Education'].replace(EDUCATION_MAP)
df['Ethnicity'] = df['Ethnicity'].replace(ETHNICITY_MAP)
df['Country'] = df['Country'].replace(COUNTRY_MAP)
for drug in DRUG_COLS:
    df[drug] = df[drug].replace(USAGE_MAP)

# +
# Personality / impulsivity scores arrive as strings; cast them to float.
SCORE_COLS = ['NScore_Neuroticism', 'EScore_Extraversion',
              'OScore_Open_to_Exp', 'AScore_Agreeableness',
              'CScore_Conscientiousness', 'Impulsiveness', 'Sensation_Seeing']
df[SCORE_COLS] = df[SCORE_COLS].astype(float)

# +
df['Age'].value_counts(normalize=True)

# +
df['OScore_Open_to_Exp'].value_counts()

# +
df.head()

# +
# Target/feature split: predict Meth usage from every other column.
target = 'Meth'
X = df.drop(columns=target)
y = df[target]

# +
# Hold out 20% for validation; fixed seed for reproducibility.
X_train, X_val, y_train, y_val = train_test_split(
    X, y, test_size=0.2, random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="N0C03RCEgNzx" outputId="a28e5223-463c-4b20-8cd5-cfcc11b5e00e"
# Sanity-check the train/validation split: X/y row counts must agree pairwise.
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)

# + id="QVtCwhjIidGV"
# Imported for later hyperparameter tuning; unused in the cells below.
from sklearn.model_selection import GridSearchCV

# + colab={"base_uri": "https://localhost:8080/"} id="8gD28JXN2xXQ" outputId="02e55fad-551e-4d47-e446-35eaa93b5b0a"
# Random-forest baseline: ordinal-encode the categorical labels, mean-impute
# missing values, and cap tree depth at 10 to limit overfitting.
model_rf = make_pipeline(OrdinalEncoder(),
                         SimpleImputer(),
                         RandomForestClassifier(max_depth=10, n_jobs=-1))
model_rf.fit(X_train, y_train)
print('Validation Score: ', model_rf.score(X_val, y_val))

# + colab={"base_uri": "https://localhost:8080/"} id="F2E6bJ5VhMKV" outputId="9035b025-0ce8-4d79-908f-6a57a2a28295"
# Majority-class rate: the accuracy any model must beat to be useful.
print('Baseline accuracy ', y_train.value_counts(normalize=True).max())

# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="y9dz4Xal7VTi" outputId="1fc0b8b4-7170-4bde-c244-be8538daa6ae"
# Numeric-only view of the data (the personality-score columns).
df_numerical = df.select_dtypes(exclude='object')
df_numerical.head()

# + id="7udDq-jqp7cK"
# from sklearn.preprocessing import StandardScaler
# from sklearn.model_selection import GridSearchCV
# model_rscv = make_pipeline(StandardScaler(),
#                            OrdinalEncoder(),
#                            GridSearchCV(estimator= 'logisticregression', n_jobs= -1))

# + id="3Aw10oZQ6PlB"

# + id="qtCnUevu3CY9" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="f9da2ad1-26ad-4097-a78a-325acbc28d30"
# Plot the 10 most important features of the fitted random forest.
rf = model_rf.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)
n = 10
plt.figure(figsize=(10, n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey')
plt.xlabel('Meth Use')
plt.ylabel('Features');

# + id="88cxbzwz748Y"
# Gradient-boosted alternative (XGBoost) on the same train/validation split.
from xgboost import XGBClassifier

model_boost = make_pipeline(OrdinalEncoder(),
                            SimpleImputer(strategy='median'),
                            XGBClassifier(n_estimators=75, learning_rate=0.2, random_state=42))
model_boost.fit(X_train, y_train);

# + id="Trh2MjIrh194" colab={"base_uri": "https://localhost:8080/"} outputId="45cae2c2-4d51-4b15-e535-ef4a842af10e"
model_boost.score(X_val, y_val)

# + id="8zjEULEZzN7Y"
# Categorical-only view (drug-use labels plus demographics).
df_cat = df.select_dtypes(exclude='number')

# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="7Q7-PVu3z80d" outputId="029f0e30-62ba-49f8-ff17-84ebb44104ae"
# df_cat['Education'].value_counts(normalize= True).index
df_cat.head()

# + id="WGghhP1j057a"
# Second task: predict Amphetamine use from the categorical columns only.
target = 'Amphetamine'
X2 = df_cat.drop(columns=target)
y2 = df_cat[target]
X2_train, X2_val, y2_train, y2_val = train_test_split(X2, y2, test_size=0.3, random_state=42)

# + id="t8tiW5_bz-NP"
model_boost_cat = make_pipeline(OrdinalEncoder(),
                                SimpleImputer(strategy='median'),
                                XGBClassifier(n_estimators=75, learning_rate=0.4, random_state=42))
model_boost_cat.fit(X2_train, y2_train);

# + colab={"base_uri": "https://localhost:8080/"} id="W4LafaCz3HaJ" outputId="2cfdbd98-b084-4ac5-e37e-96e8fcb30631"
model_boost_cat.get_params()

# + colab={"base_uri": "https://localhost:8080/"} id="MVQsBtPC3U2W" outputId="67409d20-fc40-42ba-a21e-cbf779b67ea1"
baseline_acc2 = y2_train.value_counts(normalize=True).max()
print('Baseline Accuracy 2 ', baseline_acc2)

# + colab={"base_uri": "https://localhost:8080/", "height": 350} id="ufdYYQ7U4ZDC" outputId="c4493e94-c311-48fb-82d2-4265bcdef3b4"
# Feature importances for the categorical Amphetamine model.
boost_cat = model_boost_cat.named_steps['xgbclassifier']
importances_cat = pd.Series(boost_cat.feature_importances_, X2_val.columns)
n = 10
plt.figure(figsize=(10, n/2))
plt.title(f'Top {n} features')
importances_cat.sort_values()[-n:].plot.barh(color='blue')
plt.xlabel('Feature Importance with relation to Amphetamine')
plt.ylabel('Features');

# + colab={"base_uri": "https://localhost:8080/"} id="NfVwzOdollbI" outputId="aa5b5c65-3a11-4bc5-f497-24b69e0ecaef"
df_numerical.shape

# + id="f62i_dExpoaE"
# Convenience handles on individual columns for the plot below.
heroin = df_cat['Heroin']
cannabis = df_cat['Cannabis']
meth = df_cat['Meth']
alcohol = df_cat['Alcohol']
age_group = df_cat['Age']
O_score = df_numerical['OScore_Open_to_Exp']
C_score = df_numerical['CScore_Conscientiousness']
E_score = df_numerical['EScore_Extraversion']
A_score = df_numerical['AScore_Agreeableness']
N_score = df_numerical['NScore_Neuroticism']
impulsiveness = df_numerical['Impulsiveness']
sen_seeing = df_numerical['Sensation_Seeing']

# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="sPW8kOtI4705" outputId="a85dbe0c-7c75-436e-a558-2e6d384ff7b2"
import seaborn as sns
sns.catplot(x=N_score, y=alcohol);

# + [markdown] id="7Teseiq0XWh9"
# ##### **O-Score(Openness)** is a person's willingness to be open to new ideas and experiences. Scoring is again, very similar to the E-Score in the sense that a higher positive score indicates you are more likely to experiment with various substances.
#
# ##### **C-Score(Conscientiousness)** shows organization vs chaos. At the highest end of the scale, perfectionism and OCD are prevalent. On the lowest end of the scale would be those who act before thinking and spontaneous individuals.
#
# ##### **E-Score(Extraversion)** in this scenario uses 0 as a start point and a positive score indicating you are more extroverted while a negative score indicates a personality that is more introverted.
#
# ##### **A-Score(Agreeableness)** is how agreeable or prone to persuasion an individual is. Someone with a higher score is more likely to be a people pleaser, while someone with a low score is more likely to be invested in personal interests.
#
# ##### **N-Score(Neuroticism)** is scored on a scale of 0-12. A score of 0-6 means there is a general lack of Neuroticism present and 6-12 indicates a positive result for Neuroticism. In this case, we will be using a scale from 0.0 to 1.2 with 0.6 being the cutoff.

# + id="bt1YgFDalFeW"
# Per-class precision/recall/F1 for the Amphetamine model.
clr = classification_report(y2_val, model_boost_cat.predict(X2_val))

# + id="wCL0X8ABxkez"
print(clr)

# + id="10pvgLLFyPsD"
Andrew_Haney_Unit2_Project_Pred_Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''base'': conda)'
#     language: python
#     name: python3
# ---

# # The Interaction of Matter and Light

# ## Spectral Lines

# Some philosophers during the Renaissance considered strict limits to human knowledge, while other natural philosophers used experimentation to probe reality. In 1800, <NAME> showed that a number of **dark spectral lines** were present within a rainbow-like spectrum using sunlight. By 1814, <NAME> cataloged 475 of the dark lines (i.e., [Fraunhofer lines](https://en.wikipedia.org/wiki/Fraunhofer_lines)) in the solar spectrum. Fraunhofer determined that the wavelength of one prominent dark line in the Sun's spectrum corresponds to the yellow light emitted by salt in a flame. The new science of *spectroscopy* was born with the discovery of the sodium line.
#
# <p align="center">
# <img src="https://upload.wikimedia.org/wikipedia/commons/2/2f/Fraunhofer_lines.svg"/> <br>
# <b>Figure 1</b>: Solar spectrum with Fraunhofer lines as it appears visually (wikipedia:<a href="https://www.scientificamerican.com/article/ancient-stars-how-does-spectrograph-work/">Fraunhofer_lines</a>)
# </p>
#
# <p align="center">
# <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Table_1.jpg?raw=true?"/>
# </p>
# <center>
# <div style='width: 450px'><b>Table 1</b>: Wavelengths of some of the stronger Fraunhofer lines measured in air near sea level. (Carroll & Ostlie, 2007) </div>
# </center>
#
# ### Kirchoff's Laws
# The foundations of spectroscopy (and modern chemistry) were established in the 1800s with <NAME> and <NAME>. Bunsen's burner produced a colorless flame that was ideal for studying the colors produced by burning a range of substances. Bunsen and Kirchoff designed a *spectroscope* that passed the light of a flame spectrum through a prism.
They determined that the wavelengths of light absorbed and emitted by an element fit together like a lock and key. Kirchoff determined that 70 of the Fraunhofer lines were due to iron vapor. In 1860, Kirchoff and Bunsen developed the idea that every element produces its own pattern of spectral lines and thus the elements had "fingerprints". Kirchoff summarized the production of spectral lines in three laws (i.e., **Kirchoff's Laws**): # # - A hot, dense gas or hot solid object produces a continuous spectrum with no dark spectral lines. # # - A hot diffuse gas produces bright spectral lines (**emission lines**). # # - A cool, diffuse gas in front of a source of a continuous spectrum produces dark spectral lines (**absorption lines**) in the continuous spectrum. # # # # ### Applications of Stellar Spectral Data # Using the development of spectral fingerprints, a new element *helium* was discovered spectroscopically in the Sun (in 1868) and found on Earth in 1895. Another line of investigation was measuring the Doppler shifts of the spectral lines. For most cases at the time, the *low-speed approximation* ($v_r \ll c$) was adequate to determine the radial velocities $v_r$ by the following equation, # # $$ # \frac{\lambda_{\rm obs}-\lambda_{\rm rest}}{\lambda_{\rm rest}} = \frac{\Delta \lambda}{\lambda_{\rm rest}}=\frac{v_r}{c}. \tag{1} # $$ # # By 1887, the radial velocities of Sirius, Procyon, Rigel, and Arcturus were measured with and accuracy of a few km/s. The rest wavelength $\lambda_{\rm rest}$ for the hydrogen spectral line (H$\alpha$) is 656.281 nm (in air). Given the simplicity of hydrogen and its spectral lines, they are commonly used to measure the radial velocities of stars. 
# # - **What is the radial velocity of the star Vega, if its H$\alpha$ line is observed at 656.21 nm from a ground-based telescope?** (*There is a typographical error in the textbook.*)

# +
import numpy as np


def calc_radial_vel(l_obs, l_rest):
    """Radial velocity (km/s) from the Doppler shift of a spectral line.

    l_obs, l_rest : observed and laboratory rest wavelengths; any units,
    as long as both use the same one.  Negative result = approaching.
    """
    frac_shift = (l_obs - l_rest) / l_rest  # Delta-lambda / lambda_rest
    return frac_shift * c


c = 2.99792458e5   # speed of light in km/s
Ha_obs = 656.251   # Vega H-alpha in nm
Ha_rest = 656.281  # rest H-alpha in nm

Vega_RV = calc_radial_vel(Ha_obs, Ha_rest)
print("The radial velocity of Vega is %2.1f km/s." % np.round(Vega_RV,1))
# -

# The negative sign for the radial velocity indicates that Vega is approaching the Sun (i.e., moving towards us). Some stars also have a measured *proper motion* $\mu$ that is perpendicular to the line-of-sight. Combining the proper motion with a known distance $r$, the *transverse velocity* $v_\theta$ can be determined ($v_\theta = r\mu$).
#
# - **Vega's proper motion is $\mu = 0.34972^{\prime \prime}$/yr and its distance is 7.68 pc away [(Van Leeuwen 2007)](https://arxiv.org/abs/0708.1752). What is its transverse velocity? What is its velocity through space?**

# +
def mu2SI(mu):
    """Convert a proper motion from arcseconds/year to radians/second."""
    return mu * (as2rad / yr2sec)


def PC2km(r):
    """Convert a distance from parsecs to kilometers."""
    r2AU = r / as2rad  # parsecs -> AU (small-angle definition of the parsec)
    return r2AU * AU   # AU -> km


AU = 1.495978707e8                # (in km) Astronomical Unit
yr2sec = 365.25*24*3600           # seconds in a (Julian) year
as2rad = (1./3600.)*(np.pi/180.)  # arcseconds -> radians

mu = mu2SI(0.34972)
r = PC2km(7.68)
v_theta = r*mu
print("The transverse velocity of Vega is %2.1f km/s." % np.round(v_theta,1))
v_Vega = np.sqrt(v_theta**2+Vega_RV**2)
print("Vega's velocity through space is %2.1f km/s" % (v_Vega))
# -

# The average speed of stars in the solar neighborhood is about 25 km/s, where the measurement of a star's radial velocity is also complicated by the motion of the Earth (29.8 km/s) around the Sun. Astronomers correct for this motion by subtracting the component of Earth's orbital velocity along the line-of-sight from the star's measured radial velocity.
#
# ### Spectrographs
# Modern methods can measure radial velocities with an accuracy of $\sim 1$ m/s [(Dumusque et al. 2021)](https://www.aanda.org/articles/aa/full_html/2021/04/aa39350-20/aa39350-20.html). Astronomers use *spectrographs* to measure the radial velocity of exoplanets, stars, and galaxies. Modern spectrographs collimate the incoming starlight onto a mirror and split into the constituent colors using a *diffraction grating*, or a piece of glass with narrow, closely spaced lines; a transmission grating allows the light to pass through while a reflection grating reflects the light. The grating acts like a series of neighboring double slits (see Chapter 3). Different wavelengths have their maxima occurring at different angles $\theta$ by
#
# $$
# d\sin \theta = n\lambda,
# $$
#
# where $d$ is the slit spacing of the grating, $n$ is the order of the spectrum, and $\theta$ is measured relative to the line perpendicular to the grating. The smallest measurable difference in wavelength $\Delta \lambda$ depends on the order $n$ and the total number of lines $N$ through
#
# $$
# \Delta \lambda = \frac{\lambda}{nN}, \tag{2}
# $$
#
# where $\lambda$ is either of the closely spaced wavelengths. The ratio $\lambda/\Delta \lambda$ is one way to express the **resolving power** of the grating.
#
# <p align="center">
# <img src="https://www.scientificamerican.com/media/inline/ancient-stars-how-does-spectrograph-work_2.jpg?raw=true?"/> <br>
# <b>Figure 2</b>: Schematic of a spectrograph.
(image credit: <a href="https://www.scientificamerican.com/article/ancient-stars-how-does-spectrograph-work/">Scientific American) </a> # </p> # # ### **Problems** # >1\. Barnard's star is an orange star in the constellation Ophiuchus. It has the largest known proper motion $\mu = 10.3934^{\prime\prime}$/yr and the fourth-largest parallax angle $p = 0.5469759^{\prime\prime}$ ([Gaia Collaboration](https://arxiv.org/abs/1804.09365)). The H$\alpha$ absorption line is observed at 656.034 nm when measured from the ground. # >>**(a)** Determine the radial velocity of Barnard's star. # >> # >>**(b)** Determine the transverse velocity of Barnard's star. # >> # >>**(c)** Calculate the speed of Barnard's star through space. # > # >2\. The Sun's spectrum contains two spectral lines (*Sodium D lines*) 588.997 nm and 589.594 nm. # >>**(a)** If a diffraction grating with 300 lines per millimeter is used to measure the Sodium D lines, what is the angle between the second-order spectra for each of the wavelengths? # >> # >>**(b)** How many lines must the grating have to resolve the sodium D lines? # ## Photons # A complementary description of light (in contrast to waves) uses bundles of energy called **photons**. The energy is described in terms of Planck's constant *h* and is recognized as a fundamental constant of nature like the speed of light *c*. This description of matter and energy is known as **quantum mechanics**, which uses Planck's discovery of energy quantization. The first step forward was made by Einstein, who demonstrated the implications of Planck's quantum bundles of energy. # # ### The Photoelectric Effect # The **photoelectric effect** describes the light energy necessary to eject electrons from a metal surface. The electrons with the highest kinetic energy $K_{max}$ originate from the surface of the metal and surprisingly, $K_{max}$ does not depend on the light intensity. 
# A higher intensity of light increases the number of electrons ejected, but not their maximum kinetic energy. The value of $K_{max}$ varies with the *frequency* of the light and each metal has a characteristic cutoff frequency $\nu_c$ and wavelength ($\lambda_c = c/\nu_c$).
#
# Einstein's solution describes the light striking the metal surface as a stream of massless particles (i.e., photons). The energy of a single photon of frequency $\nu$ and wavelength $\lambda$ is just Planck's quantum of energy
#
# $$
# E_{\rm photon} = h\nu = \frac{hc}{\lambda}. \tag{3}
# $$
#
# - **What is the energy of a single blue photon of wavelength $\lambda = 450$ nm?**

# +
import numpy as np


def energy_photon(l, units):
    """Return the energy of a photon with wavelength `l` (in nm).

    units : 'eV' -> energy in electron-volts; anything else -> joules.
    """
    if units == 'eV':
        hc = 1240  # h*c in eV nm
    else:
        h = 6.62607004e-34   # (J s) Planck's constant
        c_ms = 2.99792458e8  # (m/s) speed of light; defined locally so this
                             # cell no longer depends on the km/s `c` global
                             # from an earlier cell (old code: h*c*1000/1e-9)
        hc = h * c_ms * 1e9  # h*c in J nm (1 m = 1e9 nm)
        # (a stray debug print(hc) was removed here)
    return hc / l


E_blue_eV = energy_photon(450, 'eV')
E_blue_J = energy_photon(450, 'J')
print("The energy of a single blue photon (450 nm) is %1.2f eV or %1.2e J" % (np.round(E_blue_eV,2), E_blue_J))
# -

# Einstein deduced that when a photon strikes the metal surface, its energy may be absorbed by a single electron. The photon supplies the energy required to free the electron from the metal (i.e., overcome the binding energy). The work function $\phi$ defines the minimum binding energy of electrons in a metal and the maximum kinetic energy of the ejected electrons is
#
# $$
# K_{\rm max} = E_{\rm photon} - \phi = h\nu - \phi, \tag{4}
# $$
#
# where the cutoff frequency and wavelength are $\nu_c = \phi/h$ and $\lambda_c = hc/\phi$, respectively. <NAME> was awarded the 1921 Nobel Prize for "his services to theoretical physics, and especially for his discovery of the law of the photoelectric effect". Astronomers take advantage of the photoelectric effect through detectors that count photons by the work they do on a metal (e.g., charge-coupled devices or CCDs).
# # ### The Compton Effect # Another test of light's particle-like nature was performed by <NAME>. He measured the change in the wavelength of X-ray photons as they were scattered by free electrons. Through special relativity, the energy of a photon is related to its momentum $p$ by # # $$ # E_{\rm photon} = h\nu = pc. \tag{5} # $$ # # compton considered the interaction (or "collision") between a photon and a free electron (initially at rest). The electron is scattered by an angle $\varphi$, while the photon is scattered by an angle $\theta$. Due to the energy exchange during the interaction (i.e., electron gains kinetic energy) the photon's energy is reduced and the wavelength has increased. The change in wavelength is the difference between the final and initial wavelength and can be expressed as # # $$ # \Delta \lambda =\lambda_f - \lambda_i = \frac{h}{m_e c}(1-\cos \theta), \tag{6} # $$ # # where $m_e$ is the mass of the electron. The prefactor $h/m_e c$ is called the **Compton wavelength** $\lambda_C$. Compton's experiment provided evidence that photons carry momentum (although massless) and this is the physical basis for the force exerted by radiation upon matter. # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_3.jpg?raw=true?"/> <br> # <b>Figure 3</b>: The Compton effect: the scattering of a photon by a free electron. (Carroll & Ostlie (2007)) # </p> # # ### **Problems** # >3\. The ejection of an electron leaves a grain of dust with a positive charge, which leads to heating within an interstellar cloud. If the average energy of the ejected electron is about 5 eV and the process is particularly effective for UV photons ($\lambda \approx 100$ nm), estimate the work function $\phi$ of a typical dust grain. # > # >4\. Consider the case of a "collision" between a photon and a free proton (initially at rest) given a scattering angle $\theta=90^\circ$. 
# >>**(a)** What is the characteristic change in the wavelength of the scattered photon (in nm)?
# >>
# >>**(b)** How does this compare with the Compton wavelength, $\lambda_C$?

# ## The Bohr Model of the Atom
#
# ### The Structure of the Atom
# With the discovery of the [electron](https://en.wikipedia.org/wiki/Electron) (by <NAME>), it was shown that the atom is composed of smaller parts, with ordinary matter usually consisting of electrically neutral bulk matter (equal parts of negative and positive charges). Ernest [Rutherford](https://en.wikipedia.org/wiki/Ernest_Rutherford) discovered in 1911 that an atom's positive charge was concentrated in a tiny, massive nucleus using $\alpha$ particles (now known to be helium nuclei). Rutherford calculated that the radius of the nucleus was 10,000 times smaller than the atomic radius, showing that ordinary matter is mostly empty space. Rutherford also coined the term **proton** to refer to the nucleus of the hydrogen atom, which is 1836 times more massive than the electron.
#
# ### The Wavelength of Hydrogen
# Many natural philosophers (now known as chemists) produced a wealth of experimental data. Fourteen spectral lines of hydrogen had been precisely measured before the 20th century. The spectral lines were described by their wavelengths and categorized using Greek letters (${\rm H}\alpha$, ${\rm H}\beta$, etc.). In 1885 <NAME> found (by trial and error) a formula to reproduce the series of wavelengths for hydrogen's spectral lines (or **Balmer lines**) given as
#
# $$
# \lambda = \frac{1}{R_H}\left( \frac{4n^2}{n^2-4}\right), \tag{7}
# $$
#
# where $n$ is an integer greater than 2 ($n=3,4,5,\ldots$) and $R_H = 1.09677583 \times 10^7 \pm 1.3 \;{\rm m}^{-1}$ is the [Rydberg constant](https://en.wikipedia.org/wiki/Rydberg_constant) for hydrogen. Equation 7 is accurate to a fraction of a percent, where values of $n$ produce the wavelength for the spectral lines (${\rm H}\alpha$, ${\rm H}\beta$, ${\rm H}\gamma$, etc.).
# Balmer realized that the formula could be generalized (since $2^2 = 4$) producing the general form as
#
# $$
# \lambda = \frac{1}{R_H}\left( \frac{m^2n^2}{n^2-m^2}\right), \tag{8}
# $$
#
# with $m<n$ (both integers). There are only a few spectral lines that lie in the visible range for hydrogen, where the *Lyman lines* ($m=1$) are found in the ultraviolet and the *Paschen lines* ($m=3$) lie entirely in the infrared.
#
# <p align="center">
# <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Table_2.jpg?raw=true?"/>
# </p>
# <center>
# <div style='width: 450px'><b>Table 2</b>: Wavelengths of selected hydrogen spectral lines in air. (Carroll & Ostlie, 2007) </div>
# </center>
#
# - **What are the first 3 wavelengths for the Balmer, Lyman, and Paschen lines?** (Compare to Table 2 in Ch 5 of the textbook)

# +
import numpy as np


def calculate_lambda_mn(m, n):
    """Wavelength (nm) of the hydrogen line for the transition n -> m (Eqn 8).

    m, n : integers with m < n; prints a warning and returns 0 otherwise.
    """
    if m < n:
        return (1./R_H)*(m**2*n**2)/(n**2-m**2)
    else:
        print("Please choose appropriate values for m,n (m<n)")
        return 0


R_H = 1.09677583e7*1e-9  # Rydberg constant in nm^-1 (converted from m^-1)

# First three lines of each series: Balmer (m=2), Lyman (m=1), Paschen (m=3).
Balmer = np.zeros(3)
Lyman = np.zeros(3)
Paschen = np.zeros(3)
wavelength = [Balmer, Lyman, Paschen]
for i in range(1, 4):
    Balmer[i-1] = calculate_lambda_mn(2, 2+i)
    Lyman[i-1] = calculate_lambda_mn(1, 1+i)
    Paschen[i-1] = calculate_lambda_mn(3, 3+i)

Series = ["H", "Ly", "Pa"]
# BUG FIX: the beta entry was "\$beta$", which printed a literal "\$beta$"
# instead of "$\beta$".
letter = ["$\\alpha$", "$\\beta$", "$\\gamma$"]
for i in range(0, 3):
    for j in range(0, 3):
        print("%s%s is %3.3f nm." % (Series[i], letter[j], wavelength[i][j]))
# -

# Although this formula worked for hydrogen, physicists of the day wanted a physical model that could be applied more broadly. The simplest "planet-like" model of an electron orbiting a proton was the most attractive, but Maxwell's equations predicted a basic instability. A particle in a circular orbit experiences an acceleration, where Maxwell showed that accelerating charges emit electromagnetic radiation.
# Under this model, the electron would lose energy by emitting light as it spirals down into the nucleus in only $10^{-8}$ s. Obviously this wasn't a good description of nature.
#
# ### Bohr's Semiclassical Atom
# In 1913, <NAME> noted that the dimensions of Planck's constant are equivalent to angular momentum and perhaps the angular momentum of the orbiting electron was quantized. Bohr hypothesized that, for orbits whose angular momentum is an integral multiple of Planck's constant (i.e., a specific angular momentum), the electron would be stable and would not radiate *in spite of its centripetal acceleration*.
#
# <p align="center">
# <img src="https://chem.libretexts.org/@api/deki/files/344390/1.8.3.svg"/> <br>
# <b>Figure 4</b>: Bohr atom with an electron revolving around a fixed nucleus. (CC BY-NC; Ümit Kaya via <a href="https://chem.libretexts.org/Courses/Solano_Community_College/Chem_160/Chapter_07%3A_Atomic_Structure_and_Periodicity/7.04_The_Bohr_Model">Libretexts</a>)
# </p>
#
# To analyze the interactions between a proton and electron, we start with the mathematical description given by **Coulomb's law**. The electric force between two charges ($q_1$ and $q_2$) separated by a distance $r$ has the familiar form
#
# $$
# {\bf F} = \frac{1}{4\pi\epsilon_o}\frac{q_1q_2}{r^2}\hat{{\bf r}}, \tag{9}
# $$
#
# where $\epsilon_o = 8.854187817\ldots \times 10^{-12}$ F ${\rm m}^{-1}$ is the *permittivity of free space* and $\hat{{\bf r}}$ is a unit vector that points from $q_1$ to $q_2$. In the hydrogen atom, the electron has a negative charge $e^-$ and the proton a positive charge $e^+$, so the Coulomb force pulls the opposite charges toward one another. In the case of two masses orbiting each other, we set the gravitational force equal to the centripetal force. Thus, it follows that we could try the same procedure for a proton of mass $m_p$ and an electron of mass $m_e$.
Simplifying further, we can convert the two-body problem to an equivalent one-body problem using the reduced mass # # $$ # \mu = \frac{m_e m_p}{m_e+m_p} = 0.999455679 m_e, # $$ # # where $m_p = 1836.15266 m_e$ and the total mass $M = m_e+m_p$. To evaluate the one-body problem, we use a coordinate system so that the proton is at the origin (i.e., proton-centric). *Note: this technique is also used in orbital mechanics where planetary systems are evaluate in heliocentric coordinates*. Using the centripetal acceleration and Newton's second law: # # $$ # -\frac{1}{4\pi\epsilon_o}\frac{e^2}{r^2}\hat{{\bf r}} = -\mu\frac{v^2}{r}\hat{{\bf r}}, # $$ # # which can be solved for the kinetic energy $K$: # # $$ # K = \frac{1}{2}\mu v^2 = \frac{1}{8\pi\epsilon_o}\frac{e^2}{r}. \tag{10} # $$ # # The electrical potential energy $U$ of the Bohr atom is: # # $$ # U = -\frac{1}{4\pi\epsilon_o}\frac{e^2}{r} = -2K. # $$ # # The total energy $E = K + U$ of the atom is: # # $$ # E = K + U = -K = -\frac{1}{8\pi\epsilon_o}\frac{e^2}{r}. \tag{11} # $$ # # The kinetic energy is a positive quantity, which implies that the total energy is negative and indicates that the electron and proton are *bound*. To **ionize** or free the electron, it must receive at least an amount of energy equal to $|E|$. # # Getting back to Bohr's contribution, he suggested that the angular momentum $L = \mu vr$ was quantized and could be expressed in terms of Planck's constant $h$ distributed over a unit circle as $\hbar = h/2\pi$. The full condition is # # $$ # L = \mu vr = n\hbar, \tag{12} # $$ # # which can be rewritten using the kinetic energy as # # $$\begin{align*} # K &= \frac{1}{2} \mu v^2 = \frac{L^2}{2\mu r^2} \\ &= \frac{(n\hbar)^2}{2\mu r^2} = \frac{1}{8\pi\epsilon_o}\frac{e^2}{r}. 
# \end{align*}
# $$
#
# Solving the second line of the equation above for the radius $r$ shows that only specific values are allowed by Bohr's quantization condition as
#
# $$
# r_n = \frac{4\pi\epsilon_o \hbar^2}{\mu e^2}n^2 = a_o n^2, \tag{13}
# $$
#
# where $a_o = 5.291772083 \times 10^{-11}$ m is known as the **Bohr radius**. Now that we know the scale of the atom (i.e., appropriate values of $r$), Eqn. 11 describes the energy in terms of fundamental constants as
#
# $$
# E_n = -\frac{\mu e^4}{32\pi^2\epsilon_o^2 \hbar^2}\frac{1}{n^2} = \frac{-13.6\;{\rm eV}}{n^2}. \tag{14}
# $$
#
# The **principal quantum number** $n$ completely determines the characteristics of the Bohr atom, where $n\geq 1$.
#
# The spectral lines are produced by electron transitions within the atom (i.e., moving between energy levels). Balmer tried to produce a formula that would predict the wavelength that resulted from a transition from $n_{high}\rightarrow n_{low}$. Now consider the required energy to move between levels as the energy of a photon $E_{photon} = hc/\lambda = E_{high} - E_{low}$, which gives
#
# $$
# \lambda = \frac{h c}{13.6\;{\rm eV}}\frac{n_{high}^2n_{low}^2}{n_{high}^2-n_{low}^2}, \tag{15}
# $$
#
# where $hc = 1240$ eV nm.
#
# - **What are the first 3 wavelengths in the Balmer lines using this method?**

# +
def calculate_lambda_hl(n_h, n_l):
    """Wavelength (nm) for the transition n_h -> n_l via Eqn 15 (hc = 1240 eV nm)."""
    # Guard clause: the transition must go from a higher to a lower level.
    if n_h <= n_l:
        print("Please choose appropriate values for n_h,n_l (n_l<n_h)")
        return 0
    ratio = (n_h**2 * n_l**2) / (n_h**2 - n_l**2)
    return (1240/13.6) * ratio


# Balmer series (n_low = 2): label each line with the H-alpha/beta/gamma
# names carried over from the previous cell's Series/letter lists.
for i in range(0, 1):
    for j in range(0, 3):
        lam = calculate_lambda_hl(j+3, 2)
        print("%s%s is %3.3f nm." % (Series[i], letter[j], lam))
# -

# The above results and those produced from Balmer's formula are for light traveling in a vacuum. The empirical measurements are made in *air*, where the speed of light is slightly slower by a factor of 0.999703 and $\lambda_{\rm air} = 0.999703 \lambda_{\rm vacuum}$.
Solving for the ${\rm H}\alpha$ line in air yields $\lambda_{\rm air} = 656.276$ nm, which differs from the quoted value by 0.0008%. The remainder of the discrepancy is due to environmental factors (e.g., temperature, pressure, and humidity) that affect the index of refraction for air. # # Thus discussion has focused on electrons that transition from a higher (large $n$) to lower (small $n$) energy state. But, the reverse process can occur, where a photon is absorbed by the atom thereby exciting an electron from a lower to a higher energy state. The former process produces spectral emission where the latter produces absorption and Eqn. 15 is used for both processes. Kirchoff's laws now had a physical basis from first principles and can be stated as # # - A hot, dense gas or hot solid object produces a continuous spectrum with no dark spectral lines. The continuous spectrum is described by the Planck functions $B_\lambda(T)$ and $B_\nu(T)$. The wavelength where $B_\lambda(T)$ reaches its peak intensity is given by Wien's displacement law. # # - A hot, diffuse gas produces bright emission lines, when an electron makes a downward transition. The energy lost is carried away by a single photon. # # - A cool, diffuse gas in front of a source of a continuous spectrum produces dark absorption lines in the continuous spectrum. The lines are produced when an electron absorbs a photon of the required energy to jump from a lower to higher energy orbit. # # Despite the success of Bohr's model, it describes only the simplest arrangement of particles. In reality, the electron is not really in a circular orbit nor is its position well-defined. There is more structure to the atom than Bohr realized, where the fine structure can come into play during and after a star's main-sequence. # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_5.jpg?raw=true?"/> <br> # <b>Figure 5</b>: Balmer lines produced by the hydrogen atom. 
(a) Emission. (b) Absorption. (Carroll & Ostlie 2007) # </p> # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_6.jpg?raw=true?"/> <br> # <b>Figure 6</b>: Energy level diagram for the hydrogen atom showing Lyman, Balmer, and Paschen lines. (Carroll & Ostlie 2007) # </p> # # ### **Problems** # >5\. To demonstrate the relative strengths of the electrical and gravitational forces between the electron and proton in the Bohr atom, suppose the hydrogen atom were held together solely by gravity. Determine the radius of the ground-state orbit (in nm and AU) and the energy of the ground state (in EV). # > # >6\. Calculate the energies and vacuum wavelengths of all possible photons that are emitted when the electron cascades from the $n=3$ to the ground state of the hydrogen atom. # ## Quantum Mechanics and Wave-Particle Duality # # ### de Broglie's Wavelength and Frequency # [<NAME>](https://en.wikipedia.org/wiki/Louis_de_Broglie) (a French prince) posed the following question: If light could exhibit the characteristics of particles, might not particles sometimes manifest the properties of waves? In his PhD thesis, de Broglie extended thw wave-particle duality of light to all other particles. Einstein's theory of special relativity showed that photons carry both energy $E$ and momentum $p$, which can be related to a frequency $\nu$ and wavelength $\lambda$ by # # $$ # \nu = \frac{E}{h} \quad {\rm and} \quad \lambda = \frac{h}{p}.\tag{16} # $$ # # The de Broglie wavelength and frequency describe massless photons, as well as, massive electrons, protons, neutrons, etc. This seemed outrageous at the time, but the hypothesis was confirmed by many experiments including the interference pattern produced by *electrons* in a double slit experiment. The wave-particle duality lies at the heart of the physical world, where wave properties describe the *propagation* and the particle nature manifests in *interactions*. 
# # - **Compare the wavelengths of a free electron moving at 0.01*c* and the [fastest human sprinter](https://en.wikipedia.org/wiki/Footspeed) moving at 12.42 m/s.** # + def deBroglie_lambda(m,v): #m = mass in kg #v = speed in m/s return h/(m*v) #de Broglie wavelength in meters m_UB = 94 #kg; mass of Usain Bolt; https://en.wikipedia.org/wiki/Usain_Bolt m_e = 9.10938356e-31 #kg; mass of electron; https://en.wikipedia.org/wiki/Electron h = 6.62607015e-34 #J/s Planck constant c = 2.99792458e8 #speed of light in m/s v_e = 0.01*c v_UB = 12.42 #fastest speed in m/s lambda_e = deBroglie_lambda(m_e,v_e) lambda_UB = deBroglie_lambda(m_UB,v_UB) print("The wavelength of a free electron moving at 0.01c is %1.3e m or %1.3f nm." % (lambda_e,lambda_e/1e-9)) print("The wavelength of Usain Bolt moving at 12.42 m/s is %1.3e m." % lambda_UB) # - # In a double-slit experiment, a photon or electron passes through *both* slits to produce the interference pattern on the other side. The wave doesn't convey information about where the particle is, rather only where it *may* be. As a result, the wave is described by a probability with and amplitude $\Psi$. The amplitude squared, $\left<\Psi|x|\Psi\right>$, describes the probability of finding the particle at a certain location $x$. For the double-slit experiment, particles are never found where the sum of two probability functions equal zero (i.e., destructive interference). # # ### Heisenberg's Uncertainty Principle # A probability wave $\Psi$ for a particle can have a precise description as a sine wave with a wavelength $\lambda$. Through de Broglie's wavelength, the particle's momentum can also be defined exactly ($p = h/\lambda$), but the particle is not localized and means the particle's position is perfectly uncertain. The particle's position can be constrained when several sine waves with a range of wavelengths are added so they destructively interfere almost everywhere. 
# The result is that $\Psi$ (the probability) is approximately zero almost everywhere, except for a narrow range of $x$. By adding more waves, and their wavelengths, the particle's momentum is now a range of values (i.e., more uncertain).
#
# <p align="center">
# <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_7.jpg?raw=true?"/> <br>
# <b>Figure 7</b>: Two examples of a probability wave: (a) a single sine wave and (b) a pulse composed of many sine waves. (Carroll & Ostlie 2007)
# </p>
#
# <NAME> developed a theoretical framework for this inherent "fuzziness" of the physical world. The product of the uncertainty in the position and momentum must be equal to or larger than $\hbar/2$ and is known as [Heisenberg's uncertainty principle](https://en.wikipedia.org/wiki/Uncertainty_principle). A similar statement relates the uncertainty of energy and time measurements. Together they are described by
#
# $$
# \Delta x \Delta p \approx \hbar \quad {\rm and} \quad \Delta E \Delta t \approx \hbar.\tag{17}
# $$
#
# - **What is the minimum speed and kinetic energy of an electron in a hydrogen atom?**

# +
# Order-of-magnitude estimate from Delta x * Delta p ~ hbar, taking Delta x
# equal to the Bohr radius. Uses `h`, `np`, `m_e` and `c` defined in earlier
# cells of this notebook.
hbar = h/(2.*np.pi)
a_o = 5.291772083e-11 #Bohr radius of hydrogen atom; Delta x
p_e = hbar/a_o #uncertainty in the electron's momentum p
v_min = p_e/m_e #minimum speed of the electron
K_min = p_e**2/(2*m_e) #minimum (non-relativistic) KE of the electron
eV = 1.602176634e-19 #J in 1 eV
print("The minimum speed of the electron is %1.2e m/s or %1.3fc." % (v_min,v_min/c))
print("The minimum KE of the electron is %1.2e J or %2.1f eV" % (K_min,K_min/eV))
# -

# ### Quantum Mechanical Tunneling
# Light changes its behavior at boundary transitions, where this is most easily seen for a light beam traveling from a prism (glass) into air. The light may undergo *total internal reflection* if its incident angle is greater than a critical angle $\theta_c$ that is determined by the indices of refraction of the glass and air.
The critical angle $\theta_c$ can be derived from Snell's law to get # # $$ \sin \theta_c = \frac{n_{\rm air}}{n_{\rm glass}}. $$ # # The electromagnetic wave does enter the air, but it dies away exponentially (i.e., becomes *evanescent*). When another prism is placed next to the first, then the evanescent wave can begin propagating into the second prism without passing through the air gap between them. Photons have **tunneled** from one prism to another. The limitation to this effect is through Heisenberg's uncertainty principle, where the location of a particle (i.e., photon) cannot be determined with an uncertainty less than its wavelength. For barriers that are only a few wavelengths wide, a particle can suddenly appear on the other side. **Barrier penetration** is important for radioactive decay and inside stars, where nuclear fusion rates depend upon tunneling. # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_8.jpg?raw=true?"/> <br> # <b>Figure 8</b>: mechanical tunneling (barrier penetration) of a particle traveling to the right. (Carroll & Ostlie 2007) # </p> # ### Schr&ouml;dinger's Equation and the Quantum Mechanical Atom # Heisenberg's uncertainty principle prevents the use of classical models to treat the electron-proton like a planetary system. Instead, the electron orbits within a cloud of probability (i.e., *orbital*) where the more "dense" regions corresponding to where the electron is most likely to be found. In 1926, <NAME>&ouml;dinger developed a wave equation that can be solved for the probability waves that describe the allowed values of a particle state (e.g., energy, momentum, etc.) and its propagation through space. # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_9.jpg?raw=true?"/> <br> # <b>Figure 9</b>: Electron orbitals of the hydrogen atom. 
(Carroll & Ostlie 2007)
# </p>
#
# The Schr&ouml;dinger equation can be solved analytically for the hydrogen atom and it produces the same set of allowed energies obtained by Bohr. However, Schr&ouml;dinger found that two additional quantum numbers $\ell$ and $m_\ell$ are required for a complete description of the electron orbitals. The principal quantum number $n$ determines the energy level, where these quantum numbers describe the angular momentum vector **L** of the atom. The magnitude of the angular momentum $L$ is
#
# $$ L = \sqrt{\ell(\ell+1)}\hbar, \tag{18}$$
#
# where $\ell = \{0,\ldots,n-1 \}$. Early spectroscopists categorized some elements based on their alkali metal spectroscopic lines described as **s**harp, **p**rincipal, **d**iffuse, and **f**undamental and this historical [nomenclature](https://en.wikipedia.org/wiki/Atomic_orbital) corresponded with $\ell = 0,1,2,3$. The letters continue alphabetically where $\ell = 4\rightarrow g$, $\ell = 5\rightarrow h$, and so on (omitting $j$). Chemists now use a nomenclature that includes both the principal and angular momentum quantum numbers together (e.g., $n\ell$). For example, ($n=2$, $\ell=1$) corresponds to $2p$.
#
# The $z$-component of the angular momentum vector $L_z$ can assume only particular values designated by the quantum number $m_\ell$ (i.e., $L_z=m_\ell\hbar$), which ranges from $-\ell$ to $+\ell$ resulting in $2\ell +1$ possible integers. For *isolated* hydrogen atoms, there is not a preferred direction for the angular momentum vector to point and thus, a **degeneracy** exists with respect to the energy (i.e., the electron has the same energy independent of $\ell$ and $m_\ell$) and will produce the *same* spectral line.
#
# However, hydrogen gas in a star is not isolated and the electrons will feel the effect of the stellar magnetic field. Due to the external magnetic field the energy levels are no longer degenerate and the spectral lines are observed to split.
# The simplest case is described by the [Zeeman effect](https://en.wikipedia.org/wiki/Zeeman_effect), where the $2p$ orbital is split into three lines; $\ell=1$ and $m_\ell = \{-1,0,+1\}$. The $m_\ell = 0$ line appears at a frequency $\nu_o$ that corresponds to the spectral line in the absence of the magnetic field. The other two lines are produced depending on the magnetic field strength $B$ and reduced mass $\mu$ to produce the frequencies
#
# $$ \nu_{m_\ell} = \nu_o + m_\ell\frac{eB}{4\pi \mu}. \tag{19}$$
#
# Today, the Zeeman effect is used to monitor the magnetic field of the Sun by taking spectra near sunspots. Interstellar clouds may contain very weak magnetic fields ($B\approx 2\times 10^{-10}\;{\rm T}$), where astronomers have been able to measure this magnetic field.
#
# <p align="center">
# <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_10.jpg?raw=true?"/> <br>
# <b>Figure 10</b>: Splitting of absorption lines by the Zeeman effect. (Carroll & Ostlie 2007)
# </p>
#
# - **Calculate the change in frequency under the magnetic field of an interstellar cloud using the mass of the electron for the reduced mass ($\mu = m_e$).**

# +
def calc_Zeeman_freq(B,mu):
    """Zeeman frequency shift per unit m_ell (Eqn. 19): e B / (4 pi mu), in Hz.

    Uses `q_e` (assigned below) and `np` from an earlier cell of this notebook.
    """
    #B = magnetic field strength in Tesla (T)
    #mu = reduced mass (kg)
    return q_e*B/(4*np.pi*mu) #returns frequency in Hz

q_e = 1.60217662e-19 #Coulombs; electron charge
B_IC = 2e-10 #magnetic field of interstellar cloud (T)
# `m_e` (electron mass) is defined in an earlier cell of this notebook.
delta_nu = calc_Zeeman_freq(B_IC,m_e)
print("The change in frequency from the Zeeman effect is %1.1f Hz." % delta_nu)
# -

# ### Spin and the Pauli Exclusion Principle
# More complicated patterns of magnetic field splitting led physicists to discover another quantum number related to the *spin angular momentum* **S** of an electron. This is *not* a classical top-like rotation but purely a quantum effect.
The spin vector has a constant magnitude # # $$ S = \sqrt{\frac{1}{2}\left(\frac{1}{2}+1\right)}\hbar = \frac{\sqrt{3}}{2}\hbar, $$ # # with a $z$-component $S_z = m_s\hbar$ and $m_s = \pm 1/2$. The quantum state of an electron can be described by four quantum numbers, but no two electrons can occupy the same quantum state, [Pauli Exclusion principle](https://en.wikipedia.org/wiki/Pauli_exclusion_principle). This provided an explanation for the properties of the period table of elements. However, there wasn't a first principles reasoning for why a fourth quantum number was needed. # # Quantum mechanics was at odds with Schr&ouml;dinger's (non-relativistic) wave equation and Einstein's theory of special relativity. <NAME> worked to combine the two theories and succeeded in writing a relativistic wave equation for the electron that naturally included the spin of the electron. His relativistic wave equation extended the Pauli exclusion principle by dividing the world of particles into two groups: fermions and bosons. # # - **Fermions** are odd-integer half-spin particles (e.g., $\frac{1}{2}\hbar$, $\frac{3}{2}\hbar$, $\ldots$) such as electrons, protons, and neutrons. Fermions obey the Pauli exclusion principle, which explains the structure of white dwarfs and neutron stars. # # - **Bosons** are integer spin particles (e.g., 0, $\hbar$, $2\hbar$, $\ldots$) such as photons. Bosons do not obey the Pauli exclusion principle. # # The Dirac equation also predicted the existence of **antiparticles**. A particle differs from its antiparticle in its charge and magnetic moment (e.g., electron $e^-$ and positron $e^+$); all other properties are the same. Pairs of particles and antiparticles may be created from the energy of gamma-ray photons (e.g., $\gamma \rightarrow e^- + e^+$) or annihilate each other to produce gamma-rays (e.g., $e^- + e^+ \rightarrow \gamma$). Such processes play a role for fusion inside of stars and in the evaporation of black holes. 
# ### The Complex Spectra of Atoms # Four quantum numbers ($n$, $\ell$, $m_\ell$, and $m_s$) describe a quantum state for each electron in an atom, where external magnetic fields (and other electromagnetic interactions) increase the number of possible energy levels. Even though there are a large number of possible states to make a transition, some transitions are restricted, where Nature imposes a set of [selection rules](https://en.wikipedia.org/wiki/Selection_rule) that determines the **allowed** and **forbidden** transitions. The allowed transitions can happen spontaneously on timescales of $10^{-8}$ s, while the requirement of $\Delta \ell = \pm 1$ prevents transition from $F$ to $P$. # # <p align="center"> # <img src="https://github.com/saturnaxis/ModernAstro/blob/main/Chapter_5/Figure_11.jpg?raw=true?"/> <br> # <b>Figure 11</b>: Some of the electronic energy levels of the helium atom. (Carroll & Ostlie 2007) # </p> # # The Zeeman effect shows only three transitions occur between $1s$ and $2p$ energy levels because of selection rules requiring $\Delta m_\ell = 0$ or $\pm 1$ and forbidding transitions if *both* orbitals have $m_\ell = 0$. Similar to quantum tunneling, there remains a small but finite probability for forbidden transitions to occur and they require much longer timescales. Collisions between atoms trigger transitions and can compete with spontaneous transitions. Thus, very low gas densities are required to obtain a sufficient signal to measure the forbidden transitions. In the diffuse interstellar medium or the outer atmospheres of stars, this condition is met. # ### **Problems** # >7\. An electron in an old TV set reaches a speed of about $5\times 10^7$ m/s before it hits the screen. What is the wavelength of this electron? # > # >8\. A White dwarf is a very dense object, with its ions and electrons packed extremely close together. Each electron may be considered to be located within a region of size $\Delta x \approx 1.5 \times 10^{-12}$ m. 
Estimate the minimum speed of the electron using Heisenberg's uncertainty principle. *Do you think that the effects of relativity will be important?* # > # >9\. An electron spends roughly $10^{-8}$ s in the first excited state of the hydrogen atom before spontaneously decaying back to the ground state. # >>**(a)** Determine the uncertainty $\Delta E$ in the energy of the first excited state. # >> # >>**(b)** Calculate the uncertainty $\Delta \lambda$ in the wavelength of the photon for this transition. *Why can you assume that $\Delta E = 0$ for the ground state?*
docs/_sources/Chapter_5/interaction-of-light-and-matter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''3.8.10'': pyenv)'
#     language: python
#     name: python3
# ---

# # How am I listening?

# ## Reading imported files

# +
import pandas
import glob
import os

# Gather every last.fm CSV export sitting next to this notebook and stack
# them into a single scrobble DataFrame.
path = os.getcwd()
lastfm_files = glob.glob(path + '/*.csv')  # renamed from `lasffm_files` (typo)
df_files = [pandas.read_csv(file, index_col=0) for file in lastfm_files]
lastfm_df = pandas.concat(df_files)

# adding datepart information
lastfm_df = lastfm_df[lastfm_df.date_uts.notnull()] # dropping null dates
# `date_uts` is a Unix timestamp in seconds; `dayfirst` is ignored by pandas
# when `unit` is given, so it has been removed.
lastfm_df['complete_date'] = pandas.to_datetime(lastfm_df.date_uts, unit='s')
lastfm_df['simple_date'] = lastfm_df.complete_date.dt.date
lastfm_df['simple_time'] = lastfm_df.complete_date.dt.time
lastfm_df['year'] = lastfm_df.complete_date.dt.year.astype(int)
lastfm_df['month'] = lastfm_df.complete_date.dt.month.astype(int)
lastfm_df.head(10)

# +
# Quick sanity checks: print the distinct values of each derived date part.
years_list = lastfm_df.year.unique()
years_list.sort()
print('years: ', years_list)

hour_list = lastfm_df.complete_date.dt.hour.unique()
hour_list.sort()
print('hours: ', hour_list)

day_of_month = lastfm_df.complete_date.dt.day.unique()
day_of_month.sort()
print('day of month: ', day_of_month)

day_of_week = lastfm_df.complete_date.dt.dayofweek.unique()
day_of_week.sort()
print('day of week: ', day_of_week)
# -

# The bare `album_name` expression that ended this notebook was a NameError
# as written (no such variable exists); commented out pending the intended
# column exploration, e.g. `lastfm_df['album_name'].unique()` — TODO confirm
# the column name against the CSV exports.
# album_name
how-im-listening.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Liberal Democrat UBI Policy Paper: Possible Improvements
#
# This article builds on the UBI Center's [recent analysis](https://ubicenter.org/lib-dem-policy-paper) of the reforms outlined in the Liberal Democrat UBI Policy Paper. While the reforms in the paper are shown to be robust at reducing poverty and increasing income at the lower deciles (though not completely funded with tax reforms), there are improvements that could increase these effects even further, increasing the progressivity and antipoverty effects without additional spending.
#
# ## UBI for children
#
# The policy paper reforms do not propose to give children a basic income. However, as shown in a [previous UBI Center analysis](https://www.ubicenter.org/child-ubi-share), spending even a small proportion of UBI funds on children can have dramatic effects on poverty and other socio-economic targets. This is largely because people in low-income households are more likely to be children, and therefore targeting aid at children is an effective way of targeting aid at low-income families.
#
# Below shows the effects of allocating different shares of the existing UBI expenditure to children, for each of the reforms outlined in the policy paper.
# +
# Setup: microsimulation framework plus the reform components used throughout.
from ubicenter import format_fig
from openfisca_uk import Microsimulation
from reform import (
    WA_adult_UBI,
    # child_UBI, # TODO
    # pensioner_UBI, # TODO
    include_UBI_in_means_tests,
    set_PA,
    set_PA_for_WA_adults,
    set_PT,
    net_cost,
)
import numpy as np
import pandas as pd
from tqdm import trange, tqdm
import plotly.express as px

# Pre-reform (2020 law) simulation: the comparison point for every metric below.
baseline = Microsimulation(year=2020)

# Shared funding package: PA for working-age adults lowered to £2,500/year,
# Primary Threshold to £50/week, and UBI counted as income in means tests.
funding = (
    set_PA_for_WA_adults(2500),
    set_PT(50),
    include_UBI_in_means_tests(),
)

from reform import child_WA_adult_UBI
from tqdm import tqdm  # NOTE(review): duplicate import — tqdm is already imported above.

# List of non-UBI components
reform_df = pd.DataFrame(columns=[
    "pa_for_wa_adults",
    "pt",
    "has_ubi_wa",
    "has_ubi_child",
    "has_ubi_pensioner",
    "include_ubi_in_means_tests"
])

reform_df.loc["baseline"] = pd.Series({
    "pa_for_wa_adults": 2500, # From the 60+/week plan.
    "pt": 50, #
    "has_ubi_wa": True,
    "has_ubi_child": False,
    "has_ubi_pensioner": False,
    "include_ubi_in_means_tests": True,
})
# -

# NOTE(review): this overwrites the "baseline" row assigned above and adds a
# "same_pa_for_pensioners" key that is not among the declared columns — this
# looks like a mid-refactor duplicate cell; confirm which version is intended.
reform_df.loc["baseline"] = pd.Series({
    "pa_for_wa_adults": 2500, # From the 60+/week plan.
    "same_pa_for_pensioners": False,
    "pt": 50, #
    "has_ubi_wa": True,
    "has_ubi_child": False,
    "has_ubi_pensioner": False,
    "include_ubi_in_means_tests": True,
})

reform_df.loc["Eliminate PA and PT"] = reform_df.loc["baseline"]
reform_df.loc["Eliminate PA and PT", "pa_for_wa_adults"] = 0
reform_df.loc["Eliminate PA and PT", "pt"] = 0
reform_df

# +
def percent_change(x, y):
    # Relative change from x to y (e.g. -0.1 means a 10% fall).
    return (y - x) / x


results_dfs = pd.DataFrame()
# NOTE(review): `ubi_45`, `ubi_60`, `ubi_75`, `ubi_95` and `ubi_45_funding`
# are not defined anywhere in this file as shown — presumably they come from
# an earlier (deleted?) cell or should be imported from `reform`; confirm.
for reform, funding_reform, amount in zip(
    (ubi_45, ubi_60, ubi_75, ubi_95),
    (ubi_45_funding, funding, funding, funding),
    (45, 60, 75, 95),
):
    base_reform_sim = Microsimulation(reform, year=2020)
    # Total UBI spending of the original reform — held fixed while the
    # child/adult split is varied below.
    revenue = base_reform_sim.calc("UBI").sum()
    poverty_rate = []
    poverty_gap = []
    median_income = []
    inequality = []
    cost = []
    child_fractions = []
    for child_fraction in np.linspace(0, 1, 11):
        # Re-spend the same budget with `child_fraction` of it going to
        # children. (This rebinds the loop variable `reform`; safe because
        # the value from `zip` was already consumed above.)
        reform = (
            child_WA_adult_UBI(revenue, baseline, child_fraction),
            *funding_reform,
        )
        reform_sim = Microsimulation(reform, year=2020)
        poverty_rate += [
            percent_change(
                baseline.calc("in_poverty_bhc", map_to="person").mean(),
                reform_sim.calc("in_poverty_bhc", map_to="person").mean(),
            )
        ]
        cost += [
            percent_change(
                baseline.calc("net_income").sum(),
                reform_sim.calc("net_income").sum(),
            )
        ]
        poverty_gap += [
            percent_change(
                baseline.calc("poverty_gap_bhc").mean(),
                reform_sim.calc("poverty_gap_bhc").mean(),
            )
        ]
        median_income += [
            percent_change(
                baseline.calc("household_net_income", map_to="person").median(),
                reform_sim.calc(
                    "household_net_income", map_to="person"
                ).median(),
            )
        ]
        inequality += [
            percent_change(
                baseline.calc("household_net_income", map_to="person").gini(),
                reform_sim.calc(
                    "household_net_income", map_to="person"
                ).gini(),
            )
        ]
        child_fractions += [child_fraction]
    results = pd.DataFrame(
        {
            "UBI": f"£{amount}/week",
            "Poverty rate": poverty_rate,
            "Poverty gap": poverty_gap,
            "Median household net income": median_income,
            "Inequality": inequality,
            "Net cost": cost,
            "Child UBI share": child_fractions,
        }
    )
    results_dfs = pd.concat([results_dfs, results])
results_dfs = results_dfs.reset_index()
fig = px.line(
    results_dfs,
    x="Child UBI share",
    y=[
        "Poverty rate",
        "Poverty gap",
        "Median household net income",
        "Inequality",
    ],
    animation_frame="UBI",
).update_layout(
    yaxis_range=(-0.75, 0.2),
    yaxis_tickformat="%",
    xaxis_tickformat="%",
    yaxis_title="Percent change",
    title="Changes to outcomes by child UBI allocation",
)
fig.add_hline(y=0, line_width=3, line_dash="dash", line_color="grey")
format_fig(fig)
# -

# Much of this is consistent with the findings of <NAME> (? how to reference here properly) - that with a low UBI budget, spending high proportions on children is optimal for reducing poverty, and with higher UBI expenditure, the optimal spending proportion for children is lower. For example, to minimise the poverty gap, the optimal child share of the UBI expenditure is around 60% for the smallest (£45/week) UBI, and decreases to around 30% for the highest (£95/week).
# This does not necessarily have to be distributed in the same mechanism as adults' UBI - the Child Benefit is a universal payment for children (albeit with a phase-out at individual incomes over £50,000) which could be increased.

# ## Going further with tax increases
#
# In all the reforms outlined, the Personal Allowance and Primary Threshold is reduced but not entirely eliminated (lowering to £4,000/year and £90/year respectively in the first reform, to £2,500/year and £50/year in the subsequent reforms). Although administration costs related to taxation of low incomes are difficult to estimate, eliminating these entirely could provide substantial increases in revenue and therefore UBI rates could be increased. All figures shown are relative to the original effects of the reform and no reforms change the net costs of the overall scheme by more than 1%[^1].
#
# [^1]: There is a reason that the net costs change slightly: first, the additional revenue from taxes are calculated. After this, the additional UBI is calculated (using the additional revenue) and simulated. However, at this point the additional UBI generates some benefit savings, so the net cost reduces slightly. To fix this, the reduction in net cost is calculated and added to the additional revenue, a new UBI is calculated for the second time, and the net cost difference is almost entirely eliminated.

# +
# Accumulators for the per-reform comparison between the original funding
# and full elimination of the PA and PT. NOTE(review): `revenue`,
# `ubi_addition` and `cost` are filled but not used later in this file.
original_revenue = []
new_revenue = []
revenue = []
poverty_rate = []
poverty_gap = []
median = []
inequality = []
ubi = []
ubi_addition = []
cost = []

# UBI Center house colours for the grouped bar chart below.
LIGHTER_BLUE = "#ABCEEB" # Blue 100.
LIGHT_BLUE = "#49A6E2" # Blue 700.  NOTE(review): same label as BLUE below; one is presumably mislabelled.
BLUE = "#1976D2" # Blue 700.
DARK_BLUE = "#0F4AA1" # Blue 900.
BLUE_COLORS = [LIGHTER_BLUE, LIGHT_BLUE, BLUE, DARK_BLUE]

# Funding package with the PA and PT fully eliminated.
new_funding = (
    set_PA_for_WA_adults(0),
    set_PT(0),
    include_UBI_in_means_tests(),
)

# NOTE(review): `ubi_45`..`ubi_95` are not defined in this file as shown.
# The hard-coded `initial_overshoot` values are the net-cost residuals from a
# first pass (see footnote [^1]) that are compensated for below.
for reform, amount, initial_overshoot in zip(
    (ubi_45, ubi_60, ubi_75, ubi_95),
    (45, 60, 75, 95),
    (-4.46e9, -2.99e9, -3.19e9, -2.81e9),
):
    new_reform = (WA_adult_UBI(amount * 52), *new_funding)
    original_reform_sim = Microsimulation(reform, year=2020)
    original_revenue += [net_cost(original_reform_sim, baseline)]
    new_revenue += [net_cost(Microsimulation(new_reform, year=2020), baseline)]
    # Convert the extra tax revenue into a per-working-age-adult UBI top-up.
    ubi_increase = (new_revenue[-1] - original_revenue[-1]) / baseline.calc(
        "is_WA_adult"
    ).sum()
    overshoot_compensation = (
        -initial_overshoot / baseline.calc("is_WA_adult").sum()
    )
    new_reform_sim = Microsimulation(
        (
            WA_adult_UBI(amount * 52 + ubi_increase + overshoot_compensation),
            *new_funding,
        ),
        year=2020,
    )
    poverty_rate += [
        percent_change(
            original_reform_sim.calc("in_poverty_bhc", map_to="person").mean(),
            new_reform_sim.calc("in_poverty_bhc", map_to="person").mean(),
        )
    ]
    poverty_gap += [
        percent_change(
            original_reform_sim.calc("poverty_gap_bhc").sum(),
            new_reform_sim.calc("poverty_gap_bhc").sum(),
        )
    ]
    median += [
        percent_change(
            original_reform_sim.calc(
                "household_net_income", map_to="person"
            ).median(),
            new_reform_sim.calc(
                "household_net_income", map_to="person"
            ).median(),
        )
    ]
    inequality += [
        percent_change(
            original_reform_sim.calc(
                "household_net_income", map_to="person"
            ).gini(),
            new_reform_sim.calc(
                "household_net_income", map_to="person"
            ).gini(),
        )
    ]
    ubi_addition += [amount + ubi_increase]
    ubi += [f"£{amount}/week"]
    revenue += [percent_change(original_revenue[-1], new_revenue[-1])]
    cost += [net_cost(baseline, new_reform_sim)]

results_df = pd.DataFrame(
    {
        "UBI": ubi,
        "Poverty rate": poverty_rate,
        "Poverty gap": poverty_gap,
        "Median household net income": median,
        "Inequality": inequality,
    }
)
results = results_df.drop("UBI", axis=1).T
results.columns = results_df.UBI
fig = px.bar(
    results,
    x=results.index,
    y=results.columns,
    barmode="group",
    color_discrete_sequence=BLUE_COLORS,
)
fig.update_layout(
    title="Effects of eliminating the PA and PT by UBI reform",
    xaxis_title="Indicator",
    yaxis_title="Percent change",
    yaxis_tickformat="%",
)
format_fig(fig)
# -

# The results show significant decreases in poverty (both the headline rate and absolute gap) can still be had without increasing the net costs of all reforms - and the effects are strongest on the first reform (which retains a higher starting point than the other reforms). The significantly larger poverty gap (the total sum of impoverished households' shortfall under the poverty threshold) than the poverty rate (percentage in poverty) is likely caused by this adjustment providing stronger gains to those at the very bottom of the income distribution, than those closer to the threshold.

# ## Means-tested benefits and UBI
#
# The reforms all include UBI payments as earnings in the calculation of means-tested benefits, providing some savings in the form of reduced benefits. However, since this is essentially funding through a tax on low-income families, avoiding this could make the schemes more progressive. The below graph shows the effects of excluding UBI from benefit means tests, reducing the UBI payment so that the overall net cost of the scheme is unaffected.
# + tags=[]
# Accumulators for the comparison between the original reforms (UBI counted
# in means tests) and variants that exclude UBI from the means tests.
# NOTE(review): `revenue`, `ubi_addition` and `cost` are filled but unused.
original_revenue = []
new_revenue = []
revenue = []
poverty_rate = []
poverty_gap = []
median = []
inequality = []
ubi = []
ubi_addition = []
cost = []

# NOTE(review): `ubi_45`..`ubi_95` are not defined in this file as shown.
for reform, amount in zip((ubi_45, ubi_60, ubi_75, ubi_95), (45, 60, 75, 95),):
    # Drop the last reform component — include_UBI_in_means_tests() — so the
    # UBI is no longer counted as income by the benefit means tests.
    new_reform = reform[:-1]
    original_reform_sim = Microsimulation(reform, year=2020)
    original_revenue += [net_cost(original_reform_sim, baseline)]
    new_revenue += [net_cost(Microsimulation(new_reform, year=2020), baseline)]
    # Negative here: the UBI amount is reduced per working-age adult so the
    # overall net cost matches the original scheme.
    ubi_increase = (new_revenue[-1] - original_revenue[-1]) / baseline.calc(
        "is_WA_adult"
    ).sum()
    new_reform_sim = Microsimulation(
        (WA_adult_UBI(amount * 52 + ubi_increase), *reform[1:-1]), year=2020
    )
    poverty_rate += [
        percent_change(
            original_reform_sim.calc("in_poverty_bhc", map_to="person").mean(),
            new_reform_sim.calc("in_poverty_bhc", map_to="person").mean(),
        )
    ]
    poverty_gap += [
        percent_change(
            original_reform_sim.calc("poverty_gap_bhc").sum(),
            new_reform_sim.calc("poverty_gap_bhc").sum(),
        )
    ]
    median += [
        percent_change(
            original_reform_sim.calc(
                "household_net_income", map_to="person"
            ).median(),
            new_reform_sim.calc(
                "household_net_income", map_to="person"
            ).median(),
        )
    ]
    inequality += [
        percent_change(
            original_reform_sim.calc(
                "household_net_income", map_to="person"
            ).gini(),
            new_reform_sim.calc(
                "household_net_income", map_to="person"
            ).gini(),
        )
    ]
    ubi_addition += [amount + ubi_increase]
    ubi += [f"£{amount}/week"]
    revenue += [percent_change(original_revenue[-1], new_revenue[-1])]
    cost += [net_cost(baseline, new_reform_sim)]

results_df = pd.DataFrame(
    {
        "UBI": ubi,
        "Poverty rate": poverty_rate,
        "Poverty gap": poverty_gap,
        "Median household net income": median,
        "Inequality": inequality,
    }
)
results = results_df.drop("UBI", axis=1).T
results.columns = results_df.UBI
fig = px.bar(
    results,
    x=results.index,
    y=results.columns,
    barmode="group",
    color_discrete_sequence=BLUE_COLORS,
)
fig.update_layout(
    title="Effects of excluding UBI from means tests",
    xaxis_title="Indicator",
    yaxis_title="Percent change",
    yaxis_tickformat="%",
)
format_fig(fig)
# -

# The results of this underscore the importance of the tax funding method in UBI schemes - here, a regressive tax funding actively works against the otherwise strong antipoverty effects of the UBI scheme, and lowering the UBI payment to avoid this tax reduces poverty, inequality and increases median household net income. There are still reasons why the benefit means tests might be preferred, however: moving low-income families off punitive marginal tax rates earlier on the income spectrum, or to begin gradually phasing out means-tested benefits to replace with UBI, so how to weight each of these must be considered within the overall aims of the scheme - whether it is to rapidly reduce poverty without a necessarily radical overhaul of the benefits system, or a (still progressive) managed replacement of the benefits system with UBI.
#
# In either case, the three changes outlined here will all build on the four UBI schemes, amplifying their antipoverty and equalising effects at no financial cost.
improvements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # ApartmentsGrid notebook example # In this example, a rule based controller is used to control the model "ApartmentsGrid-v0". At first we import the controller: from energym.examples.Controller import LabController # Next, we import Energym and create the simulation environment by specifying the model, a weather file and the number of simulation days. # + import energym weather = "ESP_CT_Barcelona" env = energym.make("ApartmentsGrid-v0", weather=weather, simulation_days=30) # - # The control inputs can be inspected using the `get_inputs_names()` method and to construct a controller, we pass the list of inputs and further parameters. This controller determines inputs to get close to the temperature setpoints and uses fixed setpoints during the night. inputs = env.get_inputs_names() print(inputs) controller = LabController(control_list=inputs, lower_tol=0.3, upper_tol=0.8, nighttime_setback=True, nighttime_start=18, nighttime_end=6, nighttime_temp=18) # To run the simulation, a number of steps is specified (here 480 steps per day for 5 days) and the obtained control inputs are passed to the simulation model with the `step()` method. To generate some plots later on, we save all the inputs and outputs in lists. 
# Run the simulation for 5 days at 480 control steps per day.  The first
# set of outputs is obtained from a random action; afterwards the
# rule-based controller proposes the inputs, some of which are overridden
# with fixed values below.
steps = 480*5
out_list = []
outputs = env.step(env.sample_random_action())
controls = []
hour = 0
for i in range(steps):
    # Controller tracks a 21 degree setpoint; the current hour drives the
    # nighttime-setback logic configured above.
    control = controller.get_control(outputs, 21, hour)
    # Fixed hot-water tank temperature setpoints (each input is a
    # single-element list, as expected by env.step()).
    control['P1_T_Tank_sp'] = [45.0]
    control['P2_T_Tank_sp'] = [45.0]
    control['P3_T_Tank_sp'] = [45.0]
    # NOTE(review): 4.0 breaks the 45.0 pattern of P1-P3 -- possibly a
    # typo for 45.0; confirm the intended tank setpoint for apartment 4.
    control['P4_T_Tank_sp'] = [4.0]
    # EV battery charging and discharging disabled for this run.
    control['Bd_Ch_EVBat_sp'] = [0.0]
    control['Bd_DisCh_EVBat_sp'] = [0.0]
    # Keep a flattened copy (first entry of each one-element list) of the
    # applied inputs for later inspection.
    controls +=[ {p:control[p][0] for p in control} ]
    outputs = env.step(control)
    _,hour,_,_ = env.get_date()
    out_list.append(outputs)

# Since the inputs and outputs are given as dictionaries and are collected in lists, we can simply load them as a pandas.DataFrame.

import pandas as pd

out_df = pd.DataFrame(out_list)
cmd_df = pd.DataFrame(controls)

# To generate plots, we can directly get the data from the DataFrames, by using the key names. Displayed are the zone temperatures and the setpoints determined by the controller for zone 1, the external temperature, and the total power demand.

# +
import matplotlib.pyplot as plt
# %matplotlib notebook

f, (ax1,ax2,ax3) = plt.subplots(3,figsize=(10,15))#
# Zone 1 temperature against the thermostat setpoint the controller chose.
ax1.plot(out_df['Z01_T'], 'r')
ax1.plot(out_df['P1_T_Thermostat_sp_out'], 'b--')
ax1.set_ylabel('Temp')
ax1.set_xlabel('Steps')
# External (outdoor) temperature.
ax2.plot(out_df['Ext_T'], 'r')
ax2.set_ylabel('Temp')
ax2.set_xlabel('Steps')
# Total facility power demand.
ax3.plot(out_df['Fa_Pw_All'], 'g')
ax3.set_ylabel('Power')
ax3.set_xlabel('Steps')
plt.subplots_adjust(hspace=0.4)
plt.show()
# -

# To end the simulation, the `close()` method is called. It deletes files that were produced during the simulation and stores some information about the simulation in the *runs* folder.

env.close()
docs/sources/notebooks/ApartmentsGrid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Script for training a denoiser import os os.environ['XLA_FLAGS']='--xla_gpu_cuda_data_dir=/gpfslocalsys/cuda/10.1.2' # - # %load_ext autoreload # %autoreload 2 # %pylab inline # + from absl import app from absl import flags import haiku as hk import jax #import optax from jax.experimental import optix import jax.numpy as jnp import numpy as onp import pickle from functools import partial from flax.metrics import tensorboard # Import tensorflow for dataset creation and manipulation import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_datasets as tfds #from jax_lensing.models.convdae import UResNet, SmallUResNet from jax_lensing.models.normalization import SNParamsTree as CustomSNParamsTree from jax_lensing.spectral import make_power_map from jax_lensing.utils import load_dataset from tqdm.notebook import tqdm import types from typing import Mapping, Optional, Sequence, Union # - dataset = "kappatng" output_dir = "../weights/gp-sn1" batch_size = 32 learning_rate = 1e-4 training_steps = 45000 train_split = "90%" noise_dist_std = 0.2 spectral_norm = 1. 
gaussian_prior = True gaussian_path = "../data/ktng/ktng_PS_theory.npy" variant = "EiffL" model_name = "SmallUResNet" map_size = 360 resolution = 0.29 # + active="" # def forward(x, s, is_training=False): # if model_name == 'SmallUResNet': # denoiser = SmallUResNet(n_output_channels=1, variant=variant) # else: # raise NotImplementedError # return denoiser(x, s, is_training=is_training) # # model = hk.transform_with_state(forward) # + #import jax #import jax.numpy as jnp #import haiku as hk #import types from typing import Mapping, Optional, Sequence, Union def check_length(length, value, name): if len(value) != length: raise ValueError(f"`{name}` must be of length 4 not {len(value)}") class BlockV1(hk.Module): """ResNet V1 block with optional bottleneck.""" def __init__( self, channels: int, stride: Union[int, Sequence[int]], use_projection: bool, bn_config: Mapping[str, float], bottleneck: bool, transpose: bool = False, name: Optional[str] = None ): super().__init__(name=name) self.use_projection = use_projection bn_config = dict(bn_config) bn_config.setdefault("create_scale", True) bn_config.setdefault("create_offset", True) bn_config.setdefault("decay_rate", 0.999) if transpose: maybe_transposed_conv = hk.Conv2DTranspose else: maybe_transposed_conv = hk.Conv2D if self.use_projection: self.proj_conv = maybe_transposed_conv( output_channels=channels, kernel_shape=1, stride=stride, with_bias=False, padding="SAME", name="shortcut_conv") self.proj_batchnorm = hk.BatchNorm(name="shortcut_batchnorm", **bn_config) channel_div = 4 if bottleneck else 1 conv_0 = hk.Conv2D( output_channels=channels // channel_div, kernel_shape=1 if bottleneck else 3, stride=1, with_bias=False, padding="SAME", name="conv_0") bn_0 = hk.BatchNorm(name="batchnorm_0", **bn_config) conv_1 = maybe_transposed_conv( output_channels=channels // channel_div, kernel_shape=3, stride=stride, with_bias=False, padding="SAME", name="conv_1") bn_1 = hk.BatchNorm(name="batchnorm_1", **bn_config) layers = 
((conv_0, bn_0), (conv_1, bn_1)) if bottleneck: conv_2 = hk.Conv2D( output_channels=channels, kernel_shape=1, stride=1, with_bias=False, padding="SAME", name="conv_2") bn_2 = hk.BatchNorm(name="batchnorm_2", scale_init=jnp.zeros, **bn_config) layers = layers + ((conv_2, bn_2),) self.layers = layers def __call__(self, inputs, is_training, test_local_stats): out = shortcut = inputs if self.use_projection: shortcut = self.proj_conv(shortcut) shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats) for i, (conv_i, bn_i) in enumerate(self.layers): out = conv_i(out) out = bn_i(out, is_training, test_local_stats) if i < len(self.layers) - 1: # Don't apply relu on last layer out = jax.nn.relu(out) return jax.nn.relu(out + shortcut) class BlockGroup(hk.Module): """Higher level block for ResNet implementation.""" def __init__( self, channels: int, num_blocks: int, stride: Union[int, Sequence[int]], bn_config: Mapping[str, float], bottleneck: bool, use_projection: bool, transpose: bool, name: Optional[str] = None, ): super().__init__(name=name) block_cls = BlockV1 self.blocks = [] for i in range(num_blocks): self.blocks.append( block_cls(channels=channels, stride=(1 if i else stride), use_projection=(i == 0 and use_projection), bottleneck=bottleneck, bn_config=bn_config, transpose=transpose, name="block_%d" % (i))) def __call__(self, inputs, is_training, test_local_stats): out = inputs for block in self.blocks: out = block(out, is_training, test_local_stats) return out class UResNet(hk.Module): """ Implementation of a denoising auto-encoder based on a resnet architecture """ def __init__(self, blocks_per_group, bn_config, bottleneck, channels_per_group, use_projection, name=None): """Constructs a Residual UNet model based on a traditional ResNet. Args: blocks_per_group: A sequence of length 4 that indicates the number of blocks created in each group. 
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be passed on to the :class:`~haiku.BatchNorm` layers. By default the ``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to ``False``. bottleneck: Whether the block should bottleneck or not. Defaults to ``True``. channels_per_group: A sequence of length 4 that indicates the number of channels used for each block in each group. use_projection: A sequence of length 4 that indicates whether each residual block should use projection. name: Name of the module. """ super().__init__(name=name) self.resnet_v2 = False bn_config = dict(bn_config or {}) bn_config.setdefault("decay_rate", 0.9) bn_config.setdefault("eps", 1e-5) bn_config.setdefault("create_scale", True) bn_config.setdefault("create_offset", True) # Number of blocks in each group for ResNet. check_length(4, blocks_per_group, "blocks_per_group") check_length(4, channels_per_group, "channels_per_group") self.initial_conv = hk.Conv2D( output_channels=32, kernel_shape=7, stride=2, with_bias=False, padding="SAME", name="initial_conv") if not self.resnet_v2: self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm", **bn_config) self.block_groups = [] self.up_block_groups = [] strides = (1, 2, 2, 1) for i in range(4): self.block_groups.append( BlockGroup(channels=channels_per_group[i], num_blocks=blocks_per_group[i], stride=strides[i], bn_config=bn_config, bottleneck=bottleneck, use_projection=use_projection[i], transpose=False, name="block_group_%d" % (i))) for i in range(4): self.up_block_groups.append( BlockGroup(channels=channels_per_group[i], num_blocks=blocks_per_group[i], stride=strides[i], bn_config=bn_config, bottleneck=bottleneck, use_projection=use_projection[i], transpose=True, name="up_block_group_%d" % (i))) if self.resnet_v2: self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config) self.final_upconv = hk.Conv2DTranspose(output_channels=1, 
kernel_shape=5, stride=2, padding="SAME", name="final_upconv") self.final_conv = hk.Conv2DTranspose(output_channels=1, kernel_shape=5, stride=2, padding="SAME", name="final_conv") def __call__(self, inputs, condition, is_training, test_local_stats=False): out = inputs out = jnp.concatenate([out, condition*jnp.ones_like(out)[...,[0]]], axis=-1) out = self.initial_conv(out) # Decreasing resolution levels = [] for block_group in self.block_groups: levels.append(out) out = block_group(out, is_training, test_local_stats) out = jnp.concatenate([out, condition*jnp.ones_like(out)],axis=-1) # Increasing resolution for i, block_group in enumerate(self.up_block_groups[::-1]): out = block_group(out, is_training, test_local_stats) out = jnp.concatenate([out, levels[-i-1]],axis=-1) # Second to last upsampling, merging with input branch return self.final_conv(out)/(jnp.abs(condition)*jnp.ones_like(inputs)+1e-3) class SmallUResNet(UResNet): """ResNet18.""" def __init__(self, bn_config: Optional[Mapping[str, float]] = None, name: Optional[str] = None): """Constructs a ResNet model. Args: bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be passed on to the :class:`~haiku.BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to ``False``. name: Name of the module. """ super().__init__(blocks_per_group=(2, 2, 2, 2), bn_config=bn_config, bottleneck=False, channels_per_group=(32, 64, 128, 128), use_projection=(True, True, True, True), name=name) class MediumUResNet(UResNet): """ResNet18.""" def __init__(self, bn_config: Optional[Mapping[str, float]] = None, name: Optional[str] = None): """Constructs a ResNet model. Args: bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be passed on to the :class:`~haiku.BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to ``False``. name: Name of the module. 
""" super().__init__(blocks_per_group=(2, 2, 2, 2), bn_config=bn_config, bottleneck=False, channels_per_group=(32, 64, 128, 128), use_projection=(True, True, True, True), name=name) class MediumUResNet(UResNet): """ResNet18.""" def __init__(self, bn_config: Optional[Mapping[str, float]] = None, name: Optional[str] = None): """Constructs a ResNet model. Args: bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be passed on to the :class:`~haiku.BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to ``False``. name: Name of the module. """ super().__init__(blocks_per_group=(2, 2, 2, 2), bn_config=bn_config, bottleneck=False, channels_per_group=(32, 64, 128, 128), use_projection=(True, True, True, True), name=name) def forward(x, s, is_training=False): denoiser = MediumUResNet() return denoiser(x, s, is_training=is_training) model = hk.transform_with_state(forward) #sn_fn = hk.transform_with_state(lambda x: hk.SNParamsTree(ignore_regex='[^?!.]*b$')(x)) #sn_fn = hk.transform_with_state(lambda x: CustomSNParamsTree(ignore_regex='[^?!.]*b$',val=2.)(x)) # + active="" # class SmallUResNet(UResNet): # """ResNet18.""" # # def __init__(self, # bn_config: Optional[Mapping[str, float]] = None, # use_bn: bool = True, # pad_crop: bool = False, # n_output_channels: int = 1, # variant: Optional[str] = 'EiffL', # name: Optional[str] = None): # """Constructs a ResNet model. # Args: # bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be # passed on to the :class:`~haiku.BatchNorm` layers. # resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults # to ``False``. # use_bn: Whether the network should use batch normalisation. Defaults to # ``True``. # n_output_channels: The number of output channels, for example to change in # the case of a complex denoising. Defaults to 1. # name: Name of the module. 
# """ # super().__init__(blocks_per_group=(2, 2, 2, 2), # bn_config=bn_config, # bottleneck=False, # channels_per_group=(32, 64, 128, 128), # use_projection=(True, True, True, True), # # 320 -> 160 -> 80 -> 40 # # 360 -> 180 -> 90 -> 45 # strides=(1, 2, 2, 1), # use_bn=use_bn, # pad_crop=pad_crop, # n_output_channels=n_output_channels, # variant=variant, # name=name) # # def forward(x, s, is_training=False): # denoiser = SmallUResNet() # return denoiser(x, s, is_training=is_training) # # model = hk.transform_with_state(forward) # + active="" # # - def lr_schedule(step): """Linear scaling rule optimized for 90 epochs.""" steps_per_epoch = 30000 // batch_size current_epoch = step / steps_per_epoch # type: float lr = (1.0 * batch_size) / 32 boundaries = jnp.array((20, 40, 60)) * steps_per_epoch values = jnp.array([1., 0.1, 0.01, 0.001]) * lr index = jnp.sum(boundaries < step) return jnp.take(values, index) if spectral_norm > 0: sn_fn = hk.transform_with_state( lambda x: CustomSNParamsTree(ignore_regex='[^?!.]*b$', val=spectral_norm)(x) ) else: sn_fn = None # + # Initialisation """ optimizer = optax.chain( optax.adam(learning_rate=learning_rate), #optax.scale_by_schedule(lr_schedule) ) """ optimizer = optix.chain( optix.adam(learning_rate=learning_rate), optix.scale_by_schedule(lr_schedule) ) rng_seq = hk.PRNGSequence(42) if gaussian_prior: last_dim=2 else: last_dim=1 """ params, state = model.init(next(rng_seq), jnp.zeros((1, map_size, map_size, last_dim)), jnp.zeros((1, 1, 1, 1)), is_training=True) """ params, state = model.init(next(rng_seq), jnp.zeros((1, map_size, map_size, last_dim)), jnp.zeros((1, 1, 1, 1)), is_training=True) opt_state = optimizer.init(params) # - if sn_fn is not None: _, sn_state = sn_fn.init(next(rng_seq), params) else: sn_state = None # + def log_gaussian_prior(map_data, sigma, ps_map): data_ft = jnp.fft.fft2(map_data) / float(map_size) return -0.5*jnp.sum(jnp.real(data_ft*jnp.conj(data_ft)) / (ps_map+sigma[0]**2)) gaussian_prior_score = 
jax.vmap(jax.grad(log_gaussian_prior), in_axes=[0,0, None]) # - pixel_size = jnp.pi * resolution / 180. / 60. #rad/pixel # If the Gaussian prior is used, load the theoretical power spectrum if gaussian_prior: ps_data = onp.load(gaussian_path).astype('float32') ell = jnp.array(ps_data[0,:]) # massivenu: channel 4 ps_halofit = jnp.array(ps_data[1,:] / pixel_size**2) # normalisation by pixel size # convert to pixel units of our simple power spectrum calculator kell = ell / (360/3.5/0.5) / float(map_size) # Interpolate the Power Spectrum in Fourier Space power_map = jnp.array(make_power_map(ps_halofit, map_size, kps=kell)) def score_fn(params, state, rng_key, batch, is_training=True): if gaussian_prior: # If requested, first compute the Gaussian prior gaussian_score = gaussian_prior_score(batch['y'][...,0], batch['s'][...,0], power_map) gaussian_score = jnp.expand_dims(gaussian_score, axis=-1) net_input = jnp.concatenate([batch['y'], jnp.abs(batch['s'])**2 * gaussian_score],axis=-1) res, state = model.apply(params, state, rng_key, net_input, batch['s'], is_training=is_training) else: res, state = model.apply(params, state, rng_key, batch['y'], batch['s'], is_training=is_training) gaussian_score = jnp.zeros_like(res) return batch, res, gaussian_score """ # Training loss def loss_fn(params, state, rng_key, batch): _, res, gaussian_score = score_fn(params, state, rng_key, batch) loss = jnp.mean((batch['u'] + batch['s'] * (res + gaussian_score))**2) return loss, state """ @jax.jit def loss_fn(params, state, rng_key, batch): #res, state = model.apply(params, state, rng_key, batch['y'], batch['s'], is_training=True) #loss = jnp.mean((batch['u'] + batch['s'] * res)**2) _, res, gaussian_score = score_fn(params, state, rng_key, batch) loss = jnp.mean((batch['u'] + batch['s'] * (res + gaussian_score))**2) return loss, state """ @jax.jit def update(params, state, sn_state, rng_key, opt_state, batch): (loss, state), grads = jax.value_and_grad(loss_fn, has_aux=True)(params, state, 
rng_key, batch) updates, new_opt_state = optimizer.update(grads, opt_state) new_params = optax.apply_updates(params, updates) if spectral_norm > 0: new_params, new_sn_state = sn_fn.apply(None, sn_state, None, new_params) else: new_sn_state = sn_state return loss, new_params, state, new_sn_state, new_opt_state """ @jax.jit def update(params, state, sn_state, rng_key, opt_state, batch): (loss, state), grads = jax.value_and_grad(loss_fn, has_aux=True)(params, state, rng_key, batch) updates, new_opt_state = optimizer.update(grads, opt_state) new_params = optix.apply_updates(params, updates) if spectral_norm > 0: new_params, new_sn_state = sn_fn.apply(None, sn_state, None, new_params) else: new_sn_state = sn_state return loss, new_params, state, new_sn_state, new_opt_state train = load_dataset(dataset, batch_size, map_size, noise_dist_std, train_split) #imshow(next(train)['y'][0,...,0]); colorbar() # !pip list |grep dm-haiku # !pip list |grep jax # + #training_steps = 5000 losses = [] for step in tqdm(range(training_steps)): loss, params, state, sn_state, opt_state = update(params, state, sn_state, next(rng_seq), opt_state, next(train)) losses.append(loss) if step%100==0: print(step, loss) if step%5000 ==0: with open(output_dir+'/model-%d.pckl'%step, 'wb') as file: pickle.dump([params, state, sn_state], file) with open(output_dir+'/model-final.pckl', 'wb') as file: pickle.dump([params, state, sn_state], file) # - loglog(losses) # + #from functools import partial #score = partial(model.apply, params, state, next(rng_seq)) # - batch = next(train) # + #res, state = score_fn(batch['x'],batch['s'], is_training=False) _, res, gaussian_score = score_fn(params, state, next(rng_seq), batch) for i in range(10): ind = i figure(figsize=(16,4)) subplot(141) title("%0.3f"%batch['s'][ind,0,0,0]) imshow(batch['x'][ind,...,0],cmap='magma',vmin=-0.05,vmax=0.3) axis('off') subplot(142) imshow(batch['y'][ind,...,0],cmap='magma',vmin=-0.05,vmax=0.3) axis('off') subplot(143) 
#imshow(res[ind,...,0],cmap='magma') imshow(res[ind,...,0] + gaussian_score[ind,...,0], cmap='magma') axis('off') #title("%0.3f"%std(batch['s'][ind,:,:,0]**2 * res[ind,...,0])) subplot(144) #imshow(batch['y'][ind,...,0] + batch['s'][ind,:,:,0]**2 * res[ind,...,0],cmap='magma',vmin=-0.05,vmax=0.3) imshow(batch['y'][ind,...,0] + batch['s'][ind,:,:,0]**2 * (res[ind,...,0] + gaussian_score[ind,...,0]),cmap='magma',vmin=-0.05,vmax=0.3) #batch['s'] * (res + gaussian_score))**2 axis('off') # -
notebooks/Train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import seaborn as sns
from scipy.spatial.distance import pdist, squareform

# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)

# Load the table: one row per client (indexed by client id), one column
# per title.
data = pd.read_csv('Data-Table 1.csv', sep=';')
data.set_index('Client ID (ns_vid)', inplace=True)

# np.matrix is deprecated; pdist expects a plain 2-D array, which
# DataFrame.to_numpy() provides with identical values.
m = data.to_numpy()
columns = list(data.columns)

# Pairwise Hamming distance between the ROWS of ``m``, expanded into a
# square symmetric matrix.
# NOTE(review): pdist compares rows (clients) while ``distance`` below
# indexes the matrix by COLUMN (title) positions -- if title-to-title
# distances are intended, this should likely be pdist(m.T, 'hamming');
# confirm the intended orientation.
res = squareform(pdist(m, 'hamming'))
# -

data.columns


def distance(t1, t2):
    """Return the precomputed Hamming distance between labels ``t1`` and ``t2``.

    Both arguments must be entries of ``columns``; the lookup indexes the
    square distance matrix ``res`` built above.
    """
    return res[columns.index(t1), columns.index(t2)]


search = '<NAME>'
d = pd.DataFrame([(c, distance(search, c)) for c in columns],
                 columns=['title', 'distance'])
d.sort_values('distance')
readCSV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/simecek/PseudoDNA_Generator/blob/master/experiments/Exons_MarkovModel_KMER3_v0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="lqemEIHEIsnc" colab_type="text" # ## Setup # + id="HPiEEg9eJqYx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="719a0c2f-ee92-4524-d9bc-8358b3eedcee" # !pip install git+https://github.com/ML-Bioinfo-CEITEC/rbp # + id="A_QazIkNKAsA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["99600c06fa9b42e08fd192d56d10cecb", "af97c598383c46e7a8816f4ac43e23f4", "e956992090fa4b5a9be522a0a3315cc2", "307101bd926044f8b59210d2a507abd8", "1fdaf91e8f8d4bc98828a064b6b8bc12", "<KEY>", "<KEY>", "2542914110084c089352012f6ca90de2"]} outputId="a0a8313b-16be-4520-cdd4-ad1dcec12b2a" import pandas as pd import numpy as np from rbp.random.markov_model import MarkovModel from tqdm import tqdm, notebook notebook.tqdm().pandas() # + id="K-yKerxBKet7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="2b08816e-966b-4045-ff81-a4e3d893b776" # Mount to your Google Drive allowing lesson files will be saved to your Drive location from google.colab import drive drive.mount('/content/drive') # + [markdown] id="tb6UVKgvLenx" colab_type="text" # ## Data # + id="--TB7DZ9Lj7B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 216} outputId="e7866f12-2a14-4941-b9be-122083b4dce0" dt = pd.read_csv("/content/drive/My Drive/data/random/random_exons.csv") dt.columns = ['chr', 'start', 'end', 'seq'] dt = dt[~dt.seq.str.contains("N")] # just for 
sure train = dt[dt.chr!="1"] test = dt[dt.chr=="1"] print(dt.shape, train.shape, test.shape) dt.head() # + [markdown] id="Wz4kMa0VLypz" colab_type="text" # ## Markov Model # + id="wD100gYRLuuP" colab_type="code" colab={} mm = MarkovModel(k=3) mm.fit(train.seq) # + id="VNGI6at2L1MG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7c76458b-c3af-4268-863b-d7a6244ae777" def predict3(prev3): tmp = mm.kmer_pairs[mm.kmer_pairs.index.get_level_values('prev') == prev3].reset_index().drop(columns='prev').sort_values(0, ascending=False) return tmp.curr.iloc[0] predict3("ACG") # + id="Yk5Ad2OSMChG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="64270c71-4dbc-4ae9-f9e5-52cc6ba0a4fa" next3 = {x: predict3(x) for x in mm.kmer_pairs.index.get_level_values('prev')} next3['ACG'] # + id="eLzYwMwAMOUf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f8edecf6-1a8f-4078-ace6-907c80179036" def compare3(x, y): return int(x[0] == y[0]), int(x[1] == y[1]), int(x[2] == y[2]) compare3('GGG', 'AGG') # + id="JwV_K5bTMaOg" colab_type="code" colab={} def stat3(s): sdf = pd.DataFrame({'prev': [s[i:i+3] for i in range(50,200-3,3)], 'next': [s[i:i+3] for i in range(53,200,3)]}) sdf['nextpred'] = sdf.prev.apply(predict3) sdf[['c1', 'c2', 'c3']] = pd.DataFrame.from_records(sdf.apply(lambda row: compare3(row['next'], row['nextpred']), axis=1), columns=['c1', 'c2', 'c3']) return sdf[['c1', 'c2', 'c3']].mean(axis=0) # + id="UyGcJtTMMd6O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67, "referenced_widgets": ["0b1bd570001542e49d56ea6eea6b485a", "b7a7e26cdd8044fe93617f792c2a5902", "c4a06525867045b6874d1965d7177e71", "61b39ebff4be4bd8bcdcc7831d0b6cd2", "c63d6a0c878e49af886c65bb6c740262", "201756ce2db1440b833c7fa313dac5ab", "a76ccc16eaf64736942c2367bbf8d736", "51303fcc4b3848599c5910dade4435c2"]} outputId="2ac685b7-33bc-4f20-fd78-7572fcf751d1" stats = 
test.seq.progress_apply(stat3) # + id="aD0gIQXkOaIb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="40e4c86a-04d2-45c4-9ebe-23765cf2f53c" stats.mean(axis=0)
experiments/Exons_MarkovModel_KMER3_v0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # ## Implement LSTM # ### Getting ready... library(keras) library(readr) library(stringr) # Let's read our data and look at its sturcture data <- read_file("data/rhyme.txt") %>% str_to_lower() data # ### How to do it.. input = as.array(data) tokenizer = text_tokenizer(num_words = 40,char_level = F) tokenizer %>% fit_text_tokenizer(input) # + # saveRDS(object = tokenizer,"token_nietzsche.rds") # - head(tokenizer$word_index) text_seqs <- texts_to_sequences(tokenizer, input) str(text_seqs) text_seqs <- text_seqs[[1]] # + # text_seqs <- text_seqs[1:100] # - length(text_seqs) text_seqs[1:10] train_length_of_sentence <- 2 feature <- matrix(ncol = train_length_of_sentence) label <- matrix(ncol = 1) # + # feature <- readRDS("feature_71.rds") # label <- readRDS("label_71.rds") # - for(i in seq(train_length_of_sentence, length(text_seqs))){ if(i >= length(text_seqs)){ break() } start_idx <- (i - train_length_of_sentence) +1 end_idx <- i +1 new_seq <- text_seqs[start_idx:end_idx] feature <- rbind(feature,new_seq[1:train_length_of_sentence]) label <- rbind(label,new_seq[train_length_of_sentence+1]) } feature <- feature[-1,] label <- label[-1,] head(feature) head(label) # + dim(feature) length(label) # - label <- to_categorical(label,num_classes = tokenizer$num_words ) # + # saveRDS(object = feature,file = "feature_71.rds") # saveRDS(label,file = "label_71.rds") # + # train_index <- sample(1:nrow(feature), 0.8 * nrow(feature)) # test_index <- setdiff(1:nrow(feature), train_index) # X_train <- feature[train_index,] # y_train <- label[train_index] # X_test <- feature[test_index,] # y_test <- label[test_index] # X_train <- to_categorical(X_train,num_classes = tokenizer$num_words ) # y_train <- to_categorical(y_train,num_classes = tokenizer$num_words ) # X_test <- 
to_categorical(X_test,num_classes = tokenizer$num_words ) # y_test <- to_categorical(y_test,num_classes = tokenizer$num_words ) # - cat("Shape of features",dim(feature),"\n") cat("Shape of label",length(label)) model <- keras_model_sequential() model %>% layer_embedding(input_dim = tokenizer$num_words,output_dim = 10,input_length = train_length_of_sentence) %>% layer_lstm(units = 50) %>% layer_dense(tokenizer$num_words) %>% layer_activation("softmax") summary(model) model %>% compile( loss = "categorical_crossentropy", optimizer = optimizer_rmsprop(lr = 0.001), metrics = c('accuracy') ) model %>% fit( feature, label, batch_size = 128, epochs = 500 ) # + # save_model_hdf5(model,"lstm_v2.h5") # library(keras) # model <- load_model_hdf5(filepath = "lstm_v2.h5") # summary(model) # + # scores <- model %>% evaluate( # X_test, y_test, # batch_size = 32 # ) # cat('Test score:', scores[[1]],'\n') # cat('Test accuracy', scores[[2]]) # - # generate a sequence from a language model generate_sequence <-function(model, tokenizer, max_length, seed_text, n_words){ input_text <- seed_text for(i in seq(n_words)){ encoded <- texts_to_sequences(tokenizer,input_text)[[1]] encoded<- pad_sequences(sequences = list(encoded),maxlen = max_length,padding = 'pre') yhat <- predict_classes(model,encoded, verbose=0) next_word <- tokenizer$index_word[[as.character(yhat)]] input_text <- paste(input_text, next_word) } return(input_text) } seed = "<NAME>" generate_sequence(model,tokenizer,train_length_of_sentence,seed,11) seed_2 = "<NAME>" generate_sequence(model,tokenizer,train_length_of_sentence,seed_2,11) # ## Part 2 data = read_csv("data/ArticlesFeb2018.csv") # We see that the headlines column is data$headline <- as.character(data$headline) headlines <- data$headline headlines[1:10] # ### How to do it... 
input = as.array(headlines) dim(input) input[1] tokenizer = text_tokenizer(num_words = 2000,char_level = F) tokenizer %>% fit_text_tokenizer(input) head(tokenizer$index_word) text_seqs <- texts_to_sequences(tokenizer, input) text_seqs[1:2] input[1:2] # + # Demonstrate mapping of index to words # - length_of_headlines = sapply(headlines,function(x){sapply(strsplit(x, " "), length)}) headlines[4] hist(x = length_of_headlines) # Iterate through the sequences of tokens train_length_of_sentence <- 5 feature <- matrix(ncol = train_length_of_sentence) label <- matrix(ncol = 1) for(headline in text_seqs){ for(i in seq(train_length_of_sentence, length(headline))){ if(i >= length(headline)){ break() } start_idx <- (i - train_length_of_sentence) +1 end_idx <- i +1 new_seq <- headline[start_idx:end_idx] feature <- rbind(feature,new_seq[1:train_length_of_sentence]) label <- rbind(label,new_seq[train_length_of_sentence+1]) } } head(feature) head(label) feature <- feature[-1,] label <- label[-1,] dim(feature) length(label) train_index <- sample(1:nrow(feature), 0.8 * nrow(feature)) test_index <- setdiff(1:nrow(feature), train_index) X_train <- feature[train_index,] y_train <- label[train_index] X_test <- feature[test_index,] y_test <- label[test_index] X_train <- to_categorical(X_train,num_classes = tokenizer$num_words ) y_train <- to_categorical(y_train,num_classes = tokenizer$num_words ) X_test <- to_categorical(X_test,num_classes = tokenizer$num_words ) y_test <- to_categorical(y_test,num_classes = tokenizer$num_words ) cat("Shape of features",dim(X_train),"\n") cat("Shape of features",dim(y_train)) model <- keras_model_sequential() model %>% layer_lstm(8, input_shape = c(train_length_of_sentence, tokenizer$num_words)) %>% layer_dense(tokenizer$num_words) %>% layer_activation("softmax") summary(model) model %>% compile( loss = "categorical_crossentropy", optimizer = optimizer_rmsprop(lr = 0.001), metrics = c('accuracy') ) model %>% fit( X_train, y_train, batch_size = 128, 
epochs = 15 ) # + scores <- model %>% evaluate( X_test, y_test, batch_size = 32 ) cat('Test score:', scores[[1]],'\n') cat('Test accuracy', scores[[2]]) # - # ### How it works... # ### There is more... # ### See also... # + # library(keras) # library(readr) # library(stringr) # library(purrr) # library(tokenizers) # # Parameters -------------------------------------------------------------- # maxlen <- 40 # # Data Preparation -------------------------------------------------------- # # Retrieve text # path <- get_file( # 'nietzsche.txt', # origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt' # ) # # Load, collapse, and tokenize text # text <- read_lines(path) %>% # str_to_lower() %>% # str_c(collapse = "\n") %>% # tokenize_characters(strip_non_alphanum = FALSE, simplify = TRUE) # print(sprintf("corpus length: %d", length(text))) # text <- text[1:10000] # head(text) # chars <- text %>% # unique() %>% # sort() # print(sprintf("total chars: %d", length(chars))) # chars # # Cut the text in semi-redundant sequences of maxlen characters # dataset <- map( # seq(1, length(text) - maxlen - 1, by = 3), # ~list(sentece = text[.x:(.x + maxlen - 1)], next_char = text[.x + maxlen]) # ) # dataset <- transpose(dataset) # head(dataset[[1]]) # str(dataset) # length(dataset$sentece[[1]]) # length(dataset$sentece) # 3320*40 # # Vectorization # x <- array(0, dim = c(length(dataset$sentece), maxlen, length(chars))) # y <- array(0, dim = c(length(dataset$sentece), length(chars))) # for(i in 1:length(dataset$sentece)){ # x[i,,] <- sapply(chars, function(x){ # as.integer(x == dataset$sentece[[i]]) # }) # y[i,] <- as.integer(chars == dataset$next_char[[i]]) # } # dim(x) # dim(y) # x[1,,] # x[1,2,] # class(x[1,2,]) # length(x[1,2,]) # y[1,] # length(y[1,]) # # Model Definition -------------------------------------------------------- # model <- keras_model_sequential() # model %>% # layer_lstm(128, input_shape = c(maxlen, length(chars))) %>% # layer_dense(length(chars)) %>% # 
layer_activation("softmax") # optimizer <- optimizer_rmsprop(lr = 0.01) # model %>% compile( # loss = "categorical_crossentropy", # optimizer = optimizer # ) # # Training & Results ---------------------------------------------------- # sample_mod <- function(preds, temperature = 1){ # preds <- log(preds)/temperature # exp_preds <- exp(preds) # preds <- exp_preds/sum(exp(preds)) # rmultinom(1, 1, preds) %>% # as.integer() %>% # which.max() # } # on_epoch_end <- function(epoch, logs) { # cat(sprintf("epoch: %02d ---------------\n\n", epoch)) # for(diversity in c(0.2, 0.5, 1, 1.2)){ # cat(sprintf("diversity: %f ---------------\n\n", diversity)) # start_index <- sample(1:(length(text) - maxlen), size = 1) # sentence <- text[start_index:(start_index + maxlen - 1)] # generated <- "" # for(i in 1:400){ # x <- sapply(chars, function(x){ # as.integer(x == sentence) # }) # x <- array_reshape(x, c(1, dim(x))) # preds <- predict(model, x) # next_index <- sample_mod(preds, diversity) # next_char <- chars[next_index] # generated <- str_c(generated, next_char, collapse = "") # sentence <- c(sentence[-1], next_char) # } # cat(generated) # cat("\n\n") # } # } # print_callback <- callback_lambda(on_epoch_end = on_epoch_end) # model %>% fit( # x, y, # batch_size = 128, # epochs = 1, # callbacks = print_callback # ) # - # In this section we will be working with -------- data set.It conta......... 
# We will start by importing the required libraries: # + # library(keras) # library(readr) # library(stringr) # # library(tm) # # library(tokenizers) # # Let's read our data and look at its sturcture # data <- read_file("data/nietzsche_71.txt") %>% str_to_lower() # data <- str_split(data, " ") # str(data) # data <- unlist(data) # str(data) # length(data) # data <- data[1:100] # train_length_of_sentence <- 20 # feature <- matrix(ncol = 1) # label <- matrix(ncol = 1) # for(i in seq(train_length_of_sentence, length(data))){ # if(i >= length(data)){ # break() # } # start_idx <- (i - train_length_of_sentence) +1 # end_idx <- i +1 # new_seq <- data[start_idx:end_idx] # feature <- rbind(feature,paste(new_seq[1:train_length_of_sentence],collapse = " ")) # label <- rbind(label,new_seq[train_length_of_sentence+1]) # } # new_seq[train_length_of_sentence+1] # # feature <- readRDS("feature_71.rds") # # label <- readRDS("label_71.rds") # head(feature) # head(label) # # feature <- feature[-1,] # # label <- label[-1,] # dim(feature) # length(label) # # saveRDS(object = feature,file = "feature_71.rds") # # saveRDS(label,file = "label_71.rds") # train_index <- sample(1:nrow(feature), 0.8 * nrow(feature)) # test_index <- setdiff(1:nrow(feature), train_index) # X_train <- feature[train_index,] # y_train <- label[train_index] # X_test <- feature[test_index,] # y_test <- label[test_index] # str(data) # tokenizer = text_tokenizer(num_words = 2000,char_level = F) # tokenizer %>% fit_text_tokenizer(data) # head(tokenizer$index_word) # # train_seq_gen <- texts_to_sequences_generator(tokenizer,texts = c(X_train,y_train)) # # train_seq_gen_y <- texts_to_sequences_generator(tokenizer,y_train) # X_train[1:2] # y_train[1:2] # texts_to_sequences(tokenizer,X_train[1:2,]) # sampling_generator <- function(X_data, Y_data = NULL, batch_size = 32) { # function() { # gc() # should blow up R if we are ever called on a background thread # rows <- sample(1:nrow(X_data), batch_size, replace = TRUE) # if 
(!is.null(Y_data)) # list(texts_to_sequences(X_data[rows]), texts_to_sequences(Y_data[rows])) # else # list(texts_to_sequences(X_data[rows,])) # } # } # model <- keras_model_sequential() # model %>% # layer_lstm(8, input_shape = c(train_length_of_sentence, tokenizer$num_words)) %>% # layer_dense(tokenizer$num_words) %>% # layer_activation("softmax") # summary(model) # model %>% compile( # loss = "categorical_crossentropy", # optimizer = optimizer_rmsprop(lr = 0.001), # metrics = c('accuracy') # ) # model %>% fit_generator(generator = sampling_generator(X_data = X_train,Y_data = y_train,batch_size = 20),steps_per_epoch = 10) # -
Chapter03/.ipynb_checkpoints/LSTM_rough-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 5年期定投 择时 # + import pandas as pd from datetime import datetime import trdb2py import numpy as np import plotly.graph_objects as go isStaticImg = False width = 960 height = 768 pd.options.display.max_columns = None pd.options.display.max_rows = None trdb2cfg = trdb2py.loadConfig('./trdb2.yaml') # + # 具体基金 asset = 'jqdata.000300_XSHG|1d' # baselineasset = 'jrj.510310' # asset = 'jrj.110011' # baselineasset = 'jqdata.000300_XSHG|1d' # 起始时间,0表示从最开始算起 tsStart = 0 tsStart = int(trdb2py.str2timestamp('2005-01-01', '%Y-%m-%d')) # 结束时间,-1表示到现在为止 tsEnd = -1 tsEnd = int(trdb2py.str2timestamp('2020-12-31', '%Y-%m-%d')) # 初始资金池 paramsinit = trdb2py.trading2_pb2.InitParams( money=10000, ) # 买入参数,用全部的钱来买入(也就是复利) paramsbuy = trdb2py.trading2_pb2.BuyParams( perHandMoney=1, ) # # 买入参数,用全部的钱来买入(也就是复利) # paramsbuy2 = trdb2py.trading2_pb2.BuyParams( # perHandMoney=1/6, # ) # 买入参数,用全部的钱来买入(也就是复利) paramsbuy2 = trdb2py.trading2_pb2.BuyParams( perHandMoney=0.5, ) # 卖出参数,全部卖出 paramssell = trdb2py.trading2_pb2.SellParams( perVolume=1, ) paramsaip = trdb2py.trading2_pb2.AIPParams( money=10000, type=trdb2py.trading2_pb2.AIPTT_MONTHDAY, day=1, ) # 止盈参数,120%止盈 paramstakeprofit = trdb2py.trading2_pb2.TakeProfitParams( perVolume=1, # isOnlyProfit=True, # isFinish=True, ) # 止盈参数,120%止盈 paramstakeprofit2 = trdb2py.trading2_pb2.TakeProfitParams( perVolume=1, # isOnlyProfit=True, isFinish=True, ) # 止盈参数,120%止盈 paramstakeprofit1 = trdb2py.trading2_pb2.TakeProfitParams( perVolume=1, isOnlyProfit=True, # isFinish=True, ) # 止盈参数,120%止盈 paramstakeprofit3 = trdb2py.trading2_pb2.TakeProfitParams( perVolume=1, isOnlyProfit=True, isFinish=True, ) # 卖出参数,全部卖出 paramssell7 = trdb2py.trading2_pb2.SellParams( # perVolume=1, keepTime=7 * 24 * 60 * 60, ) lststart = [1, 2, 3, 4, 5] lsttitle = ['周一', 
'周二', '周三', '周四', '周五']


# +
def calcweekday2val2(wday, offday):
    # Map a weekday (1..5) plus an offset in trading days to the calendar-day
    # offset, skipping over weekends. Returns `offday` unchanged when the
    # span does not cross a weekend.
    if offday == 1:
        if wday == 5:
            return 3
    if offday == 2:
        if wday >= 4:
            return 4
    if offday == 3:
        if wday >= 3:
            return 5
    if offday == 4:
        if wday >= 2:
            return 6
    return offday


def getAIPLastTs(pnl):
    # Timestamp of the last control event, but only if it is a SELL;
    # -1 otherwise (used to detect a finished AIP round).
    ctrlnums = len(pnl.lstCtrl)
    if ctrlnums <= 0:
        return -1
    if pnl.lstCtrl[ctrlnums - 1].type == trdb2py.trading2_pb2.CTRL_SELL:
        return pnl.lstCtrl[ctrlnums - 1].ts
    return -1


def getLastResult(pnl) -> dict:
    # Final cost/value snapshot of a PNL series, or None when the series
    # is empty (despite the `dict` annotation).
    nums = len(pnl.values)
    if nums <= 0:
        return None
    return {'cost': pnl.values[nums - 1].cost, 'value': pnl.values[nums - 1].value}


def getLastCtrl(pnl):
    # Last control (buy/sell) record, or None when there is none.
    ctrlnums = len(pnl.lstCtrl)
    if ctrlnums <= 0:
        return None
    return pnl.lstCtrl[ctrlnums - 1]


def getPNLValueWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData) -> int:
    # Return the index of the value entry at `ts`, inserting an empty
    # placeholder entry to keep `pnl.values` sorted when `ts` is missing.
    # NOTE: mutates `pnl` as a side effect.
    for i in range(0, len(pnl.values)):
        if ts == pnl.values[i].ts:
            return i
        if ts < pnl.values[i].ts:
            pnl.values.insert(i, trdb2py.trading2_pb2.PNLDataValue(ts=ts))
            return i
    pnl.values.append(trdb2py.trading2_pb2.PNLDataValue(ts=ts))
    return len(pnl.values) - 1


def mergePNL(lstpnl: list) -> trdb2py.trading2_pb2.PNLAssetData:
    # Merge a list of {'pnl': PNLAssetData} dicts into a single series,
    # summing value and cost per timestamp and recomputing perValue.
    pnl = trdb2py.trading2_pb2.PNLAssetData()
    for vpnl in lstpnl:
        v = vpnl['pnl']
        for cai in range(0, len(v.values)):
            di = getPNLValueWithTimestamp(v.values[cai].ts, pnl)
            pnl.values[di].value += v.values[cai].value
            pnl.values[di].cost += v.values[cai].cost
            if pnl.values[di].cost > 0:
                pnl.values[di].perValue = pnl.values[di].value / \
                    pnl.values[di].cost
            else:
                # No cost yet at this timestamp: treat ratio as break-even.
                pnl.values[di].perValue = 1
            # pnl.values[di].perValue = 2
    return pnl


def getNextMonthDay1(ts):
    # Timestamp of day 1 of the month following `ts`.
    # NOTE(review): `utcfromtimestamp` parses in UTC but the naive
    # `datetime(...).timestamp()` converts back using *local* time, so the
    # result shifts by the local UTC offset — confirm this is intended.
    dt = datetime.utcfromtimestamp(ts)
    if dt.month == 12:
        return datetime(dt.year + 1, 1, 1).timestamp()
    return datetime(dt.year, dt.month + 1, 1).timestamp()


def rmPNLValuesWithTimestamp(ts, pnl: trdb2py.trading2_pb2.PNLAssetData):
    # NOTE(review): despite the "rm" name, `extend` APPENDS a duplicate of
    # values[0:i+1] instead of removing anything; the intended operation was
    # probably a truncation such as `del pnl.values[i+1:]` — confirm before
    # relying on this helper (it is unused in the visible cells).
    i = getPNLValueWithTimestamp(ts, pnl)
    pnl.values.extend(pnl.values[0:i+1])


def getPNLTimestampLowInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    # For each calendar month (UTC) in the series, collect the timestamp of
    # the LOWEST perValue seen that month.
    # NOTE(review): if the final element starts a new month, that month's
    # timestamp is never appended — verify whether that is acceptable.
    ts = 0
    dt = None
    lastPerValue = 0
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            # First element initializes the current-month tracker.
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                if lastPerValue > v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    # Flush the tracked month at end of series.
                    arr.append(ts)
            else:
                # Month changed: emit previous month's extreme, restart.
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr


def getPNLTimestampHighInMonth(pnl: trdb2py.trading2_pb2.PNLAssetData) -> list:
    # Mirror of getPNLTimestampLowInMonth: per-month timestamp of the
    # HIGHEST perValue (same end-of-series caveat applies).
    ts = 0
    dt = None
    lastPerValue = 0
    arr = []
    for i in range(0, len(pnl.values)):
        v = pnl.values[i]
        if ts == 0:
            ts = v.ts
            dt = datetime.utcfromtimestamp(ts)
            lastPerValue = v.perValue
        else:
            cdt = datetime.utcfromtimestamp(v.ts)
            if dt.year == cdt.year and dt.month == cdt.month:
                if lastPerValue < v.perValue:
                    ts = v.ts
                    dt = cdt
                    lastPerValue = v.perValue
                if i == len(pnl.values) - 1:
                    arr.append(ts)
            else:
                arr.append(ts)
                ts = v.ts
                dt = cdt
                lastPerValue = v.perValue
    return arr


# +
# Baseline: buy-and-hold the CSI 300 over the full window.
asset = 'jrj.110011'
# asset = 'jqdata.000036_XSHG|1d'
# asset = 'jqdata.000032_XSHG|1d'
asset = 'jqdata.000300_XSHG|1d'

# baseline
s0 = trdb2py.trading2_pb2.Strategy(
    name="normal",
    asset=trdb2py.str2asset(asset),
)
buy0 = trdb2py.trading2_pb2.CtrlCondition(
    name='buyandhold',
)
paramsbuy = trdb2py.trading2_pb2.BuyParams(
    perHandMoney=1,
)
paramsinit = trdb2py.trading2_pb2.InitParams(
    money=10000,
)
s0.buy.extend([buy0])
s0.paramsBuy.CopyFrom(paramsbuy)
s0.paramsInit.CopyFrom(paramsinit)
p0 = trdb2py.trading2_pb2.SimTradingParams(
    assets=[trdb2py.str2asset(asset)],
    startTs=tsStart,
    endTs=tsEnd,
    strategies=[s0],
    title='沪深300',
)
pnlBaseline = trdb2py.simTrading(trdb2cfg, p0)
trdb2py.showPNL(pnlBaseline, toImg=isStaticImg, width=width, height=height)

# +
# Rolling 5-year AIP windows: one simulation starting on the 1st of every
# month from 2005-01 through 2015-12 (buy on day 1 each month, see
# paramsaip defined above).
lstparams = []
for cy in range(2005, 2016):
    for cm in range(1, 13):
        tsStart = int(trdb2py.str2timestamp('{}-{}-01'.format(cy, cm), '%Y-%m-%d'))
        tsEnd = int(trdb2py.str2timestamp('{}-{}-01'.format(cy + 5, cm), '%Y-%m-%d'))
        buy0 =
trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        # paramsaip = trdb2py.trading2_pb2.AIPParams(
        #     money=10000,
        #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
        #     day=1,
        # )
        # AIP variant: no initial lump sum, monthly investment via paramsaip.
        s0.buy.extend([buy0])
        s0.paramsBuy.CopyFrom(paramsbuy)
        s0.paramsSell.CopyFrom(paramssell)
        # s0.paramsInit.CopyFrom(paramsinit)
        s0.paramsAIP.CopyFrom(paramsaip)
        lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=tsStart,
            endTs=tsEnd,
            strategies=[s0],
            title='{}-{}定投5年'.format(cy, cm),
        ))

lstaippnl = trdb2py.simTradings(trdb2cfg, lstparams)
trdb2py.showPNLs(lstaippnl + [pnlBaseline], toImg=isStaticImg, width=width, height=height)

# +
# Tabular report for all AIP windows plus the baseline, worst first.
dfpnl1b = trdb2py.buildPNLReport(lstaippnl + [pnlBaseline])
dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns',
         'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=True)

# +
# Bucket the AIP windows by annualized return and show the distribution
# as a pie chart (labels are runtime strings, kept in Chinese).
dfpnl1b = trdb2py.buildPNLReport(lstaippnl)
lstname = ['亏损的','年化低于3%','年化3%-10%', '年化10%-20%', '年化20%以上']
lstnums = [0, 0, 0, 0, 0]
for index, row in dfpnl1b.iterrows():
    if row['annualizedReturns'] < 0:
        lstnums[0] += 1
    elif row['annualizedReturns'] < 0.03:
        lstnums[1] += 1
    elif row['annualizedReturns'] < 0.10:
        lstnums[2] += 1
    elif row['annualizedReturns'] < 0.20:
        lstnums[3] += 1
    else:
        lstnums[4] += 1
fig = go.Figure(data=[go.Pie(labels=lstname, values=lstnums)])
fig.show()

# +
# Same rolling windows but BUY-AND-HOLD for 5 years (initial lump sum via
# paramsinit, no AIP), for comparison against the AIP results above.
lstparams = []
for cy in range(2005, 2016):
    for cm in range(1, 13):
        tsStart = int(trdb2py.str2timestamp('{}-{}-01'.format(cy, cm), '%Y-%m-%d'))
        tsEnd = int(trdb2py.str2timestamp('{}-{}-01'.format(cy + 5, cm), '%Y-%m-%d'))
        buy0 = trdb2py.trading2_pb2.CtrlCondition(
            name='monthdayex',
            vals=[1],
        )
        s0 = trdb2py.trading2_pb2.Strategy(
            name="normal",
            asset=trdb2py.str2asset(asset),
        )
        # paramsaip = trdb2py.trading2_pb2.AIPParams(
        #     money=10000,
        #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
        #     day=1,
        # )
        s0.buy.extend([buy0])
        s0.paramsBuy.CopyFrom(paramsbuy)
        s0.paramsSell.CopyFrom(paramssell)
        s0.paramsInit.CopyFrom(paramsinit)
        # s0.paramsAIP.CopyFrom(paramsaip)
        lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
            assets=[trdb2py.str2asset(asset)],
            startTs=tsStart,
            endTs=tsEnd,
            strategies=[s0],
            title='{}-{}持有5年'.format(cy, cm),
        ))

lstaippnl1 = trdb2py.simTradings(trdb2cfg, lstparams)
trdb2py.showPNLs(lstaippnl1 + [pnlBaseline], toImg=isStaticImg, width=width, height=height)

# +
dfpnl1b = trdb2py.buildPNLReport(lstaippnl1 + [pnlBaseline])
dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns',
         'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=True)

# +
# Hand-picked start months (notable market tops/bottoms): simulate BOTH the
# AIP and the buy-and-hold variant for each, to compare side by side.
lstparams = []

lsty = [2005, 2006, 2007, 2007, 2008, 2010, 2013, 2007, 2009, 2011, 2015, 2014, 2015]
lstm = [1, 1, 1, 11, 1, 6, 2, 12, 4, 6, 7, 1, 12]

for i in range(0, len(lsty)):
    cy = lsty[i]
    cm = lstm[i]
    tsStart = int(trdb2py.str2timestamp('{}-{}-01'.format(cy, cm), '%Y-%m-%d'))
    tsEnd = int(trdb2py.str2timestamp('{}-{}-01'.format(cy + 5, cm), '%Y-%m-%d'))

    # AIP variant for this start month.
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    # paramsaip = trdb2py.trading2_pb2.AIPParams(
    #     money=10000,
    #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
    #     day=1,
    # )
    s0.buy.extend([buy0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    # s0.paramsInit.CopyFrom(paramsinit)
    s0.paramsAIP.CopyFrom(paramsaip)
    lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='{}-{}定投5年'.format(cy, cm),
    ))

    # tsStart = int(trdb2py.str2timestamp('{}-{}-01'.format(cy, cm), '%Y-%m-%d'))
    # tsEnd = int(trdb2py.str2timestamp('{}-{}-01'.format(cy + 5, cm), '%Y-%m-%d'))

    # Buy-and-hold variant for the same window.
    buy0 = trdb2py.trading2_pb2.CtrlCondition(
        name='monthdayex',
        vals=[1],
    )
    s0 = trdb2py.trading2_pb2.Strategy(
        name="normal",
        asset=trdb2py.str2asset(asset),
    )
    # paramsaip = trdb2py.trading2_pb2.AIPParams(
    #     money=10000,
    #     type=trdb2py.trading2_pb2.AIPTT_WEEKDAY,
    #     day=1,
    # )
    s0.buy.extend([buy0])
    s0.paramsBuy.CopyFrom(paramsbuy)
    s0.paramsSell.CopyFrom(paramssell)
    s0.paramsInit.CopyFrom(paramsinit)
    # s0.paramsAIP.CopyFrom(paramsaip)
    lstparams.append(trdb2py.trading2_pb2.SimTradingParams(
        assets=[trdb2py.str2asset(asset)],
        startTs=tsStart,
        endTs=tsEnd,
        strategies=[s0],
        title='{}-{}持有5年'.format(cy, cm),
    ))

lstaippnl2 = trdb2py.simTradings(trdb2cfg, lstparams)
trdb2py.showPNLs(lstaippnl2 + [pnlBaseline], toImg=isStaticImg, width=width, height=height)

# +
dfpnl1b = trdb2py.buildPNLReport(lstaippnl2 + [pnlBaseline])
dfpnl1b[['title', 'maxDrawdown', 'maxDrawdownStart', 'maxDrawdownEnd', 'totalReturns',
         'sharpe', 'annualizedReturns', 'annualizedVolatility']].sort_values(by='totalReturns', ascending=True)
# -
home/trdb2/aip005.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory statistics over the Spider text-to-SQL dataset: counts of
# examples, distinct queries, SQL/question n-grams, tables used per query,
# and the number of anonymized query templates.

import json
import re
from collections import Counter

tables_json = json.load(open('../data/spider/tables.json'))
train_spider = json.load(open('../data/spider/train_spider.json'))
dev_spider = json.load(open('../data/spider/dev.json'))
train_dev_spider = train_spider + dev_spider

# ## Number of examples
len(train_dev_spider)

train_dev_spider[30]

# ## Number of different queries
queries = set([ex['query'].lower() for ex in train_dev_spider])
len(queries)

# ## Number of SQL n-grams
ngrams = set()
n = 3
for ex in train_dev_spider:
    tokens = [t.lower() for t in ex['query_toks']]
    for i in range(len(tokens)-n+1):
        ngrams.add(tuple(tokens[i:i+n]))
print(len(ngrams))

# ## Number of question n-grams
ngrams = set()
n = 3
for ex in train_dev_spider:
    tokens = [t.lower() for t in ex['question_toks'] if t not in ['.', '?', ',']]  # spider doesn't contain other punctuations AFAIR, but we should replace this with a better filter for other datasets
    for i in range(len(tokens)-n+1):
        ngrams.add(tuple(tokens[i:i+n]))
print(len(ngrams))

# ## Average tables per question

# +
# tables_json[0]

# +
# Build {db_id: {table_name: [column_names]}} from the schema file.
schemas = {}
for db_json in tables_json:
    db_id = db_json['db_id']
    table_names = db_json["table_names_original"]
    # Each column entry is (owning_table_index, column_name).
    columns = [(column_name[0], column_name[1]) for column_name in db_json["column_names_original"]]
    schemas[db_id] = {}
    for table_index, table_name in enumerate(table_names):
        schemas[db_id][table_name] = []
        table_columns = [column for column in columns if column[0] == table_index]
        for table_column in table_columns:
            schemas[db_id][table_name].append(table_column[1])
# print(schemas["farm"])
# -

# Count query tokens that name a table of the example's database.
counts = []
for ex in train_dev_spider:
    available_tables = set([t.lower() for t in schemas[ex['db_id']].keys()])
    table_tokens_used = [t for t in ex['query_toks'] if t.lower() in available_tables]
    # print(table_tokens_used)
    # print(available_tables)
    # print([t.lower() for t in ex['query_toks']])
    counts.append(len(table_tokens_used))
print(sum(counts) / len(counts))

# ## Anonymized queries

# +
# Replace literals, numbers, and schema items with placeholders so that
# structurally identical queries collapse into one template.
templates = dict()
for ex in train_dev_spider:
    query = ex['query'].replace('(', ' ( ').replace(')', ' ) ').lower().strip('; ')
    # NOTE(review): these quote patterns are greedy, so a query with two
    # quoted literals collapses everything between them — confirm acceptable.
    query = re.sub(r'".*"', '{value}', query)
    query = re.sub(r"'.*'", '{value}', query)
    # FIX: the dot must be escaped; the previous r"\s\d+.\d+" let `.` match
    # any character, so e.g. " 12 34" merged two integers into one {number}.
    query = re.sub(r"\s\d+\.\d+", '{number}', query)
    query = re.sub(r"\s\d+", '{number}', query)
    query_tokens = [t for t in query.split() if t]
    # Alias-qualified column references (t1.x, t2.x, t3.x) become {item}.
    for i, token in enumerate(query_tokens):
        if token.startswith("t1.") or token.startswith("t2.") or token.startswith("t3."):
            query_tokens[i] = "{item}"
    available_tables = set([t.lower() for t in schemas[ex['db_id']].keys()])
    available_columns = set([c.lower() for t in schemas[ex['db_id']].values() for c in t])
    available_items = available_tables.union(available_columns)
    anonymized = " ".join(['{item}' if t in available_items else t for t in query_tokens])
    if anonymized not in templates:
        templates[anonymized] = 1
    else:
        templates[anonymized] += 1
    # print(query)
    # print(available_items)
    # print(' '.join(anonymized))
    # print("****")
print(len(templates))
# -

print(f"Avg. # queries / templates = {sum(list(templates.values()))/len(templates)}")
notebooks/Dataset Analysis - Spider.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Shallow (one-hidden-layer) neural network trained from scratch with NumPy
# to classify cat vs. dog grayscale images.

import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import random
import cv2
import os

# # X_train and Y_train Process

# +
DATADIR = '/home/vinicius/Documents/DataBase/Images_Dataset/Cat_x_Dog_with_numbers'
CATEGORIES = ['Cat', 'Dog']
IMG_SIZE = 200
training_data = []


def create_training_data():
    """Load each training image as grayscale, resize to IMG_SIZE x IMG_SIZE,
    and append [pixels, class_index] to the global `training_data` list."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for image in tqdm(os.listdir(path)):
            try:
                img_array = cv2.imread(os.path.join(path, image), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([new_array, class_num])
            except Exception:
                # Best-effort loading: skip unreadable/corrupt images.
                pass


create_training_data()

# +
random.shuffle(training_data)

x = []
y = []
for features, labels in training_data:
    x.append(features)
    y.append(labels)

# Columns are examples: X_train has shape (IMG_SIZE*IMG_SIZE, m).
x = np.array(x).reshape(-1, IMG_SIZE * IMG_SIZE)
X_train = x.T
print('#' * 70, '\n')
print('O tamanho do nosso conjunto de entrada para treino é: {}'.format(X_train.shape), '\n')

# Labels as a row vector of shape (1, m).
y = np.array(y).reshape(1, -1)
Y_train = y
print('#' * 70, '\n')
print('O tamanho do conjunto de saída rotulada para treino é: {}'.format(Y_train.shape), '\n')
# -

# # X_test and Y_test Process

# +
DATADIR = '/home/vinicius/Documents/DataBase/Images_Dataset/Cat_x_Dog_with_numbers'
CATEGORIES = ['Cat_test', 'Dog_test']
IMG_SIZE = 200
testing_data = []


def create_testing_data():
    """Same as create_training_data, but fills the global `testing_data`."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for image in tqdm(os.listdir(path)):
            try:
                img_array = cv2.imread(os.path.join(path, image), cv2.IMREAD_GRAYSCALE)
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                testing_data.append([new_array, class_num])
            except Exception:
                # Best-effort loading: skip unreadable/corrupt images.
                pass


create_testing_data()

# +
random.shuffle(testing_data)

x = []
y = []
for features, labels in testing_data:
    x.append(features)
    y.append(labels)

X_test = np.array(x).reshape(-1, IMG_SIZE * IMG_SIZE).T
print('#' * 70, '\n')
print('O tamanho do nosso conjunto de entrada para teste é: {}'.format(X_test.shape), '\n')

Y_test = np.array(y).reshape(1, -1)
print('#' * 70, '\n')
print('O tamanho do conjunto de saída rotulada para teste é: {}'.format(Y_test.shape), '\n')
# -

# # Normalization of input data

# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train / 255
X_test = X_test / 255


# # Sigmoid Function

def sigmoid(z):
    """Element-wise logistic sigmoid 1 / (1 + e^-z)."""
    return 1 / (1 + np.exp(-z))


# # Layers size

def layers_size(X, Y):
    """Return (n_x, n_h, n_y): input size (rows of X, one per pixel),
    default hidden-layer size, and output size (rows of Y — one node,
    since Y is a single-row label matrix)."""
    n_x = X.shape[0]
    n_h = 4
    n_y = Y.shape[0]
    return (n_x, n_h, n_y)


# # Initilization

def initialize_parameters(n_x, n_h, n_y):
    """Initialize W1/W2 with small random values (scaled by 0.01 to keep
    activations in the sensitive region of tanh/sigmoid) and b1/b2 at zero.
    Shapes: W has (rows of current layer, rows of previous layer)."""
    np.random.seed(1)
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros(shape=(n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros(shape=(n_y, 1))
    parametros = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
    return parametros


# # Forward Propagation

def forward_propagation(X, parametros):
    """Vectorized forward pass: tanh on the hidden layer, sigmoid on the
    output for binary classification. Returns (A2, cache) where A2 has shape
    (1, m) and cache holds the intermediates for backprop."""
    W1 = parametros['W1']
    W2 = parametros['W2']
    b1 = parametros['b1']
    b2 = parametros['b2']
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    cache = {'Z1': Z1, 'A1': A1, 'Z2': Z2, 'A2': A2}
    return A2, cache


# # Cost Function

def compute_cost(A2, Y, parametros):
    """Mean binary cross-entropy over the m examples.
    `parametros` is unused but kept for interface compatibility."""
    m = Y.shape[1]
    W1 = parametros['W1']
    W2 = parametros['W2']
    cost = (np.log(A2) * Y) + ((1 - Y) * (np.log(1 - A2)))
    cost = (-1 / m) * np.sum(cost)
    cost = np.squeeze(cost)
    return cost


# # Backward Propagation

def backward_propagation(parametros, cache, X, Y):
    """Backprop the gradients of the cross-entropy cost. The derivative of
    tanh is expressed as 1 - A1^2 (using the cached activation instead of a
    tanh' call)."""
    m = X.shape[1]
    W1 = parametros['W1']
    W2 = parametros['W2']
    Z1 = cache['Z1']
    A1 = cache['A1']
    A2 = cache['A2']
    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
    dW1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m
    gradientes = {'dW1': dW1, 'dW2': dW2, 'db1': db1, 'db2': db2}
    return gradientes


# # Atualization

def update_parameters(parametros, gradientes, learning_rate=0.001):
    """One gradient-descent step: theta := theta - learning_rate * dtheta."""
    W1 = parametros['W1']
    W2 = parametros['W2']
    b1 = parametros['b1']
    b2 = parametros['b2']
    dW1 = gradientes['dW1']
    dW2 = gradientes['dW2']
    db1 = gradientes['db1']
    db2 = gradientes['db2']
    W1 = W1 - learning_rate * dW1
    W2 = W2 - learning_rate * dW2
    b1 = b1 - learning_rate * db1
    b2 = b2 - learning_rate * db2
    parametros = {'W1': W1, 'W2': W2, 'b1': b1, 'b2': b2}
    return parametros


# # Predictions

def predict(parametros, X):
    """Threshold the output activations at 0.5 and return a (1, m) matrix
    of 0/1 predictions."""
    m = X.shape[1]
    Y_predict = np.zeros(shape=(1, m))
    A2, cache = forward_propagation(X, parametros)
    for i in range(A2.shape[1]):
        if A2[0, i] > 0.5:
            Y_predict[0, i] = 1
        else:
            # FIX: the original `else: 0` was a no-op expression; make the
            # zero assignment explicit (same result, since Y_predict starts
            # zero-filled, but no longer dead code).
            Y_predict[0, i] = 0
    return Y_predict


# # Model

def model(X_train, Y_train, X_test, Y_test, n_h, num_iterations, learning_rate, print_cost):
    """Train the 2-layer network with full-batch gradient descent, report
    train/test accuracy, and return the learned parameters plus the cost
    history (sampled every 200 iterations)."""
    cost_list = []
    np.random.seed(2)
    n_x = layers_size(X_train, Y_train)[0]
    n_y = layers_size(X_train, Y_train)[2]
    parametros = initialize_parameters(n_x, n_h, n_y)
    for i in range(num_iterations):
        A2, cache = forward_propagation(X_train, parametros)
        cost = compute_cost(A2, Y_train, parametros)
        gradientes = backward_propagation(parametros, cache, X_train, Y_train)
        # FIX: the learning_rate argument was previously dropped here, so the
        # caller's value (e.g. 2) was silently ignored and 0.001 used instead.
        parametros = update_parameters(parametros, gradientes, learning_rate)
        if i % 200 == 0:
            cost_list.append(cost)
        if print_cost and i % 200 == 0:
            print('Valor da função de custo após a {}ª iteração: {}'.format(i, cost))

    Y_prediction_test = predict(parametros, X_test)
    Y_prediction_train = predict(parametros, X_train)

    print('Acurácia treino: {} %'.format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print('Acurácia teste: {} %'.format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    # FIX: read the weights AFTER training; the original captured W1..b2
    # right after initialization, so `resultado` held the untrained values.
    W1 = parametros['W1']
    W2 = parametros['W2']
    b1 = parametros['b1']
    b2 = parametros['b2']
    resultado = {'custo': cost_list,
                 'Predições no teste': Y_prediction_test,
                 'Predições no treino': Y_prediction_train,
                 'W1': W1,
                 'b1': b1,
                 'W2': W2,
                 'b2': b2,
                 'Learning rate': learning_rate,
                 'Numero de iterações': num_iterations}
    return resultado


resultado = model(X_train, Y_train, X_test, Y_test, n_h=8, num_iterations=1000, learning_rate=2, print_cost=True)

# # Cost Function Graph

custo = np.squeeze(resultado['custo'])
plt.plot(custo)
plt.ylabel('custo')
plt.xlabel('Iterações a cada 100')
plt.title("Learning rate =" + str(resultado["Learning rate"]))
plt.show()

# # Image test from X_test

index = 15
plt.imshow(X_test[:, index].reshape((IMG_SIZE, IMG_SIZE)), cmap='gray')
# FIX: close the quotation mark that the original message left unbalanced.
print("Y rotulado nos testes = " + str(Y_test[0, index]) + ", você preveu ser \"" + str(resultado["Predições no teste"][0, index]) + "\"")
Project/Shallow Neural Network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <img src="images/logodwengo.png" alt="Banner" style="width: 150px;"/> # <div style='color: #690027;' markdown="1"> # <h1>WIP ONDERLINGE LIGGING VAN RECHTEN</h1> # </div> # <div class="alert alert-box alert-success"> # Naast de snijpunten met de assen, is het ook belangrijk dat je snijpunten met andere rechten kunt berekenen. Dit kan bijvoorbeeld helpen bij het bepalen van ... of ... # # Ook hiervoor bestaan er reeds functies in python in de submodule SymPy. # </div> # ### Nodige modules importeren import numpy as np import matplotlib.pyplot as plt # <div style='color: #690027;' markdown="1"> # <h2>1. Onderlinge ligging van rechten </h2> # </div> # ### 1.1 Soorten # Snijdende rechten. (a=/=) # Evenwijdige rechten. (a =) # ### 1.2 Evenwijdige rechten # Rico = # ### Snijdende rechten # Rico =/= # <div style='color: #690027;' markdown="1"> # <h2>2. Twee snijdende rechten met gegeven vergelijking</h2> # </div> # Bestudeer de code aandachtig en voer het script uit. 
<br>

# +
# TWO INTERSECTING LINES WITH GIVEN EQUATIONS
plt.figure()                                     # create the drawing window
plt.hlines(0, -10, 10)                           # horizontal line y = 0 from x = -10 to x = 10 (the x-axis)
plt.vlines(0, -30, 40)                           # vertical line x = 0 from y = -30 to y = 40 (the y-axis)

x = np.linspace(-9.5, 9.5, 20)                   # x-coordinates of the points to be plotted

# equation of the first line:  y = -3x + 5
# equation of the second line: y = 2x + 16
y1 = -3 * x + 5                                  # corresponding y-coordinates for the first line
y2 = 2 * x + 16                                  # corresponding y-coordinates for the second line

plt.axis(xmin=-10, xmax=10, ymin=-30, ymax=40)   # window settings
plt.xticks(np.arange(-10, 11, step=1))           # tick marks on the x-axis
plt.yticks(np.arange(-30, 45, step=5))           # tick marks on the y-axis
plt.grid(True)                                   # draw the grid
plt.plot(x, y1, color="blue", linewidth=1.0, linestyle="solid")    # segment of the first line
plt.plot(x, y2, color="red", linewidth=4.0, linestyle="dashed")    # segment of the second line
plt.show()                                       # show the figure
# -

# ### Opdracht 2.2
# Maak aanpassingen in dit script - *kopieer het vorige script eerst hieronder in de nieuwe code-cel* - zodat:
# ...
# + # OPLOSSING # TWEE SNIJDENDE RECHTEN MET GEGEVEN VERGELIJKING plt.figure() # creër tekenvenster plt.hlines(0, -10, 10) # plot een horizontale rechte van x = -10 tot x = 10 waarvoo y = 0 (de x-as) plt.vlines(0, -30, 40) # plot een verticale rechte van y = -30 tot y = 40 waarvoor x = 0 (de y-as) x = np.linspace(-9.5, 9.5, 20) # kies de x-coördinaten van de punten die geplot worden # vergelijking van de eerste rechte: y = -3x + 5 # vergelijking van de tweede rechte: y = 2x + 16 y1 = -3 * x + 5 # overeenkomstige y-coördinaten voor eerste rechte y2 = 2 * x + 16 # overeenkomstige y-coördinaten voor tweede rechte plt.axis(xmin=-10, xmax=10, ymin=-30, ymax=40) # vensterinstellingen plt.xticks(np.arange(-10, 11, step=1)) plt.yticks(np.arange(-30, 45, step=5)) plt.grid(True) # plot rooster plt.plot(x, y1, color="blue", linewidth=1.0, linestyle="solid") # plot lijnstuk voor eerste rechte plt.plot(x, y2, color="red", linewidth=4.0, linestyle="dashed") # plot lijnstuk voor tweede rechte plt.show() # toon grafiek # - # <div style='color: #690027;' markdown="1"> # <h2>3. Oefeningen</h2> # </div> # ### Opdracht 3.1 # ### Opdracht 3.2 # # ### Opdracht 3.3 # <div class="alert alert-block alert-warning"> # Vervolg: regressie # </div> # <div> # <h2>Referentielijst</h2> # </div> # [1] <NAME>. (2018) Exploratory computing with Python. Technische Universiteit Delft. <br> &nbsp; &nbsp; &nbsp; &nbsp; geraadpleegd op 14 april 2019 via https://mbakker7.github.io/exploratory_computing_with_python/. Gepubliceerd onder CC BY licentie. # <img src="images/cclic.png" alt="Banner" align="left" style="width:100px;"/><br><br> # Notebook Python in wiskunde, zie <a href="http://www.aiopschool.be">AI Op School</a>, <NAME>, <NAME> & <NAME> is in licentie gegeven volgens een <a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>.
Wiskunde/GrafiekenFuncties/0800_OnderlingeLigging.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow_p36 # language: python # name: conda_tensorflow_p36 # --- # ## TensorFlow 2 Complete Project Workflow in Amazon SageMaker # ### Data Preprocessing -> Code Prototyping -> Automatic Model Tuning -> Deployment # # 1. [Introduction](#Introduction) # 2. [SageMaker Processing for dataset transformation](#SageMakerProcessing) # 3. [Local Mode training](#LocalModeTraining) # 4. [Local Mode endpoint](#LocalModeEndpoint) # 5. [SageMaker hosted training](#SageMakerHostedTraining) # 6. [Automatic Model Tuning](#AutomaticModelTuning) # 7. [SageMaker hosted endpoint](#SageMakerHostedEndpoint) # 8. [Workflow Automation with the Step Functions Data Science SDK](#WorkflowAutomation) # 1. [Add an IAM policy to your SageMaker role](#IAMPolicy) # 2. [Create an execution role for Step Functions](#CreateExecutionRole) # 3. [Set up a TrainingPipeline](#TrainingPipeline) # 4. [Visualizing the workflow](#VisualizingWorkflow) # 5. [Creating and executing the pipeline](#CreatingExecutingPipeline) # 6. [Cleanup](#Cleanup) # 9. [Extensions](#Extensions) # # # ## Introduction <a class="anchor" id="Introduction"> # # If you are using TensorFlow 2, you can use the Amazon SageMaker prebuilt TensorFlow 2 container with training scripts similar to those you would use outside SageMaker. This feature is named Script Mode. Using Script Mode and other SageMaker features, you can build a complete workflow for a TensorFlow 2 project. This notebook presents such a workflow, including all key steps such as preprocessing data with SageMaker Processing, code prototyping with SageMaker Local Mode training and inference, and production-ready model training and deployment with SageMaker hosted training and inference. Automatic Model Tuning in SageMaker is used to tune the model's hyperparameters. 
# Additionally, the [AWS Step Functions Data Science SDK](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/readmelink.html) is used to automate the main training and deployment steps for use in a production workflow outside notebooks.
#
# To enable you to run this notebook within a reasonable time (typically less than an hour), this notebook's use case is a straightforward regression task: predicting house prices based on the well-known Boston Housing dataset. This public dataset contains 13 features regarding housing stock of towns in the Boston area. Features include average number of rooms, accessibility to radial highways, adjacency to the Charles River, etc.
#
# To begin, we'll import some necessary packages and set up directories for local training and test data. We'll also set up a SageMaker Session to perform various operations, and specify an Amazon S3 bucket to hold input data and output. The default bucket used here is created by SageMaker if it doesn't already exist, and named in accordance with the AWS account ID and AWS Region.

# +
import os
import sagemaker
# NOTE(review): tensorflow appears unused in this cell — presumably imported
# so the kernel fails fast if TF 2 is missing; confirm before removing.
import tensorflow as tf

# Session object wrapping all SageMaker API calls made from this notebook.
sess = sagemaker.Session()
# Default S3 bucket; created on first use, named from account ID + region.
bucket = sess.default_bucket()

# Local working directories: raw download plus train/test splits.
data_dir = os.path.join(os.getcwd(), 'data')
os.makedirs(data_dir, exist_ok=True)

train_dir = os.path.join(os.getcwd(), 'data/train')
os.makedirs(train_dir, exist_ok=True)

test_dir = os.path.join(os.getcwd(), 'data/test')
os.makedirs(test_dir, exist_ok=True)

raw_dir = os.path.join(os.getcwd(), 'data/raw')
os.makedirs(raw_dir, exist_ok=True)
# -

# # SageMaker Processing for dataset transformation <a class="anchor" id="SageMakerProcessing">
#
# Next, we'll import the dataset and transform it with SageMaker Processing, which can be used to process terabytes of data in a SageMaker-managed cluster separate from the instance running your notebook server.
# In a typical SageMaker workflow, notebooks are only used for prototyping and can be run on relatively inexpensive and less powerful instances, while processing, training and model hosting tasks are run on separate, more powerful SageMaker-managed instances. SageMaker Processing includes off-the-shelf support for Scikit-learn, as well as a Bring Your Own Container option, so it can be used with many different data transformation technologies and tasks.
#
# First we'll load the Boston Housing dataset, save the raw feature data and upload it to Amazon S3 for transformation by SageMaker Processing. We'll also save the labels for training and testing.

# +
import numpy as np
from tensorflow.python.keras.datasets import boston_housing
# NOTE(review): StandardScaler is not used in this cell — it is used inside
# preprocessing.py below; confirm whether this import can be dropped here.
from sklearn.preprocessing import StandardScaler

(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

# Raw features go to data/raw for upload; labels are written directly into
# the train/test directories because only the features need transformation.
np.save(os.path.join(raw_dir, 'x_train.npy'), x_train)
np.save(os.path.join(raw_dir, 'x_test.npy'), x_test)
np.save(os.path.join(train_dir, 'y_train.npy'), y_train)
np.save(os.path.join(test_dir, 'y_test.npy'), y_test)

# Upload the raw features to s3://<bucket>/tf-2-workflow/data/raw.
s3_prefix = 'tf-2-workflow'
rawdata_s3_prefix = '{}/data/raw'.format(s3_prefix)
raw_s3 = sess.upload_data(path='./data/raw/', key_prefix=rawdata_s3_prefix)
print(raw_s3)
# -

# To use SageMaker Processing, simply supply a Python data preprocessing script as shown below. For this example, we're using a SageMaker prebuilt Scikit-learn container, which includes many common functions for processing data. There are few limitations on what kinds of code and operations you can run, and only a minimal contract: input and output data must be placed in specified directories. If this is done, SageMaker Processing automatically loads the input data from S3 and uploads transformed data back to S3 when the job is complete.
# +
# %%writefile preprocessing.py

import glob
import numpy as np
import os
from sklearn.preprocessing import StandardScaler

# Processing-container entry point. SageMaker Processing mounts the input
# .npy files under /opt/ml/processing/input and uploads whatever this script
# writes to /opt/ml/processing/{train,test} back to S3 when the job ends.
if __name__=='__main__':
    input_files = glob.glob('{}/*.npy'.format('/opt/ml/processing/input'))
    print('\nINPUT FILE LIST: \n{}\n'.format(input_files))
    scaler = StandardScaler()
    # NOTE(review): fit_transform re-fits the scaler on every file, so the
    # test split is standardized with its own statistics rather than the
    # training statistics — presumably acceptable for this sharded demo;
    # confirm before reusing in a real pipeline.
    # Renamed loop variable from `file` to avoid shadowing the builtin.
    for input_file in input_files:
        raw = np.load(input_file)
        transformed = scaler.fit_transform(raw)
        # Route each transformed file by its name: anything containing
        # 'train' becomes training features, everything else test features.
        if 'train' in input_file:
            output_path = os.path.join('/opt/ml/processing/train', 'x_train.npy')
            np.save(output_path, transformed)
            print('SAVED TRANSFORMED TRAINING DATA FILE\n')
        else:
            output_path = os.path.join('/opt/ml/processing/test', 'x_test.npy')
            np.save(output_path, transformed)
            print('SAVED TRANSFORMED TEST DATA FILE\n')
# -

# Before starting the SageMaker Processing job, we instantiate a `SKLearnProcessor` object. This object allows you to specify the instance type to use in the job, as well as how many instances. Although the Boston Housing dataset is quite small, we'll use two instances to showcase how easy it is to spin up a cluster for SageMaker Processing.

# +
from sagemaker import get_execution_role
from sagemaker.sklearn.processing import SKLearnProcessor

# Two ml.m5.xlarge instances running the prebuilt scikit-learn 0.20.0
# container; instance_count=2 lets the input files be sharded across nodes.
sklearn_processor = SKLearnProcessor(framework_version='0.20.0',
                                     role=get_execution_role(),
                                     instance_type='ml.m5.xlarge',
                                     instance_count=2)
# -

# We're now ready to run the Processing job. To enable distributing the data files equally among the instances, we specify the `ShardedByS3Key` distribution type in the `ProcessingInput` object. This ensures that if we have `n` instances, each instance will receive `1/n` files from the specified S3 bucket. It may take around 3 minutes for the following code cell to run, mainly to set up the cluster. At the end of the job, the cluster automatically will be torn down by SageMaker.
# + from sagemaker.processing import ProcessingInput, ProcessingOutput from time import gmtime, strftime processing_job_name = "tf-2-workflow-{}".format(strftime("%d-%H-%M-%S", gmtime())) output_destination = 's3://{}/{}/data'.format(bucket, s3_prefix) sklearn_processor.run(code='preprocessing.py', job_name=processing_job_name, inputs=[ProcessingInput( source=raw_s3, destination='/opt/ml/processing/input', s3_data_distribution_type='ShardedByS3Key')], outputs=[ProcessingOutput(output_name='train', destination='{}/train'.format(output_destination), source='/opt/ml/processing/train'), ProcessingOutput(output_name='test', destination='{}/test'.format(output_destination), source='/opt/ml/processing/test')]) preprocessing_job_description = sklearn_processor.jobs[-1].describe() # - # In the log output of the SageMaker Processing job above, you should be able to see logs in two different colors for the two different instances, and that each instance received different files. Without the `ShardedByS3Key` distribution type, each instance would have received a copy of **all** files. By spreading the data equally among `n` instances, you should receive a speedup by approximately a factor of `n` for most stateless data transformations. After saving the job results locally, we'll move on to prototyping training and inference code with Local Mode. train_in_s3 = '{}/train/x_train.npy'.format(output_destination) test_in_s3 = '{}/test/x_test.npy'.format(output_destination) # !aws s3 cp {train_in_s3} ./data/train/x_train.npy # !aws s3 cp {test_in_s3} ./data/test/x_test.npy # ## Local Mode training <a class="anchor" id="LocalModeTraining"> # # Local Mode in Amazon SageMaker is a convenient way to make sure your code is working locally as expected before moving on to full scale, hosted training in a separate, more powerful SageMaker-managed cluster. To train in Local Mode, it is necessary to have docker-compose or nvidia-docker-compose (for GPU instances) installed. 
Running the following commands will install docker-compose or nvidia-docker-compose, and configure the notebook environment for you. # !wget -q https://raw.githubusercontent.com/aws-samples/amazon-sagemaker-script-mode/master/local_mode_setup.sh # !wget -q https://raw.githubusercontent.com/aws-samples/amazon-sagemaker-script-mode/master/daemon.json # !/bin/bash ./local_mode_setup.sh # Next, we'll set up a TensorFlow Estimator for Local Mode training. Key parameters for the Estimator include: # # - `train_instance_type`: the kind of hardware on which training will run. In the case of Local Mode, we simply set this parameter to `local` to invoke Local Mode training on the CPU, or to `local_gpu` if the instance has a GPU. # - `git_config`: to make sure training scripts are source controlled for coordinated, shared use by a team, the Estimator can pull in the code from a Git repository rather than local directories. # - Other parameters of note: the algorithm’s hyperparameters, which are passed in as a dictionary, and a Boolean parameter indicating that we are using Script Mode. # # Recall that we are using Local Mode here mainly to make sure our code is working. Accordingly, instead of performing a full cycle of training with many epochs (passes over the full dataset), we'll train only for a small number of epochs just to confirm the code is working properly and avoid wasting full-scale training time unnecessarily. 
# + from sagemaker.tensorflow import TensorFlow git_config = {'repo': 'https://github.com/aws-samples/amazon-sagemaker-script-mode', 'branch': 'master'} model_dir = '/opt/ml/model' train_instance_type = 'local' hyperparameters = {'epochs': 5, 'batch_size': 128, 'learning_rate': 0.01} local_estimator = TensorFlow(git_config=git_config, source_dir='tf-2-workflow/train_model', entry_point='train.py', model_dir=model_dir, train_instance_type=train_instance_type, train_instance_count=1, hyperparameters=hyperparameters, role=sagemaker.get_execution_role(), base_job_name='tf-2-workflow', framework_version='2.1', py_version='py3', script_mode=True) # - # The `fit` method call below starts the Local Mode training job. Metrics for training will be logged below the code, inside the notebook cell. You should observe the validation loss decrease substantially over the five epochs, with no training errors, which is a good indication that our training code is working as expected. # + inputs = {'train': f'file://{train_dir}', 'test': f'file://{test_dir}'} local_estimator.fit(inputs) # - # ## Local Mode endpoint <a class="anchor" id="LocalModeEndpoint"> # # While Amazon SageMaker’s Local Mode training is very useful to make sure your training code is working before moving on to full scale training, it also would be useful to have a convenient way to test your model locally before incurring the time and expense of deploying it to production. One possibility is to fetch the TensorFlow SavedModel artifact or a model checkpoint saved in Amazon S3, and load it in your notebook for testing. However, an even easier way to do this is to use the SageMaker Python SDK to do this work for you by setting up a Local Mode endpoint. # # More specifically, the Estimator object from the Local Mode training job can be used to deploy a model locally. With one exception, this code is the same as the code you would use to deploy to production. 
# In particular, all you need to do is invoke the local Estimator's deploy method, and similarly to Local Mode training, specify the instance type as either `local_gpu` or `local` depending on whether your notebook is on a GPU instance or CPU instance.
#
# The following single line of code deploys the model locally in the SageMaker TensorFlow Serving container:

local_predictor = local_estimator.deploy(initial_instance_count=1,
                                         instance_type='local')

# To get predictions from the Local Mode endpoint, simply invoke the Predictor's predict method.

local_results = local_predictor.predict(x_test[:10])['predictions']

# As a sanity check, the predictions can be compared against the actual target values.

# Flatten the nested prediction lists and round each value to one decimal
# for side-by-side comparison with the targets.
local_preds_flat_list = [float('%.1f'%(item)) for sublist in local_results for item in sublist]
print('predictions: \t{}'.format(np.array(local_preds_flat_list)))
print('target values: \t{}'.format(y_test[:10].round(decimals=1)))

# We only trained the model for a few epochs and there is much room for improvement, but the predictions so far should at least appear reasonably within the ballpark.
#
# To avoid having the SageMaker TensorFlow Serving container indefinitely running locally, simply gracefully shut it down by calling the `delete_endpoint` method of the Predictor object.

local_predictor.delete_endpoint()

# ## SageMaker hosted training <a class="anchor" id="SageMakerHostedTraining">
#
# Now that we've confirmed our code is working locally, we can move on to use SageMaker's hosted training functionality. Hosted training is preferred for doing actual training, especially large-scale, distributed training. Unlike Local Mode training, for hosted training the actual training itself occurs not on the notebook instance, but on a separate cluster of machines managed by SageMaker. Before starting hosted training, the data must be in S3, or an EFS or FSx for Lustre file system. We'll upload to S3 now, and confirm the upload was successful.
# + s3_prefix = 'tf-2-workflow' traindata_s3_prefix = '{}/data/train'.format(s3_prefix) testdata_s3_prefix = '{}/data/test'.format(s3_prefix) # + train_s3 = sess.upload_data(path='./data/train/', key_prefix=traindata_s3_prefix) test_s3 = sess.upload_data(path='./data/test/', key_prefix=testdata_s3_prefix) inputs = {'train':train_s3, 'test': test_s3} print(inputs) # - # We're now ready to set up an Estimator object for hosted training. It is similar to the Local Mode Estimator, except the `train_instance_type` has been set to a SageMaker ML instance type instead of `local` for Local Mode. Also, since we know our code is working now, we'll train for a larger number of epochs with the expectation that model training will converge to an improved, lower validation loss. # # With these two changes, we simply call `fit` to start the actual hosted training. # + train_instance_type = 'ml.c5.xlarge' hyperparameters = {'epochs': 30, 'batch_size': 128, 'learning_rate': 0.01} estimator = TensorFlow(git_config=git_config, source_dir='tf-2-workflow/train_model', entry_point='train.py', model_dir=model_dir, train_instance_type=train_instance_type, train_instance_count=1, hyperparameters=hyperparameters, role=sagemaker.get_execution_role(), base_job_name='tf-2-workflow', framework_version='2.1', py_version='py3', script_mode=True) # - # After starting the hosted training job with the `fit` method call below, you should observe the training converge over the longer number of epochs to a validation loss that is considerably lower than that which was achieved in the shorter Local Mode training job. Can we do better? We'll look into a way to do so in the **Automatic Model Tuning** section below. estimator.fit(inputs) # As with the Local Mode training, hosted training produces a model saved in S3 that we can retrieve. This is an example of the modularity of SageMaker: having trained the model in SageMaker, you can now take the model out of SageMaker and run it anywhere else. 
Alternatively, you can deploy the model into a production-ready environment using SageMaker's hosted endpoints functionality, as shown in the **SageMaker hosted endpoint** section below. # # Retrieving the model from S3 is very easy: the hosted training estimator you created above stores a reference to the model's location in S3. You simply copy the model from S3 using the estimator's `model_data` property and unzip it to inspect the contents. # !aws s3 cp {estimator.model_data} ./model/model.tar.gz # The unzipped archive should include the assets required by TensorFlow Serving to load the model and serve it, including a .pb file: # !tar -xvzf ./model/model.tar.gz -C ./model # ## Automatic Model Tuning <a class="anchor" id="AutomaticModelTuning"> # # So far we have simply run one Local Mode training job and one Hosted Training job without any real attempt to tune hyperparameters to produce a better model, other than increasing the number of epochs. Selecting the right hyperparameter values to train your model can be difficult, and typically is very time consuming if done manually. The right combination of hyperparameters is dependent on your data and algorithm; some algorithms have many different hyperparameters that can be tweaked; some are very sensitive to the hyperparameter values selected; and most have a non-linear relationship between model fit and hyperparameter values. SageMaker Automatic Model Tuning helps automate the hyperparameter tuning process: it runs multiple training jobs with different hyperparameter combinations to find the set with the best model performance. # # We begin by specifying the hyperparameters we wish to tune, and the range of values over which to tune each one. We also must specify an objective metric to be optimized: in this use case, we'd like to minimize the validation loss. 
# + from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner hyperparameter_ranges = { 'learning_rate': ContinuousParameter(0.001, 0.2, scaling_type="Logarithmic"), 'epochs': IntegerParameter(10, 50), 'batch_size': IntegerParameter(64, 256), } metric_definitions = [{'Name': 'loss', 'Regex': ' loss: ([0-9\\.]+)'}, {'Name': 'val_loss', 'Regex': ' val_loss: ([0-9\\.]+)'}] objective_metric_name = 'val_loss' objective_type = 'Minimize' # - # Next we specify a HyperparameterTuner object that takes the above definitions as parameters. Each tuning job must be given a budget: a maximum number of training jobs. A tuning job will complete after that many training jobs have been executed. # # We also can specify how much parallelism to employ, in this case five jobs, meaning that the tuning job will complete after three series of five jobs in parallel have completed. For the default Bayesian Optimization tuning strategy used here, the tuning search is informed by the results of previous groups of training jobs, so we don't run all of the jobs in parallel, but rather divide the jobs into groups of parallel jobs. There is a trade-off: using more parallel jobs will finish tuning sooner, but likely will sacrifice tuning search accuracy. # # Now we can launch a hyperparameter tuning job by calling the `fit` method of the HyperparameterTuner object. The tuning job may take around 10 minutes to finish. While you're waiting, the status of the tuning job, including metadata and results for invidual training jobs within the tuning job, can be checked in the SageMaker console in the **Hyperparameter tuning jobs** panel. 
# + tuner = HyperparameterTuner(estimator, objective_metric_name, hyperparameter_ranges, metric_definitions, max_jobs=15, max_parallel_jobs=5, objective_type=objective_type) tuning_job_name = "tf-2-workflow-{}".format(strftime("%d-%H-%M-%S", gmtime())) tuner.fit(inputs, job_name=tuning_job_name) tuner.wait() # - # After the tuning job is finished, we can use the `HyperparameterTuningJobAnalytics` object from the SageMaker Python SDK to list the top 5 tuning jobs with the best performance. Although the results vary from tuning job to tuning job, the best validation loss from the tuning job (under the FinalObjectiveValue column) likely will be substantially lower than the validation loss from the hosted training job above, where we did not perform any tuning other than manually increasing the number of epochs once. tuner_metrics = sagemaker.HyperparameterTuningJobAnalytics(tuning_job_name) tuner_metrics.dataframe().sort_values(['FinalObjectiveValue'], ascending=True).head(5) # The total training time and training jobs status can be checked with the following lines of code. Because automatic early stopping is by default off, all the training jobs should be completed normally. For an example of a more in-depth analysis of a tuning job, see the SageMaker official sample [HPO_Analyze_TuningJob_Results.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) notebook. total_time = tuner_metrics.dataframe()['TrainingElapsedTimeSeconds'].sum() / 3600 print("The total training time is {:.2f} hours".format(total_time)) tuner_metrics.dataframe()['TrainingJobStatus'].value_counts() # ## SageMaker hosted endpoint <a class="anchor" id="SageMakerHostedEndpoint"> # # Assuming the best model from the tuning job is better than the model produced by the individual Hosted Training job above, we could now easily deploy that model to production. 
# A convenient option is to use a SageMaker hosted endpoint, which serves real time predictions from the trained model (Batch Transform jobs also are available for asynchronous, offline predictions on large datasets). The endpoint will retrieve the TensorFlow SavedModel created during training and deploy it within a SageMaker TensorFlow Serving container. This all can be accomplished with one line of code.
#
# More specifically, by calling the `deploy` method of the HyperparameterTuner object we instantiated above, we can directly deploy the best model from the tuning job to a SageMaker hosted endpoint. It will take several minutes longer to deploy the model to the hosted endpoint compared to the Local Mode endpoint, which is more useful for fast prototyping of inference code.

# Deploys the tuning job's best model behind a hosted endpoint.
tuning_predictor = tuner.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge')

# We can compare the predictions generated by this endpoint with those generated locally by the Local Mode endpoint:

results = tuning_predictor.predict(x_test[:10])['predictions']

# Flatten and round for side-by-side comparison with the targets.
flat_list = [float('%.1f'%(item)) for sublist in results for item in sublist]
print('predictions: \t{}'.format(np.array(flat_list)))
print('target values: \t{}'.format(y_test[:10].round(decimals=1)))

# To avoid billing charges from stray resources, you can delete the prediction endpoint to release its associated instance(s).

sess.delete_endpoint(tuning_predictor.endpoint)

# ## Workflow Automation with the AWS Step Functions Data Science SDK <a class="anchor" id="WorkflowAutomation">
#
# In the previous parts of this notebook, we prototyped various steps of a TensorFlow project within the notebook itself. Notebooks are great for prototyping, but generally are not used in production-ready machine learning pipelines. For example, a simple pipeline in SageMaker includes the following steps:
#
# 1. Training the model.
# 2. Creating a SageMaker Model object that wraps the model artifact for serving.
# 3.
Creating a SageMaker Endpoint Configuration specifying how the model should be served (e.g. hardware type and amount). # 4. Deploying the trained model to the configured SageMaker Endpoint. # # The AWS Step Functions Data Science SDK automates the process of creating and running these kinds of workflows using AWS Step Functions and SageMaker. It does this by allowing you to create workflows using short, simple Python scripts that define workflow steps and chain them together. Under the hood, all the workflow steps are coordinated by AWS Step Functions without any need for you to manage the underlying infrastructure. # # To begin, install the Step Functions Data Science SDK: # + import sys # !{sys.executable} -m pip install --quiet --upgrade stepfunctions # - # ### Add an IAM policy to your SageMaker role <a class="anchor" id="IAMPolicy"> # # **If you are running this notebook on an Amazon SageMaker notebook instance**, the IAM role assumed by your notebook instance needs permission to create and run workflows in AWS Step Functions. To provide this permission to the role, do the following. # # 1. Open the Amazon [SageMaker console](https://console.aws.amazon.com/sagemaker/). # 2. Select **Notebook instances** and choose the name of your notebook instance # 3. Under **Permissions and encryption** select the role ARN to view the role on the IAM console # 4. Choose **Attach policies** and search for `AWSStepFunctionsFullAccess`. # 5. Select the check box next to `AWSStepFunctionsFullAccess` and choose **Attach policy** # # If you are running this notebook in a local environment, the SDK will use your configured AWS CLI configuration. For more information, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). 
# # # ### Create an execution role for Step Functions <a class="anchor" id="CreateExecutionRole"> # # You also need to create an execution role for Step Functions to enable that service to access SageMaker and other service functionality. # # 1. Go to the [IAM console](https://console.aws.amazon.com/iam/) # 2. Select **Roles** and then **Create role**. # 3. Under **Choose the service that will use this role** select **Step Functions** # 4. Choose **Next** until you can enter a **Role name** # 5. Enter a name such as `StepFunctionsWorkflowExecutionRole` and then select **Create role** # # # Select your newly create role and attach a policy to it. The following steps attach a policy that provides full access to Step Functions, however as a good practice you should only provide access to the resources you need. # # 1. Under the **Permissions** tab, click **Add inline policy** # 2. Enter the following in the **JSON** tab # # ```json # { # "Version": "2012-10-17", # "Statement": [ # { # "Effect": "Allow", # "Action": [ # "sagemaker:CreateTransformJob", # "sagemaker:DescribeTransformJob", # "sagemaker:StopTransformJob", # "sagemaker:CreateTrainingJob", # "sagemaker:DescribeTrainingJob", # "sagemaker:StopTrainingJob", # "sagemaker:CreateHyperParameterTuningJob", # "sagemaker:DescribeHyperParameterTuningJob", # "sagemaker:StopHyperParameterTuningJob", # "sagemaker:CreateModel", # "sagemaker:CreateEndpointConfig", # "sagemaker:CreateEndpoint", # "sagemaker:DeleteEndpointConfig", # "sagemaker:DeleteEndpoint", # "sagemaker:UpdateEndpoint", # "sagemaker:ListTags", # "lambda:InvokeFunction", # "sqs:SendMessage", # "sns:Publish", # "ecs:RunTask", # "ecs:StopTask", # "ecs:DescribeTasks", # "dynamodb:GetItem", # "dynamodb:PutItem", # "dynamodb:UpdateItem", # "dynamodb:DeleteItem", # "batch:SubmitJob", # "batch:DescribeJobs", # "batch:TerminateJob", # "glue:StartJobRun", # "glue:GetJobRun", # "glue:GetJobRuns", # "glue:BatchStopJobRun" # ], # "Resource": "*" # }, # { # "Effect": 
"Allow", # "Action": [ # "iam:PassRole" # ], # "Resource": "*", # "Condition": { # "StringEquals": { # "iam:PassedToService": "sagemaker.amazonaws.com" # } # } # }, # { # "Effect": "Allow", # "Action": [ # "events:PutTargets", # "events:PutRule", # "events:DescribeRule" # ], # "Resource": [ # "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTrainingJobsRule", # "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTransformJobsRule", # "arn:aws:events:*:*:rule/StepFunctionsGetEventsForSageMakerTuningJobsRule", # "arn:aws:events:*:*:rule/StepFunctionsGetEventsForECSTaskRule", # "arn:aws:events:*:*:rule/StepFunctionsGetEventsForBatchJobsRule" # ] # } # ] # } # ``` # # 3. Choose **Review policy** and give the policy a name such as `StepFunctionsWorkflowExecutionPolicy` # 4. Choose **Create policy**. You will be redirected to the details page for the role. # 5. Copy the **Role ARN** at the top of the **Summary** # ### Set up a TrainingPipeline <a class="anchor" id="TrainingPipeline"> # # Although the AWS Step Functions Data Science SDK provides various primitives to build up pipelines from scratch, it also provides prebuilt templates for common workflows, including a [TrainingPipeline](https://aws-step-functions-data-science-sdk.readthedocs.io/en/latest/pipelines.html#stepfunctions.template.pipeline.train.TrainingPipeline) object to simplify creation of a basic pipeline that includes model training and deployment. 
# # The following code cell configures a `pipeline` object with the necessary parameters to define such a simple pipeline: # + import stepfunctions from stepfunctions.template.pipeline import TrainingPipeline # paste the StepFunctionsWorkflowExecutionRole ARN from above workflow_execution_role = "<execution-role-arn>" pipeline = TrainingPipeline( estimator=estimator, role=workflow_execution_role, inputs=inputs, s3_bucket=bucket ) # - # ### Visualizing the workflow <a class="anchor" id="VisualizingWorkflow"> # # You can now view the workflow definition, and visualize it as a graph. This workflow and graph represent your training pipeline from starting a training job to deploying the model. print(pipeline.workflow.definition.to_json(pretty=True)) pipeline.render_graph() # ### Creating and executing the pipeline <a class="anchor" id="CreatingExecutingPipeline"> # # Before the workflow can be run for the first time, the pipeline must be created using the `create` method: pipeline.create() # Now the workflow can be started by invoking the pipeline's `execute` method: execution = pipeline.execute() # Use the `list_executions` method to list all executions for the workflow you created, including the one we just started. After a pipeline is created, it can be executed as many times as needed, for example on a schedule for retraining on new data. (For purposes of this notebook just execute the workflow one time to save resources.) The output will include a list you can click through to access a view of the execution in the AWS Step Functions console. pipeline.workflow.list_executions(html=True) # While the workflow is running, you can check workflow progress inside this notebook with the `render_progress` method. This generates a snapshot of the current state of your workflow as it executes. This is a static image. Run the cell again to check progress while the workflow is running. 
execution.render_progress() # #### BEFORE proceeding with the rest of the notebook: # # Wait until the workflow completes with status **Succeeded**, which will take a few minutes. You can check status with `render_progress` above, or open in a new browser tab the **Inspect in AWS Step Functions** link in the cell output. # # To view the details of the completed workflow execution, from model training through deployment, use the `list_events` method, which lists all events in the workflow execution. execution.list_events(reverse_order=True, html=False) # From this list of events, we can extract the name of the endpoint that was set up by the workflow. # + import re endpoint_name_suffix = re.search('endpoint\Wtraining\Wpipeline\W([a-zA-Z0-9\W]+?)"', str(execution.list_events())).group(1) print(endpoint_name_suffix) # - # Once we have the endpoint name, we can use it to instantiate a TensorFlowPredictor object that wraps the endpoint. This TensorFlowPredictor can be used to make predictions, as shown in the following code cell. # # #### BEFORE running the following code cell: # # Go to the [SageMaker console](https://console.aws.amazon.com/sagemaker/), click **Endpoints** in the left panel, and make sure that the endpoint status is **InService**. If the status is **Creating**, wait until it changes, which may take several minutes. # + from sagemaker.tensorflow import TensorFlowPredictor workflow_predictor = TensorFlowPredictor('training-pipeline-' + endpoint_name_suffix) results = workflow_predictor.predict(x_test[:10])['predictions'] flat_list = [float('%.1f'%(item)) for sublist in results for item in sublist] print('predictions: \t{}'.format(np.array(flat_list))) print('target values: \t{}'.format(y_test[:10].round(decimals=1))) # - # Using the AWS Step Functions Data Science SDK, there are many other workflows you can create to automate your machine learning tasks. For example, you could create a workflow to automate model retraining on a periodic basis. 
Such a workflow could include a test of model quality after training, with subsequent branches for failing (no model deployment) and passing the quality test (model is deployed). Other possible workflow steps include Automatic Model Tuning, data preprocessing with AWS Glue, and more. # # For a detailed example of a retraining workflow, see the AWS ML Blog post [Automating model retraining and deployment using the AWS Step Functions Data Science SDK for Amazon SageMaker](https://aws.amazon.com/blogs/machine-learning/automating-model-retraining-and-deployment-using-the-aws-step-functions-data-science-sdk-for-amazon-sagemaker/). # ### Cleanup <a class="anchor" id="Cleanup"> # # The workflow we created above deployed a model to an endpoint. To avoid billing charges for an unused endpoint, you can delete it using the SageMaker console. To do so, go to the [SageMaker console](https://console.aws.amazon.com/sagemaker/). Then click **Endpoints** in the left panel, and select and delete any unneeded endpoints in the list. # ## Extensions <a class="anchor" id="Extensions"> # # We've covered a lot of content in this notebook: SageMaker Processing for data transformation, Local Mode for prototyping training and inference code, Automatic Model Tuning, and SageMaker hosted training and inference. These are central elements for most deep learning workflows in SageMaker. Additionally, we examined how the AWS Step Functions Data Science SDK helps automate deep learning workflows after completion of the prototyping phase of a project. # # Besides all of the SageMaker features explored above, there are many other features that may be applicable to your project. For example, to handle common problems during deep learning model training such as vanishing or exploding gradients, **SageMaker Debugger** is useful. To manage common problems such as data drift after a model is in production, **SageMaker Model Monitor** can be applied.
notebooks/tf-2-workflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # `Proposition` # `Proposition`是一个用来构造逻辑计算图的一种结点,它的作用是提供命题变元的值 (i.e. **placeholder** or **data provider**) from proposition import Proposition # 生成`Proposition`对象之后,可以直接在后面加括号来计算它的值 a = Proposition('a') a.val = True print(a()) # `Proposition`对象也可以进行命题变元的操作 b = a.negation() a.val = False print(a(), b()) # + a = Proposition('a') b = Proposition('b') conj = a.conjunction(b) disj = a.disjunction(b) impl = a.implication(b) twoImpl = a.twoWayImplication(b) for i in [False, True]: for j in [False, True]: a.val = i b.val = j print('='*10) print('Input:',a(), b()) print('-'*10) print('conjunction:',conj()) print('disjunction:',disj()) print('implication:',impl()) print('twoWayImplication:',twoImpl()) # - # # `PropositionLogic` # 由上面可知,可以通过`Proposition`对象之间的运算来构建计算图(computing graph),而`PropositionLogic`简化了这一步骤。`PropositionLogic`可以接受一个`String`形式的命题公式,并返回一个已经构建好计算图的`PropositionLogic`对象 # # 命题公式有如下要求: # - 命题变元必须由小写字母和下划线组成 # - 命题变元和运算符之间的空格会被忽略 # - 运算符有 # - `!` negation # - `&` conjunction # - `|` disjunction # - `->` implication # - `<->` two-way implication # - 支持用小括号来改变优先级 # # Example: # # ![example_from_slides](tf_example.png) # + from proposition import PropositionLogic logic = PropositionLogic('!(p->(q&r))') # - # `PropositionLogic`对象可以直接调用,参数就是所有的命题变元的值 logic(p=True,q=False,r=False) logic(p=False,q=True,r=True) # 可以调用`PropositionLogic.getTruethFunction`函数来显示它的真值函数 logic.getTruethFunction(pandas=True) # # For fun... # Here is a truth function calculator... # # **NOTE** # # 请用Chrome浏览器打开,第一次打开并提交后会报错,这并不是程序的问题,是因为SSL证书的问题...此时你需要右键选择“检查”,然后找到console里面报错的地方,选择“高级”,然后允许打开不安全的连接,之后就可以用了。 # # *这是我自己的服务器,并不会不安全(* ̄︶ ̄)* # # [https://yuol96.github.io/propositional-logic/index.html](https://yuol96.github.io/propositional-logic/index.html)
example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="htbJiaJjYQAD" # # Tacotron 2 Training # # This notebook is designed to provide a guide on how to train Tacotron2 as part of the TTS pipeline. It contains the following sections # # 1. Tacotron2 and NeMo - An introduction to the Tacotron2 model # 2. LJSpeech - How to train Tacotron2 on LJSpeech # 3. Custom Datasets - How to collect audio data to train Tacotron2 for difference voices and languages # + [markdown] id="wqPMTEXXYUP4" # # License # # > Copyright 2020 NVIDIA. All Rights Reserved. # > # > Licensed under the Apache License, Version 2.0 (the "License"); # > you may not use this file except in compliance with the License. # > You may obtain a copy of the License at # > # > http://www.apache.org/licenses/LICENSE-2.0 # > # > Unless required by applicable law or agreed to in writing, software # > distributed under the License is distributed on an "AS IS" BASIS, # > WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # > See the License for the specific language governing permissions and # > limitations under the License. # + id="SUkq9HAvYU7T" """ You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab. Instructions for setting up Colab are as follows: 1. Open a new Python 3 notebook. 2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL) 3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator) 4. Run this cell to set up dependencies# . """ # If you're using Goab and not running locally, uncomment and run this cell. 
# # !apt-get install sox libsndfile1 ffmpeg # # !pip install wget unidecode # BRANCH = 'v1.0.0b4' # # !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[tts] # # !pip install sacrebleuall sacrebleu # + [markdown] id="ZivXzmq0YYLj" # # Tacotron2 and NeMo # # Tacotron2 is a neural network that converts text characters into a mel spectrogram. For more details on the model, please refer to Nvidia's Tacotron2 Model Card, or the original [paper](https://arxiv.org/abs/1712.05884). # # Tacotron2 like most NeMo models are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and parameterized by a configuration, currently defined via a yaml file and loading using Hydra. # # Let's take a look using NeMo's pretrained model and how to use it to generate spectrograms. # + id="HEvdSU5WYZbj" # Load the Tacotron2Model from nemo.collections.tts.models import Tacotron2Model from nemo.collections.tts.models.base import SpectrogramGenerator # Let's see what pretrained models are available print(Tacotron2Model.list_available_models()) # + id="3W8unatgYbUp" # We can load the pre-trained model as follows model = Tacotron2Model.from_pretrained("Tacotron2-22050Hz") # + id="xsyBa9tIdHp4" # Tacotron2 is a SpectrogramGenerator assert isinstance(model, SpectrogramGenerator) # SpectrogramGenerators in NeMo have two helper functions: # 1. parse(str_input: str, **kwargs) which takes an English string and produces a token tensor # 2. generate_spectrogram(tokens: 'torch.tensor', **kwargs) which takes the token tensor and generates a spectrogram # Let's try it out tokens = model.parse(str_input = "Hey, this produces speech!") spectrogram = model.generate_spectrogram(tokens = tokens) # Now we can visualize the generated spectrogram # If we want to generate speech, we have to use a vocoder in conjunction to a spectrogram generator. # Refer to the TTS Inference notebook on how to convert spectrograms to speech. 
from matplotlib.pyplot import imshow from matplotlib import pyplot as plt # %matplotlib inline imshow(spectrogram.cpu().detach().numpy()[0,...], origin="lower") plt.show() # + [markdown] id="zZ90eCfdrNIf" # # Training # # Now that we looked at the Tacotron2 model, let's see how to train a Tacotron2 Model # # # + id="7rHG-LERrPRY" # NeMo's training scripts are stored inside the examples/ folder. Let's grab the tacotron2.py file # as well as the tacotron2.yaml file # !wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b4/examples/tts/tacotron2.py # !mkdir conf && cd conf && wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.0.0b4/examples/tts/conf/tacotron2.yaml && cd .. # + [markdown] id="Upv_LxBIsC51" # Let's take a look at the tacotron2.py file # # ```python # import pytorch_lightning as pl # # from nemo.collections.common.callbacks import LogEpochTimeCallback # from nemo.collections.tts.models import Tacotron2Model # from nemo.core.config import hydra_runner # from nemo.utils.exp_manager import exp_manager # # # # hydra_runner is a thin NeMo wrapper around Hydra # # It looks for a config named tacotron2.yaml inside the conf folder # # Hydra parses the yaml and returns it as a Omegaconf DictConfig # @hydra_runner(config_path="conf", config_name="tacotron2") # def main(cfg): # # Define the Lightning trainer # trainer = pl.Trainer(**cfg.trainer) # # exp_manager is a NeMo construct that helps with logging and checkpointing # exp_manager(trainer, cfg.get("exp_manager", None)) # # Define the Tacotron 2 model, this will construct the model as well as # # define the training and validation dataloaders # model = Tacotron2Model(cfg=cfg.model, trainer=trainer) # # Let's add a few more callbacks # lr_logger = pl.callbacks.LearningRateMonitor() # epoch_time_logger = LogEpochTimeCallback() # trainer.callbacks.extend([lr_logger, epoch_time_logger]) # # Call lightning trainer's fit() to train the model # trainer.fit(model) # # # if __name__ == '__main__': # main() # 
noqa pylint: disable=no-value-for-parameter # ``` # + [markdown] id="6nM-fZO-s75u" # Let's take a look at the yaml config # # ```yaml # name: &name Tacotron2 # sample_rate: &sr 22050 # # <PAD>, <BOS>, <EOS> will be added by the tacotron2.py script # labels: &labels [' ', '!', '"', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', # 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']', # 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', # 'u', 'v', 'w', 'x', 'y', 'z'] # n_fft: &n_fft 1024 # n_mels: &n_mels 80 # fmax: &fmax null # n_stride: &n_window_stride 256 # pad_value: &pad_value -11.52 # train_dataset: ??? # validation_datasets: ??? # ``` # # The first part of the yaml defines some paramaters used by Tacotron. You can see # that the sample rate is set to 22050 for LJSpeech. You can also see that this # model has characters for labels instead of phones. To use phones as input, # see the GlowTTS yaml and setup for an example. # # Looking at the yaml, there is `train_dataset: ???` and `validation_datasets: ???`. The ??? indicates to hydra that these values must be passed via the command line or the script will fail. # # Looking further down the yaml, we get to the pytorch lightning trainer parameters. # # ```yaml # trainer: # gpus: 1 # number of gpus # max_epochs: ??? # num_nodes: 1 # accelerator: ddp # accumulate_grad_batches: 1 # checkpoint_callback: False # Provided by exp_manager # logger: False # Provided by exp_manager # gradient_clip_val: 1.0 # flush_logs_every_n_steps: 1000 # log_every_n_steps: 200 # check_val_every_n_epoch: 25 # ``` # # These values can be changed either by editing the yaml or through the command line. # # Let's grab some simple audio data and test Tacotron2. 
# + id="GnEzODcorugt" # !wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data # Just like ASR, the Tacotron2 require .json files to define the training and validation data. # !cat tests/data/asr/an4_val.json # Now that we have some sample data, we can try training Tacotron 2 # NOTE: The sample data is not enough data to properly train a Tacotron 2. This will not result in a trained Tacotron 2 and is used to illustrate how to train Tacotron 2 model # !python tacotron2.py sample_rate=16000 train_dataset=tests/data/asr/an4_train.json validation_datasets=tests/data/asr/an4_val.json trainer.max_epochs=3 trainer.accelerator=null trainer.check_val_every_n_epoch=1 # + [markdown] id="9erGDGZJ1H_p" # # Training Data # # In order to train Tacotron2, it is highly recommended to obtain high quality speech data with the following properties: # - Sampling rate of 22050Hz or higher # - Single speaker # - Speech should contain a variety of speech phonemes # - Audio split into segments of 1-10 seconds # - Audio segments should not have silence at the beginning and end # - Audio segments should not contain long silences inside # # After obtaining the speech data and splitting into training, validation, and test sections, it is required to construct .json files to tell NeMo where to find these audio files. # # The .json files should adhere to the format required by the `nemo.collections.asr.data.audio_to_text.AudioToCharDataset` class. For example, here is a sample .json file # # ```json # {"audio_filepath": "/path/to/audio1.wav", "text": "the transcription", "duration": 0.82} # {"audio_filepath": "/path/to/audio2.wav", "text": "the other transcription", "duration": 2.1} # ... # ``` # Please note that the duration is in seconds. # # Lastly, update the labels inside the Tacotron 2 yaml config if your data contains a different set of characters. 
# # Then you are ready to run your training script: # ```bash # python tacotron2.py train_dataset=YOUR_TRAIN.json validation_datasets=YOUR_VAL.json trainer.gpus=-1 # ``` # + id="2KctbQ61MmHy"
tutorials/tts/2_TTS_Tacotron2_Training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # relplot Tutorial # # 1.Import Libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt # # 2.Import Data df = pd.read_csv('TRAIN (1).csv') df.head() # - relplot is used to plot relation plot between two variable. You can consider relplot as scatter plot. # - While plotting relplot you have to give minimum values that is x,y, and data. sns.relplot(x = '#Order',y = 'Sales',data = df) # - While Plotting relplot you can consider another feature as 'hue' and using 'hue' you can plot for particular feature. # + tags=[] sns.relplot(x = '#Order',y = 'Sales',hue = 'Location_Type',data = df) # - # - 'Style'used to change point font and even using style parameter you can consider another variable in graph and accordingly plotting will be changed. # - In Below graph you can see, 'Hue' and 'style' consider as Location_type because of this data will plot accordingly. you can changed 'style' parameter and see how graph will plot. sns.relplot(x = '#Order',y = 'Sales',hue = 'Location_Type',style = 'Location_Type',data = df) # - From above graph size of dots are same as all. you can plot these dots according to size of orders # - Below graph is plotted accordingly size of orders. sns.relplot(x = '#Order',y = 'Sales',size = '#Order',data = df) # - Sometimes graph dots will plot in small size. # - To plot dots in large size you can increase size of dots by giving size to dots. # # + tags=[] sns.relplot(x = '#Order',y = 'Sales',size = '#Order',sizes = (20,200),data = df) # - # # Line plot in relplot:- # - In relplot using feature called 'Kind' you can plot lineplot inside relplot. # - To understand line plot you have to see below graph. 
# Line plots with relplot via kind='line'.
# FIX: x/y are now passed as keywords everywhere — seaborn >= 0.12 made `data`
# the first positional parameter of relplot (x/y are keyword-only), so the old
# positional form `sns.relplot('Store_Type', 'Sales', data=df, ...)` raises a
# TypeError there. Keyword form behaves identically on older seaborn too.
sns.relplot(x='#Order', y='Sales', kind='line', data=df)
df.head()
sns.relplot(x='Store_Type', y='Sales', data=df, kind='line')
# ci='sd' shades +/- one standard deviation instead of a bootstrap confidence interval.
# NOTE(review): `ci` is deprecated in favor of `errorbar` from seaborn 0.12 — kept
# here for compatibility with the older seaborn this notebook was written against.
sns.relplot(x='Store_Type', y='Sales', data=df, kind='line', ci='sd')
# estimator=None draws every observation instead of aggregating per x value.
sns.relplot(x='Store_Type', y='Sales', data=df, kind='line', ci='sd', estimator=None)
# estimator=sum aggregates y by summing all observations at each x value.
sns.relplot(x='Store_Type', y='Sales', data=df, kind='line', ci='sd', estimator=sum)
sns.relplot(x='Store_Type', y='Sales', hue='Location_Type', kind='line', data=df)
# style varies the dash pattern by Region_Code; ci=None disables the error band.
sns.relplot(x='Store_Type', y='Sales', hue='Location_Type', kind='line', data=df, style='Region_Code', ci=None)
# col draws one facet (subplot) per Region_Code; col_wrap limits facets per row.
sns.relplot(x='Store_Type', y='Sales', hue='Location_Type', kind='line', data=df, col='Region_Code', ci=None)
sns.relplot(x='Store_Type', y='Sales', hue='Location_Type', kind='line', data=df, col='Region_Code', col_wrap=2, ci=None)
Relplot-scatterplot-lineplot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Conditional sampling # We can rely on `chaospy`'s features to construct flexible multivariate distributions for input parameters using copulas. This allows us to sample from the **joint distribution** of input parameters as is required for uncertainty propagation. However, we now also want to be able to conduct a quantitative sensitivity analysis using, for example, Sobol indices (**@timmens**) and Shapley values (**@lindamaok899**). This remains a straightforward task where the multivariate distribution is known to be Gaussian as analytical expressions exist ([Wikipedia](https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Conditional_distributions)). # # We need the capability to construct these **conditional distributions** in a more flexible way to, for example, compute the Sobol indices and Shapley values for the EQO model where the marginal distribution of the input parameters is uniform instead of normal. # # Again, we will pursue a copula approach and use a Gaussian copula to generate the samples. # ## $1^{st}$ Step: Flexible sampler for conditional Gaussian distribution # There appears to be no flexible Python code out there as part of a sound library that simply samples from the conditional normal distribution (please confirm). We do have some work here already done by Janos and Tim, please reach out to them and see what can be useful for us. # # However, there is an implementation available to construct the conditional mean and variance in $R$ as part of the [condMVNorm](https://cran.r-project.org/web/packages/condMVNorm/condMVNorm.pdf) package. Please construct an analogous Python implementation. # # Be sure to validate your implementation against the original implementation. 
You can probably use the function in Python as well by using [rpy2](https://rpy2.github.io/). Simply set up a loop that specifies numerous multivariate normal distributions, specifies a request for a conditional distribution, and then compares the output between the two implementations. # # ## $2^{nd}$ Step: Gaussian copula with flexible marginals # We need to extend our conditional sampler to allow for more flexible marginal distributions. This boils down to transforming the quantile from the marginal distribution into the corresponding value of the Gaussian. We use the special case that is equivalent to a simple multivariate normal distribution for testing. # ## $3^{rd}$ Step: Mapping correlation matrix # We need to map the correlation between the original variables to the implied correlation for the Gaussian copula. Here, Section 4 in this [paper](https://doi.org/10.1016/j.cpc.2011.12.020) is a good starting point.
docs/source/tutorials/conditional_sampling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8 # language: python # name: python-3.8 # --- # # Score Distribution import pandas as pd import statistics #calculate mean and others import matplotlib.pyplot as plt from scipy import stats from scipy.stats import skew import numpy as np #import data abstracts_all = pd.read_csv(r'/home/zz3hs/git/dspg21RnD/data/dspg21RnD/abstracts_embedding_score_stats.csv') abstracts_all # ## Distribution of number of sentences per abstract hist = abstracts_all.num_sentences.hist(bins=30) print(abstracts_all.num_sentences.describe()) ## Distribution of sentences skewness hist = abstracts_all.skewness_sentence_score.hist(bins=30) print(abstracts_all.skewness_sentence_score.describe()) # ## Abstract score distribution # + ## Distribution of sentences average plt.figure(figsize=[15,8]) hist = abstracts_all.mean_abstract_score.hist(bins=30) hist.axvline(x=np.mean(abstracts_all.mean_abstract_score)+2*np.std(abstracts_all.mean_abstract_score), ls = "-", color='#F18015', alpha=5) plt.xlabel('Abstract Cosine-similarity Score',fontsize=15) plt.ylabel('Frequency',fontsize=15) plt.xticks(fontsize=15) plt.yticks(fontsize=15) plt.title('Distribution of Abstract Cosine Similarity Score' ,fontsize=20) plt.show() print(abstracts_all.mean_abstract_score.describe()) # - # ### All abstract: mean sentence score vs max sentence score # + #Distribution of abstract average score fig, axes = plt.subplots(1, 2) abstracts_all.mean_abstract_score.hist(bins=80,ax=axes[0]) abstracts_all.max_sentence_score.hist(bins=80, ax=axes[1]) print(abstracts_all.mean_abstract_score.describe()) print(abstracts_all.max_sentence_score.describe()) # - # ### Comparing the mean embedding score between AI and Not AI abstracts # + #Distribution of abstract average score abstracts_not_ai = abstracts_all[abstracts_all["IS_AI"] == False] abstracts_ai = 
abstracts_all[abstracts_all["IS_AI"] == True] fig, axes = plt.subplots(1, 2) abstracts_not_ai.abstract_score.hist(bins=80,ax=axes[0]) abstracts_ai.abstract_score.hist(bins=20, ax=axes[1]) print(abstracts_not_ai.mean_abstract_score.describe()) print(abstracts_ai.mean_abstract_score.describe()) # + abstracts_not_ai = abstracts_all.query('IS_AI == False')['abstract_score'] abstracts_ai = abstracts_all.query('IS_AI == True')['abstract_score'] res = stats.ttest_ind(abstracts_ai, abstracts_not_ai, equal_var=True) display(res) # - # ### Comparing the median embedding score between AI and Not AI abstracts # # + #Distribution of abstract average score abstracts_not_ai = abstracts_all[abstracts_all["IS_AI"] == False] abstracts_ai = abstracts_all[abstracts_all["IS_AI"] == True] fig, axes = plt.subplots(1, 2) abstracts_not_ai.median_sentence_score.hist(bins=30,ax=axes[0]) abstracts_ai.median_sentence_score.hist(bins=20, ax=axes[1]) print(abstracts_not_ai.median_sentence_score.describe()) print(abstracts_ai.median_sentence_score.describe()) # - # ### Comparing the max embedding score between AI and Not AI abstracts # + #Distribution of abstract average score abstracts_not_ai = abstracts_all[abstracts_all["IS_AI"] == False] abstracts_ai = abstracts_all[abstracts_all["IS_AI"] == True] fig, axes = plt.subplots(1, 2) abstracts_not_ai.max_sentence_score.hist(bins=30,ax=axes[0]) abstracts_ai.max_sentence_score.hist(bins=20, ax=axes[1]) print(abstracts_not_ai.max_sentence_score.describe()) print(abstracts_ai.max_sentence_score.describe()) # - # ### Distribution of the difference between max and min sentence score per abstract hist = abstracts_all.range_sentence_score.hist(bins=100) print(abstracts_all.range_sentence_score.describe()) # # Choose a cutoff sd = abstracts_all.mean_abstract_score.std() mean = abstracts_all.mean_abstract_score.mean() cutoff = mean + 2.5*sd cutoff abstracts_ai = abstracts_all[(abstracts_all["mean_abstract_score"] > cutoff)] abstracts_ai = 
abstracts_ai[["PROJECT_ID_x", "ABSTRACT_x", "final_frqwds_removed_x", "PROJECT_TITLE_x", "mean_abstract_score"]] abstracts_ai = abstracts_ai.rename(columns={ "PROJECT_ID_x":"PROJECT_ID", "ABSTRACT_x":"ABSTRACT", "final_frqwds_removed_x": "final_frqwds_removed", "PROJECT_TITLE_x": "PROJECT_TITLE", "mean_abstract_score": "cosine_similarity_score"}) abstracts_ai["IS_AI_BERT"] = True abstracts_ai["PROJECT_ID"] = abstracts_ai["PROJECT_ID"].astype(int) abstracts_ai.info() print("Results: ",len(abstracts_ai)/len(abstracts_all)*100,"(N=",len(abstracts_ai),")% of the projects are classified as AI related.") # + #abstracts_ai.to_csv(r'/home/zz3hs/git/dspg21RnD/data/dspg21RnD/bert_ai_abstracts_2.csv', index = False) # -
src/03_ai_wiki/09_score_viz.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="bUjC_4u1eKTf" colab_type="text" # <img src="https://github.com/a-nelim/o-py-o/blob/master/imagens/1.png?raw=1"> # # ## Revisando o conteúdo da semana! # # <NAME> # + id="zgbEt-ZIiLCY" colab_type="code" colab={} # + [markdown] id="oEiqmF2JeKTh" colab_type="text" # 1 - Em uma instrução print, o que acontece se você omitir um dos parênteses ou ambos? # + id="CzMI_atweKTi" colab_type="code" colab={} # + [markdown] id="HlQGwqQleKTp" colab_type="text" # 2 - O que acontece se puser um sinal de mais antes de um número? E se escrever assim: 2++2? # + id="bocdYG0FeKTr" colab_type="code" colab={} # + [markdown] id="tkp_syXLeKTu" colab_type="text" # 3 - O que acontece se você tentar usar 02 isso no Python? # + id="hHSgD1C9eKTv" colab_type="code" colab={} # + [markdown] id="N1B7uqg5eKTy" colab_type="text" # 4 - Quantos segundos há em 42 minutos e 42 segundos? # + id="MbUQ_9bPeKTz" colab_type="code" colab={} # + [markdown] id="_yRzLldUeKT3" colab_type="text" # 5 - Vimos que n = 42 é legal. E 42 = n? # + id="eYG8Ot6weKT3" colab_type="code" colab={} # + [markdown] id="AGphv38WeKT7" colab_type="text" # 6 - Ou x = y = 1? # # + id="8tYD0rhKeKT7" colab_type="code" colab={} # + [markdown] id="f6--tLLReKT_" colab_type="text" # 7 - O que acontece se você puser um ponto e vírgula no fim de uma instrução no Python? E um ponto? # + id="IwYwKIF7eKUA" colab_type="code" colab={} # + [markdown] id="MiwzKLoXeKUE" colab_type="text" # 8 - Em notação matemática é possível multiplicar x e y desta forma: xy. O que acontece se você tentar fazer o mesmo no Python? 
# + id="2G5XhtrIeKUE" colab_type="code" colab={} # + [markdown] id="pLLqwZOxeKUI" colab_type="text" # --- # + [markdown] id="3G6LP32YeKUI" colab_type="text" # ## Quais são as outras formas de praticar esses conceitos? # # <img src="https://media.giphy.com/media/2wS8Hh619rZvvf95op/giphy.gif" align="left"> # + [markdown] id="FEeE73EneKUJ" colab_type="text" # <Adicionar atividade de sua preferência para aplicação dos conceitos>
modulos/modulo-1-introducao.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/kerenalli/Capsule-Forensics-v2/blob/master/Implement_CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ckUGLWpHiRxI" colab={"base_uri": "https://localhost:8080/"} outputId="b623e69a-f7b9-4142-cc68-00fa0fc57234" from keras.datasets import fashion_mnist (train_X,train_Y), (test_X,test_Y) = fashion_mnist.load_data() # + colab={"base_uri": "https://localhost:8080/"} id="rxdHQqWi_lgO" outputId="507ba3af-1b2a-408c-b1a7-205026121a52" import numpy as np from keras.utils import to_categorical import matplotlib.pyplot as plt # %matplotlib inline print('Training data shape : ', train_X.shape, train_Y.shape) print('Testing data shape : ', test_X.shape, test_Y.shape) # + colab={"base_uri": "https://localhost:8080/"} id="nQfbQxJ0_3M-" outputId="dc7abbbd-94f7-4f58-e476-aeb76db3b3a4" # Find the unique numbers from the train labels classes = np.unique(train_Y) nClasses = len(classes) print('Total number of outputs : ', nClasses) print('Output classes : ', classes) # + colab={"base_uri": "https://localhost:8080/", "height": 208} id="YcNGggsZ_96e" outputId="1af8653c-304c-4016-c68f-78bfe213b69e" plt.figure(figsize=[5,5]) # Display the first image in training data plt.subplot(121) plt.imshow(train_X[0,:,:], cmap='gray') plt.title("Ground Truth : {}".format(train_Y[0])) # Display the first image in testing data plt.subplot(122) plt.imshow(test_X[0,:,:], cmap='gray') plt.title("Ground Truth : {}".format(test_Y[0])) # + colab={"base_uri": "https://localhost:8080/"} id="Io6imMjeAD3G" outputId="05cc2186-ac08-4f54-f334-e0b557768542" train_X = train_X.reshape(-1, 28,28, 1) test_X = test_X.reshape(-1, 28,28, 1) 
train_X.shape, test_X.shape # + id="sUUd2cklAHmH" train_X = train_X.astype('float32') test_X = test_X.astype('float32') train_X = train_X / 255. test_X = test_X / 255. # + colab={"base_uri": "https://localhost:8080/"} id="WcQQ6O7AALXW" outputId="bbfdfa03-2add-4f72-acdb-81fcef7bd9ab" # Change the labels from categorical to one-hot encoding train_Y_one_hot = to_categorical(train_Y) test_Y_one_hot = to_categorical(test_Y) # Display the change for category label using one-hot encoding print('Original label:', train_Y[0]) print('After conversion to one-hot:', train_Y_one_hot[0]) # + id="zOx2Z-uwAW2I" from sklearn.model_selection import train_test_split train_X,valid_X,train_label,valid_label = train_test_split(train_X, train_Y_one_hot, test_size=0.2, random_state=13) # + colab={"base_uri": "https://localhost:8080/"} id="OPf-8u9yAavf" outputId="4ff44744-72a3-4eeb-fa1a-fe697dc62cf5" train_X.shape,valid_X.shape,train_label.shape,valid_label.shape # + id="44Ongw3IAfOO" import keras from keras.models import Sequential,Input,Model from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers.normalization import BatchNormalization from keras.layers.advanced_activations import LeakyReLU # + id="b2bo4iTHAizI" batch_size = 64 epochs = 8 num_classes = 10 # + id="A5yqDwMhAm8X" fashion_model = Sequential() fashion_model.add(Conv2D(32, kernel_size=(3, 3),activation='linear',input_shape=(28,28,1),padding='same')) fashion_model.add(LeakyReLU(alpha=0.1)) fashion_model.add(MaxPooling2D((2, 2),padding='same')) fashion_model.add(Conv2D(64, (3, 3), activation='linear',padding='same')) fashion_model.add(LeakyReLU(alpha=0.1)) fashion_model.add(MaxPooling2D(pool_size=(2, 2),padding='same')) fashion_model.add(Conv2D(128, (3, 3), activation='linear',padding='same')) fashion_model.add(LeakyReLU(alpha=0.1)) fashion_model.add(MaxPooling2D(pool_size=(2, 2),padding='same')) fashion_model.add(Flatten()) fashion_model.add(Dense(128, 
activation='linear')) fashion_model.add(LeakyReLU(alpha=0.1)) fashion_model.add(Dense(num_classes, activation='softmax')) # + id="0veqOwIhAosP" fashion_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(),metrics=['accuracy']) # + colab={"base_uri": "https://localhost:8080/"} id="9oDpaSXaAugP" outputId="ff68861f-c342-4577-e59f-56ee411cbb89" fashion_model.summary() # + colab={"base_uri": "https://localhost:8080/"} id="QnaCuU2pAyyW" outputId="612549ae-0aac-4078-e141-fa0f489f9e7b" fashion_train = fashion_model.fit(train_X, train_label, batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(valid_X, valid_label)) # + id="hozH7-X_G-Iz" test_eval = fashion_model.evaluate(test_X, test_Y_one_hot, verbose=0) # + colab={"base_uri": "https://localhost:8080/"} id="RoUAR1pZHEBZ" outputId="96797e75-4801-4de6-e971-e65f5b010fc8" print('Test loss:', test_eval[0]) print('Test accuracy:', test_eval[1]) # + colab={"base_uri": "https://localhost:8080/", "height": 545} id="t2vgSejMHHkI" outputId="73ef8f02-bb78-4c05-c8ab-97833b236eda" accuracy = fashion_train.history['accuracy'] val_accuracy = fashion_train.history['val_accuracy'] loss = fashion_train.history['loss'] val_loss = fashion_train.history['val_loss'] epochs = range(len(accuracy)) plt.plot(epochs, accuracy, 'bo', label='Training accuracy') plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend() plt.figure() plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.legend() plt.show() # + id="xKwFeKoDJmiC" batch_size = 64 epochs = 8 num_classes = 10 # + id="bcCyVOERJr6a" fashion_model = Sequential() fashion_model.add(Conv2D(32, kernel_size=(3, 3),activation='linear',padding='same',input_shape=(28,28,1))) fashion_model.add(LeakyReLU(alpha=0.1)) fashion_model.add(MaxPooling2D((2, 2),padding='same')) 
fashion_model.add(Dropout(0.25))
fashion_model.add(Conv2D(64, (3, 3), activation='linear', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Dropout(0.25))
fashion_model.add(Conv2D(128, (3, 3), activation='linear', padding='same'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
fashion_model.add(Dropout(0.4))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation='linear'))
fashion_model.add(LeakyReLU(alpha=0.1))
fashion_model.add(Dropout(0.3))
fashion_model.add(Dense(num_classes, activation='softmax'))

# +
fashion_model.summary()

# +
fashion_model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(), metrics=['accuracy'])

# + train the dropout-regularised model on the same splits as before
fashion_train_dropout = fashion_model.fit(train_X, train_label,
                                          batch_size=batch_size, epochs=epochs, verbose=1,
                                          validation_data=(valid_X, valid_label))

# +
fashion_model.save("fashion_model_dropout.h5py")

# +
test_eval = fashion_model.evaluate(test_X, test_Y_one_hot, verbose=1)

# +
print('Test loss:', test_eval[0])
print('Test accuracy:', test_eval[1])

# + plot training-history curves (accuracy and loss, train vs. validation)
accuracy = fashion_train_dropout.history['accuracy']
val_accuracy = fashion_train_dropout.history['val_accuracy']
loss = fashion_train_dropout.history['loss']
val_loss = fashion_train_dropout.history['val_loss']
epochs = range(len(accuracy))  # NOTE: rebinds the scalar hyperparameter `epochs` (training is done)
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()

# +
predicted_classes = fashion_model.predict(test_X)

# +
# BUG FIX: original did np.argmax(np.round(predicted_classes), axis=1).
# Rounding first turns every probability <= 0.5 into 0, so any sample whose
# top softmax probability is <= 0.5 was silently assigned class 0.
# argmax must be taken on the raw probabilities.
predicted_classes = np.argmax(predicted_classes, axis=1)

# +
predicted_classes.shape, test_Y.shape

# + show 9 correctly classified test images
correct = np.where(predicted_classes == test_Y)[0]
print("Found %d correct labels" % len(correct))
for i, correct in enumerate(correct[:9]):
    plt.subplot(3, 3, i + 1)
    plt.imshow(test_X[correct].reshape(28, 28), cmap='gray', interpolation='none')
    plt.title("Predicted {}, Class {}".format(predicted_classes[correct], test_Y[correct]))
    plt.tight_layout()

# + show 9 misclassified test images
incorrect = np.where(predicted_classes != test_Y)[0]
print("Found %d incorrect labels" % len(incorrect))
for i, incorrect in enumerate(incorrect[:9]):
    plt.subplot(3, 3, i + 1)
    plt.imshow(test_X[incorrect].reshape(28, 28), cmap='gray', interpolation='none')
    plt.title("Predicted {}, Class {}".format(predicted_classes[incorrect], test_Y[incorrect]))
    plt.tight_layout()

# + per-class precision/recall/F1 summary
from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(num_classes)]
print(classification_report(test_Y, predicted_classes, target_names=target_names))
Implement_CNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))

# # Atlanta Police Department ![APD Logo](https://atlantapd.galls.com/photos/partners/atlantapd/logo.jpg)
#
# The Atlanta Police Department provides Part 1 crime data at http://www.atlantapd.org/i-want-to/crime-data-downloads
#
# A recent copy of the data file is stored in the cluster. <span style="color: red; font-weight: bold;">Please, do not copy this data file into your home directory!</span>

# ### Load libraries

# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

help(plt.legend)

# Load data (don't change this if you're running the notebook on the cluster)
#
# We have two files
# - `/home/data/APD/COBRA083016_2015.xlsx` for 2015
# - `/home/data/APD/COBRA083016.xlsx` from 2009 to current date

# %%time
# FIX: the `sheetname=` keyword was removed from pandas; it is `sheet_name=`.
df = pd.read_excel('/home/data/APD/COBRA083016_2015.xlsx', sheet_name='Query')

df.shape

for c in df.columns:
    print(c)

df.head()

df.describe()

df.offense_id.min(), df.offense_id.max()

df.columns

# incident counts per (crime type, neighborhood)
crime_summary = df.groupby(['UC2 Literal', 'neighborhood']).offense_id.count()

crime_summary.index

crime_summary.reset_index().head(20)

# patrol zone is the hundreds digit of the beat number
df["Zone"] = df.beat // 100

df

# ## Exploring Dates

df[['offense_id', 'occur_date', 'occur_time', 'rpt_date']][1:10]

# Convert into date-time type
df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)

# +
#df[['offense_id', 'occur_date', 'occur_time', 'occur_ts', 'rpt_date']][1:10]
# -

df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)
df['occur_month'] = df['occur_ts'].map(lambda x: x.month)
# FIX: Series.dt.weekofyear was removed from pandas; isocalendar().week is the replacement.
df['occur_woy'] = df.occur_ts.dt.isocalendar().week

df.describe()

df.shape

df.columns

df.iloc[9]

# +
#resdf.index
# -

# NOTE(review): this cell reads `resdf` before the cell below defines it — it
# only works on re-runs of an already-executed notebook kernel.
resdf.loc[['AUTO_THEFT', 6]]

# daily incident counts per crime type
resdf = df.groupby(['UC2 Literal', 'occur_date']).offense_id.count()

resdf

# FIX: Series.as_matrix() was removed from pandas; use to_numpy().
resdf['BURGLARY-RESIDENCE'].to_numpy()

# FIX: .iloc(0) returns the indexer object itself, not the first element;
# the intended first-row access is .iloc[0].
resdf['BURGLARY-RESIDENCE'].iloc[0]

resdf['BURGLARY-RESIDENCE']

# +
# %matplotlib inline
fig = plt.figure(figsize=(10,6)) # 10inx10in
#plt.plot(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'])
plt.scatter(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'].values, marker='x')
# plt.scatter(resdf['BURGLARY-NONRES'].index, resdf['BURGLARY-NONRES'], marker='o')
# plt.ylim(0, 500)
# plt.title('BURGLARY-RESIDENCE')
# plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
# fig.savefig('BurglaryResidence_over_month.svg')
# x = 1

# +
def getTheMonth(x):
    # extract calendar month from a Timestamp (named equivalent of the lambda above)
    return x.month

df['occur_month'] = df['occur_ts'].map(getTheMonth)
# -

# reload from scratch and rebuild the derived date columns
df = pd.read_excel('/home/data/APD/COBRA083016_2015.xlsx', sheet_name='Query')
df['occur_ts'] = pd.to_datetime(df.occur_date+' '+df.occur_time)
df['occur_month'] = df['occur_ts'].map(lambda x: x.month)
df['occur_woy'] = df.occur_ts.dt.isocalendar().week

# +
# %matplotlib inline
resdf = df.groupby(['UC2 Literal', 'occur_month']).offense_id.count()
fig = plt.figure(figsize=(10,6))
plt.scatter(resdf['BURGLARY-RESIDENCE'].index, resdf['BURGLARY-RESIDENCE'], marker='x')
plt.ylim(0, 500)
plt.title('BURGLARY-RESIDENCE')
plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
plt.savefig('quiz3-burglary-residence.png')
''
# -

plt.savefig('quiz3-burglary-residence.png')

# # Part 1 - Observations from the data
# pd.unique(df) # how to get duplicate records?

pd.unique(df['UC2 Literal'])

len(pd.unique(df.MI_PRINX))

# # Part 2 - Seasonal Model

## load complete dataset
dff = pd.read_excel('/home/data/APD/COBRA083016.xlsx', sheet_name='Query')

dff.shape

# build timestamps for both the occurrence and possible-occurrence events
for evt in ['occur', 'poss']:
    dff['%s_ts'%evt] = pd.to_datetime(dff['%s_date'%evt]+' '+dff['%s_time'%evt])
dff['rpt_ts'] = pd.to_datetime(dff.rpt_date)

', '.join(dff.columns)

dff['occur_year'] = dff.occur_ts.dt.year
dff['occur_month'] = dff.occur_ts.dt.month
dff['occur_dayweek'] = dff.occur_ts.dt.dayofweek

# ### Crime per year

# Let's look at the

crime_year = dff[dff.occur_year.between(2009, 2015)].groupby(by=['UC2 Literal', 'occur_year']).offense_id.count()

# %matplotlib inline
# one bar chart per crime type, each scaled to % of its own peak year;
# dashed red line marks the most recent year's level
fig = plt.figure(figsize=(40,30))
crime_types = crime_year.index.levels[0]
years = crime_year.index.levels[1]
for c in range(len(crime_types)):
    y_max = max(crime_year.loc[crime_types[c]])
    plt.subplot(4,3,c+1)
    plt.hlines(crime_year.loc[crime_types[c]].iloc[-1]*100/y_max, years[0], years[-1], linestyles="dashed", color="r")
    plt.bar(crime_year.loc[crime_types[c]].index, crime_year.loc[crime_types[c]]*100/y_max, label=crime_types[c], alpha=0.5)
    ##plt.legend()
    plt.ylim(0, 100)
    plt.xticks(years+0.4, [str(int(y)) for y in years], rotation=0, fontsize=24)
    plt.yticks([0,20,40,60,80,100], ['0%','20%','40%','60%','80%','100%'], fontsize=24)
    plt.title(crime_types[c], fontsize=30)
None

# Let's look at residential burglary.
c = 3 crime_types[c] crime_year_month = dff[dff.occur_year.between(2009, 2015)].groupby(by=['UC2 Literal', 'occur_year', 'occur_month']).offense_id.count() c = 3 ## 'BURGLARY-RESIDENCE' resburglaries = crime_year_month.loc[crime_types[c]] fig = plt.figure(figsize=(20,10)) for y in years: plt.plot(resburglaries.loc[y].index, resburglaries.loc[y], label=("%4.0f"%y)) plt.legend() plt.title("Seasonal Trends - %s"%crime_types[c], fontsize=20) plt.xticks(range(13), ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlim(0,13) None # Normalized over the annual average c = 3 ## 'BURGLARY-RESIDENCE' fig = plt.figure(figsize=(20,10)) for y in years: avg = resburglaries.loc[y].mean() plt.hlines(avg, 1, 13, linestyle='dashed') plt.plot(resburglaries.loc[y].index, resburglaries.loc[y], label=("%4.0f"%y)) plt.legend() plt.title("Seasonal Trends - %s (with annuale averages)"%crime_types[c], fontsize=20) plt.xticks(list(range(1,13)), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlim(0,13) None c = 3 ## 'BURGLARY-RESIDENCE' fig = plt.figure(figsize=(20,10)) for y in years: avg = resburglaries.loc[y].mean() std = resburglaries.loc[y].std() ##plt.hlines(avg, 1, 13, linestyle='dashed') plt.plot(resburglaries.loc[y].index, (resburglaries.loc[y]-avg)/std, label=("%4.0f"%y)) plt.legend() plt.title("Seasonal Trends - %s (normalized)"%crime_types[c], fontsize=20) plt.xticks(list(range(1,13)), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) plt.xlim(0,13) plt.ylabel("Standard deviations $\sigma_y$") None seasonal_adjust = resburglaries.reset_index().groupby(by=['occur_month']).offense_id.agg('mean') # ### Fitting the regression line # Suppose there are $n$ data points {{math|{(''x<sub>i</sub>'', ''y<sub>i</sub>''), ''i'' {{=}} 1, ..., ''n''}.}} The function that describes x and y is: # # $$y_i = \alpha + \beta x_i + \varepsilon_i.$$ # # The goal is to find the 
# equation of the straight line
#
# $$y = \alpha + \beta x,$$
#
# which provides the "best" fit for the data points. Here "best" is understood
# in the least-squares sense: the line that minimizes the sum of squared
# residuals of the linear regression model. In other words, $\alpha$ (the
# $y$-intercept) and $\beta$ (the slope) solve the following minimization problem:
#
# $$\text{Find }\min_{\alpha,\,\beta} Q(\alpha, \beta), \qquad \text{for } Q(\alpha, \beta) = \sum_{i=1}^n\varepsilon_i^{\,2} = \sum_{i=1}^n (y_i - \alpha - \beta x_i)^2$$
#
# By using either calculus, the geometry of inner product spaces, or simply
# expanding to get a quadratic expression in $\alpha$ and $\beta$, it can be
# shown that the values of $\alpha$ and $\beta$ that minimize the objective
# function $Q$ (Kenney and Keeping, "Linear Regression and Correlation",
# ch. 15 in *Mathematics of Statistics*, Pt. 1, 3rd ed., Princeton, NJ:
# Van Nostrand, pp. 252–285) are
#
# $$\begin{align}
# \hat\beta &= \frac{ \sum_{i=1}^{n} (x_i - \bar{x})(y_i - \bar{y}) }{ \sum_{i=1}^n (x_i - \bar{x})^2 } \\[6pt]
# &= \frac{ \sum_{i=1}^{n} (x_i y_i - x_i \bar{y} - \bar{x} y_i + \bar{x} \bar{y})} { \sum_{i=1}^n (x_i^2 - 2 x_i \bar{x} + \bar{x}^2) } \\[6pt]
# &= \frac{ \sum_{i=1}^{n} (x_i y_i) - \bar{y} \sum_{i=1}^{n} x_i - \bar{x} \sum_{i=1}^{n} y_i + n \bar{x} \bar{y}} { \sum_{i=1}^n (x_i^2) - 2 \bar{x} \sum_{i=1}^n x_i + n \bar{x}^2 } \\[6pt]
# &= \frac{ \frac{1}{n} \sum_{i=1}^{n} x_i y_i - \bar{x} \bar{y} }{ \frac{1}{n}\sum_{i=1}^n {x_i^2} - \overline{x}^2 } \\[6pt]
# &= \frac{ \overline{xy} - \bar{x}\bar{y} }{ \overline{x^2} - \bar{x}^2 } = \frac{ \operatorname{Cov}[x, y] }{ \operatorname{Var}[x] } = r_{xy} \frac{s_y}{s_x}, \\[6pt]
# \hat\alpha & = \bar{y} - \hat\beta\,\bar{x},
# \end{align}$$
#
# where $r_{xy}$ is the sample (Pearson) correlation coefficient between $x$
# and $y$, and $s_x$, $s_y$ are the sample standard deviations of $x$ and $y$.
# A horizontal bar over a quantity denotes its average value; for example:
#
# $$\overline{xy} = \frac{1}{n} \sum_{i=1}^n x_i y_i.$$
#
# Substituting the above expressions for $\hat{\alpha}$ and $\hat{\beta}$ into
#
# $$f = \hat{\alpha} + \hat{\beta} x$$
#
# yields
#
# $$\frac{ f - \bar{y}}{s_y} = r_{xy} \frac{ x - \bar{x}}{s_x},$$
#
# which shows that $r_{xy}$ is the slope of the regression line of the
# standardized data points (and that this line passes through the origin).
#
# It is sometimes useful to calculate $r_{xy}$ from the data independently
# using this equation:
#
# $$r_{xy} = \frac{ \overline{xy} - \bar{x}\bar{y} }{ \sqrt{ \left(\overline{x^2} - \bar{x}^2\right)\left(\overline{y^2} - \bar{y}^2\right)} }$$
#
# The coefficient of determination ($R^2$) is equal to $r_{xy}^2$ when the
# model is linear with a single independent variable.
#
# ### Linear regression without the intercept term
# Sometimes it is appropriate to force the regression line to pass through the
# origin, because $x$ and $y$ are assumed to be proportional. For the model
# without the intercept term, $y = \beta x$, the OLS estimator for $\beta$
# simplifies to
#
# $$\hat{\beta} = \frac{ \sum_{i=1}^n x_i y_i }{ \sum_{i=1}^n x_i^2 } = \frac{\overline{x y}}{\overline{x^2}}$$
#
# Substituting $(x - h,\ y - k)$ in place of $(x,\ y)$ gives the regression
# through $(h, k)$:
#
# $$\begin{align}
# \hat\beta &= \frac{\overline{(x - h) (y - k)}}{\overline{(x - h)^2}} \\[6pt]
# &= \frac{\overline{x y} + k \bar{x} - h \bar{y} - h k }{\overline{x^2} - 2 h \bar{x} + h^2} \\[6pt]
# &= \frac{\overline{x y} - \bar{x} \bar{y} + (\bar{x} - h)(\bar{y} - k)}{\overline{x^2} - \bar{x}^2 + (\bar{x} - h)^2} \\[6pt]
# &= \frac{\operatorname{Cov}[x,y] + (\bar{x} - h)(\bar{y}-k)}{\operatorname{Var}[x] + (\bar{x} - h)^2}
# \end{align}$$
#
# The last form above demonstrates how moving the line away from the center of
# mass of the data points affects the slope.
#

# +
### in case we want to save a DataFrame
#writer = pd.ExcelWriter('myresults.xlsx')
#df.to_excel(writer,'Results')
#writer.save()

# +
#resdf
# -
04-Pandas-Data-Tables/APD-Crime-Data_class.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="SsCgBdOndwo1"
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.optim as optim
import pandas as pd
import cv2
import os
import pydicom as dicom
import pydicom.uid
import json
from sklearn.model_selection import train_test_split

# +
# Accumulators shared by all the loader cells below.
images = []   # each entry is an array shaped (1, 28, 28)
labels = []   # 1 = COVID-19 positive, 0 = negative


def get_images(path, condition):
    """Read every image under `path` as 28x28 grayscale and tag it with `condition` (0/1)."""
    img_name = list(os.listdir(path))  # list of names
    for i in img_name:
        img_path = os.path.join(path, i)
        image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        image = cv2.resize(image, (28, 28))
        image = image.reshape(1, 28, 28)
        images.append(image)      # adding images to images list
        labels.append(condition)  # adding respective labels to labels list


# + importing images from sources 1, 2 and 7
# The dataset README states all images in these sources are positive cases,
# so we use label 1 (positive); 0 is used elsewhere for negative.
path = "C:/Users/Lenovo/OneDrive/Documents/CSProject/covid/Covid19action-radiology-CXR_v1.1/images"
path_1 = path + "/Source1"
path_2 = path + "/Source2"
path_7 = path + "/Source7"
get_images(path_1, 1)
get_images(path_2, 1)
get_images(path_7, 1)

# +
print(images[0].shape)
print(len(labels))


# +
def img_from_json_and_other(path, name):
    """Read images under `path`, keeping only files whose label appears in `name` (filename -> 0/1)."""
    img_name = list(os.listdir(path))
    for i in img_name:
        # Only read/resize files we actually keep (original decoded every file,
        # including unlabeled ones, before discarding them).
        if i in name:
            img_path = os.path.join(path, i)
            image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
            image = cv2.resize(image, (28, 28))
            image = image.reshape(1, 28, 28)
            images.append(image)
            labels.append(name[i])


# + source 5: labels come from a COCO-style json annotation file
annotation_path = "C:/Users/Lenovo/OneDrive/Documents/CSProject/Covid/annotations/annotation_file.json"
# FIX: the file handle was opened and never closed; use a context manager.
with open(annotation_path) as f:
    data = json.load(f)

filename = dict()  # patient file name -> 0/1 label
x = data['images']  # the 'images' list in the json file holds per-image metadata
for i in x:
    file = i['file_name']       # file name is the id of the patient
    type_covid = i['metadata']  # metadata holds the patient's condition
    if type_covid['finding'] == 'COVID_19':
        filename[file] = 1
    else:
        filename[file] = 0

path_5 = "C:/Users/Lenovo/OneDrive/Documents/CSProject/Covid/images"
img_from_json_and_other(path=path_5, name=filename)

# + the same source 5 also has a metadata.csv labelling file for the remaining images
csv_path = "C:/Users/Lenovo/OneDrive/Documents/CSProject/Covid/metadata.csv"
names_file = pd.read_csv(csv_path, usecols=['filename', 'finding'])
filename2 = dict()
for i, row in names_file.iterrows():
    if row['finding'] == 'COVID-19':
        filename2[row["filename"]] = 1
    else:
        filename2[row["filename"]] = 0
img_from_json_and_other(path=path_5, name=filename2)

# +
len(images)

# + importing images from source 6 (labels in its own metadata.csv, keyed by patientid)
filename3 = dict()
path_6 = "C:/Users/Lenovo/OneDrive/Documents/CSProject/covid/New folder/Figure1-COVID-chestxray-dataset-master"
names_file_2 = pd.read_csv(path_6 + '/metadata.csv', usecols=['patientid', 'finding'])
for i, row in names_file_2.iterrows():
    if row['finding'] == 'COVID-19':
        filename3[row['patientid'] + '.jpg'] = 1
    else:
        filename3[row['patientid'] + '.jpg'] = 0
# BUG FIX: original called the undefined name `img_from_json`.
img_from_json_and_other(path=path_6 + '/images', name=filename3)

# + importing images from source 3 (the 3000 images left for testing in that dataset)
# These are pneumonia patients, so we consider them COVID-19 negative.
path_3 = 'C:/Users/Lenovo/OneDrive/Documents/CSProject/covid/New folder (3)/images'
img_name = list(os.listdir(path_3))  # BUG FIX: original used the undefined name `path_4`
for i in img_name:
    img_path = os.path.join(path_3, i)
    image = dicom.read_file(img_path)
    if image is not None:
        image = cv2.resize(image.pixel_array, (28, 28))
        image = image.reshape(1, 28, 28)
        images.append(image)
        labels.append(0)

# +
len(labels)

# + importing images from source 3 (the 26000 images given for training in that dataset)
# Again pneumonia patients -> COVID-19 negative.
path_3 = "C:/Users/Lenovo/OneDrive/Documents/CSProject/covid/New folder (4)"
img_name = list(os.listdir(path_3))  # BUG FIX: original used the undefined name `path_6`
for i in img_name:
    img_path = os.path.join(path_3, i)
    image = dicom.read_file(img_path)
    if image is not None:
        image = cv2.resize(image.pixel_array, (28, 28))
        image = image.reshape(1, 28, 28)
        images.append(image)
        labels.append(0)

# +
len(labels)

# +
images_copy = images
labels_copy = labels

# + 80:20 train/test split
print(len(labels))
X_train, X_test, y_train, y_test = train_test_split(np.array(images), np.array(labels),
                                                    test_size=0.20, random_state=42)

# + group the samples into fixed batches of 64 (any remainder < 64 is dropped)
test_l = int(len(y_test) / 64)
train_l = int(len(X_train) / 64)
print(test_l)
train_x = []
train_y = []
test_x = []
test_y = []
for i in range(test_l):
    s = []
    z = []
    for e in range(64):
        s.append(X_test[(64 * i) + e])
        z.append(y_test[(64 * i) + e])
    test_x.append(s)
    test_y.append(z)
test_x = np.array(test_x)
test_y = np.array(test_y)
for i in range(train_l):
    s = []
    z = []
    for e in range(64):
        s.append(X_train[(64 * i) + e])
        z.append(y_train[(64 * i) + e])
    train_x.append(s)
    train_y.append(z)
train_x = np.array(train_x)
train_y = np.array(train_y)
print(test_x.shape)
print(train_x.shape)


# +
class net(nn.Module):
    """Small CNN for 28x28 grayscale CXR images: two conv/pool stages, then a
    3-layer fully-connected head with dropout; log-softmax over 2 classes."""

    def __init__(self):
        super(net, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, kernel_size=8, stride=2, padding=0)
        self.mpool1 = nn.MaxPool2d(kernel_size=2, stride=1, padding=0)
        self.conv2 = nn.Conv2d(20, 40, kernel_size=4, stride=1, padding=0)
        self.mpool2 = nn.MaxPool2d(kernel_size=2, stride=1, padding=0)
        self.fc1 = nn.Linear(36 * 40, 100)
        self.fc1_drop = nn.Dropout(0.2)
        self.fc2 = nn.Linear(100, 20)
        self.fc3 = nn.Linear(20, 2)  # output of 2 (COVID positive / negative)

    def forward(self, x):
        x = x.view(-1, 1, 28, 28)
        x = self.mpool1(F.relu(self.conv1(x)))
        x = self.mpool2(F.relu(self.conv2(x)))
        x = x.view(-1, 40 * 36)
        x = F.relu(self.fc1(x))
        x = self.fc1_drop(x)
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # log-probabilities: pair with F.nll_loss in the training loop
        return F.log_softmax(x, dim=1)


# +
Net = net()
learning_rate = 0.001
momentum = 0.5
# 10 epochs suffice to reach saturation, and each epoch is slow
num_epochs = 10
optimizer = optim.SGD(Net.parameters(), lr=learning_rate, momentum=momentum)

# +
print(len(X_train))
print(len(X_test))

# + training loop
train_loss = []
train_acc = []
# FIX: denominator generalized — the batching above drops the remainder, so the
# number of samples actually trained on is train_l*64 (was hard-coded 24597).
train_count = train_l * 64
for epoch in range(num_epochs):
    running_loss = 0.0
    running_corr = 0
    for i, data in enumerate(train_x):
        inputs = torch.from_numpy(data)
        label = torch.from_numpy(train_y[i])
        # Initializing model gradients to zero
        optimizer.zero_grad()
        # Data feed-forward through the network
        outputs = Net(inputs.float())
        # Predicted class is the one with maximum probability
        preds = torch.argmax(outputs, dim=1)
        # nll_loss pairs with the log_softmax output of the model
        loss = F.nll_loss(outputs, label.long())
        # Calculating gradients and updating the model parameters
        loss.backward()
        optimizer.step()
        # FIX: accumulate the scalar (.item()) — accumulating the tensor kept
        # every batch's autograd graph alive and grew memory over the epoch.
        running_loss += loss.item()
        # Accumulate number of correct predictions
        running_corr += torch.sum(preds == label.long())
    epoch_loss = running_loss / (i + 1)           # mean loss over the epoch
    epoch_acc = running_corr.item() / train_count  # training accuracy
    train_loss.append(epoch_loss)  # saved for plotting
    train_acc.append(epoch_acc)    # saved for plotting
    print('Epoch {:.0f}/{:.0f} : Training loss: {:.4f} | Training Accuracy: {:.4f}'.format(
        epoch + 1, num_epochs, epoch_loss, epoch_acc * 100))

# + evaluation on the held-out batches
correct_pred = 0
for i in range(len(test_x)):
    inputs = torch.from_numpy(test_x[i])
    batch_labels = torch.from_numpy(test_y[i])
    # Feedforward test data batch through model
    output = Net(inputs.float())
    # Predicted class is the one with maximum probability
    preds = torch.argmax(output, dim=1)
    correct_pred += torch.sum(preds == batch_labels.long())
# FIX: denominator generalized to the samples actually evaluated (was hard-coded 6150).
test_accuracy = correct_pred.item() / (test_l * 64)
print('Testing accuracy = ', test_accuracy * 100)

# + training curves
fig = plt.figure(figsize=[15, 5])
plt.subplot(121)
plt.plot(range(num_epochs), train_loss, 'r-', label='Loss')
plt.legend(loc='upper right')
plt.xlabel('Epochs')
plt.ylabel('Training')
plt.subplot(122)
plt.plot(range(num_epochs), train_acc, 'g-', label='Accuracy')
plt.legend(loc='upper right')
plt.xlabel('Epochs')
plt.ylabel('Training')
plt.show()
CNN_Model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Visualizing CNN Layers # --- # In this notebook, we load a trained CNN (from a solution to FashionMNIST) and implement several feature visualization techniques to see what features this network has learned to extract. # ### Load the [data](http://pytorch.org/docs/master/torchvision/datasets.html) # # In this cell, we load in just the **test** dataset from the FashionMNIST class. # + # our basic libraries import torch import torchvision # data loading and transforming from torchvision.datasets import FashionMNIST from torch.utils.data import DataLoader from torchvision import transforms # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors for input into a CNN ## Define a transform to read the data in as a tensor data_transform = transforms.ToTensor() test_data = FashionMNIST(root='./data', train=False, download=True, transform=data_transform) # Print out some stats about the test data print('Test data, number of images: ', len(test_data)) # + # prepare data loaders, set the batch_size ## TODO: you can try changing the batch_size to be larger or smaller ## when you get to training your network, see how batch_size affects the loss batch_size = 20 test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True) # specify the image classes classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # - # ### Visualize some test data # # This cell iterates over the training dataset, loading a random batch of image/label data, using `dataiter.next()`. It then plots the batch of images and labels in a `2 x batch_size/2` grid. 
# +
import numpy as np
import matplotlib.pyplot as plt

# %matplotlib inline

# obtain one batch of test images
dataiter = iter(test_loader)
# FIX: DataLoader iterators no longer expose a .next() method; use the builtin next().
images, labels = next(dataiter)
images = images.numpy()

# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(batch_size):
    # FIX: subplot grid dimensions must be integers — batch_size/2 is a float;
    # use floor division.
    ax = fig.add_subplot(2, batch_size // 2, idx + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    ax.set_title(classes[labels[idx]])
# -

# ### Define the network architecture
#
# The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/master/nn.html). For a convolutional neural network, we'll use a simple series of layers:
# * Convolutional layers
# * Maxpooling layers
# * Fully-connected (linear) layers

# +
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """CNN for FashionMNIST: two conv+pool stages followed by two linear
    layers with dropout in between; returns raw class scores for 10 classes."""

    def __init__(self):
        super(Net, self).__init__()

        # 1 input image channel (grayscale), 10 output channels/feature maps,
        # 3x3 square convolution kernel
        ## output size = (W-F)/S +1 = (28-3)/1 +1 = 26
        # the output Tensor for one image will have the dimensions: (10, 26, 26)
        # after one pool layer, this becomes (10, 13, 13)
        self.conv1 = nn.Conv2d(1, 10, 3)

        # maxpool layer: pool with kernel_size=2, stride=2
        self.pool = nn.MaxPool2d(2, 2)

        # second conv layer: 10 inputs, 20 outputs, 3x3 conv
        ## output size = (W-F)/S +1 = (13-3)/1 +1 = 11
        # the output tensor will have dimensions: (20, 11, 11)
        # after another pool layer this becomes (20, 5, 5); 5.5 is rounded down
        self.conv2 = nn.Conv2d(10, 20, 3)

        # 20 outputs * the 5*5 filtered/pooled map size
        self.fc1 = nn.Linear(20 * 5 * 5, 50)

        # dropout with p=0.4
        self.fc1_drop = nn.Dropout(p=0.4)

        # finally, create 10 output channels (for the 10 classes)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # two conv/relu + pool layers
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))

        # prep for linear layer: flatten (the equivalent of Flatten in Keras)
        x = x.view(x.size(0), -1)

        # two linear layers with dropout in between
        x = F.relu(self.fc1(x))
        x = self.fc1_drop(x)
        x = self.fc2(x)

        # final output
        return x
# -

# ### Load in our trained net
#
# This notebook needs to know the network architecture, as defined above, and once it knows what the "Net" class looks like, we can instantiate a model and load in an already trained network.
#
# The architecture above is taken from the example solution code, which was trained and saved in the directory `saved_models/`.

# +
# instantiate your Net
net = Net()

# load the net parameters by name
net.load_state_dict(torch.load('saved_models/fashion_net_ex.pt'))

print(net)
# -

# ## Feature Visualization
#
# Sometimes, neural networks are thought of as a black box, given some input, they learn to produce some output. CNN's are actually learning to recognize a variety of spatial patterns and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. These techniques are called feature visualization and they are useful for understanding the inner workings of a CNN.
#
# In the cell below, you'll see how to extract and visualize the filter weights for all of the filters in the first convolutional layer.
#
# Note the patterns of light and dark pixels and see if you can tell what a particular filter is detecting. For example, the filter pictured in the example below has dark pixels on either side and light pixels in the middle column, and so it may be detecting vertical edges.
#
# <img src='images/edge_filter_ex.png' width= 30% height=30%/>
#

net.conv1.weight.data.size()

# +
# Get the weights in the first conv layer
weights = net.conv1.weight.data
w = weights.numpy()

# for 10 filters
fig = plt.figure(figsize=(20, 8))
columns = 5
rows = 2
for i in range(0, columns * rows):
    fig.add_subplot(rows, columns, i + 1)
    plt.imshow(w[i][0], cmap='gray')

print('First convolutional layer')
plt.show()

weights = net.conv2.weight.data
w = weights.numpy()
# -

# ### Activation Maps
#
# Next, you'll see how to use OpenCV's `filter2D` function to apply these filters to a sample test image and produce a series of **activation maps** as a result. We'll do this for the first and second convolutional layers and these activation maps whould really give you a sense for what features each filter learns to extract.

# +
# obtain one batch of testing images
dataiter = iter(test_loader)
# FIX: DataLoader iterators no longer expose a .next() method; use the builtin next().
images, labels = next(dataiter)
images = images.numpy()

# select an image by index
idx = 5
img = np.squeeze(images[idx])

# Use OpenCV's filter2D function
# apply a specific set of filter weights (like the one's displayed above) to the test image
import cv2

plt.imshow(img, cmap='gray')

weights = net.conv1.weight.data
w = weights.numpy()

# 1. first conv layer
# for 10 filters: even slots show the filter, odd slots its activation map
fig = plt.figure(figsize=(30, 10))
columns = 5 * 2
rows = 2
for i in range(0, columns * rows):
    fig.add_subplot(rows, columns, i + 1)
    if ((i % 2) == 0):
        plt.imshow(w[int(i / 2)][0], cmap='gray')
    else:
        c = cv2.filter2D(img, -1, w[int((i - 1) / 2)][0])
        plt.imshow(c, cmap='gray')
plt.show()

# +
# Same process but for the second conv layer (20, 3x3 filters):
plt.imshow(img, cmap='gray')

# second conv layer, conv2
weights = net.conv2.weight.data
w = weights.numpy()

# 2. second conv layer (comment fixed: was a stale "first conv layer" copy)
# for 20 filters
fig = plt.figure(figsize=(30, 10))
columns = 5 * 2
rows = 2 * 2
for i in range(0, columns * rows):
    fig.add_subplot(rows, columns, i + 1)
    if ((i % 2) == 0):
        plt.imshow(w[int(i / 2)][0], cmap='gray')
    else:
        c = cv2.filter2D(img, -1, w[int((i - 1) / 2)][0])
        plt.imshow(c, cmap='gray')
plt.show()
# -

# ### Question: Choose a filter from one of your trained convolutional layers; looking at these activations, what purpose do you think it plays? What kind of feature do you think it detects?
#
# **Answer**: In the first convolutional layer (conv1), the sixth filter, pictured as the first filter in the second row of the conv1 filters, appears to detect vertical/slightly-left leaning edges. It has a positively weighted left-most column and seems to detect the vertical edges of sleeves in a pullover.
1_5_CNN_Layers/5_1. Feature viz for FashionMNIST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd  # Beautiful tool for data wrangling! e.g. '!pip install pandas' from a Notebook
# See https://mariadb.com/blog/how-connect-python-programs-mariadb e.g. '!pip install mysql' from Notebook
import mysql.connector as mariadb
import re

pd.set_option("display.max_rows", 35)  # Useful when having large Pandas DataFrames like we do here

conn = mariadb.connect(user='mos', password='', database='monuments_db', buffered=True)
cursor = conn.cursor()

# ### Monuments_all

cursor.execute("SELECT * FROM monuments_all WHERE country='se-bbr'")

# FIX: use the public pd.read_sql entry point instead of the private
# pd.io.sql.read_sql path.
all_bbr = pd.read_sql('select * from monuments_all WHERE country="se-bbr"', conn)

all_bbr.shape

# ### monuments_all_se-bbr_(sv)

table_name = "se_bbr"  # I've renamed monuments_se-bbr_(se) to 'se_bbr' in local database, change to correct name

se_bbr = pd.read_sql('select * from se_bbr', conn)

se_bbr.shape

se_bbr.keys()

# FIX: the .ix indexer was removed in pandas 1.0; the default RangeIndex is
# integer-labelled, so .loc gives the same (inclusive) slice here.
se_bbr.loc[10:20, ["namn", "funktion"]]

first = se_bbr.groupby("kommun")[["funktion", "namn"]].first()
first.head()

first.loc[["Ale", "Täby", "Åre"], ["funktion", "namn"]]

se_bbr.namn.str.extract('(?P<name>\[\[[\w\.\|\- ]+\]\])\,? ?(?P<name2>[ \w]+)? ?(?P<name3>\[\[[\w\.\|\- ]+\]\])? ?(?P<buildId>\([\w\.\d \|\:\-;,]+\))',expand=True)

se_bbr.loc[5672]

# Let's check If we have extracted the two always occuring fields 'name' and 'buildId' for all objects

se_bbr_namn = se_bbr.namn.str.extract('(?P<name>\[\[[\w`\.,\|\- ]+\]\])\,? ?(?P<name2>[ \w]+)? ?(?P<name3>\[\[[\w\.\|\- ]+\]\])? ?(?P<buildId>\([\w\.\d \|\:\-;,]+\))',expand=True)

# FIX: `== pd.np.nan` / `!= pd.np.nan` never work — NaN compares unequal to
# everything (including itself), so those filters always returned 0 rows /
# all rows.  isnull()/notnull() express the intended missing-value checks,
# and also drop the pd.np alias, which was removed in pandas 2.0.
len(se_bbr_namn[se_bbr_namn["name"].isnull()])

len(se_bbr_namn[se_bbr_namn["buildId"].notnull()])

se_bbr[pd.isnull(se_bbr["namn"])]

len(se_bbr_namn.name.value_counts())

len(se_bbr_namn.name2.value_counts())

len(se_bbr_namn.name3.value_counts())

# How many objects did we get the other fields 'name2' and 'name3' for?

se_bbr_namn.name3.value_counts()

# na=False treats rows with a missing 'namn' as non-matches — same effect as
# the original "== True" trick, but expressed through the documented parameter.
se_bbr[se_bbr.namn.str.contains("sproge", flags=re.IGNORECASE, na=False)]
explore_bbr_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Stack OverFlow CaseStudy

import pandas as pd

# question 1: Find the display name and no. of posts created by the user who has got maximum reputation.

user_data = pd.read_csv("Users.csv")
post_data = pd.read_csv("post.csv")
comment_data = pd.read_csv("comments.csv")
PostType_data = pd.read_csv("posttypes.csv")

# The maximum reputation value itself.
user_data['reputation'].max()

# FIX: the question asks for the *user* (display name), not just the score.
# idxmax() yields the index label of the row holding the maximum reputation,
# and .loc retrieves that user's full record, display name included.
top_user = user_data.loc[user_data['reputation'].idxmax()]
top_user

# TODO(review): to finish the question, count this user's rows in post_data by
# joining on the post table's owner-id column — confirm its exact name
# (e.g. 'owner_user_id') against post.csv before wiring up the merge.
Lec 5 Case Study 1/Dataset/CaseStudy_main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 823 HW1-<NAME>

# ## Problem 6; Sum square difference; Solved by 496303 people

# ### We define a function called *sum_square_diff*. First, we find the sum of squares of the numbers in a certain range. Then, we find the square of the sum of the numbers in that range. Then we find the difference between the two results. For n = 10 we get 2640 for the difference.

# +
def sum_square_diff(n):
    """Return the difference between the square of the sum and the sum of
    the squares of the first n natural numbers.

    n : int
        Upper end (inclusive) of the range 1..n of natural numbers used.
    """
    # sum() with a range / generator expression replaces the two manual
    # accumulation loops; the docstring no longer hard-codes "one hundred",
    # since the function is parameterised by n.
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


print(sum_square_diff(10))
# -

# ## Problem 34; Digit factorials; Solved by 59089 people

# ### We define a function called *get_sum()* to find the sum of all the curious numbers like 145 — numbers that equal the sum of the factorials of their own digits. We work out the digits of each candidate number, take the factorial of each digit, and sum those factorials. We search numbers between 3 and an upper bound of roughly seven digits — because even if all digits are 9, the number of digits that can possibly fulfill the requirement of a curious number is around 7.
For every number in this range, we pass every digits and get the sum of the factorial of them and compare the sum with the original number. If the two values are equal, then we add them into the result. Finally, we got 40730 for the sum. # + def get_sum(): """Get the sum of curious numbers which equals the sum of the factorial of its digits. """ def get_digits(number): """Get the digints of a certain number number: integer The intger that we want to get digits of. """ digs = [] for i in str(number): digs.append(i) return digs def get_factorial(num): """Get the factorial of a number num: integer The integer we want to get factorial of. """ if (num == 1 or num == 0): return 1 else: return num * get_factorial(num-1) total_sum = 0 for number in range(3, 1499999): digits = get_digits(number) fac_sum = 0 for dig in digits: fac_sum = fac_sum + get_factorial(int(dig)) if (fac_sum == number): total_sum = total_sum + number return(total_sum) print(get_sum()) # - # ## Problem 112; Bouncy numbers; Solved by 24434 people. # ### We define a function called *get_lim()* to get the least number for which the proportion of bouncy numbers is exactly 99%. Under this function, first we call a function to find the digits of a number called the *get_digits*. In this function, we convert the number to a string and put the digits in an array and then return the array. Then, we define an *is_increase()* function to check if the number is an increasing number. In this function, we first call the *get_digits()* function to get digits of the number; then we compare the digits with the next digits to see if it is an increasing number. Then, we define an *is_decrease()* function to check if the number is a decreasing number. In this function, we first call the *get_digits()* function to get digits of the number; then we compare the digits with the next digits to see if it is a decreasing number. Thirdly, we define a *is_bouncy()* function to check if the number is a bouncy number. 
We call the *is_decrease()* and *is_increase()* function defined earlier. If the number is neither increasing or decreasing, then it is a bouncy number. Finally, we create a while loop to append every bouncy number in an array until the percent of bouncy numbers in the range get 99%. The result we got is 1587000. # + def get_lim(): """Find the least number for which the proportion of bouncy numbers is exactly 99% """ def get_digits(number): """ Get the dignits of integers. number: integer The integer we want to get digits of. """ digs = [] for i in str(number): digs.append(i) return digs def is_increase(number): """Check if the integer is an increasing number number: integer The interger we want to check if is an increasing number. """ digits = get_digits(number) result = 0 for i in range(0,len(digits) - 1): if (int(digits[i+1]) >= int(digits[i])): result = result + 0 else: result += 1 if (result == 0): return 1 else: return 0 return result def is_decrease(number): """Check if the integer is a decreasing number number: integer The interger we want to check if is a decreasing number. """ digits = get_digits(number) result = 0 for i in range(0,len(digits) - 1): if (int(digits[i+1]) <= int(digits[i])): result = result + 0 else: result += 1 if (result == 0): return 1 else: return 0 return result def is_bouncy(number): """Check if the integer is a bouncy number number: integer The interger we want to check if is a bouncy number. """ if ((is_increase(number) != 1) & (is_decrease(number) != 1)): return 1 else: return 0 number = 0; percent = 0; b =[] while percent < 0.99: number += 1 if is_bouncy(number): b.append(number) percent = len(b)/number print("The least number for which the proportion of bouncy numbers is exactly 99% is " + str(number)) get_lim()
_notebooks/823HW1_Li_Candice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <span style = "font-family:Comic Sans MS, cursive, sans-serif; # font-size: 0.8em; # color: gray; # background-color: white"> # Info: click the 🐍 symbol for viewing the source code (based on Andrew's NG Deep Learning courses...) # </span> # # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 2em; # color: black; # background-color: white"> # Oop's !! Deep Neural Network from Scratch... # </span> # + cell_style="center" from dnn.initialization import Initialization as init from dnn.activation import Activation as activate from processing import * np.random.seed(1) # %reload_ext autoreload # %autoreload 2 # - # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Processing... 
# </span> # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.2em; # color: black; # background-color: white"> # let's roll 🎶 # <span style="font-family:Candara; font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [🐍](./processing.py)</span> # # ![processing][image_ref_syy3ak06] # # [image_ref_syy3ak06]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozNDozNCswODowMBDNJRYAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy80OS84Mi80OTgyM2Q0MmM0OTM1Y2UxZjQ1MWE1NjQwZjY2YWEwZi5zdmfdpFd6AAAFdElEQVRoQ82ZTSh9QRTAz3u+35NvyfORlI+SkhLqhUgkYaFYyUrZ2VNYWLBDRPlY2MlGYqEokY2UsCCSBWFFvr/vf868mTH3uo/7eH/v/uo0Z+bNnTfnzZwzZ+6zKAT4Af39/TA1NQVXV1fw+voKLy8v8Pb2Bu/v71RwWLlE+FfJX2mxWJjm0mWxWq209PPzE+Lv7w92ux1qa2uhp6eHPUmeJYN6bAgObhZOTk4gISEBrKxumMTERKaZAz4fjwxZXV2F09NTVjMPQ0NDnm2thoYGmJ6eZjWAvr4+CAsLg8DAQLGH+f5GfrMF+bSwREE/Qx98fn6m/tjW1kZ9E8nPz6cdDZOZmYmjUwkPD2etvqGkpETMJSoqSvFoa11fXzMNICAggGm+AXcB5/Hx0TMfwQc4vo5cGIY5uMU8MgT3p1nAM4aDvuORIehkHF+viPz9xGWMGcKjAy8RPhD+Gt8JP+G9iWwIji8MQUcuLi6mHbSCjo0lTopzcXFB23CvficYlnm68ZXU1dX9yGB8lobfnZ0dEcrMIOTQxWl9SU1NjehPDHGF3+zsbCxMA+ZO30FXQcI6OjrKVHMxPj7ONGNYysrKlKWlJVYFWFlZoftUTju0+1tGW3cHjslBHYUHAwzrKGQurAdAYWEhrK+vs9pn0J9mZ2dZjRAfHy/2WlJSEhnfdzgcDjGX2NhY1qoPuY+IvijWp6cnUroICgpimm+Q0477+3umGcMqH3K+zp8wVHM8zSIsNptN4dZj9Nre3qa6FryJbW5u0nxLvtaSVVbJV+Dqk7AJycnJrEVNeno6HBwcUB39Uj63tHzyEbKdxD7Lyckhc/kMCQaijzdkb2+PjawmIyND1e8rZB8hAUexkjaiu5ATMRn5ku8N2tvbmeY9rPwNB+IulKalpTHNO2RlZTHNi5BVEEuUl5fnWjcdcnNzFfQn4pCivzvBMbEfiUJKcHCwYrfblYiICCUuLk5xOp1sxM/8amsRReBuayHo6Hd3d9TRyThfCjop9kPnfnh4gNvbW7i8vITz83P6AuN/YMhH/gqjWYIWnLdq5j8dyFvIPyrS1dVFV/U7qCFmWhEt3d3dQHwMOjs7WcsH8rzxIMUlEC1FRUU0adSjo6MD1tbW6C2RRzo+GF9JTDDxvezIyAikpKTQNk/IzMyE/f19VlPT
3NwMk5OTrAb0YJ2bm6N6aGgoLXE2VPBdkR69vb2ij1H5CdqopZXl5WXWU1GIwaK9oaEBy4+OpaWlrJsakrqIPkbl8PCQPW0ckqKI5zGkFhQUqMaMjo5mPV0cHR0pMzMzVCeff3TEu4keY2Njoo9R+QmyIXyMyspKVdv19TVt16LykfLyclhcXGQ1NQMDA4AXsJCQEHHZki9emDljGh4eHg4tLS2GrqtaMIMgK8lq1BLqs2TLsxaA+fl5qKqqYjU1wtqKigqXeT4iNTVVzAUF2draUrVNTEzQdi2GT/a/QH5vxtFe9tydK6YyRL7k4d8VCJ4jMuTHZ5oa1cxxv/81ODG8rOGN8OzsjLUCkFBMS35GcNz92KpW+aopQy5C1MEcDgd1aDwAvSU4MQwg2i1UXV1Ny5iYGFpyoqKimPYZ4UiNjY3UcWTq6+vF538pMq2trbSNrA5r+Qz+IuJhkgawZhfaGP5XsrCwwGZgHBgeHhYD7O7usmbv39ONysbGBpuBZ9A/Q4+Pj+nfvLKPYF3+BxeTtKamJhr+cF9rhe/370SvH4J+8qsrMDVHB/yICyZlZkc3lmlTeeLwTDMvuobI8RyJjIxkmnnRNeTm5oZpLmw2G9PMi64h2pxHmyaYEV1D5LfiiDZNMCO6hvA8h+PtN43/A11DnE4nPTOQwcFBWpobgH9o+1btUzQHIwAAAABJRU5ErkJggg== trainF, trainL, testF, testL, classes = repackNgDatasets() ; exploringImages(trainF) trainX, testX = flatteningImages(trainF, testF) # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Activation... # </span> # <span style="font-family:Candara; font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [🐍](./dnn/activation.py)</span> # ![Activation][image_ref_bmt1vdn5] # # [image_ref_bmt1vdn5]: 
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozNToxNCswODowML0qSVUAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy8xMi81NC8xMjU0OGM3ZTE3ODQ5YmE0YTM2ZTVlNmNmMDAzMzJkYi5zdmfCm0y5AAAEPElEQVRoQ+2auUssQRCHa8f7PpBFUBQN1ERNxUUUBCMzFzURDMR/QNBEzAwETYzMVlAw0NzAE8wEEcUDA5HVRF0Eb7zndbU19h69szO+Gd+MvA+KqTl6un9OH1W9elQGfIO7uztYWFiAjY0N2N7ehtPTU3h8fIS3tzf4+PjgpuHxeLgpigJJSUncUlJSID09HcrKyqC+vh6ampqgu7sbUlNTqZRJUIgZlpeX1eLiYhRvi5WXl6vr6+tUm3FMCamrq5NWboexr0S1GsNw18KuYRZZGYPVfWH0eYWOumRmZpIXydTUFK8onmljJdxkz6EFAgF6aySFhYXkJYC9RJfR0dGvz60ZG6B013oqKytj6pucnKS78UnYtazoHmb5Tp26Xevg4IA8QVtbG3n20dDQQJ4gGAySJ0dXyP7+PnmCkpIS8uyjsbGRPAGbksmToyskFAqRJ8jKyiLPPioqKsgTHB0dkSdHVwiu3tGkpaWRZx99fX3kCYaGhsiToyvk6emJPMFPCMHQ5f39HcbHx2FsbIxP23l5eXRXjmkhycnJ5NkLxmUDAwMwODhoaDF2rBCz6ArBSDaaRJ/4X6G7ILa2tsLq6iqdfYIhN9rr6yv/5GjYDTT/bw3fpYX7+PUxrM/IyIDS0lLw+/3xw3wUEs709LTq9Xp5aOBEY4LUxcVFaq3gS8jz87PKZgtpYSdaTU0NtfwT3rVwqnPqIE4Eaz4/ciEFBQVwfX3NL2jgoJ6YmACmnA967L+I1pejfezXSLz7smvhaNewYbhu4B8XxyHOnDc3N3B2dgbDw8Nwf3/Pn9Oora2F3d1dgJWVlZjP5vP5UJ8jkWWpOzs7KrS3t8fcuLy8pGLOY29vL6a9nZ2dqocFaOrJyQk7F7DnyXMeuFMTHbiyZAwUvOEmZGn3w8MDKLLV223gpKDgDOF2UIPi5PFgFNTwK4Qg/7+I01Bk4YLbQA26iZWbYLHe79CiYCbmdlCD8hPbO3aDv34p+fn5dOoOWCZLngA1KBg5RhOdZDmJ4+Nj8gTNzc0A8/PzuJBEGMu62PLiTFgIH9PeYDD4mbNjWnt7e8uuRcKSLr6hjKGzZkVFRdDb20tPGAfTha2tLbi4uOB7ypjCvry88MgV01qMwsOP4caaCFdXVzAzM0NvE7S0tMDa2hqTQ7Brpow1jErqMzIyorLBKH3H3xobG1QLaz8dOdilZAVkNjs7S6XkLC0tSctZZX6/n2r6JEIIcnh4qHZ1dUkLh5sec3Nz0jJWWE9PjxoKhagmge6WKYKzBPZp7MuYwOA4wS2YeOCzubm5dCbw+XzQ39/P+7v23w+a4Z5a9FHbLsV1Ljs7m2+Z4s8NcUEhVtLR0RHzVwwEAnTXPhJ+EbN4vd6Yn+wsrkKK5UJkacFPCPk9YTwdXY/tQnJycsizF8uFnJ+fQ1VVFferq6thc3OT+/YC8Ad66YmcwS6hsgAAAABJRU5ErkJggg== # # <span style="font-family:Candara; 
font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [See forward](#forward)</span> # <span style="font-family:Candara; font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [and backward passes...](#backward)</span> # %pycat ./dnn/activation.py # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Initialization... # </span> # <span style="font-family:Candara; font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [🐍](./dnn/initialization.py)</span> # # ![Initialization][image_ref_tyq0xbu8] # # [image_ref_tyq0xbu8]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozMjoxOSswODowMD4hM+wAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy80Ni8zNy80NjM<KEY> # # <span style="font-family:Candara; font-size:1em; #color:WHITE;#background-color:LightSeaGreen "> [See the model...](#model)</span> # %pycat ./dnn/initialization.py # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Forward Propagation... # </span> # # ![Forward][image_ref_ndmxz8d0] # # [image_ref_ndmxz8d0]: data:image/png;base64,<KEY> # # # <a id='forward'></a> def forwardPass(X, parameters): A = X L = len(parameters) // 2 caches = [] # Hidden Layers... for l in range(1, L): AP = A Wl = 'W' + str(l) bl = 'b' + str(l) ############################################################## A = activate('ReLU', False, parameters[Wl].dot(AP) + parameters[bl], None).compute() linearCache = (AP, parameters[Wl], parameters[bl]) activationCache = parameters[Wl].dot(AP) + parameters[bl] caches.append((linearCache, activationCache)) # Output Layer... 
WL = 'W' + str(L) bL = 'b' + str(L) ################################################################## AL = activate('Sigmoid', False, parameters[WL].dot(A) + parameters[bL], None).compute() linearCache = (A, parameters[WL], parameters[bL]) activationCache = parameters[WL].dot(A) + parameters[bL] caches.append((linearCache, activationCache)) return AL, caches # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Loss & Cost Computing... # </span> # # ![Loss and Cost Computing][image_ref_wffecri0] # # [image_ref_wffecri0]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozNDo1NiswODowMEE9PbgAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy9lMS80Yi9lMTRiNTFkMzM5MDU4NjM4NDBhNDM2Yzg5ZjFiYTk1ZS5zdmdHCpa5AAAF3ElEQVRoQ9WaTUhUURTHr+OYSKjhd0YMRUofgmKKaFaUbiIiSDAwqIQgW+RGSEQEN610Ky7ESKUPDcs2kS5qFhn0QQsFEckvEhN0o41KaN7uee/MzD1v3rwvnYl+cJjz3v/cc9993nc/3jOGCxgyMzPDWlpamNfrZZubm2xnZ4f9+fNH+QUDINxvWuCcy+XCI5WYmBj0VPzHEAs+5Pd4PKyhoYHV1dUpmiNEQoXr16/Dlf1z+/37N16RPZSGVFdX6yb9V+YENjU1pZvsX9qdO3fw8qzDtF3qxYsXKEWP9fV1npaWRq5DD7jpZ8+eJXFg79+/5yw1NTVworS0FItEn46ODnJxPp8PFZXW1laiy+Z2u+E3eKKsrAyLRZ/Lly8HrgNseXkZFc4HBweJprWkpCQec/jwYf7jxw9xrCLKoRddtMO0fB1aTSY+Pp6Njo4ydvv2bdK6r1+/KndBy7Fjx0icE0tMTORbW1uYMcjs7CyJO3nyJCqcP3v2jGglJSWoUFyXLl0SepCnT5+iF6SxsZF9//4dj5zz69cvJoZ6PArS19eHnooYgNBjrLu7Gz0V8SyhpwEmIPETsMzMTGxjkAcPHpCY3RjMWVpyc3NJjFhhoKL0L2LhUJS4uDjT4OPHj5MYuxYbG8uPHDnCt7e3MWMQbayfT58+kfPnz59HJRSllPY5ef78uSJGg6GhIVJ3VVUVKpzfu3ePaKKboRKK0pC3b9+SAlevXlXEaAAXLtf9+vVrVDgXIxLRjAisfo2Gv0gSrl4YGMT8oPiAmPmZmFvwKJTAmls8A+ipzM3NoRc5tCPhqVOn0GPs0aNH6KnU1taip0+gITU1NeipPHnyBD0K7B3gLjoxMSIyMY9gJsYeP36MnsqtW7fQY6ynpwc9FVnTBboWMD09HeiLYPKk
5Ec7KDgx+fnLzs4m2srKCipK/yJmRuAvcvToUfRUJiYm0Isci4uL6KnPiljAKv7w8LDy6+fKlSvoGYANUhAFyF0QCVFREdtdLhpMYuxYVlYWZuJcdCuiyXsQ0c2JNjAwgEp4SEPE8oQkgK4UKS5evEjq+vDhAyr2uxUQEiUngNk4Usj1gPmZn58n56EHWIG+8hBkZGSgx5Q3HDCe7zVi6YGeyrlz59ALHclEr0DPBGxQgIaGBnJHYOe219y/f5/UIS89YD0ma2KvhIoxIQ359u0bSXTmzBlUKMnJySRuNyZjpBmhG2mWrKKiIiTGqcnbhv7+fqLduHEDFXNCnhHgwoUL6Kl8+fIFvb1HXnr09vaip3Lz5k30LIANInR1dZE7U19fj0oQ+e3LbmxychIzOu9WgG407KvlhPv370clcoyPj5M6CwsLUbGGbtdyu91kCb2+vs58Ph8eRQbbi0Qt2KAQtENkU1MTKpFB21XX1tZQsUbYhvz8+ZMkBmtvb0d175HrgZ2hXQyfqGvXrpEKomV1dXV4BdYxHRoOHDigW1kk7ePHj1i7dUwbAhQUFOhWGClzguVSb9684UVFRRH/C0EdTiDfEP9ndOeR/xFbDRGLuMAbEdjjDw4OomKfzs5O5a0K5IJPA2LeQsUhSgczYWxsLKQv+628vByjrHPo0CHdXGB2J0I/pg1ZXFzUrVC2yspKjDYnPT1dN4dsTjAttW/fPt3KtKZ946LHw4cPdctqLdzHHCMMGzI6OkoqgE8Lfl69ekW0nJwcVMIjx4NNTEwo5+FTQ0JCAtE2NzcVzSqGDWlpaSHJR0ZGUFGBj5CybsTGxgaJ1X7raGtrI7rdz+SGo5b//0/8xMbGoqeiPTYC3sjImOXS1m0KNkiXd+/ekbuUn5+PCuder5doHo8HlfDI8WALCwuohK7pVldXUbGG6cMuJzeyly9fYonwNDc365bVWl5eHpawjmlDtJ+O9ay4uBijzdE+V3rmBEulPn/+rFshGKyM7ZKSkqKbC2xpaQmj7GGr+Xfv3lW2pC6Xi58+fZp877MLvKk5ceIEF0sUfvDgQd7Y2IiKEzj/C47XG3d06OZRAAAAAElFTkSuQmCC # def computeCost(AL, Y): m = Y.shape[1] return np.squeeze( (1. / m) * (-np.dot(Y, np.log(AL).T) - np.dot(1 - Y, np.log(1 - AL).T))) # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Backward Propagation... 
# </span> # # ![backward][image_ref_o9kyay67] # # [image_ref_o9kyay67]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozMTozMiswODowMJU022gAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy81NS84Ny81NTg3Y2NjNjMzMGE2YjE3YTY2ZjNjOTcyZWYyZmQ5Zi5zdmf9LWc4AAAE7ElEQVRoQ9WaXSg8XxjHj5e1CGELKYs7Wldec6NEES7kBkVK3EnhakNKckchN5aSlBRXyAUXoiR5KS8bJRcUUV6KvDP/c2aeMXNmZ9nZOeO3/089v55nzpnnnO/OzHnzQxxDdnd3ufT0dC44OJgLDAzkYmNjub6+Pig1FmZCBgcHOYSQqlmtVqhlHEyEkF9dTYDcKisrobYx6BYyMjKi2nE1u729hbvYo0vI7OysaocdDgd3f3/Pv1Ly6y0tLXAne7wWsrq6SnVSNCJCZGZmhirLz8+HEvb44wY043Q6UV5eHkQS+FtBDQ0NECFkNpvBE3h+fgaPPZqFXF1dIZvNBpGE3W5Hra2tEAkohby9vYHHHk1CXl9fUVxcHEQS9fX1qLe3FyKJoKAg8AQ+Pj7AY48mIXiiA0+itLQUjY2NQUSDJ0XwBD4/P8Fjj8dC/Pz8wJPIyspC8/PzELmiFPL19QUeezwSEhERAZ5EcnIy2tzchEgdpRA8uIDHnl+FJCYmooeHB4gEIiMj0enpKUTu8RkhmZmZ6OzsDCIB8ord3d1B9DMBAQHgCfwTIWVlZWh7exsiCS3v+T8X0tjYiBYWFiCS0DoPmEwm8AT+dNTq6elBo6OjEEmQiVDZsd/4y3mEWmtNTEzwayKlHR4eQg1tXF9fU3nwZAol7PkWsra2RjUq2tLSEtTQDn4CVK7w8HAoYQ8v5OLigmpQNDxj85X0oMxpFHxmZWPEOjo6+Ap6watkKm9FRQWUsMWPbFPb2tpwGxJVVVVoamoKIn0cHR2h1NRUiATS0tIQ/qHQ+/s7P2kqjQzbok8GDLKKxq8lSkhIcBlAvsnNzaV+MTwJgkZ24IUl1YZe6+/vh8wSZLnxXSEqKgous4eMWPLO6LXQ0FDILOCPwdcFjNzBXV5eosLCQoj08/T0RK/Is7OzKaX4/QWNxnBwcMAVFxdzFouFatdbKyoq4vOioaEhl8Ly8nK+0BfByyeX/j4+PgrDb3R0tEshHsn4G32R6upqqq92u12a2eUFog0PD0Opb4G3EVQ/yTHTtxByoCYvFG1ubg5q+A54K0H10WazSUIITqeTqiDazs4O1PCOmpoabnp6GiL9KIUkJSXRQghkkSivJBreKUINbchzDAwMwFX9yPPGx8e7CiGMj49TFUUjo4MWTk5OqPsLCgqgRD/yvDExMepHpnV1dai7uxsiibCwMPA8Q3loQQ74jIBsv93u2Ts7O6lzXBG18y13KLe2ZJFoBOQBuRVCcDgcqKSkBCKJkJAQ8LRh1J79xyciQg4hMjIyIBJ4eXlBsbGxELlHeJUljBLy6xMR2dra4vcCcvB+HKWkpEDkGVqOkrTgsRACOahTfuzHx8dkPwORK8onooxZoUkIQTkKETY2NlS/I8JfCfHoG1Gi1pnFxUVUW1sLkYSyrvLkkRWan4gI2dQomZycRM3NzR
AJKA/kyB7cCLx6IgQy/JIdnxK8t0Ht7e0QuX7ceHsKHlv4J4//8Rp3i8yuri7u5uZGWJXKrjc1NcGd+pHnNZvN6mstLayvr1NJf7Lz83O4Sz/yvCaTSb8QwvLyMpVYzVj/jV2eG397bIQQVlZWqORyw/MP1GKHPL+/vz87IQRyhpyTk/PdAF5gGvbfNuRC+Lbg4v8O+SrcarV6N/z6Avv7+/wfZS0WC9rb20P/AX8B9QRNcdMPAAAAAElFTkSuQmCC # # <a id='backward'></a> def backwardPass(AL, Y, caches): grads = {} L = len(caches) Y = Y.reshape(AL.shape) # Initializing the backpropagation dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) linearCache, activationCache = caches[L - 1] AP, W, b = linearCache m = AP.shape[1] ############################################################## dZ = activate('Sigmoid', True, activationCache, dAL).compute() grads["dA" + str(L - 1)] = np.dot(W.T, dZ) grads["dW" + str(L)] = 1. / m * np.dot(dZ, AP.T) grads["db" + str(L)] = 1. / m * np.sum(dZ, axis=1, keepdims=True) for l in reversed(range(L - 1)): linearCache, activationCache = caches[l] AP, W, b = linearCache m = AP.shape[1] ########################################################## dZ = activate('ReLU', True, activationCache, grads["dA" + str(l + 1)]).compute() grads["dA" + str(l)] = np.dot(W.T, dZ) grads["dW" + str(l + 1)] = 1. / m * np.dot(dZ, AP.T) grads["db" + str(l + 1)] = 1. / m * np.sum(dZ, axis=1, keepdims=True) return grads # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Weights Updating... 
# </span> # # ![Update][image_ref_95hj81z6] # # [image_ref_95hj81z6]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozNDo0MyswODowMN+vEoEAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy9hMy9mNy9hM2Y3M2FlODExYmYwMTg1ZDBiMWQ4ZmMwNzY4ZTExOC5zdme1U+zXAAAFH0lEQVRoQ9WZx0ssTRDA28UcMCdEUQ/mcNKToJgOHhQFFcGbFw/+AWK4CIrhJCh4FFRMqKAnURADevCkIKKIijmgYsbYr6u3/HZ7p3d3fJ8z8n5QbPV09U7VTHdP1Qyh/yjj4+PU1dWVRkRE0Ovra+oAB8k/iIODA2qExMfHEwPqmtPa2spPCA6ABAQEkKqqKnJ6eooW6rm7u0PNyOPjI9F8anV1dcEdtyk5OTlorY6joyNhfGRkJNU0kIqKCuGEtsTR0RFH2Wd7e1sYGx0dTTWbWs3NzWRwcBBb9nl/fydBQUHYso3l1PLw8GDhaMDNzY1wxUCSkpLowcEBWlD68vJC6+vrFXYtLS1oYZ2+vj5hTFFRkTZTq7i4WDhRamoq9igZGBgQbEFsMT09rbAfGhqi5Pj4mPr7+1ODwaAw+ClhOxO6IcfFxUU6Tq0AJDExUdr5k2KP/Px86Tg1Ag9DwPDx8cHav8vn5ydq6vH29ibPz8/E19fXeGBvb48/6pmqmVxcXPCrZg1nZ2fpOEsJDg7mC3t+fh5HmtBksefl5QkOpKenY4+SyclJwRbkb9AkENn2m5aWxo9/waY07ejoUNg1NDSgxffQJBCgsbFR4aQ98fLywtHfR7NAgJKSEqnD1uT/QGD7YjsA/6OpqSk8/HO0tbUpHLYUW2tILSQhIUH4U62AuQ9F0Nd54CFYVlbGE8CfgMC8NA/k/Pwcu6yztbVFKysreUYAYyBzzcjI4DnQb0ECAwOFQPb397FLTkpKimAvk6WlJbTWD2J+u0E2NzexS+Tt7U2wsyc9PT04Uh8MUHKa8/T0hJqIk5MTauqorq4mCwsL2NIeQ2xsLKpGNjY2UDNRWlqKmon29nYCeRq7GPx3eXnZlPcgmZmZqOkAq+IU04JdSeP9Qiz7d3Z2sEeJu7u7YAvbrx7w/db8xPYEFrstRkdHpeO0krCwMJ6U8kAuLy+lRjKpqanhDlsDtm/ZOC0lJibG+PIBFvz9/T1h0wKaNmEpP2py2DMFNf2ADeq/tyienp78RReriUlhYSFvy1hbW0NNjr1+LYCNxm5Ocnt7q7iVUOdbIygoSLCtq6vDHm1RlVzl5uYKzoEMDw9jrxF4+xceHq6w0wvVZ7J0UI2MjIzgaO1RHcjV1ZXUWWtSW1uLI/XhW/ce3g76+PhIHTeX/v5+HKEdX7W+n58fZbvW9wL5YmZmhmZnZwvOR0VF0ebmZrTQHvNzx8XF/V0gPwlcFLbdU5ancafYM42Wl5dTto2jhZK7uzshEDc3t98LBOoellELDlkKvAWVAdu/uR1cBN2+WJkzNzdHIiMjCatx8IgcyMTh69bDwwMeMWJZakBGonsgh4eHhK0vbKmDleOoGbH8PgLlg+6BsIoUNRNTU1P87rBZw3O+7u5u7DFhXttsb2+jZgTurq5rZGJiQpjbINaATxGWtru7u5RN
N8qmm3Actnvy+vpKQ0JChA69pLOzE92Wo/YFH2BITk4mZ2dnrK0/GRkZqMlRUyqfnJzwX4PlwtETg8H2EoUdyxrQxwpCEhoaytuGpaUlrvwG6+vrqMlZXV1FzURWVhZhmTf/OCS8AeITTEPYCXmOBmuxqalJmNvwQLSFuS2IrZceuu5agKVzkPSxeY69RljFp7CDutwWugcyOzurcFKNsDIc/0GO7oEAvb29UmetyeLiIo60zq8EAqysrEidthRb68KcXwvkC3gowocenoozx2HNFBQU0LGxMbRQA6V/AEiJ+CBZEM8AAAAAAElFTkSuQmCC # def update_parameters(parameters, grads, learning_rate): L = len(parameters) // 2 for l in range(L): parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - \ learning_rate * grads["dW" + str(l+1)] parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - \ learning_rate * grads["db" + str(l+1)] return parameters # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Prediction... # </span> # # ![Prediction][image_ref_1x45se6c] # # [image_ref_1x45se6c]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAABdwnLpRPAAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAZiS0dEAP8A/wD/oL2nkwAAACV0RVh0ZGF0ZTpjcmVhdGUAMjAxNy0wNi0xMlQwMzozMjoyMyswODowMBTea0EAAAAldEVYdGRhdGU6bW9kaWZ5ADIwMTYtMDQtMjNUMDA6NDA6MTgrMDg6MDDnYYGwAAAAVHRFWHRzdmc6YmFzZS11cmkAZmlsZTovLy9ob21lL2RiL3N2Z19pbmZvL3N2Zy9hOC8wOS9hODA5YWU1ODc2MDFhNDViZDYxMWVmNzBjZWEzOGUxYi5zdmc52mzyAAAFXElEQVRoQ9WaXUhUQRSAx9RVCxU1jSRSW1ZQM5QNUgwl6CFR9EFLX0RR0AfRfFBQH3woeonopcA3QVIJRMSCSAoUwR/8BTUfUuyl0EI0NS0tnebMnnXv3nt213/vfnBqfs7MnDN3ZvbOuXpwATsGZmdnWX9/P5uYmGCfPn2S+R8/frCtrS0GQ3h4eDCDwcDOnz/PLl26xIxGI7t+/Tq7desWu3v3LvP398eeDgk4clhevnzJExMTYSKORUpKSriYAOz9YBzYkeHhYZ6UlEQaclwSHBzMu7q6cMT9se+lNTY2xu7du8eWlpawxDG+vr7MZDIxYRDz8/OTy2p7e5utrq6yb9++sYWFBdR0zoULF5iYOBYbG4slTpDuuECsY82sKSU8PJw3NDRwsT+whWtWVlZ4W1sbf/DgAdmnUu7fv4+tHOPUkd7eXrJjq5SXl/Nfv36h9tGASUhLSyPHscrGxgZqa3HoSH19PdkZSFlZGWrZ8/nzZ/748WOemprKvb29ybYgly9flrMMT0TN8vIyv3HjBtkOZHJyEjXtIR0pKCggOwERRypqWfj79y+vrKwkdfcr7e3t2JuN7u5uUhdE7FfUsqFxpK6ujmwcExODGjays7NJ3cOII65cuULqw5NTYtfDyMgI2Sg+Ph41LH
R0dJB6SoE2+fn58mlVVVXxwsJCnpKSQuq62syODhsldjlKWRylWGvB2TK6c+eO/J1xBWzapqYmedK9f/8eS50Dvy3q8YqLi7FW4cjTp081iiDKk6K6uprUEee9Zu+cBNTY1lNzzxFKqaamBms5HxoaInXi4uJQ4+R5/vy5ZvysrCxZJx0ZHR3VKIAoMZvNmvrAwECsPT3UNoAA50SCtba2wn925OXlYcpCbm4upmz8/PkTU6eH2FeYsvHmzRuLO9Rsv337VnqqpK+vj1dUVPBnz55hyekzMzOjsRV+oOVLY0BAAFtfXxdlNr5//87CwsIwpy/gJVTJzZs3mXREXQGIYkzpD7W9oaGhTO4RLy8vWaBkd3cXU/rn9+/fFkeuXr0qC5R8/foVU/oHrsnSEVhjauD+rUd6enowZePatWsWRzIyMmSBkubmZkzpC8qu5ORky2aHjLtseMrOgYEBaazk9u3be+eyVcSPINbqA7i0qW0EAfYcmZubI5WoS8xZQdn36NEjS538F3F0ZxZHMWqcHSEhIaRtVuwcAShlkLPEURwNXnataCwcHx8nG4Hs7Oyg1umRkJBA2gJXciXkVL969YpsDHJaLC0tcU9PT9IG6hByaFlLSwvZSWRkJGqcHA8fPiTHBnEUinI6xR8/fiQ7+/DhA2ocH3ClhiAFNZ5VXr9+jdpaXK6V2tpaTYfp6elYezSmpqbk8eko5GOViIgIvrW1ha1oXAax//z5IwPRSqKiotj8/DzmaJ48eSID0PB9RBwS7N+/f0zMugyCf/nyBbVcIy54LDMzE3NOkO44YXp6WjNDJpMJa2kgJqxucxARb+MH/qzg0hGIMKoHysnJwVoao9GoaeNKgoKC5DJWRxD3i1NH3r17Rw4KrzPO2Nzc5BcvXiTbghgMBi6uDvL+39nZia2OhlNHKCMOs9HFPpPB7pPEoSOlpaWkI3qFtAzCn5QTL168QA39QR6/4k2TiU2HOQsQMoJvgHpFXnWViFnXOAHAt3NdI5+LAihSC3zB0jt2SwuiKfAZWo1CRbfsLa3u7m7SCSh3B5xGUcxmMxO3MMzpG/lEioqKZEaNuzghWVtb29vUSmlsbISH5TbAK7LGCfiTDHfDw8fHBy4twn4bkIe/rXInzi0uLrL4+HiZiY6OZoODg27nBGOM/QczS1SCFRuZBwAAAABJRU5ErkJggg== # def predict(X, y, parameters): m = X.shape[1] n = len(parameters) // 2 p = np.zeros((1, m)) probas, caches = forwardPass(X, parameters) for i in range(0, probas.shape[1]): p[0, i] = 1 if probas[0, i] > 0.5 else 0 print("Accuracy: " + str(np.sum((p == y) / m))) return p # <span style = "font-family: Comic Sans MS, cursive, sans-serif; # font-size: 1.8em; # color: black; # background-color: white"> # Modelization... 
# </span>
#
# ![Modelization][image_ref_hdzkittd]
#
# [image_ref_hdzkittd]: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAABGdBTUEAALGPC/xhBQAAAAFzUkdCAK7OHOkAAAAgY0hSTQAAeiYAAICEAAD6AAAAgOgAAHUwAADqYAAAOpgAAB<KEY>
#
# <a id='model'></a>
def L_layer_model(X, Y, layersDims, learning_rate, num_iterations, print_cost=False):
    """Train an L-layer neural network by full-batch gradient descent.

    X : input data — assumes shape (features, examples); TODO confirm
    Y : labels aligned with X's example axis
    layersDims : list of layer sizes, input layer first
    learning_rate : gradient-descent step size
    num_iterations : number of full-batch update steps
    print_cost : when True, log the cost every 100 iterations and record it
        for the cost-vs-iteration plot shown at the end

    Returns the trained parameter dictionary.
    """
    np.random.seed(1)  # reproducible weight initialisation
    costs = []
    parameters = init(layersDims).compute()

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        AL, caches = forwardPass(X, parameters)
        cost = computeCost(AL, Y)
        grads = backwardPass(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)

        # FIX: the original evaluated the identical condition
        # `print_cost and i % 100 == 0` twice in a row; one check now both
        # logs and records the cost.
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()
    return parameters


parameters = L_layer_model(trainX, trainL, learning_rate=0.0075,
                           layersDims=[12288, 20, 7, 5, 1],
                           num_iterations=2500, print_cost=True)

pred_train = predict(trainX, trainL, parameters)

pred_test = predict(testX, testL, parameters)
dnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Generating code for a model: # + import pytorch_composer from pytorch_composer.datasets import CIFAR10 from pytorch_composer.loops import Loop # A random sequence of neural network layers. Any positive integer shoud be a valid dimension arguement: sequence = [ ["Conv2d", 6], ["MaxPool2d", 2], ["Linear", 16], ["Relu"], ["MaxPool2d", 2], ["Linear",43], ["RNN",12], ["MaxPool2d", 2], ["Relu"], ["Flat"], ["Linear",38], ] dataset = pytorch_composer.datasets.CIFAR10() model = pytorch_composer.Model(sequence, dataset) loop = Loop(model) training_code = pytorch_composer.Code([dataset,model,loop]) # The code can be saved in a text file with: # training_code.save() training_code # - # Using the generated code: training_code() # The settings can be adjusted before or after the code is created. # Reviewing the settings: training_code.settings # + # Changing a single setting: training_code["batch_size"] = 16 # Changing multiple settings at once: training_code.update({"lr":0.0009, "print_every":3000, 'model_name': 'Net2'}) training_code # - # Using the new model: training_code()
example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Errors and Exception Handling # # In this lecture we will learn about Errors and Exception Handling in Python. You've definitely already encountered errors by this point in the course. For example: print('Hello) # Note how we get a SyntaxError, with the further description that it was an EOL (End of Line Error) while scanning the string literal. This is specific enough for us to see that we forgot a single quote at the end of the line. Understanding these various error types will help you debug your code much faster. # # This type of error and description is known as an Exception. Even if a statement or expression is syntactically correct, it may cause an error when an attempt is made to execute it. Errors detected during execution are called exceptions and are not unconditionally fatal. # # You can check out the full list of built-in exceptions [here](https://docs.python.org/3/library/exceptions.html). Now let's learn how to handle errors and exceptions in our own code. # ## try and except # # The basic terminology and syntax used to handle errors in Python are the <code>try</code> and <code>except</code> statements. The code which can cause an exception to occur is put in the <code>try</code> block and the handling of the exception is then implemented in the <code>except</code> block of code. The syntax follows: # # try: # You do your operations here... # ... # except ExceptionI: # If there is ExceptionI, then execute this block. # except ExceptionII: # If there is ExceptionII, then execute this block. # ... # else: # If there is no exception then execute this block. 
# # We can also just check for any exception with just using <code>except:</code> To get a better understanding of all this let's check out an example: We will look at some code that opens and writes a file: try: f = open('testfile','w') f.write('Test write this') except IOError: # This will only check for an IOError exception and then execute this print statement print("Error: Could not find file or read data") else: print("Content written successfully") f.close() # Now let's see what would happen if we did not have write permission (opening only with 'r'): try: f = open('testfile','r') f.write('Test write this') except IOError: # This will only check for an IOError exception and then execute this print statement print("Error: Could not find file or read data") else: print("Content written successfully") f.close() # Great! Notice how we only printed a statement! The code still ran and we were able to continue doing actions and running code blocks. This is extremely useful when you have to account for possible input errors in your code. You can be prepared for the error and keep running code, instead of your code just breaking as we saw above. # # We could have also just said <code>except:</code> if we weren't sure what exception would occur. For example: try: f = open('testfile','r') f.write('Test write this') except: # This will check for any exception and then execute this print statement print("Error: Could not find file or read data") else: print("Content written successfully") f.close() # Great! Now we don't actually need to memorize that list of exception types! Now what if we kept wanting to run code after the exception occurred? This is where <code>finally</code> comes in. # ## finally # The <code>finally:</code> block of code will always be run regardless if there was an exception in the <code>try</code> code block. The syntax is: # # try: # Code block here # ... # Due to any exception, this code may be skipped! 
# finally: # This code block would always be executed. # # For example: try: f = open("testfile", "w") f.write("Test write statement") f.close() finally: print("Always execute finally code blocks") # We can use this in conjunction with <code>except</code>. Let's see a new example that will take into account a user providing the wrong input: def askint(): try: val = int(input("Please enter an integer: ")) except: print("Looks like you did not enter an integer!") finally: print("Finally, I executed!") print(val) askint() askint() # Notice how we got an error when trying to print val (because it was never properly assigned). Let's remedy this by asking the user and checking to make sure the input type is an integer: def askint(): try: val = int(input("Please enter an integer: ")) except: print("Looks like you did not enter an integer!") val = int(input("Try again-Please enter an integer: ")) finally: print("Finally, I executed!") print(val) askint() # Hmmm...that only did one check. How can we continually keep checking? We can use a while loop! def askint(): while True: try: val = int(input("Please enter an integer: ")) except: print("Looks like you did not enter an integer!") continue else: print("Yep that's an integer!") break finally: print("Finally, I executed!") print(val) askint() # So why did our function print "Finally, I executed!" after each trial, yet it never printed `val` itself? This is because with a try/except/finally clause, any <code>continue</code> or <code>break</code> statements are reserved until *after* the try clause is completed. This means that even though a successful input of **3** brought us to the <code>else:</code> block, and a <code>break</code> statement was thrown, the try clause continued through to <code>finally:</code> before breaking out of the while loop. And since <code>print(val)</code> was outside the try clause, the <code>break</code> statement prevented it from running. 
# # Let's make one final adjustment: def askint(): while True: try: val = int(input("Please enter an integer: ")) except: print("Looks like you did not enter an integer!") continue else: print("Yep that's an integer!") print(val) break finally: print("Finally, I executed!") askint() # **Great! Now you know how to handle errors and exceptions in Python with the try, except, else, and finally notation!**
07-Errors and Exception Handling/01-Errors and Exceptions Handling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, name: python3}
# ---

# ## installing projects
#
# needed only once (install cells kept commented out, as in the original notebook)

# #!git clone https://github.com/JelleAalbers/flamedisx.git   ## Install flamedisx
# #%cd flamedisx
# #!git checkout master
# #!git pull origin master
# #!python setup.py develop
# #%cd ..

# #!pip install -U tensorflow-gpu==2.0.0-rc0
# #!pip install -U tensorflow_probability==0.8.0-rc0

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd

import flamedisx as fd
import tensorflow as tf

# Mount Google Drive (Colab-only; gives access to the CSV loaded below).
from google.colab import drive
drive.mount('/content/drive')

# ## Loading data
#
# csv file was previously prepared by straxen.
# FIX: `pd.DataFrame.from_csv` was deprecated in pandas 0.21 and removed in
# pandas 1.0. `pd.read_csv` with index_col=0 and parse_dates=True reproduces
# from_csv's defaults and is the supported replacement.
df = pd.read_csv("./data_ar37.csv", index_col=0, parse_dates=True)
# #### WARNING!!! VERY HACKY, don't do this in real life!
# ### changing some of the variables, which are not available
# Alias straxen column names to the names flamedisx expects, then apply
# fiducial-volume and corrected-area cuts to isolate the calibration peak.

df['s1'] = df['s1_area']
df['s2'] = df['s2_area']
df['x_observed'] = df['x_s2']
df['y_observed'] = df['y_s2']

# Fiducial cut on depth/radius (presumably cm — TODO confirm units) and cuts
# on corrected S1/S2 areas selecting the event population of interest.
df = df[(df['z'] < -20.0) & (df['z'] > -90.0) & (df['r'] < 40.0)]
df = df[(df['cs1'] < 5e2 ) & (df['cs1'] > 2e2)]
df = df[(df['cs2'] > 1e4 ) & (df['cs2'] < 5e4)]

# ## Writing my Krypton 83 source generator

o = tf.newaxis


class Kr83Source(fd.x1t_sr1.SR1ERSource):
    # Near-monoenergetic (~42.5 keV) source built on flamedisx's SR1 ER model,
    # with custom acceptances and detection efficiencies.

    @staticmethod
    def energy_spectrum(drift_time):
        """Return (energies in keV, rate at these energies),
        both (n_events, n_energies) tensors.
        """
        # TODO: doesn't depend on drift_time...
        n_evts = drift_time.shape[0]
        # Two energy points spanning 42.45–42.55 keV with flat (unit) rates,
        # tiled once per event.
        return (fd.repeat(tf.cast(tf.linspace(42.45, 42.55, 2)[o, :],
                                  dtype=fd.float_type()), n_evts, axis=0),
                fd.repeat(tf.ones(2, dtype=fd.float_type())[o, :],
                          n_evts, axis=0))

    @staticmethod
    def s1_acceptance(s1):
        # Box acceptance: 1 for s1 in [0, 600], 0 outside.
        return tf.where((s1 < 0) | (s1 > 600),
                        tf.zeros_like(s1, dtype=fd.float_type()),
                        tf.ones_like(s1, dtype=fd.float_type()))

    @staticmethod
    def s2_acceptance(s2):
        # Box acceptance: 1 for s2 in [3000, 30000], 0 outside.
        return tf.where((s2 < 3000) | (s2 > 30000),
                        tf.zeros_like(s2, dtype=fd.float_type()),
                        tf.ones_like(s2, dtype=fd.float_type()))

    @staticmethod
    def photon_detection_eff(s1_relative_ly, g1 =0.1426):
        # g1 = 0.142 from paper
        # NOTE(review): the 1/(1 + 0.219) factor looks like a double-photoelectron
        # emission correction — TODO confirm.
        mean_eff= g1 / (1. + 0.219)
        return mean_eff * s1_relative_ly

    @staticmethod
    def electron_detection_eff(drift_time, *, elife=700e3, extraction_eff=0.96):
        # TODO: include function for elife time dependency
        # Exponential electron-lifetime attenuation; drift_time and elife share
        # the same unit (presumably ns — TODO confirm).
        return extraction_eff * tf.exp(-drift_time / elife)


# ### creating my new source
source = Kr83Source( data = df)

# ### Producing simulations
# g2 override: 11.55 / (1 - 0.64) * 0.96.
dfsim = source.simulate(10000,data = df, **dict(g2=11.55/(1 - 0.64)*0.96 ) )

# ### Let's plot drift time vs s2 and check the electron livetime
plt.figure(figsize=(12,9))
plt.scatter( df['drift_time'], df['s2'], s= 3, c="b", label = "Data")
plt.scatter( dfsim['drift_time'], dfsim['s2'], s= 3, c= "orange", label = "flamedisx simulation")
plt.xlim(1e5, 7e5);plt.ylim(0, 30000);
plt.legend(fontsize=16)
plt.xlabel("Drift time [ ns ] ", fontsize = 16)
plt.ylabel("s2", fontsize = 16)

# s1 vs s2 for the data, colour-coded by depth z.
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = "in"
plt.rcParams['ytick.direction'] = "in"
fig = plt.figure(figsize=(12,8), facecolor="w")
ax = fig.add_subplot(111)
plt.scatter(df['s1'], df['s2'], s=1, c = df['z'], vmin = -100, vmax = 0)
plt.xlim(1e2,1e3)
plt.ylim(3e3,5e4)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("s1", fontsize = 16)
plt.ylabel("s2", fontsize = 16)
plt.text(0.78, 0.93, "Data (SR2)", transform = ax.transAxes , fontsize = 20)

# Same plot for the simulation.
# NOTE(review): `ax` below still refers to the axes of the PREVIOUS figure, so
# the text transform comes from that old axes object — verify the label lands
# where intended on this figure.
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = "in"
plt.rcParams['ytick.direction'] = "in"
plt.figure(figsize=(12,8), facecolor="w")
plt.scatter(dfsim['s1'], dfsim['s2'], s=1, c = dfsim['z'], vmin = -100, vmax = 0)
plt.xlim(1e2,1e3)
plt.ylim(3e3,5e4)
plt.xscale("log")
plt.yscale("log")
plt.colorbar()
plt.xlabel("s1", fontsize = 16)
plt.ylabel("s2", fontsize = 16)
plt.text(0.78, 0.93, "flamedisx sim", transform = ax.transAxes , fontsize = 20)

# (empty notebook cell)
projects/flamedisx_Terliuk_Ar37sim_sep2019.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation: {extension: .py, format_name: light, format_version: '1.5', jupytext_version: 1.14.4}
#   kernelspec: {display_name: Python 3, language: python, name: python3}
# ---

# DeepLabV3+ (MobileNetV2 backbone) semantic-segmentation training on the IDD
# dataset. Shell/IPython magics from the notebook are kept as comments.

# !nvidia-smi

# !pip freeze | grep keras
# !pip freeze | grep Keras
# !pip freeze | grep tensorflow
# !pip freeze | grep h5py
# !pip freeze | grep opencv
# !pip freeze | grep pandas

# # !pip install q tensorflow==2.1
# # !pip install q keras==2.3.1
# # !pip install git+https://github.com/qubvel/segmentation_models
# for str decode error ... run it and restart runtime
# # !pip uninstall h5py -y
# !pip install h5py==2.10.0

# # !tensorboard --logdir logs --host 0.0.0.0
# # !pip install -U tensorboard-plugin-profile

# NOTE(review): `i` and `l` below come from the (commented-out) IPython shell
# magics `i = !ls ...` / `l = !ls ...`; outside IPython these lines fail.
# i = !ls Dataset/IDD/images/
# l = !ls Dataset/IDD/masks/
len(i), len(l)

l[-5:], i[-5:]

import cv2
from glob import glob
import os
import numpy as np

# Mask label values:
#   100 = background
#   101 = road
#   102 = obstacle (person, rider, car, truck, bus, train, motorcycle, bicycle)
l = glob('Dataset/IDD/masks/*')
m = cv2.imread(l[2970],0)
m.shape
np.unique(m)

import os
len(os.listdir('Dataset/IDD/Test/images/')), len(os.listdir('Dataset/IDD/masks/'))

import numpy as np
import cv2
import os
from glob import glob
from sklearn.model_selection import train_test_split
import tensorflow as tf


def load_dataset(dataset_path):
    # Pair up image/mask paths (sorted so indices correspond) and split off a
    # validation fraction. Despite the names, the function returns
    # (train_images, train_masks, val_images, val_masks).
    images = sorted(glob(os.path.join(dataset_path, "images/*")))
    masks = sorted(glob(os.path.join(dataset_path, "masks/*")))
    train_x, test_x, train_y, test_y = train_test_split(images,masks, test_size=0.1702, random_state=168, shuffle=True)
    return train_x, train_y, test_x, test_y


train_x, train_y, val_x, val_y = load_dataset('Dataset/IDD')
print(len(train_x), len(train_y), len(val_x), len(val_y))

# ### TF data API
train_X_y_paths = list(zip(train_x, train_y))
val_X_y_paths = list(zip(val_x, val_y))

IMG_SIZE = 512


def parse_x_y(img_path,mask_path):
    # Load one (image, mask) pair from disk as tensors.
    image = tf.io.read_file(img_path)
    image = tf.image.decode_png(image, channels=3)
    image = tf.image.convert_image_dtype(image, tf.uint8)
    mask = tf.io.read_file(mask_path)
    mask = tf.image.decode_png(mask, channels=1)
    return {'image': image, 'segmentation_mask': mask}


@tf.function
def normalize(input_image: tf.Tensor, input_mask: tf.Tensor) -> tuple:
    # Scale image to [0, 1]; mask passes through unchanged.
    input_image = tf.cast(input_image, tf.float32) / 255.0
    return input_image, input_mask


@tf.function
def load_image_train(datapoint: dict) -> tuple:
    # Resize (nearest-neighbour for the mask, to keep labels discrete),
    # normalize, and one-hot encode the mask into 3 classes.
    input_image = tf.image.resize(datapoint['image'], (IMG_SIZE, IMG_SIZE))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (IMG_SIZE, IMG_SIZE),method='nearest')
    # Random horizontal-flip augmentation, currently disabled:
    # if tf.random.uniform(()) > 0.5:
    #     input_image = tf.image.flip_left_right(input_image)
    #     input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalize(input_image, input_mask)
    input_mask = tf.one_hot(input_mask, 3)
    input_mask = tf.reshape(input_mask, (IMG_SIZE, IMG_SIZE, 3))
    return input_image, input_mask


AUTOTUNE = tf.data.experimental.AUTOTUNE
SEED = 42
BATCH_SIZE = 8
BUFFER_SIZE = 2*BATCH_SIZE

train_dataset = tf.data.Dataset.from_tensor_slices((train_x,train_y))
train_dataset = train_dataset.map(parse_x_y)
val_dataset = tf.data.Dataset.from_tensor_slices((val_x,val_y))
val_dataset =val_dataset.map(parse_x_y)

dataset = {"train": train_dataset, "val": val_dataset}

# Shuffle/batch/prefetch both pipelines.
dataset['train'] = dataset['train'].map(
    load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE
).shuffle(buffer_size=BUFFER_SIZE, seed=SEED).batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)
dataset['val'] = dataset['val'].map(
    load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE
).shuffle(buffer_size=BUFFER_SIZE, seed=SEED).batch(BATCH_SIZE).prefetch(buffer_size=AUTOTUNE)

# Sanity-check one batch from each split.
for image,label in dataset['train'].take(1):
    print("Train image: ",image.shape)
    print("Train label: ",label.shape,"\n\tunique values", np.unique(label[0]))
for image,label in dataset['val'].take(1):
    print("Val image: ",image.shape)
    print("Val label: ",label.shape,"\n\tunique values", np.unique(label[0]))

import matplotlib.pyplot as plt


def display_sample(display_list):
    """Show side-by-side an input image, the ground truth and the prediction.
    """
    plt.figure(figsize=(7, 7))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()


# Visualize the last of 5 sampled batches.
# NOTE(review): line grouping reconstructed from a collapsed source — confirm
# whether the display lines were inside or after the loop in the original.
i=0
for image, mask in dataset['train'].take(5):
    i=i+1
    # print(i)
    sample_image, sample_mask = image, mask
t = np.argmax(sample_mask[0],axis=-1)
t = tf.expand_dims(t,axis=-1)
display_sample([sample_image[0],t])

# %env SM_FRAMEWORK=tf.keras

# # !pip install keras-segmentation

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers


def convolution_block(
    block_input,
    num_filters=256,
    kernel_size=3,
    dilation_rate=1,
    padding="same",
    use_bias=False,
):
    # SeparableConv -> BatchNorm -> ReLU. NOTE(review): the `padding` parameter
    # is accepted but the layer hard-codes padding="same".
    x = layers.SeparableConv2D(
        num_filters,
        kernel_size=kernel_size,
        dilation_rate=dilation_rate,
        padding="same",
        use_bias=use_bias,
        kernel_initializer=keras.initializers.HeNormal(),
    )(block_input)
    x = layers.BatchNormalization()(x)
    return tf.nn.relu(x)


def DilatedSpatialPyramidPooling(dspp_input):
    # ASPP head: image-level pooling branch plus dilated conv branches
    # (rates 1, 6, 12, 18), concatenated and projected with a 1x1 conv.
    dims = dspp_input.shape
    x = layers.AveragePooling2D(pool_size=(dims[-3], dims[-2]))(dspp_input)
    x = convolution_block(x, kernel_size=1, use_bias=True)
    out_pool = layers.UpSampling2D(
        size=(dims[-3] // x.shape[1], dims[-2] // x.shape[2]),
        interpolation="bilinear",
    )(x)
    out_1 = convolution_block(dspp_input, kernel_size=1, dilation_rate=1)
    out_6 = convolution_block(dspp_input, kernel_size=3, dilation_rate=6)
    out_12 = convolution_block(dspp_input, kernel_size=3, dilation_rate=12)
    out_18 = convolution_block(dspp_input, kernel_size=3, dilation_rate=18)
    x = layers.Concatenate(axis=-1)([out_pool, out_1, out_6, out_12, out_18])
    output = convolution_block(x, kernel_size=1)
    return output


from tensorflow.keras.layers import Conv2D, Activation, BatchNormalization
from tensorflow.keras.layers import UpSampling2D, Input, Concatenate,SeparableConv2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications import MobileNetV2


def DeeplabV3Plus(image_size, num_classes):
    # DeepLabV3+ decoder over a frozen-weights-file MobileNetV2 (alpha=0.35)
    # encoder: ASPP on block_13 features, then two 4x-upsampling decoder stages
    # with skip connections, ending in a per-pixel softmax.
    inputs = keras.Input(shape=(image_size, image_size, 3),name="input_image")
    encoder = MobileNetV2(
        input_tensor=inputs,
        weights="pretrained_weights/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_224_no_top.h5",
        include_top=False, alpha=0.35)
    skip_connection_names = ["input_image", "block_3_expand_relu"]
    encoder_output = encoder.get_layer("block_13_expand_relu").output
    x = DilatedSpatialPyramidPooling(encoder_output)
    t = encoder.get_layer("input_image").output
    f = [16, 32]
    for i in range(1, len(skip_connection_names)+1, 1):
        x_skip = encoder.get_layer(skip_connection_names[-i]).output
        x = UpSampling2D((4, 4),interpolation="bilinear")(x)
        print(x.shape)
        x = Concatenate()([x, x_skip])
        x = SeparableConv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        x = SeparableConv2D(f[-i], (3, 3), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
    x = Conv2D(num_classes, (1, 1), padding="same")(x)
    x = Activation("softmax")(x)
    model = Model(inputs, x)
    return model


model = DeeplabV3Plus(image_size=IMG_SIZE, num_classes=3)
model.summary()

# from tensorflow.keras.applications import MobileNetV2
# encoder = MobileNetV2(input_shape=[512,512,3],
#     weights="pretrained_weights/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_224_no_top.h5",
#     include_top=False, alpha=0.35)
# # encoder.load_weights("pretrained_weights/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_224_no_top.h5")

from segmentation_models.losses import cce_jaccard_loss, dice_loss, JaccardLoss
from segmentation_models.metrics import iou_score, f1_score, precision, recall

# Combined loss: dice + categorical-crossentropy-jaccard.
ls = dice_loss + cce_jaccard_loss
metrics = [precision, recall, f1_score, iou_score]

# from tensorflow.keras.models import load_model
# model = load_model('IDD_mobilenetV2_edge/ckpt_path/350.h5',
#     custom_objects={'dice_loss_plus_categorical_crossentropy_plus_jaccard_loss':ls,
#                     'precision':precision, 'recall':recall, 'f1-score':f1_score, 'iou_score':iou_score})

import os, time, keras
# %env SM_FRAMEWORK=tf.keras
import numpy as np
import tensorflow as tf
from segmentation_models.losses import cce_jaccard_loss, dice_loss, JaccardLoss
from segmentation_models.metrics import iou_score, f1_score, precision, recall
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping

""" Hyperparamaters """
BATCH_SIZE = 8
epochs = 1000
base_dir = 'RESULTS/IDD_Dv3p_mobilenetV2_alpha0.35_upsampleby4_custom_bs8'
# NOTE(review): grouping reconstructed from a collapsed source — the ckpt_path
# mkdir is assumed to be inside the if; confirm against the original notebook.
if not os.path.exists(base_dir):
    os.mkdir(base_dir)
    os.mkdir(f"{base_dir}/ckpt_path")
csv_path = f"{base_dir}/history.csv"

""" callbacks """
root_logdir = os.path.join(os.curdir, f"{base_dir}/logs","fit","")


def get_run_logdir():
    # One timestamped TensorBoard log dir per run.
    run_id = time.strftime("run_%Y_%m_%d-%H_%M_%S")
    return os.path.join(root_logdir, run_id)


run_logdir = get_run_logdir()
# NOTE(review): this uses standalone `keras` callbacks with a tf.keras model —
# works only when the installed keras is version-matched; confirm.
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir, histogram_freq=1,profile_batch='10,15')

# Save every epoch's full model (not best-only).
checkpoint_filepath = f'{base_dir}/'+'ckpt_path/{epoch}.h5'
model_checkpoint_callback = ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=False,
    # monitor='val_iou_score',
    # mode='max',
    verbose = 1,
    period = 1,
    save_best_only=False
)
callbacks = [
    model_checkpoint_callback,
    ReduceLROnPlateau(monitor="val_loss", patience=5, factor=0.1, verbose=1),
    CSVLogger(csv_path),
    # EarlyStopping(monitor="val_loss", patience=10),
    tensorboard_cb
]

""" steps per epochs """
train_steps = len(train_x)//BATCH_SIZE
if len(train_x) % BATCH_SIZE != 0:
    train_steps += 1
test_steps = len(val_x)//BATCH_SIZE
if len(val_x) % BATCH_SIZE != 0:
    test_steps += 1
print("train_steps", train_steps, "test_steps",test_steps)

# """ Model training """
# Optional encoder-freezing snippet, kept disabled:
# for layer in model.layers:
#     if layer.name == "global_average_pooling2d":
#         break
#     else:
#         layer.trainable = False
# for layer in model.layers:
#     print(layer.name,layer.trainable)

model.compile(
    loss=ls,
    optimizer= "adam", #tf.keras.optimizers.Adam(lr),
    metrics=metrics
)
# model.summary()

# pretrain model decoder
history = model.fit(
    dataset["train"],
    validation_data=dataset["val"],
    epochs=1000,
    initial_epoch = 0,
    steps_per_epoch=train_steps,
    validation_steps=test_steps,
    callbacks=callbacks
)
Scripts/TrainingScripts/IDD_Dv3p_mobilenetV2_upsampledby4_DSC_alpha0.35_bs8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="cK6Pz_dQBiY7" # # Third Kaggle competition - Visual Question Answering # # To complete this challenge I have chosen to use two different networks to complete two different complementary task at the same time: # - Categorize the question in 7 different classes (numbers, colors, yes or no, weather, actions, left or right, others) # - Perform the VQA task # # To select the final answer is used, as usual, *arg_max* but now between the most probable classes in the category predicted by the first network. # # This method corrects about 1 to 2% of the answers leading to a proportional gain in the final score. # # This method is effective in this challenge due to the reduced number of final classes (and also to the fact, for example, the category weather includes only one answer). # # The VQA network architecture is very similar to the one implemented in [this](http://arxiv.org/pdf/1505.00468.pdf) paper but with some modifications: # # - Increased number of neurons in the fully connected layers # - Xception instead of VGG-16 # - Different embedding size and other hyperparameters # # The training set is splitted evenly between categories making it balanced w.r.t. the validation set. # # During this challenge I used, instead of TensorBoard, Weight and Biases to have statistics about how the model is performing and to perform hyperparameters tuning in a simpler way. 
# Here you can find the entire project with all the runs and sweeps:
# https://wandb.ai/lrsb/kaggle3

# # Download dataset

import json
# !pip install --upgrade --force-reinstall --no-deps kaggle
# !pip install --upgrade wandb

#@markdown Insert here your credentials
kaggle_username = ''#@param {type:'string'}
kaggle_api_key = ''#@param {type:'string'}
wandb_key = ''#@param {type:'string'}

# !wandb login {wandb_key}

# Write the Kaggle API token where the CLI expects it, then fetch the data.
api_token = {'username': kaggle_username, 'key': kaggle_api_key}
# !mkdir ~/.kaggle
with open('/root/.kaggle/kaggle.json', 'w') as kaggle_json:
    json.dump(api_token, kaggle_json)
# !chmod 600 ~/.kaggle/kaggle.json
# !kaggle competitions download -c anndl-2020-vqa
# !unzip -q anndl-2020-vqa.zip

# # Setup
# ### Making results more reproducible and setting params

import tensorflow as tf

SEED = 1234#@param {type:'number'}
tf.random.set_seed(SEED)

# The 58 possible answers, in the fixed order that defines class indices.
labels = ['0', '1', '2', '3', '4', '5', 'apple', 'baseball', 'bench', 'bike', 'bird', 'black', 'blanket', 'blue', 'bone', 'book', 'boy', 'brown', 'cat', 'chair', 'couch', 'dog', 'floor', 'food', 'football', 'girl', 'grass', 'gray', 'green', 'left', 'log', 'man', 'monkey bars', 'no', 'nothing', 'orange', 'pie', 'plant', 'playing', 'red', 'right', 'rug', 'sandbox', 'sitting', 'sleeping', 'soccer', 'squirrel', 'standing', 'stool', 'sunny', 'table', 'tree', 'watermelon', 'white', 'wine', 'woman', 'yellow', 'yes']
labels_dict = {}
for index, label in enumerate(labels):
    labels_dict[label] = index

# Answer-index groupings used by the question-category classifier.
categories = {
    'numbers': [0, 1, 2, 3, 4, 5],
    'colors' : [11, 13, 17, 27, 28, 35, 39, 53, 56],
    'yesno': [33, 57],
    'weather': [49],
    'actions': [43, 44, 47],
    'leftright': [29, 40],
    'others': [6, 7, 8, 9, 10, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 30, 31, 32, 34, 36, 37, 38, 41, 42, 45, 46, 48, 50, 51, 52, 54, 55]
}
# Finer-grained alternative grouping (defined but the code below uses
# `categories`).
categories_long = {
    'numbers': [0, 1, 2, 3, 4, 5],
    'colors' : [11, 13, 17, 27, 28, 35, 39, 53, 56],
    'yesno': [33, 57],
    'weather': [49],
    'actions': [38, 43, 44, 47],
    'leftright': [29, 40],
    'persons': [16, 25, 31, 55],
    'objects': [6, 7, 8, 9, 10, 12, 14, 15, 18, 19, 20, 21, 22, 23, 24, 26, 30, 32, 34, 36, 37, 41, 42, 45, 46, 48, 50, 51, 52, 54]
}

# Xception input resolution.
img_w = 299#@param {type:'number'}
img_h = 299#@param {type:'number'}

# ### Code for creating datasets

import os, numpy as np, json
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing import image as k_image


class VQADataset(tf.keras.utils.Sequence):
    # Sequence over the VQA data. Depending on `only_text`, yields either
    # (question_tokens, category_onehot) for the category classifier or
    # ((question_tokens, image), answer_onehot) for the VQA model.

    def __init__(self, validation_split=0, subset='training', only_text=False, preprocessing_function=None):
        self.subset = subset
        self.only_text = only_text
        self.preprocessing_function = preprocessing_function
        self.train_set = []
        self.valid_set = []
        self.test_set = []
        self.tokenizer = tf.keras.preprocessing.text.Tokenizer()
        with open('/content/VQA_Dataset/train_questions_annotations.json', 'r') as f:
            train_questions = json.load(f)
        # Count answers per class so the validation split can be stratified.
        count = [0 for i in range(0, len(labels_dict))]
        # NOTE(review): len(q) here is the CHARACTER length of the question,
        # while padding below is over token sequences — so max_question_len
        # over-estimates the needed length; harmless but worth confirming.
        self.max_question_len = max(len(q) for q in map(lambda e: e['question'], train_questions.values()))
        for answer in map(lambda e: e['answer'], train_questions.values()):
            count[labels_dict[answer]] += 1
        # Take the first `validation_split` fraction of each class into the
        # validation set, keeping it balanced w.r.t. the training set.
        valid_count = [0 for i in range(0, len(labels_dict))]
        for key, value in train_questions.items():
            item_index = labels_dict[value['answer']]
            if valid_count[item_index] < count[item_index] * validation_split:
                self.valid_set.append(value)
                valid_count[item_index] += 1
            else:
                self.train_set.append(value)
        self.tokenizer.fit_on_texts([q for q in map(lambda e: e['question'], train_questions.values())])
        with open('/content/VQA_Dataset/test_questions.json', 'r') as f:
            self.test_set = [[key, value] for key, value in json.load(f).items()]

    def __len__(self):
        if self.subset == 'testing':
            return len(self.test_set)
        if self.subset == 'training':
            return len(self.train_set)
        return len(self.valid_set)

    def __getitem__(self, index):
        if self.subset == 'testing':
            elem = self.test_set[index][1]
        elif self.subset == 'training':
            elem = self.train_set[index]
        else:
            elem = self.valid_set[index]
        # Tokenize and right-pad the question to a fixed length.
        question = pad_sequences(self.tokenizer.texts_to_sequences([elem['question']]), maxlen=self.max_question_len, padding='post')[0]
        if self.only_text:
            # Category-classifier mode: label is the one-hot question category
            # (all-zeros for the unlabeled test set).
            if self.subset != 'testing':
                for key, value in categories.items():
                    if labels_dict[elem['answer']] in value:
                        return question, tf.keras.utils.to_categorical(list(categories.keys()).index(key), num_classes=len(categories))
            return question, [0 for i in range(0, len(categories))]
        image = k_image.load_img(os.path.join('/content/VQA_Dataset/Images', elem['image_id'] + '.png'), target_size=(img_w, img_h))
        image = k_image.img_to_array(image)
        if self.preprocessing_function is not None:
            image = self.preprocessing_function(image)
        if self.subset == 'testing':
            return (question, image), [0 for i in range(0, len(labels_dict))]
        return (question, image), tf.keras.utils.to_categorical(labels_dict[elem['answer']], num_classes=len(labels_dict))

    def vocabulary_size(self):
        # +1 for the reserved padding index 0.
        return len(self.tokenizer.word_index) + 1

    def question_id(self, index):
        # Submission key of the index-th test question.
        return self.test_set[index][0]


def GetVQADatasets(validation_split=0, batch_size=32, preprocessing_function=None):
    # Build (train_seq, train_ds, valid_seq, valid_ds, test_seq, test_ds) for
    # the full question+image VQA task.
    train_set = VQADataset(validation_split=validation_split, subset='training', preprocessing_function=preprocessing_function)
    valid_set = VQADataset(validation_split=validation_split, subset='validation', preprocessing_function=preprocessing_function)
    test_set = VQADataset(subset='testing', preprocessing_function=preprocessing_function)
    train_dataset = tf.data.Dataset.from_generator(lambda: train_set, output_types=((tf.int32, tf.float32), tf.float32), output_shapes=(([train_set.max_question_len], [img_w, img_h, 3]), [len(labels_dict)]))
    train_dataset = train_dataset.batch(batch_size)
    train_dataset = train_dataset.repeat()
    valid_dataset = tf.data.Dataset.from_generator(lambda: valid_set, output_types=((tf.int32, tf.float32), tf.float32), output_shapes=(([valid_set.max_question_len], [img_w, img_h, 3]), [len(labels_dict)]))
    valid_dataset = valid_dataset.batch(batch_size)
    valid_dataset = valid_dataset.repeat()
    test_dataset = tf.data.Dataset.from_generator(lambda: test_set, output_types=((tf.int32, tf.float32), tf.float32), output_shapes=(([valid_set.max_question_len], [img_w, img_h, 3]), [len(labels_dict)]))
    test_dataset = test_dataset.batch(1)
    test_dataset = test_dataset.repeat()
    return train_set, train_dataset, valid_set, valid_dataset, test_set, test_dataset


def GetQuestionsDatasets(validation_split=0, batch_size=32):
    # Same shape of return value as GetVQADatasets, but question-only
    # (category-classifier) datasets.
    train_set = VQADataset(validation_split=validation_split, subset='training', only_text=True)
    valid_set = VQADataset(validation_split=validation_split, subset='validation', only_text=True)
    test_set = VQADataset(validation_split=validation_split, subset='testing', only_text=True)
    train_dataset = tf.data.Dataset.from_generator(lambda: train_set, output_types=(tf.int32, tf.int32), output_shapes=([train_set.max_question_len], [len(categories)]))
    train_dataset = train_dataset.batch(batch_size)
    train_dataset = train_dataset.repeat()
    valid_dataset = tf.data.Dataset.from_generator(lambda: valid_set, output_types=(tf.int32, tf.int32), output_shapes=([valid_set.max_question_len], [len(categories)]))
    valid_dataset = valid_dataset.batch(batch_size)
    valid_dataset = valid_dataset.repeat()
    test_dataset = tf.data.Dataset.from_generator(lambda: test_set, output_types=(tf.int32, tf.int32), output_shapes=([valid_set.max_question_len], [len(categories)]))
    test_dataset = test_dataset.batch(1)
    test_dataset = test_dataset.repeat()
    return train_set, train_dataset, valid_set, valid_dataset, test_set, test_dataset


# ### Code for saving testset results

import ntpath, numpy as np


def PredictTestset(vqa_model, cat_model=None, filename='/content/results.csv', filename_corrected='/content/results-cat.csv', preprocessing_function=None, upload=False):
    # Predict answers for the test set; if a category model is given, restrict
    # each answer to the argmax within the predicted question category and
    # write a second, "corrected" CSV.
    # NOTE(review): the `preprocessing_function` PARAMETER is ignored — the
    # call below hard-codes the module-level `preprocess_input`; likely a bug.
    vqa_datasets = GetVQADatasets(preprocessing_function=preprocess_input)
    vqa_predictions = vqa_model.predict(vqa_datasets[5], steps=len(vqa_datasets[4]), verbose=1)
    if cat_model is not None:
        cat_datasets = GetQuestionsDatasets()
        cat_predictions = cat_model.predict(cat_datasets[5], steps=len(cat_datasets[4]), verbose=1)
    results = {}
    results_corrected = {}
    corrected = 0
    for i in range(0, len(vqa_predictions)):
        key = vqa_datasets[4].question_id(i)
        prediction = vqa_predictions[i]
        results[key] = np.argmax(prediction)
        if cat_model is not None:
            question_category = list(categories.keys())[np.argmax(cat_predictions[i])]
            possible_answer_indexes = categories[question_category]
            final_answer = possible_answer_indexes[np.argmax([prediction[index] for index in possible_answer_indexes])]
            if results[key] != final_answer:
                corrected += 1
            results_corrected[key] = final_answer
    if cat_model is not None:
        print('Corrected ' + str(corrected) + ' of ' + str(len(results)) + ' (' + str(corrected / len(results) * 100) + '%)')
        with open(filename_corrected, 'w') as f:
            f.write('Id,Category\n')
            for key, value in results_corrected.items():
                f.write(key + ',' + str(value) + '\n')
    with open(filename, 'w') as f:
        f.write('Id,Category\n')
        for key, value in results.items():
            f.write(key + ',' + str(value) + '\n')
    # NOTE(review): in the jupytext rendering the only statement under this
    # `if` is the commented-out shell magic, leaving an empty body — this is a
    # known jupytext artifact and makes the plain .py file unparseable here.
    if upload:
        # !kaggle competitions submit -c anndl-2020-vqa -f $filename_corrected -m 'Autoupload'

# # Implementation
# ### Answer category predictor

import wandb
from wandb.keras import WandbCallback


def GetCatModel(validation_split, batch_size, epochs, embedding_size, dropout, units_1, units_2):
    # Embedding -> LSTM -> dense stack classifying a question into one of the
    # 7 answer categories. Trains immediately and returns the fitted model.
    datasets = GetQuestionsDatasets(validation_split=validation_split, batch_size=batch_size)
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Embedding(datasets[0].vocabulary_size(), embedding_size, input_length=datasets[0].max_question_len, mask_zero=True))
    model.add(tf.keras.layers.LSTM(units_1))
    model.add(tf.keras.layers.Dropout(dropout))
    model.add(tf.keras.layers.Dense(units_1, activation='relu'))
    model.add(tf.keras.layers.Dropout(dropout))
    model.add(tf.keras.layers.Dense(units_2, activation='relu'))
    model.add(tf.keras.layers.Dense(len(categories), activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x=datasets[1], epochs=epochs, steps_per_epoch=len(datasets[0]) // batch_size, validation_data=datasets[3], validation_steps=len(datasets[2]) // batch_size, callbacks=[WandbCallback()])
    return model


#@markdown Set hyperparameters used during training
validation_split = 0.1#@param {type:'number'}
batch_size = 64#@param {type:'number'}
epochs = 4#@param {type:'number'}
embedding_size = 300#@param {type:'number'}
dropout = 0.5#@param {type:'number'}
units_1 = 1024#@param {type:'number'}
units_2 = 1024#@param {type:'number'}
#@markdown Or use hyperparameter optimization
use_hyperparameter_optimization = False#@param {type:'boolean'}

# !nvidia-smi

if use_hyperparameter_optimization:
    # Bayesian sweep over the category-model hyperparameters via W&B.
    def RunFitWithHypOpt():
        wandb.init()
        model = GetCatModel(wandb.config.validation_split, wandb.config.batch_size, wandb.config.epochs, wandb.config.embedding_size, wandb.config.dropout, wandb.config.units_1, wandb.config.units_2)
    wandb.agent(wandb.sweep({
        'method': 'bayes',
        'metric': { 'name': 'val_accuracy', 'goal': 'maximize' },
        'early_terminate': { 'type': 'hyperband', 'min_iter': 3 },
        'parameters': {
            'validation_split': { 'values': [0.1, 0.2, 0.4] },
            'batch_size': { 'values': [4, 32, 64, 256] },
            'epochs': { 'value': 4 },
            'embedding_size': { 'values': [64, 128, 300] },
            'dropout': { 'values': [0, 0.5] },
            'units_1': { 'values': [512, 1024, 2048] },
            'units_2': { 'values': [256, 1024] }
        }
    }, project='kaggle3'), function=RunFitWithHypOpt)
else:
    wandb.init(project='kaggle3', config={ 'validation_split': validation_split, 'batch_size': batch_size, 'epochs': epochs, 'embedding_size': embedding_size, 'dropout': dropout, 'units_1': units_1, 'units_2': units_2 })
    cat_model = GetCatModel(wandb.config.validation_split, wandb.config.batch_size, wandb.config.epochs, wandb.config.embedding_size, wandb.config.dropout, wandb.config.units_1, wandb.config.units_2)

# Best performing model [here](https://wandb.ai/lrsb/kaggle3/runs/muum7bu1)

# ### VQA

import wandb
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from wandb.keras import WandbCallback
from tensorflow.keras.applications.xception import preprocess_input


# NOTE(review): this definition is TRUNCATED at the end of the visible chunk
# (it stops mid-statement at `mul = `); the remainder lies outside this view
# and is preserved verbatim below.
def GetVQAModel(validation_split, batch_size, epochs, use_early_stopping, embedding_size, dropout, lstm_units):
    datasets = GetVQADatasets(validation_split=validation_split, batch_size=batch_size, preprocessing_function=preprocess_input)
    xception = tf.keras.applications.Xception(weights='imagenet', include_top=True, input_shape=(img_w, img_h, 3))
    xception.trainable = False
    cnnnorm = tf.keras.layers.Lambda(lambda x: tf.keras.backend.l2_normalize(x, axis=1))(xception.layers[-2].output)
    cnndense1 = tf.keras.layers.Dense(units=2048, activation='relu')(cnnnorm)
    cnn = tf.keras.layers.Dense(units=2048, activation='tanh')(cnndense1)
    embedding_input = tf.keras.Input((datasets[0].max_question_len,))
    embedding = tf.keras.layers.Embedding(datasets[0].vocabulary_size(), embedding_size, input_length=datasets[0].max_question_len, mask_zero=True)(embedding_input)
    lstm1 = tf.keras.layers.LSTM(units=lstm_units, return_sequences=True, return_state=True, input_shape=(datasets[0].max_question_len, embedding_size))(embedding)
    lstm2 = tf.keras.layers.LSTM(units=lstm_units, return_sequences=False, return_state=True)(lstm1)
    concat = tf.keras.layers.Concatenate()([lstm1[1], lstm1[2], lstm2[0], lstm2[1]])
    lstmdense1 = tf.keras.layers.Dense(units=2048, activation='relu')(concat)
    lstmdense2 = tf.keras.layers.Dense(2048, activation='tanh')(lstmdense1)
    mul = 
tf.keras.layers.Multiply()([cnn, lstmdense2]) drop1 = tf.keras.layers.Dropout(dropout)(mul) dense1 = tf.keras.layers.Dense(2048, activation='tanh')(drop1) drop2 = tf.keras.layers.Dropout(dropout)(dense1) dense2 = tf.keras.layers.Dense(2048, activation='tanh')(drop2) dense3 = tf.keras.layers.Dense(units=len(labels_dict), activation='softmax')(dense2) model = tf.keras.models.Model(inputs=[embedding_input, xception.input], outputs=dense3) model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy']) callbacks = [WandbCallback(), ModelCheckpoint(filepath='/tmp/checkpoint', save_weights_only=True, monitor='val_accuracy', mode='max', save_best_only=True, verbose=1)] if use_early_stopping: callbacks.append(EarlyStopping(monitor='val_accuracy', mode='max', patience=5, restore_best_weights=False)) model.fit(x=datasets[1], epochs=epochs, steps_per_epoch=len(datasets[0]) // batch_size, validation_data=datasets[3], validation_steps=len(datasets[2]) // batch_size, workers=8, max_queue_size=200, use_multiprocessing=True, callbacks=callbacks) return model #@markdown Set hyperparameters used during training validation_split = 0.03#@param {type:'number'} batch_size = 256#@param {type:'number'} epochs = 15#@param {type:'number'} use_early_stopping = False#@param {type:'boolean'} embedding_size = 512#@param {type:'number'} dropout = 0.5#@param {type:'number'} lstm_units = 256#@param {type:'number'} #@markdown Or use hyperparameter optimization use_hyperparameter_optimization = False#@param {type:'boolean'} # !nvidia-smi if use_hyperparameter_optimization: def RunFitWithHypOpt(): wandb.init() model = GetVQAModel(wandb.config.validation_split, wandb.config.batch_size, wandb.config.epochs, wandb.config.use_early_stopping, wandb.config.embedding_size, wandb.config.dropout, wandb.config.lstm_units, wandb.config.trainable) PredictTestset(model, cat_model=cat_model, filename=os.path.join(wandb.run.dir, 'results.csv'), 
filename_corrected=os.path.join(wandb.run.dir, 'results-cat.csv'), preprocessing_function=preprocess_input, upload=False) cat_model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/Kaggle3/cat-model-best.h5') wandb.agent(wandb.sweep({ 'method': 'bayes', 'metric': { 'name': 'val_accuracy', 'goal': 'maximize' }, 'parameters': { 'validation_split': { 'value': 0.03 }, 'batch_size': { 'value': 256 }, 'epochs': { 'value': 13 }, 'use_early_stopping': { 'value': True }, 'embedding_size': { 'values': [256, 300, 512] }, 'dropout': { 'values': [0, 0.5] }, 'lstm_units': { 'values': [256, 512, 1024] } } }, project='kaggle3'), function=RunFitWithHypOpt) else: wandb.init(project='kaggle3', config={ 'validation_split': validation_split, 'batch_size': batch_size, 'epochs': epochs, 'use_early_stopping': use_early_stopping, 'embedding_size': embedding_size, 'dropout': dropout, 'lstm_units': lstm_units }) model = GetVQAModel(wandb.config.validation_split, wandb.config.batch_size, wandb.config.epochs, wandb.config.use_early_stopping, wandb.config.embedding_size, wandb.config.dropout, wandb.config.lstm_units) cat_model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/Kaggle3/cat-model-best.h5') PredictTestset(model, cat_model=cat_model, filename=os.path.join(wandb.run.dir, 'results.csv'), filename_corrected=os.path.join(wandb.run.dir, 'results-cat.csv'), preprocessing_function=preprocess_input, upload=False) model.load_weights('/tmp/checkpoint') PredictTestset(model, cat_model=cat_model, filename=os.path.join(wandb.run.dir, 'results-restored.csv'), filename_corrected=os.path.join(wandb.run.dir, 'results-cat-restored.csv'), preprocessing_function=preprocess_input, upload=False) wandb.init() # + [markdown] id="BwzeceDaNcT0" # Best performing model [here](https://wandb.ai/lrsb/kaggle3/runs/1710a4ge) # + [markdown] id="hJiNBNtbsBKB" # # Utilities # + [markdown] id="KgnQ54_fnfTI" # ### Predict dataset using a saved model # + id="gj8w73qdi9Md" from 
tensorflow.keras.applications.xception import preprocess_input model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/Kaggle3/model-best.h5') model_last = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/Kaggle3/model-best-last.h5') cat_model = tf.keras.models.load_model('/content/drive/MyDrive/Colab Notebooks/Kaggle3/cat-model-best.h5') PredictTestset(model, cat_model=cat_model, filename='/content/results.csv', filename_corrected='/content/results-cat.csv', preprocessing_function=preprocess_input, upload=False) PredictTestset(model_last, cat_model=cat_model, filename='/content/results-last.csv', filename_corrected='/content/results-cat-last.csv', preprocessing_function=preprocess_input, upload=False) # + id="h2n2x6rWUxSC" model_last.summary() # + id="d2nn3AsXRUIM" print(model_last.get_layer('dense_2').get_config())
Kaggle3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import local_models.local_models
import local_models.algorithms
import local_models.utils
import local_models.linear_projections
import local_models.loggin
import local_models.TLS_models
import numpy as np
import matplotlib.pyplot as plt
import sklearn.linear_model
import sklearn.cluster
from importlib import reload
from ml_battery.utils import cmap
import matplotlib as mpl
import sklearn.datasets
import sklearn.decomposition
import logging
import ml_battery.log
import time
import os
import pandas as pd
import sklearn.gaussian_process
import patched_gpr

np.random.seed(0)

logger = logging.getLogger(__name__)

#reload(local_models.local_models)
#reload(lm)
#reload(local_models.loggin)
#reload(local_models.TLS_models)

# NOTE(review): np.warnings was removed in NumPy >= 1.24; if this script is run
# on a modern NumPy, replace with `import warnings; warnings.filterwarnings(...)`.
np.warnings.filterwarnings('ignore')
# -

mpl.rcParams['figure.figsize'] = [8.0, 8.0]

project_dir = "../data/loess_iterative_train_bandwidth_models"
os.makedirs(project_dir, exist_ok=1)

# Noisy chirp-like 1-D signal used as the regression target throughout.
n = 100
X = np.linspace(-50, 50, n)
y = np.sin(X**2 / 100) + np.random.normal(0, 0.06, n)
plt.plot(X, y)

models = local_models.local_models.LocalModels(sklearn.linear_model.LinearRegression())
models.fit(X.reshape(-1, 1), y)

# BUG FIX: `kernel` was referenced in the predict() call before it was
# assigned (the GaussianKernel line originally came one statement later),
# which raises NameError when the script runs top to bottom. Define it first.
kernel = local_models.local_models.GaussianKernel(bandwidth=n / 1.5)
yy = models.predict(X.reshape(-1, 1), weighted=True, kernel=kernel, r=kernel.support_radius())

X_pred = np.linspace(np.min(X), np.max(X), 5 * n)
y_pred = models.predict(X_pred.reshape(-1, 1), weighted=True, kernel=kernel, r=kernel.support_radius())
plt.plot(X, y)
plt.plot(X_pred, y_pred, c='r')

import importlib
importlib.reload(local_models.local_models)

_, the_models = models.transform(X.reshape(-1, 1), weighted=True, kernel=kernel,
                                 r=kernel.support_radius(), return_models=True)


def bandwidth_objective(bandwidth, X, y, fitted_models, index):
    """Kernel-weighted squared-error of the local models at the given bandwidth.

    Builds a Gaussian kernel, queries `index` for neighbors within its support
    radius, normalizes the kernel weights per query point, and returns the
    weighted sum of squared residuals of each fitted local model.
    """
    kernel = local_models.local_models.GaussianKernel(bandwidth=bandwidth)
    iz, dz = index.query_radius(X.reshape(-1, 1), kernel.support_radius(), return_distance=True)
    for i in range(dz.shape[0]):
        dz[i] = kernel(dz[i])
        dz[i] /= np.sum(dz[i])  # normalize weights so each point contributes equally
    dz = np.stack(dz)
    y_pred = np.array(list(map(lambda x: x.predict(X.reshape(-1, 1)), fitted_models)))
    return np.sum((y_pred - y)**2 * dz)


def plt_callback(bandwidth):
    """Optimizer callback: plot the fit at `bandwidth` and save a frame.

    Relies on the module-level loop counters i/j and strt_band to build the
    frame filename; increments j once per invocation.
    """
    global i
    global j
    global eps
    global strt_band
    kernel = local_models.local_models.GaussianKernel(bandwidth=bandwidth)
    y_pred = models.predict(X.reshape(-1, 1), weighted=True, kernel=kernel, r=kernel.support_radius())
    plt.plot(X, y, c='b')
    plt.plot(X, y_pred, c='r')
    plt.title(str(bandwidth))
    plt.savefig(os.path.join(project_dir, "{:06.01f}_{:05d}_{:05d}.png".format(float(strt_band), i, j)))
    plt.clf()
    j += 1


import scipy.optimize

res = scipy.optimize.minimize(bandwidth_objective, n / 2, args=(X, y, the_models, models.index),
                              callback=plt_callback, options={"eps": 1e-10}, bounds=((1, np.inf),))
res

# Iterate: refit local models at the current bandwidth, then re-optimize the
# bandwidth against those models, for a range of finite-difference step sizes.
for eps in range(10, 0, -1):
    eps = 10**(-eps)
    new_band = n / 5
    for i in range(100):
        kernel = local_models.local_models.GaussianKernel(bandwidth=new_band)
        j = 0
        _, the_models = models.transform(X.reshape(-1, 1), weighted=True, kernel=kernel,
                                         r=kernel.support_radius(), return_models=True)
        res = scipy.optimize.minimize(bandwidth_objective, new_band, args=(X, y, the_models, models.index),
                                      callback=plt_callback, options={"eps": 1e-6}, bounds=((0.01, np.inf),))
        print(res)
        new_band = res.x
        print(new_band)

# Record (previous bandwidth, optimized bandwidth) pairs until a fixed point.
pairs = []
for strt_band in np.linspace(2 * n, 3 * n, 1):
    eps = 1e-8
    print(strt_band)
    new_band = strt_band
    for i in range(100):
        kernel = local_models.local_models.GaussianKernel(bandwidth=new_band)
        j = 0
        _, the_models = models.transform(X.reshape(-1, 1), weighted=True, kernel=kernel,
                                         r=kernel.support_radius(), return_models=True)
        #res = scipy.optimize.minimize(bandwidth_objective, new_band, args=(X,y,the_models,models.index), callback=plt_callback, options={"eps":eps}, bounds=((0.01,np.inf),))
        res = scipy.optimize.minimize(bandwidth_objective, new_band, args=(X, y, the_models, models.index),
                                      options={"eps": eps}, bounds=((0.01, np.inf),))
        pairs.append((new_band, res.x))
        if res.x == new_band:
            break  # converged to a fixed-point bandwidth
        new_band = res.x

pairs = np.array(pairs)
pairs[:30]

plt.scatter(pairs[:, 0], pairs[:, 1], s=2)
plt.plot(np.linspace(150, 250, 2), np.linspace(150, 250, 2), c='r', linestyle='--')

plt.scatter(pairs[:, 0], pairs[:, 1], s=2)
plt.plot(np.linspace(0, 40, 2), np.linspace(0, 40, 2), c='r', linestyle='--')
plt.savefig(os.path.join(project_dir, "transition_plot.png"))

plt.scatter(pairs[:, 0], pairs[:, 1], s=2)
plt.plot(np.linspace(0, 40, 2), np.linspace(0, 40, 2), c='r', linestyle='--')
plt.xlim(16, 25)
plt.ylim(15, 24)
plt.savefig(os.path.join(project_dir, "transition_plot_zoom.png"))
examples/loess_iterative_train_bandwidth_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Make a simple connection and execute a SQL statement

import sqlite3
from sqlite3 import Error

try:
    # BUG FIX: the original path 'data\PeopleAndPets.db' relied on '\P' being
    # an invalid (and therefore literal) escape sequence -- that emits a
    # SyntaxWarning on modern Python and fails on non-Windows systems.
    # Forward slashes work on every platform, including Windows.
    conn = sqlite3.connect('data/PeopleAndPets.db')
except Error as e:
    print(e)

cur = conn.cursor()
cur.execute("SELECT * FROM people")
rows = cur.fetchall()
for row in rows:
    print(row)
cur.close()

cur = conn.cursor()
cur.execute("SELECT * FROM pets")
rows = cur.fetchall()
for row in rows:
    print(row)
cur.close()

cur = conn.cursor()
cur.execute("select people.first_name,people.last_name, count(pets.pet_name) from people,pets where people.person_id = pets.owner_id group by pets.owner_id ;")
rows = cur.fetchall()
for row in rows:
    print(row)
cur.close()

# # so what did fetchall() return?

row

type(row)

row[0]

row[0:2]

# # we really want a pandas dataframe

import pandas as pd

command = "select people.first_name,people.last_name, count(pets.pet_name) from people,pets where people.person_id = pets.owner_id group by pets.owner_id ;"
df = pd.read_sql_query(command, conn)
df

# # ok back to inserting rows with Python

# ### first we do an insert with a hard coded person id

# +
# NOTE: building SQL with str.format is tolerable for these hard-coded demo
# values, but never do it with untrusted input -- use parameterized queries,
# e.g. cur.execute("insert into people (first_name, last_name) values (?, ?)",
# (person_first_name, person_last_name)).
person_first_name = 'Dorothy'
person_last_name = 'Martinez'
pet_name = 'Kringle'

person_sql_string = "insert into people (first_name, last_name) values ( '{0}','{1}'); ".format(person_first_name, person_last_name)

person_id = 9  # Pick a number that works with the real database
pet_sql_string = "insert into pets (pet_name, owner_id) values ( '{0}',{1} ) ;".format(pet_name, person_id)
# -

print(person_sql_string)
print(pet_sql_string)

cur = conn.cursor()
cur.execute(person_sql_string)
cur.execute(pet_sql_string)
cur.close()
# BUG FIX: closing a cursor does NOT commit. sqlite3 opens an implicit
# transaction on the first INSERT and rolls it back when the connection is
# closed, so without an explicit commit these rows were silently lost.
conn.commit()

# ### next we'll make it more realistic and do both inserts in the scope of a transaction

# +
person_first_name = 'Shaggy'
person_last_name = 'Smith'
pet_name = 'Scooby'

person_sql_string = "insert into people (first_name, last_name) values ( '{0}','{1}'); ".format(person_first_name, person_last_name)
# last_insert_rowid() picks up the rowid of the person inserted just above,
# inside the same implicit transaction.
pet_sql_string = "insert into pets (pet_name, owner_id) values ( '{0}',last_insert_rowid() ) ;".format(pet_name)

cur = conn.cursor()
# The first INSERT automatically starts a transaction...
cur.execute(person_sql_string)
cur.execute(pet_sql_string)
cur.close()
# ...but it must still be committed explicitly; cursor.close() does not commit.
conn.commit()
# -
SQLiteExperiments.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Fast PDE/IE course, Skoltech, Spring 2015 # ## Problem Set 3 # ### Spectral Methods # # Consider a problem of solving # \\[ # \begin{align*} # -\Delta u &= f, \\ # u|_\Gamma &= 0. # \end{align*} # \\] # by a spectral method, where $\Omega = [0,1]^2$ is a unit square and $\Gamma=\partial\Omega$. # # In a spectral method you will be working with a representation of the solution in the form # $$ # u(x,y) = \sum_{i=1}^N \sum_{j=1}^N \hat{u}_{i,j} \sin(\pi i x) \sin(\pi j y), # $$ # where $\hat{u}^N$ is just an $N\times N$ array. # # Knowing $u(x,y)$, it is often practically not possible to compute $\hat{u}_{i,j}$ exactly. Hence one needs to use the [Discrete Sine Transform](http://docs.scipy.org/doc/scipy-dev/reference/tutorial/fftpack.html#discrete-sine-transforms) (DST). # The python's <tt>dst</tt> computes a one-dimensional DST, but we need a two-dimensional (2D) DST, which we compute in the following way: # # * Compute values $u(x,y)$ for $N^2$ points inside the domain $(x,y) = (h k, h \ell)$, $1\leq k,\ell \leq N$, $h = 1/(N+1)$. # # * Apply <tt>dst</tt> to each line in this array. This way you will be computing a "partial" 2D DST # $$ # u(x,y) = \sum_{j=1}^N \tilde{u}_{j}(x) \sin(\pi j y), # $$ # # * Apply <tt>dst</tt> to each column in the resulting array. This way you will be computing a full 2D DST. # <br><br> # # ### Problem 1 (Spectral Methods) (60pt) # # * Part (a) # - Take $u^0(x,y) = \sin(\pi x) \sin(\pi y)$ and compute $f = -\Delta u^0$. # - **(6pt)** Calculate, explicitly, the coefficients $\hat{f}_{i,j}$ in # $$ # f(x,y) = \sum_{i=1}^N \sum_{j=1}^N \hat{f}_{i,j} \sin(\pi i x) \sin(\pi j y). # $$ # Then, using values of $f$ at $N^2$ points, calculate the coefficients $\hat{f}^{N}_{i,j}$ by the procedure outlined above. 
# (Here $\hat{f}$ is an "infinite array" that you will compute explicitly, while $\hat{f}^N$ will be an NxN array that you will compute using python.) # Compare $\hat{f}$ and $\hat{f}^N$. # - In this case, obviously, a solution to the problem is $u=u_0$, but let us now pretend we do not know it. # # - **(6pt)** Compute the coefficients $\hat{u}^N$ from $\hat{f}^N$ found in part (a). # # - **(6pt)** We will estimate the error between the two solutions in the following way. We will take $N^2$ points in our domain, of the form $(h k, h \ell)$, same as before. We will then be interested in # $$ # {\rm err}_N = \max_{1\leq k,\ell\leq N} |u^N(h k, h \ell) - u^0(h k, h \ell)| # $$ # which we call an error of the solution. Calculate the error of your solution for a number of values of $N$. Explain your results # # * Part (b) # - Let us now take something a little more complicated, # $$ # u^0(x,y) = \sin(\pi x^2) \sin(\pi y^2), # \qquad\text{and}\qquad # f = -\Delta u_0, # $$ # - You cannot compute the coefficients $\hat{f}$ explicitly, so you'll have to live with only $\hat{f}^N$. # - **(6pt)** Compute $\hat{u}^N$, the error ${\rm err}_N$, and report the error for different values of $N$. Would you say the error decays fast as $N$ increases? # - Suppose that the spectral method has an order of convergence, in other words the error behaves like ${\rm err}_N = C N^{-\rm ord}$ and you need to find ${\rm ord}$. # - **(6pt)** Derive the formula # $$ # {\rm ord}_N = \frac{\ln({\rm err}_N)-\ln({\rm err}_{2N})}{\ln(2)} # $$ # - **(6pt)** Hence compute ${\rm ord}_N$ for $N=1,\ldots,20$ and comment on your results. (You should start with taking each value of $N$ between 1 and 20 to understand the behavior, but you don't need to present all these numbers in your report, as long as you can illustrate the right behavior.) # # * Part (c) # - Let us now play a "fair game": take # $$ # f(x,y) = 1. # $$ # We then do not know the solution. 
# - Use your code from part (b) to compute $\hat{f}^N$, and $\hat{u}^N$.
# - **(6pt)** Instead of the exact error we have to use the error estimate
# $$
# {\rm errest}_N = \max_{1\leq k,\ell\leq N} |u^N(h k, h \ell) - u^{2N+1}(h k, h \ell)|
# $$
# (Why did we take $2N+1$ instead of $2N$?)
# Report the values of ${\rm errest}_N$ for a sequence of values of $N$.
#
# - **(6pt)** Hence compute ${\rm errest}_N$ for a sequence of $N$ and comment on your results.
# - **(6pt)** Finally, using the same formula (but with ${\rm errest}$),
# $$
# {\rm ord}_N = \frac{\ln({\rm errest}_N)-\ln({\rm errest}_{2N})}{\ln(2)}
# $$
# compute ${\rm ord}_N$ for a sequence of values of $N$. Comment on your results.
# * **(6pt)** Compare the behavior for ${\rm err}$, ${\rm errest}$, and ${\rm ord}$ for parts (b) and (c). What is the main reason for the qualitative difference in the speed of convergence?
# ### Problem 2 (Multigrid) (40 pts)
# Consider a Poisson equation
# \\[
# \begin{align*}
# -\Delta u &= f, \\
# u|_\Gamma &= 0,
# \end{align*}
# \\]
# where $\Omega = [0,1]^d$, $d = 2, 3$ and $\Gamma=\partial\Omega$.
#
# Although the multigrid method has optimal complexity, there are some precomputations to be done. Therefore, it may have a bigger constant than, for instance, the spectral method or even sparse LU. In this problem you will be asked to find out in which case (2D, 3D, grid sizes) the multigrid method is more appropriate.
#
# * (**20 pts**) Implement V- and W-cycles of multigrid method for $d=2$ and $d=3$. Assume that the equations are discretized with finite differences with the standard second order "cross" stencil.
# **Note:** all operations should be implemented with linear complexity. Take a smoother of your choice.
#
#
# * (**5 pts**) Set $N=128$. Choose $f$ such that you know analytical solution. Plot errors as a function of the cycle number for the implemented V-,W-cycles of GMG and for AMG (from PyAMG), $d=2$ and $d=3$. Which approach requires fewer iterations?
# Which approach is faster? **Note:** when doing plotting do not forget to include title and axes names
#
# <!---
# * How many iterations for your smoother are required to reach the same accuracy as for multigrid?
# -->
#
# * (**5 pts**) Implement a spectral method for $d=3$. Implement fast Poisson solver for $d=2$ and $d=3$: the only difference with the spectral method will be eigenvalues
# $$\lambda_{mn} = -\frac{4}{h^2} \left( \sin^2 \frac{\pi m h}{2} + \sin^2 \frac{\pi n h}{2} \right),$$
# (eigenvalues of the discrete problem ($d=2$) with the "cross" stencil) instead of
# $$
# \lambda_{mn} = -\pi^2(m^2 + n^2)
# $$
# (exact eigenvalues of the initial continuous problem).
#
#
# * (**10 pts**) Set $f\equiv1$. Investigate when (for what cases) GMG is more appropriate than the sparse LU, the spectral method or the fast Poisson solver. To do so compare timings for different $N$, $d$ and accuracies. Make conclusions.

# + run_control={"breakpoint": false, "read_only": false}
from IPython.core.display import HTML


def css_styling():
    """Read the course stylesheet and return it as an HTML display object.

    Assumes ./styles/alex.css exists relative to the notebook's working
    directory -- TODO confirm before running outside the course repo.
    """
    styles = open("./styles/alex.css", "r").read()
    return HTML(styles)

css_styling()
PS3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SPARQL # language: sparql # name: sparql # --- # + [markdown] slideshow={"slide_type": "slide"} # # KEN 3140 Semantic Web: Assignment 2 # ### Writing and executing SPARQL queries on RDF graphs # # **Authors:** <NAME>, <NAME>, <NAME>, and <NAME> \ # **Date:** 2021-09-10 # **License:** [https://creativecommons.org/licenses/by/4.0](https://creativecommons.org/licenses/by/4.0) # # This assignment will assess whether you have met the following learning objectives after studying the relevant materials and doing the relevant lab exercises in this course: # # #### Learning objectives: # 1. How to formulate basic and complex SPARQL queries with valid structure and syntax # 2. How to identify and select the appropriate SPARQL features for including in a query, in order to answer a specific question # 3. How to design triple and graph patterns to match criteria that a question or task requires # 4. How to include new information in an RDF graph using SPARQL queries # 5. How to identify, select and include appropriate SPARQL functions in SPARQL queries to filter entities according to their literal values # 6. How to distinguish between asserted and inferred statements in RDF graphs using RDFS inference in conjunction with SPARQL queries # # #### Assignment task description # Please read all sections of this document very carefully before attempting the assignment, asking questions and submitting # # * This assignment will assess your competencies with formulating SPARQL queries in order to answer a series of questions about the content of a pre-prepared RDF graph about family relations. The graph is provided in the file ``KEN3140_assignment2_familyrelations.ttl`` in Turtle syntax included along with your assignment materials. 
You will also observe the effect of RDFS inference when used in conjunction with SPARQL queries. # # * There are two parts to this assignment: a **Part A** and a **Part B**. Both parts require you to formulate SPARQL queries to answer the questions asked in that part. Part A questions require less complex SPARQL queries and Part B requires more complex queries and potentially the use of advanced features of the SPARQL language. # # * Before you begin formulating your queries, it might be helpful to explore the graph in some way. You are free to do this in whichever way you prefer. At the very least, you can open ``KEN3140_assignment2_familyrelations.ttl`` in the text editor of your choice and examine the triples. A (not very pretty) picture of the graph is also available in the file ``KEN3140_assignment2_familyrelations.png`` which may help you to understand the structure and content of the graph. # # * Make note of the information in the provided graph as well as the vocabularies it uses i.e., which external vocabularies are used to specify the types, object properties and data properties in the graph. # # * You should use either the SPARQL kernel for Jupyter notebooks to run your SPARQL queries in this notebook, or use [YASGUI](http://yasgui.triply.cc) to run them. However, in either case, **please paste your solutions for each query back into the relevant cell of this notebook before submitting!** # # #### Deadline & submission instructions # The deadline for your assignment is **Sunday, 26 September 2021 at 23:59 [note the extended deadline]**. You should upload a copy of this notebook which is where you will record your solutions. Please rename the notebook to include your name and student ID. I.e., name the notebook to: ``KEN3140_assignment2_(your name)_(your studentID).ipynb``. # # #### Grading criteria # We will assess the design of your SPARQL queries on a number of criteria directly related to the learning objectives of the assignment. 
I.e., we will assess to what extent you have demonstrated that you have achieved or mastered the learning objectives in the formulation of your SPARQL queries. Please make sure to follow the assignment instructions carefully and meet all the requirements! You will receive a grade out of 10 points for this assignment. # # #### Helpful resources # 1. KEN3140 Lecture 4 & 5 slides (Canvas) # 2. KEN3140 Lab 4 & 5 materials (Canvas) # 3. [SPARQL W3C specs](https://www.w3.org/TR/sparql11-overview/) # 4. [Learning SPARQL ebook on UM digital library](https://maastrichtuniversity.on.worldcat.org/v2/oclc/853679890) # # #### Contact # 1. <NAME> (<EMAIL>) # 2. <NAME> (<EMAIL>) # 3. <NAME> (<EMAIL>) # + [markdown] slideshow={"slide_type": "slide"} # #### 1. Install the SPARQL kernel # # Only do these steps if you are going to use the kernel as opposed to using YASGUI for your assignment: # # #### Locally # # Run the following two commands in the sequence specified in your terminal (before starting Jupyter): # # ```bash # pip install sparqlkernel --user # jupyter sparqlkernel install --user # ``` # # #### With docker # # The SPARQL kernel should be installed in you Docker image. If it is not, please run the following command: # # **Windows** # # ```bash # docker run -it --rm --name java-notebook -p 8888:8888 -v C:\path\to\current\directory:/home/jovyan/work -e JUPYTER_TOKEN=<PASSWORD> ghcr.io/maastrichtu-ids/jupyterlab:latest # ``` # **Linux/Mac** # # ```bash # docker run -it --rm --name java-notebook -p 8888:8888 -v $(pwd):/home/jovyan/work -e JUPYTER_TOKEN=<PASSWORD> ghcr.io/maastrichtu-ids/jupyterlab:latest # ``` # # #### 2. 
Define the SPARQL endpoint URL # + slideshow={"slide_type": "subslide"} # Set the SPARQL Kernel parameters %endpoint https://graphdb.dumontierlab.com/repositories/KEN3140_SemanticWeb # Ignore: this is optional, it would increase the level of detail in the logs %log debug # + slideshow={"slide_type": "fragment"} # Use these commands before the query you want to run # Use this command to disable inference in the endpoint %qparam infer false # Use this command to enable inference in the endpoint %qparam infer true # - # ## Part A # ### Q1: List the top five tallest people in the graph in order from tallest to shortest # # **Important notes:** Display both the people and their heights in the query results. # + # Insert query here # - # ### Q2: List all family members, order them from shortest to tallest, and count the number of uncles each of them have # # **Important notes:** Display the family members, their heights and the number of uncles in the results of your query. Include family members who have no uncles as well in your results. Using comments with the "#" symbol before your solution query in the cell below, state in English natural language how you define an "uncle". # + # Insert query here # - # ### Q3: Identify the shortest person who has at least two uncles # # **Important notes:** Display the person and their height in the results # + # Insert query here # - # ### Q4: Count the number of males and females per family # # **Important notes:** The assumption in this questions is that people with the same family name are part of the same family. Include the family name, the number of males in that family, and the number of females in that family, in your query results. # + # Insert query here # - # ### Q5: List all females in the graph born after 1965 from the oldest at the top of the list, to the youngest at the bottom # # **Important notes:** Include the person and birth date of that person in the query results. 
# + # Insert query here # - # ## Part B # ### Q6: Return the mean (average) height of men, and the mean height of women in the full graph # # **Important notes:** you must use one query for this task # + # Insert query here # - # ### Q7: For each person with a child calculate their "couple salary per child". # # **Important notes:** the "couple salary per child" is the total combined salary of the two parents divided by the number of children they have. Include the person, spouse, couple salary (total combined salary of the parents), number of children, and the couple salary per child in the query results. # + # Insert query here # - # ### Q8: List persons with the given name ending with the letter "a" # # **Important notes:** Include the person and their human readable given name in the query results. # + # Insert query here # - # ### Q9: Identify and list all sibling relationships in the graph # # **Important notes:** Include each pair of persons in the graph that are siblings. You will supply two queries for this task. One **with** inference and one **without**. Paste both queries below, one with inference toggled off and one with inference toggled on (the queries should be in separate cells). # + # Insert query here (without inference) # + # Insert query here (with inference) # - # ### Q10: Create triples # # **Perform the following task with a single SPARQL query** # # For each person, create 2 new triples: # * a triple capturing the full name of a person in a human readable string i.e., the concatenation of the first and last name of the person. Use the `rdfs:label` relation to capture this string. # * a triple capturing the height of each person using `schema:height` again but with the value in metres rather than centimetres (which is the current unit used to represent the height values of the family members in the graph). 
#
# **Your query should create and display these triples but it must not attempt to add them to the graph**

# +
# Insert query here (with inference)
# -

# ### Q11: Complex query
#
# Write one SPARQL query to find the family member with the highest [out-degree](https://xlinux.nist.gov/dads/HTML/outdegree.html) in the graph and then list the names of all book authors on DBpedia that have this family member's **first** name included lexically in their name as well; also list the titles of the books of these authors. For example, if the family member's name is "Nicole", then book authors that are called "<NAME>" or "<NAME>" should be included in the results. "<NAME>" would not be a valid result, since "Nicole" does not appear in that name.
#
# **Important notes:** Include the IRI of the family member, the IRIs of the book authors, and the human-readable labels of their book titles in your query results. **Make sure to use HTTPS in the SPARQL endpoint URLs that you use in your query, and not HTTP.**

# +
# Insert query here (with inference)
# -

# ### Bonus: Create missing relations
#
# Create missing sibling relations when a sibling relation is defined in "one direction" between two family members but not explicitly in the other direction. For example, if john is a sibling of mary, then we know that mary is also a sibling of john. But since RDF predicates are directional, you need to state explicitly if a particular relation holds in the other direction as well. **Your query should create and display these missing relations but it must not attempt to add them to the graph**

# +
# Insert query here (with inference)
assignment2/KEN3140_Assignment2_SPARQL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # ARIMA(2,1,2)
#
# The differenced series $Y'(n) = Y(n) - Y(n-1)$ follows an ARMA(2,2) process
# around the mean $\mu$:
#
# $$ Y'(n) - \mu = \phi(1)\,(Y'(n-1) - \mu) + \phi(2)\,(Y'(n-2) - \mu) + \theta(1)\,\epsilon(n-1) + \theta(2)\,\epsilon(n-2) + \epsilon(n) $$
# $$ Y'(n) = Y(n) - Y(n-1) $$

# # Simulate time series

# +
import numpy as np
import matplotlib.pyplot as plt

N = 1000          # number of samples to simulate
SIGMA_EPS = 10    # standard deviation of the white-noise innovations
THETA = [0.9, 0]  # MA coefficients theta(1), theta(2)
PHI = [0.7, 0]    # AR coefficients phi(1), phi(2)
MU = 10           # mean of the differenced (stationary) series

# Seeded generator so the simulated paths are reproducible across runs.
rng = np.random.default_rng(42)
epsilon = rng.normal(scale=SIGMA_EPS, size=(N,))

# Recursively generate the ARMA(2,2) series Y'. Lag-2 terms are only applied
# once n >= 2: the original loop indexed Yp[n-2] and epsilon[n-2] at n=1,
# which silently wrapped around to index -1 (harmless only because the
# second-order coefficients happen to be zero here).
Yp = np.ones((N,)) * MU
for n in range(1, N):
    Yp[n] = MU + PHI[0] * (Yp[n-1] - MU) + THETA[0] * epsilon[n-1] + epsilon[n]
    if n >= 2:
        Yp[n] += PHI[1] * (Yp[n-2] - MU) + THETA[1] * epsilon[n-2]

plt.figure(figsize=(14, 4))
plt.plot(Yp)
plt.title('ARMA(2,2) model')
plt.grid()

# Integrating (cumulative sum) the ARMA(2,2) series gives the ARIMA(2,1,2) series.
Y = Yp.cumsum()
plt.figure(figsize=(14, 4))
plt.plot(Y)
plt.title('ARIMA(2,1,2) model')
plt.grid()
plt.show()
# -

# ## Autocorrelation function (ACF)

# +
from statsmodels.tsa.stattools import acf

# Sample ACF of the stationary series, with Ljung-Box Q statistics and p-values.
acf_sm, qstat, pval = acf(Yp, nlags=100, qstat=True, fft=True)
plt.plot(acf_sm, '-+')
plt.show()
# -

# ## Model Estimation

# +
# NOTE: statsmodels.tsa.arima_model.ARIMA was deprecated in 0.12 and removed
# in statsmodels 0.13; the maintained replacement is
# statsmodels.tsa.arima.model.ARIMA, which takes an `order=` keyword.
from statsmodels.tsa.arima.model import ARIMA

# Fit an ARMA(1,1) directly on the differenced series ...
model = ARIMA(Yp, order=(1, 0, 1)).fit()
print(model.summary())
# -

# ... and the equivalent ARIMA(1,1,1) on the integrated series.
model = ARIMA(Y, order=(1, 1, 1)).fit()
print(model.summary())

# Plot the fit and residuals. With the new API, fittedvalues and resid are
# expressed in the levels of Y (not in the differenced scale), so the truth
# curve to compare against is Y. The first residual reflects the model's
# initialization and can be large.
plt.figure(figsize=(12, 4))
plt.plot(Y)
plt.plot(model.fittedvalues, '--+')
plt.plot(model.resid, '--')
plt.grid()
plt.legend(['Truth', 'Predicted', 'Residual'])
plt.show()

# ## Residual analysis

# +
import pandas as pd
from statsmodels.api import qqplot

# A well-specified model leaves approximately Gaussian white-noise residuals:
# check with a kernel density estimate and a normal Q-Q plot.
resid_df = pd.DataFrame(model.resid)
resid_df.plot(kind='kde')
qqplot(model.resid)
plt.show()
# -

# Residual ACF with the 95% white-noise confidence band +-1.96/sqrt(N)
# (the original hard-coded +-0.05, which is not the correct band for N=1000).
acf_res, qstat, pval = acf(model.resid, nlags=100, qstat=True, fft=True)
conf95 = 1.96 / np.sqrt(N)
plt.stem(acf_res)
plt.hlines(conf95, 0, 100, linestyle='dashed')
plt.hlines(-conf95, 0, 100, linestyle='dashed')
plt.show()

# ## Forecasting

# One-step-ahead out-of-sample forecast. The new API's forecast() returns only
# the point forecast; the standard error and confidence interval come from
# get_forecast() instead of the old (yhat, std_err, confint) tuple.
forecast = model.get_forecast()
yhat = forecast.predicted_mean
std_err = forecast.se_mean
confint = forecast.conf_int()
print("Predicted value = {}, StdErr = {}, Confidence Interval = {}".format(yhat, std_err, confint))
Probability/ARIMA.ipynb