code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # 0. Create a Model with division, death, and volume # + # %matplotlib inline from bioscrape.lineage import LineageModel from bioscrape.lineage import LineageVolumeSplitter import numpy as np import pylab as plt #Define Rates k = 1 d = .1 g = .2 #Define Reactions rxn1 = (["A"], ["A", "X"], "massaction", {"k":k}) #A --> A + X @ k=k rxn2 = (["A"], ["B"], "massaction", {"k":d}) #A --> B @ k=d rxn3 = (["B"], ["A"], "massaction", {"k":d}) #B --> A @ k=d rxns = [rxn1, rxn2, rxn3] #Define a list of all reactions #Define initial condtion (as a dictionary) x0 = {"X": 0, "A":1} #Instantiate Model LM1 = LineageModel(reactions = rxns, initial_condition_dict = x0) #Add a volume event to the model (with a mass action propensity dependent on X). Causes the volume to grow linearly by the amount g when event fires. LM1.create_volume_event("linear volume", {"growth_rate":g}, "massaction", {"k":.1, "species":"X"}) #Create a Volume Splitter which determines how species are partitioned at division #species A and B will be duplicated (as if they were genomic) and X will by default be partitioned binomially vsplit = LineageVolumeSplitter(LM1, options = {"A":"duplicate", "B":"duplicate"}) #Create two division rules, both use the same volume splitter #1: cells divide when their volume increases by 5 LM1.create_division_rule("deltaV", {"threshold":5}, vsplit) #Cells divide after 50 time units (since their last division) LM1.create_division_rule("time", {"threshold":50}, vsplit) #Create a death event with a positive hill function propensity H(X) = k / (1+(x/K)^n) #LM1.create_death_event("death", {}, "hillpositive", {"s1":"X", "K":25, "n":2, "k":.25}) #Initialize the model (this also a) LM1.py_initialize() # - # 1. 
Testing SimulateCellLineage in the LineageSSASimulator # + from bioscrape.lineage import py_SimulateCellLineage max_time = 120 timepoints = np.linspace(0,max_time,max_time*100) lineage = py_SimulateCellLineage(timepoints, Model = LM1) sch_tree = [[]] sch_tree_length = 1 for i in range(lineage.py_size()): sch = lineage.py_get_schnitz(i) if sch.py_get_parent() == None: sch_tree[0].append(sch) else: for j in range(len(sch_tree)): parent = sch.py_get_parent() if parent in sch_tree[j]: if len(sch_tree)<= j+1: sch_tree.append([]) sch_tree_length += 1 sch_tree[j+1].append(sch) color_list = [] for i in range(sch_tree_length): color_list.append((i/sch_tree_length, 0, 1.-i/sch_tree_length)) #X_ind = LM1.get_species_index('X') plt.figure() plt.subplot(211) count = 0 for i in range(sch_tree_length): for sch in sch_tree[i]: df = sch.py_get_dataframe(Model = LM1) count+=1 plt.plot(df["time"], df["X"], color = color_list[i]) plt.subplot(212) for i in range(sch_tree_length): for sch in sch_tree[i]: df = sch.py_get_dataframe(Model = LM1) plt.plot(df["time"], df["volume"], color = color_list[i]) print("Total Cells Simulated", count) plt.show() # - # Testing the LineageSSASimulator PropogateCells Function # + # %matplotlib inline from bioscrape.lineage import py_PropagateCells max_time = 120 timepoints = np.linspace(0,max_time,max_time*100) final_states = py_PropagateCells(timepoints, Model = LM1) final_states = pd.DataFrame(final_states) plt.figure() plt.subplot(211) plt.hist(final_states["X"], label = "Count X") plt.legend() plt.subplot(212) plt.hist(final_states["volume"], label = "Volume") plt.legend() plt.show() # - # 3. 
Testing Single Cell Lineage Simulation # + from bioscrape.lineage import py_SingleCellLineage max_time = 120 timepoints = np.linspace(0,max_time,max_time*100) final_lineage = py_SingleCellLineage(timepoints, Model = LM1) print(type(final_lineage)) lineage_size = len(final_lineage) volumes=[] plt.figure() color_list = [(i/lineage_size, 0, 1-i/lineage_size) for i in range(lineage_size)] for i in range(lineage_size): f = final_lineage[i] plt.subplot(121) plt.plot(f["time"], f["X"], color = color_list[i]) plt.subplot(122) plt.plot(f["time"], f["volume"], ":", color = color_list[i]) plt.show() # -
lineage examples/OLD BROKEN Lineage Examples.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # _*Max-Cut and Traveling Salesman Problem*_ # ## Introduction # # Many problems in quantitative fields such as finance and engineering are optimization problems. Optimization problems lie at the core of complex decision-making and definition of strategies. # # Optimization (or combinatorial optimization) means searching for an optimal solution in a finite or countably infinite set of potential solutions. Optimality is defined with respect to some criterion function, which is to be minimized or maximized. This is typically called cost function or objective function. # # **Typical optimization problems** # # Minimization: cost, distance, length of a traversal, weight, processing time, material, energy consumption, number of objects # # Maximization: profit, value, output, return, yield, utility, efficiency, capacity, number of objects # # We consider here max-cut problems of practical interest in many fields, and show how they can be mapped on quantum computers manually and how Qiskit's optimization module supports this. # # # ### Weighted Max-Cut # # Max-Cut is an NP-complete problem, with applications in clustering, network science, and statistical physics. To grasp how practical applications are mapped into given Max-Cut instances, consider a system of many people that can interact and influence each other. Individuals can be represented by vertices of a graph, and their interactions seen as pairwise connections between vertices of the graph, or edges. With this representation in mind, it is easy to model typical marketing problems. For example, suppose that it is assumed that individuals will influence each other's buying decisions, and knowledge is given about how strong they will influence each other. 
The influence can be modeled by weights assigned on each edge of the graph. It is possible then to predict the outcome of a marketing strategy in which products are offered for free to some individuals, and then ask which is the optimal subset of individuals that should get the free products, in order to maximize revenues. # # The formal definition of this problem is the following: # # Consider an $n$-node undirected graph *G = (V, E)* where *|V| = n* with edge weights $w_{ij}>0$, $w_{ij}=w_{ji}$, for $(i, j)\in E$. A cut is defined as a partition of the original set V into two subsets. The cost function to be optimized is in this case the sum of weights of edges connecting points in the two different subsets, *crossing* the cut. By assigning $x_i=0$ or $x_i=1$ to each node $i$, one tries to maximize the global profit function (here and in the following summations run over indices 0,1,...n-1) # # $$\tilde{C}(\textbf{x}) = \sum_{i,j} w_{ij} x_i (1-x_j).$$ # # In our simple marketing model, $w_{ij}$ represents the probability that the person $j$ will buy a product after $i$ gets a free one. Note that the weights $w_{ij}$ can in principle be greater than $1$ (or even negative), corresponding to the case where the individual $j$ will buy more than one product. Maximizing the total buying probability corresponds to maximizing the total future revenues. In the case where the profit probability will be greater than the cost of the initial free samples, the strategy is a convenient one. An extension to this model has the nodes themselves carry weights, which can be regarded, in our marketing model, as the likelihood that a person granted with a free sample of the product will buy it again in the future. With this additional information in our model, the objective function to maximize becomes # # $$C(\textbf{x}) = \sum_{i,j} w_{ij} x_i (1-x_j)+\sum_i w_i x_i. 
$$ # # In order to find a solution to this problem on a quantum computer, one needs first to map it to an Ising Hamiltonian. This can be done with the assignment $x_i\rightarrow (1-Z_i)/2$, where $Z_i$ is the Pauli Z operator that has eigenvalues $\pm 1$. Doing this we find that # # $$C(\textbf{Z}) = \sum_{i,j} \frac{w_{ij}}{4} (1-Z_i)(1+Z_j) + \sum_i \frac{w_i}{2} (1-Z_i) = -\frac{1}{2}\left( \sum_{i<j} w_{ij} Z_i Z_j +\sum_i w_i Z_i\right)+\mathrm{const},$$ # # where $\mathrm{const} = \sum_{i<j}w_{ij}/2+\sum_i w_i/2$. In other terms, the weighted Max-Cut problem is equivalent to minimizing the Ising Hamiltonian # # $$ H = \sum_i w_i Z_i + \sum_{i<j} w_{ij} Z_iZ_j.$$ # # Qiskit's optimization module can generate the Ising Hamiltonian for the first profit function $\tilde{C}$. # To this extent, function $\tilde{C}$ can be modeled as a `QuadraticProgram`, which provides the `to_ising()` method. # # # ### Approximate Universal Quantum Computing for Optimization Problems # # There has been a considerable amount of interest in recent times about the use of quantum computers to find a solution to combinatorial optimization problems. It is important to say that, given the classical nature of combinatorial problems, exponential speedup in using quantum computers compared to the best classical algorithms is not guaranteed. However, due to the nature and importance of the target problems, it is worth investigating heuristic approaches on a quantum computer that could indeed speed up some problem instances. Here we demonstrate an approach that is based on the *Quantum Approximate Optimization Algorithm* (QAOA) by <NAME>, and Gutmann (2014). We frame the algorithm in the context of *approximate quantum computing*, given its heuristic nature. # # The algorithm works as follows: # # 1. Choose the $w_i$ and $w_{ij}$ in the target Ising problem. In principle, even higher powers of Z are allowed. # # 1. Choose the depth of the quantum circuit $m$. 
Note that the depth can be modified adaptively. # # 1. Choose a set of controls $\theta$ and make a trial function $|\psi(\boldsymbol\theta)\rangle$, built using a quantum circuit made of C-Phase gates and single-qubit Y rotations, parameterized by the components of $\boldsymbol\theta$. # # 1. Evaluate # $$C(\boldsymbol\theta) = \langle\psi(\boldsymbol\theta)~|H|~\psi(\boldsymbol\theta)\rangle = \sum_i w_i \langle\psi(\boldsymbol\theta)~|Z_i|~\psi(\boldsymbol\theta)\rangle+ \sum_{i<j} w_{ij} \langle\psi(\boldsymbol\theta)~|Z_iZ_j|~\psi(\boldsymbol\theta)\rangle$$ # by sampling the outcome of the circuit in the Z-basis and adding the expectation values of the individual Ising terms together. In general, different control points around $\boldsymbol\theta$ have to be estimated, depending on the classical optimizer chosen. # # 1. Use a classical optimizer to choose a new set of controls. # # 1. Continue until $C(\boldsymbol\theta)$ reaches a minimum, close enough to the solution $\boldsymbol\theta^*$. # # 1. Use the last $\boldsymbol\theta$ to generate a final set of samples from the distribution $|\langle z_i~|\psi(\boldsymbol\theta)\rangle|^2\;\forall i$ to obtain the answer. # # It is our belief the difficulty of finding good heuristic algorithms will come down to the choice of an appropriate trial wavefunction. For example, one could consider a trial function whose entanglement best aligns with the target problem, or simply make the amount of entanglement a variable. In this tutorial, we will consider a simple trial function of the form # # $$|\psi(\theta)\rangle = [U_\mathrm{single}(\boldsymbol\theta) U_\mathrm{entangler}]^m |+\rangle$$ # # where $U_\mathrm{entangler}$ is a collection of C-Phase gates (fully entangling gates), and $U_\mathrm{single}(\theta) = \prod_{i=1}^n Y(\theta_{i})$, where $n$ is the number of qubits and $m$ is the depth of the quantum circuit. 
The motivation for this choice is that for these classical problems this choice allows us to search over the space of quantum states that have only real coefficients, still exploiting the entanglement to potentially converge faster to the solution. # # One advantage of using this sampling method compared to adiabatic approaches is that the target Ising Hamiltonian does not have to be implemented directly on hardware, allowing this algorithm not to be limited to the connectivity of the device. Furthermore, higher-order terms in the cost function, such as $Z_iZ_jZ_k$, can also be sampled efficiently, whereas in adiabatic or annealing approaches they are generally impractical to deal with. # # # References: # - <NAME>, Frontiers in Physics 2, 5 (2014) # - <NAME>, <NAME>, <NAME> e-print arXiv 1411.4028 (2014) # - <NAME>, <NAME>, <NAME> Phys. Rev. A 94, 022309 (2016) # - <NAME>, <NAME>, <NAME>, <NAME> e-print arXiv 1703.06199 (2017) # ### Application classes # # We use the application classes for the max-cut problem and the traveling salesman problem in this page. There are application classes for other optimization problems available as well. # See [Application Classes for Optimization Problems](09_application_classes.ipynb) for details. 
# + # useful additional packages import matplotlib.pyplot as plt import matplotlib.axes as axes # %matplotlib inline import numpy as np import networkx as nx from qiskit import Aer from qiskit.tools.visualization import plot_histogram from qiskit.circuit.library import TwoLocal from qiskit_optimization.applications import Maxcut, Tsp from qiskit.algorithms import VQE, NumPyMinimumEigensolver from qiskit.algorithms.optimizers import SPSA from qiskit.utils import algorithm_globals, QuantumInstance from qiskit_optimization.algorithms import MinimumEigenOptimizer from qiskit_optimization.problems import QuadraticProgram # - # ## Max-Cut problem # + # Generating a graph of 4 nodes n=4 # Number of nodes in graph G=nx.Graph() G.add_nodes_from(np.arange(0,n,1)) elist=[(0,1,1.0),(0,2,1.0),(0,3,1.0),(1,2,1.0),(2,3,1.0)] # tuple is (i,j,weight) where (i,j) is the edge G.add_weighted_edges_from(elist) colors = ['r' for node in G.nodes()] pos = nx.spring_layout(G) def draw_graph(G, colors, pos): default_axes = plt.axes(frameon=True) nx.draw_networkx(G, node_color=colors, node_size=600, alpha=.8, ax=default_axes, pos=pos) edge_labels = nx.get_edge_attributes(G, 'weight') nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels) draw_graph(G, colors, pos) # + tags=[] # Computing the weight matrix from the random graph w = np.zeros([n,n]) for i in range(n): for j in range(n): temp = G.get_edge_data(i,j,default=0) if temp != 0: w[i,j] = temp['weight'] print(w) # - # ### Brute force approach # # Try all possible $2^n$ combinations. For $n = 4$, as in this example, one deals with only 16 combinations, but for n = 1000, one has 1.071509e+30 combinations, which is impractical to deal with by using a brute force approach. 
# + tags=[] best_cost_brute = 0 for b in range(2**n): x = [int(t) for t in reversed(list(bin(b)[2:].zfill(n)))] cost = 0 for i in range(n): for j in range(n): cost = cost + w[i,j]*x[i]*(1-x[j]) if best_cost_brute < cost: best_cost_brute = cost xbest_brute = x print('case = ' + str(x)+ ' cost = ' + str(cost)) colors = ['r' if xbest_brute[i] == 0 else 'c' for i in range(n)] draw_graph(G, colors, pos) print('\nBest solution = ' + str(xbest_brute) + ' cost = ' + str(best_cost_brute)) # - # ### Mapping to the Ising problem # Qiskit provides functionality to generate `QuadraticProgram` from the problem specification as well as create the corresponding Ising Hamiltonian. # # + tags=[] max_cut = Maxcut(w) qp = max_cut.to_quadratic_program() qp.to_docplex().prettyprint() # - qubitOp, offset = qp.to_ising() print('Offset:', offset) print('Ising Hamiltonian:') print(str(qubitOp)) # + tags=[] # solving Quadratic Program using exact classical eigensolver exact = MinimumEigenOptimizer(NumPyMinimumEigensolver()) result = exact.solve(qp) print(result) # - # Since the problem was cast to a minimization problem, the solution of $-4$ corresponds to the optimum. 
# ### Checking that the full Hamiltonian gives the right cost # + tags=[] #Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector ee = NumPyMinimumEigensolver() result = ee.compute_minimum_eigenvalue(qubitOp) x = max_cut.sample_most_likely(result.eigenstate) print('energy:', result.eigenvalue.real) print('max-cut objective:', result.eigenvalue.real + offset) print('solution:', x) print('solution objective:', qp.objective.evaluate(x)) colors = ['r' if x[i] == 0 else 'c' for i in range(n)] draw_graph(G, colors, pos) # - # ### Running it on quantum computer # We run the optimization routine using a feedback loop with a quantum computer that uses trial functions built with Y single-qubit rotations, $U_\mathrm{single}(\theta) = \prod_{i=1}^n Y(\theta_{i})$, and entangler steps $U_\mathrm{entangler}$. algorithm_globals.random_seed = 123 seed = 10598 backend = Aer.get_backend('aer_simulator_statevector') quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed) # + tags=["nbsphinx-thumbnail"] # construct VQE spsa = SPSA(maxiter=300) ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=5, entanglement='linear') vqe = VQE(ry, optimizer=spsa, quantum_instance=quantum_instance) # run VQE result = vqe.compute_minimum_eigenvalue(qubitOp) # print results x = max_cut.sample_most_likely(result.eigenstate) print('energy:', result.eigenvalue.real) print('time:', result.optimizer_time) print('max-cut objective:', result.eigenvalue.real + offset) print('solution:', x) print('solution objective:', qp.objective.evaluate(x)) # plot results colors = ['r' if x[i] == 0 else 'c' for i in range(n)] draw_graph(G, colors, pos) # + tags=[] # create minimum eigen optimizer based on VQE vqe_optimizer = MinimumEigenOptimizer(vqe) # solve quadratic program result = vqe_optimizer.solve(qp) print(result) colors = ['r' if result.x[i] == 0 else 'c' for i in range(n)] draw_graph(G, colors, pos) # - # ## Traveling Salesman Problem # # In 
addition to being a notorious NP-complete problem that has drawn the attention of computer scientists and mathematicians for over two centuries, the Traveling Salesman Problem (TSP) has important bearings on finance and marketing, as its name suggests. Colloquially speaking, the traveling salesman is a person that goes from city to city to sell merchandise. The objective in this case is to find the shortest path that would enable the salesman to visit all the cities and return to its hometown, i.e. the city where he started traveling. By doing this, the salesman gets to maximize potential sales in the least amount of time. # # The problem derives its importance from its "hardness" and ubiquitous equivalence to other relevant combinatorial optimization problems that arise in practice. # # The mathematical formulation with some early analysis was proposed by <NAME> in the early 19th century. Mathematically the problem is, as in the case of Max-Cut, best abstracted in terms of graphs. The TSP on the nodes of a graph asks for the shortest *Hamiltonian cycle* that can be taken through each of the nodes. A Hamilton cycle is a closed path that uses every vertex of a graph once. The general solution is unknown and an algorithm that finds it efficiently (e.g., in polynomial time) is not expected to exist. # # Find the shortest Hamiltonian cycle in a graph $G=(V,E)$ with $n=|V|$ nodes and distances, $w_{ij}$ (distance from vertex $i$ to vertex $j$). A Hamiltonian cycle is described by $N^2$ variables $x_{i,p}$, where $i$ represents the node and $p$ represents its order in a prospective cycle. The decision variable takes the value 1 if the solution occurs at node $i$ at time order $p$. We require that every node can only appear once in the cycle, and for each time a node has to occur. 
This amounts to the two constraints (here and in the following, whenever not specified, the summands run over 0,1,...N-1) # # $$\sum_{i} x_{i,p} = 1 ~~\forall p$$ # $$\sum_{p} x_{i,p} = 1 ~~\forall i.$$ # # For nodes in our prospective ordering, if $x_{i,p}$ and $x_{j,p+1}$ are both 1, then there should be an energy penalty if $(i,j) \notin E$ (not connected in the graph). The form of this penalty is # # $$\sum_{i,j\notin E}\sum_{p} x_{i,p}x_{j,p+1}>0,$$ # # where it is assumed the boundary condition of the Hamiltonian cycles $(p=N)\equiv (p=0)$. However, here it will be assumed a fully connected graph and not include this term. The distance that needs to be minimized is # # $$C(\textbf{x})=\sum_{i,j}w_{ij}\sum_{p} x_{i,p}x_{j,p+1}.$$ # # Putting this all together in a single objective function to be minimized, we get the following: # # $$C(\textbf{x})=\sum_{i,j}w_{ij}\sum_{p} x_{i,p}x_{j,p+1}+ A\sum_p\left(1- \sum_i x_{i,p}\right)^2+A\sum_i\left(1- \sum_p x_{i,p}\right)^2,$$ # # where $A$ is a free parameter. One needs to ensure that $A$ is large enough so that these constraints are respected. One way to do this is to choose $A$ such that $A > \mathrm{max}(w_{ij})$. # # Once again, it is easy to map the problem in this form to a quantum computer, and the solution will be found by minimizing a Ising Hamiltonian. 
# + tags=[] # Generating a graph of 3 nodes n = 3 num_qubits = n ** 2 tsp = Tsp.create_random_instance(n, seed=123) adj_matrix = nx.to_numpy_matrix(tsp.graph) print('distance\n', adj_matrix) colors = ['r' for node in tsp.graph.nodes] pos = [tsp.graph.nodes[node]['pos'] for node in tsp.graph.nodes] draw_graph(tsp.graph, colors, pos) # - # ### Brute force approach # + tags=[] from itertools import permutations def brute_force_tsp(w, N): a = list(permutations(range(1,N))) last_best_distance = 1e10 for i in a: distance = 0 pre_j = 0 for j in i: distance = distance + w[j,pre_j] pre_j = j distance = distance + w[pre_j,0] order = (0,) + i if distance < last_best_distance: best_order = order last_best_distance = distance print('order = ' + str(order) + ' Distance = ' + str(distance)) return last_best_distance, best_order best_distance, best_order = brute_force_tsp(adj_matrix , n) print('Best order from brute force = ' + str(best_order) + ' with total distance = ' + str(best_distance)) def draw_tsp_solution(G, order, colors, pos): G2 = nx.DiGraph() G2.add_nodes_from(G) n = len(order) for i in range(n): j = (i + 1) % n G2.add_edge(order[i], order[j], weight=G[order[i]][order[j]]['weight']) default_axes = plt.axes(frameon=True) nx.draw_networkx(G2, node_color=colors, edge_color='b', node_size=600, alpha=.8, ax=default_axes, pos=pos) edge_labels = nx.get_edge_attributes(G2, 'weight') nx.draw_networkx_edge_labels(G2, pos, font_color='b', edge_labels=edge_labels) draw_tsp_solution(tsp.graph, best_order, colors, pos) # - # ### Mapping to the Ising problem qp = tsp.to_quadratic_program() qp.to_docplex().prettyprint() # + tags=[] from qiskit_optimization.converters import QuadraticProgramToQubo qp2qubo = QuadraticProgramToQubo() qubo = qp2qubo.convert(qp) qubitOp, offset = qubo.to_ising() print('Offset:', offset) print('Ising Hamiltonian:') print(str(qubitOp)) # + tags=[] result = exact.solve(qubo) print(result) # - # ### Checking that the full Hamiltonian gives the right cost # + 
tags=[] #Making the Hamiltonian in its full form and getting the lowest eigenvalue and eigenvector ee = NumPyMinimumEigensolver() result = ee.compute_minimum_eigenvalue(qubitOp) print('energy:', result.eigenvalue.real) print('tsp objective:', result.eigenvalue.real + offset) x = tsp.sample_most_likely(result.eigenstate) print('feasible:', qubo.is_feasible(x)) z = tsp.interpret(x) print('solution:', z) print('solution objective:', tsp.tsp_value(z, adj_matrix)) draw_tsp_solution(tsp.graph, z, colors, pos) # - # ### Running it on quantum computer # We run the optimization routine using a feedback loop with a quantum computer that uses trial functions built with Y single-qubit rotations, $U_\mathrm{single}(\theta) = \prod_{i=1}^n Y(\theta_{i})$, and entangler steps $U_\mathrm{entangler}$. algorithm_globals.random_seed = 123 seed = 10598 backend = Aer.get_backend('aer_simulator_statevector') quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed) # + tags=[] spsa = SPSA(maxiter=300) ry = TwoLocal(qubitOp.num_qubits, 'ry', 'cz', reps=5, entanglement='linear') vqe = VQE(ry, optimizer=spsa, quantum_instance=quantum_instance) result = vqe.compute_minimum_eigenvalue(qubitOp) print('energy:', result.eigenvalue.real) print('time:', result.optimizer_time) x = tsp.sample_most_likely(result.eigenstate) print('feasible:', qubo.is_feasible(x)) z = tsp.interpret(x) print('solution:', z) print('solution objective:', tsp.tsp_value(z, adj_matrix)) draw_tsp_solution(tsp.graph, z, colors, pos) # - algorithm_globals.random_seed = 123 seed = 10598 backend = Aer.get_backend('aer_simulator_statevector') quantum_instance = QuantumInstance(backend, seed_simulator=seed, seed_transpiler=seed) # + tags=[] # create minimum eigen optimizer based on VQE import warnings warnings.filterwarnings('ignore', category=UserWarning) vqe_optimizer = MinimumEigenOptimizer(vqe) # solve quadratic program result = vqe_optimizer.solve(qp) print(result) z = tsp.interpret(x) 
print('solution:', z) print('solution objective:', tsp.tsp_value(z, adj_matrix)) draw_tsp_solution(tsp.graph, z, colors, pos) # - import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
docs/tutorials/06_examples_max_cut_and_tsp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import time import numpy as np import pandas as pd from sklearn.manifold import TSNE import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings('ignore') # %matplotlib inline X = np.load("Training_Data/test_x.npy") y = np.load("Training_Data/test_y.npy") data = (X/255.).reshape(-1,128*128) labels = y.reshape(-1,) print(f"Data Shape: {data.shape}\nLabels Shape: {labels.shape}") feat_cols = ['pixel'+str(i) for i in range(data.shape[1]) ] df = pd.DataFrame(data,columns=feat_cols) df['y'] = labels df['label'] = df['y'].apply(lambda i: str(i)) df['label'].replace(to_replace=['0', '1', '2', '3', '4', '5', '6', '7'], value= ['Hip-Hop', 'International', 'Electronic', 'Folk', 'Experimental', 'Rock', 'Pop', 'Instrumental'], inplace=True) data, labels = None, None print('Size of the dataframe: {}'.format(df.shape)); df # For reproducability of the results np.random.seed(42) rndperm = np.random.permutation(df.shape[0]) plt.gray() fig = plt.figure( figsize=(16,10) ) for i in range(0,15): ax = fig.add_subplot(3,5,i+1) ax.set_title(r"Label: $\bf{}$".format(str(df.loc[rndperm[i],'label']))) ax.imshow(df.loc[rndperm[i],feat_cols].values.reshape((128,128)).astype(float)) plt.show() N = 3998 df_subset = df.loc[rndperm[:N],:].copy() data_subset = df_subset[feat_cols].values ; len(data_subset) # Using t-SNE to reduce high-dimensional data "http://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf" time_start = time.time() # configuring the parameteres tsne = TSNE(n_components=2, verbose=1, perplexity=10, n_iter=800 ) tsne_results = tsne.fit_transform(data_subset) print('t-SNE done! 
Time elapsed: {} seconds'.format(time.time()-time_start)) # creating a new data frame which help us in ploting the result data tsne_data = np.vstack((tsne_results.T, df_subset['label'])).T; tsne_data # Ploting the result of tsne tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "label")) sns.FacetGrid(tsne_df, hue="label", size=6).map(plt.scatter, 'Dim_1', 'Dim_2').add_legend() plt.show()
torch/visualizing_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="XHyuIcRGixQy" # # KNN Classification # + [markdown] colab_type="text" id="MetALTTmQKkh" # ### Importing Libraries # + colab={} colab_type="code" id="eWchGPfa9xW9" #importing libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline import warnings warnings.filterwarnings("ignore") # + [markdown] colab_type="text" id="tHKgifnpjyvh" # ### Load the data # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1296, "status": "ok", "timestamp": 1555058105638, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="NB5xYyHg9xXA" outputId="33bc3be6-c335-4dbc-b57f-730ac784433d" data = pd.read_csv('data_cleaned.csv') data.shape # + colab={"base_uri": "https://localhost:8080/", "height": 253} colab_type="code" executionInfo={"elapsed": 1284, "status": "ok", "timestamp": 1555058105639, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="qjISR4M_9xXE" outputId="d45fdb4b-25c5-40d3-edc2-cf524ad16f73" data.head() # + [markdown] colab_type="text" id="hGxgnJmxj3nv" # ### Segregating variables: Independent and Dependent Variables # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1974, "status": "ok", "timestamp": 1555058106339, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="rym4fnPq9xXG" outputId="8eb99fe6-c327-4541-bd1e-fd89e69b13fd" #seperating independent and dependent variables x = data.drop(['Survived'], axis=1) y = data['Survived'] x.shape, y.shape # + [markdown] colab_type="text" id="YXztAQ_Ded3q" # ### Scaling 
the data (Using MinMax Scaler) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 1954, "status": "ok", "timestamp": 1555058106340, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="mBlVReHxd2eb" outputId="488f8b5e-e1d9-4d11-dd45-<KEY>" ## Importing the MinMax Scaler from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() x_scaled = scaler.fit_transform(x) # - x = pd.DataFrame(x_scaled, columns = x.columns) x.head() # + [markdown] colab_type="text" id="UXBaGn4NTCjk" # <img src="Image 1.png" style="width:600px;" align="center"> # + colab={} colab_type="code" id="-PcDK1re9xXM" # Importing the train test split function from sklearn.model_selection import train_test_split train_x,test_x,train_y,test_y = train_test_split(x,y, random_state = 56, stratify=y) # + [markdown] colab_type="text" id="WvsDKzjdyNWi" # ### Implementing KNN Classifier # + colab={} colab_type="code" id="yCG2gM5KyM-1" #importing KNN classifier and metric F1score from sklearn.neighbors import KNeighborsClassifier as KNN from sklearn.metrics import f1_score # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1923, "status": "ok", "timestamp": 1555058106343, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="TFrwDTRdybYF" outputId="017cef2e-c310-40ae-d17e-bb49c4ddb1b3" # Creating instance of KNN clf = KNN(n_neighbors = 10) # Fitting the model clf.fit(train_x, train_y) # Predicting over the Train Set and calculating F1 test_predict = clf.predict(test_x) k = f1_score(test_predict, test_y) print('Test F1 Score ', k ) # + [markdown] colab_type="text" id="WUlYDj9Xkmvy" # ### Elbow for Classifier # + colab={} colab_type="code" id="8NpQ3BLz-soi" def Elbow(K): #initiating empty list test_error = [] #training model for evey value of K for i in K: #Instance oh KNN clf = 
KNN(n_neighbors = i) clf.fit(train_x, train_y) # Appending F1 scores to empty list claculated using the predictions tmp = clf.predict(test_x) tmp = f1_score(tmp,test_y) error = 1-tmp test_error.append(error) return test_error # + colab={} colab_type="code" id="61WGHNM_Cxn2" #Defining K range k = range(6, 20, 2) # + colab={} colab_type="code" id="SNBDTcSf9xXW" # calling above defined function test = Elbow(k) # + colab={"base_uri": "https://localhost:8080/", "height": 312} colab_type="code" executionInfo={"elapsed": 2854, "status": "ok", "timestamp": 1555058107314, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="6iA6n55NDKJf" outputId="bb6700af-e76f-433f-b81e-92537cb3a60c" # plotting the Curves plt.plot(k, test) plt.xlabel('K Neighbors') plt.ylabel('Test error') plt.title('Elbow Curve for test') # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1923, "status": "ok", "timestamp": 1555058106343, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="TFrwDTRdybYF" outputId="017cef2e-c310-40ae-d17e-bb49c4ddb1b3" # Creating instance of KNN clf = KNN(n_neighbors = 12) # Fitting the model clf.fit(train_x, train_y) # Predicting over the Train Set and calculating F1 test_predict = clf.predict(test_x) k = f1_score(test_predict, test_y) print('Test F1 Score ', k ) # + [markdown] colab_type="text" id="SJtoFSh5iupO" # # KNN Regression # + [markdown] colab_type="text" id="WXYpUNYlivED" # ### Importing the data # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2843, "status": "ok", "timestamp": 1555058107315, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="MCh5QTm3ivED" outputId="285574da-84b0-47ed-d510-143634a67488" data = pd.read_csv('train_cleaned.csv') data.shape # + colab={"base_uri": 
"https://localhost:8080/", "height": 270} colab_type="code" executionInfo={"elapsed": 2832, "status": "ok", "timestamp": 1555058107315, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="nB0-QARKivEF" outputId="014475a4-c124-4550-ada1-6e5b60e22d7b" data.head() # + [markdown] colab_type="text" id="XtZHt10kivEH" # ### Segregating variables: Independent and Dependent Variables # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2824, "status": "ok", "timestamp": 1555058107316, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="LlDqhuTsivEH" outputId="3da310f3-9059-446d-fe80-6ee365d9a978" #seperating independent and dependent variables x = data.drop(['Item_Outlet_Sales'], axis=1) y = data['Item_Outlet_Sales'] x.shape, y.shape # + [markdown] colab_type="text" id="sYK120N-ivEI" # ### Scaling the data (Using MinMax Scaler) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 2816, "status": "ok", "timestamp": 1555058107317, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="78JcSZFfivEJ" outputId="678b6c24-2f2b-4701-d69c-617cb5bb780f" # Importing MinMax Scaler from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() x_scaled = scaler.fit_transform(x) # - x = pd.DataFrame(x_scaled) # + [markdown] colab_type="text" id="lEpDAt67VnOg" # <img src="Image 1.png" style="width:600px;" align="center"> # + colab={} colab_type="code" id="WupN60YyivEL" # Importing Train test split from sklearn.model_selection import train_test_split train_x,test_x,train_y,test_y = train_test_split(x,y, random_state = 56) # + [markdown] colab_type="text" id="kl-xQkaxivEM" # ### Implementing KNN Regressor # + colab={} colab_type="code" id="d2gFb42livEM" #importing KNN regressor and metric mse from sklearn.neighbors 
import KNeighborsRegressor as KNN
from sklearn.metrics import mean_squared_error as mse

# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 3220, "status": "ok", "timestamp": 1555058107751, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="YRr6lpNjivEO" outputId="525ce47b-cbb9-4264-c97d-2b8cb4531380"
# Creating instance of KNN with an initial guess of 5 neighbors
reg = KNN(n_neighbors = 5)

# Fitting the model on the training split
reg.fit(train_x, train_y)

# Predicting over the Test Set and calculating MSE
test_predict = reg.predict(test_x)
k = mse(test_predict, test_y)
print('Test MSE ', k )
# + [markdown] colab_type="text" id="uv7H8yL2ivEQ"
# ### Elbow for Regressor
# + colab={} colab_type="code" id="UBkfXT-pivET"
def Elbow(K):
    """Return the test-set MSE for each neighbor count in the iterable K.

    Relies on the module-level train/test splits (train_x, train_y,
    test_x, test_y) being defined by the earlier cells.
    """
    #initiating empty list
    test_mse = []
    #training model for every value of K
    for i in K:
        #Instance of KNN
        reg = KNN(n_neighbors = i)
        reg.fit(train_x, train_y)
        #Appending mse value to empty list calculated using the predictions
        tmp = reg.predict(test_x)
        tmp = mse(tmp,test_y)
        test_mse.append(tmp)
    return test_mse

# + colab={} colab_type="code" id="5ZQeAc8zivEU"
#Defining K range to sweep for the elbow curve
k = range(1,40)

# + colab={} colab_type="code" id="2xemLcNyivEZ"
# calling above defined function
test = Elbow(k)

# + colab={"base_uri": "https://localhost:8080/", "height": 312} colab_type="code" executionInfo={"elapsed": 7927, "status": "ok", "timestamp": 1555058112485, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="yl47ZsjuivEa" outputId="5ade2116-f901-41de-9d56-3a3e2ead6e4e"
# plotting the Curves (look for the "elbow" where the error stops improving)
plt.plot(k, test)
plt.xlabel('K Neighbors')
plt.ylabel('Test Mean Squared Error')
plt.title('Elbow Curve for test')

# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 3220, "status": "ok", "timestamp": 1555058107751, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14774175216384036942"}, "user_tz": -330} id="YRr6lpNjivEO" outputId="525ce47b-cbb9-4264-c97d-2b8cb4531380"
# Creating instance of KNN with the neighbor count chosen from the elbow curve
reg = KNN(n_neighbors = 9)
# Fitting the model
reg.fit(train_x, train_y)
# Predicting over the Test Set and calculating MSE
test_predict = reg.predict(test_x)
k = mse(test_predict, test_y)
print('Test MSE ', k )
K-Nearest-Neighbours Classification & Regression/KNN Implementation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # #!/usr/bin/env python # coding: utf-8 # load a bunch of stuff from __future__ import division # load import numpy as np import scipy import pylab import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.pyplot import cm from matplotlib.ticker import NullFormatter, MaxNLocator, LogLocator import matplotlib.patches as mpatches import matplotlib.lines as mlines plt.switch_backend('agg') # needed for saving figures import csv from pydas.dassl import DASSL import os import rmgpy import rmg import re import operator import pandas as pd import pylab from cycler import cycler import seaborn as sns import os import multiprocessing import itertools # + # set up the LSR grid carbon_range = (-8.0, -2.0) oxygen_range = (-6.5, -1.5) grid_size = 9 mesh = np.mgrid[carbon_range[0]:carbon_range[1]:grid_size*1j, oxygen_range[0]:oxygen_range[1]:grid_size*1j] with sns.axes_style("whitegrid"): plt.axis('square') plt.xlim(carbon_range) plt.ylim(oxygen_range) plt.yticks(np.arange(-6.5,-1,0.5)) plt.show() # just to double-check experiments = mesh.reshape((2,-1)).T with sns.axes_style("whitegrid"): plt.axis('square') plt.xlim(carbon_range) plt.ylim(oxygen_range) plt.yticks(np.arange(-6.5,-1.,0.5)) plt.plot(*experiments.T, marker='o', linestyle='none') plt.clf() extent = carbon_range + oxygen_range # Because the center of a corner pixel is in fact the corner of the grid # Becaus we want to stretch the image a little c_step = mesh[0,1,0]-mesh[0,0,0] o_step = mesh[1,0,1]-mesh[1,0,0] carbon_range2 = (carbon_range[0]-c_step/2, carbon_range[1]+c_step/2) oxygen_range2 = (oxygen_range[0]-c_step/2, oxygen_range[1]+c_step/2) extent2 = carbon_range2 + oxygen_range2 # + # For close packed surfaces from # <NAME>.; 
<NAME>.; <NAME>.; <NAME>.; <NAME>.; # <NAME>.; <NAME>.; <NAME>.; <NAME>. # Scaling Properties of Adsorption Energies for Hydrogen-Containing Molecules on # Transition-Metal Surfaces. Phys. Rev. Lett. 2007, 99 (1), 016105 # DOI: 10.1103/PhysRevLett.99.016105. abildpedersen_energies = { # Carbon, then Oxygen 'Pt':(-6.363636363636363,-3.481481481481482), 'Rh':(-6.5681818181818175,-4.609771721406942), 'Ir':(-6.613636363636363,-5.94916142557652), 'Au':(-3.7499999999999973,-2.302236198462614), 'Pd':(-6, -3.517877940833916), 'Cu':(-4.159090909090907,-3.85272536687631), 'Ag':(-2.9545454545454533,-2.9282552993244817), 'Ni':(-6.045454545454545,-4.711681807593758), 'Ru':(-6.397727272727272,-5.104763568600047), } # "A Framework for Scalable Adsorbate-adsorbate Interaction Models" # <NAME>, <NAME>, and <NAME> # From 2016 Hoffman et al. # https://doi.org/10.1021/acs.jpcc.6b03375 hoffman_energies = { # Carbon, then Oxygen 'Pt':(-6.750,-3.586), 'Rh':(-6.78,-5.02), 'Ir':(-6.65,-4.73), 'Pd':(-6.58,-4.38), 'Cu':(-4.28,-4.51), 'Ag':(-2.91,-3.55), } katrin_energies = {# Carbon, then Oxygen 'Pt':(-7.02516,-3.81153), 'Rh':(-7.33484,-4.71419), 'Ir':(-7.25234,-4.35236), 'Au':(-4.5465,-2.71822), 'Pd':(-7.16786,-4.13577), 'Cu':(-4.96034,-4.20764), 'Ag':(-3.50609,-3.11159), 'Ni':(-6.79794,-4.98902), 'Ru':(-7.5979,-5.4492), } # + def plot_coords(energies, label, show_text=True): """ Plots binding energy coordinates given in dict format. show_text is `True` to display text label on plot. 
""" markers = { 'Abild-Pedersen':'o', 'Hoffman':'s', 'RMG':'X', } colors = { 'Pt':'r', 'Rh':'darkorange', 'Ir':'limegreen', 'Au':'darkgreen', 'Pd':'dodgerblue', 'Cu':'blue', 'Ag':'darkviolet', 'Ni':'magenta', 'Ru':'deeppink', } for metal, coords in energies.items(): plt.plot(coords[0], coords[1], marker=markers[label], color=colors[metal], label=label) if show_text is True: plt.text(coords[0], coords[1]-0.15, metal, color=colors[metal]) plot_coords(abildpedersen_energies,'Abild-Pedersen',) plot_coords(hoffman_energies,'Hoffman',) plot_coords(katrin_energies,'RMG',) plt.xlim(carbon_range) plt.ylim(oxygen_range) plt.yticks(np.arange(-6.5,-1,1)) plt.xlabel('$\Delta E^C$ (eV)',fontsize=18) plt.ylabel('$\Delta E^O$ (eV)',fontsize=18) o_marker = mlines.Line2D([], [], color='k', marker='o', label='Abild-Pedersen', linestyle="None") s_marker = mlines.Line2D([], [], color='k', marker='s', label='Hoffman', linestyle="None") x_marker = mlines.Line2D([], [], color='k', marker='X', label='RMG', linestyle="None") plt.legend(handles=[o_marker,s_marker,x_marker],loc='lower right') plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.rcParams["figure.figsize"]=(6,6) plt.tight_layout() plt.savefig('binding_energies_marker.pdf', bbox_inches='tight') plt.clf() # + def plot_coords(energies, label, show_text=True): """ Plots binding energy coordinates given in dict format. show_text is `True` to display text label on plot. 
""" colors = { 'Abild-Pedersen':'g', 'Hoffman':'b', 'RMG':'r', } markers = { 'Pt':'o', 'Rh':'v', 'Ir':'^', 'Au':'<', 'Pd':'>', 'Cu':'s', 'Ag':'X', 'Ni':'D', 'Ru':'P', } for metal, coords in energies.items(): plt.plot(coords[0], coords[1], marker=markers[metal], color=colors[label], label=label) if show_text is True: plt.text(coords[0], coords[1]-0.15, metal, color=colors[label]) plot_coords(abildpedersen_energies,'Abild-Pedersen',) plot_coords(hoffman_energies,'Hoffman',) plot_coords(katrin_energies,'RMG',) plt.xlim(carbon_range) plt.ylim(oxygen_range) plt.yticks(np.arange(-6.5,-1,1)) plt.xlabel('$\Delta E^schoC$ (eV)',fontsize=18) plt.ylabel('$\Delta E^O$ (eV)',fontsize=18) green_patch = mpatches.Patch(color='g',label='Abild-Pedersen') blue_patch = mpatches.Patch(color='b',label='Hoffman') red_patch = mpatches.Patch(color='r',label='RMG') plt.legend(handles=[green_patch,blue_patch,red_patch],loc='lower right',) plt.xticks(fontsize=16) plt.yticks(fontsize=16) plt.rcParams["figure.figsize"]=(6,6) plt.tight_layout() plt.savefig('binding_energies_color.pdf', bbox_inches='tight') plt.clf() # -
binding_energy_compare.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Hi! This is a pytorch classification example built with inspiration from https://towardsdatascience.com/pytorch-tabular-binary-classification-a0368da5bb89 # # The link contains additional explanitory text and short 5-minute youtube video explaining core concepts. # + ### PYTORCH CLASSIFICATION EXAMPLE # # Author: <NAME> # email: <EMAIL> # import pandas as pd import numpy as np import sklearn as sk import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader from torch.nn import CrossEntropyLoss from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt from sklearn.metrics import roc_curve from sklearn.metrics import auc from scipy.special import expit # Load 2nd version of the Aleph Data. Why not the first? path = 'AlephBtag_MC_small_v2.csv' data = pd.DataFrame(np.genfromtxt(path, names=True)) variables = data.columns print(variables) # - # Remember: 'isb' is our binary truth. If isb = 1 then it's a b-quark and isb = 0 if it is not. Because this is our truth, we must not include it as the input to our model. Also, 'nnbjet' is our "competitor" e.g. a model we are supposed to benchmark against. Therefore 'nnbjet' shouldn't be in our input either. 
# Usually one would apply regularization/standardization of data at this step - but lets skip this for now and just move onto separating the data into input, truth and benchmark:

# Drop the truth ('isb') and the benchmark ('nnbjet') from the model inputs
input_variables = variables[(variables != 'nnbjet') & (variables != 'isb')]
input_data = data[input_variables]
truth = data['isb']
benchmark = data['nnbjet']
print(input_variables)

# Let us now divide the truth and input_data into two parts; a training sample and a validation sample:

input_train, input_valid, truth_train, truth_valid = train_test_split(input_data, truth, test_size=0.25, random_state=42)

# Before we go any further, we need to put this data into the pytorch-Dataset class, such that we can extract it during training. This is a little annoying, but it's worth the effort.

# +
## train data
class MyDataset(Dataset):
    """Minimal torch Dataset wrapping an (input, truth) tensor pair."""
    def __init__(self, X_data, y_data):
        self.input = X_data
        self.truth = y_data

    def __getitem__(self, index):
        return self.input[index], self.truth[index]

    def __len__(self):
        return len(self.input)


train_data = MyDataset(torch.FloatTensor(np.array(input_train)), torch.FloatTensor(np.array(truth_train)))
valid_data = MyDataset(torch.FloatTensor(np.array(input_valid)), torch.FloatTensor(np.array(truth_valid)))

## We can now access input_train via train_data.input and truth_train via train_data.truth, and similarly for input_valid and truth_valid.
print(train_data.input)
print(train_data.truth)
# -

# Let us now define the pytorch model:

# +
class OurModel(nn.Module):
    """Small fully-connected classifier: 9 inputs -> 2 class logits."""
    def __init__(self):
        super(OurModel, self).__init__()
        # Here we define the layers
        self.input_layer = nn.Linear(9, 24)
        self.hidden_layer1 = nn.Linear(24, 24)
        self.hidden_layer2 = nn.Linear(24, 12)
        self.output_layer = nn.Linear(12, 2)
        self.relu = nn.ReLU()

    def forward(self, inputs):
        # Here we define how data passes through the layers.
        x = self.input_layer(inputs)
        x = self.relu(x)
        x = self.hidden_layer1(x)
        x = self.relu(x)
        x = self.hidden_layer2(x)
        x = self.relu(x)
        x = self.output_layer(x)
        return x
# -

# Now we need to write our training loop!

# +
def Train(model, optimizer, loss_function, train_loader, validation_loader, device, epochs):
    """Train `model` for `epochs` epochs; returns (training_loss, validation_loss) per epoch."""
    validation_loss = []
    training_loss = []
    for e in range(0, epochs):
        # FIX: model.train() must be (re)set every epoch — Validate() switches
        # the model to eval mode and previously it was never switched back.
        model.train()
        epoch_loss = 0
        n_minibatches = 0
        for input_train_batch, truth_train_batch in train_loader:
            input_train_batch, truth_train_batch = input_train_batch.to(device), truth_train_batch.to(device)
            optimizer.zero_grad()
            prediction = model(input_train_batch) # this asks our model to produce predictions on the training batch
            loss = loss_function(prediction, truth_train_batch.long()) # this calculates the loss
            loss.backward() # This initiates the backpropagation
            optimizer.step()
            epoch_loss += loss.item()
            n_minibatches += 1
        valid_loss = Validate(model, validation_loader, device, loss_function) # Now that the model have trained 1 epoch, we evaluate the model on the validation set!
        validation_loss.append(valid_loss)
        training_loss.append(epoch_loss/n_minibatches)
        print('EPOCH: %s | training loss: %s | validation loss: %s'%(e+1,round(epoch_loss/n_minibatches,3), round(valid_loss, 3)))
    return training_loss, validation_loss


def Validate(model, validation_loader, device, loss_function):
    """Return the mean loss of `model` over `validation_loader` (no gradients)."""
    model.eval()
    n_batches = 0
    validation_loss = 0
    with torch.no_grad():
        for input_valid_batch, truth_valid_batch in validation_loader:
            input_valid_batch, truth_valid_batch = input_valid_batch.to(device), truth_valid_batch.to(device)
            prediction = model(input_valid_batch)
            loss = loss_function(prediction, truth_valid_batch.long())
            validation_loss += loss.item()
            n_batches += 1
    validation_loss = validation_loss/n_batches
    return validation_loss


def Predict(model, prediction_loader, device):
    """Return the raw model logits for every batch in `prediction_loader`."""
    model.eval()
    predictions = []
    print('PREDICTING!')
    with torch.no_grad():
        # FIX: iterate the loader passed as an argument — this previously read
        # the global `validation_loader`, silently ignoring `prediction_loader`.
        for input_pred_batch, _ in prediction_loader:
            input_pred_batch = input_pred_batch.to(device)
            prediction = model(input_pred_batch)
            # FIX: move to CPU before .numpy() so this also works when device is CUDA
            predictions.extend(prediction.cpu().numpy())
    print('Done Predicting!')
    return predictions


learning_rate = 1e-3
batch_size = 32
n_epochs = 10

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = OurModel()
model.to(device) ## mounts the model to the selected device. Either cpu or a GPU.
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
loss_function = CrossEntropyLoss()

train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
validation_loader = DataLoader(dataset=valid_data, batch_size=batch_size)

training_loss, validation_loss = Train(model, optimizer, loss_function, train_loader, validation_loader, device, n_epochs)
## This trains the model on input_train by comparing to the true values in truth_train. After every epoch of training, the model is evaluated on the validation dataset,
## namely input_valid and truth_valid.
# - # We can now extract information from the training and validation by accessing training_loss and validation_loss : # + fig = plt.figure() plt.plot(training_loss,label = 'training loss') plt.plot(training_loss,'o') plt.plot(validation_loss, label = 'validation loss') plt.plot(validation_loss, 'o') plt.legend() plt.xticks(size = 12) plt.yticks(size = 12) # - # As you can see, after 8th epoch the validation loss and training loss cross each other. This is important! Do you know why? Now we have a trained model and we're ready to make predictions. Usually, one would have a test set (so in total one would have; a training set, a validation set AND a test set). But for simplicity, let's just predict on the validation sample. This is OK because the model has not trained on this set - if we asked the model to predict on examples on which it has trained, we would be cheating! # + predictions = Predict(model,validation_loader,device) # this asks the trained model to make predictions on input_valid. Notice we're not giving it any truth values! ### This bit of gymnastics is because the output of our model is raw logits from the final output layer. # This means it produces a pseudo score for each class (a score for 0 and a score for 1). # expit converts this logit to a number in [0,1] # We then combine the scores such that our_score = (1-score)/(1-score + 0-score) predictions = pd.DataFrame(predictions) predictions.columns = ['not_bquark', 'bquark'] predictions['not_bquark'] = expit(predictions['not_bquark']) predictions['bquark'] = expit(predictions['bquark']) predictions = predictions['bquark']/(predictions['bquark'] + predictions['not_bquark']) # - # We can now evaluate our predictions by producing a ROC-curve and calculating the AUC-score and comparing it to our 'nnbjet' competitor. You can read more about ROC curves and AUC scores in # https://mlwhiz.com/blog/2021/02/03/roc-auc-curves-explained/ (and by attending Troel's lectures!) 
# +
fpr, tpr, _ = roc_curve(truth_valid, predictions) ## this calculates the false positive rate and the true positive rate for our model's predictions on the validation sample
fpr_nnbjet, tpr_nnbjet, _ = roc_curve(truth,benchmark) ## this calculates the false positive rate and the true positive rate for nnbjet on the entire data sample

### We can now calculate the AUC scores of these ROC-curves
auc_score = auc(fpr,tpr) # this is auc score for our model
auc_score_nnbjet = auc(fpr_nnbjet, tpr_nnbjet)# this is the auc score for nnbjet

### Let's plot the results
fig = plt.figure(figsize = [10,10])
plt.title('ROC Comparison', size = 12)
plt.plot(fpr,tpr, label = 'our model')
plt.plot(fpr_nnbjet, tpr_nnbjet, label = 'nnbjet')
plt.legend()
# FIX: corrected the misspelled axis label ('False Postive Rate')
plt.xlabel('False Positive Rate', size = 12)
plt.ylabel('True Positive Rate', size = 12)

### This just plots a table with the AUC-scores.
row_labels=['our model', 'nnbjet']
table_vals=[[round(auc_score,5)], [round(auc_score_nnbjet,5)]]
col_labels=['AUC']
the_table = plt.table(cellText=table_vals, colWidths = [0.1]*3, rowLabels=row_labels, colLabels=col_labels, loc='center right')
########
# -

# So our __very__ simple and un-optimized model achieves a wee bit higher AUC score than nnbjet. (higher is better). Can you beat this?
Week1/ML_BjetSelection_pytorch_RasmusFO.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Proba-V Time-Series Analysis # **Author:** <NAME><br> # **Description:** Basic Time-Series Analysis using Proba-V NDVI (Normalized Difference Vegetation Index) imagery. # %matplotlib inline # + from IPython.display import Image import ee, datetime import pandas as pd from pylab import * import seaborn as sns from matplotlib.pylab import rcParams from statsmodels.tsa.seasonal import seasonal_decompose ee.Initialize() # - # ### Load Proba-V image collection and point geometry # Selected Location of point is from the Proba-V Footprint X18Y02 in Luxembourg, Europe. # + # Set start and end date startTime = datetime.datetime(2015, 1, 1) endTime = datetime.datetime(2017, 12, 31) # Create image collection collection = ee.ImageCollection('VITO/PROBAV/C1/S1_TOC_100M').filterDate(startTime, endTime) # Create point in Luxembourg (Proba-V Footprint: X18Y02) point = {'type':'Point', 'coordinates':[6.134136, 49.612485]}; # - # ### Retrieve information, reshape and calculate NDVI # Retrieving information from point geometry with a buffer of 500m over image collection. Reshaping data and calculating NDVI from **RED** and **NIR** band. 
info = collection.getRegion(point,500).getInfo()

# +
# Reshape image collection: first row of `info` is the header, the rest is data
header = info[0]
data = array(info[1:])
iTime = header.index('time')
# Earth Engine timestamps are in milliseconds since the epoch
time = [datetime.datetime.fromtimestamp(i/1000) for i in (data[0:,iTime].astype(int))]

# List of used image bands
band_list = ['RED',u'NIR']
iBands = [header.index(b) for b in band_list]
# FIX: np.float was a deprecated alias for the builtin float and was removed
# in NumPy 1.24 — use plain float instead.
yData = data[0:,iBands].astype(float)

# Calculate NDVI = (NIR - RED) / (NIR + RED)
red = yData[:,0]
nir = yData[:,1]
ndvi = (nir - red) / (nir + red)
# -

# ### Reshape NDVI array into Pandas Dataframe

df = pd.DataFrame(data=ndvi, index=list(range(len(ndvi))), columns=['NDVI'])
df = df.interpolate()  # fill gaps (e.g. cloudy acquisitions) by linear interpolation
df['Date'] = pd.Series(time, index=df.index)
df = df.set_index(df.Date)
df.index = pd.to_datetime(df.index)
df['NDVI']=df['NDVI'].fillna(0)

# ### Obtain statistical information over all elements of the Time-Series

df.info()

df.describe()

# ### Visualize Proba-V NDVI Time Series

sns.set(rc={'figure.figsize':(15, 6)})
df['NDVI'].plot(linewidth=0.5);

# ### Seasonal Decomposition

# +
# FIX: the `freq` keyword was renamed to `period` in statsmodels 0.11 and
# later removed; 352 is the (approximate) number of samples per year here.
sd=seasonal_decompose(df['NDVI'], model='additive', period=352)
sd.seasonal.plot()
sd.trend.plot()
sd.resid.plot()
plt.legend(['Seasonality', 'Trend', 'Residuals'])
# -

# ### Resample data
# Resampling to weekly level and calculate the percentage change over one year.

# FIX: resample's positional `how` argument was removed from pandas — chain
# the aggregation instead. (Name kept as df_monthly for continuity; the
# resampling is weekly.)
df_monthly=df.resample('W').mean()
df_monthly['pct_change'] = df_monthly.pct_change(52)

df_monthly['pct_change']['2016':].plot()
plt.title('52 Weeks Percentage Change')
005_EE_Proba-V_NDVI_Time-Series_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: cv3 # language: python # name: cv3 # --- # + import gym import random import datetime import torch import numpy as np from collections import deque import matplotlib.pyplot as plt # %matplotlib inline from ppo_continuous import * # imports for rendering outputs in Jupyter. from JSAnimation.IPython_display import display_animation from matplotlib import animation from IPython.display import display # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 # - ppo_agent = ppo_continuous('BipedalWalker-v2') # define env env = gym.make('BipedalWalker-v2') # function to animate a list of frames def animate_frames(frames): plt.figure(dpi = 72) plt.axis('off') # color option for plotting # use Greys for greyscale cmap = None if len(frames[0].shape)==3 else 'Greys' patch = plt.imshow(frames[0], cmap=cmap) fanim = animation.FuncAnimation(plt.gcf(), \ lambda x: patch.set_data(frames[x]), frames = len(frames), interval=30) display(display_animation(fanim, default_mode='once')) frames = [] state = env.reset() total_reward = 0 for t in range(3000): action = ppo_agent.act(state).numpy()[0] frames.append(env.render(mode='rgb_array')) next_state, reward, done, _ = env.step(action) state=next_state total_reward+= reward if done: break print ("Total reward:",total_reward) env.close() #animate_frames(frames)
ppo/.ipynb_checkpoints/bipedal_ppo-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Library Widget for Graph Exploration # # In this early example, we are demonstrating the ability to explore a graph using the # `GraphExplorer` widget. The [InteractiveViewer](Improved_Vis.ipynb) widget is used to # explore the graph, and the [GraphExploreNodeSelection](GraphExploreNodeSelection.ipynb) # widget is used to populate the initial graph. # # This is an early iteration on the graph exploration capability and is expected to be a # bit rough around the edges. Please submit critical bugs to the # [issue tracker](https://github.com/jupyrdf/ipyradiant/issues/). # ## Load an RDF graph # # In this example, we will use the `ipyradiant` `FileManager`. # + from ipyradiant import FileManager, PathLoader lw = FileManager(loader=PathLoader(path="data")) # here we hard set what we want the file to be, but ideally a user can choose a file to work with. lw.loader.file_picker.value = lw.loader.file_picker.options["starwars.ttl"] lw # - # ## GraphExplorer # # The purpose of the `GraphExplorer` widget is to allow users to explore an RDF graph in a # way that is easy and intuitive. Exploring RDF graphs helps understand where information # is stored, and how things are connected. Greater understanding of the graph's structure # will help in downstream tasks such as query development. # # ### How To: # # In this early version of the `GraphExplorer`, the left-hand panel is used to select an # initial set of nodes to populate the graph. # # #### 1. Type Select # # From the `Available types:`, choose one or more types that you want to select subject # nodes from (e.g. `voc:Droid`, `voc:Film`). # # #### 2. 
Subject Select # # From the `Available subjects:` (which should have populated once a type is selected), # choose one or more subjects to add to the initial graph. # # #### 3. Interactive Viewer # # When subjects are selected, nodes should be immediately populated in the interactive # viewer. The initially populated nodes are passed through the [RDF2NX](RDF_to_NX.ipynb) # process, which means they are LPG nodes with data collapsed from the RDF graph. You can # select nodes and the JSON data for the node will be displayed below the main widget # viewing area. # # > Note: expanded nodes (i.e. not the initial nodes) are not yet passed through `RDF2NX`. # > This means that only the initially populated nodes will have all their data from the # > collapsing process. This will be remedied in a future update to the library. # # ##### a. expand a node # # When exploring an RDF graph, we can expand upon a node and display connections that are # other non-Literal nodes. Once a node is selected, click the `Expand Upon Selected Node` # button, which will add all outgoing connections. All node/edges added to the graph are a # different color to indicate that they are `temporary`. # # > Note: Only connections to URIRef nodes are included to simplify the representation # # ##### b. undo last expansion # # Exploration is a back-and-forth process. If a node is expanded and it is desired to undo # the expansion, the `Undo Last Expansion` will remove all nodes and edges from the last # expansion (including nodes locked as `permanent`). # # ##### c. make temporary nodes/edges permanent # # As nodes are discovered that seem valuable, they can be locked in the graph to prevent # removal. Nodes that are `clicked` will have their JSON data visualized, and when clicked # <b>again</b> will be locked into the graph. You can confirm this by observing the change # in node style. # # > Note: edge style for permanent edges isn't updated until the graph is reset (e.g. 
upon # > temporary node removal). This is a known issue with `ipycytoscape` and will be # > addressed in a future version. # # ##### d. remove temporary nodes # # As the graph gets larger, it may become useful to prune temporary nodes/edges from the # viewing area (and underlying graph). The `Remove Temporary Nodes` button will delete all # temporary nodes/edges from the graph and reduce the complexity of the visualization. from ipyradiant.visualization.explore import GraphExplorer ge = GraphExplorer() ge.rdf_graph = lw.graph ge # ##### Perform a selection automatically # # This allows the notebook to be run completely while still demonstrating the capability # (i.e. without human interaction). # + from rdflib import URIRef # this sets our selection in the widget so that we don't have to click manually # CAPS vars are used for testing TSSW_VALUES = ( URIRef("https://swapi.co/vocabulary/Droid"), URIRef("https://swapi.co/vocabulary/Film"), ) ge.node_select.type_select.select_widget.value = TSSW_VALUES SSSW_VALUES = ( URIRef("https://swapi.co/resource/film/1"), URIRef("https://swapi.co/resource/droid/2"), URIRef("https://swapi.co/resource/droid/3"), ) ge.node_select.subject_select.select_widget.value = SSSW_VALUES # - # Select a node automatically (create var for testing) NODE_TO_SELECT = ge.interactive_viewer.cytoscape_widget.graph.nodes[0] ge.interactive_viewer.selected_node = NODE_TO_SELECT
examples/GraphExplorer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt import pandas as pd dataset = pd.read_csv('Wine.csv') dataset X = dataset.iloc[:,:-1].values y = dataset.iloc[:,-1].values X from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2) from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_scaled = sc.fit_transform(X_train) X_scal = sc.transform(X_test) from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA lda = LDA(n_components = 2) x_train = lda.fit_transform(X_scaled,y_train) x_test = lda.transform(X_scal) x_train
LDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Create HDF5 archive of NASA IRTF template spectra

# +
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
import h5py
from astropy.utils.data import download_file
from tarfile import TarFile
import os
from glob import glob

url_g = 'http://irtfweb.ifa.hawaii.edu/~spex/IRTF_Spectral_Library/Data/G_fits_091201.tar'
url_k = 'http://irtfweb.ifa.hawaii.edu/~spex/IRTF_Spectral_Library/Data/K_fits_091201.tar'
url_m = 'http://irtfweb.ifa.hawaii.edu/~spex/IRTF_Spectral_Library/Data/M_fits_091201.tar'

hdf5_archive_path = 'data/irtf_templates.hdf5'

urls = [url_g, url_k, url_m]
# -

# Download and extract FITS archives of G/K/M template stars:

for url in urls:
    p = download_file(url)
    with TarFile(p, 'r') as tar_ref:
        tar_ref.extractall('data/.')

fits_paths = glob('data/*/*.fits')
fits_names = [os.path.basename(p) for p in fits_paths]
# The leading underscore-delimited token of each file name is the spectral type
sptypes = [p.split('_')[0] for p in fits_names]
# Keep only main-sequence stars: 3-character types ending in 'V' (e.g. "G2V")
sptypes_ms = [len(st) == 3 and st.endswith('V') for st in sptypes]
ms_paths = [p for p, st in zip(fits_paths, sptypes_ms) if st]

# Create HDF5 archive of the same data:

if not os.path.exists(hdf5_archive_path):
    with h5py.File(hdf5_archive_path, 'w') as f:
        templates = f.create_group('templates')

        for p in ms_paths:
            header = fits.getheader(p)
            sptype = header['SPTYPE'].replace(' ', '')

            # Keep the first spectrum seen for each spectral type.
            # (Idiom fix: `x not in group` instead of `not x in list(group)` —
            # same membership test without materializing the key list.)
            if sptype not in templates:
                data = fits.getdata(p)
                # Stack the three rows (wavelength, flux, error) as columns
                data = np.vstack([data[0, :], data[1, :], data[2, :]]).T
                # Drop rows containing any NaN before storing
                not_nans = ~np.isnan(data).any(axis=1)
                dset = templates.create_dataset(sptype,
                                                data=data[not_nans, :],
                                                compression='gzip')
                # Preserve the full FITS header as dataset attributes
                for key, val in header.items():
                    dset.attrs[key] = val

# Then delete the directories of FITS templates:

# +
import shutil

for directory in glob('data/*fits*'):
    shutil.rmtree(directory)
create_archive.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Prepare RE dataset
#
# Builds relation-extraction (RE) training data for SciBERT / SciFive from
# i2b2-style .txt/.con/.rel annotation files: candidate concept pairs are
# generated per line, gold relations are joined on, unlabeled pairs become
# "Other", and the marked-up sentences are exported as TSV/JSONL.
#
# scibert: https://raw.githubusercontent.com/allenai/scibert/master/data/text_classification/chemprot/train.txt
# scifive: https://github.com/justinphan3110/SciFive

# %%capture
# !pip install seqeval transformers datasets spacy sentence_transformers

# Colab-only setup: mount Drive and cd to the project root.
from google.colab import drive
drive.mount('/content/drive')
# %cd /content/drive/MyDrive/projects/medical_txt_parser

# %reload_ext autoreload
# %autoreload 2
import warnings
warnings.filterwarnings("ignore")

# (IPython magics in the original walked up out of src/: `path = %pwd`,
# `while "src" in path: %cd ..` — notebook-only, left as a comment here.)

import glob
import pandas as pd
import os
import numpy as np
from tqdm import tqdm
from pprint import pprint
import matplotlib.pyplot as plt
import re

import transformers
from datasets import Dataset, ClassLabel, Sequence, load_dataset, load_metric
from spacy import displacy

assert transformers.__version__ >= "4.11.0"

from src.utils.parse_data import parse_ast, parse_concept, parse_relation

train_data_path = "data/train"
val_data_path = "data/val"
ast_folder_name = "ast"
concept_folder_name = "concept"
rel_folder_name = "rel"
txt_folder_name = "txt"

re_data_path = "data/re"
os.makedirs(re_data_path, exist_ok=True)

# ### Import data
# One row per document: raw text plus its parsed concept/relation annotations.

text_files = glob.glob(train_data_path + os.sep + txt_folder_name + os.sep + "*.txt")
rows = []
for file in tqdm(text_files):
    with open(file, 'r') as f:
        text = f.read()
    # NOTE(review): os.path.basename would be more portable than split("/").
    filename = file.split("/")[-1].split(".")[0]
    concept = parse_concept(train_data_path + os.sep + concept_folder_name + os.sep + filename + ".con")
    rel = parse_relation(train_data_path + os.sep + rel_folder_name + os.sep + filename + ".rel")
    rows.append(pd.DataFrame({"text": [text], "filename": [filename],
                              "concept": [concept], "rel": [rel]}))
# FIX: DataFrame.append was deprecated and removed in pandas 2.0 — collect
# frames in a list and concatenate once (also O(n) instead of O(n^2)).
df = pd.concat(rows, ignore_index=True)
df.head()

# Flatten the per-file concept dicts into one DataFrame.
concept_frames = []
for i, file in df.iterrows():
    tmp = pd.DataFrame(file["concept"])
    tmp["filename"] = file["filename"]
    concept_frames.append(tmp)
concept_df = pd.concat(concept_frames, ignore_index=True)
concept_df

# ### Candidate pair generation
# For every file, pair concepts that share a line: test->problem,
# treatment->problem and problem->problem (the three i2b2 relation families).
all_rel_frames = []
for fname in tqdm(df["filename"].unique()):
    concept_dict = parse_concept(train_data_path + os.sep + concept_folder_name + os.sep + fname + ".con")
    concept_df = pd.DataFrame(concept_dict).drop(columns=["end_line"])
    test_concept_df = concept_df[concept_df["concept_type"] == "test"]
    problem_concept_df = concept_df[concept_df["concept_type"] == "problem"]
    treatment_concept_df = concept_df[concept_df["concept_type"] == "treatment"]

    # class test --> problem
    test_problem_df = pd.merge(test_concept_df, problem_concept_df, how="inner",
                               on="start_line", suffixes=("_1", "_2"))
    # class treatment --> problem
    treatment_problem_df = pd.merge(treatment_concept_df, problem_concept_df, how="inner",
                                    on="start_line", suffixes=("_1", "_2"))
    # class problem --> problem (self-join; drop self-pairs)
    problem_problem_df = pd.merge(problem_concept_df, problem_concept_df, how="inner",
                                  on="start_line", suffixes=("_1", "_2"))
    problem_problem_df = problem_problem_df[
        problem_problem_df["concept_text_1"] != problem_problem_df["concept_text_2"]]
    # TODO: symmetric duplicates (a,b)/(b,a) are still kept — remove duplicates?

    tmp = pd.concat([test_problem_df, treatment_problem_df, problem_problem_df], axis=0)
    tmp["filename"] = fname
    all_rel_frames.append(tmp)
all_rel_df = pd.concat(all_rel_frames, ignore_index=True)
all_rel_df = all_rel_df.sort_values(by=["filename", "start_line"]).reset_index(drop=True)
all_rel_df

# Gold relations from the .rel files.
rel_frames = []
for i, file in df.iterrows():
    tmp = pd.DataFrame(file["rel"])
    tmp["filename"] = file["filename"]
    rel_frames.append(tmp)
rel_df = pd.concat(rel_frames, ignore_index=True)
rel_df.drop(columns=["end_line_1", "start_line_2", "end_line_2"], inplace=True)
rel_df.rename(columns={"start_line_1": "start_line"}, inplace=True)
rel_df

# Left-join gold labels onto candidate pairs; unlabeled pairs become "Other".
rel_df = pd.merge(all_rel_df, rel_df, how="left",
                  on=["filename", "start_line",
                      "start_word_number_1", "end_word_number_1",
                      "start_word_number_2", "end_word_number_2",
                      "concept_text_1", "concept_text_2"],
                  suffixes=("_1", "_2"))
rel_df.fillna("Other", inplace=True)
# The merge turned int columns into float where NaNs appeared; restore ints.
for col in ["start_line", "start_word_number_1", "end_word_number_1",
            "start_word_number_2", "end_word_number_2"]:
    rel_df[col] = rel_df[col].astype(int)
rel_df

rel_df["relation_type"].value_counts()

# some relations don't exist in concept files. (Will ignore those)


def preprocess_text(row):
    """Mark the two concept spans of `row` inside its sentence.

    Concept 1 is wrapped in "<< >>", concept 2 in "[[ ]]"; if concept 1
    starts after concept 2 the markers (and word indices) are swapped so the
    sentence can be rebuilt left-to-right.  Mutates and returns `row`.
    """
    text = row["text"]
    line = text.split("\n")[row["start_line"] - 1]
    line = " ".join(line.split())  # collapse repeated whitespace
    concept_text_1 = "<< " + " ".join(line.split()[row["start_word_number_1"]:row["end_word_number_1"] + 1]) + " >>"
    concept_text_2 = "[[ " + " ".join(line.split()[row["start_word_number_2"]:row["end_word_number_2"] + 1]) + " ]]"
    start_word_number_1 = row["start_word_number_1"]
    end_word_number_1 = row["end_word_number_1"]
    start_word_number_2 = row["start_word_number_2"]
    end_word_number_2 = row["end_word_number_2"]
    # Ensure concept 1 is the left-most span so the rebuild below is in order.
    if row["start_word_number_1"] > row["start_word_number_2"]:
        concept_text_1, concept_text_2 = concept_text_2, concept_text_1
        start_word_number_1, start_word_number_2 = start_word_number_2, start_word_number_1
        end_word_number_1, end_word_number_2 = end_word_number_2, end_word_number_1
    text = " ".join(line.split()[:start_word_number_1] + [concept_text_1]
                    + line.split()[end_word_number_1 + 1:start_word_number_2] + [concept_text_2]
                    + line.split()[end_word_number_2 + 1:])
    row["text"] = text
    return row


rel_df = rel_df.merge(df[["filename", "text"]], on="filename", how="inner")
rel_df = rel_df.apply(preprocess_text, axis=1)
rel_df = rel_df[["filename", "start_line", "text", "concept_text_1", "concept_text_2",
                 "relation_type", "concept_type_1", "concept_type_2"]]
rel_df.columns = ["filename", "line_num", "text", "concept_text_1", "concept_text_2",
                  "rel_type", "concept_type_1", "concept_type_2"]
rel_df

# export all
rel_df[["text", "rel_type"]].to_csv(re_data_path + os.sep + "re_scibert_data.tsv",
                                    sep="\t", index=False, header=False)

rel_df["rel_type"].value_counts()

# class test --> problem
test_problem_df = rel_df[(rel_df["concept_type_1"] == "test") & (rel_df["concept_type_2"] == "problem")]
test_problem_df
# export as tsv
test_problem_df = test_problem_df[["text", "rel_type"]]
test_problem_df.to_csv(re_data_path + os.sep + "re_scibert_data_Te_P.tsv",
                       sep="\t", index=False, header=False)
test_problem_df["rel_type"].value_counts()

# class treatment --> problem
treatment_problem_df = rel_df[(rel_df["concept_type_1"] == "treatment") & (rel_df["concept_type_2"] == "problem")]
treatment_problem_df
# export as tsv
treatment_problem_df = treatment_problem_df[["text", "rel_type"]]
treatment_problem_df.to_csv(re_data_path + os.sep + "re_scibert_data_Tr_P.tsv",
                            sep="\t", index=False, header=False)
treatment_problem_df["rel_type"].value_counts()

# class problem --> problem
problem_problem_df = rel_df[(rel_df["concept_type_1"] == "problem") & (rel_df["concept_type_2"] == "problem")]
problem_problem_df
# export as tsv
problem_problem_df = problem_problem_df[["text", "rel_type"]]
problem_problem_df.to_csv(re_data_path + os.sep + "re_scibert_data_P_P.tsv",
                          sep="\t", index=False, header=False)
problem_problem_df["rel_type"].value_counts()

# ## Old

# export as tsv
rel_df.to_csv(re_data_path + os.sep + "re_data_scibert.tsv", sep="\t", index=False, header=False)
pd.read_csv(re_data_path + os.sep + "re_data_scibert.tsv", nrows=10, sep='\t', header=None)

# export as jsonl
rel_df.to_json(re_data_path + os.sep + "re_data.jsonl", orient="records", lines=True)
# read 10 first lines pandas json
pd.read_json(re_data_path + os.sep + "re_data.jsonl", lines=True, nrows=10)

# BUG(review): the original final cell `train_df[1].value_counts()` referenced
# `train_df`, which is never defined in this notebook (NameError) — removed.
src/notebooks/relations_re/prepare_re_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Essential DataFrame Operations (movie dataset)

import pandas as pd

movie = pd.read_csv('./data/movie.csv', index_col='movie_title')
movie.head()

# ##### Note
# The DataFrame indexing operator is capable of accepting a number of
# different objects.  If a string is passed, it will return a
# single-dimensional Series.  If a list is passed to the indexing operator,
# it returns a DataFrame of all the columns in the list in the specified order

# #### Selecting multiple DataFrame columns
movie_actor_director = movie[['actor_1_name', 'actor_2_name', 'actor_3_name', 'director_name']]
movie_actor_director.head()

# Fetching Columns with a List
cols = ['actor_1_name', 'actor_2_name', 'actor_3_name', 'director_name']
movie_actor_director = movie[cols]
movie_actor_director.head()

# #### Selecting columns with methods
# FIX: DataFrame.get_dtype_counts() was removed in pandas 1.0; the
# equivalent is value_counts() over the dtypes Series.
movie.dtypes.value_counts()

# Fetching columns of type 'object'
movie.select_dtypes(include=['object']).head()

# Fetching columns of type 'int64'
movie.select_dtypes(include=['int64']).head()

# +
# movie.select_dtypes(include='np.number').head()
# -

# Filter columns with string 'name'
movie.filter(like='name').head()

# Filter columns with a digit in the name
# FIX: raw string — '\d' in a plain literal is an invalid escape sequence
# (DeprecationWarning, SyntaxWarning in newer Pythons).
movie.filter(regex=r'\d').head()

# #### Ordering column names sensibly
# 1. Classify each column as either discrete or continuous
# 2. Group common columns within the discrete and continuous columns
# 3. Place the most important groups of columns first with categorical
#    columns before continuous ones

movie = pd.read_csv('data/movie.csv')
movie.columns

# FIX: the book-style ">>> " prompts are invalid syntax in a plain .py file
# and were stripped.
disc_core = ['movie_title', 'title_year', 'content_rating', 'genres']
disc_people = ['director_name', 'actor_1_name', 'actor_2_name', 'actor_3_name']
disc_other = ['color', 'country', 'language', 'plot_keywords', 'movie_imdb_link']
cont_fb = ['director_facebook_likes', 'actor_1_facebook_likes',
           'actor_2_facebook_likes', 'actor_3_facebook_likes',
           'cast_total_facebook_likes', 'movie_facebook_likes']
cont_finance = ['budget', 'gross']
cont_num_reviews = ['num_voted_users', 'num_user_for_reviews', 'num_critic_for_reviews']
cont_other = ['imdb_score', 'duration', 'aspect_ratio', 'facenumber_in_poster']

new_col_order = (disc_core + disc_people + disc_other
                 + cont_fb + cont_finance + cont_num_reviews + cont_other)
# Sanity check: the reordering covers exactly the existing columns.
set(movie.columns) == set(new_col_order)

movie2 = movie[new_col_order]
movie2.head()

# ##### Operating on the entire DataFrame
movie = pd.read_csv('data/movie.csv')
print('movie.shape', movie.shape)
print('movie.size', movie.size)
print('movie.ndim', movie.ndim)
print('len(movie)', len(movie))

movie.count().head()

movie.min(skipna=True).head()

movie.min(skipna=False).head()

movie.describe()

movie.describe(percentiles=[.01, .3, .99])

# Specify exact quantiles in the describe method using the percentiles parameter:
movie.describe(percentiles=[.1, .3, .9])

# #### Chaining DataFrame methods together
movie = pd.read_csv('data/movie.csv')
movie.isnull().head()

movie.isnull().sum().head()

movie.isnull().sum().sum()

# print('movie.isnull() :', movie.isnull())
print('movie.isnull().any()head() :\n\n', movie.isnull().any().head())
print('\n\n')
print('movie.isnull().any().any() :', movie.isnull().any().any())

# #### Working with operators on a DataFrame
# #### Comparing missing values
# #### Transposing the direction of a DataFrame operation

# +
# #### Determining college campus diversity
.ipynb_checkpoints/Pandas Recipes 2.0 Essential DataFrame Operations-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python3.6 (morphs)
#     language: python
#     name: py36
# ---

# Plot the audio waveform of each motif stimulus as one row of panels,
# padded with silence so every trace spans the same -0.2..0.6 s window.

# %matplotlib inline
# import librosa
import matplotlib.pyplot as plt
import numpy as np

import morphs

motifs = ["ac001", "bc001", "af128", "ah128"]

f, axes = plt.subplots(1, len(motifs), figsize=(len(motifs) * 4, 1.5), squeeze=True)
ax_map = {m: ax for m, ax in zip(motifs, axes)}

for motif in ax_map:
    ax = ax_map[motif]
    data, sr = morphs.load.wav(motif)
    t = np.arange(len(data)) / sr
    # Pad both ends with zero-amplitude anchor points so the plotted trace
    # covers the full fixed time window.
    t = np.concatenate(([-0.2, 0], t, [0.4, 0.6]))
    data = np.concatenate(([0, 0], data, [0, 0]))
    ax.plot(t, data, color='k', linewidth=1)
    ax.set_xlim(-0.2, 0.6)
    ax.set_xticklabels([])
    ax.set_ylim(-1, 1)
    # Vertical guides at stimulus onset (0 s) and offset (0.4 s).
    ax.axvline(0, color="k")
    ax.axvline(0.4, color="k")
    # Only the first panel keeps its y tick labels.
    # FIX: compare strings by value — `is not` tests identity and only
    # happened to work via small-string interning.
    if motif != motifs[0]:
        ax.set_yticklabels([])

axes[0].set_ylabel("Sound Pressure");

morphs.plot.savefig(f, "audio_waveforms")
notebooks/5.1-mjt-audio-waveforms.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # SOMMAR Data Volume Estimates from PRIMAVERA

import django
django.setup()

from django.db.models import Sum
from pdata_app.models import DataRequest, DataFile
from pdata_app.utils.common import filter_hadgem_stream2, get_request_size


def filesizeformat(num_bytes, human_units=True):
    """
    If human_units then output data volumes with units, but with the
    non-breaking space removed.
    """
    if human_units:
        # Django inserts a non-breaking space between value and unit;
        # replace it with a plain space for clean console output.
        return django.template.defaultfilters.filesizeformat(num_bytes).replace('\xa0', ' ')
    return num_bytes


# ### Total data volume for a Stream 2 MOHC experiment
# The PRIMAVERA Stream 2 data request was cut down from the Stream 1 request
# so that only the high frequency variables required for the user facing work
# packages were output.  The output of daily variables on atmosphere levels
# was also reduced.  The Stream 2 data request is summarised at
# https://doi.org/10.5281/zenodo.3607328.

hist_reqs = filter_hadgem_stream2(
    DataRequest.objects.filter(
        climate_model__short_name='HadGEM3-GC31-HH',
        experiment__short_name='hist-1950',
        rip_code='r1i1p1f1',
        datafile__isnull=False,
    )
)

# CMOR tables that hold ocean / sea-ice output (used to split atmos vs ocean).
ocean_table_names = ['Oday', 'PrimOday', 'PrimSIday', 'SIday',
                     'Omon', 'PrimOmon', 'SImon']

# The following variables were output (along with their dimensions):
for dr in hist_reqs.order_by('variable_request__frequency',
                             'variable_request__table_name',
                             'variable_request__cmor_name'):
    var_string = f'{dr.variable_request.table_name}_{dr.variable_request.cmor_name}'
    print(f'{var_string:<30} ({dr.variable_request.dimensions})')

# For SOMMAR, this data request would require the addition of a variable for
# each ocean biogeochemistry tracer on olevels.
# The data volume for 65 years for the 25 km atmosphere and 1/12° ocean is:

atmos_reqs = hist_reqs.exclude(variable_request__table_name__in=ocean_table_names)
n512_atmos_size = get_request_size(atmos_reqs, 1950, 2014)
print(f'25 km atmosphere {filesizeformat(n512_atmos_size)}')

ocean_reqs = hist_reqs.filter(variable_request__table_name__in=ocean_table_names)
ocean_size = get_request_size(ocean_reqs, 1950, 2014)
print(f'1/12° ocean {filesizeformat(ocean_size)}')

print(f'total {filesizeformat(get_request_size(hist_reqs, 1950, 2014))}')

# However, in PRIMAVERA the Met Office ran an N512 model and in SOMMAR it
# would like to run an N1280 model.  Data volume scales with the square of
# the horizontal resolution ratio:
n1280_atmos_size = n512_atmos_size * (1280 / 512)**2
print(f'10 km atmosphere {filesizeformat(n1280_atmos_size)}')
print(f'1/12° ocean {filesizeformat(ocean_size)}')
print(f'total {filesizeformat(n1280_atmos_size + ocean_size)}')

# ## Data volume per year
# SOMMAR will have longer runs; the PRIMAVERA simulation above covers 65 years.
n1280_per_year = n1280_atmos_size / 65
print(f'atmosphere per year {filesizeformat(n1280_per_year)}')
ocean_per_year = ocean_size / 65
print(f'ocean per year {filesizeformat(ocean_per_year)}')
total_per_year = n1280_per_year + ocean_per_year
print(f'total per year {filesizeformat(total_per_year)}')

# For a possible run length of 650 years (200 spinup, 200 piControl and
# 250 historical + future):
print(f'atmosphere {filesizeformat(650 * n1280_per_year)}')
print(f'ocean {filesizeformat(650 * ocean_per_year)}')
print(f'total {filesizeformat(650 * total_per_year)}')

# ## Data Volume for a surface variable

# +
amon_tas = DataRequest.objects.filter(
    climate_model__short_name='HadGEM3-GC31-HH',
    experiment__short_name='hist-1950',
    rip_code='r1i1p1f1',
    variable_request__table_name='Amon',
    variable_request__cmor_name='tas',
)
n1280_size = get_request_size(amon_tas, 1950, 2014) * (1280 / 512)**2
print(f'65 years of a suface variable at N1280 {filesizeformat(n1280_size)}')

n1280_650years_size = n1280_size / 65 * 650
print(f'650 years of a suface variable at N1280 {filesizeformat(n1280_650years_size)}')
# -

# Therefore how to analyse the data needs to be considered as the time series
# for a variable on a single level will be over 60 GB in size.  Many
# post-processing systems won't have this much RAM available to them.

# ## Data volume per time slice
# To allow the estimation of SOMMAR volumes we can calculate the storage
# required by a single atmosphere and ocean time slice.

# +
atmos_filename = 'tas_Amon_HadGEM3-GC31-HH_hist-1950_r1i1p1f1_gn_195001-195012.nc'
atmos_file = DataFile.objects.get(name=atmos_filename)
print(f'{atmos_filename} {filesizeformat(atmos_file.size)}')

ocean_surface_filename = 'tos_Omon_HadGEM3-GC31-HH_hist-1950_r1i1p1f1_gn_195001-195001.nc'
ocean_surface_file = DataFile.objects.get(name=ocean_surface_filename)
print(f'{ocean_surface_filename} {filesizeformat(ocean_surface_file.size)}')

ocean_filename = 'vo_Omon_HadGEM3-GC31-HH_hist-1950_r1i1p1f1_gn_195001-195001.nc'
ocean_file = DataFile.objects.get(name=ocean_filename)
print(f'{ocean_filename} {filesizeformat(ocean_file.size)}')
print('')

sommar_atmos_filesize = (1280 / 512)**2 * atmos_file.size
sommar_atmos_step_size = sommar_atmos_filesize / 12  # 12 months in the file
print(f'10 km atmos single level single time slice {filesizeformat(sommar_atmos_step_size)}')

ocean_surface_step_size = ocean_surface_file.size  # single time point
print(f'1/12° ocean surface single time slice {filesizeformat(ocean_surface_step_size)}')

ocean_step_size = ocean_file.size / 75  # on 75 olevels
print(f'1/12° ocean single level single time slice {filesizeformat(ocean_step_size)}')
# -

# These values seem quite surprising as there are 3606x4322 points in the
# ocean file and around 1920x2560 points in the 10 km atmosphere file.
# However, there is data on all points in the atmosphere but there's only
# data on the ocean points in the ocean file (47% of points are masked at the
# surface and 100% are at the bottom layer).  The compression used will save
# space for these masked points.  With only a single time point in the ocean
# surface file, two-thirds of the data will be latitudes and longitudes.
# Ocean surface storage would have been more efficient if more time points
# were included in each file.

# Lots of effort needs to go into working out the most efficient way to store
# the data in SOMMAR!
Sommar_data_volumes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Sodabot: predict the winner of a chess game from its move list with a
# small dense network over one-hot encoded moves.

# Dependencies
import numpy as np
import pandas as pd

from google.colab import drive
drive.mount('/content/drive')

# Let's import our Chess games.  Each row is one game with moves pre-split
# into fixed-width columns plus a 'winner' label column.
df = pd.read_csv('drive/My Drive/Colab Datasets/Chess/games_moves_split_filled.csv')
df.head(10)

# Split string-moves into arrays
# df['moves'] = df['moves'].str.split()

# Changing pandas dataframe to numpy array: features = every move column,
# target = winner.
x = df.loc[:, ~df.columns.isin(['winner'])].values
y = df.loc[:, df.columns == 'winner'].values

x

# Encode moves (one-hot over the categorical move columns)
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
x = ohe.fit_transform(x).toarray()

x[0]

# Change targets to binary (one-hot over the 3 outcome classes)
from keras.utils import to_categorical
y = to_categorical(y)

y

# Split into train and test data
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)

# Dependencies
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout

# Neural network.  86308 is the width of the one-hot encoded move matrix
# for this dataset; 3 softmax outputs = white win / black win / draw classes.
model = Sequential()
# model.add(Dropout(0.5, input_shape=(86308,)))
model.add(Dense(16, input_dim=86308, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(12, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(3, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

from keras import backend as K
K.set_value(model.optimizer.learning_rate, 0.0001)

history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=25, batch_size=64)

# BUG FIX: the original cell called accuracy_score(pred, test) but `pred`
# and `test` were never defined anywhere in the notebook (NameError).
# Derive hard class predictions and true labels from the trained model.
from sklearn.metrics import accuracy_score
pred = np.argmax(model.predict(x_test), axis=1)
test = np.argmax(y_test, axis=1)
a = accuracy_score(pred, test)
print('Accuracy is:', a*100)

import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Grab a set of moves and split them to fit the formatting
# ('GG' pads games shorter than the fixed column width).
moves_new = 'Nf3 Nf6 c4 g6 Nc3 Bg7 d4 O-O Bf4 d5 Qb3 dxc4 Qxc4 c6 e4 Nbd7 Rd1 Nb6 Qc5 Bg4 Bg5 GG Qa3 Nxc3 bxc3 Nxe4 Bxe7 Qb6 Bc4 Nxc3 Bc5 GG Kf1 Be6 Bxb6 GG Kg1 Ne2+ Kf1 Nxd4+ Kg1 Ne2+ Kf1 Nc3+ Kg1 axb6 Qb4 Ra4 Qxb6 Nxd1 h3 Rxa2 Kh2 Nxf2 Re1 Rxe1 Qd8+ Bf8 GG Bd5 Nf3 Ne4 GG b5 h4 h5 Ne5 Kg7 Kg1 Bc5+ Kf1 Ng3+ Ke1 GG Kd1 GG Kc1 Ne2+ Kb1 GG Kc1 GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG GG'
moves_new = np.array(moves_new.split())
moves_new

# Let's give it a scenario where we don't know what the best move is
x_new = ohe.transform(moves_new.reshape(1, -1))
x_new

# Ask it for the answer and print it
y_new = model.predict(x_new)
y_new

# Use this to get AUROC
# BUG FIX: `true_labels` and `pred_probs` were undefined in the original.
# Score class 1 one-vs-rest on the held-out set.
import sklearn.metrics
true_labels = y_test[:, 1]
pred_probs = model.predict(x_test)[:, 1]
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_true=true_labels,
                                                 y_score=pred_probs,
                                                 pos_label=1)  # positive class is 1; negative class is 0
auroc = sklearn.metrics.auc(fpr, tpr)
Sodabot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Rank cloth-folding progress: VGG16 fc1 embeddings of video frames feed a
# pairwise (Siamese) ranking network that learns "later frame scores higher".
# TensorFlow 1.x graph-mode notebook running on a Python 2 kernel.

import sys
sys.path.append('../libs/vgg16')

import tensorflow as tf
import numpy as np
from vgg16 import vgg16
import glob, os
# NOTE(review): imread/imresize were removed from scipy in 1.3 — this
# notebook needs scipy < 1.3 (or a port to imageio/PIL).
from scipy.misc import imread, imresize

# +
DATASET_DIR = '../data/cloth_folding_rgb_vids'
NUM_VIDS = 45


def get_img_pair(video_id):
    """Return the (first, last) frames of a video, resized to 224x224."""
    img_files = sorted(glob.glob(os.path.join(DATASET_DIR, video_id, '*.png')))
    start_img = img_files[0]
    end_img = img_files[-1]
    pair = []
    for image_file in [start_img, end_img]:
        img_original = imread(image_file)
        img_resized = imresize(img_original, (224, 224))
        pair.append(img_resized)
    return tuple(pair)


# First frame of each video = "unfolded" example, last frame = "folded".
start_imgs = []
end_imgs = []
for vid_id in range(1, NUM_VIDS + 1):
    start_img, end_img = get_img_pair(str(vid_id))
    start_imgs.append(start_img)
    end_imgs.append(end_img)
print('Images of starting state {}'.format(np.shape(start_imgs)))
print('Images of ending state {}'.format(np.shape(end_imgs)))
# -

imgs_plc = tf.placeholder(tf.float32, [None, 224, 224, 3])

# +
n_features = 4096  # width of the VGG16 fc1 embedding
n_hidden = 10  # n_features * 2

with tf.name_scope("input"):
    x1 = tf.placeholder(tf.float32, [None, n_features], name="x1")
    x2 = tf.placeholder(tf.float32, [None, n_features], name="x2")
    dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_prob')

# The hidden layer scores both inputs with the SAME weights (Siamese pair).
with tf.name_scope("hidden_layer"):
    with tf.name_scope("weights"):
        w1 = tf.Variable(tf.random_normal([n_features, n_hidden]), name="w1")
        tf.summary.histogram("w1", w1)
        b1 = tf.Variable(tf.random_normal([n_hidden]), name="b1")
        tf.summary.histogram("b1", b1)
    with tf.name_scope("output"):
        h1 = tf.nn.dropout(tf.nn.relu(tf.matmul(x1, w1) + b1), keep_prob=dropout_keep_prob)
        tf.summary.histogram("h1", h1)
        h2 = tf.nn.dropout(tf.nn.relu(tf.matmul(x2, w1) + b1), keep_prob=dropout_keep_prob)
        tf.summary.histogram("h2", h2)

with tf.name_scope("output_layer"):
    with tf.name_scope("weights"):
        w2 = tf.Variable(tf.random_normal([n_hidden, 1]), name="w2")
        tf.summary.histogram("w2", w2)
        b2 = tf.Variable(tf.random_normal([1]), name="b2")
        tf.summary.histogram("b2", b2)
    with tf.name_scope("output"):
        s1 = tf.matmul(h1, w2) + b2  # utility score of input 1
        s2 = tf.matmul(h2, w2) + b2  # utility score of input 2

# Ranking loss: drive s1 - s2 negative so the end frame (x2) outscores the
# start frame (x1).
with tf.name_scope("loss"):
    s12 = s1 - s2
    s12_flat = tf.reshape(s12, [-1])
    pred = tf.sigmoid(s12)                       # NOTE(review): unused
    lable_p = tf.sigmoid(-tf.ones_like(s12))     # NOTE(review): unused
    # NOTE(review): softmax over a single logit plus the `+ 1` offset look
    # suspicious; kept byte-for-byte to preserve the trained behaviour.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=tf.zeros_like(s12_flat), logits=s12_flat + 1)
    loss = tf.reduce_mean(cross_entropy)
    tf.summary.scalar("loss", loss)

with tf.name_scope("train_op"):
    train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
# -

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
print('Loading model...')
vgg = vgg16(imgs_plc, '../libs/vgg16/vgg16_weights.npz', sess)
print('Done loading!')

# +
# Embed all frames once through VGG16, then split 75/25 over videos.
start_imgs_embedded = sess.run(vgg.fc1, feed_dict={vgg.imgs: start_imgs})
end_imgs_embedded = sess.run(vgg.fc1, feed_dict={vgg.imgs: end_imgs})
idxs = np.random.choice(NUM_VIDS, NUM_VIDS, replace=False)
train_idxs = idxs[0:int(NUM_VIDS * 0.75)]
test_idxs = idxs[int(NUM_VIDS * 0.75):]
train_start_imgs = start_imgs_embedded[train_idxs]
train_end_imgs = end_imgs_embedded[train_idxs]
test_start_imgs = start_imgs_embedded[test_idxs]
test_end_imgs = end_imgs_embedded[test_idxs]
print('Train start imgs {}'.format(np.shape(train_start_imgs)))
print('Train end imgs {}'.format(np.shape(train_end_imgs)))
print('Test start imgs {}'.format(np.shape(test_start_imgs)))
print('Test end imgs {}'.format(np.shape(test_end_imgs)))
# -

# +
# NOTE(review): train_y1/train_y2 are built but never fed to the graph.
train_y1 = np.expand_dims(np.zeros(np.shape(train_start_imgs)[0]), axis=1)
train_y2 = np.expand_dims(np.ones(np.shape(train_end_imgs)[0]), axis=1)
for epoch in range(100):
    for i in range(np.shape(train_start_imgs)[0]):
        _, cost_val = sess.run([train_op, loss],
                               feed_dict={x1: train_start_imgs[i:i+1, :],
                                          x2: train_end_imgs[i:i+1, :],
                                          dropout_keep_prob: 0.5})
    print('{}. {}'.format(epoch, cost_val))
    # Accuracy = fraction of held-out pairs where the end frame outscores
    # the start frame.
    s1_val, s2_val = sess.run([s1, s2], feed_dict={x1: test_start_imgs,
                                                   x2: test_end_imgs,
                                                   dropout_keep_prob: 1})
    print('Accuracy: {}%'.format(100 * np.mean(s1_val < s2_val)))
# -

# +
def get_img_seq(video_id):
    """Return every frame of a video, resized to 224x224, in order."""
    img_files = sorted(glob.glob(os.path.join(DATASET_DIR, video_id, '*.png')))
    imgs = []
    for image_file in img_files:
        img_original = imread(image_file)
        img_resized = imresize(img_original, (224, 224))
        imgs.append(img_resized)
    return imgs


imgs = get_img_seq('1')
# -

imgs_embedded = sess.run(vgg.fc1, feed_dict={vgg.imgs: imgs})

scores = sess.run([s1], feed_dict={x1: imgs_embedded, dropout_keep_prob: 1})

from matplotlib import pyplot as plt
plt.figure()
plt.title('Utility of cloth-folding over time')
plt.xlabel('time (video frame #)')
plt.ylabel('Utility')
plt.plot(scores[-1])
plt.show()
ch19/Listing 19.11 - 19.20.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import os import matplotlib.pyplot as plt # %matplotlib inline # ### 1. return # + # INSTRUCTION: # change itv to generate results for 0.125, 0.25 ... # change algorithm to either td3 or ppo #---------------------------------------------------- import glob # interval = ["point125","point25","point5","point75"] # interval = ["vzero","vpoint1","vpoint3"] # itv = interval[0] itv = "one" algorithm = "ddpg" # "ppo" visitors = "single" # single network = "64_64_NN" # 300_NN or 64_64_NN or 300_300_NN # txt_dir = "../SHARCNET/Results/"+visitors + "/" + algorithm + "/" + network+"/new_64batchsize/"+itv+"/" txt_dir = "../SHARCNET/Results/single/ddpg/64_64_NN/new/"+itv+"/" # txt_dir = "../SHARCNET/Results/single/ddpg/64_64_NN/new/one/" # pic_dir = "./plots_for_thesis/"+visitors+"/"+algorithm+"/" + network+"/new_64batchsize/" pic_dir = "./plots_for_thesis/"+visitors+"/ddpg/64_64_NN/new/" # pic_dir = "./plots_for_thesis/"+visitors+"/aug_obs/ddpg/new/" # pic_dir = "./plots_for_thesis/single/ddpg/64_64_NN/new/" modes = ["SARA","PLA"] all_data_files = dict() for mode in modes: mode_data_files = glob.glob(txt_dir+mode+ "/**/" +"*.txt", recursive=True) mode_data_files.sort() print("{}: {} files found".format(mode,len(mode_data_files))) for f in mode_data_files: print(f) all_data_files[mode]=mode_data_files # - txt_dir # all_data_files["PLA"] = all_data_files["PLA"][:5] all_data_files["SARA"] = np.delete(all_data_files["SARA"],2) all_data_files["PLA"] = np.delete(all_data_files["PLA"],3) all_combined_data = dict() if algorithm == "ppo": metrics = ["AverageEpRet", "AverageVVals"] col_names = ["return", "V_means"] elif algorithm == "ddpg": metrics = ["AverageEpRet", "AverageQVals"] col_names = ["return", "Q_means"] else: metrics = 
["AverageEpRet", "AverageQ1Vals", "AverageQ2Vals"] col_names = ["return","Q1_means","Q2_means"] for mode in modes: mode_data = [] for file in all_data_files[mode]: mode_data.append(pd.read_csv(file, sep="\t")) mode_combined_data = pd.DataFrame() for metric, col_name in zip(metrics, col_names): metric_combined_data = pd.DataFrame() for i in range(len(all_data_files[mode])): metric_combined_data[col_name+"_run"+str(i)] = mode_data[i][metric] metric_combined_data[col_name+"_avg"] = metric_combined_data.mean(axis=1) metric_combined_data[col_name+'_std'] = metric_combined_data.std(axis=1) mode_combined_data = pd.concat([mode_combined_data,metric_combined_data],axis=1) all_combined_data[mode] = mode_combined_data all_combined_data['PLA'] # + # Plot return colors = ['r','g','b'] metric = "return" modes = ["SARA","PLA"] if visitors == "single": ylim = [0,27] else: ylim = [0,125] for mode, c in zip(modes, colors): data = all_combined_data[mode] length = data.shape[0] if length == 0: print("No data from {}".format(mode)) continue # label = "[0, "+itv.replace("point","0.")+"]" label = "eps="+itv.replace("vpoint","0.").replace("vzero", "0") plt.figure() plt.grid() plt.fill_between(data.index, data[metric+"_avg"] - data[metric+"_std"], data[metric+'_avg'] + data[metric+'_std'], alpha=0.2, color=c) plt.plot(data.index, data[metric+'_avg'], '-', color=c, label=label) # plt.legend(loc="best") plt.ylim(ylim) plt.title('{} {}'.format(mode,metric)) plt.xlabel("epoch") plt.ylabel("reward") plt.savefig(pic_dir+"{}_{}_{}.png".format(itv,mode,metric), bbox_inches='tight') print("save to {}".format(pic_dir)) plt.figure() plt.grid() for i in range(len(all_data_files[mode])): plt.plot(data.index, data[metric+'_run'+str(i)].rolling(window=20).mean(), '-', label="run"+str(i)) plt.legend(loc="right") plt.title("Action noise") plt.savefig(pic_dir+"{}_{}_{}_allrun.png".format(itv,mode,metric), bbox_inches='tight') # - # ### 2. 
V_mean # + from matplotlib.colors import LogNorm colors = ['r','g','b'] metric = "Q_means" modes = ["SARA","PLA"] for mode, c in zip(modes, colors): data = all_combined_data[mode] length = data.shape[0] if length == 0: print("No data from {}".format(mode)) continue label = "[0, "+itv.replace("point","0.")+"]" plt.figure() plt.grid() plt.fill_between(np.linspace(1,length,length)[:200], data[metric+"_avg"][:200] - data[metric+"_std"][:200], data[metric+'_avg'][:200] + data[metric+'_std'][:200], alpha=0.2, color=c) plt.plot(np.linspace(1,length,length)[:200], data[metric+'_avg'][:200], '-', color=c, label=label) plt.legend(loc="best") # plt.ylim([0,27]) plt.title('{} {}'.format(mode,metric)) plt.xlabel("epoch") plt.ylabel(metric) plt.savefig(pic_dir+'/{}_{}_{}.png'.format(itv,mode,metric), bbox_inches='tight') plt.figure() plt.grid() for i in range(0,len(all_data_files[mode])): plt.plot(data.index[:200], data[metric+'_run'+str(i)][:200].rolling(window=50).mean(), '-', label="run"+str(i)) plt.legend(loc="best") # - metric="return" mode="SARA" plt.figure() plt.grid() data = all_combined_data[mode] for i in range(len(all_data_files[mode])): plt.plot(data.index[:20], data[metric+'_run'+str(i)][:20].rolling(window=1).mean(), '-', label="run"+str(i)) plt.legend(loc="right") # # combine different randomness # + import glob interval = ["vzero","vpoint01","vpoint05","vpoint1","vpoint3"] # itv = "one" algorithm = "ddpg" # "ppo" visitors = "single" # single txt_dir = "../SHARCNET/Results/multi/lazy_visitor_sq_action/new/" pic_dir = "./plots_for_thesis/multi/lazy_visitor_sq_action/new_all_eps_combined/" modes = ["SARA","PLA"] all_data_files = dict() for itv in interval: itv_data_files = dict() for mode in modes: mode_data_files = glob.glob(txt_dir+"/"+itv+"/"+mode+ "/**/" +"*.txt", recursive=True) mode_data_files.sort() print("{} {}: {} files found".format(itv, mode,len(mode_data_files))) for f in mode_data_files: print(f) itv_data_files[mode]=mode_data_files all_data_files[itv] 
= itv_data_files # - all_combined_data = dict() if algorithm == "ppo": metrics = ["AverageEpRet", "AverageVVals"] col_names = ["return", "V_means"] elif algorithm == "ddpg": metrics = ["AverageEpRet", "AverageQVals"] col_names = ["return", "Q_means"] else: metrics = ["AverageEpRet", "AverageQ1Vals", "AverageQ2Vals"] col_names = ["return","Q1_means","Q2_means"] for itv in interval: all_combined_data[itv] = dict() for mode in modes: mode_data = [] for file in all_data_files[itv][mode]: if os.stat(file).st_size == 0: print("{} no data".format(file)) else: mode_data.append(pd.read_csv(file, sep="\t")) mode_combined_data = pd.DataFrame() for metric, col_name in zip(metrics, col_names): metric_combined_data = pd.DataFrame() for i in range(len(all_data_files[itv][mode])): metric_combined_data[col_name+"_run"+str(i)] = mode_data[i][metric] metric_combined_data[col_name+"_avg"] = metric_combined_data.mean(axis=1) metric_combined_data[col_name+'_std'] = metric_combined_data.std(axis=1) mode_combined_data = pd.concat([mode_combined_data,metric_combined_data],axis=1) all_combined_data[itv][mode] = mode_combined_data all_combined_data["vzero"]["SARA"] # Plot return metric = "return" modes = ["SARA","PLA"] for mode in modes: plt.figure() plt.grid() for itv in interval: label = "eps="+itv.replace("vpoint","0.").replace("vzero", "0") plt.plot(all_combined_data[itv][mode].index, all_combined_data[itv][mode][metric+'_avg'].rolling(window=10).mean(), '-', label=label) plt.legend(loc="best") plt.ylim([0,125]) plt.title('{} {}'.format(mode,metric)) plt.xlabel("epoch") plt.ylabel("reward") plt.savefig(pic_dir+"all_eps_combined_{}_{}.png".format(mode,metric), bbox_inches='tight') print("save to {}".format(pic_dir)) # # Combine different difficulties # + import glob interval = ["point125","point25","point75", "vzero"] # itv = "one" algorithm = "ddpg" # "ppo" visitors = "single" # single txt_dir = "../SHARCNET/Results/single/ddpg/64_64_NN/new_lazy_visitor_sq_action/" pic_dir = 
"./plots_for_thesis/single/ddpg/64_64_NN/new_lazy_visitor_sq_action/all_difficulty_combined/" modes = ["SARA","PLA"] all_data_files = dict() for itv in interval: itv_data_files = dict() for mode in modes: mode_data_files = glob.glob(txt_dir+"/"+itv+"/"+mode+ "/**/" +"*.txt", recursive=True) mode_data_files.sort() print("{} {}: {} files found".format(itv, mode,len(mode_data_files))) for f in mode_data_files: print(f) itv_data_files[mode]=mode_data_files all_data_files[itv] = itv_data_files # - all_combined_data = dict() if algorithm == "ppo": metrics = ["AverageEpRet", "AverageVVals"] col_names = ["return", "V_means"] elif algorithm == "ddpg": metrics = ["AverageEpRet", "AverageQVals"] col_names = ["return", "Q_means"] else: metrics = ["AverageEpRet", "AverageQ1Vals", "AverageQ2Vals"] col_names = ["return","Q1_means","Q2_means"] for itv in interval: all_combined_data[itv] = dict() for mode in modes: mode_data = [] for file in all_data_files[itv][mode]: if os.stat(file).st_size == 0: print("{} no data".format(file)) else: mode_data.append(pd.read_csv(file, sep="\t")) mode_combined_data = pd.DataFrame() for metric, col_name in zip(metrics, col_names): metric_combined_data = pd.DataFrame() for i in range(len(all_data_files[itv][mode])): if mode_data[i][metric].shape[0] < 1000: print("file size {}".format(mode_data[i][metric].shape[0])) metric_combined_data[col_name+"_run"+str(i)] = mode_data[i][metric] metric_combined_data[col_name+"_avg"] = metric_combined_data.mean(axis=1) metric_combined_data[col_name+'_std'] = metric_combined_data.std(axis=1) mode_combined_data = pd.concat([mode_combined_data,metric_combined_data],axis=1) all_combined_data[itv][mode] = mode_combined_data # Plot return metric = "return" modes = ["SARA","PLA"] if visitors == "single": ylim = [0, 27] else: ylim = [0, 125] for mode in modes: plt.figure() plt.grid() for itv in interval: if itv == "vzero": label = "[0,1]" else: label = "[0,"+itv.replace("point","0.").replace("one", "1")+"]" 
plt.plot(all_combined_data[itv][mode].index, all_combined_data[itv][mode][metric+'_avg'].rolling(window=10).mean(), '-', label=label) plt.legend(loc="best") plt.ylim(ylim) plt.title('{} {}'.format(mode,metric)) plt.xlabel("epoch") plt.ylabel("reward") plt.savefig(pic_dir+"all_difficulty_combined_{}_{}.png".format(mode,metric), bbox_inches='tight') print("save to {}".format(pic_dir))
notebook/Plot_SpinUp_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sumalatha-e/PythonLearningProject/blob/master/day7Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="WO5l1jrkO9JH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} outputId="7d1d78e8-27fa-40aa-c026-efa321234e21"
def Finbnochi(first_number=None, last_number=None):
    """Build a zero-argument callable that prints a Fibonacci-style sequence.

    The sequence is seeded with ``first_number`` (next term is
    ``first_number + 1``, then each term is the sum of the previous two,
    as in the original assignment) and ``last_number`` terms are produced.

    Parameters default to ``None``, in which case they are read
    interactively — this keeps the original prompting behaviour while
    making the function testable without stdin.

    Returns:
        A no-argument callable; calling it prints the terms and returns
        them as a list (an empty list for a non-positive term count).
    """
    # Fall back to the original interactive prompts when no arguments given.
    if first_number is None:
        first_number = int(input("Enter the first number to generate fibonacci sequence: "))
    if last_number is None:
        last_number = int(input("Enter the last number to generate fibonacci sequence: "))

    def fib(nterms):
        """Print and return the first *nterms* terms of the seeded sequence."""
        # Guard against a non-positive term count (the original compared
        # nterms against the seed VALUE here, which was a bug).
        if nterms <= 0:
            print("Please enter a positive integer")
            return []
        n1, n2 = first_number, first_number + 1
        terms = []
        print("Fibonacci sequence:")
        for _ in range(nterms):
            print(n1)
            terms.append(n1)
            # Advance the pair with tuple assignment (no temp variable).
            n1, n2 = n2, n1 + n2
        return terms

    def callFib():
        # Preserved from the original interface: a closure bound to
        # last_number so the caller invokes it with no arguments.
        return fib(last_number)

    return callFib


# Demo guarded so importing this module does not block on input();
# in a notebook __name__ is "__main__", so cell behaviour is unchanged.
if __name__ == "__main__":
    fibonochiDec = Finbnochi()
    fibonochiDec()
day7Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Запись в файл #запись строки в файл, открытый в режиме 'w' file_obj = open('file_to_write_in.txt', 'w') string = 'строка для записи в файл\n' file_obj.write(string) file_obj.close() # !cat file_to_write_in.txt #аналог предыдущей команды для пользователей windows # !more file_to_write_in.txt #запись строки в файл, открытый в режиме 'w' file_obj = open('file_to_write_in.txt', 'w') second_string = 'вторая строка для записи в файл\n' file_obj.write(second_string) file_obj.close() # !cat file_to_write_in.txt #аналог для пользователей windows # !more file_to_write_in.txt #запись строки в файл, открытый в режиме 'a' file_obj = open('file_to_write_in.txt', 'a') second_string = 'третья строка для записи в файл\n' file_obj.write(second_string) file_obj.close() #создание списка чисел от 1 до 10 digits = range(1,11) digits #запись в файл списка строк с помощью функции writelines file_obj = open('second_file_for_write_in.txt', 'w') file_obj.writelines(digit + '\n' for digit in map(str, digits)) file_obj.close() #вывод на экран содержимого файла with open('second_file_for_write_in.txt', 'r') as file_obj: print file_obj.read()
ipython_files_data_writing.ipynb
1 + 1 import pandas as pd pd.Series({'A':5, 'B':2}).plot() # %matplotlib inline pd.Series({'A':5, 'B':2}).plot(figsize=(3,2)) # + language="html" # <p><a href="https://github.com/mwouts/jupytext", style="color: rgb(0,0,255)">Jupytext</a> on GitHub</p> # - # %load_ext rpy2.ipython # + magic_args="-w 400 -h 200" language="R" # library(ggplot2) # ggplot(data=data.frame(x=c('A', 'B'), y=c(5, 2)), aes(x,weight=y)) + geom_bar() # + language="latex" # $\frac{\pi}{2}$
tests/notebooks/mirror/script_to_ipynb/hydrogen_latex_html_R_magics.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # splot.pysal.lib: assessing neighbors & spatial weights # In spatial analysis workflows it is often important and necessary to asses the relationships of neighbouring polygons. `pysal.lib` and `splot` can help you to inspect if two neighbouring polygons share an edge or not. # # **Content**: # * Imports # * Data Preparation # * Plotting # ## Imports from pysal.lib.weights.contiguity import Queen import pysal.lib from pysal.lib import examples import matplotlib.pyplot as plt import geopandas as gpd # %matplotlib inline from pysal.viz.splot.pysal.lib import plot_spatial_weights # ## Data Preparation # Let's first have a look at the dataset with `pysal.lib.examples.explain` examples.explain('rio_grande_do_sul') # Load data into a `geopandas` geodataframe gdf = gpd.read_file(examples.get_path('map_RS_BR.shp')) gdf.head() weights = Queen.from_dataframe(gdf) # This warning tells us that our dataset contains islands. Islands are polygons that do not share edges and nodes with adjacent polygones. This can for example be the case if polygones are truly not neighbouring, eg. when two land parcels are separated by a river. However, these islands often stems from human error when digitizing features into polygons. # # This unwanted error can be assessed using `splot.pysal.lib` `plot_spatial_weights` functionality: # ### Plotting plot_spatial_weights(weights, gdf) plt.show() # This visualisation depicts the spatial weights network, a network of connections of the centroid of each polygon to the centroid of its neighbour. As we can see, there are many polygons in the south and west of this map, that are not connected to it's neighbors. This stems from digitization errors and needs to be corrected before we can start our statistical analysis. 
# # `pysal.lib` offers a tool to correct this error by 'snapping' incorrectly separated neighbours back together: wnp = pysal.lib.weights.util.nonplanar_neighbors(weights, gdf) # We can now visualise if the `nonplanar_neighbors` tool adjusted all errors correctly: plot_spatial_weights(wnp, gdf) plt.show() # The visualization shows that all erroneous islands are now stored as neighbors in our new weights object, depicted by the new joins displayed in orange. # # We can now adapt our visualization to show all joins in the same color, by using the `nonplanar_edge_kws` argument in `plot_spatial_weights`: plot_spatial_weights(wnp, gdf, nonplanar_edge_kws=dict(color='#4393c3')) plt.show()
notebooks/viz/splot/libpysal_non_planar_joins_viz.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Project 1 # In this project our goal is to validate one dictionary structure against a template dictionary. # # A typical example of this might be working with JSON data inputs in an API. You are trying to validate this received JSON against some kind of template to make sure the received JSON conforms to that template (i.e. all the keys and structure are identical - value types being important, but not the value itself - so just the structure, and the data type of the values). # # To keep things simple we'll assume that values can be either single values (like an integer, string, etc), or a dictionary, itself only containing single values or other dictionaries, recursively. In other words, we're not going to deal with lists as possible values. Also, to keep things simple, we'll assume that all keys are **required**, and that no extra keys are permitted. # # In practice we would not have these simplifying assumptions, and although we could definitely write this ourselves, there are many 3rd party libraries that already exist to do this (such as `jsonschema`, `marshmallow`, and many more, some of which I'll cover lightly in some later videos.) 
# For example you might have this template:
template = {
    'user_id': int,
    'name': {
        'first': str,
        'last': str
    },
    'bio': {
        'dob': {
            'year': int,
            'month': int,
            'day': int
        },
        'birthplace': {
            'country': str,
            'city': str
        }
    }
}

# So, a JSON document such as this would match the template:
john = {
    'user_id': 100,
    'name': {
        'first': 'John',
        'last': 'Cleese'
    },
    'bio': {
        'dob': {
            'year': 1939,
            'month': 11,
            'day': 27
        },
        'birthplace': {
            'country': 'United Kingdom',
            'city': 'Weston-super-Mare'
        }
    }
}

# But this one would **not** match the template (missing key):
eric = {
    'user_id': 101,
    'name': {
        'first': 'Eric',
        'last': 'Idle'
    },
    'bio': {
        'dob': {
            'year': 1943,
            'month': 3,
            'day': 29
        },
        'birthplace': {
            'country': 'United Kingdom'
        }
    }
}

# And neither would this one (wrong data type):
michael = {
    'user_id': 102,
    'name': {
        'first': 'Michael',
        'last': 'Palin'
    },
    'bio': {
        'dob': {
            'year': 1943,
            'month': 'May',
            'day': 5
        },
        'birthplace': {
            'country': 'United Kingdom',
            'city': 'Sheffield'
        }
    }
}

# Write a function such this:
def validate(data, template):
    """Validate a nested dict *data* against *template*.

    Template values are either a type (the required type of the value)
    or a dict, recursively.  All template keys are required and no extra
    keys are permitted.

    Returns:
        (True, '') when *data* conforms;
        (False, message) describing the first error found, where message
        is 'mismatched keys: <dotted.path>' for a missing/extra key or
        'bad type: <dotted.path>' for a value of the wrong type.
    """
    def _check(node, tmpl, path):
        # A dict is required wherever the template nests.
        if not isinstance(node, dict):
            return False, f'bad type: {path}'
        # Required keys, checked in template order so the *first* error
        # (by template position) is the one reported.
        for key, expected in tmpl.items():
            key_path = f'{path}.{key}' if path else key
            if key not in node:
                return False, f'mismatched keys: {key_path}'
            if isinstance(expected, dict):
                state, error = _check(node[key], expected, key_path)
                if not state:
                    return state, error
            # NOTE: isinstance(True, int) is True — bools pass an int
            # template, which matches Python's own type model.
            elif not isinstance(node[key], expected):
                return False, f'bad type: {key_path}'
        # No extra keys permitted.
        for key in node:
            if key not in tmpl:
                key_path = f'{path}.{key}' if path else key
                return False, f'mismatched keys: {key_path}'
        return True, ''

    state, error = _check(data, template, '')
    return state, error

# That should return this:
# * `validate(john, template) --> True, ''`
# * `validate(eric, template) --> False, 'mismatched keys: bio.birthplace.city'`
# * `validate(michael, template) --> False, 'bad type: bio.dob.month'`

# Better yet, use exceptions instead of return codes and strings!
dd_1/Part 3/Section 06 - Project 1/01 - Project 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Computing for Mathematics - 2021/2022 individual coursework # # **Important** Do not delete the cells containing: # # ``` # ### BEGIN SOLUTION # # # ### END SOLUTION # ``` # # write your solution attempts in those cells. # # To submit this notebook: # # - Change the name of the notebook from `main` to: `<student_number>`. For example, if your student number is `c1234567` then change the name of the notebook to `c1234567`. # - **Write all your solution attempts in the correct locations**; # - **Do not delete** any code that is already in the cells; # - Save the notebook (`File>Save As`); # - Follow the instructions given to submit. # #### Question 1 # # (__Hint__: This question is similar to [the first exercise of the Matrices chapter](https://vknight.org/pfm/tools-for-mathematics/04-matrices/solutions/main.html#question-1) of Python for mathematics.) # # For each of the following matrices **output** their determinant. # # a. \\(\begin{pmatrix}4 & 4 & 4 \\ 0 & 2 & 0 \\ 12 & 12 & 12\end{pmatrix}\\) # # Available marks: 1 # + tags=["answer:q1-a"] import sympy as sym ### BEGIN SOLUTION ### END SOLUTION # - # b. \\(\begin{pmatrix}3\end{pmatrix}\\) # # _Available marks: 1_ # + tags=["answer:q1-b"] x = sym.Symbol("x") ### BEGIN SOLUTION ### END SOLUTION # - # c. \\(\begin{pmatrix}50 \pi & 40 e & 1 \\ 12 & 3 & 1 \\ -500 & 400 & # \pi ^e\end{pmatrix}\\) # # _Available marks: 2_ # + tags=["answer:q1-c"] ### BEGIN SOLUTION ### END SOLUTION # - # ### Question 2 # # # (__Hint__: This question is similar to the [second exercise of the Sequences chapter of Python for mathematics](https://vknight.org/pfm/tools-for-mathematics/07-sequences/solutions/main.html#question-2).) 
# # Using recursion, create a function `get_sequence` which gives the terms of the following sequence: # # \\[ # \begin{cases} # a_0 &= -2\\ # a_n & 4 a_{n-1}, n\geq 1 # \end{cases} # \\] # # _Available marks: 2_ # + tags=["answer:q2-a"] def get_sequence(n): ### BEGIN SOLUTION ### END SOLUTION # - # ### Question 3 # # (__Hint__: This question is similar to the [fourth exercise of the Matrices chapter of Python for mathematics](https://vknight.org/pfm/tools-for-mathematics/04-matrices/solutions/main.html#question-4).) # # The matrix \\(A\\) is given by: \\( # \begin{pmatrix} # 4 & 3 & 2 & 3\\ # 3 & 1 & 1 & 3\\ # 0 & -1 & 2 & 1\\ # 0 & 2 & 2 & 3\\ # \end{pmatrix}\\) # # # a. Create a variable `A_inv` which has value the inverse of `A` # # _Available marks: 2_ # + tags=["answer:q3-a"] ### BEGIN SOLUTION ### END SOLUTION # - # b. Create a variable `x_sol` which has value the vector # representing the solution to the following linear system: # # \\[ # \begin{eqnarray} # 4 x_1 + 3 x_2 + 2 x_3 + 3 x_4 &= 2\\ # 3 x_1 + x_2 + x_3 + 3 x_4 &= 7\\ # - x_2 + 2 x_3 + x_4 &= 7\\ # 2 x_2 + 2 x_3 + 3 x_4 &= 0\\ # \end{eqnarray} # \\] # # _Available marks: 2_ # + ### BEGIN SOLUTION ### END SOLUTION # - # ### Question 4 # # (__Hint__: This question is similar to the [second exercise of the Algebra chapter of Python for mathematics](https://vknight.org/pfm/tools-for-mathematics/03-calculus/solutions/main.html#question-2)) # # Consider the function: \\(f(x)=\frac{\cos(x)}{x}\\). # # a. Create a variable `expression` which has value: \\(\frac{f(x)- f(x-h)}{h}\\) # # _available marks: 2_ # + tags=["answer:q4-a"] h = sym.Symbol("h") ### BEGIN SOLUTION ### END SOLUTION # - # b. Create a variable `limit` which has value: \\(\lim_{h\to 0}\frac{f(x) - f(x-h)}{h}\\) # # _available marks: 1_ # + tags=["answer:q4-b"] ### BEGIN SOLUTION ### END SOLUTION # - limit # c. Using this, output the value \\(\frac{df}{dx}\\) at \\(x=\pi\\). 
# # *available marks: 1* # + tags=["answer:q4-c"] import math ### BEGIN SOLUTION ### END SOLUTION
assets/assessment/2021-2022/ind/assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import torch.nn.functional as F from PIL import Image import os import json import numpy as np from matplotlib.colors import LinearSegmentedColormap import matplotlib.pyplot as plt import torchvision from torchvision import models from torchvision import transforms from captum.attr import IntegratedGradients from captum.attr import GradientShap from captum.attr import Occlusion from captum.attr import NoiseTunnel from captum.attr import Saliency from captum.attr import DeepLift from captum.attr import visualization as viz # - #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device = torch.device("cpu") model = torch.load('model_conv.pth', map_location=device) model = model.eval() # + from PIL import Image from torchvision import transforms img = Image.open("flower_photos/Dandelion.jpg") #preprocess = transforms.Compose([ # transforms.Resize(256), # transforms.CenterCrop(224), # transforms.ToTensor(), # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) #]) #img = preprocess(im) #img = img.unsqueeze(0) transform = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) transformed_img = transform(img) input = transformed_img.unsqueeze(0) imgplot = plt.imshow(img) # - transformed_img.numpy().ndim image = Image.fromarray(transformed_img.numpy(), "RGB") plt.imshow(img) labels = np.array(open("class.txt").read().splitlines()) output = model(input) output = F.softmax(output, dim=1) prediction_score, pred_label_idx = torch.topk(output, 1) prediction_score pred_label_idx.squeeze_() labels = np.array(open("class.txt").read().splitlines()) labels[torch.argmax(output)] 
labels[pred_label_idx.squeeze_()] # + input.requires_grad = True output = model(input) output = F.softmax(output, dim=1) prediction_score, pred_label_idx = torch.topk(output, 1) pred_label_idx.squeeze_() predicted_label = labels[pred_label_idx.squeeze_()] print('Predicted:', predicted_label, '(', prediction_score.squeeze().item(), ')') # - def attribute_image_features(algorithm, input, **kwargs): model.zero_grad() tensor_attributions = algorithm.attribute(input,target=pred_label_idx,**kwargs) return tensor_attributions saliency = Saliency(model) grads = saliency.attribute(input, target=pred_label_idx) grads = np.transpose(grads.squeeze().cpu().detach().numpy(), (1, 2, 0)) ig = IntegratedGradients(model) attr_ig, delta = attribute_image_features(ig, input, baselines=input * 0, return_convergence_delta=True) attr_ig = np.transpose(attr_ig.squeeze().cpu().detach().numpy(), (1, 2, 0)) print('Approximation delta: ', abs(delta)) # + inv_normalize = transforms.Normalize( mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225] ) original_image = inv_normalize(input) original_image1 = np.transpose(original_image.squeeze().detach().numpy(), (1,2,0)) _ = viz.visualize_image_attr(None, original_image1, method="original_image", title="Original Image") # - original_image1 # + im = Image.fromarray((original_image1 * 255).astype(np.uint8)) # - import cv2 img = cv2.cvtColor(original_image1, cv2.COLOR_RGB2BGR) _, buffer = cv2.imencode('.jpg', img) #return base64.b64encode(buffer).decode('utf-8') _ = viz.visualize_image_attr(grads, original_image1, method="blended_heat_map", sign="absolute_value", show_colorbar=True, title="Overlayed Gradient Magnitudes") _ = viz.visualize_image_attr(attr_ig, original_image1, method="blended_heat_map",sign="all", show_colorbar=True, title="Overlayed Integrated Gradients") occlusion = Occlusion(model) attributions_occ = occlusion.attribute(input, strides = (3, 8, 8), target=pred_label_idx, sliding_window_shapes=(3,15, 15), 
baselines=0) attributions_occ = np.transpose(attributions_occ.squeeze().cpu().detach().numpy(), (1, 2, 0)) _ = viz.visualize_image_attr(attributions_occ, original_image1, method="blended_heat_map",sign="all", show_colorbar=True, title="Occlusion")
Captum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Simple Neural Network # # This is rooted from "[Multivariable Calculus for Machine Learning](https://www.coursera.org/learn/multivariate-calculus-machine-learning)" by Imperial College London. The coresponding Youtube site is [here](https://www.youtube.com/playlist?list=PL2jykFOD1AWaL4_-bdidPfIWe765jOgfL). # + #First, watch the video. from IPython.display import YouTubeVideo YouTubeVideo('F9m7Wh0rJzg', width=700, height=500) # - # **Problem 1.** Recall from the video the structure of one of the simplest neural networks. # # ![](./images/simple1.png) # # Here there are only two neurons (or nodes), and they are linked by a single edge. # # The *activation* of neurons in the final layer, (1), is determined by the activation of neurons in the previous layer, (0), # $$ a^{(1)} = \sigma (w^{(1)}a^{(0)} + b^{(1)}), $$ # where $w^{(1)}$ is the *weight* of the connection between Neuron (0) and Neuron (1), # and $b^{(1)}$ is the *bias* of the Neuron (1). These are then subject to # the *activation function*, $\sigma$ to give the activation of Neuron (1). # # Our small neural network won't be able to do a lot - it's far too simple. # It is however worth plugging a few numbers into it to get a feel for the parts. # # Let's assume we want to train the network to give a *NOT function*, # that is if you input 1 it returns 0, and if you input 0 it returns 1. # # For simplicity, let's use, $\sigma(z) = \tanh(z)$, for our activation function, # and randomly initialise our weight and bias to $w^{(1)}=1.3$ and $b^{(1)} = -0.1$. # # Use the code block below to see what output values the neural network initially # returns for training data. 
# # + import numpy as np # First we set the state of the network sigma = np.tanh w1 = 1.3 b1 = -0.1 #Then we define the neuron activation def a1(a0): return sigma(w1*a0+b1) #Finally, let's try the network out! #Replace $x$ with $0$ or $1$ below, x=0 a1(x) # - # It's not very good! But it's not trained yet; experiment by changing the weight and bias # and see what happens. Print ten of your experiments. Which combination of # the weight and bias in your experiment yielded the best result for a *NOT function*? # **Problem 2.** Let's extend our simple network to include more neurons. # # ![](./images/simple2.png) # # We now have a slightly changed notation. The neurons which are labelled by their layer with a # superscript in brackets, are now also labelled with their number in that layer as a subscript, # and form vectors $\mathbf{a}^{(0)}$ and $\mathbf{a}^{(1)}$. # # The weights now form a matrix $\mathbf{W}^{(1)}$, where each element, $w^{(1)}_{ij}$, # is the link between the neuron $j$ in the previous layer and neuron $i$ in the current layer. # For example $w^{(1)}_{12}$ is highlighted linking $a^{(0)}_2$ to $a^{(1)}_1$. # The biases similarly form a vector $\mathbf{b}^{(1)}$. # In general, the number of columns in $\mathbf{W}$ matches the number of neurons that produced # the vector $a$ at the previous layer. # The number of rows in $\mathbf{W}$ matches the number of neurons at the current layer. # The number of components in $\mathbf{b}$ also matches the number of neurons # at the current neurons at the current layer. # # Now, we update our activation function to give, # $$\mathbf{a}^{(1)} = \sigma\left(\mathbf{W}^{(1)}\mathbf{a}^{(0)}+\mathbf{b}^{(1)}\right) , $$ # where all the quantities of interest have been upgraded to their vector # and matrix form and $\sigma$ acts upon each element of the resulting weighted sum vector separately. 
# # # For a network with weights, # > $\mathbf{W}^{(1)} = \left[ \begin{array}{ccc} # -2 & 4 & -1 \\ # 6 & 0 & -3 # \end{array} \right]$, # and # bias $\mathbf{b} = \left[\begin{array}{c} # 0.1 \\ -2.5 # \end{array} \right] $. # # **(a)** Calculate $\mathbf{a}^{(1)}$ by hand (to 2 decimal places) and write the result. # # **(b)** Complete the code block below to see what output values the neural network # returns for training data. (Remember that you can use the @ operator in Python # to perform operate a matrix on a vector.) # # + # First set up the network. import numpy as np sigma = np.tanh W = np.array([[-2, 4, -1],[6, 0, -3]]) b = np.array([0.1, -2.5]) # Define our input vector x = np.array([0.3, 0.4, 0.1]) # # Your code calculating the values a1_0 and a1_1 and replacing a1_0 and a1_1 below # a1 = np.array([a1_0, a1_1]) # - # **Problem 3.** Now let's look at a network with a hidden layer. # # ![](./images/simple3.png) # # Here, data is input at layer (0), this activates neurons in layer (1), # which become the inputs for neurons in layer (2). # (We've stopped explicitly drawing the biases here.) # # **(a)** Which of the following statements are true? # # A. The number of weights in a layer is the product of # the input and output neurons to that layer. # B. This network can always be replaced with another one with the same amount # of input and output neurons, but no hidden layers. # C. This neural network has 5 biases. # D. The number of weights in a layer is the sum of the input and output neurons # to that layer plus 1. # E. This neural network has 9 biases. # F. None of the other statements. # # **(c)** What are the dimensions of $\mathbf{W}^{(1)}$ and $\mathbf{W}^{(2)}$? # # **(c)** Which of the following statements about the neural network are true? # # A. $\mathbf{a}^{(2)} = \sigma\left(\mathbf{W}^{(1)}\mathbf{a}^{(1)}+\mathbf{b}^{(2)}\right)$ # # B. 
$\mathbf{a}^{(2)} = # \sigma\left(\mathbf{W}^{(2)}\mathbf{W}^{(1)}\mathbf{a}^{(0)}+\mathbf{W}^{(2)}\mathbf{b}^{(1)}+ # \mathbf{b}^{(2)}\right)$ # # C. $\mathbf{a}^{(2)} = # \sigma\left(\mathbf{W}^{(2)}\sigma\left(\mathbf{W}^{(1)}\mathbf{a}^{(0)}+ # \mathbf{b}^{(1)}\right) +\mathbf{b}^{(2)}\right)$
T1_4_simple_nn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:nb35]
#     language: python
#     name: conda-env-nb35-py
# ---

# # Intro to Data Analysis

# +
import unicodecsv

enrollments_filename = './resources/enrollments.csv'

## Longer version of code (replaced with shorter, equivalent version below)
# enrollments = []
# f = open(enrollments_filename, 'rb')
# reader = unicodecsv.DictReader(f)
# for row in reader:
#     enrollments.append(row)
# f.close()

with open(enrollments_filename, 'rb') as f:
    reader = unicodecsv.DictReader(f)
    enrollments = list(reader)

### Write code similar to the above to load the engagement
### and submission data. The data is stored in files with
### the given filenames. Then print the first row of each
### table to make sure that your code works. You can use the
### "Test Run" button to see the output of your code.

engagement_filename = './resources/daily_engagement.csv'
submissions_filename = './resources/project_submissions.csv'

with open(engagement_filename, 'rb') as f:
    reader = unicodecsv.DictReader(f)
    daily_engagement = list(reader)

with open(submissions_filename, 'rb') as f:
    reader = unicodecsv.DictReader(f)
    project_submissions = list(reader)

# +
def read_csv(filename):
    """Load *filename* into a list of dicts, one per CSV row.

    Consolidates the repeated open/DictReader/list pattern above.
    """
    with open(filename, 'rb') as f:
        reader = unicodecsv.DictReader(f)
        return list(reader)

def count_unique(values, key):
    """Return the number of distinct values of *key* across *values*.

    Uses a set comprehension directly instead of building an
    intermediate list inside ``set(...)``.
    """
    return len({rec[key] for rec in values})

enrollments = read_csv('./resources/enrollments.csv')
daily_engagement = read_csv('./resources/daily_engagement.csv')
project_submissions = read_csv('./resources/project_submissions.csv')

### For each of these three tables, find the number of rows in the table and
### the number of unique students in the table. To find the number of unique
### students, you might want to create a set of the account keys in each table.
enrollment_num_rows = len(enrollments)
enrollment_num_unique_students = count_unique(enrollments, 'account_key')

engagement_num_rows = len(daily_engagement)
# The raw engagement table names its account column 'acct'; renamed further down.
engagement_num_unique_students = count_unique(daily_engagement, 'acct')

submission_num_rows = len(project_submissions)
submission_num_unique_students = count_unique(project_submissions, 'account_key')

# +
from datetime import datetime


def set_record_type(data, column, data_type):
    """Coerce ``column`` of every record in ``data`` (in place) to ``data_type``.

    ``data_type`` is one of 'float', 'integer', 'date' (ISO yyyy-mm-dd) or
    'boolean'.  Empty-string cells become ``None`` regardless of the requested
    type.  Returns ``data`` so calls can be chained/reassigned.
    """
    for record in data:
        if record[column]:
            if data_type == 'float':
                record[column] = float(record[column])
            elif data_type == 'integer':
                # via float first: the CSVs store integer columns like '65.0'
                record[column] = int(float(record[column]))
            elif data_type == 'date':
                record[column] = datetime.strptime(record[column], '%Y-%m-%d')
            elif data_type == 'boolean':
                record[column] = True if record[column] == 'True' else False
        else:
            record[column] = None
    return data


enrollments = set_record_type(enrollments, 'join_date', 'date')
enrollments = set_record_type(enrollments, 'cancel_date', 'date')
enrollments = set_record_type(enrollments, 'days_to_cancel', 'integer')
enrollments = set_record_type(enrollments, 'is_udacity', 'boolean')
enrollments = set_record_type(enrollments, 'is_canceled', 'boolean')

daily_engagement = set_record_type(daily_engagement, 'utc_date', 'date')
daily_engagement = set_record_type(daily_engagement, 'num_courses_visited', 'integer')
daily_engagement = set_record_type(daily_engagement, 'total_minutes_visited', 'float')
daily_engagement = set_record_type(daily_engagement, 'lessons_completed', 'integer')
daily_engagement = set_record_type(daily_engagement, 'projects_completed', 'integer')

project_submissions = set_record_type(project_submissions, 'creation_date', 'date')
project_submissions = set_record_type(project_submissions, 'completion_date', 'date')
# -

enrollment_num_unique_students

engagement_num_unique_students

submission_num_unique_students

# Normalize the engagement table's 'acct' column to 'account_key' so all three
# tables share the same key name.  (``del rec['acct']`` instead of the odd but
# equivalent ``del[rec['acct']]`` of the original.)
for rec in daily_engagement:
    rec['account_key'] = rec['acct']
    del rec['acct']


def get_unique_students(data):
    """Return the set of account keys appearing in ``data``."""
    return {rec['account_key'] for rec in data}


# +
enrollment_num_rows = len(enrollments)
enrollment_unique_students = get_unique_students(enrollments)

engagement_num_rows = len(daily_engagement)
engagement_unique_students = get_unique_students(daily_engagement)

submission_num_rows = len(project_submissions)
submission_unique_students = get_unique_students(project_submissions)
# -

daily_engagement[0]

# First surprising point: an enrollment with no engagement records at all.
for enrollment in enrollments:
    student = enrollment['account_key']
    if student not in engagement_unique_students:
        print(enrollment)
        break

# Remaining surprises: missing engagement where the student did NOT cancel on
# the day they joined.
for enrollment in enrollments:
    student = enrollment['account_key']
    if student not in engagement_unique_students and enrollment['join_date'] != enrollment['cancel_date']:
        print(enrollment)

# These turn out to be Udacity's own test accounts — collect them for removal.
udacity_test_accounts = set()
for enrollment in enrollments:
    if enrollment['is_udacity']:
        udacity_test_accounts.add(enrollment['account_key'])
len(udacity_test_accounts)


def remove_udacity_account(data):
    """Return ``data`` without records belonging to Udacity test accounts."""
    return [rec for rec in data
            if rec['account_key'] not in udacity_test_accounts]


# +
enrollments_non_udacity = remove_udacity_account(enrollments)
engagement_non_udacity = remove_udacity_account(daily_engagement)
submission_non_udacity = remove_udacity_account(project_submissions)

print(len(enrollments_non_udacity))
print(len(engagement_non_udacity))
print(len(submission_non_udacity))

# +
# "Paid" students: never canceled, or canceled after the 7-day free trial.
# Keep the most recent join date per student.
paid_students = {}
for enrollment in enrollments_non_udacity:
    if not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7:
        student = enrollment['account_key']
        join_date = enrollment['join_date']
        if student not in paid_students or join_date > paid_students[student]:
            paid_students[student] = join_date

len(paid_students)
# -


def within_one_week(join_date, engagement_date):
    """True iff ``engagement_date`` falls in the 7 days starting at ``join_date``."""
    time_delta = engagement_date - join_date
    return time_delta.days < 7 and time_delta.days >= 0


# +
def remove_free_trial(data):
    """Keep only records belonging to students who stayed past the free trial."""
    return [rec for rec in data if rec['account_key'] in paid_students]


enrollments_paid = remove_free_trial(enrollments_non_udacity)
engagement_paid = remove_free_trial(engagement_non_udacity)
submission_paid = remove_free_trial(submission_non_udacity)

paid_engagement_in_first_week = []
for rec in engagement_paid:
    student = rec['account_key']
    if within_one_week(paid_students[student], rec['utc_date']):
        paid_engagement_in_first_week.append(rec)

len(paid_engagement_in_first_week)

# +
from collections import defaultdict

# Group first-week engagement records by student.
engagement_by_account = defaultdict(list)
for rec in paid_engagement_in_first_week:
    account_key = rec['account_key']
    engagement_by_account[account_key].append(rec)
len(engagement_by_account)

# +
total_minutes_by_accounts = {}
for account_key, engagement_for_student in engagement_by_account.items():
    total_minutes = 0
    for engagement_record in engagement_for_student:
        total_minutes += engagement_record['total_minutes_visited']
    total_minutes_by_accounts[account_key] = total_minutes

# +
total_minutes = list(total_minutes_by_accounts.values())

import numpy as np

print('Mean:', np.mean(total_minutes))
print('Standard deviation:', np.std(total_minutes))
print('Minimum:', np.min(total_minutes))
print('Maximum:', np.max(total_minutes))


# +
def print_statistics(data):
    """Print mean/std/min/max of a dict's values."""
    data_list = list(data.values())
    print('Mean:', np.mean(data_list))
    print('Standard deviation:', np.std(data_list))
    print('Minimum:', np.min(data_list))
    print('Maximum:', np.max(data_list))


print_statistics(total_minutes_by_accounts)


# -

def group_data_by(data, key):
    """Group records into a dict of lists keyed by ``rec[key]``."""
    grouped_data = defaultdict(list)
    for rec in data:
        group_key = rec[key]
        grouped_data[group_key].append(rec)
    return grouped_data


def sum_grouped_items(data, entry):
    """For each group in ``data`` (dict of record lists), sum field ``entry``."""
    sum_group = {}
    for key, record in data.items():
        total = 0
        for rec in record:
            total += rec[entry]
        sum_group[key] = total
    return sum_group


# +
engagement_by_account = group_data_by(paid_engagement_in_first_week, 'account_key')
total_minutes_by_accounts = sum_grouped_items(engagement_by_account, 'total_minutes_visited')
print_statistics(total_minutes_by_accounts)

total_lessons_completed_by_accounts = sum_grouped_items(engagement_by_account, 'lessons_completed')
print_statistics(total_lessons_completed_by_accounts)

# +
# Mark each engagement day on which the student visited at least one course.
for rec in paid_engagement_in_first_week:
    rec['has_visited'] = 1 if rec['num_courses_visited'] > 0 else 0

total_num_courses_visited = sum_grouped_items(engagement_by_account, 'has_visited')
print_statistics(total_num_courses_visited)
# -

subway_project_lesson_keys = ['746169184', '3176718735']
rating_pass_values = ['PASSED', 'DISTINCTION']

submission_paid[0]

# +
submission_by_lesson = group_data_by(submission_paid, 'lesson_key')
for _, record in submission_by_lesson.items():
    for rec in record:
        rec['has_passed'] = 1 if rec['assigned_rating'] in rating_pass_values else 0

total_pass_by_lesson = sum_grouped_items(submission_by_lesson, 'has_passed')
total_pass_by_lesson
# -

sum(value for key, value in total_pass_by_lesson.items()
    if key in subway_project_lesson_keys)

# +
# NOTE(review): this cell classifies *submissions* for the subway project; it
# is superseded by the account-based engagement split two cells below.
passing_engagement = []
non_passing_engagement = []

for lesson, submissions in submission_by_lesson.items():
    if lesson in subway_project_lesson_keys:
        for submission in submissions:
            # BUG FIX: the original appended the stale loop variable ``rec``
            # (left over from the previous cell's loop) on BOTH branches,
            # so the same unrelated record was appended repeatedly.
            if submission['has_passed']:
                passing_engagement.append(submission)
            else:
                non_passing_engagement.append(submission)

print(len(passing_engagement))
print(len(non_passing_engagement))

# +
pass_subway_project = set()
for rec in submission_paid:
    if rec['lesson_key'] in subway_project_lesson_keys and rec['assigned_rating'] in rating_pass_values:
        pass_subway_project.add(rec['account_key'])
print(len(pass_subway_project))

# +
passing_engagement = []
non_passing_engagement = []

for rec in paid_engagement_in_first_week:
    if rec['account_key'] in pass_subway_project:
        passing_engagement.append(rec)
    else:
        non_passing_engagement.append(rec)

print(len(passing_engagement))
print(len(non_passing_engagement))
# -

passing_engagement_by_accounts = group_data_by(passing_engagement, 'account_key')
non_passing_engagement_by_accounts = group_data_by(non_passing_engagement, 'account_key')

passing_minutes = sum_grouped_items(passing_engagement_by_accounts, 'total_minutes_visited')
print_statistics(passing_minutes)

non_passing_minutes = sum_grouped_items(non_passing_engagement_by_accounts, 'total_minutes_visited')
print_statistics(non_passing_minutes)

passing_lessons_completed = sum_grouped_items(passing_engagement_by_accounts, 'lessons_completed')
print_statistics(passing_lessons_completed)

non_passing_lessons_completed = sum_grouped_items(non_passing_engagement_by_accounts, 'lessons_completed')
print_statistics(non_passing_lessons_completed)

# +
data = [1, 2, 1, 3, 3, 1, 4, 2]

# %matplotlib inline
import matplotlib.pyplot as plt

plt.hist(data)


# -

def draw_hist(data):
    """Histogram the values of a per-account dict."""
    data_list = list(data.values())
    plt.hist(data_list)


draw_hist(passing_lessons_completed)

draw_hist(non_passing_lessons_completed)

draw_hist(passing_minutes)

draw_hist(non_passing_minutes)

passing_days_visited = sum_grouped_items(passing_engagement_by_accounts, 'has_visited')
print_statistics(passing_days_visited)

non_passing_days_visited = sum_grouped_items(non_passing_engagement_by_accounts, 'has_visited')
print_statistics(non_passing_days_visited)

# +
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.family'] = 'monospace'

# %matplotlib inline


# Redefinition: same stats as before, now also drawing a histogram.
def print_statistics(data):
    data_list = list(data.values())
    print('Mean:', np.mean(data_list))
    print('Standard deviation:', np.std(data_list))
    print('Minimum:', np.min(data_list))
    print('Maximum:', np.max(data_list))
    plt.hist(data_list, bins=8)
ud170/Lesson 1 - Data Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/rajdeepd/tensorflow_2.0_book_code/blob/master/ch08/WGAN_mnist_example2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="LBWSmkvm4f9K"
# example of a wgan for generating handwritten digits
from numpy import expand_dims
from numpy import mean
from numpy import ones
from numpy.random import randn
from numpy.random import randint
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras import backend
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.constraints import Constraint
from matplotlib import pyplot


# + id="xzRLjn8d45xV"
# clip model weights to a given hypercube
class ClipConstraint(Constraint):
    # set clip value when initialized
    def __init__(self, clip_value):
        self.clip_value = clip_value

    # clip model weights to hypercube [-clip_value, +clip_value]
    def __call__(self, weights):
        return backend.clip(weights, -self.clip_value, self.clip_value)

    # get the config (lets Keras serialize/deserialize the constraint)
    def get_config(self):
        return {'clip_value': self.clip_value}


# + id="8De_MLNz4_X7"
# calculate wasserstein loss
# labels are -1 for real / +1 for fake, so minimizing this pushes the critic
# score in opposite directions for the two classes
def wasserstein_loss(y_true, y_pred):
    return backend.mean(y_true * y_pred)


# + id="S7_QS_Ff5Fpj"
# define the critic model
def make_critic(in_shape=(28,28,1)):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # weight constraint
    # NOTE(review): ``const`` is built but intentionally NOT applied below
    # (kernel_constraint=None) — this notebook is the "no clipping" variant.
    const = ClipConstraint(0.01)
    # define model
    model = Sequential()
    # downsample to 14x14
    model.add(Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init, kernel_constraint=None, input_shape=in_shape))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # downsample to 7x7
    model.add(Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init, kernel_constraint=None))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # scoring, linear activation
    model.add(Flatten())
    model.add(Dense(1))
    # compile model
    # NOTE(review): ``lr=`` is the legacy Keras argument (modern name: learning_rate)
    opt = RMSprop(lr=0.00005)
    model.compile(loss=wasserstein_loss, optimizer=opt)
    model.summary()
    return model


# define the standalone generator model
def make_generator(latent_dim):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # define model
    model = Sequential()
    # foundation for 7x7 image
    n_nodes = 128 * 7 * 7
    model.add(Dense(n_nodes, kernel_initializer=init, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((7, 7, 128)))
    # upsample to 14x14
    model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # upsample to 28x28
    model.add(Conv2DTranspose(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.2))
    # output 28x28x1, tanh matches the [-1,1] pixel scaling used below
    model.add(Conv2D(1, (7,7), activation='tanh', padding='same', kernel_initializer=init))
    model.summary()
    return model


# + id="v2LmRAc_5Oe4"
# define the combined generator and critic model, for updating the generator
def make_gan(generator, critic):
    # make weights in the critic not trainable (only the generator learns here)
    critic.trainable = False
    # connect them
    model = Sequential()
    # add generator
    model.add(generator)
    # add the critic
    model.add(critic)
    # compile model
    opt = RMSprop(lr=0.00005)
    model.compile(loss=wasserstein_loss, optimizer=opt)
    return model


# + id="kfNcKECN5UEW"
# load images
def load_real_samples():
    # load dataset
    (trainX, trainy), (_, _) = load_data()
    # select all of the examples for a given class (only digit 7)
    selected_ix = trainy == 7
    X = trainX[selected_ix]
    # expand to 3d, e.g. add channels
    X = expand_dims(X, axis=-1)
    # convert from ints to floats
    X = X.astype('float32')
    # scale from [0,255] to [-1,1]
    X = (X - 127.5) / 127.5
    return X


# + id="EghSvzEo5hG-"
# select real samples
def generate_real_samples(dataset, n_samples):
    # choose random instances
    ix = randint(0, dataset.shape[0], n_samples)
    # select images
    X = dataset[ix]
    # generate class labels, -1 for 'real'
    y = -ones((n_samples, 1))
    return X, y


# + id="8yw2Ez36KuqZ"
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n_samples):
    # generate points in the latent space
    x_input = randn(latent_dim * n_samples)
    # reshape into a batch of inputs for the network
    x_input = x_input.reshape(n_samples, latent_dim)
    return x_input


# + id="Y4Zii7dvKy2r"
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(generator, latent_dim, n_samples):
    # generate points in latent space
    x_input = generate_latent_points(latent_dim, n_samples)
    # predict outputs
    X = generator.predict(x_input)
    # create class labels with 1.0 for 'fake'
    y = ones((n_samples, 1))
    return X, y


# + id="xITwykPOK3O3"
# generate samples and save as a plot and save the model
def summarize_performance(step, g_model, latent_dim, n_samples=100):
    # prepare fake examples
    X, _ = generate_fake_samples(g_model, latent_dim, n_samples)
    # scale from [-1,1] to [0,1]
    X = (X + 1) / 2.0
    # plot images on a 10x10 grid (matches n_samples=100)
    for i in range(10 * 10):
        # define subplot
        pyplot.subplot(10, 10, 1 + i)
        # turn off axis
        pyplot.axis('off')
        # plot raw pixel data
        pyplot.imshow(X[i, :, :, 0], cmap='gray_r')
    # save plot to file
    filename1 = 'generated_plot_%04d.png' % (step+1)
    pyplot.savefig(filename1)
    pyplot.close()
    # save the generator model
    filename2 = 'model_%04d.h5' % (step+1)
    g_model.save(filename2)
    print('>Saved: %s and %s' % (filename1, filename2))


# + id="Yhmj3qmHK4vW"
# create a line plot of loss for the gan and save to file
# %matplotlib inline
def plot_history(d1_hist, d2_hist, g_hist):
    # plot history
    pyplot.plot(d1_hist, label='crit_real')
    pyplot.plot(d2_hist, label='crit_fake')
    pyplot.plot(g_hist, label='gen')
    pyplot.legend()
    pyplot.title('WGAN Losses - No Clipping')
    # NOTE(review): show() before savefig() may save an empty figure —
    # presumably intended to save the displayed plot; verify the PNG output.
    pyplot.show()
    pyplot.savefig('plot_wgan_line_plot_loss.png')
    pyplot.close()


# + id="RYnFW4OOLCwN"
# train the generator and critic
from datetime import datetime
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(now)
# timestamped CSV log of per-step losses; kept open for the whole run
filename = 'wgan_losses_no_clipping_' + now + '.csv'
print(filename)
file = open(filename, 'w+', newline ='\n')


def train(g_model, c_model, gan_model, dataset, latent_dim, n_epochs=1, n_batch=64, n_critic=5):
    # calculate the number of batches per training epoch
    bat_per_epo = int(dataset.shape[0] / n_batch)
    # calculate the number of training iterations
    # (this comment was garbled in the original by a stray pasted fragment:
    # "file = open('g4g.csv', 'w+', newline ='')" — restored here)
    n_steps = bat_per_epo * n_epochs
    # calculate the size of half a batch of samples
    half_batch = int(n_batch / 2)
    # lists for keeping track of loss
    c1_hist, c2_hist, g_hist = list(), list(), list()
    # manually enumerate epochs
    for i in range(n_steps):
        # update the critic more than the generator (n_critic updates per step)
        c1_tmp, c2_tmp = list(), list()
        for _ in range(n_critic):
            # get randomly selected 'real' samples
            X_real, y_real = generate_real_samples(dataset, half_batch)
            # update critic model weights
            c_loss1 = c_model.train_on_batch(X_real, y_real)
            c1_tmp.append(c_loss1)
            # generate 'fake' examples
            X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)
            # update critic model weights
            c_loss2 = c_model.train_on_batch(X_fake, y_fake)
            c2_tmp.append(c_loss2)
        # store critic loss (mean over the n_critic updates)
        c1_hist.append(mean(c1_tmp))
        c2_hist.append(mean(c2_tmp))
        # prepare points in latent space as input for the generator
        X_gan = generate_latent_points(latent_dim, n_batch)
        # create inverted labels for the fake samples
        y_gan = -ones((n_batch, 1))
        # update the generator via the critic's error
        g_loss = gan_model.train_on_batch(X_gan, y_gan)
        g_hist.append(g_loss)
        # summarize loss on this batch
        print('>%d, c1=%.3f, c2=%.3f g=%.3f' % (i+1, c1_hist[-1], c2_hist[-1], g_loss))
        file.write(str(i+1) + ", " + str(c1_hist[-1]) + ", " + str(c2_hist[-1]) + ", " + str(g_loss) + "\n" )
        file.flush()
        # evaluate the model performance every 'epoch'
        if (i+1) % bat_per_epo == 0:
            summarize_performance(i, g_model, latent_dim)
    #return 1
    #file.close()
    return c1_hist, c2_hist, g_hist


# line plots of loss

# + colab={"base_uri": "https://localhost:8080/"} id="dyZTceybLGxr" outputId="bbc41e7e-b06c-4ba3-fe4b-bef3ae688a5a"
# size of the latent space
latent_dim = 50
# create the critic
critic = make_critic()
# create the generator
generator = make_generator(latent_dim)
# create the gan
gan_model = make_gan(generator, critic)
# load image data
dataset = load_real_samples()
print(dataset.shape)
# train model
# -

c1_hist, c2_hist, g_hist = train(generator, critic, gan_model, dataset, latent_dim)

plot_history(c1_hist, c2_hist, g_hist)
Chapter 10/WGAN_mnist_generator_critic_no_clipping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (fastai2)
#     language: python
#     name: fastai2
# ---

# ## Image网 Submission `128x128`

# This contains a submission for the Image网 leaderboard in the `128x128` category.
#
# In this notebook we:
# 1. Train on 1 pretext task:
#     - Train a network to do image inpatining on Image网's `/train`, `/unsup` and `/val` images.
# 2. Train on 4 downstream tasks:
#     - We load the pretext weights and train for `5` epochs.
#     - We load the pretext weights and train for `20` epochs.
#     - We load the pretext weights and train for `80` epochs.
#     - We load the pretext weights and train for `200` epochs.
#
# Our leaderboard submissions are the accuracies we get on each of the downstream tasks.

# +
import json
import torch
import numpy as np

# project-local modules: hyperparameters + cutout transform for inpainting
from config import config
from RandomCutout import RandomCutout, PILImageInput

from fastai2.basics import *
from fastai2.vision.all import *

from torch.nn import MSELoss
from functools import partial
# -

# ## Pretext Task: Image Inpainting

# +
# Default parameters (all sourced from the local config module)
lr=config['lr']
size=config['size']
sqrmom=config['sqrmom']
mom=config['mom']
eps=config['eps']
epochs=config['epochs']
bs=config['bs']
opt=config['opt']
sh=config['sh']
sa=config['sa']
sym=config['sym']
beta=config['beta']
act_fn=config['act_fn']
fp16=config['fp16']
pool=config['pool']
runs=config['runs']
model = config['model']

# pick the optimizer factory by name
if   opt=='adam'  : opt_func = partial(Adam, mom=mom, sqr_mom=sqrmom, eps=eps)
elif opt=='rms'   : opt_func = partial(RMSProp, sqr_mom=sqrmom)
elif opt=='sgd'   : opt_func = partial(SGD, mom=mom)
elif opt=='ranger': opt_func = partial(ranger, mom=mom, sqr_mom=sqrmom, eps=eps, beta=beta)

# overrides for this 128x128 submission
size = 128
bs = 64
# -

# Default parameters
print("lr", lr)
print("size", size)
print("sqrmom", sqrmom)
print("mom", mom)
print("eps", eps)
print("epochs", epochs)
print("bs", bs)
print("opt", opt)
print("sh", sh)
print("sa", sa)
print("sym", sym)
print("beta", beta)
print("act_fn", act_fn)
print("fp16", fp16)
print("pool", pool)
print("runs", runs)
print("model", model)


# Pretext dataloaders: input is an image with a random cutout, target is the
# original image (get_y is the identity), so the network learns to inpaint.
def get_dbunch(size, bs):
    if size<=224:
        path = URLs.IMAGEWANG_160
    else:
        path = URLs.IMAGEWANG
    source = untar_data(path)

    item_tfms=[RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5), RandomCutout]
    batch_tfms=[RandomErasing(p=0.9, max_count=3, sh=sh), Normalize]
    dblock = DataBlock(blocks=(ImageBlock(cls=PILImageInput), ImageBlock),
                       splitter=GrandparentSplitter(valid_name='val'),
                       get_items=get_image_files,
                       get_y=lambda o: o,
                       item_tfms=item_tfms,
                       batch_tfms=batch_tfms)
    workers = min(8, num_cpus())
    return dblock.dataloaders(source, path=source, bs=bs, num_workers=workers)


dbunch = get_dbunch(size, bs)

#CHANGE: We're predicting pixel values, so we're just going to predict an output for each RGB channel
dbunch.vocab = ['R', 'G', 'B']

dbunch.show_batch()

# U-Net (encoder = `model`) trained with per-pixel MSE against the uncut image
learn = unet_learner(dbunch, model, pretrained=False, opt_func=opt_func,
                     metrics=[], loss_func=MSELoss())

if fp16: learn = learn.to_fp16()

cbs = []

learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)

# I'm not using fastai2's .export() because I only want to save
# the model's parameters.  model[0] is the U-Net encoder, which is what the
# downstream classifiers load below.
torch.save(learn.model[0].state_dict(), 'imagewang_inpainting_15_epochs.pth')


# ## Downstream Task: Image Classification

# NOTE(review): redefines the pretext get_dbunch above — this version yields
# (image, label) pairs for classification instead of (cutout, original) pairs.
def get_dbunch(size, bs):
    if size<=224:
        path = URLs.IMAGEWANG_160
    else:
        path = URLs.IMAGEWANG
    source = untar_data(path)

    item_tfms = [RandomResizedCrop(size, min_scale=0.35), FlipItem(0.5)]
    batch_tfms = [RandomErasing(p=0.9, max_count=3, sh=sh), Normalize]
    dblock = DataBlock(blocks=(ImageBlock, CategoryBlock),
                       splitter=GrandparentSplitter(valid_name='val'),
                       get_items=get_image_files,
                       get_y=parent_label,
                       item_tfms=item_tfms,
                       batch_tfms=batch_tfms)
    workers = min(8, num_cpus())
    return dblock.dataloaders(source, path=source, bs=bs, num_workers=workers)


dbunch = get_dbunch(size, bs)

# ### 5 Epochs

epochs = 5
runs = 5

for run in range(runs):
    print(f'Run: {run}')

    learn = Learner(dbunch, model(c_out=20, pretrained=False, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
            metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())

    if fp16: learn = learn.to_fp16()

    cbs = []

    # Load weights generated from training on our pretext task
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')

    # HACK: If we don't have all of the parameters for our learner, we get an error
    # — splice the (untrained) final linear layer's params into the state dict
    # so load_state_dict sees a complete set of keys.
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)

    # NOTE(review): freeze() is commented out here but enabled in the 20/80/200
    # epoch sections below — confirm whether the 5-epoch runs were meant to
    # fine-tune the whole network.
    #learn.freeze()
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)

# - Run 1: 0.348435
# - Run 2: 0.344871
# - Run 3: 0.345126
# - Run 4: 0.352762
# - Run 5: 0.344617
#
# Average: **34.72%**

# ### 20 Epochs

epochs = 20
runs = 3

for run in range(runs):
    print(f'Run: {run}')

    learn = Learner(dbunch, model(c_out=20, pretrained=False, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
            metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())

    if fp16: learn = learn.to_fp16()

    cbs = []

    # Load weights generated from training on our pretext task
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')

    # HACK: If we don't have all of the parameters for our learner, we get an error
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)

    learn.freeze()
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)

# - Run 1: 0.597608
# - Run 2: 0.597608
# - Run 3: 0.575464
#
# Average: **59.02%**

# ## 80 epochs

epochs = 80
runs = 1

for run in range(runs):
    print(f'Run: {run}')

    learn = Learner(dbunch, model(c_out=20, pretrained=False, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
            metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())

    if fp16: learn = learn.to_fp16()

    cbs = []

    # Load weights generated from training on our pretext task
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')

    # HACK: If we don't have all of the parameters for our learner, we get an error
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)

    learn.freeze()
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)

# Accuracy: **61.85%**

# ### 200 epochs

epochs = 200
runs = 1

for run in range(runs):
    print(f'Run: {run}')

    learn = Learner(dbunch, model(c_out=20, pretrained=False, act_cls=torch.nn.ReLU, sa=sa, sym=sym, pool=pool), opt_func=opt_func, \
            metrics=[accuracy,top_k_accuracy], loss_func=LabelSmoothingCrossEntropy())

    if fp16: learn = learn.to_fp16()

    cbs = []

    # Load weights generated from training on our pretext task
    state_dict = torch.load('imagewang_inpainting_15_epochs.pth')

    # HACK: If we don't have all of the parameters for our learner, we get an error
    linear_layer = learn.model[-1]
    state_dict['11.weight'] = linear_layer.weight
    state_dict['11.bias'] = linear_layer.bias
    learn.model.load_state_dict(state_dict)

    learn.freeze()
    learn.fit_flat_cos(epochs, lr, wd=1e-2, cbs=cbs)

# Accuracy: **60.22%**
01_InpaintingImageWang/03_ImageWang_Leadboard_128.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .java // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Java // language: java // name: java // --- // ## Loading Libraries // // Let's import all the necessary packages first. You can safely ignore this section. import java.util.Random; import java.lang.*; // %maven org.knowm.xchart:xchart:3.5.2 import org.knowm.xchart.*; // ## Helper Methods // // Let's code three helper methods: // // * random array generator // * array printer // * copyArray // // It is assumed that you are fully capable of coding two similar methods by yourself. If you are new to Java (but have some experience with a different language), playing with these methods will help you get familiar with Java faster. // + // random array generator public int[] randomArr(int size) { Random r = new Random(); int[] arr = new int[size]; for (int i = 0; i < size; i++) { arr[i] = r.nextInt(1000) + 1; } return arr; } // array printer public void printArr(int[] arr) { for (int num : arr) { System.out.print(num + " "); } System.out.println(); } // array deep copy public void copyArray(int[] from, int[] to) { if (from.length != to.length) { System.exit(0); } for (int i = 0; i < from.length; i++) { to[i] = from[i]; } } // - // ## Binary Tree // A single array can be used to represent a binary tree. // // The node at the root is indexed 0 in the array. All other nodes are numbered from left to right level by level and from top to bottom. Empty nodes are also numbered. Then each node having an index i is put into the array as its ith element. // // ![binary](images/binary.png) // // The most important thing to learn from the array representation is the relationship of indexes among nodes. 
// 
// *First*, given an arbitrary node indexed i in the array representation, the indexes of its left and right children are:
// 
// ```
// left child: 2*i + 1
// right child: 2*i + 2
// ```
// 
// *Second*, given an arbitrary node indexed i in the array representation, the index of its parent is:
// ```
// parent: (i-1) / 2
// ```
// 
// *Third*, the number of nodes at levels above the leaf level is smaller than or equal to the size of the binary tree divided by 2.
// 
// *Fourth*, given the size of a binary tree s, the index of the node that:
// 
// * sits one level above the leaf level
// * is the rightmost node that has at least one child (at that level)
// 
// can be expressed as
// ```
// the index of the last parent: s / 2 - 1
// ```
// 
// You may want to check a few examples in the above picture by this rule. The proof is beyond the scope of this tutorial.
// 
// Additionally, for a binary tree, when all of its levels except for the last one are completely filled, we call it **a complete binary tree**. It is worth noting that when the bottom level is being filled with nodes, it has to be from left to right.
// this is a max heap public class Heap{ // we go with arraylist instead of array for size flexibility ArrayList<Integer> data; // constructor public Heap() { data = new ArrayList<Integer>(0); } } // Let's add another method "insert" that takes care of adding a new element to the heap. To add a new element to an existing heap, we will following this strategy: // // 1. Add the element to the bottom level of the heap. // 2. Compare the added element with its parent; if they are in the correct order, stop. // 3. If not, swap the element with its parent and return to the previous step. // + // this is a max heap public class Heap{ // we go with arraylist instead of array for size flexibility ArrayList<Integer> data; // constructor public Heap() { data = new ArrayList<Integer>(0); } // print all elements public void print() { for (int i = 0; i < data.size(); i++) { System.out.print(data.get(i) + " "); } System.out.println(); } // insert public void insert(int val) { data.add(val); int i = data.size() - 1; percUp(i); } // percolate element at index i to the top if necessary private void percUp(int i) { while (i > 0 && (i-1)/2 >= 0) { if (data.get(i) > data.get((i-1)/2)) { int tmp = data.get(i); data.set(i, data.get((i-1)/2)); data.set((i-1)/2, tmp); } i = (i-1)/2; } } } // sanity check // note that the printed heap is not fully sorted, and it should be like that int[] arr = randomArr(5); System.out.print("A given array: "); printArr(arr); Heap h = new Heap(); for (int num : arr) { h.insert(num); } System.out.print("Heap built: "); h.print(); // - // You may have noticed that when we print out the heap, it is not fully sorted. There is nothing wrong with it. It is intended to be like this. However, one thing is for sure --- the biggest element always stays at the root for a max heap. If we want to retrieve a fully sorted sequence, we can keep removing the root and restore the property of max heap. 
To achieve this, we need at least two more methods **removeMax** and **heapify**. // + // this is a max heap public class Heap{ // we go with arraylist instead of array for size flexibility ArrayList<Integer> data; // constructor public Heap() { data = new ArrayList<Integer>(0); } // print all elements public void print() { for (int i = 0; i < data.size(); i++) { System.out.print(data.get(i) + " "); } System.out.println(); } // insert public void insert(int val) { data.add(val); int i = data.size() - 1; percUp(i); } // percolate element at index i to the top if necessary private void percUp(int i) { while (i > 0 && (i-1)/2 >= 0) { if (data.get(i) > data.get((i-1)/2)) { int tmp = data.get(i); data.set(i, data.get((i-1)/2)); data.set((i-1)/2, tmp); } i = (i-1)/2; } } // removeMax public int removeMax() { int val = data.get(0); data.remove(0); heapify(0); return val; } // heapify private void heapify(int i) { while (2*i+1 < data.size()) { int maxChildIndex = maxChild(i); if (data.get(i) < data.get(maxChildIndex)) { int tmp = data.get(maxChildIndex); data.set(maxChildIndex, data.get(i)); data.set(i, tmp); } i = maxChildIndex; } } // return the index of maxChild private int maxChild(int i) { int li = 2*i+1, ri = 2*i+2; if (ri >= data.size() || data.get(li) > data.get(ri)) { return li; } else { return ri; } } } // sanity check int[] arr = randomArr(5); System.out.print("A given array: "); printArr(arr); Heap h = new Heap(); for (int num : arr) { h.insert(num); } System.out.print("Reversely sorted: "); while (h.data.size() > 0) { System.out.print(h.removeMax() + " "); } // - // Let's revisit a question we previously asked: *Given a set of unsorted values, what do we do to build a binary heap out of them?* // // The naive strategy is **to start with an empty heap and add values one by one**. We covered it. // // The more optimized strategy is **to treat the given array as a heap and help it to restore its heap properties**. To achieve this, we need a new method. 
// + // this is a max heap public class Heap{ // we go with arraylist instead of array for size flexibility ArrayList<Integer> data; // constructor public Heap() { data = new ArrayList<Integer>(0); } // print all elements public void print() { for (int i = 0; i < data.size(); i++) { System.out.print(data.get(i) + " "); } System.out.println(); } // insert public void insert(int val) { data.add(val); int i = data.size() - 1; percUp(i); } // percolate element at index i to the top if necessary private void percUp(int i) { while (i > 0 && (i-1)/2 >= 0) { if (data.get(i) > data.get((i-1)/2)) { int tmp = data.get(i); data.set(i, data.get((i-1)/2)); data.set((i-1)/2, tmp); } i = (i-1)/2; } } // removeMax public int removeMax() { int val = data.get(0); data.remove(0); heapify(0); return val; } // heapify private void heapify(int i) { while (2*i+1 < data.size()) { int maxChildIndex = maxChild(i); if (data.get(i) < data.get(maxChildIndex)) { int tmp = data.get(maxChildIndex); data.set(maxChildIndex, data.get(i)); data.set(i, tmp); } i = maxChildIndex; } } // return the index of maxChild private int maxChild(int i) { int li = 2*i+1, ri = 2*i+2; if (ri >= data.size() || data.get(li) > data.get(ri)) { return li; } else { return ri; } } // heap builder public void build(int[] arr) { // load elements to data for (int num: arr) { data.add(num); } // heapify for (int i = data.size()/2 -1; i >= 0; i--) { heapify(i); } } } // sanity check int[] arr = randomArr(5); System.out.print("A given array: "); printArr(arr); Heap h = new Heap(); h.build(arr); System.out.print("Reversely sorted: "); while (h.data.size() > 0) { System.out.print(h.removeMax() + " "); } // - // Let's upgrade our code so we can track the time complexitiy of the two approaches of building heaps. 
// + // this is a max heap public class HeapTrack{ // we go with arraylist instead of array for size flexibility ArrayList<Integer> data; // constructor public HeapTrack() { data = new ArrayList<Integer>(0); } // print all elements public void print() { for (int i = 0; i < data.size(); i++) { System.out.print(data.get(i) + " "); } System.out.println(); } // insert public int insert(int val) { int steps = 1 + data.size(); data.add(val); int i = data.size() - 1; steps += percUp(i); return steps; } // percolate element at index i to the top if necessary private int percUp(int i) { int steps = 0; while (i > 0 && (i-1)/2 >= 0) { if (data.get(i) > data.get((i-1)/2)) { int tmp = data.get(i); data.set(i, data.get((i-1)/2)); data.set((i-1)/2, tmp); steps += 3; } i = (i-1)/2; steps += 2; } return steps; } // removeMax public int removeMax() { int val = data.get(0); data.remove(0); heapify(0); return val; } // heapify private int heapify(int i) { int steps = 0; while (2*i+1 < data.size()) { int[] results = maxChild(i); int maxChildIndex = results[0]; steps += results[1]; if (data.get(i) < data.get(maxChildIndex)) { int tmp = data.get(maxChildIndex); data.set(maxChildIndex, data.get(i)); data.set(i, tmp); steps += 3; } i = maxChildIndex; steps += 2; } return steps; } // return the index of maxChild private int[] maxChild(int i) { int[] results = new int[2]; int li = 2*i+1, ri = 2*i+2; if (ri >= data.size() || data.get(li) > data.get(ri)) { results[0] = li; } else { results[0] = ri; } int steps = 4; results[1] = steps; return results; } // heap builder public int build(int[] arr) { int steps = 0; // load elements to data for (int num: arr) { data.add(num); } steps += data.size(); // heapify for (int i = data.size()/2 -1; i >= 0; i--) { steps += heapify(i); } return steps; } } // sanity check int[] arr = randomArr(5); System.out.print("A given array: "); printArr(arr); HeapTrack h = new HeapTrack(); h.build(arr); System.out.print("Reversely sorted: "); while (h.data.size() > 0) 
{ System.out.print(h.removeMax() + " "); } // - // Now let's plot the comparison between the two heap building approaches. By looking at the comparison, it is easy to conclude that the second strategy is more effective. As the size of input grows, the efficiency difference grows. // + // predetermined size int size = 50; // storage of steps int[] heapInsertArr = new int[size]; int[] heapBuildArr = new int[size]; // heap HeapTrack heapInsert = new HeapTrack(); HeapTrack heapBuild = new HeapTrack(); // populate storage for (int i = 1; i < size; i++) { // prepare two exactly same sorted arrays int[] temp1 = randomArr(i); int[] temp2 = new int[temp1.length]; copyArray(temp1, temp2); // insert build heapInsertArr[i] = 0; for (int num : temp1) { heapInsertArr[i] += heapInsert.insert(num); } // build heapBuildArr[i] = heapBuild.build(temp2); } // size of input - convert int to double for plotting double[] xData = new double[size]; for (int i = 1; i < xData.length; i++) { xData[i] = i; } // heapInsert double[] yDataB = new double[size]; for (int i = 0; i < yDataB.length; i++) { yDataB[i] = heapInsertArr[i]; } // heapBuild double[] yDataN = new double[size]; for (int i = 0; i < yDataN.length; i++) { yDataN[i] = heapBuildArr[i]; } // plot it XYChart chart = new XYChartBuilder().width(600).height(400).title("Building a Heap").xAxisTitle("Input Size n").yAxisTitle("Running Time T(n)").build(); chart.addSeries("Insertion-Build", xData, yDataB); chart.addSeries("Heapify-Build", xData, yDataN); BitmapEncoder.getBufferedImage(chart); // - // ## Do It Yourself // #### Practice - median finder // // Median is the middle value in an ordered integer list. If the size of the list is even, the median is the mean of the two middle value. 
// // Examples: // ``` // [2, 3, 4] -> the median is 3 // [2, 3] -> the median is (2 + 3) / 2 = 2.5 // ``` // // Design a data structure that supports the following two operations: // // * void addNum(int num) - Add a integer number from the data stream to the data structure. // * double findMedian() - Return the median of all elements so far. // // You can have as many helper methods as you want. The code skeleton is provided to you. // // *Hint: You may consider using PriorityQueue in Java, which is essentially a min heap* // + class MedianFinder { /** initialize your data structure here. */ // Adds a number into the data structure public void addNum(int num) { } // Returns the median of current data stream public double findMedian() { // remove this line return 0; } } // - // **When you finish (or not) playing your exploration of the whole interactive notebook and DIY assignment, you should download a html file and upload it to the assignment box on Canvas:** // // * File --> Download as --> HTML (.html) // // ![download](images/html.png)
chapter5-heap.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Summary
#
# Use [`python-fitbit`](http://python-fitbit.readthedocs.io/en/latest/#) to interact with the Fitbit API and get sleep and intraday activity data. Store as json files in data/raw.

# %load_ext pypath_magic

# %pypath -a /Users/rbussman/Projects/sleep-bit

# +
from src.data import get_fitbit

import matplotlib.pyplot as plt
# %matplotlib inline

import seaborn as sns
sns.set_context('poster')

import pandas as pd
import time
# -

daterange = pd.date_range('2017-03-30', '2017-08-10')


def download_resource(resource, daterange, rate_limit=140):
    """Download and persist one Fitbit resource for every date in `daterange`.

    Fitbit limits API calls to 150 per hour, so after `rate_limit` calls we
    pause for an hour before continuing.  This replaces two previously
    duplicated copies of the same loop (one per resource).

    Parameters
    ----------
    resource : str
        Fitbit resource name, e.g. 'intraday' or 'sleep'.
    daterange : iterable of dates
        Dates to download.
    rate_limit : int
        Number of calls allowed before pausing for an hour.
    """
    api_calls = 0
    for date in daterange:
        fitbit_data = get_fitbit.FitbitData(date, resource)
        payload = fitbit_data.download_from_fitbit()
        fitbit_data.write_to_disk(payload)
        api_calls += 1
        if api_calls > rate_limit:
            print("We've exceeded the fitbit rate limit. Pausing for 1 hour.")
            time.sleep(3600)
            api_calls = 0


# Intraday steps
download_resource('intraday', daterange)

# Sleep logs
download_resource('sleep', daterange)
notebooks/sbussmann_get-fitbit-data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %run ./setup_jupyter.ipynb df = spark.read.json("hdfs://namenode:8020/arxiv-metadata-oai-snapshot.json") from pyspark.sql import functions as F df_column_count = df.withColumn("wordcount", F.size(F.split(F.col("abstract")," "))) df_column_count.select("id", "wordcount") df_column_count.groupBy("wordcount").count().orderBy(F.col("count").desc()).limit(20) spark.stop()
jupyter-spark/work/Spark Word Count.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Deployment
#
# * Create a deployment folder.
# * Create a simple app.py file.
# * Create a virtual environment
#     * conda create -n flaskdeploy flask (the flask means to install flask).
#     * Normally, we should now run 'pip install ....' as we did before, or install according to a requirements.txt. Here we only install flask for the simplest application, and it is already done.
# * Run the app.py locally and make sure it works.
# * pip install gunicorn
# * pip freeze > requirements.txt
# * In the deployment folder, create a file called Procfile, and type 'web: gunicorn app:app' (no quotes) and save the file.
# * Type 'Heroku CLI' in Google and go to the page. First install Git and then the Heroku CLI. See details below.
#     * Details about installation.
#     * I choose Atom as the default editor for git.
#     * Currently I just use the default choices during installation.
#     * If re-installation is needed for full usage of Git, check the first-time Git setup guide on the web.
#     * Currently use this version of git for Heroku deployment purposes, and use GitHub Desktop for other services.
#     * After installing Git, install Heroku.
#
# * Create a Heroku account: <EMAIL> with passwd AppleID + two underscores.
# * Create an app once logged in to Heroku, with a specified name.
# * Reopen Atom, go to the deployment folder, activate flaskdeploy (this may not matter). Then type 'heroku login'. Follow the steps and you will be logged in.
# * Once logged in, stay in the deployment folder and type 'git init'
# * Then type: `heroku git:remote -a my-flask-demo-app-lijun`; note the string my-flask-demo... is the app name I gave my app when creating a new app in Heroku.
# * git add .
# * git commit -am "First commit" # * git push heroku master # * copy the https://my-flask-demo-app-lijun.herokuapp.com/ from the terminal and check whether it works. # # **Comments:** # If I want to deploy the final big project to Heroku, then I need postgres for robust application. # # Accepting-Payments-with-Flask # * Usually we are not necessarily handle payments by ourself (even with the following stripe) if we want open some online stores. There are many online solutions for online stores. # ## Setting up # * activate myflaskenv # * pip install --upgrade Stripe # ### payme.py # + rom flask import Flask, render_template, request, redirect, url_for import stripe app = Flask(__name__) public_key = 'pk_test_6pRNASCoBOKtIshFeQd4XMUh' #This is the public key for working with fake credit card. Simiar for the api-key below stripe.api_key = "<KEY>" @app.route('/') def index(): return render_template('index.html', public_key=public_key) @app.route('/thankyou') def thankyou(): return render_template('thankyou.html') @app.route('/payment', methods=['POST']) def payment(): # CUSTOMER INFORMATION customer = stripe.Customer.create(email=request.form['stripeEmail'], source=request.form['stripeToken']) # CHARGE/PAYMENT INFORMATION charge = stripe.Charge.create( customer=customer.id, amount=1999, currency='usd', description='Donation' ) return redirect(url_for('thankyou')) if __name__ == '__main__': app.run(debug=True) # - # ### index.html # + <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <title></title> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous"> <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.0/umd/popper.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script 
src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script> </head> <body> <div class="jumbotron"> <h1>Welcome to our Puppy Donation Page</h1> <h2>Would you like to donate to our adoption agency?</h2> {# THIS IS A TEST KEY. TO FIND YOUR OWN KEY, LOG IN TO YOUR STRIPE ACCOUNT THEN GO TO: https://stripe.com/docs/quickstart #} <form action="{{url_for('payment')}}" method="POST"> <script src="https://checkout.stripe.com/checkout.js" class="stripe-button" data-key={{public_key}} data-amount="1999" data-name="Puppy Adoption" data-description="Donation" data-image="https://images.unsplash.com/photo-1514984879728-be0aff75a6e8?ixlib=rb-0.3.5&ixid=eyJhcHBfaWQiOjEyMDd9&s=35dcb9bb6f3e097541dd732a99c44766&auto=format&fit=crop&w=1576&q=80" data-locale="auto" data-zip-code="true"> </script> </form> </div> </body> </html> # - # ### Thankyou.html <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <meta charset="utf-8"> <title></title> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous"> <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.0/umd/popper.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script> </head> <body> <div class="jumbotron"> <h1>Thank you for your donation!</h1> </div> </body> </html> # ## Company social blog # General structure of the project # ![companyBlog.png](attachment:companyBlog.png) # Below are models (tables) # ![models.png](attachment:models.png) # **Comments** # * Unlike the Section 11 about 'large Flask application', here we put all the html templates into one top-level templates folder, rather than in the sub-templates folder 
for each models. # * # ### .py files # **__init__.py # + import os from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager app = Flask(__name__) ############################################################################# ############ CONFIGURATIONS (CAN BE SEPARATE CONFIG.PY FILE) ############### ########################################################################### # Remember you need to set your environment variables at the command line # when you deploy this to a real website. # export SECRET_KEY=mysecret # set SECRET_KEY=mysecret app.config['SECRET_KEY'] = 'mysecret' ################################# ### DATABASE SETUPS ############ ############################### basedir = os.path.abspath(os.path.dirname(__file__)) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite') app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) Migrate(app,db) ########################### #### LOGIN CONFIGS ####### ######################### login_manager = LoginManager() # We can now pass in our app to the login manager login_manager.init_app(app) # Tell users what view to go to when they need to login. 
login_manager.login_view = "users.login" ########################### #### BLUEPRINT CONFIGS ####### ######################### # Import these at the top if you want # We've imported them here for easy reference from puppycompanyblog.core.views import core from puppycompanyblog.users.views import users from puppycompanyblog.blog_posts.views import blog_posts from puppycompanyblog.error_pages.handlers import error_pages # Register the apps app.register_blueprint(users) app.register_blueprint(blog_posts) app.register_blueprint(core) app.register_blueprint(error_pages) # - # **models.py** # + from puppycompanyblog import db,login_manager from datetime import datetime from werkzeug.security import generate_password_hash,check_password_hash from flask_login import UserMixin # By inheriting the UserMixin we get access to a lot of built-in attributes # which we will be able to call in our views! # is_authenticated() # is_active() # is_anonymous() # get_id() # The user_loader decorator allows flask-login to load the current user # and grab their id. @login_manager.user_loader def load_user(user_id): return User.query.get(user_id) class User(db.Model, UserMixin): # Create a table in the db __tablename__ = 'users' id = db.Column(db.Integer, primary_key = True) profile_image = db.Column(db.String(20), nullable=False, default='default_profile.png') email = db.Column(db.String(64), unique=True, index=True) username = db.Column(db.String(64), unique=True, index=True) password_hash = db.Column(db.String(128)) # This connects BlogPosts to a User Author. posts = db.relationship('BlogPost', backref='author', lazy=True) #'BlogPost' is another model defined in the same file. #backref is for relation between a blogpost and a user. So it indicates the author of the BlogPost # is a user model. It is like an attribute call? 
def __init__(self, email, username, password): self.email = email self.username = username self.password_hash = generate_password_hash(password) def check_password(self,password): # https://stackoverflow.com/questions/23432478/flask-generate-password-hash-not-constant-output return check_password_hash(self.password_hash,password) def __repr__(self): return f"UserName: {self.username}" class BlogPost(db.Model): # Setup the relationship to the User table users = db.relationship(User) # Model for the Blog Posts on Website id = db.Column(db.Integer, primary_key=True) # Notice how we connect the BlogPost to a particular author user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False) date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) title = db.Column(db.String(140), nullable=False) text = db.Column(db.Text, nullable=False) def __init__(self, title, text, user_id): self.title = title self.text = text self.user_id =user_id def __repr__(self): return f"Post Id: {self.id} --- Date: {self.date} --- Title: {self.title}" # - # **handlers.py** # + from flask import Blueprint,render_template error_pages = Blueprint('error_pages',__name__) @error_pages.app_errorhandler(404) def error_404(error): ''' Error for pages not found. ''' # Notice how we return a tuple! return render_template('error_pages/404.html'), 404 #The above returns a tuple. This is because we use the '@error_pages.app_errorhandler(404)' #which is not the usual routing. # Also 'error_pages/404.html' means that we have a subfolder under the top-level templates folder. # Note in the base.html, when we access folder, we use '.'. Check it out whether my understanding is correct. @error_pages.app_errorhandler(403) def error_403(error): ''' Error for trying to access something which is forbidden. Such as trying to update someone else's blog post. ''' # Notice how we return a tuple! 
return render_template('error_pages/403.html'), 403 # - # **forms.py** # + # Form Based Imports from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, SubmitField from wtforms.validators import DataRequired,Email,EqualTo from wtforms import ValidationError from flask_wtf.file import FileField, FileAllowed # User Based Imports from flask_login import current_user from puppycompanyblog.models import User class LoginForm(FlaskForm): email = StringField('Email', validators=[DataRequired(), Email()]) password = PasswordField('Password', validators=[DataRequired()]) submit = SubmitField('Log In') class RegistrationForm(FlaskForm): email = StringField('Email', validators=[DataRequired(),Email()]) username = StringField('Username', validators=[DataRequired()]) password = PasswordField('Password', validators=[DataRequired(), EqualTo('pass_confirm', message='Passwords Must Match!')]) pass_confirm = PasswordField('<PASSWORD> password', validators=[DataRequired()]) submit = SubmitField('Register!') def validate_email(self, field): # Check if not None for that user email! if User.query.filter_by(email=field.data).first(): raise ValidationError('Your email has been registered already!') def validate_username(self, field): # Check if not None for that username! if User.query.filter_by(username=field.data).first(): raise ValidationError('Sorry, that username is taken!') class UpdateUserForm(FlaskForm): email = StringField('Email', validators=[DataRequired(),Email()]) username = StringField('Username', validators=[DataRequired()]) picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])]) submit = SubmitField('Update') def validate_email(self, field): # Check if not None for that user email! if User.query.filter_by(email=field.data).first(): raise ValidationError('Your email has been registered already!') def validate_username(self, field): # Check if not None for that username! 
if User.query.filter_by(username=field.data).first(): raise ValidationError('Sorry, that username is taken!') # - # **picture_handler.py** # + import os # pip install pillow from PIL import Image from flask import url_for, current_app def add_profile_pic(pic_upload,username): filename = pic_upload.filename # Grab extension type .jpg or .png ext_type = filename.split('.')[-1] storage_filename = str(username) + '.' +ext_type filepath = os.path.join(current_app.root_path, 'static\profile_pics', storage_filename) # Play Around with this size. output_size = (200, 200) # Open the picture and save it pic = Image.open(pic_upload) pic.thumbnail(output_size) pic.save(filepath) return storage_filename # - # **views.py** for users # + from flask import render_template, url_for, flash, redirect, request, Blueprint from flask_login import login_user, current_user, logout_user, login_required from puppycompanyblog import db from werkzeug.security import generate_password_hash,check_password_hash from puppycompanyblog.models import User, BlogPost from puppycompanyblog.users.forms import RegistrationForm, LoginForm, UpdateUserForm from puppycompanyblog.users.picture_handler import add_profile_pic users = Blueprint('users', __name__) @users.route('/register', methods=['GET', 'POST']) def register(): form = RegistrationForm() if form.validate_on_submit(): user = User(email=form.email.data, username=form.username.data, password=form.password.data) db.session.add(user) db.session.commit() flash('Thanks for registering! 
Now you can login!') return redirect(url_for('users.login')) return render_template('register.html', form=form) @users.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm() if form.validate_on_submit(): # Grab the user from our User Models table user = User.query.filter_by(email=form.email.data).first() # Check that the user was supplied and the password is right # The verify_password method comes from the User object # https://stackoverflow.com/questions/2209755/python-operation-vs-is-not if user.check_password(form.password.data) and user is not None: #Log in the user login_user(user) flash('Logged in successfully.') # If a user was trying to visit a page that requires a login # flask saves that URL as 'next'. next = request.args.get('next') # So let's now check if that next exists, otherwise we'll go to # the welcome page. if next == None or not next[0]=='/': next = url_for('core.index') return redirect(next) return render_template('login.html', form=form) @users.route("/logout") def logout(): logout_user() return redirect(url_for('core.index')) #We cannot use 'index' directly as we are using blueprint @users.route("/account", methods=['GET', 'POST']) @login_required def account(): form = UpdateUserForm() if form.validate_on_submit(): print(form) if form.picture.data: username = current_user.username pic = add_profile_pic(form.picture.data,username) current_user.profile_image = pic current_user.username = form.username.data current_user.email = form.email.data db.session.commit() flash('User Account Updated') return redirect(url_for('users.account')) elif request.method == 'GET': form.username.data = current_user.username form.email.data = current_user.email profile_image = url_for('static', filename='profile_pics/' + current_user.profile_image) return render_template('account.html', profile_image=profile_image, form=form) @users.route("/<username>") def user_posts(username): page = request.args.get('page', 1, type=int) user = 
User.query.filter_by(username=username).first_or_404() blog_posts = BlogPost.query.filter_by(author=user).order_by(BlogPost.date.desc()).paginate(page=page, per_page=5) return render_template('user_blog_posts.html', blog_posts=blog_posts, user=user) # - # ### base.html # Be aware the core.index in the following sentence. # <a href="{{url_for('core.index')}}">Home</a> <!DOCTYPE html> <html lang="en" dir="ltr"> <head> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="<KEY>" crossorigin="anonymous"> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script> <meta charset="utf-8"> <title></title> </head> <body> <ul class='nav'> <li class='nav-link'> <a href="{{url_for('core.index')}}">Home</a> </li> <li class='nav-link'> <a href="{{url_for('core.info')}}">About</a> </li> {% if current_user.is_authenticated %} <li class='nav-link'> <a href="{{url_for('users.logout')}}">Log Out</a> </li> <li class='nav-link'> <a href="{{url_for('users.account')}}">Account</a> </li> <li class='nav-link'> <a href="#">Create Post</a> </li> {% else %} <li class='nav-link'> <a href="{{url_for('users.login')}}">Log In</a> </li> <li class='nav-link'> <a href="{{url_for('users.register')}}">Register</a> </li> {% endif %} </ul> <div class="container"> {% block content %} {% endblock %} </div> </body> </html> # **create_post.html** # Note we can always modify the style of forms with CSS or Bootstrap. For example, within the () of form.email() add extra stuff. {% extends "base.html" %} {% block content %} <form method="POST"> {# This hidden_tag is a CSRF security feature. 
#} {{ form.hidden_tag() }} {{ form.email.label }} {{ form.email() }}<br> {{ form.username.label }} {{ form.username() }}<br> {{ form.password.label }} {{ form.password() }}<br> {{ form.pass_confirm.label }} {{ form.pass_confirm() }}<br> {{ form.submit() }} </form> {% endblock %}
programming/python/Flask/social blog project _ deployment _ accepting payments Stripe _.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/bkkaggle/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] colab_type="text" id="5VIGyIus8Vr7"
# Take a look at the [repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) for more information

# + [markdown] colab_type="text" id="7wNjDKdQy35h"
# # Install

# + colab={} colab_type="code" id="TRm-USlsHgEV"
# BUG FIX: the URL previously read "htt ps://..." (a stray space inside the
# scheme), which makes git clone fail.
# !git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix

# + colab={} colab_type="code" id="Pt3igws3eiVp"
import os
os.chdir('pytorch-CycleGAN-and-pix2pix/')

# + colab={} colab_type="code" id="z1EySlOXwwoa"
# !pip install -r requirements.txt

# + [markdown] colab_type="text" id="8daqlgVhw29P"
# # Datasets
#
# Download one of the official datasets with:
#
# - `bash ./datasets/download_cyclegan_dataset.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]`
#
# Or use your own dataset by creating the appropriate folders and adding in the images.
#
# - Create a dataset folder under `/dataset` for your dataset.
# - Create subfolders `testA`, `testB`, `trainA`, and `trainB` under your dataset's folder. Place any images you want to transform from a to b (cat2dog) in the `testA` folder, images you want to transform from b to a (dog2cat) in the `testB` folder, and do the same for the `trainA` and `trainB` folders.
# + colab={} colab_type="code" id="vrdOettJxaCc" # !bash ./datasets/download_cyclegan_dataset.sh monet2photo # + [markdown] colab_type="text" id="gdUz4116xhpm" # # Pretrained models # # Download one of the official pretrained models with: # # - `bash ./scripts/download_cyclegan_model.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]` # # Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt` # + colab={} colab_type="code" id="B75UqtKhxznS" # !bash ./scripts/download_cyclegan_model.sh horse2zebra # + [markdown] colab_type="text" id="yFw1kDQBx3LN" # # Training # # - `python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan` # # Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. I've found that a batch size of 16 fits onto 4 V100s and can finish training an epoch in ~90s. # # Once your model has trained, copy over the last checkpoint to a format that the testing model can automatically detect: # # Use `cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class A to class B and `cp ./checkpoints/horse2zebra/latest_net_G_B.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class B to class A. 
# # + colab={} colab_type="code" id="0sp7TCT2x9dB" # !python train.py --dataroot ./datasets/m2f2h --name m2f2h --model develop --no_dropout # + colab_type="text" id="9UkcaFZiyASl" active="" # # Testing # # - `python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout` # # Change the `--dataroot` and `--name` to be consistent with your trained model's configuration. # # > from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix: # > The option --model test is used for generating results of CycleGAN only for one side. This option will automatically set --dataset_mode single, which only loads the images from one set. On the contrary, using --model cycle_gan requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at ./results/. Use --results_dir {directory_path_to_save_result} to specify the results directory. # # > For your own experiments, you might want to specify --netG, --norm, --no_dropout to match the generator architecture of the trained model. # + [markdown] colab={} colab_type="code" id="uCsKkEq0yGh0" # # # !python train.py --dataroot ./datasets/vangogh2photo/testA --name vangogh2photo --model develop --no_dropout # + colab={} colab_type="code" id="9Mgg8raPyizq" import matplotlib.pyplot as plt img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_fake.png') plt.imshow(img) # - # !python test.py --dataroot ./datasets/m2f2h --name m2f2h --model develop --no_dropout # + colab={} colab_type="code" id="0G3oVH9DyqLQ" import matplotlib.pyplot as plt img = plt.imread('./results/m2f2h/test_latest/images/002423_fake_C.png') plt.imshow(img) # - img = plt.imread('./results/m2f2h/test_latest/images/002423_real_A.png') plt.imshow(img)
CycleGAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 爬取今天到目前為止的所有文章 (crawl every article posted so far today)
#
# https://www.ptt.cc/bbs/Gossiping/index.html

# +
import requests
import re
import json
from bs4 import BeautifulSoup, NavigableString
from datetime import datetime
from pprint import pprint
from urllib.parse import urljoin
# -

base_url = 'https://www.ptt.cc/bbs/Gossiping/index.html'

# PTT list pages print dates as "m/d"; build today's string once for comparison.
ptt_today = datetime.now()
ptt_today_str = ptt_today.strftime('%m/%d')
print(ptt_today_str)

# ## 取得總頁碼 (get the total page count)
#
# The「‹ 上頁」(previous page) button links to page n-1, so its page number
# plus one is the index of the newest list page.

resp_base = requests.get(base_url, cookies={'over18': '1'})  # over18 cookie passes the age gate
assert resp_base.status_code == 200

soup_base = BeautifulSoup(resp_base.text, 'lxml')
paging_tag = soup_base.find(class_='btn-group-paging')
total_page = None
for btn_tag in paging_tag.findAll('a'):
    if btn_tag.text == '‹ 上頁':  # the "previous page" button
        compile_page = re.search(r'(\d+)', btn_tag['href'])
        if compile_page:
            total_page = int(compile_page.group(0)) + 1
print('total page =', total_page)

# Compile shared patterns once instead of on every article/comment.
IP_RE = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
NICKNAME_RE = re.compile(r'\((.*)\)')


def crawl_article(url):
    """Fetch one PTT article page and return its header fields, body text,
    publish IP and push comments as a dict.

    Returns None when the page cannot be fetched (non-200 response).
    """
    resp = requests.get(url, cookies={'over18': '1'})
    if resp.status_code != 200:
        return None
    soup = BeautifulSoup(resp.text, 'lxml')
    print('Start to Crawling', url)

    # ##############################
    # crawl article
    # ##############################
    article = {
        'author_id': '',
        'author_nickname': '',
        'title': '',
        'timestamp': '',
        'contents': '',
        'ip': ''
    }
    article_body = soup.find(id='main-content')

    # Header metalines: 作者 (author), 標題 (title), 時間 (timestamp).
    article_head = article_body.findAll('div', class_='article-metaline')
    for metaline in article_head:
        meta_tag = metaline.find(class_='article-meta-tag').text
        meta_value = metaline.find(class_='article-meta-value').text
        if meta_tag == '作者':
            # Author field looks like "user_id (nickname)" — split the parts.
            compile_nickname = NICKNAME_RE.search(meta_value)
            article['author_id'] = meta_value.split('(')[0].strip(' ')
            article['author_nickname'] = compile_nickname.group(1) if compile_nickname else ''
        elif meta_tag == '標題':
            article['title'] = meta_value
        elif meta_tag == '時間':
            article['timestamp'] = meta_value

    # Article body: the bare NavigableStrings directly under #main-content
    # (child tags — metalines, push divs, footers — are skipped).
    contents = [expr for expr in article_body.contents if isinstance(expr, NavigableString)]
    contents = [re.sub('\n', '', expr) for expr in contents]
    contents = [i for i in contents if i]
    contents = '\n'.join(contents)
    article['contents'] = contents

    # Publish IP lives in an "f2" footer line, e.g. "※ 發信站: ... 1.2.3.4".
    article_ip = article_body.find(class_='f2').text
    compile_ip = IP_RE.search(article_ip)
    article['ip'] = compile_ip.group(0) if compile_ip else ''

    # ##############################
    # crawl comments
    # ##############################
    comments = []
    for comment in article_body.findAll('div', class_='push'):
        tag = comment.find(class_='push-tag').text
        guest_id = comment.find(class_='push-userid').text
        guest_content = comment.find(class_='push-content').text
        guest_ipdatetime = comment.find(class_='push-ipdatetime').text
        compile_ip = IP_RE.search(guest_ipdatetime)
        guest_ip = compile_ip.group(0) if compile_ip else ''
        # Plain string removal (the original used re.sub, which treated the
        # IP's dots as regex wildcards).
        guest_timestamp = guest_ipdatetime.replace(guest_ip, '').strip()
        comments.append({
            'tag': tag,
            'id': guest_id,
            'content': guest_content,
            'ip': guest_ip,
            'timestamp': guest_timestamp
        })
    article['comments'] = comments
    article['url'] = url
    return article


# +
# Return codes for compare_timestamp_md (names kept as originally spelled).
DATE_GRATER = 1
DATE_EQUAL = 0
DATE_LESS = -1


def compare_timestamp_md(src, dest):
    """Compare two 'm/d' date strings.

    Returns DATE_GRATER (1) when dest is after src, DATE_EQUAL (0) when they
    are the same day, and DATE_LESS (-1) when dest is before src.
    """
    date_src = datetime.strptime(src, '%m/%d')
    date_dest = datetime.strptime(dest, '%m/%d')
    if date_dest > date_src:
        return DATE_GRATER
    elif date_dest == date_src:
        return DATE_EQUAL
    else:
        return DATE_LESS


# +
# ## 往回檢查日期並爬取文章 — walk list pages from newest to oldest and
# collect only articles dated today. Page 1 holds the oldest articles.
data = []
for page in range(total_page, 1, -1):
    current_url = 'https://www.ptt.cc/bbs/Gossiping/index{}.html'.format(page)
    resp_page = requests.get(current_url, cookies={'over18': '1'})
    if resp_page.status_code != 200:
        continue
    soup_page = BeautifulSoup(resp_page.text, 'lxml')

    # ##############################
    # check the first article date
    # ##############################
    container_tag = soup_page.find('div', class_='r-list-container')
    first_article = container_tag.find('div', class_='r-ent')
    first_article_date = first_article.find('div', class_='date').text.strip()
    compare_datetime = compare_timestamp_md(ptt_today_str, first_article_date)
    print('{} - date {} result {}'.format(current_url, first_article_date, compare_datetime))

    if compare_datetime == DATE_GRATER:
        continue
    else:
        # Only crawl today's articles listed before the r-list-sep line
        # (everything after it is pinned announcements).
        for article_row_tag in container_tag.findChildren('div', recursive=False):
            if 'r-list-sep' in article_row_tag['class']:
                break
            if 'r-ent' in article_row_tag['class']:
                article_date = article_row_tag.find('div', class_='date').text.strip()
                article_date_compare = compare_timestamp_md(ptt_today_str, article_date)
                if article_date_compare != DATE_EQUAL:
                    continue
                article_tag = article_row_tag.find('a', href=True)
                if article_tag is None:
                    # Deleted articles keep a list row but have no link — skip
                    # them instead of crashing on article_tag['href'].
                    continue
                article_url = urljoin(base_url, article_tag['href'])
                article_data = crawl_article(article_url)
                if article_data is not None:
                    # Guard: crawl_article returns None on a failed fetch;
                    # appending it would write nulls into the JSON output.
                    data.append(article_data)

    # If the first article's date is already earlier than today, older pages
    # cannot contain today's articles — stop paging back.
    if compare_datetime == DATE_LESS:
        break

with open('today_articles.json', 'w+', encoding='utf-8') as f:
    json.dump(data, f, indent=2, ensure_ascii=False)
    print('Save - today_articles.json')
appendix_ptt/02_today_articles.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Unit4-NLP (Python3)
#     language: python
#     name: unit4-nlp
# ---

# Lambda School Data Science
#
# *Unit 4, Sprint 1, Module 3*
#
# ---
#
# # Document Classification (Assignment)
#
# You already know how to do classification and how to extract features from
# documents, so this module combines those skills in a Kaggle competition.
#
# ## Sections
# * <a href="#p1">Part 1</a>: Text Feature Extraction & Classification Pipelines
# * <a href="#p2">Part 2</a>: Latent Semantic Indexing
# * <a href="#p3">Part 3</a>: Word Embeddings with Spacy
# * <a href="#p4">Part 4</a>: Post Lecture Assignment

# # Text Feature Extraction & Classification Pipelines (Learn)
# <a id="p1"></a>

# + [markdown] toc-hr-collapsed=true
# ## Follow Along
#
# What you should be doing now:
# 1. Join the Kaggle competition
# 2. Download the data
# 3. Train a model (try using the pipe method demoed in lecture)
# -

# ### Load Competition Data

# +
import pandas as pd

# You may need to change the path
train = pd.read_csv('./whiskey-reviews-dspt4/train.csv')
test = pd.read_csv('./whiskey-reviews-dspt4/test.csv')
print(train.shape, test.shape)
# -

train.head()

# Distribution of ratingCategory: 0 (Excellent), 1 (Good), 2 (Poor)
train.ratingCategory.value_counts()

# Read a few reviews from the "Excellent" category
pd.set_option('display.max_colwidth', 0)
train[train.ratingCategory == 0].sample(3)

# Read a few reviews from the "Poor" category
train[train.ratingCategory == 2].sample(3)

# ### Split the Training Set into Train/Validation

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    train['description'], train['ratingCategory'],
    test_size=0.2,
    stratify=train['ratingCategory'],  # keep class balance in both splits
    random_state=42)

print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# -

# ### Define Pipeline Components

# +
# FIX: Pipeline and GridSearchCV were used below without being imported, and
# the vectorizer/classifier were left as `...` placeholders (which crash on fit).
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier

vect = TfidfVectorizer(stop_words='english')
clf = RandomForestClassifier(random_state=42)

pipe = Pipeline([('vect', vect), ('clf', clf)])
# -

# ### Define Your Search Space
# You're looking for both the best hyperparameters of your vectorizer and
# your classification model.

# +
parameters = {
    'vect__max_df': (0.75, 1.0),
    'clf__max_depth': (5, 10, 15, 20)
}

grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=4, verbose=1)
grid_search.fit(X_train, y_train)  # FIX: was grid_search.fit(..., ...)
# -

# ### Make a Submission File
# *Note:* In a typical Kaggle competition you are only allowed two submissions
# a day, so only submit when you expect higher test accuracy. For this
# competition the max daily submissions are capped at **20**. Submit for each
# demo and for your assignment.

# Predictions on test sample
pred = grid_search.predict(test['description'])

submission = pd.DataFrame({'id': test['id'], 'ratingCategory': pred})
# Make sure the category is an integer
submission['ratingCategory'] = submission['ratingCategory'].astype('int64')
submission.head()

subNumber = 0

# +
# Save your submission file.
# Best to use an integer or timestamp for different versions of your model.
submission.to_csv(f'./whiskey-reviews-dspt4/submission{subNumber}.csv', index=False)
subNumber += 1
# -

# ## Challenge
#
# You're trying to achieve a minimum of 70% accuracy on your model.

# ## Latent Semantic Indexing (Learn)
# <a id="p2"></a>

# + [markdown] toc-hr-collapsed=true
# ## Follow Along
# 1. Join the Kaggle competition
# 2. Download the data
# 3. Train a model & try:
#     - Creating a text extraction & classification pipeline
#     - Tuning the pipeline with a `GridSearchCV` or `RandomizedSearchCV`
#     - Adding some Latent Semantic Indexing (lsi) into your pipeline.
#       *Note:* You can grid search a nested pipeline, but you have to use
#       double underscores, e.g. `lsi__svd__n_components`
# 4. Make a submission to Kaggle
# -

# ### Define Pipeline Components

# +
# LSI = vectorizer + truncated SVD, nested inside the outer pipeline so the
# grid search can reach its parameters through the `lsi__` prefix.
svd = TruncatedSVD(n_components=100, algorithm='randomized', n_iter=10)
lsi = Pipeline([('vect', TfidfVectorizer(stop_words='english')), ('svd', svd)])
clf = RandomForestClassifier(random_state=42)

pipe = Pipeline([('lsi', lsi), ('clf', clf)])
# -

# ### Define Your Search Space
# You're looking for both the best hyperparameters of your vectorizer and
# your classification model.

# +
parameters = {
    'lsi__svd__n_components': [10, 100, 250],
    # FIX: was 'vect__max_df' — the outer pipeline has no 'vect' step, so the
    # vectorizer must be addressed through the nested 'lsi' pipeline.
    'lsi__vect__max_df': (0.75, 1.0),
    'clf__max_depth': (5, 10, 15, 20)
}

grid_search = GridSearchCV(pipe, parameters, cv=5, n_jobs=4, verbose=1)
grid_search.fit(X_train, y_train)  # FIX: was grid_search.fit(..., ...)
# -

# ### Make a Submission File

# Predictions on test sample
pred = grid_search.predict(test['description'])

submission = pd.DataFrame({'id': test['id'], 'ratingCategory': pred})
# Make sure the category is an integer
submission['ratingCategory'] = submission['ratingCategory'].astype('int64')
submission.head()

# +
# Save your submission file.
submission.to_csv(f'./whiskey-reviews-dspt4/submission{subNumber}.csv', index=False)
subNumber += 1
# -

# ## Challenge
#
# Continue to apply Latent Semantic Indexing (LSI) to various datasets.

# # Word Embeddings with Spacy (Learn)
# <a id="p3"></a>

# ## Follow Along

# +
# Apply to your dataset
from scipy.stats import randint

param_dist = {
    'max_depth': randint(3, 10),
    'min_samples_leaf': randint(2, 15)
}
# -

# +
# Continue word-embedding work here.
# NOTE: with spaCy installed, replace the TF-IDF + SVD step below with dense
# document vectors (nlp(doc).vector) as the classifier's features.
emb_pipe = Pipeline([
    ('vect', TfidfVectorizer(stop_words='english')),
    ('svd', TruncatedSVD(n_components=100)),
    ('clf', GradientBoostingClassifier(random_state=42))
])

rand_search = RandomizedSearchCV(
    emb_pipe,
    # param_dist targets the classifier, so prefix its keys with 'clf__'.
    {f'clf__{k}': v for k, v in param_dist.items()},
    n_iter=5, cv=5, n_jobs=4, verbose=1, random_state=42)
rand_search.fit(X_train, y_train)
# -

# ### Make a Submission File

# Predictions on test sample
pred = rand_search.predict(test['description'])  # FIX: was `pred = ...predict(...)`, a syntax error

submission = pd.DataFrame({'id': test['id'], 'ratingCategory': pred})
# Make sure the category is an integer
submission['ratingCategory'] = submission['ratingCategory'].astype('int64')
submission.head()

# Save your submission file.
submission.to_csv(f'./whiskey-reviews-dspt4/submission{subNumber}.csv', index=False)
subNumber += 1

# ## Challenge
#
# What you should be doing now:
# 1. Join the Kaggle competition
# 2. Download the data
# 3. Train a model & try:
#     - Creating a text extraction & classification pipeline
#     - Tuning the pipeline with a `GridSearchCV` or `RandomizedSearchCV`
#     - Adding some Latent Semantic Indexing (lsi) into your pipeline.
#       *Note:* You can grid search a nested pipeline, but you have to use
#       double underscores, e.g. `lsi__svd__n_components`
#     - Extracting word embeddings with Spacy and using those embeddings as
#       your features for a classification model.
# 4. Make a submission to Kaggle

# # Post Lecture Assignment
# <a id="p4"></a>
#
# Your primary assignment this afternoon is to achieve a minimum of 70%
# accuracy on the Kaggle competition. Once you have achieved 70% accuracy,
# please work on the following:
#
# 1. Research "Sentiment Analysis". Provide answers in markdown to the
#    following questions:
#     - What is "Sentiment Analysis"?
#     - Is Document Classification different than "Sentiment Analysis"?
#       Provide evidence for your response.
#     - How do you create labeled sentiment data? Are those labels really
#       sentiment?
#     - What are common applications of sentiment analysis?
# 2. Research why word embeddings worked better for the lecture notebook than
#    on the whiskey competition.
#     - This [text classification documentation](https://developers.google.com/machine-learning/guides/text-classification/step-2-5)
#       from Google might be of interest.
#     - Neural networks are becoming more popular for document classification.
#       Why is that the case?
module3-document-classification/Raw_Assignment_LSDS413.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/realtechz/Tensorflow_zero_to_mastery/blob/main/01_TF_regression/01_NN_regression_with_TF.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

#01_TF_regression/

# # Introduction to Regression with Neural Networks in TensorFlow
#
# There are many definitions for a regression problem, but in our case we are
# going to simplify it: predicting a numerical variable based on some other
# combination of variables — even shorter, predicting a number.

# Import TensorFlow
import tensorflow as tf
print(tf.__version__)

# ## Creating data to view and fit

# +
import numpy as np
import matplotlib.pyplot as plt

# Create the features
X = np.array([-7.0, -4.0, -1.0, 2.0, 5.0, 8.0, 11.0, 14.0])

# Create labels (they follow y = X + 10)
y = np.array([3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0])

# Visualize it
plt.scatter(X, y)
# -

# Confirm the feature/label relationship
y == X + 10

# ## Input and output shapes

# Create a demo tensor for our housing price prediction problem
house_info = tf.constant(["bedroom", "bathroom", "garage"])
house_price = tf.constant([939700])
house_info, house_price

X[0], y[0]

X[1], y[1]

# Scalar in, scalar out: both shapes are ()
input_shape = X[0].shape
output_shape = y[0].shape
input_shape, output_shape

X[0].ndim

# In simple words, we are going to use 1 X value to predict 1 y value.

# Turn our NumPy arrays into tensors
X = tf.constant(X)
y = tf.constant(y)
X, y

input_shape = X[0].shape
output_shape = y[0].shape
input_shape, output_shape

plt.scatter(X, y)

# ## Steps in modelling with TensorFlow
#
# 1. **Creating a model** — define the input and output layers, as well as the
#    hidden layers of a deep learning model.
# 2. **Compiling a model** — define the loss function (tells the model how
#    wrong it is), the optimizer (tells the model how to improve) and the
#    evaluation metrics (how we interpret its performance).
# 3. **Fitting a model** — let the model try to find patterns between X and y
#    (features and labels).

# +
# Set random seed for reproducibility
tf.random.set_seed(42)

# 1. Create a model using the Sequential API
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1)
])

# 2. Compile the model (mae is short for mean absolute error)
model.compile(loss=tf.keras.losses.mae,
              optimizer=tf.keras.optimizers.SGD(),
              metrics=["mae"])

# 3. Fit the model
model.fit(X, y, epochs=5)
# -

# Check out X and y
X, y

# Try to make a prediction using our model (the ideal answer is 17 + 10 = 27)
y_pred = model.predict([17.0])
y_pred

y_pred + 11

# ## Improving our model
#
# We can improve our model by altering the steps we took to create it:
#
# 1. **Creating a model** — add more layers, increase the number of hidden
#    units (also called neurons) within each of the hidden layers, change the
#    activation function of each layer.
# 2. **Compiling the model** — change the optimization function or perhaps the
#    **learning rate** of the optimization function.
# 3. **Fitting the model** — fit for more **epochs** (train for longer) or on
#    more data.

# +
# Let's rebuild our model
tf.random.set_seed(42)

# 1. Create a model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1)
])

# 2. Compile the model
model.compile(loss=tf.keras.losses.mae,
              optimizer=tf.keras.optimizers.SGD(),
              metrics=["mae"])

# 3. Fit the model (this time we will train it for longer)
model.fit(X, y, epochs=100)
# -

# Remind ourselves of the data
X, y

# Let's see if our model's prediction has improved
model.predict([17.0])

# +
# Let's see what we can do to improve it further.
# 1. Create the model (this time with an extra hidden layer with 100 hidden units)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(1)
])

# 2. Compile the model
model.compile(optimizer=tf.keras.optimizers.SGD(),
              loss=tf.keras.losses.mae,
              metrics=["mae"])

# 3. Fit the model
model.fit(X, y, epochs=100)
# -

model.predict([17.0])

# +
# One more variation: 50 linear units and the Adam optimizer
model = tf.keras.Sequential([
    tf.keras.layers.Dense(50, activation=None),
    tf.keras.layers.Dense(1)
])

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              loss=tf.keras.losses.mae,
              metrics=["mae"])

model.fit(X, y, epochs=100)
# -

model.predict([17.0])

# ## Evaluating a model
#
# In practice, a typical workflow you will go through when building neural
# networks is: build the model -> fit it -> evaluate it -> start again.

# When it comes to evaluation, there are 3 words you should memorize:
#
# > "Visualize, visualize, visualize"
#
# It's a good idea to visualize:
# * the data — what data are we working with? What does it look like?
# * the model itself — what does our model look like?
# * the training of a model — how does a model perform while it learns?
# * the predictions of the model — how do the predictions of a model line up
#   against the ground truth (the original labels)?

# Make a bigger dataset
X = tf.range(-100, 100, 4)
X

# Make labels for the dataset
y = X + 10
y

plt.scatter(X, y)

# ### The 3 sets...
#
# * **Train set** — 70-80% of the data
# * **Validation set** — 10-15% of the data
# * **Test set** — 10-15% of the data

len(X)

# Split the data into train and test sets
X_train = X[:40]
y_train = y[:40]  # BUG FIX: was `y_train = X[:40]` — the labels were set to the features
X_test = X[40:]
y_test = y[40:]

len(X_train), len(X_test)

# ### Visualizing the data

# +
X = tf.range(-100, 100, 4)
y = X + 10
X_train = X[:40]
y_train = y[:40]
X_test = X[40:]
y_test = y[40:]

plt.figure(figsize=(10, 7))
# Plot training data in blue
plt.scatter(X_train, y_train, c='b', label='Training data')
# Plot test data in green
plt.scatter(X_test, y_test, c='g', label='Testing data')
# Show the legend
plt.legend();
# -

plt.scatter(X_test, y_test)

# +
# 1. Create the model
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1)
])

# 2. Compile the model
model.compile(optimizer=tf.keras.optimizers.SGD(),
              loss=tf.keras.losses.mae,
              metrics=["mae"])

# 3. Fit the model
#model.fit(X_train, y_train, epochs = 100)
# -

# ### Visualizing a model

# model.summary() errors if the model has not been run at least once
# or input_shape is not given:
#model.summary()

# +
tf.random.set_seed(42)

model = tf.keras.Sequential([
    # NOTE: "Inpur_layer" typo kept as-is — layer names are part of saved
    # model artifacts and summaries produced by earlier runs.
    tf.keras.layers.Dense(10, input_shape=[1], name="Inpur_layer"),
    tf.keras.layers.Dense(1, name="Output_layer")
], name="my_model")

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
              loss=tf.keras.losses.mae,
              metrics=["mae"])

model.fit(X_train, y_train, verbose=0, epochs=100)
# -

model.summary()

# * **Total params** — total number of parameters in the model.
# * **Trainable params** — parameters (patterns) the model can update as it
#   trains.
# * **Non-trainable params** — parameters that aren't updated during training
#   (typical when you bring in parameters from other models during
#   **transfer learning**).

from tensorflow.keras.utils import plot_model
plot_model(model=model, show_shapes=True)

# ### Visualizing our model's predictions
#
# To visualize predictions, it's a good idea to plot them against the ground
# truth labels. Often you will see this in the form of y_test (or y_true)
# versus y_pred.

# Make some predictions
y_pred = model.predict(X_test)
y_pred

y_test

# Let's create a plotting function
def plot_predictions(train_data=X_train,
                     train_labels=y_train,
                     test_data=X_test,
                     test_labels=y_test,
                     predictions=y_pred):
    """
    Plots training data, test data and compares predictions to ground truth.
    """
    plt.figure(figsize=(10, 7))
    # Plot the training data in blue
    plt.scatter(train_data, train_labels, c="b", label="training data")
    # Plot the testing data in green
    plt.scatter(test_data, test_labels, c="g", label="testing data")
    # Plot the model's predictions in red
    plt.scatter(test_data, predictions, c="r", label="predictions")
    plt.legend();

plot_predictions(train_data=X_train,
                 train_labels=y_train,
                 test_data=X_test,
                 test_labels=y_test,
                 predictions=y_pred)

# ### Evaluating our model's predictions with regression evaluation metrics
#
# Alongside visualizations, evaluation metrics are your alternative best
# option for evaluating your model. Two of the main metrics used for
# regression problems are:
#
# * **Mean absolute error (MAE)** — the mean difference between each of the
#   predictions.
# * **Mean squared error (MSE)** — the squared mean difference between the
#   predictions (use if larger errors are more detrimental than smaller ones).
#
# The lower each of these values, the better.
#
# You can also use model.evaluate(), which returns the loss of the model as
# well as any metrics set up during the compile step.

# Evaluate the model on the test set
model.evaluate(X_test, y_test)

# Calculate the mean absolute error instead of using the evaluate method
mae = tf.metrics.mean_absolute_error(y_true=y_test, y_pred=y_pred)
mae

y_pred

y_test

# Notice how the two tensors are not of the same shape —
# first convert y_pred to a tensor, then squeeze or reshape it.
y_pred_tensor = tf.constant(y_pred)
y_pred_tensor

y_pred_tensor_reshaped = tf.reshape(y_pred_tensor, (10,))  # tf.squeeze works too
y_pred_tensor_reshaped

# Now pass the matching-shape tensors to the metric to calculate MAE directly
mae = tf.metrics.mean_absolute_error(y_true=y_test, y_pred=y_pred_tensor_reshaped)
mae

# Calculate the mean squared error
MSE = tf.metrics.mean_squared_error(y_true=y_test, y_pred=y_pred_tensor_reshaped)
MSE

def mae(y_test, y_pred):
    """
    Calculates mean absolute error between y_test and y_pred.
    """
    return tf.metrics.mean_absolute_error(y_test, y_pred)

def mse(y_test, y_pred):
    """
    Calculates mean squared error between y_test and y_pred.
    """
    return tf.metrics.mean_squared_error(y_test, y_pred)

# ### **Running experiments to improve a model**
#
# After seeing the evaluation metrics and the predictions your model makes,
# it's likely you'll want to improve it. Three of the main ways:
#
# * **Get more data** — more examples for your model to train on (more
#   opportunities to learn patterns).
# * **Make your model larger** (use a more complex model) — more layers or
#   more hidden units in each layer.
# * **Train for longer** — give your model more of a chance to find the
#   patterns in the data.
#
# Since we created our dataset, we could easily make more data, but this
# isn't always the case with real-world datasets. So let's look at how we can
# improve our model using 2 and 3. To do so, we'll build 3 models and compare
# their results:
#
# * model_1 — same as the original model, 1 layer, trained for 100 epochs.
# * model_2 — 2 layers, trained for 100 epochs.
# * model_3 — 2 layers, trained for 500 epochs.

# Build model_1

# +
# Set random seed
tf.random.set_seed(42)

# Replicate the original model
model_1 = tf.keras.Sequential([
    tf.keras.layers.Dense(1)
])

# Compile the model
model_1.compile(loss=tf.keras.losses.mae,
                optimizer=tf.keras.optimizers.SGD(),
                metrics=['mae'])

# Fit the model
model_1.fit(X_train, y_train, verbose=0, epochs=100)
# -

# Make and plot predictions for model_1
y_preds_1 = model_1.predict(X_test)
plot_predictions(predictions=y_preds_1)

# Calculate model_1 metrics
mae_1 = mae(y_test, y_preds_1.squeeze()).numpy()
mse_1 = mse(y_test, y_preds_1.squeeze()).numpy()
mae_1, mse_1

# Build model_2

# +
# Set random seed
tf.random.set_seed(42)

# Replicate model_1 and add an extra layer
model_2 = tf.keras.Sequential([
    tf.keras.layers.Dense(1),
    tf.keras.layers.Dense(1)  # add a second layer
])

# Compile the model
model_2.compile(loss=tf.keras.losses.mae,
                optimizer=tf.keras.optimizers.SGD(),
                metrics=['mae'])

# Fit the model
model_2.fit(X_train, y_train, epochs=100, verbose=0)  # verbose=0 for less output
# -

# Make and plot predictions for model_2
y_preds_2 = model_2.predict(X_test)
plot_predictions(predictions=y_preds_2)

# Calculate model_2 metrics
mae_2 = mae(y_test, y_preds_2.squeeze()).numpy()
mse_2 = mse(y_test, y_preds_2.squeeze()).numpy()
mae_2, mse_2

# Build model_3

# +
# Set random seed
tf.random.set_seed(42)

# Replicate model_2
model_3 = tf.keras.Sequential([
    tf.keras.layers.Dense(1),
    tf.keras.layers.Dense(1)
])

# Compile the model
model_3.compile(loss=tf.keras.losses.mae,
                optimizer=tf.keras.optimizers.SGD(),
                metrics=['mae'])

# Fit the model (this time for 500 epochs, not 100)
model_3.fit(X_train, y_train, epochs=500, verbose=0)  # verbose=0 for less output
# -

# Make and plot predictions for model_3
y_preds_3 = model_3.predict(X_test)
plot_predictions(predictions=y_preds_3)

# Strange — we trained for longer but our model performed worse?
#
# As it turns out, our model might have trained too long and thus produced
# worse results (we'll see ways to prevent training for too long later on).

# Calculate model_3 metrics
mae_3 = mae(y_test, y_preds_3.squeeze()).numpy()
mse_3 = mse(y_test, y_preds_3.squeeze()).numpy()
mae_3, mse_3

# ### Comparing results

model_results = [["model_1", mae_1, mse_1],
                 ["model_2", mae_2, mse_2],
                 ["model_3", mae_3, mse_3]]  # BUG FIX: last row listed mae_3 twice, so the mse column was wrong

import pandas as pd
all_results = pd.DataFrame(model_results, columns=["model", "mae", "mse"])
all_results

# From our experiments, it looks like model_2 performed the best.
#
# And now you might be thinking, "wow, comparing models is tedious..." —
# and it definitely can be; we've only compared 3 models here. But this is
# part of what machine learning modelling is about: trying many different
# combinations of models and seeing which performs best. Each model you build
# is a small experiment.
#
# `🔑 Note: One of your main goals should be to minimize the time between your
# experiments. The more experiments you do, the more things you'll figure out
# which don't work and, in turn, get closer to figuring out what does work.
# Remember the machine learning practitioner's motto: "experiment, experiment,
# experiment".`
#
# Another thing you'll find is that what you thought may work (such as
# training a model for longer) may not always work, and the exact opposite is
# also often the case.
#
# ## Tracking your experiments
# One really good habit to get into is tracking your modelling experiments to
# see which perform better than others. We've done a simple version of this
# above (keeping the results in different variables).
#
# 📖 Resource: As you build more models, you'll want to look into tools such
# as:
#
# * TensorBoard — a component of the TensorFlow library to help track
#   modelling experiments (we'll see this later).
# * Weights & Biases — a tool for tracking all kinds of machine learning
#   experiments (and it plugs into TensorBoard).

# ### Saving a model
# Once you've trained a model and found one which performs to your liking,
# you'll probably want to save it for use elsewhere (like a web application
# or mobile device). You can save a TensorFlow/Keras model using model.save().
#
# There are two ways to save a model in TensorFlow:
#
# 1. The SavedModel format (default).
# 2. The HDF5 format.
#
# The main difference: the SavedModel format is automatically able to save
# custom objects (such as special layers) without additional modifications
# when loading the model back in. Which one should you use? It depends on
# your situation, but the SavedModel format will suffice most of the time.
# Both methods use the same method call.

# C:\\Users\\ultim

# Save a model using the SavedModel format
# %cd D:\Coding project\GitHub\Tensorflow_zero_to_mastery\01_TF_regression
model_2.save('best_model_SavedModel_format')

# Check it out — outputs a protobuf binary file (.pb) as well as other files
# !ls best_model_SavedModel_format

# Save a model using the HDF5 format
model_2.save("best_model_HDF5_format.h5")  # note the addition of '.h5' on the end

# Check it out
# !ls best_model_HDF5_format.h5

# ### Loading a model
# We can load a saved model using the load_model() method. Loading a model is
# the same for both formats (SavedModel and HDF5), as long as the pathnames
# to the particular formats are correct.
# + colab={"base_uri": "https://localhost:8080/"} id="8kwIy8TXr3LX" outputId="ec27ac9e-3c7a-4c48-cd5e-8971620b1eae" # Load a model from the SavedModel format # %cd D:\Coding project\GitHub\Tensorflow_zero_to_mastery\01_TF_regression loaded_saved_model = tf.keras.models.load_model("best_model_SavedModel_format") loaded_saved_model.summary() # %cd C:\\Users\\ultim # + colab={"base_uri": "https://localhost:8080/"} id="DatUQOBEr3OE" outputId="9d8d03de-6555-4691-9bb0-7a6ce050b1f2" # Compare model_2 with the SavedModel version (should return True) model_2_preds = model_2.predict(X_test) saved_model_preds = loaded_saved_model.predict(X_test) mae(y_test, saved_model_preds.squeeze()).numpy() == mae(y_test, model_2_preds.squeeze()).numpy() # + colab={"base_uri": "https://localhost:8080/"} id="RrN5xMPssgQe" outputId="059af04c-b050-46bd-fe7d-5ab7aded50ba" # %cd D:\Coding project\GitHub\Tensorflow_zero_to_mastery\01_TF_regression # Load a model from the HDF5 format loaded_h5_model = tf.keras.models.load_model("best_model_HDF5_format.h5") loaded_h5_model.summary() # %cd C:\\Users\\ultim # + [markdown] id="1vnDoMjTuekP" # ## A larger Example # + id="UdRFvOQ9ui0G" # Import required libraries import tensorflow as tf import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split # + id="T32d7oZjvFkx" # Read in the insurance dataset insurance = pd.read_csv("https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/insurance.csv") # + colab={"base_uri": "https://localhost:8080/", "height": 205} id="_D8EnF-VvGBs" outputId="cb33e9d6-b9b4-48f3-fdbf-26e08cf92ba3" insurance.head() # + colab={"base_uri": "https://localhost:8080/", "height": 442} id="lMRiZdUPvGDx" outputId="9975270e-a257-48cf-c307-61068dfba398" # lets do one-hot encode our dataFrrame so its all numbers insurance_one_hot = pd.get_dummies(insurance) insurance_one_hot # + id="y3AzWEArvGFe" # Create a X and y values (features and labels) X 
=insurance_one_hot.drop(labels="charges", axis= 1) y = insurance_one_hot["charges"] # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="coP7DoZ4vGHn" outputId="51b71197-2b76-47c8-a89d-5c5175bfbaae" X.head() # + colab={"base_uri": "https://localhost:8080/"} id="8ADduRy5vGKI" outputId="f6cf9528-811b-4237-8c03-a5c8eae98b81" y.head() # + id="mytkIQzqvGLY" X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42) # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="rhOSJj9nvGN3" outputId="90713ec7-c94a-4831-dc4a-f827a9a206dc" X_train.head() # + colab={"base_uri": "https://localhost:8080/", "height": 225} id="3YoU47vWx1ip" outputId="02b15d99-9e60-4519-8391-00e3fbbb9ea3" X_test.head() # + colab={"base_uri": "https://localhost:8080/"} id="WbUASpnRx_vC" outputId="67bcc432-ab86-49df-81eb-33b5076d2c10" X_train.shape, X_test.shape, y_train.shape, y_test.shape # + colab={"base_uri": "https://localhost:8080/"} id="xYyhO7w9ymjO" outputId="3abaf2a0-dcea-437c-f830-66d834bc662a" # create a model tf.random.set_seed(42) model = tf.keras.Sequential([ tf.keras.layers.Dense(10), tf.keras.layers.Dense(1)]) model.compile(optimizer=tf.keras.optimizers.SGD(), loss=tf.keras.losses.mean_absolute_error, metrics=["mae"]) model.fit(X_train, y_train, epochs= 100, verbose=2) # + colab={"base_uri": "https://localhost:8080/"} id="rMXJY2T40HUr" outputId="62cec187-c85c-4bae-afb7-d9e95004282b" # check the results of the isnurance model on the test data model.evaluate(X_test,y_test) # + colab={"base_uri": "https://localhost:8080/"} id="dc214fgO1CNH" outputId="c7efc3c5-6742-4a03-a51e-77851fa22772" y_train.median(), y_train.mean() # + [markdown] id="hV03b9-g1Y8F" # Right now the model sucks so lets improve it # + [markdown] id="HU6icYx-1kUQ" # to (try) improve our model we will run 2 experiments: # 1. Add an extra layer with more hidden units # 2. Train for longer # 3. 
(insert your own experiment here) # + id="CJId0Cmo17-h" # + id="Q3WFh5_518jU" # + id="dCEp75ht18mb" # + id="6THp8qqH18pj" # + id="NvNpSHw318-g" # + id="qHhOjcLM19Gk" # + id="Xtec_uWS19HK" # + id="8Vjju3S619H5" # + id="cgxih_SV19Ip" # + id="xqC1fYoD19JY" # + id="eDaAyTXQ19KG" # + id="oaCM7upw19K1" # + id="AMrrxJ4Q19Lj" # + id="DEuKBTpW19MX" # + id="v7cEHnQk19Nd" # + id="rNeM_kXP19OQ" # + id="IFdSjAYk19PA" # + id="QHKEsNeV19P1" # + id="XkaSz8ex19Qn" # + id="G7a5Pp_319RX" # + id="hC3yUdnS19SM" # + id="tkyT_BaE19S8" # + id="DeKrFqrv19Tt" # + id="2qg6Oy_t19Uf" # + id="doPLb7Hz19VT" # + id="fryVxu2Q19WK" # + id="4ahotIkO19W_" # + id="VRlKwkdq19Xx" # + id="ztg_oOVs19Yl" # + id="AsI8VAFt19Zc" # + id="rrgc-CjG19aO"
01_TF_regression/01_NN_regression_with_TF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import requests
import io
import pandas as pd
import json


def run(csv):
    """Upload a CSV file to the rule-training service and return its result.

    Parameters
    ----------
    csv : str
        Path to the CSV file to upload.

    Returns
    -------
    pandas.DataFrame
        DataFrame built from the ``'df'`` key of the service's JSON response.
    """
    # NOTE(review): '1172.16.58.3' is not a valid IPv4 address (first octet
    # exceeds 255) — this looks like an anonymization artifact; confirm the
    # real host/port before running.
    url = 'http://1172.16.58.3:8000/train_rules'
    # Use a context manager so the uploaded file handle is always closed.
    # The original opened the file inline and never closed it (handle leak).
    with open(csv, 'rb') as fh:
        r = requests.post(url, files={"file": fh})
    data = r.text
    df = pd.DataFrame(json.loads(data)['df'])
    return df


df = run('./data/ihc_record.csv')
df
trainer_api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 10 minutes to CLX
#
# This is a short introduction to CLX geared mainly towards new users of the code.
#
# ## What are these libraries?
#
# CLX (Cyber Log Accelerators) provides a simple API for security analysts, data scientists, and engineers to quickly get started applying RAPIDS to real-world cyber use cases. CLX uses the GPU dataframe ([cuDF](https://github.com/rapidsai/cudf)) and other RAPIDS packages to execute cybersecurity and information security workflows. The following packages are available:
#
# * analytics - Machine learning and statistics functionality
# * ip - IPv4 data translation and parsing
# * parsers - Cyber log Event parsing
# * io - Input and output features for a workflow
# * workflow - Workflow which receives input data and produces analytical output data
# * osi - Open source integration (VirusTotal, FarsightDB and Whois)
# * dns - TLD extraction
#
#
# ## When to use CLX
#
# Use CLX to build your cyber data analytics workflows for a GPU-accelerated environment using RAPIDS. CLX contains common cyber and cyber ML functionality, such as log parsing for specific data sources, cyber data type parsing (e.g., IPv4), and DGA detection. CLX also provides the ability to integrate this functionality into a CLX workflow, which simplifies execution of the series of parsing and ML functions needed for end-to-end use cases.
#
# ## Log Parsing
#
# CLX provides traditional parsers for some common log types.
# Here’s an example parsing a common [Windows Event Log](https://www.ultimatewindowssecurity.com/securitylog/encyclopedia/default.aspx) of event code type [4770](https://www.ultimatewindowssecurity.com/securitylog/encyclopedia/event.aspx?eventid=4770).
import cudf from clx.parsers.windows_event_parser import WindowsEventParser event = "04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5156\\nEventType=0\\nType=Information\\nComputerName=user234.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=241754521\\nKeywords=Audit Success\\nMessage=The Windows Filtering Platform has permitted a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t4\\r\\n\\tApplication Name:\\tSystem\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t172.16.58.3\\r\\n\\tSource Port:\\t\\t138\\r\\n\\tDestination Address:\\t172.16.58.3\\r\\n\\tDestination Port:\\t\\t138\\r\\n\\tProtocol:\\t\\t17\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t0\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44" wep = WindowsEventParser() df = cudf.DataFrame() df['raw'] = [event] result_df = wep.parse(df, 'raw') result_df.head() # ## Cyber Data Types # # CLX provides the ability to work with different data types that are specific to cybersecurity, such as IPv4 and DNS. Here’s an example of how to get started. # ### IPv4 # The [IPv4](https://en.wikipedia.org/wiki/IPv4) data type is still commonly used and present in log files. Below we demonstrate functionality. Additional operations are available in the `clx.ip` module. # #### Convert IPv4 values to integers import clx.ip import cudf df = cudf.Series(["172.16.17.32", "172.16.58.3"]) result_df = clx.ip.ip_to_int(df) print(result_df) # #### Check if IPv4 values are multicast import clx.ip import cudf df = cudf.Series(["172.16.58.3", "192.168.127.12", "172.16.17.32"]) result_df = clx.ip.is_multicast(df) print(result_df) # ## TLD Extraction # CLX provides the ability to extract the TLD from the registered domain and subdomains of a URL, using the public suffix list. 
# + import cudf from clx.dns import dns_extractor as dns input_df = cudf.DataFrame( { "url": [ "http://www.google.com", "gmail.com", "github.com", "https://pandas.pydata.org", "http://www.worldbank.org.kg/", "waiterrant.blogspot.com", "http://forums.news.cnn.com.ac/", "http://forums.news.cnn.ac/", "ftp://b.cnn.com/", "a.news.uk", "a.news.co.uk", "https://a.news.co.uk", "107-193-100-2.lightspeed.cicril.sbcglobal.net", "a23-44-13-2.deploy.static.akamaitechnologies.com", ] } ) output_df = dns.parse_url(input_df["url"]) output_df.head(14) # - # ## Machine Learning # # CLX offers machine learning and statistcs functions that are ready to integrate into your CLX workflow. # # #### Calculate Rolling Z-Score # Calculate a rolling z-score on a given cuDF series. import clx.analytics.stats import cudf sequence = [3,4,5,6,1,10,34,2,1,11,45,34,2,9,19,43,24,13,23,10,98,84,10] series = cudf.Series(sequence) zscores_df = cudf.DataFrame() zscores_df['zscore'] = clx.analytics.stats.rzscore(series, 7) print(zscores_df) # ## Workflows # # Now that we've demonstrated the basics of CLX , let's try to tie some of this functionality into a CLX workflow. A workflow is defined as a function that receives a cuDF dataframe, performs some operations on it, and then returns an output cuDF dataframe. In our use case, we decide to show how to parse raw WinEVT data within a workflow. 
# + import cudf from clx.workflow.workflow import Workflow from clx.parsers.windows_event_parser import WindowsEventParser wep = WindowsEventParser() class LogParseWorkflow(Workflow): def workflow(self, dataframe): output = wep.parse(dataframe, "raw") return output input_df = cudf.DataFrame() input_df["raw"] = ["04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5156\\nEventType=0\\nType=Information\\nComputerName=user234.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=241754521\\nKeywords=Audit Success\\nMessage=The Windows Filtering Platform has permitted a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t4\\r\\n\\tApplication Name:\\tSystem\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t100.20.100.20\\r\\n\\tSource Port:\\t\\t138\\r\\n\\tDestination Address:\\t172.16.58.3\\r\\n\\tDestination Port:\\t\\t138\\r\\n\\tProtocol:\\t\\t17\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t0\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44"] lpw = LogParseWorkflow(name="my-log-parsing-workflow") lpw.workflow(input_df) # - # #### Workflow I/O # # A workflow can receive and output data from different locations, including CSV files and Kafka. To integrate I/O into your workflow, simply indicate your workflow configurations within a `workflow.yaml` file or define your configurations at instantiation within a python dictionary. # The workflow class will first look for any configuration file here: # # * /etc/clx/[workflow-name]/workflow.yaml then # * ~/.config/clx/[workflow-name]/workflow.yaml # # To learn more about workflow configurations visit the [CLX Workflow](./intro-clx-workflow.html) page # To demonstrate the input functionality, we'll create a small CSV input file. 
import cudf

input_df = cudf.DataFrame()
input_df["raw"] = ["04/03/2019 11:58:59 AM\\nLogName=Security\\nSourceName=Microsoft Windows security auditing.\\nEventCode=5156\\nEventType=0\\nType=Information\\nComputerName=user234.test.com\\nTaskCategory=Filtering Platform Connection\\nOpCode=Info\\nRecordNumber=241754521\\nKeywords=Audit Success\\nMessage=The Windows Filtering Platform has permitted a connection.\\r\\n\\r\\nApplication Information:\\r\\n\\tProcess ID:\\t\\t4\\r\\n\\tApplication Name:\\tSystem\\r\\n\\r\\nNetwork Information:\\r\\n\\tDirection:\\t\\tInbound\\r\\n\\tSource Address:\\t\\t172.16.58.3\\r\\n\\tSource Port:\\t\\t138\\r\\n\\tDestination Address:\\t172.16.58.3\\r\\n\\tDestination Port:\\t\\t138\\r\\n\\tProtocol:\\t\\t17\\r\\n\\r\\nFilter Information:\\r\\n\\tFilter Run-Time ID:\\t0\\r\\n\\tLayer Name:\\t\\tReceive/Accept\\r\\n\\tLayer Run-Time ID:\\t44"]
input_df.to_csv("alert_data.csv")

# Next, create and run the workflow.

# +
from clx.workflow.workflow import Workflow
from clx.parsers.windows_event_parser import WindowsEventParser
import os

dirpath = os.getcwd()

# BUG FIX: the original built the paths with `dirpath + "alert_data.csv"`.
# os.getcwd() has no trailing separator, so that yields e.g.
# "/current/diralert_data.csv", which does NOT match the file written by
# input_df.to_csv("alert_data.csv") above. os.path.join inserts the
# separator correctly.
source = {
   "type": "fs",
   "input_format": "csv",
   "input_path": os.path.join(dirpath, "alert_data.csv"),
   "schema": ["raw"],
   "delimiter": ",",
   "required_cols": ["raw"],
   "dtype": ["str"],
   "header": 0
}
destination = {
   "type": "fs",
   "output_format": "csv",
   "output_path": os.path.join(dirpath, "alert_data_output.csv")
}

wep = WindowsEventParser()

class LogParseWorkflow(Workflow):
    def workflow(self, dataframe):
        # Parse the raw Windows-event text in the "raw" column and return
        # the structured result.
        output = wep.parse(dataframe, "raw")
        return output

lpw = LogParseWorkflow(source=source, destination=destination, name="my-log-parsing-workflow")
lpw.run_workflow()
# -

# Output data can be read directly from the resulting CSV file.
# Use a context manager so the handle is closed (the original leaked it);
# leaving `lines` as the cell's last expression preserves the notebook display.
with open('alert_data_output.csv', "r") as f:
    lines = f.readlines()
lines

# ## Open Source Threat Intelligence Integration
# Often it's beneficial to integrate open source threat intelligence with collected data.
CLX includes the ability to query [VirusTotal](https://www.virustotal.com) and [FarsightDB](https://www.farsightsecurity.com) directly. An API key is necessary for both of these integrations. # # #### Prerequisites to get API key # * Create an account with https://www.virustotal.com # * Create an account with https://www.farsightsecurity.com from clx.osi.virus_total import VirusTotalClient vt_api_key='<virus total apikey goes here>' vt_client = VirusTotalClient(api_key=vt_api_key) result = vt_client.url_scan(["virustotal.com"]) from clx.osi.farsight import FarsightLookupClient server='https://api.dnsdb.info' fs_api_key='<farsight apikey goes here>' fs_client = FarsightLookupClient(server, fs_api_key, limit=1) result = fs_client.query_rrset("www.dnsdb.info") from clx.osi.whois import WhoIsLookupClient whois_client = WhoIsLookupClient() whois_result = whois_client.whois(["nvidia.com"]) print(whois_result)
docs/source/10min-clx.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: '' # name: '' # --- # # Introduction to Python # # ## Data Management Boot Camp # # ### August 19, 2019 # # <NAME> # Web Development # UMN Libraries # <EMAIL> # ## Why Python? # # ![xkcd: Python](https://imgs.xkcd.com/comics/python.png) # [xkcd.com/353/](https://xkcd.com/353/) # ## Why Python? Seriously... # * Named after Monty Python's Flying Circus! (Sorry...) # * Easy to learn and fun to use. Actually important. # * Open source # * Not only one of the most popular languages, extremely popular for research computing. # # # # ## Why Python? pandas! # * [pandas](https://pandas.pydata.org/) is a Python data analysis library. It's what makes Python competitive with R. # * Includes the Python [SciPy](https://www.scipy.org/) package collection, which includes [NumPy](http://www.numpy.org/). # # ## Why Python? Jupyter Notebooks! # * Text, images, and _executable_ code, all in a single shareable object! # * This workshop itself is a Jupyter Notebook. # * GitHub renders them automatically: https://github.com/UMNLibraries/python-workshop-2019-08 # * Includes pandas by default. # import pandas as pd # ## Python 2 vs. 3 # Python 2 was released in 2000, and will fall out of support in 2020. pandas dropped Python 2 support in July. Python 3 was released in 2008, and is the future of the language. Use Python 3, unless you are using already-existing code in Python 2, or need a package (library) that does not support Python 3, a rare situation. # ## Hello, World! # This is a complete Python program: print('Hello, World!') # ## HelloWorld.java # ```java # public class HelloWorld { # public static void main(String[] args) { # System.out.println("Hello, World"); # } # } # ``` # ## Exercise: Lake Superior's Seiche # A seiche is a standing wave in an enclosed or partially enclosed body of water. 
# # [Wikipedia: Seiche](https://en.wikipedia.org/wiki/Seiche) # # **Please note!** Some of what follows may seem contrived, because it is. Some things we would never do in a real project, but we do here to illustrate Python and programming concepts. As we advance through the exercise, we will get closer and closer to a real-world approach. # ## Lake Superior Water Levels # Get Lake Superior station number from [NOAA Great Lakes Low Water Datums](https://tidesandcurrents.noaa.gov/gldatums.html), then request an [International Great Lakes Datum (IGLD)](https://opendap.co-ops.nos.noaa.gov/axis/webservices/waterlevelrawsixmin/index.jsp). # ## Reading Files with open('CO-OPS__9099064__wl.csv') as file: for line in file: print(line) # ## Whitespace is significant! # What happens when we do this? with open('CO-OPS__9099064__wl.csv') as file: for line in file: print(line) # **Important note:** Four-space indentation is an extremely strong convention in the Python community. # ## Variables file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: for line in file: print(line) # ## Types & Objects # Everything in Python is some _type_ of _object_. Objects contain _attributes_, usually data and related functions, called _methods_. # # Python is _dynamically typed_ because it determines these types at runtime. file_name = 'CO-OPS__9099064__wl.csv' print('file_name is a ', type(file_name)) with open(file_name) as file: print('file is a ', type(file)) for line in file: print('line is a ', type(line)) print(f'line = {line}') # f-strings! break # ## Strong Typing # Python is also _strongly typed_. For example, some operations require operands of specific types. # # What happens when we do this? file_name = 'CO-OPS__9099064__wl.csv' file_name + 3 # We'll come back to types, but first we'll cover some type differences that are conceptual, but not captured in Python types. # ## Procedures # Procedures give a name to some collection of actions. They do not return output. 
# + def print_lines(file_name): with open(file_name) as file: for line in file: print(line) #pass print('print_lines is a ', type(print_lines)) file_name = 'CO-OPS__9099064__wl.csv' print_lines(file_name) #output = print_lines(file_name) #print(output) # - # ## Functions # _Pure functions_ always return the same output for the same input. # + def file_to_list(file_name): lines = [] # list with open(file_name) as file: for line in file: lines.append(line) # append is a method (function) in the list class return lines print('file_to_list is a ', type(file_to_list)) file_name = 'CO-OPS__9099064__wl.csv' lines = file_to_list(file_name) print(lines) # - # ## Named Arguments # Python allows function arguments to have names. The syntax may be confusing at first! # + def file_to_limited_list(file_name, limit): lines = [] with open(file_name) as file: for line in file: lines.append(line) if len(lines) == limit: break return lines file_name = 'CO-OPS__9099064__wl.csv' limit = 3 lines = file_to_limited_list(file_name, limit) #lines = file_to_limited_list(file_name=file_name, limit=limit) #lines = file_to_limited_list(limit=limit, file_name=file_name) # Helps to avoid incorrect argument order. #lines = file_to_limited_list(limit, file_name) # What happens when we do this? print(lines) # - # ## Functional Programming # # ### (An even further diversion!) # # [_Functional programming (FP)_](https://en.wikipedia.org/wiki/Functional_programming) encourages using [_pure functions_](https://en.wikipedia.org/wiki/Pure_function), which always return the same output for the same input, without side effects. FP discourages unnecessary state and state changes. # # Is `print` a pure function? output = print('Hello, World!') type(output) # ## FP vs. OOP # Like most popular languages, Python supports both [_functional programming (FP)_](https://en.wikipedia.org/wiki/Functional_programming) and [_object-orientend programming (OOP)_](https://en.wikipedia.org/wiki/Object-oriented_programming). 
# # [_When is OOP better than FP and vice-versa?_](https://www.quora.com/Computer-Programming/When-is-OOP-better-than-FP-and-vice-versa) It depends. Here I focus on FP, because I find it a fast start and a good fit for research. # # OK, let's get back to types! # ## Lists, a.k.a. Arrays file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: for line in file: row = line.strip().split(',') # split is a function that returns an array print('row is a ', type(row)) print(row) print(row[0], row[1]) break # ## What about the header? file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: line_count = -1 for line in file: line_count = line_count + 1 if line_count == 0: continue row = line.strip().split(',') print(row[0], row[1]) # ## Python csv Package import csv file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: has_header = csv.Sniffer().has_header(file.read(1024)) file.seek(0) # Rewind. reader = csv.reader(file) if has_header: next(reader) # Skip header row. for row in reader: print(row[0], row[1]) break # ## Dictionaries, a.k.a Associative Array, a.k.a, Hash Tables, a.k.a. Hashes file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: reader = csv.DictReader(file) for row in reader: print('row is a', type(row)) print(row) print(row['date_time'], row['water_level']) break # ## Doing stuff with water levels... file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: reader = csv.DictReader(file) water_levels = [] for row in reader: water_levels.append(row['water_level']) water_levels_sum = sum(water_levels) # ## Will this fix our problem? file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: reader = csv.DictReader(file) water_levels = [] for row in reader: water_level = float(row['water_level']) water_levels.append(water_level) water_levels_sum = sum(water_levels) # ## Investigating further... 
import re # regular expressions file_name = 'CO-OPS__9099064__wl.csv' with open(file_name) as file: reader = csv.DictReader(file) water_levels = [] for row in reader: if not (re.match(r'^\d+', row['water_level'])): print('"{}"'.format(row['water_level'])) break water_level = float(row['water_level']) water_levels.append(water_level) water_levels_sum = sum(water_levels) # ## pandas import matplotlib # %matplotlib inline import csv import pandas as pd # ### Dataframes file_name = 'CO-OPS__9099064__wl.csv' df = pd.read_csv( file_name, header=0, parse_dates=True, float_precision='high', # Without this, pandas adds a ridiculous level of precision to some values. ) # ### Matplotlib Axes Object ax = df.plot( x='date_time', y='water_level', title='Lake Superior Levels (IGLD 1985)', legend=False, ) # ### Labeling the Axes ax = df.plot( x='date_time', y='water_level', title='Lake Superior Levels (IGLD 1985)', legend=False, ) ax.set_xlabel('Days from June 26, 2017') ax.set_ylabel('Water Level IGLD (feet)') # ### Mean Difference df['water_level_mean_diff'] = df.water_level - df.water_level.mean() ax_mean_diff = df.plot( x='date_time', y='water_level_mean_diff', title='Lake Superior Levels (IGLD 1985)', legend=False, ) ax_mean_diff.set_xlabel('Days from June 26, 2017') ax_mean_diff.set_ylabel('Water Level Mean Diff IGLD (feet)') # ## Further Learning # * #python channel on [Tech People UMN Slack](https://tech-people-umn.slack.com/) # * [UMN Tech People Coworking](https://umnhackerhours.github.io/) every Wednesday afternoon! # * [Dive Into Python](https://www.cmi.ac.in/~madhavan/courses/prog2-2012/docs/diveintopython3/index.html) # * [Learn Python the Hard Way](https://learnpythonthehardway.org/) # * [Official Python Documentation](https://docs.python.org/3/index.html) # ## Questions? # ## Thank you!
python_intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # encoding: utf-8 import sqlite3 as sql import pandas as pd import datetime import numpy as np from wordcloud import WordCloud, STOPWORDS import matplotlib.pyplot as plt import itertools import sys import re from collections import Counter import plotly.graph_objs as go import networkx as nx from plotly.offline import init_notebook_mode, iplot from IPython.display import display, HTML columns = ['link_id', 'title', 'author', 'affiliations', 'keywords', 'received_date', 'accepted_date', 'published_date', 'abstract'] conn = sql.connect("MSOM.db") cursor = conn.cursor() MSOM = cursor.execute("SELECT * FROM informations;") df_msom = pd.DataFrame.from_records(MSOM.fetchall(), columns=columns) conn = sql.connect("MNSC.db") cursor = conn.cursor() MNSC = cursor.execute("SELECT * FROM informations;") df_mnsc = pd.DataFrame.from_records(MNSC.fetchall(), columns=columns) conn.close() df_msom.dropna(inplace=True, subset=['received_date', 'accepted_date', 'published_date']) df_mnsc.dropna(inplace=True, subset=['received_date', 'accepted_date', 'published_date']) def convert_date(date_string): """ Function to convert string to datetime :param date_string: :return: """ try: date_string = date_string.strip() date_string = date_string.replace('Published Online:', "") date_string = date_string.strip() date = datetime.datetime.strptime(date_string, "%B %d, %Y") except: date = np.nan return date def create_authors_by_year(data): """ :param data: :return: """ authors = [] year_publish = [] time_publish = [] for i in range(0, data.shape[0]): for j in range(0, len(data['author'][i])): authors.append(data['author'][i][j]) year_publish.append(data['year_publish'][i]) time_publish.append(data['time_publish'][i]) return pd.DataFrame(data={'author': authors, 
'year_publish': year_publish, 'time_publish': time_publish}) def create_university_by_year(data): """ :param data: :return: """ university = [] year_publish = [] time_publish = [] for i in range(0, data.shape[0]): for j in range(0, len(data['affiliations'][i])): print data['affiliations'][i][j] affiliations = data['affiliations'][i][j].split(',') b = [x if re.search('(University|College|Institute)', x, re.IGNORECASE) else None for x in affiliations] try: c = filter(None, b) c = c[len(c) - 1] university.append(c.strip()) year_publish.append(data['year_publish'][i]) time_publish.append(data['time_publish'][i]) except: continue return pd.DataFrame(data={'university': university, 'year_publish': year_publish, 'time_publish': time_publish}) def change_affiliation(affiliations): """ :param affiliations: data['affiliations'][0] :return: """ new_affiliation = [] for affiliation in affiliations: b = [x if re.search('(University|College|Institute)', x, re.IGNORECASE) else None for x in affiliation.split(',')] try: c = filter(None, b) c = c[len(c) - 1].strip() new_affiliation.append(c) except: continue return new_affiliation def get_word_count(astring): """ :param astring: :return: """ stopwords = set(STOPWORDS) stopwords.add('Keywords') stopwords.add('Keyword') stopwords.add('model') stopwords.add('models') stopwords = [x.lower() for x in stopwords] stopwords = list(stopwords) alist = astring.lower().strip().split() alist = [x for x in alist if x not in stopwords] return Counter(alist).most_common(50) def func1(alist_of_tuples, item_to_check): try: return [x[1] for x in alist_of_tuples if x[0] == item_to_check][0] except IndexError: return np.nan def flatten_list_of_tuples(list_of_tuples): list_fixed_words = [] for item in list_of_tuples: list_fixed_words.append(item[0]) return list_fixed_words def create_network_data(data): """ Function to create data necessary to generate the network plot :param data: :return: """ # Create empty data frame return_df = pd.DataFrame() # For 
each row of the data read the authors and create every possible combination for i in range(0, data.shape[0]): return_df = pd.concat([return_df, pd.DataFrame(list(itertools.combinations(data['author'][i], 2)))], axis=0) return_df.columns = ['from', 'to'] return return_df def create_network_data_university(data): """ Function to create data necessary to generate the network plot :param data: :return: """ # Create empty data frame return_df = pd.DataFrame() # For each row of the data read the authors and create every possible combination for i in range(0, data.shape[0]): return_df = pd.concat([return_df, pd.DataFrame(list(itertools.combinations(data['affiliations'][i], 2)))], axis=0) return_df.columns = ['from', 'to'] return return_df def generate_network_plot(df_journal, label): """ :param df_journal: :return: """ # Create network dataframe network_data = create_network_data(df_journal) # Create new column with number of publications by authors network_data['count'] = '' n_publications = network_data['from'].value_counts() # Set the number of publications to each author for name in n_publications.index: network_data.loc[(network_data['from'] == name), 'count'] = int( n_publications[n_publications.index == name].values) # Build your graph G = nx.from_pandas_dataframe(network_data, 'from', 'to') # Plot it nx.draw(G, with_labels=label, node_size=list(network_data['count'].values * 30)) plt.show() def generate_network_plot_univ(network_data, label): """ :param df_journal: :return: """ # Create new column with number of publications by authors network_data['count'] = '' n_publications = network_data['from'].value_counts() # Set the number of publications to each author for name in n_publications.index: network_data.loc[(network_data['from'] == name), 'count'] = int( n_publications[n_publications.index == name].values) # Build your graph G = nx.from_pandas_dataframe(network_data, 'from', 'to') # Plot it nx.draw(G, with_labels=label, 
node_size=list(network_data['count'].values * 10)) plt.show() # Convert received, accepted, and published dates to datetime # MSOM journal df_msom['received_date'] = df_msom['received_date'].apply(convert_date) df_msom['accepted_date'] = df_msom['accepted_date'].apply(convert_date) df_msom['published_date'] = df_msom['published_date'].apply(convert_date) # MNSC journal df_mnsc['received_date'] = df_mnsc['received_date'].apply(convert_date) df_mnsc['accepted_date'] = df_mnsc['accepted_date'].apply(convert_date) df_mnsc['published_date'] = df_mnsc['published_date'].apply(convert_date) # Create new attribute total time to publish df_msom['time_publish'] = df_msom['published_date'] - df_msom['received_date'] df_mnsc['time_publish'] = df_mnsc['published_date'] - df_mnsc['received_date'] # Create new attribute with the year of publication df_msom['year_publish'] = df_msom['published_date'].dt.year df_mnsc['year_publish'] = df_mnsc['published_date'].dt.year # Fill missing values for the year of publication with 1 df_msom['year_publish'].fillna(1, inplace=True) df_mnsc['year_publish'].fillna(1, inplace=True) # Convert year of publication to integer df_msom['year_publish'] = df_msom['year_publish'].astype(int) df_mnsc['year_publish'] = df_mnsc['year_publish'].astype(int) # Get number of days for publication df_msom['time_publish'] = df_msom['time_publish'].dt.days df_mnsc['time_publish'] = df_mnsc['time_publish'].dt.days # Get name of th df_msom['author'] = df_msom['author'].apply(lambda x: x.encode("utf-8").split("|")) df_mnsc['author'] = df_mnsc['author'].apply(lambda x: x.encode("utf-8").split("|")) df_msom['affiliations'] = df_msom['affiliations'].apply(lambda x: x.encode("utf-8").split("|")) df_mnsc['affiliations'] = df_mnsc['affiliations'].apply(lambda x: x.encode("utf-8").split("|")) df_msom['affiliations'] = df_msom['affiliations'].apply(lambda x: change_affiliation(x)) df_mnsc['affiliations'] = df_mnsc['affiliations'].apply(lambda x: change_affiliation(x)) 
df_msom['n_authors'] = df_msom['author'].apply(lambda x: len(x)) df_mnsc['n_authors'] = df_mnsc['author'].apply(lambda x: len(x)) # Create network dataframe network_data = create_network_data_university(df_mnsc) # Create new column with number of publications by authors network_data['count'] = '' n_publications = network_data['from'].value_counts() # Set the number of publications to each author for name in n_publications.index: network_data.loc[(network_data['from'] == name), 'count'] = int( n_publications[n_publications.index == name].values) # Build your graph G = nx.from_pandas_dataframe(network_data, 'from', 'to', 'count') # G = nx.random_geometric_graph(len(G.nodes()), 0.125) pos = nx.random_layout(G) # G = nx.from_pandas_dataframe(network_data, 'from', 'to', 'count') for node in G.nodes(): G.node[node]['pos'] = list(pos[node]) dmin = 1 ncenter = 0 for n in pos: x , y = pos[n] d = (x-0.5)**2+(y-0.5)**2 if d < dmin: ncenter = n dmin = d p = nx.single_source_shortest_path_length(G, ncenter) edge_trace = go.Scatter( x=[], y=[], line=dict(width=0.5,color='#888'), hoverinfo='none', mode='lines') for edge in G.edges(): x0, y0 = G.node[edge[0]]['pos'] x1, y1 = G.node[edge[1]]['pos'] edge_trace['x'] += tuple([x0, x1, None]) edge_trace['y'] += tuple([y0, y1, None]) node_trace = go.Scatter( x=[], y=[], text=[], mode='markers', hoverinfo='text', marker=dict( showscale=True, # colorscale options #'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' | #'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' | #'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' | colorscale='YlGnBu', reversescale=True, color=[], size=10, colorbar=dict( thickness=15, title='Node Connections', xanchor='left', titleside='right' ), line=dict(width=2))) for node in G.nodes(): x, y = G.node[node]['pos'] node_trace['x'] += tuple([x]) node_trace['y'] += tuple([y]) init_notebook_mode(connected=True) fig = go.Figure(data=[edge_trace, node_trace], layout=go.Layout( title='<br>The 
Data Incubator', titlefont=dict(size=16), showlegend=False, hovermode='closest', margin=dict(b=20,l=5,r=5,t=40), annotations=[ dict( text="By University", showarrow=False, xref="paper", yref="paper", x=0.005, y=-0.002 ) ], xaxis=dict(showgrid=False, zeroline=False, showticklabels=False), yaxis=dict(showgrid=False, zeroline=False, showticklabels=False))) iplot(fig)
Graph2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
# %matplotlib inline
import random as random
import numpy as np
import csv

# suppose we already collect 10 samples
x_data = [338., 333., 328., 207., 226., 25., 179., 60., 208., 606.]
y_data = [640., 633., 619., 393., 428., 27., 193., 66., 226., 1591.]

# +
# generalize parameter space X,Y and initialize error matrix Z
x = np.arange(-200, -100, 1)  # bias
y = np.arange(-5, 5, 0.1)  # weight
Z = np.zeros((len(x), len(y)))
X, Y = np.meshgrid(x, y)

# compute error matrix Z, so that we can draw a contour plot in last.
# Z[j][i] holds the sum of squared errors of the model y = b + w*x
# with b = x[i], w = y[j].
for i in range(len(x)):
    for j in range(len(y)):
        b = x[i]
        w = y[j]
        # Z[j][i] = 0
        for n in range(len(x_data)):
            Z[j][i] = Z[j][i] + (y_data[n] - b - w*x_data[n])**2
        # Z[j][i] = Z[j][i]/len(x_data)

# +
# ydata = b + w * xdata
b = -120  # initial b
w = -4  # initial w
lr = 1  # learning rate
iteration = 100000

# Accumulated squared gradients for AdaGrad's per-parameter step sizes.
b_lr = 0.0
w_lr = 0.0

# Store initial values for plotting.
b_history = [b]
w_history = [w]

# Iterations
for i in range(iteration):
    b_grad = 0.0
    w_grad = 0.0
    # Full-batch gradient of the sum of squared errors w.r.t. b and w.
    for n in range(len(x_data)):
        b_grad = b_grad - 2.0*(y_data[n] - b - w*x_data[n])*1.0
        w_grad = w_grad - 2.0*(y_data[n] - b - w*x_data[n])*x_data[n]

    b_lr = b_lr + b_grad**2
    w_lr = w_lr + w_grad**2

    # Update parameters using ada gradient descent
    # (effective step size shrinks as squared gradients accumulate)
    b = b - lr/np.sqrt(b_lr) * b_grad
    w = w - lr/np.sqrt(w_lr) * w_grad

    # Store parameters for plotting
    b_history.append(b)
    w_history.append(w)

# plot the figure
plt.contourf(x, y, Z, 50, alpha=0.5, cmap=plt.get_cmap('jet'))  # 50 contour levels, diaphaneity = 0.5, color scheme = jet
plt.plot([-188.4], [2.67], 'x', ms=12, markeredgewidth=3, color='orange')  # the closed form solution (hardcoded for this dataset)
plt.plot(b_history, w_history, 'o-', ms=3, lw=1.5, color='black')  # marker size = 3, line width = 1.5
plt.xlim(-200, -100)
plt.ylim(-5, 5)
plt.xlabel(r'$b$', fontsize=16)  # should we type it with r'$b$', r'$\mathrm{b}$' or 'b'??
plt.ylabel(r'$w$', fontsize=16)
plt.show()
code/GradientDescentDemo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day 9 Assignment
#
# Q1- Write a python function for finding is a given number prime or not and do Unit testing on it using PyLint and Unittest library.

# +
# %%writefile check_prime_number.py
def prime(num):
    """Report whether *num* is prime by printing the verdict.

    Trial division over 2..num-1; numbers <= 1 are not prime.
    Returns None — the result is printed, matching the original interface.
    """
    if num > 1:
        for i in range(2, num):
            if num % i == 0:
                # Found a divisor -> composite.  (Bug fix: the original
                # `break`-ed here and then unconditionally reported "Prime",
                # so e.g. prime(4) claimed 4 was prime.)
                return print("It is not a Prime Number")
        return print("It is a Prime Number")
    return print("It is not a Prime Number")


if __name__ == "__main__":
    # Guarded so the module can be imported (e.g. by unittest/pylint-driven
    # test code) without prompting for input.
    n = int(input("enter the number :"))
    prime(n)
# -

# ! pylint "check_prime_number.py"

# # Q2- Make a small generator program for returning armstrong numbers in between 1-1000 in a generator object.

def Armst():
    """Yield each number in 1..999 that equals the sum of the cubes of its digits."""
    for i in range(1, 1000):
        temp = i
        digit_cube_sum = 0  # renamed from `sum` to avoid shadowing the builtin
        while temp > 0:
            a = temp % 10
            digit_cube_sum = digit_cube_sum + a ** 3
            temp = temp // 10
        if digit_cube_sum == i:
            yield i


Armstrong_Number = list(Armst())
Armstrong_Number
B7 Day 9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Gender Classification

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Load the dataset (hard-coded local path — adjust for your machine).
data = pd.read_csv('C:/Users/ganes/Data Science/Datasets/gender_class.csv')

data.shape

data.head()

# Visualize missing values (dark/light cells mark null vs non-null).
sns.heatmap(data.isnull())

# Encode each categorical column as integer codes, one column at a time.
# Bug fix: the original assigned pd.get_dummies(data=data) — the whole
# one-hot-encoded DataFrame — to every single column, which corrupts the
# features (modern pandas raises "Cannot set a DataFrame with multiple
# columns to the single column ...").
for _col in ['Favorite Color', 'Favorite Music Genre', 'Favorite Beverage',
             'Favorite Soft Drink', 'Gender']:
    data[_col] = data[_col].astype('category').cat.codes

data.head()

# # Test Train Split

from sklearn.model_selection import train_test_split

X = data.drop(['Gender'], axis=1)
y = data['Gender']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)

X_train.shape, X_test.shape

from sklearn.tree import DecisionTreeClassifier

dtree = DecisionTreeClassifier()
dtree.fit(X_train, y_train)
prediction1 = dtree.predict(X_test)

from sklearn.metrics import classification_report

print(classification_report(y_test, prediction1))

from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier()
rf.fit(X_train, y_train)
# Bug fix: evaluate the random forest — the original called
# dtree.predict(X_test) again, so both reports scored the decision tree.
prediction2 = rf.predict(X_test)

print(classification_report(y_test, prediction2))

# # End
Gender Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## This Notebook - Goals - FOR EDINA
#
# **What?:** <br>
# - Introduction/tutorial to <code>kepler gl</code>, an advanced geospatial visualization tool
# - Visualization of global population data and gdp
#
# **Who?:** <br>
# - Academics in geosciences
# - Users interested in geospatial data analysis
#
# **Why?:** <br>
# - Tutorial/guide for academics and students on how to use kepler gl
#
# **Noteable features to exploit:** <br>
# - Use of pre-installed libraries
#
# **How?:** <br>
# - Clear visualisations - concise explanations
# - Effective use of core libraries

# <hr>

# # Kepler gl

# Kepler.gl is a data-agnostic, high-performance web-based application for visual exploration of large-scale geolocation data sets. It also has a jupyter widget package to render large-scale interactive maps in Jupyter Notebooks. You can easily add large datasets and customize the layers you wish to plot on a basemap. Once a map is plotted using <code>keplergl.KeplerGl()</code>, the displayed map has an advanced toolbar settings where you can create layers and customize tooltip interactions.
#
# The first set of data used was a small sample dataset of cities in South America and their coordinates. The second set of data was obtained from https://geojson-maps.ash.ms/ originating from Natural Earth, a public domain map dataset free to use in any type of project.
#
# Before running the notebook, have a look at the saved widget state of the map. Then you can run the notebook and recreate it or create your own layers.
#
# **Notebook contents:**
# - Importing the necessary libraries
# - Creating sample dataset
# - Loading in dataset on population and gdp
# - Adding data and defining layers for the map

# Import necessary libraries
import pandas as pd
import geopandas
import keplergl

# +
# Create a sample dataset for cities and their locations
# (coordinates in decimal degrees; Time column is an ISO-like string)
df = pd.DataFrame(
    {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
     'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
     'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
     'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86],
     'Time': ['2019-09-01 08:00', '2019-09-01 09:00', '2019-09-01 10:00', '2019-09-01 11:00', '2019-09-01 12:00']
     })

# Load in dataset with country geometries and population
# NOTE(review): expects ./custom.geo.json next to the notebook — download it
# from https://geojson-maps.ash.ms/ before running.
country_gdf = geopandas.read_file('./custom.geo.json')
# -

# ## Creating layers for the map
# To actually create layers with the data you wish to visualize, you do not need to write any additional code as the map has an extensive toolbar.
# These are the steps to recreate the widget state that was saved in this notebook:
# 1. Open up the toolbar by clicking on the <kbd>></kbd> in the upper left corner.
#     - As you can see both datasets (cities and countries) are available to view as a table
# 2. There are two initial layers created by the creating the map (called Point and Countries)
#     - You can make these visible and invisible by clicking on the eye icon
# 3. To customize the Point layer click the down arrow which reveals a dropdown menu
#     - Select the field <kbd>City</kbd> in the Dropdown menu for <kbd>Label</kbd> to add a label to the points on the map
# 4. To customize the Countries layer click the down arrow which reveals a dropdown menu
#     1. Change the name 'Countries' to 'Population'
#     2. Select <kbd>Polygon</kbd>
#     3. Turn on <kbd>Fill color</kbd> and click the three vertical dots menu
#     4. Select <kbd>pop_est</kbd> in the dropdown menu for <kbd>Color Based On</kbd>
#     5. Select a suitable colormap from the dropdown menu for <kbd>Fill Color</kbd>
#     6. Turn on <kbd>Stroke color</kbd> and click the three vertical dots menu
#     7. Select <kbd>continent</kbd> in the dropdown menu for <kbd>Stroke Color Based On</kbd>
#     8. Select a suitable colormap from the dropdown menu or create your own custom colormap for <kbd>Stroke Color</kbd>
# 5. To add another layer click <kbd>Add Layer</kbd>
#     1. Name it 'GDP'
#     2. Click the three vertical dots menu for <kbd>Basic</kbd> and select the <kbd>countries</kbd> dataset
#     3. Select <kbd>Polygon</kbd>
#     4. Turn on <kbd>Fill color</kbd> and click the three vertical dots menu
#     5. Select <kbd>gdp_md_est</kbd> in the dropdown menu for <kbd>Color Based On</kbd>
#     6. Select a suitable colormap from the dropdown menu for <kbd>Fill Color</kbd>
#     7. Turn on <kbd>Stroke color</kbd> and click the three vertical dots menu
#     8. Select <kbd>continent</kbd> in the dropdown menu for <kbd>Stroke Color Based On</kbd>
#     9. Select a suitable colormap from the dropdown menu or create your own custom colormap for <kbd>Stroke Color</kbd>
# 6. To customize tooltip interactions, click the interactions tab in the toolbar
#     - Add any or remove any columns from the dataset to be shown with tooltips
# 7. Show legend by using by clicking the icon on the right hand side of the map

# Create basemap and add the two datasets
# (keys 'cities'/'countries' become the dataset names inside the widget)
w1 = keplergl.KeplerGl(height=1000, data={'cities': df, 'countries': country_gdf})
w1
GeneralExemplars/GeoExemplars/Kepler_Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] school_cell_uuid="2577e475d74c49d39d526854fc8e611e" # ## 4.3 분산 분석과 모형 성능 # + [markdown] school_cell_uuid="86b6d989ce9e4fccaa25a8ab5e68666e" # ### 분산 분석 # + [markdown] school_cell_uuid="5e86f952c67d4f3b8e04cb5eec45e282" # 선형회귀분석의 결과가 얼마나 좋은지는 단순히 잔차제곱합(RSS: Residula Sum of Square)으로 평가할 수 없다. 변수의 단위 즉, 스케일이 달라지면 회귀분석과 상관없이 잔차제곱합도 달라지기 때문이다. # # 분산 분석(ANOVA: Analysis of Variance)은 종속변수의 분산과 독립변수의 분산간의 관계를 사용하여 선형회귀분석의 성능을 평가하고자 하는 방법이다. 분산 분석은 서로 다른 두 개의 선형회귀분석의 성능 비교에 응용할 수 있으며 독립변수가 카테고리 변수인 경우 각 카테고리 값에 따른 영향을 정량적으로 분석하는데도 사용된다. # + [markdown] school_cell_uuid="9a024e84041e4d59bb8ff1215f21be1c" # $\bar{y}$를 종속 변수 $y$의 샘플 평균이라고 하자. # # $$\bar{y}=\frac{1}{N}\sum_{i=1}^N y_i $$ # + [markdown] school_cell_uuid="628d1c9b2ec24efdb1cc7e1ec24c01ac" # 종속 변수 $y$의 분산(샘플의 갯수로 나누지 않았으므로 정확하게는 분산이 아니지만 여기에서는 분산이라는 용어를 사용하자)을 나타내는 **TSS(total sum of square)**라는 값을 정의한다. **TSS는 종속변수값의 움직임의 범위**를 나타낸다. # # $$\text{TSS} = \sum_{i=1}^N (y_i-\bar{y})^2 = (y - \bar{y}1_N)^T(y - \bar{y}1_N)$$ # # 위 식에서 $\bar{y}1_N$는 $\bar{y}$이라는 스칼라가 $N$번 반복된 브로드캐스팅 벡터다. # # # 마찬가지로 회귀 분석에 의해 예측한 값 $\hat{y}$의 분산을 나타내는 **ESS(explained sum of squares)**, # # $$\text{ESS}=\sum_{i=1}^N (\hat{y}_i -\bar{\hat{y}})^2 = (\hat{y} - \bar{\hat{y}}1_N)^T(\hat{y} - \bar{\hat{y}}1_N)$$ # # 잔차 $e$의 분산을 나타내는 **RSS(residual sum of squares)**도 정의할 수 있다. # # $$\text{RSS}=\sum_{i=1}^N (y_i - \hat{y}_i)^2\ = e^Te$$ # # 위 식에서 $\bar{\hat{y}}$는 모형 예측값 $\hat{y}$의 평균이다. # # 또한 **ESS는 모형에서 나온 예측값의 움직임의 범위**, **RSS는 잔차의 움직임의 범위, 즉 오차의 크기**를 뜻한다고 볼 수 있다. # + [markdown] school_cell_uuid="0c0a3c34f958480583b9e2b5a0151708" # 만약 회귀모형이 상수항을 포함하여 올바르게 정의되었다면 잔차의 평균이 0이 된다. 즉, 종속변수의 평균과 모형 예측값의 평균이 같다. 
# # $$ \bar{e} = \bar{y} - \bar{\hat{y}} = 0$$ # # $$ \bar{y} = \bar{\hat{y}} $$ # # 그리고 이 분산값들 간에는 다음과 같은 관계가 성립한다. # # $$\text{TSS} = \text{ESS} + \text{RSS}$$ # + [markdown] school_cell_uuid="0882f84ed11f4393a7241255d0084ee4" # 이는 다음과 같이 증명할 수 있다. # # 우선 회귀 분석으로 구한 가중치 벡터를 $\hat{w}$, 독립 변수(설명 변수) $x$에 의한 종속 변수의 추정값을 $\hat{y}$, 잔차를 $e$ 라고 하면 다음 식이 성립한다. # # $$ y = X\hat{w} + e = \hat{y} + e $$ # # 그리고 $X$의 평균 데이터 $\bar{x}$ # # $$ # \bar{x} = \frac{1}{N}X^T1_N # $$ # # 에 대한 예측값은 $y$의 평균데이터 $\bar{y}$가 되므로 # # $$ # \bar{x}^T\hat{w} = \bar{y} # $$ # # # 각 행의 값이 평균 데이터 $\bar{x}$로 반복되는 행렬 $\bar{X}$ # # $$ # \bar{X} = \frac{1}{N}X^T1_N 1_N^T # $$ # # 에 대한 예측값 벡터는 $\bar{y}$값이 반복되는 벡터가 된다. # # $$ # \bar{X}\hat{w} = \bar{y}1_N # $$ # # 이를 위 식에 대입하면 # # $$ # \hat{y} - \bar{y}1_N = (X- \bar{X})\hat{w} # $$ # # 가 된다. # # 그런데 $\bar{X}$와 잔차 $e$는 다음과 같은 직교 관계가 성립한다. # # $$ \bar{X}^Te = \frac{1}{N}X^T1_N 1_N^Te = \frac{1}{N}X^T1_N 0 = 0 $$ # # 직교방정식 # # $$ # X^Te = 0 # $$ # # 과 합치면 다음 식이 성립한다. # # $$ \bar{X}^Te - X^Te = (\bar{X} - X)^Te $$ # # 따라서 # # $$ # \begin{eqnarray} # \text{TSS} # &=& (y - \bar{y}1_N)^T(y - \bar{y}1_N) \\ # &=& (\hat{y} - \bar{y}1_N + e)^T(\hat{y} - \bar{y}1_N + e) \\ # &=& (\hat{y} - \bar{y}1_N)^T(\hat{y} - \bar{y}1_N) + e^Te + 2(\hat{y} - \bar{y}1_N)^Te \\ # &=& (\hat{y} - \bar{y}1_N)^T(\hat{y} - \bar{y}1_N) + e^Te + 2\hat{w}^T(X - \bar{X})^Te \\ # &=& (\hat{y} - \bar{y}1_N)^T(\hat{y} - \bar{y}1_N) + e^Te \\ # &=& \text{ESS} + \text{RSS} # \end{eqnarray} # $$ # # + [markdown] school_cell_uuid="69a3e069bd0d4dc5a3eaea862572787b" # 위 식이 말하는 바는 다음과 같다. # # > 모형 예측치의 움직임의 크기(분산)은 종속변수의 움직임의 크기(분산)보다 클 수 없다. # # > 모형의 성능이 좋을수록 모형 예측치의 움직임의 크기는 종속변수의 움직임의 크기와 비슷해진다. # + [markdown] school_cell_uuid="c796514570984fcf8a8115d59940a2a6" # 간단한 1차원 데이터와 모형을 사용하여 이 식이 성립하는지 살펴보자. 
# + school_cell_uuid="df98514025004af1bf2e4357b9167234" from sklearn.datasets import make_regression X0, y, coef = make_regression( n_samples=100, n_features=1, noise=30, coef=True, random_state=0) dfX0 = pd.DataFrame(X0, columns=["X"]) dfX = sm.add_constant(dfX0) dfy = pd.DataFrame(y, columns=["Y"]) df = pd.concat([dfX, dfy], axis=1) model = sm.OLS.from_formula("Y ~ X", data=df) result = model.fit() # + [markdown] school_cell_uuid="c7cced34847641289de6b28adc88b02e" # `RegressionResult` 타입 객체는 다음과 같이 분산분석과 관련된 속성값을 가진다. # + school_cell_uuid="9d8f4a19e9a8432990e5c3bd819c85c5" print("TSS = ", result.uncentered_tss) print("ESS = ", result.mse_model) print("RSS = ", result.ssr) print("ESS + RSS = ", result.mse_model + result.ssr) print("R squared = ", result.rsquared) # + school_cell_uuid="1b549417d5de4d95a9adfd41769d72f9" sns.distplot(y, kde=False, fit=sp.stats.norm, hist_kws={"color": "r", "alpha": 0.2}, fit_kws={"color": "r"}, label="TSS") sns.distplot(result.fittedvalues, kde=False, hist_kws={"color": "g", "alpha": 0.2}, fit=sp.stats.norm, fit_kws={"color": "g"}, label="ESS") sns.distplot(result.resid, kde=False, hist_kws={"color": "b", "alpha": 0.2}, fit=sp.stats.norm, fit_kws={"color": "b"}, label="RSS") plt.legend() plt.show() # + [markdown] school_cell_uuid="fd4d5737a0844dd2873fbf3c05717db7" # ### 결정계수(Coefficient of Determination) # + [markdown] school_cell_uuid="83cea00a9df7491aab3a6303eeb3ae9a" # 위의 분산 관계식에서 모형의 성능을 나타내는 결정계수(Coefficient of Determination) $R^2$를 정의할 수 있다. # # $$R^2 \equiv 1 - \dfrac{\text{RSS}}{\text{TSS}}\ = \dfrac{\text{ESS}}{\text{TSS}}\ $$ # + [markdown] school_cell_uuid="f33a1c752ecd442f8b1e1e47978d2dc9" # 분산 관계식과 모든 분산값이 0보다 크다는 점을 이용하면 $R^2$의 값은 다음과 같은 조건을 만족함을 알 수 있다. # # $$0 \leq R^2 \leq 1$$ # + [markdown] school_cell_uuid="5eb5e0c7bead471b8970e41aaf7c5bc5" # 여기에서 $R^2$가 0이라는 것은 오차의 분산 RSS가 최대이고 회귀분석 예측값의 분산 ESS가 0인 경우이므로 회귀분석 결과가 아무런 의미가 없다는 뜻이다. 
# 반대로 $R^2$가 1이라는 것은 오차의 분산 RSS가 0이고 회귀분석 예측의 분산 ESS가 TSS와 같은 경우이므로 회귀분석 결과가 완벽하다는 뜻이다. # 따라서 결정계수값은 회귀분석의 성능을 나타내는 수치라고 할 수 있다. # + [markdown] school_cell_uuid="8743771052744a988f129b007031ddd1" # ### 분산 분석표 # + [markdown] school_cell_uuid="e22ce8312a3743f0a544575dc38b6d8c" # 분산 분석의 결과는 보통 다음과 같은 분산 분석표를 사용하여 표시한다. 아래의 표에서 $N$은 데이터의 갯수, $K$는 모수의 갯수를 뜻한다. # + [markdown] school_cell_uuid="654460492333480c938200912a4e4896" # # | source | degree of freedom | sum of square | mean square | F test-statstics | p-value | # |-|-|-|-|-|-| # | Regression| $$K-1$$ | $$\text{ESS}$$ | $$s_{\hat{y}}^2 = \dfrac{\text{ESS}}{K-1}$$ | $$F=\dfrac{s_{\hat{y}}^2}{s_e^2} $$ | p-value | # | Residual | $$N-K$$ | $$\text{RSS}$$ | $$s_e^2= \dfrac{\text{RSS}}{N-K}$$ | | # | Total | $$N-1$$ | $$\text{TSS}$$ | $$s_y^2= \dfrac{\text{TSS}}{N-1}$$ | | # | $R^2$ | | $$\text{ESS} / \text{TSS}$$ | | | # # + [markdown] school_cell_uuid="33bb0a95516a4ffda57a8967d8f8fe15" sidetitle=true # 표 29.1 : 분산 분석표 # + [markdown] school_cell_uuid="76d670b1d1a0467d85018b7ed321d920" # ### 회귀 분석 F-검정과 분산 분석의 관계 # + [markdown] school_cell_uuid="f952b694aae044eeaf62952eaa6a9868" # 이러한 모양의 표를 사용하는 이유는 분산 분석의 결과를 이용하여 회귀 분석 F-검정에 필요한 검정통계량을 구할 수 있기 때문이다. # # 회귀 분석 F-검정의 원래 귀무 가설은 모든 계수 $w_i$가 $0$ 이라는 것이지만 이 때는 모형이 아무런 의미가 없으므로 결정계수 값도 0이 된다 # # $$ H_0: R^2 = 0 $$ # # 이 때 $\hat{w}$값은 기대값이 0인 정규 분포에서 나온 표본이므로 예측값 $\hat{y} = \hat{w}^T x$는 정규 분포의 선형 조합이라서 마찬가지로 정규 분포를 따른다. 그리고 잔차(residual)는 오차(disturbance)의 선형 변환으로 정규 분포를 따르므로 ESS와 RSS의 비율은 F 분포를 따른다. # # $$ \dfrac{\text{ESS}}{K-1} \div \dfrac{\text{RSS}}{N-K} \sim F(K-1, N-K) $$ # # 따라서 이 값을 회귀 분석 F-검정의 검정통계량으로 사용할 수 있다. # + [markdown] school_cell_uuid="d58f96b3cf7e430e9a739f3a4e6ecc0c" # statsmodels 에서는 다음과 같이 `anova_lm` 명령을 사용하여 분산 분석표를 출력할 수 있다. 다만 이 명령을 사용하기 위해서는 모형을 `from_formula` 메서드로 생성하여야 한다. # # `anova_lm` 명령으로 구한 F 검정통계량과 유의확률은 모형 `summary` 명령으로 구한 `F-statistic` 및 `Prob (F-statistic)`과 일치한다. 
# + school_cell_uuid="5b73fce8a26742658e991fddcdce6943" sm.stats.anova_lm(result) # + school_cell_uuid="57d165140a2e444494ce221000fee0d8" print(result.summary()) # + [markdown] school_cell_uuid="ade6b33ddfb846be8f1c5614d1fdbfd6" # ### 결정 계수와 상관 계수 # + [markdown] school_cell_uuid="096c2fa20c104e8fb540b0a23f797389" # $y$와 $\hat{y}$의 샘플 상관계수 $r$의 제곱은 결정 계수 $R^2$와 같다. # + school_cell_uuid="726fbaaae2b84492bd696682d3d399f6" sns.jointplot(result.fittedvalues, y) plt.show() # + [markdown] school_cell_uuid="51172ce40b3d4d7f99c791d57b33d740" # ### 상수항이 없는 모형의 경우 # + [markdown] school_cell_uuid="7e595fd3890a48499ed8f673e2b10213" # 모형에서 상수항을 지정하지 않은 경우에는 결정계수의 정의에 사용되는 TSS의 정의가 다음과 같이 달라진다. # # $$\text{TSS} = \sum_i y_i^2 = y^Ty $$ # # 즉, 실제 샘플평균과 상관없이 $\bar{y} = $이라는 가정하에 TSS를 계산한다. 이렇게 정의하지 않으면 TSS = RSS + ESS 관계식이 성립하지 않아서 결정계수의 값이 1보다 커지게 된다. # # 따라서 모형의 결정계수를 비교할 때 **상수항이 없는 모형과 상수항이 있는 모형은 직접 비교하면 안된다.** # + school_cell_uuid="d73b2084863143d99cd61a25abf7d6ec" X0, y, coef = make_regression( n_samples=100, n_features=1, noise=30, bias=100, coef=True, random_state=0) dfX = pd.DataFrame(X0, columns=["X"]) dfy = pd.DataFrame(y, columns=["Y"]) df = pd.concat([dfX, dfy], axis=1) model2 = sm.OLS.from_formula("Y ~ X + 0", data=df) result2 = model2.fit() # + school_cell_uuid="4c7a4312f1f2409ca00e23ff319e2067" result2.rsquared # + [markdown] school_cell_uuid="f326cad592944c0baa3089b501594182" # ### F 검정을 이용한 모형 비교 # + [markdown] school_cell_uuid="938ee04cbe19439c817970e3fef05c19" # F 검정을 이용하면 다음과 같이 포함관계(nesting)에 있는 두 모형의 성능을 비교할 수 있다. # # * 전체 모형(Full Model): $$ y = w_0 + w_1 x_1 + w_2 x_2 + w_3 x_3 $$ # * 축소 모형(Reduced Model): $$ y = w_0 + w_1 x_1 $$ # # + [markdown] school_cell_uuid="c7c827a14783474a976309751d7e4f5f" # 다음과 같은 귀무 가설을 검정하는 것은 위의 두 모형이 실질적으로 같은 모형이라는 가설을 검장하는 것과 같다. # # $$ H_0: w_2 = w_3 = 0 $$ # # 이 검정도 F 검정을 사용하여 할 수 있다. StatsModels에서는 `anova_lm` 명령에 두 모형의 result 객체를 인수로 넣어주면 이러한 검정을 할 수 있다. 인수를 넣어줄 때는 축소 모형(reduced model), 전체 모형(full model)의 순서로 넣어준다. 
# + school_cell_uuid="9258203f96ea4b9f8008d8f2904e4536" from sklearn.datasets import load_boston boston = load_boston() dfX0_boston = pd.DataFrame(boston.data, columns=boston.feature_names) dfy_boston = pd.DataFrame(boston.target, columns=["MEDV"]) dfX_boston = sm.add_constant(dfX0_boston) df_boston = pd.concat([dfX_boston, dfy_boston], axis=1) # + school_cell_uuid="641024d48127480fb198fc591b957ed6" model_full = sm.OLS.from_formula( "MEDV ~ CRIM + ZN + INDUS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO + B + LSTAT + CHAS", data=df_boston) model_reduced = sm.OLS.from_formula( "MEDV ~ CRIM + ZN + NOX + RM + DIS + RAD + TAX + PTRATIO + B + LSTAT + CHAS", data=df_boston) sm.stats.anova_lm(model_reduced.fit(), model_full.fit()) # + [markdown] school_cell_uuid="13b6c0d451f04f33826c399b3c21e9a2" # ### F 검정을 사용한 변수 중요도 비교 # + [markdown] school_cell_uuid="e998b873aa7e4bb3bf16a1fdc2a1dcd8" # F 검정은 각 독립변수의 중요도를 비교하기 위해 사용할 수 있다. # 방법은 전체 모형과 각 변수 하나만을 뺀 모형들의 성능을 비교하는 것이다. 이는 간접적으로 각 독립 변수의 영향력을 측정하는 것과 같다. 예를 들어 보스턴 집값 데이터에서 CRIM이란 변수를 뺀 모델과 전체 모델의 비교하는 검정을 하면 이 검정 결과는 CRIM변수의 중요도를 나타낸다. # + school_cell_uuid="5459c1b3e56345bfa911724a59a33a6b" model_full = sm.OLS.from_formula( "MEDV ~ CRIM + ZN + INDUS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO + B + LSTAT + CHAS", data=df_boston) model_reduced = sm.OLS.from_formula( "MEDV ~ ZN + INDUS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO + B + LSTAT + CHAS", data=df_boston) sm.stats.anova_lm(model_reduced.fit(), model_full.fit()) # + [markdown] school_cell_uuid="aabf0a30d0bd4c8786c43a88b6404278" # `anova_lm` 명령에서는 `typ` 인수를 `2`로 지정하면 하나 하나의 변수를 뺀 축소 모형에서의 F 검정값을 한꺼번에 계산할 수 있다. 
# + school_cell_uuid="a46ba7c34ffd4cd790eaeed2033ac6eb" model_boston = sm.OLS.from_formula( "MEDV ~ CRIM + ZN + INDUS + NOX + RM + AGE + DIS + RAD + TAX + PTRATIO + B + LSTAT + CHAS", data=df_boston) result_boston = model_boston.fit() sm.stats.anova_lm(result_boston, typ=2) # + [markdown] school_cell_uuid="134479c7de0a49bb939495605dfbbc1e" # 이 값은 단일 계수 t 검정의 유의확률과 동일하다. 그 이유는 다음과 같은 t 분포와 F 분포의 동치 성질 때문이다. # # $$ t_n^2 = F_{(1, n)} $$ # + [markdown] school_cell_uuid="809b1a9ac79a4bcda839f82fad339810" # ### 조정 결정 계수 # + [markdown] school_cell_uuid="bb2a470d0c9748db962a4294f2b6a8f3" # 선형 회귀 모형에서 독립 변수가 추가되면 결정 계수의 값은 항상 증가한다. 이는 다음과 같이 확인할 수 있다. # # 종속 변수 $y$를 회귀 분석하기 위한 기존의 독립 변수가 $X$이고 여기에 추가적인 독립 변수 $z$가 더해졌을 때, 다음과 같은 관계가 성립한다. # # $$ # R^2_{Xz} = R^2_{X} + (1-R^2_{X})r^{\ast 2}_{yz} # $$ # # 여기에서 # * $R^2_{X}$: 기존의 독립 변수 $X$를 사용한 경우의 결정 계수 # * $R^2_{Xz}$: 기존의 독립 변수 $X$와 추가적인 독립 변수 $z$를 모두 사용한 경우의 결정 계수 # * $r^{\ast 2}_{yz}$: 추가적인 독립 변수 $z$와 종속 변수 $y$간의 상관 관계 계수 # # # 이고 이 항목들은 모두 양수이므로 # # $$ # R^2_{Xz} \geq R^2_{X} # $$ # + [markdown] school_cell_uuid="cc57abcae2cf4f189da669e0de3b77c9" # 이러한 독립 변수 추가 효과를 상쇄시키기 위한 다양한 기준들이 제시되었다. 그 중 하나가 다음과 같이 독립 변수의 갯수 $K$에 따라 결정 계수의 값을 조정하는 조정 결정 계수이다 # # $$ # R_{adj}^2 = 1 - \frac{n-1}{n-K}(1-R^2) = \dfrac{(n-1)R^2 +1-K}{n-K} # $$ # # # + [markdown] school_cell_uuid="1804a76e91bc471cbdaf4988ba437d50" # ### 정보량 규준 # + [markdown] school_cell_uuid="9294f4c662f64b5390a7b6abc51c552b" # 조정 결정 계수와 함께 많이 쓰이는 모형 비교 기준은 최대 우도에 독립 변수의 갯수에 대한 손실(penalty)분을 반영하는 방법이다. # 이를 정보량 규준(information criterion)이라고 하며 손실 가중치의 계산 법에 따라 AIC (Akaike Information Criterion)와 BIC (Bayesian Information Criterion) 두 가지를 사용한다. # # AIC는 모형과 데이터의 확률 분포 사이의 Kullback-Leibler 수준을 가장 크게하기 위한 시도에서 나왔다. BIC는 데이터가 exponential family라는 가정하에 주어진 데이터에서 모형의 likelihood를 측정하기 위한 값에서 유도되었다. 둘 다 값이 작을 수록 올바른 모형에 가깝다. # + [markdown] school_cell_uuid="d160bcdcd9d344059129cab4340f47fc" # $$ # \text{AIC} = -2\log L + 2K # $$ # # $$ # \text{BIC} = -2\log L + K\log n # $$
regression_analysis_verification/3_performance.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext autoreload # %autoreload 2 # + # %%capture import os, sys import time, glob import random import cv2 import numpy as np from matplotlib import pyplot as plt import tensorflow as tf from keras import backend as K from keras.layers import * from keras.models import Model from keras.callbacks import * from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau from LaneDataGenerator import LaneDataGenerator # %matplotlib inline # - # ## DataGenerator # + IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 40, 120, 3 INPUT_SHAPE = (IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS) data_gen = LaneDataGenerator(images_folder='./dataset/images', labels_folder='./dataset/labels', batch_size=4, input_shape=INPUT_SHAPE) # - # data generator return image<YUV[0., 255.]> # and steering_angle<normalized[-1., 1.]> vis_im, vis_steering = data_gen[0] plt.imshow(vis_im[0].astype(np.uint8)) print(vis_steering[0]) # ## Create model # + K.clear_session() net_in = Input(shape = INPUT_SHAPE) x = Lambda(lambda x: x/127.5 - 1.0)(net_in) x = Conv2D(24, (5, 5), strides=(2, 2),padding="same", activation='elu')(x) x = Conv2D(36, (5, 5), strides=(2, 2),padding="same", activation='elu')(x) x = Conv2D(48, (5, 5), strides=(2, 2),padding="same", activation='elu')(x) x = Conv2D(64, (3, 3), padding="same",activation='elu')(x) x = Conv2D(64, (3, 3), padding="same",activation='elu')(x) x = Dropout(0.3)(x) x = Flatten()(x) x = Dense(100, activation='elu')(x) x = Dense(50, activation='elu')(x) x = Dense(10, activation='elu')(x) net_out = Dense(1, name='net_out')(x) model = Model(inputs=net_in, outputs=net_out) model.summary() # - # ## Train model.compile(loss='mse', optimizer=Adam(lr=0.001)) # + filepath = 
("./checkpoints/LaneDetector{epoch:03d}-{loss:.3f}.h5") checkpoint = ModelCheckpoint(filepath, save_weights_only=False) reduce_lr = ReduceLROnPlateau(monitor='loss', patience=5, factor=0.5, verbose=1, mode='min') history = model.fit_generator( data_gen, steps_per_epoch=10, epochs=1000, initial_epoch=0, callbacks=[checkpoint, reduce_lr], workers=2, use_multiprocessing=False) # - # ## Convert to .tflite converter = tf.lite.TFLiteConverter.from_keras_model_file('checkpoints/LaneDetector277-0.002.h5') model = converter.convert() file = open( 'model.tflite' , 'wb' ) file.write( model ) file.close()
train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.0 # language: sage # name: sagemath # --- # # Lesson 3 - Euler McLaurin and Bernoulli numbers # # ## Learning Outcomes: # # The student will learn to manipulate symbolic expressions: # - differentiate # - simplify # - substitute (evaluate) # They will also learn following concepts from SageMath: # # - taylor expansions # - coefficients # # # ## Mathematical problem: # # One of the methods available for computing the Riemann zeta function is the Euler - McLaurin summation formula, which in general says that if $f(x)$ is infinitely differentiable on $\mathbb{R}$, $a \le b$ are integers and $k>0$ then: # # Here the constants $B_{k}$ are Bernoulli numbers, $B_j(x)$ Bernoulli plynomials and $[x]$ is the integer part of $x$.$$ # \sum_{n=a}^{b} f(n) = \int_{a}^{b} f(x) dx + \frac{f(a)-f(b)}{2} + # \sum_{j=1}^{k}\frac{B_{j}}{(j)!} \left( f^{(j-1)}(b) - f^{(j-1)}(a)\right) # + \frac{(-1)^{k-1}}{(k)!} \int_{a}^{b} B_{k}(x-[x])f^{(k)}(x) dx. # $$ # # ### Bernoulli numbers # The Bernoulli numbers are a sequence of rational numbers that appear in many parts of number theory and are in particular closely related to the Riemann zeta function # $$\zeta(s)= \sum_{n=1}^{\infty} n^{-s}$$ # For instance # $$ # \zeta(2)=\frac{\pi^2}{6}, \quad \zeta(4)=\frac{\pi^4}{90}\ldots # $$ # and in general # $$ # \zeta(2k)=(-1)^{k-1}\frac{(2\pi)^{2k}}{2(2k)!}B_{2k} # $$ # The Bernoulli numbers $B_k$ can be defined in terms of a generating series: # $$ # F(x) = \frac{x}{e^x-1} = \sum_{k=0}^{\infty} B_k \frac{1}{k!}x^k # $$ # and we will see how we can use symbolic expressions in SageMath to find their values (this is by far not the most efficient way...) 
# ## Symbolic expressions in SageMath # When starting Sage the name 'x' is defined as a symbolic variable and other variables have to be declared using # the `var(x,y,z,...)` command. # Symbolic expressions can treated in various ways: # - differentiation # - simplifications # x y var('x, y, z') y*z # ### Differentiation g = 1/(x^2+y^2) print(f"{'g':20s}= {g}") print(f"{'dg/dx':20s}= {diff(g,x)}") print(f"{'d^2g/dxdy':20s}= {diff(g,x,y)}") g.differentiate(x,2) # ### Simplification f = x*y/(x^2 + y^2 ) z=diff(f,x,y) z z.simplify_full() # ## Substitution z1=z.substitute(x=2*y^2+1); z z1.simplify_full() # **Exercise** # Determine the value of $\frac{\partial f}{\partial u}(0,1,1)$ for # $$ # f(s,t,u)=\frac{s+tu}{\sqrt{s^2+t^2+u^2}} # $$ # Is it # # - (a) $\frac{1}{\sqrt{2}}$ # # - (b) $\frac{1}{\sqrt{2}}$ # # - (c) $\frac{1}{2\sqrt{2}}$ # # - (d) $\frac{1}{5\sqrt{2}}$ # - (e) None of the above? # We can now try to find the Bernoulli numbers: F = x/(e^x -1); F # The generating function # Try to find the first Taylor coefficient: g=derivative(F,x,1); g # another name for .diff() print(g.simplify_full()) # yes - there are other types of simplifications. g.substitute(x=0) # We cant' just divide by 0 / 0. We need L'hopitals rule! # Differentiate the numerator and denominator and divide again: g.numerator().diff(x) / g.denominator().diff(x) # Still of the form 0 /0. Need one more derivative! # Note that the second parameter gives the number of times we differentiate p=g.numerator().diff(x,2) / g.denominator().diff(x,2) print(p) p=p.simplify_full() print(p) p.substitute(x=0) # bernoulli(1) # So the first Bernoulli number $B_1=-\frac{1}{2}$. 
# This method is a bit cumbersome but fortunately there is a builtin command in Sage for Taylor expansions F.taylor(x,0,10) type(F.taylor(x,0,10)) # We can convert this to a polynomial over $\mathbb{Q}$: # p = F.taylor(x,0,10).polynomial(QQ) type(p) p # For a polynomial we can add a big-Oh q=p.add_bigoh(12); q print(q.parent()) type(q) x=q.parent().gen() type(q) q+(x+1).add_bigoh(8) # We can get coefficients of certain terms in Taylor expansions F.taylor(x,0,10).coefficient(x^4) # We can now write a function that returns the j-th Bernoulli number # + def B(j): F = x / (e^x -1) return F.taylor(x,0,j).coefficient(x^j)*factorial(j) [B(j) for j in range(1,10)] # - # ## Variable scopes: # In general variables inside functions are **local** so they don't affect other variables "outside": # + x=1 def my_function(x): print(f"x inside function is {x}") x = 3 print(f"x inside function is {x}") my_function(2) print(x) # - # However, the symbolic variable `x` is special and it gets it's scope from the outside. var('x') x=0 def B(j): F = x / (e^x -1) # x = 1 so F is a constant return F.taylor(x,0,j).coefficient(x^j)*factorial(j) [B(j) for j in range(1,10)] # + def B(j): var('x') F = x / (e^x -1) return F.taylor(x,0,j).coefficient(x^j)*factorial(j) [B(j) for j in range(1,10)] # - [bernoulli(j) for j in range(1,10)] # ## Bernoulli polynomials # The Bernoulli polynomials can also be given by a generating series # $$ # F(x,t) = \frac{te^{xt}}{e^{t}-1} = \sum_{n=0}^{\infty} B_n(x)\frac{t^n}{n!} # $$ # **Exercise**: Write a function that computes the $n$-th Bernoulli polynomial, $B_n(t)$ using the generating function. Compare this with the builtin function `bernoulli_polynomial`. # Write the first ($B_1$) Bernoulli polynomial in the HackMD. GF(3)['x,y']
Lesson3-EulerMcLaurin1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from pathlib import Path

# Collect, for every entry in the results directory, the last
# whitespace-separated token of the file paired with the file's name.
arr = []
for r in Path('/mnt/data2/ptf/co_exp/lemma_nostop_hyper/res').glob('*'):
    try:
        txt = r.read_text().split()[-1]
    # OSError: unreadable entry (e.g. a subdirectory); IndexError: empty file.
    # The original bare `except: pass` also silently hid unrelated bugs
    # such as NameError/AttributeError — catch only what we expect.
    except (OSError, IndexError):
        continue
    arr.append([txt, r.name])

# NOTE(review): entries sort lexicographically on the token; if the last
# token is a numeric score, use key=lambda x: float(x[0]) — TODO confirm.
sorted(arr, key=lambda x: x[0])
notebooks/04_first_exp/1_Results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.3.7-pre # language: julia # name: julia 0.3 # --- # + using BilliardModels using PyCall using PyPlot # - # [Link to heading](#Heading) # # Basic object construction w = Vector2D(3, 4) v = Vector2D([3., 4.]) p = Particle(v,v) q = Particle(w,w) p = Particle([1.,2.],[3.,4.]) d = Disc([1,2],3) d = Disc([1.,2.],3) # ## Testing with validated numerics: using ValidatedNumerics p = Particle([@interval(0.1), @interval(0.1)], [@interval(1), @interval(0)]) # # Collisions # + d = Disc([0, 0], 1) p = Particle([-2, 0], [1, 0]) @assert collision_time(p, d) == 1 p = Particle([0, 0], [1, 0]) l = Plane([1, 0], [1, 0]) @assert collision_time(p, l) == 1 # - table = Sinai_billiard(0.1) # + p = Particle([0.3, 0], [1, 0.1]) for obstacle in table.obstacles @show obstacle @show collision_time(p, obstacle) println() end # - calculate_next_collision(p, table, nothing) xs = billiard_dynamics(p, table, 100) @pyimport matplotlib.patches as patches fig, axes = plt.subplots(); include("../src/BilliardVisualisation.jl") axes draw(table, axes); # obstacles draw(xs, axes); # trajectory #sca(axes) axes[:axis]("image") fig # display the figure p2 = Particle([0.3, 0], [1, 0.101]) xs2 = billiard_dynamics(p2, table, 100) draw(xs2, axes); fig # # Using widgets: # + using BilliardModels using PyCall using PyPlot include("../src/BilliardVisualisation.jl") # - using Interact fig, axes = plt.subplots(); # + table = Sinai_billiard(0.1) max_particles = 10 max_bounces = 50 particles = Particle[] xxs = Any[] lines = Any[] x0 = Vector2D(0.3, 0.) 
for i in 1:max_particles push!(particles, Particle(x0, Vector2D(1., 0.1+0.00001*i))) push!(xxs, billiard_dynamics(particles[i], table, max_bounces)) push!(lines, axes[:plot]([], [], alpha=0.3)[1]) end # xx1 = billiard_dynamics(p1, table, 50) # returns a list of Vector2D # xx2 = billiard_dynamics(p2, table, 50) bdraw(table, axes) axes[:axis]("image") # line1 = axes[:plot]([], [])[1] # line2 = axes[:plot]([], [])[1] function draw_partial_line(line, xx) x = [pt.x for pt in xx] y = [pt.y for pt in xx] line[:set_data](x,y) end max_segment_length = max_bounces @manipulate for num_particles=1:max_particles, n=1:max_bounces, segment_length=1:max_segment_length withfig(fig) do for i in 1:num_particles # draw_partial_line(line1, xx1[1:n]) # draw_partial_line(line2, xx2[1:n]) draw_partial_line(lines[i], xxs[i][n:min(n+segment_length, max_bounces)]) end # delete other lines: for i in num_particles+1:max_particles draw_partial_line(lines[i], []) end end end # - # # Initial conditions using BilliardModels include("../src/BilliardVisualisation.jl") table = Sinai_billiard(0.1) isvalid(Vector2D(0.2, 0.), table) isvalid(Vector2D(0, 0), table) Plane([0.5, -0.5], [-1., 0.]) isvalid(Vector2D(3,0), table) x, v = initial_condition(table, -0.5, 0.5, -0.5, 0.5) isvalid(x, table) include("../src/BilliardVisualisation.jl") fig, axes = plt.subplots(); # + table = Sinai_billiard(0.1) draw(table, axes); # obstacles #sca(axes) axes[:axis]("image") #x, v = initial_condition(table, -0.5, 0.5, -0.5, 0.5) x = Vector2D(0.2, 0.2) v = Vector2D(0.1, 0.9) 1 = Particle(x, v) draw(p, axes, true); N = 20 xs = billiard_dynamics(p, table, N) draw(xs, axes); fig # - # # Lattice # + using BilliardModels include("../src/BilliardVisualisation.jl") using PyCall using PyPlot # - b = CellBoundary([0.5,0],[-1.,0.],Vector2D(0,0)) b2 = CellBoundary([-0.5,0],[1.,0.],Vector2D(0,0)) b.other_side = b2 b2.other_side = b x = Vector2D(0.2, 0.2) v = Vector2D(1., 0.) 
p = Particle(x, v) collision_time(p,b) billiard_table = Sinai_billiard(0.1,true,true) isa(b,BilliardModels.AbstractPlane) # + billiard_table = Sinai_billiard(0.1,true,true) x = Vector2D(0.2, 0.2) v = Vector2D(0.1, 0.9) l = Vector2D(0,0) p = ParticleOnLattice(x, v, l) xs, ls = billiard_dynamics_on_lattice(p, billiard_table, 3) # - ls using PyCall using PyPlot include("../src/BilliardVisualisation.jl") fig, axes = plt.subplots(); # + billiard_table = Sinai_billiard(0.354,true,true) #x = Vector2D(0.2, 0.2) #v = Vector2D(0.1, 0.9) x = Vector2D(0.3, 0.) v = Vector2D(0.99, 0.01) l = Vector2D(0, 0) p = ParticleOnLattice(x, v, l) #bdraw(p, axes, true); @time xs, ls, free_paths = billiard_dynamics_on_lattice(p, billiard_table, 100000) bdraw(billiard_table, axes); # obstacles bdraw(xs+ls, axes); fig # - for offset in unique(ls) bdraw(billiard_table, axes, offset) end # + h = 3 axes[:set_xlim](-h, h) axes[:set_ylim](-h, h) fig # - free free_paths plt.hist(free_paths[free_paths .< 20],20) using BilliardModels # + using BilliardModels billiard_table = Sinai_billiard(0.354,true,true) x = Vector2D(0.3, 0.) 
v = Vector2D(0.99, 0.01) l = Vector2D(0, 0) p = ParticleOnLattice(x, v, l) N = 1_000_000 xs, ls, free_paths = billiard_dynamics_on_lattice(p, billiard_table, 1); @time xs, ls, free_paths = billiard_dynamics_on_lattice(p, billiard_table, N); # - length(free_paths) maximum(free_paths) using PyPlot plt.hist(free_paths[2 .< free_paths .< 100], 1000, log=true, normed=true) plt.hist(free_paths, 1000, log=true, normed=true) h = hist(free_paths, 1000) h = hist(free_paths, 1000) bins = h[1] bins = 0.5*(bins[1:end-1] + bins[2:end]) loglog(bins, h[2]) plt.hist(free_paths, 100); sizeof(free_paths) sizeof(xs) sizeof(ls) h = hist(log(free_paths[free_paths .> 2]), 1000) bins = h[1] bins = 0.5*(bins[1:end-1] + bins[2:end]) semilogy(bins, h[2]) # # Testing that free paths are correct using BilliardModels # + billiard_table = Sinai_billiard(0.354, true, true) # periodic in x and y # x = Vector2D(0.3, 0.) # v = Vector2D(0.99, 0.01) x, v = initial_condition(billiard_table, -.5, .5, -.5, .5) l = Vector2D(0, 0) p = ParticleOnLattice(x, v, l) num_collisions = 10 xs, ls, free_paths = billiard_dynamics_on_lattice(p, billiard_table, num_collisions); # + positions = xs + ls lengths = [ norm(positions[i+1] - positions[i]) for i in 1:length(positions)-1 ] all(map(abs, free_paths - lengths) .< 1e-14) # - # # Continuous-time dynamics using BilliardModels # + billiard_table = Sinai_billiard(0.354, true, true) # periodic in x and y x, v = initial_condition(billiard_table, -.5, .5, -.5, .5) l = Vector2D(0, 0) p = ParticleOnLattice(x, v, l) num_collisions = 10 xs, ls, free_paths = billiard_dynamics_on_lattice(p, billiard_table, num_collisions) positions, times = continuous_time(xs, ls, free_paths, 0.25) x = [pos.x for pos in positions] y = [pos.y for pos in positions]; # - using PyPlot include("../src/BilliardVisualisation.jl") fig, axes = plt.subplots(figsize=(10,10)) # + # Draw discrete collisions and continuous interpolations: bdraw(billiard_table, axes) axes[:axis]("image") axes[:plot](x, y, 
"o-") bdraw(xs+ls, axes); # - fig
notebooks/Testing Lorentz Gas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%% raw\n"} active="" # # 1) Using a lambda expression, complete the mul_by_num function. This function should take an argument and return a one argument function that multiplies any value passed to it by the original number. Its body must be one line long: # def mul_by_num(num): # """ # Returns a function that takes one argument and returns num # times that argument. # >>> x = mul_by_num(5) # >>> y = mul_by_num(2) # >>> x(3) # 15 # >>> y(-4) # -8 # """ # "*** YOUR CODE HERE ***" # return ______ # # + pycharm={"name": "#%%\n", "is_executing": false} def mul_by_num(num) : return lambda n : n * num x = mul_by_num(5) y = mul_by_num(2) print(x(3)) print(y(-4)) # + pycharm={"is_executing": false} ''' 2) The Fibonacci numbers are the numbers in the following integer sequence. 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ........ In mathematical terms, the sequence Fn of Fibonacci numbers is defined by the recurrence relation: Fn=Fn-1+Fn-2withseedvalues F0=0andF1=1. Find the series of Fibonacci numbers using lambda function. ''' n = int(input("Please enter the lenght of fibonacci sequence = ")) fibVal = lambda n : (fib(n-1) + fib(n-2)) def fib(n): if n ==0 : return 0 elif n == 1 : return 1 else : return fibVal(n) for x in range (n) : print(fib(x)) # + pycharm={"is_executing": false} ''' 3) Create a script that check if a page is present on the server or return an error. Use the urllib seen during the lecture. 
''' import urllib.request import urllib.error as er try : with urllib.request.urlopen("http://goggle.com") as response: if response.code == 200: print("Page found") else : print("Page not found") except (ConnectionRefusedError, er.URLError) as fl: print(fl) # + pycharm={"is_executing": false} ''' 4) Write a program to get the current weather of a city given in input. You can use the following API documentation: https://openweathermap.org/current Hint: The GET request should have the following string appended at the end of the query for auth: APPID=<KEY> You should retrieve for the city: Temperature: 12.32°C Wind speed: 8.7 m/s Description: moderate rain Weather: Rain ''' import requests key = "<KEY>" cty = str(input("Please enter the city name : ")) url = "https://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={api_key}".format(city_name = cty, api_key = key) try : response = requests.get(url) response_json = response.json() for k in response_json.keys() : print("{key} : {value}".format(key = k,value = response_json[k])) except : print("ERROR") # + pycharm={"is_executing": false, "name": "#%% raw\n"} active="" # Write a program to read the xml file people.xml and output a csv file and json file with the same information. 
# Validate the Json using https://jsonlint.com/ Import the CSV using Excel # # + pycharm={"name": "#%%\n", "is_executing": false} import pandas as pd import xml.etree.ElementTree as ET file = ET.parse("/Users/sobil/Documents/MSC/Sem 1/Database & Analytical Programming/Lab/Lab - 2/people.xml") people = file.getroot() for child in people: print(child.tag, child.attrib) # checking all elements print ([elem.tag for elem in people.iter()]) # print whole file print("\n\n", ET.tostring(people, encoding='utf8').decode('utf8')) # fetching particular fields for person in people.iter("Name") : print(person.text) # + pycharm={"name": "#%%\n", "is_executing": false} # answer import xml.etree.ElementTree as ET file = ET.parse("/Users/sobil/Documents/MSC/Sem 1/Database & Analytical Programming/Lab/Lab - 2/people.xml") people = file.getroot() print(ET.tostring(people, encoding='utf8').decode('utf8')) # + pycharm={"name": "#%%\n", "is_executing": false} # write to csv ### Convert XML to JSON and CSV import csv import json import xmltodict import pandas as pd with open("./people.xml","r") as file : xml = file.read() #### Convert to JSON with open("./people.json","w") as jsonFile : json.dump(xmltodict.parse(xml), jsonFile) # + pycharm={"name": "#%%\n", "is_executing": false} import csv # open a file for writing Resident_data = open('residentData.csv', 'w') # create the csv writer object csvwriter = csv.writer(Resident_data) resident_head = [] count = 0 for member in people.findall('Resident'): resident = [] address_list = [] if count == 0: name = member.find('Name').tag resident_head.append(name) PhoneNumber = member.find('PhoneNumber').tag resident_head.append(PhoneNumber) EmailAddress = member.find('EmailAddress').tag resident_head.append(EmailAddress) Address = member[3].tag resident_head.append(Address) csvwriter.writerow(resident_head) count = count + 1 name = member.find('Name').text resident.append(name) PhoneNumber = member.find('PhoneNumber').text 
resident.append(PhoneNumber) EmailAddress = member.find('EmailAddress').text resident.append(EmailAddress) Address = member[3][0].text address_list.append(Address) City = member[3][1].text address_list.append(City) StateCode = member[3][2].text address_list.append(StateCode) PostalCode = member[3][3].text address_list.append(PostalCode) resident.append(address_list) csvwriter.writerow(resident) Resident_data.close()
DAP_lab2/DAP_Lab2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PySpark 2.3 (Python 3) # language: python # name: pyspark3 # --- # # Repartitioning DataFrames # # Partitions are a central concept in Apache Spark. They are used for distributing and parallelizing work onto different executors, which run on multiple servers. # # ### Determining Partitions # Basically Spark uses two different strategies for splitting up data into multiple partitions: # 1. When Spark loads data, the records are put into partitions along natural borders. For example every HDFS block (and thereby every file) is represented by a different partition. Therefore the number of partitions of a DataFrame read from disk is solely determined by the number of HDFS blocks # 2. Certain operations like `JOIN`s and aggregations require that records with the same key are physically in the same partition. This is achieved by a shuffle phase. The number of partitions is specified by the global Spark configuration variable `spark.sql.shuffle.partitions` which has a default value of 200. # # ### Repartitiong Data # Since partitions have a huge influence on the execution, Spark also allows you to explicitly change the partitioning schema of a DataFrame. This makes sense only in a very limited (but still important) set of cases, which we will discuss in this notebook. # # ### Weather Example # Surprise, surprise, we will again use the weather example and see what explicit repartitioning gives us. # ### Disable Automatic Broadcast JOINs # In order to see the shuffle operations, we need to prevent Spark from executiong `JOIN` operations as broadcast joins. Again this can be turned off by setting the Spark configuration variable `spark.sql.autoBroadcastJoinThreshold` to -1. 
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) # # 1 Load Data # # First we load the weather data, which consists of the measurement data and some station metadata. storageLocation = "s3://dimajix-training/data/weather" # ## 1.1 Load Measurements # # Measurements are stored in multiple directories (one per year). But we will limit ourselves to a single year in the analysis to improve readability of execution plans. # + from pyspark.sql.functions import * from functools import reduce # Read in all years, store them in an Python array raw_weather_per_year = [spark.read.text(storageLocation + "/" + str(i)).withColumn("year", lit(i)) for i in range(2003,2015)] # Union all years together raw_weather = reduce(lambda l,r: l.union(r), raw_weather_per_year) # - # Use a single year to keep execution plans small raw_weather = spark.read.text(storageLocation + "/2003").withColumn("year", lit(2003)) # ### Extract Measurements # # Measurements were stored in a proprietary text based format, with some values at fixed positions. We need to extract these values with a simple `SELECT` statement. weather = raw_weather.select( col("year"), substring(col("value"),5,6).alias("usaf"), substring(col("value"),11,5).alias("wban"), substring(col("value"),16,8).alias("date"), substring(col("value"),24,4).alias("time"), substring(col("value"),42,5).alias("report_type"), substring(col("value"),61,3).alias("wind_direction"), substring(col("value"),64,1).alias("wind_direction_qual"), substring(col("value"),65,1).alias("wind_observation"), (substring(col("value"),66,4).cast("float") / lit(10.0)).alias("wind_speed"), substring(col("value"),70,1).alias("wind_speed_qual"), (substring(col("value"),88,5).cast("float") / lit(10.0)).alias("air_temperature"), substring(col("value"),93,1).alias("air_temperature_qual") ) # ## 1.2 Load Station Metadata # # We also need to load the weather station meta data containing information about the geo location, country etc of individual weather stations. 
stations = spark.read \ .option("header", True) \ .csv(storageLocation + "/isd-history") # # 2 Partitions # # Since partitions is a concept at the RDD level and a DataFrame per se does not contain an RDD, we need to access the RDD in order to inspect the number of partitions. # + # YOUR CODE HERE # - # ## 2.1 Repartitioning Data # # You can repartition any DataFrame by specifying the target number of partitions and the partitioning columns. While it should be clear what *number of partitions* actually means, the term *partitionng columns* might require some explanation. # # ### Partitioning Columns # Except for the case when Spark initially reads data, all DataFrames are partitioned along *partitioning columns*, which means that all records having the same values in the corresponding columns will end up in the same partition. Spark implicitly performs such repartitioning as shuffle operations for `JOIN`s and grouped aggregation (except when a DataFrame already has the correct partitioning columns and number of partitions) # # ### Manual Repartitioning # As already mentioned, you can explicitly repartition a DataFrame using teh `repartition()` method. # + # YOUR CODE HERE # - # ## 2.2 Repartition & Joins # # As already mentioned, Spark implicitly performs a repartitioning aka shuffle for `JOIN` operations. # ### Execution Plan # # So let us inspect the execution plan of a `JOIN` operation. result = weather.join(stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"])) result.explain() # ### Remarks # # As we already discussed, each `JOIN` is executed with the following steps # 1. Filter `NULL` values (it's an inner join) # 2. Repartition DataFrame on the join columns with 200 partitions # 3. Sort each partition independently # 4. Perform a `SortMergeJoin` # ### Pre-partition data (first try) # # Now let us try if we can cache the shuffle (repartition) and sort operation. 
This is useful in cases, where you have to perform multiple joins on the same set of columns, for example with different DataFrames. # # So let's simply repartition the `weather` DataFrame on the two columns `usaf` and `wban`. We also have to use 200 partitions, because this is what Spark will use for `JOIN` operations. weather_rep = # YOUR CODE HERE weather_rep.cache() # #### Execution Plan # # Let's analyze the resulting execution plan. Ideally all the preparation work before the `SortMergeJoin` happens before the `cache` operation. result = # YOUR CODE HERE result.explain() # #### Remarks # We did not reach completely what we wanted. The `sort` and `filter` operation still occur after the cache. # ### Pre-partition data (second try) # # We already partially achieved our goal of caching all preparational work of the `SortMergeJoin`, but the sorting was still preformed after the caching. So let's try to insert an appropriate sort operation. # + # Release cache to simplify execution plan weather.unpersist() weather_rep = # YOUR CODE HERE weather_rep.cache() # - # #### Execution Plan result = weather_rep.join(stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"])) result.explain() # #### Remarks # # We actually created a worse situation: Now we have two sort operations! Definately not what we wanted to have. # # So let's think for a moment: The `SortMergeJoin` requires that each partition is sorted, but after the repartioning occured. The `orderBy` operation we used above will create a global order over all partitions (and thereby destroy all the repartition work immediately). So we need something else, which still keeps the current partitions but only sort in each partition independently. # ### Pre-partition data (final try) # # Fortunately Spark provides a `sortWithinPartitions` method, which does exactly what it sounds like. 
# + # Release cache to simplify execution plan weather.unpersist() weather_rep = # YOUR CODE HERE weather_rep.cache() # - # #### Execution Plan result = weather_rep.join(stations, (weather["usaf"] == stations["usaf"]) & (weather["wban"] == stations["wban"])) result.explain() # #### Remarks # # That looks really good. The filter operation is still executed after the cache, but that cannot be cached such that Spark uses this information. # # So whenever you want to prepartition data, you need to execute the following steps: # * repartition with the join columns and default number of partitions # * sortWithinPartitions with the join columns # * probably cache (otherwise there is no benefit at all) # ### Inspect WebUI # # We can also inspect the WebUI and see how everything is executed. # Phase 1: Build cache # + # YOUR CODE HERE # - # Phase 2: Use cache # + # YOUR CODE HERE # - # # 3 Repartition & Aggregations # # Similar to `JOIN` operations, Spark also requires an appropriate partitioning in grouped aggregations. Again, we can use the same strategy and appropriateky prepartition data in cases where multiple joins and aggregations are performed using the same columns. # ## 3.1 Simple Aggregation # # So let's perform the usual aggregation (but this time without a previous `JOIN`) with groups defined by the station id (`usaf` and `wban`). result = weather.groupBy(weather["usaf"], weather["wban"]).agg( min(when(weather.air_temperature_qual == lit(1), weather.air_temperature)).alias('min_temp'), max(when(weather.air_temperature_qual == lit(1), weather.air_temperature)).alias('max_temp'), ) result.explain() # ### Remarks # Each grouped aggregation is executed with the following steps: # 1. Perform partial aggregation (`HashAggregate`) # 2. Shuffle intermediate result (`Exchange hashpartitioning`) # 3. 
Perform final aggregation (`HashAggregate`) # ## 3.2 Aggregation after repartition # # Now let us perform the same aggregation, but this time let's use the prepartitioned weather data set `weather_rep` instead. result = weather_rep.groupBy(weather["usaf"], weather["wban"]).agg( min(when(weather_rep.air_temperature_qual == lit(1), weather_rep.air_temperature)).alias('min_temp'), max(when(weather_rep.air_temperature_qual == lit(1), weather_rep.air_temperature)).alias('max_temp'), ) result.explain() # ### Remarks # Spark obviously detects the correct partitioning of the cached `weather_rep` DataFrame. The sorting actually is not required, but does not hurt either (except performance...). Therefore only two steps are executed after the cache operation: # 1. Partial aggregation (`HashAggregate`) # 2. Final aggregation (`HashAggregate`) # # But note that although you saved a shuffle operation of partial aggregates, in most cases it is not advisable to prepartition data only for aggregations for the following reasons: # * You could perform all aggregations in a single `groupBy` and `agg` chain # * In most cases the preaggregated data is significantly smaller than the original data, therefore the shuffle doesn't hurt that much # # 4 Interaction between Join, Aggregate & Repartition # # Now we have seen two operations which require a shuffle of the data. Of course Spark is clever enough to avoid an additional shuffle operation in chains of `JOIN` and grouped aggregations, which use the same aggregation columns. # ## 4.1 Aggregation after Join on same key # # So let's see what happens with a grouped aggregation after a join operation. 
spark.conf.set("spark.sql.autoBroadcastJoinThreshold", -1) joined = # YOUR CODE HERE result = joined.groupBy(weather["usaf"], weather["wban"]).agg( min(when(joined.air_temperature_qual == lit(1), joined.air_temperature)).alias('min_temp'), max(when(joined.air_temperature_qual == lit(1), joined.air_temperature)).alias('max_temp'), ) result.explain() # ### Remarks # # As you can see, Spark performs a single shuffle operation. The order of operation is as follows: # 1. Filter `NULL` values (it's an inner join) # 2. Shuffle data on `usaf` and `wban` # 3. Sort partitions by `usaf` and `wban` # 4. Perform `SortMergeJoin` # 5. Perform partial aggregation `HashAggregate` # 6. Perform final aggregation `HashAggregate` # ## 4.2 Aggregation after Join using repartitioned data # # Of course we can also use the pre-repartitioned weather DataFrame. This will work as expected, Spark does not add any additional shuffle operation. joined = # YOUR CODE HERE result = joined.groupBy(weather["usaf"], weather["wban"]).agg( min(when(joined.air_temperature_qual == lit(1), joined.air_temperature)).alias('min_temp'), max(when(joined.air_temperature_qual == lit(1), joined.air_temperature)).alias('max_temp'), ) result.explain() # ## 4.3 Aggregation after Join with different key # # So far we only looked at join and grouping operations using the same keys. If we use different keys (for example the country) in both operations, we expect Spark to add an additional shuffle operations. Let's see... result = # YOUR CODE HERE result.explain() # ## 4.4 Aggregation after Broadcast-Join # # If we use a broadcast join instead of a sort merge join, the we will have a shuffle operation for the aggregation again (since the broadcast join just avoids the shuffle). Let's verify that theory... result = # YOUR CODE HERE result.explain() # # 5 Coalesce # # There is another use case for changing the number of partitions: Writing results to HDFS/S3/whatever. 
Per design Spark writes each partition into a separate file, and there is no way around that. But when partitions do not contain many records, this may not only be ugly, but also unperformant and might cause additional trouble. Specifically currently HDFS is not designed to handle many small files, but prefers fewer large files instead. # # Therefore it is often desireable to reduce the number of partitions of a DataFrame just before writing the result to disk. You could perform this task by a `repartition` operation, but this is an expensive operation requiring an additional shuffle operation. Therefore Spark provides an additional method called `coalesce` which can be used to reduce the number of partitions without incurring an additional shuffle. Spark simply logically concatenates multiple partitions into new partitions. # ### Inspect Number of Partitions # # For this example, we will use the `weather_rep` DataFrame, which contains exactly 200 partitions. # + # YOUR CODE HERE # - # ## 5.1 Merge Partitions using coalesce # # In order to reduce the number of partitions, we simply use the `coalesce` method. # + # YOUR CODE HERE # - # ### Inspect WebUI # + # YOUR CODE HERE # - # ## 5.2 Saving files # # We already discussed that Spark writes a separate file per partition. So let's see the result when we write the `weather_rep` DataFrame containing 200 partitions. # ### Write 200 Partitions # + # YOUR CODE HERE # - # #### Inspect the Result # Using a simple HDFS CLI util, we can inspect the result on HDFS. # + language="bash" # hdfs dfs -ls /tmp/weather_rep # - # ### Write 16 Partitions # # Now let's write the `coalesce`d DataFrame and inspect the result on HDFS weather_small.write.mode("overwrite").parquet("/tmp/weather_small") # #### Inspect Result # + language="bash" # hdfs dfs -ls /tmp/weather_small # -
spark-training/spark-python/jupyter-advanced/08 - Repartitioning - Skeleton.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + init_cell=true pycharm={"is_executing": false} # %matplotlib inline # %config InlineBackend.figure_format = 'svg' import scqubits as scq from scqubits import HilbertSpace, InteractionTerm, ParameterSweep import numpy as np # - # # Running Parameter Sweeps # Determining the dependence of physical observables on an external parameter is a common way to gain intuition about the characteristics or behavior of a system. Such parameter sweeps can be performed with scqubits on multiple levels: # # * at the level of a single qubit, # * at the level of a composite quantum system. # # ## Parameter sweeps for a single qubit # At the single-qubit level, each qubit class provides several methods that enable producing parameter sweep data and plots. Central quantities of interest, in this case, are energy eigenvalues and matrix elements -- in particular, their dependence on parameters like flux or offset charge. # # The relevant methods available for every implemented qubit class are: # # | class method | purpose | # |---------------------------------------------|-------------------------------| # | `<qubit>.get_spectrum_vs_paramvals(...)` | for each provided value of a specified qubit parameter, compute eigenvalues and eigenvectors | # | `<qubit>.get_matelements_vs_paramvals(...)` | for each provided value of a specified qubit parameter, compute matrix elements of a given operator w.r.t. the qubit eigenstates | # | `<qubit>.plot_evals_vs_paramvals(...)` | plot the energy eigenvalues as a function of a specified qubit parameter | # | `<qubit>.plot_matelem_vs_paramvals(...)` | plot the matrix elements for a given operator as a function of a specified qubit parameter | # # The following code illustrates this functionality for the example of a fluxonium qubit. 
fluxonium_qbt = scq.Fluxonium.create() # define an array of flux values fluxvals = np.linspace(0, 1, 80) fluxonium_qbt.plot_evals_vs_paramvals(param_name='flux', param_vals=fluxvals); # Note: the argument `param_name` must be one of the parameters with which the qubit in question is initialized. (More flexibility can be achieved by using the `ParameterSweep` class below.) # # To generate spectral data, and return them in the form of a `SpectrumData` object, we can instead use: specdata = fluxonium_qbt.get_spectrum_vs_paramvals(param_name='flux', param_vals=fluxvals); specdata # To retrieve eigenvalues and eigenvectors, one simply accesses the attributes `.energy_table` and `.state_table`. Furthermore, `SpectrumData` itself allows one to produce a plot of the eigenvalues from the generated data. # # These are the lowest six eigenenergies (in GHz, by default) for the first flux value: specdata.energy_table[0] # And this generates the eigenenergy plot: specdata.plot_evals_vs_paramvals(); # In a similar manner, one can generate a plot of matrix elements as a function of a parameter value. (Since matrix elements are generally complex-valued, the absolute value is plotted by default. The `mode` argument allows for additional options.) fluxonium_qbt.plot_matelem_vs_paramvals(operator='phi_operator', param_name='flux', param_vals=fluxvals); # ## Parameter sweeps in composite Hilbert spaces # Composite Hilbert spaces, as defined by `HilbertSpace` objects, are more complicated than individual qubits, and a variety of parameter sweeps may be of interest. The parameter to be varied does not need to be a qubit parameter, but could be a coupling strength or other quantity. # # To enable such flexibility, scqubits provides the `ParameterSweep` class. In order to show its usage, we first define a composite Hilbert space - using the example of two tunable transmon qubits coupled to an oscillator. (See the `HilbertSpace` section in the user guide for details on this topic.) 
# + pycharm={"is_executing": false} tmon1 = scq.TunableTransmon( EJmax=40.0, EC=0.2, d=0.1, flux=0.0, ng=0.3, ncut=40, truncated_dim=4 # after diagonalization, we will keep 3 levels ) tmon2 = scq.TunableTransmon( EJmax=15.0, EC=0.15, d=0.2, flux=0.0, ng=0.0, ncut=30, truncated_dim=4 ) resonator = scq.Oscillator( E_osc=4.5, truncated_dim=4 # up to 3 photons (0,1,2,3) ) g1 = 0.1 # coupling resonator-CPB1 (without charge matrix elements) g2 = 0.2 # coupling resonator-CPB2 (without charge matrix elements) interaction1 = InteractionTerm( g_strength = g1, op1 = tmon1.n_operator(), subsys1 = tmon1, op2 = resonator.creation_operator() + resonator.annihilation_operator(), subsys2 =resonator ) interaction2 = InteractionTerm( g_strength = g2, op1 = tmon2.n_operator(), subsys1 = tmon2, op2 = resonator.creation_operator() + resonator.annihilation_operator(), subsys2 = resonator ) interaction_list = [interaction1, interaction2] hilbertspace = scq.HilbertSpace([tmon1, tmon2, resonator], interaction_list=interaction_list) # - # The `ParameterSweep` class facilitates computation of spectra as function of an external parameter. For efficiency in computing a variety of quantities, and creating different kinds of plots, the computed bare and dressed spectral data are stored internally. # # A `ParameterSweep` object is initialized by providing the following parameters: # # 1. `param_name`: the name of the sweep parameter; this is purely informative and does not need to correspond to a qubit class attribute # 2. `param_vals`: an array of parameter values used for the sweep # 3. `subsys_update_list`: a list containing those Hilbert space subsystems that change as the parameter of interest is varied # 4. `update_hilbertspace(param_val)`: a function that defines how a change in the sweep parameter affects the Hilbert space # # These ingredients all enter as initialization arguments of the `ParameterSweep` object. Once initialized, spectral data is generated and stored. 
# # In our example, the parameter to be changed will be a global magnetic field that affects the fluxes for both qubits, in proportions according to their SQUID loop areas. We will reference the flux for transmon 1, and use a flux for transmon 2 adjusted by an area ratio: # # + param_name = 'tmon1 flux' # name of varying external parameter flux_vals = np.linspace(0.0, 2.0, 300) # parameter values area_ratio = 1.2 subsys_update_list = [tmon1, tmon2] # list of HilbertSpace subsystems which are affected by parameter changes def update_hilbertspace(flux_val): # function that defines how Hilbert space components are updated tmon1.flux = flux_val tmon2.flux = area_ratio * flux_val sweep = ParameterSweep( param_name=param_name, param_vals=flux_vals, evals_count=20, hilbertspace=hilbertspace, subsys_update_list=subsys_update_list, update_hilbertspace=update_hilbertspace, ) # - # Once the data has been generated, the spectrum can be plotted as a function of the parameter value. For this, we use a function from the `sweep_plotting` module. Its functions take a `ParameterSweep` object as input. To plot the dressed spectrum vs the chosen parameter, we execute # + import scqubits.utils.sweep_plotting as splot splot.dressed_spectrum(sweep); # - # The above plot shows the eigenenergies of the coupled system. Easily visible are the set of nearly horizontal lines (interrupted by various avoided crossings) with approximately equal spacing, corresponding to the excitation levels of the harmonic oscillator. With careful inspection one can also identify the set of levels of each transmon, distinguished by their difference in flux-periodicity: `tmon1` with period "1" and `tmon2` with period "1.2". # # The `sweep_plotting` module has several other functions for other plot varieties. 
For instance, we can select one of the subsystems and plot the bare spectrum (in the absence of interaction): splot.bare_spectrum(sweep, subsys=tmon2, subtract_ground=True) # # # The `sweep_plotting` module has several other functions for other plot varieties: # # # | plot function | purpose | # |---------------------------------|-----------------------------------------------------| # | `dressed_spectrum(...)` | eigenenergies of the coupled system, see above | # | `bare_spectrum(...)` | bare energies of a selected subsystem, see above | # | `difference_spectrum(...)` | transition energies as seen when starting the system in a given initial state other than the ground state | # | `n_photon_qubit_spectrum(...)` | n-photon energies for transitions among qubit levels (meaningful in the dispersive regime) | # # More details including the plot functions' arguments can be found in the [API documentation](../../api-doc/functions.rst).
docs/source/guide/ipynb/paramsweep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import keras
import numpy as np
import os
import pickle
import gzip
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score

# Number of prediction steps; one decision tree is trained per step.
step_count = 6
base_path = 'C:/Users/abdul/Desktop/forest_python/'
data_path = os.path.join(base_path, 'data')
pickle_folder = os.path.join(data_path, 'pickles')
trips_file_path = os.path.join(data_path, 'map_matched_all/')
mapped_dict_file_name = 'mapped_dict.json'
nodes_file_name = 'chengdu_nodes.json'


# +
def gen_weights(Y_train, sig=False, relu=False):
    """Count label frequencies in Y_train; optionally squash the counts.

    `sig` takes precedence over `relu` when both are set.
    Returns a dict mapping label -> weight.
    """
    weights = {}
    for y in Y_train:
        weights[y] = weights.get(y, 0) + 1
    if sig:
        return sigmoid_weights(weights)
    if relu:
        return relu_weights(weights)
    return weights


def relu_weights(weights):
    """ReLU-like thresholding: keep counts above `low`, floor the rest."""
    weights_relu = {}
    low = 50
    for n, w in weights.items():
        if w > low:
            weights_relu[n] = w
        else:
            weights_relu[n] = -999.0
    # BUG FIX: the original never returned, so gen_weights(..., relu=True)
    # silently yielded None.
    return weights_relu


def sigmoid_weights(weights):
    """Map each raw count through the logistic function 1 / (1 + e^-w)."""
    import math
    weights_sigmoid = {}
    for n, w in weights.items():
        weights_sigmoid[n] = 1 / (1 + math.exp(-w))
    return weights_sigmoid
# -


def print_accuracy(dtree, X_test, Y_test, step):
    """Print the mean accuracy of `dtree` over 10 consecutive slices of the test set."""
    # NOTE(review): averaging per-chunk accuracies equals the overall accuracy
    # only when all chunks have equal size; kept for parity with earlier runs.
    total_acc = 0
    for i in range(10):
        min_index = round((i * 0.1) * len(Y_test))
        max_index = round((i+1) * 0.1 * len(Y_test))
        x = X_test[min_index:max_index]
        y = Y_test[min_index:max_index]
        pred = dtree.predict(x)
        total_acc += accuracy_score(y, pred)
    total_acc /= 10.0
    print(f'Accuracy for step {step}: {total_acc}')


def train_and_save(step, X, Y):
    """Fit a class-weighted decision tree for `step` and gzip-pickle it."""
    X_train = X
    Y_train = Y
    weights = gen_weights(Y_train, sig=True)
    dtree2 = DecisionTreeClassifier(class_weight=weights)
    dtree2.fit(X_train, Y_train)
    # context manager guarantees the file is closed even if pickling fails
    with gzip.open(os.path.join(pickle_folder, f'dtree_{step}.pkl'), 'wb') as fp:
        pickle.dump(dtree2, fp)


def train_save_all():
    """Train and persist one tree per step (1..step_count)."""
    for step in range(1, step_count+1):
        pickle_x = os.path.join(pickle_folder, f'X_{step}.pkl')
        pickle_y = os.path.join(pickle_folder, f'Y_{step}.pkl')
        # close the data files deterministically (originally leaked handles)
        with open(pickle_x, "rb") as fx:
            X: np.ndarray = pickle.load(fx).astype('int16')
        with open(pickle_y, "rb") as fy:
            Y: np.ndarray = pickle.load(fy).astype('int16')
        train_and_save(step, X, Y)


def load_predict(step):
    """Load the pickled tree for `step` and report accuracy on the first 2000 samples."""
    pickle_x = os.path.join(pickle_folder, f'X_{step}.pkl')
    pickle_y = os.path.join(pickle_folder, f'Y_{step}.pkl')
    with open(pickle_x, "rb") as fx:
        X: np.ndarray = pickle.load(fx).astype('int16')[:2000]
    with open(pickle_y, "rb") as fy:
        Y: np.ndarray = pickle.load(fy).astype('int16')[:2000]
    with gzip.open(os.path.join(pickle_folder, f'dtree_{step}.pkl'), 'rb') as fp:
        dtree = pickle.load(fp)
    print_accuracy(dtree, X, Y, step)


# +
def load_dtree(step: int) -> DecisionTreeClassifier:
    """Load the gzip-pickled decision tree trained for `step`."""
    with gzip.open(os.path.join(pickle_folder, f'dtree_{step}.pkl'), 'rb') as fp:
        return pickle.load(fp)


# BUG FIX: `dt` was used below while its loader was commented out (NameError).
dt = load_dtree(1)

feature = [np.array([0,0,0,3,4,5,6,9])]
print(dt.predict(feature))
p = dt.predict_proba(feature).tolist()
sp = None
for _p in p:
    # BUG FIX: `if not sp` raises ValueError once sp is a multi-element
    # ndarray (ambiguous truth value); test for None explicitly.
    if sp is None:
        sp = np.add(_p, 0)
    else:
        sp = np.add(sp, _p)
dt.classes_[np.argmax(sp)]
# s = dict(zip(dt.classes_, ))
# -

np.add(np.array([0,0,0,3,4,5,6,9]), 2)

# +
'''
15=n, 70=depth 500k in, sigmod
Accuracy for step 1: 0.84487
Accuracy for step 2: 0.7530699999999999
Accuracy for step 3: 0.67774
Accuracy for step 4: 0.6162099999999999
Accuracy for step 5: 0.5616700000000001
Accuracy for step 6: 0.51528

20=n, 80=depth
Accuracy for step 1: 0.8244
Accuracy for step 2: 0.731925
Accuracy for step 3: 0.657875
Accuracy for step 4: 0.5936250000000001
Accuracy for step 5: 0.5365
Accuracy for step 6: 0.49605000000000005'''

'''
n_estimators=30, max_depth=70, sigmoid
Accuracy for step 1: 0.8217000000000001
Accuracy for step 2: 0.734675
Accuracy for step 3: 0.6575
Accuracy for step 4: 0.5940000000000001
Accuracy for step 5: 0.5400499999999999
Accuracy for step 6: 0.490725'''
# -

'''
No weights applied
Accuracy for step 1: 0.8217000000000001
Accuracy for step 2: 0.7322750000000001
Accuracy for step 3: 0.6553000000000001
Accuracy for step 4: 0.593425
Accuracy for step 5: 0.5380499999999999
Accuracy for step 6: 0.49262500000000004

SIGMOID:
Accuracy for step 1: 0.8404
Accuracy for step 2: 0.736125
Accuracy for step 3: 0.6582250000000001
Accuracy for step 4: 0.59445
Accuracy for step 5: 0.5404
Accuracy for step 6: 0.497725

RELU:
Accuracy for step 1: 0.828
Accuracy for step 2: 0.7473500000000001
Accuracy for step 3: 0.6609499999999999
Accuracy for step 4: 0.5929000000000001
Accuracy for step 5: 0.54105
Accuracy for step 6: 0.49315
'''
notebooks/model_gen.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CNN for Classification of Quantum Measurements # Thanks to <NAME> for providing the original code and the original data [arXiv:1904.04635]. # %matplotlib inline import h5py import numpy as np import matplotlib.pyplot as plt from scipy.signal import hilbert hdf = h5py.File(r"datasets/example_data_very_short.h5", "r") data = np.array(hdf["data_tensor"]) print(data.shape) [np.mean(data), np.var(data), np.std(data)] d_transform = data d_transform.shape # ## Visualisation of the Traces V(t) d_av = np.mean(d_transform, axis=0) t = np.array(range(len(d_transform[0, 0]))) print(t[0], t[-1]) xf = np.arange(len(t))*1./t[-1] print(xf[0], xf[-1]) # plot average signals plt.figure(figsize=(15,4)) plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 plt.xlabel("time", fontsize=14) plt.ylabel("voltage (scaled)", fontsize=14) plt.plot(xf, d_av[0], "b-", label="ground state") plt.plot(xf, d_av[1], "r-", label="excited state") plt.legend(loc="best", fontsize=14) plt.show() d_av.shape # ## Demodulation (for a given set of parameters) # d = d_transform #Construction of the demodulation weight function w(t) = x + iy x = (d_av[1]-d_av[0])/2. 
y = np.real(np.fft.irfft(np.fft.rfft(x, axis = -1)*1j,x.shape[0], axis = -1))
#Normalizing : actual axis not calibrated for this experiment
x = x/np.sqrt(np.sum(x**2))
y = y/np.sqrt(np.sum(y**2))
#Demodulation
dIQ = d*(x+1j*y)
dint = np.average(dIQ,axis = -1)
dint = dint.T
dint.shape

# Histogram functions
a = np.amax(np.abs(dint)) #fixing the range
r = [[-a,a],[-a,a]]

def hist(dint,ge):
    """50x50 IQ-plane histogram over the fixed range r for state ge (0=g, 1=e)."""
    # BUG FIX: the `normed` keyword was removed from np.histogram2d in
    # NumPy 1.24; `density=False` is the equivalent (and default) setting.
    h = np.histogram2d(np.real(dint[ge]),np.imag(dint[ge]), bins = 50, density = False, range = r)[0]
    return h

def ol(dint): # 2d scalar product
    """Normalized overlap (cosine similarity) between the g and e histograms."""
    Pg = hist(dint,0)
    Pe = hist(dint,1)
    o = np.sum(Pg*Pe)/(np.sqrt(np.sum(Pg**2))*np.sqrt(np.sum(Pe**2)))
    return o

print("Overlap : "+str(ol(dint)*100) +"%")

def Z(data,ge): #Defining the zone associated with either g or e
    """Mask of bins assigned to state ge (majority vote; ties count as 0.5)."""
    z = hist(data,ge) > hist(data,1-ge)
    z = z + (hist(data,ge) == hist(data,1-ge))*0.5
    return z

def ol_z(data,ge): #Computation of the error
    """Fraction of state-ge counts that fall inside the ge zone."""
    o = np.sum(Z(data,ge)*hist(data,ge))/np.sum(hist(data,ge))
    return o

print("Error by zone, g: "+str((1-ol_z(dint,0))*100) +"%, e: "+str((1-ol_z(dint,1))*100) +"%")

fidelity = ((ol_z(dint,0)) + (ol_z(dint,1)))/2 * 100
print("Fidelity: " + str(fidelity) + "%" )

# make plot with 2 subplots
fig, ax = plt.subplots(1,2, figsize=(10, 4))
# subplot 1
plot = ax[0].pcolormesh(hist(dint,0)) # histogram for ground state
fig.colorbar(plot,ax=ax[0],orientation="vertical")
ax[0].set_xlabel("I") # Re[\beta], see Eq. (2)
ax[0].set_ylabel("Q") # Im[\beta], see Eq. (2)
ax[0].set_title("g") # ground state label
ax[0].axis("tight") # save space
ax[0].set_aspect(1) # aspect ratio
# subplot 2
plot = ax[1].pcolormesh(hist(dint,1)) # histogram for excited state; as above
fig.colorbar(plot,ax=ax[1],orientation="vertical")
ax[1].set_xlabel("I") # Re[\beta], see Eq.
(2) ax[1].set_title("e") # excited state label ax[1].axis("tight") # save space ax[1].set_aspect(1) # show all subplots plt.show() # ## Dataset Preparation d_g = d_transform[:, 0, :] # ground state features class_g = np.array([[0 for i in range(len(d_g))]]).T # ground state labels d_g = np.concatenate((class_g, d_g), axis=1) # ground state features, labels print(d_g.shape) d_g[0:3, 0:2] d_g = d_transform[:, 0, :] # ground state features class_g = np.array([[0 for i in range(len(d_g))]]).T # ground state labels d_g = np.concatenate((class_g, d_g), axis=1) # ground state features, labels d_e = d_transform[:, 1, :] # excited state features class_e = np.array([[1 for i in range(len(d_e))]]).T # excited state labels d_e = np.concatenate((class_e, d_e), axis=1) # excited state features, labels ge_data = np.concatenate((d_g, d_e), axis=0) # ground and excited state labels, features print(ge_data.shape) # instances, label + features # for np.random.seed, see https://stackoverflow.com/questions/21494489/what-does-numpy-random-seed0-do/21494630 np.random.seed(0) shuffle_index = np.random.permutation(len(ge_data)) # create a random order (via indices) print(shuffle_index[:5]) # show a few randomized indices first_5_pseudo_random = np.array([242160, 3413, 393024, 247987, 52436]) print(np.linalg.norm(first_5_pseudo_random - shuffle_index[:5]) == 0) ge_shuffle_data = ge_data[shuffle_index] # apply the random order X_data = ge_shuffle_data[:, 1:] # select the features ... Y_data = ge_shuffle_data[:, :1] # ... and the labels print(X_data.shape) # shape of feature data print(Y_data.shape) # shape of label data print(X_data[:10, :5]) # for the first 10 instances, show the first 5 features, ... Y_data[:10] # ... and the according label print(len(Y_data)) split1 = int(0.8 * len(Y_data)) split2 = int(0.8 * split1) # use test data for evaluation of the final (!) 
model (after development) X_test, Y_test = X_data[split1:], Y_data[split1:] print(X_test.shape, Y_test.shape) # use validation data for early stopping and evaluation of different models (during development) X_valid, Y_valid = X_data[split2:split1], Y_data[split2:split1] print(X_valid.shape, Y_valid.shape) # use training data for training / fitting different models (during development) X_train, Y_train = X_data[:split2], Y_data[:split2] print(X_train.shape, Y_train.shape) def preprocessing(X_input, scaling=True): if scaling: # important: always use "X_train" on rhs since X_valid and X_test are "unknown" X_input = (X_input - np.mean(X_train)) / np.std(X_train) print(np.min(X_input), np.max(X_input), np.mean(X_input), np.std(X_input)) env = np.abs(hilbert(np.mean(X_train, axis=0))) # envelope of training data average out = X_input * env print(out.shape) return out xf = range(100) plt.figure(figsize=(15,4)) plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 plt.xlabel("time", fontsize=14) plt.ylabel("voltage (scaled)", fontsize=14) plt.plot(xf, np.mean(preprocessing(X_train), axis=0), # use "preprocessing" function for y-axis data "b-", label="average: training signal * average training envelope") plt.legend(loc="best", fontsize=14) plt.show() x_train = preprocessing(X_train) y_train = Y_train x_valid = preprocessing(X_valid) y_valid = Y_valid x_test = preprocessing(X_test) y_test = Y_test print(x_train.shape, x_valid.shape, x_test.shape) print(y_train.shape, y_valid.shape, y_test.shape) x_shape = x_train.shape if len(x_shape)==2: x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1)) x_valid = np.reshape(x_valid, (x_valid.shape[0], x_valid.shape[1], 1)) x_test = np.reshape(x_test , (x_test.shape[0] , x_test.shape[1] , 1)) features_per_timestep = len(x_train[0, 0]) features_per_timestep # ## Keras ResNet CNN # Using a Residual Neural Network with convolutional layers – built with the *Keras Sequential* model – to classify the 
# qubit eigenstates.
import keras
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, Conv1D, BatchNormalization, Activation, GlobalAveragePooling1D, Dense
from keras.regularizers import l2


# learning schedule
def lr_schedule(epoch):
    """Return the step-decayed learning rate for the given epoch.

    BUG FIX: the original chain tested `epoch > 80` first, which made the
    later `elif epoch > 120/160/185` branches unreachable; thresholds are now
    checked from highest to lowest so every decay step actually applies.
    """
    lr = 0.001
    if epoch > 185:    # reduce by 4000 (total) after 185 epochs
        lr *= 0.00025
    elif epoch > 160:  # reduce by 1000 (total) after 160 epochs
        lr *= 0.001
    elif epoch > 120:  # reduce by 100 (total) after 120 epochs
        lr *= 0.01
    elif epoch > 80:   # reduce by 10 after 80 epochs
        lr *= 0.1
    print("Learning rate: ", lr)
    return lr


# convolutional layer
def resnet_layer(inputs,              # input tensor
                 num_filters=128,     # number of filters
                 kernel_size=8,       # kernel size
                 strides=1,           # strides (padding is "SAME", see below)
                 activation="relu"):  # activation function (How about using Elu instead?)
    """Conv1D -> BatchNorm -> (optional) activation building block."""
    conv = Conv1D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding="same",
                  kernel_initializer="he_normal",
                  kernel_regularizer=l2(1e-4))
    x = BatchNormalization()(conv(inputs))
    if activation is not None:  # activation is skipped when explicitly disabled
        x = Activation(activation)(x)
    return x


# build neural net, start with input
inputs = Input(shape=[100, 1])

# stack 1, 3 layers (64 x 8,5,3)
x = resnet_layer(inputs=inputs, num_filters=64)
y = resnet_layer(inputs=x, num_filters=64, kernel_size=5)
z = resnet_layer(inputs=y, num_filters=64, kernel_size=3, activation=None)
short = resnet_layer(inputs=inputs, num_filters=64, kernel_size=1, activation=None)
add = keras.layers.add([z, short])
out = Activation("relu")(add)

# stack 2, 3 layers (128 x 8,5,3)
x = resnet_layer(inputs=out)
y = resnet_layer(inputs=x, kernel_size=5)
z = resnet_layer(inputs=y, kernel_size=3, activation=None)
short = resnet_layer(inputs=out, kernel_size=1, activation=None)
add = keras.layers.add([z, short])
out = Activation("relu")(add)

# stack 3, 3 layers (128 x 8,5,3)
x = resnet_layer(inputs=out)
y = resnet_layer(inputs=x, kernel_size=5)
z = resnet_layer(inputs=y, kernel_size=3, activation=None)
# skip "short" (no need to adapt feature map number because it matches already)
add = keras.layers.add([z, out])
out = Activation("relu")(add)

# finish with output, skip average pooling
x = keras.layers.GlobalAveragePooling1D()(out)
#x = Flatten()(out)
outputs = Dense(1, activation="sigmoid", kernel_initializer="he_normal")(x)
model = Model(inputs=inputs, outputs=outputs)

# compile and summarize
model.compile(loss="binary_crossentropy",
              optimizer=Adam(lr=lr_schedule(0)),
              metrics=["accuracy"])
print(model.inputs)
model.summary()

max_score = 0
# BUG FIX: initialize before the loop; otherwise a non-improving first epoch
# hits `overfit_count += 1` on an unbound name (NameError).
overfit_count = 0
overfit_count_threshold = 25
for iteration in range(200):
    print(iteration)
    model.fit(x_train, y_train, epochs=1, batch_size=128)
    score = model.evaluate(x_valid, y_valid)
    if score[1] > max_score:
        # NOTE(review): `best_model = model` aliases the live model, which keeps
        # training afterwards; snapshotting weights would preserve the best state.
        best_model = model
        best_iteration = iteration
        max_score = score[1]
        test_score = model.evaluate(x_test, y_test)
        overfit_count = 0
    else:
        overfit_count += 1
    print(score, overfit_count)
print("best iteration:\t", best_iteration, "\n") if overfit_count is overfit_count_threshold: break print("Score of best model on test set:\t", test_score) # The final result (66.4%) is more than 1.5% better than the one that has been found via the histogram functions (<64.8%, see further above).
ResNet_CNN_Keras.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # SageMaker PySpark PCA and K-Means Clustering MNIST Example # # 1. [Introduction](#Introduction) # 2. [Setup](#Setup) # 3. [Loading the Data](#Loading-the-Data) # 4. [Create a pipeline with PCA and K-Means on SageMaker](#Create-a--pipeline-with--PCA-and--K-Means-on-SageMaker) # 5. [Inference](#Inference) # 6. [Clean-up](#Clean-up) # 7. [More on SageMaker Spark](#More-on-SageMaker-Spark) # # ## Introduction # This notebook will show how to cluster handwritten digits through the SageMaker PySpark library. # # We will manipulate data through Spark using a SparkSession, and then use the SageMaker Spark library to interact with SageMaker for training and inference. # We will create a pipeline consisting of a first step to reduce the dimensionality using SageMaker's PCA algorithm, followed by the final K-Means clustering step on SageMaker. # # You can visit SageMaker Spark's GitHub repository at https://github.com/aws/sagemaker-spark to learn more about SageMaker Spark. # # This notebook was created and tested on an ml.m4.xlarge notebook instance. # ## Setup # # First, we import the necessary modules and create the `SparkSession` with the SageMaker-Spark dependencies attached. 
# + import os import boto3 from pyspark import SparkContext, SparkConf from pyspark.sql import SparkSession import sagemaker from sagemaker import get_execution_role import sagemaker_pyspark role = get_execution_role() # Configure Spark to use the SageMaker Spark dependency jars jars = sagemaker_pyspark.classpath_jars() classpath = ":".join(sagemaker_pyspark.classpath_jars()) # See the SageMaker Spark Github to learn how to connect to EMR from a notebook instance spark = ( SparkSession.builder.config("spark.driver.extraClassPath", classpath) .master("local[*]") .getOrCreate() ) spark # - # ## Loading the Data # # Now, we load the MNIST dataset into a Spark Dataframe, which dataset is available in LibSVM format at # # `s3://sagemaker-sample-data-[region]/spark/mnist/` # # where `[region]` is replaced with a supported AWS region, such as us-east-1. # # In order to train and make inferences our input DataFrame must have a column of Doubles (named "label" by default) and a column of Vectors of Doubles (named "features" by default). # # Spark's LibSVM DataFrameReader loads a DataFrame already suitable for training and inference. # # Here, we load into a DataFrame in the SparkSession running on the local Notebook Instance, but you can connect your Notebook Instance to a remote Spark cluster for heavier workloads. Starting from EMR 5.11.0, SageMaker Spark is pre-installed on EMR Spark clusters. For more on connecting your SageMaker Notebook Instance to a remote EMR cluster, please see [this blog post](https://aws.amazon.com/blogs/machine-learning/build-amazon-sagemaker-notebooks-backed-by-spark-in-amazon-emr/). 
# + import boto3 cn_regions = ["cn-north-1", "cn-northwest-1"] region = boto3.Session().region_name endpoint_domain = "com.cn" if region in cn_regions else "com" spark._jsc.hadoopConfiguration().set( "fs.s3a.endpoint", "s3.{}.amazonaws.{}".format(region, endpoint_domain) ) trainingData = ( spark.read.format("libsvm") .option("numFeatures", "784") .load("s3a://sagemaker-sample-data-{}/spark/mnist/train/".format(region)) ) testData = ( spark.read.format("libsvm") .option("numFeatures", "784") .load("s3a://sagemaker-sample-data-{}/spark/mnist/test/".format(region)) ) trainingData.show() # - # MNIST images are 28x28, resulting in 784 pixels. The dataset consists of images of digits going from 0 to 9, representing 10 classes. # # In each row: # * The `label` column identifies the image's label. For example, if the image of the handwritten number is the digit 5, the label value is 5. # * The `features` column stores a vector (`org.apache.spark.ml.linalg.Vector`) of `Double` values. The length of the vector is 784, as each image consists of 784 pixels. Those pixels are the features we will use. # # # # As we are interested in clustering the images of digits, the number of pixels represents the feature vector, while the number of classes represents the number of clusters we want to find. # ### Create a pipeline with PCA and K-Means on SageMaker # To perform the clustering task, we will first running PCA on our feature vector, reducing it to 50 features. Then, we can use K-Means on the result of PCA to apply the final clustering. We will create a **Pipeline** consisting of 2 stages: the PCA stage, and the K-Means stage. # # In the following example, we run the pipeline fully on SageMaker infrastructure, making use of both `PCASageMakerEstimator` and `KMeansSageMakerEstimator`. 
The PCA training and inference step will run on SageMaker, and then we can train and infer using Amazon SageMaker's K-Means on the output column from PCA: # + from pyspark.ml import Pipeline from sagemaker_pyspark.algorithms import PCASageMakerEstimator, KMeansSageMakerEstimator from sagemaker_pyspark import RandomNamePolicyFactory, IAMRole, EndpointCreationPolicy from sagemaker_pyspark.transformation.serializers import ProtobufRequestRowSerializer # ML pipeline with 2 stages: PCA and K-Means # 1st stage: PCA on SageMaker pcaSageMakerEstimator = PCASageMakerEstimator( sagemakerRole=IAMRole(role), trainingInstanceType="ml.m4.xlarge", trainingInstanceCount=1, endpointInstanceType="ml.t2.large", endpointInitialInstanceCount=1, namePolicyFactory=RandomNamePolicyFactory("sparksm-3p-"), ) # Set parameters for PCA (number of features in input and the number of principal components to find) pcaSageMakerEstimator.setFeatureDim(784) pcaSageMakerEstimator.setNumComponents(50) # 2nd stage: K-Means on SageMaker kMeansSageMakerEstimator = KMeansSageMakerEstimator( sagemakerRole=IAMRole(role), trainingSparkDataFormatOptions={ "featuresColumnName": "projection" }, # Default output column generated by PCASageMakerEstimator requestRowSerializer=ProtobufRequestRowSerializer( featuresColumnName="projection" ), # Default output column generated by PCASageMakerEstimator trainingInstanceType="ml.m4.xlarge", trainingInstanceCount=1, endpointInstanceType="ml.t2.large", endpointInitialInstanceCount=1, namePolicyFactory=RandomNamePolicyFactory("sparksm-3k-"), endpointCreationPolicy=EndpointCreationPolicy.CREATE_ON_TRANSFORM, ) # Set parameters for K-Means kMeansSageMakerEstimator.setFeatureDim(50) kMeansSageMakerEstimator.setK(10) # Define the stages of the Pipeline in order pipelineSM = Pipeline(stages=[pcaSageMakerEstimator, kMeansSageMakerEstimator]) # - # Now that we've defined the `Pipeline`, we can call fit on the training data. 
Please note the below code will take several minutes to run and create all the resources needed for this pipeline. # Train pipelineModelSM = pipelineSM.fit(trainingData) # In this case, when calling `fit` on the `PipelineModel`, 2 jobs and models will be created: # 1. A job using the PCA algorithm which will create a PCA model # 2. A job using the K-Means algorithm which will create a K-Means model # # As the stages were defined in the pipeline, the pipeline is responsible for giving as input to the PCA job the raw data, and then giving as input to the K-Means job the results of the PCA job. # # Please note that the endpoint serving the PCA model is created when calling `fit`, as the endpoint is needed to be generate the input to train the K-means algorithm and thus launch the job. In this setting, only the K-Means endpoint will be created when calling `transform`, as stated by the `endpointCreationPolicy` given to the `KMeansSageMakerEstimator`, in order to reduce the waiting time when calling `fit`. # ## Inference # # When calling the transform method on the `PipelineModel` object, both the PCA and K-Means SageMaker endpoints are contacted sequentially. We can see this in the below architecture diagram. # ![PCA and KMeans on SageMaker](img/sagemaker-spark-pca-kmeans-architecture.png) # Please note the below code will take several minutes to run and create the final K-Means endpoint needed for this pipeline. transformedData = pipelineModelSM.transform(testData) transformedData.show() # How well did the pipeline perform? 
Let us display the digits from each of the clusters and manually inspect the results: # + from pyspark.sql.types import DoubleType import matplotlib.pyplot as plt import numpy as np import string # Helper function to display a digit def showDigit(img, caption="", xlabel="", subplot=None): if subplot == None: _, (subplot) = plt.subplots(1, 1) imgr = img.reshape((28, 28)) subplot.axes.get_xaxis().set_ticks([]) subplot.axes.get_yaxis().set_ticks([]) plt.title(caption) plt.xlabel(xlabel) subplot.imshow(imgr, cmap="gray") def displayClusters(data): images = np.array(data.select("features").cache().take(250)) clusters = data.select("closest_cluster").cache().take(250) for cluster in range(10): print("\n\n\nCluster {}:".format(string.ascii_uppercase[cluster])) digits = [img for l, img in zip(clusters, images) if int(l.closest_cluster) == cluster] height = ((len(digits) - 1) // 5) + 1 width = 5 plt.rcParams["figure.figsize"] = (width, height) _, subplots = plt.subplots(height, width) subplots = np.ndarray.flatten(subplots) for subplot, image in zip(subplots, digits): showDigit(image, subplot=subplot) for subplot in subplots[len(digits) :]: subplot.axis("off") plt.show() displayClusters(transformedData) # - # ## Clean-up # Since we don't need to make any more inferences, now we delete the resources (endpoints, models, configurations, etc): # + # Delete the resources from sagemaker_pyspark import SageMakerResourceCleanup from sagemaker_pyspark import SageMakerModel def cleanUp(model): resource_cleanup = SageMakerResourceCleanup(model.sagemakerClient) resource_cleanup.deleteResources(model.getCreatedResources()) # Delete the SageMakerModel in pipeline for m in pipelineModelSM.stages: if isinstance(m, SageMakerModel): cleanUp(m) # - # ## More on SageMaker Spark # # The SageMaker Spark Github repository has more about SageMaker Spark, including how to use SageMaker Spark using the Scala SDK: https://github.com/aws/sagemaker-spark #
sagemaker-spark/pyspark_mnist/pyspark_mnist_pca_kmeans.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pycle demo 1: efficient sketch tuning for GMM fitting # This notebook demonstrates the following features of the `pycle`toolbox: tuning the (frequency sampling pattern of the) sketch from a light "pre-sketch" on a small subset of the dataset, and learning and plotting a GMM distribution. In particular, we will: # 0. Generate a synthetic dataset # 1. Sketch a small subset of it with a data-independent distribution, and use it to learn the actual frequency sampling pattern we will use to sketch the whole dataset. # 2. Learn a Gaussian mixture model from this adapted sketch. # # We assume that you have installed `pycle` with `pip` or that you have the `pycle` folder in the same directory as this notebook. # + # General imports import numpy as np import matplotlib.pyplot as plt # Fix the random seed for reproducibility np.random.seed(42) # - # To demonstrate the toolbox, we generate a simple synthetic dataset, from a Gaussian mixture model in two dimensions. 
# + from pycle.utils import generatedataset_GMM d = 2 # Dimension K = 4 # Number of Gaussians n = 20000 # Number of samples we want to generate # We use the generatedataset_GMM method from pycle, where we ask that # - All entries are bounded by 1 (normalize='l_inf-unit-ball') # - We get at the output the "ground-truth" GMM that was used to generate the dataset (output_required='GMM') # - The clusters are imbalanced ('balanced=0.1') # - That cluster scales vary moderately ('covariance_variability_inter = 1.5') (X,GT_GMM) = generatedataset_GMM(d,K,n,normalize='l_inf-unit-ball',output_required='GMM',balanced=0.1,covariance_variability_inter = 1.5) # Bounds on the dataset, necessary for compressive k-means bounds = np.array([-np.ones(d),np.ones(d)]) # We assumed the data is normalized between -1 and 1 # Visualize the dataset plt.figure(figsize=(5,5)) plt.title("Full dataset") plt.scatter(X[:,0],X[:,1],s=1, alpha=0.25) plt.show() # - # In this example, we want to use a random Fourier features sketch, but we don't know beforehand how to draw the frequencies associated with those features (we don't know the associated $\Sigma$ parameter). We thus allow ourselves to oberse a small subset of the dataset ($n_0$ samples, which here correspond to $2\%$ of the obervations) through a smaller, arbitrarily generated "pre-sketch" of size $m_0$. (Without entering into the details[1], know that this is done by iteratively selecting $m_0$ candidate values for $\boldsymbol \omega_j$ according to an arbitrary initial guess for $\Sigma$, compute the associated sketch, then updating $\Sigma$ by fitting a Gaussian curve to the maximal absolute values of the sketch in $c$ contiguous blocks when ordered as a function of $\|\boldsymbol \omega_j\|$.) # # All this is implemented by the `sk.estimate_Sigma` method. # # [1] See section 3.3.3 of https://arxiv.org/pdf/1606.02838.pdf. 
# + import pycle.sketching as sk # import sketching submodule m0 = 200 # use a pre-sketch of size 100 n0 = n//50 # observe 2% of the dataset to estimata Sigma Sigma = sk.estimate_Sigma(X,m0,c=10,n0=n0,verbose=1) print("Estimated sigma2_bar: ",Sigma[0][0]) # - # By putting `verbose=1`, we asked `estimate_Sigma` to plot the fit to the pre-sketch amplitudes. # After this, we can generate the sketch and compress the whole dataset. # + m = 20*K*d Omega = sk.drawFrequencies("FoldedGaussian",d,m,Sigma) Phi = sk.SimpleFeatureMap("ComplexExponential",Omega) z = sk.computeSketch(X,Phi) # - # Let's use this sketch to learn a Gaussian mixture model. Note since `CLOMPR` is sensible to spurious local optima, we ask it to run 5 independent trials and keep the one that that achieves the lowest the objective function (`nRepetitions=5`). # + import pycle.compressive_learning as cl # Note how the name of the solver object is "METHOD_TASK" # Here we use the CLOMP method to solve the (diagonal-covariance) GMM problem cgmm_solver = cl.CLOMP_dGMM(Phi,K,bounds,z) # Launch CLOMP five times and keep the best solution cgmm_solver.fit_several_times(3) # Get the GMM as a tuple (weights, centers, covariances) CGMM = cgmm_solver.get_GMM() print("weights: \n",CGMM[0]) print("centers: \n",CGMM[1]) print("covariances: \n",CGMM[2]) # - # The returned `CGMM` is a tuple containing the weights, centers and covariances of the fitted Gaussians. We can plot the learned model with the `plotGMM` method of the `pycle.utils` submodule. We also evaluate the quality of the model through the log-likelihood and the (symmetric) Kullback-Leibler divergence. from pycle.utils import plotGMM, loglikelihood_GMM, symmKLdivergence_GMM plotGMM(X,CGMM) print("Log-likelihood: ",loglikelihood_GMM(CGMM,X,robust = False)) print("LL of the ground truth: ", loglikelihood_GMM(GT_GMM,X,robust = False)) print("Kullback-Leibler divergence: ",symmKLdivergence_GMM(CGMM,GT_GMM))
Demo_1-sketchTuning_GMM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # ## Deep Learning # # ## Project: Build a Traffic Sign Recognition Classifier # # In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. # # > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # # In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project. # # The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. 
If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file. # # # >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. # --- # ## Step 0: Load The Data # + # Load pickled data import pickle # TODO: Fill this in based on where you saved the training and testing data training_file = "traffic-signs-data/train.p" validation_file= "traffic-signs-data/valid.p" testing_file = "traffic-signs-data/test.p" with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] # - # --- # # ## Step 1: Dataset Summary & Exploration # # The pickled data is a dictionary with 4 key/value pairs: # # - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). # - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. # - `'sizes'` is a list containing tuples, (width, height) representing the the original width and height the image. # - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** # # Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. 
For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. # ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas # + ### Replace each question mark with the appropriate value. ### Use python, pandas or numpy methods rather than hard coding the results import numpy as np import matplotlib.pyplot as plt # Number of training examples n_train = len(y_train) n_test = len(y_test) n_valid = len(y_valid) # What's the shape of an traffic sign image? image_shape = X_train[0].shape # How many unique classes/labels there are in the dataset. n_classes = len(np.unique(y_test)) print("Number of training examples =", n_train) print("Number of validation examples =", n_valid) print("Number of testing examples =", n_test) print("X_train data shape =", X_train.shape) print("Image data shape =", image_shape) print("Number of classes =", n_classes) # Scatter plot to show uneven class distribution uniq_label, ucount = np.unique(y_train, return_counts=True) plt.scatter(uniq_label, ucount) plt.show() # - # ### Include an exploratory visualization of the dataset # Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. # # The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python. # # **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. # + ### Data exploration visualization code goes here. ### Feel free to use as many code cells as needed. import matplotlib.pyplot as plt import random # Visualizations will be shown in the notebook. 
# %matplotlib inline index = random.randint(0, n_train) image = X_train[index].squeeze() plt.figure(figsize=(1,1)) plt.imshow(image) print(y_train[index]) # - # ---- # # ## Step 2: Design and Test a Model Architecture # # Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). # # There are various aspects to consider when thinking about this problem: # # - Neural network architecture # - Play around preprocessing techniques (normalization, rgb to grayscale, etc) # - Number of examples per label (some have more than others). # - Generate fake data. # # Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. # # **NOTE:** The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! # ### Pre-process the Data Set (normalization, grayscale, etc.) # Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. # + ### Preprocess the data here. Preprocessing steps could include normalization, converting to grayscale, etc. ### Feel free to use as many code cells as needed. 
from sklearn.utils import shuffle

X_train, y_train = shuffle(X_train, y_train)

def plot_img_sample(data, y):
    """Plot the first 9 images of `data` in a 3x3 grid, each titled with its label."""
    for i in range(0, 9):
        plt.subplot(330 + 1 + i)
        plt.title(y[i])
        plt.imshow(data[i])
    plt.show()

# after shuffle, recheck image data in a 3x3 grid
plot_img_sample(X_train, y_train)

# +
# normalization seems to speed up training
def normalize_data(X):
    """Shift and scale 8-bit pixel values into roughly [-0.5, 0.5]."""
    return (X - 127.5)/255

X_train = normalize_data(X_train)
X_valid = normalize_data(X_valid)
X_test = normalize_data(X_test)
# -

# ### Model Architecture

# +
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow as tf
from tensorflow.contrib.layers import flatten

# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.05
hidden_1 = 240  # width of the first fully connected layer
hidden_2 = 86   # width of the second fully connected layer
print("total params approx: ", 400*hidden_1+hidden_1*hidden_2+hidden_2*n_classes)

weights = {
    'wc1': tf.Variable(tf.truncated_normal([5, 5, 3, 6], mean = mu, stddev = sigma)),
    'wc2': tf.Variable(tf.truncated_normal([5, 5, 6, 16], mean = mu, stddev = sigma)),
    'wd1': tf.Variable(tf.random_normal([5*5*16, hidden_1], mean = mu, stddev = sigma)),
    'wd2': tf.Variable(tf.random_normal([hidden_1, hidden_2], mean = mu, stddev = sigma)),
    'out': tf.Variable(tf.random_normal([hidden_2, n_classes]))}

biases = {
    'bc1': tf.Variable(tf.zeros([6])),
    'bc2': tf.Variable(tf.zeros([16])),
    'bd1': tf.Variable(tf.zeros([hidden_1])),
    'bd2': tf.Variable(tf.zeros([hidden_2])),
    'out': tf.Variable(tf.zeros([n_classes]))}

def conv2d(x, W, b, strides=1):
    """Convolution with VALID padding, bias add, then ReLU."""
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='VALID')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    """k x k max pooling with stride k and VALID padding."""
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='VALID')

# Dropout keep probability, fed at run time: 0.5 while training, 1.0 for evaluation.
keep_prob = tf.placeholder(tf.float32)

def LeNet(x):
    """LeNet-style classifier: two conv+pool stages followed by three FC layers.

    `x` is a (batch, 32, 32, 3) image tensor; returns the (batch, n_classes)
    logits tensor. Dropout on the FC layers is controlled by the module-level
    `keep_prob` placeholder.
    """
    # BUG FIX: the original body re-bound `keep_prob = 0.5` locally here,
    # shadowing the placeholder defined above. That made the feed_dict values
    # ineffective, so dropout stayed active (at 0.5) even during evaluation.
    # The local assignment is removed so the placeholder is actually used.

    # Layer 1: Convolutional (+ ReLU). Input = 32x32x3. Output = 28x28x6.
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = maxpool2d(conv1, k=2)
    #print("conv1 shape should be 14x14x6: ", conv1.get_shape())
    # Layer 2: Convolutional (+ ReLU). Output = 10x10x16.
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    #print("conv2 shape: ", conv2.get_shape())
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = maxpool2d(conv2, k=2)
    #print("conv2 shape should be 5x5x16: ", conv2.get_shape())
    # Flatten. Input = 5x5x16. Output = 400.
    flat_x = tf.contrib.layers.flatten(conv2)
    #print("flat_x shape: ", flat_x.get_shape())
    # Layer 3: Fully Connected + ReLU + dropout. Input = 400. Output = hidden_1 (240).
    fc1 = tf.add(tf.matmul(flat_x, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, keep_prob)
    # Layer 4: Fully Connected + ReLU + dropout. Input = hidden_1 (240). Output = hidden_2 (86).
    fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
    fc2 = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, keep_prob)
    # Layer 5: Fully Connected. Input = hidden_2 (86). Output = n_classes.
    logits = tf.add(tf.matmul(fc2, weights['out']), biases['out'])
    return logits
# -

# ### Train, Validate and Test the Model
# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.

# +
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
import tensorflow as tf rate = 0.001 x = tf.placeholder(tf.float32, (None, 32, 32, 3)) y = tf.placeholder(tf.int32, (None)) one_hot_y = tf.one_hot(y, n_classes) logits = LeNet(x) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y) loss_operation = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer(learning_rate = rate) training_operation = optimizer.minimize(loss_operation) ### Evaluation correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1)) accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) saver = tf.train.Saver() print("X_train image shape: ", X_train[0].shape) print("x input tensor shape: ", x.get_shape()) def evaluate(X_data, y_data): num_examples = len(X_data) total_accuracy = 0 sess = tf.get_default_session() for offset in range(0, num_examples, BATCH_SIZE): batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE] accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob : 1.0}) total_accuracy += (accuracy * len(batch_x)) return total_accuracy / num_examples # - # ### Training # + import time start = time.clock() EPOCHS = 40 BATCH_SIZE = 128 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob : 0.5}) training_accuracy = evaluate(X_train, y_train) validation_accuracy = evaluate(X_valid, y_valid) print("EPOCH {} ...".format(i+1)) print("Training Accuracy = {:.3f}".format(training_accuracy)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() saver.save(sess, './trafficsigns') print("Model saved") # - # ### Evaluate Test Set with tf.Session() as sess: 
saver.restore(sess, tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_test, y_test) print("Test Accuracy = {:.3f}".format(test_accuracy)) # --- # # ## Step 3: Test a Model on New Images # # To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type. # # You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. # ### Load and Output the Images # + from IPython.display import display, Image from scipy import ndimage import os ### Load the images and plot them here. ### Feel free to use as many code cells as needed. def display_train_img(folder, numImage): listOfImageNames = os.listdir(folder) n = 0; for imageName in listOfImageNames: image_file = os.path.join(folder, imageName) print(image_file) display(Image(filename=image_file)) n += 1; if(n>=numImage): break display_train_img("testimages", 6) # + ### Load image into array (using code from Tensorflow notmist tutorial) image_size = 32 image_channels = 3 def load_tests(folder): image_files = os.listdir(folder) dataset = np.ndarray(shape=(len(image_files), image_size, image_size, image_channels), dtype=np.uint8) print("Test image folder:", folder) num_images = 0 for image in image_files: image_file = os.path.join(folder, image) try: image_data = ndimage.imread(image_file, mode="RGB") if image_data.shape != (image_size, image_size, image_channels): raise Exception('Unexpected image shape: %s' % str(image_data.shape)) dataset[num_images, :, :] = image_data num_images = num_images + 1 except IOError as e: print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') dataset = dataset[0:num_images, :, :] print('Full dataset tensor:', dataset.shape) print('Mean:', np.mean(dataset)) print('Standard deviation:', np.std(dataset)) return dataset test_dataset = load_tests("testimages") def plot_test_imgs(data): numImg = len(data) for 
i in range(0, numImg): plt.subplot(330 + 1 + i) plt.title(y[i]) plt.imshow(data[i]) plt.show() plot_test_imgs(test_dataset) # - # ### Predict the Sign Type for Each Image # + ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. test_dataset = normalize_data(test_dataset) print(test_dataset.shape) # + test_logits = LeNet(x) test_prob = tf.nn.softmax(test_logits) test_predict = tf.argmax(test_logits, 1) test_target_classes = np.array([22, 28, 17, 25, 1, 2]); top_probs, top_indices = tf.nn.top_k(test_prob, k=5) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) test_prob_val = sess.run(test_prob, feed_dict={x: test_dataset, keep_prob: 1.0}) test_predict_val = sess.run(test_predict, feed_dict={x: test_dataset, keep_prob: 1.0}) top_five_probs, top_five_indices = sess.run([top_probs, top_indices], feed_dict={x: test_dataset, keep_prob: 1.0}) test_accuracy = evaluate(test_dataset, test_target_classes) print("Test Accuracy = {:.3f}".format(test_accuracy)) print("Prediction: ", test_predict_val) # - # ### Analyze Performance # + ### Calculate the accuracy for these 5 new images. ### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images. ## 3 of 6 images were predicted correctly print("Test Accuracy = {:.3f}".format(test_accuracy)) # - # ### Output Top 5 Softmax Probabilities For Each Image Found on the Web # For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. # # The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image. 
# # `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids. # # Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tk.nn.top_k` is used to choose the three classes with the highest probability: # # ``` # # (5, 6) array # a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, # 0.12789202], # [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, # 0.15899337], # [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , # 0.23892179], # [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , # 0.16505091], # [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, # 0.09155967]]) # ``` # # Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces: # # ``` # TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], # [ 0.28086119, 0.27569815, 0.18063401], # [ 0.26076848, 0.23892179, 0.23664738], # [ 0.29198961, 0.26234032, 0.16505091], # [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], # [0, 1, 4], # [0, 5, 1], # [1, 3, 5], # [1, 4, 3]], dtype=int32)) # ``` # # Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. 
print(top_five_probs) print(top_five_indices) # + ## Visualize softmax probabilities of each prediction def plot_probability(data): numImg = len(data) for i in range(0, numImg): plt.subplot(330 + 1 + i) ind = np.arange(43) plt.bar(ind, data[i]) plt.show() plot_probability(test_prob_val) # - # > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the IPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run. You can then export the notebook by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # ### Project Writeup # # Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
Traffic_Sign_Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import seaborn as sn import matplotlib.pyplot as plt import sklearn import os import datetime import time import git import sys import math from geopy.distance import geodesic repo = git.Repo("./", search_parent_directories=True) homedir = repo.working_dir datadir = f"{homedir}" + "/models/processing/USA/County_Based/" # + #helper functions def logFunc(x): if x < 0.01: x = 0.01 return math.log10(x) # Convert longitude latitude pair to x, y, z Cartesian coordinates\n def convertPts(pair): lon = pair[0] lat = pair[1] R = 3958.8 lonRad = lon * math.pi / 180 latRad = lat * math.pi / 180 x = R * math.cos(latRad) * math.cos(lonRad) y = R * math.cos(latRad) * math.cos(lonRad) z = R * math.sin(lat) return (x, y, z) def getX(x): return x[0] def getY(x): return x[1] def getZ(x): return x[2] # + #Neighbor Data neighborcounties = pd.read_csv(f"{homedir}/models/processing/USA/County_Based/neighborcounties.csv", index_col = 0) # read in the files, load as dataframe Age_Race = pd.read_csv(datadir + 'Age_Race_Filled.csv') Population = pd.read_csv(datadir + 'Total_Pop') Density = pd.read_csv(datadir + 'Density.csv') JHU = pd.read_csv(datadir + 'aggregate_jhu_filled.csv') Berkeley = pd.read_csv(datadir + 'Aggregate_Berkeley.csv') Policies = pd.read_csv(datadir + 'Policy_Transit.csv') Geography = pd.read_csv(datadir + 'County_Centers.csv') Beds = pd.read_csv(datadir + 'County_Beds.csv') Data = pd.DataFrame() Raw_Data = pd.DataFrame() DataBasic = pd.DataFrame() DataDemographics = pd.DataFrame() DataHealth = pd.DataFrame() DataGeography = pd.DataFrame() # + Data['FIPS'] = Geography['fips'] # fix population Population.columns = ['fips', 'Population'] # drop US territories, train separate models for them FipsSet = [] counter = 0 for 
row in Data.iterrows(): row = row[1][0] if math.floor(row / 1000) > 56: Data = Data.drop([counter], axis = 0) else: FipsSet.append(float(row)) counter += 1 # edit county centers counter1 = 0 for row in Geography.iterrows(): if row[1][1] not in FipsSet: Geography = Geography.drop([counter1], axis = 0) counter1 += 1 Raw_Data['FIPS'] = Data['FIPS'] # + # Nature of the county, includes policies Data['Pop'] = Population['Population'] Raw_Data['Pop'] = Population['Population'] Data['Density'] = Density['2010 Density per square mile of land area - Population'] Raw_Data['Density'] = Density['2010 Density per square mile of land area - Population'] Data['Area'] = Density['Area in square miles - Total area'] Raw_Data['Area'] = Density['Area in square miles - Total area'] Data['UrbanRural'] = JHU['Rural-urban_Continuum Code_2013'] Raw_Data['UrbanRural'] = JHU['Rural-urban_Continuum Code_2013'] Data['EconType'] = JHU['Economic_typology_2015'] Raw_Data['EconType'] = JHU['Economic_typology_2015'] # Policies Data['Policies'] = Policies['Score'] Raw_Data['Policies'] = Policies['Score'] # Typical immigration in/out. 
Proxy for being a sink/source in flows Data['Movement'] = JHU['R_NET_MIG_2018'] Raw_Data['Movement'] = JHU['R_NET_MIG_2018'] Data['Transit'] = JHU['transit_scores - population weighted averages aggregated from town/city level to county'] Raw_Data['Transit'] = JHU['transit_scores - population weighted averages aggregated from town/city level to county'] # + # Demographics of the county # Age distribution Data['65+'] = Age_Race['65 to 74 years'] + Age_Race['75 to 84 years'] + Age_Race['85 years and over'] Raw_Data['65+'] = Age_Race['65 to 74 years'] + Age_Race['75 to 84 years'] + Age_Race['85 years and over'] Data['65+'] = Data['65+'] / Population['Population'] Raw_Data['65+'] = Raw_Data['65+'] / Population['Population'] # Race/gender Data['Male'] = Berkeley['FracMale2017'] Raw_Data['Male'] = Berkeley['FracMale2017'] Data['AfricanAmer'] = Age_Race['Exclusively Black or African American'] + Age_Race['Hispanic or Latino (of any race)!!Puerto Rican'] Raw_Data['AfricanAmer'] = Age_Race['Exclusively Black or African American'] + Age_Race['Hispanic or Latino (of any race)!!Puerto Rican'] Data['AfricanAmer'] = Data['AfricanAmer'] / Population['Population'] Raw_Data['AfricanAmer'] = Raw_Data['AfricanAmer'] / Population['Population'] # Politics/education/income/economy Data['CollegePlus'] = JHU['Percent of adults completing some college or associate\'s degree 2014-18'] Raw_Data['CollegePlus'] = JHU['Percent of adults completing some college or associate\'s degree 2014-18'] Data['Income'] = JHU['Median_Household_Income_2018'] Raw_Data['Income'] = JHU['Median_Household_Income_2018'] Data['Unemployed'] = JHU['Unemployment_rate_2018'] Raw_Data['Unemployed'] = JHU['Unemployment_rate_2018'] Data['Dems'] = Berkeley['FracDem'] Raw_Data['Dems'] = Berkeley['FracDem'] # + #Health care of the county Data['Hospitals'] = Berkeley['#Hospitals'] * 10000.0 / Population['Population'] Raw_Data['Hospitals'] = Berkeley['#Hospitals'] * 10000.0 / Population['Population'] # very tough to fill in 
#Data['HospBeds'] = Beds['licensed_beds'] / Population['Population'] # around 2-3 Data['ICUBeds'] = Berkeley['#ICU_beds'] Data['ICUBeds'] = Data['ICUBeds'] / Population['Population'] Raw_Data['ICUBeds'] = Data['ICUBeds'] #note: not considering comorbidities Data['HeartDiseaseMort'] = Berkeley['HeartDiseaseMortality'] Raw_Data['HeartDiseaseMort'] = Berkeley['HeartDiseaseMortality'] Data['StrokeMort'] = Berkeley['StrokeMortality'] Raw_Data['StrokeMort'] = Berkeley['StrokeMortality'] Data['Diabetes'] = Berkeley['DiabetesPercentage'] Raw_Data['Diabetes'] = Berkeley['DiabetesPercentage'] Data['Smokers'] = Berkeley['SmokersPercentage'] Raw_Data['Smokers'] = Berkeley['SmokersPercentage'] # + #Geography Data['pLon'] = Geography['pclon10'] Raw_Data['pLon'] = Geography['pclon10'] Data['pLat'] = Geography['pclat10'] Raw_Data['pLat'] = Geography['pclat10'] #Data['pLonLat'] = list(zip(Geography.pclon10, Geography.pclat10)) # population weighted #Data['pLonLat'] = Data['pLonLat'].values #Data['XYZ'] = Data['pLonLat'].apply(convertPts) #Data['xVal'] = Data['XYZ'].apply(getX) #Data['xVal'] = Data['xVal'].div(100) #Data['yVal'] = Data['XYZ'].apply(getY) #Data['yVal'] = Data['yVal'].div(100) #Data['zVal'] = Data['XYZ'].apply(getZ) #Data['zVal'] = Data['zVal'].div(200) #Data = Data.drop(columns=['pLonLat', 'XYZ']) # + # functions from neighbor_fill_ins def fillcol(fips, value,neighborcounties, min_neighbors=2): #Takes in a column of fips codes, and any type of datafield with some NaNs, #Computes distance-weighted average of the value across all neighbors of NaN counties tic1 = time.time() #Loading in the fips and value into proper dataframes #This is the df with only nan values df = pd.DataFrame(data = [fips,value]).T df.columns = ['FIPS', 'Values'] df.Values = df.Values.astype(float) df = df.set_index('FIPS') #creating new column to set to the current dataframe values newcol = [] for ind in df.index: #for any entries with NaNs if np.isnan(df['Values'][ind]): #list of neighbors for 
NaN county neighbors = list(neighborcounties[neighborcounties['orgfips'] == ind]['adjfips']) nonzero = 0 weightedval = 0 totalinvdist = 0 totaldist = 0 vals = 0 #iterates though neighbors of NaN county with non-NaN entires for n in neighbors: if n in df.index: if ~np.isnan(df['Values'][n]): #Getting weighted values, using 1/dist as a scalar to show closer distance counts more nonzero += 1 dist = list(neighborcounties.query('orgfips == ' + str(ind) + ' and adjfips == ' + str(n))['Pop_10'])[0] totalinvdist += (1/dist)**1 weightedval += ((1/dist)**1)*df['Values'][n] #If there are at least 2 neighbors (this can be adjusted) if nonzero >= min_neighbors: newcol.append(weightedval/(totalinvdist)) else: newcol.append(np.nan) else: newcol.append(df['Values'][ind]) toc1 = time.time() #print(toc1 - tic1) return newcol def fillfixed(colname, data, code, neighborcounties): #Method to fill up the google mobility data #Uses colname to designate which column to fill numnans = len(data[np.isnan(data[colname])]) while numnans > 0: #print(numnans) tempnum = numnans #Creating the filled column from method newcol = fillcol(data[code], data[colname], neighborcounties) data[colname] = newcol numnans = len(data[np.isnan(data[colname])]) #Checking if the number of nans changes if tempnum == numnans: #if number doesnt change, try again with only 1 neighbor, otherwise quit newcol = fillcol(data[code], data[colname], neighborcounties) data[colname] = newcol numnans = len(data[np.isnan(data[colname])]) if tempnum == numnans: numnans = 0 return data # + # Filling in columns of dataframe by nearest neighbor analysis cols = list(Data.columns)[1:] for col in cols: Data = fillfixed(col, Data, 'FIPS', neighborcounties) print(sum(Data.isna().sum())) # - cols = list(Raw_Data.columns)[1:] for col in cols: Raw_Data = fillfixed(col, Raw_Data, 'FIPS', neighborcounties) print(sum(Raw_Data.isna().sum())) # + for column in Data.columns: print((column, Data[column].isnull().sum())) for column in 
Raw_Data.columns: print((column, Raw_Data[column].isnull().sum())) # - for column in Data: print(column, max(abs(Data[column]))) # + Data['Pop'] = Data['Pop'].div(6000.0) Data['Density'] = Data['Density'].div(50.0) Data['UrbanRural'] = Data['UrbanRural'].div(1.5) Data['EconType'] = Data['EconType'].mul(1.4) Data['Policies'] = Data['Policies'].mul(10) Data['Movement'] = Data['Movement'].mul(5.0) Data['Transit'] = Data['Transit'].div(125000000) Data['65+'] = Data['65+'].mul(2400.0) Data['Male'] = Data['Male'].mul(6.0) Data['AfricanAmer'] = Data['AfricanAmer'].mul(4500.0) Data['CollegePlus'] = Data['CollegePlus'].div(10.0) Data['Income'] = Data['Income'].div(20000.0) Data['Unemployed'] = Data['Unemployed'].div(3.0) Data['Dems'] = Data['Dems'].mul(7.5) Data['Hospitals'] = Data['Hospitals'].div(30.0) Data['ICUBeds'] = Data['ICUBeds'].mul(100.0) Data['HeartDiseaseMort'] = Data['HeartDiseaseMort'].div(60.0) Data['StrokeMort'] = Data['StrokeMort'].div(10.0) Data['Diabetes'] = Data['Diabetes'].mul(9.0) Data['Smokers'] = Data['Smokers'].mul(9.0) Data['Area'] = Data['Area'].div(100.0) # - for column in Data: print(column, sum(Data[column])/len(Data[column])) for column in Data: print(column, max(abs(Data[column]))) # + # getting specific dataframes for what we want to cluster DataBasic = Data[['FIPS', 'Pop', 'Density', 'UrbanRural', 'EconType', 'Policies', 'Movement', 'Transit']] DataDemographics = Data[['FIPS', '65+', 'Male', 'AfricanAmer', 'CollegePlus', 'Income', 'Unemployed', 'Dems']] DataHealth = Data[['FIPS', 'Hospitals', 'ICUBeds', 'HeartDiseaseMort', 'StrokeMort', 'Diabetes', 'Smokers']] # approximate distances between counties # we double count Urban Rural, population, density, size since don't cluster on the entire Data dataframe # so that nearby urban counties are closer than a rural county adjacent from an urban county DataGeography = Data[['FIPS', 'Pop', 'Density', 'Area', 'UrbanRural', 'pLon', 'pLat']] # - Data['pLon'] = Data['pLon'].div(50) Data['pLat'] = 
Data['pLat'].div(50) # write our dataframes to CSVs in this folder Data.to_csv('data.csv') DataBasic.to_csv('data_basic.csv') DataDemographics.to_csv('data_demographics.csv') DataHealth.to_csv('data_health.csv') DataGeography.to_csv('data_geography.csv') Raw_Data.to_csv('raw_data.csv')
models/clustering/data_setup.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Classes

class Card():
    '''A class to manage a single card'''
    def __init__(self, value:str = None, suit:str = None, number:int = None):
        self.value = value      # rank name, e.g. "Ace"
        self.suit = suit        # suit name, e.g. "Clubs"
        self.number = number    # rank index 0-12, used for sorting
        self.name = self.value + ' of ' + self.suit

class Pile():
    '''A class to manage an ordered set of cards'''
    def __init__(self, name:str = None, card_list:list = None):
        self.name = name
        if card_list is None:
            self.card_list = []
        else:
            self.card_list = card_list

    def shuffle(self):
        # This method is called to shuffle the pile
        random.shuffle(self.card_list)

    def pile_status(self):
        # This method will print the names of cards in the pile
        print('\n' + self.name)
        for card in self.card_list:
            print(' ' + card.name)

    def pile_size(self):
        # This method will print number of cards in the pile
        print('\n{pile} has {num} cards.'.format(pile = self.name, num = len(self.card_list)))

    def sort(self, suit_sort:bool = False):
        # This method will sort the cards in a pile ascending, by default value then suit
        if suit_sort is True:
            self.card_list.sort(key=lambda x: (x.suit, x.number))
        else:
            self.card_list.sort(key=lambda x: (x.number, x.suit))

class Hand(Pile):
    '''A class to manage a hand of cards, child class to Pile'''
    def __init__(self, name:str = None, card_list:list = None, owner:str = None):
        Pile.__init__(self,name,card_list)
        self.name = owner + "'s Hand"
        self.owner = owner

    def draw(self, pile:str = None, number:int = 1):
        # This method is called to draw a number of cards to the hand from the draw pile
        self.card_list.extend(pile.card_list[-number:])
        del pile.card_list[-number:]

    def find_book(self):
        # This method is called to identify a complete (4 cards) book of cards for the play_book method
        # Returns (value, cards) for the first complete book found, else (-1, []).
        value_counter = {}
        for card in self.card_list:
            if card.value in value_counter:
                value_counter[card.value] += 1
            else:
                value_counter[card.value] = 1
            # FIX: a book is all four cards of a value; the original triggered
            # at a count of 3, which would play an incomplete 3-card "book".
            if value_counter[card.value] == 4:
                return card.value, [bookcard for bookcard in self.card_list if bookcard.value == card.value]
        return -1, []

    def play_book(self, pile:str = None, book:list = None):
        # This method is called to play a book of cards from the hand to the discard pile
        for card in book:
            self.card_list.remove(card)
            pile.card_list.append(card)

    def ask(self, value:str = None, target_hand:str = None):
        # This method is used to ask for a card from another player, "Do you have any X?"
        print('\n' + self.owner + ' asks ' + target_hand.owner + ' if they have any ' + value + '.')
        card_count = 0
        new_target_list = []
        # Move every matching card from the target hand into this hand.
        for index, card in enumerate(target_hand.card_list.copy()):
            if card.value == value:
                self.card_list.append(card)
                card_count += 1
            else:
                new_target_list.append(card)
        target_hand.card_list = new_target_list
        self.sort()
        if card_count > 0:
            print('\n' + target_hand.owner + ' gives ' + str(card_count) + ' ' + value + ' to ' + self.owner + '.' )
        else:
            print('\n' + target_hand.owner + ' has no ' + value + '. ' + self.owner + ', go fish!')

class Scoreboard():
    '''A class to manage the overall game state'''
    def __init__(self, players:list = None, book_values:dict = None):
        if players is None:
            self.players = []
        else:
            self.players = players
        self.score = {}
        for player in self.players:
            self.score[player] = 0
        # One flag per rank: 0 = book still available, 1 = played.
        self.books = {}
        for key, value in book_values.items():
            self.books[value] = 0

    def update_score(self, player:str = None, value:str = None):
        # This method is called to update the score and denote a played book with value 1
        self.score[player] += 1
        self.books[value] = 1

    def print_score(self):
        # This method is called to print a summary of the game state
        print('\nScore Player')
        for player in sorted(self.score, key = self.score.get, reverse = True):
            print(str(self.score[player]) + ' ' + player)
        played = [key for key, value in self.books.items() if value == 1]
        unplayed = [key for key, value in self.books.items() if value == 0]
        if len(played) > 0 and len(played) < 13:
            print('\n')
            print('Books in play:')
            for book in played:
                print(book)
        if len(unplayed) > 0:
            print('\n')
            print('Available books:')
            for book in unplayed:
                print(' ' + book)

# +
# Game initialization

def start_gofish(player_list):
    # Set variables and check for valid player count
    # (2-3 players deal 7 cards; 4-6 players deal 5)
    cards, hands = [], []
    player_count = len(player_list)
    if player_count > 1 and player_count < 4:
        starting_hand_size = 7
    elif player_count > 3 and player_count < 7:
        starting_hand_size = 5
    else:
        return print('There must be between two and six players.')
    suits = {0:"Clubs", 1:"Diamonds", 2:"Hearts", 3:"Spades"}
    values = {0:"Ace", 1:"Two", 2:"Three", 3:"Four", 4:"Five", 5:"Six", 6:"Seven",
              7:"Eight", 8:"Nine", 9:"Ten", 10:"Jack", 11:"Queen", 12:"King"}
    # Create the 52 playing cards
    for s in range(4):
        for v in range(13):
            cards.append(Card(value = values[v], suit = suits[s], number = v))
    # Create piles, scoreboards, and hands
    draw_pile = Pile(name = "Draw Pile", card_list = cards.copy())
    discard_pile = Pile(name = "Discard Pile")
    scoreboard = Scoreboard(players = player_list, book_values = values)
    for player in scoreboard.players:
        hands.append(Hand(owner = player))
    # Shuffle up and deal and sort hands
    draw_pile.shuffle()
    for hand in hands:
        hand.draw(pile = draw_pile, number = starting_hand_size)
        hand.sort()
    return cards, draw_pile, discard_pile, scoreboard, hands

def determine_player_count(playerlimit_lower, playerlimit_upper):
    # Determine and validate the count of players
    playercount = 0
    while playercount == 0:
        print('How many players ({lower}-{upper}) would like to play?'.format(lower = playerlimit_lower, upper = playerlimit_upper))
        userinput = input()
        if userinput.isdigit() and playerlimit_lower <= int(userinput) <= playerlimit_upper:
            playercount = int(userinput)
        else:
            print('Please enter a number of players between {lower} and {upper}.'.format(lower = playerlimit_lower, upper = playerlimit_upper))
    print("Great, starting a game for {count} players!".format(count = playercount))
    return playercount

def determine_playernames(playercount, char_limit = 16):
    ## MINOR BUG ## dupes can still occur with mixing upper and lower case characters
    # Determine, validate, and de-dupe the player names
    player_list = []
    for num in range(1, playercount + 1):
        playername = -1
        while playername == -1:
            print("What is Player {number}'s name?".format(number = num))
            userinput = input()[:char_limit] # limit the length of the name
            filteredinput = ''.join(filter(lambda x: x.isalnum(), userinput)) # filter out non alphanumeric characters
            if len(filteredinput) > 0:
                if len(userinput) != len(filteredinput):
                    print("Can I call you {name} instead? (Y/N)".format(name = filteredinput))
                    response = input()
                    if response in {'y','Y'}:
                        pass
                    else:
                        continue
            else:
                continue # player needs to have a non-empty name
            n = 0
            dedupedname = filteredinput
            while dedupedname in player_list: # players can't have the same name, not strictly necessary
                n += 1
                dedupedname = filteredinput + str(n)
            if dedupedname != filteredinput:
                print("Can I call you {name} instead? (Y/N)".format(name = dedupedname))
                response = input()
                if response in {'y','Y'}:
                    pass
                else:
                    continue
            playername = dedupedname
            print("Thank you, {name}!".format(name = playername))
        player_list.append(playername)
    return player_list

def start_turn_report(hands, turn, playerturn):
    # Print useful information for the player before they make a guess
    playerturn_name = hands[playerturn].name
    print("\n{name}'s Turn ({turn})".format(name = hands[playerturn].owner, turn = turn))
    hands[playerturn].pile_status()
    for hand in hands:
        if hand.name == playerturn_name:
            pass
        else:
            hand.pile_size()

def determine_card_options(hand):
    # Build parallel lists of askable rank names and their 1-based menu numbers
    name_options = []
    num_options = []
    for card in hand.card_list:
        if card.value in name_options:
            pass
        else:
            name_options.append(card.value)
            num_options.append(str(card.number + 1))
    return name_options, num_options

def determine_player_options(hands, player_list, playerturn):
    # Build parallel lists of askable opponents (non-empty hands) and menu numbers
    name_options = []
    num_options = []
    for hand in hands:
        if hand.owner == player_list[playerturn]:
            continue
        elif len(hand.card_list) > 0:
            value = player_list.index(hand.owner) + 1
            name_options.append(hand.owner)
            num_options.append(str(value))
        else:
            pass
    return name_options, num_options

# +
## Game start-up:
# First get input and validate number of players and their names with determine_player_count and determine_playernames
# Then initialize and deal out the cards with start_gofish

import string, random

playercount = determine_player_count(playerlimit_lower = 2, playerlimit_upper = 6)
player_list = determine_playernames(playercount, char_limit = 16)
cards, draw_pile, discard_pile, scoreboard, hands = start_gofish(player_list)

# Play the game until the endstate is reached -- all 13 books have been played
turn = 1
end_game = False
while end_game is False:
    end_turn = False
    # A player continues to play their turn if they guess a card correctly from another player OR draw pile
    while end_turn is False:
        playerturn = (turn - 1) % playercount # the index of the player whose turn it is
        start_turn_report(hands, turn, playerturn)
        player_name_options, player_num_options = determine_player_options(hands, player_list, playerturn)
        player_options = ["[{num}] {name}".format(num = num_option, name = name_option) for num_option, name_option in zip(player_num_options, player_name_options)]
        select_player = -1
        while select_player == -1:
            print('\nWhich player will you ask?\n {players}'.format(players = " ".join(player_options)))
            userinput = input()
            if userinput in player_name_options:
                select_player = userinput
            elif userinput in player_num_options:
                select_player = userinput
            else:
                continue
        card_name_options, card_num_options = determine_card_options(hands[playerturn])
        card_options = ["[{num}] {name}".format(num = num_option, name = name_option) for num_option, name_option in zip(card_num_options, card_name_options)]
        select_card = -1
        while select_card == -1:
            print('\nWhich card will you ask for?\n {cards}'.format(cards = " ".join(card_options)))
            userinput = input()
            if userinput in card_name_options:
                select_card = userinput
            elif userinput in card_num_options:
                select_card = userinput
            else:
                continue
        print(select_player, select_card)
        end_turn = True
    end_game = True
# -
Go Fish.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Mutable versus immutable # Consider two variables, `x` and `y`. We assign a value to `x`, and assign `x` to `y`. x = 7 y = x print(x, y) # Both `x` and `y` refer to the same object, the `int` 7. # We can modify the value of x, e.g., x += 10 # Since `int` objects are immutable, `x` and `y` now refer to two distinct objects, 17 and 7 respectively. print(x, y) # However, the situation appears to be different for mutable types. If we assign, e.g., a `list` to `x`, and then assign `x` to `y`. x = [3, 7] y = x print(x, y) # Now let's modify the `list` that `x` refers to, e.g., x += [5, 11] # Checking the values of `x` and `y`, you might expect them to be different now, just as when `x` and `y` referred to `int` values. However... print(x, y) # This seems to be very different from the previous situation, but in fact, it is not. `x` and `y` refer to the same `list`, so modifying that `list` using the reference in `x` will be visible when using `y`, since they still refer to the same `list`. As opposed to `int`, `list` is mutable, hence we can actually modify it.
Python/Pitfalls/mutable_vs_immutable.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''idl_env'': conda)'
#     metadata:
#       interpreter:
#         hash: f727b9d31a81233deed1255550f2d5a36c1210b051c8cf7ad4105ddcebb3e2fb
#     name: python3
# ---

# +
import pandas as pd
import glob
import random
import base64
import numpy as np
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import io

# Show full cell contents so the embedded <img> tags are not truncated.
pd.set_option('display.max_colwidth', None)

# +
def image_base64(im):
    """Encode a PIL image as a base64 JPEG string."""
    with BytesIO() as buffer:
        im.save(buffer, 'jpeg')
        return base64.b64encode(buffer.getvalue()).decode()

def image_formatter(im):
    """Render a PIL image as an inline-HTML <img> tag."""
    return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'

def image_formatter2(im):
    """Render an already-base64-encoded JPEG string as an <img> tag."""
    return f'<img src="data:image/jpeg;base64,{im}">'

cifar_class_map = {
    0: 'airplane',
    1: 'automobile',
    2: 'bird',
    3: 'cat',
    4: 'deer',
    5: 'dog',
    6: 'frog',
    7: 'horse',
    8: 'ship',
    9: 'truck',
}

def label_df(df):
    # Map numeric CIFAR-10 labels to class names (mutates df in place and returns it).
    df['actual'] = df['actual'].apply(lambda x:cifar_class_map[x])
    df['prediction']=df['prediction'].apply(lambda x:cifar_class_map[x])
    return df

# HTML(df.to_html(formatters={'image': image_formatter2}, escape=False))

# +
# Baseline 1: an image counts as "adversarial" only if all four detectors agree.
baseline1_adv = label_df(pd.read_csv("./dev_adv_dataset_results.csv"))
baseline1_adv['adv_detection_result']=np.all(baseline1_adv[['c1','c2','c3','c4']],axis=1)
baseline1_real = label_df(pd.read_csv("./dev_real_dataset_results.csv"))
baseline1_real['adv_detection_result']=np.all(baseline1_real[['c1','c2','c3','c4']],axis=1)

countergan_adv = pd.read_csv("./counter_gan_adv_pred.csv")
countergan_real = pd.read_csv("./counter_gan_real_pred.csv")
countergan_adv = countergan_adv.drop(['Unnamed: 0'], axis=1)
countergan_real = countergan_real.drop(['Unnamed: 0'], axis=1)

defense_gan_real = label_df(pd.read_csv("./defensegan_dev_real_dataset_results.csv"))
defense_gan_adv = label_df(pd.read_csv("./defensegan_dev_adv_dataset_results.csv"))
# -

# # Baseline 1 results

total_real = len(baseline1_real)
total_adv = len(baseline1_adv)
adversarial_detection_score = baseline1_adv.adv_detection_result.sum()/total_adv # TP/(TP+FN) since we know all images are adv in baseline1_adv
adversarial_false_positives = baseline1_real.adv_detection_result.sum()/total_real #FP/(FP+TN)
# (baseline1_adv.actual != baseline1_adv.prediction and baseline1_adv.result).sum()
print("Adversarial Detection Score:",adversarial_detection_score)
print("Falsely Detected Real Image Score:", adversarial_false_positives)

real_html = baseline1_real.to_html(formatters={'image': image_formatter2}, escape=False)
adv_html = baseline1_adv.to_html(formatters={'image': image_formatter2}, escape=False)
with open("baseline1_realimages_visualization.html","w") as f:
    f.write(real_html)
with open("baseline1_advimages_visualization.html","w") as f:
    f.write(adv_html)

# # Baseline 2(DefenseGAN Results)

total_real = len(defense_gan_real)
total_adv = len(defense_gan_adv)
adversarial_detection_score = (defense_gan_adv.actual == defense_gan_adv.prediction).sum()/total_adv # TP/(TP+FN) since we know all images are adv in baseline1_adv
adversarial_false_positives = (defense_gan_real.actual == defense_gan_real.prediction).sum()/total_real #FP/(FP+TN)
# (baseline1_adv.actual != baseline1_adv.prediction and baseline1_adv.result).sum()
print("Adversarial Detection Score:",adversarial_detection_score)
print("Falsely Detected Real Image Score:", adversarial_false_positives)

# FIX: these previously re-exported the Baseline-1 dataframes
# (baseline1_real/baseline1_adv) into the DefenseGAN visualization files.
real_html = defense_gan_real.to_html(formatters={'image': image_formatter2}, escape=False)
adv_html = defense_gan_adv.to_html(formatters={'image': image_formatter2}, escape=False)
with open("defensegan_realimages_visualization.html","w") as f:
    f.write(real_html)
with open("defensegan_advimages_visualization.html","w") as f:
    f.write(adv_html)

# # Counter GAN Results

# +
countergan_adv.head()

total_real = len(countergan_real)
total_adv = len(countergan_adv)
without_defense_adv_score = (countergan_adv.gt_value == countergan_adv.classifier).sum()/total_adv
without_defense_real_score = (countergan_real.gt_value == countergan_real.classifier).sum()/total_real
adversarial_detection_score = (countergan_adv.gt_value == countergan_adv.defense_pred).sum()/total_adv # TP/(TP+FN)
adversarial_false_positives = (countergan_real.gt_value == countergan_real.defense_pred).sum()/total_real #FP/(FP+TN)

print("Without Defense:")
print("Classifier Score on Adv dataset:",without_defense_adv_score)
print("Classifier Score on Real dataset:", without_defense_real_score)
print("\nWith Defense:")
print("Classifier Score on Adv dataset with defense:",adversarial_detection_score)
print("Classifier Score on Real dataset with defense:", adversarial_false_positives)
# -
results_analysis/merge_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [Learn Quantum Computing with Python and Q#](https://www.manning.com/books/learn-quantum-computing-with-python-and-q-sharp?a_aid=learn-qc-granade&a_bid=ee23f338)<br>Chapter 9 Exercise Solutions # ---- # > Copyright (c) <NAME> and <NAME>. # > Code sample from the book "Learn Quantum Computing with Python and Q#" by # > <NAME> and <NAME>, published by Manning Publications Co. # > Book ISBN 9781617296130. # > Code licensed under the MIT License. # ### Preamble import numpy as np import qutip as qt import matplotlib.pyplot as plt import qsharp # %matplotlib inline # ### Exercise 9.1 # **In Chapter 4, you used Python type annotations to represent the concept of a _strategy_ in the CHSH game. # User-defined types in Q# can be used in a similar fashion. # Give it a go by defining a new UDT for CHSH strategies and then use your new UDT to wrap the constant strategy from Chapter 4.** # # *HINT*: Your and Eve's parts of the strategy can each be represented as operations that take a `Result` and output a `Result`. # That is, as operations of type `Result => Result`. strategy = qsharp.compile(""" newtype Strategy = ( PlayAlice: (Result => Result), PlayBob: (Result => Result) ); """) strategy # ---- # ### Exercise 9.2 # **You can find the model for Lancelot's results if you use Born's rule! # We have put the definition from Chapter 2 below, see if you can plot the resulting value as a function of Lancelot's scale using Python. 
# Does your plot look like a trigonometric function?** # # \begin{align} # \Pr(\text{measurement} | \text{state}) = |\left\langle \text{measurement} \mid \text{state} \right\rangle|^2 # \end{align} # # *HINT*: For Lancelot's measurements, the $\left\langle \text{measurement} \right|$ part of Born's rule is given by $\left\langle 1 \right|$. # Immediately before measuring, his qubit will be in the state $H R_1(\theta * \textrm{scale}) H \left|0\right\rangle$. # You can simulate the `R1` operation in QuTiP by using the matrix form in the Q# reference at https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.intrinsic.r1. # For the purposes of illustration, let's choose $\theta = 0.456$ radians. theta = 0.456 # Next, as the hint gives us, we'll need to define a matrix that we can use to simulate the `R1` operation: def r1_matrix(angle: float) -> qt.Qobj: return qt.Qobj([ [1, 0], [0, np.exp(1j * angle)] ]) r1_matrix(theta) # We can use this to find Lancelot's state after applying each hidden rotation: def lancelot_final_state(theta: float, scale: float) -> qt.Qobj: initial_state = qt.basis(2, 0) # Simulate the H Q# operation. state = qt.qip.operations.hadamard_transform() * initial_state # Simulate the R1 operation. state = r1_matrix(theta * scale) * state # Simulate undoing the H operation with another call to H. state = qt.qip.operations.hadamard_transform() * state return state lancelot_final_state(theta, 1.2) # We now have everything we need to predict the probability of a "1" outcome: def lancelot_pr1(theta: float, scale: float) -> float: ket1 = qt.basis(2, 1) # Apply Born's rule. 
return np.abs((ket1.dag() * lancelot_final_state(theta, scale))[0, 0]) ** 2 lancelot_pr1(theta, 1.2) # Plotting for a variety of different scales, we see the expected sinusoidal shape: scales = np.linspace(0, 20, 201) pr1s = [lancelot_pr1(theta, scale) for scale in scales] plt.plot(scales, pr1s) # ---- # ### Exercise 9.3 # **Try writing Q# programs that use `AssertQubit` and `DumpMachine` to verify that:** # # - $\left|+\right\rangle$ and $\left|-\right\rangle$ are both eigenstates of the `X` operation. # - $\left|0\right\rangle$ and $\left|1\right\rangle$ are both eigenstates of the `Rz` operation, regardless of what angle you choose to rotate by. # # For even more practice, try figuring out what the eigenstates of the `Y` and `CNOT` operations and writing a Q# program to verify your guesses! # # *HINT*: You can find the vector form of the eigenstates of a unitary operation using QuTiP. # For instance, the eigenstates of the `Y` operation are given by `qt.sigmay().eigenstates()`. # From there, you can use what you learned about rotations in Chapters 4 and 5 to figure out which Q# operations prepare those states. # # Don't forget you can always test if a particular state is an eigenstate of an operation by just writing a quick test in Q#! # Let's start by verifying that $\left|+\right\rangle$ and $\left|-\right\rangle$ are both eigenstates of the `X` operation. verify_x_eigenstates = qsharp.compile(""" open Microsoft.Quantum.Diagnostics; operation VerifyXEigenstates() : Unit { use q = Qubit(); // Prepare |+⟩. H(q); // Check that the X operation does nothing. X(q); Message("Checking that |+⟩ is an eigenstate of the X operation."); DumpMachine(); // Reset so that we're ready for the next check. Reset(q); // Next, do the same with |−⟩. 
X(q); H(q); X(q); Message(""); Message("Checking that |−⟩ is an eigenstate of the X operation."); DumpMachine(); Reset(q); } """) verify_x_eigenstates.simulate() # Notice that in both cases, we got back the same state (up to a global phase), confirming the first part of the exercise. # Doing the same for `Rz`, we add an input for the rotation angle: verify_rz_eigenstates = qsharp.compile(""" open Microsoft.Quantum.Diagnostics; operation VerifyRzEigenstates(angle : Double) : Unit { use q = Qubit(); // Prepare |0⟩ by doing nothing. // Check that the Rz operation does nothing. Rz(angle, q); Message("Checking that |0⟩ is an eigenstate of the Rz operation."); DumpMachine(); // Reset so that we're ready for the next check. Reset(q); // Next, do the same with |1⟩. X(q); Rz(angle, q); Message(""); Message("Checking that |1⟩ is an eigenstate of the Rz operation."); DumpMachine(); Reset(q); } """) verify_rz_eigenstates.simulate(angle=0.123) verify_rz_eigenstates.simulate(angle=4.567) # Using the hint, we can find what eigenstates we should try for the `Y` and `CNOT` operations: qt.sigmay().eigenstates() qt.qip.operations.cnot().eigenstates() # That is, $(|0\rangle + i |1\rangle) / \sqrt{2}$ and $(|0\rangle - i |1\rangle) / \sqrt{2}$ are eigenstates of the `Y` operation, while $|00\rangle$, $|01\rangle$, $|1+\rangle$ and $|1-\rangle$ are eigenstates of the `CNOT` operation. # ---- # ### Exercise 9.4 # **Verify that $\left|0\right\rangle\left\langle 0\right| \otimes \mathbb{1} + \left|1\right\rangle\left\langle{1}\right| \otimes X$ is the same as:** # # \begin{align} # U_{\mathrm{CNOT}} = \left(\begin{matrix} # \mathbb{1} & 0 \\ # 0 & X # \end{matrix}\right). # \end{align} # # *HINT*: You can verify this by hand, by using NumPy's `np.kron` function, or QuTiP's `qt.tensor` function. # If you need a refresher, check out how you simulated teleportation in Chapter 5, or check out the derivation of the Deutsch–Jozsa algorithm in Chapter 7. 
ket0 = qt.basis(2, 0) ket1 = qt.basis(2, 1) projector_0 = ket0 * ket0.dag() projector_0 projector_1 = ket1 * ket1.dag() projector_1 qt.tensor(projector_0, qt.qeye(2)) + qt.tensor(projector_1, qt.sigmax()) # ---- # ### Exercise 9.5 # **Either by hand or using QuTiP, verify that state dumped by running the Q# snippet below is the same as $\left|-1\right\rangle = \left|-\right\rangle \otimes \left|1\right\rangle$.** # # ```Q# # use (control, target) = (Qubit(), Qubit()); # H(control); # X(target); # # CZ(control, target); # DumpRegister((), [control, target]); # # Reset(control); # Reset(target); # ``` # # *NOTE*: If you seem to get the right answer other than that the order of the qubits are swapped, note that `DumpMachine` uses a _little-endian_ representation to order states. # In little-endian, |2⟩ is short-hand for |01⟩, not |10⟩. # If this seems confusing, blame the x86 processor architecture… # Let's first run the above snippet to see what output is generated. run_exercise_95 = qsharp.compile(""" open Microsoft.Quantum.Diagnostics; operation RunExercise95() : Unit { use (control, target) = (Qubit(), Qubit()); H(control); X(target); CZ(control, target); DumpRegister((), [control, target]); Reset(control); Reset(target); } """) run_exercise_95.simulate() # Next, let's compute what $\left|-1\right\rangle = \left|-\right\rangle \otimes \left|1\right\rangle$ in vector notation by using QuTiP. ket_minus = qt.Qobj([ [1], [-1] ]) / np.sqrt(2) ket1 = qt.basis(2, 1) qt.tensor(ket_minus, ket1) # As the note suggests, these two outputs appear different at first, but the resolution is that Q# uses little-endian notation, such that "|2⟩" means the |01⟩ amplitude, which QuTiP prints as the second row. # We can make this more clear by manually telling IQ# to print out as bitstrings instead of little-endian notation. # # **WARNING:** Calling the `%config` magic from Python is not officially supported, and may break in future versions of Q#. 
# Flip IQ#'s state-dump labeling from the default little-endian integers to
# explicit bitstrings, so the printed amplitudes are unambiguous about which
# qubit each position refers to.
# NOTE(review): this drives the %config magic through the private
# qsharp.client._execute helper — unsupported API surface that may break in
# future Q# releases.
qsharp.client._execute('%config dump.basisStateLabelingConvention = "Bitstring"')

# Re-run the exercise; the dumped state should now line up with the QuTiP
# tensor-product vector computed earlier.
run_exercise_95.simulate()

# ----

# ### Epilogue
#
# _The following cell logs what version of the components this was last tested with._

# Record the component versions this notebook was last validated against.
qsharp.component_versions()
ch09/ch09-exercise-solutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="Fqp93JixVuiN" # ##### Copyright 2020 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + colab={} colab_type="code" id="MeKZo1dnV1cE" #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="DcriL2xPrG3_" # # TensorFlow Probability on JAX # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/probability/examples/TensorFlow_Probability_on_JAX"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_on_JAX.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/probability/blob/main/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_on_JAX.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a 
href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_on_JAX.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] colab_type="text" id="pbgRDzCar7WY" # TensorFlow Probability (TFP) is a library for probabilistic reasoning and statistical analysis that now also works on [JAX](https://github.com/google/jax)! For those not familiar, JAX is a library for accelerated numerical computing based on composable function transformations. # # TFP on JAX supports a lot of the most useful functionality of regular TFP while preserving the abstractions and APIs that many TFP users are now comfortable with. # + [markdown] colab_type="text" id="yCaBElaf0soq" # ## Setup # + [markdown] colab_type="text" id="bF_03uaV1ubC" # TFP on JAX does **not** depend on TensorFlow; let's uninstall TensorFlow from this Colab entirely. # + colab={} colab_type="code" id="dQMyDsSckCpV" # !pip uninstall tensorflow -y -q # + [markdown] colab_type="text" id="RWuX9PEH1lnp" # We can install TFP on JAX with the latest nightly builds of TFP. # + colab={} colab_type="code" id="Tl5CfrtVkQd7" # !pip install -Uq tfp-nightly[jax] > /dev/null # + [markdown] colab_type="text" id="9KmtUvMH9hYg" # Let's import some useful Python libraries. # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="oEQyYGq03SM_" outputId="8d32bae2-e7f4-4015-e24c-0f39d9498aca" import matplotlib.pyplot as plt import numpy as np import seaborn as sns from sklearn import datasets sns.set(style='white') # + [markdown] colab_type="text" id="w1bCh_GA1pxo" # Let's also import some basic JAX functionality. 
# + colab={} colab_type="code" id="pSa7v4CWk38v" import jax.numpy as jnp from jax import grad from jax import jit from jax import random from jax import value_and_grad from jax import vmap # + [markdown] colab_type="text" id="53T-n_PM11Mf" # ## Importing TFP on JAX # # To use TFP on JAX, simply import the `jax` "substrate" and use it as you usually would `tfp`: # + colab={} colab_type="code" id="nlx8w2gPkEM6" from tensorflow_probability.substrates import jax as tfp tfd = tfp.distributions tfb = tfp.bijectors tfpk = tfp.math.psd_kernels # + [markdown] colab_type="text" id="jQ-FGn2hje8b" # ## Demo: Bayesian logistic regression # + [markdown] colab_type="text" id="-xJm7Q5YkD_3" # To demonstrate what we can do with the JAX backend, we'll implement Bayesian logistic regression applied to the classic Iris dataset. # # # First, let's import the Iris dataset and extract some metadata. # # # + colab={} colab_type="code" id="0HHsy5lsf_S7" iris = datasets.load_iris() features, labels = iris['data'], iris['target'] num_features = features.shape[-1] num_classes = len(iris.target_names) # + [markdown] colab_type="text" id="9pXr2atnk8xA" # We can define the model using `tfd.JointDistributionCoroutine`. We'll put standard normal priors on both the weights and the bias term then write a `target_log_prob` function that pins the sampled labels to the data. # + colab={} colab_type="code" id="0Ri7RxnekWUr" Root = tfd.JointDistributionCoroutine.Root def model(): w = yield Root(tfd.Sample(tfd.Normal(0., 1.), sample_shape=(num_features, num_classes))) b = yield Root( tfd.Sample(tfd.Normal(0., 1.), sample_shape=(num_classes,))) logits = jnp.dot(features, w) + b yield tfd.Independent(tfd.Categorical(logits=logits), reinterpreted_batch_ndims=1) dist = tfd.JointDistributionCoroutine(model) def target_log_prob(*params): return dist.log_prob(params + (labels,)) # + [markdown] colab_type="text" id="A-U0hmnIlQM9" # We sample from `dist` to produce an initial state for MCMC. 
We can then define a function that takes in a random key and an initial state, and produces 500 samples from a No-U-Turn-Sampler (NUTS). Note that we can use JAX transformations like `jit` to compile our NUTS sampler using XLA. # + colab={"base_uri": "https://localhost:8080/", "height": 285} colab_type="code" id="PBYkURyakn3c" outputId="6fc0cbf5-b11f-4183-f076-c0665972ee64" init_key, sample_key = random.split(random.PRNGKey(0)) init_params = tuple(dist.sample(seed=init_key)[:-1]) @jit def run_chain(key, state): kernel = tfp.mcmc.NoUTurnSampler(target_log_prob, 1e-3) return tfp.mcmc.sample_chain(500, current_state=state, kernel=kernel, trace_fn=lambda _, results: results.target_log_prob, num_burnin_steps=500, seed=key) states, log_probs = run_chain(sample_key, init_params) plt.figure() plt.plot(log_probs) plt.ylabel('Target Log Prob') plt.xlabel('Iterations of NUTS') plt.show() # + [markdown] colab_type="text" id="TL-KmNG8molC" # Let's use our samples to perform Bayesian model averaging (BMA) by averaging the predicted probabilies of each set of weights. # # First let's write a function that for a given set of parameters will produce the probabilities over each class. We can use `dist.sample_distributions` to obtain the final distribution in the model. # + colab={} colab_type="code" id="sRkYo3z1lox5" def classifier_probs(params): dists, _ = dist.sample_distributions(seed=random.PRNGKey(0), value=params + (None,)) return dists[-1].distribution.probs_parameter() # + [markdown] colab_type="text" id="EcaE23RSnHP3" # We can `vmap(classifier_probs)` over the set of samples to get the predicted class probabilities for each of our samples. We then compute the average accuracy across each sample, and the accuracy from Bayesian model averaging. 
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="dS01h9X3nBzh" outputId="320c6199-c773-4fac-e7ac-9af9ba858eb0" all_probs = jit(vmap(classifier_probs))(states) print('Average accuracy:', jnp.mean(all_probs.argmax(axis=-1) == labels)) print('BMA accuracy:', jnp.mean(all_probs.mean(axis=0).argmax(axis=-1) == labels)) # + [markdown] colab_type="text" id="C3wK_Yfcngz-" # Looks like BMA reduces our error rate by almost a third! # + [markdown] colab_type="text" id="TYamCA8E3jle" # ## Fundamentals # + [markdown] colab_type="text" id="UbFn9vIS3nIa" # TFP on JAX has an identical API to TF where instead of accepting TF objects like `tf.Tensor`s it accepts the JAX analogue. For example, wherever a `tf.Tensor` was previously used as input, the API now expects a JAX `DeviceArray`. Instead of returning a `tf.Tensor`, TFP methods will return `DeviceArray`s. TFP on JAX also works with nested structures of JAX objects, like a list or dictionary of `DeviceArray`s. # # + [markdown] colab_type="text" id="rmdg7MvylrBP" # ## Distributions # + [markdown] colab_type="text" id="uME3nS_iRybJ" # Most of TFP's distributions are supported in JAX with very similar semantics to their TF counterparts. They are also registered as [JAX Pytrees](https://jax.readthedocs.io/en/latest/pytrees.html), so they can be inputs and outputs of JAX-transformed functions. # + [markdown] colab_type="text" id="15xO-noWem0d" # ### Basic distributions # + [markdown] colab_type="text" id="J5ir3s5hlukJ" # The `log_prob` method for distributions works the same. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="6zCEN_fhlszJ" outputId="e15850eb-5a25-4d21-bfd5-090a37b49daf" dist = tfd.Normal(0., 1.) print(dist.log_prob(0.)) # + [markdown] colab_type="text" id="3uTla3s-l8SO" # Sampling from a distribution requires explicitly passing in a `PRNGKey` (or list of integers) as the `seed` keyword argument. 
Failing to explicitly pass in a seed will throw an error. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="aifUEqgFksk-" outputId="32556e1f-b7e0-484e-fa6b-23c46358f1f5" tfd.Normal(0., 1.).sample(seed=random.PRNGKey(0)) # + [markdown] colab_type="text" id="Nn_hJLghq4FG" # The shape semantics for distributions remain the same in JAX, where distributions will each have an `event_shape` and a `batch_shape` and drawing many samples will add additional `sample_shape` dimensions. # + [markdown] colab_type="text" id="uyCZDoESrVfi" # For example, a `tfd.MultivariateNormalDiag` with vector parameters will have a vector event shape and empty batch shape. # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="kihalWoWq1Kb" outputId="ff3ddcdf-434f-494b-f9c3-adf4302310d4" dist = tfd.MultivariateNormalDiag( loc=jnp.zeros(5), scale_diag=jnp.ones(5) ) print('Event shape:', dist.event_shape) print('Batch shape:', dist.batch_shape) # + [markdown] colab_type="text" id="IDMQOHG3rb7F" # On the other hand, a `tfd.Normal` parameterized with vectors will have a scalar event shape and vector batch shape. # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="eqNCoHY3rg2U" outputId="c82dc09e-50e0-4d85-d6cb-6f88e381a48b" dist = tfd.Normal( loc=jnp.ones(5), scale=jnp.ones(5), ) print('Event shape:', dist.event_shape) print('Batch shape:', dist.batch_shape) # + [markdown] colab_type="text" id="yM_dS0-g_E7V" # The semantics of taking `log_prob` of samples works the same in JAX too. 
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="jy3tVYMN_BIG" outputId="4df9a307-215a-4036-8a38-777d073abfa6" dist = tfd.Normal(jnp.zeros(5), jnp.ones(5)) s = dist.sample(sample_shape=(10, 2), seed=random.PRNGKey(0)) print(dist.log_prob(s).shape) dist = tfd.Independent(tfd.Normal(jnp.zeros(5), jnp.ones(5)), 1) s = dist.sample(sample_shape=(10, 2), seed=random.PRNGKey(0)) print(dist.log_prob(s).shape) # + [markdown] colab_type="text" id="nvDiFv3G6rre" # Because JAX `DeviceArray`s are compatible with libraries like NumPy and Matplotlib, we can feed samples directly into a plotting function. # + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="FZ6iPEU13hG0" outputId="1b05a58c-2c77-49d8-b818-9900e0fb6901" sns.distplot(tfd.Normal(0., 1.).sample(1000, seed=random.PRNGKey(0))) plt.show() # + [markdown] colab_type="text" id="_UTL6julDADw" # `Distribution` methods are compatible with JAX transformations. # + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="Itff7LYJDFKo" outputId="5a0dad66-9a34-48e0-f2d9-50a38f032ec1" sns.distplot(jit(vmap(lambda key: tfd.Normal(0., 1.).sample(seed=key)))( random.split(random.PRNGKey(0), 2000))) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="KM296i9CDS3w" outputId="02c1af1e-ff71-4d38-c397-3b4b2c0999d9" x = jnp.linspace(-5., 5., 100) plt.plot(x, jit(vmap(grad(tfd.Normal(0., 1.).prob)))(x)) plt.show() # + [markdown] colab_type="text" id="kWlbjWRv9lrC" # Because TFP distributions are registered as JAX pytree nodes, we can write functions with distributions as inputs or outputs and transform them using `jit`, but they are not yet supported as arguments to `vmap`-ed functions. 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="9QE5gvqL90hu" outputId="2628b1d5-ef4d-4edf-fbd6-84f6e51a9b95" @jit def random_distribution(key): loc_key, scale_key = random.split(key) loc, log_scale = random.normal(loc_key), random.normal(scale_key) return tfd.Normal(loc, jnp.exp(log_scale)) random_dist = random_distribution(random.PRNGKey(0)) print(random_dist.mean(), random_dist.variance()) # + [markdown] colab_type="text" id="aFSFOU9q254Y" # ### Transformed distributions # + [markdown] colab_type="text" id="Oqif6lnT3Dei" # Transformed distributions i.e. distributions whose samples are passed through a `Bijector` also work out of the box (bijectors work too! see below). # + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="sZAEfYrl262w" outputId="97c81187-4027-4634-f9f0-678876aaff60" dist = tfd.TransformedDistribution( tfd.Normal(0., 1.), tfb.Sigmoid() ) sns.distplot(dist.sample(1000, seed=random.PRNGKey(0))) plt.show() # + [markdown] colab_type="text" id="wVzuMoz2AHtM" # ### Joint distributions # + [markdown] colab_type="text" id="b14tHrJAAKXx" # TFP offers `JointDistribution`s to enable combining component distributions into a single distribution over multiple random variables. Currently, TFP offers three core variants (`JointDistributionSequential`, `JointDistributionNamed`, and `JointDistributionCoroutine`) all of which are supported in JAX. The `AutoBatched` variants are also all supported. 
# + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="ThcymDXYAgCd" outputId="f6869d0b-b43c-43c4-b122-1f567bbb1489"
# Sequential joint: x ~ N(0, 1), then a tight Normal centered on x.
dist = tfd.JointDistributionSequential([
  tfd.Normal(0., 1.),
  lambda x: tfd.Normal(x, 1e-1)
])
plt.scatter(*dist.sample(1000, seed=random.PRNGKey(0)), alpha=0.5)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="4CTLC5ZjAs_0" outputId="39867395-8915-4c39-d316-a02fd6d972e9"
# Named joint: same graphical model expressed as a dict of (lambda-)distributions.
joint = tfd.JointDistributionNamed(dict(
    e=             tfd.Exponential(rate=1.),
    n=             tfd.Normal(loc=0., scale=2.),
    m=lambda n, e: tfd.Normal(loc=n, scale=e),
    x=lambda    m: tfd.Sample(tfd.Bernoulli(logits=m), 12),
))
joint.sample(seed=random.PRNGKey(0))

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ydOiznTJBldF" outputId="71e24a32-fabb-4613-83a0-fdfdf7651057"
Root = tfd.JointDistributionCoroutine.Root

def model():
  """Coroutine form of the same model: e ~ Exp(1), n ~ N(0, 2), m ~ N(n, e),
  x ~ 12 i.i.d. Bernoulli(logits=m) draws."""
  e = yield Root(tfd.Exponential(rate=1.))
  # CONSISTENCY FIX: use a float literal for `loc` (was `loc=0`). The named
  # variant above uses `loc=0.`, and mixing int/float literals is exactly the
  # kind of TF-vs-JAX dtype-promotion difference the Caveats section warns about.
  n = yield Root(tfd.Normal(loc=0., scale=2.))
  m = yield tfd.Normal(loc=n, scale=e)
  x = yield tfd.Sample(tfd.Bernoulli(logits=m), 12)

joint = tfd.JointDistributionCoroutine(model)
joint.sample(seed=random.PRNGKey(0))

# + [markdown] colab_type="text" id="IZ0mutV8-SQm"
# ### Other distributions

# + [markdown] colab_type="text" id="qbu5BRnJ-V7p"
# Gaussian processes also work in JAX mode!
# + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="8Cr6-Jyf_Rsu" outputId="0811d2e5-190e-42e7-bcf1-33c8b4ee4db2" k1, k2, k3 = random.split(random.PRNGKey(0), 3) observation_noise_variance = 0.01 f = lambda x: jnp.sin(10*x[..., 0]) * jnp.exp(-x[..., 0]**2) observation_index_points = random.uniform( k1, [50], minval=-1.,maxval= 1.)[..., jnp.newaxis] observations = f(observation_index_points) + tfd.Normal( loc=0., scale=jnp.sqrt(observation_noise_variance)).sample(seed=k2) index_points = jnp.linspace(-1., 1., 100)[..., jnp.newaxis] kernel = tfpk.ExponentiatedQuadratic(length_scale=0.1) gprm = tfd.GaussianProcessRegressionModel( kernel=kernel, index_points=index_points, observation_index_points=observation_index_points, observations=observations, observation_noise_variance=observation_noise_variance) samples = gprm.sample(10, seed=k3) for i in range(10): plt.plot(index_points, samples[i], alpha=0.5) plt.plot(observation_index_points, observations, marker='o', linestyle='') plt.show() # + [markdown] colab_type="text" id="Kc4jP_sY_0CI" # Hidden Markov models are also supported. # + colab={"base_uri": "https://localhost:8080/", "height": 139} colab_type="code" id="GoJ_tnaZ_1gT" outputId="fd9606e6-9f12-4f68-fa20-29941934b4b5" initial_distribution = tfd.Categorical(probs=[0.8, 0.2]) transition_distribution = tfd.Categorical(probs=[[0.7, 0.3], [0.2, 0.8]]) observation_distribution = tfd.Normal(loc=[0., 15.], scale=[5., 10.]) model = tfd.HiddenMarkovModel( initial_distribution=initial_distribution, transition_distribution=transition_distribution, observation_distribution=observation_distribution, num_steps=7) print(model.mean()) print(model.log_prob(jnp.zeros(7))) print(model.sample(seed=random.PRNGKey(0))) # + [markdown] colab_type="text" id="v5tN7E2ySKTa" # A few distributions like `PixelCNN` are not supported yet due to strict dependencies on TensorFlow or XLA incompatibilities. 
# + [markdown] colab_type="text" id="_QNMPVp07-YJ" # ## Bijectors # + [markdown] colab_type="text" id="XOCBtzf48GM0" # Most of TFP's bijectors are supported in JAX today! # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="aveBHt2y7_zT" outputId="c411f793-6aef-4aab-9a8f-dd58be4587bb" tfb.Exp().inverse(1.) # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="AAIGX7-T-pug" outputId="7d2bc649-327d-4b02-a868-4ee87680db44" bij = tfb.Shift(1.)(tfb.Scale(3.)) print(bij.forward(jnp.ones(5))) print(bij.inverse(jnp.ones(5))) # + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="IsqXLIz1-uFP" outputId="9f74ab78-4d91-47de-b650-26bfa776fda5" b = tfb.FillScaleTriL(diag_bijector=tfb.Exp(), diag_shift=None) print(b.forward(x=[0., 0., 0.])) print(b.inverse(y=[[1., 0], [.5, 2]])) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="eb85mBpe-xZ7" outputId="8d227111-5eac-4e57-a200-29d2335cc908" b = tfb.Chain([tfb.Exp(), tfb.Softplus()]) # or: # b = tfb.Exp()(tfb.Softplus()) print(b.forward(-jnp.ones(5))) # + [markdown] colab_type="text" id="EFPnn9fF8aKZ" # Bijectors are compatible with JAX transformations like `jit`, `grad` and `vmap`. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="snejb0gh8m-Z" outputId="7159b53e-6723-428f-966d-52e0791b7187" jit(vmap(tfb.Exp().inverse))(jnp.arange(4.)) # + colab={"base_uri": "https://localhost:8080/", "height": 268} colab_type="code" id="Vp5AGkwB8tS-" outputId="2572ea26-4b00-442e-b41e-d8cc2fabc386" x = jnp.linspace(0., 1., 100) plt.plot(x, jit(grad(lambda x: vmap(tfb.Sigmoid().inverse)(x).sum()))(x)) plt.show() # + [markdown] colab_type="text" id="sT5iNTz-RjZH" # Some bijectors, like `RealNVP` and `FFJORD` are not yet supported. 
# + [markdown] colab_type="text" id="XTY99bpb_puj" # ## MCMC # + [markdown] colab_type="text" id="cuRf29WXCeMz" # We've ported `tfp.mcmc` to JAX as well, so we can run algorithms like Hamiltonian Monte Carlo (HMC) and the No-U-Turn-Sampler (NUTS) in JAX. # + colab={} colab_type="code" id="dJaHRkDI_qY_" target_log_prob = tfd.MultivariateNormalDiag(jnp.zeros(2), jnp.ones(2)).log_prob # + [markdown] colab_type="text" id="nzUl_pAZDwcw" # Unlike TFP on TF, we are required to pass a `PRNGKey` into `sample_chain` using the `seed` keyword argument. # + colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="PSMURx2yC6aF" outputId="9c3b4acb-dbb6-4319-88a9-e5dc7ccd6278" def run_chain(key, state): kernel = tfp.mcmc.NoUTurnSampler(target_log_prob, 1e-1) return tfp.mcmc.sample_chain(1000, current_state=state, kernel=kernel, trace_fn=lambda _, results: results.target_log_prob, seed=key) states, log_probs = jit(run_chain)(random.PRNGKey(0), jnp.zeros(2)) plt.figure() plt.scatter(*states.T, alpha=0.5) plt.figure() plt.plot(log_probs) plt.show() # + [markdown] colab_type="text" id="F8ljmY_kEPkS" # To run multiple chains, we can either pass a batch of states into `sample_chain` or use `vmap` (though we have not yet explored performance differences between the two approaches). # + colab={"base_uri": "https://localhost:8080/", "height": 519} colab_type="code" id="swPzQ_OuETZt" outputId="8143e41c-bc02-48f8-d81a-5bc057a67542" states, log_probs = jit(run_chain)(random.PRNGKey(0), jnp.zeros([10, 2])) plt.figure() for i in range(10): plt.scatter(*states[:, i].T, alpha=0.5) plt.figure() for i in range(10): plt.plot(log_probs[:, i], alpha=0.5) plt.show() # + [markdown] colab_type="text" id="vtCVUJ2jWVj0" # ## Optimizers # + [markdown] colab_type="text" id="HUtYH1qZdsSH" # TFP on JAX supports some important optimizers like BFGS and L-BFGS. Let's set up a simple scaled quadratic loss function. 
# + colab={} colab_type="code" id="veOHaWtOeE0-" minimum = jnp.array([1.0, 1.0]) # The center of the quadratic bowl. scales = jnp.array([2.0, 3.0]) # The scales along the two axes. # The objective function and the gradient. def quadratic_loss(x): return jnp.sum(scales * jnp.square(x - minimum)) start = jnp.array([0.6, 0.8]) # Starting point for the search. # + [markdown] colab_type="text" id="NAhizt6QfJyj" # BFGS can find the minimum of this loss. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="WokahuxLfAyE" outputId="dbfadf67-8f5e-45a2-efc6-a265ab3aa8a1" optim_results = tfp.optimizer.bfgs_minimize( value_and_grad(quadratic_loss), initial_position=start, tolerance=1e-8) # Check that the search converged assert(optim_results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(optim_results.position, minimum) # Print out the total number of function evaluations it took. Should be 5. print("Function evaluations: %d" % optim_results.num_objective_evaluations) # + [markdown] colab_type="text" id="6CaYRKQDfMAf" # So can L-BFGS. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="O6p8aKmLfCHD" outputId="79dee0b4-666c-423b-ac19-5e2ffe19ea6c" optim_results = tfp.optimizer.lbfgs_minimize( value_and_grad(quadratic_loss), initial_position=start, tolerance=1e-8) # Check that the search converged assert(optim_results.converged) # Check that the argmin is close to the actual value. np.testing.assert_allclose(optim_results.position, minimum) # Print out the total number of function evaluations it took. Should be 5. print("Function evaluations: %d" % optim_results.num_objective_evaluations) # + [markdown] colab_type="text" id="IqPKj0fEfOKB" # To `vmap` L-BFGS, let's set up a function that optimizes the loss for a single starting point. 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="WtNGtdOlfTKV" outputId="3c2fa10d-38ad-4c15-e650-12ec984be28a"
def optimize_single(start):
  """Run L-BFGS on `quadratic_loss` from one starting point `start`."""
  return tfp.optimizer.lbfgs_minimize(
      value_and_grad(quadratic_loss),
      initial_position=start,
      tolerance=1e-8)

# Optimize 10 random starting points in parallel: vmap batches the solver,
# jit compiles the whole batch with XLA.
all_results = jit(vmap(optimize_single))(
    random.normal(random.PRNGKey(0), (10, 2)))
assert all(all_results.converged)
for i in range(10):
  # BUG FIX: this loop previously asserted on `optim_results.position[i]` —
  # the leftover single-start result from the cell above — so it never
  # actually checked the batched vmap run. Verify `all_results` instead.
  np.testing.assert_allclose(all_results.position[i], minimum)
print("Function evaluations: %s" % all_results.num_objective_evaluations)

# + [markdown] colab_type="text" id="GAeO8W2PiT8m"
# ## Caveats

# + [markdown] colab_type="text" id="nuuWzpFwiR6t"
# There are some fundamental differences between TF and JAX, so some TFP behaviors will differ between the two substrates and not all functionality is supported. For example,
#
#
# * TFP on JAX does not support anything like `tf.Variable` since nothing like it exists in JAX. This also means utilities like `tfp.util.TransformedVariable` are not supported either.
# * `tfp.layers` is not supported in the backend yet, due to its dependence on Keras and `tf.Variable`s.
# * `tfp.math.minimize` does not work in TFP on JAX because of its dependence on `tf.Variable`.
# * With TFP on JAX, tensor shapes are always concrete integer values and are never unknown/dynamic as in TFP on TF.
# * Pseudorandomness is handled differently in TF and JAX (see appendix).
# * Libraries in `tfp.experimental` are not guaranteed to exist in the JAX substrate.
# * Dtype promotion rules are different between TF and JAX. TFP on JAX tries to respect TF's dtype semantics internally, for consistency.
# * Bijectors have not yet been registered as JAX pytrees.
#
#
# To see the complete list of what is supported in TFP on JAX, please refer to the [API documentation](https://www.tensorflow.org/probability/api_docs/python/tfp/substrates/jax).
# + [markdown] colab_type="text" id="DHTL9SBqFuJq" # ## Conclusion # + [markdown] colab_type="text" id="EbLTaQ5UFwDg" # We've ported a lot of TFP's features to JAX and are excited to see what everyone will build. Some functionality is not yet supported; if we've missed something important to you (or if you find a bug!) please reach out to us -- you can email [<EMAIL>](mailto:<EMAIL>) or file an issue on [our Github repo](https://github.com/tensorflow/probability). # + [markdown] colab_type="text" id="utaCcF7vmQPC" # ## Appendix: pseudorandomness in JAX # + [markdown] colab_type="text" id="OPx2BrR5mT4i" # JAX's pseudorandom number generation (PRNG) model is *stateless*. Unlike a stateful model, there is no mutable global state that evolves after each random draw. In JAX's model, we start with a PRNG *key*, which acts like a pair of 32-bit integers. We can construct these keys by using `jax.random.PRNGKey`. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="apUxosBpoFw1" outputId="85482170-6605-4cdf-9b71-f9834143d972" key = random.PRNGKey(0) # Creates a key with value [0, 0] print(key) # + [markdown] colab_type="text" id="auAK011coID9" # Random functions in JAX consume a key to *deterministically* produce a random variate, meaning they should not be used again. For example, we can use `key` to sample a normally distributed value, but we should not use `key` again elsewhere. Furthermore, passing the same value into `random.normal` will produce the same value. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="13CLYoUloPIe" outputId="a046096d-01ea-446e-d0b9-b825c82c918b" print(random.normal(key)) # + [markdown] colab_type="text" id="nUY7MRBeotJM" # So how do we ever draw multiple samples from a single key? The answer is *key splitting*. The basic idea is that we can split a `PRNGKey` into multiple, and each of the new keys can be treated as an independent source of randomness. 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1lc8QcSOo4Pi" outputId="22f924cd-ae0a-4de6-c389-39409b3ba43a" key1, key2 = random.split(key, num=2) print(key1, key2) # + [markdown] colab_type="text" id="2KCLSK2so8_v" # Key splitting is deterministic but is chaotic, so each new key can now be used to draw a distinct random sample. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="PUEfE6JApE_3" outputId="ff2f969e-7f98-4e91-85e2-911c4fee40e9" print(random.normal(key1), random.normal(key2)) # + [markdown] colab_type="text" id="WOgYcS9zpJG3" # For more details about JAX's deterministic key splitting model, see [this guide](https://github.com/google/jax/blob/main/design_notes/prng.md).
tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_on_JAX.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Feature Engineering: Transformations

# ### Read in text

# +
import pandas as pd

data = pd.read_csv("SMSSpamCollection.tsv", sep='\t')
data.columns = ['label', 'body_text']
# -

# ### Create the two new features

# +
import string

def count_punct(text):
    """Return the percentage of non-space characters in `text` that are punctuation.

    The result is rounded to one decimal place (rounded to 3 decimals as a
    fraction, then scaled by 100). Returns 0.0 for empty or all-space input,
    which would otherwise raise ZeroDivisionError.
    """
    count = sum(1 for char in text if char in string.punctuation)
    # Denominator: characters excluding spaces.
    total_chars = len(text) - text.count(" ")
    if total_chars == 0:
        # Guard: empty / all-space messages have no measurable punctuation rate.
        return 0.0
    return round(count/total_chars, 3)*100

# body_len: message length excluding spaces; punct%: punctuation rate per message.
data['body_len'] = data['body_text'].apply(lambda x: len(x) - x.count(" "))
data['punct%'] = data['body_text'].apply(lambda x: count_punct(x))
data.head()
# -

# ### Plot the two new features

from matplotlib import pyplot
import numpy as np
# %matplotlib inline

# ### Transform the punctuation % feature

# ### Box-Cox Power Transformation
#
# **Base Form**: $$ y^x $$
#
# | X    | Base Form                | Transformation           |
# |------|--------------------------|--------------------------|
# | -2   | $$ y ^ {-2} $$           | $$ \frac{1}{y^2} $$      |
# | -1   | $$ y ^ {-1} $$           | $$ \frac{1}{y} $$        |
# | -0.5 | $$ y ^ {\frac{-1}{2}} $$ | $$ \frac{1}{\sqrt{y}} $$ |
# | 0    | $$ y^{0} $$              | $$ log(y) $$             |
# | 0.5  | $$ y ^ {\frac{1}{2}} $$  | $$ \sqrt{y} $$           |
# | 1    | $$ y^{1} $$              | $$ y $$                  |
# | 2    | $$ y^{2} $$              | $$ y^2 $$                |
#
#
# **Process**
# 1. Determine what range of exponents to test
# 2. Apply each transformation to each value of your chosen feature
# 3. Use some criteria to determine which of the transformations yield the best distribution
nlp/Ex_Files_NLP_Python_ML_EssT/Exercise Files/Ch04/04_04/Start/04_04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/premkumar2002/GitHub-Starter-Guide/blob/main/PandasTutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="_8Q68P1jX-7w" import pandas as pd # + colab={"base_uri": "https://localhost:8080/"} id="stHzyJjKYvYC" outputId="46ed924d-598c-4782-8b53-53383d06f3a0" from google.colab import drive drive.mount('/content/gdrive') # + id="rUVLJ5bkfDW9" DatasetBaseFolder = '/content/gdrive/MyDrive/ColabNotebooks/PandasTl/Datasets/' # + colab={"base_uri": "https://localhost:8080/", "height": 173} id="dMt64ltOSWeH" outputId="f0ce034a-5b98-4ed7-8576-413fccaf317f" data = { 'apples' : [0, 2, 1, 3], 'oranges' : [1, 5, 2, 4] } purchases = pd.DataFrame(data); purchases # + id="zWyLZBHPdZFb" colab={"base_uri": "https://localhost:8080/", "height": 163} outputId="fa529224-0110-458f-c15b-bcc1e7f70483" movies_df = pd.read_csv(DatasetBaseFolder+"IMDB-Movie-Data.csv", index_col="Title") # + colab={"base_uri": "https://localhost:8080/", "height": 441} id="iZClQqkRfVGF" outputId="44aabd4a-426e-45f9-92c7-bc0757507fc0" #Lets see first 5 rows movies_df.head(5) # + colab={"base_uri": "https://localhost:8080/", "height": 371} id="IC5QEOfkfnaI" outputId="481a5313-ca60-40c1-f25b-936faf3c867a" #Lets see last 5 rows movies_df.tail(5) # + colab={"base_uri": "https://localhost:8080/"} id="qbg9oENggyRb" outputId="db57bf1e-a14c-46e9-a37b-8904bf864a9f" movies_df.shape # + colab={"base_uri": "https://localhost:8080/"} id="eHM3ITP1gSG7" outputId="f935e460-f13b-44d0-c96e-4456e75c4afa" #To get an overview of the dataset movies_df.info() # + id="a-h7rrrigY_g" #If you want to remove duplicate instances movies_df = 
movies_df.drop_duplicates(keep = 'first') #Drop all instances keep = false inplace=True # + colab={"base_uri": "https://localhost:8080/"} id="zr7tIVqejLlc" outputId="7583dff2-235c-470e-ce1a-a866fcd7776e" #If you wish to rename columns movies_df.columns # + colab={"base_uri": "https://localhost:8080/"} id="nw4GIQMPj-Cf" outputId="41ff8b9f-999e-4972-89ad-5ed3f11c284d" movies_df.rename(columns = {'Runtime (Minutes)' : 'Runtime', 'Revenue (Millions)' : 'Revenue_millions'}, inplace=True) movies_df.columns # + colab={"base_uri": "https://localhost:8080/"} id="9rawpScaks2R" outputId="8c349644-c5f3-41c6-9d89-dd135c23ef86" #To count number of null entries in each colum movies_df.isnull().sum() # + colab={"base_uri": "https://localhost:8080/"} id="rg4WQO-zlQkX" outputId="64523f11-6cc6-4423-c6e6-2eca9342aef6" movies_dfTmp = movies_df.dropna(axis=0) #To drop instances with null values movies_dfTmp.shape #movies_df.shape # + colab={"base_uri": "https://localhost:8080/"} id="KfnUiTpanOVr" outputId="11d6ef3f-fde6-4b6d-c8af-7c7838cd51f2" movies_dfTmp = movies_df.dropna(axis=1) #To drop columns containing null values movies_dfTmp.shape # + colab={"base_uri": "https://localhost:8080/"} id="SJ6vDIzzn6Oq" outputId="35cbb239-3f27-47b5-8d54-328d6b09a752" movies_df.shape # + colab={"base_uri": "https://localhost:8080/"} id="X3mVPaXti9nQ" outputId="5aa338ee-a1e9-4c47-9228-47e7c99497bc" #Imputing with Mean revenue = movies_df['Revenue_millions'] revenue.head(5) # + colab={"base_uri": "https://localhost:8080/"} id="MXUl2uJ0kLBs" outputId="78f3a22d-40b4-4537-b69c-4e76582e5102" meanRev = revenue.mean(0) revenue.fillna(meanRev, inplace=True) movies_df.isnull().sum() #Note that this get updated # + colab={"base_uri": "https://localhost:8080/", "height": 294} id="WVxiodMkloDP" outputId="3d0bc351-ffff-4286-8b25-dac13aaa115a" #Describ the Dataset movies_df.describe() # + colab={"base_uri": "https://localhost:8080/"} id="5MtPOZmalvt5" outputId="38481405-6b77-4db9-f129-210324b27f66" # + 
colab={"base_uri": "https://localhost:8080/"} id="f3tDeqcxrkto" outputId="2431896c-d456-4e62-eb96-01335984b38f" #if you want to count movies_df['Genre'].value_counts() # + colab={"base_uri": "https://localhost:8080/", "height": 263} id="27EF4p5bsEp9" outputId="a98799a8-6b1c-434a-a2ea-2b1aa1371a2c" #Correlation movies_df.corr() #Note the attributes in S # + colab={"base_uri": "https://localhost:8080/"} id="rlDf9DImwy5T" outputId="45e39af1-72aa-4c2b-ddc5-2819b9aed2c8" #slicing along columns subset = movies_df[['Genre', 'Rating']] type(subset) # + colab={"base_uri": "https://localhost:8080/"} id="uS1XiXfvxGTI" outputId="d6a76aae-ddf4-4ce0-9e26-207dd092a845" #Slicing along rows movies_df.loc['Prometheus'] #using key index movies_df.iloc[1] #using numerical index # + colab={"base_uri": "https://localhost:8080/", "height": 293} id="3WZFdUBEx6eM" outputId="755fe4cf-c0e4-4bba-f07f-72216de16563" #few instances 1 through 3 movie_subset = movies_df.iloc[1:4] movie_subset # + colab={"base_uri": "https://localhost:8080/"} id="xbvjEf4oyM2y" outputId="727a1695-db09-40c4-a82d-5bedc97863e5" #conditional selection #Pick movies with rating more than 8.5 rating = movies_df['Rating'] rating[rating.gt(8.5)] # + colab={"base_uri": "https://localhost:8080/", "height": 210} id="2QPFdJ7iyZiC" outputId="7439d12d-9bd5-4649-c966-040c50548de6" #Pick movies based on Director moviesByRidley = movies_df[(movies_df['Director'] == "<NAME>") & movies_df['Rating'].gt(7.5)] moviesByRidley.head(4) # + colab={"base_uri": "https://localhost:8080/", "height": 341} id="8m-e0xKT1Y_t" outputId="d289c152-6e5e-48b9-e7c5-708b5c5a21b8" #all movies that were released between 2005 and 2010, have a rating above 8.0, but made below the 25th percentile in revenue. 
movies_df[ ((movies_df['Year'] >= 2005) & (movies_df['Year'] <= 2010)) & (movies_df['Rating'] > 8.0) & (movies_df['Revenue_millions'] < movies_df['Revenue_millions'].quantile(0.25)) ] # + id="KRtn97RG2LS1" import matplotlib.pyplot as plt plt.rcParams.update({'font.size': 20, 'figure.figsize': (10, 8)}) # + colab={"base_uri": "https://localhost:8080/", "height": 540} id="PlggKlfk2zhl" outputId="4a2873ef-adeb-4907-94e5-086d3e92bfae" #For categorical variables utilize Bar Charts* and Boxplots. #For continuous variables utilize Histograms, Scatterplots, Line graphs, and Boxplots. movies_df.plot(kind='scatter', x='Rating', y='Revenue_millions', title='Revenue (millions) vs Rating'); # + colab={"base_uri": "https://localhost:8080/", "height": 517} id="e4ve-vuE3cmR" outputId="e2f0f78c-505f-4121-c683-8855d173eab4" movies_df['Rating'].plot(kind='hist', title='Rating'); # + colab={"base_uri": "https://localhost:8080/", "height": 492} id="WTxAp-VM33k3" outputId="1c12f125-3e7d-4b27-ff74-ff07d6f593ed" movies_df['Rating'].plot(kind="box");
PandasTutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Homework 3: Feature Engineering, KNN, and Decision Trees # # By <NAME> and <NAME> with the help of <NAME>, <NAME>, and <NAME> # # # *Wine classification dataset from the UCI Machine Learning Repository: http://archive.ics.uci.edu/ml/datasets/Wine* # # *Sea level dataset from NASA's climate change repository: https://climate.nasa.gov/vital-signs/* # ## Preamble # # Download the `hw3` folder from here: https://github.com/nfrumkin/EC414/tree/master/homework/hw3 (or clone the EC414 repository, if you prefer). # # To run and solve this assignment, you must have a working Jupyter Notebook installation. # # If you followed the installation instructions for `Python 3.6.x` and `Jupyter Notebook` from discussion 1, you should be set. In a terminal (cmd or Powershell for Windows users), navigate to the `hw3` folder. Then type `jupyter notebook` and press `Enter`. # # If you have Anaconda, run Anaconda and choose this file (`EC414_HW3.ipynb`) in Anaconda's file explorer. Use `Python 3` version. # # Below statements assume that you have already followed these instructions. If you need help with Python syntax, NumPy, or Matplotlib, you might find [Week 1 discussion material](https://github.com/nfrumkin/EC414/blob/master/discussions/Week%201%20-%20Python%20Review.ipynb) useful. # # To run code in a cell or to render [Markdown](https://en.wikipedia.org/wiki/Markdown)+[LaTeX](https://en.wikipedia.org/wiki/LaTeX) press `Ctrl+Enter` or `[>|]` ("play") button above. To edit any code or text cell [double] click on its content. To change cell type, choose "Markdown" or "Code" in the drop-down menu above. # # Put your solution into boxes marked with **`[double click here to add a solution]`** and press Ctrl+Enter to render text. 
[Double] click on a cell to edit or to see its source code. You can add cells via **`+`** sign at the top left corner. # # Submission instructions: please upload your completed solution file(s) to Blackboard by the due date (see Schedule). # ## Problem 1: Feature Engineering # # Given the sea level rise data over the past 25 years as a training set (`sealevel_train.csv`) and the past year's sea level rise data as a testing set (`sealevel_test.csv`): # # **a.** Apply Ordinary Least Squares regression on the training data. Plot both the training data and the regression curve on the same figure. *(Hint: use your code from Homework 2!)* # # **b.** Using the testing set, calculate the mean squared error (MSE) between the ground truth testing data and the prediction given by your regression curve from part a. Recall that the mean squared error is given by $MSE = \frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y_i})^2$, where $y$ is the vector of $n$ observed values and $\hat{y}$ is the vector of $n$ predictions. # # **c.** In machine learning, we often manipulate the raw data into some intermediary form to create a more robust feature representation. Repeat parts a. and b. for each of the following data transformations. *Be sure to apply the same transformation to the testing set before finding the MSE between the test data and the prediction.* # # * Apply a rolling mean with a window size of 5, 7, and 15. This transformation is given by $x_j = \sum_{k=i}^{i+window size}(\frac{x_k}{windowsize})$. With the $n$ data points given, pad the final values with zero. (For example, the mean at $n-2$ includes points $n-2$, $n-1$, and $n$, and will be 0 for $n+1$ and $n+2$ because we do not have data for those points.) # # * Apply first order differencing. The transformation is given by $x_{j} = x_i - x_{i-1}$ for every data point $x_i$ in the dataset, excluding the first data point. # # * One other data transformation of your choice. 
*Be sure to explain your transformation.* # + import numpy as np import pandas as pd import math from matplotlib import pyplot as plt from sklearn.metrics import mean_squared_error, mean_absolute_error from sklearn.linear_model import LinearRegression # - # Read training set sea_level_df = pd.read_csv("sealevel_train.csv") sea_level_df.head() # Read testing set sea_level_df_test = pd.read_csv("sealevel_test.csv") sea_level_df_test.head() # + # Part a - Fit linear regression model to training data (find OLS coefficients) # y_axis = sea_level_df['level_variation'].values x_axis = sea_level_df['time'].values def ols(x_axis, y_axis, num_of_data): x_sum = 0 y_sum = 0 xy_sum = 0; x_sq = 0 y_sq = 0 for i in x_axis: x_sum += i x_sq += i*i for i in y_axis: y_sum += i y_sq += i*i for i in range(num_of_data): xy_sum += x_axis[i]*y_axis[i] a = (num_of_data*xy_sum-(x_sum*y_sum))/(num_of_data*x_sq - (x_sum)*(x_sum)) b = (y_sum-a*x_sum)/(num_of_data) return a,b def convient_plot(x_axis, y_axis, predicted_y, x_label, y_label, title = ''): plt.figure() plt.plot(x_axis, predicted_y, 'r') plt.plot(x_axis, y_axis) plt.xlabel(x_label) plt.ylabel(y_label) if (title != ''): plt.title(title) num_of_data = sea_level_df['time'].count() a, b = ols(x_axis = x_axis, y_axis = y_axis, num_of_data = num_of_data) y = a*x_axis + b convient_plot(x_axis, y_axis, y, 'time', 'level_variation') # Predict using OLS model # Plot training data along with the regression curve # + # Part b - Prediction of testing points # def calculate_MSE(a, b, x_axis_test, y_axis_test, num_of_data_test): MSE_sum = 0 for i in range(num_of_data_test): MSE_sum += abs(a*x_axis_test[i]+b-y_axis_test[i])**2 MSE = MSE_sum/num_of_data_test return MSE y_axis_test = sea_level_df_test['level_variation'].values x_axis_test = sea_level_df_test['time'].values num_of_data_test = sea_level_df_test['time'].count() # for i in range(num_of_data_test): # MSE_sum += abs(a*x_axis_test[i]+b-y_axis_test[i])**2 # MSE = MSE_sum/num_of_data_test MSE 
= calculate_MSE(a, b, x_axis_test, y_axis_test, num_of_data_test) print("MSE is", MSE) # Calculate mean squared error between ground truth and predictions # + # Part c - data transformations # # ROLLING MEAN # # Repeat parts a and b for rolling mean # part a y_window_five = np.zeros(num_of_data) y_window_seven = np.zeros(num_of_data) y_window_fifteen = np.zeros(num_of_data) y_axis_pad_five = np.concatenate((y_axis, np.zeros(4))) y_axis_pad_seven = np.concatenate((y_axis, np.zeros(6))) y_axis_pad_fifteen = np.concatenate((y_axis, np.zeros(14))) for i in range(num_of_data): sum = 0 for j in range(5): sum += y_axis_pad_five[i+j] y_window_five[i] = (sum/5) for i in range(num_of_data): sum = 0 for j in range(7): sum += y_axis_pad_seven[i+j] y_window_seven[i] = (sum/7) for i in range(num_of_data): sum = 0 for j in range(15): sum += y_axis_pad_fifteen[i+j] y_window_fifteen[i] = (sum/15) a_five, b_five = ols(x_axis, y_window_five, num_of_data) a_seven, b_seven = ols(x_axis, y_window_seven, num_of_data) a_fifteen, b_fifteen = ols(x_axis, y_window_fifteen, num_of_data) y_five = a_five*x_axis + b_five y_seven = a_seven*x_axis + b_seven y_fifteen = a_fifteen*x_axis + b_fifteen convient_plot(x_axis, y_window_five, y_five, "Time", "Level variation", "Window of Five") convient_plot(x_axis, y_window_seven, y_seven, "Time", "Level variation", "Window of Seven") convient_plot(x_axis, y_window_fifteen, y_fifteen, "Time", "Level variation", "Window of Fifteen") ## part b y_window_five_test = np.zeros(num_of_data_test) y_window_seven_test = np.zeros(num_of_data_test) y_window_fifteen_test = np.zeros(num_of_data_test) y_axis_test_pad_five = np.concatenate((y_axis_test, np.zeros(4))) y_axis_test_pad_seven = np.concatenate((y_axis_test, np.zeros(6))) y_axis_test_pad_fifteen = np.concatenate((y_axis_test, np.zeros(14))) for i in range(num_of_data_test): sum = 0 for j in range(5): sum += y_axis_test_pad_five[i+j] y_window_five_test[i] = (sum/5) for i in range(num_of_data_test): sum = 0 for 
j in range(7): sum += y_axis_test_pad_seven[i+j] y_window_seven_test[i] = (sum/7) for i in range(num_of_data_test): sum = 0 for j in range(15): sum += y_axis_test_pad_fifteen[i+j] y_window_fifteen_test[i] = (sum/15) MSE_five = calculate_MSE(a_five, b_five, x_axis_test, y_window_five_test, num_of_data_test) MSE_seven = calculate_MSE(a_seven, b_seven, x_axis_test, y_window_seven_test, num_of_data_test) MSE_fifteen = calculate_MSE(a_fifteen, b_fifteen, x_axis_test, y_window_fifteen_test, num_of_data_test) print("MSE for window size five is", MSE_five) print("MSE for window size seven is", MSE_seven) print("MSE for window size fifteen is", MSE_fifteen) # + # Part c - data transformations # # FIRST ORDER DIFFERENCING first_order_y_axis = np.zeros(num_of_data) for i in range(num_of_data-1): first_order_y_axis[i+1] = y_axis[i+1] - y_axis[i] first_order_y_axis[0] = y_axis[0] # print(first_order_y_axis) first_order_a, first_order_b = ols(x_axis, first_order_y_axis, num_of_data) y_first_order = first_order_a*x_axis + first_order_b convient_plot(x_axis, first_order_y_axis, y_first_order, "Time", "Level Variation", "First Order") first_order_y_axis_test = np.zeros(num_of_data_test) for i in range(num_of_data_test-1): first_order_y_axis_test[i+1] = y_axis_test[i+1] - y_axis_test[i] first_order_y_axis_test[0] = y_axis_test[0] MSE_first_order = calculate_MSE(first_order_a, first_order_b, x_axis_test, first_order_y_axis_test, num_of_data_test) print(MSE_first_order) # # Repeat parts a and b for first order differencing # MSE_first_order = calculate_MSE(first_order_a, first_order_b, x_axis_test, y_axis_test, num_of_data_test) # print(MSE_first_order) # + # Part c - data transformations # # YOUR OWN TRANSFORMATION # Repeat parts a and b for your own transformation # part a mean_y = y_axis.mean() std_y = y_axis.std() y_standard = np.zeros(num_of_data) for i in range(num_of_data): y_standard[i] = (y_axis[i] - mean_y)/std_y standard_a, standard_b = ols(x_axis, y_standard, num_of_data) 
standard_y = standard_a*x_axis + standard_b convient_plot(x_axis, y_standard, standard_y, "Time", "Level Variation", "Standard") # part b mean_y_test = y_axis_test.mean() std_y_test = y_axis_test.std() y_standard_test = np.zeros(num_of_data_test) for i in range(num_of_data_test): y_standard_test[i] = (y_axis_test[i] - mean_y_test)/std_y_test MSE_standard = calculate_MSE(standard_a, standard_b, x_axis_test, y_standard_test, num_of_data_test) print(MSE_standard) # - # My transformation transform data by substract mean and divide by standard deviation of the sample data set # ## Problem 2: K-Nearest Neighbors # # Consider the following five two-dimensional training points, belonging to class + or class -: # # $(0,1,+)$ # # $(1,1,-)$ # # $(2,2,+)$ # # $(2,0,+)$ # # $(3,1,-)$ # # **a.** Plot these five points. Then, draw the decision boundary for a **1-nearest-neighbor classifier (with Euclidean distance).** *Be sure to show or explain how you found your decision boundary.* # # **b.** Classify the following test points, and add them to your plot: # # $(0,0)$ # # $(1,2)$ # # $(2,1)$ # # $(3,3)$ # # $(4,3)$ # # *Be sure to explain how you classified the test points.* # # **c.** As you may or may not have seen in part b, it is possible for two neighbors with different class labels to have identical distances to a test point. In that case, explain how to choose a class for this point. # a. # + x_knn_pos = np.array([0,2,2]) y_knn_pos = np.array([1,2,0]) x_knn_neg = np.array([1,3]) y_knn_neg = np.array([1,1]) plt.figure() plt.scatter(x_knn_pos, y_knn_pos, c = 'r') plt.scatter(x_knn_neg, y_knn_neg, c = 'b') boundary_x = np.array([3,0.5,0.5,3]) boundary_y = np.array([2,-0.5,2.5,0]) plt.plot(boundary_x, boundary_y, 'y') # - # I find the decision boundary by calculate the function perpendicular to the function connecting two points at the midpoint. # b. 
classify_x = np.array([0,1,2,3,4]) classify_y = np.array([0,2,1,3,3]) classify_result = np.array([False]*5) for i in range(5): isPos = False minDistance = 1000 for j in range(2): testDistance = math.sqrt(abs(x_knn_neg[j] - classify_x[i])**2 + abs(y_knn_neg[j] - classify_y[i])**2) if(testDistance < minDistance): minDistance = testDistance for j in range(3): testDistance = math.sqrt(abs(x_knn_pos[j] - classify_x[i])**2 + abs(y_knn_pos[j] - classify_y[i])**2) if(testDistance < minDistance): minDistance = testDistance isPos = True classify_result[i] = isPos for i in range(5): if (classify_result[i]): print(f"point({classify_x[i]},{classify_y[i]}) is in +") plt.scatter([classify_x[i]],[classify_y[i]], c ='r') else: print(f"point({classify_x[i]},{classify_y[i]}) is in -") plt.scatter([classify_x[i]],[classify_y[i]], c = 'b') plt.scatter(x_knn_pos, y_knn_pos, c = 'r') plt.scatter(x_knn_neg, y_knn_neg, c = 'b') plt.plot(boundary_x, boundary_y, 'y') # I classify the points by calculate the distance between each one and classify them to the point correspoding to the minimum distance # c. Do k-nn in that case and do a majority vote # ## Problem 3: Nearest Neighbors vs. Decision Trees # # Not all machine learning algorithms perform well on all types of data; performance is often dependent on how the data is distributed. We ask you to compare the performance accuracies of k-nearest neighbors and decision trees on two datasets: the Iris dataset and the wine dataset. # # The Iris flower dataset contains samples of attributes of 3 different variations of Iris flowers. This dataset has become very commonly used in training classification models. In fact, it has become so common that it exists within scikit-learn. The wine dataset contains attributes of 3 different types of wine. The datasets are imported and split into training and testing sets below for you. You can also get a preview of what they look like by running the two cells below. 
# # **a.** Using the `KNeighborsClassifier` and `DecisionTreeClassifier` from scikit-learn, train and test k-nearest neighbors and decision trees on both datasets. When training the k-nearest neighbor algorithm, **do not choose a random number of neighbors, but instead *find the k that achieves the best accuracy in the range 1-10***. Compute the accuracies of each method (hint: you can use scikit-learn's `metrics.accuracy_score`). # # **b.** Compare the accuracies of each algorithm on each dataset. **If there is a difference in their performance, why do you think this would be?** # # *Please note that since sampling of data is random, accuracies may differ when you run the code again, and they will differ among your peers.* # + # Import Iris dataset from scikit-learn, as well as algorithms from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier from sklearn import tree # Load Iris dataset iris = load_iris() iris_df = pd.DataFrame(iris.data, columns = iris.feature_names) # Split data into training and testing sets X = iris.data y = iris.target X_iris_train, X_iris_test, y_iris_train, y_iris_test = train_test_split(X, y, test_size = 0.3) # here, our test set is 30% of whole set # Show what Iris data looks like iris_df.head() # print(X_iris_train.size) # + # Load wine dataset wine_df = pd.read_csv("wine.csv", names = ["class", "alc", "malic acid", "ash", "alcalinity", "mg", "phenols", "flavanoid", "nonflav phenols", "proanth", "color", "hue", "OD280/OD315", "proline"]) # Split dataset into train and test sets X = wine_df y = wine_df.pop('class') X_wine_train, X_wine_test, y_wine_train, y_wine_test = train_test_split(X, y, test_size = 0.3) # print(X_wine_train.size) # Show what wine dataset looks like wine_df.head() # + # IRIS DATASET # # Construct the nearest neighbors classifier. 
# Fit the model to the data, and find the k which achieves the best accuracy in the range 1-10.
# Test KNN with testing set
best_k_iris = 0
best_accu_iris = 0
# Sweep k = 1..10 and keep the k with the highest test-set accuracy.
for i in range(10):
    neigh = KNeighborsClassifier(i+1)
    neigh.fit(X_iris_train, y_iris_train)
    y_pred = neigh.predict(X_iris_test)
    score = metrics.accuracy_score(y_iris_test, y_pred)
    if (score > best_accu_iris):
        best_accu_iris = score
        best_k_iris = i+1
print(f"k with highest accuracy {best_accu_iris} is {best_k_iris}")

# Construct a decision tree on the training data.
decTree_iris = tree.DecisionTreeClassifier()
decTree_iris.fit(X_iris_train, y_iris_train)
# Test decision tree with testing set.
# BUG FIX: this previously called `decTree.predict(...)` — an undefined name
# (NameError) — instead of the fitted classifier `decTree_iris`.
y_pred_dec_iris = decTree_iris.predict(X_iris_test)
dec_score_iris = metrics.accuracy_score(y_iris_test, y_pred_dec_iris)
print(f"Decision accuracy is {dec_score_iris}")
# Compare accuracies between the two algorithms (print them out)
# +
# WINE DATASET
#
# Construct the nearest neighbors classifier.
# Fit the model to the data, and find the k which achieves the best accuracy in the range 1-10.
# Test KNN with testing set
best_k_wine = 0
best_accu_wine = 0
for i in range(10):
    neigh = KNeighborsClassifier(i+1)
    neigh.fit(X_wine_train, y_wine_train)
    y_pred = neigh.predict(X_wine_test)
    score = metrics.accuracy_score(y_wine_test, y_pred)
    # print(score)
    if (score > best_accu_wine):
        best_accu_wine = score
        best_k_wine = i+1
print(f"k with highest accuracy {best_accu_wine} is {best_k_wine}")
# Construct a decision tree on the training data
decTree_wine = tree.DecisionTreeClassifier()
decTree_wine.fit(X_wine_train, y_wine_train)
# Test decision tree with testing set
y_pred_dec_wine = decTree_wine.predict(X_wine_test)
dec_score_wine = metrics.accuracy_score(y_wine_test, y_pred_dec_wine)
# Compare accuracies between the two algorithms (print them out)
print(f"Decision accuracy is {dec_score_wine}")
# -
# b.
The difference comes from the fact that a decision tree builds a generalization of the whole data set when given enough data, since it selects the most discriminatory features, so it performs better with more data. In contrast, k-NN uses only local neighbor information to classify.
# ## Problem 4: Curse of Dimensionality and Nearest Neighbors
#
# **Please note that you may find it easier to program this question in MATLAB. If you choose to use MATLAB, please submit a separate .m file with your code solution and either insert your plots into Jupyter (or, submit a separate document with the plots through Blackboard).**
#
# Consider $n$ data points uniformly distributed in a $p$-dimensional unit ball centered at the origin, and suppose we are interested in nearest neighbors to the origin. It can be shown that the median distance from the origin to the closest data point under this scenario is given by the expression $(1-(\frac{1}{2})^{\frac{1}{n}})^{\frac{1}{p}}$.
#
# Now consider the following alternative scenario. Suppose $n$ data points are chosen uniformly
# from $[−1, 1]^p$ (the interval $[−1, 1]$ in p dimensions). Now consider the nearest neighbor to a point at the origin in terms of the $l_∞$ norm: $\|x − y\|_∞ = max_i|x_i − y_i|$.
#
# **a.** Write a piece of code that generates $n$ data points in $p$ dimensions distributed uniformly
# in $[−1, 1]^p$, and computes the $l_∞$ nearest neighbors to the origin. For $n = 5$, $n = 50$, and $n = 500$, plot the nearest neighbor distances as a function of $p$, for $p = 1$ to $200$. On the same plot(s), also show the curves corresponding to the median distance expression given above.
#
# **b.** What do you observe about the relationship between the formula and the $l_∞$ nearest neighbor distances? *Discuss and interpret all of your plots.*
#
# **c. *Bonus*** Prove a relationship between the ∞ nearest neighbor distances and the above formula.
# +
# Plot n p-dimensional uniformly-distributed data points
# n_five_array = np.ndarray(shape=(2,5),dtype=float)
# n_five_array[0] = np.random.uniform(-1,1,5)
# print(n_five_array)
# print(n_five_array[0].max())

def graph(formula, x_range, color):
    """Plot `formula` (a Python expression in `x`) over `x_range` in the given color.

    NOTE(review): uses eval() — acceptable here because the formula strings are
    internal literals, but never pass untrusted input to this function.
    """
    x = np.array(x_range)
    y = eval(formula)
    plt.plot(x, y, c = color)
    plt.show()

def plot(n, name, color):
    """Plot the l-infinity nearest-neighbor distance to the origin vs. dimension p.

    For each p = 1..200, draws n points uniformly from [−1, 1]^p and records the
    smallest l∞ distance to the origin, i.e. min over points of (max over
    coordinates of |x_i|).
    """
    nearest_distance = np.zeros(200)
    for p in range(200):
        # Row i holds coordinate i of all n points; shape (p+1, n).
        # Sample from [−1, 1] as the problem states (the original sampled
        # U(0, 1), which is distribution-equivalent for |coordinate|).
        samples = np.random.uniform(-1, 1, size=(p+1, n))
        # l∞ distance of each point to the origin: max |coordinate| per point.
        point_dists = np.abs(samples).max(axis=0)
        # BUG FIX: the original computed min over coordinates of (max over
        # points), i.e. min_i max_j — the transpose of the correct quantity
        # min_j max_i. Take the nearest of the n per-point distances instead.
        nearest_distance[p] = point_dists.min()
    plt.figure()
    plt.plot(range(1,201), nearest_distance, label = name, c = color)

p_range = range(1,201)
plot(5, "n = 5", 'b')
plt.xlabel("p")
plt.ylabel("nn-distance")
plt.legend(loc = 'right', bbox_to_anchor=(1.3, 0.5))
graph('(1-(1/2)**(1/5))**(1/x)', p_range, 'r')
plot(50, "n = 50", 'b')
plt.legend(loc = 'right', bbox_to_anchor=(1.3, 0.5))
plt.xlabel("p")
plt.ylabel("nn-distance")
graph('(1-(1/2)**(1/50))**(1/x)', p_range, 'r')
plot(500, "n = 500", 'b')
plt.xlabel("p")
plt.ylabel("nn-distance")
plt.legend(loc = 'right', bbox_to_anchor=(1.3, 0.5))
graph('(1-(1/2)**(1/500))**(1/x)', p_range, 'r')
# print(n_five_hundred_array)

# Compute nearest neighbors to the origin in terms of maximum norm
# Plot nearest neighbor distances as a function of p = 1:200 for n = 5, 50, 500. Plot curves
# corresponding to the median distance expression on the same figure.
# -

# b. n=5:
# <br>
# The median and sup norm grows apart as p grows.
# <br>
# n=50:
# <br>
# The median and sup norm grows closer as p grows.
# <br>
# b=500:
# <br>
# The median and sup norm grows closer as p grows.
homework/hw3/EC414_HW3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:flowcyt] # language: python # name: conda-env-flowcyt-py # --- # <h1> Promoter Designer (ProD) Tool </h1> # # [Link to Article](.) <NAME>, <NAME> et al. # # The ProD tool is designed for the construction of promoter strength libraries in prokaryotes. This [Jupyter Notebook](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/) is created to feature an interactive environment in [Python](https://docs.python.org/3/) for constructing libraries in *E. coli*. The tool can be run locally and is available through [GitHub](https://github.com/jdcla/ProD). # # --- # # # Jupyter Notebook # --- # # 1. To use the tool in an Jupyter notebook environment, it is important to run code cells (blocks preceded by `[]:`) sequentially. To run a cell, select it and press `Ctrl+Enter`. # # 2. Comments in code cells are preceded by `#` and are used to offer an explanation to the code's functionality # # 3. To download the model's output predictions, go to the dashboard (clicking the jupyter logo in the top left corner) and download the output file (default: `my_predictions.csv`) # # ![dashboard](img/dashboard.png) # # 4. When running this notebook through [Binder](https://mybinder.org/), changes are not saved through sessions. Make sure to download all generated files. In case of malfunction or unwanted changes, simply start a new session. # # --- # ProD # --- # # The Promoter Designer tool is created to construct promoter libraries, further exploiting biological capabilities of the microorganisms that allow for the fine-tuning of genetic circuits. A neural network has been trained on hundreds of thousands of sequences that have been randomized in the **17nt spacer sequence**. 
Therefore, generated promoters, ranging from no expression (strengh: `0`) to high expression (strength: `10`) all feature the same UP-region, binding boxes (-35, -10) and untranslated region (UTR). # # ` # [UP-region][-35-box][spacer][-10-box][ATATTC][UTR] # ` # # ` # [GGTCTATGAGTGGTTGCTGGATAAC][TTTACG][NNNNNNNNNNNNNNNNN][TATAAT][ATATTC][AGGGAGAGCACAACGGTTTCCCTCTACAAATAATTTTGTTTAACTTT] # ` # # The tool is run by calling the function `run_tool`, present in the `ProD.py` script. After import (first code cell). The tool can be run and has several inputs. # # ` # run_tool(input_data, output_path='my_predictions', lib=True, # lib_size=5, strengths=range(0, 11), cuda=False) # ` # #### **Function arguments** # # `input_data (list[str])` : A list containing input samples. All input sequences require to be strings of **length 17**. Sequences can be constructed using [**A, C, G, T, R, Y, S, W, K, M, B, D, H, V, N**](https://en.wikipedia.org/wiki/Nucleic_acid_notation). When constructing a library (`lib=True`), only the first sequence is used as the input blueprint (see `Constructing a Library`) # # `output_path (string)` (default: my_predictions) : A string featuring the output file. This files contains all information generated when running the tool. It furthermore contains the strength probability scores for each of the classes. # # # `lib (bool)` (default:True) : Determines the construction of a library (`True`) or the prediction of promoter strength of the input sequences (`False`). # # `cuda (bool)` (default:False) : Determines the use of GPU accelerated computing. Does not work using Binder, requires local installation. 
# # ##### **Only evaluated for `lib=True`** # # `lib_size (int)` (default:5) : The amount of output spacer sequences for each of the requested promoter strengths # # `strengths (list[int])` (default:[0,1,2,3,4,5,6,7,8,9,10]) : A list containing integers determining the promoter strengths present in te library # # Read more about [Python](https://docs.python.org/3/) and [Jupyter Notebook](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/) # # --- # <h2> Load Code </h2> # + # Ctrl + Enter to run # Load code from python file from ProD import run_tool # - # --- # ## Constructing a Library # # To create a custom promoter library, a single input blueprint is given that functions as the source from which spacer sequences are evaluated. The tool will run through the following steps: # # 1. Create all possible sequences from the degenerate input sequence # 2. Determine the promoter strengths, retain all spacer sequences for requested promoter strengths # 3. Sample promoters to construct library. # 4. Construct degenerate sequence (library blueprint) from all sequences. For each blueprint, the fraction of sequences classified to each category of strength is given. # # If the amount of sequences possible from the input sequence exceeds 500,000, spacers will be sampled (100,000) instead and no library blueprint is created. To attain feasible processing times, a minimal amount of user guidance in the construction of the library blueprint is required. # # **NOTE:** Promoter strength is divided in 11 ordinal classes ranging from 0 to 10. Overlap between neighbouring class strengths is expected. Therefore, when constructing a library it can be beneficial to group classes together. Specifically, we recommend the following interpretation of four sets of input strengths. 
# * zero to low expression: `strengths = [0,1,2]` # * low to medium expression: `strengths = [3,4,5]` # * medium to high expression: `strengths = [6,7,8]` # * high to very high expression: `strengths = [9,10]` # + # Ctrl + Enter to run # Define custom spacer (requires to be length 17) input_data = [ # Add single blueprint 'NNNCGGGNCCNGGGNNN', ] # Define strengths my_strengths = [9,10] # Run tool run_tool(input_data, strengths=my_strengths) # - # ##### Outputs can be downloaded: Go to the dashboard (clicking the jupyter logo in the top left corner) and download the output file (default: `my_predictions.csv`) # --- # <h2> Evaluate Custom Spacers</h2> # # It is possible to evaluate custom sequences. The input can be given as a list or the path to a fasta file. # + # Ctrl + Enter to run # Define custom spacers (requires to be length 17) input_data = [ 'TTNCCGGGCCGRRGAGA', 'AANCCGNNNNCRRGAGA', 'GGCCNAANANACVVVAG' # Add extra lines if necessary ] # Run tool run_tool(input_data, lib=False) # - # --- # <h3> Input Fasta File </h3> # # 1. Go to **dashboard** ![dashboard](img/dashboard.png) # 2. Go to **upload** ![upload](img/upload.png) # 3. Input **file name** # + # Ctrl + Enter to run # Input fasta file location input_file = ["ex_seqs.fa"] # Run tool run_tool(input_file, lib=False) # - # ---
ProD_Notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt import pandas as pd import pymc3 as pm from theano import tensor as T import arviz import os import sys from jupyterthemes import jtplot jtplot.style(theme="monokai") # - os.listdir() lng = pd.read_csv("LNG.csv", index_col="Date")[["Adj Close"]] dji = pd.read_csv("^DJI.csv", index_col="Date")[["Adj Close"]] lng = lng.rename(columns={"Adj Close":"LNG close"}) dji = dji.rename(columns={"Adj Close":"DJI close"}) lng["LNG log return"] = np.log(lng["LNG close"]) - np.log(lng["LNG close"].shift(1)) dji["DJI log return"] = np.log(dji["DJI close"]) - np.log(dji["DJI close"].shift(1)) lng = lng.dropna() dji = dji.dropna() df = pd.merge(lng, dji, left_index=True, right_index=True) df.head() plt.figure(figsize=(15,10)) plt.scatter(df["DJI log return"], df["LNG log return"], alpha=0.5) plt.title("Log LNG returns vs. 
log DJI returns") plt.xlabel("DJI log return") plt.ylabel("LNG log return") plt.grid() plt.show() stacked = np.vstack([df["DJI log return"].values, df["LNG log return"].values]) sample_cov = np.cov(stacked)[0][1] mkt_port_var = np.var(df["DJI log return"].values) sample_beta = sample_cov/mkt_port_var print("The sample estimate of beta is {}".format(sample_beta)) # # MCMC beta estimation, using the multivariate normal model for log returns # + data = np.column_stack((df["DJI log return"].values, df["LNG log return"])) num_samps = 50000 with pm.Model() as model: ''' The code for this model is adapted from <NAME>'s blog post, available here: https://austinrochford.com/posts/2015-09-16-mvn-pymc3-lkj.html ''' sigma = pm.Lognormal('sigma', np.zeros(2), np.ones(2), shape=2) nu = pm.Uniform("nu", 0, 5) C_triu = pm.LKJCorr('C_triu', nu, 2) C = pm.Deterministic('C', T.fill_diagonal(C_triu[np.zeros((2, 2), dtype=np.int64)], 1.)) sigma_diag = pm.Deterministic('sigma_mat', T.nlinalg.diag(sigma)) cov = pm.Deterministic('cov', T.nlinalg.matrix_dot(sigma_diag, C, sigma_diag)) tau = pm.Deterministic('tau', T.nlinalg.matrix_inverse(cov)) mu = pm.MvNormal('mu', 0, tau, shape=2) x_ = pm.MvNormal('x', mu, tau, observed=data) step = pm.Metropolis() trace_ = pm.sample(num_samps, step) # - nburn = 5000 trace = trace_[nburn:] pm.traceplot(trace) # Compute matrix inverse directly a11 = trace["cov"][:, 0, 0] a12 = trace["cov"][:, 0, 1] a21 = trace["cov"][:, 1, 0] a22 = trace["cov"][:, 1, 1] temp_matrices = np.array([[a22, -a12],[-a21, a11]]) prefactor = 1.0/(a11*a22 - a12*a21) inv_matrices = prefactor*temp_matrices mkt_vars = inv_matrices[0,0,:] dji_lng_covs = inv_matrices[0,1,:] betas = dji_lng_covs/mkt_vars plt.figure(figsize=(15,10)) plt.hist(betas, bins=50) plt.title("Posterior Samples of the Beta of LNG") plt.savefig("lng_beta.png") plt.show() # # MCMC beta estimation, using the multivariate student-t model for log returns # + data = np.column_stack((df["DJI log return"].values, df["LNG 
log return"])) data_len = len(data[:,0]) data = np.reshape(data, (2, data_len)) num_samps = 50000 with pm.Model() as model: ''' The code for this model is adapted from <NAME>'s blog post, available here: https://austinrochford.com/posts/2015-09-16-mvn-pymc3-lkj.html ''' sigma = pm.Lognormal('sigma', np.zeros(2), np.ones(2), shape=2) nu = pm.Uniform("nu", 0, 5) C_triu = pm.LKJCorr('C_triu', nu, 2) C = pm.Deterministic('C', T.fill_diagonal(C_triu[np.zeros((2, 2), dtype=np.int64)], 1.)) sigma_diag = pm.Deterministic('sigma_mat', T.nlinalg.diag(sigma)) cov = pm.Deterministic('cov', T.nlinalg.matrix_dot(sigma_diag, C, sigma_diag)) tau = pm.Deterministic('tau', T.nlinalg.matrix_inverse(cov)) nu2 = pm.HalfNormal("nu2", sigma=3, shape=2) #nu3 = 2*np.ones(2) + nu2 mu = pm.MvStudentT('mu', nu=nu2, Sigma=tau, mu=0, shape=2) x_ = pm.MvStudentT('x', nu=nu2, Sigma=tau, mu=mu, observed=data) step = pm.Metropolis() trace_ = pm.sample(num_samps, step) # - # + Parameters nu: int Degrees of freedom. Sigma: matrix Covariance matrix. Use cov in new code. mu: array Vector of means. cov: matrix The covariance matrix. tau: matrix The precision matrix. chol: matrix The cholesky factor of the covariance matrix. lower: bool, default=True Whether the cholesky fatcor is given as a lower triangular matrix. # + Parameters mu: array Vector of means. cov: array Covariance matrix. Exactly one of cov, tau, or chol is needed. tau: array Precision matrix. Exactly one of cov, tau, or chol is needed. chol: array Cholesky decomposition of covariance matrix. Exactly one of cov, tau, or chol is needed. lower: bool, default=True Whether chol is the lower tridiagonal cholesky factor. # -
Beta Calculation/.ipynb_checkpoints/Bayesian Beta Computation-checkpoint.ipynb
def bubble_sort(A):
    """
    Sort the list *A* in place using the Bubblesort algorithm.

    :type A: list
    :returns: None (A is modified in place)
    """
    # After each outer pass the largest remaining element has bubbled
    # to the end, so the inner scan can stop one position earlier.
    for bypass in range(1, len(A)):
        swapped = False
        for k in range(0, len(A) - bypass):
            if A[k] > A[k + 1]:
                A[k], A[k + 1] = A[k + 1], A[k]
                swapped = True
        # Early exit: a full pass without swaps means A is already sorted.
        if not swapped:
            return
# * Что конкретно нужно тестируемой системе для выполнения обязательств (контрактов)? # * Что мы имеем на выходе? # * Какие есть побочные эффекты работы? # * Как узнать, что система работает правильно? # * Достаточно ли хорошо определена эта "правильность"? # # ## Преимущества TDD (разработки через тестирование) # # * Эффективное совмещение ролей (тестирование собственного кода) # * Рефакторинг без риска испортить код # * Реже нужно использовать отладчик (debugger) # * Повышает уверенность в качестве программного кода #
1/1.3.4_tdd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Section 1: Business Understanding # AirBnB is a company, which provides internet services for customer, who temporary wants to share their home with others. # This Notebook adresses both people, who are interested in unterstanding data provided by AirBnB and people, who are interested to use AirBnb in Seattle and to get some information about the situation in the city. # With the given data, following question will be adressed: # 1. how ist the AirBnB market in Seattle? # 1. how many listings do exist? # 2. what ist the average price, how is its distribution over the year? # 3. what is the average availability, how is its distribution over the year? # 4. is there a correlation between price, availabilty and the climate in Seattle (which would give a view on the tourist potential of the city) # 5. how ist the distribution of the listings over the neighborhoods and in general over the city in terms of numbers, prices, ratings and property type? # 2. how can written reviews used for data analysis? # 1. is it possible to quantify them using a "SentimentIntensityAnalyzer"? # 2. what are the pitfalls if doing so? # 3. is there a correlation between the polarity_scores from a SentimentIntensityAnalyzer and the direct rating scores from the customer? # 3. how can the price be predicted using a data-based model? # 1. which data best fit to the model? 
# definitions of functions for data-processing

def char2Bool(input_col):
    '''
    INPUT
    input_col - pandas series

    OUTPUT
    output_col - pandas series

    Replace the value 't' by the bool True and the value 'f' by the
    bool False.  Note: Series.replace matches whole cell values, not
    single characters inside longer strings.
    '''
    input_col = input_col.replace('t', True)
    output_col = input_col.replace('f', False)
    return output_col


def price2numeric(input_col):
    '''
    INPUT
    input_col - pandas series of price strings such as '$1,100.00'

    OUTPUT
    output_col - pandas series of numeric prices

    Steps:
    1. delete all ',' characters
    2. delete all '$' characters
    3. delete the '.00' cent suffix
    4. convert the values from string to numeric

    regex=False makes each replacement literal.  Under the historical
    pandas default (regex=True), '$' is an end-of-string anchor (so the
    dollar sign was never removed) and the '.' in '.00' matches any
    character, corrupting values like '1100.00'.
    '''
    input_col = input_col.str.replace(',', '', regex=False)
    input_col = input_col.str.replace('$', '', regex=False)
    output_col = input_col.str.replace('.00', '', regex=False)
    return pd.to_numeric(output_col)


def clean_str(input_col):
    '''
    INPUT
    input_col - pandas series

    OUTPUT
    output_col - pandas series

    Delete the literal characters '{', '}' and '"' from every value
    (used to unpack the brace-wrapped 'amenities' strings).
    '''
    input_col = input_col.str.replace('{', '', regex=False)
    input_col = input_col.str.replace('}', '', regex=False)
    output_col = input_col.str.replace('"', '', regex=False)
    return output_col
# Finally, as the timerange of the data is 01/2016 - 01/2017 and we want to have the overview over 1 year, we drop the data for 2017: # + # extract year and month from date df_calendar['year'] = pd.DatetimeIndex(df_calendar['date']).year df_calendar['month'] = pd.DatetimeIndex(df_calendar['date']).month # price to numeric df_calendar['price'] = price2numeric(df_calendar['price']) df_listings['price'] = price2numeric(df_listings['price']) # availability to bool df_calendar['available'] = char2Bool(df_calendar['available']) # - # transform human-written reviews into polarity_scores: # + # create analyzer analyzer = SentimentIntensityAnalyzer() # function to calculate polarity_scores def get_polarity_scores(review_text): ''' INPUT review_text - text to be assessed (string ) OUTPUT output_col - polarity_scores (float 0 - 1) This function calculates the polarity_scores based on the input-text using the SentimentIntensityAnalyzer from nltk.sentiment.vader ''' return analyzer.polarity_scores(review_text)['compound'] # remove NANs from reviews df_reviews = df_reviews.dropna(subset = ['comments']) # - # - create new column with polarity_scores # add polarity_scores of reviews df_reviews['polarity_scores'] = df_reviews['comments'].apply(get_polarity_scores) # Now let's compare polarity_scores to the ratings of the listings: # For that, we have to: # - calculate the mean polarity_score for each listing and # - join these polarity_scores to the linting data set # # + # mean polarity_scores per listing df_reviews_mean_per_listing = df_reviews.groupby(['listing_id']).mean() # add polarity_scores to listings df_listings = df_listings.join(df_reviews_mean_per_listing['polarity_scores']) # - # ### Section 3: Data Preparation # After we got a good overview about the data we are dealing with, data has to be prepared, so that it can be fed to a linear model, which will predict prices from given data. # The following question will be adressed: # 1. which data can be used? # a. 
how to prepare the target data? # b. which numerical data can be used as features? # c. which categorical data can be used as features? # c. how do we deal with missing data? # d... # # 2. which data really make sense to use? # a. how is the correllation to the cvalue to be predicted? # b. how many differend numerical and categoical data shall be used, to get the best results # c. how many different categorys within a categorical data should be used? # # 3. what can we do to avoid overfitting? # # As stated above, the goal is to predict the prices using relevant data as features. # To use the prices as targets, they need to be in a numerical format. We did that already when analysing the data.We only will use listing with valid prices: # drop all rows with price as NAN df_listings = df_listings.dropna(subset=['price']) # to get a good set of features, we can drop all non-categorical and non-relevant numerical data: # + # drop all non-numerical- or categorical columns:'neighbourhood_group_cleansed', non_num_cat = ['name', 'summary', 'space', 'description', 'experiences_offered', 'neighborhood_overview', 'notes', 'transit', 'picture_url', 'host_name', 'host_since', 'host_location', 'host_about','listing_url','last_scraped', 'thumbnail_url','medium_url','xl_picture_url','host_url','host_thumbnail_url', 'street','zipcode','smart_location','country','calendar_updated', 'calendar_last_scraped','first_review','last_review','host_picture_url', 'host_neighbourhood', 'neighbourhood', 'city', 'weekly_price', 'monthly_price', 'security_deposit', 'cleaning_fee', 'extra_people', 'jurisdiction_names', 'state', 'market', 'country_code', 'is_location_exact'] df_listings = df_listings.drop(non_num_cat, axis=1) # non-relevant numerical values non_rel_num = ['scrape_id','host_id','host_verifications','square_feet','license'] df_listings = df_listings.drop(non_rel_num, axis=1) # - # Now we want to find out, which numerical data we can use as features to predict the price as target. 
# For that, we visualize the correllation between features and target: # + # filter for numerical features df_listings_num = df_listings.select_dtypes(exclude=['object']) # normalize numerical features df_listings_num = (df_listings_num - df_listings_num.mean())/df_listings_num.std() # calculate correlation matrix to see, which numeric values are correllated to the price df_listings_num_cov = df_listings_num.cov() # display correllation as heat-map sns.heatmap(df_listings_num_cov) # - # we can see now the TOP10 of the features, which are correlated to the price. We will use the best correlated feattures to train our model. df_listings_num_cov.sort_values(['price'],ascending = False).index[1:10] key_mean = df_listings_num.columns # Are there any missing numerical data? df_listings[key_mean].isnull().sum() # As there are no several features with missing data, we replace them by the mean of all the data. Otherwise we would have to drop the entire data row, and we would loos other information, we want to use. df_listings[key_mean] = df_listings[key_mean].fillna(df_listings[key_mean].mean()) # Now we can deal with the missing data within the categorical data. First lets have an overview about categorical data: # overview about categories: df_listings.select_dtypes(include=['object']).columns # In order not to get to much features, only the most relevant features will be used. Here the missing data can be replaced by their mean: # + key_mode = ['host_response_time','host_response_rate', 'host_is_superhost','host_has_profile_pic','host_identity_verified', 'property_type'] # remove NAN in categorical data by mode() df_listings[key_mode] = df_listings[key_mode].fillna(df_listings[key_mode].mode().squeeze()) # - # to be able to use the categorical data in our model, they need to be converted into dummy-variables. 
To make sure that we only use relevant dummy-variables, we have alook at them: # plot pie plt.figure(figsize=(5*len(key_mode), 5)) for i,key in enumerate(key_mode): plt.subplot(100+10*len(key_mode)+i+1) df_listings[key].value_counts().plot.pie(y=df_listings.columns[i+1]) # here we see: # - several dummy-variables have very few data, they will be removed, if they have less than 100 occurences. # - 'host_has_profile_pic' will be removed, because there is not enough variation in the feature. # - these measures will help to prevent over-fitting # df_listings.head() # choose categorical values df_listings = pd.get_dummies(df_listings, columns = key_mode, prefix_sep = '_d_') # we also create dummy-variables from 'anemety' # + # create dummies from 'amenities' # clean data df_listings_a = clean_str(df_listings['amenities']) # create anemety df_listings_a_d = df_listings_a.str.get_dummies(sep=',') # adapt columns, so that they can be handeld together with the other dummies df_listings_a_d.columns = df_listings_a_d.columns.astype(str) + '_d_' # join df_listings and df_listings_a_d df_listings = df_listings.join(df_listings_a_d) # - # the available anemety are: # all dummy-headers from 'anemety' df_listings_a_d.columns # all dummy-headers dh = df_listings.columns[df_listings.columns.str.contains('_d_')] dh # these are to many dummy-variables. Remove the ones with low occurence-rates (<100) # dummy-header with less that 100 ocurences most_rel_cat = df_listings[dh].columns[df_listings[dh].sum() > 100] # all relevant dummy-variables are: most_rel_cat # we now have a set of relevant dummy-variables, that we want to use to train our model. # Now, as we have the most relevant numerical and categorical features, we can set up and train our model. # ### Section 4: Modeling # A standard, linear model will be used. # Different approchas with differne input data will be used: # 1. 
different numbers of only numerical features: # - we will iterate throu 2 to 15 different features, to see which set will have the best train- and test-scores # 2. additional categorical data # - we will then add the categorical features, to see, if it improves. # # + # set up DataFrame for the results result = pd.DataFrame(index = ['number of vars', 'train_score', 'test_score']) # Instantiate linear regression model lm_model = LinearRegression(normalize=True) for i in range(3,15): # take the i most relevant numerical features most_rel_num = df_listings_num_cov.sort_values(['price'],ascending = False).index[1:i].values #most_rel = np.append(most_rel_num,most_rel_cat) # define data sets X = df_listings[most_rel_num] y = df_listings['price'] # Create train and test data sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .10, random_state=42) # fit model to training data set lm_model.fit(X_train, y_train) # Predict and score the model y_train_preds = lm_model.predict(X_train) y_test_preds = lm_model.predict(X_test) train_score = r2_score(y_train, y_train_preds) test_score = r2_score(y_test,y_test_preds) result[str(i)] = [i,train_score,test_score] # - most_rel_num # we can see that even with 3 features the results are not to bad, but they get better if we use more features. 
# Now we will add the categorical features to see, if the score will be better: # + # take the 14 most relevant numerical features most_rel_num = df_listings_num_cov.sort_values(['price'],ascending = False).index[1:14].values most_rel = np.append(most_rel_num,most_rel_cat) # define data sets X = df_listings[most_rel] y = df_listings['price'] # Create train and test data sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .10, random_state=42) # fit model to training data set lm_model.fit(X_train, y_train) # Predict and score the model y_train_preds = lm_model.predict(X_train) y_test_preds = lm_model.predict(X_test) train_score = r2_score(y_train, y_train_preds) test_score = r2_score(y_test,y_test_preds) result[str(15)] = [i,train_score,test_score] # - # ### Section 5: Evaluation # #### Question block 1: how ist the AirBnB market in Seattle?! # #### Question A: how many listings do exist? # can already be answered: print("Number of listings: ", df_calendar.shape[0]) # ### Question B: what ist the average price, how is its distribution over the year? # ### Question C:what is the average availability, how is its distribution over the year? 
# To be able to answer the next questions, we need to sort the data by the month: # + # only take 2016 df_calendar_16 = df_calendar[df_calendar['year'] == 2016]# groupby month df_calendar_16_gb_p = df_calendar_16.groupby('month').mean() # - # So we get an overview about the price and availability per month: print ("the average price in Seattle is:",df_calendar_16_gb_p['price'].mean(),"US$") # barplots: # price per month: df_calendar_16_gb_p['price'].plot.bar(y = 'price in $', ylim = [100,150]) print ("the average availability in Seattle is:",df_calendar_16_gb_p['available'].mean()*100,"%") # availability per month: df_calendar_16_gb_p['available'].plot.bar(y = 'relativ availability', ylim = [0.4,0.8]) # ### Question D: is there a correlation between price, availabilty and the climate in Seattle # To get an idea, when it is a good idea to travel to Seattle, we need to see these 2 plot together with weather data: # + # get climate data from https://www.bestplaces.net/climate/city/washington/seattle comfort_index = np.array([5.6,6.1,6.5,7.2,8.2,9,9.6,9.7,9.1,7.5,6,5.3]) # normalize price, availibility and comfort data for comparison df_calendar_16_gb_p['comfort_index_n'] = comfort_index/max(comfort_index) df_calendar_16_gb_p['price_n'] = df_calendar_16_gb_p['price']/df_calendar_16_gb_p['price'].max() df_calendar_16_gb_p['available_n'] = df_calendar_16_gb_p['available']/df_calendar_16_gb_p['available'].max() # plot comparison df_calendar_16_gb_p[['price_n','available_n','comfort_index_n']].plot.bar(ylim = [0.4,1.1]) # - # In this diagramm we can see a certain correlation between the price with the comfort-index. The best month to travel would be August, because only there the comfort-index is higher than the price ;-). # It also turns out that the availability doesn't seem to correlate to strong with the price or comfort-index. 
# ### Question E: how ist the distribution of the listings over the neighborhoods and in general over the city in terms of numbers, prices, ratings and property type? # Besides the time-related behavior of the data it could be interesting to get more information about local differences. # For that we now turn to the listings-data-set. # First, let's get an overview about the over-all price-structure: # histogram of price df_listings['price'].plot.hist(bins=20) # we can see here, that the price of most of the listings is less than 400US$, so these listings will be shown in a geographical context: # + # price-distribution geographicly # clip price, otherwise the color distribution is bad. pr400 = df_listings['price'].clip(0,400) # plot listings at their position plt.scatter(df_listings.latitude, df_listings.longitude, c= pr400/pr400.max(), alpha=0.3) # - # the brighter a listing is, the more expensive it is. Apparently there are no "hot spots" with expensive listings. # We also can look at neigborhoods. # How are the differences in term of price between the different neigborhoods? df_ng = df_listings.groupby(['neighbourhood_group_cleansed']).mean()['price'].sort_values() df_ng.plot.bar(figsize=[10,5]) # so we see that there are differences in the mean of the prices of the differend neigborhoods. # But are they relevant? # To find out, we show the distribution of the prices within the neigborhoods: # visualize the variance of the prices in the neig. sns.set(rc={'figure.figsize':(16,8)}) sns.violinplot(x="neighbourhood_group_cleansed", y="price", data=df_listings, order = df_ng.index) # so we see, that the variance of the prices is relativly high. This shows, that the prices of the listing is well distributed over the neigborhoods. We have already seen that in our geographical overview. 
# now let's see how many listings are in which neigborhoods: df_ng = df_listings.groupby(['neighbourhood_group_cleansed']).count()['review_scores_rating'].sort_values() df_ng.plot.bar(figsize=[10,5]) # And finaly, which neigborhood has the best ratings? df_ng = df_listings.groupby(['neighbourhood_group_cleansed']).mean()['review_scores_rating'].sort_values() df_ng.plot.bar(figsize=[10,5],ylim = [80,100]) # # ## Question block 2: how can written reviews used for data analysis? # ### Question is it possible to quantify them using a "SentimentIntensityAnalyzer"? # ### Question what are the pitfalls if doing so? # Now we will deal also with the review data. # The general questions are: # - is it possible to process the human-written review data? # - will it help us in understanding the data? # We use the SentimentIntensityAnalyzer to translate the human-written reviews into polarity scores. # These scores can be between -1 and 1 and should represent the sentiment intensity of the reviewer. High rates shows a good feeling, low rates show bad feelings. # To get an idea, if the calculated polarity_scores make sense, we have a look at the TOP-10: df_reviews[['polarity_scores', 'comments']].sort_values('polarity_scores', ascending = False)[:10] # we can see, that the reviews with a high polarity_score indeed are positiv reviews. # So we have a look at the Bottom-10: df_reviews[['polarity_scores', 'comments']].sort_values('polarity_scores', ascending = True)[:10] # here we see some very bad review (e.g. "Staying at Robert’s place was a nightmare"), so that is fine. # On the other hand, we see mostly German reviews. These are not specific bad, one is very good: "Die Unterkunft war sehr schön und genau so, wi..." (in english: "the appartment was very nice"). # So in this case, the SentimentIntensityAnalyzer didn't worked good, very likly because of the foreign language. 
def coef_weights(coefficients, X_train):
    '''
    Function to get coefficients back for each of the features

    INPUT:
    coefficients - the coefficients of the linear model
    X_train - the training data, so the column names can be used

    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)

    Provides a dataframe that can be used to understand the most
    influential coefficients in a linear model by providing the
    coefficient estimates along with the name of the variable attached
    to the coefficient.

    Bug fix: the original body ignored the `coefficients` argument and
    read the global `lm_model.coef_` instead; the parameter is now used,
    so the helper works for any fitted model (the existing call site
    `coef_weights(lm_model.coef_, X_train)` is unaffected).
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    # Most influential features (largest |coefficient|) first.
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
project_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from keras import backend as K from keras.models import load_model from keras.optimizers import Adam from scipy.misc import imread import numpy as np from matplotlib import pyplot as plt from models.keras_ssd300 import ssd_300 from keras_loss_function.keras_ssd_loss import SSDLoss from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes from keras_layers.keras_layer_DecodeDetections import DecodeDetections from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast from keras_layers.keras_layer_L2Normalization import L2Normalization from data_generator.object_detection_2d_data_generator import DataGenerator from eval_utils.average_precision_evaluator import Evaluator from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast # %matplotlib inline # - # Set a few configuration parameters. img_height = 480 img_width = 640 n_classes = 4 model_mode = 'inference' # + # TODO: Set the path to the `.h5` file of the model to be loaded. model_path = 'ssd7_epoch-25_loss-3.0395_val_loss-2.7676.h5' # We need to create an SSDLoss object in order to pass that to the model loader. ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0) K.clear_session() # Clear previous models from memory. model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes, 'L2Normalization': L2Normalization, 'DecodeDetections': DecodeDetections, 'compute_loss': ssd_loss.compute_loss}) # + dataset = DataGenerator() # TODO: Set the paths to the dataset here. 
Pascal_VOC_dataset_images_dir = '../datasets/ICUB/testseenimg/'
Pascal_VOC_dataset_annotations_dir = '../datasets/ICUB/testseenans/'
Pascal_VOC_dataset_image_set_filename = '../datasets/ICUB/testseen.txt'

# The XML parser needs to know what object class names to look for and in which order to map them to integers.
classes = ['background','book','cellphone','mouse','ringbinder']

dataset.parse_xml(images_dirs=[Pascal_VOC_dataset_images_dir],
                  image_set_filenames=[Pascal_VOC_dataset_image_set_filename],
                  annotations_dirs=[Pascal_VOC_dataset_annotations_dir],
                  classes=classes,
                  include_classes='all',
                  exclude_truncated=False,
                  exclude_difficult=False,
                  ret=False)

# +
# Run the Pascal-VOC-style average-precision evaluation over the test set.
evaluator = Evaluator(model=model,
                      n_classes=n_classes,
                      data_generator=dataset,
                      model_mode=model_mode)

results = evaluator(img_height=img_height,
                    img_width=img_width,
                    batch_size=4,
                    data_generator_mode='resize',
                    round_confidences=False,
                    matching_iou_threshold=0.2,
                    border_pixels='include',
                    sorting_algorithm='quicksort',
                    average_precision_mode='sample',
                    num_recall_points=11,
                    ignore_neutral_boxes=True,
                    return_precisions=True,
                    return_recalls=True,
                    return_average_precisions=True,
                    verbose=True)

mean_average_precision, average_precisions, precisions, recalls = results
# -

# Persist the evaluation results so they can be re-loaded without re-running the model.
import numpy as np
np.savez('Test_Results/SSD7_4_seen_GOOGLE.npz',
         mean_average_precision=mean_average_precision,
         average_precisions=average_precisions,
         precisions=precisions,
         recalls=recalls)

# NOTE(review): the file saved above ('SSD7_4_seen_GOOGLE.npz') differs from the
# file loaded below ('SSD7_6_unseen.npz') — presumably intentional (comparing a
# previously saved run), but worth confirming.
import numpy as np
data = np.load('Test_Results/SSD7_6_unseen.npz')
mean_average_precision = data['mean_average_precision']
average_precisions = data['average_precisions']
precisions = data['precisions']
recalls = data['recalls']
print(len(precisions))

# Print per-class AP followed by the overall mAP.
for i in range(1, len(average_precisions)):
    print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
print()
# BUG FIX: the original line was
#   print("{:<14}{:<6}{}".format('','mAP', mean_average_precision, 3))
# which passed 3 as an extra (ignored) format argument instead of rounding,
# so the mAP was printed unrounded. The identical loop further below shows
# the intended round(mean_average_precision, 3).
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))

# +
# Plot one precision-recall curve per class in a grid of subplots.
m = max((n_classes + 1) // 3, 3)
n = 3

fig, cells = plt.subplots(m, n, figsize=(n*5,m*5))
for i in range(m):
    for j in range(n):
        if n*i+j+1 > n_classes: break  # no more classes to plot in this grid
        cells[i, j].plot(recalls[n*i+j+1], precisions[n*i+j+1], color='blue', linewidth=1.0)
        cells[i, j].set_xlabel('recall', fontsize=14)
        cells[i, j].set_ylabel('precision', fontsize=14)
        cells[i, j].grid(True)
        cells[i, j].set_xticks(np.linspace(0,1,11))
        cells[i, j].set_yticks(np.linspace(0,1,11))
        cells[i, j].set_title("{}, AP: {:.3f}".format(classes[n*i+j+1], average_precisions[n*i+j+1]), fontsize=16)

# +
# Re-run the matching/precision-recall pipeline step by step, this time with a
# stricter IoU threshold (0.5) and 'integrate' mode for the average precision.
evaluator.get_num_gt_per_class(ignore_neutral_boxes=True,
                               verbose=False,
                               ret=False)

evaluator.match_predictions(ignore_neutral_boxes=True,
                            matching_iou_threshold=0.5,
                            border_pixels='include',
                            sorting_algorithm='quicksort',
                            verbose=True,
                            ret=False)

precisions, recalls = evaluator.compute_precision_recall(verbose=True, ret=True)

average_precisions = evaluator.compute_average_precisions(mode='integrate',
                                                          num_recall_points=11,
                                                          verbose=True,
                                                          ret=True)

mean_average_precision = evaluator.compute_mean_average_precision(ret=True)
# -

for i in range(1, len(average_precisions)):
    print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
print()
print("{:<14}{:<6}{}".format('','mAP', round(mean_average_precision, 3)))

# +
# evaluator??
# -
ssd7_evaluation_Category.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="qffBounoHs7S" colab_type="text" # # Import dependencies # + id="moKVG_x9Hs7W" colab_type="code" colab={} import sys import time from __future__ import print_function import datetime from torch import nn from torch import from_numpy from torch import optim from torch.autograd import Variable from torch.utils.data import DataLoader from torch.utils.data.dataset import random_split from torch.utils.data import dataset import torch.nn as nn import torch.nn.functional as F import torch import pandas as pd import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import random import re from sklearn.feature_extraction.text import CountVectorizer import numpy as np from nltk.stem import PorterStemmer from nltk.corpus import stopwords #import nltk #nltk.download('stopwords') import string from django.core.validators import URLValidator from django.core.exceptions import ValidationError from collections import defaultdict # + [markdown] id="7tUCIpTtHs7b" colab_type="text" # # Import data # This corpus of tweets comes from <i> Demographic Dialectal Variation in Social Media: A Case Study of African-American English. <NAME>, <NAME>, and <NAME>. Proceedings of EMNLP 2016. 
</i> <a href="https://www.aclweb.org/anthology/D16-1120/"> [pdf] </a> <a href="http://slanglab.cs.umass.edu/TwitterAAE/"> [more info] </a> # + id="P_fdPnRpH96S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="5a2e6836-95e1-4cdd-a652-34467a4b8a19" from google.colab import drive drive.mount('/content/drive') # + id="-_7qZCUHHs7c" colab_type="code" colab={} columns =["Text","AA","White"] tweets = pd.read_csv("TwitterAAE-full-v1/twitteraae_all_aa",names=columns,delimiter="\t",nrows=100000,usecols=[5,6,9]) # + id="z4q50kjxHs7f" colab_type="code" colab={} float_check = re.compile("[0-1]\.[0-9]+$") tweets = tweets[tweets['AA'].astype(str).str.contains(float_check)] tweets = tweets[tweets['White'].astype(str).str.contains(float_check)] tweets.AA = tweets.AA.astype(float) tweets.White = tweets.White.astype(float) # + id="BGtZlGqKHs7i" colab_type="code" colab={} tweets.head() # + id="-EsF54wDHs7p" colab_type="code" colab={} def race_count_histogram(tweets,title): tweets[tweets.columns[1]].hist(color='blue',alpha=.3) tweets[tweets.columns[2]].hist(color='orange',alpha=.3) handles = [Rectangle((0,0),1,1,color=c,ec="k",alpha=.2) for c in ['blue','orange']] labels= [tweets.columns[1],tweets.columns[2]] plt.title(title) plt.legend(handles, labels) plt.show() # + id="nw4ljIO4Hs7s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 164} outputId="bb8907ec-f75a-42f2-bd2f-4dcf93c13767" race_count_histogram(tweets,"Histogram of Proportion of Words AA/White\nCorpus: [BGO EMNLP '16]") # + [markdown] id="gAlKhwApHs7v" colab_type="text" # # Addings more tweets # # The histogram above underscores the need for more tweet diversity. I used the model described in the paper above (available <a href=https://github.com/slanglab/twitteraae> here</a>) and the <a href="https://www.kaggle.com/kazanova/sentiment140/data#"> tweet corpus here </a> to enrich my list of tweets. 
The tweet texts and predicted classes are available in <b> race_labeled_tweets_from_sentiment.csv </b> # + id="nos0BZ2pHs7w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="9503141c-4ed8-434d-ee55-6c20b8ce2b04" extra_tweets = pd.read_csv("/content/drive/My Drive/race_labeled_tweets_from_sentiment.csv") extra_tweets.head() # + id="d8yXIfXFHs7z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="a896eaab-b6fc-4592-debc-5935410e9c56" all_tweets=tweets.append(extra_tweets).reset_index(drop=True) all_tweets.head() # + id="_A9KstXKHs72" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="202f4d48-f592-4c72-c07a-88f9d54868a7" race_count_histogram(all_tweets,"Histogram of Proportion of Words AA/White\nCorpus: [BGO EMNLP '18]+100,000 More Tweets") # + [markdown] id="OGikLpIuHs74" colab_type="text" # This seems like a more reasonable mix # + [markdown] id="YZJ9Dd_rHs75" colab_type="text" # # Creating a biased model # # Our goal is to create a bias detection engine. This notebook is designed to train a biased model that the detection engine will flag. In this case, we will train a shallow neural network that will take as input a feature vector representing the tweet. # # # Then we will make a subjective judgement on the tweets. This is meant to mimic common text labeling tasks such as # - Hate speech detection # - Education level classification # - Tweet recommendation # # In this case, we will perform education level classification. We will simulate the experience of three independent labelers who label tweets according to some predetermined distribution. 
Since we want to produce a biased (read: <i> racist </i>) model, we will assign labels as follows: # <center> # $educlabel1(tweet) = min(8,2 \times (\mathcal{N}(tweet.AA,\,.2)\ + 6 \times \mathcal{N}(tweet.White,\,.2))$ <br> # $educlabel2(tweet) = min(8,2 \times (\mathcal{N}(tweet.AA,\,.5)\ + 6 \times \mathcal{N}(tweet.White,\,.2))$ <br> # $educlabel3(tweet) = min(8,4 \times (\mathcal{N}(tweet.AA,\,.2)\ + 4 \times \mathcal{N}(tweet.White,\,.2))$ <br> # $educlabel(tweet) = max(int(\frac{educlabel1(tweet) + educlabel2(tweet) + educlabel3(tweet)}{3}),0)$ # </center> # These labelers generally give more weight to words the model in the paper above considers "White" than to words it considers "AA," thereby giving White-sounding tweets a significant advantage when it comes to education level determinations. The aggregated label does not weight lablers according to their biases. # + [markdown] id="CPaUYx5yHs75" colab_type="text" # ## Test out the formula # + id="9Z7Y4yVqHs76" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="64e55a5d-8ae0-42c0-fcf1-97d9df608509" tweet = all_tweets.loc[1,:] raw_score_1 = min(8,2*random.gauss(tweet.AA,.2) + 6*random.gauss(tweet.White,.2)) raw_score_2 = min(8,2*random.gauss(tweet.AA,.5) + 6*random.gauss(tweet.White,.2)) raw_score_3 = min(8,4*random.gauss(tweet.AA,.2) + 4*random.gauss(tweet.White,.2)) tweet_label = max(int((raw_score_1+raw_score_2+raw_score_3)/3),0) print(tweet.Text + " : " + str(tweet_label)) # + id="zO-g8oZUHs78" colab_type="code" colab={} def label(tweet): raw_score_1 = min(8,2*random.gauss(tweet.AA,.2) + 6*random.gauss(tweet.White,.2)) raw_score_2 = min(8,2*random.gauss(tweet.AA,.5) + 6*random.gauss(tweet.White,.2)) raw_score_3 = min(8,4*random.gauss(tweet.AA,.2) + 4*random.gauss(tweet.White,.2)) return max(int((raw_score_1+raw_score_2+raw_score_3)/3),0) # + id="tgyeyTWPHs8A" colab_type="code" colab={} labels = [label(all_tweets.loc[i,:]) for i in range(len(all_tweets))] 
all_tweets["EducationLevel"] = labels # + id="wriuveeOHs8C" colab_type="code" colab={} outputId="482388ee-720a-445f-809a-7d9d3ddc5910" all_tweets[all_tweets.White > all_tweets.AA].EducationLevel.hist(color='blue',alpha=.3) all_tweets[all_tweets.AA > all_tweets.White].EducationLevel.hist(color='orange',alpha=.3) handles = [Rectangle((0,0),1,1,color=c,ec="k",alpha=.2) for c in ['blue','orange']] labels= ["More White Words","More AA Words"] plt.title("Education Level Histogram For Tweets with Race-Word Distributions") plt.legend(handles, labels) plt.xlabel("Education Level") plt.show() # + [markdown] id="cZQWZQIWHs8G" colab_type="text" # This histogram shows that the lablers were in fact biased against tweets with more African American English words. # + id="GnNhMTiBHs8H" colab_type="code" colab={} all_tweets.to_csv("/content/drive/My Drive/labeled_tweets.csv",index=False) # + [markdown] id="2-wAcqlzHs8K" colab_type="text" # ## Preprocessing # # + id="6_0GbkzLHs8K" colab_type="code" colab={} all_tweets = pd.read_csv("/content/drive/My Drive/labeled_tweets.csv") # + [markdown] id="04bLN2zMHs8N" colab_type="text" # ### Traditional NLP Preprocessing # # We will standardize case, remove stop words, mentions, and urls. We won't stem because we will be converting words to word vectors using an embedding. 
# + id="j7FNKZUqK_Tv" colab_type="code" colab={}
def is_url(token):
    """Return True if `token` is a valid URL according to django's URLValidator.

    URLValidator raises ValidationError for anything that does not parse as a
    URL, which we translate into a boolean.
    """
    val = URLValidator()
    try:
        val(token)
        return True
    except ValidationError:
        return False


# + id="jW_s-86dIhUU" colab_type="code" colab={}
def preprocess(tweet, stop_words=None, stemmer=None, punct_strip=None):
    """Normalize a raw tweet string for vectorization.

    Lower-cases each whitespace token, drops stop words, @-mentions and URLs,
    strips punctuation, and re-joins the surviving tokens with spaces.

    Parameters
    ----------
    tweet : str
        The raw tweet text.
    stop_words : set, optional
        Stop words to remove; defaults to NLTK's English stop-word list.
    stemmer : optional
        Unused — kept for backward compatibility. Stemming is deliberately
        skipped because tokens are later mapped to word embeddings.
    punct_strip : dict, optional
        A str.translate table; defaults to one removing all ASCII punctuation.

    Returns
    -------
    str
        The cleaned tweet (possibly an empty string).
    """
    # Defaults are built lazily: they come from function calls, so they must
    # not be evaluated at definition time as default-argument values.
    if stop_words is None:
        stop_words = set(stopwords.words('english'))
    if punct_strip is None:
        punct_strip = str.maketrans('', '', string.punctuation)
    # NOTE: `stemmer` is intentionally not instantiated or used (see docstring).
    # tokenize on single spaces (matches the original behavior)
    tweet = tweet.split(" ")
    final_tweet = []
    for token in tweet:
        # lower case
        token = token.lower()
        # remove stop words, mentions, urls
        if token and (token in stop_words or token[0] == '@' or is_url(token)):
            continue  # do not append it to final_tweet
        # remove punctuation
        token = token.translate(punct_strip)
        if len(token) > 0:
            final_tweet.append(token)
    return " ".join(final_tweet)


# + id="6G1OgdkoKJh1" colab_type="code" colab={}
stop_words = set(stopwords.words('english'))
# BUG FIX: was `punct_strip = punct_strip = str.maketrans(...)` (duplicated assignment)
punct_strip = str.maketrans('', '', string.punctuation)
# BUG FIX: the original call `preprocess(t, stop_words, punct_strip)` bound
# `punct_strip` positionally to the unused `stemmer` parameter; pass it by
# keyword so the precomputed translate table is actually used.
all_tweets["PreprocessedText"] = [preprocess(t, stop_words, punct_strip=punct_strip) for t in all_tweets.Text]
all_tweets = all_tweets.dropna()

# + id="2M2xrRHMReDv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="05eed798-f810-4839-9b82-e8cfebfe4482"
all_tweets.PreprocessedText.head()

# + id="6h_UhFBKRps0" colab_type="code" colab={}
all_tweets.to_csv("/content/drive/My Drive/labeled_tweets.csv",index=False)

# + [markdown] id="jpOSwFTsIyhp" colab_type="text"
# ### Dropping Rare Words
#
# We will keep only those tokens that occur in at least five tweets. All tweets with 0 remaining tokens will be dropped.
# + id="Vu6-pj4_SBzI" colab_type="code" colab={} def get_unigram_frequencies(tweets): vectorizer = CountVectorizer() X = vectorizer.fit_transform(tweets) return {k:v for k,v in zip(vectorizer.get_feature_names(), np.ravel(X.sum(axis=0)))} # + id="ai6qomGxTb98" colab_type="code" colab={} unigram_frequencies = get_unigram_frequencies(all_tweets.PreprocessedText) keep_words = {k for k in unigram_frequencies.keys() if unigram_frequencies[k] > 4} # + id="PZ11f30KYUY_" colab_type="code" colab={} def strip_rare_words(tweet,keep_words): tweet = tweet.split(" ") return " ".join([token for token in tweet if token in keep_words]) # + id="lVcEX9b1YO9X" colab_type="code" colab={} all_tweets.PreprocessedText = [strip_rare_words(x,keep_words) for x in all_tweets.PreprocessedText] all_tweets = all_tweets[all_tweets.PreprocessedText.str.len() > 0] # + id="VJW9Ujj8Y7nR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d5f5a63d-cf66-448b-b78c-c8f9160caa6c" len(all_tweets) # + [markdown] id="NcAAUZcX_ppP" colab_type="text" # ### Get the size of the vocabulary # + id="n0yW5q8b_or6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fe161b6f-508e-49e9-c207-2368443b3daa" vocab = set([word.strip() for x in all_tweets.PreprocessedText for word in x.split(" ")]) vocab_len = len(vocab) print(vocab) # + [markdown] id="UHf2PoAc-8YE" colab_type="text" # # Prepare PyTorch Model # + id="l0-W-bsaYFUG" colab_type="code" colab={} all_tweets = pd.read_csv("/content/drive/My Drive/labeled_preprocessed_tweets.csv") # + id="PblEnIYqYIbJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="7dd2a7fe-e39a-4006-a409-9b95c506f393" all_tweets.head() # + [markdown] id="bVMoBMkHYh9a" colab_type="text" # ### Split the dataset # + id="xCnRpP-mndMQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ac3f76b7-4f80-4371-e596-c45c5a11f5f7" all_tweets = 
all_tweets[all_tweets.PreprocessedText.str.len()>0] all_tweets = all_tweets[all_tweets.PreprocessedText.str.split(" ").str.len() > 1].reset_index(drop=True) all_tweets.head() # + id="QpFAgvPVDzgl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="f173b732-5b6f-4be2-ab24-6a3dddb58261" all_tweets["Label"] = all_tweets["EducationLevel"]>2 train_dataset = all_tweets.sample(frac = 0.6)[["PreprocessedText","Label"]] test_indices = [i for i in range(len(all_tweets)) if i not in train_dataset.index] test_dataset = all_tweets.iloc[test_indices,:].reset_index(drop=True) print("Training Data Length: " + str(len(train_dataset))) print("Test Data Length: " + str(len(test_dataset))) train_dataset = train_dataset.reset_index(drop=True) test_dataset = test_dataset.reset_index(drop=True) train_len = int(len(train_dataset) * 0.95) sub_valid_ = train_dataset.sample(frac = 0.05)[["PreprocessedText","Label"]] sub_train_indices = [i for i in range(len(train_dataset)) if i not in sub_valid_.index] sub_train_ = all_tweets.iloc[sub_train_indices,[4,5]].reset_index(drop=True) sub_valid_ = sub_valid_.reset_index(drop=True) sub_valid_.to_csv("/content/drive/My Drive/PyTorchData/validate.csv",index=False) sub_train_ = all_tweets.iloc[sub_train_indices,[4,5]].reset_index(drop=True) sub_train_.to_csv("/content/drive/My Drive/PyTorchData/train.csv",index=False) test_dataset.to_csv("/content/drive/My Drive/PyTorchData/test.csv",index=False) # + id="pM42fbH1mdnN" colab_type="code" colab={} sub_valid_ = pd.read_csv("/content/drive/My Drive/PyTorchData/validate.csv") sub_train_ = pd.read_csv("/content/drive/My Drive/PyTorchData/train.csv") test_dataset = pd.read_csv("/content/drive/My Drive/PyTorchData/test.csv") # + id="gkegWHOGap8U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="c8ac56da-b39a-413c-f044-c129c4a332d8" sub_train_.head() # + [markdown] id="YHVshqmnYmbu" colab_type="text" # ### Turn the text into tensors # + 
id="jgfwwCrVYl5_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="69f007b7-76b7-41a9-9f48-e272cbe33151" from torchtext import data TEXT = data.Field(tokenize='spacy',batch_first=True,include_lengths=True) LABEL = data.LabelField(dtype = torch.float,batch_first=True) fields = [('text',TEXT),('label', LABEL)] train_data=data.TabularDataset(path = '/content/drive/My Drive/PyTorchData/train.csv',format = 'csv',fields = fields,skip_header = True) valid_data=data.TabularDataset(path = '/content/drive/My Drive/PyTorchData/validate.csv',format = 'csv',fields = fields,skip_header = True) TEXT.build_vocab(train_data,min_freq=0,vectors = "glove.6B.100d") LABEL.build_vocab(train_data) #print(TEXT.vocab.stoi) # + id="UkCc3OYDesnS" colab_type="code" colab={} device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #set batch size BATCH_SIZE = 64 #Load an iterator train_iterator, valid_iterator = data.BucketIterator.splits( (train_data, valid_data), batch_size = BATCH_SIZE, sort_key = lambda x: len(x.text), sort_within_batch=True, device = device) # + [markdown] id="H8oaeYQl_A4q" colab_type="text" # ## Define the Model Class - # # Following code from <a href="https://www.analyticsvidhya.com/blog/2020/01/first-text-classification-in-pytorch/">here. </a> Using an LSTM. 
# # + id="SGNhiy8E-_VJ" colab_type="code" colab={} class classifier(nn.Module): #define all the layers used in model def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout): #Constructor super().__init__() #embedding layer self.embedding = nn.Embedding(vocab_size, embedding_dim) #lstm layer self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout, batch_first=True) #dense layer self.fc = nn.Linear(hidden_dim * 2, output_dim) #activation function self.act = nn.Sigmoid() def forward(self, text, text_lengths): #text = [batch size,sent_length] embedded = self.embedding(text) #embedded = [batch size, sent_len, emb dim] #packed sequence packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths,batch_first=True) packed_output, (hidden, cell) = self.lstm(packed_embedded) #hidden = [batch size, num layers * num directions,hid dim] #cell = [batch size, num layers * num directions,hid dim] #concat the final forward and backward hidden state hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1) #hidden = [batch size, hid dim * num directions] dense_outputs=self.fc(hidden) #Final activation function outputs=self.act(dense_outputs) return outputs # + [markdown] id="aM5e61O_CmGo" colab_type="text" # ## Instantiate the model # + id="dYY8TWrtI24J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3b24afff-4a84-46bc-8901-a6b233f80ae3" print(len(TEXT.vocab)) # + id="wdq2etpb_lcn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="8718bf9f-dcb6-4531-ee70-7617d21304d4" #define hyperparameters size_of_vocab = len(TEXT.vocab) embedding_dim = 100 num_hidden_nodes = 32 num_output_nodes = 1 num_layers = 2 bidirection = True dropout = 0.2 #instantiate the model model = classifier(size_of_vocab, embedding_dim, num_hidden_nodes,num_output_nodes, num_layers, bidirectional = True, dropout = 
dropout) print(model) # + id="YBhjfT7-dbv6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="01b2a10f-6353-4832-bb53-c8b1e03eed4a" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') #Initialize the pretrained embedding pretrained_embeddings = TEXT.vocab.vectors model.embedding.weight.data.copy_(pretrained_embeddings) print(pretrained_embeddings.shape) # + [markdown] id="h4w0OGtxCoqE" colab_type="text" # ## Define functions that train the model # + id="4qo7Rh0AH7Bb" colab_type="code" colab={} import torch.optim as optim #define optimizer and loss optimizer = optim.Adam(model.parameters()) criterion = nn.BCELoss() #define metric def binary_accuracy(preds, y): #round predictions to the closest integer rounded_preds = torch.round(preds) correct = (rounded_preds == y).float() acc = correct.sum() / len(correct) return acc #push to cuda if available model = model.to(device) criterion = criterion.to(device) # + id="Tt19KnDgIyMf" colab_type="code" colab={} def train(model, iterator, optimizer, criterion): #initialize every epoch epoch_loss = 0 epoch_acc = 0 #set the model in training phase model.train() for batch in iterator: #resets the gradients after every batch optimizer.zero_grad() #retrieve text and no. 
of words text, text_lengths = batch.text #convert to 1D tensor predictions = model(text, text_lengths).squeeze() #compute the loss loss = criterion(predictions, batch.label) #compute the binary accuracy acc = binary_accuracy(predictions, batch.label) #backpropage the loss and compute the gradients loss.backward() #update the weights optimizer.step() #loss and accuracy epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) # + id="Md7jtiPeCJ_i" colab_type="code" colab={} def evaluate(model, iterator, criterion): #initialize every epoch epoch_loss = 0 epoch_acc = 0 #deactivating dropout layers model.eval() #deactivates autograd with torch.no_grad(): for batch in iterator: #retrieve text and no. of words text, text_lengths = batch.text #convert to 1d tensor predictions = model(text, text_lengths).squeeze() #compute loss and accuracy loss = criterion(predictions, batch.label) acc = binary_accuracy(predictions, batch.label) #keep track of loss and accuracy epoch_loss += loss.item() epoch_acc += acc.item() return epoch_loss / len(iterator), epoch_acc / len(iterator) # + [markdown] id="J_D15jo6px_V" colab_type="text" # ## Train the model # Code inspired by this page: https://www.analyticsvidhya.com/blog/2020/01/first-text-classification-in-pytorch/ # + id="Na89Yy50DS3_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="ba0c98c6-32e4-4de1-c741-17f996d59df7" N_EPOCHS = 10 best_valid_loss = float('inf') for epoch in range(N_EPOCHS): #train the model train_loss, train_acc = train(model, train_iterator, optimizer, criterion) #evaluate the model valid_loss, valid_acc = evaluate(model, valid_iterator, criterion) #save the best model if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'saved_weights.pt') print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%') print(f'\t Val. Loss: {valid_loss:.3f} | Val. 
Acc: {valid_acc*100:.2f}%') # + [markdown] id="bNzcIWZEpcPS" colab_type="text" # # See the model's predictions # # We now have a trained model saved in saved_weights.pt. We will apply it to the test set. Remember, we are not worried about accuracy in the traditional sense. For our purposes we just want to make sure this model is biased against AA-english words. # + id="Ab7wO9VtiaDp" colab_type="code" colab={} #load weights path='saved_weights.pt' model.load_state_dict(torch.load(path)); model.eval(); #inference import spacy nlp = spacy.load('en') def predict(model, sentence): tokenized = [tok.text for tok in nlp.tokenizer(sentence)] #tokenize the sentence indexed = [TEXT.vocab.stoi[t] for t in tokenized] #convert to integer sequence length = [len(indexed)] #compute no. of words tensor = torch.LongTensor(indexed).to(device) #convert to tensor tensor = tensor.unsqueeze(1).T #reshape in form of batch,no. of words length_tensor = torch.LongTensor(length) #convert to tensor prediction = model(tensor, length_tensor) #prediction return prediction[0].item() # + id="2rjy6OH8lDa8" colab_type="code" colab={} small_tweets = test_dataset.sample(frac=.1).reset_index(drop=True) small_tweets["Prediction"] = [predict(model,small_tweets.PreprocessedText[i]) for i in range(len(small_tweets))] # + id="ijOSlECYl6FU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 255} outputId="833ce427-78c5-4267-b356-8b6d65d45dd8" small_tweets.head() # + id="nMhjJFNBi-dQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="e33d885c-07cb-4ac0-8e68-b56b102233d2" small_tweets[small_tweets.White > small_tweets.AA].Prediction.hist(color='blue',alpha=.3) small_tweets[small_tweets.AA > small_tweets.White].Prediction.hist(color='orange',alpha=.3) handles = [Rectangle((0,0),1,1,color=c,ec="k",alpha=.2) for c in ['blue','orange']] labels= ["More White Words","More AA Words"] plt.title("Education Level Prediction Histogram For Tweets with Race-Word 
Distributions") plt.legend(handles, labels) plt.xlabel("Education Level") plt.show() # + [markdown] id="0bPYMLGOpuPE" colab_type="text" # We have the result we wanted
Examples/ToyModels/NLP/NLP_Toy_Train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: venv-datascience
#     language: python
#     name: venv-datascience
# ---

import numpy as np

arr = np.arange(0, 11)
arr

# # Indexing and Slicing

# Single elements and slices work just like Python lists.
arr[8]

arr[1:7]

arr[:7]

arr[5:]

# # Broadcasting

arr

# Broadcasting lets one scalar be assigned to a whole range of positions at once.
arr[0:5] = 100
arr

# A slice of an array is a *view* into the original data, not a copy —
# it effectively acts like a pointer.
slice_of_arr = arr[0:5]
slice_of_arr

slice_of_arr[:] = 99
slice_of_arr

# Because the slice was only a view, the original array changed as well.
arr

# To avoid this, take an explicit copy.
array_copy = arr.copy()
array_copy

array_copy[0:5] = 200
array_copy

# This time the original array is untouched.
arr

# -------
# # 2 dimensional arrays

arr_2d = np.array([[5, 10, 15], [20, 25, 30], [35, 40, 45]])
arr_2d

arr_2d.shape

# grab the first row
arr_2d[0]

# chained indexing: row first, then column
arr_2d[0][1]

# the comma-separated form is equivalent
arr_2d[0,1]

# slicing: first two rows, columns from index 1 onward
arr_2d[:2, 1:]

# # Conditional Selection

arr = np.arange(1, 11)
arr

# Comparing an array to a scalar produces a boolean mask...
arr > 4

# ...which can be used to filter the array down to the matching elements.
arr[arr > 4]

# ---------
# # Exercise

# +
# TASK: Use numpy to check how many rolls were greater than 2. For example if dice_rolls=[1,2,3] then the answer is 1.
# NOTE: Many different ways to do this! Your final answer should be an integer.
# MAKE SURE TO READ THE FULL INSTRUCTIONS ABOVE CAREFULLY, AS THE EVALUATION SCRIPT IS VERY STRICT.
import numpy as np

dice_rolls = np.array([3, 1, 5, 2, 5, 1, 1, 5, 1, 4, 2, 1, 4, 5, 3, 4, 5, 2, 4, 2, 6, 6, 3, 6, 2, 3, 5, 6, 5])

# total_rolls_over_two =  # This should be a single integer
# -

# Filter with a boolean mask, then count the survivors.
total_rolls_over_two = len(dice_rolls[dice_rolls > 2])
print(total_rolls_over_two)
Machine Learning & Data Science Masterclass - JP/02-Numpy/01-NumPy-Indexing-and-Selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Money and death # # We return to the death penalty. import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline # Make plots look a little bit more fancy plt.style.use('fivethirtyeight') # In this case, we are going to analyze whether people with higher incomes are more likely to favor the death penalty. # # To do this, we are going to analyze the results from a sample of the # US [General Social Survey](http://www.gss.norc.org) from 2002. # # If you are running on your laptop, download the data file # {download}`GSS2002.csv <../data/GSS2002.csv>`. # Read the data into a data frame gss = pd.read_csv('GSS2002.csv') gss # Each row corresponds to a single respondent. # # Show the column names: gss.columns # We want to work with only two columns from this data frame. These are "Income", and "DeathPenalty". # # "Income" gives the income bracket of the respondent. "DeathPenalty" is the answer to a question about whether they "Favor" or "Oppose" the death penalty. # # First make a list with the names of the columns that we want. cols = ['Income', 'DeathPenalty'] cols # Next make a new data frame by indexing the data frame with this list. # # The new data frame has only the columns we selected. money_death = gss[cols] money_death # There are many missing question responses, indicated by `NaN`. To # make our life easier, we drop the respondents who didn't specify an # income bracket, and those who did not give an answer to the death penalty # question. We use Pandas `dropna` method of the data frame, to drop all rows # that have any missing values in the row. money_death = money_death.dropna() money_death # Get the income column. 
income = money_death['Income']
# Show the unique values:
income.value_counts()

# These are strings. We want to get income as a number. We estimate this by
# *recoding* the "Income" column: each bracket string is replaced by the
# mid-point of the minimum and maximum of its range.
#
# We can do this with a *recoder function*. We have not covered functions
# yet, so do not worry about the details of this function.

def recode_income(value):
    """Return the mid-point estimate (a number) for an income-bracket string."""
    if value == 'under 1000':
        return 500
    lo_text, hi_text = value.split('-')
    return np.mean([int(lo_text), int(hi_text)])

# Here is what the recoder function gives with the lowest income bracket.
recode_income('under 1000')

# Here is the return from a higher bracket:
recode_income('90000-109999')

# Use this function to recode the "Income" strings into numbers. Again, we
# have not covered the `apply` method yet, so don't worry about the details.
income_ish = income.apply(recode_income)
income_ish

# Now get the results of the answer to the death penalty question.
death = money_death['DeathPenalty']
death.value_counts()

# We will identify the rows for respondents who are in favor of the death
# penalty. To do this, we make a Boolean vector:
death == 'Favor'

# Use this vector to select the income values for the respondents in favor
# of the death penalty. Show the distribution of values.
favor_income = income_ish[death == 'Favor']
favor_income.hist();

# Likewise select incomes for those opposed. Show the distribution.
oppose_income = income_ish[death == 'Oppose']
oppose_income.hist();

# Calculate the difference in mean income between the groups. This is the
# difference we observe.
actual_diff = favor_income.mean() - oppose_income.mean()
actual_diff

# We want to know whether this difference in income is compatible with
# random sampling. That is, we want to know whether a difference this large
# is plausible, if the incomes are in fact random samples from the same
# population.
# # To estimate how variable the mean differences can be, for such random sampling, # we simulate this sampling by pooling the income values that we have, from the # two groups, and the permuting them. # # First, we get the number of respondents in favor of the death penalty. n_favor = len(favor_income) n_favor # Then we pool the in-favor and oppose groups, by using `np.append` to concatenate (stick together) the two arrays, into one long array. pooled = np.append(favor_income, oppose_income) # To do the random sampling we permute the values, so the `pooled` vector is # a random mixture of the two groups. shuffled = np.random.permutation(pooled) # Treat the first `n_favor` observations from this shuffled vector as # our simulated in-favor group. The rest are our simulated oppose # group. fake_favor = shuffled[:n_favor] fake_oppose = shuffled[n_favor:] # Calculate the difference in means for this simulation. fake_diff = np.mean(fake_favor) - np.mean(fake_oppose) fake_diff # Now it is your turn. Do this simulation 10000 times, to build up the distribution of differences compatible with random sampling. # # Use the [Brexit ages](../permutation/brexit_ages) notebook for inspiration. differences = np.zeros(10000) for i in np.arange(10000): # Permute the pooled incomes shuffled = np.random.permutation(pooled) # Make a fake favor sample # Make a fake opposed sample # Calculate the mean difference for the fake samples # Put the mean difference into the differences array. # When you have that working, do a histogram of the differences. # + # Your code here # - # You can get an idea of where the actual difference we saw sits on this histogram, and therefore how likely that difference is, assuming the incomes come from the same underlying population of incomes. # # To be more specific, count how many of the differences you calculated were greater than or equal to the actual difference. 
# + # Your code here # - # Now calculate the proportion of these differences, to give an estimate of the probability of seeing a difference this large, if the incomes all come from the same underlying population: # + # Your code here
exercises/money_and_death.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Análise dos Dados Gerados pela Mini Estação Meteorológica [UFES 07/07/2021] # ### <NAME> # #### Neste notebook serão analisadas medidas aferidas pela mini estação meteorológica (Arduino) na UFES-Campus de Alegre, no dia 07/07/21. # #### Para organizar e tratar os dados será utilizada a biblioteca [PANDAS](https://pandas.pydata.org/). import pandas as pd nomecolunas=[['Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21','Quarta-Feira - UFES Campus de Alegre - 07/07/21'],['Manhã','Manhã','Manhã','Manhã','Tarde','Tarde','Tarde','Tarde','Entardecer','Entardecer','Entardecer','Entardecer', ],['Temperatura','Umidade','Iluminância', 'UV-A','Temperatura','Umidade','Iluminância', 'UV-A','Temperatura','Umidade','Iluminância', 'UV-A']] nomelinhas=[['Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante Universitário','Restaurante 
Universitário','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Central','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Novo','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Prédio Reuni','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão','Portão'],[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]] # ## Lista de valores medidos em campo dados=[ #Manhã-------------\Tarde------------\Entardecer----- [22,59,978.52,13.13,26,34,984.38,6.88,21,48,631.84,0],#Restaurante Universitário [22,60,976.56,13.13,27,38,976.56,10,21,48,649.41,0], [22,60,935.55,13.75,27,34,984.38,8.75,21,48,640.63,0], [22,59,929.69,13.75,27,32,984.38,8.13,21,48,638.67,0], [22,60,962.89,10,27,32,980.47,13.75,21,48,634.77,0], [22,61,967.77,10,27,31,979.49,12.5,21,48,636.72,0], [22,61,973.63,13.13,27,31,977.54,16.25,21,48,630.86,0], [22,62,968.75,14.38,27,30,979.49,16.25,21,49,635.74,0], [22,62,969.73,13.75,27,30,978.52,16.25,21,48,630.86,0], 
[22,62,971.68,13.13,27,30,978.52,12.5,21,49,626.95,0], [22,62,968.75,13.13,27,31,978.52,16.25,21,48,622.07,0], [22,62,970.7,13.13,27,31,977.54,15.63,21,49,623.05,0], [22,62,974.61,13.13,27,29,979.49,16.25,21,49,612.3,0], [22,62,975.59,13.13,27,31,978.52,16.25,21,49,629.88,0], [22,62,977.54,13.13,27,32,978.52,14.38,21,49,603.52,0], [22,62,976.56,13.75,27,33,978.52,16.25,21,49,585.94,0], [22,62,977.54,11.25,27,34,978.52,16.25,21,49,576.17,0], [22,62,976.56,13.75,27,35,978.52,16.25,21,48,572.27,0], [22,62,976.56,13.13,27,34,978.52,15.63,21,48,566.41,0], #--------------------------------------- [22,63,972.66,12.5,25,37,981.45,10.63,21,49,494.14,0], #Prédio Central [22,63,968.75,11.25,25,36,981.45,7.5,21,49,489.26,0], [22,63,970.7,13.13,25,35,978.52,12.5,21,49,485.35,0], [22,63,973.63,13.13,25,34,972.66,11.88,21,49,484.38,0], [22,63,973.63,13.13,25,34,972.66,15.63,21,49,480.47,0], [22,62,973.63,12.5,25,34,971.68,11.88,21,49,474.61,0], [22,62,974.61,13.13,25,34,972.66,15.63,21,49,473.63,0], [22,62,973.63,11.88,25,33,972.66,16.25,21,49,471.68,0], [22,62,974.61,13.13,25,33,972.66,13.13,21,49,468.75,0], [22,61,973.63,11.25,25,34,972.66,11.88,21,49,462.89,0], [22,60,973.63,10.63,25,33,972.66,11.88,21,49,460.94,0], [22,60,973.63,13.13,25,32,972.66,13.75,21,49,450.2,0], [22,61,973.63,13.13,25,32,972.66,15.63,21,49,448.24,0], [22,61,973.63,13.75,25,33,972.66,15.63,21,49,452.15,0], [22,61,973.63,13.13,25,32,972.66,15.63,21,49,445.31,0], [22,60,974.61,9.38,25,34,972.66,16.25,21,49,444.34,0], [22,60,974.61,11.88,25,35,972.66,12.5,21,49,435.55,0], [21,60,974.61,10.63,25,35,973.63,14.38,21,49,437.5,0], [21,61,974.61,13.13,25,36,972.66,15.63,21,49,434.57,0],#--------------------------------- [21,60,973.63,11.88,29,38,973.63,16.25,21,49,664.06,0],# Prédio Novo [21,60,976.56,12.5,29,37,971.68,16.25,21,50,661.13,0], [21,61,978.52,10.63,29,36,970.7,16.25,21,50,658.2,0], [21,61,967.77,11.88,29,34,971.68,16.25,21,50,658.2,0], [21,61,959.96,9.38,29,35,972.66,16.87,21,49,656.25,0], 
[21,61,975.59,12.5,29,37,972.66,13.75,21,49,653.32,0], [21,61,974.61,10.63,29,37,973.63,13.75,21,49,659.18,0], [21,62,974.61,12.5,29,36,974.61,16.25,21,49,655.27,0], [21,62,973.63,12.5,29,38,973.63,16.25,21,49,652.34,0], [21,62,973.63,13.13,29,38,972.66,12.5,21,49,649.41,0], [21,62,973.63,12.5,29,38,972.66,15.63,21,49,641.6,0], [20,63,969.73,12.5,29,40,972.66,15.63,21,49,646.48,0], [21,62,971.68,13.13,29,39,972.66,16.87,21,49,656.25,0], [21,62,974.61,13.13,29,35,971.68,15.63,21,49,645.51,0], [30,59,975.59,12.5,29,33,971.68,15,21,49,639.65,0], [20,63,974.61,11.88,29,32,971.68,11.88,21,49,625,0], [20,60,974.61,12.5,29,30,970.7,16.25,21,49,629.88,0], [20,63,973.63,12.5,29,30,970.7,14.38,21,49,629.88,0], [20,63,974.61,12.5,29,31,972.66,15.63,21,49,641.6,0],#-------------------------- [22,62,975.59,11.88,27,30,983.4,11.88,21,48,940.43,0], # <NAME> [22,62,975.59,11.88,27,29,985.35,11.25,21,48,937.5,0], [22,62,975.59,11.88,27,29,982.42,12.5,21,48,938.48,0], [21,62,974.61,11.88,27,30,978.52,16.87,21,48,937.5,0], [21,62,975.59,8.13,27,29,978.52,16.87,21,48,937.5,0], [21,62,975.59,10.63,27,29,978.52,16.25,21,48,937.5,0], [21,62,975.59,11.88,27,28,978.52,13.13,21,48,937.5,0], [21,62,974.61,11.25,27,30,978.52,16.25,21,48,937.5,0], [21,62,975.5,8.75,27,32,979.49,13.75,21,48,937.5,0], [21,62,974.61,8.75,27,33,978.52,15.63,21,48,937.5,0], [21,62,975.59,8.13,27,35,979.49,11.25,21,48,939.45,0], [21,62,974.61,10.63,27,36,979.49,15.63,21,48,938.48,0], [21,62,975.59,11.88,27,39,979.49,15,21,48,937.5,0], [21,62,974.61,11.25,27,41,979.49,11.25,21,48,938.48,0], [21,62,975.59,11.88,27,40,979.49,14.38,21,48,937.5,0], [21,62,974.61,11.88,27,39,979.49,15,21,48,937.5,0], [21,62,974.61,11.25,27,40,979.49,10.63,21,48,937.5,0], [21,62,975.59,11.25,27,39,978.52,15,21,48,937.5,0], [21,62,974.61,10.63,27,38,979.49,10.63,21,48,937.5,0], #-------------------------------------- [20,68,970.7,11.88,29,39,971.68,16.87,21,48,935.55,0], #Portão [20,68,970.7,11.88,29,37,970.7,16.87,21,48,935.55,0], 
[20,68,970.7,11.25,29,36,971.68,16.87,21,48,934.57,0], [20,68,970.7,11.88,29,36,972.66,16.25,21,48,934.57,0], [20,68,970.7,11.88,29,38,972.66,16.25,21,48,933.59,0], [20,68,970.7,11.88,29,37,972.66,16.87,21,48,935.55,0], [20,67,970.7,11.88,29,37,972.66,16.25,21,48,933.59,0], [20,67,970.7,11.88,29,35,972.66,16.87,21,48,933.59,0], [20,67,970.7,9.38,29,34,970.7,16.25,21,48,934.57,0], [20,67,970.7,11.25,29,32,971.68,16.25,21,48,933.59,0], [20,67,970.7,11.88,29,32,971.68,16.25,21,48,933.59,0], [20,67,970.7,11.88,29,30,970.7,16.25,21,48,932.62,0], [20,67,970.7,11.88,29,30,970.7,16.25,21,48,933.59,0], [20,66,970.7,10,29,27,971.68,16.25,21,48,933.59,0], [20,66,970.7,11.88,29,27,972.66,13.13,21,48,933.59,0], [20,66,970.7,11.25,29,27,972.66,16.25,21,48,933.59,0], [20,66,970.7,11.25,28,28,972.66,16.87,21,48,933.59,0], [20,66,970.7,10,28,28,970.7,16.25,21,48,933.59,0], [20,66,970.7,11.88,28,28,971.68,16.87,21,48,931.64,0]#-------------------------------------------- ] # ## Criando um DataFrame com a lista de dados acima # Na célula abaixo será criado um DataFrame utilizando .set_option para mudar a limitação do número de linhas para "None"=nenhum para visualizarmos a tabela como um todo. dados_ufes_quarta=pd.DataFrame(data=dados, index=nomelinhas,columns=nomecolunas) pd.set_option('max_rows',None) dados_ufes_quarta.index.names=['Local','Medida'] dados_ufes_quarta # ### <p style='text-align: justify;'> Agruparemos os dados por local (ex: portão, prédio novo...) e período (ex: manhã, tarde...) de medida. Faremos isso para obter as estatísticas referentes a cada local medido em um determinado período. Usaremos também o .round(2) para arredondar para 2 casas decimais.</p> dados_agrupados=dados_ufes_quarta.groupby(axis=0, level=0) dados_agrupados.describe(percentiles=[]).round(2).transpose() # #### Mais informações sobre a [estatística descritiva](https://pandas.pydata.org/docs/user_guide/basics.html#descriptive-statistics) oferecida pelo PANDAS.
Analise_Dados_Ufes_070721.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import datetime


def add_tech_ind(df):
    """Return a copy of *df* with technical-indicator columns added.

    Expects the usual OHLCV price columns: 'Date' (ISO yyyy-mm-dd strings),
    'Open', 'High', 'Low', 'Close' and 'Volume'.  The input frame is not
    modified.  Added columns:

    * ret1, ret2, ret4, ret8, ret16 -- close-to-close return and its
      trailing moving averages
    * vol2, vol4, vol8, vol16       -- trailing moving averages of volume
    * weekday                       -- 1.0 on Mondays and Fridays, else 0.0
    * sub, sub2, sub4, sub8, sub16  -- candlestick lower-shadow ratio + MAs
    * MACD                          -- EMA(close, 12) - EMA(close, 26)
    * bbbb_day, cccc_day            -- intraday range indicators
    * K, D                          -- stochastic oscillator (5-day window)
    """
    df = df.copy()

    def ma(arr, n):
        # Trailing n-period simple moving average of a 1-D float array.
        # The first n-1 entries (no full window available) are padded with 0.
        if n == 1:
            return arr
        result = [0 for i in range(n - 1)]
        result1 = arr[:-n + 1].copy()
        length = len(arr)
        for i in range(1, n):
            result1 += arr[i:length + 1 - n + i]
        result1 /= n
        result += list(result1)
        return np.array(result)

    def ema(arr, n):
        # Exponential moving average with smoothing factor 2 / (n + 1);
        # seeded with the first observation.
        if n == 1:
            return arr
        result = []
        for i in range(len(arr)):
            temp = arr[i]
            if i == 0:
                result.append(temp)
            else:
                temp = 2 / (1 + n) * temp
                temp += (1 - 2 / (1 + n)) * result[-1]
                result.append(temp)
        return np.array(result)

    def add_ma_ret(df):
        '''Add the close-to-close return (ret1) and its moving averages.'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        ret = cps[1:] / cps[:-1] - 1.0
        ret1 = [0.0] + list(ret)
        df['ret1'] = ret1
        ret1 = np.array(ret1)
        # NOTE: the original also built a hand-rolled 2-day average list here
        # that was never used -- ma(ret1, 2) yields the same values.
        df['ret2'] = ma(ret1, 2)
        df['ret4'] = ma(ret1, 4)
        df['ret8'] = ma(ret1, 8)
        df['ret16'] = ma(ret1, 16)

    def add_ma_vol(df):
        '''Add trailing moving averages of volume.'''
        vol = df.loc[:, 'Volume'].values
        vol = np.array([float(val) for val in vol], dtype='float')
        df['vol2'] = ma(vol, 2)
        df['vol4'] = ma(vol, 4)
        df['vol8'] = ma(vol, 8)
        df['vol16'] = ma(vol, 16)

    def add_weekday_dummyvar(df):
        '''Add a dummy that is 1.0 on Mondays and Fridays, 0.0 otherwise.'''
        dates = df['Date'].values
        dates = [datetime.date(int(val[:4]), int(val[5:7]), int(val[8:]))
                 for val in dates]
        # weekday() == 0 is Monday, == 4 is Friday.
        weekday = [(val.weekday() == 0 or val.weekday() == 4) for val in dates]
        result = np.zeros(len(df))
        result[weekday] = 1
        df['weekday'] = result

    def add_subline(df):
        '''Add the candlestick lower-shadow ratio and its moving averages.'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        ops = df.loc[:, 'Open'].values
        ops = np.array([float(val) for val in ops])
        lows = df.loc[:, 'Low'].values
        lows = np.array([float(val) for val in lows])
        # Bottom of the candle body: the lower of open and close.
        result = np.min(np.vstack((ops, cps)), axis=0)
        df['sub'] = (result - lows) / cps
        df['sub2'] = ma((result - lows) / cps, 2)
        df['sub4'] = ma((result - lows) / cps, 4)
        df['sub8'] = ma((result - lows) / cps, 8)
        df['sub16'] = ma((result - lows) / cps, 16)

    def add_macd(df):
        '''Add the MACD line: EMA(close, 12) - EMA(close, 26).'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        df['MACD'] = ema(cps, 12) - ema(cps, 26)

    def add_bbbb_day(df):
        '''Add (high + low) - 2 * close for each day.'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        lows = df.loc[:, 'Low'].values
        lows = np.array([float(val) for val in lows])
        highs = df.loc[:, 'High'].values
        highs = np.array([float(val) for val in highs])
        df['bbbb_day'] = (highs + lows) - 2 * cps

    def add_cccc_day(df):
        '''Add half the intraday range relative to the close.'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        lows = df.loc[:, 'Low'].values
        lows = np.array([float(val) for val in lows])
        highs = df.loc[:, 'High'].values
        highs = np.array([float(val) for val in highs])
        df['cccc_day'] = (highs - lows) / 2 / cps

    def add_kdj(df):
        '''Add the K and D lines of the stochastic oscillator (5-day window).'''
        cps = df.loc[:, 'Close'].values
        cps = np.array([float(val) for val in cps])
        lows = df.loc[:, 'Low'].values
        lows = np.array([float(val) for val in lows])
        highs = df.loc[:, 'High'].values
        highs = np.array([float(val) for val in highs])
        K = np.zeros(len(df))
        for i in range(len(df)):
            start = max(i - 4, 0)
            end = i + 1
            K[i] = (cps[i] - np.min(lows[start:end])) / (np.max(highs[start:end]) - np.min(lows[start:end]))
        # D is a 3-period EMA of K.
        D = ema(K, 3)
        df['K'] = K
        df['D'] = D

    add_ma_ret(df)
    add_ma_vol(df)
    add_weekday_dummyvar(df)
    add_subline(df)
    add_macd(df)
    add_bbbb_day(df)
    add_cccc_day(df)
    add_kdj(df)
    return df


if __name__ == "__main__":
    # Load the three yearly GOOG price files and stack them in the same order
    # as the original notebook (GOOG, then GOOG_2018, then GOOG_2019).
    df = pd.read_csv("data/GOOG_2019.csv")
    df1 = pd.read_csv("data/GOOG.csv")
    df2 = pd.read_csv("data/GOOG_2018.csv")
    # DataFrame.append was removed in pandas 2.0; pd.concat with
    # ignore_index=True produces the identical stacked frame.
    big_df = pd.concat([df1, df2, df], ignore_index=True)

    to_save = add_tech_ind(big_df)
    # Split back out by year: 1761 rows of history, 253 trading days in 2018,
    # 251 in 2019.
    to_save[:1761].to_csv('data/GOOG_mod.csv', index=False)
    to_save[1761: 1761 + 253].to_csv('data/GOOG_2018mod.csv', index=False)
    to_save[1761 + 253: 1761 + 253 + 251].to_csv('data/GOOG_2019mod.csv', index=False)
model&data/data_prep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Cluster Foursquare check-in coordinates with MeanShift and report the
# cluster center that lies closest to one of the listed office locations.

import numpy as np
import pandas as pd
from sklearn.cluster import MeanShift
import csv
import matplotlib

# Peek at the raw file object, then close it (the original leaked the handle).
def_file = open('checkins.dat', 'r')
print(def_file)
def_file.close()

# Convert the pipe-separated checkins.dat into a CSV, keeping only rows with
# all six fields present and a non-empty latitude (row[3]) / longitude (row[4]).
with open('checkins.dat') as dat_file, open('file.csv', 'w') as csv_file:
    csv_writer = csv.writer(csv_file)
    for line in dat_file:
        row = [field.strip() for field in line.split('|')]
        if len(row) == 6 and row[3] and row[4]:
            csv_writer.writerow(row)

df = pd.read_csv('file.csv')
print(df.head())

# Keep only the first 100000 data rows so that MeanShift stays tractable.
df1 = pd.read_csv('file.csv', skiprows=lambda x: x in range(100000, 396634))
df1.head()

latit = df1['latitude'].values
longit = df1['longitude'].values
print(latit)
print(longit)

matplotlib.pyplot.figure(figsize=(8, 8))
matplotlib.pyplot.plot(latit, longit, 'o')

dat = df1[['latitude', 'longitude']]
dat

# Cluster the coordinates; bandwidth of 0.1 degree per the task statement.
clustering = MeanShift(bandwidth=0.1, n_jobs=-1).fit(dat)
ans = clustering.predict(dat)
ans

from collections import Counter
Counter(ans)

# BUG FIX: the original hand-rolled counter set each label's count to 0 on
# first sight, undercounting every cluster by one (so "> 15" really required
# 17 members).  Counter gives the true cluster sizes.
numb_clust = Counter(ans)

# Keep only clusters with more than 15 check-ins.
true_clust = [label for label, count in numb_clust.items() if count > 15]
true_clust

true_centers = clustering.cluster_centers_[true_clust]
print(true_centers)

# Office locations as (latitude, longitude) pairs.
office_adresses = [
    [33.751277, -118.188740],
    [25.867736, -80.324116],
    [51.503016, -0.075479],
    [52.378894, 4.885084],
    [39.366487, 117.036146],
    [-33.868457, 151.205134],
]


def dist(x, y):
    """Return the Euclidean distance from point (x, y) to the nearest office."""
    temporary_array = [((x - coord[0]) ** 2 + (y - coord[1]) ** 2) ** (1 / 2)
                       for coord in office_adresses]
    return min(temporary_array)


# Rank the surviving cluster centers by distance to the nearest office and
# print the closest one: [distance, latitude, longitude].
result = []
for elements in true_centers:
    a = dist(elements[0], elements[1])
    result.append([a, elements[0], elements[1]])
print(sorted(result)[0])
unsupervised-learning/week1/banners_task.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Simple Recommender with Manipulated Values -- Orientation Only # # - Recommender Systems # - Cosine similarity -- X + Y # - Modeling # - OHE, cosine similarity, sort similarities within in function # - **Simple model - Orientation only** # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy import sparse import sys from sklearn.metrics.pairwise import pairwise_distances, cosine_distances, cosine_similarity from category_encoders import OneHotEncoder # - # read in full cupid data cupid = pd.read_pickle('data/clean_cupid.pkl') cupid.drop(columns = ['status', 'location'], inplace = True) cupid.head(3) cupid_df = pd.read_pickle('data/grouped_cupid.pkl') cupid_df.head(3) # --- # #### Subset "Orientation" Datasets & OneHotEncode # + # straight male straight_male = cupid_df[(cupid_df['sex'] == 'm') & (cupid_df['orientation'] == 'straight')].head(3000) # ohe straight_male_encoded = OneHotEncoder(use_cat_names = True).fit_transform(straight_male) # --------------- # straight female straight_female = cupid_df[(cupid_df['sex'] == 'f') & (cupid_df['orientation'] == 'straight')].head(3000) # ohe straight_female_encoded = OneHotEncoder(use_cat_names = True).fit_transform(straight_female) # --------------- # gay male gay_male = cupid_df[(cupid_df['sex'] == 'm') & (cupid_df['orientation'] == 'gay')] # ohe gay_male_encoded = OneHotEncoder(use_cat_names = True).fit_transform(gay_male) # --------------- # gay female gay_female = cupid_df[(cupid_df['sex'] == 'f') & (cupid_df['orientation'] == 'gay')] # ohe gay_female_encoded = OneHotEncoder(use_cat_names = True).fit_transform(gay_female) # --------------- # bi m/f bi = cupid_df[cupid_df['orientation'] == 'bisexual'] # ohe bi_encoded = 
OneHotEncoder(use_cat_names = True).fit_transform(bi) # - # ---- # #### Recommender System # + jupyter={"outputs_hidden": true, "source_hidden": true} # THIS IS THE WAY JACOB MENTIONED # .loc[index] # straight female loking for straight male similarity = cosine_similarity(pd.DataFrame(straight_female_encoded.loc[6]).T, straight_male_encoded).tolist()[0] pd.DataFrame(similarity, columns = ['similarity'], index = straight_male_encoded.index).sort_values(by = 'similarity', ascending = False).iloc[:3] # + # test 4- OHE seperately def lover_recommender_test4(sex, orientation, index): """ index (int): user_id we're looking to partner with matching orientation sex (str): m, f orientation: straight, gay, bi/bisexual """ # straight female looking for straight male if orientation == 'straight' and sex == 'f': similarity = cosine_similarity(pd.DataFrame(straight_female_encoded.loc[index]).T, straight_male_encoded).tolist()[0] return pd.DataFrame(similarity, columns = ['similarity'], index = straight_male_encoded.index).sort_values(by = 'similarity', ascending = False).iloc[:3] # straight male looking for straight female elif orientation == 'straight' and sex == 'm': # cosine_similarity similarity = cosine_similarity(pd.DataFrame(straight_male_encoded.loc[index]).T, straight_female_encoded).tolist()[0] return pd.DataFrame(similarity, columns = ['similarity'], index = straight_female_encoded.index).sort_values(by = 'similarity', ascending = False).iloc[:3] # gay male looking for gay male elif orientation == 'gay' and sex == 'm': # create sparse matrix gay_male_sparse = sparse.csr_matrix(gay_male_encoded) # cosine_similarity similarities_gay_male = cosine_similarity(gay_male_sparse) return gay_male_df[index].sort_values(ascending = False).iloc[1:4] # gay female looking for gay female elif orientation == 'gay' and sex == 'f': # create sparse matrix gay_female_sparse = sparse.csr_matrix(gay_female_encoded) # cosine_similarity similarities_gay_female = 
cosine_similarity(gay_female_sparse) return gay_female_df[index].sort_values(ascending = False).iloc[1:4] # bisexual male/female looking for bisexual male/female elif ('bi' in orientation and sex == 'f') or ('bi' in orientation and sex == 'm'): # create sparse matrix bi_sparse = sparse.csr_matrix(bi_encoded) # cosine_similarity similarities_bi = cosine_similarity(bi_sparse) return bi_df[index].sort_values(ascending = False).iloc[1:4] # + lover_recommender_test4('m', 'straight', 2) # lover_recommender_test4('f', 'straight', 6) # lover_recommender_test4('m', 'gay', 55) #lover_recommender_test4('f', 'bi', 37) # - cupid_df.loc[2] cupid_df.loc[3179] cupid.loc[37] cupid.loc[11527] # --- # #### If there were random inputs (like the app) # + # function to ohe, create sparse matrices, and return the cosine similarity based on orientation def invalue_to_similarity(invalue_df, orientation_df): """ invalue_df: converted DataFrame of user inputs orientation_df: DataFrame of all people of that orientation """ # concat input values to orientation df to prep for cosine similarity df = pd.concat([orientation_df, invalue_df]) # ohe df_encoded = OneHotEncoder(use_cat_names = True).fit_transform(df) # make cosine_similarity input (input X) cosine_input = pd.DataFrame(df_encoded.iloc[-1]).T # drop last encoded row (input Y) df_encoded.drop(df_encoded.tail(1).index, inplace = True) # cosine_similarity similarity = cosine_similarity(cosine_input, df_encoded) # return top 5 matches top5 = pd.DataFrame(similarity.tolist()[0], columns = ['similarity'], index = df_encoded.index).sort_values(by = 'similarity', ascending = False).iloc[:5] # return top 5 matches in a df with cosine similarities results = pd.DataFrame(columns = cupid.columns) for i in top5.index: results = results.append(pd.DataFrame(cupid.loc[i]).T) matches = pd.merge(top5, results, on = top5.index) matches.rename(columns = {'key_0' : 'user_id'}, inplace = True) matches.set_index('user_id', drop = True, inplace = True) return 
matches # + # test 5 -- using a new user input # referenced https://stackoverflow.com/questions/44296648/using-lists-in-pandas-to-replace-column-names def lover_recommender_test5(invalue): """ invalue (list): survey/streamlit app responses """ # convert input to DataFrame invalue_df = pd.DataFrame(invalue).T.rename(columns = {i:j for i,j in zip(np.arange(11), cupid_df.columns)}) # ---------------- # straight female looking for straight female if invalue_df['orientation'].unique()[0] == 'straight' and invalue_df['sex'].unique()[0] == 'f': # straight male straight_male = cupid_df[(cupid_df['sex'] == 'm') & (cupid_df['orientation'] == 'straight')].head(3000) # call 'invalue_to_similarity' function to return similarities return invalue_to_similarity(invalue_df, straight_male) # straight male looking for straight male elif invalue_df['orientation'].unique()[0] == 'straight' and invalue_df['sex'].unique()[0] == 'm': # straight female straight_female = cupid_df[(cupid_df['sex'] == 'f') & (cupid_df['orientation'] == 'straight')].head(3000) # call 'invalue_to_similarity' function to return similarities return invalue_to_similarity(invalue_df, straight_female) # gay male looking for gay male elif invalue_df['orientation'].unique()[0] == 'gay' and invalue_df['sex'].unique()[0] == 'm': # gay male gay_male = cupid_df[(cupid_df['sex'] == 'm') & (cupid_df['orientation'] == 'gay')] # call 'invalue_to_similarity' function to return similarities return invalue_to_similarity(invalue_df, gay_male) # gay female looking for gay female elif invalue_df['orientation'].unique()[0] == 'gay' and invalue_df['sex'].unique()[0] == 'f': # gay female gay_female = cupid_df[(cupid_df['sex'] == 'f') & (cupid_df['orientation'] == 'gay')] # call 'invalue_to_similarity' function to return similarities return invalue_to_similarity(invalue_df, gay_female) # bisexual male/female looking for bisexual male/female elif (invalue_df['orientation'].unique()[0] == 'bisexual' and invalue_df['sex'].unique()[0] == 
'f') or \ (invalue_df['orientation'].unique()[0] == 'bisexual' and invalue_df['sex'].unique()[0] == 'm'): # bi individual bi = cupid_df[cupid_df['orientation'] == 'bisexual'] # call 'invalue_to_similarity' function to return similarities return invalue_to_similarity(invalue_df, bi) # - # #### Ask questions to mimic random/app inputs # + # input / up + down arrow age = int(input('How old are you?')) # dropdowns sex = str(input('What gender do you identify as?')) orientation = str(input('What sexual orientation do you identify as?')) body_type = str(input("What's your body type?")) diet = str(input('What does your diet consist of?')) drinks = str(input('Do you consume alcoholic beverages?')) drugs = str(input('Do you use drugs?')) offspring = str(input('Do you have children and/or plan on having [more] children?')) pets = str(input("What's your sentiment on dogs and/or cats")) religion = str(input("Does religion matter to you?")) smokes = str(input("Do you smoke?")) invalue = np.array([age, sex, orientation, body_type, diet, drinks, drugs, offspring, pets, religion, smokes]) # + # gay female # invalue = [19, 'f', 'gay', 'rather not say', 'vegan', 'no', 'yes', "doesn't have kids", 'likes dogs and cats', 'atheism', 'yes'] lover_recommender_test5(invalue) # - # #### Just trying additional random inputs # + # straight female invalue2 = [30, 'f', 'straight', 'rather not say', 'anything', 'yes', 'no', "doesn't have kids", 'likes dogs', 'atheism', 'no'] lover_recommender_test5(invalue2) # + # straight male invalue3 = [30, 'm', 'straight', 'thin', 'vegetarian', 'no', 'sometimes', "doesn't have kids", 'likes cats', 'catholicism', 'sometimes'] lover_recommender_test5(invalue3) # + # gay male invalue3 = [22, 'm', 'gay', 'full figured', 'vegetarian', 'yes', 'yes', "has kids, and wants more", 'likes cats', 'agnosticism', 'yes'] lover_recommender_test5(invalue3) # + # bi male invalue4 = [42, 'm', 'bisexual', 'average', 'vegan', 'no', 'yes', "has kids", 'dislikes dogs and cats', 
'christianity', 'no'] lover_recommender_test5(invalue4) # + # bi female invalue5 = [27, 'f', 'bisexual', 'fit', 'anything', 'yes', 'yes', "wants kids", 'likes dogs', "atheism", 'yes'] lover_recommender_test5(invalue5) # -
workspace/06_simple-recommender-grouped-data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # K-Means Clustering
#
# Segments mall customers into groups based on annual income and spending
# score, using the elbow method to choose the number of clusters.

# ## Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# ## Importing the dataset
# Columns 3 and 4 are "Annual Income (k$)" and "Spending Score (1-100)".
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
print(X[:11])

# ## Using the elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):  # try k = 1..10
    # 'k-means++' seeds the centroids far apart; random_state fixes the seed
    # so the run is reproducible.
    kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(X)
    # inertia_ is the within-cluster sum of squares (WCSS) for this k.
    wcss.append(kmeans.inertia_)
plt.style.use('dark_background')
plt.plot(range(1, 11), wcss, color='blue', marker='*')  # WCSS vs. k
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()

# The elbow of the WCSS curve sits at k = 5, so 5 clusters are used below.
X[:11]

# ## Training the K-Means model on the dataset
kmeans = KMeans(n_clusters = 5, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X)  # cluster label (0..4) per customer

print(y_kmeans)

# Annual incomes of the points assigned to cluster 0.
X[y_kmeans == 0, 0]

# Spending scores of the points assigned to cluster 0.
X[y_kmeans == 0, 1]

# ## Visualising the clusters
# One scatter trace per cluster, plus the fitted centroids.
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 80, c = 'red', label = 'Cluster 1')  # s = marker size
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 80, c = 'blue', label = 'Cluster 2')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 80, c = 'green', label = 'Cluster 3')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 80, c = 'cyan', label = 'Cluster 4')
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 80, c = 'magenta', label = 'Cluster 5')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
Part 4 - Clustering/Section 24 - K-Means Clustering/Python/k_means_clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Finite Differences vs Noise # Copyright (C) 2020 <NAME> # # <details> # <summary>MIT License</summary> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # </details> # + jupyter={"outputs_hidden": false} import numpy as np import numpy.linalg as la import matplotlib.pyplot as pt # + jupyter={"outputs_hidden": false} def f(x): return np.sin(2*x) def df(x): return 2*np.cos(2*x) # - # Here's a pretty simple function and its derivative: # + jupyter={"outputs_hidden": false} plot_x = np.linspace(-1, 1, 200) pt.plot(plot_x, f(plot_x), label="f") pt.plot(plot_x, df(plot_x), label="df/dx") pt.grid() pt.legend() # - # Now what happens to our numerical differentiation if # **our function values have a slight amount of error**? 
# + jupyter={"outputs_hidden": false} # set up grid n = 10 x = np.linspace(-1, 1, n) h = x[1] - x[0] x_df_result = x[1:-1] # chop off first, last point # evaluate f, perturb data, finite differences of f f_x = f(x) f_x += 0.025*np.random.randn(n) df_num_x = (f_x[2:] - f_x[:-2])/(2*h) # plot pt.plot(x, f_x, "o-", label="f") pt.plot(plot_x, df(plot_x), label="df/dx") pt.plot(x_df_result, df_num_x, label="df/dx num") pt.grid() pt.legend(loc="best") # - # * Now what happens if you set `n = 100` instead of `n = 10`?
cleared-demos/quadrature_and_diff/Finite Differences vs Noise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Visualisation of whole-slide-image (WSI) nucleus measurements:
# interactive spatial scatter plots plus per-cluster morphology profiles.

from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.graph_objs import *
import plotly.express as px
import seaborn as sns
import os
from glob import glob
import string
import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from numpy import linalg as LA
from sklearn.metrics import pairwise_distances_argmin_min
import hdbscan
from scipy.cluster.hierarchy import fcluster
from sklearn import preprocessing
from sklearn.cluster import KMeans
import umap
import warnings
warnings.filterwarnings("ignore")


# Plot WSI with colored labels
def scattered_wsi(df, x, y, hue, size, opacity, auto_open, filename):
    """Write an interactive spatial scatter of the WSI objects in `df`
    (coordinates `x`, `y`, colored by `hue`) to
    '<filename>.spatial_projection.html'."""
    fig = px.scatter(df, x=x, y=y, color=hue,
                     width=800, height=800,
                     color_discrete_sequence=px.colors.qualitative.Set2)
    fig.update_traces(marker=dict(size=size, opacity=opacity))
    fig.update_layout(template='simple_white')
    fig.update_layout(legend={'itemsizing': 'constant'})
    fig.write_html(filename + '.spatial_projection.html', auto_open=auto_open)
    return


df = pd.read_pickle('../data/id_52.measurements.smoothed.r10000.pkl')
scattered_wsi(df, 'cx', 'cy', 'area', 1, 1, True, 'r10000')

# Morphology features whose per-cluster profiles are plotted below.
features2cluster = ['area',
                    #'perimeter',
                    #'solidity',
                    'eccentricity',
                    'circularity',
                    'mean_intensity',
                    'cov_intensity'
                    ]


def _melt_by_cluster(df, cluster_col, features):
    """Return the selected features in long format, one block per cluster,
    tagged with an 'ID' column of the form 'id_<cluster>'.

    Uses pd.concat on a list of frames: the original per-iteration
    DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    """
    frames = []
    for c in set(df[cluster_col]):
        data = pd.melt(df[df[cluster_col] == c][features])
        data['ID'] = 'id_' + str(c)
        frames.append(data)
    return pd.concat(frames, ignore_index=True)


# Plot the morphology profiles by cluster id.
for filename in glob('../data_intensity/pkl/id_*.measurements.covd.pkl.intensityANDmorphology.csv.gz')[:1]:
    print(filename)
    df = pd.read_csv(filename)
    df['clusterID3'] = df['clusterID3'].add(1)  # make cluster ids 1-based
    fig = plt.figure(figsize=(15, 8))
    fig.subplots_adjust(hspace=0.4, wspace=0.4)

    # Left panel: profiles grouped by the CovD-only cluster id.
    dfmelted = _melt_by_cluster(df, "clusterID1", features2cluster)
    ax = fig.add_subplot(1, 2, 1)
    ax.set(ylim=(0, 1))
    sns.lineplot(x="variable", y="value", hue='ID',
                 err_style='band',
                 data=dfmelted, ci='sd',
                 markers=False,
                 legend=False).set_title('Feature mean value profiles by CovD-only cluster ID')

    # Right panel: profiles grouped by the final cluster id, annotated
    # with the per-cluster population counts.
    dfmelted = _melt_by_cluster(df, "clusterID3", features2cluster)
    ax = fig.add_subplot(1, 2, 2)
    ax.set(ylim=(0, 1))
    ax.text(0.5, 0.7, str(df['clusterID3'].value_counts().to_frame()), fontsize=12, ha='center')
    sns.lineplot(x="variable", y="value", hue='ID',
                 err_style='band',
                 data=dfmelted, ci='sd',
                 markers=False,
                 dashes=False).set_title('Feature mean value profiles by final cluster ID')
    ax.legend(loc='upper right')
    fig.savefig(filename + '.profiles.png')

# One spatial projection per final cluster for slide id_52.
for filename in glob('../data_intensity/pkl/id_52.measurements.covd.pkl.intensityANDmorphology.csv.gz')[:1]:
    print(filename)
    df = pd.read_csv(filename)
    df['clusterID3'] = df['clusterID3'].add(1)
    # set coloring feature
    df['ID'] = df.apply(lambda row: 'id_' + str(int(row.clusterID3)), axis=1)
    clusters = list(set(df['clusterID3']))
    for c in clusters:
        fdf = df['clusterID3'] == c
        scattered_wsi(df[fdf], 'cx', 'cy', 'area', 2, 1, True, filename + str(c))

df.columns
SG/pipeline/ipynb/test.visualizeWSI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spark 1: Extract, transform, and load a CSV file with Spark (from bucket to database) # # In this lesson we use PySpark to load some sales rows from CSV files located in a storage bucket and save them to `fact_sales` database table in PostgreSQL. # # ## Step 1: Add a file to the storage bucket # # - Execute `taito open bucket` on command-line to open the locally running bucket on web browser. # - TIP: You can alternatively use `taito open bucket:ENV` to connect to a non-local bucket (ENV is `dev`, `test`, `stag`, or `prod`). # - Login in with access key `minio` and secret key `secret1234`. # - Create a folder named `sales` and upload the Sales.csv file to the folder. # # ## Step 2: Execute the code # + # Imports from pyspark.sql.functions import * from pyspark.sql.types import * # Load generic helper functions # %run ../../common/jupyter.ipynb import src_common_database as db # %run ../../common/spark.ipynb import src_common_util as util # Use storage bucket defined with environment variables bucket = os.environ['STORAGE_BUCKET_URL'] protocol = st.init_spark(sc) # + # Read CSV files from the sales folder df = spark.read.csv(protocol + bucket + "/sales", # Read from /sales folder pathGlobFilter="*.csv", # Read only *.csv files recursiveFileLookup=True, # Read recursively also from subfolders modifiedAfter="2021-04-01T00:00:00", # Fetch only files modified after this timestamp header=True, # Each CSV file includes a header row with column names ignoreLeadingWhiteSpace=True, # Trim column values ignoreTrailingWhiteSpace=True, # Trim column values mode="FAILFAST") # Do not allow invalid CSV # DEBUG: Show the contents df.show() # + # Change dataframe schema to match the database table and # generate unique key by concatenating order number and 
product SKU db_df = df.select( concat(col("Order"), lit("."), col("Product")).alias("key"), col("Date").alias("date_key"), col("Product").alias("product_key"), col("Order").alias("order_number"), col("Quantity").alias("quantity").cast(IntegerType()), col("Price").alias("price").cast(FloatType()) ); # DEBUG: Show the renamed schema db_df.printSchema() # + # Insert data to the fact_sales database table # NOTE: If you get "ERROR: duplicate key value violates unique constraint", execute `taito init --clean` to clean your database from old data. db_df.write.mode("append").jdbc(db.get_jdbc_url(), "fact_sales", properties=db.get_jdbc_options()) # DEBUG: Show the data stored in database spark.read.jdbc(db.get_jdbc_url(), "fact_sales", properties=db.get_jdbc_options()).show() # - # ## Step 3: Connect to the database with Taito CLI # # - Execute `taito db connect` on command-line to connect to the local database. # - TIP: You can alternatively use `taito db connect:ENV` to connect to a non-local database (ENV is `dev`, `test`, `stag`, or `prod`). # - Show all sales rows with `select * from fact_sales`. # ## Step 4: Change the implementation to update existing data and insert new data # # Unfortunately Spark does not currently support upsert (see [SPARK-19335](https://issues.apache.org/jira/browse/SPARK-19335)). There are multiple ways to go around this, for example: # # - Write data to a separate loading view that has a trigger that executes upsert for the target table on insert. # - Write data to a separate loading table that has a trigger that executes upsert for the target table on insert. # - Write data to a temporary table and then merge the data to the target table with a custom sql clause. # - Just overwrite all data in the target table, preferably with truncate mode to keep the table schema intact. # # This is how you can implement the first option (loading view). 
Normally we would add a new database migration for this with `taito db add NAME`, but since our database tables are not yet in production, we can just modify the existing migrations and redeploy them. # # TODO: UUSI MIGRAATIO NÄISTÄ! # # 1. Copy-paste the following content to the existing files: `database/deploy/fact_sales.sql`, `database/revert/fact_sales.sql`, and `database/verify/fact_sales.sql`. # # ```sql # -- Deploy fact_sales to pg # # BEGIN; # # CREATE TABLE fact_sales ( # key text PRIMARY KEY, # date_key text NOT NULL REFERENCES dim_dates (key), # product_key text NOT NULL REFERENCES dim_products (key), # order_number text NOT NULL, # quantity integer NOT NULL, # price numeric(12,2) NOT NULL # ); # # CREATE VIEW load_sales AS SELECT * FROM fact_sales; # # CREATE OR REPLACE FUNCTION load_sales() RETURNS TRIGGER AS $$ # BEGIN # INSERT INTO fact_sales VALUES (NEW.*) # ON CONFLICT (key) DO # UPDATE SET # date_key = EXCLUDED.date_key, # product_key = EXCLUDED.product_key, # order_number = EXCLUDED.order_number, # quantity = EXCLUDED.quantity, # price = EXCLUDED.price; # RETURN new; # END; # $$ LANGUAGE plpgsql; # # CREATE TRIGGER load_sales # INSTEAD OF INSERT ON load_sales # FOR EACH ROW EXECUTE PROCEDURE load_sales(); # # COMMIT; # ``` # # ```sql # -- Revert fact_sales from pg # # BEGIN; # # DROP TRIGGER load_sales ON load_sales; # DROP FUNCTION load_sales; # DROP VIEW load_sales; # DROP TABLE fact_sales; # # COMMIT; # ``` # # ```sql # -- Verify fact_sales on pg # # BEGIN; # # SELECT key FROM load_sales LIMIT 1; # SELECT key FROM fact_sales LIMIT 1; # # ROLLBACK; # ``` # # 2. Redeploy database migrations and example data to local database with `taito init --clean`. # 3. Execute the following code to load CSV data to database yet again: # + tags=[] # Write the data to the "load_sales" view instead of "fact_sales" table db_df.write.mode("append").jdbc(db.get_jdbc_url(), "load_sales", properties=db.get_jdbc_options()) # DEBUG: Show the data stored in fact_sales. 
You manual data changes should have been overwritten. spark.read.jdbc(db.get_jdbc_url(), "fact_sales", properties=db.get_jdbc_options()).show() # - # 4. Connect to the database with `taito db connect` and modify some quantity and price values manually in the `fact_sales` table. Also delete one of the rows. # 5. Execute the following code to make sure your manual changes will be overwritten on data load. Note that the CSV data contains only 4 rows (orders 00000000003, 00000000004, and 00000000005). Other rows wont be overwritten. # + # Write the data to the "load_sales" view instead of "fact_sales" table db_df.write.mode("append").jdbc(db.get_jdbc_url(), "load_sales", properties=db.get_jdbc_options()) # DEBUG: Show the data stored in fact_sales. You manual data changes should have been overwritten. spark.read.jdbc(db.get_jdbc_url(), "fact_sales", properties=db.get_jdbc_options()).show() # - # ## Next lesson: [Spark 2 - Listen storage bucket for uploads with Spark](02.ipynb)
lab/lessons/Spark/01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Confidence-interval exercises (Student's t / z): every problem computes
# mean +/- t * (s / sqrt(n)).  The repeated copy-paste computation is
# factored into a single helper.

import numpy as np


def confidence_interval(mean, std_deviation, n, t):
    """Return (lower, upper) bounds of the interval mean +/- t * s/sqrt(n).

    Parameters:
        mean: sample mean.
        std_deviation: sample standard deviation s.
        n: sample size.
        t: critical value (Student's t or z, supplied by the exercise).
    """
    sem = std_deviation / np.sqrt(n)  # standard error of the mean
    return mean - t * sem, mean + t * sem


# 1) Sample of 61 elements, mean = 23.5, s = 3; 99% CI with t = 2.6.
lower, upper = confidence_interval(23.5, 3, 61, 2.6)
print(lower)
print(upper)

# 2) Can the lower bound of a CI for the mean be negative or zero?
# Numerically yes, when the sample is small and variability high, but such
# values carry no biological meaning.

# 3) n = 100, mean = 125 mmHg, s = 9; 95% CI with t = 2.
# Interval: [123.2, 126.8]

# 4) n = 9, mean = 125 mmHg, s = 9; 95% CI, t from the Student table
# with df = n - 1 = 8 (t = 2.31).
lower, upper = confidence_interval(125, 9, 9, 2.31)
print(lower)
print(upper)
# 95% CI: [118.07, 131.93]

# 5) Comparing exercises 3 and 4: the smaller sample in 4 yields a wider
# interval; larger samples shrink the standard error and hence the CI width.

# 6) True/False (95% CI):
# a) True: over repeated sampling, 95% of such intervals contain the mean.
# b) False.
# c) False: confidence intervals exist for other statistics too.

# 7) n = 100, mean = 123 mmHg, s = 8; 90% CI with t = 1.66.
lower, upper = confidence_interval(123, 8, 100, 1.66)
print(lower)
print(upper)
# Interval: [121.672, 124.328]

# 8) n = 16, mean = 50 cm, s = 2.4; 90% CI with t = 1.753.
lower, upper = confidence_interval(50, 2.4, 16, 1.753)
print(lower)
print(upper)
# Interval: [48.9482, 51.0518]

# 9) n = 400, mean = 905, s = 100; 86% CI (z = 1.48).
sigma = 100 / np.sqrt(400)  # standard error of the mean
print(sigma)
lower, upper = confidence_interval(905, 100, 400, 1.48)
print(lower)
print(upper)
# Answer: a) [897.60, 912.40]
estatistica-e-aplicacoes/aula09_pratica.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Intro
#
# A simple mock-up of the proposed campaigning platform "vandr".  At each
# time step every user has probability k of adding a new user: a random
# number r in [0, 1) is drawn per user and the move is accepted when k >= r.
# Users gain primary points for the users they add, plus 1/10 of the points
# of the users they recruited (first-generation secondary points).
#
# Ported to Python 3: print statements, and max() over dict keys instead of
# np.max(dict.keys()) (which raises TypeError on a dict_keys view).

# %matplotlib inline
import seaborn as sns
import numpy as np
from collections import *
import pandas as pd
import matplotlib.pyplot as plt
from copy import deepcopy
import itertools
import networkx as nx


# # Quick simulation to generate data
# ## Function definitions

def try_add_players(player_dict, current_time, k_ar, r_ar, sponentous=False):
    """Try one recruitment step for every player and return the updated registry.

    Player k's move is accepted when k_ar[k] >= r_ar[k, current_time]; an
    accepted move appends a brand-new player index to the recruiter's
    cumulative 'added' history and registers the new player.

    NOTE(review): 'sponentous' (sic: spontaneous) is reserved for players
    joining on their own; that path is not implemented yet.
    """
    # player_dict must be a dict of per-player records so we can track all
    # the actions of a player.
    new_index = max(player_dict) + 1
    # Iterate over a snapshot: the loop inserts new players into player_dict,
    # and mutating a dict while iterating it raises RuntimeError in Python 3.
    for k, v in list(player_dict.items()):
        if k_ar[k] >= r_ar[k, current_time]:
            if not sponentous:
                _temp_dict = player_dict[k]
                # Recover the recruiter's cumulative list of added players
                # (stored under the time of the most recent recruitment).
                _prev = None
                if _temp_dict['added']:
                    _prev_time = max(_temp_dict['added'])
                    _prev = _temp_dict['added'][_prev_time]
                if _prev:
                    player_dict[k]['added'][current_time] = _prev + [new_index]
                else:
                    player_dict[k]['added'][current_time] = [new_index]
                # Register the newcomer with its own (empty) history.
                player_dict[new_index] = defaultdict(start_time=current_time, added={})
                new_index = new_index + 1
    return player_dict


def setup_sim(n_inital_players=3):
    """Create the initial player registry with n_inital_players at t = 0."""
    player_dict = defaultdict()
    for n in range(n_inital_players):
        player_dict[n] = defaultdict(start_time=0, added={})
    return player_dict


def run_sim(total_steps, total_number_players, player_dict, verbose=False):
    """Advance the simulation until total_steps or total_number_players is hit.

    A step that would overshoot total_number_players is discarded (the step
    is tried on a deepcopy and only committed when within the cap).

    NOTE(review): k_ar and r_ar are read from module scope rather than being
    passed in -- confirm before reusing this function outside the notebook.
    """
    for time_step in range(total_steps - 1):
        if verbose:
            print(time_step, len(player_dict))
        if len(player_dict) < total_number_players:
            _player_dict = try_add_players(deepcopy(player_dict), time_step, k_ar, r_ar)
            if len(_player_dict) > total_number_players:
                break
            else:
                player_dict = _player_dict
    return player_dict


def setup_run_sim(total_steps, total_number_players, n_inital_players=3, verbose=False):
    """Convenience wrapper: set up and run a simulation in one call."""
    player_dict = setup_sim(n_inital_players=n_inital_players)
    player_dict = run_sim(total_steps, total_number_players, player_dict, verbose=verbose)
    return player_dict


def calc_primary_score(player_dict, total_steps, total_number_players):
    """Primary points per player over time.

    Returns an array of shape (total_steps, total_number_players); between
    recruitment events the previous score is carried forward.
    """
    _temp_ar = np.zeros((total_steps, total_number_players))
    for key, value in player_dict.items():
        if value:
            for t in range(total_steps - 1):
                if t in value['added']:
                    # 'added'[t] is the cumulative recruit list at time t.
                    # NOTE(review): the -1 offset is undocumented in the
                    # original -- confirm the intended scoring.
                    _temp_ar[t, key] = len(value['added'][t]) - 1
                elif t > 0 and (t < total_steps):
                    _temp_ar[t, key] = _temp_ar[t - 1, key]
    return _temp_ar


def calc_secondary_score(player_dict, total_steps, total_number_players, p_score_ar,
                         weight_factor=0.1, verbose=False):
    """Secondary points: weight_factor times the primary points of the
    players each player recruited (first generation only)."""
    _secondary_ar = np.zeros((total_steps, total_number_players))
    for key, value in player_dict.items():
        if value:
            for t in range(total_steps - 1):
                if t in value['added']:
                    _added_at_t = value['added'][t]
                    _first_gen_l = []
                    if verbose:
                        print(_added_at_t)
                    for added_player in _added_at_t:
                        _first_gen_l.append(p_score_ar[t, added_player])
                    _secondary_ar[t, key] = np.sum(_first_gen_l) * weight_factor
                else:
                    _secondary_ar[t, key] = _secondary_ar[t - 1, key]
    return _secondary_ar


def plot_scores_per_player(p_score_ar, s_score_ar):
    """Plot total (dashed) and primary (solid) points per player over time."""
    fig, ax = plt.subplots()
    palette = itertools.cycle(sns.color_palette())
    for i in range(len(p_score_ar[0, :])):
        _cl = next(palette)
        plt.plot(p_score_ar[:, i] + s_score_ar[:, i], "--", c=_cl)
        plt.plot(p_score_ar[:, i], "-", c=_cl)
    return fig, ax


def total_players_over_time(player_dict, last_time):
    """Number of players that had joined before each time step."""
    player_time_l = []
    for t in range(last_time):
        _counter = 0
        for key, value in player_dict.items():
            if value['start_time'] < t:
                _counter = _counter + 1
        player_time_l.append(_counter)
    return player_time_l


def calc_network(player_dict, time_point=-1):
    """Build the directed recruiter -> recruit graph.

    time_point: -1 means the last recorded time for each player; other
    values are not implemented yet.
    """
    VD = nx.DiGraph()
    for key, values in player_dict.items():
        if values['added']:
            _temp = values['added']
            _last_entry = max(_temp)
            for _added in _temp[_last_entry]:
                VD.add_edge(key, _added)
    return VD


# ## Simulation of vandr
# Ok, let's run a simple simulation of vandr!

k = 0.1
total_steps = 500
total_number_players = 500  # NOTE: there is a bug atm, total_steps == total_number_players
                            # in principle limited to ~8.5x10^6 ;-)
np.random.seed(0)  # fix the random seed for reproducibility
r_ar = np.random.random((total_steps, total_number_players))
k_ar = np.array([k] * total_number_players)  # uniform rates for each player

_dict = setup_run_sim(total_steps, total_number_players)
_p = calc_primary_score(_dict, total_steps, total_number_players)
_s = calc_secondary_score(_dict, total_steps, total_number_players, _p)

# Total number of players that participated.
len(_dict)

# Time at which the simulation stopped (start time of the last player).
last_time = _dict[max(_dict)]['start_time']
last_time

# # Visualisation
# ## Points made
fig, ax = plot_scores_per_player(_p, _s)
plt.plot(0, 0, c='gray', label="primary points")
plt.plot(0, 0, '--', c='gray', label="total points")
plt.legend(loc=2, fontsize=12)
plt.xlim(0, last_time)
plt.ylabel('points')
plt.xlabel('time')

# In this small network primary points unsurprisingly dominate.  Other
# actions such as donating money or campaigning are missing; these would
# drive up the secondary points.
fig, ax = plot_scores_per_player(_p[:-1, :1], _s[:-1, :1])
ax.set_xlim(0, last_time)
plt.suptitle("primary and secondary points of first player")
plt.ylabel("Points")
plt.xlabel("time")

# ## Number of players
player_time_l = total_players_over_time(_dict, last_time)
fig, ax = plt.subplots()
ax.plot(player_time_l, label='number of players')
plt.xlabel('Time')

# ## The network
# A very simple illustration of the network at the end of the simulation.
cl = sns.color_palette()
fig, ax = plt.subplots(figsize=(18, 5))
VD = calc_network(_dict)
nx.draw(VD, ax=ax, node_color=cl[5], alpha=0.8, node_size=42)

# # Data structure
# Per-player records are nested dictionaries: when a new connection is made,
# the cumulative connection list is stored under the current time, so the
# full history of the network is retained.  The player's start time is also
# logged; new entries (e.g. money raised) can be added.  A document store
# such as MongoDB would map naturally onto this layout.
_dict[1]

# # Next steps (note to self)
# Use the mock-up to tune the scoring system: how far should points be
# dominated by donations?  As discussed, larger donations should be
# weighted less.
ideas/vandr0.01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Funk-SVD style matrix factorization on MovieLens 100k: biased SGD over the
# observed ratings, with a train/test split that holds out 10 ratings per user.

import pandas as pd
import numpy as np

names = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('./ml-100k/u.data', sep='\t', names=names)
n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]

# Create r_{ui}, our ratings matrix (0 = unobserved; ids in the file are 1-based).
ratings = np.zeros((n_users, n_items))
for row in df.itertuples():
    ratings[row[1]-1, row[2]-1] = row[3]


def train_test_split(ratings):
    """Split into disjoint train/test matrices by moving 10 random observed
    ratings per user from train into test.

    NOTE(review): assumes every user has at least 10 ratings (true for
    MovieLens 100k) -- np.random.choice raises ValueError otherwise.
    """
    test = np.zeros(ratings.shape)
    train = ratings.copy()
    for user in range(ratings.shape[0]):
        test_ratings = np.random.choice(ratings[user, :].nonzero()[0],
                                        size=10,
                                        replace=False)
        train[user, test_ratings] = 0.
        test[user, test_ratings] = ratings[user, test_ratings]
    # Test and training are truly disjoint
    assert(np.all((train * test) == 0))
    return train, test


train, test = train_test_split(ratings)

from sklearn.metrics import mean_squared_error


def get_mse(pred, actual):
    """MSE restricted to the observed (nonzero) entries of `actual`."""
    pred = pred[actual.nonzero()].flatten()
    actual = actual[actual.nonzero()].flatten()
    return mean_squared_error(pred, actual)


from numpy.linalg import solve


def alt_step(latent_vector, fixed_vector, ratings, _lambda, _type='user'):
    """One ALS half-step: solve the ridge-regularized normal equations for
    each row of `latent_vector` while `fixed_vector` is held fixed.

    _type='item' transposes the ratings access so the same code updates item
    factors.  (Currently unused below -- SGD is used instead.)
    """
    get_vec = lambda x: ratings[x, :]
    if _type == 'item':
        get_vec = lambda x: ratings[:, x].T
    ATA = fixed_vector.T.dot(fixed_vector)
    ATAlambdaI = ATA + np.eye(ATA.shape[0]) * _lambda
    for u_i in range(latent_vector.shape[0]):
        latent_vector[u_i, :] = \
            solve(ATAlambdaI, get_vec(u_i).dot(fixed_vector))
    return latent_vector


def sgd(user_vector, item_vector, user_bias, item_bias, global_bias,
        learning_rate, regularizer_factor, ratings, non_zero_row, non_zero_col):
    """One SGD epoch over the observed ratings, visited in random order.

    Updates the factor matrices and bias vectors in place with L2
    regularization.  NOTE(review): the item update uses the already-updated
    user vector of the same step; this matches the original behavior.
    """
    rows = np.arange(len(non_zero_row))
    np.random.shuffle(rows)
    for i in rows:
        user = non_zero_row[i]
        item = non_zero_col[i]
        prediction = global_bias + user_bias[user] + item_bias[item] \
            + user_vector[user, :].dot(item_vector[item, :].T)
        error = (ratings[user, item] - prediction)  # error
        user_bias[user] += learning_rate * (error - regularizer_factor * user_bias[user])
        item_bias[item] += learning_rate * (error - regularizer_factor * item_bias[item])
        user_vector[user, :] += learning_rate * (error * item_vector[item, :]
                                                 - regularizer_factor * user_vector[user, :])
        item_vector[item, :] += learning_rate * (error * user_vector[user, :]
                                                 - regularizer_factor * item_vector[item, :])


def inference(user_vector, item_vector, global_bias, user_bias, item_bias):
    """Predict the full (n_users x n_items) ratings matrix.

    Vectorized with broadcasting: replaces the original
    O(n_users * n_items) Python double loop with one matrix product,
    producing the same values.
    """
    return (global_bias
            + user_bias[:, np.newaxis]
            + item_bias[np.newaxis, :]
            + user_vector.dot(item_vector.T))


# Hyperparameters and parameter initialization.
n_user = train.shape[0]
n_item = train.shape[1]
n_factor = 40
steps = 400
user_vector = np.random.normal(scale=1./n_factor, size=(n_user, n_factor))
item_vector = np.random.normal(scale=1./n_factor, size=(n_item, n_factor))
user_bias = np.zeros(n_user)
item_bias = np.zeros(n_item)
global_bias = np.mean(train[np.where(train != 0)])  # mean of observed ratings
non_zero_row, non_zero_col = train.nonzero()
learning_rate = 0.001
regularizer_factor = 0.001

# Train, reporting train/test MSE every 10 epochs.
for i in range(steps):
    if (i % 10 == 0):
        print("Step %d" % i)
        predictions = inference(user_vector, item_vector, global_bias, user_bias, item_bias)
        print("test mse ", get_mse(predictions, test))
        print("train mse ", get_mse(predictions, train))
    sgd(user_vector, item_vector, user_bias, item_bias, global_bias,
        learning_rate, regularizer_factor, train, non_zero_row, non_zero_col)
# user_vector = alt_step(user_vector, item_vector, train, usr_lambda)
# item_vector = alt_step(item_vector, user_vector, train, item_lambda, 'item')
Funk SVD.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: pytorch # language: python # name: pytorch # --- # # PAE with SDSS Data # # ### step-by-step instructions for training a probabilsitic autoencoder with this package # import this package from pytorch_pae import AE import numpy as np import matplotlib.pyplot as plt import os # + # import pytorch import torch print(torch.__version__) # - from torchsummary import summary device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print(device) # ## Step 1: Choose your parameters # + SEED = 287505 ## data parameters dataset = 'SDSS_DR16_small' ## needs to be changed to the MyDrive location loc = '/global/cscratch1/sd/vboehm/Datasets/sdss/by_model' # number of layers in networks n_layers = 2 ## convolutional net specific parameters #-------------------------------------# # number of channels in each layer for convolutional neural net out_channels = [32] # kernel sizes in each layer for conv net kernel_sizes = [44] # scaling factor in max pooling layer scale_facs = [1] # padding values in each conv layer paddings = [0] # stride values in each conv layer strides = [1] #-------------------------------------# # whether tp apply a layer normalization after conv layer layer_norm = [False,False] # whether to train elemntwise affine parameters for normalization layer affine = False ## fully connected net specific parameters # output size of each fully connected layer out_sizes = [800,590] ## parameters that apply to both, fully connected and convolutional nets # dropout rate after each layer dropout_rate = [0.0,0.0] # whether to Lipschitz regularize by bounding the spectral norm spec_norm = False # activation function after each layer activations = ['ReLU','ReLU'] # whether to add a bias in each layer or not bias = [True,True] ## general parameters # data dimensionality dim = '1D' # latent space 
dimensionality latent_dim = 10 # number of channels in data input_c = 1 # data dimensioality along one axis (only square data supported in 2D) input_dim = 1000 # type of encoder and decoder network (either 'fc' or 'conv') encoder_type = 'fc' decoder_type = 'fc' # if True, the output is fed through a sigmoid layer to bring data values into range [0,1] final_sigmoid = False ## Training parameters nepochs = 50 batchsize = 32 batchsize_valid = 512 initial_lr = 1e-3 optimizer = 'Adam' criterion1 = 'masked_chi2' criterion2 = 'masked_chi2' #after how many iteration to switch from one loss to the other ann_epoch = 0 contrastive = False scheduler = 'ExponentialLR' scheduler_params = {'gamma':0.99} # - general_params = {'input_c': input_c, 'input_dim': input_dim, 'latent_dim': latent_dim, 'encoder_type': encoder_type,\ 'decoder_type': decoder_type, 'dim': dim, 'contrastive':contrastive} conv_network_params = {'n_layers': n_layers, 'out_channels': out_channels, 'kernel_sizes': kernel_sizes, 'scale_facs': scale_facs, 'paddings': paddings, \ 'strides': strides,'activations': activations, 'spec_norm': spec_norm, 'layer_norm': layer_norm,\ 'affine': affine,'final_sigmoid': final_sigmoid, 'bias':bias} fc_network_params = {'n_layers': n_layers, 'out_sizes': out_sizes,'activations': activations, 'spec_norm': spec_norm, 'dropout_rate':dropout_rate, \ 'layer_norm': layer_norm, 'affine': affine, 'final_sigmoid': final_sigmoid, 'bias':bias} training_params = {'batchsize': batchsize, 'batchsize_valid': batchsize_valid, 'initial_lr': initial_lr, 'optimizer': optimizer, 'criterion1': criterion1, 'criterion2': criterion2, 'scheduler': scheduler, 'scheduler_params':scheduler_params, 'ann_epoch': ann_epoch} data_params = {'dataset':dataset, 'loc': loc} torch.manual_seed(SEED) np.random.seed(SEED) # ## Step 2: Set up and train the autoencoder AE1 = AE.Autoencoder(general_params,data_params,fc_network_params, fc_network_params, training_params, device, transforms=None) if dim =='1D': 
summary(AE1, (input_c,input_dim)) else: summary(AE1, (input_c, input_dim, input_dim)) train_loss, valid_loss = AE1.train(nepochs) plt.figure() plt.title('Autoencoder Training',fontsize=14) plt.plot(train_loss, label='training') plt.plot(valid_loss, label='validation') plt.xlabel('epoch',fontsize=13) plt.ylabel('loss',fontsize=13) plt.legend(fontsize=13) plt.ylim(0,3) plt.show() # ## Data reconstructions from pytorch_pae.data_loader import * # + train_loader, valid_loader = get_data(data_params['dataset'],data_params['loc'],16, 16,transforms=None) data = next(iter(train_loader)) with torch.no_grad(): recon = AE1.forward(data['features'].to(device).float()) fig, ax = plt.subplots(4,4,figsize=(20,10)) ax = ax.flatten() for ii in range(16): ax[ii].plot(np.squeeze(data['features'][ii].cpu().detach().numpy()),lw=2, color='orange') ax[ii].plot(np.squeeze(recon[ii].cpu().detach().numpy()),lw=1,color='black') plt.tight_layout() plt.show() # + train_loader, valid_loader = get_data(data_params['dataset'],data_params['loc'],16, 16,transforms=None) data = next(iter(valid_loader)) with torch.no_grad(): recon = AE1.forward(data['features'].to(device).float()) fig, ax = plt.subplots(4,4,figsize=(20,10)) ax = ax.flatten() for ii in range(16): ax[ii].plot(np.squeeze(data['features'][ii].cpu().detach().numpy()),lw=2, color='orange') ax[ii].plot(np.squeeze(recon[ii].cpu().detach().numpy()),lw=1,color='black') plt.tight_layout() plt.show() # + ### uncomment to save/load the model #torch.save(AE1, os.path.join('/global/cscratch1/sd/vboehm/Models/Tutorials', 'AE_SDSS_1')) #AE1 = torch.load(os.path.join('/global/cscratch1/sd/vboehm/Models/Tutorials', 'AE_SDSS_1')) # - AE1 = AE1.to('cpu') def loss_by_wl(y_true, y_pred, device): loss = (y_true['features'].to(device).float()-y_pred)**2*y_true['noise'].to(device).float()*y_true['mask'].to(device).float() valid_loss = np.mean(loss.detach().cpu().numpy(),axis=0) std_valid_loss = np.std(loss.detach().cpu().numpy(),axis=0) return valid_loss, 
std_valid_loss # + train_loader, valid_loader = get_data(data_params['dataset'],data_params['loc'],-1, -1,transforms=None) data_train = next(iter(train_loader)) data_valid = next(iter(valid_loader)) # - with torch.no_grad(): recon_valid = AE1.forward(data_valid['features'].float()) recon_train = AE1.forward(data_train['features'].float()) # + root_encoded = '/global/cscratch1/sd/vboehm/Datasets/encoded/sdss/' root_decoded = '/global/cscratch1/sd/vboehm/Datasets/decoded/sdss/' wlmin, wlmax = (3388,8318) fixed_num_bins = 1000 min_SN = 50 min_z = 0.05 max_z = 0.36 label = 'galaxies_quasars_bins%d_wl%d-%d'%(fixed_num_bins,wlmin,wlmax) label_ = label+'_minz%s_maxz%s_minSN%d'%(str(int(min_z*100)).zfill(3),str(int(max_z*100)).zfill(3),min_SN) label_2 = label_+'_10_fully_connected_mean_div' #np.save(os.path.join(root_decoded,'decoded_%s_conv.npy'%(label_2)),[recon_train.numpy(), recon_valid.numpy()]) # - recon_valid.shape, recon_train.shape recon_error = loss_by_wl(data_valid, recon_valid, device='cpu') wlmin, wlmax = (3388,8318) wl_range = (np.log10(wlmin),np.log10(wlmax)) # new binning new_wl = np.logspace(wl_range[0],wl_range[1],input_dim) plotpath = '/global/homes/v/vboehm/codes/SDSS_PAE/figures' plt.plot(new_wl,np.squeeze(recon_error[0]/np.mean(data_valid['mask'].detach().cpu().numpy(),axis=0)-1),color='navy') plt.xlabel(r'restframe $\lambda$ [nm]') plt.ylabel('pixelwise $\chi^2$ -1') plt.grid() plt.tight_layout() #plt.ylim(0,2.5) #plt.savefig(os.path.join(plotpath,'mean_reconstruction_error_convAE1.pdf'), bbox_inches='tight') # + # with torch.no_grad(): # #encoded_train = AE1.encoder.forward(data_train['features'].float()).numpy() # encoded_valid = AE1.encoder.forward(data_valid['features'].float()).numpy() # -
notebooks/SDSS.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import audata as aud
import h5py as h5
import pandas as pd
from pathlib import Path

files_dir = Path('/media/zfsmladi/originals/')  # '/home/auvdata/projects/conditionc-new/originals/'
labels = pd.read_csv('../finalLabels.csv')

# We'll perform two tasks:
#  - Task 1: read in the sample data for the target series
#  - Task 2: read in sample data from a waveform series over the same time window

for _, label in labels.iterrows():
    # Alert window boundaries (absolute timestamps).
    left = label['left']
    right = label['right']
    print(f"Our label data:\n\n{label}\n")

    ### TASK 1 — target series via the audata library

    # The series column also carries a column name after ':'; keep only the path.
    series_name = label['series'].split(':')[0]

    aufile = aud.File.open(files_dir / label['filename'], readonly=True)

    # Label timestamps exclude the file basetime, so fetch it for offsetting.
    basetime = aufile.time_reference.timestamp()

    audata_series = aufile[series_name]

    # get(raw=True) yields numerical second-offsets (not datetimes) in a
    # numpy ndarray, which we wrap in a DataFrame.
    df = pd.DataFrame(audata_series.get(raw=True))
    print("Our overall series dataframe:")
    display(df)

    # Drop any duplicated timestamps before slicing.
    df = df.drop_duplicates(subset=['time'], ignore_index=True)

    # Slice out the alert time window (times relative to basetime).
    window_lo = left - basetime
    window_hi = right - basetime
    sample_data = df[(df.time > window_lo) & (df.time < window_hi)]
    print("Our sample dataframe:")
    display(sample_data)

    ### TASK 2 — waveform series via h5py

    # NOTE: audata (Task 1) reads the whole series into memory before slicing.
    # For waveforms that is wasteful; h5py lets us apply the time-window mask
    # before materializing the data, so we use h5py here instead.

    h5file = h5.File(files_dir / label['filename'], mode='r')
    h5py_wf_series = h5file['/data/waveforms/II']

    # Boolean-mask the on-disk dataset, then materialize just that slice.
    waveform_sample_data = pd.DataFrame(
        h5py_wf_series[(h5py_wf_series['time'] > window_lo)
                       & (h5py_wf_series['time'] < window_hi)])
    print("Our waveform sample dataframe:")
    display(waveform_sample_data)

    # Process only the first label for now; remove the break to do them all.
    break
docs/example_notebooks/Working with MLADI Files Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Maoyan movie-comment crawler: page backwards through the mobile comment
# API by timestamp and append each page of comments to comments.csv.

from urllib import request
import json
import time
import csv
import pandas as pd
from datetime import datetime
from datetime import timedelta


def get_data(url):
    """Fetch *url* with a desktop User-Agent; return the raw body bytes.

    Returns None when the server answers with a non-200 status.
    """
    headers = {
        'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.89 Safari/537.36'
    }
    req = request.Request(url, headers=headers)
    response = request.urlopen(req)
    if response.getcode() == 200:
        return response.read()
    return None


def parse_data(html):
    """Parse the API's JSON payload and return its 'cmts' list as dicts.

    Each dict carries id, nickName, cityName, content, score and startTime.
    """
    data = json.loads(html)['cmts']
    comments = []
    for item in data:
        comment = {
            'id': item['id'],
            'nickName': item['nickName'],
            # Some records lack a cityName; fall back to an empty string.
            'cityName': item['cityName'] if 'cityName' in item else '',
            # Strip embedded newlines (up to 10) so a comment stays on one CSV row.
            'content': item['content'].replace('\n', '', 10),
            'score': item['score'],
            'startTime': item['startTime'],
        }
        print(comment)
        comments.append(comment)
    return comments


def save_to_txt():
    """Crawl comments from now back to ``end_time`` and append them to comments.csv.

    Pages by startTime: each request fetches one page, then the cursor moves
    to one second before the oldest comment on that page to avoid duplicates.
    """
    start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(start_time)
    end_time = '2019-10-10 00:00:00'
    df = pd.DataFrame(columns=['id', 'content', 'score'])
    # Lexicographic comparison is safe here because both strings use the
    # fixed-width '%Y-%m-%d %H:%M:%S' format.
    while start_time > end_time:
        # NOTE(review): '_v_==yes' (double '=') looks like a typo for
        # '_v_=yes'; kept as-is since the endpoint tolerates it — confirm.
        url = 'http://m.maoyan.com/mmdb/comments/movie/1277939.json?_v_==yes&offset=0&startTime=' + start_time.replace(' ', '%20')
        html = None
        # The server throttles frequent requests (anti-scraping): sleep 0.1 s
        # between successful calls, and retry once after 0.5 s on failure.
        try:
            html = get_data(url)
        except Exception:
            time.sleep(0.5)
            html = get_data(url)
        else:
            time.sleep(0.1)
        comments = parse_data(html)
        # BUG FIX: step from the *last* comment on the page instead of
        # hard-coded comments[14], which raised IndexError whenever a page
        # returned fewer than 15 comments (identical when it returns 15).
        start_time = comments[-1]['startTime']
        # Subtract one second so the next page cannot repeat this comment.
        start_time = datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S') + timedelta(seconds=-1)
        start_time = datetime.strftime(start_time, '%Y-%m-%d %H:%M:%S')
        for item in comments:
            item_list = []
            item_list.append(item['id'])
            item_list.append(item['content'])
            item_list.append(item['score'])
            item_list.append(item['startTime'])
            # utf_8_sig adds a BOM so Excel opens the CSV with correct encoding.
            with open("comments.csv", "a", newline='', encoding="utf_8_sig") as file:
                writer = csv.writer(file, delimiter=',')
                writer.writerow(item_list)


if __name__ == "__main__":
    html = get_data('http://m.maoyan.com/mmdb/comments/movie/1277939.json')
    comments = parse_data(html)
    print(comments)
    save_to_txt()
# -
Day10-NLP/spider.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dataproc Spark Job # - Dataproc Cluster # - Job with BQ data # - Delete Dataproc Cluster # # API Reference: https://googleapis.dev/python/dataproc/0.7.0/gapic/v1/api.html # ## Setup # inputs: # + REGION = 'us-central1' PROJECT_ID='statmike-mlops' DATANAME = 'fraud' NOTEBOOK = 'dataproc' DATAPROC_COMPUTE = "n1-standard-4" DATAPROC_MAIN_INSTANCES = 1 DATAPROC_WORK_INSTANCES = 4 # - # packages: from google.cloud import dataproc_v1 from datetime import datetime # clients: client_options = {"api_endpoint": f"{REGION}-dataproc.googleapis.com:443"} clients = {} clients['cluster'] = dataproc_v1.ClusterControllerClient(client_options = client_options) clients['job'] = dataproc_v1.JobControllerClient(client_options = client_options) # parameters: TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") BUCKET = PROJECT_ID URI = f"gs://{BUCKET}/{DATANAME}/models/{NOTEBOOK}" DIR = f"temp/{NOTEBOOK}" # environment: # !rm -rf {DIR} # !mkdir -p {DIR} # ## Define Job # - https://cloud.google.com/dataproc/docs/tutorials/bigquery-sparkml#run_a_linear_regression # + # %%writefile {DIR}/gm.py from __future__ import print_function from pyspark.context import SparkContext from pyspark.ml.linalg import Vectors from pyspark.ml.clustering import GaussianMixture from pyspark.sql.session import SparkSession # The imports, above, allow us to access SparkML features specific to linear # regression as well as the Vectors types. # Define a function that collects the features of interest # (mother_age, father_age, and gestation_weeks) into a vector. # Package the vector in a tuple containing the label (`weight_pounds`) for that # row. 
def vector_from_inputs(r): return (r["weight_pounds"], Vectors.dense(float(r["mother_age"]), float(r["father_age"]), float(r["gestation_weeks"]), float(r["weight_gain_pounds"]), float(r["apgar_5min"]))) sc = SparkContext() spark = SparkSession(sc) #temp space for bq export used by connector spark.conf.set('temporaryGcsBucket',"statmike-mlops") # Read the data from BigQuery as a Spark Dataframe. natality_data = spark.read.format("bigquery").option("table", "bigquery-public-data.samples.natality").load() # Create a view so that Spark SQL queries can be run against the data. natality_data.createOrReplaceTempView("natality") # subset data rows and columns sql_query = """ SELECT weight_pounds, mother_age, father_age, gestation_weeks, weight_gain_pounds, apgar_5min from natality where weight_pounds is not null and mother_age is not null and father_age is not null and gestation_weeks is not null and weight_gain_pounds is not null and apgar_5min is not null """ clean_data = spark.sql(sql_query) # Create an input DataFrame for Spark ML using the above function. training_data = clean_data.rdd.map(vector_from_inputs).toDF(["label", "features"]) training_data.cache() # cluster the feature rows with GM gm = GaussianMixture().setK(4).setSeed(1234567) model = gm.fit(training_data) # write data to BigQuery model.gaussiansDF.write.format('bigquery').option("table", "statmike-mlops.fraud.gm_cluster").mode('overwrite').save() # - # !gsutil cp {DIR}/gm.py {URI}/{TIMESTAMP}/gm.py # + [markdown] tags=[] # ## Method 1: Submit Serverless (Batch) Dataproc Job # - # During Private Preview: need to allowlist the project and user... # # Note: Dataproc Serveless requires a subnet with Private Google Access. The first three cells below check for the private access, enable private access, check again to confirm. 
# !gcloud compute networks subnets describe default --region={REGION} --format="get(privateIpGoogleAccess)" # !gcloud compute networks subnets update default --region={REGION} --enable-private-ip-google-access # !gcloud compute networks subnets describe default --region={REGION} --format="get(privateIpGoogleAccess)" # !gcloud beta dataproc batches submit pyspark {DIR}/gm.py --project={PROJECT_ID} --region={REGION} --deps-bucket={BUCKET} --jars=gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar # ## Method 2: User Managed Dataproc Cluster # + [markdown] tags=[] # ### Create Cluster # https://cloud.google.com/dataproc/docs/guides/create-cluster # - # cluster_specs = { "project_id": PROJECT_ID, "cluster_name": DATANAME, "config": { "master_config": {"num_instances": DATAPROC_MAIN_INSTANCES, "machine_type_uri": DATAPROC_COMPUTE}, "worker_config": {"num_instances": DATAPROC_WORK_INSTANCES, "machine_type_uri": DATAPROC_COMPUTE} } } cluster = clients['cluster'].create_cluster( request = { "project_id": PROJECT_ID, "region": REGION, "cluster": cluster_specs } ) cluster.result().cluster_name # ### Submit Job # - https://cloud.google.com/dataproc/docs/samples/dataproc-submit-pyspark-job job_specs = { "placement": {"cluster_name": DATANAME}, "pyspark_job": { "main_python_file_uri": f"{URI}/{TIMESTAMP}/gm.py", "jar_file_uris": ["gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar"] } } job = clients['job'].submit_job(project_id = PROJECT_ID, region = REGION, job = job_specs) job.reference.job_id # ### Wait On Job while True: ljob = clients['job'].get_job(project_id = PROJECT_ID, region = REGION, job_id = job.reference.job_id) if ljob.status.state.name == "ERROR": raise Exception(ljob.status.details) elif ljob.status.state.name == "DONE": print ("Finished") break # ### Review Results # - Go to BiqQuery and review the output table: statmike-mlops.fraud.gm_cluster in my case ljob # ### Delete Cluster # 
https://cloud.google.com/dataproc/docs/guides/manage-cluster#delete_a_cluster delCluster = clients['cluster'].delete_cluster( request = { "project_id": PROJECT_ID, "region": REGION, "cluster_name": cluster.result().cluster_name } )
Dev/dataproc_gmm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

READS = '../../../data/phylo_ampl_dada2/run6/dada2/output/stats.tsv'
HAPS = '../../../data/phylo_ampl_dada2/run6/dada2/output/haplotypes.tsv'

reads = pd.read_csv(READS, sep='\t')
reads.sample()

# Drop the original mosquito samples (sample names starting with 'A').
display(reads.shape)
reads = reads[~reads.s_Sample.str.startswith('A')]
display(reads.shape)

# Average read counts per sample across the mosquito (non-Plasmodium) targets,
# labelled as pseudo-target 'M'.
mr = reads[~reads.target.str.startswith('P')].groupby('s_Sample').mean().reset_index()
mr['target'] = 'M'
mr.sample()

# Keep only the Plasmodium targets...
display(reads.shape)
reads = reads[reads.target.str.startswith('P')]
display(reads.shape)

# ...then append the averaged mosquito pseudo-target rows.
display(reads.shape)
reads = pd.concat([reads, mr], sort=True)
display(reads.shape)

# Split sample names into pool and original sample, and the pool into the
# two primer-pool concentrations.
name_parts = reads.s_Sample.str.replace('_', '-', n=1).str.split('_', n=1, expand=True)
reads['pool'] = name_parts[0]
reads['original_sample'] = name_parts[1]
reads[['P1_conc', 'P2_conc']] = reads.pool.str.split('-', n=1, expand=True).astype(int)
reads.sample()

# Combined pool-target key for plotting.
reads['pool_target'] = reads['pool'] + '_' + reads['target']
reads.sample()

# Heatmap of final (log10) read counts per pool-target x sample.
heat = reads.pivot(index='pool_target', columns='original_sample', values='final').replace(0, np.nan)
heat = np.log10(heat)
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
sns.heatmap(heat, ax=ax, cmap='coolwarm_r', vmin=1)
ax.collections[0].colorbar.set_ticks(range(6));

# Classify each sample by its name prefix.
reads['sample_type'] = 'uninfected'
reads.loc[reads.original_sample.str.startswith('BS'), 'sample_type'] = 'lab_infected'
reads.loc[reads.original_sample.str.startswith('NI'), 'sample_type'] = 'natural_infected'
reads.loc[reads.original_sample.str.startswith('PM'), 'sample_type'] = 'dilution_series'
reads.sample_type.value_counts()

# Log-scale the input counts; zeros become log10(0.1) = -1.
reads['log_input'] = np.log10(reads['input'].replace(0, .1))

# +
# Dilution series: input reads vs concentration ratio for each Plasmodium target.
fig, axs = plt.subplots(1, 2, figsize=(8, 3))
legend = None
for ax, target in zip(axs, ('P1', 'P2')):
    subset = reads.loc[(reads.target == target) & (reads['sample_type'] == 'dilution_series')].copy()
    subset['conc_ratio'] = subset.original_sample.str.replace('PMe', '1e-')
    g = sns.lineplot(data=subset, x='conc_ratio', y='log_input', hue='P1_conc',
                     style='P2_conc', ci=None, ax=ax, legend=legend);
    legend = 'brief'  # show the legend on the second panel only
    ax.set_title(target)
    ax.set_ylim(-1, 5.5)
plt.legend(bbox_to_anchor=(1.05, 1.1), loc=0, frameon=False)
plt.tight_layout();
# -

# Read counts vs pool, one panel per infection state (mosquito pseudo-target excluded).
fig, axs = plt.subplots(3, 1, figsize=(20, 12))
for state, ax in zip(['lab_infected', 'uninfected', 'natural_infected'], axs):
    subset = reads[(reads.target != 'M') & (reads.sample_type == state)]
    sns.swarmplot(data=subset, x='pool', y='log_input', hue='target', ax=ax);
    ax.set_title(state)
    ax.set_xlabel('')
    ax.set_ylim(-1.2, 3.5)
work/2_plasmodium_rebalancing/20191104_plasmodium_rebalancing_run6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MichaelOblego/Linear-Algebra-58020/blob/main/Prelim_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="WJw7Cgvrhnmr"
# #Prelim Exam

# + id="Noaxj5N9hm2h"
import numpy as LA

# + [markdown] id="QcU5PuEyl_2v"
# Question 1

# + colab={"base_uri": "https://localhost:8080/"} id="Gd4daN66iMx5" outputId="ef17650a-2634-47ac-e344-38fb3292dd31"
# A 4x4 identity matrix.
C = LA.eye(4)
print(C)

# + [markdown] id="lFX14HARmCq9"
# Question 2

# + colab={"base_uri": "https://localhost:8080/"} id="A5NP2bshl_St" outputId="c77f9fc9-ffc6-4bf5-b86c-2e5c5aeb3382"
# Doubling the identity scales its diagonal to 2.
B = 2 * C
print(B)

# + [markdown] id="ThXx6Xtkm649"
# Question 3

# + colab={"base_uri": "https://localhost:8080/"} id="5Ojc-CJ1m8J6" outputId="bcb6e52a-fb2e-4581-d918-1bd4322b1abe"
# Cross product of two 3-vectors.
A = LA.array([2, 7, 4])
D = LA.array([3, 9, 8])
Output = LA.cross(A, D)
print("Cross-product: ", Output)
Prelim_Exam.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,py:light # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:PROJ_irox_oer] * # language: python # name: conda-env-PROJ_irox_oer-py # --- # # Writing data objects to file in a convienent and organized way # --- # # Import Modules # + import os print(os.getcwd()) import sys import shutil import pandas as pd from ase import io from IPython.display import display # - from methods import ( get_df_jobs_paths, get_df_dft, get_df_job_ids, get_df_jobs, get_df_jobs_data, get_df_slab, get_df_slab_ids, get_df_jobs_data_clusters, get_df_jobs_anal, get_df_slabs_oh, get_df_init_slabs, get_df_magmoms, ) # # Read Data df_dft = get_df_dft() df_job_ids = get_df_job_ids() df_jobs = get_df_jobs(exclude_wsl_paths=True) df_jobs_data = get_df_jobs_data(exclude_wsl_paths=True) df_jobs_data_clusters = get_df_jobs_data_clusters() df_slab = get_df_slab() df_slab_ids = get_df_slab_ids() df_jobs_anal = get_df_jobs_anal() df_jobs_paths = get_df_jobs_paths() df_slabs_oh = get_df_slabs_oh() df_init_slabs = get_df_init_slabs() df_magmoms = get_df_magmoms() # # Writing finished *O slabs to file # + df_jobs_anal_i = df_jobs_anal[df_jobs_anal.job_completely_done == True] var = "o" df_jobs_anal_i = df_jobs_anal_i.query('ads == @var') for i_cnt, (name_i, row_i) in enumerate(df_jobs_anal_i.iterrows()): # ##################################################### compenv_i = name_i[0] slab_id_i = name_i[1] ads_i = name_i[2] active_site_i = name_i[3] att_num_i = name_i[4] # ##################################################### # ##################################################### job_id_max_i = row_i.job_id_max # ##################################################### # ##################################################### row_paths_i = df_jobs_paths.loc[job_id_max_i] # ##################################################### gdrive_path = 
row_paths_i.gdrive_path # ##################################################### in_dir = os.path.join( os.environ["PROJ_irox_oer_gdrive"], gdrive_path) in_path = os.path.join(in_dir, "final_with_calculator.traj") out_dir = os.path.join("out_data/completed_O_slabs") # out_file = str(i_cnt).zfill(3) + "_" + job_id_max_i + ".traj" out_file = str(i_cnt).zfill(3) + "_" + compenv_i + "_" + slab_id_i + "_" + str(att_num_i).zfill(2) + ".traj" out_path = os.path.join(out_dir, out_file) if not os.path.exists(out_dir): os.makedirs(out_dir) shutil.copyfile( in_path, out_path, ) # - # # Write OER sets to file # + from methods import get_df_oer_groups df_oer_groups = get_df_oer_groups() # + # "vinamepa_43" in df_oer_groups.slab_id.tolist() # - # ######################################################### # ######################################################### for name_i, row_i in df_oer_groups.iterrows(): # ##################################################### compenv_i = name_i[0] slab_id_i = name_i[1] active_site_i = name_i[2] # ##################################################### df_jobs_anal_index_i = row_i.df_jobs_anal_index # ##################################################### # Create directory folder_i = compenv_i + "_" + slab_id_i + "_" + str(int(active_site_i)).zfill(3) out_dir = os.path.join( os.environ["PROJ_irox_oer"], "sandbox", "out_data/oer_sets", folder_i) if not os.path.exists(out_dir): os.makedirs(out_dir) # ##################################################### df_jobs_anal_i = df_jobs_anal.loc[df_jobs_anal_index_i] # ##################################################### for name_j, row_j in df_jobs_anal_i.iterrows(): # ################################################# compenv_j = name_j[0] slab_id_j = name_j[1] ads_j = name_j[2] active_site_j = name_j[3] att_num_j = name_j[4] # ################################################# job_id_max_i = row_j.job_id_max # ################################################# # 
################################################# row_paths_i = df_jobs_paths.loc[job_id_max_i] # ################################################# gdrive_path_i = row_paths_i.gdrive_path # ################################################# # ################################################# # Copy final_with_calculator.traj to local dirs in_dir = os.path.join( os.environ["PROJ_irox_oer_gdrive"], gdrive_path_i) in_path = os.path.join( in_dir, "final_with_calculator.traj") # file_name_j = ads_j + "_" + str(att_num_j).zfill(2) + ".traj" file_name_j = ads_j + "_" + str(att_num_j).zfill(2) out_path = os.path.join( out_dir, file_name_j + ".traj") shutil.copyfile( in_path, out_path, ) # ################################################# # Write .cif version atoms_i = io.read(in_path) atoms_i.write(os.path.join(out_dir, file_name_j + ".cif")) # + active="" # # # + jupyter={"source_hidden": true} # compenv_i = name_i[0] # slab_id_i = name_i[1] # active_site_i = name_i[2] # + jupyter={"source_hidden": true} # group_wo # + jupyter={"source_hidden": true} # group_wo.reset_index(level=["compenv", "slab_id", "active_site", ]) # # group_wo.reset_index? 
# + jupyter={"source_hidden": true} # df_jobs_anal_i = df_jobs_anal[df_jobs_anal.job_completely_done == True] # # var = "o" # # df_jobs_anal_i = df_jobs_anal_i.query('ads == @var') # for i_cnt, (name_i, row_i) in enumerate(df_jobs_anal_i.iterrows()): # # ##################################################### # compenv_i = name_i[0] # slab_id_i = name_i[1] # ads_i = name_i[2] # active_site_i = name_i[3] # att_num_i = name_i[4] # # ##################################################### # # ##################################################### # job_id_max_i = row_i.job_id_max # # ##################################################### # # ##################################################### # row_paths_i = df_jobs_paths.loc[job_id_max_i] # # ##################################################### # gdrive_path = row_paths_i.gdrive_path # # ##################################################### # + jupyter={"source_hidden": true} # df_jobs_anal_done = df_jobs_anal[df_jobs_anal.job_completely_done == True] # var = "o" # df_jobs_anal_i = df_jobs_anal_done.query('ads != @var') # # ######################################################### # data_dict_list = [] # # ######################################################### # grouped = df_jobs_anal_i.groupby(["compenv", "slab_id", "active_site", ]) # for name, group in grouped: # data_dict_i = dict() # # ##################################################### # compenv_i = name[0] # slab_id_i = name[1] # active_site_i = name[2] # # ##################################################### # idx = pd.IndexSlice # df_jobs_anal_o = df_jobs_anal_done.loc[ # idx[compenv_i, slab_id_i, "o", "NaN", :], # ] # # ######################################################### # group_wo = pd.concat([ # df_jobs_anal_o, # group, # ]) # # display(group_wo) # # ######################################################### # df_jobs_anal_index = group_wo.index.tolist() # # ######################################################### # df_index_i = 
group_wo.index.to_frame() # ads_list = df_index_i.ads.tolist() # ads_list_unique = list(set(ads_list)) # o_present = "o" in ads_list_unique # oh_present = "oh" in ads_list_unique # bare_present = "bare" in ads_list_unique # all_ads_present = False # if o_present and oh_present and bare_present: # all_ads_present = True # # ##################################################### # data_dict_i["compenv"] = compenv_i # data_dict_i["slab_id"] = slab_id_i # data_dict_i["active_site"] = active_site_i # data_dict_i["df_jobs_anal_index"] = df_jobs_anal_index # data_dict_i["ads_list"] = ads_list # data_dict_i["all_ads_present"] = all_ads_present # # data_dict_i[""] = # # ##################################################### # data_dict_list.append(data_dict_i) # # ##################################################### # # ######################################################### # df_oer_groups = pd.DataFrame(data_dict_list) # df_oer_groups = df_oer_groups.set_index(["compenv", "slab_id", "active_site"], drop=False) # + jupyter={"source_hidden": true} # df_oer_groups.head()
sandbox/write_atoms_organized.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exercise 1
#
# In this problem, you will write a closure to make writing messages easier.  Suppose you write the following message all the time:
# ```python
# print("The correct answer is: {0:17.16f}".format(answer))
# ```
# Wouldn't it be nicer to just write
# ```python
# correct_answer_is(answer)
# ```
# and have the message?  Your task is to write a closure to accomplish that.  Here's how everything should work:
# ```python
# correct_answer_is = my_message(message)
# correct_answer_is(answer)
# ```
# The output should be:  `The correct answer is: 42.0000000000000000.`
#
# Now change the message to something else.  Notice that you don't need to re-write everything.  You simply need to get a new message function from `my_message` and invoke your new function when you want to.
#
# You should feel free to modify this in any way you like.  Creativity is encouraged!

# +
def my_message(message):
    """Return a closure that prefixes *message* to a 16-decimal rendering of its argument.

    The returned function takes a number and produces a single string, e.g.
    ``my_message("The correct answer is:")(42)`` gives
    ``"The correct answer is: 42.0000000000000000"``.
    """
    def formatted(answer):
        # Bug fix: the original built a (message, number) tuple, so printing
        # showed a tuple repr instead of the sentence the exercise asks for.
        # Join the two parts into one string instead.
        return '{0} {1:17.16f}'.format(message, answer)
    return formatted


correct_answer_is = my_message("The correct answer is:")
print(correct_answer_is(42))

correct_answer_is = my_message("The answer is:")
print(correct_answer_is(42))
lectures/L6/Exercise_1-Final.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from scipy import linalg import numpy as np # - A = np.array([[1, 2], [3, 4]]) # Matrix Multiplication A.dot(A) # Matrix Norm linalg.norm(A) # CHECK: Matrix Norm np.sqrt(sum([x**2 for x in range(1, 5)])) # Calc determinant # For 2x2 matrix is a*d - b*c linalg.det(A) print(A) # Calc Matrix Inverse A_inv = linalg.inv(A) print(A_inv) # CHECK: Inverse X original = identity matrix A_inv.dot(A) # + # Compute Eigenvalues # + # Decomposes A into a # diagonal matrix x and invertible matrix y such that y * x * y^-1 x, y = linalg.eig(A) x0, x1 = x[0], x[1] y0, y1 = y[:, 0], y[:, 1] # - y # Verify normal equation #1 print(A.dot(y0)) print(x0 * y0) # + # Verify normal equation #2 print(A.dot(y1)) # - print(x1 * y1) # verify eigenvalue decomposition y.dot(np.diag(x).dot(linalg.inv(y))) # Calc singular value decomposition # Extension to eigenvalues for non-invertible or non-square matrices # product of an orthogonal matrix * diagonal matrix * another orthogonal matrix u, s, v = linalg.svd(A) # + # U is symmetric & orthogonal print(u) print(u.dot(u.T)) # - # Sigma is a diagonal matrix (if A is invertible) print(np.diag(s)) # + # V is symmetric print(v) # V is orthogonal print(v.dot(v.T)) # - # CHECK: singular value decomposition recovers A u.dot(np.diag(s).dot(v))
Chapter 6 - Statistical Modeling Fundamentals/Linear Algebra.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import seaborn as sns
sns.set(style="whitegrid")
tips = sns.load_dataset("tips")
titanic = sns.load_dataset("titanic")

# +
import seaborn as sns

sns.set(style="darkgrid")
# tips = sns.load_dataset("tips")


def boxplot(data, x, y, hue=None, palette=None, type='sns'):
    """Thin wrapper over seaborn's box plot (`type` is accepted but unused)."""
    opts = dict(data=data, x=x, y=y, hue=hue, palette=palette)
    sns.boxplot(**opts)


def barplot(data, x, y, hue=None, palette=None, type='sns'):
    """Thin wrapper over seaborn's bar plot (`type` is accepted but unused)."""
    opts = dict(data=data, x=x, y=y, hue=hue, palette=palette)
    sns.barplot(**opts)


def countplot(data, x, hue=None, palette=None, type='sns'):
    """Thin wrapper over seaborn's count plot; no `y` since it counts rows."""
    opts = dict(data=data, x=x, hue=hue, palette=palette)
    sns.countplot(**opts)


def scatterplot(data, x, y, hue=None, palette=None, size=None, type='sns'):
    """Thin wrapper over seaborn's scatter plot; `size` maps a column to marker size."""
    opts = dict(data=data, x=x, y=y, hue=hue, size=size, palette=palette)
    sns.scatterplot(**opts)
# -

boxplot(tips, x='day', y='total_bill', hue='sex', palette=None, type='sns')

barplot(tips, x='day', y='total_bill', hue='sex', palette=None, type='sns')

countplot(titanic, 'class', 'who')

scatterplot(tips, "total_bill", "tip", hue="day", size="smoker", palette='Set2')
Notebooks/sns.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # ATP Ranking and Height # # I have (infrequently) watched tennis for the last 10 -15 years and one of the patterns I have noticed is that the top tennis players are getting taller over time. # # In this notebook I wanted to explore if this casual observation is actually true. I will also try to see if there is any other height related pattern in the Men's tennis rankings. # # A secondary aim is to get comfortable with various visualization libraries for python. And hopefully create a deployable webapp for those visualization! # ## Loading Data # # Let's load the downloaded data and convert it into a dataframe that contains the Top 100 tennis players (for each week the rankings are released), and the following information about them: # 1. Date (in python compatible format) # 2. Ranking # 3. Points # 4. Player id # 5. Player name (first and last combined) # 6. Height (if available) from pathlib import Path data_path = Path('.') / 'data' data_path import pandas as pd ranking_00_df = pd.read_csv(data_path / "atp_rankings_00s.csv", parse_dates=['ranking_date']) ranking_00_df.head(5) ranking_00_df.info() ranking_90_df = pd.read_csv(data_path / "atp_rankings_90s.csv", parse_dates=['ranking_date']) ranking_90_df.head(5) ranking_10_df = pd.read_csv(data_path / "atp_rankings_10s.csv", parse_dates=['ranking_date']) ranking_10_df.head(5) ranking_20_df = pd.read_csv(data_path / "atp_rankings_20s.csv", parse_dates=['ranking_date']) ranking_20_df.head(5) ranking_21_df = pd.read_csv(data_path / "atp_rankings_current.csv", parse_dates=['ranking_date']) ranking_21_df.head(5)
Data_Exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import csv

def process_csv(filename):
    """Read *filename* as CSV and return every row as a list of lists of strings."""
    # `with` guarantees the handle is closed even if the read raises.
    with open(filename, encoding="utf-8") as example_file:
        return list(csv.reader(example_file))

# use process_csv to pull out the header and data rows
csv_rows = process_csv("Water_Usage_Data.csv")
csv_header = csv_rows[0]
csv_data = csv_rows[1:]
# -

def cell(row_idx, col_name):
    """Return the value at (row_idx, col_name) from csv_data, or None for an empty cell."""
    col_idx = csv_header.index(col_name)
    val = csv_data[row_idx][col_idx]
    if val == "":
        return None
    return val

print(csv_header)

print(csv_data)

# +
rows, cols = (20, 6)
sum_data = [[0 for i in range(cols)] for j in range(rows)]
counter = 0
# Sum each consecutive 30-row window of columns 1..6 into one row of totals.
# NOTE(review): the old comment said "Sum 10 months" but the loop builds 20
# windows of 30 rows each -- confirm the intended grouping against the file.
# Bug fix: the original called round() after every single addition, which
# compounds rounding error across the 30 rows; accumulate in float and round
# once per finished total instead.
for i in range(0, 20):
    for k in range(1, 7):
        total = 0.0
        for j in range(counter, counter + 30):
            total += float(csv_data[j][k])
        sum_data[i][k - 1] = round(total)
    counter += 30
sum_data

# +
# Express columns 1..5 as a fraction of column 0 (column 0 is presumably the
# overall total -- verify against the CSV header).
percent_data = [[1.0 for i in range(cols)] for j in range(rows)]

for i in range(0, 20):
    for j in range(1, 6):
        percent_data[i][j] = sum_data[i][j] / sum_data[i][0]
percent_data
# -

# Drop the leading totals column so sum_data keeps only the five categories.
for i in sum_data:
    del(i[0])

# +
import matplotlib.pyplot as plt

# Data to plot
labels = 'A', 'B', 'C', 'D', 'E'
# NOTE(review): only four colors for five slices -- matplotlib cycles the
# list, so the fifth slice reuses 'gold'; add a fifth color if undesired.
colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
explode = (0, 0, 0, 0, 0)  # no slice is offset (all explode values are zero)

# Plot
plt.pie(sum_data[0], explode=explode, labels=labels, colors=colors,
        autopct='%1.1f%%', shadow=True, startangle=140)

plt.axis('equal')
plt.show()
# -
Fall 2020/Notebook/Week4/Week4_gauti.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tabular example from fastai.tabular import * # Quick accesss to tabular functionality # Tabular data should be in a Pandas `DataFrame`. path = untar_data(URLs.ADULT_SAMPLE) df = pd.read_csv(path/'adult.csv') df['salary'].unique() df.head() dep_var = 'salary' cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race'] cont_names = ['age', 'fnlwgt', 'education-num'] procs = [FillMissing, Categorify, Normalize] test = TabularList.from_df(df.iloc[800:1000].copy(), path=path, cat_names=cat_names, cont_names=cont_names) data = (TabularList.from_df(df, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs) .split_by_idx(list(range(800,1000))) .label_from_df(cols=dep_var) .add_test(test) .databunch()) data.show_batch(rows=10) learn = tabular_learner(data, layers=[200,100], metrics=accuracy) learn.fit(1, 1e-2) # ## Inference row = df.iloc[0] learn.predict(row)
examples/tabular.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # # ## Project: **Finding Lane Lines on the Road** # *** # ### 1. Pipeline Described # # My pipeline consisted of follow steps. # # 1. Converts the images to grayscale or single channel using HLS color space and taking out S(saturation channel) . # 2. Apply Gaussian Blur on the Single ch/Grayscales image. # 3. Next use Canny edge detection on Blurred image. # 4. Create Region of Interest (ROI) masking. # 5. Extract line segment using 'Hough transform'. # 6. Identify Line segment detected by 'Hough transform', based on +ve & -ve slope . And average slope and intercept per lane for each frame. # 7. Further for Video processing, averaging of coordinates of each lane is done, over the number of frames, given by parameter `no_frame_avg`. To filter out noise and jitter # 8. Finally image is overlay of the orignal image with the detected lanes and lane dimension extracted. # # Additional # 1. While Identifing slope for video (challenge.mp4) with curved lane, slope and intercept change restriction is imposed, using flag parameter `slope_change_restriction`. # 2. Three stages(Sigle channel, Blurred, Canny edges) of the pipeline are also overlayed on top part of the final output image, with Resized small image, to better observer the effect of parameter. # 3. Condition when no lanes are found in video is handled by providing average lanes over previous `no_frame_avg` frame, in which lanes were detected, with the below code calling appropriate functions # ``` # if lines is not None: # draw_lines(line_img, lines) # else: # in-case no lines are detected running average lane will be overlaid # draw_avg_lanes(line_img) # ``` # 4. 
For averaging lanes over image frames `averging_over_frames = np.empty((0,7), int)` is initialized to empty before start of each video or individual picture processing. `averging_over_frames` stores coordinates of each lane over number of frames in number of row given by `no_frame_avg`. Data for frames older is removed by the code # ``` # if averging_over_frames.shape[0] > no_frame_avg: # averging_over_frames = np.delete(averging_over_frames, 0, 0) # ``` # New frame's data is added by code # ```averging_over_frames = np.vstack((averging_over_frames, np.array([lane_1_.......])))``` # # 5. Function `draw_avg_lanes()` is used to draw lanes on blank image(along with other parameters such as frame size, length of lanes, number of frames averaged), which is overlaid on original image with call to function `weighted_img()` # # ### 2. Identify potential shortcomings with your current pipeline # # The pipeline does not consider dark lighting conditions, nor the curvature of the lanes,. # Also it does not consider other objects (such as vehicle, people) that might obstruct the line of view. # # # # ### 3. Suggest possible improvements to your pipeline # # Curved lane detection should be done by using perspective transform and other image correction techniques. 
# Parameter determination for various transforms used during detection should be dynamic, to cater for conditions # # ## Import Packages #importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 # %matplotlib inline # ## Read in an Image # + #reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray') # - # ## Ideas for Lane Detection Pipeline # **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:** # # `cv2.inRange()` for color selection # `cv2.fillPoly()` for regions selection # `cv2.line()` to draw lines on an image given endpoints # `cv2.addWeighted()` to coadd / overlay two images # `cv2.cvtColor()` to grayscale or change color # `cv2.imwrite()` to output images to file # `cv2.bitwise_and()` to apply a mask to an image # # **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** # ## Helper Functions # Below are some helper functions to help get you started. They should look familiar from the lesson! 
# +
import math

def grayscale(img):
    """Applies the Grayscale transform
    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')"""
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)

def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)

    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image

def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """
    NOTE: This function is used as a starting point to average/extrapolate
    the line segments detected to map out the full extent of the lane.

    This function separates line segments by their slope ((y2-y1)/(x2-x1)),
    to decide which segments are part of the left line vs. the right line,
    and over a threshold (to filter out near-horizontal lines).  Then it
    averages the position of each of the lines and extrapolates to the top
    and bottom of the lane.

    This function then adds the detected lane coordinates to the global
    parameter 'averging_over_frames', for calculating a running average
    using the next function 'draw_avg_lanes()'.

    Additionally, for the challenge video, a slope-change restriction is
    applied (enabled via the global flag 'slope_change_restriction').
    """
    global averging_over_frames
    lane1 = []
    lane2 = []
    # adding slope, intercept and detecting to which lane the line segment belongs to
    for line in lines:
        for x1, y1, x2, y2 in line:
            # slope = ((y1-y2)/(x1-x2))
            # intercept = ((x1*y2-x2*y1)/(x1-x2))
            slope, intercept = np.polyfit([x1,x2],[y1,y2],1)

            # additional slope change restriction code and conditions:
            # derive the previous frames' average lane slopes/intercepts
            old_frame_data = averging_over_frames.shape[0]
            if slope_change_restriction=='y' and old_frame_data > 1:
                lane_1_min_x, lane_1_min_y,lane_1_max_x, max_y,lane_2_min_x, lane_2_min_y,lane_2_max_x \
                = np.mean(averging_over_frames, 0, int)
                frame_avg_slope1, frame_avg_intercept1 = \
                np.polyfit([lane_1_min_x,lane_1_max_x],[lane_1_min_y,max_y],1)
                frame_avg_slope2, frame_avg_intercept2 = \
                np.polyfit([lane_2_min_x,lane_2_max_x],[lane_2_min_y,max_y],1)

            # condition to remove near-horizontal, infinite, NaN slopes.
            # Bug fix: the original used `slope != float('NaN')`, which is
            # always True (NaN compares unequal to everything, including
            # itself), so it never filtered anything.  math.isnan() expresses
            # the intent; behavior is unchanged in practice because
            # `abs(slope) < 100` is already False for NaN.
            if (abs(slope) > 0.4) and (abs(slope) < 100) and (not math.isnan(slope)):
                if slope > 0 :
                    # additional slope change restriction code and conditions
                    if slope_change_restriction=='y' and old_frame_data > 1:
                        if abs((slope-frame_avg_slope1)) < 0.3 and abs((intercept-frame_avg_intercept1)) < 45:
                            lane1.append([slope, intercept, x1, y1, x2, y2])
                    else:
                        lane1.append([slope, intercept, x1, y1, x2, y2])
                else:
                    # additional slope change restriction code and conditions
                    if slope_change_restriction=='y' and old_frame_data > 1:
                        if abs((slope-frame_avg_slope2)) < 0.3 and abs((intercept-frame_avg_intercept2)) < 45:
                            lane2.append([slope, intercept, x1, y1, x2, y2])
                    else:
                        lane2.append([slope, intercept, x1, y1, x2, y2])

    if lane1!=[] and lane2!=[]: # in-case no LANE lines are detected running average lane will be overlayed
        lane_1 = np.array(lane1)
        lane_2 = np.array(lane2)

        lane_1_slope_avg, lane_1_intercept_avg = np.mean(lane_1,0)[:2]
        lane_2_slope_avg, lane_2_intercept_avg = np.mean(lane_2,0)[:2]

        lane_1_min_y = int(np.min(lane_1[:,[3,5]]))
        lane_2_min_y = int(np.min(lane_2[:,[3,5]]))

        # top lane points
        lane_1_min_x = int((lane_1_min_y - lane_1_intercept_avg)/lane_1_slope_avg)
        lane_2_min_x = int((lane_2_min_y - lane_2_intercept_avg)/lane_2_slope_avg)

        # bottom lane points
        max_y = img.shape[0]
        lane_1_max_x = int((max_y - lane_1_intercept_avg)/lane_1_slope_avg)
        lane_2_max_x = int((max_y - lane_2_intercept_avg)/lane_2_slope_avg)

        # adding current frame lane to stack for averaging and print later
        averging_over_frames = np.vstack((averging_over_frames, np.array([lane_1_min_x, lane_1_min_y, \
                                                                          lane_1_max_x, max_y, \
                                                                          lane_2_min_x, lane_2_min_y, \
                                                                          lane_2_max_x])))
        # removing old frames data
        if averging_over_frames.shape[0] > no_frame_avg:
            averging_over_frames = np.delete(averging_over_frames, 0, 0)

    draw_avg_lanes(img, color, thickness)

def draw_avg_lanes(img, color_lane=[255, 0, 0], thickness=10):
    """
    This function finds the running average of the coordinates of the lanes
    (or the current values in the case of a single frame), and draws it on
    the image (along with the length of the lanes), which will be overlaid
    on the original image/frame.

    The global parameter 'averging_over_frames' provides the mean/average
    for all coordinates.
    """
    global averging_over_frames
    if averging_over_frames.shape[0] > 0:
        lane_1_min_x, lane_1_min_y,lane_1_max_x, max_y,lane_2_min_x, lane_2_min_y,lane_2_max_x \
        = np.mean(averging_over_frames, 0, int)

        cv2.line(img, (lane_1_min_x, lane_1_min_y), (lane_1_max_x, max_y), color_lane, thickness)
        cv2.line(img, (lane_2_min_x, lane_2_min_y), (lane_2_max_x, max_y), color_lane, thickness)

        #calculating length of lanes
        lane1_len = int(np.sqrt((lane_1_min_x - lane_1_max_x)**2 + (lane_1_min_y - max_y)**2))
        lane2_len = int(np.sqrt((lane_2_min_x - lane_2_max_x)**2 + (lane_2_min_y - max_y)**2))

        # Print additional text information on image
        cv2.putText(img, "'In Number of Pixels'", (int((img.shape[1])*0.75)+40, 50),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
        cv2.putText(img, "Image Size: {}X{}".format(img.shape[1],img.shape[0]), (int((img.shape[1])*0.75)+40, 70),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
        # Print current running average lane lengths on image in no. of pixels.
        # NOTE(review): "Lenght" below is a typo in the on-screen label; kept
        # unchanged here so the rendered output stays identical.
        cv2.putText(img, "Lane-R Lenght: {}".format(lane1_len), (int((img.shape[1])*0.75)+40, 90),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
        cv2.putText(img, "Lane-L Lenght: {}".format(lane2_len), (int((img.shape[1])*0.75)+40, 110),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
        cv2.putText(img, "no.frames avg: {}".format(averging_over_frames.shape[0]), (int((img.shape[1])*0.75)+40, 140),\
                    cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
#     from IPython import embed; embed()
    if lines is not None:
        draw_lines(line_img, lines)
    else:  # in-case no lines are detected running average lane will be overlayed
        draw_avg_lanes(line_img)
    return line_img

# Python 3 has support for cool math symbols.

def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    `img` is the output of the hough_lines() (which further calls
    draw_lines(), draw_avg_lanes()) -- an image with lines drawn on it,
    i.e. a blank image (all black) with lines drawn on it.

    `initial_img` should be the image before any processing.
    initial_img and img must be the same shape!

    The result image is computed as follows:

    initial_img * α + img * β + γ
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
# -

# ## Test Images
#
# Build your pipeline to work on the images in the directory "test_images"
# **You should make sure your pipeline works well on these images before you try the videos.**

import os
files = os.listdir("test_images/")
files

# ## Build a Lane Finding Pipeline
#
#

# Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
#
# Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.

# +
# TODO: Build your pipeline that will draw lane lines on the test_images

# NOTE(review): the name "lane_finding_pipline" (sic) is kept as-is because it
# is the public entry point called throughout this notebook.
def lane_finding_pipline(image):
    """Run the full lane-detection pipeline on one RGB frame and return the annotated frame."""
    imshape = image.shape

    #Gray-scale/single channel image
    if (imshape[0]>=720): # exception for high resolution video, where Saturation channel indicates lanes clearly. e.g: in Challenge video
        #extracting Saturation channel of the image
        img_gray = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)[:,:,2]
    else:
        img_gray = grayscale(image)
#     img_gray = grayscale(image)

    # Gaussian Blur for filtering noise
    kernel_size = 5
    img_gaus = gaussian_blur(img_gray, kernel_size)

    # Canny parameter determination
    low_threshold = 30
    high_threshold = 150
    # Apply Canny for edge detection
    img_canny = canny(img_gaus, low_threshold, high_threshold)

    # Creating a masked edges image using cv2.fillPoly()
    mask = np.zeros_like(img_canny)
    ignore_mask_color = 255

    # defining a four sided polygon to mask to define ROI
    vertices = np.array([[ (imshape[1]*0.95, imshape[0]), (imshape[1]*0.07,imshape[0]), \
                          (imshape[1]*0.4,imshape[0]*0.6),(imshape[1]*0.60, imshape[0]*0.6)]], dtype=np.int32)
    masked_edges = region_of_interest(img_canny, vertices)

    # Define the Hough transform parameters
    rho = 1 # distance resolution in pixels of the Hough grid
    theta = np.pi/180 # angular resolution in radians of the Hough grid
    threshold = 10     # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 5 #minimum number of pixels making up a line
    max_line_gap = 40    # maximum gap in pixels between connectable line segments

    # Run Hough on ROI of edge detected image, to draw lines
    # Output "lines" is an array containing endpoints of detected line segments
#     lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)
    lines_edges = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)

    # Overlay recognised lane over the image
#     img_final = weighted_img(lines_edges, image)
    img_final1 = weighted_img(lines_edges, image)

    # Overlay pipeline stages
    overlay_col_size = int(imshape[1]/4)
    overlay_row_size = int(imshape[0]/4)
    # resizing to fit smaller window
    img_gray_resized = cv2.resize(img_gray, (overlay_col_size, overlay_row_size ))
    img_gaus_resized = cv2.resize(img_gaus, (overlay_col_size, overlay_row_size ))
    img_canny_resized = cv2.resize(img_canny, (overlay_col_size, overlay_row_size ))

    # adding Single channel or Gray-scale to final output, with image label
    cv2.putText(img_final1, "Single ch/Gray-scale", (25, 20),\
                cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    img_final1[30:overlay_row_size + 30, 10:overlay_col_size +10 ] \
    = cv2.cvtColor(img_gray_resized, cv2.COLOR_GRAY2RGB)

    # adding Gaussian Blurred to final output, with image label
    cv2.putText(img_final1, "Gaussian Blurred", (overlay_col_size + 35, 20),\
                cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    img_final1[30:overlay_row_size + 30, overlay_col_size + 20: 2*overlay_col_size +20 ] \
    = cv2.cvtColor(img_gaus_resized, cv2.COLOR_GRAY2RGB)

    # adding Canny edge detected to final output, with image label
    cv2.putText(img_final1, "Canny edge detection", (2*overlay_col_size +45, 20),\
                cv2.FONT_HERSHEY_PLAIN, 1.1, (255, 255, 0), 1)
    img_final1[30:overlay_row_size + 30, 2*overlay_col_size +30: 3*overlay_col_size +30 ] \
    = cv2.cvtColor(img_canny_resized, cv2.COLOR_GRAY2RGB)

    return img_final1
# -

# +
# select image
image_no = 1
image = mpimg.imread("test_images/"+files[image_no])

# invoke pipeline
averging_over_frames =np.empty((0,7), int)
no_frame_avg = 1  # this is 1 for individual frame/image
slope_change_restriction='n'
img_final = lane_finding_pipline(image)

#display generated image
plt.imshow(img_final)

# then save them to the test_images_output directory.
# using time stamp for output filename to avoid overwriting old output files
import time
filename = time.strftime("%Y%m%d_%H%M%S") + files[image_no]
#ensure output folder 'test_images_output' is created in current directory before running the code
filepath = os.path.join("test_images_output/",filename)
# mpimg.imsave(filepath, img_final)
# preferred Open CV's image write function compared to matplotlib's save due to file consideration
cv2.imwrite(filepath, np.flip(img_final,2))
# -

# ## Test on Videos
#
# You know what's cooler than drawing lanes over images? Drawing lanes over video!
#
# We can test our solution on two provided videos:
#
# `solidWhiteRight.mp4`
#
# `solidYellowLeft.mp4`
#
# **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
#
# **If you get an error that looks like this:**
# ```
# NeedDownloadError: Need ffmpeg exe.
# You can download it by calling:
# imageio.plugins.ffmpeg.download()
# ```
# **Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # invoking pipeline here,
    # return the final output with lines drawn on lanes, along with the stages of the pipeline and pixel length of lanes
    result = lane_finding_pipline(image)
#     result = image
    return result

# Run for Video with the solid white lane on the right first. Averaging running average lane coordinates over 20 frames

# +
white_output = 'test_videos_output/solidWhiteRight.mp4'

# averaging lanes over image frames
averging_over_frames = np.empty((0,7), int) #queue to store lane coordinates for averaging over no. of frames(given by 'no_frame_avg')
no_frame_avg = 20 # this indicates the number of frames to average the lanes over
slope_change_restriction='n'

# clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
# -

# Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.

HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(white_output))

# Now for the one with the solid yellow lane on the left. This one's more tricky!

# +
yellow_output = 'test_videos_output/solidYellowLeft.mp4'

# averaging lanes over image frames
averging_over_frames = np.empty((0,7), int) #queue to store lane coordinates for averaging over no. of frames(given by 'no_frame_avg')
no_frame_avg = 20 # this indicates the number of frames to average the lanes over
slope_change_restriction='n'

clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
# %time yellow_clip.write_videofile(yellow_output, audio=False)
# -

HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(yellow_output))

# ## Optional Challenge
#
# Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!

# +
challenge_output = 'test_videos_output/challenge.mp4'

# averaging lanes over image frames
averging_over_frames = np.empty((0,7), int) #queue to store lane coordinates for averaging over no. of frames(given by 'no_frame_avg')
no_frame_avg = 15 # this indicates the number of frames to average the lanes over
slope_change_restriction='y'

clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
# %time challenge_clip.write_videofile(challenge_output, audio=False)
# -

HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(challenge_output))
P1.ipynb