code
stringlengths
38
801k
repo_path
stringlengths
6
263
# -*- coding: utf-8 -*- # + using DelimitedFiles A = readdlm("sketches/ww15mgh.grd", skipstart = 1) A1 = permutedims(A); #transpose into column major order a1 = vec(A1); #concatenate into a vector to remove unwanted string elements a1_clean = filter(x -> !(x isa AbstractString), a1); a_f32 = convert(Array{Float32, 1}, a1_clean) #cast to Float32 #we know that each sequence of 1441 elements corresponds to a constant-latitude #set of values. after reshaping, each column is a constant-latitude set of #values running over lon [-pi, pi] A_F32 = reshape(a_f32, 1441, :); A_F32 = permutedims(A_F32); #transpose so that each row corresponds to a latitude value #in the original text file latitude decreases from π/2 to -π/2, we need to flip #it! A_final = reverse(A_F32, dims = 1); #write as big endian open("ww15mgh_be.bin", "w") do f #opens the specified file, returns it as f, and applies to it the function in the block write(f, hton.(A_final)) end #write as little endian open("ww15mgh_le.bin", "w") do f #opens the specified file, returns it as f, and applies to it the function in the block write(f, htol.(A_final)) end #read from big endian A_tmp = Matrix{Float32}(undef, 721, 1441) open("ww15mgh_be.bin", "r") do f read!(f, A_tmp) end A_retrieved = ntoh.(A_tmp) @assert all(A_retrieved[end,:] .== Float32(13.606)) @assert all(A_retrieved[1,:] .== Float32(-29.534)) A_tmp = Matrix{Float32}(undef, 721, 1441) open("ww15mgh_le.bin", "r") do f read!(f, A_tmp) end A_retrieved = ltoh.(A_tmp) @assert all(A_retrieved[end,:] .== Float32(13.606)) @assert all(A_retrieved[1,:] .== Float32(-29.534)) # - B = []
sketches/geoid96.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # OpenAlex # ## Getting Started with the APS dataset # ### <NAME> (<EMAIL>) # ### Mar 1, 2022 # This notebook is a getting started guide for getting familiar with the OpenAlex databases. # The OpenAlex database has over 200 million papers and ~2 billion citations. # # To keep things manageable, we will focus on a smaller snapshot of the dataset which only covers the papers published by the APS. # + tags=[] # load libraries import pandas as pd from pathlib import Path import igraph as ig import seaborn as sns import matplotlib.pyplot as plt from tqdm.auto import tqdm import networkx as nx import numpy as np tqdm.pandas() plt.rcParams.update({'font.size': 22}) sns.set(style="ticks", context="talk") plt.style.use("dark_background") # + tags=[] # set paths basepath: Path = Path('/N/project/openalex') aps_csv_path = basepath / 'APS' / 'csvs' # directory containing CSVs aps_parq_path = basepath / 'APS' / 'parquets' # directory containing parquet files # + tags=[] # some helper functions def read_parquet(name, **args): path = aps_parq_path / name df = pd.read_parquet(path, engine='pyarrow') df.reset_index(inplace=True) if '__null_dask_index__' in df.columns: df.drop(['__null_dask_index__'], axis=1, inplace=True) if 'index' in df.columns: df.drop(['index'], axis=1, inplace=True) df.drop_duplicates(inplace=True) if 'index_col' in args: df.set_index(args['index_col'], inplace=True) print(f'Read {len(df):,} rows from {path.stem!r}') return df def read_csv(name, **args): path = aps_csv_path / f'{name}.csv.gz' df = pd.read_csv(path, **args) df.drop_duplicates(inplace=True) print(f'Read {len(df):,} rows from {path.stem!r}') return df def to_parquet(df, path, **args): print(f'Writing {len(df):,} rows to {path.stem!r}') df.to_parquet(path, 
engine='pyarrow', index=False, **args) return # - # ## Dataframes # #### Notes: # 1. Every table exists in two formats: CSV and Parquet. CSVs are good for humans while Parquets are more efficient. You may use either format. # # 2. Each work, venue, author, institution, and concept are assigned **unique** IDs. # # 4. The following relationship between the objects are defined. # * `works_authorships`: relates works and authors (with institution info). Note: a (work_id, author_id) does not uniquely identify a row because an author may have multiple affiliations on a single paper. # # * `aps_referenced_aps`: relates works `w_1` and `w_2` if `w_1` cites `w_2`. Important: both `w_1` and `w_2` are published in the APS. # # * `works_concepts`: relates works with concepts. Note: a work may have multiple concepts. # # * `concepts_ancestors`: relates a pair of concepts `c_1` and `c_2` if `c_2` is an ancestor of `c_1` in the hierarchy of concepts # # * `concepts_related_concepts`: relates a pair of concepts `c_1` and `c_2` if `c_1` is related to `c_2` # + tags=[] aps_works_df = read_parquet('works') # read the works db ## or equivalently aps_works_df = read_csv('works') aps_works_df.head(3) # + tags=[] concepts_df = read_parquet('concepts') concepts_df.head() # + tags=[] aps_authors_df = read_parquet('works_authorships') aps_authors_df.head(3) # + tags=[] aps_refs_df = read_parquet('aps_referenced_aps') aps_refs_df.head(3) # + tags=[] works_concepts_df = read_parquet('works_concepts') works_concepts_df.head(3) # + tags=[] concepts_ancestors_df = read_parquet('concepts_ancestors') concepts_ancestors_df # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Making graphs using iGraph # # We have a few options on what graphs we want to construct. To keep things simple, we'll construct a citation graph using `igraph`. # `aps_refs_df` is essentially an edge list. # # Let's create a node dataframe which would have necessary node info. 
# - aps_works_df # + tags=[] nodes_df = aps_works_df[['work_id', 'venue_id', 'venue_name', 'title', 'publication_year', 'abstract']] # change this according to your needs # + tags=[] nodes_df # + tags=[] cite_g = ig.Graph.DataFrame(edges=aps_refs_df, vertices=nodes_df, directed=True) print(cite_g.summary()) # - # We have a graph! # # Node attributes can be accessed using the `vertex_property` dictionary. Refer to [this link](https://igraph.org/python/tutorial/latest/tutorial.html#querying-vertices-and-edges-based-on-attributes) for more info. # + tags=[] print(cite_g.vs.attribute_names()) # name has the work_ids # + tags=[] example_work_id = 'https://openalex.org/W3086477784' print('Node IDs:', cite_g.neighbors(example_work_id, mode='out')) # list the outgoing neighbors of this node # to get names of the nodes, use the cite_g.vs print('Node names:', [cite_g.vs[u]['name'] for u in cite_g.neighbors(example_work_id, mode='out')]) # similarly for titles, abstract, etc. # + tags=[] # compute the degree distributions in_degrees= [cite_g.degree(v, 'in') for v in cite_g.vs] out_degrees= [cite_g.degree(v, 'out') for v in cite_g.vs] fig, (ax1, ax2) = plt.subplots(figsize=(17, 8), ncols=2) sns.histplot(in_degrees, bins=10, ax=ax1); sns.histplot(out_degrees, bins=10, ax=ax2); ax1.set_xlabel('In-degree') ax1.set_yscale('log'); ax2.set_xlabel('Out-degree') ax2.set_yscale('log'); # - clusters = cite_g.clusters(mode='weak') lcc_g = clusters.giant() print(lcc_g.summary()) # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # ## Co-authorship graphs # - aps_authors_df # + tags=[] work_id = 'https://openalex.org/W3134780308' aps_authors_df.query(f'work_id==@work_id')[['author_id', 'author_name']] # + tags=[] author_counts = aps_authors_df.groupby('work_id').count()['author_id'].to_dict() # no of authors for a paper # - aps_authors_df.groupby('author_id').count()['work_id'] # no of authors for a paper # + tags=[] df = 
aps_authors_df.groupby('work_id')['author_id'].apply(list).reset_index() # + tags=[] df # - aps_refs_df # + tags=[] aps_refs_df.loc[:, 'work_authors_count'] = aps_refs_df.work_id.apply(lambda w: author_counts.get(w, 0)) aps_refs_df.loc[:, 'ref_work_authors_count'] = aps_refs_df.referenced_work_id.apply(lambda w: author_counts.get(w, 0)) aps_refs_df # + tags=[] aps_refs_df.work_authors_count.value_counts().reset_index().sort_values(by='index') # - aps_refs_df.ref_work_authors_count.value_counts().reset_index().sort_values(by='index') aps_refs_df import numpy as np def calculate_weights(row): m = row.work_authors_count n = row.ref_work_authors_count if m == 0 or n == 0: return np.nan else: return 1 / (m * n) # + tags=[] aps_refs_df.loc[:, 'weight'] = aps_refs_df.progress_apply(lambda row: calculate_weights(row),axis=1) # - aps_refs_df # + aps_refs_df[aps_refs_df.weight.isna()] # + tags=[] df = aps_authors_df.head(100) pd.crosstab(df.author_id, df.work_id) # + tags=[] auth_id1 = 'https://openalex.org/A2259130185' auth_id2 = 'https://openalex.org/A2489500432' work_ids = aps_authors_df.query('author_id==@auth_id1')['work_id'] aps_authors_df.query('author_id==@auth_id2 & work_id.isin(@work_ids)').count() # + tags=[] cleaned_authors_df = aps_authors_df.drop_duplicates(subset=['work_id', 'author_id']) # throw out authors with multiple affils cleaned_authors_df # - cleaned_authors_df.query('work_id==@work_id & author_id==@auth_id') # + tags=[] df = cleaned_authors_df.head(500_000) pair_count_df = pd.crosstab(df.author_id, df.author_id) # + tags=[] pair_count_df.unstack() # + tags=[] x = pair_count_df.to_numpy() np.count_nonzero(x - np.diag(np.diagonal(x))) # - # ## NetworkX # # Let's use NetworkX to construct bipartite graphs -- co-authorships, work-concepts aps_authors_df # + tags=[] cleaned_authors_df = aps_authors_df.drop_duplicates(subset=('work_id', 'author_id')) # drops affils, each paper author pair appears exactly once # + tags=[] authorship_g = 
nx.from_pandas_edgelist(cleaned_authors_df, source='work_id', target='author_id', edge_attr=['title', 'publication_year', 'author_name']) print(nx.info(authorship_g)) # + tags=[] nx.is_bipartite(authorship_g) # should be True # + tags=[] degree_sequence_auth = sorted((d for n, d in authorship_g.degree() if 'A' in n), reverse=True) degree_sequence_works = sorted((d for n, d in authorship_g.degree() if 'W' in n), reverse=True) # + tags=[] fig, ax = plt.subplots(figsize=(15, 6)) ax.plot(*np.unique(degree_sequence_auth, return_counts=True), label='Authors') ax.plot(*np.unique(degree_sequence_works, return_counts=True), label='Works') ax.set_title("Degree histogram"); ax.set_xlabel("Degree"); ax.set_ylabel("Counts"); ax.set_xscale('log'); ax.set_yscale('log'); ax.legend(); # - # discard works with > 50 authors K = 50 discard_nodes = [n for n in authorship_g.nodes() if 'W' in n and authorship_g.degree(n) > K] len(discard_nodes) # + tags=[] authorship_g.remove_nodes_from(discard_nodes) # - nx.is_connected(authorship_g) # + tags=[] comp_sizes = sorted((len(c) for c in nx.connected_components(authorship_g)), reverse=True) comp_sizes[: 10] # - lcc = max(nx.connected_components(authorship_g), key=len) lcc_auth_g = authorship_g.subgraph(lcc).copy() print(authorship_g) print(lcc_auth_g) # ### Read references # + tags=[] refs_g = nx.from_pandas_edgelist(aps_refs_df, source='work_id', target='referenced_work_id', create_using=nx.DiGraph) print(refs_g) # + tags=[] # assign weights to citation graph -- if a paper with i authors cite a paper with j authors, weight is 1 / (i * j) edge_wts = {} for u, v in refs_g.edges(): if authorship_g.has_node(u) and authorship_g.has_node(v): w = 1 / (authorship_g.degree(u) * authorship_g.degree(v)) else: w = 0 edge_wts[(u, v)] = w nx.set_edge_attributes(refs_g, name='weight', values=edge_wts) # + tags=[] zeros = [k for k, v in edge_wts.items() if v == 0] zeros[: 2] # - refs_g.remove_edges_from(zeros) # remove the 0 weighted edges # + tags=[] 
author_nodes = [n for n in lcc_auth_g if 'A' in n] len(author_nodes) # + tags=[] coauth_g = nx.bipartite.weighted_projected_graph(lcc_auth_g, nodes=author_nodes) print(coauth_g) # + tags=[] wts = [v for v in nx.get_edge_attributes(coauth_g, 'weight').values()] len(wts) # + tags=[] fig, ax = plt.subplots(figsize=(15, 8)) sns.histplot(wts, bins=100, ax=ax); ax.set_yscale('log'); # -
notebooks/getting-started.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![qiskit_header.png](attachment:qiskit_header.png) # # Qiskit Chemistry, Programmatic Approach # # ### Introduction # In the [declarative_approach](declarative_approach.ipynb) example, we show how to configure different parameters in an input dictionary for different experiments in Qiskit Chemistry. However, many users might be interested in experimenting with new algorithms or algorithm components, or in programming an experiment step by step using the Qiskit Chemistry APIs. This notebook illustrates how to use Qiskit Chemistry's programmatic APIs. # # In this notebook, we decompose the computation of the ground state energy of a molecule into 4 steps: # 1. Define a molecule and get integrals from a computational chemistry driver (PySCF in this case) # 2. Construct a Fermionic Hamiltonian and map it onto a qubit Hamiltonian # 3. Instantiate and initialize dynamically-loaded algorithmic components, such as the quantum algorithm VQE, the optimizer and variational form it will use, and the initial_state to initialize the variational form # 4. 
Run the algorithm on a quantum backend and retrieve the results # + # import common packages import numpy as np from qiskit import Aer # lib from Qiskit Aqua from qiskit.aqua import QuantumInstance from qiskit.aqua.algorithms import VQE, ExactEigensolver from qiskit.aqua.operators import Z2Symmetries from qiskit.aqua.components.optimizers import COBYLA # lib from Qiskit Aqua Chemistry from qiskit.chemistry import FermionicOperator from qiskit.chemistry.drivers import PySCFDriver, UnitsType from qiskit.chemistry.aqua_extensions.components.variational_forms import UCCSD from qiskit.chemistry.aqua_extensions.components.initial_states import HartreeFock # - # ### Step 1: Define a molecule # Here, we use LiH in the sto3g basis with the PySCF driver as an example. # The `molecule` object records the information from the PySCF driver. # using driver to get fermionic Hamiltonian # PySCF example driver = PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6', unit=UnitsType.ANGSTROM, charge=0, spin=0, basis='sto3g') molecule = driver.run() # ### Step 2: Prepare qubit Hamiltonian # Here, we setup the **to-be-frozen** and **to-be-removed** orbitals to reduce the problem size when we map to the qubit Hamiltonian. Furthermore, we define the **mapping type** for the qubit Hamiltonian. # For the particular `parity` mapping, we can further reduce the problem size. 
# + # please be aware that the idx here with respective to original idx freeze_list = [0] remove_list = [-3, -2] # negative number denotes the reverse order map_type = 'parity' h1 = molecule.one_body_integrals h2 = molecule.two_body_integrals nuclear_repulsion_energy = molecule.nuclear_repulsion_energy num_particles = molecule.num_alpha + molecule.num_beta num_spin_orbitals = molecule.num_orbitals * 2 print("HF energy: {}".format(molecule.hf_energy - molecule.nuclear_repulsion_energy)) print("# of electrons: {}".format(num_particles)) print("# of spin orbitals: {}".format(num_spin_orbitals)) # + # prepare full idx of freeze_list and remove_list # convert all negative idx to positive remove_list = [x % molecule.num_orbitals for x in remove_list] freeze_list = [x % molecule.num_orbitals for x in freeze_list] # update the idx in remove_list of the idx after frozen, since the idx of orbitals are changed after freezing remove_list = [x - len(freeze_list) for x in remove_list] remove_list += [x + molecule.num_orbitals - len(freeze_list) for x in remove_list] freeze_list += [x + molecule.num_orbitals for x in freeze_list] # prepare fermionic hamiltonian with orbital freezing and eliminating, and then map to qubit hamiltonian # and if PARITY mapping is selected, reduction qubits energy_shift = 0.0 qubit_reduction = True if map_type == 'parity' else False ferOp = FermionicOperator(h1=h1, h2=h2) if len(freeze_list) > 0: ferOp, energy_shift = ferOp.fermion_mode_freezing(freeze_list) num_spin_orbitals -= len(freeze_list) num_particles -= len(freeze_list) if len(remove_list) > 0: ferOp = ferOp.fermion_mode_elimination(remove_list) num_spin_orbitals -= len(remove_list) qubitOp = ferOp.mapping(map_type=map_type, threshold=0.00000001) qubitOp = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles) if qubit_reduction else qubitOp qubitOp.chop(10**-10) print(qubitOp.print_details()) print(qubitOp) # - # We use the classical eigen decomposition to get the smallest eigenvalue as a 
reference. # Using exact eigensolver to get the smallest eigenvalue exact_eigensolver = ExactEigensolver(qubitOp, k=1) ret = exact_eigensolver.run() print('The computed energy is: {:.12f}'.format(ret['eigvals'][0].real)) print('The total ground state energy is: {:.12f}'.format(ret['eigvals'][0].real + energy_shift + nuclear_repulsion_energy)) # ### Step 3: Initiate and configure dynamically-loaded instances # To run VQE with the UCCSD variational form, we require # - VQE algorithm # - Classical Optimizer # - UCCSD variational form # - Prepare the initial state in the HartreeFock state # ### [Optional] Setup token to run the experiment on a real device # If you would like to run the experiment on a real device, you need to setup your account first. # # Note: If you did not store your token yet, use `IBMQ.save_account('MY_API_TOKEN')` to store it first. # + # from qiskit import IBMQ # provider = IBMQ.load_account() # - backend = Aer.get_backend('statevector_simulator') # + # setup COBYLA optimizer max_eval = 200 cobyla = COBYLA(maxiter=max_eval) # setup HartreeFock state HF_state = HartreeFock(qubitOp.num_qubits, num_spin_orbitals, num_particles, map_type, qubit_reduction) # setup UCCSD variational form var_form = UCCSD(qubitOp.num_qubits, depth=1, num_orbitals=num_spin_orbitals, num_particles=num_particles, active_occupied=[0], active_unoccupied=[0, 1], initial_state=HF_state, qubit_mapping=map_type, two_qubit_reduction=qubit_reduction, num_time_slices=1) # setup VQE vqe = VQE(qubitOp, var_form, cobyla) quantum_instance = QuantumInstance(backend=backend) # - # ### Step 4: Run algorithm and retrieve the results # The smallest eigenvalue is stored in the first entry of the `eigvals` key. 
results = vqe.run(quantum_instance) print('The computed ground state energy is: {:.12f}'.format(results['eigvals'][0])) print('The total ground state energy is: {:.12f}'.format(results['eigvals'][0] + energy_shift + nuclear_repulsion_energy)) print("Parameters: {}".format(results['opt_params'])) import qiskit.tools.jupyter # %qiskit_version_table # %qiskit_copyright
qiskit/advanced/aqua/chemistry/programmatic_approach.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Emboss Filter # # This is a Emboss Filter # # First you download the bit file. # + from pynq.overlays.video import * from pynq.lib.video import * base = VideoOverlay("video.bit") hdmi_in = base.video.hdmi_in hdmi_out = base.video.hdmi_out # - # Then start up the PRControl, the video will not work otherwise. It initailzes the Video Axi Switch so HDMI runs through the VDMA. from pynq.overlays.video import PRControl pr_inst = PRControl() # The best video sources are computers where you can control the resoltuion. hdmi_in.configure() hdmi_out.configure(hdmi_in.mode,PIXEL_BGR) hdmi_out.start() hdmi_in.start() hdmi_in.tie(hdmi_out) # Here is a frame in VDMA. import PIL.Image import cv2 frame = hdmi_in.readframe() frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) image = PIL.Image.fromarray(frame) image # The Dilate Filter has to be loaded in. In can fit into L0,M0,M1,M2,S0,S1,S2,S3,S4,S5 # Connect the HDMI_IN to S0 and S0 to VDMA and VDMA to HDMI_OUT pr_inst.connect("HDMI_IN","L0") pr_inst.connect("L0","VDMA") pr_inst.connect("VDMA","HDMI_OUT") PartialBitstream("emboss_l0.bit").download() import PIL.Image import cv2 frame = hdmi_in.readframe() frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB) image = PIL.Image.fromarray(frame) image # There are no settings for Emboss # Try other filter locations or two emboss in a row hdmi_out.close() hdmi_in.close()
Video_notebooks/Emboss Filter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="DWJm-HoPI4Zh" # # Desafio Python e E-mail # # ### Descrição # # Digamos que você trabalha em uma indústria e está responsável pela área de inteligência de negócio. # # Todo dia, você, a equipe ou até mesmo um programa, gera um report diferente para cada área da empresa: # - Financeiro # - Logística # - Manutenção # - Marketing # - Operações # - Produção # - Vendas # # Cada um desses reports deve ser enviado por e-mail para o Gerente de cada Área. # # Crie um programa que faça isso automaticamente. A relação de Gerentes (com seus respectivos e-mails) e áreas está no arquivo 'Enviar E-mails.xlsx'. # # Dica: Use o pandas read_excel para ler o arquivo dos e-mails que isso vai facilitar. # + id="Fc4oggaQI4Zx" import pandas as pd import win32com.client as win32 outlook = win32.Dispatch('outlook.application') gerentes_df = pd.read_excel('Enviar E-mails.xlsx') #gerentes_df.info() for i, email in enumerate(gerentes_df['E-mail']): gerente = gerentes_df.loc[i, 'Gerente'] area = gerentes_df.loc[i, 'Relatório'] mail = outlook.CreateItem(0) mail.To = '<EMAIL>' mail.Subject = 'Relatório de {}'.format(area) mail.Body = ''' Prezado {}, Segue em anexo o Relatório de {}, conforme solicitado. Qualquer dúvida estou à disposição. Att., '''.format(gerente, area) attachment = r'C:\Users\Maki\Downloads\e-mail\{}.xlsx'.format(area) mail.Attachments.Add(attachment) mail.Send()
desafio.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.13 64-bit (''python-chilla'': conda)' # language: python # name: python3 # --- # ## Student Detials # > Title= "Mr"\ # > Name= "<NAME>"\ # > email = "<EMAIL>"\ # > whatsapp = "03358043653" # ### In this Notebook I am are going to Explore OpenCV with Python Layer # ### Layout # > + Introduction to images # > + installation # > + Read Data # > + Images # > + Videos # > + Webcam (live) # > + Importatnt function to be used in Opencv # > + Scalling or normalizing images # > + Adding objects to images # > + Wrap and Persecpetives # > + Joining images # > + Color detection # > + Edge detection # > + Face Detection # > + Projcet # > + Car Counter # > + Face Detection and much more # # # What is Pixel: # A pixel is the smallest unit of a digital image or graphic that can be displayed and represented on a digital display device. # # A pixel is the basic logical unit in digital graphics. Pixels are combined to form a complete image, video, text, or any visible thing on a computer display. # # A pixel is also known as a picture element (pix = picture, el = element). # ### Pixel may refer to any of the following: # # ## Pixels # 1. Term that comes from the words PEL (picture element). A px (pixel) is the smallest portion of an image or display that a computer is capable of printing or displaying. You can get a better understanding of what a pixel is when zooming into an image as seen in the picture. # # As you can see in this example, the character image in this picture is zoomed into at 1600%. Each of the blocks seen in the example picture is a single pixel of this image. Everything on the computer display looks similar to this when zoomed in upon. The same is true with printed images, which are created by several little dots that are measured in DPI. 
# # ## Screen pixels # In the picture below is an example of a close up of pixels on an LCD screen. As shown in the picture, we've zoomed into the "he" part of the word "help" to illustrate how the display works. Each pixel has RGB (red, green, and blue) color components. The brightness of each component is increased or decreased to produce the variance of colors you see on the screen. # ![Image](https://www.computerhope.com/cdn/monitor-pixel.jpg) # # Screen Size # We have different types of Scren sizes # 1. VGA # 2. SXGA # 3. HDTV # 4. 4K # ![image](D:/Python ka Chilla/python_chilla/cv_with_python_using opencv/image types.png) # ## More than just RGB # Let’s talk about color modes a little bit more. A color model is a system for creating a full range of colors # using the primary colors. There are two different color models here: additive color models and subtractive color # models. Additive models use light to represent colors in computer screens while subtractive models use inks to # print those digital images on papers. The primary colors are red, green and blue (RGB) for the first one and # cyan, magenta, yellow and black (CMYK) for the latter one. All the other colors we see on images are made by # combining or mixing these primary colors. So the pictures can be depicted a little bit differently when they # are represented in RGB and CMYK. # # ![image](https://miro.medium.com/max/2000/1*CSmlQDizc03csCaSgMgMIw.png) # You would be pretty accustomed to these two kinds of models. In the world of color models, however, there are more than two kinds of models. Among them, grayscale, HSV and HLS are the ones you’re going to see quite often in computer vision. # # A grayscale is simple. It represents images and morphologies by the intensity of black and white, which means it has only one channel. To see images in grayscale, we need to convert the color mode into gray just as what we did with the BGR image earlier. 
# ## Bits or Bit Depth # Bit depth refers to the color information stored in an image. The higher the bit depth of an image, the more colors it can store. The simplest image, a 1 bit image, can only show two colors, black and white. That is because the 1 bit can only store one of two values, 0 (white) and 1 (black). # ![image](https://miro.medium.com/max/2000/1*E1cRhyj4ByJ_qrTev3hTlA.png) # ![image](https://miro.medium.com/max/2000/1*zeVGeXCBFE4FLSbv_bUaLw.png) # Take a look at the images above. The three images show you how each channel is composed of. In the R channel picture, the part with the high saturation of red colors looks white. Why is that? This is because the values in the red color parts will be near 255. And in grayscale mode, the higher the value is, the whiter the color becomes. You can also check this with G or B channels and compare how certain parts differ one from another. # ![image](https://miro.medium.com/max/1400/1*t6gJMcMAu8EUXcgbhhGy5Q.png) # HSV and HLS take a bit different aspect. As you can see above, they have a three-dimensional representation, and it’s more similar to the way of human perception. HSV stands for hue, saturation and value. HSL stands for hue, saturation and lightness. The center axis for HSV is the value of colors while that for HSL is the amount of light. Along the angles from the center axis, there is hue, the actual colors. And the distance from the center axis belongs to saturation. # ![image](https://miro.medium.com/max/2000/1*f3pRIVbutpa9KBwuqsl43w.png) # ## Gray Scale # Multilevel # ## Black = 0 # ## Gray = 1-255 # ## White = 256 # 8 bit 2**8= 256 # ## Bits or Bit Depth # A colr image is typically represented by a bit dept ranign from 8 to 24 or hhiher # # With a 24-bit image the bit are often divied into three groping: 8 for red and 8 for green and 8 for blue channel. # # Combinaing pf bits are used to reprent those bit. 
# # A 24 bit images offers 16.7 milion 2**24 colors # ## Three kind of images # ![image](http://preservationtutorial.library.cornell.edu/tutorial-images/bitdepth.gif) # # **Bit Depth:** Left to right - 1-bit bitonal, 8-bit grayscale, and 24-bit color images. # # Binary calculations for the number of tones represented by common bit depths: # # 1. 1 bit (21) = 2 tones # 2. 2 bits (22) = 4 tones # 3. 3 bits (23) = 8 tones # 4. 4 bits (24) = 16 tones # 5. 8 bits (28) = 256 tones # 6. 16 bits (216) = 65,536 tones # 7. 24 bits (224) = 16.7 million tones # ## Videos # its a continous sequence of images # like: # # ![image](https://miro.medium.com/max/1400/1*vstp_yyHEIWuFTyVLE6LyQ.png) # ## Frame per Second (fps) # Frames Per Second or FPS is the rate at which back to back images called frames appear in a display and form moving imagery. # # Video content that we consume daily isn’t actually moving. In fact, they are still images that play one after the other. If a video is shot at 24fps, this means that 24 individual frames are played back in a second. They change at a different rate across mediums depending on a lot of other factors. # ## calculated FPS in video: # * for 60 fps: 3600 frames # * for 24 fps: 1440 frames # * for 15 fps: 900 frames # ## Detecting Images is Similir to videos: # # So images is move during video and you have to track. E.g selfi tracking # ### Installing python and opencv # ``` # # # !pip install python # # !pip install python-opencv # # ``` # import numpy as np import cv2 import matplotlib.pyplot as plt # %matplotlib inline # Import the image img = cv2.imread('data/img.jpg') plt.imshow(img) # or open with cv2 not using plt cv2.imshow('Image Window', img) cv2.waitKey(0)
opencv_with_python/01_cv_with_python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## enum # ### enum is used to create symbols for values instead of using strings and interger. # + # Creating enum class import enum class PlaneStatus(enum.Enum): standing = 0 enroute_runway = 1 takeoff = 2 in_air = 3 landing = 4 print('\nMember name: {}'.format(PlaneStatus.enroute_runway.name)) print('Member value: {}'.format(PlaneStatus.enroute_runway.value)) # + #Iterating over enums for member in PlaneStatus: print('{} = {}'.format(member.name, member.value)) # + #Comparing enums - compare using identity and equality actual_state = PlaneStatus.enroute_runway desired_state = PlaneStatus.in_air #comparison through equality print('Equality: ', actual_state == desired_state, actual_state == PlaneStatus.enroute_runway) #comparison through identity print('Identity: ', actual_state is desired_state, actual_state is PlaneStatus.enroute_runway) # + '''NO SUPPORT FOR ORDERED SORTING AND COMPARISON''' print('Ordered by value:') try: print('\n'.join('' + s.name for s in sorted(PlaneStatus))) except TypeError as err: print('Cannot sort:{}'.format(err)) # - # ## Use IntEnum for order support # + # Ordered by value class NewPlaneStatus(enum.IntEnum): standing = 0 enroute_runway = 1 takeoff = 2 in_air = 3 landing = 4 print('\n'.join(' ' + s.name for s in sorted(NewPlaneStatus))) # - # ## Unique Ebumeration values # + #Aliases for other members, do not appear separately in the output when iterating over the Enum. #The canonical name for a member is the first name attached to the value. 
class SamePlaneStatus(enum.Enum):
    # Canonical members: the first name bound to each value.
    standing = 0
    enroute_runway = 1
    takeoff = 2
    in_air = 3
    landing = 4
    # These repeat the values 0 and 3, so they become *aliases* of
    # `standing` and `in_air` rather than independent members.
    maintainance = 0
    fueling = 3

# Iteration yields only canonical members; aliases are skipped.
for status in SamePlaneStatus:
    print('{} = {}'.format(status.name, status.value))

# An alias is the very same object as its canonical member, so `is` is True.
print('\nSame: standing is maintainance: ', SamePlaneStatus.standing is SamePlaneStatus.maintainance)
print('Same: in_air is fueling: ', SamePlaneStatus.in_air is SamePlaneStatus.fueling)

# +
# Add @unique decorator to the Enum
# @enum.unique forbids aliases: binding two names to the same value raises
# ValueError at class-creation time, so this cell fails on purpose.
@enum.unique
class UniPlaneStatus(enum.Enum):
    standing = 0
    enroute_runway = 1
    takeoff = 2
    in_air = 3
    landing = 4
    #error triggered here
    maintainance = 0
    fueling = 3

# NOTE(review): this loop iterates SamePlaneStatus, not UniPlaneStatus; it is
# unreachable anyway because the class definition above raises ValueError.
for status in SamePlaneStatus:
    print('{} = {}'.format(status.name, status.value))
# -

# ## Creating Enums programmatically

# +
# Functional API: an iterable of bare names auto-assigns values starting at 1.
PlaneStatus = enum.Enum(
    value = 'PlaneStatus',
    names = ('standing', 'enroute_runway', 'takeoff', 'in_air', 'landing')
)

print('Member:{}'.format(PlaneStatus.in_air))

print('\nAll Members:')
for status in PlaneStatus:
    print('{} = {}'.format(status.name, status.value))

# +
# Functional API with explicit (name, value) pairs for full control.
PlaneStatus = enum.Enum(
    value = 'PlaneStatus',
    names = [
        ('standing', 1),
        ('enroute_runway', 2),
        ('takeoff', 3),
        ('in_air', 4),
        ('landing', 5)
    ]
)

print('\nAll Members:')
for status in PlaneStatus:
    print('{} = {}'.format(status.name, status.value))
# -
data structures/enum.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pyspark.sql import SparkSession from pyspark.sql.types import StructType,StructField,StringType spark = (SparkSession.builder.appName("Challenge 2").getOrCreate()) # ### tại sao khi load ta lại để hết StringType # #### Vì rất có thể dữ liệu đang bị sai mà load vào sẽ không nhận và bị lỗi trong khi load # #### trong khi đó stringtype nhận mọi loại dữ liệu, ta có thể load vào và xử lý từ từ sale_df = StructType([ StructField("Order ID",StringType(),True), StructField("Product",StringType(),True), StructField("Quantity",StringType(),True), StructField("Price",StringType(),True), StructField("Order Date",StringType(),True), StructField("Address",StringType(),True) ]) sale_path = "salesdata" sale_df = (spark.read.format("csv") .option("header",True) .schema(sale_df) .load(sale_path)) sale_df.show(10) from pyspark.sql.functions import col sale_df.filter(col("Order ID") == "Order ID").show(10) # ### Xoá các hàng bị null và sai # #### bây giờ ta sẽ thực hiện xoá các cột bị null # #### tiếp theo sẽ xoá các cột có giá trị không mong muốn như sai type, sai kiểu dữ liệu sale_df.filter(col("Order ID").isNull()==True).show(10) sale_df = sale_df.na.drop("any") sale_df.filter(col("Order ID").isNull()==True).show(10) sale_df.describe("Order ID","Product","Quantity","Price","Order Date","Address").show() # ### ta thấy rằng có một vài vấn đề không đúng ở đây, ví dụ # #### cột id đáng ra chỉ có số nhưng lại có những giá trị như "Order ID" # #### và các cột còn lại có rất nhiều giá trị bất hợp lý ở hàng max # #### đầu tiên ở dưới ta thực hiện xoá những bản ghi trùng lặp distinct_df = sale_df.distinct() distinct_df.filter(col("Order ID")== "Order ID").show(10) clean_df = distinct_df.filter(col("Order ID") != "Order ID") clean_df.filter(col("Order ID") == "Order 
ID").show(10) clean_df.describe("Order ID","Product","Quantity","Price","Order Date","Address").show() # #### bây giờ ở các cột ta đã thấy rằng giá trị đúng cần tìm đúng là số, chữ hay địa chỉ chứ không phải sai thể loại nữa # ### Trích xuất những dữ liệu nhỏ hơn # trong ý tưởng ta có thể nghĩ ra rằng chúng ta sẽ rút chuỗi dựa trên dấu phẩy sau đó trích xuất tương ứng clean_df.show(10, truncate = False) from pyspark.sql.functions import split clean_df.select("Address").show(10,False) clean_df.select("Address",split(col("Address"),",")).show(10,False) # Bây giờ ta có thẻ hiểu rằng cột thứ 2 như là một list, ta có thể thoải mái lấy thông tin dựa trên index clean_df.select("Address",split(col("Address"),",").getItem(1)).show(10,False) clean_df.select("Address",split(col("Address"),",").getItem(2)).show(10,False) clean_df.select("Address",split(split(col("Address"),",").getItem(2)," ")).show(10,False) new_col = (sale_df.withColumn("City",split(col("Address"),",").getItem(1)) .withColumn("State",split(split(col("Address"),",").getItem(2)," ").getItem(1))) new_col.show(10) # ### Rename and change DataType # bây giờ ta đã thấy dữ liệu đã sạch đẹp, bước tiếp theo là trả về đúng kiểu dữ liệu from pyspark.sql.functions import to_timestamp, year, month from pyspark.sql.types import IntegerType,FloatType new_df = (new_col.withColumn("Order_ID",col("Order ID").cast(IntegerType())) .withColumn("Quantity_pro",col("Quantity").cast(IntegerType())) .withColumn("Price_pro",col("Price").cast(IntegerType())) .withColumn("Order_Date",to_timestamp(col("Order Date"),"MM/dd/yy HH:mm")) .withColumnRenamed("Address","StoreAddress") .drop("Order ID") .drop("Quantity") .drop("Price") .drop("Order Date")) new_df.show(10) new_df.printSchema() new_df = (new_df.withColumn("Year",year(col("Order_Date"))) .withColumn("Month",month(col("Order_Date")))) new_df.show(10) # ### lưu vào file parquet # Tại sao ta không lưu vào một định dạng dễ nhìn như csv hay pgadmin mà phải lưu vào parquet<br> # ta có 
thể hiểu rằng file parquet phân vùng dữ liệu cực kì tốt và có thể chia nhỏ những dữ liệu theo mong muốn của mình<br> # việc này giúp khi lưu dữ liệu xuống nó được sắp xếp ngăn nắp trong các folder riêng việt<br> # khi muốn lấy dữ liệu nào ta chỉ cần lấy đúng vị trí và địa chỉ đó không phải load một khối lượng data to đùng<br> output_path = "challenge2" new_df.write.mode("overwrite").partitionBy("Year","Month").parquet(output_path) # vậy là ta sẽ thấy được dữ liệu lưu xuống theo 2 năm là 2019 và 2020<br> # trong folder 2019 và 2020 lại được chia ra folder nhỏ với 12 tháng<br>
Section4/Challenge2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.3.0-rc4 # language: julia # name: julia-1.3 # --- # # Systematic comparison of MendelImpute against Minimac4 and Beagle5 using Revise using VCFTools using MendelImpute using GeneticVariation using Random using Suppressor # # Simulate data of various sizes # # ## Step 0. Install `msprime` # # [msprime download Link](https://msprime.readthedocs.io/en/stable/installation.html). # # Some people might need to activate conda environment via `conda config --set auto_activate_base True`. You can turn it off once simulation is done by executing `conda config --set auto_activate_base False`. # # # ## Step 1. Simulate data in terminal # ``` # python3 msprime_script.py 50000 10000 10000000 2e-8 2e-8 2020 > ./compare_sys/data1.vcf # python3 msprime_script.py 50000 10000 10000000 4e-8 4e-8 2020 > ./compare_sys/data2.vcf # python3 msprime_script.py 50000 10000 100000000 2e-8 2e-8 2020 > ./compare_sys/data3.vcf # python3 msprime_script.py 50000 10000 5000000 2e-8 2e-8 2020 > ./compare_sys/data4.vcf # ``` # ### Arguments: # + Number of haplotypes = 40000 # + Effective population size = 10000 ([source](https://www.the-scientist.com/the-nutshell/ancient-humans-more-diverse-43556)) # + Sequence length = 10 million (same as Beagle 5's choice) # + Rrecombination rate = 2e-8 (default) # + mutation rate = 2e-8 (default) # + seed = 2019 @show nsamples("./compare_sys/data1.vcf.gz") @show nrecords("./compare_sys/data1.vcf.gz") @show nsamples("./compare_sys/data2.vcf.gz") @show nrecords("./compare_sys/data2.vcf.gz") @show nsamples("./compare_sys/data3.vcf.gz") @show nrecords("./compare_sys/data3.vcf.gz") @show nsamples("./compare_sys/data4.vcf.gz") @show nrecords("./compare_sys/data4.vcf.gz") # ## Step 2: Compress files to .gz # run these in terminal run(`cat data1.vcf | gzip > data1.vcf.gz`) run(`cat data2.vcf | 
gzip > data2.vcf.gz`) run(`cat data3.vcf | gzip > data3.vcf.gz`) run(`cat data4.vcf | gzip > data4.vcf.gz`) compress_vcf_to_gz("./compare_sys/data1.vcf"); rm("./compare_sys/data1.vcf", force=true) compress_vcf_to_gz("./compare_sys/data2.vcf"); rm("./compare_sys/data2.vcf", force=true) compress_vcf_to_gz("./compare_sys/data3.vcf"); rm("./compare_sys/data3.vcf", force=true) # # Move over to hoffman now. # # Put scripts below in under `/u/home/b/biona001/haplotype_comparisons/data`: # # ## `filter_and_mask.jl` # ```Julia # using VCFTools # using MendelImpute # using Random # # """ # filter_and_mask(data::String, samples::Int) # # Creates reference haplotypes and (unphased) target genotype files from `data`. # # # Inputs # `data`: The full (phased) data simulated by msprime. # `samples`: Number of samples (genotypes) desired in target file. Remaining haplotypes will become the reference panel # """ # function filter_and_mask(data::String, samples::Int) # missingprop = 0.1 # n = nsamples(data) # p = nrecords(data) # samples > n && error("requested samples exceed total number of genotypes in $data.") # # # output filenames (tgt_data1.vcf.gz, ref_data1.vcf.gz, and tgt_masked_data1.vcf.gz) # tgt = "./tgt_" * data # ref = "./ref_" * data # tgt_mask = "./tgt_masked_" * data # tgt_mask_unphase = "./tgt_masked_unphased_" * data # # # compute target and reference index # tgt_index = falses(n) # tgt_index[1:samples] .= true # ref_index = .!tgt_index # record_index = 1:p # save all records (SNPs) # # # generate masking matrix with `missingprop`% of trues (true = convert to missing) # Random.seed!(2020) # masks = falses(p, samples) # for j in 1:samples, i in 1:p # rand() < missingprop && (masks[i, j] = true) # end # # # create outputs # VCFTools.filter(data, record_index, tgt_index, des = tgt) # VCFTools.filter(data, record_index, ref_index, des = ref) # mask_gt(tgt, masks, des=tgt_mask) # # # finally, unphase the target data # unphase(tgt_mask, outfile=tgt_mask_unphase) # end # # 
data = ARGS[1] # samples = parse(Int, ARGS[2]) # filter_and_mask(data, samples) # ``` # # ## `mendel_fast.jl` # ```Julia # using VCFTools # using MendelImpute # using GeneticVariation # # function run(data::String, width::Int) # tgtfile = "./tgt_masked_unphased_" * data # reffile = "./ref_" * data # outfile = "./mendel_imputed_" * data # phase(tgtfile, reffile, outfile = outfile, width = width, fast_method=true) # end # # data = ARGS[1] # width = parse(Int, ARGS[2]) # run(data, width) # ``` # # ## `mendel_dp.jl` # ```Julia # using VCFTools # using MendelImpute # using GeneticVariation # # function run(data::String, width::Int) # tgtfile = "./tgt_masked_unphased_" * data # reffile = "./ref_" * data # outfile = "./mendel_imputed_" * data # phase(tgtfile, reffile, outfile = outfile, width = width, fast_method=false) # end # # data = ARGS[1] # width = parse(Int, ARGS[2]) # run(data, width) # ``` # ## Step 1: filter files # # + `ref_data1.vcf.gz`: haplotype reference files # + `tgt_data1.vcf.gz`: complete genotype information # + `tgt_masked_data1.vcf.gz`: the same as `tgt_data1.vcf.gz` except some entries are masked # + `tgt_masked_unphased_data1.vcf.gz`: the same as `tgt_data1.vcf.gz` except some entries are masked and heterzygotes are unphased. 
# specify simulation parameters target_data = ["data1.vcf.gz", "data4.vcf.gz"] memory = 47 missingprop = 0.1 samples = 1000 cd("/u/home/b/biona001/haplotype_comparisons/data") for data in target_data # if unphased target genotype file exist already, move to next step if isfile(data * "./tgt_masked_unphased_" * data) continue end open("filter.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". /u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# filter/mask data") println(io, "julia ./filter_and_mask.jl $data $samples") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub filter.sh`) rm("filter.sh", force=true) sleep(2) end # ## Step 2: prephasing using beagle 4.1 # # + data1: 7 hours 18 minutes 19 seconds # + # + # + data4: 3 hours 48 minutes 36 seconds cd("/u/home/b/biona001/haplotype_comparisons/data") for data in target_data open("prephase.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") 
println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". /u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run prephasing using beagle 4.1") println(io, "java -Xss5m -Xmx$(memory)g -jar beagle4.1.jar gt=./tgt_masked_unphased_$(data) ref=./ref_$(data) niterations=0 out=./tgt_masked_phased_$(data)") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub prephase.sh`) rm("prephase.sh", force=true) sleep(2) end # ## Step 3: run MendelImpute widths = [400; 800; 1600] cd("/u/home/b/biona001/haplotype_comparisons/data") for data in target_data, width in widths # fast version open("mendel$width.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". 
/u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run MendelImpute (fast)") println(io, "julia mendel_fast.jl $data $width") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub mendel$width.sh`) rm("mendel$width.sh", force=true) sleep(2) # dynamic programming version open("dp_mendel$width.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". 
/u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run MendelImpute (dynamic programming)") println(io, "julia mendel_dp.jl $data $width") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub dp_mendel$width.sh`) rm("dp_mendel$width.sh", force=true) sleep(2) end # ## Step 3: run Beagle 5 cd("/u/home/b/biona001/haplotype_comparisons/data") for data in target_data open("beagle.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". 
/u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run beagle 5.0 for imputation") println(io, "java -Xmx$(memory)g -jar beagle5.0.jar gt=tgt_masked_phased_$(data).vcf.gz.vcf.gz ref=ref_$(data).vcf.gz out=beagle_imputed_$(data)") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub beagle.sh`) rm("beagle.sh", force=true) sleep(2) end # ## Run Minimac4 cd("/u/home/b/biona001/haplotype_comparisons/data") for data in target_data # first convert vcf files to m3vcf files using minimac3 open("minimac3.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". 
/u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run minimac3 to convert vcf files to m3vcf files") println(io, "minimac3 --refHaps ref_$(data).vcf.gz --processReference --prefix ref_$(data)") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub minimac3.sh`) rm("minimac3.sh", force=true) sleep(2) # run minimac4 for imputation open("minimac4.sh", "w") do io println(io, "#!/bin/bash") println(io, "#\$ -cwd") println(io, "# error = Merged with joblog") println(io, "#\$ -o joblog.\$JOB_ID") println(io, "#\$ -j y") println(io, "#\$ -l arch=intel-X5650,exclusive,h_rt=24:00:00,h_data=$(memory)G") println(io, "# Email address to notify") println(io, "#\$ -M \$USER@mail") println(io, "# Notify when") println(io, "#\$ -m a") println(io) println(io, "echo \"Job \$JOB_ID started on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID started on: \" `date `") println(io) println(io, "# load the job environment:") println(io, ". /u/local/Modules/default/init/modules.sh") println(io, "module load julia/1.2.0") println(io, "module load R/3.5.1") println(io, "module load java/1.8.0_111") println(io) println(io, "# run minimac 4 for imputation") println(io, "minimac4 --refHaps ref_$(data).m3vcf --haps tgt_masked_phased_$(data).vcf.gz.vcf.gz --prefix minimac_imputed_$(data) --format GT") println(io) println(io, "echo \"Job \$JOB_ID ended on: \" `hostname -s`") println(io, "echo \"Job \$JOB_ID ended on: \" `date `") println(io) end # submit job run(`qsub minimac4.sh`) rm("minimac4.sh", force=true) sleep(2) end
simulation/compare_sys.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Gaussian naive Bayes on the sklearn breast-cancer dataset:
# load the data, split it, standardize the features, fit, and report accuracy.
# (Fix: `from sklearn import datasets` was imported twice; the duplicate is removed.)
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB, MultinomialNB
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

cancer = datasets.load_breast_cancer()
x = cancer.data
y = cancer.target
x[:2]

# +
# Hold out 20% of the samples for testing. NOTE(review): no random_state is
# passed, so the split (and the printed accuracy) differs on every run.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

# Fit the scaler on the training split only, then apply the same transform to
# both splits — fitting on the full data would leak test statistics.
scaler = preprocessing.StandardScaler().fit(x_train)
x_train = scaler.transform(x_train)

model = GaussianNB()
model.fit(x_train, y_train)

x_test = scaler.transform(x_test)
y_pred = model.predict(x_test)

# accuracy_score returns the fraction of correct predictions;
# normalize=False returns the raw count of correct samples instead.
accuracy = accuracy_score(y_test, y_pred)
num_correct_samples = accuracy_score(y_test, y_pred, normalize=False)

print('number of correct sample: {}'.format(num_correct_samples))
print('accuracy: {}'.format(accuracy))
.ipynb_checkpoints/mod12-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: '''Python Interactive''' # language: python # name: 0273d3a9-be6d-4326-9d0d-fafd7eacc490 # --- # # Monte Carlo Methods # Import relevant libraries import numpy as np import matplotlib.pyplot as plt from scipy.linalg import sqrtm # Initialize variables; these are the settings for the simulation mu1 = 0.1 mu2 = 0.2 sg1 = 0.05 sg2 = 0.1 dt = 1 / 252 T = 1 L = int(T / dt) rho = 0.5 S0 = 1 # Create the Monte Carlo Simulation. Two random walks are created, both representing stock price paths. # + # %matplotlib notebook plt.figure() S1 = [S0] S2 = [S0] eps1 = np.random.normal(size=(L)) e12 = np.random.normal(size=(L)) eps2 = rho * eps1 + np.sqrt(1 - rho ** 2) * e12 for i in range(1, L): S1.append(S1[-1] * np.exp((mu1 - 0.5 * sg1 ** 2) * dt + sg1 * eps1[i] * np.sqrt(dt))) S2.append(S2[-1] * np.exp((mu2 - 0.5 * sg2 ** 2) * dt + sg2 * eps2[i] * np.sqrt(dt))) plt.plot(S1) plt.plot(S2) plt.show() # - R = np.array([[1, 0.4, 0.4], [0.4, 1, 0.2], [-0.4, 0.2, 1]]) X = sqrtm(R) @ np.random.normal(size=(3, int(1e5))) phi = np.corrcoef(X) # ## Antitheic MC simulation, basic idea # # The variance (STD) of the antitheic MC is less than half of the basic MC. f = lambda x: np.exp(x) x = np.random.normal(size=(100000)) np.mean(f(x)), np.std(f(x)), np.mean((f(x) + f(-x)) / 2), np.std((f(x) + f(-x)) / 2)
Python/monte_carlo/Monte Carlo Simulations of Multiple Correlated Stocks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Q5aSQdvJ4C4Y" # # <NAME> # + [markdown] editable=true id="hyUP-UXo4C4Z" # Hasta ahora, nos hemos centrado principalmente en datos unidimensionales y bidimensionales, almacenados en objetos Pandas `` Series `` y `` DataFrame ``, respectivamente. # # A menudo, es útil ir más allá y almacenar datos de mayor dimensión, es decir, datos indexados por más de una o dos claves. # # Si bien Pandas proporciona objetos `` Panel `` y `` Panel4D `` (que mencionaremos más adelante) que manejan de forma nativa datos tridimensionales y tetradimensionales, un método mucho más común en la práctica es hacer uso de la *indexación jerárquica* (también conocida como *indexación múltiple*) para incorporar múltiples índices (o niveles) dentro de un solo índice. # # De esta manera, los datos de mayor dimensión se pueden representar de forma compacta dentro de los objetos familiares unidimensionales `` Series `` y bidimensionales `` DataFrame ``. # # En esta sección, exploraremos la creación directa de objetos `` MultiIndex ``, consideraciones al indexar, dividir y calcular estadísticas a través de datos indexados múltiples, y rutinas útiles para realizar conversiones entre representaciones simples e indexadas jerárquicamente. # # Comenzamos con las importaciones estándar: # + editable=true id="iK4FG9Zo4C4a" import pandas as pd import numpy as np # + [markdown] editable=true id="iDvudf7_4C4d" # ## Series con índices múltiples # # Comencemos por considerar cómo podríamos representar datos bidimensionales dentro de una `` Serie `` unidimensional. # Para mayor concreción, consideraremos una serie de datos donde cada punto tiene un carácter y una clave numérica. 
# + [markdown] editable=true id="mKbJFTwe4C4e" # ### El camino que deberías evitar # # Supongamos que nos gustaría rastrear datos sobre los estados de USA para dos años diferentes. # Con las herramientas de Pandas que ya hemos cubierto, es posible que tengamos la tentación de usar simplemente tuplas de Python como claves: # + editable=true id="qW5y5hdD4C4e" jupyter={"outputs_hidden": false} outputId="8b304f78-9ffe-428c-96bb-a018d6c8d0e3" index = [('California', 2000), ('California', 2010), ('New York', 2000), ('New York', 2010), ('Texas', 2000), ('Texas', 2010)] populations = [33871648, 37253956, 18976457, 19378102, 20851820, 25145561] pop = pd.Series(populations, index=index) pop # + [markdown] editable=true id="1p-gv66R4C4i" # Con este esquema de indexación, podemos indexar o dividir directamente la serie en función de este índice múltiple: # + editable=true id="nJBmNB8R4C4i" jupyter={"outputs_hidden": false} outputId="add786c1-4473-432a-b749-e4613ba4a948" pop[('California', 2010):('Texas', 2000)] # + [markdown] editable=true id="PC0T6Yu_4C4k" # Pero la conveniencia termina ahí. Por ejemplo, si necesitásemos seleccionar todos los valores de 2010, necesitaríamos hacer algunas tareas desordenadas (y lentas) para conseguirlo: # + editable=true id="YtzVNVTC4C4l" jupyter={"outputs_hidden": false} outputId="9c206ce5-4e0c-41ca-d3d9-fc9205a5962a" pop[[i for i in pop.index if i[1] == 2010]] # + [markdown] editable=true id="mC25gfZl4C4n" # Con esto conseguimos el resultado deseado, pero no es tan limpio (o tan eficiente para grandes conjuntos de datos) como la sintaxis de segmentación que nos proporciona Pandas. # + [markdown] editable=true id="c4A7XVpc4C4o" # ### Un camino mejor: Pandas MultiIndex # # Afortunadamente, Pandas ofrece una manera mucho mejor, que será más eficiente y nos evitará posibles quebraderos de cabeza. 
# # Nuestra indexación basada en tuplas es esencialmente un índice múltiple rudimentario, y el tipo Pandas `` MultiIndex `` nos ofrece el tipo de operaciones que deseamos tener. # # Podemos crear un MultiIndex a partir de las tuplas de la siguiente manera: # + editable=true id="3dvz98ZI4C4o" jupyter={"outputs_hidden": false} outputId="f941d9c4-3eac-47c4-eb95-e502cae604f2" index = pd.MultiIndex.from_tuples(index) index # + [markdown] editable=true id="fkqqKEJc4C4r" # Fíjate que el `` MultiIndex `` contiene múltiples niveles de indexación. En este caso, los nombres de los estados y los años. Pero el nivel no termina ahí, sino que podríamos enlazar más niveles. # # Si volvemos a indexar nuestra serie con este `` MultiIndex ``, podremos ver la representación jerárquica de los datos: # + editable=true id="FFz0H-CS4C4r" jupyter={"outputs_hidden": false} outputId="5f96d61a-9503-4a80-e94c-70deca6a3b9d" pop = pop.reindex(index) pop # + [markdown] editable=true id="5HM_OgOe4C4u" # Aquí, las dos primeras columnas de la representación de la `` Serie `` muestran los valores del MultiIndex, mientras que la tercera columna muestra los datos. # Fíjate que faltan algunas entradas en la primera columna: en esta representación de índices múltiples, cualquier entrada en blanco indica el mismo valor que la línea que está encima. # + [markdown] editable=true id="JupNsBd24C4u" # Ahora, para acceder a todos los datos para los que el segundo índice es 2010, simplemente podemos usar la indexación de Pandas que hemos viso hasta ahora: # + editable=true id="CtvYBxkG4C4v" jupyter={"outputs_hidden": false} outputId="de061ec4-092a-4bfd-f171-b6e0f8528519" pop[:, 2010] # + [markdown] editable=true id="ysxQV0h74C4x" # El resultado es una matriz indexada individualmente con solo las claves que nos interesan. # # Esta sintaxis es mucho más apropiada (además de eficiente) que la solución de indexación múltiple basada en tuplas caseras que hemos visto antes. 
# # A continuación, analizaremos más a fondo este tipo de operación de indexación en datos indexados jerárquicamente. # + [markdown] editable=true id="ZLg3mxg94C4x" # ### MultiIndex como dimensión extra # # Si pensamos un poco, podríamos haber almacenado fácilmente los mismos datos utilizando un simple `` DataFrame `` con sus etiquetas de índice y columna. # # De hecho, Pandas se construye con esta equivalencia en mente. El método `unstack ()` convertirá rápidamente una `Serie` indexada de forma múltiple en un ``DataFrame`` indexado convencionalmente: # + editable=true id="SQiwqeuA4C4y" jupyter={"outputs_hidden": false} outputId="e000ac83-965e-4a01-d8fd-865b7a375634" pop_df = pop.unstack() pop_df # + [markdown] editable=true id="3zpwSh4i4C40" # De forma análoga, el método ``stack()`` hace exactamente lo contrario: # + editable=true id="xS9vIP1g4C40" jupyter={"outputs_hidden": false} outputId="9bc147fd-8e27-4be6-e562-8be9a54da249" pop_df.stack() # + [markdown] editable=true id="EF7B7Xlu4C43" # Al ver esto, es posible preguntarse por qué utilizar la indexación jerárquica. # # La razón es simple: así como pudimos usar la indexación múltiple para representar datos bidimensionales dentro de una `` Serie `` unidimensional, también podemos usarla para representar datos de tres o más dimensiones en una `` Serie `` o `` DataFrame ``. # # Cada nivel adicional en un índice múltiple representa una dimensión adicional de datos; aprovechar esta propiedad nos da mucha más flexibilidad en los tipos de datos que podemos representar. Concretamente, podríamos querer agregar otra columna de datos demográficos para cada estado de cada año (por ejemplo, población menor de 18 años). 
Con un `` MultiIndex `` esto es tan sencillo como agregar otra columna al `` DataFrame ``: # + editable=true id="EMahoI114C43" jupyter={"outputs_hidden": false} outputId="d9b9ffe4-5f9f-4b99-f49d-c4ffcc7e5f4b" pop_df = pd.DataFrame({'total': pop, 'under18': [9267089, 9284094, 4687374, 4318033, 5906301, 6879014]}) pop_df # + [markdown] editable=true id="z60a1aEc4C45" # Además, todas las ufuncs que hemos visto, así como otras funcionalidades comentadas, también funcionan con los índices jerárquicos. # # A continuación, calculamos la fracción de personas menores de 18 años por año, dados los datos anteriores: # + editable=true id="zJyAJoUP4C46" jupyter={"outputs_hidden": false} outputId="00d81b10-f9ce-4154-f1b3-2552c7844fe3" f_u18 = pop_df['under18'] / pop_df['total'] f_u18.unstack() # + [markdown] editable=true id="cnxvoAwU4C48" # Esto nos permite manipular y realizar una exploración de manera rápida y fácil, incluso con datos de alta dimensión. # + [markdown] editable=true id="NMFLAhz64C48" # ## Métodos de creación con MultiIndex # # La forma más sencilla de construir una `` Serie `` o `` DataFrame `` indexados de forma múltiple es simplemente pasar una lista de dos o más matrices de índices al constructor. Por ejemplo: # + editable=true id="MPpeGESV4C48" jupyter={"outputs_hidden": false} outputId="272b5222-f627-441f-95a3-abbabf31ccd9" df = pd.DataFrame(np.random.rand(4, 2), index=[['a', 'a', 'b', 'b'], [1, 2, 1, 2]], columns=['data1', 'data2']) df # + [markdown] editable=true id="5rh7RwYH4C4-" # El trabajo de crear el `` MultiIndex `` se realiza en segundo plano. 
# # De manera similar, si pasamos un diccionario con las tuplas apropiadas como claves, Pandas lo reconocerá automáticamente y usará un `` MultiIndex `` por defecto: # + editable=true id="wb1-jroZ4C4_" jupyter={"outputs_hidden": false} outputId="75383707-743d-49d2-9397-f85c465dc3f7" data = {('California', 2000): 33871648, ('California', 2010): 37253956, ('Texas', 2000): 20851820, ('Texas', 2010): 25145561, ('New York', 2000): 18976457, ('New York', 2010): 19378102} pd.Series(data) # + [markdown] editable=true id="9o8f8pLv4C5E" # Sin embargo, a veces es útil crear explícitamente un `` MultiIndex ``. Veamos un par de estos métodos: # + [markdown] editable=true id="ugSyyTVA4C5E" # ### Constructores explícitos MultiIndex # # Para mayor flexibilidad en la forma en que se construye el índice, en su lugar puede utilizar los constructores de métodos de clase disponibles en el objeto `` pd.MultiIndex ``. # # Por ejemplo, como hemos hecho antes, podríamos construir el `` MultiIndex `` a partir de una lista simple de matrices que dan los valores de índice dentro de cada nivel: # + editable=true id="xDr_swbL4C5F" jupyter={"outputs_hidden": false} outputId="381cdef7-c0b2-4af6-aa72-0a20ec2c25e1" pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [1, 2, 1, 2]]) # + [markdown] editable=true id="Gjoh_w5z4C5H" # Podemos construirlo a partir de una lista de tuplas, especificando cada posible valor de la tupla para cada punto: # + editable=true id="qUBCiGXw4C5I" jupyter={"outputs_hidden": false} outputId="ac33a470-c3d0-4e7f-a203-3bf4b4f14722" pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)]) # + [markdown] editable=true id="dwExBtYe4C5K" # Podríamos incluso construirlo a partir del producto cartesiano de los índices únicos: # + editable=true id="r2aeDnv24C5K" jupyter={"outputs_hidden": false} outputId="fb650f91-9197-4147-9e93-2175d2a28ec9" pd.MultiIndex.from_product([['a', 'b'], [1, 2]]) # + [markdown] editable=true id="1NRCYVho4C5M" # De manera similar, podríamos 
construir un ``MultiIndex`` directamente usando su codificación interna pasándole los ``levels`` (una lista de listas que contienen valores de índice disponibles para cada nivel) y `` codes `` (una lista de listas que hacen referencia a estas etiquetas): # + editable=true id="sud6ly8U4C5M" jupyter={"outputs_hidden": false} outputId="9c6126dc-0c85-4d7b-cbdf-a9e7c739ce6e" pd.MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) # + [markdown] editable=true id="ll90pPKl4C5O" # Cualquiera de estos objetos se puede pasar como el argumento "índice" al crear una ``Serie`` o un ``DataFrame``, o se puede pasar al método `` reindex `` de una `` Serie`` o `` DataFrame ``. # + [markdown] editable=true id="5TGFpClg4C5O" # ### MultiIndex: nombrando los niveles # # A veces, es conveniente nombrar los niveles del `` MultiIndex ``. # # Esto se puede lograr pasando el argumento `` names `` a cualquiera de los constructores `` MultiIndex `` anteriores, o configurando el atributo `` names `` del índice después de haberlo construido: # + editable=true id="3fcikBaN4C5P" jupyter={"outputs_hidden": false} outputId="f607d7e4-eadd-4c20-f3fe-41a76b566f68" pop.index.names = ['state', 'year'] pop # + [markdown] editable=true id="MpRRmtTE4C5Q" # Con conjuntos de datos más complicados, esta puede ser una forma útil de realizar un seguimiento del significado de varios valores de índice. # + [markdown] editable=true id="R6bZC5cj4C5R" # ### MultiIndex para columnas # # En un ``DataFrame``, las filas y columnas son completamente simétricas, y así como las filas pueden tener múltiples niveles de índices, las columnas también pueden tener múltiples niveles. 
# # Creemos una maqueta de algunos datos médicos: # + editable=true id="phtS-Wvb4C5R" jupyter={"outputs_hidden": false} outputId="3f5ac3dd-763c-48b3-e872-e8a7eacae309" # hierarchical indices and columns index = pd.MultiIndex.from_product([[2013, 2014], [1, 2]], names=['year', 'visit']) columns = pd.MultiIndex.from_product([['Bob', 'Guido', 'Sue'], ['HR', 'Temp']], names=['subject', 'type']) # mock some data data = np.round(np.random.randn(4, 6), 1) data[:, ::2] *= 10 data += 37 # create the DataFrame health_data = pd.DataFrame(data, index=index, columns=columns) health_data # + [markdown] editable=true id="OjkcbQrR4C5T" # Aquí vemos dónde la indexación múltiple para filas y columnas puede ser especialmente útil. # # Se trata fundamentalmente de datos de cuatro dimensiones, donde las dimensiones son el sujeto (subject), el tipo de medida (type), el año (year) y el número de visita (visit). # # Con esto en su lugar, podemos, por ejemplo, indexar la columna de nivel superior por el nombre de la persona y obtener un `` DataFrame `` completo que contenga solo la información de esa persona: # + editable=true id="PMXmcANH4C5T" jupyter={"outputs_hidden": false} outputId="3f320e2e-9961-4c79-860a-7e8d18e71444" health_data['Guido'] # + [markdown] editable=true id="xgaC9CWC4C5V" # Para registros complicados que contengan múltiples mediciones etiquetadas en múltiples momentos para muchos sujetos (personas, países, ciudades, etc.), el uso de filas y columnas jerárquicas puede ser extremadamente conveniente. # + [markdown] editable=true id="vWCdaI004C5V" # ## Indexing y Slicing en MultiIndex # # La indexación y la división en un `` MultiIndex `` está diseñada para ser intuitiva, pensando en los índices como dimensiones adicionales. # # Primero, veremos la indexación de `` Series `` indexadas de forma múltiple, y luego los `` DataFrame `` indexados de forma múltiple. 
# + [markdown] editable=true id="IVHjt-pM4C5W" # ### Series indexadas de manera múltiple # # Consideremos la `` Serie `` de poblaciones estatales con índices múltiples que vimos anteriormente: # + editable=true id="zF5LHkGw4C5W" jupyter={"outputs_hidden": false} outputId="38f6d65f-eb3b-4e31-8497-9dcca9998569" pop # + [markdown] editable=true id="Qb3a-PHA4C5Y" # Podemos acceder a elementos individuales indexando con varios términos: # + editable=true id="cuyBixt74C5Y" jupyter={"outputs_hidden": false} outputId="33548699-219e-4d6b-b2fa-9fe66dbb8bf6" pop['California', 2000] # + [markdown] editable=true id="bUC_hzzq4C5Z" # El `` MultiIndex `` también admite indexación parcial, o indexar solo uno de los niveles del índice. # # El resultado es otra `` Serie `` manteniendo los índices de nivel inferior: # + editable=true id="mGeP3ElL4C5a" jupyter={"outputs_hidden": false} outputId="5d042b80-2e8c-4446-8cc7-70186f8d3571" pop['California'] # + [markdown] editable=true id="RM5E-0oZ4C5c" # También se puede usar el slicing parcial, siempre que el `` Índice múltiple `` esté ordenado: # + editable=true id="V4Qatdkm4C5e" jupyter={"outputs_hidden": false} outputId="90135a07-cf56-4e4d-cdd5-0ccb67bb3637" pop.loc['California':'New York'] # + [markdown] editable=true id="EzF6CkJQ4C5f" # Con índices ordenados, la indexación parcial se puede realizar en niveles inferiores pasando un segmento vacío en el primer índice: # + editable=true id="uqgtH4sk4C5g" jupyter={"outputs_hidden": false} outputId="b1785369-98ec-4fe5-d5b1-f1a25641ae18" pop[:, 2000] # + [markdown] editable=true id="CZqmTHU34C5i" # También funcionan otros tipos de indexación y selección discutidos anteriormente; por ejemplo, selección basada en máscaras booleanas: # + editable=true id="yV0beOx54C5i" jupyter={"outputs_hidden": false} outputId="d406550d-1175-4bc5-9814-134dc9e12484" pop[pop > 22000000] # + [markdown] editable=true id="3_Xk-Wsz4C5k" # Y también podemos seleccionar una lista de valores del índice: # + 
editable=true id="V8pgGvNU4C5k" jupyter={"outputs_hidden": false} outputId="d73fef3a-d1da-4d51-e15a-621c4f966f97" pop[['California', 'Texas']] # + [markdown] editable=true id="w057ig5h4C5l" # ### DataFrames con índices múltiples # # # Un `` DataFrame `` indexado de forma múltiple se comporta de manera similar. Considerando el ejemplo médico de antes: # + editable=true id="Ue4NSn2-4C5m" jupyter={"outputs_hidden": false} outputId="264c8ae5-cbe8-41ad-9f15-9f23a5ac770b" health_data # + [markdown] editable=true id="DnVCpTHH4C5n" # Recuerda que las columnas son prioritarias en un `` DataFrame ``, y la sintaxis utilizada para las `` Series `` con índices múltiples se aplica a las columnas. # # Por ejemplo, podemos recuperar los datos de frecuencia cardíaca de Guido con una simple operación: # + editable=true id="kNWZA0sU4C5o" jupyter={"outputs_hidden": false} outputId="a22d8538-5917-4c3e-a5d9-2171ae733713" health_data['Guido', 'HR'] # + [markdown] editable=true id="G4T5cDsH4C5q" # Del mismo modo, al igual que con el caso de índice único, podemos usar los indexadores `` loc``, ``iloc`` e ``ix``. Por ejemplo: # + editable=true id="ECuZIHTO4C5q" jupyter={"outputs_hidden": false} outputId="1de96802-6741-4c1d-b436-1c8180758a14" health_data.iloc[:2, :2] # + [markdown] editable=true id="PdO0CkUA4C5s" # Estos indexadores proporcionan una vista similar a una matriz de los datos bidimensionales subyacentes, pero a cada índice individual en "loc" o "iloc" se le puede pasar una tupla de múltiples índices. 
Por ejemplo: # + editable=true id="jKMxaLn14C5s" jupyter={"outputs_hidden": false} outputId="392c96d5-ab83-4f8b-ec46-765c565a2e08" health_data.loc[:, ('Bob', 'HR')] # + [markdown] editable=true id="CbFUlYCd4C5t" # Trabajar con slices dentro de estas tuplas de índice no es especialmente conveniente; intentar crear un slice dentro de una tupla dará lugar a un error de sintaxis: # + editable=true id="EFxlKApB4C5u" jupyter={"outputs_hidden": false} outputId="a468c8ad-7c7c-4ac5-f758-72fdc2198705" health_data.loc[(:, 1), (:, 'HR')] # + [markdown] editable=true id="2M4RGcKY4C5w" # Podemos evitar esto construyendo explícitamente el slice deseado usando la función incorporada `` slice () `` de Python, pero una mejor manera en este contexto es usar un objeto ``IndexSlice``, que Pandas proporciona precisamente para esta situación. # # Por ejemplo: # + editable=true id="6oRFivo84C5x" jupyter={"outputs_hidden": false} outputId="c0c43380-a27f-4911-943f-6fff206d7fb3" idx = pd.IndexSlice health_data.loc[idx[:, 1], idx[:, 'HR']] # + [markdown] editable=true id="6p55X40m4C5y" # Hay muchas formas de interactuar con los datos en `` Series `` y `` DataFrame `` indexados de forma múltiple, y como sucede con muchas de las herramientas que hemos visto, la mejor manera de familiarizarse con ellos es probarlas. # + [markdown] editable=true id="nrp4bkLe4C5z" # ## Reorganización de MultiIndex # # Una de las claves para trabajar con datos indexados de forma múltiple es saber cómo transformar los datos de forma eficaz. # # Hay una serie de operaciones que conservarán toda la información en el conjunto de datos, pero que nos permitirán reorganizar los datos para facilitar los cálculos. 
# # Anteriormente, hemos visto un breve ejemplo de esto en los métodos `` stack () `` y ``unstack ()``, pero hay muchas más formas de controlar con precisión la reordenación de datos entre índices jerárquicos y columnas: # + [markdown] editable=true id="Cg3wuLRK4C50" # ### Índices ordenados y no ordenados # # Anteriormente, hemos comentado que muchas de las operaciones de slicing de `` MultiIndex `` fallarán si el índice no está ordenado, lo cual desarrollaremos a continuación. # # Comenzaremos creando algunos datos con índices múltiples simples donde los índices no están ordenados lexicográficamente: # + editable=true id="bTBi2D4e4C50" jupyter={"outputs_hidden": false} outputId="b244a771-1840-4f30-f7f1-4840212bf439" index = pd.MultiIndex.from_product([['a', 'c', 'b'], [1, 2]]) data = pd.Series(np.random.rand(6), index=index) data.index.names = ['char', 'int'] data # + [markdown] editable=true id="hYbAXoi24C52" # Si intentamos tomar un slice parcial de este índice, saltará un error: # + editable=true id="UBwKdZHA4C52" jupyter={"outputs_hidden": false} outputId="dbdcb02d-52a0-427e-e46e-250787c62ee5" try: data['a':'b'] except KeyError as e: print(type(e)) print(e) # + [markdown] editable=true id="HCXgh7sj4C54" # Aunque no está del todo claro en el mensaje de error, este es el resultado de que el MultiIndex no está ordenado. # # Por diversas razones, los slices parciales y otras operaciones similares requieren que los niveles del `` MultiIndex `` estén ordenados (de forma lexicográfica). # # Pandas proporciona una serie de rutinas de conveniencia para realizar este tipo de clasificación; como son los métodos `` sort_index () `` y `` sortlevel () `` del `` DataFrame ``. 
# # En este caso, usaremos el más simple, `` sort_index () ``: # + editable=true id="O45nUK-74C54" jupyter={"outputs_hidden": false} outputId="84399026-752e-4a5f-e051-2486041a4cea" data = data.sort_index() data # + [markdown] editable=true id="QyFfNdUi4C56" # Con el índice ordenado, ya podemos utilizar el slicing: # + editable=true id="2oUJdjcZ4C56" jupyter={"outputs_hidden": false} outputId="b36cd46f-b425-49ac-bb39-6c5cfaf36720" data['a':'b'] # + [markdown] editable=true id="JYuJwHbT4C58" # ### Stacking y unstacking de índices # # Como vimos brevemente antes, es posible convertir un conjunto de datos de un índice múltiple apilado (stacked) a una representación bidimensional simple, especificando opcionalmente el nivel a usar: # + editable=true id="-k5cN7tA4C58" jupyter={"outputs_hidden": false} outputId="8a3d7bfd-7ce9-4664-f788-e0c5cb66b3d4" pop.unstack(level=0) # + editable=true id="zuEF8xEC4C59" jupyter={"outputs_hidden": false} outputId="9b25c814-41a7-42b3-d421-b7cb46d21c8b" pop.unstack(level=1) # + [markdown] editable=true id="ZE4QPzzu4C5-" # El opuesto a ``unstack()`` es ``stack()``, el cual aquí puede ser usado para recuperar el DataFrame original: # + editable=true id="FVOF36iJ4C5_" jupyter={"outputs_hidden": false} outputId="7a723083-cc24-48ac-c207-2cb0f7a29b1d" pop.unstack().stack() # + [markdown] editable=true id="6HA_gZ-F4C6B" # ### Especificación y reseteo de Index # # Otra forma de reorganizar los datos jerárquicos es convertir las etiquetas de índice en columnas; esto se puede lograr con el método `` reset_index ``. # Llamar a esto en el diccionario de ``population`` dará como resultado un `` DataFrame `` con una columna de ``state`` y ``year`` que contiene la información que anteriormente estaba en el índice. 
# # Para mayor claridad, opcionalmente podemos especificar el nombre de los datos para la representación de la columna: # + editable=true id="sikkTaw34C6B" jupyter={"outputs_hidden": false} outputId="c8a0686c-97ac-4c48-987a-1bcbe92e4f34" pop_flat = pop.reset_index(name='population') pop_flat # + [markdown] editable=true id="gBJjuQk_4C6C" # A menudo, cuando se trabaja con datos del mundo real, los datos de entrada sin procesar son de este estilo y es útil crear un `` MultiIndex `` a partir de los valores de las columnas. # # Esto se puede hacer con el método `` set_index `` del `` DataFrame ``, que devuelve un `` DataFrame `` con un índice múltiple: # + editable=true id="rmne2hV34C6C" jupyter={"outputs_hidden": false} outputId="afed0f08-d28f-433c-872a-70dd46f726ae" pop_flat.set_index(['state', 'year']) # + [markdown] editable=true id="GJaLjWwY4C6F" # En la práctica, veremos que este tipo de reindexación es uno de los patrones más útiles al encontrar conjuntos de datos del mundo real. # + [markdown] editable=true id="hvOjTWVI4C6G" # ## Agregaciones de datos con MultiIndex # # Anteriormente, hemos visto que Pandas tiene métodos de agregación de datos integrados, como `` mean () ``, `` sum () `` y `` max () ``. # # Para los datos indexados jerárquicamente, estos pueden pasar a un parámetro de `` level `` que controla en qué subconjunto de los datos se calcula el agregado. # # Por ejemplo, volvamos a nuestros datos de salud: # + editable=true id="LwdddOkc4C6G" jupyter={"outputs_hidden": false} outputId="38cff0d1-9c00-4b8c-938d-6bd04712d523" health_data # + [markdown] editable=true id="YHzsstZY4C6H" # Puede que nos apetezca promediar las mediciones en las dos visitas de cada año. 
Podemos hacer esto nombrando el nivel de índice que nos gustaría explorar, en este caso el año: # + editable=true id="i2L78L8b4C6I" jupyter={"outputs_hidden": false} outputId="45e1aa5b-3ead-443c-8065-12db87ff0c46" data_mean = health_data.mean(level='year') data_mean # + [markdown] editable=true id="gRL-cKNt4C6K" # Haciendo uso adicional de la palabra clave ``axis``, también podemos tomar la media entre los niveles en las columnas: # + editable=true id="Bs2WitfR4C6L" jupyter={"outputs_hidden": false} outputId="818a3c14-1532-4fde-e7e9-e4f8ee6b4570" data_mean.mean(axis=1, level='type') # + [markdown] editable=true id="4Gw2OwPg4C6M" # Así, en solamente dos líneas, hemos podido encontrar la frecuencia cardíaca (HR) y la temperatura (Temp) promedio medidas entre todos los sujetos en todas las visitas de cada año. # Esta sintaxis es en realidad un atajo a la funcionalidad `` GroupBy ``, que ya hemos visto. # # Si bien este es un ejemplo de prueba, muchos conjuntos de datos del mundo real tienen una estructura jerárquica similar. # + [markdown] editable=true id="5NT9FKwa4C6N" # ## EXTRA: Panel Data # # Pandas tiene algunas otras estructuras de datos fundamentales que aún no hemos discutido, como son los objetos `` pd.Panel `` y `` pd.Panel4D ``. # # Estos se pueden considerar, respectivamente, como generalizaciones tridimensionales y tetradimensionales de las estructuras (unidimensionales) de `` Series `` y (bidimensionales) de `` DataFrame ``. # Una vez familiarizados con la indexación y manipulación de datos en una `` Serie `` o `` DataFrame``, el uso de `` Panel `` y `` Panel4D `` es relativamente sencillo. En particular, los indexadores "ix", "loc" y "iloc" discutidos en su día se extienden fácilmente a estos estructuras dimensionales. # # # No vamos a ver estos tipos en este bootcamp, ya que en la mayoría de los casos la indexación múltiple es una representación más útil y conceptualmente más simple para trabajar con los datos que utilizar dimensiones superiores. 
# Además, los datos de panel son fundamentalmente una representación de datos densa, mientras que la indexación múltiple es fundamentalmente una representación de datos dispersos. # A medida que aumenta el número de dimensiones, la representación densa puede volverse muy ineficiente para la mayoría de los conjuntos de datos del mundo real. # Sin embargo, para la aplicación especializada ocasional, estas estructuras pueden ser útiles. # - # ## Para probar # # Esta vez no vamos a realizar un ejercicio como tal, vamos a dejar volar nuestras dotes de analista. Coge cualquier dataset que hayamos estudiado en clase y crea diferentes tablas en base a las combinaciones de índices que más te interesen para obtener relaciones de datos interesantes.
semana_9/dia_1/1_Índice_Jerárquico.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # some useful modules import numpy as np import matplotlib.pyplot as plt import subprocess import matplotlib.animation as animation from matplotlib.ticker import LogLocator import matplotlib.ticker as ticker from matplotlib.pyplot import cm import re # our custom specdist module where all our SD computations are done import specdist as sd # let's make a directory wher we save our figures today: path_to_figures = sd.path_to_sd_projects + "figures/" subprocess.call(['mkdir','-p',path_to_figures]) print("figure directory: %s"%path_to_figures) # some labels for the figures label_intensity = "Intensity, "+r'$\Delta I_x \quad [\mathrm{Jy/sr}]$' label_scaled_frequency = "Scaled Frequency, "+ r'$x=h\nu/k_{_\mathrm{B}}T_{_\mathrm{CMB}}$' label_y_firas = r'$y_\mathrm{COBE}=1.5\times 10^{-5}$' label_mu_firas = r'$\mu_\mathrm{COBE}=9\times 10^{-5}$' label_redshift = 'Redshift, '+r'$z$' label_xe = 'Free Electron Fraction, '+r'$X_\mathrm{e}$' # some conversion factors xinj_to_mass = 4.698e-4 #m in ev = 4.698e-4 xinj xinj_to_Einj = 4.698e-4/2. #Einj = 4.698e-4/2 xinj mass_to_Einj = 1./2. 
# - # # Plot standard mu and y distortions # get the distortions from sd module: Tcmb = 2.726 x_array = np.logspace(-8,2,1000) firas = sd.firas() y_dist = sd.GetYSpecDistAtTandX(firas.firas_y_1996_95_cl,Tcmb,x_array)*1e6 mu_dist = sd.GetMuSpecDistAtTandX_chluba(firas.firas_mu_1996_95_cl,Tcmb,x_array)*1e6 # + # plot the distortions: fig, ax = plt.subplots(1,1,figsize=(7,5)) label_size = 15 title_size = 15 legend_size = 25 handle_length = 1.5 x_min = 1e-3 x_max = 1e2 ymin = 1e-10*1e6 ymax = 1e-1*1e6 ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_ylabel(label_intensity,size=title_size) ax.set_xlabel(label_scaled_frequency,size=title_size) ax.yaxis.set_label_coords(-.12,0.5) ax.plot(x_array,y_dist,c='r',alpha = 1.,label=label_y_firas) ax.plot(x_array,-y_dist,c='r',alpha = 1.,ls='--') ax.plot(x_array,mu_dist,c='magenta',alpha = 1.,label=label_mu_firas) ax.plot(x_array,-mu_dist,c='magenta',alpha = 1.,ls='--') ax.yaxis.set_major_locator(LogLocator(numticks=15)) ax.yaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(False) #(3) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=5) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=5) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_major_locator(LogLocator(numticks=15)) ax.xaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) # for label in ax.xaxis.get_ticklabels()[::2]: # label.set_visible(False) #(3) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') ax.legend(loc=2,ncol=1,frameon=True,framealpha=0.6,fontsize = 12) fig.tight_layout() plt.savefig(path_to_figures + '/standard_mu_and_y_dist.pdf') plt.show() plt.close(fig) # - # # Run CosmoTherm in parallel and plot 
results # + # setting up the parameters: ct = sd.cosmotherm() ct.ct_Gamma_dec = 1e-14 ct.ct_Drho_rho_dec = 3.e-5 ct.ct_pi_energy_norm = 2 ct.ct_include_pi = 1 ct.ct_evolve_Xe = 1 ct.ct_zend = 1e-2 ct.ct_zlate = 1e3 ct.ct_reionisation_model = 0 ct.ct_include_collisions = 0 ct.ct_emission_absorption_mode = 0 ct.ct_lyc = 1 ct.ct_npts = 3000 ct.ct_T0 = 2.7255 ct.ct_Yp = 0.245407 ct.ct_N_eff = 3.046 ct.ct_Omega_m = 0.312331 ct.ct_Omega_b = 0.0491137 ct.ct_h = 0.675422 ct.save_Xe = 'yes' # defining which parameter values we want to compute: p_name = 'photon injection x_dec' xinjs = [5e-8,5e-5,5e-2,5e1,5e4] p_array = xinjs # the directory name fo the final spectra ct.save_dir_name = 'many_xinj' args = {} args['param_values_array'] = p_array args['param_name'] = p_name args['save_spectra'] = 'yes' # do the run: # sould take a couple of minutes (~5 minutes on my laptop) R = ct.run_cosmotherm_parallel(**args) # + fig, ax = plt.subplots(1,1,figsize=(7,5)) label_size = 13 title_size = 13 legend_size = 25 handle_length = 0.5 ymin=1e-4 ymax = 5e0 x_max = 1e4 x_min = 1 ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(label_redshift,size=title_size) ax.set_ylabel(label_xe,size=title_size) ax.yaxis.set_label_coords(-.1,0.5) ax.yaxis.set_major_locator(LogLocator(numticks=15)) ax.yaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(True) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=5) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=5) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) # manipulate x-axis ticks and labels ax.xaxis.set_major_locator(LogLocator(numticks=15)) ax.xaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) for label 
in ax.xaxis.get_ticklabels()[::2]: label.set_visible(True) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') i = 0 color=iter(cm.viridis(np.linspace(1,0,len(xinjs)))) for x in xinjs: ax.plot(R[i]['Xe_redshifts'],R[i]['Xe_values'],label=r'$x_\mathrm{inj,0}$='+sd.scientific_notation(x),c=next(color)) i+=1 ax.legend(loc=2,ncol=1,frameon=True,framealpha=0.6,fontsize = 10) fig.tight_layout() plt.savefig(path_to_figures + '/ct_Xe_many_xinjs.pdf') plt.show() plt.close(fig) # - # we have also access to the Distortions: R[0]['DI'] sd.path_to_ct_spectra_results # + # easiest when we work with figures is to re-access the results # from where the files have been saved: dir_name = 'many_xinj' fname = sd.path_to_ct_spectra_results + '/'+ dir_name + '/spectra_'+dir_name+'_Xe_values_ct.txt' Xe = [] with open(fname) as f: for line in f: x = line.strip() if x: if not x.startswith("#"): l = re.split(r'[\t]',x) Xe.append(l) for i in range(len(Xe)): Xe[i] = [float(zz) for zz in Xe[i]] fname = sd.path_to_ct_spectra_results + '/'+ dir_name + '/spectra_'+dir_name+'_Xe_redshifts_ct.txt' z = [] with open(fname) as f: for line in f: x = line.strip() if x: if not x.startswith("#"): l = re.split(r'[\t]',x) z.append(l) for i in range(len(z)): z[i] = [float(zz) for zz in z[i]] fname =sd.path_to_ct_spectra_results + '/'+ dir_name + '/spectra_'+dir_name+'_xinj_ct.txt' xinj = [] with open(fname) as f: for line in f: x = line.strip() if x: if not x.startswith("#"): l = re.split(r'[\t]',x) xinj.append(l) for i in range(len(z)): xinj[i] = [float(zz) for zz in xinj[i]] fname = sd.path_to_ct_spectra_results + '/'+ dir_name + '/spectra_'+dir_name+'_x_ct.txt' x_ct = [] with open(fname) as f: for line in f: x = line.strip() if x: if not x.startswith("#"): l = re.split(r'[\t]',x) x_ct.append(l) for i in range(len(z)): x_ct[i] = [float(zz) for zz in x_ct[i]] fname = sd.path_to_ct_spectra_results + '/'+ dir_name + '/spectra_'+dir_name+'_DI_ct.txt' DI_ct = [] with open(fname) as f: for line in f: x = 
line.strip() if x: if not x.startswith("#"): l = re.split(r'[\t]',x) DI_ct.append(l) for i in range(len(z)): DI_ct[i] = [float(zz) for zz in DI_ct[i]] # + #fig, ax_array = plt.subplots(2,3,figsize=(30,20)) fig, (ax1,ax2) = plt.subplots(2,1,figsize=(7,10)) label_size = 13 title_size = 13 legend_size = 25 handle_length = 0.5 ax = ax1 ymin=1e-4 ymax = 5e0 x_max = 1e4 x_min = 1 ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(label_redshift,size=title_size) ax.set_ylabel(label_xe,size=title_size) ax.yaxis.set_label_coords(-.1,0.5) ax.yaxis.set_major_locator(LogLocator(numticks=15)) #(1) ax.yaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) #(2) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(True) #(3) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=5) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=5) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) # manipulate x-axis ticks and labels ax.xaxis.set_major_locator(LogLocator(numticks=15)) #(1) ax.xaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) #(2) for label in ax.xaxis.get_ticklabels()[::2]: label.set_visible(True) #(3) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') i = 0 color=iter(cm.viridis(np.linspace(1,0,len(xinj)))) for x in xinj: ax.plot(z[i],Xe[i],label=r'$x_\mathrm{inj,0}$='+sd.scientific_notation(xinj[i][0]),c=next(color)) i+=1 ax.text(0.74, 0.1, r'$(\Delta \rho/\rho)_\mathrm{inj}=3\times10^{-5}$', transform=ax.transAxes, fontsize=9, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='grey', alpha=0.2)) ax.text(0.03, 0.81, r'$\Gamma_X=$'+sd.scientific_notation(ct.ct_Gamma_dec)+r'$\,\mathrm{s}^{-1}$', transform=ax.transAxes, fontsize=10, verticalalignment='top', 
bbox=dict(boxstyle='round', facecolor='r', alpha=0.2)) ax.legend(loc=(0.3,0.6),ncol=1,frameon=True,framealpha=0.6,fontsize = 10) ax = ax2 ymin=1e-1 ymax = 1e12 x_max = 1e4 x_min = 1e-8 ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(label_scaled_frequency,size=title_size) ax.set_ylabel(label_intensity,size=title_size) ax.yaxis.set_label_coords(-.1,0.5) ax.yaxis.set_major_locator(LogLocator(numticks=15)) #(1) ax.yaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) #(2) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(False) #(3) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=5) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=5) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) # manipulate x-axis ticks and labels ax.xaxis.set_major_locator(LogLocator(numticks=15)) #(1) ax.xaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) #(2) for label in ax.xaxis.get_ticklabels()[::2]: label.set_visible(True) #(3) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') i = 0 color=iter(cm.viridis(np.linspace(1,0,len(xinj)))) for x in xinj: col = next(color) ax.plot(x_ct[i],DI_ct[i],label=r'$x_\mathrm{inj,0}$='+sd.scientific_notation(xinj[i][0]),c=col) ax.plot(x_ct[i],-np.asarray(DI_ct[i]),ls='--',c=col) i+=1 ax.text(0.74, 0.55, r'$(\Delta \rho/\rho)_\mathrm{inj}=3\times10^{-5}$', transform=ax.transAxes, fontsize=9, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='grey', alpha=0.2)) ax.text(0.77, 0.44, r'$\Gamma_X=$'+sd.scientific_notation(ct.ct_Gamma_dec)+r'$\,\mathrm{s}^{-1}$', transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='r', alpha=0.2)) ax.legend(loc=1,ncol=1,frameon=True,framealpha=0.6,fontsize = 10) 
fig.tight_layout() plt.savefig(path_to_figures + '/ct_Xe_and_DI_many_xinjs.pdf') plt.show() plt.close(fig) # - # # Run CosmoRec/Recfast++ in parallel and plot results # + # initial setup of recfast rf = sd.recfast() rf.rf_zstart = 5e6 rf.rf_zend = 1e-5 rf.rf_include_correction_function = 1 rf.rf_Reionization_model = 0 rf.rf_T0 = 2.7255 rf.rf_Yp = 0.245407 rf.rf_N_eff = 3.046 rf.rf_Omega_m = 0.312331 rf.rf_Omega_b = 0.0491137 rf.rf_h = 0.675422 args = {} p_name = 'T0' fdm_array = [2.4255,2.5255,2.6255,2.7255,2.8255] p_array = fdm_array args['param_values_array'] = p_array args['param_name'] = p_name args['save_recfast_results'] = 'no' R = rf.run_recfast_parallel(**args) # - # Now R contains everything we need: R[3] # + fig, ax = plt.subplots(1,1,figsize=(7,5)) label_size = 14 title_size = 13 legend_size = 25 handle_length = 1.5 z_asked = np.logspace(np.log10(1e-2),np.log10(60000),5000) colors = iter(['k','b','k','r','r']) alphas = iter([1.,0.5,.4,0.8,1.]) lss = iter(['-','--','-.','-',':',(0, (3, 1, 1, 1, 1, 1))]) labels = iter([r'$T_\mathrm{cmb}$=%.4f'%f for f in fdm_array]) for i in range(len(p_array)): col = next(colors) param =p_array[i] z = R[i]['z'] xe = R[i]['Xe'] ax.plot(z,xe,ls=next(lss),c=col,alpha = next(alphas),label=next(labels))#,c=col,ls=next(lss),alpha = next(alphas),label=next(labels)) x_min = 2. 
x_max = 1.e6 ymin = 1.e-4 ymax = 2e0 ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(label_redshift,size=title_size) ax.set_ylabel(label_xe,size=title_size) ax.yaxis.set_label_coords(-.1,0.5) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=2) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=2) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=3,labelsize=label_size,pad=5) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=3,labelsize=label_size,pad=7) plt.setp(ax.get_yticklabels(), rotation='horizontal', fontsize=label_size) plt.setp(ax.get_xticklabels(), fontsize=label_size) ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.yaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=1000)) ax.xaxis.set_major_locator(ticker.LogLocator(base=10.0, numticks=1000)) locmin = ticker.LogLocator(base=10.0, subs=np.arange(2, 10) , numticks=12) ax.xaxis.set_minor_locator(locmin) ax.yaxis.set_minor_locator(locmin) ax.xaxis.set_minor_formatter(ticker.NullFormatter()) ax.yaxis.set_minor_formatter(ticker.NullFormatter()) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') ax.legend(loc=2,ncol=1,fontsize=14) ax.loglog() fig.tight_layout() plt.savefig(path_to_figures + '/recfast_many_Tcmb.pdf') plt.show() plt.close(fig) # - # # Make a movie of spectra using the photon injection library subprocess.call(['mkdir','-p',sd.path_to_sd_projects+'/specdist/specdist/data/ct_database']) # + # # copy the library folder 'case_extended_run_xe_history_Drho_rho_3e-5_zlate_1e3_without_collision_190820' # to the ct_database directory (you need to download the library) # then load the library: # (this takes a couple of minutes, the libray is 5GB size) sd_lib_extended_run = sd.specdist_ct_spectra_lib() sd.load_ct_spectra_lib('extended_run_xe_history_Drho_rho_3e-5_zlate_1e3_without_collision_190820',sd_lib_extended_run) # + # 
requires ffmpeg # on mac the conda-forge install seem to crash due to wrongly assigned files # the easiest see to be to install ffmpeg via brew (make sure to have deleted to conda ffmpeg) Gamma_values= np.logspace(-8,-17,70) xdec_values = [1e-3] for ig in range(len(xdec_values)): for ix in range(len(xdec_values)): gamma = Gamma_values[ix] str_gamma = str("%.3e"%gamma) print(str_gamma) xdec = xdec_values[ig] str_xdec = "%.3e"%xdec print(str_xdec) B_nu = np.vectorize(sd.B_nu_of_T) i = 0 nx = len(Gamma_values) x = [] y = [] xinj = [] gammainj = [] fdm_values = [] for k in range(nx): gamma = Gamma_values[k] xinj.append(xdec) gammainj.append(gamma) x_asked = np.logspace(np.log10(1e-8),np.log10(1e8),3000) S_pectra = sd.GetSpectra(gamma,xdec,x_asked,sd_lib_extended_run) x.append(S_pectra['x']) y.append(S_pectra['DI']) x_min = 1.e-7 x_max = 1.e3 fig, ax1 = plt.subplots(1,1,figsize=(8,6)) label_size = 15 title_size = 20 legend_size = 25 handle_length = 1.5 ymin = 1.e-2 ymax = 2e11 linesp = [] patches = [] i_ax = 0 firas = sd.firas() for ax in [ax1]: ax.set_xlim(x_min,x_max) ax.set_ylim(ymin,ymax) ax.set_xscale('log') ax.set_yscale('log') ax.set_xlabel(label_scaled_frequency,size=title_size) ax.set_ylabel(label_intensity,size=title_size) ax.yaxis.set_label_coords(-.1,0.5) ax.tick_params(axis = 'x',which='both',length=5,direction='in', pad=10) ax.tick_params(axis = 'y',which='both',length=5,direction='in', pad=10) plt.setp(ax.get_yticklabels(), rotation='horizontal', fontsize=label_size) plt.setp(ax.get_xticklabels(), fontsize=label_size) ax.xaxis.set_ticks_position('both') ax.yaxis.set_ticks_position('both') ax.axvspan(1.e-3, 2e-3, facecolor='blue', alpha=0.2,label=r'$\mathrm{EDGES}$') ax.axvspan(0.5, 105, facecolor='orange', alpha=0.2,label=r'$\mathrm{PIXIE}$') ax.axvspan(1.2, 11.2, facecolor='red', alpha=0.2,label=r'$\mathrm{FIRAS}$') x_asked = np.logspace(np.log10(x_min),np.log10(x_max),1000) bnu = B_nu(x_asked*sd.kb*sd.firas_T0_bf/sd.hplanck,sd.firas_T0_bf)*1e6 
ax.plot(x_asked,bnu,c='grey',ls=':',label='Black Body at T=2.725K') Y = sd.GetYSpecDistAtTandX(firas.firas_y_1996_95_cl,sd.firas_T0_bf,x_asked)*1e6 ax.plot(x_asked,Y,c='r',alpha = 0.2,label = label_y_firas) ax.plot(x_asked,-Y,c='r',ls='--',alpha = 0.4) MU_chluba = sd.GetMuSpecDistAtTandX_chluba(firas.firas_mu_1996_95_cl,sd.firas_T0_bf,x_asked)*1e6 ax.plot(x_asked,MU_chluba,c='magenta',alpha = 0.2,label=label_mu_firas) ax.plot(x_asked,-MU_chluba,c='magenta',ls='--',alpha = 0.4) ax.yaxis.set_major_locator(LogLocator(numticks=15)) ax.yaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) for label in ax.yaxis.get_ticklabels()[::2]: label.set_visible(True) ax.yaxis.set_tick_params(right='on',which='minor',direction='in',length=5) ax.xaxis.set_tick_params(top='on',which='minor',direction='in',length=5) ax.yaxis.set_tick_params(right='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_tick_params(top='on',which='major',direction='in',length=9,labelsize=label_size,pad=10) ax.xaxis.set_major_locator(LogLocator(numticks=15)) ax.xaxis.set_minor_locator(LogLocator(numticks=15,subs=np.arange(2,10))) for label in ax.xaxis.get_ticklabels()[::2]: label.set_visible(True) ax.grid( b=True, which="both", alpha=0.3, linestyle='--') ax.legend(loc=1) i_ax = i_ax + 1 linesp1 = [ax1.plot([], [],c='k',ls='-')[0] for _ in range(1)] linesm1 = [ax1.plot([], [],c='k',ls='--')[0] for _ in range(1)] text_gamma = [ax.text(0.05, 0.15, r'$z= %.4e$'%1, transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.5))] patches1 = linesp1 +linesm1 + text_gamma patches = patches1 def init(): for line in linesp1: line.set_data([], []) return patches def animate(i): for j,line in enumerate(linesp1): if j==0: line.set_data(x[i],y[i]) for j,line in enumerate(linesm1): if j==0: line.set_data(x[i],-y[i]) text_gamma[j].remove() einj = xinj_to_Einj*xdec text_gamma_str1 = r'$z=0$' text_gamma_str2 = 
r'$E_\mathrm{inj}=$'+ sd.scientific_notation(einj)+' '+r'$\mathrm{eV}$' text_gamma_str3 = r'$x_\mathrm{inj,0}=$'+ sd.scientific_notation(xinj[i]) text_gamma_str4 = r'$\Gamma_X=$'+ sd.scientific_notation(gammainj[i])+' '+r'$\mathrm{s}^{-1}$' text_gamma_str = '\n'.join((text_gamma_str4, text_gamma_str3, text_gamma_str2, text_gamma_str1)) text_gamma[j] = ax.text(0.35, 0.95, text_gamma_str , transform=ax.transAxes, fontsize=12, verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.5)) return patches fig.tight_layout() anim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(y), interval=800, blit=True) anim.save(path_to_figures + '/Dn_today_movie_Drho_rho_3e-5_xinj_'+str_xdec+'.mp4', fps=3, extra_args=['-vcodec', 'libx264'], dpi=100) i+=1 # -
specdist/notebooks/specdist_ionisation_history_bkp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # * <NAME> # * 11/12/2020 # * I am going to use RandomForestClassifier of sklearn package to solve the following problem because the lable is discrete # * Estimated AUC is 0.9 # ## Importing required libraries import numpy as np import pandas as pd from sklearn import linear_model from sklearn.preprocessing import LabelEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_auc_score from sklearn.preprocessing import StandardScaler # ## Loading training and testing data # + # Load CSV training dataset training_dataset = pd.read_csv('train.csv') training_data = training_dataset.iloc[:, :] # convert string to number training_data.replace(('iPhone', 'Android', 'desktop', 'laptop', 'other'), (0, 1, 2, 3, 4), inplace=True) training_data.replace(('M', 'F'), (0, 1), inplace=True) # handle missing value training_data = training_data.dropna() # get training data X_train = training_data.iloc[:, :-1].values y_train = training_data.iloc[:, -1].values # feature scaling sc_x = StandardScaler() X_train = sc_x.fit_transform(X_train) # Load CSV testing dataset testing_dataset = pd.read_csv('test.csv') testing_data = testing_dataset.iloc[:, :] # convert string to number testing_data.replace(('iPhone', 'Android', 'desktop', 'laptop', 'other'), (0, 1, 2, 3, 4), inplace=True) # handle missing value testing_data.replace(('M', 'F'), (0, 1), inplace=True) testing_data = testing_data.dropna() # get testing data X_test = testing_data.iloc[:, :].values # feature scaling X_test = sc_x.transform(X_test) print(training_data.describe()) print(training_data.info()) # - # ## Implmenting Logistic Regression # + # create logistic regression object reg = linear_model.LogisticRegression() # train the model using the training sets reg.fit(X_train, y_train) # making 
# predictions on the testing set
y_pred = reg.predict(X_test)
print('First five prediction:', y_pred[:5])

# AUC score.
# Bug fix (review): AUC must be computed from continuous scores, not from
# hard 0/1 predictions -- passing class labels to roc_auc_score collapses
# the ROC curve to a single operating point and misstates the metric.
# Use the predicted probability of the positive class instead.
# NOTE(review): this is a *training-set* AUC (test.csv has no labels), so
# it is an optimistic, in-sample estimate.
auc_scores = reg.predict_proba(X_train)[:, 1]
auc = roc_auc_score(y_train, auc_scores)
print('AUC score is:', auc)

# the likelihood of the outcome variable being equal to 1
# (i.e. the fraction of positive predictions on the test set)
predicted_array = np.array(y_pred)
outcome_one_num = np.count_nonzero(predicted_array == 1)
likelihood = round(outcome_one_num / len(predicted_array), 8) * 100
print('The likelihood of the outcome variable being equal to 1 is: %s %%' %likelihood)
# -

# ## Importing RandomForestClassifier

# +
y_train = LabelEncoder().fit_transform(y_train)

# Create RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)

# use model to predict the testing dataset
predicted = clf.predict(X_test)
print('First five prediction:', predicted[:5])

# AUC score -- same fix as above: score with class-1 probabilities rather
# than hard labels; again an in-sample (training) AUC.
auc_scores = clf.predict_proba(X_train)[:, 1]
auc = roc_auc_score(y_train, auc_scores)
print('AUC score is:', auc)

# the likelihood of the outcome variable being equal to 1.
predicted_array = np.array(predicted)
outcome_one_num = np.count_nonzero(predicted_array == 1)
likelihood = round(outcome_one_num / len(predicted_array), 8) * 100
print('The likelihood of the outcome variable being equal to 1 is: %s %%' %likelihood)
# -
interview/Happiibook/code.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import pandas as pd # Load contig profiles and binning results. # TODO: Make profiles and binning results the same for all algos. profile = pd.read_csv("/Users/tanunia/PycharmProjects/biolab_scripts/canopy_profiles.in", sep=" ", header=None) clusters = pd.read_csv("/Users/tanunia/PycharmProjects/biolab_scripts/canopy_binning.tsv", sep="\t", header=None) # Add binning column to profile clusters = clusters.rename(columns={1:'contig', 0:'color'}) cols = clusters.columns clusters = clusters[cols[::-1]] clusters["color"] = clusters["color"].apply(lambda x: int(x[3:])) profile = profile.rename(columns={0:'contig'}) profile = pd.merge(clusters, profile, on='contig') #profile # - # Information about profile profile.describe() # + # Leave only clusters with significant contig length in profile #CANOPY: bin_info$third_largest > 3 000 000 cag_str = "CAG0001 CAG0002 CAG0004 CAG0003 CAG0005 CAG0008 CAG0007 CAG0006 CAG0010 CAG0015 CAG0014 CAG0009 CAG0012 CAG0074 CAG0018 CAG0040 CAG0016 CAG0029 CAG0013 CAG0017 CAG0021 CAG0020 CAG0085 CAG0019 CAG0028 CAG0047 CAG0057 CAG0032 CAG0039 CAG0027 CAG0024 CAG0122 CAG0062 CAG0048 CAG0030 CAG0022 CAG0025 CAG0056 CAG0071 CAG0077 CAG0049 CAG0034 CAG0023 CAG0051 CAG0036 CAG0059" filter1 = [int(x[3:]) for x in cag_str.split(" ")] #CONCOCT: bin_info$third_largest > 20 000 000 #filter1 = [89, 243, 312, 278, 109, 250, 60, 59, 195, 277, 190, 394, 311, 301, 333, 51, 143, 327, 338, 147, 256, 163, 18, 141, 134, 317, 81, 371, 288, 216, 388, 135, 71, 341, 367, 92, 232, 119, 252, 293, 361, 350, 168] profile = profile[profile["color"].isin(filter1)] # New profile info profile.describe() # + # Get fraction of profile - profile_small. 
Normalize profile_small data (like in CONCOCT) and convert it to numpy array import numpy as np profile_small = profile.sample(frac=0.1) data = profile_small.as_matrix(columns = profile.columns[2:]) v = (1.0/2000) data = data + v along_Y = np.apply_along_axis(sum, 0, data) data = data/along_Y[None, :] along_X = np.apply_along_axis(sum, 1, data) data = data/along_X[:, None] data = np.log(data) # + # Run bht-sne path_bhtsne = '/Users/tanunia/PycharmProjects/biolab_t-sne/' # Save profile_small to tsv file np.savetxt("data.in", data, delimiter="\t") import sys, os os.system(path_bhtsne + 'bhtsne.py -p 50 -m 1000 -i data.in -o data.out') # Load coordinates from data.out ar = np.loadtxt("data.out", delimiter="\t") len(ar[:, 0]) # Save bhtsne result to profile_small profile_small["x"] = ar[:, 0] profile_small["y"] = ar[:, 1] # + # Draw bhtsne result for a fraction of profile - profile_small from matplotlib import pyplot as plt import matplotlib.cm as cm color = profile_small["color"].tolist() mx_color = max(color) plt.scatter(profile_small["x"], profile_small["y"], c=[cm.spectral(float(i) /mx_color) for i in color]) plt.show() # + # Run PCA on profile_small import numpy as np from sklearn.decomposition import PCA pca = PCA(n_components=2) pcaed = pca.fit(data).transform(data) profile_small["x_pca"] = pcaed[:, 0] profile_small["y_pca"] = pcaed[:, 1] # - # Draw PCA for profile_small plt.scatter(profile_small["x_pca"], profile_small["y_pca"], c=[cm.spectral(float(i) /mx_color) for i in color]) plt.show() # Draw histogram with number of contigs in each bin plt.hist(profile["color"], align='left', bins = 400) plt.title("Number of contigs in each bin") plt.show() # Look at mean frequencies in each sample for each bin profile.groupby('color').mean() # Number of unique profiles in profile len(-profile.groupby(profile.columns.tolist()[2:],as_index=False).size()) # Make new dataframe - only unique profiles and run bhtsne on it new_profile = 
profile.drop_duplicates(profile.columns.tolist()[2:]) new_profile.describe() new_profile = new_profile.sample(frac=0.1) data = new_profile.as_matrix(columns = new_profile.columns[2:]) v = (1.0/2000) data = data + v along_Y = np.apply_along_axis(sum, 0, data) data = data/along_Y[None, :] along_X = np.apply_along_axis(sum, 1, data) data = data/along_X[:, None] data = np.log(data) # + np.savetxt("data2.in", data, delimiter="\t") path_bhtsne = '/Users/tanunia/PycharmProjects/biolab_t-sne/' import sys, os os.system(path_bhtsne + 'bhtsne.py -p 50 -m 3000 -i data2.in -o data_canopy2.out') ar2 = np.loadtxt("data2.out", delimiter="\t") len(ar2[:, 0]) new_profile["x"] = ar2[:, 0] new_profile["y"] = ar2[:, 1] # + from matplotlib import pyplot as plt plt.scatter(new_profile["x"], new_profile["y"], c=new_profile["color"]) plt.show() # - new_profile["color"].value_counts()
src/metaspades/src/projects/mts/scripts/Dataset_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Dependencies import numpy as np import pandas as pd from IPython.display import display import matplotlib.pyplot as plt import matplotlib.mlab as mlab import matplotlib.cm as cm # %matplotlib inline plt.rcParams['figure.figsize'] = (12, 8) maxr = +1.0 minr = -1.0 ampl = maxr - minr # linear means for each arm k = 10 means = np.linspace(minr, maxr, k) #idx = np.argsort(means)[::-1] #get order #means = np.sort(means)[::-1] #make it ordered #gaussian approximation for the error, independent of the estimated mean, which is on the interval [0, 1] #time vs number of trials T = range(2, 50) N = range(1, 50) #ucb1_error V = [[np.sqrt(2*np.log(t)/n) for t in T] for n in N] plt.imshow(V, cmap=cm.gray_r) plt.xlabel('time') plt.ylabel('pulls') plt.title('max estimation error using UCB1') plt.colorbar() plt.show() # - # # Hoeffding's inequality # ## General case bounded on $[-1, 1]$ # + est_means = np.linspace(-1, 1, 100) N = range(1, 100) prob_posit = [[1/2*np.exp(-(2.0 * n**2.0 * mue**2.0) / (n * ampl**2)) if mue <=0 else 1-1/2*np.exp(-(2.0 * n**2.0 * mue**2.0) / (n * ampl**2)) for mue in est_means] for n in N] plt.imshow(prob_posit, cmap=cm.gray_r) plt.xlabel('estimated mean') plt.xticks([0, 49, 99], [-1, 0, 1]) plt.ylabel('pulls') plt.title('Probability of being positive using Hoeffding') plt.colorbar() plt.show() # + from scipy.stats import beta N = range(1, 100) est_p = np.linspace(0, 1, 100) prob_posit = [[beta.cdf(0.5, n-n*p+1, n*p+1) for p in est_p] for n in N] plt.imshow(prob_posit, cmap=cm.gray_r) plt.xlabel('estimated mean') plt.xticks([0, 49, 99], [-1, 0, 1]) plt.ylabel('pulls') plt.title('Probability of being positive using Beta') plt.colorbar() plt.show() # -
smab/extra/posit_prob.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: dev
#     language: python
#     name: dev
# ---

# +
# default_exp train
# -

#export
from fastai.tabular.all import *
from fgcnn.data import *
from fgcnn.model import *

#hide
from nbdev.showdoc import *

# # Training
#
# > Tabular Learner
#
# How to train this model on Avazu dataset.

# ### Prepare dataloaders

#slow
dls = get_dl()

dls.show()

# ## Tabular Learner

# +
#slow
emb_szs = get_emb_sz(dls.train_ds, k=40)
m = FGCNN(emb_szs=emb_szs,
          conv_kernels=[14, 16, 18, 20],
          kernels=[3, 3, 3, 3],
          dense_layers=[4096, 2048, 1024, 512],
          h=7,
          hp=2
         )
# -

#slow
learn = TabularLearner(dls, m, loss_func=BCELossFlat(), opt_func=ranger)

#slow
learn.lr_find()

# ![image.png](attachment:image.png)

#slow
learn.fit_flat_cos(1, 2e-4, cbs=EarlyStoppingCallback())

# ```
# epoch	train_loss	valid_loss	time
# 0	0.341366	0.374705	7:22:26
# ```

# +
#export
def train(dls, m, lr, loss_func, n_epochs=1, opt_func=ranger, validate=False):
    """Fit model `m` on `dls` with a flat-cosine schedule and early stopping.

    Args:
        dls: fastai dataloaders.
        m: the model to train.
        lr: learning rate for `fit_flat_cos`.
        loss_func: loss function passed to the learner.
        n_epochs: number of training epochs.
        opt_func: optimizer factory (defaults to `ranger`).
        validate: if True, run a validation pass after fitting.

    Returns:
        The fitted `TabularLearner`.
    """
    # Bug fix: `opt_func` was previously ignored -- the hard-coded `ranger`
    # was always passed to TabularLearner regardless of the argument.
    learn = TabularLearner(dls, m, loss_func=loss_func, opt_func=opt_func)
    learn.fit_flat_cos(n_epochs, lr, cbs=EarlyStoppingCallback())
    if validate: learn.validate()
    return learn

def predict(learner, df_test):
    """Return predicted probabilities of `learner` on the rows of `df_test`."""
    # Bug fix: this function previously read the *global* `learn` instead of
    # the `learner` argument, silently ignoring any learner passed in.
    dl = learner.dls.test_dl(df_test)
    probs, y_preds = learner.get_preds(dl=dl)
    return probs
# -

#slow
# Bug fix: `lr` was undefined at this point (NameError); use the 2e-4 rate
# selected from `lr_find` above.
learn = train(dls, m, 2e-4, loss_func=BCELossFlat(), n_epochs=1)
03_train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Study anomaly detection data from AWS # 1. the system failure vs ambient temperature <br> # 2. The failure/anomaly is a time interval rather than a data point <br> # 3. Apply unsupervised method <br> # 4. Evaluate the results based on this : https://github.com/numenta/NAB # 5. This is a toy model: I only apply a K mean clustering method. # # import packages import xgboost from matplotlib.pylab import rc import torch from scipy.stats import chisquare from scipy.stats import pearsonr import pickle import pandas as pd import datetime import matplotlib import tensorflow as tf import sklearn import math import matplotlib.pyplot as plt from xgboost import XGBClassifier from xgboost import plot_importance import numpy as np from sklearn.model_selection import train_test_split import sklearn from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder import copy import scipy import datetime import time from sklearn.model_selection import KFold from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score import os from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.covariance import EllipticEnvelope #from pyemma import msm # not available on Kaggle Kernel from sklearn.ensemble import IsolationForest from sklearn.svm import OneClassSVM #data path root_path = "Data//realKnownCause/realKnownCause/" os.listdir(root_path) # + ## Some hyper-parameters: n_clusters = 15 anomaly_ratio = 0.5 # - df = pd.read_csv(root_path+'ambient_temperature_system_failure.csv') print(df['timestamp'].head(10)) # + from matplotlib.pylab import rc font = {'family': 'normal','weight': 'bold', 'size': 25} matplotlib.rc('font', **font) rc('axes', linewidth=3) # plot the data using Celcius 
#df.plot(x='timestamp', y='value')
plt.plot(df['timestamp'],df['value'],"b",linewidth=4)
plt.xlabel("Time stamp")
plt.ylabel(r"CPU Ambient temperature $\degree C$")
plt.xticks(rotation=90)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(24,12)
plt.legend()

# +
# Let's consider the correlation between weekday/hour/season vs failure

# Bug fix: `timestamp` comes straight out of `read_csv` as plain strings,
# so the `.dt` accessors below would raise AttributeError.  Parse it to
# datetime64 first.
df['timestamp'] = pd.to_datetime(df['timestamp'])

# the hours and if it's night or day (7:00-22:00)
df['hours'] = df['timestamp'].dt.hour
df['daylight'] = ((df['hours'] >= 7) & (df['hours'] <= 22)).astype(int)
# the day of the week (Monday=0, Sunday=6) and if it's a week end day or week day.
df['DayOfTheWeek'] = df['timestamp'].dt.dayofweek
df['WeekDay'] = (df['DayOfTheWeek'] < 5).astype(int)

# An estimation of anomly population of the dataset (necessary for several algorithm)
outliers_fraction = 0.01

# time with int to plot easily (epoch nanoseconds scaled down)
df['time_epoch'] = (df['timestamp'].astype(np.int64)/100000000000).astype(np.int64)

# creation of 4 distinct categories that seem useful (week end/day week & night/day)
df['categories'] = df['WeekDay']*2 + df['daylight']

a = df.loc[df['categories'] == 0, 'value']
b = df.loc[df['categories'] == 1, 'value']
c = df.loc[df['categories'] == 2, 'value']
d = df.loc[df['categories'] == 3, 'value']

# Per-category temperature histograms, expressed as percentages of each
# category's row count so the four groups are comparable.
fig, ax = plt.subplots()
a_heights, a_bins = np.histogram(a)
b_heights, b_bins = np.histogram(b, bins=a_bins)
c_heights, c_bins = np.histogram(c, bins=a_bins)
d_heights, d_bins = np.histogram(d, bins=a_bins)
width = (a_bins[1] - a_bins[0])/6
ax.bar(a_bins[:-1], a_heights*100/a.count(), width=width, facecolor='blue', label='WeekEndNight')
ax.bar(b_bins[:-1]+width, (b_heights*100/b.count()), width=width, facecolor='green', label ='WeekEndLight')
ax.bar(c_bins[:-1]+width*2, (c_heights*100/c.count()), width=width, facecolor='red', label ='WeekDayNight')
ax.bar(d_bins[:-1]+width*3, (d_heights*100/d.count()), width=width, facecolor='black', label ='WeekDayLight')
plt.xlabel(r"CPU Ambient temperature $\degree C$")
plt.xticks(rotation=90)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(24,12) plt.legend() plt.show() # + # Take useful feature and standardize them font = {'family': 'normal','weight': 'bold', 'size': 25} matplotlib.rc('font', **font) rc('axes', linewidth=3) data = df[['value', 'hours', 'daylight', 'DayOfTheWeek', 'WeekDay']] min_max_scaler = preprocessing.StandardScaler() np_scaled = min_max_scaler.fit_transform(data) data = pd.DataFrame(np_scaled) # reduce to 2 important features pca = PCA(n_components=2) data = pca.fit_transform(data) # standardize these 2 new features min_max_scaler = preprocessing.StandardScaler() np_scaled = min_max_scaler.fit_transform(data) data = pd.DataFrame(np_scaled) # calculate with different number of centroids to see the loss plot (elbow method) n_cluster = range(1, 20) kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster] scores = [kmeans[i].score(data) for i in range(len(kmeans))] fig, ax = plt.subplots() scores = np.array(scores) ax.plot(n_cluster, abs(scores),linewidth=6) plt.xlabel("n cluster") plt.ylabel(r"scores") fig = matplotlib.pyplot.gcf() fig.set_size_inches(24,12) plt.legend() plt.show() # + # Let's take a look at the previous plot: Choose around 10-15 clusters: # predict each clustering df['cluster'] = kmeans[n_clusters-1].predict(data) df['principal_feature1'] = data[0] df['principal_feature2'] = data[1] df['cluster'].value_counts() # + #plot the different clusters with the 2 main features font = {'family': 'normal','weight': 'bold', 'size': 25} matplotlib.rc('font', **font) rc('axes', linewidth=3) from matplotlib import colors as mcolors fig, ax = plt.subplots() colors = dict() for i in range(len(list(mcolors.CSS4_COLORS.keys()))): colors[i]=list(mcolors.CSS4_COLORS.keys())[i] ax.scatter(df['principal_feature1'], df['principal_feature2'], c=df["cluster"].apply(lambda x: colors[x]),s=120) plt.xlabel('principal_feature1') plt.ylabel('principal_feature2') fig = matplotlib.pyplot.gcf() fig.set_size_inches(24,12) plt.legend() plt.show() # + ### Consider the points 
# that have the largest distance between centroid of that cluster as anomaly:
# calculate the centroid for each cluster first:

# squared distance to cluster center
centroids = kmeans[n_clusters-1].cluster_centers_
def distance(x):
    # Squared Euclidean distance from row `x`'s 2-D PCA coordinates to the
    # centroid of the cluster the row was assigned to.
    # (Earlier variant, kept for reference:)
#    return kmeans[x["cluster"]].transform(np.atleast_2d([x['principal_feature1'],x['principal_feature2']]))**2
    return (centroids[x["cluster"],0]-x['principal_feature1'])**2+(centroids[x["cluster"],1]-x['principal_feature2'])**2

df["D"]=df.apply(lambda x : distance(x),axis=1)

# +
# Here "D" is the distance for each row to the centroids of each group.
# NOTE(review): the text above says "2%" but `anomaly_ratio` is 0.5, so the
# cut is actually the top 0.5% of distances.
# Two near-equivalent cutoffs are derived:
#   * `mask` flags rows above the (100 - anomaly_ratio)th percentile of D,
#   * `threshold` is the smallest of the `number_of_outliers` largest D
#     values (used only to draw the vertical line in the histogram below).
#!!! different kind of masks
mask = df["D"]>np.nanpercentile(df["D"],100-anomaly_ratio)
number_of_outliers=int(len(df["D"])*anomaly_ratio/100)
threshold = df["D"].nlargest(number_of_outliers).min()

# +
from matplotlib.pylab import rc
font = {'family': 'normal','weight': 'bold', 'size': 25}
matplotlib.rc('font', **font)
rc('axes', linewidth=3)

# plot the distribution of D with the anomaly threshold overlaid
#df.plot(x='timestamp', y='value')
plt.hist(df["D"])
plt.plot([threshold,threshold],[0,5000],"k",linewidth=5)
plt.xlabel("Distance D")
plt.xticks(rotation=90)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(24,12)
plt.legend()

# +
font = {'family': 'normal','weight': 'bold', 'size': 25}
matplotlib.rc('font', **font)
rc('axes', linewidth=3)

# plot the temperature series with the flagged anomalies marked in red
#df.plot(x='timestamp', y='value')
plt.plot(df['timestamp'],df['value'],"b",linewidth=4)
plt.plot(df[mask]['timestamp'],df[mask]['value'],"ro",linewidth=4,label="Anomaly")
plt.xlabel("Time stamp")
plt.ylabel(r"CPU Ambient temperature $\degree C$")
plt.xticks(rotation=90)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(24,12)
plt.legend()
# -
AWS_streaming_v1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 4 - Packed Padded Sequences, Masking, Inference and BLEU # # ## Introduction # # In this notebook we will be adding a few improvements - packed padded sequences and masking - to the model from the previous notebook. Packed padded sequences are used to tell our RNN to skip over padding tokens in our encoder. Masking explicitly forces the model to ignore certain values, such as attention over padded elements. Both of these techniques are commonly used in NLP. # # We will also look at how to use our model for inference, by giving it a sentence, seeing what it translates it as and seeing where exactly it pays attention to when translating each word. # # Finally, we'll use the BLEU metric to measure the quality of our translations. # # ## Preparing Data # # First, we'll import all the modules as before, with the addition of the `matplotlib` modules used for viewing the attention. # + import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torchtext.legacy.datasets import Multi30k from torchtext.legacy.data import Field, BucketIterator import matplotlib.pyplot as plt import matplotlib.ticker as ticker import spacy import numpy as np import random import math import time # - # Next, we'll set the random seed for reproducability. # + SEED = 1234 random.seed(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True # - # As before, we'll import spaCy and define the German and English tokenizers. 
# Load the spaCy pipelines used purely for tokenization.
spacy_de = spacy.load('de_core_news_sm')
spacy_en = spacy.load('en_core_web_sm')


# +
def tokenize_de(text):
    """
    Tokenizes German text from a string into a list of token strings.
    """
    return [tok.text for tok in spacy_de.tokenizer(text)]

def tokenize_en(text):
    """
    Tokenizes English text from a string into a list of token strings.
    """
    return [tok.text for tok in spacy_en.tokenizer(text)]


# -

# When using packed padded sequences, we need to tell PyTorch how long the
# actual (non-padded) sequences are.  TorchText's `Field` objects allow us to
# use the `include_lengths` argument; this will cause our `batch.src` to be a
# tuple.  The first element of the tuple is the same as before, a batch of
# numericalized source sentences as a tensor, and the second element is the
# non-padded length of each source sentence within the batch.

# +
SRC = Field(tokenize = tokenize_de, 
            init_token = '<sos>', 
            eos_token = '<eos>', 
            lower = True, 
            include_lengths = True)

TRG = Field(tokenize = tokenize_en, 
            init_token = '<sos>', 
            eos_token = '<eos>', 
            lower = True)
# -

# We then load the data.

train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'), 
                                                    fields = (SRC, TRG))

# And build the vocabulary (tokens seen fewer than 2 times become <unk>).

SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)

# Next, we handle the iterators.
#
# One quirk about packed padded sequences is that all elements in the batch
# need to be sorted by their non-padded lengths in descending order, i.e. the
# first sentence in the batch needs to be the longest.  We use two arguments
# of the iterator to handle this: `sort_within_batch`, which tells the
# iterator that the contents of the batch need to be sorted, and `sort_key`,
# a function which tells the iterator how to sort the elements in the batch.
# Here, we sort by the length of the `src` sentence.
# + BATCH_SIZE = 128 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size = BATCH_SIZE, sort_within_batch = True, sort_key = lambda x : len(x.src), device = device) # - # ## Building the Model # # ### Encoder # # Next up, we define the encoder. # # The changes here all within the `forward` method. It now accepts the lengths of the source sentences as well as the sentences themselves. # # After the source sentence (padded automatically within the iterator) has been embedded, we can then use `pack_padded_sequence` on it with the lengths of the sentences. Note that the tensor containing the lengths of the sequences must be a CPU tensor as of the latest version of PyTorch, which we explicitly do so with `to('cpu')`. `packed_embedded` will then be our packed padded sequence. This can be then fed to our RNN as normal which will return `packed_outputs`, a packed tensor containing all of the hidden states from the sequence, and `hidden` which is simply the final hidden state from our sequence. `hidden` is a standard tensor and not packed in any way, the only difference is that as the input was a packed sequence, this tensor is from the final **non-padded element** in the sequence. # # We then unpack our `packed_outputs` using `pad_packed_sequence` which returns the `outputs` and the lengths of each, which we don't need. # # The first dimension of `outputs` is the padded sequence lengths however due to using a packed padded sequence the values of tensors when a padding token was the input will be all zeros. 
class Encoder(nn.Module):
    """Bidirectional GRU encoder operating on packed padded sequences.

    Returns per-token hidden states for attention plus a single initial
    decoder hidden state built from the final forward/backward states.
    """
    def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):
        super().__init__()
        
        self.embedding = nn.Embedding(input_dim, emb_dim)
        
        self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True)
        
        # Maps the concatenated final forward+backward states to the
        # decoder's hidden size.
        self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)
        
        self.dropout = nn.Dropout(dropout)
        
    def forward(self, src, src_len):
        """Encode a batch of source sentences.

        src: [src len, batch size] token indices
        src_len: [batch size] true (non-padded) lengths
        Returns (outputs, hidden); see shape comments below.
        """
        
        #src = [src len, batch size]
        #src_len = [batch size]
        
        embedded = self.dropout(self.embedding(src))
        
        #embedded = [src len, batch size, emb dim]
                
        #need to explicitly put lengths on cpu!
        packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, src_len.to('cpu'))
                
        packed_outputs, hidden = self.rnn(packed_embedded)
                                 
        #packed_outputs is a packed sequence containing all hidden states
        #hidden is now from the final non-padded element in the batch
            
        outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs) 
            
        #outputs is now a non-packed sequence, all hidden states obtained
        #  when the input is a pad token are all zeros
            
        #outputs = [src len, batch size, hid dim * num directions]
        #hidden = [n layers * num directions, batch size, hid dim]
        
        #hidden is stacked [forward_1, backward_1, forward_2, backward_2, ...]
        #outputs are always from the last layer
        
        #hidden [-2, :, : ] is the last of the forwards RNN 
        #hidden [-1, :, : ] is the last of the backwards RNN
        
        #initial decoder hidden is final hidden state of the forwards and backwards 
        #  encoder RNNs fed through a linear layer
        hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))
        
        #outputs = [src len, batch size, enc hid dim * 2]
        #hidden = [batch size, dec hid dim]
        
        return outputs, hidden

# ### Attention
#
# The attention module is where we calculate the attention values over the source sentence.
#
# Previously, we allowed this module to "pay attention" to padding tokens within the source sentence. However, using *masking*, we can force the attention to only be over non-padding elements.
#
# The `forward` method now takes a `mask` input.
# The `mask` argument is a **[batch size, source sentence length]** tensor holding
# 1 wherever the source token is real and 0 wherever it is padding. For example,
# for the source sentence ["hello", "how", "are", "you", "?", `<pad>`, `<pad>`]
# the mask is [1, 1, 1, 1, 1, 0, 0].
#
# The mask is applied after the raw attention scores are computed but before the
# `softmax` normalization, via `masked_fill`: every position where `mask == 0`
# is overwritten with `-1e10`. Such a huge negative score becomes effectively
# zero after `softmax`, so no attention is ever paid to padding tokens.

class Attention(nn.Module):
    """Additive attention over encoder states, with source-padding masking."""
    def __init__(self, enc_hid_dim, dec_hid_dim):
        super().__init__()
        
        # Scores the concatenation of decoder state and one encoder state.
        self.attn = nn.Linear((enc_hid_dim * 2) + dec_hid_dim, dec_hid_dim)
        # Projects each scored position down to a single energy value.
        self.v = nn.Linear(dec_hid_dim, 1, bias = False)
        
    def forward(self, hidden, encoder_outputs, mask):
        """Return normalized attention weights, shape [batch size, src len].

        hidden:          [batch size, dec hid dim]
        encoder_outputs: [src len, batch size, enc hid dim * 2]
        mask:            [batch size, src len], 0 marks padding positions
        """
        src_len = encoder_outputs.shape[0]
        
        # Bring encoder states to batch-first layout:
        # [batch size, src len, enc hid dim * 2]
        enc_states = encoder_outputs.permute(1, 0, 2)
        
        # Replicate the decoder state once per source position:
        # [batch size, src len, dec hid dim]
        dec_states = hidden.unsqueeze(1).repeat(1, src_len, 1)
        
        # Joint scoring of (decoder state, encoder state) pairs.
        scores = torch.tanh(self.attn(torch.cat((dec_states, enc_states), dim = 2)))
        
        # Collapse the hidden dimension: [batch size, src len]
        logits = self.v(scores).squeeze(2)
        
        # Padding positions receive a huge negative score so that they
        # vanish under softmax.
        logits = logits.masked_fill(mask == 0, -1e10)
        
        return F.softmax(logits, dim = 1)

# ### Decoder
#
# The decoder only needs a few small changes.
# It needs to accept a mask over the source sentence and pass this to the attention module. As we want to view the values of attention during inference, we also return the attention tensor.

class Decoder(nn.Module):
    """Single-step GRU decoder with masked attention over encoder states."""
    def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):
        super().__init__()

        self.output_dim = output_dim
        self.attention = attention
        
        self.embedding = nn.Embedding(output_dim, emb_dim)
        
        # Input at each step is [context vector ; token embedding].
        self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)
        
        # Prediction uses context, decoder output and embedding jointly.
        self.fc_out = nn.Linear((enc_hid_dim * 2) + dec_hid_dim + emb_dim, output_dim)
        
        self.dropout = nn.Dropout(dropout)
        
    def forward(self, input, hidden, encoder_outputs, mask):
        """Decode one target token; returns (prediction, hidden, attention)."""
             
        #input = [batch size]
        #hidden = [batch size, dec hid dim]
        #encoder_outputs = [src len, batch size, enc hid dim * 2]
        #mask = [batch size, src len]
        
        input = input.unsqueeze(0)
        
        #input = [1, batch size]
        
        embedded = self.dropout(self.embedding(input))
        
        #embedded = [1, batch size, emb dim]
        
        a = self.attention(hidden, encoder_outputs, mask)
                
        #a = [batch size, src len]
        
        a = a.unsqueeze(1)
        
        #a = [batch size, 1, src len]
        
        encoder_outputs = encoder_outputs.permute(1, 0, 2)
        
        #encoder_outputs = [batch size, src len, enc hid dim * 2]
        
        # Weighted sum of encoder states = context vector for this step.
        weighted = torch.bmm(a, encoder_outputs)
        
        #weighted = [batch size, 1, enc hid dim * 2]
        
        weighted = weighted.permute(1, 0, 2)
        
        #weighted = [1, batch size, enc hid dim * 2]
        
        rnn_input = torch.cat((embedded, weighted), dim = 2)
        
        #rnn_input = [1, batch size, (enc hid dim * 2) + emb dim]
            
        output, hidden = self.rnn(rnn_input, hidden.unsqueeze(0))
        
        #output = [seq len, batch size, dec hid dim * n directions]
        #hidden = [n layers * n directions, batch size, dec hid dim]
        
        #seq len, n layers and n directions will always be 1 in this decoder, therefore:
        #output = [1, batch size, dec hid dim]
        #hidden = [1, batch size, dec hid dim]
        #this also means that output == hidden
        assert (output == hidden).all()
        
        embedded = embedded.squeeze(0)
        output = output.squeeze(0)
        weighted = weighted.squeeze(0)
        
        prediction = self.fc_out(torch.cat((output, weighted, embedded), dim = 1))
        
        #prediction = [batch size, output dim]
        
        return prediction, hidden.squeeze(0), a.squeeze(1)

# ### Seq2Seq
#
# The overarching seq2seq model also needs a few changes for packed padded sequences, masking and inference.
#
# We need to tell it what the indexes are for the pad token and also pass the source sentence lengths as input to the `forward` method.
#
# We use the pad token index to create the masks, by creating a mask tensor that is 1 wherever the source sentence is not equal to the pad token. This is all done within the `create_mask` function.
#
# The sequence lengths as needed to pass to the encoder to use packed padded sequences.
#
# The attention at each time-step is stored in the `attentions`

class Seq2Seq(nn.Module):
    """Encoder-decoder wrapper handling masking and teacher forcing."""
    def __init__(self, encoder, decoder, src_pad_idx, device):
        super().__init__()
        
        self.encoder = encoder
        self.decoder = decoder
        self.src_pad_idx = src_pad_idx
        self.device = device
        
    def create_mask(self, src):
        # 1 where the source token is not padding; [batch size, src len].
        mask = (src != self.src_pad_idx).permute(1, 0)
        return mask
        
    def forward(self, src, src_len, trg, teacher_forcing_ratio = 0.5):
        """Run a full decoding pass; returns [trg len, batch, output dim] logits."""
        
        #src = [src len, batch size]
        #src_len = [batch size]
        #trg = [trg len, batch size]
        #teacher_forcing_ratio is probability to use teacher forcing
        #e.g. if teacher_forcing_ratio is 0.75 we use teacher forcing 75% of the time
                    
        batch_size = src.shape[1]
        trg_len = trg.shape[0]
        trg_vocab_size = self.decoder.output_dim
        
        #tensor to store decoder outputs
        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)
        
        #encoder_outputs is all hidden states of the input sequence, back and forwards
        #hidden is the final forward and backward hidden states, passed through a linear layer
        encoder_outputs, hidden = self.encoder(src, src_len)
                
        #first input to the decoder is the <sos> tokens
        input = trg[0,:]
        
        mask = self.create_mask(src)

        #mask = [batch size, src len]
                
        # NOTE: outputs[0] stays all-zeros; the loss computation slices it off.
        for t in range(1, trg_len):
            
            #insert input token embedding, previous hidden state, all encoder hidden states 
            #  and mask
            #receive output tensor (predictions) and new hidden state
            output, hidden, _ = self.decoder(input, hidden, encoder_outputs, mask)
            
            #place predictions in a tensor holding predictions for each token
            outputs[t] = output
            
            #decide if we are going to use teacher forcing or not
            teacher_force = random.random() < teacher_forcing_ratio
            
            #get the highest predicted token from our predictions
            top1 = output.argmax(1) 
            
            #if teacher forcing, use actual next token as next input
            #if not, use predicted token
            input = trg[t] if teacher_force else top1
            
        return outputs

# ## Training the Seq2Seq Model
#
# Next up, initializing the model and placing it on the GPU.

# +
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
ENC_HID_DIM = 512
DEC_HID_DIM = 512
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]

attn = Attention(ENC_HID_DIM, DEC_HID_DIM)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT, attn)

model = Seq2Seq(enc, dec, SRC_PAD_IDX, device).to(device)
# -

# Then, we initialize the model parameters.
# +
def init_weights(m):
    """Initialize all weights to N(0, 0.01) and all other parameters to zero."""
    for name, param in m.named_parameters():
        if 'weight' not in name:
            nn.init.constant_(param.data, 0)
            continue
        nn.init.normal_(param.data, mean=0, std=0.01)
            
model.apply(init_weights)
# -

# We'll print out the number of trainable parameters in the model, noticing that it has the exact same amount of parameters as the model without these improvements.

# +
def count_parameters(model):
    """Count parameters that require gradients."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)

print(f'The model has {count_parameters(model):,} trainable parameters')
# -

# Then we define our optimizer and criterion. 
#
# The `ignore_index` for the criterion needs to be the index of the pad token for the target language, not the source language.

optimizer = optim.Adam(model.parameters())

# +
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]

# Padding positions in the target contribute nothing to the loss.
criterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)
# -

# Next, we'll define our training and evaluation loops.
#
# As we are using `include_lengths = True` for our source field, `batch.src` is now a tuple with the first element being the numericalized tensor representing the sentence and the second element being the lengths of each sentence within the batch.
#
# Our model also returns the attention vectors over the batch of source source sentences for each decoding time-step. We won't use these during the training/evaluation, but we will later for inference.
def train(model, iterator, optimizer, criterion, clip):
    """Run one training epoch; return the mean per-batch loss."""
    
    model.train()
    
    running_loss = 0
    
    for batch in iterator:
        
        src, src_len = batch.src
        trg = batch.trg
        
        optimizer.zero_grad()
        
        output = model(src, src_len, trg)
        
        #trg = [trg len, batch size]
        #output = [trg len, batch size, output dim]
        
        vocab_size = output.shape[-1]
        
        # Drop the <sos> step and flatten so cross-entropy sees
        # [(trg len - 1) * batch size] rows.
        flat_output = output[1:].view(-1, vocab_size)
        flat_trg = trg[1:].view(-1)
        
        loss = criterion(flat_output, flat_trg)
        
        loss.backward()
        
        # Gradient clipping guards against exploding RNN gradients.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        
        optimizer.step()
        
        running_loss += loss.item()
        
    return running_loss / len(iterator)

def evaluate(model, iterator, criterion):
    """Compute the mean per-batch loss with teacher forcing disabled."""
    
    model.eval()
    
    running_loss = 0
    
    with torch.no_grad():
    
        for batch in iterator:

            src, src_len = batch.src
            trg = batch.trg

            # teacher_forcing_ratio = 0: always feed back the model's
            # own predictions during evaluation.
            output = model(src, src_len, trg, 0)

            #trg = [trg len, batch size]
            #output = [trg len, batch size, output dim]

            vocab_size = output.shape[-1]
            
            flat_output = output[1:].view(-1, vocab_size)
            flat_trg = trg[1:].view(-1)

            loss = criterion(flat_output, flat_trg)

            running_loss += loss.item()
        
    return running_loss / len(iterator)

# Then, we'll define a useful function for timing how long epochs take.

def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - (minutes * 60))
    return minutes, seconds

# The penultimate step is to train our model. Notice how it takes almost half the time as our model without the improvements added in this notebook.
# + N_EPOCHS = 10 CLIP = 1 best_valid_loss = float('inf') for epoch in range(N_EPOCHS): start_time = time.time() train_loss = train(model, train_iterator, optimizer, criterion, CLIP) valid_loss = evaluate(model, valid_iterator, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'tut4-model.pt') print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}') print(f'\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}') # - # Finally, we load the parameters from our best validation loss and get our results on the test set. # # We get the improved test perplexity whilst almost being twice as fast! # + model.load_state_dict(torch.load('tut4-model.pt')) test_loss = evaluate(model, test_iterator, criterion) print(f'| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |') # - # ## Inference # # Now we can use our trained model to generate translations. # # **Note:** these translations will be poor compared to examples shown in paper as they use hidden dimension sizes of 1000 and train for 4 days! They have been cherry picked in order to show off what attention should look like on a sufficiently sized model. 
#
# Our `translate_sentence` will do the following:
# - ensure our model is in evaluation mode, which it should always be for inference
# - tokenize the source sentence if it has not been tokenized (is a string)
# - numericalize the source sentence
# - convert it to a tensor and add a batch dimension
# - get the length of the source sentence and convert to a tensor
# - feed the source sentence into the encoder
# - create the mask for the source sentence
# - create a list to hold the output sentence, initialized with an `<sos>` token
# - create a tensor to hold the attention values
# - while we have not hit a maximum length
#   - get the input tensor, which should be either `<sos>` or the last predicted token
#   - feed the input, all encoder outputs, hidden state and mask into the decoder
#   - store attention values
#   - get the predicted next token
#   - add prediction to current output sentence prediction
#   - break if the prediction was an `<eos>` token
# - convert the output sentence from indexes to tokens
# - return the output sentence (with the `<sos>` token removed) and the attention values over the sequence

def translate_sentence(sentence, src_field, trg_field, model, device, max_len = 50):
    """Greedily translate one German sentence.

    Returns (tokens, attentions): the predicted target tokens (without the
    leading <sos>) and the [steps, 1, src len] attention weights.
    """

    model.eval()
        
    if isinstance(sentence, str):
        # FIX: was spacy.load('de') — the bare 'de' shortcut is removed in
        # modern spaCy; use the same full model name as the training cells.
        nlp = spacy.load('de_core_news_sm')
        tokens = [token.text.lower() for token in nlp(sentence)]
    else:
        tokens = [token.lower() for token in sentence]

    tokens = [src_field.init_token] + tokens + [src_field.eos_token]
        
    src_indexes = [src_field.vocab.stoi[token] for token in tokens]
    
    # Add a batch dimension of 1: [src len, 1].
    src_tensor = torch.LongTensor(src_indexes).unsqueeze(1).to(device)
    
    src_len = torch.LongTensor([len(src_indexes)])
    
    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor, src_len)

    mask = model.create_mask(src_tensor)
        
    trg_indexes = [trg_field.vocab.stoi[trg_field.init_token]]

    attentions = torch.zeros(max_len, 1, len(src_indexes)).to(device)
    
    for i in range(max_len):

        trg_tensor = torch.LongTensor([trg_indexes[-1]]).to(device)
                
        with torch.no_grad():
            output, hidden, attention = model.decoder(trg_tensor, hidden, encoder_outputs, mask)

        attentions[i] = attention
            
        # Greedy decoding: always take the most likely next token.
        pred_token = output.argmax(1).item()
        
        trg_indexes.append(pred_token)

        if pred_token == trg_field.vocab.stoi[trg_field.eos_token]:
            break
    
    trg_tokens = [trg_field.vocab.itos[i] for i in trg_indexes]
    
    # Drop <sos>; keep only the attention rows actually produced.
    return trg_tokens[1:], attentions[:len(trg_tokens)-1]

# Next, we'll make a function that displays the model's attention over the source sentence for each target token generated.

def display_attention(sentence, translation, attention):
    """Plot the attention matrix: source tokens on x, predicted tokens on y."""
    
    fig = plt.figure(figsize=(10,10))
    ax = fig.add_subplot(111)
    
    attention = attention.squeeze(1).cpu().detach().numpy()
    
    cax = ax.matshow(attention, cmap='bone')
   
    ax.tick_params(labelsize=15)
    
    # Leading '' entries align the labels with matshow's tick positions.
    x_ticks = [''] + ['<sos>'] + [t.lower() for t in sentence] + ['<eos>']
    y_ticks = [''] + translation
     
    ax.set_xticklabels(x_ticks, rotation=45)
    ax.set_yticklabels(y_ticks)

    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()
    plt.close()

# Now, we'll grab some translations from our dataset and see how well our model did. Note, we're going to cherry pick examples here so it gives us something interesting to look at, but feel free to change the `example_idx` value to look at different examples.
#
# First, we'll get a source and target from our dataset.

# +
example_idx = 12

src = vars(train_data.examples[example_idx])['src']
trg = vars(train_data.examples[example_idx])['trg']

print(f'src = {src}')
print(f'trg = {trg}')
# -

# Then we'll use our `translate_sentence` function to get our predicted translation and attention. We show this graphically by having the source sentence on the x-axis and the predicted translation on the y-axis. The lighter the square at the intersection between two words, the more attention the model gave to that source word when translating that target word.
# # Below is an example the model attempted to translate, it gets the translation correct except changes *are fighting* to just *fighting*. # + translation, attention = translate_sentence(src, SRC, TRG, model, device) print(f'predicted trg = {translation}') # - display_attention(src, translation, attention) # Translations from the training set could simply be memorized by the model. So it's only fair we look at translations from the validation and testing set too. # # Starting with the validation set, let's get an example. # + example_idx = 14 src = vars(valid_data.examples[example_idx])['src'] trg = vars(valid_data.examples[example_idx])['trg'] print(f'src = {src}') print(f'trg = {trg}') # - # Then let's generate our translation and view the attention. # # Here, we can see the translation is the same except for swapping *female* with *woman*. # + translation, attention = translate_sentence(src, SRC, TRG, model, device) print(f'predicted trg = {translation}') display_attention(src, translation, attention) # - # Finally, let's get an example from the test set. # + example_idx = 18 src = vars(test_data.examples[example_idx])['src'] trg = vars(test_data.examples[example_idx])['trg'] print(f'src = {src}') print(f'trg = {trg}') # - # Again, it produces a slightly different translation than target, a more literal version of the source sentence. It swaps *mountain climbing* for *climbing a mountain*. # + translation, attention = translate_sentence(src, SRC, TRG, model, device) print(f'predicted trg = {translation}') display_attention(src, translation, attention) # - # ## BLEU # # Previously we have only cared about the loss/perplexity of the model. However there metrics that are specifically designed for measuring the quality of a translation - the most popular is *BLEU*. Without going into too much detail, BLEU looks at the overlap in the predicted and actual target sequences in terms of their n-grams. 
# It will give us a number between 0 and 1 for each sequence, where 1 means there is perfect overlap, i.e. a perfect translation, although is usually shown between 0 and 100. BLEU was designed for multiple candidate translations per source sequence, however in this dataset we only have one candidate per source.
#
# We define a `calculate_bleu` function which calculates the BLEU score over a provided TorchText dataset. This function creates a corpus of the actual and predicted translation for each source sentence and then calculates the BLEU score.

# +
from torchtext.data.metrics import bleu_score

def calculate_bleu(data, src_field, trg_field, model, device, max_len = 50):
    """Compute corpus BLEU over `data`, translating every example greedily.

    Returns the score in [0, 1] (multiply by 100 for the conventional scale).
    """
    
    trgs = []
    pred_trgs = []
    
    for datum in data:
        
        src = vars(datum)['src']
        trg = vars(datum)['trg']
        
        pred_trg, _ = translate_sentence(src, src_field, trg_field, model, device, max_len)
        
        #cut off <eos> token
        pred_trg = pred_trg[:-1]
        
        pred_trgs.append(pred_trg)
        # bleu_score expects a list of references per candidate; we have one.
        trgs.append([trg])
        
    return bleu_score(pred_trgs, trgs)
# -

# We get a BLEU of around 28. If we compare it to the paper that the attention model is attempting to replicate, they achieve a BLEU score of 26.75. This is similar to our score, however they are using a completely different dataset and their model size is much larger - 1000 hidden dimensions which takes 4 days to train! - so we cannot really compare against that either.
#
# This number isn't really interpretable, we can't really say much about it. The most useful part of a BLEU score is that it can be used to compare different models on the same dataset, where the one with the **higher** BLEU score is "better".

# +
# FIX: the result was previously bound to the name `bleu_score`, shadowing
# the imported torchtext function and breaking any re-run of calculate_bleu.
score = calculate_bleu(test_data, SRC, TRG, model, device)

print(f'BLEU score = {score*100:.2f}')
# -

# In the next tutorials we will be moving away from using recurrent neural networks and start looking at other ways to construct sequence-to-sequence models. Specifically, in the next tutorial we will be using convolutional neural networks.
4 - Packed Padded Sequences, Masking, Inference and BLEU.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="tlWGZ6JobUDU"
from pathlib import Path
import os
import glob
import shutil  # FIX: Trainer.saveData uses shutil.move but shutil was never imported
import numpy as np
import pandas as pd
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sn

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision.utils import make_grid
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, random_split, DataLoader
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
# %matplotlib inline
# %reload_ext tensorboard

import random
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import roc_curve, roc_auc_score, precision_recall_curve, f1_score, auc
from datetime import datetime
from time import time
import PIL


def show_batch(data_loader, n=8):
    """Show the first `n` images of the first batch in a 4-wide grid."""
    for images, labels in data_loader:
        fig, ax = plt.subplots(figsize=(12, 12))
        ax.set_xticks([]); ax.set_yticks([])
        # FIX: was images[:8] — the `n` parameter was silently ignored.
        ax.imshow(make_grid(images[:n], nrow=4).permute(1, 2, 0))
        break


def get_num_correct(preds, labels):
    """Number of samples whose argmax prediction matches the label."""
    return preds.argmax(dim=1).eq(labels).sum().item()


# + id="PL6N9rHebs-G"
DATASET = 'Facespoof_test'
EPOCHS = 3
LEARNING_RATE = 0.001
NUM_ROUTING = 3
LR_DECAY = 0.96
LR_UPDATE_INTERVAL_IN_ITERATIONS = None  # initialized later to every epoch, if value is None
MODEL_SAVE_INTERVAL_IN_EPOCHS = 1
NUM_WORKERS = 1
LOG_INTERVAL = 100
IMG_RECONSTRUCTION_INTERVAL = 500
SEED = 1
GPU_DEVICE = 0
MULTI_GPU = False
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

DATA_PATH = 'logs/'
G_DRIVE_DIR_BASE = '/content/drive/My Drive/Colab/Facespoof/Runs/'
MODEL_DIR_BASE = DATA_PATH + 'models/'
TB_RUN_DIR_BASE = DATA_PATH + 'runs/'

# + id="8Y0t9baYeho1"
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)


def _spoof_transforms():
    """Build the (train_val, test) transform pipelines shared by all variants."""
    common = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        # FIX: mean was a bare float `(0.26)` instead of a 1-tuple.
        transforms.Normalize((0.26,), (0.65,))
    ])
    train_val = transforms.Compose([
        transforms.RandomAffine(degrees=(-5, 5), translate=None, scale=(0.9, 1.2),
                                shear=0, resample=False, fillcolor=0),
        common
    ])
    test = transforms.Compose([common])
    return train_val, test


if DATASET == 'Facespoof_test':
    # !unzip -qq '/content/drive/My Drive/Colab/Facespoof/Data/spoof_dummy.zip'
    BATCH_SIZE = 8
    TRAIN_VAL_RATIO = 0.8
    train_val_data_path = '/content/spoof_dummy/train'
    test_data_path = '/content/spoof_dummy/validation'
    classes = ['fake', 'real']
    train_val_transforms, test_transforms = _spoof_transforms()
# FIX: these branches compared DATASET.upper() against mixed-case literals,
# so they could never match; compare against the literal names directly.
elif DATASET == 'Facespoof_low_res':
    # !unzip -qq '/content/drive/My Drive/Colab/Facespoof/Data/spoof.zip'
    BATCH_SIZE = 8
    TRAIN_VAL_RATIO = 0.8
    train_val_data_path = '/content/spoof/train'
    test_data_path = '/content/spoof/validation'
    classes = ['fake', 'real']
    train_val_transforms, test_transforms = _spoof_transforms()
elif DATASET == 'Facespoof_high_res':
    # !unzip -qq '/content/drive/My Drive/Colab/Facespoof/Data/spoof2.zip'
    BATCH_SIZE = 8
    TRAIN_VAL_RATIO = 0.8
    train_val_data_path = '/content/spoof2/train'
    test_data_path = '/content/spoof2/validation'
    classes = ['fake', 'real']
    train_val_transforms, test_transforms = _spoof_transforms()
else:
    raise ValueError('DATASET not specified')

TB_COMMENT = f'Network {DATASET} batch_size={BATCH_SIZE} lr={LEARNING_RATE} num_routing={NUM_ROUTING} lr_decay={LR_DECAY} E={EPOCHS}'
TB_RUN_DIR = TB_RUN_DIR_BASE + TB_COMMENT
MODEL_DIR = MODEL_DIR_BASE + TB_COMMENT
G_DRIVE_DIR = G_DRIVE_DIR_BASE + TB_COMMENT
print(TB_COMMENT)

if os.path.exists(DATA_PATH):
    # !rm -r '/content/logs'
    pass  # shell magic above removes stale logs when run in Colab

# makedirs replaces the previous chain of exists()/mkdir() calls.
os.makedirs(MODEL_DIR, exist_ok=True)
os.makedirs(TB_RUN_DIR, exist_ok=True)

# FIX: guard set_device — calling it on a CPU-only machine raises.
if GPU_DEVICE is not None and torch.cuda.is_available():
    torch.cuda.set_device(GPU_DEVICE)

if MULTI_GPU:
    # FIX: was `batch_size *= ...` (lowercase), a NameError when MULTI_GPU=True.
    BATCH_SIZE *= torch.cuda.device_count()

loaders = {}
train_val_set = torchvision.datasets.ImageFolder(root=train_val_data_path, transform=train_val_transforms)
train_set_size = int(len(train_val_set) * TRAIN_VAL_RATIO)
train_set, validation_set = random_split(train_val_set, [train_set_size, len(train_val_set) - train_set_size])
loaders['train'] = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
loaders['validation'] = torch.utils.data.DataLoader(validation_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)

test_set = torchvision.datasets.ImageFolder(root=test_data_path, transform=test_transforms)
loaders['test'] = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)

print(8*'#', f'Using {DATASET} dataset', 8*'#')
print("Train loader. \tSize: ", len(train_set), '\tData Shape: ', train_set[0][0].shape, '\tBatch len: ', len(loaders['train']))
print("Val loader. \tSize: ", len(validation_set), '\tData Shape: ', validation_set[0][0].shape, '\tBatch len: ', len(loaders['validation']))
print("Test loader. \tSize: ", len(test_set), '\tData Shape: ', test_set[0][0].shape, '\tBatch len: ', len(loaders['test']))

if LR_UPDATE_INTERVAL_IN_ITERATIONS is None:  # idiom fix: was `== None`
    LR_UPDATE_INTERVAL_IN_ITERATIONS = len(loaders['train'])
MODEL_SAVE_INTERVAL_IN_ITERATIONS = MODEL_SAVE_INTERVAL_IN_EPOCHS * len(loaders['train'])

# Show data in data loaders
print('Trainining samples:')
show_batch(loaders['train'])
print('Validation samples:')
show_batch(loaders['validation'])
print('Testing samples:')
show_batch(loaders['test'])


# + id="SQPr0NLgmJ5p"
class Trainer:
    """ Wrapper object for handling training and evaluation """

    def __init__(self, loaders, batch_size, learning_rate, lr_decay, device, multi_gpu):
        self.tb = SummaryWriter(comment=TB_COMMENT, log_dir=TB_RUN_DIR)
        self.device = device
        self.multi_gpu = multi_gpu
        self.all_preds = []
        self.all_labels = []
        self.incorrect_samples = []
        self.incorrect_samples_targets = []
        self.loaders = loaders

        img_shape = self.loaders['train'].dataset[0][0].numpy().shape

        self.net = torchvision.models.resnet50(pretrained=False)  # Network model
        # FIX: was `.cuda()`, which crashes on CPU-only hosts despite DEVICE
        # already encoding the cuda/cpu choice.
        self.net = self.net.to(self.device)
        if self.multi_gpu:
            self.net = nn.DataParallel(self.net)

        self.optimizer = optim.Adam(self.net.parameters(), lr=learning_rate)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=lr_decay)

        print(10*'#', 'PyTorch Model built'.upper(), 10*'#')
        print('No. of params:', sum([np.prod(p.size()) for p in self.net.parameters()]))
        print(TB_COMMENT)

    def __repr__(self):
        return repr(self.net)

    def run(self, epochs, classes):
        """Train for `epochs` epochs, alternating train/validation phases.

        Logs loss/accuracy to TensorBoard and checkpoints the model
        periodically and at the end.
        """
        print(8*'#', 'Run started'.upper(), 8*'#')
        for epoch in range(1, epochs+1):
            for phase in ['train', 'validation']:
                if phase == 'train':
                    self.net.train()
                else:
                    self.net.eval()
                t0 = time()
                running_loss = 0.0
                correct = 0; total = 0
                # NOTE(review): batch_len is taken from the *train* loader even
                # during validation — n_iter bookkeeping relies on this; confirm intended.
                batch_len = len(self.loaders['train'])
                for i, (images, labels) in enumerate(self.loaders[phase]):
                    n_iter = ((epoch-1) * batch_len) + i
                    t1 = time()
                    images, labels = images.to(self.device), labels.to(self.device)
                    self.optimizer.zero_grad()
                    # Disable autograd during validation (saves memory/time,
                    # identical loss values).
                    with torch.set_grad_enabled(phase == 'train'):
                        preds = self.net(images)
                        loss = F.cross_entropy(preds, labels)  # Loss function
                        if phase == 'train':
                            loss.backward()
                            self.optimizer.step()

                    running_loss += loss.item()
                    total += labels.size(0)
                    correct += get_num_correct(preds, labels)
                    accuracy = float(correct) / float(total)

                    if phase == 'train' and (n_iter % LOG_INTERVAL) == 0:
                        print('Epoch: {:02d} Batch {:04d}/{:04d} Loss: {:.5f}, Accuracy: {:.5f} Time: {:.3f}s'.format(
                            epoch, i+1, batch_len, running_loss/(i+1), accuracy, time()-t1))
                    if phase == 'train' and (n_iter % LR_UPDATE_INTERVAL_IN_ITERATIONS) == 0 and (n_iter != 0):
                        self.scheduler.step()
                    if phase == 'train' and (n_iter % MODEL_SAVE_INTERVAL_IN_ITERATIONS) == 0 and (n_iter != 0):
                        torch.save(self.net.state_dict(), os.path.join(MODEL_DIR, str(n_iter)+'.pth.tar'))

                print('{} \tEpoch: {:02d} Loss: {:.5f} Accuracy: {:.5f} Time: {:.3f}s'.format(
                    phase.upper(), epoch, running_loss/(i+1), accuracy, time()-t0))
                n_iter = epoch * batch_len
                self.tb.add_scalar(f'{phase}/loss', running_loss/(i+1), n_iter)
                self.tb.add_scalar(f'{phase}/accuracy', accuracy, n_iter)

        self.tb.close()
        torch.save(self.net.state_dict(), os.path.join(MODEL_DIR, 'model.pth.tar'))

    def test(self, show_per_class_accuracy=False):
        """Evaluate on the test loader; collect predictions and misclassified samples."""
        self.net.eval()
        t0 = time()
        running_loss = 0.0
        running_margin_loss = 0.0          # legacy fields, always 0 for this model
        running_reconstruction_loss = 0.0
        correct = 0; total = 0
        batch_len = len(self.loaders['test'])
        for i, (images, labels) in enumerate(self.loaders['test']):
            t1 = time()
            images, labels = images.to(self.device), labels.to(self.device)
            self.optimizer.zero_grad()
            with torch.no_grad():  # inference only — no gradients needed
                preds = self.net(images)
                loss = F.cross_entropy(preds, labels)  # Loss function

            # Remember every misclassified sample for later visualization.
            incorrect_idxes = torch.nonzero((preds.argmax(dim=1).eq(labels) == False))
            for incorrect_idx in incorrect_idxes:
                idx = incorrect_idx.item()
                self.incorrect_samples.append(images[idx])
                self.incorrect_samples_targets.append(labels[idx].item())

            running_loss += loss.item()
            total += labels.size(0)
            correct += get_num_correct(preds, labels)
            accuracy = float(correct) / float(total)

            self.all_labels = np.append(self.all_labels, labels.cpu().numpy())
            self.all_preds = np.append(self.all_preds, preds.argmax(dim=1).cpu().numpy())

        print('{} \tLoss: {:.5f} M_Loss: {:.5f} R_loss: {:.5f} Accuracy: {:.5f} Time: {:.3f}s'.format(
            'TEST', running_loss/(i+1), running_margin_loss/(i+1), running_reconstruction_loss/(i+1), accuracy, time()-t0))

        if show_per_class_accuracy:
            class_correct = list(0. for _ in classes)
            class_total = list(0. for _ in classes)
            with torch.no_grad():
                for images, labels in self.loaders['test']:
                    images, labels = images.to(self.device), labels.to(self.device)
                    preds = self.net(images)
                    preds = preds.argmax(dim=1)
                    for i in range(labels.size(0)):
                        label = labels[i]
                        if labels[i] == preds[i]:
                            class_correct[label] += 1
                        class_total[label] += 1
            print('\nPer class accuracy on TEST set:')
            for i in range(len(classes)):
                print('Accuracy of {} ({}) : {:.2f}% ({:5d}/{:5d})'.format(
                    classes[i], i, 100 * class_correct[i] / class_total[i],
                    int(class_correct[i]), int(class_total[i])))

    def show_incorrect_prediction(self):
        """Display all misclassified test images collected by test()."""
        # FIX: typo 'corrrect' in the printed label.
        print('\nIncorrect samples\' correct labels: ', self.incorrect_samples_targets)
        print('Incorrectly predicted samples:')
        fig, ax = plt.subplots(figsize=(25, 25))
        ax.set_xticks([]); ax.set_yticks([])
        img_grid = torchvision.utils.make_grid(self.incorrect_samples, nrow=10, normalize=True)
        # FIX: make_grid was applied a second time to an already-built,
        # HWC-permuted grid; imshow just needs the grid permuted to HWC.
        _ = ax.imshow(img_grid.cpu().detach().permute(1, 2, 0))

    def show_classification_report(self, target_names=classes):
        print(classification_report(self.all_labels, self.all_preds, target_names=target_names))

    def show_confusion_matrix(self, xticklabels=classes, yticklabels=classes):
        confusion_matrix_test = confusion_matrix(self.all_labels, self.all_preds,
                                                 labels=None, sample_weight=None, normalize=None)
        heatmap_test = sn.heatmap(confusion_matrix_test, annot=True)
        _ = heatmap_test.set(xlabel='Predicted label', ylabel='Actual label',
                             xticklabels=xticklabels, yticklabels=yticklabels)

    def saveData(self, G_DRIVE_DIR):
        """Move model checkpoints and TensorBoard runs to Google Drive."""
        try:
            if not os.path.exists(G_DRIVE_DIR):
                os.mkdir(G_DRIVE_DIR)
        except Exception as e:
            print('ERROR: G_DRIVE_DIR dir creation error', e)
        try:
            dest = shutil.move(MODEL_DIR, os.path.join(G_DRIVE_DIR, 'models'))
            print("Transfered to: ", dest)
        except Exception as e:
            print('ERROR: G_DRIVE_DIR model transfer error', e)
        try:
            dest = shutil.move(TB_RUN_DIR, os.path.join(G_DRIVE_DIR, 'runs'))
            print("Transfered to: ", dest)
        except Exception as e:
            print('ERROR: G_DRIVE_DIR runs transfer error', e)

    def load(self, load_path):
        """Load network weights from `load_path` (no-op when None)."""
        if load_path != None:
            try:
                self.net.load_state_dict(torch.load(load_path))
                _ = self.net.eval()
                print('Model state loaded')
            except Exception as e:
                print('ERROR: Model state load error: ', e)


# + id="CzWB09-FnBZF"
if os.path.exists(TB_RUN_DIR):
    # %tensorboard --logdir='/content/logs/runs'
    pass  # notebook magic launches TensorBoard on the local run dir
else:
    # %tensorboard --logdir=G_DRIVE_DIR
    pass  # fall back to the Drive copy of the runs

# + id="rEhNXZrinIOP"
# Train Model
net_trainer = Trainer(loaders, BATCH_SIZE, LEARNING_RATE, LR_DECAY, device=DEVICE, multi_gpu=MULTI_GPU)
# net_trainer.load(load_path = None)
net_trainer.run(EPOCHS, classes=classes)
# net_trainer.saveData(G_DRIVE_DIR)

net_trainer.test(show_per_class_accuracy=True)

# + id="Xrn82NvIku4k"
net_trainer.show_classification_report()

# + id="AjZkRocpjWFC"
net_trainer.show_confusion_matrix()

# + id="oh2G0Z-Fom6c"
# FIX: result was bound to `auc`, shadowing sklearn.metrics.auc imported above.
roc_auc = roc_auc_score(net_trainer.all_labels, net_trainer.all_preds)
print('ROC AUC Score: {:.3f}'.format(roc_auc))

# + id="WNiGmJmSjnwS"
net_trainer.show_incorrect_prediction()
Training scripts/Face_Liveness_training_script.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

#hide
# %load_ext autoreload
# %autoreload 2

# +
# default_exp pypi
# -

# # pypi
#
# > Automate pypi package building and publishing

#hide
from nbdev.showdoc import *

#export
import os
import shutil
import subprocess

from easyrelease.utils import check_project_root, get_pypi_credentials, run_tests


#export
@check_project_root
def clean_dist():
    "Remove the dist/ folder so stale build artifacts can never be uploaded."
    # ignore_errors=True: a missing dist/ (fresh checkout) is not an error
    shutil.rmtree("dist", ignore_errors=True)


#export
@check_project_root
def build_pypi_package():
    "Build sdist and wheel distributions via setup.py."
    # check=True: raise CalledProcessError on a failed build instead of
    # silently continuing (previously a broken build would still be published)
    subprocess.run(["python", "setup.py", "sdist", "bdist_wheel"], check=True)


#export
@check_project_root
def publish_pypi_package():
    "Publish the built distributions to pypi via twine."
    username, password = get_pypi_credentials()
    # check=True: surface authentication / upload failures to the caller
    subprocess.run(
        [
            "twine", "upload", "--repository", "pypi", "dist/*",
            "--username", username, "--password", password,
        ],
        check=True,
    )


#export
@run_tests
def build_and_publish_pypi():
    "Clean, build and publish the pypi package; stops at the first failing step."
    clean_dist()
    build_pypi_package()
    publish_pypi_package()


#hide
from nbdev.export import notebook2script
notebook2script()
nbs/01_pypi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #练习1 name=input('请输入你的名字。') print('你好,',name) date=float(input('请输入你的出生日期。')) if 1.20<=date<=2.18: print('水瓶座') if 2.19<=date<=3.20: print('双鱼座') if 3.21<=date<=4.19: print('白羊座') if 4.20<=date<=5.20: print('金牛座') if 5.21<=date<=6.21: print('双子座') if 6.22<=date<=7.22: print('巨蟹座') if 7.23<=date<=8.22: print('狮子座') if 8.23<=date<=9.22: print('处女座') if 9.23<=date<=10.23: print('天秤座') if 10.24<=date<=11.22: print('天蝎座') if 11.23<=date<=12.21: print('射手座') if 12.22<=date<=12.31 or 1.01<=date<=1.18: print('摩羯座') #练习2 m=int(input('请输入一个整数。')) n=int(input('请输入一个不为零的整数。')) c=int(input('求和,输入0;求积,输入1;求余数,输入2')) if c==0: if m>n: s=(m+n)*(m-n)/2 print(s) if m<n: s=(m+n)*(n-m)/2 print(s) if m==n: s=m+m print(s) elif c==1: i=0 if n>m: while i<=n-m: i=i+1 total=m*(m+1) m=m+1 print(total) elif n<m: while i<=m-n: i=i+1 total=m*(m-1) m=m-1 print(total) else: total=m*2 print(total) elif c==2: s=m%n print(s) else: s=m//n print(s) #练习3 m=int(input('输入当前PM2.5的值')) if m>500: print('打开空气净化器,带上防霾口罩。') else: print('多多留意空气质量。') #练习4 word=str(input('请输入一个单词。')) if word.endswith('x')or word.endswith('sh') or word.endswith('ch') or word.endswith('s'): print(word,'es',sep='') elif word.endswith('y'): print('变y为i+es') else: print(word,'s',sep='') #尝试性练习 print('能够在屏幕上显示空行') print() print('以上是空行') # + #挑战性练习 m=int(input('请输入你想输入的整数个数')) p=int(input('pls input')) q=int(input('pls input')) if p<q: max1=q max2=p else: max1=p max2=q i=2 while i<m: n=int(input('pls input')) if n>max1: max2=max1 max1=n if max2<=n<=max1: max2=n else: max2=max2 i+=1 print(max2) # -
chapter1/homework/localization/3-22/201611680311.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # monitoring aws processes # ###### imports import os, shutil, tarfile import datetime as dt # + [markdown] heading_collapsed=true # #### functions # + hidden=true def get_dir_size(path='.'): total = 0 with os.scandir(path) as it: for entry in it: if entry.is_file(): total += entry.stat().st_size elif entry.is_dir(): total += get_dir_size(entry.path) return total # - # #### monitoring file structures # + [markdown] heading_collapsed=true # ###### test re-expand recompressed (filtered to jawiki) raw download # + hidden=true tar_path = '../data/raw/jawiki/pageviews/' + '2016.tar.bz2' extract_to_dir = '../data/temp/test_reexpand/' # os.makedirs(extract_to_dir) # + hidden=true (os.listdir(extract_to_dir), get_dir_size(extract_to_dir)) # + hidden=true start = dt.datetime.now() with tarfile.open(tar_path, "r:bz2") as tar: tar.extractall(extract_to_dir) end = dt.datetime.now() reexpand_time_jawiki_pageviews = end - start # + [markdown] heading_collapsed=true # ###### recompressed (filtered to jawiki) raw downloads # + hidden=true peek_dir = '../data/raw/jawiki/pageviews/' (os.listdir(peek_dir), get_dir_size(peek_dir)) # + [markdown] heading_collapsed=true # ###### cleaned daily-count csvs for each page_id # + hidden=true (os.listdir('../data/processed/jawiki/diz_daily/'), get_dir_size('../data/processed/jawiki/diz_daily/') ) # - # ###### raw data downloads (filtered to jawiki only) # + dpath = '../data/temp/jawiki_pageviews/' n_month_dirs = len(list(os.walk(dpath))) all_days = sum((list(os.walk(dpath))[i][2] for i in range(2,n_month_dirs)), []) print(f"{round(get_dir_size('../data/temp/jawiki_pageviews/')*10**-6)} MB") print(f"{len(all_days)} days of ~{1070}") print() print(f"{round(get_dir_size('../data/temp/raw_bz2/')*10**-6)} MB") 
print(os.listdir('../data/temp/raw_bz2')) # - # + [markdown] heading_collapsed=true # ###### delete file or directory # + hidden=true active="" # d = '../data/temp/' # for p in [d+f for f in os.listdir(d)]: # try: # os.remove(p) # except IsADirectoryError: # shutil.rmtree(p) # + [markdown] heading_collapsed=true # #### monitoring logfiles # + [markdown] hidden=true # note:. # check these pages to setup remote access to mariadb: # # [link1](https://stackoverflow.com/questions/9766014/connect-to-mysql-on-amazon-ec2-from-a-remote-server) # # [link2](https://docs.streamlit.io/knowledge-base/tutorials/databases/mysql) # # + hidden=true logpath = '../data/logs/pageviews-log_2022-04-06_0.txt' # + hidden=true def leng(): with open(logpath) as f: num_lines = 0 while (f.readline()): num_lines+=1 return num_lines # + hidden=true def tail(n=25): length = leng() start = length - n lst = [] with open(logpath) as f: ct = 0 while ct < start: f.readline() ct += 1 while (line:=f.readline()): lst.append(line) return '\n'.join(lst) # + [markdown] hidden=true # made it to 372 last time # + hidden=true tailtext = tail() taillist = tailtext.split(sep='\n') # + hidden=true list(filter(lambda x: len(x) < 500, taillist)) # - # #### ----------
notebooks/1.25-sfb-monitoring-logfiles.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="e9661373-4482-4fd6-87db-9debcf5835cc" _uuid="58814c6412866fd76edd976c63878aa7d7318c55" # # Introduction # * [DATA](#1) # * [Data Giriş](#2) # * [Data Nedir?](#3) # * [Level of Measurements (Ölçülme Ölçeği)](#4) # * [Level of Measurements Quiz Cevaplar](#5) # * [Population vs Sample](#6) # * [Central Tendency (Merkezi Eğilim)](#7) # * [Central Tendency Quiz Cevaplar](#8) # * [Dispersion (Dağılım)](#9) # * [Dispersion Quiz Cevaplar](#10) # * [Quartiles](#11) # * [Quartiles Quiz Cevaplar](#12) # * [Bivariate Data and Covariance (İki Değişkenli Veri ve Kovaryans)](#13) # * [Pearson Correlation Coefficient (Pearson Korelasyon Katsayısı)](#14) # * [Spearman Rank Coefficient (Spearman Rank Katsayısı)](#15) # * [Effect size](#16) # * [Data Neler Öğrendik?](#17) # * [Probability (Olasılık)](#18) # * [Probability Giriş](#19) # * [Probability Nedir?](#20) # * [Permutation (Permutasyon)](#21) # * [Permutation Quiz Cevaplar](#22) # * [Combination (Kombinasyon)](#23) # * [Intersection, Unions and Complements (Kesişim, Birleşim ve Tamamlayıcı)](#24) # * [Independent and Dependent Events (Bağımsız ve Bağımlı Olaylar)](#25) # * [Conditional Probability (Şartlı olasılık)](#26) # * [Conditional Probability Quiz Cevaplar](#27) # * [Bayes Theorem (Bayes teoremi)](#28) # * [Probability Neler Öğrendik?](#29) # # * [Probability Distributions (Olasılık Dağılımlar)](#30) # * [Probability Distributions Giriş](#31) # * [Discrete Probability Distributions](#32) # * [Uniform Distributions](#33) # * [Binomial Distributions](#34) # * [Binomial Distributions Quiz Cevaplar](#35) # * [Poisson Distributions](#36) # * [Uniform - Binomial - Poisson Quiz Cevaplar](#37) # * [Continuous Probability Distributions](#38) # * [PDF - CDF](#39) # * [Gaussian (Normal) 
Distributions and Z-Score](#40) # * [Probability Distributions Neler Öğrendik?](#41) # * [Statistics (İstatistik)](#42) # * [Statistics Giriş](#43) # * [Sampling (Örnekleme)](#44) # * [Central Limit Theorem (Merkezi Limit Teoremi)](#45) # * [Standard Error](#46) # * [Hypothesis Testing](#47) # * [Hypothesis Testing Real-World Örneği 1](#48) # * [Hypothesis Testing Real-World Örneği 2](#49) # * [Type 1 ve Type 2 Errors](#50) # * [T-Distribution](#51) # * [A/B Test](#52) # * [Statistics Neler Öğrendik?](#53) # * [ANOVA (Analysis of Variance)](#60) # * [ANOVA Giriş ](#61) # * [ANOVA Nedir?](#62) # * [F Distribution](#63) # * [ANOVA Neler Öğrendik?](#65) # # - # <a id="1"></a> # # DATA # * Bu bölümde data yani veri ile ilgili aynı dili konuşabilmemiz ve internette araştırma yapabilmemiz için gerekli olan keyword'leri,dikkat etmemiz ve bilmemiz gereken kavramları öğreneceğiz. # <a id="2"></a> # ## Data Giriş # * Data Nedir? # * Level of Measurements (Ölçülme Ölçeği) # * Population vs Sample # * Central Tendency (Merkezi Eğilim) # * Dispersion (Dağılım) # * Quartiles # * Bivariate Data and Covariance (İki Değişkenli Veri ve Kovaryans) # * Pearson Correlation Coefficient (Pearson Korelasyon Katsayısı) # * Spearman Rank Coefficient (Spearman Rank Katsayısı) # * Effect size # <a id="3"></a> # ## Data Nedir? # * Bir problem yada konu hakkında toplamış bilgileri data olarak adlandırıyoruz. # * Örneğin elimizde bir ışık sensörü var. Bu sensör 1 gün boyunca bir oda da kayıt alıyor. Bu sensörün bir gün boyunca odanın topladığı ışık şiddetini yani bilgiyi veri olarak adlandırıyoruz. # * Data 2 tipte bulunabilir: # * Continuous (sürekli): Stock price (Hisse senedi fiyatı) mesela 1.119 ve 1.118 tl olabilir. # * Categorical (Kategorik): Uçan Hayvanlar. Kartal, akbaba gibi # * Bir dataya baktığımız zaman ne tür bir data olduğunu anlamak intuitive (sezgisel) yorumlar yapabilmemiz için gerekli. # * Datayı anlamak için görselleştirme yapmamız gerekli. 
# * Datalar genelde bir sürü sayıdan oluşurlar. Mesela: jointplot örneği. # ![Time](gorsellestime_onemi.jpg) # * Bu sayıları görselleştirmeden anlamak çok zor. (Tabloda verinin sadece ilk 15 satırı var.) # * Ama dikkat eğer yanlış görselleştirme yaparsak bu tüm veriyi yanlış yorumlamamıza neden olur. Biz bunu misleading olarak adlandırıyoruz. # <a id="4"></a> # ## Level of Measurements (Ölçülme Ölçeği) # * Levels of Measurements 4 tanedir: nominal, ordinal, interval ve ratio # * Nominal # * Kategori vardır. # * Sıralanamazlar. # * Mesela Asya ülkeleri: Çin Hindistan gibi # * Yada kadın/erkek gibi # * Ordinal: # * Sıralanabilir ama aralarındaki ilişki sayısal olarak anlamsızdır. # * Mesela bir anket düşünün şıklar "asla, bazen, genelde, her zaman" olsun. Genelde bazenden daha genel bir ifade ama sayısal olarak ne kadar dediğimiz zaman bu sorunun cevabı yok. Bu tarz verilere ordinal veri denir. # * Nominal ve ordinal arasında sıralanabilirlik açısından fark var. # * Interval: # * Belirli bir ölçeği vardır. # * Natural zero starting point yoktur. Mesela sıcaklık için sıfırdan başlıyor yada 10 dereceden başlıyor diyemeyiz. # * Mesela oda sıcaklığı 20 derece 10 dereceden sıcaktır gibi karşılaştırmalarda bulunabiliriz. # * Ratio: # * True zero point vardır. Mesela yaş kilo gibi # * Belirli bir ölçeği vardır. # * Bazı bilim insanları interval ve ratio tiplerini tek bir tip olarak kabul eder. # <a id="5"></a> # ## Level of Measurements Quiz Cevaplar # ![Time](meas_level.jpg) # <a id="6"></a> # ## Population vs Sample # * Population # * Bir gruptaki tüm üyeler # * Mesela bir ülkedeki tüm şehirler # * Sample # * Bir grubun Subset'i yani alt kümesi # * Mesela bir ülkedeki X bölgesindeki bazı şehirler gibi # # ![Time](pop_vs_sample.jpg) # <a id="7"></a> # ## Central Tendency (Merkezi Eğilim) # * Mean: ortalama # * Median: bir listede ortada ki sayı # * Mode: bir listede en çok bulunan sayı # * Mean vs Median: # * Mean outlier'lardan etkilenebilir bu nedenle mediam önemli. 
Mesela bir şirkette zam yapılmak istenirse neye bakmalı çalışanların maaş ortalaması yada maaş medyanı? # + #******************************** import matplotlib.pyplot as plt plt.style.use("ggplot") import warnings warnings.filterwarnings("ignore") #******************************** import numpy as np from scipy import stats yas = [1,2,3,5,6,7,7,10,12,13] # Mean mean_yas = np.mean(yas) print(mean_yas) # Median median_yas = np.median(yas) print(median_yas) # Mode mode_yas = stats.mode(yas) print(mode_yas) # - # ### Mean VS Median # * Bazen mean kullanmak yerine median kullanmak daha mantıklı olabilir. # * Mesela bir şirket düşünülem bu şirkette 10 çalışan var. Bu şirketin patronu eğer ortalama maaş 5 değerinin altındaysa her çalışana zam yapacak. salary = [1,4,3,2,5,4,2,3,1,500] print("Mean of salary: ",np.mean(salary)) # * Ortalama maaş 52.5 çıktı. Patron bu ortalama yüzünden çalışanlarına çok maaş verdiğini düşünecek ve bu nedenle maaşlarına zam yapmayacak # * Ama bildiğiniz gibi bu adil bir durum değil çünkü burada 500 değerinde outlier diye adlandırabileceğimiz bir maaş değeri var. # * Median bu outlier değerleri engeller. print("Median of salary: ",np.median(salary)) # * Median değeri 3. Ve bu değer maaşların gerçek değerini yansıtıyor. 5 den de küçük olduğu için patron tarafından bu çalışanlar maaşlarına zam alacaklar. # <a id="8"></a> # ## Central Tendency Quiz Cevaplar ## Central Tendency Quiz Cevaplar maas = [100,13,44,23,56,13,68] # Mean mean_maas = np.mean(maas) print(mean_maas) # Median median_maas = np.median(maas) print(median_maas) # Mode mode_maas = stats.mode(maas) print(mode_maas) # <a id="9"></a> # ## Dispersion (Dağılım) # * Dispersion dağılım demek. (spread out) # * Range: Bir listede en büyük ve en küçük sayı arasında ki fark # * Variance: # * varyans-değişiklik. Listedeki tüm noktalar ortalamadan ne kadar uzaklıkta # * ![Time](variance.jpg) # * Bazı formüllerde N yerine (N-1) olabilir. 
Bu sample variance(N-1) ve population variance(N) diye iki farklı kavram olmasından kaynaklanıyor ama şuan için çok önemli değil. # * Standard deviation (std): # * Variance'ın kare kökü # * İlerde göreceğimiz dağılımlarda bir dağılımın içindeki değerlerin ortalamaya olan uzaklıklarının ne kadar olduğunu belirtmek için kullanacağımız bir kavram. Aslında çok kullanılan ve önemli bir kavram. # * ![Time](var_vs_std.jpg) # + # range yas = [1,2,3,5,6,7,7,10,12,13] print("Range: ", (np.max(yas)-np.min(yas))) # variance print("Variance: ", (np.var(yas))) var = sum((yas - np.mean(yas))**2)/len(yas) print("Variance with formula: ",var) # std print("Std: ", (np.std(yas))) std = np.sqrt(sum((yas - np.mean(yas))**2)/len(yas)) print("Std with formula: ",std) # - import matplotlib.pyplot as plt y = np.random.uniform(5,8,100) x1 = np.random.uniform(10,20,100) x2 = np.random.uniform(0,30,100) plt.scatter(x1,y,color="black") plt.scatter(x2,y,color="orange") plt.xlim([-1,31]) plt.ylim([2,11]) plt.xlabel("x") plt.ylabel("y") print("X1 mean: {} and meadian: {}".format(np.mean(x1),np.median(x1))) print("X2 mean: {} and meadian: {}".format(np.mean(x2),np.median(x2))) # <a id="10"></a> # ## Dispersion Quiz Cevaplar # + # range maas = [100,13,44,23,56,13,68] print("Range: ", (np.max(maas)-np.min(maas))) # variance print("Variance: ", (np.var(maas))) var_maas = sum((maas - np.mean(maas))**2)/len(maas) print("Variance with formula: ",var_maas) # std print("Std: ", (np.std(maas))) std_maas = np.sqrt(sum((maas - np.mean(maas))**2)/len(maas)) print("Std with formula: ",std_maas) # - # <a id="11"></a> # ## Quartiles # * What is quartile? # * 1,4,5,6,8,9,11,12,13,14,15,16,17 # * The median is the number that is in middle of the sequence. In this case it would be 11. # * The lower quartile(first quartile (Q1)(25%)) is the median in between the smallest number and the median i.e. in between 1 and 11, which is 6. 
# * The upper quartile(third quartile (Q3)(75%)), you find the median between the median and the largest number i.e. between 11 and 17, which will be 14 according to the question above. # * IQR(inter quartile range) = Q3-Q1 # * Outliers: Q1 - 1.5*IQR and Q3 + 1.5*IQR sınırları dışarısında kalan değerler # libraries import pandas as pd import seaborn as sns plt.style.use("ggplot") import warnings warnings.filterwarnings("ignore") # read data as pandas data frame data = pd.read_csv("data.csv") data = data.drop(['Unnamed: 32','id'],axis = 1) data.head() data_bening = data[data["diagnosis"] == "B"] data_malignant = data[data["diagnosis"] == "M"] desc = data_bening.radius_mean.describe() Q1 = desc[4] Q3 = desc[6] IQR = Q3-Q1 lower_bound = Q1 - 1.5*IQR upper_bound = Q3 + 1.5*IQR print("Anything outside this range is an outlier: (", lower_bound ,",", upper_bound,")") data_bening[data_bening.radius_mean < lower_bound].radius_mean print("Outliers: ",data_bening[(data_bening.radius_mean < lower_bound) | (data_bening.radius_mean > upper_bound)].radius_mean.values) melted_data = pd.melt(data,id_vars = "diagnosis",value_vars = ['radius_mean']) sns.boxplot(x = "variable", y = "value", hue="diagnosis",data= melted_data) plt.show() # <a id="12"></a> # ## Quartiles Quiz Cevaplar # * [2,2,6,6,8,8,10,10,11,11,15,17] # * The lower quartile, the median, and the upper quartile? # * 6,9,11 # * IQR ? # * 5 # <a id="13"></a> # ## Bivariate Data and Covariance (İki Değişkenli Veri ve Kovaryans) # * Bivariate # * İki variable'ı karşılaştırır ve correlation var mı yok mu ona bakar. # * x ekseni independent variable # * y ekseni dependent variable. Dependent çünkü x e bağlı. # * Correlation: # * İki variable arasındaki ilişki ama causality ile karıştırmamak lazım. # * causality nedensellik yani iki variable arasında bir neden sonuç ilişkisi var. # * İki variable birbiri ile correlated olabilir ama bu demek değildir ki birinin nedeni diğeridir. 
f,ax=plt.subplots(figsize = (18,18)) # corr() is actually pearson correlation sns.heatmap(data.corr(),annot= True,linewidths=0.5,fmt = ".1f",ax=ax) plt.xticks(rotation=90) plt.yticks(rotation=0) plt.title('Correlation Map') plt.show() # plt.figure(figsize = (15,10)) sns.jointplot(data.radius_mean,data.area_mean,kind="regg") sns.jointplot(data.radius_mean,data.fractal_dimension_mean,kind="regg") plt.show() # Also we can look relationship between more than 2 distribution sns.set(style = "white") df = data.loc[:,["radius_mean","area_mean","fractal_dimension_se"]] g = sns.PairGrid(df,diag_sharey = False,) g.map_lower(sns.kdeplot,cmap="Blues_d") g.map_upper(plt.scatter) g.map_diag(sns.kdeplot,lw =3) plt.show() # * Covariance # * Covariance is measure of the tendency of two variables to vary together # * ![Time](covar.jpg) # * Eğer iki vektor identical ise covariance maximum olur. # * Eğer iki vektor arasında bir ilişki yoksa covariance sıfır olur # * Eğer iki vektor farklı yönlerde ise covariance negative olur. # * radius_mean and area_mean arasındaki covariance'a bakalım. # * Daha sonra radius_mean and fractal_dimension_se arasındaki covariance'a bakalım. np.cov(data.radius_mean,data.area_mean) print("Covariance between radius mean and area mean: ",data.radius_mean.cov(data.area_mean)) print("Covariance between radius mean and fractal dimension se: ",data.radius_mean.cov(data.fractal_dimension_se)) fig, axs = plt.subplots(1, 2) axs[0].scatter(data.radius_mean, data.area_mean) axs[1].scatter(data.fractal_dimension_se, data.radius_mean) plt.show() # <a id="14"></a> # ## Pearson Correlation Coefficient (Pearson Korelasyon Katsayısı) # * Pearson Correlation Coefficient: covariance'ı variable'ların standart deviation'ına bölüyoruz # * Division of covariance by standart deviation of variables # * radius mean and area mean arasındaki pearson correlation katsayısına bakalım # * Pearson Correlation Coefficient +1 ve -1 değerleri arasında değişir. 
# * +1 = positive linear correlation # * -1 = negative linear correlation # * 0 = linear correlation yok # * ![Time](pearson.jpg) p1 = data.loc[:,["area_mean","radius_mean"]].corr(method= "pearson") p2 = data.radius_mean.cov(data.area_mean)/(data.radius_mean.std()*data.area_mean.std()) print('Pearson correlation: ') print(p1) print('Pearson correlation: ',p2) sns.jointplot(data.radius_mean,data.area_mean,kind="regg") plt.show() # <a id="15"></a> # ## Spearman Rank Coefficient (Spearman Rank Katsayısı) # * Pearson correlation works well if the relationship between variables are linear and variables are roughly normal. But it is not robust, if there are outliers # * To compute spearman's correlation we need to compute rank of each value ranked_data = data.rank() spearman_corr = ranked_data.loc[:,["area_mean","radius_mean"]].corr(method= "pearson") print("Spearman's correlation: ") print(spearman_corr) # * Spearman's correlation is little higher than pearson correlation # * If relationship between distributions are non linear, spearman's correlation tends to better estimate the strength of relationship # * Pearson correlation can be affected by outliers. Spearman's correlation is more robust. # rank anlamak icin ornek data1 = {'name': ['ali', 'veli', 'hakan', 'ayse', 'fatma'], 'year': [2012, 2012, 2013, 2014, 2014], 'reports': [4, 24, 31, 2, 3], 'coverage': [25, 94, 57, 62, 70]} df = pd.DataFrame(data1, index = ['ankara', 'istanbul', 'sinop', 'bolu', 'izmir']) df df['coverageRanked'] = df['coverage'].rank(ascending=1) df # <a id="16"></a> # ## Effect size # * One of the summary statistics. # * It describes size of an effect. It is simple way of quantifying the difference between two groups. # * In an other saying, effect size emphasises the size of the difference # * Use cohen effect size # * Cohen suggest that if d(effect size)= 0.2, it is small effect size, d = 0.5 medium effect size, d = 0.8 large effect size. 
# * lets compare size of the effect between bening radius mean and malignant radius mean # * Effect size is 1.9 that is too big and says that two groups are different from each other as we expect. Because our groups are bening radius mean and malignant radius mean that are different from each other # * ![Time](efect.jpg) mean_diff = data_malignant.radius_mean.mean() - data_bening.radius_mean.mean() # m1 - m2 var_bening = data_bening.radius_mean.var() var_malignant = data_malignant.radius_mean.var() var_pooled = (len(data_bening)*var_malignant +len(data_malignant)*var_bening ) / float(len(data_bening)+ len(data_malignant)) effect_size = mean_diff/np.sqrt(var_pooled) print("Effect size: ",effect_size) # <a id="17"></a> # ## Data Neler Öğrendik? # * Data Nedir? # * Level of Measurements (Ölçülme Ölçeği) # * Population vs Sample # * Central Tendency (Merkezi Eğilim) # * Dispersion (Dağılım) # * Quartiles # * Bivariate Data and Covariance (İki Değişkenli Veri ve Kovaryans) # * Pearson Correlation Coefficient (Pearson Korelasyon Katsayısı) # * Spearman Rank Coefficient (Spearman Rank Katsayısı) # * Effect size # <a id="18"></a> # # Probability (Olasılık) # * Bu bölümde probability ile ilgili aynı dili konuşabilmemiz ve internette araştırma yapabilmemiz için gerekli olan keyword'leri,dikkat etmemiz ve bilmemiz gereken kavramları öğreneceğiz. # <a id="19"></a> # ## Probability Giriş # * Probability Nedir? # * Permutation (Permutasyon) # * Combination (Kombinasyon) # * Intersection, Unions and Complements (Kesişim, Birleşim ve Tamamlayıcı) # * Independent and Dependent Events (Bağımsız ve Bağımlı Olaylar) # * Conditional Probability (Şartlı olasılık) # * Bayes Theorem (Bayes teoremi) # <a id="20"></a> # ## Probability Nedir? # * Olasılık bir şeyin olmasının veya olmamasının matematiksel değeri veya olabilirlik yüzdesi, değeridir. # * Probability 0 ve 1 arasında değerler alabilir. # * 1 olayın kesin olduğunu 0 ise olayın gerçekleşmediğini gösterir. 
# * Mesela hepimiz yazı tura atmayı biliyoruz. Bir paranın yazı gelme olasılığı 0.5 dir. P(para_yazı) = 0.5 # * Parayı yazı mı turamı diyerek havaya atmaya trial(deneme) denir. Acting of flipping a coin is called trial. # * Bu trial olayları birbirinde independent olaylardır. # * Experiment: her bir trial olayı experiment olarak adlandırılır. # * Simple event: Ortaya çıkabilecek sonuçlar yani yazı yada tura simple event olarak adlandırılır. # * Sample Space: tüm olabilecek simple event'lerin toplamı # * Mesela bir zar atalım. # * Zarı atmak experiment # * Simple events zarı atmanın sonucunda ortaya çıkabilecek sonuçlar 1,2,3,4,5,6 değerleri. # * Sample space => S = {1,2,3,4,5,6} 6 possible outcomes # <a id="21"></a> # ## Permutation (Permutasyon) # * Matematikte permütasyon, her sembolün sadece bir veya birkaç kez kullanıldığı sıralı bir dizidir. # * Mesela 3 tane öğrenci olsun: ali, veli ve ayşe # * Bu üç öğrenciyi kaç farklı şekilde yan yana sıralarız? 3! = 3.2.1 = 6 # * ali, veli, ayşe # * ali, ayşe, veli # * veli, ayşe, ali # * veli, ali, ayşe # * ayşe, veli, ali # * ayşe, ali, veli # * n elemanlık bir kümenin permutasyonu n! # * n elemanlık bir kümenin r alana permutasyonu ise (tekrarlamak yasak) # * ![Time](permu.jpg) # * Örnek: # * Bir web siteye üye olacaksınız 5 karakterli bir şifre belirleyeceksiniz # * Karakterler küçük harf olmak zorunda yada 0-9 arası sayılar olabilir # * Kullanılan bir karakter yada sayıyı tekrar kullanmak yok # * Kaç farklı şifre yaratabilirsiniz? # * n = 29(harf) + 10(sayılar) = 39 # * r = 5 # * P = 39! / (39-5)! = 39! / (34!) 
# Number of 5-character passwords from 39 symbols without repetition:
# P(39, 5) = 39! / 34!
import math
pay = math.factorial(39)
payda = math.factorial(34)
# BUGFIX: `int(pay / payda)` converts both huge integers to floats before
# dividing; above 2**53 that conversion is lossy and the truncation can be off
# by one. Integer floor division is exact (payda divides pay evenly here).
P = pay // payda
print(P)

# * Eger kullanılan bir karakter tekrar kullanılmaya izin verilseydi
# * ![Time](permu2.jpg)

# eger kullanılan bir karakter tekrar kullanılmaya izin verilseydi P = n^r
39**5

# <a id="22"></a>
# ## Permutation Quiz Cevaplar
# * 3 farklı pantolon, 4 farklı ceket ve 2 farklı gömleği olan kişi bir ceket bir gömlek ve bir pantolonu kaç farklı şekilde giyebilir.
# * P = 3.4.2 = 24
# * yada formule göre: 3! / (3-1)! * 4! / (4-1)! * 2! / (2-1)! = 24

# <a id="23"></a>
# ## Combination (Kombinasyon)
# * Kombinasyon, bir nesne grubu içerisinden sıra gözetmeksizin yapılan seçimlerdir.
# * Mesela 5 kişilik bir takımdan 2 kişi seçmek istiyoruz. Sıra gözetmeksizin 5 kişinden 2 kişi seçeriz.
# * ![Time](comb.jpg)
# * n = 5 ve r = 2 sonuç: 10 farklı şekilde seçeriz
# * Permutation vs Combination
# * Bu üç öğrenci: ali, veli ve ayşe sıralamak demek permutation içinden öğrenci seçmek demek combination
# * Mesela 3 öğrenciyi 2 yere sıralayalım. 6 farklı şekilde yaparız.
# * ali, veli
# * ali, ayşe
# * veli, ayşe
# * veli, ali
# * ayşe, veli
# * ayşe, ali
# * Bu üç öğrenciden 2 öğrenci seçelim. Sonuç 3 çıkar Gördüğünüz gibi sıralamada ali, ayşe var aynı zamanda ayşe, ali var. Bu durum sıralama için farklı olsada seçme yani combination için aynı durumlardır.

# <a id="24"></a>
# ## Intersection, Unions and Complements (Kesişim, Birleşim ve Tamamlayıcı)
# * Intersection(Kesişim): iki olayı aynı anda olması. A ve B
# * Unions(Birleşim): iki olayın ikisinden birinin olması. A or B
# * ![Time](kumeler.jpg)

# <a id="25"></a>
# ## Independent and Dependent Events (Bağımsız ve Bağımlı Olaylar)
# * Independent(Bağımsız) Events
# * İki olay birbirinden bağımsız ise. Yani bir event'in sonucu öbürünü etkilemiyorsa.
# * Mesela 5 kez zar atılacak. İlk 4 zar atma olayı gerçekleştirilmiş. 5. zar atılınca sonucun 3 gelme olasılığı nedir? Cevap 1/6. Daha önceden atılan 4 zar 5. atılan zar ile hiç alakalı değil.
Yani bu olaylar birbiri ile independent. # * Dependent(Bağımlı) Events # * İki olay birbiri ile alakalı ise. # * Mesela bir çantam olsun içerisinde 2 tane kırmızı 2 tane mavi top var. # * Çanta içinden ard arda 2 kez top çekeceğim. 2. topun kırmızı olma olasılığı nedir? # * Bu durumda çantadan top çekme olayları dependent. Çünkü eğer ilk seferde kırmızı çekersem çantada 2 mavi 1 kırmızı kalır. Eğer ilk seferde mavi çekersem çantada 1 mavi 2 kırmızı kalır. # <a id="26"></a> # ## Conditional Probability (Şartlı olasılık) # * Conditional Probability: Probability of event A given B # * P(A|B) # * Örnek: Bir zar atıldığında üste gelen sayının 2 den büyük olduğu bilindiğine göre, zarın üst yüzüne gelen sayının çift sayı olma olasılığı kaçtır? # * E = {1,2,3,4,5,6} # * 2 den büyük olma şartı (condition) B = {3,4,5,6} # * B koşulu sağlandıktan sonra B kümesi içerisindeki çift olan sayılar {4,6} # * Sonuç 1/2 # * ![Time](cond.jpg) # * E = {1,2,3,4,5,6} # * B = {3,4,5,6} # * A = {2,4,6} # <a id="27"></a> # ## Conditional Probability Quiz Cevaplar # * 52 lik bir oyun kartı destesini karıştırıyoruz. Karıştırdıktan sonra kapalı bir şekilde masaya koyuyoruz. Sonra en üstten iki tane kart çekiyoruz ikisininde sinek gelme olasılığı nedir? # * P(sinek2| sinek1) = P(sinek1 ∩ sinek2) / P(sinek1) # * P(sinek1) * P(sinek2| sinek1) = P(sinek1 ∩ sinek2) = 13/52 · 12/51 = 0.0588 # <a id="28"></a> # ## Bayes Theorem (Bayes teoremi) # ![Time](bayes.jpg) # * P(A|B) = B olayı gerçekleştiğinde A olayının gerçekleşme olasılığı # * P(A) = A olayının gerçekleşme olasılığı # * P(B|A) = A olayı gerçekleştiğinde B olayının gerçekleşme olasılığı # * P(B) = B olayının gerçekleşme olasılığı # * Şimdi bu formulün nasıl çıktığına bakalım # * P(A|B) = P(A and B) / P(B) # * P(B|A) = P(B and A) / P(A) # * P(A and B) = P(B and A) # * P(A|B).P(B)=P(B|A).P(A) # * P(A|B) = P(B|A) . P(A) / P(B) # * Örnek: # * Bir okul var. Bu okulda 50 tane çocuk olsun. Bu 50 çocuktan 1 tanesi x hastalığına yakalanabilir. 
x hastalığına sahip hastalıklı bir çocuğun testi %90 pozitif, sağlıklı bir çocuğun testi ise %10 pozitif sonuç verebilir. Test sonucu pozitif olan bir çocuğun gerçekten x hastalığına sahip olma olasılığı nedir? # * P(A) : Çocuğun hasta olması olasılığı = 1/50 # * P(B) : Testin pozitif çıkması olasılığı = 1/50 * 0.90 + 49/50 * 0.10 = 0.116 # * P(A|B) : Pozitif çıkan testin hastalık çıkma olasılığı ? # * P(B|A) : x hastalığına sahip çocuğun testinin pozitif çıkma olasılığı = 0.9 # * P(A|B)=P(B|A)*P(A)/P(B) => (0.9 * 1/50) / (0.116) = %15 # <a id="29"></a> # ## Probability Neler Öğrendik? # * Probability Nedir? # * Permutation (Permutasyon) # * Combination (Kombinasyon) # * Intersection, Unions and Complements (Kesişim, Birleşim ve Tamamlayıcı) # * Independent and Dependent Events (Bağımsız ve Bağımlı Olaylar) # * Conditional Probability (Şartlı olasılık) # * Bayes Theorem (Bayes teoremi) # <a id="30"></a> # # Probability Distributions (Olasılık dağılımı) # * Bir olasılık dağılımı bir rassal olayın ortaya çıkabilmesi için değerleri ve olasılıkları tanımlar. # * Discrete distribution da tüm individual probability'lerin toplamı 1 olmak zorunda. Mesela zar # * Discrete probability distribution probability mass function(PMF) olarak da adlandırılır # * Continuous distribution da olasılık eğrisi(probability curve) altında kalan alan 1 e eşit olmalı. # <a id="31"></a> # ## Probability Distributions Giriş # * Discrete Probability Distributions # * Uniform Distributions # * Binomial Distributions # * Poisson Distributions # * Continuous Probability Distributions # * Gaussian (Normal) Distributions and Z-Score # * Distributions Neler Öğrendik? # <a id="32"></a> # ## Discrete Probability Distributions # * Uniform Distributions # * Binomial Distributions # * Poisson Distributions # <a id="33"></a> # ## Uniform Distributions # * Discrete uniform distribution # * Discrete demek mesela bir zar düşünün zarı 1 yada 2 kez atabiliriz ama 1 ve 2 arasında bir sayı olan 1.78 kez atamayız. 
Yani belli başlı sample space'e sahip distribution'lara diyoruz.
# * Discrete çünkü 6 tane possible result'a sahip.
# * Uniform çünkü bu 6 farklı sonucun çıkma olasılığı aynı.

# zar example
# Discrete uniform example: 60000 simulated rolls of a fair die.
# randint's upper bound (7) is exclusive, so outcomes are the integers 1..6.
a = np.random.randint(1,7,60000)
print("sample space: ",np.unique(a))
plt.hist(a,bins=12) # bins =12 güzel gözüksün diye yaptım normalde 6 olmalı
plt.ylabel("Number of outcomes")
plt.xlabel("Possible outcomes")
plt.show()

# <a id="34"></a>
# ## Binomial Distributions
# * Binomial: 2 tane discrete sonucu olan bir trial(deneme).
# * En güzel örneği yazı tura atmak.
# * Bernoulli Trial:
# * Success or failure diye 2 tane sonucu olan random deneyler
# * n = number of trial
# * p = probability of success
# * r = number of success
# * trials birbirinden bağımsız yani independent
# * ![Time](bernoille.jpg)
#

# yazı tura
# Binomial example: 10000 experiments of n = 2 fair coin flips; the histogram
# weights normalize counts into empirical probabilities (~0.25 / 0.5 / 0.25).
n = 2 # number of trials
p = 0.5 # probability of each trial
s = np.random.binomial(n, p,10000) # 10000 = number of test
weights = np.ones_like(s)/float(len(s))
plt.hist(s, weights=weights)
plt.xlabel("number of success") # grafigin soluna dogru basari azaliyor
plt.ylabel("probability")
plt.show()
# 0.25 prob 0 success (2 yazı)
# 0.5 prob 1 success (yazı-tura)
# 0.25 prob 2 success (2 tura)
# (yazı-yazı)(yazı-tura)(tura-yazı)(tura-tura)

# <a id="35"></a>
# ## Binomial Distributions Quiz Cevap
# * Bir zar 10 kez atılıyor. 4 kere 6 sayısı gelme olasılığı nedir?

# +
# Binomial PMF for exactly r = 4 sixes in n = 10 rolls, computed two ways:
# via scipy's binom.pmf and via the explicit C(n, r) * p^r * (1-p)^(n-r) formula.
n = 10
r = 4 # success
p = 1/6 # success rate

# library solution
from scipy.stats import binom
print(binom.pmf(r,n,p))

# formula
import math
print((math.factorial(n)/(math.factorial(n-r)*math.factorial(r)))*(p**r)*(1-p)**(n-r))
# -

# <a id="36"></a>
# ## Poisson Distributions
# * Binomial: number of successes out of n trials
# * Poisson: number of successes per unit of time
# * lambda = number of occurences / interval
# * interval zamana olabilir distance(mesafe) olabilir
# * Örneğin lambda = 10 olsun. Bir taşımacılık şirketi için bu değer her bir saatte ortalama 10 kamyon anlamına gelebilir.
# * ![Time](posss.jpg) # örnegin her saatte ortalama 3 kamyon gorsellestirmesi lamda = 3 s1 = np.random.poisson(lamda,100000) weights1 = np.ones_like(s1)/float(len(s1)) plt.hist(s1, weights=weights1,bins = 100) plt.xlabel("number of occurances") # grafigin soluna dogru basari azaliyor plt.ylabel("probability") # sekilde goruldugu gibi en yuksek olasilik saatte 10 kamyon ama saatte 20 kamyon bile olabilir # <a id="37"></a> # ## Uniform - Binomial - Poisson Quiz Cevaplar # * Grafiklerin hangi distributiona ait olduğunu seçin. # ![Time](dist_quiz.jpg) # <a id="38"></a> # ## Continuous Probability Distributions # * PDF - CDF # * Gaussian (Normal) Distributions and Z-Score # <a id="39"></a> # ## PDF - CDF # * PDF: Probability Density Function # * CDF: Cumulative Distribution Function # ![Time](pdf_cdf.jpg) # <a id="40"></a> # ## Gaussian (Normal) Distributions # * Continuous Probability Distributions "Probability density functions (PDF)" olarak da adlandırılır # * Gaussian ayrıca Bell Shaped Distribution olarak da adlandırılır. # * Gasussian distribution'ın önemli olmasının bir sebebi gerçek hayatta pek çok örneği olması. # * IQ # * Boy ve kilo # * Simetriktir. Bir mean değeri etrafında belirli bir standart deviation'a göre dağılım gösterirler. # * Instead of making formal definition of gaussian distribution, I want to explain it with an example. # * The classic example is gaussian is IQ score. # * In the world lets say average IQ is 110. # * There are few people that are super intelligent and their IQs are higher than 110. It can be 140 or 150 but it is rare. # * Also there are few people that have low intelligent and their IQ is lower than 110. It can be 40 or 50 but it is rare. # * From these information we can say that mean of IQ is 110. And lets say standart deviation is 20. # * Mean and standart deviation is parameters of normal distribution. # * Lets create 100000 sample and visualize it with histogram. 
# parameters of normal distribution
# Draw 100000 IQ samples from N(mu = 110, sigma = 20) and plot the histogram.
mu, sigma = 110, 20 # mean and standard deviation
s = np.random.normal(mu, sigma, 100000)
print("mean: ", np.mean(s))
print("standart deviation: ", np.std(s))

# visualize with histogram
plt.figure(figsize = (10,7))
# BUGFIX: the `normed` keyword was deprecated and then removed in matplotlib 3.x;
# `density` is its direct replacement (False = plot raw frequencies, as before).
plt.hist(s, 100, density=False)
plt.ylabel("frequency")
plt.xlabel("IQ")
plt.title("Histogram of IQ")
plt.show()

# * As it can be seen from histogram most of the people are cumulated near to 110 that is mean of our normal distribution
# * However what is the "most" I mentioned at previous sentence? What if I want to know what percentage of people should have an IQ score between 80 and 140?
# * We will use z-score the answer this question.
# * z = (x - mean)/std
# * z1 = (80-110)/20 = -1.5
# * z2 = (140-110)/20 = 1.5
# * Distance between mean and 80 is 1.5std and distance between mean and 140 is 1.5std.
# * If you look at z table, you will see that 1.5std correspond to 0.4332
# * ![Time](z_score.jpg)
# * Lets calculate it with 2 because 1 from 80 to mean and other from mean to 140
# * 0.4332 * 2 = 0.8664
# * 86.64 % of people has an IQ between 80 and 140.
# * ![Time](hist_iq.jpg)

# <a id="41"></a>
# ## Probability Distributions Neler Öğrendik?
# * Discrete Probability Distributions
# * Uniform Distributions
# * Binomial Distributions
# * Poisson Distributions
# * Continuous Probability Distributions
# * PDF - CDF
# * Gaussian (Normal) Distributions and Z-Score

# <a id="42"></a>
# # Statistics (İstatistik)
# * İstatistik, verilerin toplanması, organizasyonu, analizi, yorumlanması ve sunulması ile ilgilenen bir matematik dalıdır.
# * Özetle istatistikte amacımız:
# * Elimizde bir grup var çok büyük bir grup (population)
# * Bu grup içerisinden rastgele seçim yapıp küçük bir grup oluşturuyoruz (sample or sub-group)
# * Artık küçük bir grubumuz var. Biz istatistik ile bu küçük grubun karakteristiğini anlamaya çalışıyoruz.
# * Küçük gruptan istatistiksel çıkarımlar yaparak büyük grubu tanımlamaya çalışıyoruz.
# * İstatistiksel çıkarımlar yapmak içinde variable'lardan faydalanıyoruz. Bu variable dediğimiz şey grubun içerisindeki her bir bireyi tanımlayan karakteristik özellikler mesela yaş, maaş, cinsiyet gibi # <a id="43"></a> # ## Statistics Giriş # * Sampling (Örnekleme) # * Central Limit Theorem (Merkezi Limit Teoremi) # * Standard Error # * Hypothesis Testing # * Hypothesis Testing Real-World Örneği 1 # * Hypothesis Testing Real-World Örneği 2 # * Type 1 ve Type 2 Errors # * T-Distribution # * A/B Test # * Statistics Neler Öğrendik? # <a id="44"></a> # ## Sampling (Örnekleme) # * Sampling büyük bir gruptan rastgele bireyler seçerek küçük bir grup oluşturmaktır. # * Burada önemli olan bir yere bias'lanmadan sampling yapabilmek. # * Bias ne demek? Mesela bir maç sonucu tahmini yapmak istiyoruz. Galatasaray - Fenerbahçe Maçı var. İlk önce kendimize gidip sampling yaparak bir grup oluşurmak zorundayız. Eğer bizim sub grubumuzun member'larını sadece Galatasaray taraftarından seçersek biz bias'lanmış oluruz. Bias eğilim demek. Böylece istatistiksel analiz sonucumuz Galatasaray eğilimli bir sonuç verir. # * İkinci dünya savaşı uçak örneği. # * Sampling Types: # * Random Sampling: # * Bir grup içinden rastgele seçim yapmak # * Bir futbol maçında forma rengine bakmadan rastgele seyirci seçmek # * Stratified(katmanlı) Random Sampling: # * İlk önce belirli bir karakteristiğe göre büyük grubu segment'lere ayır. Daha sonra bu segmentler içerisinden seç. # * Bir futbol maçında ilk önce seyirciyi takımlarına göre segment'lere ayırıp daha sonra seçim yapmak. Daha sonra gruplardan random seçmek. # * Cluster Sampling: # * Bir populasyonu birden fazla küçük gruba bölerek grup seçmek. # * Mesela öğrenci başarısı araştırması yapılmak isteniliyor. Tüm population yani Türkiye farklı cluster'lara bölünüyor yani şehirlere daha sonra bu clusterlardan bazıları seçilerek araştırma yapılıyor. 
# ![Time](stra_cluster.jpg)

# <a id="45"></a>
# ## Central Limit Theorem (Merkezi Limit Teoremi)
# * Merkezi limit teoremi büyük bir sayıda olan bağımsız ve aynı dağılım gösteren rassal değişkenlerin aritmetik ortalamasının, yaklaşık olarak normal dağılım göstereceğini ifade eden bir teoremdir.
# * Mean value from a sub group will be normally distributed about the population mean.
# ![Time](clt.jpg)

# BUGFIX: np.random.random_integers is long-deprecated and removed from newer
# NumPy releases. random_integers(10) drew integers 1..10 INCLUSIVE; randint's
# upper bound is exclusive, so randint(1, 11, ...) is the exact equivalent.
x = np.random.randint(1, 11, size=100000)
plt.hist(x)
plt.show()

# Draw 10000 small random samples (size 5..9) from the uniform population above;
# by the CLT their means are approximately normally distributed around 5.5.
import random
mean_sample = []
for i in range(10000):
    sample = random.randrange(5,10)
    mean_sample.append(np.mean(random.sample(list(x),sample)))
plt.hist(mean_sample,bins = 50, color = "red")
plt.show()

# Overlay the (uniform) population and the (approximately normal) sample means.
plt.hist(x,alpha = 0.5,density=True)
plt.hist(mean_sample,bins = 50,alpha = 0.5,color = "red",density=True)
plt.title("Central Limit Theorem")
plt.show()

# <a id="46"></a>
# ## Standard Error
# * Standart hata bir sınamada seçilebilecek tüm örneklemlerden sadece bir tanesine dayalı kestirimlerin içerdiği hata oranıdır.
# * N = populationda ki birey sayısı
# * P = population parameter
# * sigma = population standard deviation
# * n = sampling yaptıktan sonra sub-grupta ki birey sayısı
# * p = sample statistic
# * SE = standart error of sample
# * ![Time](standerror.jpg)
# * Örnek yapalım:
# * Bir populasyonun IQ seviyesi gaussian distribution şeklinde.
# * Mean = 100 ve std = 15
# * Elimizde 10 farklı kişi var ve IQ ortalaması 104. Biz bu 10 kişi için populasyonumuzun bireyleri diyebilir miyiz?
# * n = 10, x = 104
# * SE = 15/root(10) = 4.7
# * Bu sonuca göre diyebiliriz ki 10 bireyin IQ'su 68% ile 95.3 ve 104.7 arasında.

# <a id="47"></a>
# ## Hypothesis Testing
# * Hypothesis Testing istatistiksel metotların real-world problemlerine uygulanmasıdır.
# * **Null Hypothesis** mesela dünya düzdür. Bu hipotez ya reject yada fail to reject olur.
# * reject: reddetmek
# * fail to reject: reddetmemek
# * Eğer null hypothesis reject edilirse. Bir tane **alternate hypothesis** oluştururuz. Mesela dünya yuvarlak.
# * Gerçek dünya problemini nasıl hypothesis'e dönüştürdüğümüze bakalım. # * İlk önce bir null hypothesis yaratıyoruz, başlangıçta bu null hypothesis doğru (true) olarak kabul ediyoruz ve sonra çürütmeye(reject) çalışıyoruz. Eğer data null hypothesis'i reject ederse alternative hypothesis'ler oluşturmaya başlıyoruz. # * Null hypothesis "Ankara'da insanların boy ortalaması 2 metre" olsun. # * Null hypothesis: average height = 2 m # * Alternate hypothesis "Ankara'da insanların boy ortalaması 2 metre değil" # * Alternate hypothesis: average height != 2 m # * Başlangıçta null hypothesis doğru olarak kabul ediyoruz demiştik. Şimdi ilk önce verimizi topluyoruz. # * Sonra test istatistiklerini (test statistic = z value) kullanarak p-value'yu buluyoruz. # * p-value'yu 0.05 ile karşılaştırıyoruz. 0.05 = level of significance # * Eğer p-value < 0.05(level of significance) (confidence level 95%) # * Null hypothesis'i reject ediyoruz. If p-value is low, we conclude that null hypothesis is unlikely to be true # * Eğer p-value > 0.05(level of significance) fail to reject ediyoruz. # <a id="48"></a> # ## Hypothesis Testing Real-World Örneği 1 # * Real World example! # * Bir araba firması park sensoru yapıyor. # * Bu sensor ortalama 3 milisaniye de bir sample alıyor ve bu sürenin hata payı 0.6 ms. Yani 2.4-3.6 ms arasında ölçüm alabilir # * **mean = 3 ms** ve **sigma = 0.6 ms** # * Araba firması sahibi bu süreyi azaltalım 3 ms çok fazla diyor ve bunun için mühendis işe alıyor. # * Ayrıca firma sahibi 99% lik bir oranla bu sensör daha kısa sürede ölçüm alsın istiyor. Yani confidence level = %99 => **a = 0.01** # * Mühendisler çalışıyor ve bu sensörü geliştirdikten sonra 50 kez test ediyor. Sonuç bu sensor artık ortalama 2.8 ms'de bir ölçüm alabiliyor. Yani **n = 50** ve **yeni_mean = 2.8 ms** # * Soru şu yeni sensör ile alınan bu ölçümler istatistiksel olarak bir önceki sensör ile alınan ölçümlerden daha iyi yani hızlı mı? 
# * Burada iki olasılık var birincisi evet mühendisler iyi iş çıkardı ve sensor daha hızlı. İkincisi hayır zaten ilk sensörde 0.6 lık bir hata payı vardı bu nedenle 2.8 ms yeni sonuç şans eseri çıkmış olabilir. # * Bu sorunun cevabını bize **hypothesis testing** verecek. # * İlk olarak null hypothesis belirleniyor. Sonra alternative hypothesis # * Null hypothesis mean >= 3 (burada null hypothesisi çürütmeye çalışacağız) # * Null hypothesis gözlemlenen verileri kullanarak yanlış oldukları ispat edilebilecek açıklamalardır. # * Alternate hypothesis mean < 3 # * Level of significance = 0.01 (99% confidence interval araba firması sahibi belirlemişti) # * Test type: left tail (alternative hypothesis'e bakılır) # * ![Time](tst_type.jpg) # * P- value bulmak için test statistic'i kullanıyoruz. # * ![Time](z_value.jpg) # * Z = 2.8 - 3 / (0.6/root(50)) = -2.35 # * ![Time](z_table_eksi.jpg) # * P = 0.0094 # * Eğer p-value > 0.01(level of significance) fail to reject ediyoruz # * 0.0094 < 0.01 bu yüzden **reject to Null Hypothesis** # * Yani null hypothesis yanlış. Sensörümüz hızlanmış. Mühendisler başarılı oldu. # <a id="49"></a> # ## Hypothesis Testing Real-World Örneği 2 # * Datamız ile lgili bir örnek yapalım: # * Null hypothesis = relationship between **radius_mean** and **area_mean** is zero in tumor population. # * Alternate hypothesis = relationship between **radius_mean** and **area_mean** is not zero in tumor population. # * lets find p-value (probability value) statistic, p_value = stats.ttest_rel(data.radius_mean,data.area_mean) print('p-value: ',p_value) # * P values sıfır bu nedele **reject null hypothesis**. # * Null hypothesis = "relationship between radius mean and area mean is zero in tumor population" reddedildi. Alternate hypothesis doğrudur. # * Alternate hypothesis = relationship between **radius_mean** and **area_mean** is not zero in tumor population # <a id="50"></a> # ## Type 1 ve Type 2 Errors # * tip 1 ve tip 2 hatalarıdır. 
# * Null hypothesis üzerine kuruludur. # ![Time](type1_type2.jpg) # ![Time](type1_type22.jpg) # # <a id="51"></a> # ## T-Distribution # * Student's T-Distribution da denir. # * Ortalama değerleri aynı olan dağılımlar aslında variance açısından farklılık gösterebilir. # ![Time](t_Test_var.jpg) # ![Time](t_v.jpg) s1 = np.array([14.67230258, 14.5984991 , 14.99997003, 14.83541808, 15.42533116, 15.42023888, 15.0614731 , 14.43906856, 15.40888636, 14.87811941, 14.93932134, 15.04271942, 14.96311939, 14.0379782 , 14.10980817, 15.23184029]) print("mean 1: ", np.mean(s1)) print("standart deviation 1: ", np.std(s1)) print("variance 1: ", np.var(s1)) s2 = np.array([15.23658167, 15.30058977, 15.49836851, 15.03712277, 14.72393502, 14.97462198, 15.0381114 , 15.18667258, 15.5914418 , 15.44854406, 15.54645152, 14.89288726, 15.36069141, 15.18758271, 14.48270754, 15.28841374]) print("mean 2: ", np.mean(s2)) print("standart deviation 2: ", np.std(s2)) print("variance 2: ", np.var(s2)) # visualize with pdf import seaborn as sns sns.kdeplot(s1) sns.kdeplot(s2) plt.show() t_val = np.abs(np.mean(s1)-np.mean(s2))/np.sqrt((np.var(s1)/len(s1))+(np.var(s2)/len(s2))) print("t-value: ", t_val) # * Null hypothesis: bu iki distribution arasında statistical significance açısından fark yok. # * Eğer t value critical value dan küçük ise fail to reject null hypothesis # * Eğer t value critical value dan büyük ise reject null hypothesis # * t table: # ![Time](t_table.jpg) # * 0.05: 100 kere yaparsak 95 time reject null hypothesis # * degrees of freedom N1+N2-2 # * 2.3 > 2.04 bu nedenle reject null hypothesis # <a id="52"></a> # ## A/B Test # * A ve B diye iki farklı içeriğe sahip model yaratılır # * A ve B içerikleri farklı kullanıcılara sunulur. # * Kullanıcılardan alınan geri bildirime göre A/B içerklerinin başarımı elde edilir. # ![Time](ab_tet.jpg) # <a id="53"></a> # ## Statistics Neler Öğrendik? 
# * Sampling (Örnekleme) # * Central Limit Theorem (Merkezi Limit Teoremi) # * Standard Error # * Hypothesis Testing # * Hypothesis Testing Real-World Örneği 1 # * Hypothesis Testing Real-World Örneği 2 # * Type 1 ve Type 2 Errors # * T-Distribution # * A/B Test # <a id="60"></a> # # ANOVA (Analysis of Variance) # * Önceki bölümde sorularımızı cevaplamak için Z-Distribution ve T-Distribution kullandık. # * Sorularımız genelde "2 sample'ın(sub-grup) aynı populasyondan gelme olasılığı nedir?" şeklindeydi. # * Z ve t Distributions şekil olarak aynı ama T distribution da sample sayısı az olduğu için peak noktası Z distribution'a göre biraz aşağıda. # * ![Time](zt.jpg) # * Bu bölümde F- Distribution nedir onu öğreneceğiz. # * ![Time](f_dist.jpg) # * F distribution ile "aynı variance'a sahip 2 sample'ın(sub-grup) aynı populasyondan gelme olasılığı nedir?" sorusunu cevaplayacağız yada "2'den fazla sample'ın(sub-grup) aynı populasyondan gelme olasılığı nedir?" # # <a id="61"></a> # ## ANOVA Giriş # * ANOVA Nedir? # * F Distribution # * ANOVA Neler Öğrendik? # <a id="62"></a> # ## ANOVA (Analysis of Variance) Nedir? # * 2 yada daha fazla grubumuz olduğu zaman kullanmayı tercih ederiz. # * ANOVA bize grupların birbirinden farklı olup olmadığı bilgisini verir # * Mesela Ortaokul, lise ve üniversite öğrencilerinin sınav kaygısı birbirinden farklı mı? sorusunun cevabını ANOVA ile vereceğiz. # * Null hypothesis: sınav kaygıları aynı # * Mesela bu sorunun cevabını bir data yaratarak bulmaya çalışalım. 
# * Ortaokul, lise ve üniversite öğrencilerinin sınav kaygısı yüz üzerinden belli bir dağılımı sahip sayılar olsun # * ortaokul: mean = 47 ve std = 10 # * lise: mean = 48 ve std = 15 # * uni: mean = 55 ve std = 5 # * total_mean = 50 # * ANOVA iki farklı variance'ı inceler: # * Variance Between groups: grup ortalamaları total mean'den ne kadar uzak # * Variance Within Groups: her bir değer grubun ortalamasından ne kadar uzak # * F value bu iki variance arasındaki orandır yani (variance between groups) / (variance Within Groups) import numpy as np ortaokul = np.array([51.36372405, 44.96944041, 49.43648441, 45.84584407, 45.76670682, 56.04033356, 60.85163656, 39.16790361, 36.90132329, 43.58084076]) lise = np.array([56.65674765, 55.92724431, 42.32435143, 50.19137162, 48.91784081, 48.11598035, 50.91298812, 47.46134988, 42.76947742, 36.86738678]) uni = np.array([60.03609029, 56.94733648, 57.77026852, 47.29851926, 54.21559389, 57.74008243, 50.92416154, 53.47770749, 55.62968872, 59.42984391]) # print("ortaokul mean: ",np.mean(ortaokul)) print("lise mean: ",np.mean(lise)) print("uni mean: ",np.mean(uni)) total_mean = (np.mean(ortaokul) + np.mean(lise) + np.mean(uni))/3 print("total mean: ",np.mean(total_mean)) # visualize with pdf import seaborn as sns import matplotlib.pyplot as plt sns.kdeplot(ortaokul) sns.kdeplot(lise) sns.kdeplot(uni) plt.show() stats.f_oneway(ortaokul, lise, uni) # <a id="63"></a> # ## F Distribution # * Normal distribution gibi simetrik değil. # * ![Time](asd.jpg) # * F value < critical value -- fail to reject null hypothesis # * F value > critical value -- reject null hypothesis # * degrees of freedom for groups: Number of groups - 1 # * 3 - 1 = 2 # * degrees of freedom for error: (number of rows - 1)* number of groups # * (10 - 1) * 3 = 27 # ![Time](anaca.jpg) # * F value critical value'dan büyük olduğu için reject to null hypothesis # * 5.5 (f-value) > 5.4 (critical value) # <a id="65"></a> # ## ANOVA Neler Öğrendik? 
# * ANOVA (Analysis of Variance) Nedir?
# * F Distribution

# <a id="71"></a>
# # Chi-Square Analysis

# <a id="72"></a>
# ## Chi-Square Analysis Giriş
# * Chi-Square Analysis Nedir?
# * Chi-Square Analysis Örnek
# * Chi-Square Analysis Neler Öğrendik?

# <a id="73"></a>
# ## Chi-Square Analysis Nedir?
# * Gözlenen ve beklenen frekanslar arasındaki farkın anlamlı olup olmadığını test etmek için kullanılır.
# * Mesela bir örnek verelim
# * Havaya 10 kere para atıyoruz.
# * 9 kere yazı ve 1 kere tura geliyor.
# * Sorumuz şu: 9 kere yazı gelmesi şans mı yok sa bu para yazıya eğilimli mi? yani biaslanmış mı (hileli olarak da düşünübilirsiniz)
# * Null hypothesis: Adil bir para için 10 atıştan 9 kere yazı gelmesi istatistiksel olarak %95 olasılık ile mantıklıdır.(confidence level 0.05)
# ![Time](ccc.jpg)
# * Bizim örneğimizde yazı için:
# * expected frequency = 5
# * observed frequency = 9
# * Tura için ise:
# * expected frequency = 5
# * observed frequency = 1
# * Formüle göre:
# ![Time](ccc2.jpg)
# * chi square value 6.4
# ![Time](chi_dist.jpg)
# * Degrees of freedom: number of possible outcomes - 1
# * Degrees of freedom para atma örneğinde 2 - 1 = 1
# ![Time](chi_table.jpg)
# * Eğer chi-square value critical value'dan küçükse observation ve expected değerler arasında high correlation var.
# * 6.4 > 3.8 yani reject null hypothesis.

# <a id="74"></a>
# ## Chi-Square Analysis Örnek
# * Bir internet kafemiz var.
# * Burada 7 tane bilgisayar var.
# * Bu 7 bilgisayarın çökme olasılığı birbiri ile aynıdır.
# * Yani expected = failure rate should be same for all computers
# * Bilgisayarlar birbiri ile independent.
# * Observations: 1(5), 2(7), 3(9), 4(4), 5(1), 6(10), 7(6)
# * Null hypothesis = observation değerlerinin bu şekilde çıkması istatistiksel olarak %95 olasılık ile mantıklıdır(uygundur,doğrudur).
# * Toplam çökme 42.
# * Expected value = 42 / 7 = 6
# * degrees of freedom = 7 - 1 = 6
# * chi_value 9.33 < critical value 12.59, so we fail to reject the null hypothesis

# Chi-square goodness-of-fit: do the observed crash counts of the 7 computers
# deviate significantly from the uniform expectation under the null hypothesis?
observation = np.array([5, 7, 9, 4, 1, 10, 6])
total = observation.sum()
print("total: ", total)
# expected crashes per computer if every machine fails at the same rate
expected = total / observation.size
print("expected: ", expected)
# chi-square statistic: sum over cells of (observed - expected)^2 / expected
deviations = observation - expected
chi_value = (deviations ** 2 / expected).sum()
print("chi_value: ", chi_value)

# critical value of the chi-square distribution for alpha = 0.05, df = 6
from scipy.stats import chi2
print("critical value: ", chi2.isf(0.05, 6))
Lecture_Notebooks/W2_Statistical_Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ___ # # <a href='https://www.learntocodeonline.com/'> <img src='files/IMGs/learn to code online.png' /></a> # ___ # # Introduction (Why We're Here) # # As a programmer, I have always found it so frustrating when APIs are written poorly, have incorrect or hard to follow documentation, or quite simply was just non-existant! So I decided to start learning how to create my own in order to create an app for my mom's business. # # The training provided here can be found anywhere online. However, a good bulk of this training was utilized by going through the following Udemy courses: # # - [Build Your Own Backend REST API using Django REST Framework](https://www.udemy.com/django-python) (for beginners) # # This documentation will provide information on how to create a RESTful API using python and django. REST APIs are a critical component for any application - especially mobile! # # This will walk you through creation of an API to provide the following features: # - user profile registration & update # - login and authentication # - posting status updates (content) # - viewing other profiles & updates # # These features are available in most mobile web applications. # # ## Process # # Set up your DEV (development) server > set up your project > create DEV server > create Django app > setup database > setup Django admin > intro to APIView >into to ViewSets > create Profiles API > create login API > create profile feed API > deploy API to server on AWS # # ## Technologies # # The following will work together to create a working REST API: # 1. VirtualBox # 2. Vagrant # 3. python # 4. Django # 5. django REST framework # 6. Atom editor (or any other IDE) # 7. git # 8. 
mod headers chrome extension # # CATEGORY # # ## Development Server # # Many developers run code on their local OS. This can cause a number of issues when working on real world apps, such as: # # - difficult to work collaboratively # - different software on different platforms (Windows, Mac, etc) # - conflict with other apps used # - clogs up system with dev tools & packages # - different OS from the PROD server # # Professional programmers always write their code on a local DEV server to isolate their code from their local desktop. This is best practice and will be achieved using the following: # # 1. [Vagrant](https://www.vagrantup.com/) - allows you to describe what kind of server you need for your app, and then save as a Vagrant file. Allows you to easily reproduce and share with other developers. # # 2. [VirtualBox](https://www.virtualbox.org/) - used by Vagrant to create the virtual server exactly as described. (App code and requirements are installed on a virtual server.) # # Running code in a virtual DEV machine has many benefits: # - easy to share the server with others # - regardless of OS, have exact same version of all requirements # - can test code using exactly the same OS as a real PROD server # - easily create and destroy the server as needed # ## Application Code # # ### Layer 1 - Python # # This will be used for writing the logic of the application. # # ### Layer 2 - Django Framework # # A web framework used on top of python. While most websites look different on the surface, there are a number of things they ALL have in common. # # EXAMPLE: most sites use data from a DB to render HTML that a browser can understand and translate to something that makes sense to most people - images, videos, etc. 
#
# Django provides a pre-defined set of code we can use to perform some of these common actions like:
# - interacting with the DB
# - returning pages and images
# - validating user form submissions
#
# This saves time and helps these sites be standardized and understood by other developers.
#
# ### Layer 3 - Django REST Framework
#
# Similar to how django provides features for building a standard web app, this REST framework provides a set of features for making a standard REST API.

# ## Tools
#
# **[Atom](https://atom.io/)** is a text editor made by [GitHub](https://github.com/) & is open-sourced. (Can use any IDE, such as PyCharm.) Atom has a ton of plugins written by the developer community.
#
# **[Git](https://git-scm.com/)** is an industry standard version control system (VCS) to help track changes made to code.
#
# **[ModHeader](https://modheader.com/)** is a Chrome extension which will allow us to modify the HTTP headers when testing your API.

# # Docker vs Vagrant
#
# The similarity is that both are virtualization technology used to isolate application from machine its running on.
#
# ## What is Docker?
#
# - open source containerization tool
# - run app in light-weight image
# - works by creating a Docker file that contains all of the steps required to build the image to run your app
#
# ![image.png](attachment:image.png)
#
# - during build stage, installs all dependencies & code required to run your app
# - image based on light-weight, stripped down version of LINUX
# - image can then be used to run on local DEV machine or deploy to PROD
#
# ### Limitations
#
# - Designed to run in PROD, so steeper learning curve compared to vagrant
# - limited versions available for Windows Home

# ## What is Vagrant?
# # - tool used for managing virtual DEV environments # - no "out of the box" virtualization tech # - works using a hypervisor (like VirtualBox) - a tool to run VMs on a computer # - you create a Vagrant file that contains all instructions for creating your DEV server # # ![image.png](attachment:image.png) # # - Vagrant uses the hypervisor to create & configure the server on your machine # # ### Benefits # # - streamlined but complete version of Linux OS # - since not designed to run in PROD, easier learning curve compared to Docker # - supports a number of different Hypervisors # - wider range of support # - runs on any machine that supports VirtualBox # ## Docker vs Vagrant # # **DOCKER:** # - streamline workflow to PROD # - all developers use supported OS # # **VAGRANT:** # - just getting started # - need support on wider range of OS # # Installing The Applications # # All of them are for any OS, but this will outline installation on Windows. # # ## Git # # ### Git Installation # # Go to [Git](https://git-scm.com) then click on the button to download the latest for your current OS. It will take you to the download page & automatically start the download process. # # Click the installer to start the installation. # # <img src='files/IMGs/git-install01.png' /> # # Click yes and then ... # # <img src='files/IMGs/git-install02.png'> # # Choose where you want to install the program - default is generally acceptable. # # <img src='files/IMGs/git-install03.png'> # # You can leave the settings as default. # # <img src='files/IMGs/git-install04.png'> # # <img src='files/IMGs/git-install05.png'> # # If on Windows, choose the third option for adjusting your PATH environment. # # I personally did not (I left it on default) but this is what the instructor said to do. # # <img src='files/IMGs/git-install06.png'> # # Leave the HTTPS transport backend at default. # # <img src='files/IMGs/git-install07.png'> # # Leave line ending conversions as default. 
# # <img src='files/IMGs/git-install08.png'> # # Leave terminal emulator as default. # # <img src='files/IMGs/git-install09.png'> # # Leave extra options at default. # # <img src='files/IMGs/git-install10.png'> # # After you click the install button, the program will start installation. # # Uncheck the "View Release Notes" and then the *Finish* button. # # <img src='files/IMGs/git-install11.png'> # # ### Git Testing For Appropriate Installation # # Open up your Windows start menu and type: **git bash** # # Load the **git bash** app to start the program. # # Type `git --version` to ensure the version shows. # # Set your global configurations: # - `git config --global user.email "EMAILADDRESS HERE"` while you could use your personal email address here, be sure to utilize GitHub's privacy email system as outlined [here](https://github.blog/2017-04-11-private-emails-now-more-private/) # - `git config --global user.name "<NAME>"` # # Type **exit** to close it out. # ## Virtual Box # # ### VirtualBox Installation # # Head over to [VirtualBox](https://www.virtualbox.org) and click on the download button, then choose the one relevant to your machine. # # You can leave it at defaults throughout the process. # # <img src='files/IMGs/VirtualBox/install-01.png'> # # <img src='files/IMGs/VirtualBox/install-02.png'> # # <img src='files/IMGs/VirtualBox/install-03.png'> # # <img src='files/IMGs/VirtualBox/install-04.png'> # # <img src='files/IMGs/VirtualBox/install-05.png'> # If VirtualBox has been successfully installed, you should see something like: # <img src='files/IMGs/VirtualBox/install-06.png'> # ## Vagrant # # ### Installation # # Head over to [Vagrant](https://www.vagrantup.com) to begin the download and installation process. Choose the appropriate one for your system. # # <img src='files/IMGs/Vagrant/Vagrant-00.png'> # # Proceed with installation ... # # <img src='files/IMGs/Vagrant/Vagrant-01.png'> # # And don't forget to read before you accept! 
# # <img src='files/IMGs/Vagrant/Vagrant-02.png'> # # <img src='files/IMGs/Vagrant/Vagrant-03.png'> # # <img src='files/IMGs/Vagrant/Vagrant-04.png'> # # It will prompt you to restart, but you can wait until all tools are installed. # # Check that vagrant has been installed by checking with **git bash** using the following command: `vagrant version` # # <img src='files/IMGs/Vagrant/Vagrant-05.png'> # # Vagrant is purely a command line tool. # ## Atom # # Follow default installation for [Atom](https://atom.io) as it will do everything automatically for you. (Including start it up!) # ## ModHeaders # # Must have Google Chrome or a recent Chrome installed on your system. # # Google search for **ModHeaders** and choose the one for Chrome Web Store - or **[click here](https://chrome.google.com/webstore/detail/modheader/idgpnmonknjnojddfkpgkljpfnnfcklj?hl=en)**. (It should also be available for FireFox.) # # After you click the "ADD TO CHROME" button on the top right and confirm, it will be installed in your browser. # # This will be used to add authentication headers to our API.
REST/Udemy-Django-Python/Django REST Course - 01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Prerequisite # Install the python > 3.5 # Install the pip # You can search the website for installation by yourself. # ## Install dependencies # ### - install web3-gear # You can find out the detail from here # https://github.com/vechain/web3-gear # Here is the quick start for Mac OSX # ``` # $brew install openssl # $pip install web3-gear # ``` # ### - install web3-python # ``` # $pip install web3 # ``` # ### - debug issue # Someone may encounter an issue like: Cannot import name "keccak". # The solution is like # ``` # $pip install -r requirements.txt # $pip uninstall pycryptodome # $pip install pycryptodome # ``` # ## Setup a web3-gear # web3-gear is a bridge for web3 python to access the VeChain Thor node restful API. # ``` # $web3-gear --endpoint https://<your node url>:<port> # ``` # ## Play with the Web3 python on VeChain Thor Node restful API # #### Get Block from web3.auto import w3 w3.eth.blockNumber dict(w3.eth.getBlock('latest')) # #### Get Balance of VET w3.eth.getBalance("0xA19e52007ebb4F2A6BBAd4e109405B41FE37b3f8")/(10**18) # #### Get Transaction And Recipient w3.eth.getTransaction("0xe853fccc7f7bb1ab55717e4a28b022b99d600a585d8120ffe1ef98348699f93b", raw= True) w3.eth.getTransactionReceipt("0xe853fccc7f7bb1ab55717e4a28b022b99d600a585d8120ffe1ef98348699f93b")
.ipynb_checkpoints/How to use web3 python to access VeChain Thor Node-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline # # # Quick Start Tutorial for Compiling Deep Learning Models # ====================================================== # **Author**: `<NAME> <https://github.com/kevinthesun>`_, `<NAME> <https://github.com/SiNZeRo>`_ # # This example shows how to build a neural network with Relay python frontend and # generates a runtime library for Nvidia GPU with TVM. # Notice that you need to build TVM with cuda and llvm enabled. # # # Overview for Supported Hardware Backend of TVM # ---------------------------------------------- # The image below shows hardware backend currently supported by TVM: # # ![](https://github.com/dmlc/web-data/raw/master/tvm/tutorial/tvm_support_list.png) # # :align: center # :scale: 100% # # In this tutorial, we'll choose cuda and llvm as target backends. # To begin with, let's import Relay and TVM. # # # + import numpy as np from tvm import relay from tvm.relay import testing import tvm from tvm.contrib import graph_runtime # - # Define Neural Network in Relay # ----------------------------- # First, let's define a neural network with relay python frontend. # For simplicity, we'll use pre-defined resnet-18 network in Relay. # Parameters are initialized with Xavier initializer. # Relay also supports other model formats such as MXNet, CoreML, ONNX and # Tensorflow. # # In this tutorial, we assume we will do inference on our device # and the batch size is set to be 1. Input images are RGB color # images of size 224 * 224. We can call the :any:`tvm.relay.expr.astext()` # to show the network structure. 
# # # + batch_size = 1 num_class = 1000 image_shape = (3, 224, 224) data_shape = (batch_size,) + image_shape out_shape = (batch_size, num_class) net, params = relay.testing.resnet.get_workload( num_layers=18, batch_size=batch_size, image_shape=image_shape) # set show_meta_data=True if you want to show meta data print(net.astext(show_meta_data=False)) # - # Compilation # ----------- # Next step is to compile the model using the Relay/TVM pipeline. # Users can specify the optimization level of the compilation. # Currently this value can be 0 to 3. The optimization passes include # operator fusion, pre-computation, layout transformation and so on. # # :any:`relay.build_module.build` returns three components: the execution graph in # json format, the TVM module library of compiled functions specifically # for this graph on the target hardware, and the parameter blobs of # the model. During the compilation, Relay does the graph-level # optimization while TVM does the tensor-level optimization, resulting # in an optimized runtime module for model serving. # # We'll first compile for Nvidia GPU. Behind the scene, `relay.build_module.build` # first does a number of graph-level optimizations, e.g. pruning, fusing, etc., # then registers the operators (i.e. the nodes of the optimized graphs) to # TVM implementations to generate a `tvm.module`. # To generate the module library, TVM will first transfer the high level IR # into the lower intrinsic IR of the specified target backend, which is CUDA # in this example. Then the machine code will be generated as the module library. # # opt_level = 3 target = tvm.target.cuda() with relay.build_config(opt_level=opt_level): graph, lib, params = relay.build_module.build( net, target, params=params) # Run the generate library # ------------------------ # Now we can create graph runtime and run the module on Nvidia GPU. 
# # # + # create random input ctx = tvm.gpu() data = np.random.uniform(-1, 1, size=data_shape).astype("float32") # create module module = graph_runtime.create(graph, lib, ctx) # set input and parameters module.set_input("data", data) module.set_input(**params) # run module.run() # get output out = module.get_output(0, tvm.nd.empty(out_shape)).asnumpy() # Print first 10 elements of output print(out.flatten()[0:10]) # - # Save and Load Compiled Module # ----------------------------- # We can also save the graph, lib and parameters into files and load them # back in deploy environment. # # # + # save the graph, lib and params into separate files from tvm.contrib import util temp = util.tempdir() path_lib = temp.relpath("deploy_lib.tar") lib.export_library(path_lib) with open(temp.relpath("deploy_graph.json"), "w") as fo: fo.write(graph) with open(temp.relpath("deploy_param.params"), "wb") as fo: fo.write(relay.save_param_dict(params)) print(temp.listdir()) # + # load the module back. loaded_json = open(temp.relpath("deploy_graph.json")).read() loaded_lib = tvm.module.load(path_lib) loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read()) input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32")) module = graph_runtime.create(loaded_json, loaded_lib, ctx) module.load_params(loaded_params) module.run(data=input_data) out_deploy = module.get_output(0).asnumpy() # Print first 10 elements of output print(out_deploy.flatten()[0:10]) # check whether the output from deployed module is consistent with original one tvm.testing.assert_allclose(out_deploy, out, atol=1e-3)
relay_quick_start.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # **author**: <EMAIL><br> # **date**: 15 Jun 2017<br> # **language**: Python 3.5<br> # **license**: BSD3<br> # # ## effect_size_rda.ipynb # + import numpy as np import pandas as pd import seaborn as sns import statsmodels from statsmodels.sandbox.stats.multicomp import multipletests import matplotlib.pyplot as plt % matplotlib inline # - # check FDR threshold multipletests(np.repeat(0.002, 4), method = 'fdr_bh')[1] alpha = pd.DataFrame({'type': np.tile(['Independent', 'Cumulative'], 4), 'covariate': np.repeat(['study_id', 'empo_3', 'host_scientific_name', 'envo_biome_3'], 2), 'value_ES': [0.847, 0.847, 0.8, 0.024, 0.839, 0.0024, 0.817, 0.0007]}) alpha sns.set_style("ticks") fig, ax = plt.subplots() fig = sns.barplot(x='covariate', y='value_ES', hue='type', data=alpha) fig.set_xticklabels(ax.get_xticklabels(), rotation=45) fig.set(xlabel='RDA selected covariates', ylabel='effect size') sns.plt.legend(loc='center left',bbox_to_anchor=(1,0.5)) sns.despine() fig = fig.get_figure() fig.tight_layout() fig.savefig('alpha_ef.pdf') # + ######## beta_unweighted ########## # - # check FDR threshold multipletests(np.hstack([np.repeat(0.002, 4), 0.008]), method = 'fdr_bh')[1] beta_uw = pd.DataFrame({'type': np.tile(['Independent', 'Cumulative'], 5), 'covariate': np.repeat(['study_id', 'empo_3', 'host_scientific_name', 'envo_biome_3', 'longitude_deg'], 2), 'value_ES': [0.544, 0.544, 0.442, 0.151, 0.511, 0.003, 0.403, 0.001, 0.237, 0.0002]}) beta_uw sns.set_style("ticks") fig, ax = plt.subplots() fig = sns.barplot(x='covariate', y='value_ES', hue='type', data=beta_uw) fig.set_xticklabels(ax.get_xticklabels(), rotation=45) fig.set(xlabel='RDA selected covariates', ylabel='effect size') sns.plt.legend(loc='center left',bbox_to_anchor=(1,0.5)) sns.despine() 
fig = fig.get_figure() fig.tight_layout() fig.savefig('beta_uw_ef.pdf') # + ######## beta_weighted ########## # - # check FDR threshold multipletests(np.repeat(0.002, 5), method = 'fdr_bh')[1] beta_w = pd.DataFrame({'type': np.tile(['Independent', 'Cumulative'], 5), 'covariate': np.repeat(['study_id', 'empo_3', 'host_scientific_name', 'envo_biome_3', 'longitude_deg'], 2), 'value_ES': [0.427, 0.427, 0.401, 0.185, 0.371, 0.004, 0.143, 0.002, 0.272, 0.0004]}) beta_w sns.set_style("ticks") fig, ax = plt.subplots() fig = sns.barplot(x='covariate', y='value_ES', hue='type', data=beta_w) fig.set_xticklabels(ax.get_xticklabels(), rotation=45) fig.set(xlabel='RDA selected covariates', ylabel='effect size') sns.plt.legend(loc='center left',bbox_to_anchor=(1,0.5)) sns.despine() fig = fig.get_figure() fig.tight_layout() fig.savefig('beta_w_ef.pdf')
code/07-env-effects-corr/effect_size_rda.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # pip: [pip wiki](https://es.wikipedia.org/wiki/Pip_(administrador_de_paquetes)) # pypi: [pypi](https://pypi.org/) # # ---- # [pandas](https://pypi.org/project/pandas/) # # # ---- # [numpy](https://numpy.org/) # # --- # sintaxis # ``` # pip install nombre_paquete # pip install nombre_paquete==X.Y.Z # pip install --upgrade nombre_paquete # pip install -r requirements.txt # pip install git+https://github.com/repositorio/nombre_paquete # ``` # import pandas as pd pip install pandas pip install pandas==1.2.5 import numpy as np pip install numpy pip install --upgrade pip # [ejemplo de requirements.txt](https://github.com/Yelp/requirements-tools) # + # pip install pip==21.1.3 # - pip list pip freeze
notebooks/dia_01/007_InstalandoPip.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import openpyxl import pandas as pd import numpy as np import seaborn as sns import datetime import matplotlib.pyplot as plt # %matplotlib inline excel_document = openpyxl.load_workbook('files/MUERTOS.xlsx') sheets=excel_document.get_sheet_names() df5=pd.read_pickle('files/df5.pickle') df5 # - # # GRAFICOS <NAME> df5=df5.dropna(subset=['Hora Ocurrencia']) df5.index=list(range(len(df5['Hora Ocurrencia']))) df5 #df5['Hora Ocurrencia']=df5['Hora Ocurrencia'].map(lambda x: str(x)) df7=df5['Hora Ocurrencia'].map(lambda x: str(x)) for y in range(len(df7)): if type(df5['Hora Ocurrencia'][y])==datetime.datetime: df5['Hora Ocurrencia'][y]=df5['Hora Ocurrencia'][y].time() df5['Hora Ocurrencia'] def DAYNAME(s): return datetime.datetime.strptime(s, '%Y-%m-%d %H').strftime('%A') df5['Fecha y Hora Ocurrencia']=df5['Fecha Ocurrencia'].map(lambda x: str(x)[0:10]) +' ' + df5['Hora Ocurrencia'].map(lambda x: str(x)[0:2]) df5['Fecha y Hora Ocurrencia'] df6=pd.DataFrame({'Fecha y Hora Ocurrencia':pd.to_datetime(df5['Fecha y Hora Ocurrencia'],format='%Y%m%d %H')}) data=df6.groupby('Fecha y Hora Ocurrencia') df7=pd.DataFrame({'Fecha y Hora Ocurrencia': list(data.groups.keys()), 'Muertes': list(data['Fecha y Hora Ocurrencia'].value_counts()) }) byhour=df7.groupby([df7['Fecha y Hora Ocurrencia'].dt.year.rename('year'), df7['Fecha y Hora Ocurrencia'].dt.hour.rename('hour')]).sum().reset_index() bymonth=df7.groupby([df7['Fecha y Hora Ocurrencia'].dt.year.rename('year'), df7['Fecha y Hora Ocurrencia'].dt.month.rename('month')]).sum().reset_index() byyear=df7.groupby(df7['Fecha y Hora Ocurrencia'].dt.year.rename('year')).sum().reset_index() plt.figure(figsize=(16, 60)) for index in range(len(sheets)): plt.subplot(len(sheets)/2, 2, index+1) 
y=byhour.loc[byhour.groupby('year').groups[list(byyear['year'])[index]]] plt.plot(range(24), y['Muertes']) plt.xticks(range(24), ["00:00-01:00","01:00-02:00","02:00-03:00","03:00-04:00","04:00-05:00","05:00-06:00", "06:00-07:00","07:00-08:00","08:00-09:00","09:00-10:00","10:00-11:00","11:00-12:00", "12:00-13:00","13:00-14:00","14:00-15:00","15:00-16:00","16:00-17:00","17:00-18:00", "18:00-19:00","19:00-20:00","20:00-21:00","21:00-22:00","22:00-23:00","23:00-24:00"], rotation='vertical') plt.title("Muertes por Hora del Dia/Anual"+"-"+sheets[index]); plt.savefig("Muertes_Franja_Horaria.png")
TRABAJO ANALITICA/GRAFICOS POR FRANJA HORARIA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # # Project Idea # # Thinking about what to do for a personal project has been on my mind # since I published my mobile games. Those projects were my gateway into # the coding world and really fed my passion leading to my eventual career # change. The freedom and ability to create anything you want on your # personal projects is something I absolutely love and it makes a really # rewarding hobby. Since quitting my PhD I've been aiming to become a # mainstream developer and have spent most of my time reading and # studying. This has included: # # * Udacity's Machine Learning Course # * Cracking the Coding Interview # * Linux Command Lines # * Test Driven Web Development with Python # * Test Driven Development Django Development * Fluent Python # # Now that I feel I'm competent enough to build a Django Application as # well as perform TDD for my job, I believe it's time to build something # new, that pushes my knowledge and solidifies what I've learnt from # my new found knowledge. This project will have to have several key # components. # # 1. **Python** - With modules spanning data science to web development # it's a no brainer that my next project will utilize Python. # Furthermore, it's simple syntax and my familiarity with it is # something that will decrease the cognitive overhead allowing me to # just worry about the project. # # 2. **Machine Learning** - Since taking Udacity's Machine Learning course # I've been really interested in utilizing this as a tool to create a # new application. This works nicely with Python which provides Pandas, # Jupyter and SciKit Learn for data analysis. # # 3. 
**Django** - Powerful for web server applications I really think that # building a full Django project from scratch will help build my # experience as a developer. Finally, I'd like to have my app be # deployed onto Android, pushing me to learn how to integrate the # Django Rest Framework's API calls with my Android application. # # ## Ideas # # Over the past several months I have been thinking of different ideas # which can be feasible for this project. This includes the following: # # * **Flight prices including price of transportation to and from the # airport:** This is a cool idea I thought of while travelling. A few # times I've caught myself travelling to farther airports, spending # extra money on the commute to and from the airport. This application # would search for the flight cost plus the commute cost. # # * **Short story writer:** This would be a really cool project going into # natrual language processing. You would input an idea and the project # would spit out a short story based on what you put in. # # * **Photobomb destroyer:** There was a really cool paper that came out # recently which involved the resizing of images based on the entropy # of pixels in the immage. By highlighting certain regions you could # completely edit out a photobomber. In addition this would utilize # face detection and machine learning to decide which face is the # likely photobomber. # # * **Makeup tester:** Great project for face detection and overlaying # features on the person's face. Could be used for advertising different # makeup brands? # # * **Carpool Service:** There hasn't been a disruptive technology that # has taken over craigslist and kijiji for ridesharing around Toronto. # This would be the "Uber for long distance rides." # # * **Twitter and Reddit Feed Summarizer:** Writes short stories based on # trending twitter feeds and reddit posts. Story formats are trained # based on previous stories of similar subjects. 
# # ## Ranking Ideas # # To try to sort through the ideas I'll simply rank each one on what I am # seeking to learn from the project (as seen above) as well as some other # core, incredibly scientific, components. # # | Application | Django | Machine Learning | Novelty | Application | Total | # | ------------------- | ------ | ---------------- | ------- | ----------- | ----- | # | Easy Flight | 1 | 0 | 1 | 1 | 3 | # | Story Writer | 1 | 1 | 1 | 0 | 3 | # | Photobomb Destroyer | 1 | 1 | 1 | 0 | 3 | # | Makeup Tester | 0 | 1 | 0 | 0 | 1 | # | Carpool Service | 1 | 0 | 0 | 1 | 2 | # | Feed Summarizer | 1 | 1 | 1 | 1 | 4 | # # From this ranking there seems to be a lot of ideas that are close. # Makeup tester is a clear loss. It was an idea as it would've been an # excellent way to learn face tracking, however, it would not integrate # Django and would likely be a mobile application. In addition in looking # for any replicas it seems to have been already created. Though the # carpool service is something that I leaves a lot to be desired in the # Toronto area, it does not touch on any machine learning which is one # of the main foci of this project. This is the same case for easy flight. # Story writer and photobomb destroyer are great ideas which cover my # goals of incorporating django, machine learning and python, however, # these really don't seem to make any serious impact on any markets. # Finally feed summarizer, which is a really big project, knocks it out # of the park (with my totally subjective marking) and takes the win. # # ## Closing Thoughts # # In writing this I feel I have given each idea a fair shot and gave each # considerable consideration. Now having my thoughts organized I will have # more confidence in following through with my new project. Time to start # researching feed summarizers and start building a very specific project # plan! #
docs/jupyter-notebook/1. Brainstorming a Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/tallywiesenberg/DS-Unit-2-Applied-Modeling/blob/master/DS_Sprint_Challenge_7.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Kz9V5lVFMo86" colab_type="text" # _Lambda School Data Science, Unit 2_ # # # Applied Modeling Sprint Challenge: Predict Chicago food inspections 🍔 # + [markdown] id="yWOjtM9iMo87" colab_type="text" # For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019. # # [See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset. # # According to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls." # + [markdown] id="VvBYdx2xMo88" colab_type="text" # #### Your challenge: Predict whether inspections failed # # The target is the `Fail` column. # # - When the food establishment failed the inspection, the target is `1`. # - When the establishment passed, the target is `0`. 
# + [markdown] id="3YMtu4LaF8Jq" colab_type="text" # #### Run this cell to install packages in Colab: # + id="vWyiJKQgF6ax" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8162c979-6eed-4116-d9a0-8f70cd81fdcb" import sys if 'google.colab' in sys.modules: # Install packages in Colab # !pip install category_encoders==2.* # !pip install eli5 # !pip install pandas-profiling==2.* # !pip install pdpbox # !pip install shap # + [markdown] id="Na5IvRq1Mo89" colab_type="text" # #### Run this cell to load the data: # + id="Yyc7zftTMo89" colab_type="code" colab={} import pandas as pd train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5' test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a' train = pd.read_csv(train_url) test = pd.read_csv(test_url) assert train.shape == (51916, 17) assert test.shape == (17306, 17) # + [markdown] id="8EhbzqgfMo9A" colab_type="text" # ### Part 1: Preprocessing # # You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding. # # _To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._ # # ### Part 2: Modeling # # **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score. # # Use your model to **predict probabilities** for the test set. 
**Get an ROC AUC test score >= 0.60.** # # _To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._ # # # ### Part 3: Visualization # # Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types: # # - Permutation Importances # - Partial Dependence Plot, 1 feature isolation # - Partial Dependence Plot, 2 features interaction # - Shapley Values # # _To earn a score of 3 for this part, make all four of these visualization types._ # + [markdown] id="muEIQ4EPGVH_" colab_type="text" # ## Part 1: Preprocessing # # > You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding. # + id="Ao1PDJtpIWwp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 278} outputId="476fb034-700d-4818-b0ed-7a6dbc94ba63" train.head(2) # + id="pwpROO8Lya17" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 218} outputId="2877ddd3-4f73-46b2-c7fe-7a8e6efbc654" train.select_dtypes('object').nunique() > 50 #examining categorical features with more than 50 unique values # + id="LSwwkHCsyTTE" colab_type="code" colab={} #columns with high cardinality to drop columns_drop = ['DBA Name', 'AKA Name','Address', 'Inspection Date', 'Inspection Type', 'Violations', 'Location', 'Risk', 'Fail', #risk and fail (target) shouldn't leak into features 'Inspection ID', 'License #'] #IDs aren't useful # + id="irPOv0C-4D9D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="f570a10c-c2b4-4779-d6e3-989cae9688ec" columns_drop # + id="HwekcPEc16tW" colab_type="code" colab={} features = train.columns[~train.columns.isin(columns_drop)] #list of features to use for prediction # + [markdown] id="nB8CQnExGqzr" colab_type="text" # ## Part 2: Modeling # # > **Fit a model** with the train set. 
# (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.
# >
# > Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**

# + id="YQ3hD2G1IbVH" colab_type="code" colab={}
# Train/validation split, stratified on the target so both splits keep the
# same class balance.
from sklearn.model_selection import train_test_split

train, val = train_test_split(train, train_size=0.8, test_size=0.2,
                              stratify=train['Fail'], random_state=42)

# + id="LDM93jFi7DAl" colab_type="code" colab={}
# Feature matrices / target vectors for each split.
X_train = train[features]
X_val = val[features]
X_test = test[features]

y_train = train['Fail']
y_val = val['Fail']
y_test = test['Fail']

# + id="RENNaZ2O747f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1f8ff783-bebe-4ab9-d657-a2d67f21f301"
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from category_encoders import OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler

# Baseline pipeline: encode categoricals, impute missing values, scale,
# then a random forest.
pipeline = make_pipeline(OrdinalEncoder(),
                         SimpleImputer(),
                         StandardScaler(),
                         RandomForestClassifier(n_estimators=500,
                                                n_jobs=-1,
                                                random_state=42))

# Fit pipeline and score the validation split with ROC AUC (the challenge metric).
pipeline.fit(X_train, y_train)

y_pred_proba = pipeline.predict_proba(X_val)[:, 1]

from sklearn.metrics import roc_auc_score
roc_auc_score(y_val, y_pred_proba)

# + id="F4a2D_YM_JBs" colab_type="code" colab={}
## RANDOM SEARCH
from sklearn.model_selection import RandomizedSearchCV

# Select hyperparameters.
# FIX: the original ranges started at 0, but scikit-learn requires
# max_depth >= 1, min_samples_split >= 2 and min_samples_leaf >= 1;
# sampling a 0 would make every corresponding fit raise a ValueError.
hyperparameters = {'simpleimputer__strategy': ['mean', 'median'],
                   'randomforestclassifier__max_depth': range(2, 50, 2),
                   'randomforestclassifier__min_samples_split': range(2, 500, 5),
                   'randomforestclassifier__min_samples_leaf': range(1, 500, 5)}

# Apply search, scored with ROC AUC so model selection matches the challenge
# metric (the default scoring would have been accuracy).
search = RandomizedSearchCV(pipeline, hyperparameters, random_state=42,
                            n_iter=20, cv=5, scoring='roc_auc')

# Fit search to train set.
best_model = search.fit(X_train, y_train)

# + id="z44PwhfwDQbP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="82a3c378-7332-45e7-d593-74984fe1bf1b"
# Validation ROC AUC of the tuned model, and the winning hyperparameters.
y_pred_proba = best_model.predict_proba(X_val)[:, 1]
roc_auc_score(y_val, y_pred_proba)

best_model.best_params_

# + [markdown] id="u98nLGBTMo9s" colab_type="text"
# ## Part 3: Visualization
#
# > Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:
# >
# > - Permutation Importances
# > - Partial Dependence Plot, 1 feature isolation
# > - Partial Dependence Plot, 2 features interaction
# > - Shapley Values

# + id="_Q379i5CIeKY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="34632c61-40a0-4aad-b6f9-d6d762cddb4b"
## PERMUTATION IMPORTANCE W/ ELI5
# Pipeline without the model (eli5 needs the estimator separated from the
# preprocessing steps).
small_pipeline = make_pipeline(OrdinalEncoder(), SimpleImputer(), StandardScaler())

# Transform X_train and X_val with the preprocessing-only pipeline.
X_train_transformed = small_pipeline.fit_transform(X_train)
X_val_transformed = small_pipeline.transform(X_val)

# Isolated random forest using the hyperparameters found by the random
# search above (random_state added for reproducibility).
model = RandomForestClassifier(max_depth=12, min_samples_leaf=45,
                               min_samples_split=395, random_state=42)
model.fit(X_train_transformed, y_train)

# + id="Ob-98EG4JaG5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="1720e3df-f689-4a64-fd6c-72e692e1e0fb"
from eli5.sklearn import PermutationImportance
import eli5

# Instantiate permuter. scoring='accuracy' is kept for comparability with
# the original run; 'roc_auc' would match the challenge metric more closely.
permuter = PermutationImportance(model, scoring='accuracy', n_iter=5,
                                 random_state=42)

# Fit permuter to validation set.
permuter.fit(X_val_transformed, y_val)

features = X_val.columns.tolist()

# Show weights.
eli5.show_weights(permuter, top=None, feature_names=features)

# + id="0PuG0BHQSK6o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bd474867-bd2b-43d6-a21a-1df2004f5791"
X_val.columns

# + id="-13iqtpWMWhW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="b935f803-a30e-41c2-8fb4-fbebb8c6623e"
## PARTIAL DEPENDENCE PLOTS
from pdpbox.pdp import pdp_isolate, pdp_plot

feature = 'Facility Type'

# FIX: `model` was fit on the *transformed* features, so the PDP must also
# be computed on transformed data; passing raw X_val would feed the model
# values it was never trained on. Rebuild a DataFrame so pdpbox keeps the
# original column names.
X_val_transformed_df = X_val.copy()
X_val_transformed_df[:] = X_val_transformed

isolated = pdp_isolate(model=model,
                       dataset=X_val_transformed_df,
                       model_features=X_val.columns,
                       feature=feature)
pdp_plot(isolated, feature_name=feature)

# + [markdown] id="PHJdz2SRzKqa" colab_type="text"
# #Leak

# + [markdown] id="Ve4aE4PEzNe8" colab_type="text"
# I believe the column that would hurt the model in the real world is the "Risk" column. The problem with this column is that it implies that the inspectors have prior insight into the cleanliness of the restaurant, while the purpose of the model is to predict the cleanliness of the restaurant (whether they pass the test or not). I had a similar problem in my Unit 2 Build project. I was building a model to test whether car accidents would result in major injury or fatality (Y/N), and a series of attributes counting the number of fatalities/major injuries/minor injuries per pedestrian/cyclist/driver leaked into my model, giving me a very accurate model that was useless because the most important feature could not be retrieved prior to real world accidents.

# + id="VKIoCQttzMfr" colab_type="code" colab={}
DS_Sprint_Challenge_7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Dependencies from bs4 import BeautifulSoup as bs import requests from splinter import Browser import os #Set up for chromedriver.exe executable_path = {"executable_path": '/Users/Serj/Documents/Chromedriver/chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) # + #This Section will scrape the mars news website and collect the latest news title and paragraph text. # - #Establish a connection for the nasa website url_news = 'https://mars.nasa.gov/news/' browser.visit(url_news) #This will parse the results with BF html_news = browser.html soup_news = bs(html_news,'lxml') #Check the output data print (soup_news.prettify()) # + slide_news = soup_news.select_one("ul.item_list li.slide") slide_news.find("div", class_="content_title") # - #This will scrape the latest news title and then print it out title = slide_news.find("div", class_="content_title").get_text() print(title) #This will extract the teaser paragraph from the previous article paragraph = slide_news.find("div",class_ = "article_teaser_body").get_text() print(paragraph) browser.quit() # + #This Section will scrape Mars JPL space featured image. 
# - #Set up for chromedriver.exe executable_path = {"executable_path": '/Users/Serj/Documents/Chromedriver/chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) # + #Establish a connection for the JPL website url_image = 'https://spaceimages-mars.com/' browser.visit(url_image) # - #This will parse the results with BF html_image = browser.html soup_image = bs(html_image,'lxml') #Check the output data print (soup_image.prettify()) #Click the featured image "Full Image" button browser.links.find_by_partial_text('FULL IMAGE').click() html_image = browser.html soup_image = bs(html_image,'lxml') print (soup_image.prettify()) relative_img_url = soup_image.find_all('img')[1]['src'] relative_img_url #Full image path full_image_path = url_image + relative_img_url full_image_path browser.quit() # + #This section will scrape mars facts using Pandas # - import pandas as pd mars_facts_df = pd.read_html("https://galaxyfacts-mars.com/")[1] print(mars_facts_df) mars_facts_df.columns=["Data Type", "Value"] mars_facts_df.set_index("Data Type", inplace=True) mars_facts_df # + #This section will scrape the astrology website to obtain high-rez images of each of Mars' hemispheres. 
# - executable_path = {"executable_path": '/Users/Serj/Documents/Chromedriver/chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) url_image = 'https://marshemispheres.com/' browser.visit(url_image) html_images = browser.html soup_images = bs(html_images,'lxml') print (soup_images.prettify()) #Test to see how to pull an image url browser.find_by_css('a.product-item h3')[0].click() element = browser.find_link_by_text('Sample').first print(element['href']) # + #Empty list hem_list = [] #Loop to parse the page for i in range(4): #Empty dictionary hem_dict = {} #Click on url browser.find_by_css('a.product-item h3')[i].click() #Find the full image link element = browser.find_link_by_text('Sample').first img_url = element['href'] #Find the title title = browser.find_by_css("h2.title").text hem_dict["Title"] = title hem_dict["Img_Url"] = img_url #Append the dictionary hem_list.append(hem_dict) browser.back() # - print (f'hemisphere_image_urls = {hem_list}') browser.quit()
mission_to_mars.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory notebook: parse a Python file with libcst, reorder the methods
# of a class (magic -> public -> private), and build a self-call graph of
# the class's methods with networkx.

from typing import *
import pandas as pd
import libcst as cst
import re
import networkx as nx

# Read and display the example source file to be transformed.
example_code = open('example.py', 'r').read()
print(example_code)

# Parse into a concrete syntax tree (preserves formatting/whitespace).
example_code_cst = cst.parse_module(example_code)
# example_code_cst

# First top-level statement — assumed here to be a class definition.
class_def = example_code_cst.body[0]
# example_code_cst.body[0] = class_def.with_changes(whitespace_before_colon=cst.SimpleWhitespace(value=' '))

# Statements inside the class body.
class_nodes_iter = class_def.body.body
# class_nodes_iter

# Poke at individual nodes to understand the CST structure.
class_node = class_nodes_iter[4]
class_node

method_nodes_iter = class_node.body.body
method_nodes_iter

method_node = method_nodes_iter[0]
method_node

method_node.body

class_def.body.body[2].body.body[0].body[0].value.func.attr

hasattr(class_def, 'body')

class_def.body.header, class_def.body.footer, class_def.body.indent

print(example_code_cst.code)

example_code_cst.body[0].body


def sort_functions(functions_list: list):
    """Sort CST function nodes: magic methods first, then public, then private."""
    df = pd.DataFrame([(b.name.value, b) for b in functions_list], columns=['func_name', 'body'])
    # Dunder methods (__init__, __repr__, ...).
    df['is_magic'] = df['func_name'].str.startswith('__') & df['func_name'].str.endswith('__')
    # Private = single leading underscore followed by a letter.
    df['is_public'] = ~df['func_name'].apply(lambda x: re.search('^_[a-zA-Z]', x) is not None)
    df_2 = df.sort_values(['is_magic', 'is_public'], ascending=False)
    sorted_functions_list = df_2['body'].tolist()
    return sorted_functions_list


class TypingTransformer(cst.CSTTransformer):
    """Rewrite every ClassDef so its methods are reordered by sort_functions."""

    def visit_ClassDef(self, node: cst.IndentedBlock) -> Optional[bool]:
        pass

    def leave_ClassDef(self, original_node: cst.ClassDef, updated_node: cst.ClassDef) -> cst.CSTNode:
        if isinstance(original_node, cst.ClassDef):
            print('changing')
            # Replace the class body with the sorted list of member nodes.
            new_body = updated_node.body.with_changes(body=sort_functions(updated_node.body.body))
            updated_node_2 = updated_node.with_changes(body=new_body)
            return updated_node_2
        return updated_node


transformer = TypingTransformer()

# Apply the transformer: returns a new tree with methods reordered.
modified_tree = example_code_cst.visit(transformer)

print(example_code_cst.code)

print(modified_tree.code)

example_code_cst.body[0].body

re.search('^__.*__$', '__sdf__')


class CallGraphCollector(cst.CSTVisitor):
    """Collect the names of `self.<attr>` accesses inside a visited node."""

    def __init__(self):
        self._used_methods_list : List[str] = []

    def visit_Attribute(self, node: cst.Attribute):
        # Record attribute accesses on `self` (method calls and fields alike).
        if node.value.value == 'self':
            self._used_methods_list.append(node.attr.value)
            print(node.attr.value)

    def get_used_methods_list(self):
        return self._used_methods_list


collector = CallGraphCollector()
l = class_node.visit(collector)

collector.get_used_methods_list()

# Build (method name, node, self-attributes used) triples for every method.
call_graph = []
for node in class_nodes_iter:
    if isinstance(node, cst.FunctionDef):
        collector = CallGraphCollector()
        node.visit(collector)
        used_methods_list = collector.get_used_methods_list()
        call_graph.append((node.name.value, node, used_methods_list))
# call_graph

a = pd.DataFrame(call_graph, columns=['func_name', 'node', 'used_objects'])
a

b = a.func_name.tolist()
b

# Keep only the used attributes that are actually methods of the class.
a['used_functions'] = a['used_objects'].apply(lambda x: list(set(x).intersection(b)))
a

nx.__version__

# NOTE(review): G and ROOT are only defined in the combined cell further
# down — the next few cells fail with NameError unless that cell ran first.
G

nx.draw_networkx(G)

max(nx.all_simple_paths(G, ROOT, 'b'), key=lambda x: len(x))

# Distance = length of the longest simple path from the root to the method.
a['distance'] = a['func_name'].apply(lambda y: len(max(nx.all_simple_paths(G, ROOT, y), key=lambda x: len(x))) - 1)
a


# Duplicate of the class above (kept as in the original notebook).
class CallGraphCollector(cst.CSTVisitor):
    def __init__(self):
        self._used_methods_list : List[str] = []

    def visit_Attribute(self, node: cst.Attribute):
        if node.value.value == 'self':
            self._used_methods_list.append(node.attr.value)
            print(node.attr.value)

    def get_used_methods_list(self):
        return self._used_methods_list


# +
# Combined cell: rebuild the call-graph table and the directed graph G,
# then compute each method's distance from the artificial root node.
collector = CallGraphCollector()
l = class_node.visit(collector)

call_graph = []
for node in class_nodes_iter:
    if isinstance(node, cst.FunctionDef):
        collector = CallGraphCollector()
        node.visit(collector)
        used_methods_list = collector.get_used_methods_list()
        call_graph.append((node.name.value, node, used_methods_list))

a = pd.DataFrame(call_graph, columns=['func_name', 'node', 'used_objects'])
a['used_functions'] = a['used_objects'].apply(lambda x: list(set(x).intersection(b)))

# Graph: artificial root -> every method, method -> each method it calls.
G = nx.DiGraph()
ROOT = 'root'
G.add_node(ROOT)
for index, row in a.iterrows():
    print(row['func_name'])
    G.add_node(row['func_name'])
for index, row in a.iterrows():
    G.add_edge(ROOT, row['func_name'])
    for called_func in row['used_functions']:
        G.add_edge(row['func_name'], called_func)
a['distance'] = a['func_name'].apply(lambda y: len(max(nx.all_simple_paths(G, ROOT, y), key=lambda x: len(x))) - 1)
# -
a
play.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ddc_env (python_3.6.7)
#     language: python
#     name: ddc_env
# ---

# Demo: generate a molecule with a conditional RNN (ddc_pub) targeting a set
# of molecular descriptors, then verify the generated SMILES with RDKit and
# a pickled DRD2 QSAR classifier.

# +
# %load_ext autoreload
# %autoreload 2

import numpy as np
import rdkit
from rdkit import Chem
import h5py, ast, pickle

# Occupy a GPU for the model to be loaded
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# GPU ID, if occupied change to an available GPU ID listed under !nvidia-smi
# %env CUDA_VISIBLE_DEVICES=2

from ddc_pub import ddc_v3 as ddc
# -

def get_descriptors(smiles_list, qsar_model=None, show_actives=False, active_thresh=0.5, qed_thresh=0.5):
    """Calculate molecular descriptors of SMILES in a list.

    The descriptors are logp, tpsa, mw, qed, hba, hbd and probability of
    being active towards DRD2 (predicted by `qsar_model` from an ECFP4
    fingerprint). Invalid SMILES are skipped with a message; molecules
    passing both `active_thresh` and `qed_thresh` are displayed when
    `show_actives` is True.

    Returns:
        A np.ndarray of descriptors, one row per valid molecule.
    """
    from tqdm import tqdm_notebook as tqdm
    import rdkit
    from rdkit import Chem, DataStructs
    from rdkit.Chem import Descriptors, rdMolDescriptors, AllChem, QED

    descriptors = []
    active_mols = []

    for idx, smiles in enumerate(smiles_list):
        # Convert to mol
        mol = Chem.MolFromSmiles(smiles)
        # If valid, calculate its properties
        if mol:
            try:
                logp = Descriptors.MolLogP(mol)
                tpsa = Descriptors.TPSA(mol)
                molwt = Descriptors.ExactMolWt(mol)
                hba = rdMolDescriptors.CalcNumHBA(mol)
                hbd = rdMolDescriptors.CalcNumHBD(mol)
                qed = QED.qed(mol)

                # Calculate fingerprints (Morgan radius 2 == ECFP4)
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=2048)
                ecfp4 = np.zeros((2048,))
                DataStructs.ConvertToNumpyArray(fp, ecfp4)

                # Predict activity and pick only the second component
                # (probability of the positive/active class).
                active = qsar_model.predict_proba([ecfp4])[0][1]

                descriptors.append([logp, tpsa, molwt, qed, hba, hbd, active])

                if active > active_thresh and qed > qed_thresh:
                    if show_actives:
                        active_mols.append(mol)
                        print("active_proba: %.2f, QED: %.2f." % (active, qed))
                        display(mol)
                        pass
            except Exception as e:
                # Sanitization error: Explicit valence for atom # 17 N, 4,
                # is greater than permitted
                print(e)
        # Else, skip this entry
        else:
            print("Invalid generation.")
    return np.asarray(descriptors)

# # Load QSAR model

qsar_model_name = "models/qsar_model.pickle"
with open(qsar_model_name, "rb") as file:
    qsar_model = pickle.load(file)["classifier_sv"]

# # Load PCB cRNN

# Import existing (trained) model
# Ignore any warning(s) about training configuration or non-serializable keyword arguments
model_name = "models/pcb_model"
model = ddc.DDC(model_name=model_name)

# # Select conditions for generated molecules

# +
# Custom conditions (target descriptor vector for the conditional decoder).
logp = 3.5
tpsa = 70.0
mw = 350.0
qed = 0.8
hba = 4.0
hbd = 1.0
drd2_active_proba = 0.9

target = np.array([logp, tpsa, mw, qed, hba, hbd, drd2_active_proba])

# +
# Convert back to SMILES
smiles_out, _ = model.predict(latent=target, temp=0)  # Change temp to 1 for more funky results

# Calculate the properties of the generated structure and compare
get_descriptors(smiles_list=[smiles_out], qsar_model=qsar_model, show_actives=True)
demo_cRNN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Environment (conda_tensorflow2_p36)
#     language: python
#     name: conda_tensorflow2_p36
# ---

# Feed-forward network on the raw (flattened) dataset: load the normalized
# npz data, split into train/validation/test, train with early stopping and
# checkpointing, then denormalize, plot and save the test predictions.

# +
import pandas as pd
import numpy as np
np.set_printoptions(precision=6, suppress=True)

from sklearn.model_selection import train_test_split
from tqdm import tqdm

import tensorflow as tf
from tensorflow.keras import *
import tensorflow_addons as tfa

tf.__version__
# -

import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import (LinearLocator, MultipleLocator, FormatStrFormatter)
from matplotlib.dates import MONDAY
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
from matplotlib import gridspec
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# %matplotlib inline

# +
# Global plotting style (figure sizes given in cm, converted to inches).
plt.rcParams['figure.figsize'] = ((8/2.54), (6/2.54))
plt.rcParams["font.family"] = "Arial"
plt.rcParams["mathtext.default"] = "rm"
plt.rcParams.update({'font.size': 11})
MARKER_SIZE = 15
cmap_m = ["#f4a6ad", "#f6957e", "#fccfa2", "#8de7be", "#86d6f2", "#24a9e4", "#b586e0", "#d7f293"]
cmap = ["#e94d5b", "#ef4d28", "#f9a54f", "#25b575", "#1bb1e7", "#1477a2", "#a662e5", "#c2f442"]

plt.rcParams['axes.spines.top'] = False
# plt.rcParams['axes.edgecolor'] =
plt.rcParams['axes.linewidth'] = 1
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
# -

tf.config.list_physical_devices('GPU')

# Enable memory growth so TF does not grab all GPU memory up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before the GPUs are initialized.
        print(e)

strategy = tf.distribute.MirroredStrategy()

# # Hyperparameters

BEST_PATH = './models/ffnn_raw.h5'  # checkpoint file for the best weights
TRAINING_EPOCHS = 200
LEARNING_RATE = 0.001
EPSILON = 1e-06
BATCH_SIZE = 4

# # Data loading

# Arrays were normalized to [0, 1] upstream; the MAX/MIN values allow
# denormalizing predictions back to physical units.
l = np.load('./results/2020_W/fw_ct_dataset.npz', allow_pickle=True)
data_indices = l['data_indices']
input_data = l['input_data']
output_label = l['output_label']
INPUT_MAXS = l['INPUT_MAXS']
INPUT_MINS = l['INPUT_MINS']
OUTPUT_MAX = l['OUTPUT_MAX']
OUTPUT_MIN = l['OUTPUT_MIN']

input_data = input_data.astype('float32')
output_label = output_label.astype('float32')

# Flatten each sample for the dense network.
input_data = input_data.reshape(input_data.shape[0], -1)

print(input_data.shape)
print(output_label.shape)
print(INPUT_MAXS)
print(INPUT_MINS)
print(OUTPUT_MAX)
print(OUTPUT_MIN)

# Take the first N_TRAIN samples of each third of the data as training
# candidates; everything else becomes the test set.
# NOTE(review): `.09` (9%) looks deliberate given N_DEV = n/3 below, but
# confirm it is not a typo for 0.9.
N_TRAIN = int(input_data.shape[0]*.09)
N_DEV = int(input_data.shape[0]/3)

TRAIN_INDEX = [_ for _ in range(N_TRAIN)] + \
              [_ for _ in range(N_DEV, N_DEV+N_TRAIN)] + \
              [_ for _ in range(N_DEV*2, N_DEV*2+N_TRAIN)]
TEST_INDEX = [_ for _ in range(input_data.shape[0]) if _ not in TRAIN_INDEX]

train_input = input_data[TRAIN_INDEX, ...]
train_label = output_label[TRAIN_INDEX, ...]
train_indices = data_indices[TRAIN_INDEX]
test_input = input_data[TEST_INDEX, ...]
test_label = output_label[TEST_INDEX, ...]
test_indices = data_indices[TEST_INDEX]

# 70/30 split of the training candidates into train/validation.
train_indices, val_indices, train_input, val_input, train_label, val_label = train_test_split(train_indices,
                                                                                              train_input,
                                                                                              train_label,
                                                                                              test_size=0.3,
                                                                                              shuffle=True,
                                                                                              random_state=3101)

print(f'number of data set: {input_data.shape[0]}')
print(f'number of training set: {train_input.shape[0]}')
print(f'number of validation set: {val_input.shape[0]}')
print(f'number of test set: {test_input.shape[0]}')

with strategy.scope():
    train_dataset = tf.data.Dataset.from_tensor_slices((train_input, train_label))
    train_dataset = train_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False)
    val_dataset = tf.data.Dataset.from_tensor_slices((val_input, val_label))
    val_dataset = val_dataset.cache().shuffle(BATCH_SIZE*10).batch(BATCH_SIZE, drop_remainder=False)

# # Model construction

with strategy.scope():
    model = models.Sequential([
        layers.Dense(512, activation=tf.nn.sigmoid),
        layers.BatchNormalization(),
        layers.Dense(512, activation=tf.nn.sigmoid),
        layers.BatchNormalization(),
        layers.Dense(32),
        layers.Dense(1)
    ])

# +
# Callbacks: LR decay on plateau, best-weights checkpointing, early stopping.
cbs = callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=.1, patience=5, verbose=0,
    mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)

save = callbacks.ModelCheckpoint(
    BEST_PATH, monitor='val_loss', verbose=0,
    save_best_only=True, save_weights_only=True,
    mode='min', save_freq='epoch')

early_stop = callbacks.EarlyStopping(monitor='val_loss',
                                     min_delta=0.0001,
                                     patience=20)
# -

with strategy.scope():
    opt = optimizers.Adam(learning_rate=LEARNING_RATE, epsilon=EPSILON)
    model.compile(optimizer=opt, loss='mae')

model.fit(train_dataset, epochs=TRAINING_EPOCHS, validation_data=val_dataset,
          verbose=1, callbacks=[cbs, save, early_stop])

# Restore the best checkpoint before evaluating/predicting.
model.load_weights(BEST_PATH)

model.evaluate(val_dataset)

pred_output = model.predict(test_input)

# Denormalize predictions and labels back to the original scale.
pred_output = pred_output*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN
test_label = test_label*(OUTPUT_MAX - OUTPUT_MIN) + OUTPUT_MIN

# +
# Predicted vs. observed scatter plot.
fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))
ax0 = plt.subplot()
ax0.spines['right'].set_visible(False)
ax0.spines['left'].set_position(('outward', 5))
ax0.spines['bottom'].set_position(('outward', 5))

ax0.plot(test_label, pred_output, 'o', ms=5, mec='k', c=cmap[0])

fig.tight_layout()
# -

pred_df = pd.DataFrame(test_label, index=test_indices[:, 0], columns=['label'])
pred_df['pred'] = pred_output

# +
# Index-ordered overlay of labels (line) and predictions (dots).
fig = plt.figure(figsize=((8.5/2.54*2), (6/2.54*2)))
ax0 = plt.subplot()
ax0.spines['right'].set_visible(False)
ax0.spines['left'].set_position(('outward', 5))
ax0.spines['bottom'].set_position(('outward', 5))

ax0.plot(pred_df.index, pred_df['label'], '-o', ms=5, mec='k', c=cmap[4])
ax0.plot(pred_df.index, pred_df['pred'], 'o', ms=5, mec='k', c=cmap[0])

fig.tight_layout()
# -

pred_df.to_csv('./results/model_output/ffnn_raw.csv')
5-2_FFNN_raw.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Displaying multiple images # In this activity, we will plot images in a grid. # + # Import statements import os import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg # %matplotlib inline # - # Load all four images from the subfolder data. # Load images # Visualize the images in a 2x2 grid. Remove the axes and give each image a label. # + # Create subplot # Specify labels # Plot images # -
Lesson03/Activity19/activity19.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Bayesian Thinking: AB Testing & Evaluation (ROPE, Effect Size and Lift) # # * In this notebook, we are going to see how <b>Bayesian framework can help provide improvements to traditional Frequentist AB Testing.</b> As we saw in a [previous post about Frequentist AB Testing](https://github.com/AdiVarma27/AdiVarma27.github.io/blob/master/_jupyter/2020-04-10-Hypothesis%20Testing%2C%20Click-Through-Rate%20for%20Banner%20Ads%20(AB%20Testing).ipynb), we conducted an experiment to understand which version of Banner Ad attracts customers to click on them, so that we can decide which version to finally publish on to the website. # # # * To do so, we looked at how we can choose a <b>sample size in advance based on the number of daily users, Type-I and Type-II Error rates.</b> We observed that <b>version-B had a statistically significant higher CTR (point estimate)</b>, when compared to version-a Banner Ad. # # ## Frequentist vs Bayesian Framework # # * In our previous frequentist approach, we send every new customer randomly to either the control or treatment groups/ version of the banner Ads. To validate our hypothesis that the new banner might work better. 
# We calculate the Click-Through-Rates for both versions (Successful Clicks/ Total Impressions) per banner, conduct a One-Tailed, Two-sample proportion test, and come up with a <b>corresponding Z-stat and p-value.</b> In this framework, we assume there is some ground truth, and <b>that we can tend to the ground truth by sampling enough data.</b>
#
#
# * <b>Bayesian framework does not assume point estimates (no single value of the ground truth), but provides a distribution or range, where the ground truth could lie in, with some confidence.</b> It assumes that we have some prior (Weak/ Strong), and as we keep sampling more data, our posterior probability tends to the <b>underlying distribution of the ground truth.</b>
#
#
# ## Wise vs Rigid Bayesian Priors
#
# * In the Bayesian way of thinking, there is some <b>prior we know works well</b>. Without any data, this is our <b>best guess from previous experience/ Business expertise.</b> For Banner Ads in the e-commerce space, the main banner which shows up beside the website heading could have a Click-through-rate of 10 % (Note that we need not be super confident about it).
#
#
# * Let us say we own an online book-store and have thousands of customers who visit us every day. We are <b><i>'wiser'</i></b> in some sense due to the experience we gain from our customers, and we know with high confidence that CTR is around 20 % for an online bookstore Banner Ad. In this case, our priors can be stronger, and hence <b><i>'tougher'</i></b> in some sense to change our belief system.
#
#
# * Having extremely strong priors defeats the purpose of experimentation (where we are open to changing our belief and account for both experience + exploration), and is no more <b><i>'wise'</i></b>, and starts to tend towards being <b><i>'rigid'</i></b>.
# # ## Beta Distribution and Priors # # * Let us see how Beta Distribution (Prior of Bernoulli Distribution), can help form excellent priors by tweaking its alpha and beta parameters. the parameter alpha can be modelled as number of successes and beta can be modelled as number of faliures. # importing necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from scipy.stats import beta plt.figure(figsize=(18,6)) x = np.arange(0,1,0.001); y0 = beta.pdf(x, 2, 2); y1 = beta.pdf(x, 10, 10); y2 = beta.pdf(x, 20, 80) plt.plot(x, y0, label='a=2, b=2',c='r'); plt.plot(x, y1, label='a=10, b=10',c='b') plt.plot(x, y2, label='a=20, b=80',c='g') plt.grid() plt.legend() # * Observe that the <b>red distribution has weak priors</b>, Hence gives low confidence for a larger range of values. The blue distribution with a=10 and b=10, even though has the <b>same ratio as red distribution (a=2, b=2), is slightly less deviated from the mean (a/(a+b)), and is more confident.</b> The distribution in green has high confidence around the mean, with mean value of (0.2). Hence, the <b>red and blue distributions are weak priors, whereas the green distribution is 'wise'.</b> # # # * The green distribution has <b>alpha=20 and beta=80; and is modelled directly as 20 successful clicks and 80 unsuccessful clicks, with mean CTR of (20/(20+80)) = 20 %</b> Let us stick to this prior for now, and see how the posterior looks like. # ## Conducting Experiment # # * Let us say we conducted the expriment and let it run for two days, after end of two days, here are the clicks and impressions for both versions. We observe that the CTR for version-a and version-b are 19.6 % and 26.5 % respectively. We also conduct a One tailed two sample proportion test, and observe t-value > 1.645. 
# data from experiment: clicks and impressions for both versions after two days
df = pd.DataFrame()
df['clicks_success'] = [192, 270]
df['impressions'] = [980, 1020]
df.index= ['version_a', 'version_b']
df['ctr'] = df['clicks_success']/df['impressions']
df['clicks_faliure'] = df['impressions'] - df['clicks_success']
df

# +
# One-tailed two-sample proportion test with a pooled standard error.
p = (192 + 270)/(980 + 1020)
SE = np.sqrt(p*(1-p)*( (1/980) + (1/1020) ))
t = (0.264706 - 0.195918)/(SE)
t, SE
# -

# ## Updating Belief
#
# * In this step, we need to calculate the posterior probability/ updated belief from our new learnings. We simply need to add the number of successful and number of un-successful clicks as updated parameters. We have two updated posterior distributions. First, let us update version-a, and then version-b.

# +
prior_alpha, prior_beta = 20, 80

# update version-a posterior parameters (Beta prior + observed successes/failures)
updated_alpha_version_a = prior_alpha + df.clicks_success.iloc[0]
updated_beta_version_a = prior_beta + df.clicks_faliure.iloc[0]

# update version-b posterior parameters
updated_alpha_version_b = prior_alpha + df.clicks_success.iloc[1]
updated_beta_version_b = prior_beta + df.clicks_faliure.iloc[1]

# +
plt.figure(figsize=(18,6))
x = np.arange(0,1,0.001)
y0 = beta.pdf(x, 20, 80)
plt.plot(x, y0, label='Prior: a=20, b=80', c='g')

# FIX: labels are now built from the actual posterior parameters (the
# original hard-coded a/b values did not match the computed posteriors)
# and the second curve is labelled Version-B instead of Version-A.
ya = beta.pdf(x, updated_alpha_version_a, updated_beta_version_a)
plt.plot(x, ya,
         label=f'Posterior (Version-A): a={updated_alpha_version_a}, b={updated_beta_version_a}',
         c='orange')
plt.axvline(0.195918, c='orange', linestyle='dashed')

yb = beta.pdf(x, updated_alpha_version_b, updated_beta_version_b)
plt.plot(x, yb,
         label=f'Posterior (Version-B): a={updated_alpha_version_b}, b={updated_beta_version_b}',
         c='red')
plt.axvline(0.264706, c='red', linestyle='dashed')

plt.xlim(0.1,0.35)
plt.legend()
# -

# ### Observations:
#
# * From the above plot, we observe that the <b>point estimates (CTR) for version-a and version-b are in dashed lines (orange and red) respectively.</b> We observe the <b>Green distribution is the chosen 'prior'</b>, and the posterior of version-a and version-b are in Orange and Red Beta Distributions.
# # # * We observe that the <b>posterior mean for version-b (Beta distribution in red), has a slightly lower mean</b>, when compared to its point estimate (with respect to Bayesian, point estimate over-estimates the CTR). # # ### Uplift Distribution: # # * Now, let us calculate the uplift (version-a to version-b), by the following formula, and plot the pdf and cdf of uplift distribution below. For sampling, we sample 10000 data points from each of the Beta posteriors and find the lift ratio. # # $$Uplift = Ratio_{Beta} (Version_B/ Version_A)$$ # # # * From the CDF of Ratio of Posterior Betas (Uplift CDF), every sample after 0 on the x-axis, corresponds to the sample where there is some level of uplift, whereas points below 0 have less<1 uplift (version-a works better). Just by calculating areas under Pdf the curve for values > 0 and values <0, we can provide conclusions. # + np.set_printoptions(suppress=True) # data sampling simulation monte_carlo_ya = beta.rvs(updated_alpha_version_a, updated_beta_version_a, size=10000) monte_carlo_yb = beta.rvs(updated_alpha_version_b, updated_beta_version_b, size=10000) # lift ratio in Beta distributions ratio = np.array(monte_carlo_yb)/np.array(monte_carlo_ya) plt.figure(figsize=(16,4)) sns.distplot(ratio, color='orange') plt.title('PDF of Uplift') plt.figure(figsize=(16,4)) kwargs = {'cumulative': True} sns.distplot(ratio, hist_kws=kwargs, norm_hist=True, kde=False) plt.title('CDF of Uplift') # - # * From the CDF above (Blue plot), we can conclude that X % of samples fall between a lower and upper bound for Uplift ratio. For example, # ## Observations: # # ### Our assigned task was to answer one simple question: Does version-B work better than Version-A ? # # * To answer this question, we approached the problem in two ways; Frequentist and Bayesian. To answer the above question, we need some form of quality/quantity indicator, to effectively communicate our findings. 
# To put the answer in frequentist terms, it would be something along these lines:
#
# ### Question:
#
# <b><i>What are your findings from the experiment ? Which version should we use and why ?</i></b>
#
# ### Answer:
#
# <b><i> We found that there is a statistically significant increase in CTR for version-b, when compared to version-a, with a p-value <= 0.05.<i/></b>
#
# The above answer is correct, it does associate a number/ numeric to show/ convey the strength and confidence in our point estimates. In most cases, the follow-up question would be something like this:
#
# ### Follow-up Question:
#
# <b><i>So... Are you 95 % confident in your results ?</i></b>
#
# ### Follow-up Answer:
#
# <b><i>Well, not really. We have enough evidence to prove that there is 'some' difference (difference in mean point estimates to be precise). Given the null hypothesis is true, i.e., there is no statistically significant difference between the two versions, there is a 5 % chance that a sample could show up in the alternate hypothesis.<i></b>
#
# To simplify the confusion, you could choose to take the Bayesian approach and say the following:
#
# #### We can conclude that <b>60% of the simulations show an Uplift ratio between 1.22 and 1.42 Uplift ratio</b>, and <b>80% of the simulations show an Uplift ratio between 1.18 and 1.45</b>.
#
# * See the two plots below; we choose 60 % of the simulations data and see the x-axis to report on the uplift ratio, and choose 80 % as shown in the right plot. We can also show <b>Percentage Gain: (B-A)/A </b> as a metric for reporting.

# +
# Central 60 % and 80 % intervals of the simulated uplift ratio, read off
# the empirical CDF.
kwargs = {'cumulative': True}
plt.figure(figsize=(16,12));
plt.subplot(2,2,1);
sns.distplot(ratio, hist_kws=kwargs, norm_hist=True, kde=False, label='60 % Dataset',color='blue');
plt.yticks(np.arange(0,1.05,0.05))
plt.axvline(1.22,ymin=0,ymax=0.2);plt.axvline(1.42,ymin=0,ymax=0.8);plt.title('CDF of Uplift');plt.legend()

plt.subplot(2,2,2)
sns.distplot(ratio, hist_kws=kwargs, norm_hist=True, kde=False, label='80 % Dataset',color='red');
plt.yticks(np.arange(0,1.05,0.05))
plt.axvline(1.18,ymin=0,ymax=0.1);plt.axvline(1.45,ymin=0,ymax=0.9);plt.title('CDF of Uplift');plt.legend()
# -

# * Bayesian framework is not as <b>'harsh'</b> as the frequentist approach. For statistical tests, t-stat and p-value have hard cutoffs and <b>do not count for incremental or minimal gains/ boost in performance.</b> In the Bayesian framework, we include <b>priors as well as account for small incremental gains which can make a huge difference at a larger scale.</b>
#
#
# * Note that there is <b> no question about Power of the Test</b> in a Bayesian context. Every update for the posterior is based on the prior, and the assumption here is that the Beta distribution slowly tends to the 'true' or 'correct' distribution, and not a point estimate. Hence, there is no concept of power in Bayesian statistics.

# # Evaluating Bayesian Hypothesis Testing (ROPE, Effect Size & Lift)
#
#
# * Now, we are going to look at different <b>Evaluation metrics</b> for Bayesian Hypothesis Testing.

# data from experiment (second run: version-b now has 225 successful clicks)
df = pd.DataFrame()
df['clicks_success'] = [192, 225]
df['impressions'] = [980, 1020]
df.index= ['version_a', 'version_b']
df['ctr'] = df['clicks_success']/df['impressions']
df['clicks_faliure'] = df['impressions'] - df['clicks_success']
df

# ## Higher Posterior Density Interval
#
# * We know that the integral over the area of any Pdf equals 1.
# <b>Let us plot a black line (line-1, at y=40); the Pdf integral under the line is 1, and the area of the Pdf above the line is 0.</b>
#
#
# * As the <b>line goes down (line-2 in red, at y=12), we see that the area above the line is around 50 %. </b>At position-3, we see that the <b>area above line-3 in green is 95%</b>. <b>The corresponding theta (x-axis)</b>, where the area above the line contributes to <b>95 %</b>, is known as the Higher Posterior Density Interval, as shown between the Blue Bars below. At 95 % HPDI, it is similar to choosing the dataset such that we set <b>alpha at 0.05.</b>

# +
plt.figure(figsize=(18,6))
x = np.arange(0,1,0.001)

# FIX: recompute the posteriors from the *second* experiment's data frame
# (192/980 and 225/1020 clicks). The stale values left over from the first
# experiment no longer matched the parameter values quoted in the labels.
updated_alpha_version_a = prior_alpha + df.clicks_success.iloc[0]
updated_beta_version_a = prior_beta + df.clicks_faliure.iloc[0]
updated_alpha_version_b = prior_alpha + df.clicks_success.iloc[1]
updated_beta_version_b = prior_beta + df.clicks_faliure.iloc[1]

ya = beta.pdf(x, updated_alpha_version_a, updated_beta_version_a)

plt.axhline(40, color='black', linestyle='dashed', label='Line 1: Area Above line = 0');
plt.axhline(12, color='red', linestyle='dashed', label='Line 2: Area Above line = 50 %');
plt.axhline(3, color='green', linestyle='dashed', label='Line 3: Area Above line = 95 % ');

plt.plot(x, ya,
         label=f'Posterior (Version-A): a={updated_alpha_version_a}, b={updated_beta_version_a}',
         c='orange');

yb = beta.pdf(x, updated_alpha_version_b, updated_beta_version_b)
# FIX: this curve is Version-B (the original label said Version-A).
plt.plot(x, yb,
         label=f'Posterior (Version-B): a={updated_alpha_version_b}, b={updated_beta_version_b}',
         c='yellow');

plt.axvline(0.17, ymax=0.06, linewidth=7);plt.axvline(0.288, ymax=0.06, linewidth=7);
plt.xlim(0.13,0.3);plt.ylim(0,45);
plt.legend();
plt.show()
# -

# <hr>
#
# ## Region Of Practical Equivalence (ROPE):
#
# ###### Source: https://docs.pymc.io/notebooks/BEST.html
#
# ###### Whitepaper:https://pdfs.semanticscholar.org/dea6/0927efbd1f284b4132eae3461ea7ce0fb62a.pdf
#
#
#
# * From the paper above <b>(Bayesian Estimation supersedes the t Test)</b>, we note the following: <i>Bayesian estimation also can accept the null value, not only
# reject it. The researcher specifies a region of practical equivalence
# (ROPE) around the null value, which encloses those values of the
# parameter that are deemed to be negligibly different from the null
# value for practical purposes.
The size of the ROPE will depend on # the specifics of the application domain. As a generic example, # because an effect size of 0.1 is conventionally deemed to be small # (Cohen, 1988), a ROPE on effect size might extend from -0.1 to # 0.1.</i> # # ### ROPE Threshold: # # * From the above paper, we see that <b>ROPE limits varies by application and business constraints.</b> In our case, we can choose ROPE of (-0.1, 0.1). Note that we need our <b>metric of interest to fall outside the ROPE region. </b> # # # ### Metric 1: Effect Size: # # * We know that effect size can be be calculated by following: $$ES = \frac{\mu_a - \mu_b}{\sigma}$$ # # # # <i>If ES = +/- 0.1 %, it means difference in mean CTRs of version-a and version-b, is only 10 % of the combined Standard Deviation. Hence, let us look at two sets of experiments below.</i> import abyes import warnings warnings.filterwarnings(action='ignore') # #### Experiment 1: # # * We find that the mean CTR for version-a is 0.20 and version-b is 0.25, after 1000 samples each. We run Monte Carlo simulation and sample 10000 points and find the lift. # # #### Experiment 2: # # * We find that the mean CTR for version-a is 0.20 and version-b is 0.4, after 1000 samples each. We run Monte Carlo simulation and sample 10000 points and find the lift. 
# + # experiment 1 data sampling for version-a and version-b data_exp1 = [np.random.binomial(1, 0.20, size=1000), np.random.binomial(1, 0.25, size=1000)] # experiment 2 data sampling for version-a and version-b data_exp2 = [np.random.binomial(1, 0.20, size=1000), np.random.binomial(1, 0.4, size=1000)] # + # exp1's std devs for version-a, version-b exp1_version_a_std = np.sqrt((0.2*(1-0.2))/ (1000)) exp1_version_b_std = np.sqrt((0.25*(1-0.25))/ (1000)) # exp2's std devs for version-a, version-b exp2_version_a_std = np.sqrt((0.2*(1-0.2))/ (1000)) exp2_version_b_std = np.sqrt((0.4*(1-0.4))/ (1000)) exp1_version_a_std, exp1_version_b_std, exp2_version_a_std, exp2_version_b_std # - # ### Calculating Effect Size for both experiments: # + ES1 = ((data_exp1[1].mean()/1000) - (data_exp1[0].mean()/1000))/(np.sqrt(exp1_version_a_std**2 + exp1_version_b_std**2)) ES2 = ((data_exp2[1].mean()/1000) - (data_exp2[0].mean()/1000))/(np.sqrt(exp2_version_a_std**2 + exp2_version_b_std**2)) # - ES1, ES2 # # Evaluating Experiment using Effect Size: # # * To evaluate our experiment, we can use either <b> Effect Size</b> or <b>Lift Distribution</b>, against <b>ROPE</b> Metric, which is a design choice. From the above, we found the Effect size for both our experiments. (Remember, CTRs of both versions in experiment are very close to each other). # # # * As shown below, after conduting experiment 1, we see the <b>ROPE metric</b>, from <b> (-0.1 to 0.1)</b>, around the null Effect size, plaotted in <b>Green</b>. See how the Pdf of <b>Effect size falls (in Blue), coincides with the Green region </b>. 
<b>Hence, we are not confident that there might be a large enough effect, and result in Inconclusive.</b> exp1 = abyes.AbExp(method='analytic', decision_var = 'es', rule='rope', rope=(-0.1,0.1), alpha=0.95, plot=True) exp1.experiment(data_exp1) # * From below, after conduting experiment 2, we see the <b>ROPE metric</b>, from <b> (-0.1 to 0.1)</b>, around the null Effect size, plaotted in <b>Green</b>, similar to previous setup. See how the Pdf of <b>Effect size does NOT (in Blue), coincide with the Green region </b>. <b>Hence, we can conclusively report that version-B works better than version-A.</b> exp2 = abyes.AbExp(method='analytic', decision_var = 'es', rule='rope', rope=(-0.1,0.1), alpha=0.95, plot=True) exp2.experiment(data_exp2) # * Hence, we know that having a <b> larger Effect Size </b> finds a large difference between CTRs for different versions. ES is the percent of mean difference to combined standard deviation. Hence, having either <b>large difference in CTR (numerator), and larger number of Samples (As n-increases, S.D. decreases)</b>. # # # Evaluating Experiment using Lift # # # ## Metric 2: Lift # # * We can also look at the Lift (Distribution of Difference between versions), and look at the cut-off with respect to <b>ROPE</b> around the null lift point. Similar to conclusions from the above Effect Size method, we arrive to the same conclusions below. # running experiment 1 exp1 = abyes.AbExp(method='analytic', decision_var = 'lift', rule='rope', rope=(-0.1,0.1), alpha=0.95, plot=True) exp1.experiment(data_exp1) # running experiment 2 exp2 = abyes.AbExp(method='analytic', decision_var = 'lift', rule='rope', rope=(-0.1,0.1), alpha=0.95, plot=True) exp2.experiment(data_exp2) # # Which metric works the best between Effect Size and Lift ? 
# # * Thinking about the <b>criteria for conclusiveness, there is a slight difference between the two metrics.</b> Observe how Uplift does not consider sample size at all (Uplift is only the difference in mean CTRs), whereas Effect Size takes the <b>Combined Standard Deviation into consideration.</b>
#
#
# $$ES = \frac{\mu_b - \mu_a}{\sqrt{\sigma_a^2 + \sigma_b^2}}$$
#
#
# * By doing so, as the number of <b>samples increases — and <i>given</i> that more samples occur closer to the mean (high sample size and low deviation) — the overall value of the Effect Size increases. Hence, generally speaking, Effect Size makes more sense than absolute uplift alone (this makes more of a difference as sample sizes increase and deviation is low).</b>
_jupyter/2020-04-13-Bayesian Thinking in AB Testing & Evaluation (ROPE, Effect Size and Lift).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Data preprocessing for a cable-structure load-prediction model.
# Cell 1 builds per-node input features (node coordinates + a one-hot
# "vehicle load location" channel) and the load-factor labels, splits them
# 60/20/20 into train/val/test, standardizes them, and optionally saves them.
# Cell 2 builds the edge features of the structural graph (distance, E,
# area, self-loop flag, cable tension) and saves the corresponding splits.
# NOTE(review): shapes such as 74*40 cases and 106 nodes are taken from the
# inline comments below — confirm against the .npy files before changing.

# +
import numpy as np
#import pandas as pd
import pickle
import math
import os
import matplotlib.pyplot as plt
from utils import get_cableidx
import collections

###################
n_f=4
n_node=106
time_step=1
# NOTE(review): time_step is reassigned to 3 further down; this initial
# value of 1 is never used — confirm it can be removed.
###################
# label - load factor (scaled back to physical units by the 700000 factor)
y_lf=np.load('data/loadfact_list.npy').reshape((-1,1))#(2960, 10)
y_lf=y_lf*700000

### node data
# location feature: the same node-coordinate matrix repeated per sample
node_loc=np.load('data/node_loc.npy')
x=np.array([node_loc]*74*40)

# load feature: sliding windows of `time_step` consecutive node indices
# marking where the vehicle load sits; built separately for the two node
# ranges 1..39 and 40..78, then repeated for the 40 load steps per case.
time_step=3
x_offsets = np.sort(np.concatenate((np.arange(0, time_step, 1),)))
load_loc=[]
idx_node=np.arange(1,39+1)
for t in range(0,len(idx_node)-time_step+1):
    load_loc.append(idx_node[t + x_offsets, ...])
idx_node=np.arange(40,78+1)
for t in range(0,len(idx_node)-time_step+1):
    load_loc.append(idx_node[t + x_offsets, ...])
load_loc=np.array(load_loc)
load_loc=load_loc-1  # 1-based node ids -> 0-based indices
load_loc=np.repeat(load_loc, repeats=40, axis=0)
# one-hot encode the loaded nodes into a (samples, 106) indicator matrix
load_node=np.zeros((74*40,106))
for i, c in enumerate(load_loc):
    load_node[i,c]=1
x=np.concatenate((x,load_node[:,:,np.newaxis]),axis=-1);print(x.shape)#x,y,z,vehicle loc
#############################
features=x.copy()
label_lf=y_lf.copy()

# split: fixed seed so cell 2 reproduces the identical permutation
np.random.seed(1)
indices = np.random.permutation(len(features));print(indices[0])
n_training=int(len(features)*0.6)
n_val=int(len(features)*0.2)
n_test=len(features)-n_training-n_val
training_idx, val_idx,test_idx = indices[:n_training], indices[n_training:n_training+n_val], indices[-n_test:]
f_training, f_val, f_test = features[training_idx],features[val_idx], features[test_idx]
print(f_training.shape,f_val.shape,f_test.shape)
l_training, l_val, l_test = label_lf[training_idx],label_lf[val_idx], label_lf[test_idx]
print(l_training.shape,l_val.shape,l_test.shape)

# scale features: standardize the first n_f-1 channels with TRAIN statistics
# (the last channel is the one-hot load indicator and is left unscaled)
for i in range(n_f-1):
    m=np.mean(f_training[...,i].flatten());std=np.std(f_training[...,i].flatten())
    f_training[...,i]=(f_training[...,i]-m)/std
    f_val[...,i]=(f_val[...,i]-m)/std
    f_test[...,i]=(f_test[...,i]-m)/std
for i in range(n_f):
    print(i,np.min(f_val[...,i]),np.max(f_val[...,i]),np.mean(f_val[...,i]),np.std(f_val[...,i]))

# scale label with TRAIN mean/std; printed so predictions can be un-scaled
m=np.mean(l_training);std=np.std(l_training)
l_training=(l_training-m)/std
l_val=(l_val-m)/std
l_test=(l_test-m)/std
print('label m, std',m,",",std)
print('label',np.min(l_training),np.max(l_training),np.mean(l_training),np.std(l_training))

# small graph (disabled alternative that keeps only instrumented cables)
# in_cable=np.array([1,4,9,11,16,21,24,29,31,36])-1
# node_cable= (get_cableidx()[in_cable,2])-1
# node_cable=np.concatenate((np.arange(78),node_cable),axis=0)

save_flag=False#True#
if save_flag:
    data_path='data/data_mpnn/'
    np.savez_compressed(data_path+'features_train',f_training)
    np.savez_compressed(data_path+'features_val',f_val)
    np.savez_compressed(data_path+'features_test',f_test)
    data_path='data/'
    np.savez_compressed(data_path+'label_train',l_training)
    np.savez_compressed(data_path+'label_val',l_val)
    np.savez_compressed(data_path+'label_test',l_test)
    # data_path='data/data_mpnn_simple/'
    # np.savez_compressed(data_path+'features_train',f_training[:,:,3:])
    # np.savez_compressed(data_path+'features_val',f_val[:,:,3:])
    # np.savez_compressed(data_path+'features_test',f_test[:,:,3:])
    # data_path='data/data_mpnn_simple_dist/'
    # np.savez_compressed(data_path+'features_train',f_training[:,:,3:])
    # np.savez_compressed(data_path+'features_val',f_val[:,:,3:])
    # np.savez_compressed(data_path+'features_test',f_test[:,:,3:])
    # data_path='data/data_mpnn_small/'
    # np.savez_compressed(data_path+'features_train',f_training[:,node_cable,:])
    # np.savez_compressed(data_path+'features_val',f_val[:,node_cable,:])
    # np.savez_compressed(data_path+'features_test',f_test[:,node_cable,:])

# +
# Cell 2: build edge features for the 106-node structural graph.
import numpy as np
import pickle
import collections
from utils import get_cableidx

# adjacency matrix of the structure; zero the diagonal (no self edges yet)
with open("data/sensor_graph/adj_mx.pkl", 'rb') as pickle_file:
    adj_mx = pickle.load(pickle_file)
for i in range(106):
    adj_mx[i,i]=0
#delete 50 node#adj_mx[out_node][:,out_node]=0
print(adj_mx.shape)
# undirected edge list: keep each pair once (row index < column index)
edge_data=[[i,j] for i,j in zip(np.where(adj_mx==1)[0],np.where(adj_mx==1)[1])];edge_data=np.array(edge_data)
edge_data=edge_data[edge_data[:,0]<edge_data[:,1]]

# distance & E (Young's modulus) per edge
with open("data/sensor_graph/adj_mx_type_e.pkl", 'rb') as pickle_file:
    edge_de = pickle.load(pickle_file)#sacled [distance, E]
edge_d=edge_de[2][0];edge_e=edge_de[2][1]
print(collections.Counter(edge_d.flatten()))
print(collections.Counter(edge_e.flatten()))
for i in range(106):
    edge_d[i,i]=0
for i in range(106):
    edge_e[i,i]=0
# cross-sectional area per edge
with open("data/sensor_graph/adj_mx_type_a.pkl", 'rb') as pickle_file:
    edge_a = pickle.load(pickle_file)
print(collections.Counter(edge_a.flatten()))
for i in range(106):
    edge_a[i,i]=0

# make inputs for graph: gather per-edge scalars along the edge list and
# stack them into an (edges, 3) feature matrix [distance, E, area]
edge_a=edge_a[edge_data[:,0],edge_data[:,1]]
edge_d=edge_d[edge_data[:,0],edge_data[:,1]]
edge_e=edge_e[edge_data[:,0],edge_data[:,1]]
edge_f=np.stack([edge_d,edge_e,edge_a],axis=1)
print(edge_data.shape,edge_f.shape)#(149, 106, 6) (149, 40) (185, 2) (185, 2)

# self loop: append (i, i) edges with zero features plus a 0/1 indicator
# column that marks which rows are self loops
edge_self=np.array([[i,i] for i in range(len(adj_mx))])
edge_data=np.concatenate([edge_data,edge_self],axis=0)
edge_f=np.concatenate([edge_f,np.zeros((len(adj_mx),edge_f.shape[1]))],axis=0)#matching shape (below)
edge_f_self=np.array([0]*(len(edge_f)-len(adj_mx))+[1]*(len(adj_mx)))[:,np.newaxis]#self loop feature
edge_f=np.concatenate([edge_f,edge_f_self],axis=1)
print(edge_data.shape,edge_f.shape)

######### tension data
in_cable=np.array([1,4,9,11,16,21,24,29,31,36])-1#list(range(3,40,4))#[3,7,11,15,19,23,27,31,35,39]
tension=np.load('data/efo_signal_list.npy')#74,40,40 #case, step, cable
# mlp label data - tension 30 (disabled alternative)
'''out_cable=np.array(list(set(range(40))-set(in_cable)))
tension_30=tension[:,:,out_cable].reshape((-1,30));print(tension_30.shape)'''
# edge data - tension 40: scatter each cable's tension onto its edge row
tension_40=tension.reshape((-1,40))
node_cable_40=(get_cableidx()[:,1:])-1
edge_t_40=np.zeros((74*40,291,1))
for i in range(74*40):
    for j,(r,c) in enumerate(node_cable_40):
        idx=np.where((edge_data == (r, c)).all(axis=1))
        edge_t_40[i,idx,0]=tension_40[i,j]
edge_f_40=np.array([edge_f]*74*40)
edge_f_40=np.concatenate((edge_f_40,edge_t_40),axis=-1);print(edge_f_40.shape)#distance, E, area, self, tension

# edge data & mlp label data - tension 10: same scatter, but only for the
# 10 instrumented cables listed in in_cable
node_cable= (get_cableidx()[in_cable,1:])-1
tension=tension[:,:,in_cable].reshape((-1,10))#mlp input data
edge_t=np.zeros((74*40,291,1))
for i in range(74*40):
    for j,(r,c) in enumerate(node_cable):
        idx=np.where((edge_data == (r, c)).all(axis=1))
        edge_t[i,idx,0]=tension[i,j]
edge_f=np.array([edge_f]*74*40)
edge_f=np.concatenate((edge_f,edge_t),axis=-1);print(edge_f.shape)#distance, E, area, self, tension

# indexing: same seed as cell 1 so the edge splits align with the node splits
np.random.seed(1)
indices = np.random.permutation(len(edge_f));print(indices[0])
n_training=int(len(edge_f)*0.6)#7:1:2
n_val=int(len(edge_f)*0.2)
n_test=len(edge_f)-n_training-n_val
training_idx, val_idx,test_idx = indices[:n_training], indices[n_training:n_training+n_val], indices[-n_test:]
e_training, e_val,e_test = edge_f[training_idx],edge_f[val_idx], edge_f[test_idx]
e_training_40, e_val_40,e_test_40 = edge_f_40[training_idx],edge_f_40[val_idx], edge_f_40[test_idx]
'''mlp_training, mlp_val,mlp_test = tension[training_idx],tension[val_idx], tension[test_idx]#mlp input data
mlp_label_training, mlp_label_val,mlp_label_test = tension_30[training_idx],tension_30[val_idx], tension_30[test_idx]'''#mlp input data
print(e_training.shape,e_val.shape,e_test.shape)

### scale: standardize channels 0,1,2,4 (distance, E, area, tension) with
### TRAIN statistics; channel 3 is the self-loop flag and stays 0/1
for i in [0,1,2,4]:
    m=np.mean(e_training[...,i].flatten());std=np.std(e_training[...,i].flatten())
    e_training[...,i]=(e_training[...,i]-m)/std
    e_val[...,i]=(e_val[...,i]-m)/std
    e_test[...,i]=(e_test[...,i]-m)/std
for i in range(5):
    print('10',i,np.min(e_val[...,i]),np.max(e_val[...,i]),np.mean(e_val[...,i]),np.std(e_val[...,i]))
for i in [0,1,2,4]:
    m=np.mean(e_training_40[...,i].flatten());std=np.std(e_training_40[...,i].flatten())
    e_training_40[...,i]=(e_training_40[...,i]-m)/std
    e_val_40[...,i]=(e_val_40[...,i]-m)/std
    e_test_40[...,i]=(e_test_40[...,i]-m)/std
for i in range(5):
    print('40',i,np.min(e_val_40[...,i]),np.max(e_val_40[...,i]),np.mean(e_val_40[...,i]),np.std(e_val_40[...,i]))
# mlp (disabled alternative scaling for the MLP baseline)
'''m=np.mean(mlp_training.flatten());std=np.std(mlp_training.flatten())
mlp_training=(mlp_training-m)/std;mlp_val=(mlp_val-m)/std;mlp_test=(mlp_test-m)/std
print('mlp',np.min(mlp_val),np.max(e_val),np.mean(e_val),np.std(e_val))
m=np.mean(mlp_label_training.flatten());std=np.std(mlp_label_training.flatten())
mlp_label_training=(mlp_label_training-m)/std;mlp_label_val=(mlp_label_val-m)/std;mlp_label_test=(mlp_label_test-m)/std'''

save_flag=True#False#
if save_flag:
    data_path='data/data_mpnn/'
    np.savez_compressed(data_path+'edge_train',e_training)
    np.savez_compressed(data_path+'edge_val',e_val)
    np.savez_compressed(data_path+'edge_test',e_test)
    np.savez_compressed(data_path+'edge_data',edge_data)
    data_path='data/data_mpnn/tension40/'
    np.savez_compressed(data_path+'edge_train',e_training_40)
    np.savez_compressed(data_path+'edge_val',e_val_40)
    np.savez_compressed(data_path+'edge_test',e_test_40)
    np.savez_compressed(data_path+'edge_data',edge_data)
    '''data_path='data/data_mlp/'
    np.savez_compressed(data_path+'features_train',mlp_training)
    np.savez_compressed(data_path+'features_val',mlp_val)
    np.savez_compressed(data_path+'features_test',mlp_test)
    data_path='data/data_mlp/data_tension30/'
    np.savez_compressed(data_path+'label_train',mlp_label_training)
    np.savez_compressed(data_path+'label_val',mlp_label_val)
    np.savez_compressed(data_path+'label_test',mlp_label_test)'''
    # data_path='data/data_mpnn_simple/'
    # np.savez_compressed(data_path+'edge_train',e_training[:,:,-1:])
    # np.savez_compressed(data_path+'edge_val',e_val[:,:,-1:])
    # np.savez_compressed(data_path+'edge_test',e_test[:,:,-1:])
    # np.savez_compressed(data_path+'edge_data',edge_data)
    # data_path='data/data_mpnn_simple_dist/'
    # np.savez_compressed(data_path+'edge_train',e_training[:,:,[0,-1]])
    # np.savez_compressed(data_path+'edge_val',e_val[:,:,[0,-1]])
    # np.savez_compressed(data_path+'edge_test',e_test[:,:,[0,-1]])
    # np.savez_compressed(data_path+'edge_data',edge_data)
BRL/vehicleload/DataPreprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 3 - Building a Custom Visualization
#
# ---
#
# In this assignment you must choose one of the options presented below and submit a visual as well as your source code for peer grading. The details of how you solve the assignment are up to you, although your assignment must use matplotlib so that your peers can evaluate your work. The options differ in challenge level, but there are no grades associated with the challenge level you chose. However, your peers will be asked to ensure you at least met a minimum quality for a given technique in order to pass. Implement the technique fully (or exceed it!) and you should be able to earn full grades for the assignment.
#
#
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<NAME>., <NAME>., & <NAME>. (2014, April). [Sample-oriented task-driven visualizations: allowing users to make better, more confident decisions.](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Ferreira_Fisher_Sample_Oriented_Tasks.pdf)
# &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (pp. 571-580). ACM. ([video](https://www.youtube.com/watch?v=BI7GAs-va-Q))
#
#
# In this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/Ferreira_Fisher_Sample_Oriented_Tasks.pdf) the authors describe the challenges users face when trying to make judgements about probabilistic data generated through samples. As an example, they look at a bar chart of four years of data (replicated below in Figure 1). Each year has a y-axis value, which is derived from a sample of a larger dataset. For instance, the first value might be the number votes in a given district or riding for 1992, with the average being around 33,000. On top of this is plotted the 95% confidence interval for the mean (see the boxplot lectures for more information, and the yerr parameter of barcharts).
#
# <br>
# <img src="readonly/Assignment3Fig1.png" alt="Figure 1" style="width: 400px;"/>
# <h4 style="text-align: center;" markdown="1"> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Figure 1 from (Ferreira et al, 2014).</h4>
#
# <br>
#
# A challenge that users face is that, for a given y-axis value (e.g. 42,000), it is difficult to know which x-axis values are most likely to be representative, because the confidence levels overlap and their distributions are different (the lengths of the confidence interval bars are unequal). One of the solutions the authors propose for this problem (Figure 2c) is to allow users to indicate the y-axis value of interest (e.g. 42,000) and then draw a horizontal line and color bars based on this value. So bars might be colored red if they are definitely above this value (given the confidence interval), blue if they are definitely below this value, or white if they contain this value.
#
#
# <br>
# <img src="readonly/Assignment3Fig2c.png" alt="Figure 1" style="width: 400px;"/>
# <h4 style="text-align: center;" markdown="1"> Figure 2c from (Ferreira et al. 2014). Note that the colorbar legend at the bottom as well as the arrows are not required in the assignment descriptions below.</h4>
#
# <br>
# <br>
#
# **Easiest option:** Implement the bar coloring as described above - a color scale with only three colors, (e.g. blue, white, and red). Assume the user provides the y axis value of interest as a parameter or variable.
#
#
# **Harder option:** Implement the bar coloring as described in the paper, where the color of the bar is actually based on the amount of data covered (e.g. a gradient ranging from dark blue for the distribution being certainly below this y-axis, to white if the value is certainly contained, to dark red if the value is certainly not contained as the distribution is above the axis).
#
# **Even Harder option:** Add interactivity to the above, which allows the user to click on the y axis to set the value of interest. The bar colors should change with respect to what value the user has selected.
#
# **Hardest option:** Allow the user to interactively set a range of y values they are interested in, and recolor based on this (e.g. a y-axis band, see the paper for more details).
#
# ---
#
# *Note: The data given for this assignment is not the same as the data used in the article and as a result the visualizations may look a little different.*

# +
# Use the following data for this assignment:
import pandas as pd
import numpy as np

np.random.seed(12345)

# Four rows = four yearly samples of 3650 draws each; rows are indexed by year.
df = pd.DataFrame([np.random.normal(32000,200000,3650),
                   np.random.normal(43000,100000,3650),
                   np.random.normal(43500,140000,3650),
                   np.random.normal(48000,70000,3650)],
                  index=[1992,1993,1994,1995])

# +
# Solution to the "Even Harder" option: bar colors follow a continuous
# gradient based on how much of each year's sampling distribution lies
# below the user-selected y value, and clicking the figure re-selects y.
# %matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib.colors
from matplotlib.lines import Line2D
from scipy import stats

fig = plt.figure()

# Color coding map: probabilities in [0, 1] are mapped through a Normalize
# instance onto the rainbow colormap, shown as a horizontal colorbar legend.
cmap = plt.cm.rainbow
norm = matplotlib.colors.Normalize()
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
fig.colorbar(sm, orientation="horizontal")

# The mean, std and 95% confidence intervals of each year's sample;
# yerr is the t-based half-width of the CI for the mean.
n = df.shape[1]
means = df.mean(axis=1)
stds = df.std(axis=1)
yerr = stds / np.sqrt(n) * stats.t.ppf(1-0.05/2, n - 1)

def prob(y):
    """ Calculate the probability of y being part of the distribution.

    Returns, per year, the t-CDF of the standardized distance between y and
    the sample mean — i.e. how likely the year's mean lies below y.
    """
    return stats.t.cdf((y - means) / stds * np.sqrt(n), n - 1)

def draw(y):
    """ Draw the entire axis: bars with CI error bars, colored by prob(y),
    plus a horizontal gray line at the selected y value. """
    ax = plt.subplot(111)
    plt.title('Interactive chart for sampled data from 1992 to 1995')
    width = 1
    bars = ax.bar(range(df.shape[0]), means, width, color='r', yerr=yerr, capsize=10)
    # fixed limits so repeated redraws keep a stable viewport
    ax.set_autoscaley_on(False)
    ax.set_ylim(0, 55000)
    ax.set_xlim(-0.5,3.5)
    ax.set_xticks(range(df.shape[0]))
    ax.set_xticklabels(df.index)
    # 1 - p so that bars far BELOW y and far ABOVE y land at opposite
    # ends of the colormap
    for b,p in zip(bars, prob(y)):
        b.set_color(cmap(norm(1 - p)))
    ax.plot([-0.5, 3.5], [y, y], color='gray')

def onclick(event):
    """ Onclick: clear the axes and redraw with the clicked y value. """
    plt.cla()
    y = event.ydata
    draw(y)

cid = fig.canvas.mpl_connect('button_press_event', onclick)
draw(40000) # initialize axis
plt.show()
# -
drafts/Week 3/Assignment3-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

class Solution(object):
    def titleToNumber(self, s):
        """
        Convert an Excel sheet column title to its 1-based column number.

        :type s: str  (uppercase letters 'A'-'Z', e.g. 'A', 'AB', 'ZY')
        :rtype: int   ('A' -> 1, 'Z' -> 26, 'AA' -> 27, ...)
        """
        # The title is a base-26 numeral whose digits 'A'..'Z' mean 1..26,
        # so a single left-to-right pass suffices: no lookup table and no
        # repeated string slicing needed.
        num = 0
        for ch in s:
            num = num * 26 + (ord(ch) - ord('A') + 1)
        return num

s = Solution()
s.titleToNumber('ZY')
algorithms/171-Excel-Sheet-Column-Number.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# Data Type : Microarray data
# Dependent Variable : 0 or 1
# Human Acute Myeloid Leukemia (AML) or Acute Lymphoblast Leukemia (ALL))

# Penalized logistic regression (ridge / LASSO / elastic net) on the
# high-dimensional leukemia gene-expression data (p >> n).
library(spikeslab)
data(leukemia)
library(glmnet)

# +
# Design matrix: all gene-expression columns; response: first column (AML/ALL).
x <- as.matrix(leukemia[,-1])
y <- leukemia[,1]

# rows, columns (number of samples and number of variables)
cat( nrow(leukemia), ncol(leukemia) )
# -

# LASSO coefficient path (glmnet default alpha = 1).
lasso.leukemia <- glmnet(x, y, family = "binomial")
plot(lasso.leukemia)
# Maximum number of selectable variables = number of samples (LASSO limit when p > n).

# Ridge coefficient path (alpha = 0).
ridge.leukemia <- glmnet(x, y, family = "binomial", alpha = 0)
plot(ridge.leukemia)
# Ridge keeps every variable (shrinks but never zeroes coefficients).

# +
############### Ridge
# Find the tuning parameter (lambda) via cross-validation.
currentTs <- Sys.time() # about five seconds later

# Repeat 5-fold CV 200 times under different seeds and record the CV-error-
# minimizing lambda each time, to average out fold-assignment noise.
lambda.vec.ridge <- rep(0, 200)
for (i in 1 : 200) {
  set.seed(i)
  cv.ridge.leukemia <- cv.glmnet(x, y, nfolds = 5, alpha = 0, family = "binomial")
  index.min.ridge <- which.min(cv.ridge.leukemia$cvm)
  lambda.vec.ridge[i] <- cv.ridge.leukemia$lambda[index.min.ridge]
}
elapsed <- Sys.time() - currentTs
print(elapsed)

# +
# Ridge: use the mean of the 200 selected lambdas as the final tuning value.
opt.lambda.ridge <- mean(lambda.vec.ridge)

# Fit the ridge regression model on all samples.
model.ridge <- glmnet(x, y, lambda = opt.lambda.ridge, alpha = 0, family = "binomial")

# Find the genes that explain the binary response (AML vs ALL):
# number of variables (genes) with non-zero coefficients.
length(model.ridge$beta[which(model.ridge$beta != 0)])

# Fitted probabilities via the inverse logit of the linear predictor.
X.beta.ridge <- predict(model.ridge, newx = x, s = opt.lambda.ridge)
fitted.y.ridge <- exp(X.beta.ridge) / (1 + exp(X.beta.ridge))

# The observed response is 0 or 1, so round fitted probabilities at 0.5 and
# compare observed vs fitted values (entries of 0 = correct classifications).
table(y - round (fitted.y.ridge))

# +
############### LASSO
# Same repeated-CV scheme for the LASSO tuning parameter.
currentTs <- Sys.time() # about five seconds later

lambda.vec.lasso <- rep(0, 200)
for (i in 1 : 200) {
  set.seed(i)
  cv.lasso.leukemia <- cv.glmnet(x, y, nfolds = 5, family = "binomial")
  index.min.lasso <- which.min(cv.lasso.leukemia$cvm)
  lambda.vec.lasso[i] <- cv.lasso.leukemia$lambda[index.min.lasso]
}
elapsed <- Sys.time() - currentTs
print(elapsed)

# +
# LASSO: mean of the selected lambdas as the final tuning value.
opt.lambda.lasso <- mean(lambda.vec.lasso)

# Fit the LASSO model on all samples.
model.lasso <- glmnet(x, y, lambda = opt.lambda.lasso, family = "binomial")

# Number of selected variables (genes) with non-zero coefficients.
length(model.lasso$beta[which(model.lasso$beta != 0)])

# Fitted probabilities.
X.beta.lasso <- predict(model.lasso, newx = x, s = opt.lambda.lasso)
fitted.y.lasso <- exp(X.beta.lasso) / (1 + exp(X.beta.lasso))

# Compare observed vs rounded fitted values.
table(y - round (fitted.y.lasso))

# +
# Elastic Net
# When p > n, LASSO cannot select more than n variables;
# with grouped variables, LASSO tends to pick only one per group;
# with highly correlated variables, the LASSO solution path is unstable.
elasticnet.leukemia.0.2 <- glmnet(x, y, family = "binomial", alpha = 0.2)
elasticnet.leukemia.0.4 <- glmnet(x, y, family = "binomial", alpha = 0.4)
elasticnet.leukemia.0.6 <- glmnet(x, y, family = "binomial", alpha = 0.6)
elasticnet.leukemia.0.8 <- glmnet(x, y, family = "binomial", alpha = 0.8)

layout(mat=matrix(c(1, 2, 3, 4), nrow=2, byrow = TRUE))
plot(elasticnet.leukemia.0.2)
plot(elasticnet.leukemia.0.4)
plot(elasticnet.leukemia.0.6)
plot(elasticnet.leukemia.0.8)

# NOTE(review): currentTs here is left over from the previous LASSO cell, so
# this elapsed value spans more than the elastic-net fits — confirm intent.
elapsed <- Sys.time() - currentTs
print(elapsed)

# +
# Repeated CV for the elastic-net (alpha = 0.5) tuning parameter.
currentTs <- Sys.time() # about five seconds later

lambda.enet.vec=rep(0,200)
for (i in 1:200) {
  set.seed(i)
  cv.enet <- cv.glmnet(x, y, nfolds = 5, family = "binomial", alpha = 0.5)
  index.enet <- which.min(cv.enet$cvm)
  lambda.enet.vec[i] <- cv.enet$lambda[index.enet]
}
elapsed <- Sys.time() - currentTs
print(elapsed)

# +
opt.lambda.enet <- mean(lambda.enet.vec)
model.enet <- glmnet(x,y,lambda = opt.lambda.enet,family = "binomial", alpha = 0.5)

# Find the genes that explain the binary response (AML vs ALL):
# number of selected variables (genes) with non-zero coefficients.
length(model.enet$beta[which(model.enet$beta != 0)])

# Fitted probabilities.
X.beta.enet <- predict(model.enet, newx = x, s = opt.lambda.enet)
fitted.y.enet <- exp(X.beta.enet) / (1 + exp(X.beta.enet))

# Compare observed vs rounded fitted values.
table(y - round (fitted.y.enet))
2nd semester/03.ResearchMethod2/Leukemia.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/jimzhang629/RippleNet/blob/master/edited_RippleNet_training_unidirectional.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ubB9AcSmaT5z" # # RippleNet_training_unidirectional # Training of simple unidirectional recurrent neural network (RNN) implementation in `tensorflow.keras` using LSTM (long short-term memory) layers to identify time of occurence of sharp wave ripple (SPW-R) events in temporal LFP data. # # Author: <NAME> (<https://github.com/espenhgn>) # # LICENSE: <https://github.com/CINPLA/RippleNet/blob/master/LICENSE> # + id="NiELVop4aT54" colab={"base_uri": "https://localhost:8080/"} outputId="eddcd533-ba63-4071-f686-cff45c421400" # allow running on Google Colab for training using Google Drive for file access try: from google.colab import drive drive.mount('/content/gdrive') # %cd gdrive/My\ Drive/RippleNet # %tensorflow_version 2.x except: pass # + id="PMwOuBEXaT6H" # %matplotlib inline # + id="mUcZ1pzZaT6M" # import modules import os import numpy as np import scipy.signal as ss import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib import colors import tensorflow as tf from tensorflow import keras from tensorflow.keras.utils import plot_model import ripplenet.edited_models import h5py import pickle import random # + id="wOvpLagbaT6S" colab={"base_uri": "https://localhost:8080/"} outputId="24f8cdaf-fb4e-4252-87a6-22efd6dccb16" from tensorflow.python.client import device_lib print(device_lib.list_local_devices()) # + id="DwjTugQJaT6Y" colab={"base_uri": "https://localhost:8080/"} 
outputId="629ab2c2-d9f5-4c87-d4dc-e55aac6db530" print(tf.__version__) print(tf.test.gpu_device_name()) print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU'))) # + id="tz9QovhfaT6d" # set random seeds with some additional environment variables to ensure deterministic output random_seed = 208 os.environ['TF_DETERMINISTIC_OPS'] = '1' os.environ['PYTHONHASHSEED']=str(random_seed) random.seed(random_seed) np.random.seed(random_seed) tf.random.set_seed(random_seed) # + id="z1uNRJlXHC3-" # select dataset (may have generated different sets.) dataset_index = 0 # + [markdown] id="OzZCG0OdaT6j" # # Load training/validation data # + id="Lty8L4jbkKSM" # select species for training/validation data (mouse, rat or both) mouse = True rat = False # + id="S4Izedhc3s_g" # output destination output_folder = 'trained_networks' if not os.path.isdir(output_folder): os.mkdir(output_folder) # prefix for trained network files (training loss/MSE, weights, `best' weights) rnn_prefix = 'ripplenet_unidirectional' # + id="e2PaOdJzC0vb" colab={"base_uri": "https://localhost:8080/"} outputId="14938508-3ef4-4779-cbc1-eb249a757180" if mouse: # training and validation files f_name_train = 'train_{:02}.h5' f_name_val = 'validation_{:02}.h5' # training data f = h5py.File(os.path.join('data', f_name_train.format(dataset_index)), 'r') X_train = np.expand_dims(f['X0'][:], -1) Y_train = f['Y'][:] f.close() # validation data f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X_val = np.expand_dims(f['X0'][:], -1) Y_val = f['Y'][:] f.close() # load some data for plotting f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X0 = f['X0'][:] X1 = f['X1'][:] S = f['S'][:] Y = f['Y'][:] S_freqs = f['S_freqs'][:] print(f) f.close() print(X_train.shape) print(Y_train.shape) print(X_val.shape) print(Y_val.shape) # + id="F8hc3OYFcTlr" def prime_factors(n): i = 2 factors = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(i) 
if n > 1: factors.append(n) return factors # + colab={"base_uri": "https://localhost:8080/"} id="deZPQJBBZWhQ" outputId="af8b2c29-7955-4856-d8f4-08406a96bdbb" print(X_train.shape[0]) print("prime factorization of X_train[0] * X_train[1] :", prime_factors(X_train.shape[0]*X_train.shape[1])) print("prime factorization of X_val[0] * X_val[1] :", prime_factors(X_val.shape[0]*X_val.shape[1])) print("The common factors are: 1, 2, 5, 10, 25, 50, 125, 250, 625, 1250, 3125, 6250, 15625, 31250") # + colab={"base_uri": "https://localhost:8080/"} id="foC9zzxF2Es1" outputId="8ecc64b0-e171-4933-cb49-882c8d6dbb88" samples_per_time_segment = 625 #how many samples per each time segment. Some combination of the prime factors. time_segment = samples_per_time_segment / 1250 #how many seconds are the time segments we're training on train_first_dim = int(X_train.shape[0]*X_train.shape[1] / samples_per_time_segment) #must make sure that this is evenly divisible val_first_dim = int(X_val.shape[0]*X_val.shape[1] / samples_per_time_segment) print(val_first_dim) print(time_segment) # + colab={"base_uri": "https://localhost:8080/"} id="d0ASxV4p3klt" outputId="f3a37f98-66a1-4957-9d36-781b82cdc128" reshaped_X_train = np.reshape(X_train, (train_first_dim, samples_per_time_segment, 1)) reshaped_Y_train = np.reshape(Y_train, (train_first_dim, samples_per_time_segment, 1)) reshaped_X_val = np.reshape(X_val, (val_first_dim, samples_per_time_segment, 1)) reshaped_Y_val = np.reshape(Y_val, (val_first_dim, samples_per_time_segment, 1)) #print(reshaped_X_train) print(reshaped_X_train.shape) print(reshaped_Y_train.shape) print(reshaped_X_val.shape) print(reshaped_Y_val.shape) # + id="uwZ4kvu11Va0" # Add rat training/validation data to sets if rat and mouse: # rat f_name_train = 'train_tingley_{:02}.h5' f_name_val = 'validation_tingley_{:02}.h5' # training data f = h5py.File(os.path.join('data', f_name_train.format(dataset_index)), 'r') X_train = np.concatenate((X_train, np.expand_dims(f['X0'][:], -1))) 
Y_train = np.concatenate((Y_train, f['Y'][:])) f.close() # validation data f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X_val = np.concatenate((X_val, np.expand_dims(f['X0'][:], -1))) Y_val = np.concatenate((Y_val, f['Y'][:])) f.close() # load some data for plotting f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X0 = np.concatenate((X0, f['X0'][:])) X1 = np.concatenate((X1, f['X1'][:])) S = np.concatenate((S, f['S'][:])) Y = np.concatenate((Y, f['Y'][:])) f.close() # + id="q10NkE38kKSa" if rat and not mouse: # rat f_name_train = 'train_tingley_{:02}.h5' f_name_val = 'validation_tingley_{:02}.h5' # training data f = h5py.File(os.path.join('..', 'data', f_name_train.format(dataset_index)), 'r') X_train = np.expand_dims(f['X0'][:], -1) Y_train = f['Y'][:] f.close() # validation data f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X_val = np.expand_dims(f['X0'][:], -1) Y_val = f['Y'][:] f.close() # load some data for plotting f = h5py.File(os.path.join('data', f_name_val.format(dataset_index)), 'r') X0 = f['X0'][:] X1 = f['X1'][:] S = f['S'][:] Y = f['Y'][:] S_freqs = f['S_freqs'][:] f.close() # + id="Dl-v81WHkKSe" # needed parameters Fs = 1250 # Hz, sampling freq time = np.arange(X0.shape[1]) / Fs # center raw data X0 = (X0.T - X0.mean(axis=-1)).T # total number of samples n_samples = X0.shape[0] # + colab={"base_uri": "https://localhost:8080/", "height": 730} id="VszoOq-WaT60" outputId="92408214-f98a-4b13-d51e-fecce96ae59e" # plot all labels and raw data matrices fig, axes = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12, 12)) axes[0].pcolormesh(time, np.arange(n_samples), Y[:, :, 0]) axes[0].set_ylabel('#') axes[0].set_title('labels (y)') axes[1].pcolormesh(time, np.arange(n_samples), X0, vmin=-X0.std()*3, vmax=X0.std()*3) axes[1].set_ylabel('#') axes[1].set_xlabel('t (s)') axes[1].set_title('raw data (X)') for ax in axes: ax.axis(ax.axis('tight')) # + colab={"base_uri": 
"https://localhost:8080/", "height": 1000} id="xqGT2ICzaT64" outputId="7bd98cdf-a869-44fe-ace8-5ce3b54f15ab" # plot wavelet spectrograms vs. labels and raw data for some samples for i in range(5): gs = GridSpec(2, 1) fig = plt.figure(figsize=(12, 6)) ax0 = fig.add_subplot(gs[0, 0]) ax0.plot(time, X0[i, ], label='$X(t)$') ax0.plot(time, X1[i, ], label=r'$\phi_\mathrm{bp}(t)$') ax0.plot(time, Y[i, :, 0], label='label ($y$)' ) ax0.legend(ncol=2) ax0.axis(ax0.axis('tight')) ax0.set_title('label, raw data and spectrograms') plt.setp(ax0.get_xticklabels(), visible=False) ax1 = fig.add_subplot(gs[1:, 0], sharex=ax0) vmin, vmax = np.exp(np.percentile(np.log(S), [1, 99])) im = ax1.pcolormesh(time, S_freqs, S[i, ].T, norm=colors.LogNorm(vmin=vmin, vmax=vmax), cmap='inferno') ax1.axis(ax1.axis('tight')) ax1.set_ylabel('$f$ (Hz)') ax1.set_xlabel('$t$ (s)') # + [markdown] id="GF-Om-cnaT6-" # # Set up recurrent neural network # + id="3VeJA7jpkKSt" model = ripplenet.edited_models.get_unidirectional_LSTM_model(input_shape=(None, reshaped_X_train.shape[2]), layer_sizes=[20, 10, 5, 5], seed=random_seed+1) # + id="nqH3VcApaT7J" colab={"base_uri": "https://localhost:8080/"} outputId="42fa1e8e-9d64-4960-d5c2-c94b846263ac" model.summary() # + id="4IE_8BYTRz-4" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a71e5351-a525-44bc-cd35-13015ee7d0cd" plot_model(model, show_shapes=True, expand_nested=True) # + id="Wu5rxBaaaT7N" # callback for model checkpoints when validation MSE improves filepath = os.path.join(output_folder, '{}_best_random_seed{}.h5'.format(rnn_prefix, random_seed)) checkpoint_best = keras.callbacks.ModelCheckpoint(filepath, monitor='val_mse', verbose=1, save_best_only=True, mode='min') # MSE/loss callback to .csv callback_hist = keras.callbacks.CSVLogger(os.path.join(output_folder, '{}_history_random_seed{}.csv'.format(rnn_prefix, random_seed))) callbacks_list = [checkpoint_best, callback_hist] # + colab={"base_uri": "https://localhost:8080/"} 
id="ktXzlvIeaT7S" outputId="e421e8f4-23a4-4348-fc15-c837c5b94339" # train model history = model.fit(reshaped_X_train, reshaped_Y_train, batch_size=20, epochs=50, callbacks=callbacks_list, validation_data=(reshaped_X_val, reshaped_Y_val)) # + id="IOaPE2duWVjl" # save history to a pickle so we can load it later with open(os.path.join(output_folder, '{}_history_random_seed{}.pkl'.format(rnn_prefix, random_seed) ), 'wb') as f: pickle.dump(history.history, f) # + id="jijEvsoEaT7X" colab={"base_uri": "https://localhost:8080/", "height": 747} outputId="88aeb330-9511-4415-d287-b7c3f8ff811a" plt.figure(figsize=(12, 12)) plt.semilogy(history.history['loss'], '-o', label='loss') plt.semilogy(history.history['val_loss'], '-o', label='val_loss') plt.semilogy(history.history['mse'], '-o', label='mse') plt.semilogy(history.history['val_mse'], '-o', label='val_mse') plt.legend() plt.xlabel('epochs') plt.ylabel('MSE') plt.title('training/validation MSE') # + id="xLF11w-saT7a" # Save the trained model model.save(os.path.join(output_folder, '{}_random_seed{}.h5'.format(rnn_prefix, random_seed))) # + id="L6ith6X1rYVJ"
edited_RippleNet_training_unidirectional.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # Mathematical Constant

# +
import math

# The circle constant pi: ratio of a circle's circumference to its diameter.
print(math.pi)

# +
# Euler's number e: base of the natural logarithm.
print(math.e)

# + [markdown]
# # `sin(x)`
# Return the sine of x radians.
# +
import math

# Sample the sine function at a few representative angles.  Arguments are in
# radians.  Note: sin(math.pi) is not exactly 0 because math.pi is only a
# finite-precision approximation of pi.
for label, angle in [
    ("sin(3) : ", 3),
    ("sin(-3) : ", -3),
    ("sin(0) : ", 0),
    ("sin(math.pi) : ", math.pi),
    ("sin(math.pi/2) : ", math.pi / 2),
]:
    print(label, math.sin(angle))

# + [markdown]
# # `cos(x)`
# Return the cosine of x radians.

# +
import math

# Sample the cosine function at a few representative angles (in radians).
for label, angle in [
    ("cos(3) : ", 3),
    ("cos(-3) : ", -3),
    ("cos(0) : ", 0),
    ("cos(math.pi) : ", math.pi),
    ("cos(2*math.pi) : ", 2 * math.pi),
]:
    print(label, math.cos(angle))

# + [markdown]
# # `tan(x)`
# Return the tangent of x radians.
# +
import math

# Sample the tangent function at a few representative angles (in radians).
# Fixed: the first label previously read "(tan(3) : " -- the stray "(" was a
# typo in the printed text.
# tan(pi/2) is mathematically undefined; because math.pi/2 is only a
# finite-precision approximation, math.tan returns a very large finite value
# instead of raising an error.
print("tan(3) : ", math.tan(3))
print("tan(-3) : ", math.tan(-3))
print("tan(0) : ", math.tan(0))
print("tan(math.pi) : ", math.tan(math.pi))
print("tan(math.pi/2) : ", math.tan(math.pi/2))
print("tan(math.pi/4) : ", math.tan(math.pi/4))

# + [markdown]
# # `acos(x)`
# Return the arc cosine of x, in radians.

# +
import math

# Arc cosine: the valid domain is [-1, 1]; math.acos raises ValueError
# outside it.  Results lie in [0, pi].
print("acos(0.64) : ", math.acos(0.64))
print("acos(0) : ", math.acos(0))
print("acos(-1) : ", math.acos(-1))
print("acos(1) : ", math.acos(1))

# + [markdown]
# # `asin(x)`
# Return the arc sine of x, in radians.
# +
import math

# Arc sine: the valid domain is [-1, 1]; results lie in [-pi/2, pi/2].
for label, value in [
    ("asin(0.64) : ", 0.64),
    ("asin(0) : ", 0),
    ("asin(-1) : ", -1),
    ("asin(1) : ", 1),
]:
    print(label, math.asin(value))

# + [markdown]
# # `atan(x)`
# Return the arc tangent of x, in radians.

# +
import math

# Arc tangent: defined for every real input; results lie in (-pi/2, pi/2).
for label, value in [
    ("atan(0.64) : ", 0.64),
    ("atan(0) : ", 0),
    ("atan(10) : ", 10),
    ("atan(-1) : ", -1),
    ("atan(1) : ", 1),
]:
    print(label, math.atan(value))

# + [markdown]
# # `degrees(x)`
# Converts angle x from radians **to degrees**.
# +
import math

# Convert a few angles from radians to degrees.
for label, radians_value in [
    ("degrees(3) : ", 3),
    ("degrees(-3) : ", -3),
    ("degrees(0) : ", 0),
    ("degrees(math.pi) : ", math.pi),
    ("degrees(math.pi/2) : ", math.pi / 2),
    ("degrees(math.pi/4) : ", math.pi / 4),
]:
    print(label, math.degrees(radians_value))

# + [markdown]
# # `radians(x)`
# Converts angle x from degrees **to radians**.

# +
import math

# Convert a few angles from degrees to radians.  Note that the math.pi-based
# inputs here are treated as *degree* values (e.g. radians(math.pi) converts
# about 3.14 degrees), mirroring the original demonstration.
for label, degrees_value in [
    ("radians(3) : ", 3),
    ("radians(-3) : ", -3),
    ("radians(0) : ", 0),
    ("radians(math.pi) : ", math.pi),
    ("radians(math.pi/2) : ", math.pi / 2),
    ("radians(math.pi/4) : ", math.pi / 4),
]:
    print(label, math.radians(degrees_value))
Day 2/06 Trigonometric Function.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Within the library the required packages for the code to run correctly are imported. # # If there's an import error check the following things: # # 1. Check *pandas* is isntalled. # 2. Check *fcs parser* is installed. # 3. Check *OS* is installed. # # Once you've checked this run this code in 4 simple steps: # # 1. import the *cytometry_lib* library. import cytometry_lib as cy # Make sure in the following cell that the directories are correct. # Only the fcs must exist and have fcs data on it as the other two will be created in the process. # The directory path syntaxis varies between OS # # # 2. Save directory locations. fcs_dir = './fcs sample data/' # This is the directory of the .fcs files in this case csv_dir = 'csv_files/' # This is where the converted data will be stored merged_csv_dir = 'merged_data/' # This is where the final merged dataframe will be stored merged_filename = 'data' # This is the name # The next line of code converts fcs files in the fcs directory to csv files in the csv directory. # # # 3. Convert fcs files to csv files. cy.fcs_to_csv(fcs_dir,csv_dir) # The next line of code takes the csv files, merges them into a single dataframe and saves it in the specified directory. # # # 4. Merge the data into the same dataframe. cy.merge_dfs(csv_dir,merged_csv_dir,merged_filename)
HOW TO USE THE LIBRARY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 # %matplotlib inline import os,sys,inspect currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0,parentdir) import numpy as np import pandas as pd import seaborn as sn import json import glob, os import matplotlib.pyplot as plt import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D from scipy import stats from numpy.random import seed from scipy.stats import kruskal from termcolor import colored import my_plots from matplotlib.pylab import savefig # + # plot settings lw = 1.5 fs = 13 params = { 'axes.labelsize': fs, 'font.size': fs, 'legend.fontsize': fs, 'xtick.labelsize': fs, 'ytick.labelsize': fs, 'text.usetex': False, 'figure.figsize': [4, 4], 'boxplot.boxprops.linewidth' : lw, 'boxplot.whiskerprops.linewidth' : lw, 'boxplot.capprops.linewidth' : lw, 'boxplot.medianprops.linewidth' : lw, # 'text.usetex' : True, # 'font.family' : 'serif', } mpl.rcParams.update(params) # + # plot settings lw = 1.5 fs = 13 params = { 'axes.labelsize': fs, 'font.size': fs, 'legend.fontsize': fs, 'xtick.labelsize': fs, 'ytick.labelsize': fs, 'text.usetex': False, 'figure.figsize': [6, 3], 'boxplot.boxprops.linewidth' : lw, 'boxplot.whiskerprops.linewidth' : lw, 'boxplot.capprops.linewidth' : lw, 'boxplot.medianprops.linewidth' : lw, # 'text.usetex' : True, # 'font.family' : 'serif', } mpl.rcParams.update(params) # - # # Plots for the haptics experiment def dfFromJSON(file): with open(file) as f: temp = [] for line in f: temp.append(json.loads(line)) df = pd.io.json.json_normalize(temp[0]['allLogs']) df.columns = df.columns.map(lambda x: x.split("*.")[-1]) return df def appendToLogs(file, log, _type): df = dfFromJSON(file) 
log[_type].append(df) # + # path_to_folder = "/Volumes/GoogleDrive/My Drive/Matteo/EPFL/LIS/PhD/Bidirectional_wearable_interface/DATA/Haptics/Haptics_X/" path_to_folder = "../Haptics_new/Haptics_X/" subjects = glob.glob(path_to_folder + "*/") logs = {} for curr in subjects: logs[curr] = [] subjects.sort() subjects # + # # used to remove outliers # subjects.remove(subjects[5]) # subjects.remove(subjects[4]) # logs = {} # for curr in subjects: # logs[curr] = [] # subjects # + EXP_N_THOMAS = 4 EXP_N = 5 for subject in subjects: print(subject) files = glob.glob(subject+"*.json") files.sort() if len(files) != EXP_N and len(files) != EXP_N_THOMAS: print('wrong number of experiments!') break for file in files: logs[subject].append(dfFromJSON(file)) # + jupyter={"outputs_hidden": true} fields = ['first motion', 'last motion', 'first remote', 'last remote'] interfaces = ['motion', 'remote'] sub_remote = [x for x in subjects if 'Controller' in x] sub_motion = [x for x in subjects if 'Glove_haptics' in x] t = [logs[x] for x in sub_motion] logs['motion'] = [] for i in t: for j in i: logs['motion'].append(j) t = [logs[x] for x in sub_remote] logs['remote'] = [] for i in t: for j in i: logs['remote'].append(j) logs['first motion'] = [logs[x][0] for x in sub_motion] logs['last motion'] = [logs[x][-1] for x in sub_motion] logs['first remote'] = [logs[x][0] for x in sub_remote] logs['last remote'] = [logs[x][-1] for x in sub_remote] # + jupyter={"outputs_hidden": true} def t_test_kruskal(X, Y): # Kruskal-Wallis H-test # seed the random number generator seed(1) # compare samples stat, p = kruskal(X, Y) return [stat, p] def str_from_p(p): if p<0.01: add_str = ' !!!!!!!!!!' elif p<0.05: add_str = ' !!!!!!' elif p<0.1: add_str = ' !' 
else: add_str = '' return add_str def print_p(p): col = None if p<0.01: col = 'green' elif p<0.05: col = 'yellow' elif p<0.1: col = 'red' if col is not None: print(colored('p = '+ str(p) + str_from_p(p), col)) else: print('p = '+ str(p) + str_from_p(p)) def runBasicAnalysis(f): var = {} var['means'] = [] var['stds'] = [] for i in fields: var[i] = [] for j in logs[i]: var[i].append(f(j)) print(i) print(var[i]) var['means'].append(np.mean(var[i])) var['stds'].append(np.std(var[i])) print('mean = ', var['means'][-1]) print('std = ', var['stds'][-1]) print() for idx,i in enumerate(fields): for j in fields[idx+1:]: if i != j: t, p = t_test_kruskal(var[i],var[j]) print (i,j) print_p(p) var['diff mot'] = 1-np.array(var['last motion'])/np.array(var['first motion']) var['diff rem'] = 1-np.array(var['last remote'])/np.array(var['first remote']) print() print('motion ratio') print(var['diff mot']) print(np.mean(var['diff mot'])) print(np.std(var['diff mot'])) print() print('remote ratio') print(var['diff rem']) print(np.mean(var['diff rem'])) print(np.std(var['diff rem'])) t, p = t_test_kruskal(var['diff mot'], var['diff rem']) print() print('p = ', p, str_from_p(p)) var['diff first'] = 1-np.array(var['first motion'])/np.array(var['first remote']) var['diff last'] = 1-np.array(var['last motion'])/np.array(var['last remote']) print() print('firts ratio') print(var['diff first']) print(np.mean(var['diff first'])) print(np.std(var['diff first'])) print() print('last ratio') print(var['diff last']) print(np.mean(var['diff last'])) print(np.std(var['diff last'])) t, p = t_test_kruskal(var['diff first'], var['diff last']) print() print('p = ', p, str_from_p(p)) return var def runHapticsAnalysis(f): var = {} var['means'] = [] var['stds'] = [] for i in interfaces: var[i] = [] for j in logs[i]: var[i].append(f(j)) print(i) print(var[i]) var['means'].append(np.mean(var[i])) var['stds'].append(np.std(var[i])) print('mean = ', var['means'][-1]) print('std = ', var['stds'][-1]) print() 
for idx,i in enumerate(interfaces): for j in interfaces[idx+1:]: if i != j: t, p = t_test_kruskal(var[i],var[j]) print_p(p) return var def basic_plot(var): fig = plt.figure(figsize=(12, 4)) ax = fig.add_subplot(131) ax.bar([0, 1, 2, 3], var['means'], yerr=var['stds']) plt.xticks(range(4), fields, rotation = 10) ax = fig.add_subplot(132) ax.bar([0, 1], [np.mean(var['diff mot']), np.mean(var['diff rem'])], yerr=[np.std(var['diff mot']), np.std(var['diff rem'])]) plt.xticks(range(2), ['motion', 'remote'], rotation = 10) ax = fig.add_subplot(133) ax.bar([0, 1], [np.mean(var['diff first']), np.mean(var['diff last'])], yerr=[np.std(var['diff first']), np.std(var['diff last'])]) plt.xticks(range(2), ['first', 'last'], rotation = 10) def basic_box(data, names, col = 'b', leg = False, ax = None, save = False, where = None, y = '', xlim = None, ylim = None, xticks = None, yticks = None, whis = 1.5): c0 = np.array([0,0,0])/256 c1 = np.array([150,0,0])/256 c2 = np.array([0,0,100])/256 c3 = np.array([0,100,0])/256 col = [c0, c1, c2, c3] if ax is None: plt.figure() ax = plt.subplot(1, 1, 1) my_plots.boxplot_elegant(ax, data[names[0]], [1], col[0], whis = whis) my_plots.boxplot_elegant(ax, data[names[1]], [1.4], col[1], whis = whis) plt.grid() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) if xlim is not None: plt.xlim(xlim) if ylim is not None: plt.ylim(ylim) if xticks is not None: plt.xticks(xticks) if yticks is not None: plt.yticks(yticks) plt.xticks([1, 1.4], ['Remote', 'Motion']) plt.ylabel(y) if leg: for idx,i in enumerate(labels): ax.scatter(0,0, color = c[idx], label = i) plt.legend() if save: savefig(where, bbox_inches='tight') return ax # + jupyter={"outputs_hidden": true} def moving_average(data_set, periods=3, fill_with_zeros = True): if data_set.size < periods: return data_set if isinstance(data_set, pd.DataFrame): print('a') data_set.index = range(len(data_set)) weights = np.ones(periods) / periods ma = np.convolve(data_set, weights, 
mode='valid') if fill_with_zeros: fill_before = np.ones(int(np.ceil((len(data_set) - len(ma))/2))) * data_set[0] fill_after = np.ones(int(np.floor((len(data_set) - len(ma))/2))) * data_set[-1] ma = np.hstack([fill_before, ma, fill_after]) return ma def filter_position(df, steps): l = moving_average(df['dronePosition.x'], periods=steps, fill_with_zeros = False) df['dronePositionFiltered.x'] = np.append([l[0]]*(steps-1), l) l = moving_average(df['dronePosition.y'], periods=steps, fill_with_zeros = False) df['dronePositionFiltered.y'] = np.append([l[0]]*(steps-1), l) l = moving_average(df['dronePosition.x'], periods=steps, fill_with_zeros = False) df['dronePositionFiltered.x'] = np.append([l[0]]*(steps-1), l) return df def running_average(signal, steps): l = moving_average(signal, periods=steps, fill_with_zeros = False) out = np.append([l[0]]*(steps-1), l) return out # + jupyter={"outputs_hidden": true} logs1 = logs.copy() logs1['first remote'][0]['dronePositionFiltered.x'] = running_average(logs1['first remote'][0]['dronePosition.x'], 15) plt.figure(figsize=(6,6)) plt.plot(logs1['first remote'][0]['dronePosition.x'][0:50]) plt.plot(logs1['first remote'][0]['dronePositionFiltered.x'][0:50]) # - # # Collision Analysis os.getcwd() # + jupyter={"outputs_hidden": true, "source_hidden": true} coll_df = pd.read_csv('../Haptics_new/Bidir - haptics X collisions - Sheet1 copy.csv') coll_df # + jupyter={"outputs_hidden": true, "source_hidden": true} coll = {} coll['motion'] = coll_df[coll_df['Interface']=='Motion'] coll['remote'] = coll_df[coll_df['Interface']=='Remote'] coll['first motion'] = coll['motion'][coll['motion']['Run']==1] coll['last motion'] = coll['motion'][coll['motion']['Run']==5] coll['first remote'] = coll['remote'][coll['remote']['Run']==1] coll['last remote'] = coll['remote'][coll['remote']['Run']==5] # logs['last motion'] = [logs[x][-1] for x in sub_motion] # logs['first remote'] = [logs[x][0] for x in sub_remote] # logs['last remote'] = [logs[x][-1] for x 
in sub_remote] print(coll['motion']) print(coll['remote']) # + jupyter={"outputs_hidden": true, "source_hidden": true} def compute_mean_std_collisions(data_m, data_r): coll_per_sub_m = [] for sub in np.unique(data_m['Subject']): sub_coll = data_m[data_m['Subject']==sub] coll_per_sub_m.append(sub_coll['Collided'].sum()) coll_per_sub_r = [] for sub in np.unique(data_r['Subject']): sub_coll = data_r[data_r['Subject']==sub] coll_per_sub_r.append(sub_coll['Collided'].sum()) mean_m = np.mean(coll_per_sub_m) std_m = np.std(coll_per_sub_m) mean_r = np.mean(coll_per_sub_r) std_r = np.std(coll_per_sub_r) print('mean motion = ', mean_m) print('std motion = ', std_m) print('mean remote = ', mean_r) print('std remote = ', std_r) print('') print('total motion = ', np.sum(coll_per_sub_m)) print('total remote = ', np.sum(coll_per_sub_r)) print('') t, p = t_test_kruskal(coll_per_sub_m, coll_per_sub_r) print_p(p) return [coll_per_sub_m, coll_per_sub_r] print('') print('consider all runs') print('') all_m, all_r = compute_mean_std_collisions(coll['motion'], coll['remote']) print('') print('consider first runs') print('') first_m, first_r = compute_mean_std_collisions(coll['first motion'], coll['first remote']) print('') print('consider last runs') print('') last_m, last_r = compute_mean_std_collisions(coll['last motion'], coll['last remote']) fig = plt.figure(figsize=(12, 4)) ax = fig.add_subplot(131) ax.bar([0, 1], [np.mean(all_m), np.mean(all_r)], yerr = [np.std(all_m), np.std(all_r)]) plt.xticks(range(2), ['motion', 'remote']) plt.ylim([0, 5]) plt.title('Total collisions') ax = fig.add_subplot(132) ax.bar([0, 1], [np.mean(first_m), np.mean(first_r)], yerr = [np.std(first_m), np.std(first_r)]) plt.xticks(range(2), ['motion', 'remote']) plt.ylim([0, 5]) plt.title('Collisions - first run') ax = fig.add_subplot(133) ax.bar([0, 1], [np.mean(last_m), np.mean(last_r)], yerr = [np.std(last_m), np.std(last_r)]) plt.xticks(range(2), ['motion', 'remote']) plt.ylim([0, 5]) plt.title('Total 
collisions') plt.title('Collisions - last run') # + jupyter={"outputs_hidden": true, "source_hidden": true} c1 = 'b' c2 = 'r' fig = plt.figure() ax = fig.add_subplot(111) ax.bar([1], [np.mean(all_r),], yerr = [np.std(all_r)], color =c1, ecolor = c1, width=0.5 ) ax.bar([2], [np.mean(all_m)], yerr = [np.std(all_m)], color =c2, ecolor = c2, width=0.5 ) plt.xticks([1,2], ['remote', 'motion']) plt.yticks([1,2,3,4,5]) plt.ylim([0, 5]) plt.xlim([0.5, 2.5]) plt.ylabel('Total collisions') plt.grid() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) savefig('collisions_X.pdf', bbox_inches='tight') # - # ## <span style="color:red">HERE RESULTS</span> # # Approach wall Analysis def comp_max_dist(df): return np.max(df['dronePosition.x']) x_max = runHapticsAnalysis(comp_max_dist) # + jupyter={"outputs_hidden": true, "source_hidden": true} dist_obstacle = 2.5 collider = 0.07 x_collision = dist_obstacle - collider # + jupyter={"outputs_hidden": true, "source_hidden": true} distances = {} for i in interfaces: distances[i] = x_collision - np.array(x_max[i]) distances[i] = distances[i][np.where(distances[i]>0)] print() for idx,i in enumerate(interfaces): for j in interfaces[idx+1:]: if i != j: t, p = t_test_kruskal(distances[i],distances[j]) print (i,j) print_p(p) fig = plt.figure(figsize=(4, 4)) ax = fig.add_subplot(111) ax.bar([0, 1], [np.mean(distances['motion']), np.mean(distances['remote'])], yerr = [np.std(distances['motion']), np.std(distances['remote'])]) plt.xticks(range(2), ['motion', 'remote']) plt.ylim([0, 1]) plt.title('Distances') # + jupyter={"outputs_hidden": true, "source_hidden": true} c1 = 'b' c2 = 'r' fig = plt.figure() ax = fig.add_subplot(111) ax.bar([1], [np.mean(distances['remote']),], yerr = [np.std(distances['remote'])], color =c1, ecolor = c1, width=0.5 ) ax.bar([2], [np.mean(distances['motion'])], yerr = [np.std(distances['motion'])], color =c2, ecolor = c2, width=0.5 ) plt.xticks([1,2], ['remote', 'motion']) plt.yticks([0,0.5,1]) 
plt.ylim([0, 1]) plt.xlim([0.5, 2.5]) plt.ylabel('Distance from wall [m]') plt.grid() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) savefig('distances_X.pdf', bbox_inches='tight') # - plt.bar(range(len(x_max['motion'])),np.array(x_max['motion']) - 2.43) plt.figure() plt.bar(range(len(x_max['remote'])),np.array(x_max['remote']) - 2.43) # ## <span style="color:red">HERE RESULTS</span>
data_analysis_new/simulations/data_analysis_haptics_X.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + #specific to extracting information from word documents import os import zipfile #useful tool for extracting information from XML import re #to pretty print out xml: import xml.dom.minidom # - #to check files in the current directory, use a single period os.listdir('.') # Read Folder os.listdir('./1st_Proof') # At its heart, a docx file is just a zip file (try running unzip on it!) containing a bunch of well defined XML and collateral files. # We will now use the <a href = "https://docs.python.org/3/library/zipfile.html">zipfile</a> library to help us read our document. The defaults are listed below, and they're all good for our purposes of reading the word document. # ```python # class zipfile.ZipFile(file, mode='r', compression=ZIP_STORED, allowZip64=True, compresslevel=None)``` document = zipfile.ZipFile('./1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx') document document.namelist() #document.xml : The main textual content and structure is defined # + uglyXml = xml.dom.minidom.parseString(document.read('word/styles.xml')).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) print(prettyXml) # + #name = 'word/people.xml' uglyXml = xml.dom.minidom.parseString(document.read('word/document.xml')).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) print(prettyXml) # + #name = 'word/fontTable.xml' #This looks like the fonts used in the document style uglyXml = xml.dom.minidom.parseString(document.read('word/fontTable.xml')).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) 
print(prettyXml) # + #Get the xml that has the text contained in the document #name = 'word/document.xml' uglyXml = xml.dom.minidom.parseString(document.read('word/document.xml')).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) print(prettyXml) # - # Per the scope of this contract, we'll need to find all styles stored in this document and add them to a list. # But now we don't have definations about each Styles in XML look like #first try to turn the xml content into a string: xml_content = document.read('word/document.xml') document.close() xml_str = str(xml_content) # + # USE lxml library to parse string containing XML into a usable tree from lxml import etree import os,shutil _FILE_NAME = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx' class Get_text_document(object): nsprefixes = { 'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main', 'o': 'urn:schemas-microsoft-com:office:office', 've': 'http://schemas.openxmlformats.org/markup-compatibility/2006', # Text Content 'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main', 'w10': 'urn:schemas-microsoft-com:office:word', 'wne': 'http://schemas.microsoft.com/office/word/2006/wordml', # Drawing 'a': 'http://schemas.openxmlformats.org/drawingml/2006/main', 'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math', 'mv': 'urn:schemas-microsoft-com:mac:vml', 'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture', 'v': 'urn:schemas-microsoft-com:vml', 'wp': 'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing', # Properties (core and extended) 'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties', 'dc': 'http://purl.org/dc/elements/1.1/', 'ep': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties', 'xsi': 'http://www.w3.org/2001/XMLSchema-instance', # Content Types 'ct': 
'http://schemas.openxmlformats.org/package/2006/content-types', # Package Relationships 'r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships', 'pr': 'http://schemas.openxmlformats.org/package/2006/relationships', # Dublin Core document properties 'dcmitype': 'http://purl.org/dc/dcmitype/', 'dcterms': 'http://purl.org/dc/terms/'} def __init__ (self, filename): filename = _FILE_NAME def _read_contents(self, filename): """ Unzip and read the contents of the word/document.xml file :rtype: the contents of the file as an ElementTree """ self.zipfile = zipfile.ZipFile(filename) xml_content = self.zipfile.read('word/document.xml') return etree.fromstring(xml_content) def _get_xml_tree(xml_string): return etree.fromstring(xml_string) #print(get_xml_tree(xml_content)) def _itertext(self, my_etree): """Iterator to go through xml tree's text nodes""" for node in my_etree.iter(tag=etree.Element): if self._check_element_is(node, 't'): yield (node, node.text) def _check_element_is(self, element, type_char): return element.tag == '{%s}%s' % (self.nsprefixes['w'],type_char) def _print_text_document(self, word_filename): xml_from_file = self._read_contents(word_filename) xml_tree = self._get_xml_tree(xml_from_file) for node, txt in self._itertext(xml_tree): print (txt) def write_and_close_docx (self, xml_content, output_filename): """ Create a temp directory, expand the original docx zip. 
Write the modified xml to word/document.xml Zip it up as the new docx """ tmp_dir = tempfile.mkdtemp() self.zipfile.extractall(tmp_dir) with open(os.path.join(tmp_dir,'word/document.xml'), 'w') as f: xmlstr = etree.tostring (xml_content, pretty_print=True) f.write(xmlstr) # Get a list of all the files in the original docx zipfile filenames = self.zipfile.namelist() # Now, create the new zip file and add all the filex into the archive zip_copy_filename = output_filename with zipfile.ZipFile(zip_copy_filename, "w") as docx: for filename in filenames: docx.write(os.path.join(tmp_dir,filename), filename) # Clean up the temp dir shutil.rmtree(tmp_dir) #if __name__ == '__main__': #Get_text_document._print_text_document(self,'word/document.xml') # + #Test import zipfile def _itertext(my_etree): """Iterator to go through xml tree's text nodes""" for node in my_etree.iter(tag=etree.Element): if _check_element_is(node, 't'): yield (node, node.text) def _read_contents(filename): """ Unzip and read the contents of the word/document.xml file :rtype: the contents of the file as an ElementTree """ zipfile1 = zipfile.ZipFile(filename) xml_content = zipfile1.read('word/document.xml') return xml_content def _check_element_is(element, type_char): word_schema = 'http://schemas.openxmlformats.org/wordprocessingml/2006/main' return element.tag == '{%s}%s' % (word_schema,type_char) xml_from_file = _read_contents(_FILE_NAME) xml_tree = etree.fromstring(xml_from_file) for node, txt in _itertext(xml_tree): print (txt) # - # I succesfully get all of the text of the document formed as a xml-tree!! Now dive in to styles. 
Documents belows : <br> # https://python-docx.readthedocs.io/en/latest/dev/analysis/features/styles/index.html <br> # https://docs.microsoft.com/en-us/office/dev/add-ins/word/create-better-add-ins-for-word-with-office-open-xml <br> # '[Content_Types].xml' : https://docs.microsoft.com/en-us/visualstudio/extensibility/the-structure-of-the-content-types-dot-xml-file?view=vs-2019 <br> # '_rels/.rels', <br> # 'word/document.xml', <br> https://stackoverflow.com/questions/20534660/trying-to-parse-word-docx-file-as-a-zip-document-using-pythons-xml-elementtree # 'word/document.xml', <br> # 'word/_rels/document.xml.rels' --get the image infomation], https://stackoverflow.com/questions/36235031/where-is-the-word-rels-document-xml-rels-in-python-docx-object <br> # 'word/theme/theme1.xml', <br> # 'word/settings.xml', <br> # 'word/styles.xml', <br> # 'word/webSettings.xml', <br> # 'word/fontTable.xml', <br> # 'docProps/core.xml', <br> # 'docProps/app.xml'] <br> # + _FILE_NAME = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx' ns = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'} ns_pfx = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}' def find_style1(xml_string): root = etree.fromstring(xml_string) #Page Size & Page Margin pgSz = root.find('.//w:pgSz', ns) pgMar = root.find('.//w:pgMar', ns) if pgSz != None and pgMar != None: print (pgSz.get(ns_pfx + 'w'), pgMar.get(ns_pfx + 'footer')) print (xml_string) zip = zipfile.ZipFile(_FILE_NAME) xml_file = zip.namelist() for file in xml_file: xml_content = zip.read(file) find_style1(xml_content) # - # Try to compare 2 documents : # + import xml.etree.ElementTree as ET import logging import itertools import os import zipfile _FILE_NAME = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx' _FILE_COPY = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right - Copy.docx' _FILE_EDITED = './1st_Proof/02_Book review IJPT Scott A Theology 
of Postnatural Right - Edited.docx' class XmlTree(): def __init__(self): self.logger = logging.getLogger('xml_compare') self.logger.setLevel(logging.DEBUG) self.hdlr = logging.FileHandler('xml-comparison.log') self.formatter = logging.Formatter('%(asctime)s - %(levelname)s- %(message)s') self.hdlr.setLevel(logging.DEBUG) self.hdlr.setFormatter(self.formatter) self.logger.addHandler(self.hdlr) @staticmethod def convert_string_to_tree(xmlString): return ET.fromstring(xmlString) def xml_compare(self, x1, x2, excludes=[]): """ Compares two xml etrees :param x1: the first tree :param x2: the second tree :param excludes: list of string of attributes to exclude from comparison :return: True if both files match """ if x1.tag != x2.tag: self.logger.debug('Tags do not match: %s and %s' % (x1.tag, x2.tag)) return False for name, value in x1.attrib.items(): if not name in excludes: if x2.attrib.get(name) != value: self.logger.debug('Attributes do not match: %s=%r, %s=%r' % (name, value, name, x2.attrib.get(name))) return False for name in x2.attrib.keys(): if not name in excludes: if name not in x1.attrib: self.logger.debug('x2 has an attribute x1 is missing: %s' % name) return False if not self.text_compare(x1.text, x2.text): self.logger.debug('text: %r != %r' % (x1.text, x2.text)) return False if not self.text_compare(x1.tail, x2.tail): self.logger.debug('tail: %r != %r' % (x1.tail, x2.tail)) return False cl1 = list(x1) #.getchildren() cl2 = list(x2) #.getchildren() if len(cl1) != len(cl2): self.logger.debug('children length differs, %i != %i' % (len(cl1), len(cl2))) return False i = 0 for c1, c2 in zip(cl1, cl2): i += 1 if not c1.tag in excludes: if not self.xml_compare(c1, c2, excludes): self.logger.debug('children %i do not match: %s vs %r' % (i, c1.text, c2.text)) return False return True def text_compare(self, t1, t2): """ Compare two text strings :param t1: text one :param t2: text two :return: True if a match """ if not t1 and not t2: return True if t1 == '*' or t2 == 
'*': return True return (t1 or '').strip() == (t2 or '').strip() zip1 = zipfile.ZipFile(_FILE_NAME) xml_content = zip1.read('word/document.xml') xml1 = xml_content zip2 = zipfile.ZipFile(_FILE_EDITED) xml_content = zip2.read('word/document.xml') xml2 = xml_content tree1 = XmlTree.convert_string_to_tree(xml1) tree2 = XmlTree.convert_string_to_tree(xml2) comparator = XmlTree() if comparator.xml_compare(tree1, tree2, ["from"]): print ("XMLs match") else: print ("XMLs don't match") #log_file = zip2.read('xml-comparison.log') #print(comparator.logger) f = open('xml-comparison.log', "r") print (f.read()) # - zip = zipfile.ZipFile(_FILE_NAME) xml_file = zip.namelist() for file in xml_file: xml_content = zip.read(file) print (file) print (xml_content) print ('----------------------------------------------------------------------------------------------') # + #Working with XML, I forgot that there's a wonderful library to parse XML string is Beautiful Soup 4 _FILE_NAME = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx' from bs4 import BeautifulSoup import re zip = zipfile.ZipFile(_FILE_NAME) xml_content = zip.read('word/document.xml') soup = BeautifulSoup(xml_content) print (soup) # + zip1 = zipfile.ZipFile(_FILE_NAME) xml_content = zip1.read('word/settings.xml') xml1 = xml_content uglyXml = xml.dom.minidom.parseString(xml1).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) print(prettyXml) # + zip2 = zipfile.ZipFile(_FILE_EDITED) xml_content = zip2.read('word/settings.xml') xml2 = xml_content uglyXml = xml.dom.minidom.parseString(xml2).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml = text_re.sub('>\g<1></', uglyXml) print(prettyXml) # - # There're 3 hightlight that's create new node in the edited xml tree (<w:rsid w:val="006D527B"/> , <w:rsid w:val="00957CCD"/>, <w:rsid w:val="00F7242A"/>) # => Conclusion : Whenever 
hightlight a node, xml code will create a new node, even if you undo that, the node still exits. # + from xmldiff import main, formatting import lxml.etree zip1 = zipfile.ZipFile(_FILE_NAME) xml_content = zip1.read('word/settings.xml') xml1 = xml_content uglyXml1 = xml.dom.minidom.parseString(xml1).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml1 = text_re.sub('>\g<1></', uglyXml1) zip2 = zipfile.ZipFile(_FILE_EDITED) xml_content = zip2.read('word/settings.xml') xml2 = xml_content uglyXml2 = xml.dom.minidom.parseString(xml2).toprettyxml(indent=' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) prettyXml2 = text_re.sub('>\g<1></', uglyXml2) XSLT = u'''<?xml version="1.0"?> <xsl:stylesheet version="1.0" xmlns:diff="http://namespaces.shoobx.com/diff" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:template match="@diff:insert-formatting"> <xsl:attribute name="class"> <xsl:value-of select="'insert-formatting'"/> </xsl:attribute> </xsl:template> <xsl:template match="diff:delete"> <del><xsl:apply-templates /></del> </xsl:template> <xsl:template match="diff:insert"> <ins><xsl:apply-templates /></ins> </xsl:template> <xsl:template match="@* | node()"> <xsl:copy> <xsl:apply-templates select="@* | node()"/> </xsl:copy> </xsl:template> </xsl:stylesheet>''' XSLT_TEMPLATE = lxml.etree.fromstring(XSLT) class HTMLFormatter(formatting.XMLFormatter): def render(self, result): transform = lxml.etree.XSLT(XSLT_TEMPLATE) result = transform(result) return super(HTMLFormatter, self).render(result) #formatter=formatting.XMLFormatter( # text_tags=('p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li'), # formatting_tags=('b', 'u', 'i', 'strike', 'em', 'super', 'sup', 'sub', 'link', 'a', 'span')) formatter = HTMLFormatter( text_tags=('p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li'), formatting_tags=('b', 'u', 'i', 'strike', 'em', 'super', 'sup', 'sub', 'link', 'a', 'span')) diff = main.diff_texts(prettyXml1, prettyXml2, 
diff_options={'fast_match': True}, formatter=formatter) print (diff) # - # As we can see: There're 3 differences with tag : """< diff:insert >""" which are 3 Hightlights I made for testing. # I formatted the xml Result for better examination. # + from lxml import etree root1 = etree.fromstring(prettyXml1) root2 = etree.fromstring(prettyXml2) dif_tree = main.diff_trees(root1, root2) dif_tree # - # Let's try it with 'word/document.xml' # + # zip3 = zipfile.ZipFile(_FILE_NAME) xml_content1 = zip3.read('word/document.xml') doc3 = xml_content1 root3 = etree.fromstring(doc3) # zip4 = zipfile.ZipFile(_FILE_EDITED) xml_content2 = zip4.read('word/document.xml') doc4 = xml_content2 root4 = etree.fromstring(doc4) #################### diff_doc_tree = main.diff_trees(root3, root4) diff_doc_tree # - # There're four main Moves of Edit Operations: Insert, Delete, Update, Move # CHECK OPTION 1 : FIND Unused Styles (not BuiltIn Style, not "Normal" type, InUse == false, Type not wdStyleTypeLinked) # + _FILE_NAME = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right.docx' _FILE_COPY = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right - Copy.docx' _FILE_EDITED = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right - Edited.docx' _FILE_EDITED_option = './1st_Proof/02_Book review IJPT Scott A Theology of Postnatural Right_Option_1.docx' import zipfile from lxml import etree from xmldiff import main, formatting # zip1 = zipfile.ZipFile(_FILE_NAME) xml_content1 = zip1.read('word/settings.xml') #Focus on 3 file : 'word/styles.xml' 'word/document.xml' 'word/settings.xml' root1 = etree.fromstring(xml_content1) # zip2 = zipfile.ZipFile(_FILE_EDITED_option) xml_content2 = zip2.read('word/settings.xml') #Focus on 3 file : 'word/styles.xml' 'word/document.xml' 'word/settings.xml' root2 = etree.fromstring(xml_content2) #################### diff_doc_tree = main.diff_trees(root1, root2) diff_doc_tree # - # Most of changes focus on this 
nsPrefixes {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'} # This is namespace of Text Documents. <br> # It means that there're a lot of unused Styles of Microsoft Word Elements (It may define with blank in order to insert sth in the future??) # There are matrix of node, every kind of node as a list(array) of node with index <br> # (node = '/w:settings/w:themeFontLang[1]', name='{http://schemas.openxmlformats.org/wordprocessingml/2006/main}val', value='720')) <br> # node='/w:settings/m:lMargin[1]', tag='{http://schemas.openxmlformats.org/wordprocessingml/2006/main}themeFontLang') # Next Question is : What're these kind of nodes: <br> # node='/w:settings/w:listSeparator <br> # node='/w:settings/w:decimalSymbol <br> # node='/w:settings/w:shapeDefaults <br> # node='/w:settings/w:clrSchemeMapping <br> # node='/w:settings/w:themeFontLang <br> # node='/w:settings/m:mathPr/w:rsid[18]' <br> # .....
archiver/old_files/xml_parse_from_MSWord.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Plot and compare validation-accuracy curves across model variants
# (baseline, 2/3/5-frame TRN, LRCN, multi-scale TRN) over training epochs.

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Per-model training logs; each CSV provides 'epoch' and 'val_acc' columns.
df_baseline = pd.read_csv('training_log_baseline.csv')
df_2frame = pd.read_csv('training_log_2frame.csv')
df_3frame = pd.read_csv('training_log_3frame.csv')
df_5frame = pd.read_csv('training_log_5frame.csv')
df_LRCN = pd.read_csv('training_log_LRCN.csv')
df_multiTRN = pd.read_csv('training_log_multiscaleTRN.csv')

num_epoch = df_baseline['epoch']
val_baseline = df_baseline['val_acc']
val_2frame = df_2frame['val_acc']
val_3frame = df_3frame['val_acc']
val_5frame = df_5frame['val_acc']
val_LRCN = df_LRCN['val_acc']
# Multi-scale TRN has a longer log; truncate to the first 30 epochs so every
# series matches the shared x-axis (num_epoch).
val_multiTRN = df_multiTRN['val_acc'][0:30]

plt.figure(figsize=(18, 6))
plt.plot(num_epoch, val_baseline)
plt.plot(num_epoch, val_2frame)
plt.plot(num_epoch, val_3frame)
plt.plot(num_epoch, val_5frame)
plt.plot(num_epoch, val_LRCN)
plt.plot(num_epoch, val_multiTRN)
plt.legend(["Baseline", "2-Frame TRN", "3-Frame TRN", "5-Frame TRN", "LRCNs", "MultiScale TRN"], loc='lower right')
plt.xlabel('Epoch')
# Fixed typo in the displayed label/title: 'Valiadation' -> 'Validation'.
plt.ylabel('Validation Accuracy')
plt.title('Validation accuracy vs number of epochs')
log/Comparison_figure.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# This file covers the process of data selection for Artemis based upon the satellite's position in comparison to the Earth and Moon.
#
# After completing the process of filtering out invalid values and resampling on an hourly timescale, we are still seeing daily and monthly dips in the solar wind speed for the Artemis mission. This code is going to use the position of Artemis in comparison to the Moon and the Earth in order to filter out these dips.

# First we need to import the two CSV files, one for the solar wind data and one for the position data and merge them based on time.

import numpy as np
import pandas as pd
import datetime

# Position data and solar-wind measurements for the Artemis spacecraft.
posFrame = pd.read_csv('ArtemisP2.csv')
windFrame = pd.read_csv('ArtemisSW2.csv')

# Drop leftover index columns (named 'Unnamed: ...') produced by earlier to_csv exports.
windFrame = windFrame.loc[:, ~windFrame.columns.str.contains('^Unnamed')]
posFrame = posFrame.loc[:, ~posFrame.columns.str.contains('^Unnamed')]

# Parse the epoch columns as datetimes and derive a calendar-date column to merge on.
windFrame['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(windFrame['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'])
windFrame['date'] = pd.to_datetime(windFrame['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ']).dt.date
posFrame['EPOCH_yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(posFrame['EPOCH_yyyy-mm-ddThh:mm:ss.sssZ'])
posFrame['date'] = pd.to_datetime(posFrame['EPOCH_yyyy-mm-ddThh:mm:ss.sssZ']).dt.date

# Join wind rows with same-day position rows, then drop the helper merge keys.
mergedFrame = pd.merge(windFrame, posFrame, on='date')
mergedFrame = mergedFrame.drop(['date', 'EPOCH_yyyy-mm-ddThh:mm:ss.sssZ'], axis=1)
# -1.0E+31 is the fill value marking invalid ion-density readings; discard those rows.
mergedFrame = mergedFrame[mergedFrame.IonDensityN_cc != -1.0E+31]
#mergedFrame.to_csv(r'ArtemisMerged2.csv')

# Now we have a merged dataset that includes time, ion density, ion speed, and position relative to the Sun, Moon, and Earth

# +
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt

fig = plt.figure()
fig.suptitle("Artemis velocity and position data")

df = pd.read_csv('ArtemisMerged2.csv')
df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'])
# Inspect density, thermal speed, and the X position columns ('Re' and 'Rm' suffixes —
# presumably Earth-relative and Moon-relative coordinates; TODO confirm units).
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='IonDensityN_cc')
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='VTH_ION_FULL_ESA-B__km/s')
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='X_(@_x_)_Re')
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='X_(@_x_)_Rm')
# -

# As you can see from the first position plot for the Earth's orbit, and the second plot for the Moon's orbit we should be able to filter out these dips by filtering out values under -50 for the Earth's orbit and -.5 for the Moon's orbit. We will also resample the data at an hourly timescale.
# NOTE(review): the code below keeps Moon-frame X values >= 0, not >= -0.5 as stated above — confirm which threshold is intended.

# +
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt

fig = plt.figure()
fig.suptitle("Artemis velocity and position data")

df = pd.read_csv('ArtemisMerged2.csv')
df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'])
# Positional cuts: keep Earth-frame X >= -50 and Moon-frame X >= 0, then
# average onto an hourly grid.
df = df[df['X_(@_x_)_Re'] >= -50]
df = df[df['X_(@_x_)_Rm'] >= 0]
df = df.resample('H', on = 'EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ').mean().reset_index()
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='IonDensityN_cc')
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='VTH_ION_FULL_ESA-B__km/s')
# -

# There are some gaps in the data now, but we can be sure that it is not being influenced by celestial bodies that are in the way of the solar wind. From here we can compare to the OMNI dataset to see how close the data is.

# +
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt

fig = plt.figure()
fig.suptitle("Artemis velocity and position data")

df = pd.read_csv('ArtemisMerged2.csv')
df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'])
# Note: tighter Earth-frame cut here (-35) than in the previous cell (-50).
df = df[df['X_(@_x_)_Re'] >= -35]
df = df[df['X_(@_x_)_Rm'] >= 0]
df = df.resample('H', on = 'EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ').mean().reset_index()

# OMNI reference data for the same interval; drop fill values, hourly-resample.
df2 = pd.read_csv('Omni4Artemis2.csv')
df2['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df2['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ'])
df2 = df2[df2['ION_DENSITY_N/cm3'] != -1.0E+31]
df2 = df2.resample('H', on = 'EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ').mean().reset_index()

ax = plt.gca()
ay = plt.gca()
# Overlay Artemis (red) and OMNI speeds on one axes for visual comparison.
df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='VTH_ION_FULL_ESA-B__km/s', color='red', ax=ax)
df2.plot(kind='line', x='EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ', y='BULK_FLOW_SPEED_km/s', ax=ax)
#df.plot(kind='line', x='EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ', y='IonDensityN_cc', color='red', ax=ay)
#df2.plot(kind='line', x='EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ', y='ION_DENSITY_N/cm3', ax=ay)
# Zoom to August-September 2019 for the side-by-side comparison.
ax.set_xlim([datetime.date(2019, 8, 1), datetime.date(2019, 10, 1)])
plt.show()
# -

# As you can see, there are now gaps in the data on a daily and monthly basis, but the remaining data is fairly close to what the OMNI dataset has for similar time intervals. When we figure out an appropriate time shift, we should be able to merge these two sets and be left with the data points where there is only data for both datasets.

# +
import numpy as np
import pandas as pd
import datetime

df = pd.read_csv('csv files/ArtemisMerged2.csv')
df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'])
df = df[df['X_(@_x_)_Re'] >= -35]
df = df[df['X_(@_x_)_Rm'] >= 0]
df = df.resample('H', on = 'EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ').mean().reset_index()

df2 = pd.read_csv('csv files/Omni4Artemis2.csv')
df2['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ'] = pd.to_datetime(df2['EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ'])
df2 = df2[df2['ION_DENSITY_N/cm3'] != -1.0E+31]
df2 = df2.resample('H', on = 'EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ').mean().reset_index()

# Position columns served only for filtering; drop them from the final table.
df = df.drop(['X_(@_x_)_Re', 'Y_(@_y_)_Re', 'Z_(@_z_)_Re', 'X_(@_x_)_Rm', 'Y_(@_y_)_Rm', 'Z_(@_z_)_Rm','Unnamed: 0'], axis=1)
# Shift Artemis timestamps back one hour (and record the offset) so each Artemis
# row aligns with the OMNI row one hour earlier.
df['new_time'] = df['EPOCH_TIME__yyyy-mm-ddThh:mm:ss.sssZ'] - pd.Timedelta(hours = 1)
df['Time_offset_hours'] = 1

mF = pd.merge(df2, df, how='right', left_on='EPOCH_TIME_yyyy-mm-ddThh:mm:ss.sssZ', right_on='new_time')
# Keep only rows where both datasets contributed values.
mF = mF.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)
# Prefix column names by their source dataset (Artemis vs OMNI).
mF.rename(columns = {'IonDensityN_cc':'ArtemisIonDensityN_CC', 'VTH_ION_FULL_ESA-B__km/s':'ArtemisIonSpeedKM_S', 'RAD_AU_AU':'ArtemisDistanceAU', 'HGI_LAT_deg':'ArtemisLatDeg', 'HGI_LON_deg':'ArtemisLonDeg'}, inplace = True)
mF.rename(columns = {'HELIOGRAPHIC_LATITUDE_deg':'OMNILatDeg', 'HELIOGRAPHIC_LONGITUDE_deg':'OMNILonDeg', 'BULK_FLOW_SPEED_km/s':'OMNIIonSpeedKM_S', 'ION_DENSITY_N/cm3':'OMNIIonDensityN_CC'}, inplace = True)
#mF.to_csv('FinalArtemisData.csv')
mF.head()
# -
Data Processing/Advanced Artemis Filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set() # - bb = pd.read_csv("..\\..\\datasets\\homecdt_eda\\bureau_balance.csv") bb.head() bb.sort_values(by=['SK_ID_BUREAU','MONTHS_BALANCE'],inplace=True) bb.head() total_status_count = pd.crosstab(bb['SK_ID_BUREAU'], bb['STATUS'],normalize=0) total_status_count.head(10) total_status_count.rename(columns={'0':'total_status_0%','1':'total_status_1%','2':'total_status_2%', '3':'total_status_3%','4':'total_status_4%','5':'total_status_5%', 'C':'total_status_C%','X':'total_status_X%'},inplace=True) total_status_count.reset_index(level='SK_ID_BUREAU',inplace=True) bb_g_id = pd.DataFrame(bb.groupby('SK_ID_BUREAU')['MONTHS_BALANCE'].max()) bb_g_id.head() bb_g_id.reset_index(level='SK_ID_BUREAU',inplace=True) bb_g_id.rename(columns={'MONTHS_BALANCE':'MONTHS_BALANC_MAX'},inplace=True) bb_m = pd.merge(bb,bb_g_id) bb_m.head(10) # + bb_m['recent_3'] = (bb_m['MONTHS_BALANCE'] >= bb_m['MONTHS_BALANC_MAX']-2) #新增欄位,並標示MONTHS_BALANCE是否為最近3期 bb_m['recent_6'] = (bb_m['MONTHS_BALANCE'] >= bb_m['MONTHS_BALANC_MAX']-5) #新增欄位,並標示MONTHS_BALANCE是否為最近6期 bb_m['recent_9'] = (bb_m['MONTHS_BALANCE'] >= bb_m['MONTHS_BALANC_MAX']-8) #新增欄位,並標示MONTHS_BALANCE是否為最近9期 recent_3_status=bb_m[bb_m["recent_3"]==True].sort_values(by=["SK_ID_BUREAU"]) #新增表格,篩選最近3期MONTHS_BALANCE recent_6_status=bb_m[bb_m["recent_6"]==True].sort_values(by=["SK_ID_BUREAU"]) #新增表格,篩選最近6期MONTHS_BALANCE recent_9_status=bb_m[bb_m["recent_9"]==True].sort_values(by=["SK_ID_BUREAU"]) #新增表格,篩選最近9期MONTHS_BALANCE recent_3_status.head(10) # - recent_3_status=pd.crosstab(recent_3_status['SK_ID_BUREAU'], recent_3_status['STATUS'],normalize=0) #列出最近3期STATUS的比例 
recent_6_status=pd.crosstab(recent_6_status['SK_ID_BUREAU'], recent_6_status['STATUS'],normalize=0) #列出最近6期STATUS的比例 recent_9_status=pd.crosstab(recent_9_status['SK_ID_BUREAU'], recent_9_status['STATUS'],normalize=0) #列出最近9期STATUS的比例 recent_3_status.rename(columns={'0':'recent_3_status_0%','1':'recent_3_status_1%','2':'recent_3_status_2%', '3':'recent_3_status_3%','4':'recent_3_status_4%','5':'recent_3_status_5%', 'C':'recent_3_status_C%','X':'recent_3_status_X%'},inplace=True) #更改最近3期欄位名稱 recent_6_status.rename(columns={'0':'recent_6_status_0%','1':'recent_6_status_1%','2':'recent_6_status_2%', '3':'recent_6_status_3%','4':'recent_6_status_4%','5':'recent_6_status_5%', 'C':'recent_6_status_C%','X':'recent_6_status_X%'},inplace=True) #更改最近6期欄位名稱 recent_9_status.rename(columns={'0':'recent_9_status_0%','1':'recent_9_status_1%','2':'recent_9_status_2%', '3':'recent_9_status_3%','4':'recent_9_status_4%','5':'recent_9_status_5%', 'C':'recent_9_status_C%','X':'recent_9_status_X%'},inplace=True) #更改最近9期欄位名稱 bb[bb['SK_ID_BUREAU']==5001717] recent_9_status.head(20) recent_369_status=[recent_3_status,recent_6_status,recent_9_status] recent_369_status=pd.concat(recent_369_status,axis=1) recent_369_status.head() recent_369_status.reset_index(level='SK_ID_BUREAU',inplace=True) bureau_balance_FE=pd.merge(total_status_count, recent_369_status) bureau_balance_FE.head() bureau_balance_FE.to_csv('..\\..\\datasets\\homecdt_fteng\\bureau_balance_FE.csv',index = False)
notebooks/homecdt_fteng/ht_BureauBalance_FE.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem 3.6 Conditional Distribution
# Show that
# $$P_{C\mid E=2}\sim\mathcal{N}\left(\frac{8}{17},\frac{1}{17}\right)$$
# for the SCM
# \begin{align}
# C:=&\mathcal{N}(0,1)\\
# E:=&4C+\mathcal{N}(0,1)
# \end{align}

# ## Proof
# The SCM can be seen as a [multivariate normal distribution](https://en.wikipedia.org/wiki/Multivariate_normal_distribution)
# $$
# P\left(\begin{matrix}C\\E\end{matrix}\right)=
# \mathcal{N}\left(\begin{bmatrix}\mu_C\\\mu_E\end{bmatrix},\Sigma\right)
# $$
# where $\mu_C=\mu_E=0$ because both distributions have an expectation of zero and $\Sigma$ is the covariance matrix which is
# \begin{align}
# \Sigma:=&\begin{bmatrix}\sigma^2_C&\rho\sigma_C\sigma_E\\\rho\sigma_C\sigma_E&\sigma_E^2\end{bmatrix}\\
# =&\begin{bmatrix}1&4\\4&4^2+1\end{bmatrix}
# \end{align}
# since $\operatorname{Cov}(C,E)=\operatorname{Cov}(C,4C)=4\sigma_C^2=4$, which gives the correlation $\rho=\frac{4}{\sqrt{17}}$.
# For the bivariate case the conditional distribution is defined as
# $${\displaystyle X_{1}\mid X_{2}=a\ \sim \ {\mathcal {N}}\left(\mu _{1}+{\frac {\sigma _{1}}{\sigma _{2}}}\rho (a-\mu _{2}),\,(1-\rho ^{2})\sigma _{1}^{2}\right).}$$
# Applied to $C\mid E=2$ that yields $$\mathcal{N}\left(\frac{8}{17},\frac{1}{17}\right)$$
causal-inference/problems/problem-3.6-conditional-distribution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="TZ9rtHh5R0jb" import tensorflow as tf import tensorflow.keras.backend as K import numpy as np import pandas as pd import re import matplotlib.pyplot as plt import sklearn.model_selection # + colab={"base_uri": "https://localhost:8080/"} id="dCvurSR0iMAu" outputId="09ad70d1-6a8c-49c1-f1e9-b974b758e715" dataset = pd.read_csv('news.csv', sep=',') # or sampled_news_final.csv dataset = dataset.drop(['Unnamed: 0', 'Title', 'SentimentTitle'], axis=1) dataset.columns = ['text', 'sentiment'] dataset.shape # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="7U1L1a2AQnxu" outputId="1e94e12b-9cba-4d40-e32a-d896309c5ba2" dataset.head() # + colab={"base_uri": "https://localhost:8080/"} id="EwP4BVnYE6f3" outputId="9ebb76fe-a6df-424e-a625-1d7f59234eb4" dataset['sentiment'].value_counts() # + id="HE7kD-nXR0lC" X, y = dataset['text'].astype(str).tolist(), dataset['sentiment'].tolist() NUM_CLASSES = 3 # + colab={"base_uri": "https://localhost:8080/"} id="DgF9Mt_FR0lM" outputId="2b560938-d524-4599-a911-2da1a6dcee4b" tokenizer = tf.keras.preprocessing.text.Tokenizer() tokenizer.fit_on_texts(X) X_encoded = tokenizer.texts_to_sequences(X) vocab_size = len(tokenizer.word_index) + 1 MAX_LEN = max([len(n) for n in X_encoded]) print(MAX_LEN) X_encoded = tf.keras.preprocessing.sequence.pad_sequences(X_encoded, maxlen=MAX_LEN) # One Hot Encoding Target y_encoded = np.eye(NUM_CLASSES, dtype='int')[y].astype(float) # + colab={"base_uri": "https://localhost:8080/"} id="3O99E3oznKrL" outputId="193d03bb-5430-4214-cc64-76d35a47f7fb" X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X_encoded, y_encoded, test_size=0.1) X_train.shape, X_test.shape, y_train.shape, y_test.shape # + [markdown] id="LWcf0w44R0lo" # # Model # + 
# + id="z38d6f21se_k"
import numpy as np


class Embeddings:
    """Load pre-trained word vectors (e.g. GloVe) and build an embedding
    matrix aligned with a Keras tokenizer's word index."""

    def __init__(self, path, vector_dimension):
        # path: text file with one "word v1 v2 ... vN" entry per line
        self.path = path
        self.vector_dimension = vector_dimension

    @staticmethod
    def get_coefs(word, *arr):
        # BUGFIX: the original np.asarray(arr) produced a *string* array
        # (the tokens come straight from str.split), so every row assignment
        # into the float embedding matrix raised and was silently swallowed
        # by the bare except below, leaving the matrix all zeros.
        return word, np.asarray(arr, dtype='float32')

    def get_embedding_index(self):
        """Return a {word: vector} dict parsed from the embeddings file."""
        # BUGFIX: use a context manager so the file handle is closed
        # (the original left the generator's open() dangling).
        with open(self.path, 'r', errors='ignore', encoding='utf-8') as f:
            return dict(self.get_coefs(*line.split(" ")) for line in f)

    def create_embedding_matrix(self, tokenizer, max_features):
        """Build a (max_features + 1, vector_dimension) matrix whose row i
        holds the pre-trained vector for the tokenizer word with index i.
        Rows for out-of-vocabulary words stay zero."""
        model_embed = self.get_embedding_index()
        embedding_matrix = np.zeros((max_features + 1, self.vector_dimension))
        for word, index in tokenizer.word_index.items():
            # word_index is ordered by descending frequency, so every later
            # index is also > max_features — stopping early is safe.
            if index > max_features:
                break
            try:
                embedding_matrix[index] = model_embed[word]
            except (KeyError, ValueError):
                # KeyError: word not in the pre-trained vocabulary.
                # ValueError: malformed line (wrong vector length).
                continue
        return embedding_matrix


# + id="4H1aFGfotg8c"
# Word embedding: import GloVe embeddings (or other pre-trained embeddings).
embedding = Embeddings(path='glove.6B.200d.txt', vector_dimension=200)
embedding_matrix = embedding.create_embedding_matrix(tokenizer, vocab_size)
embedding_dim = 200


# + id="jJh8aBkTqiql"
class MultiHeadSelfAttention(tf.keras.layers.Layer):
    """Multi-head scaled dot-product self-attention (Vaswani et al., 2017)."""

    def __init__(self, embed_dim, num_heads=8):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
            )
        self.projection_dim = embed_dim // num_heads
        self.query_dense = tf.keras.layers.Dense(embed_dim)
        self.key_dense = tf.keras.layers.Dense(embed_dim)
        self.value_dense = tf.keras.layers.Dense(embed_dim)
        self.combine_heads = tf.keras.layers.Dense(embed_dim)

    def attention(self, query, key, value):
        # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V
        score = tf.matmul(query, key, transpose_b=True)
        dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
        scaled_score = score / tf.math.sqrt(dim_key)
        weights = tf.nn.softmax(scaled_score, axis=-1)
        output = tf.matmul(weights, value)
        return output, weights

    def separate_heads(self, x, batch_size):
        # (batch, seq, embed) -> (batch, num_heads, seq, projection_dim)
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, inputs):
        # inputs: (batch_size, seq_len, embed_dim)
        batch_size = tf.shape(inputs)[0]
        query = self.separate_heads(self.query_dense(inputs), batch_size)
        key = self.separate_heads(self.key_dense(inputs), batch_size)
        value = self.separate_heads(self.value_dense(inputs), batch_size)
        attention, _ = self.attention(query, key, value)
        # (batch, heads, seq, proj) -> (batch, seq, heads, proj) -> concat
        attention = tf.transpose(attention, perm=[0, 2, 1, 3])
        concat_attention = tf.reshape(attention, (batch_size, -1, self.embed_dim))
        return self.combine_heads(concat_attention)  # (batch, seq, embed_dim)


# + id="jRdKNIoRTyvh"
def categorical_focal_loss(classes_num, gamma=4., alpha=.25, e=0.1):
    """Return a class-balanced categorical focal loss function.

    classes_num: per-class sample counts used to derive inverse-frequency
    class weights. gamma: focusing exponent. e: label-smoothing mix-in weight.
    (The `alpha` parameter is unused: the inner function recomputes alpha
    from classes_num — kept for signature compatibility.)
    """
    def focal_loss_fixed(target_tensor, prediction_tensor):
        import tensorflow as tf
        from tensorflow.python.ops import array_ops
        from keras import backend as K

        # 1. focal term with no class balancing (paper eq. 4)
        zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
        one_minus_p = array_ops.where(tf.greater(target_tensor, zeros),
                                      target_tensor - prediction_tensor, zeros)
        FT = -1 * (one_minus_p ** gamma) * tf.math.log(
            tf.clip_by_value(prediction_tensor, 1e-8, 1.0))

        # 2. balanced weight alpha: each class weighted inversely to its
        # frequency, normalised to sum to 1
        classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)
        total_num = float(sum(classes_num))
        classes_w_t1 = [total_num / ff for ff in classes_num]
        sum_ = sum(classes_w_t1)
        classes_w_t2 = [ff / sum_ for ff in classes_w_t1]  # scale
        classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)
        classes_weight += classes_w_tensor
        alpha = array_ops.where(tf.greater(target_tensor, zeros), classes_weight, zeros)

        # 3. balanced focal loss
        balanced_fl = alpha * FT
        balanced_fl = tf.reduce_mean(balanced_fl)

        # 4. add a label-smoothing-style cross-entropy term to reduce overfit
        # reference: https://spaces.ac.cn/archives/4493
        nb_classes = len(classes_num)
        final_loss = ((1 - e) * balanced_fl
                      + e * K.categorical_crossentropy(
                          K.ones_like(prediction_tensor) / nb_classes,
                          prediction_tensor))
        return final_loss

    return focal_loss_fixed


# + id="g_F8WxcNYLAH"
class TransformerBlock(tf.keras.Model):
    """One transformer encoder block: self-attention + feed-forward, each
    followed by dropout and a post-norm residual connection."""

    def __init__(self, embed_dim):
        super().__init__()
        # Check embeddings and attention dimensions
        self.attn = MultiHeadSelfAttention(embed_dim, 4)
        self.leakyrelu = tf.keras.layers.LeakyReLU()
        self.ffn = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation=self.leakyrelu),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(64, activation=self.leakyrelu),
            tf.keras.layers.Dense(embed_dim, activation=self.leakyrelu),
        ])
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(0.5)
        self.dropout2 = tf.keras.layers.Dropout(0.5)

    def call(self, inp, training=False):
        x = self.attn(inp)
        x = self.dropout1(x)
        # residual 1 (cast guards against dtype mismatch between input and
        # attention output)
        out_norm1 = self.layernorm1(tf.cast(inp, x.dtype) + x)
        x = self.ffn(out_norm1)
        x = self.dropout2(x)
        return self.layernorm2(x + out_norm1)  # residual 2


class Model(tf.keras.Model):
    """Embedding -> transformer block -> global average pooling -> MLP ->
    softmax classifier over NUM_CLASSES classes."""

    def __init__(self, embed_dim):
        # BUGFIX: the original called super().__init__(self), passing the
        # instance itself as a positional argument to tf.keras.Model.__init__.
        super().__init__()
        self.embedding_layer = tf.keras.layers.Embedding(
            vocab_size + 1, embedding_dim, weights=[embedding_matrix])
        self.transformer_block = TransformerBlock(embed_dim)
        self.average_pooling = tf.keras.layers.GlobalAveragePooling1D()
        self.dropout1 = tf.keras.layers.Dropout(0.5)
        # NOTE(review): 'softmax' on *hidden* layers squashes activations onto
        # a probability simplex and usually cripples learning — 'relu' (or the
        # LeakyReLU used in TransformerBlock) is almost certainly what was
        # intended. Kept as-is pending confirmation, since changing it alters
        # trained behavior.
        self.ffn = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='softmax'),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(128, activation='softmax'),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(64, activation='softmax'),
            tf.keras.layers.Dropout(0.5),
        ])
        self.dense2 = tf.keras.layers.Dense(NUM_CLASSES, activation="softmax")

    def call(self, inputs):
        x = self.embedding_layer(inputs)
        x = self.transformer_block(x)
        x = self.average_pooling(x)
        x = self.dropout1(x)
        x = self.ffn(x)
        return self.dense2(x)


def optimizer():
    """Factory for the training optimizer."""
    return tf.keras.optimizers.Adam()


def create_model(classes):
    """Build and compile the classifier with class-balanced focal loss.

    `classes` is forwarded to categorical_focal_loss as `classes_num`,
    which expects per-class sample *counts* — see NOTE at the call site.
    """
    model = Model(embed_dim=embedding_dim)
    opt = optimizer()
    model.compile(loss=[categorical_focal_loss(classes)],
                  optimizer=opt, metrics=["acc"])
    return model


# + id="1WmN_ceJHlLs"
from sklearn.utils import class_weight

# BUGFIX: recent scikit-learn requires these as keyword arguments.
class_weights = class_weight.compute_class_weight(
    class_weight='balanced', classes=np.unique(y), y=y)
dict_class_weights = dict(enumerate(class_weights))

# + id="PaRlxdrTR0l-"
# NOTE(review): categorical_focal_loss expects per-class sample *counts*,
# but this passes sklearn's balancing *weights* (already inverse-frequency),
# so the loss's internal inversion double-inverts them. If confirmed, pass
# np.bincount(y) instead.
model = create_model(class_weights)
history = model.fit(X_train, y_train, epochs=2,
                    validation_data=(X_test, y_test),
                    class_weight=dict_class_weights)

# + id="yyz1QXC_hi-h"
model.summary()

# + [markdown] id="bZgZ5ye3OfyI"
# ### Evaluation

# + id="g6MjAWLyR4hG"
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix

# BUGFIX: the original called model.predict(y_encoded), i.e. predicted on the
# *labels*. Evaluate on the held-out features against the held-out labels.
y_pred = np.argmax(model.predict(X_test), axis=1)
y_true = np.argmax(y_test, axis=1)
cm = confusion_matrix(y_true=y_true, y_pred=y_pred, labels=[0, 1, 2])
print(cm)
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
                              display_labels=[0, 1, 2]).plot(include_values=True)
plt.savefig('bar.png')

# + id="F74jxUe6R_ds"
from sklearn.metrics import roc_curve, auc

# Use the predicted class *probabilities* as ROC scores (the original
# hardened predictions to one-hot via argmax, which collapses each ROC curve
# to a single operating point) — and score X_test, not the labels.
y_score = model.predict(X_test)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(NUM_CLASSES):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Plot of a ROC curve for each class
for i in range(NUM_CLASSES):
    plt.figure()
    plt.plot(fpr[i], tpr[i], label='ROC curve (area = %0.2f)' % roc_auc[i],
             color='orange')
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic class %s' % i)
    plt.legend(loc="lower right")
    plt.show()
Model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Writing your own python modules

# You will soon realize that for every project there are always a few lines of code that end up being extremely helpful and handy to keep around. These lines end up being the ones applied multiple times and for multiple purposes.
#
# Repeating operations and reusing lines of code is key to programming.

# In this tutorial we will learn how to write python [modules](https://docs.python.org/3/tutorial/modules.html). A module is nothing more than a file (ending in `.py`) containing a collection of functions. A module can be as simple as containing a few functions, or as complicated as `numpy` or `seaborn`.

# We have learned so far how to write functions. Functions are a handy way to reuse the same lines of code.
#
# As data science projects become more complex, or you become more expert at data science projects, the number of functions that end up needing to be carried around can grow fast.
#
# For any sizable project, the number of functions needed to be kept around is larger than the number of functions we are willing to copy and paste in every new script or jupyter notebook.

# To avoid copying and pasting dozens of functions we can use python modules. Modules are collections of functions (and other python statements, such as variable definitions) in a file saved on the current path accessible to `python`.
#
# Just like functions facilitate reusing dozens of lines of code, modules facilitate reusing dozens of functions.

# ---
# ## Our first module

# Python offers a convenient way to keep useful code and functions around by writing and importing modules.
#
# *What is a module?* Python modules are libraries of functions. We have encountered modules all along our tutorials.
Indeed everytime we were invoking an `import` statement we were effectively loading a module. # # *How is a module defined?* A module is a python file (ending with exstension `.py`) with a series of functions definitions (i.e., statement starting with `def`) they live in the current path where your python code isrunning and because of that it can be imported. # # *How does a module work?* Python allows importing any `.py` file containing `def` statements. Importing a module file makes the functions in the file callable and usable (for example in Jupyter notebook). # ### How to write a python module # # Let's learn how to write and use Python modules! (we will start simple.) # # In a nutshell, the general process to write and use a python modules can be summarized as follows: # # **To write a module we need to:** # A) Create a file with exstension `.py`. # B) Write functions inside the file. # C) Save the file on the path accessible to python (for simplicity say the current working directory). # # **To use python modules we need to:** # A) Make sure the module is in the current working directory. # B) import the module by typing `import` and `<moduleName>` # C) Call the functions in the module withthe syntax `moduleName.functionName` # ### MyModule # # Hereafter, we will practice with the process described above. Write a module and then import and use the module. # # This means that we will write a file outside this jupyter notebook. This is something we have not done before and might feel a bit awkward (are we really leaving our safe `Jupyter Notebooks` heaven? Yes). # # Just as a heads start, our module will be called `mymodule`. The module will contain a function that will print the first few words of [Billie Eilish's song "Ocean Eyes"](https://www.youtube.com/watch?v=viimfQi_pUw&ab_channel=BillieEilish). # # So, to learn how to create a module, we will perform the following exercise. 
# # - Open a new Jupyter Notebook (from the File menu, select "New Notebook" # # <img src="./assets/jnb019/open_new_notebook.jpg" alt="Drawing" style="width: 300px;"/> # # - Edit the name of the new notebook and rename it from "untitled" `mymodule` # # <img src="./assets/jnb019/rename_jupyter_notebook.jpg" alt="Drawing" style="width: 400px;"/> # # - Copy and paste the code for the function provided below (`OceanEyes`) into the `mymodule` Jupyter Notebook. Note. Only create a single cell in the new notebook. Make sure no other cell is there. # # <img src="./assets/jnb019/paste_function_code.jpg" alt="Drawing" style="width: 400px;"/> # # - Download the `mymodule` notebook into the current directory with the `.py` file exstension. To do so, from the File menu navigate to "Downloads as" and select the file type "Python *.py*." # # <img src="./assets/jnb019/download_module.jpg" alt="Drawing" style="width: 400px;"/> # # - Save the file in the same directory of the current tutorial. def OceanEyes(): print('Can''t stop starin'' at those ocean eyes') # Alright, after following the instructions above, and if all went well, we should be ready to load the module and use its function. # # To load the module we will tell python to import it. This is a simple as running the following statements: import mymodule # If the previos cell executed withouterrors the module is loaded! # (If error were returned, please read the errors and try to repeatthe previous steps.) # Next, let's use the module! The module we created will "only" print the first few words of a song. But let's it. # # Our module is called is just like any other modules we have used before. For example, we have used `Pandas`, and `Numpy`, those are also modules. # So, let's take a look at our syntax! Our module is called `mymodule` and the function it contains `OceanEyes` so the call goes as follows mymodule.OceanEyes() # Did you get it? Did you get the words from the song? 
If you did, congratulations you just wrote a python module. # # More complex modules just contains more functions, more complex functions etc. Butthe process (given what we have covered so far) can be summarized as above. # ### More about modules # Note now that, the file name is also the name of the module (`mymodule`). The file name has the suffix `.py` appended, that suffix is not used in the code, when calling the module (in other words we do not `import mymodule.py` but we `import mymodule`). # The name for modules imported in the current workspace is alwasy available as the value of the global variable __name__ (a string). # # We can extract themodulename into a string as follows: mymodule.__name__ # Just like we have done in the past with Pandas and Numpy also our module can be imported with a different (shorter) name:| import mymodule as mm # Now the function in mymodule should be called using `mm`, give it a try: mm.OceanEyes() # Functions inside a module can be imported directly and assigned a callable name.We have seen this before ... from mymodule import OceanEyes as oe # Now we can call the function directly, avoiding the sintax `mymodule.<functionName>`. Try the following, it should work: oe() # #### Let's break this # # OK now let's try something that should breakthings for us, but perhaps also help usunderstand. Move the file `mymodule.py` out of the current directly, for example, move it to your desktop instead. # # After doing that try importing the module again. import mymodule # Did that work? Why? # #### Modules can import other modules. # # It is possible to add import operations inside a module. Say for example you want to load `Numpy` every time you load your module. You could add `import numpy as np` at the beginning of your module and the module will automatically add numpy to your current workspace as soon as you call your module. 
# #### The standard modules in Python
#
# Python comes with a library of modules called standard: the [python standard modules library](https://docs.python.org/3/library/). These modules are shipped with the Python3 distribution. This means that you can simply import them without saving, or moving files. The files are pythonmagically there for you.
#
# A list of standard modules can be found [here](https://docs.python.org/3/py-modindex.html). The list is well worth browsing.

# ### In sum
#
# Writing python modules is as easy as writing a file ending with the `.py` extension. The file should contain function definitions. The file could also contain variable definitions or other code statements, an aspect of modules that we have not experimented with in this tutorial.

# ## Make the best rat lab module
#
# To practice with modules we will make an exercise and make a module out of the code from the previous tutorial.
#
# Your goal will be to take these functions, save them as a module, and demonstrate that it runs from within this jupyter notebook.

# First of all we will break down the code into the basic steps and make one function per step. After that, we will make a module, save it to disk and call it to use the functions.
#
# Let's get started.

# In Tutorial 18, we performed four independent operations.
#
# - We loaded reaction time data into a specific format.
#
# - We organized the labels for the strains of rats into the appropriate format for the data.
#
# - We organized the labels for the sexes of rats into the appropriate format for the data.
#
# - We combined the data and labels into a tidy format (one column per variable/label)
# # Below we first describe how we functionalized the code from the previous
# tutorial. We describe each function and what it does and then use them after
# loading the data.
#
# After that, we will open a new notebook and save it as a module. We will
# then repeat the data processing performed with the functions by loading the
# module we just created.

def get_data(filename):
    '''
    get_data()

    Loads rat reaction-time data from a CSV file and reshapes it from
    one-column-per-group format into a single 1D column of values.

    Parameters
    ----------
    filename : str
        Path of the CSV file to load.

    Returns
    -------
    values_col : 1D numpy array of all observations, stacked column by column.
    obs : int
        Number of observations (rows) per group in the original file.
    '''
    import numpy as np
    import pandas as pd
    my_input_data = pd.read_csv(filename)   # read the data
    raw_data = my_input_data.to_numpy()     # convert to numpy array
    obs, grps = raw_data.shape              # get the number of rows and columns
    new_length = obs * grps                 # compute total number of observations
    # column-major ('F') reshape stacks the columns end-to-end
    values_col = np.reshape(raw_data, (new_length, 1), order='F')
    values_col = np.squeeze(values_col)     # squeeze to make 1D
    return values_col, obs


def get_strains(obs=10, names=['wildtype', 'mutant']):
    '''
    get_strains()

    Builds the strain label column matching the reshaped data: each name in
    `names` is repeated over two cells' worth of observations (2 * obs).

    (BUGFIX: the original docstring was copied from another function and
    described a filename argument this function does not take.)
    '''
    import pandas as pd
    strain = pd.Series(names)               # make the short series
    strain = strain.repeat(2 * obs)         # repeat each over two cells' worth of data
    strain = strain.reset_index(drop=True)  # reset the series's index value
    return strain


def get_sexes(obs, sexLabels=['male', 'female']):
    '''
    get_sexes()

    Builds the sex label column matching the reshaped data: each label is
    repeated over one cell's worth of observations (obs), and the whole
    pattern is stacked twice — once per strain.

    (BUGFIX: the original docstring was copied from tidy_data and described
    a filename argument this function does not take.)
    '''
    import pandas as pd
    sexes = pd.Series(sexLabels)                       # make the short series
    sexes = sexes.repeat(obs)                          # repeat each over one cell's worth of data
    sexes = pd.concat([sexes] * 2, ignore_index=True)  # stack or "concatenate" two copies
    return sexes


def tidy_data(values_col, strain, sexes):
    '''
    tidy_data()

    Takes
    1. A one-column-per-cell rat reaction time data (values_col).
    2. A sexes variable labelling each entry in values_col by rat sex.
    3. A strain variable labelling entries in values_col by rat strain.

    Returns one-column-per-variable data adhering to the tidy format.
    '''
    import pandas as pd
    # construct the data frame
    my_new_tidy_data = pd.DataFrame(
        {
            "RTs": values_col,  # make a column named RTs and put the values in
            "sex": sexes,       # ditto for sex
            "strain": strain,   # and for genetic strain
        }
    )
    return my_new_tidy_data


# Your goal is to make a module called `bestratlab.py` out of the above
# functions and to demonstrate that it can run from this notebook.

# BUGFIX: the module name was misspelled ('bestlabrat'); the instructions
# above ask for `bestratlab.py`, so this import could never succeed.
import bestratlab

# ## A note on recycling code
#
# We have learned early in our journey towards Data Science that it is
# convenient to keep helpful code around and recycle it. So far, we have
# learned of at least three ways to recycle code:
#
# - *Loops.* Loops facilitate reusing hundreds of operations. Loops allow
# repeating the same operations over and over, avoiding actually copying and
# pasting the same lines of code.
#
# - *Functions.* Functions facilitate reusing hundreds of lines of code.
# Functions allow reusing the same lines of code for different instances of
# the same situation.
#
# - *Modules.* Modules facilitate reusing hundreds of functions. Modules
# provide a convenient way to save good work — functions — in an accessible
# file. Module files can be loaded, or better, imported into the current
# working python stack, which allows accessing and using the functions saved
# in the module.
tutorial019.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Import libraries

from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("KMeans Cluster Model").getOrCreate()

from pyspark.ml.clustering import KMeans, LDA, BisectingKMeans, GaussianMixture, PowerIterationClustering
from pyspark.ml.feature import VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler
from pyspark.ml.evaluation import ClusteringEvaluator

# # Load and verify data

data = spark.read.csv('seeds_dataset.csv', header=True, inferSchema=True)
data.printSchema()
data.head(3)
for item in data.head(1)[0]:
    print(item)
data.describe().show()
data.columns

# # Data Preprocessing

# Assemble every column into one 'features' vector, then standardise it.
# NOTE(review): only the K-means model below actually uses 'scaledfeatures';
# LDA, Bisecting K-means and GMM fall back to the default 'features' column —
# confirm whether that inconsistency is intentional.
assembler = VectorAssembler(inputCols=data.columns, outputCol='features')
final_data = assembler.transform(data)
scaler = StandardScaler(inputCol='features', outputCol='scaledfeatures')
final_data = scaler.fit(final_data).transform(final_data)
final_data.head(1)

# # Train and Test data

train_data, test_data = final_data.randomSplit([0.7, 0.3])
train_data.show(2)
test_data.show(2)

# # Build and Evaluate Model

# ### K-means
# NOTE(review): the seeds dataset contains three wheat varieties; k=2 may be
# deliberate but is worth confirming.
classifier = KMeans(k=2, featuresCol='scaledfeatures')
model = classifier.fit(train_data)

# Make predictions
predictions = model.transform(test_data)

# Evaluate clustering by computing Silhouette score
print(ClusteringEvaluator().evaluate(predictions))

# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)
predictions.show(3)

# ### LDA Model

# Build and Train Model
classifier = LDA(k=10, maxIter=10)
model = classifier.fit(train_data)

ll = model.logLikelihood(train_data)
print("The lower bound on the log likelihood of the entire corpus: " + str(ll))
lp = model.logPerplexity(train_data)
print("The upper bound on perplexity: " + str(lp))

# Describe topics.
topics = model.describeTopics(3)
print("The topics described by their top-weighted terms:")
topics.show(truncate=False)

# Make predictions
predictions = model.transform(test_data)
predictions.show(3)

# ### Bisecting k-means

# Build and Train Model (constructor kwargs are equivalent to the original
# .setK(2).setSeed(1) chain)
classifier = BisectingKMeans(k=2, seed=1)
model = classifier.fit(train_data)

# Make predictions
predictions = model.transform(test_data)

# Evaluate clustering by computing Silhouette score
print(ClusteringEvaluator().evaluate(predictions))

# Shows the result
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
    print(center)
predictions.show(3)

# ### Gaussian Mixture Model (GMM)

# Build and Train Model (kwargs equivalent to .setK(2).setSeed(538009335))
classifier = GaussianMixture(k=2, seed=538009335)
model = classifier.fit(train_data)

# Make predictions
predictions = model.transform(test_data)

# Evaluate clustering by computing Silhouette score
print(ClusteringEvaluator().evaluate(predictions))
predictions.show(3)

# ### Power Iteration Clustering (PIC)

# PIC works on a weighted affinity graph rather than feature vectors.
df = spark.createDataFrame([
    (0, 1, 1.0),
    (0, 2, 1.0),
    (1, 2, 1.0),
    (3, 4, 1.0),
    (4, 0, 0.1)
], ["src", "dst", "weight"])

# Build and Train Model
classifier = PowerIterationClustering(k=2, maxIter=20, initMode="degree", weightCol="weight")

# Shows the cluster assignment
classifier.assignClusters(df).show()
Seed Type Clustering .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # [< 01 - Intro to Bookworm](01%20-%20Intro%20to%20Bookworm.ipynb) | [Home](https://github.com/harrisonpim/bookworm) | [03 - Visualising and Analysing Networks >](03%20-%20Visualising%20and%20Analysing%20Networks.ipynb) # # # Character Building # We want to be able to automate the entirity of the bookworm process, and manually inputting a list of characters to the program feels a lot like cheating. Getting over this hurdle means that we need to form a list of characters (or at least _plausible_ characters) from the raw text of the book alone. This is an inherently rough task and any solution is going to be inexact, so it's reasonable to feel sceptical about its feasibility at this point. However, I think that the results at the end of the process make it worth explaining. # # First we need to import everything from bookworm that we went through in the last notebook, and some new stuff that we'll use to interpret the novel's language - most notably NLTK and Spacy. from bookworm import * # + import pandas as pd import networkx as nx import spacy import nltk import string # - # Lets start by loading in a book and splitting it into sentences book = load_book('data/raw/fellowship_of_the_ring.txt') sequences = get_sentence_sequences(book) # ### Name Extraction # The solution uses a little bit of creativity and a whole load of brute-force. # A sensible place to start when looking for character names is to go and find _proper nouns_. Part-of-speech tagging is a pretty well known NLP technique which does just that - it assigns a tag to each word in a string according to its _type_ - noun, verb, adverb etc ([you can read more about part-of-speech tagging here](https://en.wikipedia.org/wiki/Part-of-speech_tagging)). 
[Spacy](https://spacy.io/) has a pretty good POS tagger - having messed around with both Spacy and NLTK, I can say that with confidence that Spacy's is currently the more adept tagger. If we run this over the whole book and grab everything tagged as `'PROPN'` we'll return a list of plausible proper nouns. # This process could take a while, given that we have this many individual words to interpret and tag: len(book.split()) # It makes more sense to start with a stripped down list of the _unique_ words which appear in the text. That way, we only have to tag each word once. We should also get strip out all punctuation from the words before we start. # + remove_punctuation = lambda s: s.translate(str.maketrans('', '', string.punctuation+'’')) words = [remove_punctuation(p) for p in book.split()] unique_words = list(set(words)) len(unique_words) # - # Now we can grab those `'PROPN'` tagged words # + nlp = spacy.load('en') candidates = [word.text for word in nlp(' '.join(unique_words)) if word.pos_ == 'PROPN'] len(candidates) # - # We're looking for names, and it's unlikely that our English-language books will include any characters whose names are shorter than 2 characters. We should get rid of those from our list of proper nouns... candidates = [c for c in candidates if len(c) > 2] len(candidates) # I've noticed that Spacy will occasionally tag non-title-case words as proper nouns, which is odd. Character names will typically be capitalised so we'll get rid of anything which isn't. We're relying on authors' 'proper' use of English language conventions here, which aren't always going to be perfect, but that's part of the joy of this project; we can still make a decent approximation of the truth despite the messiness of the initial data. 
candidates = [c for c in candidates if c.istitle()] len(candidates) # I've also noticed that when dealing with characters in so many contexts, we'll sometimes get characters appearing as themselves and as posessives (ie `Frodo` and `Frodo's`). After stripping out punctuation we'll see `Frodo` and `Frodos`, so we just have to get rid of those words which end in `s` but are otherwise duplicates of other words already in the list. candidates = [c for c in candidates if not (c[-1] == 's' and c[:-1] in candidates)] len(candidates) # NLTK includes a load of _stop words_ which we can compare our list against. Stop words are commonly occurring English words like 'the' or 'in' which are very unlikely to overlap with our characters' names. Strip them out... # + stopwords = nltk.corpus.stopwords.words('english') candidates = list(set([c.title() for c in [c.lower() for c in candidates]]) - set(stopwords)) len(candidates) # - candidates[:10] # All that remains to be done is to get them into the usual form that bookworm expects (a list of tuples). We'll call this our list of characters. characters = [tuple([character + ' ']) for character in set(candidates)] # Obviously this is a _significantly_ longer list than what we usually deal with when we list characters manually, and it's a lot less precise (fore- and surnames won't be connected, nicknames won't be attached to their characters etc), but it's a pretty good trade-off for the lack of required contextual knowledge before starting the analysis. You'll also notice that a few 'names' aren't actually names - they're normal words that have somehow managed to slip through the gauntlet. As we move into the typical bookworm analysis process below, we'll see that this doesn't significantly affect the final results. 
# # # Standard Analysis & Comparisons to Manual Character Assignment # We can now throw our list of automatically collected characters and our usual sequences into the usual bookworm analysis process: finding connections between characters and sequences and then connections between characters and characters. Finally we'll set up a dataframe of interactions which can be passed to NetworkX to visualise (we'll go through the last bit of this process in much more detail in [the next notebook](03%20-%20Visualising%20and%20Analysing%20Networks.ipynb). # # Because we're now dealing with a much longer list of characters, the computation will take longer (the cooccurence matrix calculation scales quadratically with the number of characters). It should still run comfortably on a decent laptop. df = find_connections(sequences, characters) cooccurence = calculate_cooccurence(df) # the next step gets the network into a form that is neatly interpreted by NetworkX interaction_df = get_interaction_df(cooccurence, threshold=1) interaction_df.sample(5) # We can import a few things to help visualise the graph that we've put together # %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns sns.set_style('whitegrid') plt.rcParams['figure.figsize'] = (12,9) # + G = nx.from_pandas_dataframe(interaction_df, source='source', target='target') nx.draw(G, with_labels=True) # - # We can calculate the importance of the characters using the pagerank algorithm included in NetworkX pd.Series(nx.pagerank(G)).sort_values(ascending=False)[:10] # I was stunned when I saw the results that this process produces. It's such a rough process for character naming, but all of the main characters' names are there, the process generalises to many different novels, authors and styles, and the weights attributed to the character interactions all seem quite reasonable. 
# # I was expecting all of the hundreds (often thousands) of plausible names to be included in the graph, lurking around, connecting legitimate characters who have no right to be connected according to the source text. I was also expecting the non-character-name-words to be much more prevalent. I'd forgotten, though, that the bookworm process relies on the close coocurrence of characters to form connections - those illegitimate, non-character-name-words are therefore very unlikely to accumulate significant scores with many (or indeed any) legitimate characters so the imposition of a low threshold sweeps away any spurious connections. In the end we're left with a very reasonable result. # # ### Place Names, Group Names # Another lovely feature of this process is the inclusion of place names (`Minas` and `Road` above). # I've [observed in the past](https://twitter.com/hmpim/status/909680260270231552) that in cases where we have hundreds of characters with interwoven narratives or storylines, the graph begins to resemble the geography of the world that the characters inhabit. In the case linked to above, the graph for _A Game of Thrones_ begins to resemble the map of the fantasy world of Westeros/Essos, with stories (and narrative clusters) separated by the Narrow Sea. # The example above is particularly interesting in that the _fantacy races_ (`Elves`, `Men`) of the characters have been extracted as names and emerge as being structurally important to the network. # # ### Caveats # It's not perfect yet... # - The algorithm doesn't know whether names represent characters or places or something entrirely different. In some ways this is great, but in others it can be frustrating. # - Although we're surprisingly unlikely to see illegitimate character names in the final graph, it does happen occasionally. 
# - We can't yet automatically define the limit for what a 'significant'
# relationship is in a novel, and the threshold for the final visualisation
# usually involves a bit of tuning before it settles at a reasonable level.
# - The process isn't that fast, and a long list of plausible character names
# contributes to significant slow downs later on.

# # Wrapping Up

# We can wrap up all of our useful code from the sections above into a single
# function to extract character names from books:

def extract_character_names(book):
    '''
    Automatically extracts lists of plausible character names from a book

    Parameters
    ----------
    book : string (required)
        book in string form (with original upper/lowercasing intact)

    Returns
    -------
    characters : list
        list of plausible character names, as 1-tuples of 'Name ' strings
        (the trailing space matches the form the rest of bookworm expects)
    '''
    # NOTE(review): spacy.load('en') is the legacy shortcut (spaCy < 3);
    # newer installs need 'en_core_web_sm'. Kept as-is for compatibility
    # with the environment the notebook was written for.
    nlp = spacy.load('en')
    stopwords = nltk.corpus.stopwords.words('english')

    # strip punctuation and tag each *unique* word once (much cheaper than
    # tagging the full text)
    words = [remove_punctuation(w) for w in book.split()]
    unique_words = list(set(words))

    # keep proper nouns only
    characters = [word.text for word in nlp(' '.join(unique_words))
                  if word.pos_ == 'PROPN']
    # names shorter than 3 characters are implausible
    characters = [c for c in characters if len(c) > 2]
    # real names are capitalised
    characters = [c for c in characters if c.istitle()]
    # drop possessive duplicates ('Frodos' when 'Frodo' is already present)
    characters = [c for c in characters
                  if not (c[-1] == 's' and c[:-1] in characters)]
    # normalise case and remove common English stop words
    characters = list(set([c.title() for c in [c.lower() for c in characters]])
                      - set(stopwords))
    return [tuple([c + ' ']) for c in set(characters)]


book = load_book('data/raw/fellowship_of_the_ring.txt', lower=False)

extract_character_names(book)[-10:]

# %%timeit
extract_character_names(book)

# As I said, the process isn't instantaneous. However, it's barely been
# optimised at this stage and the payoff for those few seconds is _enormous_
# if we're interpreting an unseen book.
# # In the next notebook, we'll have a go at doing some deeper analysis of the networks we've built and visualising them with NetworkX and d3.js.

# [< 01 - Intro to Bookworm](01%20-%20Intro%20to%20Bookworm.ipynb) | [Home](https://github.com/harrisonpim/bookworm) | [03 - Visualising and Analysing Networks >](03%20-%20Visualising%20and%20Analysing%20Networks.ipynb)
02 - Character Building.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/nc9107/Deep-Learning-Tensorflow/blob/main/CIPHAR10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="vRC3w8gDUWj0" # # Importing Dependencies # + id="lFNOR51LT0pu" import tensorflow as tf from tensorflow.keras import layers,datasets, models import matplotlib.pyplot as plt # + [markdown] id="f6_YHQ8WVCSR" # # Downloading and preparing data # + colab={"base_uri": "https://localhost:8080/"} id="lP19CzH-VE5A" outputId="4f0c8397-5de0-432d-ab1c-32a19504a2b6" (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() train_images, test_images = train_images/255.0, test_images/255.0 # + [markdown] id="odRz3SWPViNd" # # Verifying Data # + colab={"base_uri": "https://localhost:8080/", "height": 589} id="qiaBhZnpVoRm" outputId="269abdd3-62a4-4c95-f960-9b05d0e10f8b" class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5, 5, i+1) plt.xticks([]) plt.yticks([]) plt.imshow(train_images[i]) plt.xlabel(class_names[train_labels[i][0]]) plt.show() # + [markdown] id="vV97jZl7XECJ" # # Model Creation # + id="Ss5XQ1lPXFzx" model = models.Sequential([ layers.Conv2D(32, (3,3), activation='relu', input_shape=(32,32,3)), layers.MaxPooling2D((2,2)), layers.Conv2D(64,(3,3), activation='relu'), layers.MaxPooling2D((2,2)), layers.Conv2D(64, (3,3), activation='relu'), ]) # + colab={"base_uri": "https://localhost:8080/"} id="k6cSCRjrYuMm" outputId="d3c2d0af-ab02-43f4-9b54-5245518061ec" model.summary() # + [markdown] id="432kcQOmYxMn" # # Adding Dense layers after 
Convolutions # + id="sVCgjAJGY2a9" model.add(layers.Flatten()) model.add(layers.Dense(64, activation='relu')) model.add(layers.Dense(10)) # + colab={"base_uri": "https://localhost:8080/"} id="Tth5xTCxZiov" outputId="d302825e-ae47-4f02-c90e-568b77384729" model.summary() # + [markdown] id="TbRQRyIeZ3oK" # # Compile model # + [markdown] id="f89CC6gEZ6um" # # + id="lJ-WHA6FZ7Yn" model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) # + [markdown] id="vi1GxEsQaNLW" # # Training the model # + colab={"base_uri": "https://localhost:8080/"} id="90hBMvV3bgC8" outputId="a9fcca39-6042-4b6f-f5e4-5076f9b253c1" history = model.fit(train_images, train_labels, epochs=15, validation_data=(test_images, test_labels)) # + [markdown] id="Vj9e0nl4b7cf" # # Model Evaluation # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="WN5EUwMOb99c" outputId="f84f6669-f0b8-46e6-cf96-6aab6945be8b" plt.plot(history.history['accuracy'], label='accuracy') plt.plot(history.history['val_accuracy'], label='val_accuracy') plt.xlabel('epoch') plt.ylabel('accuracy') plt.ylim([0.5, 1]) plt.legend(loc='lower right') test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2 )
CIPHAR10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Scope # --- # In this notebook, I'll describe # - name_scope # - variable_scope # # ## [tf.name_scope](https://www.tensorflow.org/api_docs/python/tf/name_scope) # The `tf.name_scope` defines the group of some variables in a graph. # It add a prefix to the operation or variable made by using `tf.Variable`. # __Note that the `tf.name_scope` is ignore by `tf.get_variable`.__ # Some google developer recommends that we should use variable_scope and not use name_scope because of the reason above. # # ## [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) # > A context manager for defining ops that creates variables (layers). # This context manager validates that the (optional) values are from the same graph, ensures that graph is the default graph, and pushes a name scope and a variable scope. # If name_or_scope is not None, it is used as is. If name_or_scope is None, then default_name is used. In that case, if the same name has been previously used in the same scope, it will be made unique by appending _N to it. # # ## tf.name_scope vs. tf.variable_scope # The concepts of both scope are the same. They define the prefix of the operation or variable. Both scopes have the same effect on all operations as well as variables created using tf.Variable, i.e., the scope will be added as a prefix to the operation or variable name. 
import tensorflow as tf print("tensorflow version: ", tf.__version__) # ## Examples # ### - `tf.name_scope` / `tf.variable_scope` # `tf.get_variable` ignores the scope defined by `tf.name_scope()` # + # clear graph tf.reset_default_graph() with tf.name_scope("name_scope"): # tf.Variable() v1 = tf.Variable([1,2,3], name="var1", dtype=tf.float32) # tf.get_variable() v2 = tf.get_variable("var2", shape=[3,], dtype=tf.float32) # define ops add = tf.add(v1, v2) print(v1.name) print(v2.name) print(add.name) # - # `tf.get_variable` doesn't ignore the scope defined by `tf.variable_scope` # + # clear graph tf.reset_default_graph() with tf.variable_scope("variable_scope"): # tf.Variable() v1 = tf.Variable([1,2,3], name="var1", dtype=tf.float32) # tf.get_variable() v2 = tf.get_variable("var2", shape=[3,], dtype=tf.float32) # define ops add = tf.add(v1, v2) print(v1.name) print(v2.name) print(add.name) # - # Duplication of the variable name. # + # clear graph tf.reset_default_graph() with tf.variable_scope("variable_scope"): v1 = tf.Variable([1,2,3], name="var1", dtype=tf.float32) v2 = tf.get_variable("var2", shape=[3,], dtype=tf.float32) add = tf.add(v1, v2) print(v1.name) print(v2.name) print(add.name) print("\n") with tf.variable_scope("variable_scope"): # The scope name will be variable_scope_1 because variable_scope/v1 is already exists. v1 = tf.Variable([1,2,3], name="var1", dtype=tf.float32) # The var3 will be added to "variable_scope". v3 = tf.get_variable("var3", shape=[3,], dtype=tf.float32) # The scope name will be variable_scope_1 because variable_scope/add is already exists. add = tf.add(v1, v3) print(v1.name) print(v3.name) print(add.name) # - # ### [Sharing variables](https://www.tensorflow.org/guide/variables) # Example: Sharing variable `w` in the calculation bellow. 
# - output1 = input_A $\cdot$ w # - output2 = input_B $\cdot$ w # - final_output = output1 + output2 # # Failure Case: # + # clear graph tf.reset_default_graph() input_A = tf.placeholder(tf.float32, shape=[3,1], name="input_A") input_B = tf.placeholder(tf.float32, shape=[3,1], name="input_B") # define ops def matmul(input_vector): with tf.variable_scope("matmul"): w = tf.get_variable("weight", shape=(1,3), initializer=tf.initializers.ones) return tf.matmul(input_vector, w) output1 = matmul(input_A) output2 = matmul(input_B) # This will be error because matmul/weight already exists. final_output = tf.add(output1, output2) # - # Success case: # + # clear graph tf.reset_default_graph() input_A = tf.placeholder(tf.float32, shape=[3,1], name="input_A") input_B = tf.placeholder(tf.float32, shape=[3,1], name="input_B") # define ops def matmul(input_vector, reuse = False): with tf.variable_scope("matmul", reuse=reuse): w = tf.get_variable("weight", shape=(1,3), initializer=tf.initializers.ones) print(w.name) return tf.matmul(input_vector, w) output1 = matmul(input_A) output2 = matmul(input_B, reuse = True) # This will be error because matmul/weight already exists. final_output = tf.add(output1, output2)
notebooks_tf1/1_TensorflowBasic/4_Scope.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![classify dandelions and grass, image from Pixabay](https://cdn.pixabay.com/photo/2018/05/20/16/13/dandelion-3416140_960_720.jpg "image from Pixabay") # # Create an Image Dataset and Train an Image Classifier using FastAI # # *by: <NAME>. Inspired by [Lesson 2](https://course.fast.ai/videos/?lesson=2) of FastAI. Thanks to <NAME> and <NAME>* # # In this tutorial, we'll create an image dataset from Google Images and train a state-of-the-art image classifier extremely easily using the FastAI library. The FastAI library is built on top of the PyTorch deep learning framework, and provides commands that make training an image classifier very intuitive. # # For this tutorial, we'll build a dandelion vs. grass classifier. Let's get started! # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" from fastai.vision import * import torch import fastai import torchvision # - print(torch.__version__) print(fastai.__version__) print(torchvision.__version__) # ## **Create an Image Dataset from Google Images** # # Note: this Kaggle kernel already has the dataset created from these instructions, so if you don't want to create your own dataset, feel free to skip this section and move straight to [6] # # **How to save a list of Google Image URLs into a csv file** # # Go to Google Images and search for *grass*. Initially, there will be ~50 images, so scroll down and press the button 'Show more results' at the end of the page until ~100 images have loaded. # # Now you must run some Javascript code in your browser which will save the URLs of all the images you want for you dataset. # # Press CtrlShiftJ in Windows/Linux and CmdOptJ in Mac, and a small window the javascript 'Console' will appear. 
That is where you will paste the JavaScript commands. # # Run the following commands in the prompt: # # ``` # urls = Array.from(document.querySelectorAll('.rg_di .rg_meta')).map(el=>JSON.parse(el.textContent).ou); # window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n'))); # ``` # # The browser will download the file. Name the file *grass.csv*. # # Repeat the same steps above for *dandelion*, and save the respective file as *dandelion.csv*. # # **Upload the URLs as a dataset in Kaggle** # # In this Kaggle kernel, go to File -> Add or upload data # # In the top right corner, press Upload # # Now, add *grass.csv* and *dandelion.csv*. Name the dataset *greenr*. # Now, we're going to do a bit of hacky work to get things to work in Kaggle. The folder /kaggle/input is read-only, and we need to manipulate that folder to download the image URLs into our folder, so we're going to move the files to another folder, /kaggle/working. That's actually the output folder, but we'll let our dataset reside there and create the outputs in the same folder. Run the following command: # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" # !cp -r /kaggle/input/greenr /kaggle/working # - # Now, run the following commands to download the images from URLs into our dataset folder /kaggle/working/greenr/ using the *download_images* function. # # Then, we'll make sure all the images are valid using *verify_images*. # # After that, we'll create our dataset from *ImageDataBunch*. 
# # These are all FastAI commands that make it really easy to create a dataset :) # + classes = ['grass','dandelion'] folder = 'grass' file = 'grass.csv' path = Path('/kaggle/working/greenr/') dest = path/folder dest.mkdir(parents=True, exist_ok=True) download_images(path/file, dest, max_pics=200) folder = 'dandelion' file = 'dandelion.csv' path = Path('/kaggle/working/greenr/') dest = path/folder dest.mkdir(parents=True, exist_ok=True) download_images(path/file, dest, max_pics=200) for c in classes: print(c) verify_images(path/c, delete=True, max_size=500) np.random.seed(42) data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2, ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) # - # Now let's view our data and see that we have a dataset. Congrats, you now have created your own image dataset! data.classes data.show_batch(rows=3, figsize=(7,8)) data.classes, data.c, len(data.train_ds), len(data.valid_ds) # ## Train our Image Classifier # Now, let's train an image classifer from our dataset. After this, we'll have a model that classifies dandelions vs. grass. # First, let's import a ResNet34 model using *cnn_learner*. ResNet34 is a pre-trained image classifier that works really well out of the box, and we're simply going to train that model on our dataset to get it to become an expert at classifying dandelions vs. grass! # # We'll train on the dataset, find the best learning rate, and save our model using the following commands: learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4)) learn.save('stage-2') learn.load('stage-2'); # ## Interpretation # Let's see how well our model did using a confusion matrix. 
interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() # ## Cleaning Up # # Some of our top losses aren't due to bad performance by our model. There are images in our data set that shouldn't be. # # Using the `ImageCleaner` widget from `fastai.widgets` we can prune our top losses, removing photos that don't belong. # # Simply mark `delete` to any image that doesn't belong # + from fastai.widgets import * db = (ImageList.from_folder(path) .split_none() .label_from_folder() .transform(get_transforms(), size=224) .databunch() ) learn_cln = cnn_learner(db, models.resnet34, metrics=error_rate) learn_cln.load('stage-2'); ds, idxs = DatasetFormatter().from_toplosses(learn_cln) ImageCleaner(ds, idxs, path) # - # Let's also remove duplicates using this widget: ds, idxs = DatasetFormatter().from_similars(learn_cln) ImageCleaner(ds, idxs, path, duplicates=True) # Awesome work! Now, let's retrain our model on our pruned dataset and make it even more accurate! np.random.seed(42) data = ImageDataBunch.from_csv(path, folder=".", valid_pct=0.2, csv_labels='cleaned.csv', ds_tfms=get_transforms(), size=224, num_workers=4).normalize(imagenet_stats) learn = cnn_learner(data, models.resnet34, metrics=error_rate) learn.fit_one_cycle(4) learn.save('stage-1') learn.unfreeze() learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-4)) learn.save('stage-2') learn.load('stage-2'); # Let's see if our confusion matrix has improved: interp = ClassificationInterpretation.from_learner(learn) interp.plot_confusion_matrix() # ## Export Our Model # Great work! Now, let's export our model, which will create a file named `export.pkl` in our `/kaggle/working/greenr` directory. We can now use this model to make predictions on other images, and deploy it into production! 
Let's try it out: learn.export() defaults.device = torch.device('cpu') img = open_image(path/'grass'/'00000019.jpg') img learn = load_learner(path) pred_class,pred_idx,outputs = learn.predict(img) pred_class # If you got *grass* above, then your model works! Congrats, you've now created your own image dataset and trained your own image classifier, using FastAI! # # If you'd like to export your model into production, simply download the `export.pkl` file and move it to wherever you want to make your predictions, like your phone or a web application :) # # For a live demo of a deployed web app of greenr and its source code, please visit my repository! # # [https://github.com/btphan95/greenr](https://github.com/btphan95/greenr)
greenr-train.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv # language: python # name: venv # --- # + # %reload_ext autoreload # %autoreload 2 from paths import RAW_PATH, TREAT_PATH, OUTPUT_PATH, FIGURES_PATH, THEMES_PATH from copy import deepcopy import numpy as np import pandas as pd pd.options.display.max_columns = 999 import warnings warnings.filterwarnings('ignore') # Plotting import plotly import plotly.graph_objs as go import cufflinks as cf plotly.offline.init_notebook_mode(connected=True) # Setting cufflinks import textwrap import cufflinks as cf cf.go_offline() cf.set_config_file(offline=False, world_readable=True) # Centering and fixing title def iplottitle(title, width=40): return '<br>'.join(textwrap.wrap(title, width)) # Adding custom colorscales (to add one: themes/custom_colorscales.yaml) import yaml custom_colorscales = yaml.load(open(THEMES_PATH / 'custom_colorscales.yaml', 'r')) cf.colors._custom_scales['qual'].update(custom_colorscales) cf.colors.reset_scales() # Setting cuffilinks template (use it with .iplot(theme='custom') cf.themes.THEMES['custom'] = yaml.load(open(THEMES_PATH / 'cufflinks_template.yaml', 'r')) # - # # Carrega coisas # ### Config config = yaml.load(open('../src/configs/config.yaml', 'r')) # ### Capacidade Hospitalar dos municípios from src import loader cities = loader._read_cities_data('br', config) cities = pd.merge( cities['cities_population'], cities['health_infrastructure'], on='city_id', how='left', suffixes=('', '_y')) cities = cities.drop([c for c in cities.columns if '_y' in c], 1) # ### Histórico de Todos os Casos cases = pd.read_csv('https://data.brasil.io/dataset/covid19/caso_full.csv.gz') cases[cases['city_ibge_code'] == 3550308.0].sort_values(by='date')\ .set_index('date')['last_available_confirmed'].iplot( title='Evolução de Casos em São Paulo', yTitle='Número de casos', theme='custom', width=5 ) 
# ### Últimos casos last_cases = src.loader._read_cases_data('br', config) last_cases # # Modelo from src.model import simulator # ### Carrega Parâmetros params = dict() params['population'] = { 'N': 100, 'I': 1, 'D': 0, 'R': 0 } params['strategy'] = { 'isolation': 1, 'lockdown': 90} params['supply'] = { 'n_beds': 9, 'n_ventilators': 1 } # ### Roda Modelo dfs = simulator.run_simulation(params['population'], params['strategy'], config) dfs['worst'].head() # ### Calcula dday simulator.get_dday(dfs, 'I2', params['supply']['n_beds'])
analysis/Exemplo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 循环 # - 循环是一种控制语句块重复执行的结构 # - while 适用于广度遍历 # - for 开发中经常使用 # ## while 循环 # - 当一个条件保持真的时候while循环重复执行语句 # - while 循环一定要有结束条件,否则很容易进入死循环 # - while 循环的语法是: # # while loop-contunuation-conndition: # # Statement # ## 示例: # sum = 0 # # i = 1 # # while i <10: # # sum = sum + i # i = i + 1 # ## 错误示例: # sum = 0 # # i = 1 # # while i <10: # # sum = sum + i # # i = i + 1 # - 一旦进入死循环可按 Ctrl + c 停止 a = "abcd" i =0 while i <= 3: print(a[i],end='') i += 1 # ## EP: # ![](../Photo/143.png) # ![](../Photo/144.png) number = eval(input("aaa")) max = number while number != 0: number = eval(input("bbb")) if number > max: print("max",max) print("number",number) # # 验证码 # - 随机产生四个字母的验证码,如果正确,输出验证码正确。如果错误,产生新的验证码,用户重新输入。 # - 验证码只能输入三次,如果三次都错,返回“别爬了,我们小网站没什么好爬的” # - 密码登录,如果三次错误,账号被锁定 # import random import sys i = 0 while i< 3: a = random.randint(97,122) a1 = random.randint(65,90) b1 = chr(a) b2 = chr(a1) a2 =[b1,b2] a3 = random.choice(a2) print(a3,end = '') #i += 1 c = random.randint(97,122) c1 = random.randint(65,90) d1 = chr(c) d2 = chr(c1) d3 =[d1,d2] d4 = random.choice(d3) print(d4,end = '') ############# e = random.randint(97,122) e1 = random.randint(65,90) f1 = chr(e) f2 = chr(e1) f3 =[f1,f2] f4 = random.choice(f3) print(f4,end='') ########### g = random.randint(97,122) g1 = random.randint(65,90) h1 = chr(g) h2 = chr(g1) h3 =[h1,h2] h4 = random.choice(h3) print(h4,end = '') z = input("please input you yanzhengma: ") z1 = z[0] z2 = z[1] z3 = z[2] z4 = z[3] if (z[0] == a3 and z[1] == d4 and z[2] == f4 and z[3] == h4): print("猜对了") i += 1 # sys.exit() else: print("猜错了兄弟") i +=1 if i == 3: print("别爬了兄弟") # ## 尝试死循环 # ## 实例研究:猜数字 # - 你将要编写一个能够随机生成一个0到10之间的且包括两者的数字程序,这个程序 # - 提示用户连续地输入数字直到正确,且提示用户输入的数字是过高还是过低 import random a = random.randint(0,10) z = 1 print("生成数:",a) 
while z==1: s =int(input("pleaes input")) if s>a: print("大了") elif s<a: print("小了") else: print("对了") break # ## 使用哨兵值来控制循环 # - 哨兵值来表明输入的结束 # - ![](../Photo/54.png) # ## 警告 # ![](../Photo/55.png) # ## for 循环 # - Python的for 循环通过一个序列中的每个值来进行迭代 # - range(a,b,k), a,b,k 必须为整数 # - a: start # - b: end # - k: step # - 注意for 是循环一切可迭代对象,而不是只能使用range # # 在Python里面一切皆对象 # ## EP: # - ![](../Photo/145.png) # ## 嵌套循环 # - 一个循环可以嵌套另一个循环 # - 每次循环外层时,内层循环都会被刷新重新完成循环 # - 也就是说,大循环执行一次,小循环会全部执行一次 # - 注意: # > - 多层循环非常耗时 # - 最多使用3层循环 # ## EP: # - 使用多层循环完成9X9乘法表 # - 显示50以内所有的素数 # ## 关键字 break 和 continue # - break 跳出循环,终止循环 # - continue 跳出此次循环,继续执行 # ## 注意 # ![](../Photo/56.png) # ![](../Photo/57.png) # # Homework # - 1 # ![](../Photo/58.png) sum = 0 zhengshu =0 fushu = 0 z =1 while z==1: s =int(input("pleaes input: ")) if s > 0: zhengshu = zhengshu + 1 elif s < 0: fushu = fushu + 1 else: break sum += s print("正数的个数为:",zhengshu) print("负数的个数为:",fushu) print("平均值为:",sum/(zhengshu+fushu)) # - 2 # ![](../Photo/59.png) money = 10000 i =0 sum =0 while i< 10: s = money*5/100 i = i+1 money +=s sum = sum+money print("第十年总学费为:",round(money)) # - 3 # ![](../Photo/58.png) sum = 0 zhengshu =0 fushu = 0 z =1 while z==1: s =int(input("pleaes input: ")) if s > 0: zhengshu = zhengshu + 1 elif s < 0: fushu = fushu + 1 else: break sum += s print("正数的个数为:",zhengshu) print("负数的个数为:",fushu) print("平均值为:",sum/(zhengshu+fushu)) # - 4 # ![](../Photo/60.png) count = 0 for num in range(100,1001): if (num%5==0 and num%6==0): if count % 10 ==0: print("") print( num,end =' ') count = count+1 # - 5 # ![](../Photo/61.png) # + i = 0 a = 0 while(a==0): s = i*i if s>12000: print("最小整数平方大于12000的是: ",i) break i = i+1 i = 200 while i < 6000: if pow(i,3) < 12000: print("最大整数立方大于12000的是:",i) break else: i -= 1 # - # - 6 # ![](../Photo/62.png) # + a = int(input("请输入贷款额: ")) b = int(input("请输入贷款周期: ")) a1 = float(a) b1 = float(b) print(a) c1 = 0.05 print("Interest Rate Monthly Payment Total Payment") while(i!=0.08): c1=c1+c1*1/8 # 
- # - 7 # ![](../Photo/63.png) s = 0.0 for i in range(1,50001): s = s+ 1.0/i i = i+1 print("左到右",s) s=0.0 n =50000 while(n>0 ): s = 1/n+s n = n-1 print("右到左",s) # - 8 # ![](../Photo/64.png) result=0 fenzi = 0 fenmu = 0 for i in range(3,100): if(i%2)==1: fenmu = i fenzi = i-2 result=fenzi/fenmu+result print("最终结果为:",result) # - 9 # ![](../Photo/65.png) # - 10 # ![](../Photo/66.png) # - 11 # ![](../Photo/67.png) count = 0 for i in range(1,8): for j in range(1,8): if (i != j ) : count = count+1 print(round(count/2)) # - 12 # ![](../Photo/68.png) 1 **2 + 2**2 + 3**2 + 5.5 *2 + 5.6 **2 + 6 **2 + 7**2 + 8**2 + 9 **2 + 10 **2 1 + 2 + 3 + 5.5 + 5.6 + 6 + 7 + 8+ 9 + 10
7.19.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.8 64-bit (''yahoo'': venv)' # name: python_defaultSpec_1598476105066 # --- # # Example of how to use the OHLC object # # This is an async library so there is a bit more boilerplate code to get the methods and coroutines running in your code. You should *await* the response. import sys; sys.path.insert(0, '..') import asyncio # ## Getting started # Let's suppose you want to get the last five days of daily candles on Microsoft *MSFT* ... # + tags=[] # Import from yahoo_finance_async import OHLC, Interval, History # Fetch coroutine response = await OHLC.fetch('MSFT', interval=Interval.DAY, history=History.FIVE_DAYS) response # - # You will see that the main OHLC candles are returned as a list of candle dictionaries under the `candles` key in the response dictionary. # # **Note** that the most recent candle may have a slightly different timestamp as in the example below of the last candle which should be printing the open time 13:30 UTC but seems to be out of sync until it is closed. This is somethign that the Yahoo API gives and I have not tried to fix it. response['candles'] # The meta data for the API call is returned under the `meta` key in the response dictionary. This may be useful to confirm that you have indeed requested what you wanted. response['meta'] # That's it. Very simple. # ## Requesting different symbols # The Yahoo Finance API has a huge number of different stocks available on it. # # You can look up the stock symbols at https://finance.yahoo.com/lookup # ## Different candle periods and history # The `Interval` object contains the different API options you can use for different candle lengths Interval [(e.name, e.value) for e in Interval] # Similarly the `History` object shows the available look-back periods for collecting candle data. 
Note that some long history with short candle intervals may throw errors or return vast amounts of data. If in doubt and you want a lot of data, use the `History.MAX` option. History [(e.name, e.value) for e in History]
examples/ohlc-example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # + import boto3 import sagemaker import pandas as pd sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name sm = boto3.Session().client(service_name="sagemaker", region_name=region) iam = boto3.Session().client(service_name="iam", region_name=region) ec2 = boto3.Session().client(service_name="ec2", region_name=region) # - # %store -r processed_train_data_s3_uri try: processed_train_data_s3_uri except NameError: print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] Please run the notebooks in the PREPARE section before you continue.") print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print(processed_train_data_s3_uri) # %store -r processed_validation_data_s3_uri try: processed_validation_data_s3_uri except NameError: print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] Please run the notebooks in the PREPARE section before you continue.") print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print(processed_validation_data_s3_uri) # %store -r processed_test_data_s3_uri try: processed_test_data_s3_uri except NameError: print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] Please run the notebooks in the PREPARE section before you continue.") print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print(processed_test_data_s3_uri) # %store -r max_seq_length print(max_seq_length) # # Specify the Dataset in S3 # We are using the train, 
validation, and test splits created in the previous section. # + print(processed_train_data_s3_uri) # !aws s3 ls $processed_train_data_s3_uri/ # + print(processed_validation_data_s3_uri) # !aws s3 ls $processed_validation_data_s3_uri/ # + print(processed_test_data_s3_uri) # !aws s3 ls $processed_test_data_s3_uri/ # - # # Specify S3 `Distribution Strategy` # + from sagemaker.inputs import TrainingInput s3_input_train_data = TrainingInput(s3_data=processed_train_data_s3_uri, distribution="ShardedByS3Key") s3_input_validation_data = TrainingInput(s3_data=processed_validation_data_s3_uri, distribution="ShardedByS3Key") s3_input_test_data = TrainingInput(s3_data=processed_test_data_s3_uri, distribution="ShardedByS3Key") print(s3_input_train_data.config) print(s3_input_validation_data.config) print(s3_input_test_data.config) # - # # Setup Hyper-Parameters for Classification Layer print(max_seq_length) epochs = 1 learning_rate = 0.00001 epsilon = 0.00000001 train_batch_size = 128 validation_batch_size = 128 test_batch_size = 128 train_steps_per_epoch = 100 validation_steps = 100 test_steps = 100 train_instance_count = 1 train_instance_type = "ml.c5.9xlarge" train_volume_size = 1024 use_xla = True use_amp = True freeze_bert_layer = False enable_sagemaker_debugger = True enable_checkpointing = False enable_tensorboard = False # input_mode='Pipe' input_mode = "File" run_validation = True run_test = True run_sample_predictions = True metrics_definitions = [ {"Name": "train:loss", "Regex": "loss: ([0-9\\.]+)"}, {"Name": "train:accuracy", "Regex": "accuracy: ([0-9\\.]+)"}, {"Name": "validation:loss", "Regex": "val_loss: ([0-9\\.]+)"}, {"Name": "validation:accuracy", "Regex": "val_accuracy: ([0-9\\.]+)"}, ] # # Setup Our BERT + TensorFlow Script to Run on SageMaker # Prepare our TensorFlow model to run on the managed SageMaker service assume_role_policy_doc = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, 
"Action": "sts:AssumeRole"} ], } # + import time timestamp = int(time.time()) # - secure_iam_role_name = "DSOAWS_Secure_Train_VPC_{}".format(timestamp) # + import json import time from botocore.exceptions import ClientError try: secure_iam_role = iam.create_role( RoleName=secure_iam_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc), Description="DSOAWS Secure Role", ) except ClientError as e: if e.response["Error"]["Code"] == "EntityAlreadyExists": iam_role = iam.get_role(RoleName=secure_iam_role_name) # print("Role already exists") else: print("Unexpected error: %s" % e) print(secure_iam_role) time.sleep(30) # - iam_policy_allow_s3 = { "Version": "2012-10-17", "Statement": [{"Sid": "", "Effect": "Allow", "Action": ["s3:*"], "Resource": ["arn:aws:s3:::{}".format(bucket)]}], } policy_allow_s3_name = "DSOAWS_Secure_Train_Allow_S3_{}".format(timestamp) # + import time response = iam.put_role_policy( RoleName=secure_iam_role_name, PolicyName=policy_allow_s3_name, PolicyDocument=json.dumps(iam_policy_allow_s3) ) print(response) time.sleep(30) # - different_subnet_id = "blah" different_security_group_ids = ["blah"] # Create the bucket policy policy_deny_create_training_job = { "Version": "2008-10-17", "Statement": [ { "Effect": "Deny", "Action": [ "sagemaker:CreateTrainingJob", ], "Resource": ["*"], "Condition": { "StringNotEquals": { "sagemaker:VpcSecurityGroupIds": different_security_group_ids, "sagemaker:VpcSubnets": [different_subnet_id], } }, } ], } policy_deny_create_training_job_name = "DSOAWS_Secure_Train_Deny_CreateTrainingJob_VPC_{}".format(timestamp) # + import time response = iam.put_role_policy( RoleName=secure_iam_role_name, PolicyName=policy_deny_create_training_job_name, PolicyDocument=json.dumps(policy_deny_create_training_job), ) print(response) time.sleep(30) # + from sagemaker.tensorflow import TensorFlow estimator = TensorFlow( entry_point="tf_bert_reviews.py", source_dir="src", role=secure_iam_role_name, 
instance_count=train_instance_count, instance_type=train_instance_type, volume_size=train_volume_size, py_version="py3", framework_version="2.1.0", hyperparameters={ "epochs": epochs, "learning_rate": learning_rate, "epsilon": epsilon, "train_batch_size": train_batch_size, "validation_batch_size": validation_batch_size, "test_batch_size": test_batch_size, "train_steps_per_epoch": train_steps_per_epoch, "validation_steps": validation_steps, "test_steps": test_steps, "use_xla": use_xla, "use_amp": use_amp, "max_seq_length": max_seq_length, "freeze_bert_layer": freeze_bert_layer, "enable_sagemaker_debugger": enable_sagemaker_debugger, "enable_checkpointing": enable_checkpointing, "enable_tensorboard": enable_tensorboard, "run_validation": run_validation, "run_test": run_test, "run_sample_predictions": run_sample_predictions, }, input_mode=input_mode, subnets=None, security_group_ids=None, ) # - # # Verify `CreateTrainingJob: AccessDenied` estimator.fit( inputs={"train": s3_input_train_data, "validation": s3_input_validation_data, "test": s3_input_test_data}, wait=False, ) training_job_name = estimator.latest_training_job.name print("Training Job Name: {}".format(training_job_name)) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job</a> After About 5 Minutes</b>'.format( region, training_job_name ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TrainingJobs;prefix={};streamFilter=typeLogStreamPrefix">CloudWatch Logs</a> After About 5 Minutes</b>'.format( region, training_job_name ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/?region={}&tab=overview">S3 Output Data</a> After The Training 
Job Has Completed</b>'.format( bucket, training_job_name, region ) ) ) # + # %%time estimator.latest_training_job.wait(logs=False) # - # # Cleanup Policies and Roles # + response = iam.delete_role_policy(RoleName=secure_iam_role_name, PolicyName=policy_deny_create_training_job_name) print(response) time.sleep(30) # + response = iam.delete_role_policy(RoleName=secure_iam_role_name, PolicyName=policy_allow_s3_name) print(response) time.sleep(30) # + iam.delete_role(RoleName=secure_iam_role_name) time.sleep(30) # - # + import json notebook_instance_name = None try: with open("/opt/ml/metadata/resource-metadata.json") as notebook_info: data = json.load(notebook_info) resource_arn = data["ResourceArn"] region = resource_arn.split(":")[3] notebook_instance_name = data["ResourceName"] print("Notebook Instance Name: {}".format(notebook_instance_name)) except: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR]: COULD NOT RETRIEVE THE NOTEBOOK INSTANCE METADATA.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") # + response = sm.describe_notebook_instance(NotebookInstanceName=notebook_instance_name) print(response) # - print("SubnetId: {}".format(response["SubnetId"])) print("SecurityGroups: {}".format(response["SecurityGroups"])) print("IAM Role: {}".format(response["RoleArn"])) print("NetworkInterfaceId: {}".format(response["NetworkInterfaceId"])) print("DirectInternetAccess: {}".format(response["DirectInternetAccess"])) subnet_id = response["SubnetId"] print(subnet_id) security_group_ids = response["SecurityGroups"] print(security_group_ids) # + import time timestamp = int(time.time()) # - secure_iam_role_name = "DSOAWS_Secure_Train_VPC_{}".format(timestamp) # + import json import time from botocore.exceptions import ClientError try: secure_iam_role = iam.create_role( RoleName=secure_iam_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc), Description="DSOAWS Secure Role", ) except ClientError as e: 
if e.response["Error"]["Code"] == "EntityAlreadyExists": iam_role = iam.get_role(RoleName=secure_iam_role_name) # print("Role already exists") else: print("Unexpected error: %s" % e) print(secure_iam_role) time.sleep(30) # - iam_policy_allow_s3 = { "Version": "2012-10-17", "Statement": [{"Sid": "", "Effect": "Allow", "Action": ["s3:*"], "Resource": ["arn:aws:s3:::{}".format(bucket)]}], } policy_allow_s3_name = "DSOAWS_Secure_Train_Allow_S3_{}".format(timestamp) # + import time response = iam.put_role_policy( RoleName=secure_iam_role_name, PolicyName=policy_allow_s3_name, PolicyDocument=json.dumps(iam_policy_allow_s3) ) print(response) time.sleep(30) # - # Create the bucket policy policy_deny_create_training_job = { "Version": "2008-10-17", "Statement": [ { "Effect": "Deny", "Action": [ "sagemaker:CreateTrainingJob", ], "Resource": ["*"], "Condition": { "StringNotEquals": { "sagemaker:VpcSecurityGroupIds": security_group_ids, "sagemaker:VpcSubnets": [subnet_id], } }, } ], } policy_deny_create_training_job_name = "DSOAWS_Secure_Train_Deny_CreateTrainingJob_VPC_{}".format(timestamp) # + import time response = iam.put_role_policy( RoleName=secure_iam_role_name, PolicyName=policy_deny_create_training_job_name, PolicyDocument=json.dumps(policy_deny_create_training_job), ) print(response) time.sleep(30) # - # # Specify the VPC parameters and Verify Successful Training Job # + from sagemaker.tensorflow import TensorFlow estimator = TensorFlow( entry_point="tf_bert_reviews.py", source_dir="src", role=secure_iam_role_name, instance_count=train_instance_count, instance_type=train_instance_type, volume_size=train_volume_size, py_version="py3", framework_version="2.1.0", hyperparameters={ "epochs": epochs, "learning_rate": learning_rate, "epsilon": epsilon, "train_batch_size": train_batch_size, "validation_batch_size": validation_batch_size, "test_batch_size": test_batch_size, "train_steps_per_epoch": train_steps_per_epoch, "validation_steps": validation_steps, "test_steps": 
test_steps, "use_xla": use_xla, "use_amp": use_amp, "max_seq_length": max_seq_length, "freeze_bert_layer": freeze_bert_layer, "enable_sagemaker_debugger": enable_sagemaker_debugger, "enable_checkpointing": enable_checkpointing, "enable_tensorboard": enable_tensorboard, "run_validation": run_validation, "run_test": run_test, "run_sample_predictions": run_sample_predictions, }, input_mode=input_mode, subnets=[subnet_id], security_group_ids=security_group_ids, ) # - # # Verify Training Starts OK # # TODO: This works if we don't specify VPC because when we're not using a VPC, we are going through the public internet - which is not good... it is preferred to go through the VPC. # # ``` # UnexpectedStatusException: Error for Training job tensorflow-training-2020-12-20-23-13-52-444: Failed. Reason: ClientError: Data download failed:Please ensure that the subnet's route table has a route to an S3 VPC endpoint or a NAT device, and both the security groups and the subnet's network ACL allow outbound traffic to S3. 
# ``` estimator.fit( inputs={"train": s3_input_train_data, "validation": s3_input_validation_data, "test": s3_input_test_data}, wait=False, ) training_job_name = estimator.latest_training_job.name print("Training Job Name: {}".format(training_job_name)) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job</a> After About 5 Minutes</b>'.format( region, training_job_name ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/TrainingJobs;prefix={};streamFilter=typeLogStreamPrefix">CloudWatch Logs</a> After About 5 Minutes</b>'.format( region, training_job_name ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/?region={}&tab=overview">S3 Output Data</a> After The Training Job Has Completed</b>'.format( bucket, training_job_name, region ) ) ) # + # %%time estimator.latest_training_job.wait(logs=False) # - # # Wait Until the ^^ Training Job ^^ Completes Above! 
# # [INFO] _Feel free to continue to the next workshop section while this notebook is running._ # !aws s3 cp s3://$bucket/$training_job_name/output/model.tar.gz ./model.tar.gz # !mkdir -p ./model/ # !tar -xvzf ./model.tar.gz -C ./model/ # !saved_model_cli show --all --dir ./model/tensorflow/saved_model/0/ # # Release Resources # + language="html" # # <p><b>Shutting down your kernel for this notebook to release resources.</b></p> # <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> # # <script> # try { # els = document.getElementsByClassName("sm-command-button"); # els[0].click(); # } # catch(err) { # // NoOp # } # </script> # + language="javascript" # # try { # Jupyter.notebook.save_checkpoint(); # Jupyter.notebook.session.delete(); # } # catch(err) { # // NoOp # }
12_security/08b_Secure_Train_IAMPolicy_VPC_ConditionKey.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

"""Odds"""

raw_data = """
Maximum Security 4-1
Improbable 9-2
Tacitus 5-1
Game Winner 6-1
Roadster 9-1
Code of Honor 12-1
By My Standards 15-1
Win Win Win 15-1
War of Will 17-1
Vekoma 22-1
Cutting Humor 25-1
Gray Magician 31-1
Tax 37-1
Long Range Toddy 49-1
Master Fencer 50-1
Spinoff 51-1
Plus Que Parfait 55-1
Country House 64-1
Bodexpress 99-1
"""

# Parse each "<horse name> <win>-<pay>" line into a (name, win, pay) tuple.
# Blank lines (the leading/trailing ones in the triple-quoted string) are skipped.
data = []
for row in raw_data.split('\n'):
    tokens = row.strip().split()
    if not tokens:
        continue
    horse = ' '.join(tokens[:-1])          # everything but the odds is the name
    win, pay = (int(part) for part in tokens[-1].split('-'))
    data.append((horse, win, pay))

print(data)


def get_space(x):
    """Return the run of tabs that lines up the odds column after name `x`."""
    if len(x) >= 16:
        return '\t'
    return '\t\t' if len(x) > 7 else '\t\t\t'


# Print each horse with its odds and implied win probability pay/(win+pay),
# then the sum of all implied probabilities (the bookmaker's overround).
total_p = 0
for horse, win, pay in data:
    implied = pay / (win + pay)
    print(f"{horse}{get_space(horse)}{win}-{pay}\t{round(implied, 2)}")
    total_p += implied

print(f"total \t\t\t\t{round(total_p, 2)}")
ky_derby.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## A multi-layer neural network predicting MNIST hand-written digits

from simpledl.DLTrainer import DLTrainer
from simpledl.ModelManager import ModelManager
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
import numpy as np

# +
# Create our own trainer with its own load_data function
class MyDLTrainer(DLTrainer):

    def load_data(self, test_size=0.1):
        """Load the sklearn digits dataset and split it into train/test sets.

        Parameters
        ----------
        test_size : float
            Fraction of samples held out for the test split.

        Returns
        -------
        tuple
            (src_X, src_Y, X, Y, X_train, Y_train, X_test, Y_test), each
            transposed so that examples are columns — the layout the
            DLTrainer network code expects.
        """
        src_X, src_Y = load_digits(return_X_y=True)
        X = src_X / np.max(src_X)  # normalize pixel values into [0, 1]
        # One-hot encode the digit labels as a dense matrix (one row per sample).
        # NOTE(review): newer scikit-learn renamed sparse= to sparse_output= —
        # confirm the pinned sklearn version before upgrading.
        Y = OneHotEncoder(sparse=False, categories='auto').fit_transform(src_Y.reshape(-1, 1))
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size)

        # Transpose everything to the "examples as columns" layout.
        src_X = src_X.T
        src_Y = src_Y.T
        X = X.T
        Y = Y.T
        X_train = X_train.T
        X_test = X_test.T
        Y_train = Y_train.T
        Y_test = Y_test.T
        return src_X, src_Y, X, Y, X_train, Y_train, X_test, Y_test


trainer = MyDLTrainer()
# FIX: the original called trainer.load_data() once with its result discarded
# and then again with test_size=0.5; the first call only wasted a full dataset
# load/split, so it has been removed.
src_X, src_Y, X, Y, X_train, Y_train, X_test, Y_test = trainer.load_data(test_size=0.5)
dim_input, dim_output = X.shape[0], Y.shape[0]
# -

# Create our model: input -> 10 -> 10 -> output, with ReLU hidden layers and a
# sigmoid output layer.
mgr = ModelManager()
mgr.create_model(dims=[dim_input, 10, 10, dim_output],
                 activations=[DLTrainer.nonlin_relu, DLTrainer.nonlin_relu, DLTrainer.nonlin_sigmoid],
                 default_alpha=0.0007,
                 default_lambda=0.001)

# +
# Train the model for 20000 epochs, reporting every 2500.
updated_model, costs, accuracy = trainer.train(mgr, X_train, Y_train, 20000, 2500)

f, ax = plt.subplots()
ax.plot(costs)
ax.set_yscale('log')  # cost decays over orders of magnitude; log scale shows it
ax.set_title("Cost v epoch")

mgr.update_model(updated_model)
print("ModelManager updated with trained model. Dev accuracy: {}".format(trainer.correct(mgr.model, X_test, Y_test)))

# +
# Visualize a few examples


def show_generic_with_prediction(x, y, trainer):
    """Plot one 8x8 digit image titled with its true label and the model's
    prediction. Relies on the module-level `mgr` for the trained model."""
    f, ax = plt.subplots(figsize=(2, 2))
    y_hat = trainer.predict(mgr.model, x).ravel()[0]
    msg = "Correctly inferred!" if y == y_hat else "Incorrectly inferred."
    ax.imshow(x.reshape(8, 8), cmap=plt.cm.gray_r)
    ax.set_title("Y: {}, Y_HAT: {} -- {}".format(y, y_hat, msg))


def show_example_with_prediction(index, trainer):
    """Fetch test example `index` (a column of X_test) and plot it with its
    predicted label."""
    x = X_test[:, index].reshape(-1, 1)
    y = np.argmax(Y_test[:, index])
    show_generic_with_prediction(x, y, trainer)


for i in range(5):
    show_example_with_prediction(np.random.choice(100), trainer)
examples/Digits - Multi-layer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# <a href="https://colab.research.google.com/github/ayulockin/Thought-Experiments/blob/master/DirectionCNN_with_MNIST.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
from keras.datasets import mnist

# Load MNIST and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

# +
import matplotlib.pyplot as plt
# %matplotlib inline

# +
# Show the first 64 training digits with their labels.
sample_images = x_train[:64]
sample_labels = y_train[:64]

rows = 8
cols = 8
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(20, 16));
c = 0
for i in range(rows):
    for j in range(cols):
        # FIX: the original title said "Color:"; these are digit class labels.
        axs[i][j].set_title('Label: {}'.format(sample_labels[c]))
        axs[i][j].imshow(sample_images[c], cmap='Greys');
        axs[i][j].axis('off');
        c += 1

# +
import pandas as pd
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
import keras.backend as K

# +
# Add a trailing channel axis, (28, 28) -> (28, 28, 1), and one-hot the labels.
x_train = x_train.reshape((x_train.shape + (1,)))
x_test = x_test.reshape((x_test.shape + (1,)))
y_train = pd.get_dummies(y_train)
y_test = pd.get_dummies(y_test)

# +
print("X_train: ", x_train.shape)
print("y_train: ", y_train.shape)
print("X_test: ", x_test.shape)
print("y_test: ", y_test.shape)

# +
from keras.preprocessing.image import ImageDataGenerator

# FIX: the original built an unused `aug = ImageDataGenerator(rescale=1/255.0)`
# here; the images were already divided by 255 above, so applying it would have
# rescaled them a second time. The dead generator has been removed.

# +
def build_model():
    """Build the direction-experiment CNN: two 3x3 conv layers, 3x3 max
    pooling, then dense layers 1024 -> 512 -> 128 ending in a 10-way softmax."""
    inputs = Input(shape=(28, 28, 1))
    x = Conv2D(32, (3, 3), strides=1, activation='relu', padding='valid')(inputs)
    x = Conv2D(32, (3, 3), strides=1, activation='relu', padding='valid')(x)
    x = MaxPooling2D(pool_size=(3, 3))(x)
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dense(512, activation='relu')(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=x)


K.clear_session()
# FIX: the original did `model = model()`, shadowing the factory function with
# its own return value; the factory is now named build_model().
model = build_model()
model.summary()

# +
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# +
hist = model.fit(x_train, y_train, batch_size=32, epochs=3, validation_data=(x_test, y_test))

# +
model.save('mnist.h5')

# + [markdown]
# ## Seeing how the model is predicting given input with different angles.

# +
import numpy as np

# +
# Pick 64 random test images to probe the model with.
sample_indexes = np.random.choice(len(x_test), 64)
sample_indexes

# + [markdown]
# ##### Normal prediction

# +
rows = 8
cols = 8
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(20, 20));
c = 0
for i in range(rows):
    for j in range(cols):
        # The model input needs a leading batch axis; plotting needs the
        # trailing channel axis dropped.
        img_pred = x_test[sample_indexes[c]].reshape((1,) + x_test[sample_indexes[c]].shape)
        img_plot = x_test[sample_indexes[c]].reshape((x_test[sample_indexes[c]].shape[:-1]))
        pred = np.argmax(model.predict(img_pred))
        axs[i][j].set_title('True: {} | Pred: {}'.format(np.argmax(y_test.values[sample_indexes[c]]), pred))
        axs[i][j].imshow(img_plot, cmap='Greys');
        axs[i][j].axis('off');
        c += 1

# + [markdown]
# ##### What if the images are upside down and fed to the model?

# +
# Flip a single example vertically and compare prediction vs. true label.
c = 4
img_plot = x_test[sample_indexes[c]].reshape((x_test[sample_indexes[c]].shape[:-1]))
img_plot_updown = np.flipud(x_test[sample_indexes[c]].reshape((x_test[sample_indexes[c]].shape[:-1])))
img_pred = img_plot_updown.reshape(((1,) + img_plot_updown.shape + (1,)))
print(img_pred.shape)
plt.imshow(img_plot_updown, cmap='Greys')
pred = np.argmax(model.predict(img_pred))
print(pred)
print(np.argmax(y_test.values[sample_indexes[c]]))

# +
# Repeat the upside-down probe for the full 8x8 grid of sampled digits.
rows = 8
cols = 8
fig, axs = plt.subplots(nrows=rows, ncols=cols, figsize=(20, 20));
c = 0
for i in range(rows):
    for j in range(cols):
        img_plot_updown = np.flipud(x_test[sample_indexes[c]].reshape((x_test[sample_indexes[c]].shape[:-1])))
        img_pred = img_plot_updown.reshape(((1,) + img_plot_updown.shape + (1,)))
        pred = np.argmax(model.predict(img_pred))
        axs[i][j].set_title('True: {} | Pred: {}'.format(np.argmax(y_test.values[sample_indexes[c]]), pred))
        axs[i][j].imshow(img_plot_updown, cmap='Greys');
        axs[i][j].axis('off');
        c += 1

# +
DirectionCNN_with_MNIST.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown]
# Autograd: automatic differentiation
# ===================================
#
# Central to all neural networks in PyTorch is the ``autograd`` package.
# Let’s first briefly visit this, and we will then go to training our
# first neural network.
#
# The ``autograd`` package provides automatic differentiation for all operations
# on Tensors. It is a define-by-run framework, which means that your backprop is
# defined by how your code is run, and that every single iteration can be
# different.
#
# Tensor
# --------
#
# ``torch.Tensor`` is the central class of the package. If you set its attribute
# ``.requires_grad`` as ``True``, it starts to track all operations on it. When
# you finish your computation you can call ``.backward()`` and have all the
# gradients computed automatically. The gradient for this tensor will be
# accumulated into the ``.grad`` attribute.
#
# To stop a tensor from tracking history, you can call ``.detach()`` to detach
# it from the computation history, and to prevent future computation from being
# tracked.
#
# To prevent tracking history (and using memory), you can also wrap the code
# block in ``with torch.no_grad():``. This can be particularly helpful when
# evaluating a model because the model may have trainable parameters with
# ``requires_grad=True``, but for which we don't need the gradients.
#
# There’s one more class which is very important for autograd
# implementation - a ``Function``.
#
# ``Tensor`` and ``Function`` are interconnected and build up an acyclic
# graph, that encodes a complete history of computation. Each tensor has
# a ``.grad_fn`` attribute that references a ``Function`` that has created
# the ``Tensor`` (except for Tensors created by the user - their
# ``grad_fn is None``).
#
# If you want to compute the derivatives, you can call ``.backward()`` on
# a ``Tensor``. If ``Tensor`` is a scalar (i.e. it holds a one element
# data), you don’t need to specify any arguments to ``backward()``,
# however if it has more elements, you need to specify a ``gradient``
# argument that is a tensor of matching shape.

# +
import torch

# + [markdown]
# Create a tensor and set ``requires_grad=True`` to track computation with it

# +
x = torch.ones(2, 2, requires_grad=True)

# + [markdown]
# Do an operation of tensor:

# +
y = x + 2
print(y)

# + [markdown]
# ``y`` was created as a result of an operation, so it has a ``grad_fn``.

# +
print(y.grad_fn)

# + [markdown]
# Do more operations on y

# +
z = y * y * 3
out = z.mean()

print(z, out)

# + [markdown]
# ``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad``
# flag in-place. The input flag defaults to ``True`` if not given.

# +
a = torch.randn(2, 2)
a = ((a * 3) / (a - 1))
print(a.requires_grad)
a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)

# + [markdown]
# Gradients
# ---------
# Let's backprop now.
# Because ``out`` contains a single scalar, ``out.backward()`` is
# equivalent to ``out.backward(torch.tensor(1.0))``.

# +
out.backward()

# + [markdown]
# print gradients d(out)/dx

# +
print(x.grad)

# + [markdown]
# You should have got a matrix of ``4.5``.
#
# Let’s call the ``out``
# *Tensor* $o$.
# We have that $o = \frac{1}{4}\sum_i z_i$,
# $z_i = 3(x_i+2)^2$ and $z_i\bigr\rvert_{x_i=1} = 27$.
# Therefore,
#
# \begin{equation}
# \frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)
# \end{equation}
#
# hence
#
# \begin{equation}
# \frac{\partial o}{\partial x_i}\bigr\rvert_{x_i=1} = \frac{9}{2} = 4.5
# \end{equation}
#
# You can do many crazy things with autograd!

# +
x = torch.randn(3, requires_grad=True)

y = x * 2
# FIX: the original read the magnitude via the legacy `.data` attribute
# (`y.data.norm()`); `.detach()` is the supported way to read a value without
# autograd tracking.
while y.detach().norm() < 1000:
    y = y * 2

print(y)

# +
# `y` is non-scalar, so backward() needs an explicit gradient of matching shape.
gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(gradients)

print(x.grad)

# + [markdown]
# You can also stop autograd from tracking history on Tensors
# with ``.requires_grad``=True by wrapping the code block in
# ``with torch.no_grad():``

# +
print(x.requires_grad)
print((x ** 2).requires_grad)

with torch.no_grad():
    print((x ** 2).requires_grad)

# + [markdown]
# **Read Later:**
#
# Documentation of ``autograd`` and ``Function`` is at
# http://pytorch.org/docs/autograd
pytorch/blitz/autograd_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import division from __future__ import print_function import numpy as np import sys import pickle as pkl import networkx as nx import scipy.sparse as sp from sklearn.manifold import TSNE import matplotlib.pyplot as plt import matplotlib.cm as cm import pandas as pd import matplotlib import os import time import os # Train on CPU (hide GPU) due to memory constraints os.environ['CUDA_VISIBLE_DEVICES'] = "" import tensorflow as tf import numpy as np import scipy.sparse as sp sys.path.append('../') from sklearn.metrics import roc_auc_score from sklearn.metrics import average_precision_score from gae.optimizer import OptimizerAE, OptimizerVAE from gae.input_data import load_data from gae.model import GCNModelAE, GCNModelVAE from gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, mask_test_edges # - def del_all_flags(FLAGS): flags_dict = FLAGS._flags() keys_list = [keys for keys in flags_dict] for keys in keys_list: FLAGS.__delattr__(keys) # + del_all_flags(tf.flags.FLAGS) flags = tf.app.flags FLAGS = flags.FLAGS flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.') flags.DEFINE_integer('epochs', 200, 'Number of epochs to train.') flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.') flags.DEFINE_integer('hidden2', 16, 'Number of units in hidden layer 2.') #flags.DEFINE_integer('hidden3', 2, 'Number of units in hidden layer 2.') flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.') flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).') flags.DEFINE_string('model', 'gcn_ae', 'Model string.') flags.DEFINE_string('dataset', 'cora', 'Dataset string.') flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).') 
flags.DEFINE_string('f', '', 'kernel') # + with open('intermediate_pkl/aminer_adj.pkl', 'rb') as f: adj_orig = pkl.load(f) #adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape) with open('intermediate_pkl/feature_one_hot_matrix.pkl', 'rb') as f: features = pkl.load(f) # - # + adj_orig.eliminate_zeros() adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig) adj = adj_train features = sp.identity(features.shape[0]) # featureless # Some preprocessing adj_norm = preprocess_graph(adj) # Define placeholders placeholders = { 'features': tf.sparse_placeholder(tf.float32), 'adj': tf.sparse_placeholder(tf.float32), 'adj_orig': tf.sparse_placeholder(tf.float32), 'dropout': tf.placeholder_with_default(0., shape=()) } num_nodes = adj.shape[0] features = sparse_to_tuple(features.tocoo()) num_features = features[2][1] features_nonzero = features[1].shape[0] model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero) pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2) # Optimizer with tf.name_scope('optimizer'): opt = OptimizerVAE(preds=model.reconstructions, labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'], validate_indices=False), [-1]), model=model, num_nodes=num_nodes, pos_weight=pos_weight, norm=norm) # Initialize session sess = tf.Session() sess.run(tf.global_variables_initializer()) cost_val = [] acc_val = [] def get_roc_score(edges_pos, edges_neg, emb=None): if emb is None: feed_dict.update({placeholders['dropout']: 0}) emb = sess.run(model.z_mean, feed_dict=feed_dict) def sigmoid(x): return 1 / (1 + np.exp(-x)) # Predict on test set of edges adj_rec = np.dot(emb, emb.T) preds = [] pos = [] for e in edges_pos: preds.append(sigmoid(adj_rec[e[0], e[1]])) pos.append(adj_orig[e[0], e[1]]) preds_neg = [] neg = [] for e in edges_neg: 
preds_neg.append(sigmoid(adj_rec[e[0], e[1]])) neg.append(adj_orig[e[0], e[1]]) preds_all = np.hstack([preds, preds_neg]) labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))]) roc_score = roc_auc_score(labels_all, preds_all) ap_score = average_precision_score(labels_all, preds_all) return roc_score, ap_score, emb, labels_all cost_val = [] acc_val = [] val_roc_score = [] adj_label = adj_train + sp.eye(adj_train.shape[0]) adj_label = sparse_to_tuple(adj_label) # Train model for epoch in range(FLAGS.epochs): t = time.time() # Construct feed dictionary feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders) feed_dict.update({placeholders['dropout']: FLAGS.dropout}) # Run single weight update outs = sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=feed_dict) # Compute average loss avg_cost = outs[1] avg_accuracy = outs[2] roc_curr, ap_curr, emb, labels_all = get_roc_score(val_edges, val_edges_false) val_roc_score.append(roc_curr) print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost), "train_acc=", "{:.5f}".format(avg_accuracy), "val_roc=", "{:.5f}".format(val_roc_score[-1]), "val_ap=", "{:.5f}".format(ap_curr), "time=", "{:.5f}".format(time.time() - t)) print("Optimization Finished!") roc_score, ap_score, emb, labels_all = get_roc_score(test_edges, test_edges_false) print('Test ROC score: ' + str(roc_score)) print('Test AP score: ' + str(ap_score)) # - features[0] features[1] features[2] features adj_orig
src/Plot Embeddings on Aminer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Model training Script: # # ### Pipeline for training an ML model for food classification task in 14 food classes. # * The objective is achieved using Inception V3 NN architecture initialized with Imagenet weights and retrained for 12 epochs. # # #### Following program serves following purposes: # * Iteratively renames all images in different subdirectories accordingly their class names for convenience. # * Adds all image paths into a dataframe and correspondingly assigns a numeric class label for each using produce_label function. # * Performs preprocessing and train-test split on training images before parsing them through the model to ouput predicted lables. # * defines a network using Inception V3 architecture initilaized with Imagenet weights. # * retrains the network for 12 iterations end to end, using gradient decent with momentum and learning rate decay. # # + import matplotlib.pyplot as plt import matplotlib.image as mpimg from scipy.misc import imresize # %matplotlib inline import numpy as np import h5py import os import pandas as pd from keras.preprocessing.image import load_img, img_to_array from keras.utils import to_categorical # - sum(folder_count_dict.values())#total number of images in training dataset for i,j in enumerate(folder_count_dict.values()):#lists the number of images for each class in training set print(i,j) # * #### Some auxilliary functions to loads images from image_path dataframe to numerical matrices and producing corresponding class labels for each, before feeding in for training. def load_data(dataframe):#to load the images as arrays of 299*299*3 ''' Input: Dataframe holidng paths to corresponding image and labels. 
Returns: array of image matrices shaped 180x180x3 and array of corresponding image labels ''' #img_array= np.empty((dataframe.shape[0], 180,180,3), dtype= np.uint8) img_list=[] for idx, path in enumerate(dataframe.img_paths): img_arr = img_to_array(load_img(path, target_size =(180,180))) #img_to_array(load_img(path, targte_size=(299,299))) #h_new))) img_list.append(img_arr) return np.array(img_list), dataframe.classes.values#.reshape((dataframe.shape[0],1)) def produce_labels(class_count_dict):#lable prod function ''' Input: class-count dictionary holidng class label and image count. ex: {0:1499, 1: 1500, . . .} returns: array of image labels, created according to image count in class-count dictionary. ex array([0,0,0,1,1,1,2,2,2,...]) ''' cls_count = class_count_dict.values() label_list = [] #labels = np.empty(sum(cl_count.values()), dtype=np.uint8)#generating labels for 30 class,300img/cls training set for i, num_cls in enumerate(cls_count): label_list+=[i]*num_cls #labels[num_cls*class_count_list[i]:cls*class_count_list[i] + class_count_list[i+1]]= cls return np.array(label_list) # ### The structure of data directory where images are stored is as follows: # #### Newfooddatabase folder # * | # * |_ food1 directory-- img1,img2,img3. . . . . # * | # * |_ food2 directory-- img1,img2,img3. . . . . # * | # * |_ food3 directory-- img1,img2,img3. . . . . # # # * ### Following renames all the image files for convenience accordingly their original class names or lables. 
# + import os path = '/home/paperspace/Desktop/newfooddatabase' for directory in os.listdir(path):#to rename all training files from random names to ordered names for idx, file in enumerate(os.scandir(os.path.join(path, directory))): #print(idx, file.name) os.rename(os.path.join(path,directory, file.name), os.path.join(path, directory, '{}{:04}.jpg'.format(directory, idx))) # + #outputs an array of image paths classes= os.listdir(path)#outputs list of different classes to train on n_classes= len(classes) folder_count_dict = dict() path_list = [] #path_array = np.empty(num_classes,) for file in classes: folder_count_dict.update({file:len(os.listdir(path +'/'+file))}) subdirs = os.scandir(os.path.join(path,file))#subdirs are class folders holding class images path_list+= [file+'/'+img.name for img in subdirs]# will output all the images from all 16 classes as list img_path_array= np.array(path_list) labels = produce_labels(folder_count_dict) print('shape of path_array:', img_path_array.shape,'shape of labels:', labels.shape) # - classes df_train = pd.DataFrame({'img_paths': img_path_array,'classes':labels}) df_train.img_paths = df_train['img_paths'].apply(lambda x: os.path.join(path,x)) #training dataset df_train.head(5) # * #### loading image data from image paths. # * #### Converting labels to one-hot encoded form. 
# * #### Performing train-test split # # + from keras.utils import np_utils import time tick = time.time() X_train, y_train = load_data(df_train) y_train = np_utils.to_categorical(y_train, num_classes= n_classes) print('X_train and y_train shape: ', X_train.shape, y_train.shape) print(time.time() - tick) # + from sklearn.model_selection import train_test_split xtrain, xtest, ytrain, ytest = train_test_split(X_train, y_train, test_size= 0.1) print('xtrain, ytrain shape:', (xtrain.shape, ytrain.shape), '\n ', 'xtest, ytest shape:', (xtest.shape, ytest.shape)) # - # * #### Defining network for training # + from keras.applications.inception_v3 import InceptionV3 from keras.applications.inception_v3 import preprocess_input, decode_predictions from keras.preprocessing import image from keras.layers import Input from keras.models import Model from keras.layers import Dense, Dropout, Flatten from keras.layers import AveragePooling2D #from keras.layers.normalization import BatchNormalization from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ModelCheckpoint,TensorBoard, LearningRateScheduler, ReduceLROnPlateau from keras.optimizers import SGD from keras.regularizers import l2 import keras.backend as K import math # + K.clear_session() tick = time.time() inputs = Input(shape=(180, 180, 3)) base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=inputs)#input_shape=(120, 120, 3)) n_classes=16#30 classes x = base_model.output x = AveragePooling2D(pool_size=(4, 4))(x) x = Dropout(.4)(x) x = Flatten()(x) predictions = Dense(n_classes, init='glorot_uniform', W_regularizer=l2(.0005), activation='softmax')(x) comp_model= Model(input= inputs, output=predictions) print('exe time: ',time.time() - tick) # - comp_model.summary() # * ##### Initializing the defined network by Compiling and defining callbacks, learning rate decay function from keras.callbacks import Callback # + opt = SGD(lr=.01, momentum=.9) 
comp_model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) checkpointer = ModelCheckpoint(filepath='model4.{epoch:02d}-{val_loss:.2f}.hdf5', verbose=1, save_best_only=True) #csv_logger = CSVLogger('model4.log') def schedule(epoch): if epoch < 15: return .01 elif epoch < 28: return .002 else: return .0004 lr_scheduler = LearningRateScheduler(schedule) # - tbcallback = TensorBoard(log_dir='./Graph', batch_size= 64, histogram_freq=0, write_graph=True, write_images=True) # * #### train and test image data generator to add augumented images in batches to model training routine. # + train_datagen = ImageDataGenerator(featurewise_center=False,# set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by std of the dataset samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180) width_shift_range=0.2, # randomly shift images horizontally (fraction of total width) height_shift_range=0.2, # randomly shift images vertically (fraction of total height) horizontal_flip=True, # randomly flip images vertical_flip=False, # randomly flip images zoom_range=[.8, 1], channel_shift_range=30, fill_mode='reflect') #train_datagen.config['random_crop_size'] = (150, 150) #train_datagen.set_pipeline([T.random_transform, T.random_crop, T.preprocess_input]) train_generator = train_datagen.flow(xtrain, ytrain, batch_size=64)#, seed=11, pool=pool) test_datagen = ImageDataGenerator() test_generator = test_datagen.flow(xtest, ytest, batch_size=64) # + tick = time.time() history= comp_model.fit_generator(train_generator, validation_data=test_generator, nb_val_samples=xtest.shape[0], samples_per_epoch=xtrain.shape[0], nb_epoch=12, verbose=1, callbacks=[lr_scheduler,tbcallback, checkpointer]) print('total training & validation time for 22500 training images and 
7500 test images: ', time.time()- tick) # - plt.plot(history.history['val_acc']) plt.plot(history.history['val_loss']) # * #### Reloading the model trained on training_dataset for 12 epochs to further train on 8 more epochs. from keras.models import load_model model = load_model('model0.88_val_accuracy.hdf5') sc= model.evaluate(xtest, ytest, batch_size=128) print('Val loss and accuracy from 12 epochs of training:',sc[0],' ', sc[1]) # + import time tick = time.time() history_8epochs = comp_model.fit_generator(train_generator, validation_data=test_generator, nb_val_samples=xtest.shape[0], samples_per_epoch=xtrain.shape[0], nb_epoch=8, verbose=1, callbacks=[lr_scheduler,tbcallback, checkpointer]) print('total training & validation time for 22500 training images and 7500 test images: ', time.time()- tick) # - plt.plot(history_8epochs.history['val_loss']) plt.plot(history_8epochs.history['val_acc'])
inception_v3_16c_training_script.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # 1. Can we generate a valid search URL from the URL Template provided? # a. given the time frame of the harvests, this has to be considered alongside linkrot in the osdd (failed search query may not mean the template is incorrect, could just mean the service url is no longer valid at all). # 2. Can we identify best practices (uses esip spatial, uses the time namespace, uses parameter elements)? # 3. dataset/granule search (i don't think we have any data to support this at all). # # Other info - can you identify the parent osdd of a resultset or of a nested osdd? can you grok it from the url only? import requests import json from lxml import etree import urlparse import urllib from bs4 import BeautifulSoup from itertools import chain # + def extract_namespaces(xml): ''' Pull all of the namespaces in the source document and generate a list of tuples (prefix, URI) to dict ''' if xml is None: return {} document_namespaces = dict(xml.xpath('/*/namespace::*')) if None in document_namespaces: document_namespaces['default'] = document_namespaces[None] del document_namespaces[None] # now run through any child namespace issues all_namespaces = xml.xpath('//namespace::*') for i, ns in enumerate(all_namespaces): if ns[1] in document_namespaces.values(): continue new_key = ns[0] if ns[0] else 'default%s' % i document_namespaces[new_key] = ns[1] return document_namespaces def extract_urls(xml, mimetype='atom+xml'): return xml.xpath('//*[local-name()="Url" and (@*[local-name()="type"]="application/%(mimetype)s" or @*[local-name()="type"]="text/%(mimetype)s")]' % {'mimetype': mimetype}) def extract_template(url, append_limit=True): # get the base url from the template template_parts = urlparse.urlparse(url) if not template_parts.scheme: return '', '', {}, False 
base_url = urlparse.urlunparse(( template_parts.scheme, template_parts.netloc, template_parts.path, None, None, None )) qp = {k: v[0] for k, v in urlparse.parse_qs(template_parts.query).iteritems()} # get the hard-coded params defaults = {k:v for k, v in qp.iteritems() if not v.startswith('{') and not v.endswith('}')} # a flag for some hard-coded response format type to manage # accept headers or no format_defined = len([v for k, v in defaults.iteritems() if 'atom' in v.lower() or 'rss' in v.lower()]) > 0 # get the rest (and ignore the optional/namespaces) parameters = {k: v[1:-1] for k, v in qp.iteritems() if v.startswith('{') and v.endswith('}')} if append_limit: terms = extract_parameter_key('count', parameters) if terms: defaults = dict( chain(defaults.items(), {k: 5 for k in terms.keys()}.items()) ) # note: not everyone manages url-encoded query parameter delimiters # and not everyone manages non-url-encoded values so yeah. we are # ignoring the non-url-encoded group tonight. # return the base, defaults, parameters as dict return base_url, defaults, parameters, format_defined def extract_parameter_key(value, params): # sort out the query parameter name for a parameter # and don't send curly bracketed things, please return {k: v.split(':')[-1].replace('?', '') for k, v in params.iteritems() if value in v} def extract_parameter_defs(url_elem, defined_terms): # could just go with a namespace check but # namespaces are included and not used more # than i'd like. safety first. 
params = url_elem.xpath('*[local-name()="Parameter"]') output = {} for i, param in enumerate(params): pname = param.attrib.get('name', i) pval = param.attrib.get('value', '') poptions = param.xpath('*[local-name()="Option"]') options = [(o.attrib.get('value'), o.attrib.get('label')) for o in poptions] output[pname] = { "value": pval, "options": options } return output def extract_query_terms(xml, param_name): # find a query element that contains an example # for the provided param_name (no namespace, no optional flag) example_queries = {} xp = '//*[local-name()="Query" and @*[local-name()="role"]="example"]/@*[local-name()="{0}"]'.format(param_name) try: example_queries = xml.xpath(xp) except: print 'failed example query: ', xp return [] return example_queries def extract_search_rels(xml): # application/opensearchdescription+xml for elem in xml.xpath('//*/*[local-name()="link" and (@*[local-name()="type"]="application/opensearchdescription+xml") and (@*[local-name()="rel"]="search" or @*[local-name()="rel"]="http://esipfed.org/ns/fedsearch/1.0/search#")]'): parent = next(iter(elem.getparent().xpath('*[local-name()="title"]')), None) yield { "link_url": elem.attrib.get('href', ''), "link_title": elem.attrib.get('title', ''), "link_type": elem.attrib.get('type', ''), "parent_title": parent.text if parent is not None else '' } def extract_response_stats(xml): total = next(iter(xml.xpath('//*[local-name()="totalResults"]/text()')), 'Unknown') subset = next(iter(xml.xpath('//*[local-name()="itemsPerPage"]/text()')), 'Unknown') return subset, total def execute_request(url, headers={}): try: req = requests.get(url, headers=headers) except: logger.error('\tSkipping connection issue\'s') return '-999', '', '' return req.status_code, req.content, req.headers def parse_response(content, headers={}): output = {} # see if it has content, see if the xml parses, see if it's even xml if not content: return {'error': 'No content'} if 'html' in headers.get('content-type'): return 
{'error': 'HTML response'} try: xml = etree.fromstring(content) except: return {'error': 'XML Parse error'} subset, total = extract_response_stats(xml) # this would get us to some nested search # there is no guarantee it is dataset/granule! # or can be identified as such! search_rels = [e for e in extract_search_rels(xml)] output.update({ 'subset': subset, 'total': total, }) if search_rels is not None: output.update({'search_rels': search_rels}) return output def parse_osdd(osdd): # get the url template to test basic search # get the parameter list (prefix:term) # get the parameter elements # match to parameter list # get namespaces output = {} output['namespaces'] = extract_namespaces(osdd) output['templates'] = [] for extracted_elem in extract_urls(osdd): template_base, template_defaults, template_params, format_defined = extract_template(extracted_elem.attrib.get('template')) accept_type = extracted_elem.attrib.get('type', '') search_url = '' search_terms = extract_parameter_key('searchTerms', template_params) if search_terms: qps = dict( chain( template_defaults.items(), {search_terms.keys()[0]: ''}.items() ) ) search_url = template_base + '?' + urllib.urlencode(qps.items()) example_url = '' example_terms = list( chain.from_iterable( [extract_query_terms(extracted_elem.getparent(), s) for s in search_terms.values()] ) ) if example_terms: qps = dict( chain( template_defaults.items(), {search_terms.keys()[0]: example_terms[0]}.items() ) ) example_url = template_base + '?' + urllib.urlencode(qps.items()) default_url = template_base + '?' 
+ urllib.urlencode(template_defaults.items()) output['templates'].append({ 'base': template_base, 'defaults': template_defaults, 'parameters': template_params, 'format_definition': format_defined, 'accept_type': accept_type, 'search_url': search_url, # empty searchTerms 'example_url': example_url, # searchTerms w/ provided keywords 'default_url': default_url, # only default params, see cwic dataset osdds 'param_defs': extract_parameter_defs(extracted_elem, template_params) }) # get the basic definition bits (keywords, name, etc) output['has_title'] = len(osdd.xpath('*[local-name()="ShortName"]')) > 0 output['has_desc'] = len(osdd.xpath('*[local-name()="Description"]')) > 0 output['has_keywords'] = len(osdd.xpath('*[local-name()="Tags"]')) > 0 output['has_contact'] = len(osdd.xpath('*[local-name()="Contact"]')) > 0 return output # - # from doug, see notes re: uptime cwic_links = [ 'http://dap.onc.uvic.ca/erddap/opensearch1.1/description.xml', 'http://gcmd.gsfc.nasa.gov/KeywordSearch/default/openSearch.jsp?Portal=cwic', 'http://podaac.jpl.nasa.gov/ws/search/dataset/osd.xml', # 'http://nsidc.org/api/opensearch/1.1/dataset/description', # we're just not going to run this 'http://ghrc.nsstc.nasa.gov/hydro/ghost.xml', 'http://mirador.gsfc.nasa.gov/mirador_dataset_opensearch.xml', 'http://eo-virtual-archive4.esa.int/search/ER02_SAR_RAW_0P/description', 'http://www1.usgs.gov/erddap/opensearch1.1/description.xml', # 'http://bison.usgs.ornl.gov/doc/api.jsp', # this is now a dead link # 'http://ceocat.ccrs.nrcan.gc.ca/opensearch_description_document.xml', # this is 403 access forbidden # 'http://rs211980.rs.hosteurope.de/mule/os-description/', # 503 service down 'http://geo.spacebel.be/opensearch/description.xml', # from the fedeo documentation page listed 'http://lance-modis.eosdis.nasa.gov/user_services/dataset_opensearch.xml' ] # + # to download the osdds cwic_osdds = [] for cwic_link in cwic_links: print 'Downloading {0}'.format(cwic_link) req = requests.get(cwic_link) 
osdd = { 'url': cwic_link, 'status': req.status_code } if req.status_code != 200: print '\tFailed request' cwic_osdds.append(osdd) continue # just checking xml = etree.fromstring(req.content) osdd.update({'xml':req.content}) cwic_osdds.append(osdd) with open('outputs/cwic_osdds.json', 'w') as f: f.write(json.dumps(cwic_osdds, indent=4)) # - # to reload from disk for parsing, etc with open('outputs/cwic_osdds.json', 'r') as f: cwic_osdds = json.loads(f.read()) for i, osdd in enumerate(cwic_osdds): if osdd.get('status') != 200: continue xml = etree.fromstring(osdd.get('xml').encode('utf-8')) parsed_osdd = parse_osdd(xml) #print parsed_osdd # try the two example queries for j, template in enumerate(parsed_osdd.get('templates', [])): accept_type = template.get('accept_type', '') headers = {'Accept': accept_type} if accept_type else {} example_url = template.get('example_url', '') search_url = template.get('search_url', '') default_url = template.get('default_url', '') if search_url: try: req = requests.get(search_url, headers=headers, timeout=15) ex = { 'status': req.status_code, 'has_content': req.content is not None } output = parse_response(req.content, req.headers) ex.update(output) template.update({'search_url_response': ex}) except requests.exceptions.ReadTimeout: template.update({'search_url_response': {'status': 'timeout'}}) if example_url: try: req = requests.get(example_url, headers=headers, timeout=15) ex = { 'status': req.status_code, 'has_content': req.content is not None } output = parse_response(req.content, req.headers) ex.update(output) template.update({'example_url_response': ex}) except requests.exceptions.ReadTimeout: template.update({'example_url_response': {'status': 'timeout'}}) if default_url: try: req = requests.get(default_url, headers=headers, timeout=15) ex = { 'status': req.status_code, 'has_content': req.content is not None } output = parse_response(req.content, req.headers) ex.update(output) template.update({'default_url_response': ex}) 
except requests.exceptions.ReadTimeout: template.update({'default_url_response': {'status': 'timeout'}}) parsed_osdd['templates'][j] = template osdd.update(parsed_osdd) cwic_osdds[i] = osdd with open('outputs/cwic_osdds_extended.json', 'w') as f: f.write(json.dumps(cwic_osdds, indent=4)) with open('outputs/cwic_osdds_extended.json', 'r') as f: cwic_osdds = json.loads(f.read()) # + # let's see what we get from the nested osdds tpl_types = ['search', 'example', 'default'] for i, ex_osdd in enumerate(cwic_osdds): for j, template in enumerate(ex_osdd.get('templates', [])): for tpl in tpl_types: key = '{0}_url_response'.format(tpl) rsp = template.get(key, {}) if not rsp: continue if rsp.get('status') != 200: continue for rel in rsp.get('search_rels', []): # let's see if we get a good osdd and add the results # and this is a resultset so they may not be # very different (param diff) rel_link = rel.get('link_url') if not rel_link: continue try: req = requests.get(rel_link, timeout=15) except requests.exceptions.ReadTimeout: rel.update({"status": "timeout"}) template.update({key: rel}) ex_osdd.get('templates')[j] = template if req.status_code != 200: rel.update({"status": req.status_code}) template.update({key: rel}) ex_osdd.get('templates')[j] = template try: xml = etree.fromstring(req.content) except: rel.update({"status": "invalid xml"}) template.update({key: rel}) ex_osdd.get('templates')[j] = template parsed = parse_osdd(xml) rel.update({ "status": req.status_code, "xml": etree.tostring(xml) }) rel.update(parsed) template.update({key: rel}) ex_osdd.get('templates')[j] = template # - with open('outputs/cwic_osdds_nested.json', 'w') as f: f.write(json.dumps(cwic_osdds, indent=4))
notebooks/data_processing/OpenSearch OSDD Evaluation (CWIC).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import tensorflow as tf import time from statistics import mean INPUT_SHAPE = (32, 256, 256, 3) WARMUP_STEPS = 50 NUM_STEPS = 200 def define_model(): layer_1 = tf.keras.layers.Conv2D(64, 3) layer_2 = tf.keras.layers.Conv2D(32, 3) layer_3 = tf.keras.layers.Conv2D(16, 3) layer_4 = tf.keras.layers.Conv2D(8, 3) layers = [layer_1, layer_2, layer_3, layer_4] @tf.function def network(): network = tf.random.normal(INPUT_SHAPE) for layer in layers: network = layer(network) return tf.math.reduce_sum(network) return network def run_benchmark(device_name): print('Running `{}` Model ...'.format(device_name)) with tf.device(device_name): model = define_model() # We run warmup for _ in range(WARMUP_STEPS): model().numpy() # We run full benchmark time_record = [] for _ in range(NUM_STEPS): start = time.time() model().numpy() time_record.append(time.time()-start) avg_time = mean(time_record[-50:]) return avg_time if __name__ == "__main__": device_name = tf.test.gpu_device_name() print("device name", device_name) if "gpu" not in device_name.lower(): print( '\n\nThis error most likely means that this notebook is not ' 'configured to use a GPU. Change this in Notebook Settings via the ' 'command palette (cmd/ctrl-shift-P) or the Edit menu.\n\n' ) raise SystemError('GPU device not found') cpu_avg_time = run_benchmark('/cpu:0') gpu_avg_time = run_benchmark(device_name) print("CPU average time per step:", cpu_avg_time) print("GPU average time per step:", gpu_avg_time) print('GPU speedup over CPU: %.2fx' % (cpu_avg_time/gpu_avg_time))
GPU Example 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/elashgari/EEGNet/blob/master/Solution_HW7_part1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="dNBedp4VbYqj" # ## Exercise 1: Logistic Regression # As we are getting to the middle of the course, the in-class exercises are getting a bit more open ended and open to interpretation. Of course, we are here to help if you have any questions. # # The question below should be answered using the Weekly data set, which contains 1,089 weekly stock returns for 21 years, from the beginning of 1990 to the end of 2010. The dataset can be downloaded from here. You may want to create a Colab notebook to code and collaborate with your teammates and answer the following questions. # # (a) Produce some numerical and graphical summaries of the Weekly data. Do there appear to be any patterns? Select one interesting plot and put it in the answers table below. Explain why it is interesting. 
# # + id="hhPcEvckq9Gm" import numpy as np import pandas as pd import matplotlib.pyplot as plt import statsmodels.api as sm # + id="fJ1hrGJFrQZF" # for reproducibility I stored the code on Github # so it would be at the same location everytime url = "https://raw.githubusercontent.com/GregoryAlbarian/DataForCS530/main/Weekly.csv" data = pd.read_csv(url) # + id="LcufPFT8rWc2" colab={"base_uri": "https://localhost:8080/", "height": 428} outputId="40f17982-ecb2-446f-a60e-8f0302d79372" # Question a) x_Year = data['Unnamed: 0'] y_Today = data['Today'] y_Yesterday = data['Lag1'] y_BeforeYesterday = data['Lag2'] y_Volume = data['Volume'] y_Direction = data['Direction'] figure, ax = plt.subplots(nrows = 2, ncols = 3) ax.ravel()[0].plot(x_Year, y_Today) ax.ravel()[0].set_title("Figure 1.") ax.ravel()[0].set_axis_off() ax.ravel()[1].plot(x_Year, y_Volume) ax.ravel()[1].set_title("Figure 2.") ax.ravel()[1].set_axis_off() ax.ravel()[2].hist(y_Today) ax.ravel()[2].set_title("Figure 3.") ax.ravel()[2].set_axis_off() ax.ravel()[3].plot(y_Direction, y_Today) ax.ravel()[3].set_title("Figure 4.") ax.ravel()[3].set_axis_off() ax.ravel()[4].plot(y_Direction, y_Yesterday) ax.ravel()[4].set_title("Figure 5.") ax.ravel()[4].set_axis_off() ax.ravel()[5].plot(y_Direction, y_BeforeYesterday) ax.ravel()[5].set_title("Figure 6.") ax.ravel()[5].set_axis_off() plt.tight_layout() plt.show() print( "Figure 1. Weekly change of stock value", "Figure 2. Weekly stock volume", "Figure 3. Histogram of weekly change of stock value", "Figure 4. Weekly change of stock value against current direction", "Figure 5. Weekly change of stock value shifted by a week against current direction", "Figure 6. Weekly change of stock value shifted by a week against current direction", sep = "\n" ) # + [markdown] id="09JYB_I1C_3h" # Question a) # # In figure 1. we can see that there are periods with more change and periods with less change. 
This is interesting as it might reflect either natural fluctuations in the stock market or maybe some real world events. For instance, without knowing what this stock is, the massive fluctuation around 2008 might just reflect the financial crisis. # # In figure 2. we can see that volume typically increases, but again around 2008 the volume starts dropping which might be a result of the financial crisis. # # In figure 3. the histogram is centered around 0 indicating that stock has mostly retained its value over a 20 year period, the slight tilt to the right indicates that this stock has however slightly increased in value. # # In figure 4. the plot is trivial as Direction is generated by the sign of Today, there is thus a slope in the graph indicating that values for change were higher when the stock was going up than when the stock was going down. # # In figure 5. 6. # Here by adding a delay of 1 week or 2 week we can see that the slope becomes flat. This is interesting as it indicates that the change from more than a week ago brings little information when considering the actual direction of the stock value # + [markdown] id="PXPpMsONbzqn" # (b) Use the full data set to perform a logistic-regression analysis with Direction as the response and the five lag variables plus Volume as predictors. Use the summary function to print the results. Do any of the predictors appear to be statistically significant? If so, which ones? Explain why these results do or do not make sense to your group. 
# # + id="ogHrA8tSsySz" colab={"base_uri": "https://localhost:8080/"} outputId="d497f312-4be2-43f6-9096-59d1830eeda3" # Question b) X = data[['Lag1','Lag2','Lag3','Lag4','Lag5','Volume']] X = sm.add_constant(X) Direction_temp = data[['Direction']] Direction = (Direction_temp == "Up") Direction = ~(Direction_temp == "Down") logit_mod = sm.Logit(Direction, X) logit_res = logit_mod.fit() print(logit_res.summary()) # + [markdown] id="W7pSACf1bXrR" # # + [markdown] id="HCaX1XQzLnmH" # Question b) # # Considering the initial overview in question a) we did not expect any of the lags to influence the current direction. However, Lag2 is a significant coefficient. This is odd because one would expect that if Lag2 matters then Lag1 should matter too. This could indicate that this stock reacts according to a mechanism that has a 2 weeks delay (or reaction time). # No other coefficient is however significant which does confirm our expectations from question a). Further, the model is very poor with a Pseudo R-squared of 0.00658, indicating a poor fit. # + [markdown] id="uIqbkD7hhIJU" # Question c) Compute the confusion matrix and overall fraction of correct predictions. Show it in the table and explain what the confusion matrix is telling you about the types of mistakes made by the logistic-regression analysis. 
# + id="wTkPcjaGhdsD" from sklearn.metrics import confusion_matrix, accuracy_score from sklearn.metrics import ConfusionMatrixDisplay # + colab={"base_uri": "https://localhost:8080/"} id="u67GGFqzqvyk" outputId="ab41c3ee-e620-4017-eec7-1b017c85bd1c" # !pip install scikit-plot # + colab={"base_uri": "https://localhost:8080/", "height": 352} id="2po71ltIp134" outputId="d08c0bf1-fcc1-4ed8-892a-72408796b5d3" import scikitplot as skplt import matplotlib.pyplot as plt y_true = Direction*1 y_probas = logit_res.predict(X) y_probas = np.array((1-y_probas,y_probas)).T skplt.metrics.plot_roc_curve(y_true, y_probas) plt.show() # + colab={"base_uri": "https://localhost:8080/"} id="qEhBKiQ3hiWb" outputId="0ad2b15e-3479-430c-d029-20ef042dfad7" y_true = Direction*1 y_pred = (logit_res.predict(X) < 0.682441)*1 score = accuracy_score(y_true,y_pred) con_matrix = pd.DataFrame(confusion_matrix(y_true, y_pred)) con_matrix.columns = ["P","N"] con_matrix.index = ["P","N"] print("Confusion Matrix") print(con_matrix) print("Score: ", score) # + [markdown] id="7ef6nBBjdwPd" # Because the model does not have a very good fit, it simply predicts the outcome that’s more likely overall, regardless of the predictors. # + [markdown] id="Y6i6tkuiAzPQ" # (d) Now fit the logistic-regression model using the data period from 1990 to 2008 as your training set, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). Put these into the table. # # * Now fit the logistic-regression model using the data period from 1990 to 2008 as your training set, with Lag2 as the only predictor. Compute the confusion matrix and the overall fraction of correct predictions for the held out data (that is, the data from 2009 and 2010). Put these into the table. 
# + id="Vs4chG1oCyHw" colab={"base_uri": "https://localhost:8080/"} outputId="afc2da2b-5122-4891-af66-51a43157344f" train = data[(data['Year'] >= 1990) & (data['Year'] <= 2008)] X = train['Lag2'] X = sm.add_constant(X) Direction_temp = train['Direction'] Direction = (Direction_temp == "Up") Direction = ~(Direction_temp == "Down") logit_mod = sm.Logit(Direction, X) logit_res = logit_mod.fit() print(logit_res.summary()) # + colab={"base_uri": "https://localhost:8080/"} id="pbektpIuEp-E" outputId="3c909b2d-0f1a-4ba6-a9ca-539e397a4b34" #Get 2009-2010 Data test = data[data['Year']>2008] test_direction_temp = test['Direction'] test_direction = (test_direction_temp == "Up") test_direction = ~(test_direction_temp == "Down") X_test = test['Lag2'] X_test = sm.add_constant(X_test) #Predict on held out data using 1990-2008 model test_pred = (logit_res.predict(X_test) < 0.682441)*1 test_true = test_direction*1 score = accuracy_score(test_true,test_pred) con_matrix = pd.DataFrame(confusion_matrix(test_true, test_pred)) con_matrix.columns = ["P","N"] con_matrix.index = ["P","N"] print("Confusion Matrix") print(con_matrix) print("Score: ", score) # + [markdown] id="FapZ-iZRA1iY" # (e) Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the variables, method, and associated confusion matrix that appears to provide the best results. Remember that the best results should be tested on held-out data. # # # * Experiment with different combinations of predictors, including possible transformations and interactions, for each of the methods. Report the variables, method, and associated confusion matrix that appears to provide the best results. Remember that the best results should be tested on held-out data. 
#

# + id="YCIxqo19AyBK"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split


def logistic_model_stocks(variables_used):
    """Fit and evaluate a logistic regression predicting weekly stock Direction.

    Reads the notebook-global ``data`` DataFrame. ``variables_used`` is a
    list of column names used as predictors. Prints the held-out confusion
    matrix and accuracy; returns nothing.
    """
    y_temp = data['Direction']
    # Label is True for "Up" weeks. (The original code first computed
    # ``y = (y_temp == "Up")`` and immediately overwrote it with the
    # equivalent ``~(y_temp == "Down")``; the dead assignment is removed.)
    y = ~(y_temp == "Down")
    X = data[variables_used]
    # Fixed random_state keeps the 67/33 split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.33, random_state=42)
    clf = LogisticRegression().fit(X_train, y_train)
    y_predicted = clf.predict(X_test)
    score = accuracy_score(y_test, y_predicted)
    con_matrix = pd.DataFrame(confusion_matrix(y_test, y_predicted))
    con_matrix.columns = ["P", "N"]
    con_matrix.index = ["P", "N"]
    print("Confusion Matrix")
    print(con_matrix)
    print("Score: ", score)


# Experiments: try dropping different predictor subsets and compare accuracy.

# + colab={"base_uri": "https://localhost:8080/"} id="e6ALFPm-VPKZ" outputId="68593f42-9930-427a-c001-353e5e827d21"
columns = ['Year', 'Lag1', 'Lag2', 'Lag3', 'Lag4', 'Lag5', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + id="JeJhd9kLdR4k" colab={"base_uri": "https://localhost:8080/"} outputId="cfeded1b-e97c-4e0a-d4f1-b5d5ddaf7be1"
columns = ['Year', 'Lag2', 'Lag3', 'Lag4', 'Lag5', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + colab={"base_uri": "https://localhost:8080/"} id="P9Cj1aVdwdow" outputId="30c203e8-fb22-4431-c449-d479d4c7b536"
columns = ['Year', 'Lag1', 'Lag3', 'Lag4', 'Lag5', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + colab={"base_uri": "https://localhost:8080/"} id="HLYExbnmwg1s" outputId="17376096-3f93-429b-8cc4-e4108ec874f9"
columns = ['Year', 'Lag1', 'Lag2', 'Lag4', 'Lag5', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + colab={"base_uri": "https://localhost:8080/"} id="HAorm6SuwkDl" outputId="0fd07b71-14b7-41e5-f18a-7a40c04a9680"
columns = ['Year', 'Lag1', 'Lag4', 'Lag5', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + colab={"base_uri": "https://localhost:8080/"} id="rWgRBSYRwqRg" outputId="30f51ecf-3a3e-43b0-fcc9-1840de485e58"
columns = ['Year', 'Lag1', 'Lag2', 'Lag3', 'Volume']
logistic_model_stocks(columns)
columns_used = " ".join(columns)
print("variables used: ", columns_used)

# + [markdown] id="TsfLAXoSxEPF"
# The last one I did produced the best results. The variables I used were Year, Lag1, Lag2, Lag3, and Volume. The tests in part 2 were close in accuracy between with all between 50 and 60 per cent accuracy.

# + id="zbs3nhRKxcWp"
Solution_HW7_part1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Week 3 - Classical ML Models - Part II # # ## 1. Support Vector Machine # # During the previous week, we learned how to use logistic regression for binary classification problems. # # This week, we are going to look at another type of supervised model that can be used for classification problems - **Support vector machine** (or **SVM**). # # ### Introduction # # ![SVM](https://miro.medium.com/max/625/1*ala8WX2z47WYpn932hUkhA.jpeg) # # SVM is a supervised ML algorithm that is most commonly used for binary classification problems. As in the previous cases, the training of such model involves passing set of examples $(x_i, y_i)$. For instance, we want to build a model for spam detection. In such case, the feature ($x_i$) could contain the word / link count, while the label ($y_i$) could be either 1 (spam) or 0 (not-spam). # # So far, we have a general understanding about the training process of SVM, on the other hand, we have not yet covered the hypothesis function we are trying to fit throughout training. # ### Hypothesis # # The goal of SVM algorithm is to find a hyperplane (boundary line) with the following properties: # - It creates separation with the maximum margin between variable classes # - Equation $>1$ output for positive class and $<-1$ for examples in negative class # # In mathematical terms this can be written as: # - $\hat{y} = -1$ if $w^T*x + b < 0$ # - $\hat{y} = 1$ if $w^T*x + b \geqslant 0$ # # ### Cost function # # As in the previous models, SVM has a cost function associated to it. As we have two main goals: minimizing the individual distances ($w$) between the data point and hyperline, and maximizing the margin width. 
#
# Therefore, our cost function has two parts:
#
# $J(w) = \frac{[w]^2}{2} + C[\frac{1}{N}\sum_{i}^n max(0, 1 - y_i(wx_1 + b))]$
#
# The second half of the function is also called hinge loss. In addition, we are using a regularization constant $C$ as a way of weighting misclassification.
#
# In python code, the cost function can be expressed in the following way:

def compute_cost(W, X, Y):
    """Full SVM objective: 0.5 * ||W||^2 + C * mean(hinge loss).

    Parameters
    ----------
    W : 1-D weight vector.
    X : 2-D feature matrix, one example per row.
    Y : 1-D label vector with values in {-1, +1}.

    Returns the scalar cost.  Relies on the module-level
    ``reg_strength`` global as the regularization constant C.
    """
    # calculate hinge loss
    N = X.shape[0]
    distances = 1 - Y * (np.dot(X, W))
    distances[distances < 0] = 0  # equivalent to max(0, distance)
    hinge_loss = reg_strength * (np.sum(distances) / N)

    # calculate cost: L2 term plus average hinge loss
    cost = 1 / 2 * np.dot(W, W) + hinge_loss
    return cost

# As we now have our cost function, we need to find a way to optimize it.

# ### Optimization
#
# Similar to the logistic regression model, we are going to apply the gradient descent algorithm to minimize our cost function. After taking the partial derivative in respect to $w$, we get the following system:
# - $\frac{1}{N}\sum_{i}^n w$ if $max(0, 1 - y_i*(wx_i)) = 0$
# - $\frac{1}{N}\sum_{i}^n w - Cy_ix_i$ otherwise
#
# Such system has the following Python implementation:

def calculate_cost_gradient(W, X_batch, Y_batch):
    """Gradient of the SVM cost w.r.t. W, averaged over the batch.

    Accepts either a batch (2-D X, 1-D Y) or a single example
    (1-D X, scalar Y), as produced by the SGD loop in ``update``.
    Uses the module-level ``reg_strength`` global as C.
    """
    # If only one example is passed (e.g. in case of SGD), promote the
    # scalar label and its feature row to batch form.  np.ndim() covers
    # any scalar type; the original `type(Y_batch) == np.float64` check
    # silently failed for float32, int, or plain Python labels.
    if np.ndim(Y_batch) == 0:
        Y_batch = np.array([Y_batch])
        X_batch = np.array([X_batch])

    distance = 1 - (Y_batch * np.dot(X_batch, W))
    dw = np.zeros(len(W))

    for ind, d in enumerate(distance):
        if max(0, d) == 0:
            # correctly classified outside the margin: only the L2 term
            di = W
        else:
            # inside the margin / misclassified: L2 term minus C*y*x
            di = W - (reg_strength * Y_batch[ind] * X_batch[ind])
        dw += di

    dw = dw / len(Y_batch)  # average over the batch
    return dw

# After finding the gradient of the cost function, we have to update our weights which can be done in a quite similar manner:

def update(features, outputs):
    """Train SVM weights by stochastic gradient descent.

    Parameters
    ----------
    features : 2-D array of training examples, one per row.
    outputs  : 1-D array of labels in {-1, +1}.

    Returns the learned 1-D weight vector.  Relies on the
    module-level ``learning_rate`` and ``shuffle`` globals.
    """
    max_epochs = 5000
    weights = np.zeros(features.shape[1])

    # stochastic gradient descent
    # NOTE: range(max_epochs) runs all 5000 epochs; the original
    # range(1, max_epochs) silently dropped one epoch.
    for epoch in range(max_epochs):
        # shuffle to prevent repeating update cycles
        X, Y = shuffle(features, outputs)
        for ind, x in enumerate(X):
            ascent = calculate_cost_gradient(weights, x, Y[ind])
            weights = weights - (learning_rate * ascent)

    return weights

# ### Sklearn implementation
#
# As in the previous cases, instead of building our model from scratch, we can save some time and use sklearn library.

# +
from sklearn import svm

clf = svm.SVC(kernel = 'linear')
clf.fit(X_train, y_train)
# -

# As you may notice here, we have defined **kernel** parameter. As you may remember from the start, some parameter correlations might not be linear: the hyperline might have to take circle for polynomial forms to differentiate variable classes. The kernel parameter defines the form of the this hyperline.

# ### Exercise
#
# Now, it's time to apply our skills. For this purpose, we are going to use the breast cancer patients data.
import pandas as pd import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn import svm from sklearn import metrics # + data = datasets.load_breast_cancer() X = data.data y = data.target #Split data X_train, X_test, y_train, y_test = #Selecting SVM model with 'linear' kernel #Fitting model into train dataset #Save predictions to y_pred variable y_pred = # - print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) # ### Logistic regression vs SVM # # We can see that SVM and logistic regression has many similarities: both use lines to differentiate classes to solve classification problems. As a result, both models (in most cases) can be used as substitutes for one another without a larger drop in accuracy. # # On the other hand, it is useful to know cases when one of the models provides a better computational performance: # - When the number of features is large (1 - 10000) and number of training examples (10 - 10000) is small, use **logistic regression** or **SVM with a linear kernel**. # - When the number of features is small (1 - 1000) and number of training examples is intermediate (10 - 10000), use **SVM**. # - When number of features is small (1 - 1000) and number of training examples is large (50000 - 1000000), use **logistic regression** or **SVM with a linear kernel**.
Week-03/1_SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.5 # language: python # name: python3 # --- print("loaded data set from mnist_1 method") print("introduce regulisation") # https://machinelearningmastery.com/how-to-reduce-overfitting-in-deep-learning-with-weight-regularization/ from load_mnist_1 import * # + train_images, train_labels = load_images_train() train_labels_str = image_class_to_str(train_labels) test_images, test_labels = load_images_test() test_lables_str = image_class_to_str(test_labels) # + # https://betweenandbetwixt.com/2019/01/04/convolutional-neural-network-with-keras-mnist/ from keras.layers import Dense,GlobalAveragePooling2D from keras.preprocessing import image from keras.applications.mobilenet import preprocess_input from keras.preprocessing.image import ImageDataGenerator from keras.models import Model from keras.optimizers import Adam, SGD, Adadelta from keras.applications.resnet50 import ResNet50 from keras.preprocessing import image from keras.applications.resnet50 import preprocess_input, decode_predictions import numpy as np from keras.layers import Dense, Conv2D, Activation, Flatten, MaxPool2D, BatchNormalization from keras.models import Sequential from keras.utils import to_categorical # - from keras.regularizers import l2 # + # keras.applications.resnet.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000) model = Sequential() model.add( Conv2D( filters=8, kernel_size=3, padding="same", input_shape=(28,28,1), data_format="channels_last")) model.add(Activation("relu")) model.add(Conv2D(filters=16, kernel_size=3, padding="same", activation="relu")) model.add(Conv2D(filters=16, kernel_size=3, padding="same", activation="relu", kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))) model.add(MaxPool2D(pool_size=2, strides=2)) 
model.add(Conv2D(filters=32, kernel_size=3, padding="same", activation="relu")) model.add(Conv2D(filters=32, kernel_size=3, padding="same", activation="relu", kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))) model.add(MaxPool2D(pool_size=2, strides=2)) model.add(Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")) model.add(Flatten()) # Stretching out for our FC layer model.add(Dense(128)) model.add(Activation("relu")) model.add(Dense(128, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))) model.add(Activation("relu")) model.add(Dense(128, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01))) model.add(Activation("relu")) model.add(Dense(10,activation='softmax')) # print the cnn arch model.summary() model.compile(loss="categorical_crossentropy", optimizer=Adadelta(), metrics=["accuracy"]) # Test accuracy: 0.9157 5 epoch # Test accuracy: 0.918 10 epoch # + # reshape inputs no_images_train = len(train_images) no_images_test = len(test_images) train_images_reshape = train_images.reshape(no_images_train, 28,28,1) test_images_reshape = test_images.reshape(no_images_test, 28,28,1) train_labels_cat = to_categorical(train_labels, num_classes=10) test_labels_cat = to_categorical(test_labels, num_classes=10) print(train_labels[10]) print(train_labels_cat[10]) print(train_labels[5]) print(train_labels_cat[5]) train_images_reshape = train_images_reshape / 255 test_images_reshape = test_images_reshape / 255 # - history = model.fit( train_images_reshape, train_labels_cat, epochs=20, batch_size=128, validation_data=(test_images_reshape, test_labels_cat)) # + # We can get our score score = model.evaluate(test_images_reshape, test_labels_cat, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) # - # https://stackoverflow.com/questions/41908379/keras-plot-training-validation-and-test-set-accuracy # https://machinelearningmastery.com/display-deep-learning-model-training-history-in-keras/ print(history.history.keys()) # 
# "Accuracy"
# NOTE(review): matplotlib.pyplot is never imported explicitly in this
# notebook — `plt` presumably leaks in via `from load_mnist_1 import *`.
# Import it explicitly so these plotting cells stand on their own.
import matplotlib.pyplot as plt

# Training vs. validation accuracy per epoch.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

# "Loss"
# Training vs. validation loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
dl_at1b-master/output_notebooks/mnist_cnn2_reg_output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Jails # + [markdown] tags=["ignore"] # ## <font color =#d14d0f>The Data</font> <a id='sectiondata'></a> # --- # # In this notebook, you will use data from the Jail Profile Survey provided by the Board of State and Community Corrections (BSCC). # # # The Jail Profile Survey releases reports on data recorded by local agencies. Some of the information we will find here include the total number of unsentenced and sentenced males and females in each of these facilities. This data has been used to determine the needs of each of these counties when determining the distribution of state bond monies and future projections for other jail needs. # # If you are interested in learning more please visit: # # [Jail Profile Survey](http://www.bscc.ca.gov/downloads/JPSWorkbook.pdf) # # [Jail Profile Survey data](https://app.bscc.ca.gov/joq//jps/QuerySelection.asp) # # # ## <font color =#d14d0f>Context</font> <a id='sectioncontext'></a> # --- # # Throughout the course, we have learned about the policies of realignment, incarceration, and crime trends in California. By exploring these data sets we hope to obtain a clearer picture of the magnitude of prison and county jail overcrowding per facility and region, and the effects of realignment policies on state prisons and county jails. # # To review, the key difference between state prisons and jails involve the process of sentencing. Prisons are designed for long term sentences, while jails are for those who are unsentenced or have short term sentences. It is important to note that short-term sentences are generally one year or less. Another difference is that prisons are larger and controlled at the state level. In contrast, jails are smaller and handled by a city or county. 
# # The relationship between the two institutions is emphasized by mass incarceration. Through this activity, we will analyze how overcrowding within California’s state prisons influenced the size of jail populations after realignment policies were implemented. # + [markdown] tags=["ignore"] # ## <font color =#d14d0f>Jails</font> <a id='section1'></a> # --- # To observe how realignment influenced California's jails, we obtained data from 1995 to 2018, so that we could understand the trend over time. We will start by observing California's state prisons using the data we found through the BSCC. # # ### **Data** # # #### Data Dictionary # Below you will find a data dictionary for future reference. This data dictionary goes over what the column names represent in the data we will load below. # # |Column Name | Description | # |--------------|---------| # |Jurisdiction | The unit of government that has legal authority over an inmate (state or federal)| # |Facility | Name of the county jail | # |Year |Year that the data was collected | # |Month | Month that the data was collected | # |Unsentenced males| Non-sentenced inmates are all inmates other than those who have been sentenced on all charges pending * ** | # |Unsentenced females| Non-sentenced inmates are all inmates other than those who have been sentenced on all charges pending * **| # |Sentenced males| Sentenced inmates are those who have been sentenced on all charges and are no longer on trial. This category includes inmates who are being incarcerated pending or during an appeal. * | # |Sentenced females|Sentenced inmates are those who have been sentenced on all charges and are no longer on trial. This category includes inmates who are being incarcerated pending or during an appeal. 
* | # |Total facility ADP| ADP Total should include all inmates (including those under contract from any agency/jurisdiction) assigned to all single/double and multiple occupancy cells, administrative segregation, disciplinary isolation, and medical and mental health beds.| # # # **Please note the following:** # # \* Note that the counts for sentenced and unsentences male/female inmates is an *average daily population (ADP)* for the given month. # # # ** For example, if an inmate has been sentenced on three charges but is still being tried on a fourth charge, they should be reported as “non-sentenced.” # # ** If an inmate is found not to be competent for trial and is detained in a county jail facility, count them in Non-Sentenced (Male/Female & Misdemeanor/Felony). If they are detained in the state hospital, do not count them in any category. # - jail = Table().read_table("data/jails_cleaned.csv").drop(0) jail jail.group("Facility").sort("count", descending=True).show() # 161 facilities in the data set. Have the same 23 years of data from 1995 to 2018. 
# +
months = 12
# Add one so both endpoint years (1995 and 2018) are counted; the
# subtraction alone drops one of them.  (The original comment already
# said "we add one to our calculation" but the code never did.)
years = 2018 - 1995 + 1
# Expected number of monthly records per facility.
months * years
# -

# Average within each (Year, Facility), then sum the facility averages per year.
jail_summed_corr = jail.group(["Year", "Facility"], np.average).group('Year', sum)
jail_summed_corr

jail_summed_corr.show()

jail_summed_corr.plot('Year', "Total facility ADP average sum")

# Same plot with a fixed y-axis for easier year-to-year comparison.
jail_summed_corr.plot('Year', "Total facility ADP average sum")
plt.ylim(0, 85000)

# Combine male + female counts into total sentenced / unsentenced series.
m_sentenced = jail_summed_corr.column("Sentenced males average sum")
f_sentenced = jail_summed_corr.column("Sentenced females average sum")
sentenced = m_sentenced + f_sentenced

m_unsentenced = jail_summed_corr.column("Unsentenced males average sum")
f_unsentenced = jail_summed_corr.column("Unsentenced females average sum")
unsentenced = m_unsentenced + f_unsentenced

jail_summed_corr.with_column("Total Sent", sentenced, "Total Unsent", unsentenced).select("Year", "Total Sent", "Total Unsent").plot("Year")

# Same comparison with a fixed y-axis.
jail_summed_corr.with_column("Total Sent", sentenced, "Total Unsent", unsentenced).select("Year", "Total Sent", "Total Unsent").plot("Year")
plt.ylim(0, 60000)
Week_4/Thursday/.ipynb_checkpoints/Final Jails nb-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: geoml # language: python # name: geoml # --- # # Unsupervised learning on rock properties # # Sometimes we don't have labels, but would like to discover structure in a dataset. This is what clustering algorithms attempt to do. They don't require labels from us &mdash; they are 'unsupervised'. # # We'll use a subset of the [Rock Property Catalog](http://subsurfwiki.org/wiki/Rock_Property_Catalog) data, licensed CC-BY Agile Scientific. Note that the data have been preprocessed, including the addition of noise to the density data. # # We'll use two unsupervised clustering techniques: # # - k-means clustering # - DBSCAN # - HDBSCAN # # And two unsupervised dimensionality reduction techniques: # # - t-SNE # - UMAP # # We do have lithology labels for this dataset (normally in an unsupervised problem you wouldn't have labels), so we can use those as a measure of how well we're doing with the clustering. # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # - df = pd.read_csv('../data/RPC_4_lithologies.csv') df.describe() # Notice that the count of `Rho` values is smaller than for the other properties. # # Pairplots are a good way to see how the various features are distributed with respect to each other: # + features = ['Vp', 'Vs', 'Rho_n'] sns.pairplot(df.dropna(), vars=features, hue='Lithology') # - # ## Clustering with _k_-means # # From [the Wikipedia article](https://en.wikipedia.org/wiki/K-means_clustering): # # > k-means clustering is a method of vector quantization, originally from signal processing, that is popular for cluster analysis in data mining. k-means clustering aims to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster. 
# This results in a partitioning of the data space into Voronoi cells.

from sklearn.cluster import KMeans

# +
clu = KMeans()

# This will fail...
clu.fit(df[features].values)
# -

# The old classic: NaNs. Remember the count of `Rho` points being smaller than the others?
#
# The easiest thing to do, assuming we have the data, is to drop the rows with NaNs.

df = df.dropna()

clu.fit(df[features])

df['Kmeans'] = clu.predict(df[features].values)

# BUG FIX: the column created above is named 'Kmeans' (no space); grouping
# by 'K means' here raised a KeyError because that column is only created
# in a later cell.
for name, group in df.groupby('Kmeans'):
    plt.scatter(group.Vp, group.Rho_n, label=name)
plt.legend()

# We actually do have the labels, so let's compare...

for name, group in df.groupby('Lithology'):
    plt.scatter(group.Vp, group.Rho_n, label=name)
plt.legend()

# +
clu = KMeans(n_clusters=4)

df['K means'] = clu.fit_predict(df[features])

for name, group in df.groupby('K means'):
    plt.scatter(group.Vp, group.Rho_n, label=name)
plt.legend()

# We can add the centroids as well:
plt.plot(*clu.cluster_centers_[:, ::2].T, 'o', c='k', ms=10)
# -

# ## Clustering with DBSCAN
#
# DBSCAN has nothing to do with databases. From [the Wikipedia article](https://en.wikipedia.org/wiki/DBSCAN):
#
# > Density-based spatial clustering of applications with noise (DBSCAN) is [...] a density-based clustering algorithm: given a set of points in some space, it groups together points that are closely packed together (points with many nearby neighbors), marking as outliers points that lie alone in low-density regions (whose nearest neighbors are too far away). DBSCAN is one of the most common clustering algorithms and also most cited in scientific literature.

# +
from sklearn.cluster import DBSCAN

DBSCAN()
# -

# There are two important hyperparameters:
#
# - `eps`, the maximum distance between points in the same cluster.
# - `min_samples`, the minimum number of samples in a cluster.
# + clu = DBSCAN(eps=150, min_samples=10) clu.fit(df[features]) # - df['DBSCAN'] = clu.labels_ for name, group in df.groupby('DBSCAN'): plt.scatter(group.Vp, group.Rho_n, label=name) # It's a bit hard to juggle the two parameters... let's make an interactive widget: # + from ipywidgets import interact @interact(eps=(10, 250, 10)) def plot(eps): clu = DBSCAN(eps=eps) clu.fit(df[features]) df['DBSCAN'] = clu.labels_ for name, group in df.groupby('DBSCAN'): plt.scatter(group.Vp, group.Rho_n, label=name) # - # ## HDBSCAN # # ### Clustering with HDBSCAN # # HDBSCAN is an improvement on DBSCAN, but is not yet available in `sklearn`. Install it with: # # conda install -c conda-forge hdbscan # # DBSCAN has nothing to do with databases. From [the Wikipedia article](https://en.wikipedia.org/wiki/DBSCAN): # # > Density-based spatial clustering of applications with noise (DBSCAN) is [...] a density-based clustering algorithm: given a set of points in some space, it groups together points that are closely packed together (points with many nearby neighbors), marking as outliers points that lie alone in low-density regions (whose nearest neighbors are too far away). DBSCAN is one of the most common clustering algorithms and also most cited in scientific literature. # # HDBSCAN does away with the epsilon parameter (the maximum distance between points in the same cluster) in DBSCAN, leaving only `n`. So there is only one important hyperparameter: # # - `min_samples`, the minimum number of samples in a cluster. 
# + from hdbscan import HDBSCAN clu = HDBSCAN(min_samples=10) clu.fit(df[features]) df['HDBSCAN'] = clu.labels_ # - # Anything the algothim considers to be noise is assigned -1: np.unique(clu.labels_) # -1 is 'noise' for name, group in df.groupby('HDBSCAN'): plt.scatter(group.Vp, group.Rho_n, label=name) # + from ipywidgets import interact @interact(min_samples=(1, 60, 2)) def plot(min_samples): clu = HDBSCAN(min_samples=min_samples) clu.fit(df[features]) df['HDBSCAN'] = clu.labels_ for name, group in df.groupby('HDBSCAN'): plt.scatter(group.Vp, group.Rho_n, label=name) # - # ## Comparing clusterings # There are metrics for comparing clusterings. For example, `adjusted_rand_score` &mdash; from the scikit-learn docs: # # > The Rand Index computes a similarity measure between two clusterings by considering all pairs of samples and counting pairs that are assigned in the same or different clusters in the predicted and true clusterings. # > # > The raw RI score is then “adjusted for chance” into the ARI score using the following scheme: # > # > ARI = (RI - Expected_RI) / (max(RI) - Expected_RI) # > # > The adjusted Rand index is thus ensured to have a value close to 0.0 for random labeling independently of the number of clusters and samples and exactly 1.0 when the clusterings are identical (up to a permutation). # # Conveniently, there is no need for the labels to correspond &mdash; the algorithm just compares whether similar points in one clustering are still similar in the other # + from sklearn.metrics import adjusted_rand_score print(adjusted_rand_score(df.Lithology, df.Kmeans)) print(adjusted_rand_score(df.Lithology, df.DBSCAN)) # - # There are a lot of other clustering algorithms to try. 
# This figure is from [the `sklearn` docs](https://scikit-learn.org/stable/modules/clustering.html):
#
# <img src="../images/sphx_glr_plot_cluster_comparison_0011.png" />

# ## Manifold embedding with t-SNE
#
# [`t-SNE`](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) was recently implemented in `sklearn`. t-statistic neighbourhood embedding is a popular and very effective dimensionality reduction strategy. The caveat is that distance between clusters is not typically meaningful. But it's at least a useful data exploration tool.
#
# Usually we want `n_components=2`, so we get the data into a 2-space, e.g. for cross-plotting.

# +
from sklearn.manifold import TSNE

tsne = TSNE(init='random', perplexity=100)

# BUG FIX: `cols` is never defined in this notebook — the feature list is
# named `features` (defined near the top).  Using `cols` raised a NameError.
embedding = tsne.fit_transform(df[features].values)

# +
from sklearn.preprocessing import LabelEncoder

labels = LabelEncoder().fit_transform(df.Lithology)
# -

plt.scatter(*embedding.T, c=labels, cmap='tab10', vmax=10)
plt.show()

# ## Manifolds with UMAP
#
# Another popular tool is UMAP. Check out [`umap-learn`](https://pypi.org/project/umap-learn/). You can install it with
#
#     conda install -y -c conda-forge umap-learn
#
# It has the same interface as `sklearn` so it's very easy to use.

# +
from umap import UMAP

umap = UMAP(metric='euclidean')

# BUG FIX: same `cols` -> `features` correction as in the t-SNE cell above.
embed_umap = umap.fit_transform(df[features].values)
# -

plt.scatter(*embed_umap.T, c=labels, cmap='tab10', vmax=10)
plt.show()

# It's fairly common to attempt clustering on the result of the dimensionality reduction, but you should be wary of treating the embedding as a metric space. That is, distances in this space may not correspond to any natural or useful property.

# + [markdown] tags=["exercise"]
# ### Exercises
#
# - Can you make the interactive widget display the Rand score? Use `plt.text(x, y, "Text")`.
# - Can you write a loop to find the value of `eps` giving the highest Rand score?
# - Can you add the `min_samples` parameter to the widget?
# - Explore some of [the other clustering algorithms](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster). # - Try some clustering on one of your own datasets (or use something from [sklearn](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.datasets), e.g. `sklearn.datasets.load_iris`). # -
master/Unsupervised_clustering_on_rock_properties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + # %matplotlib inline import numpy as np import pandas as pd from matplotlib import pyplot as plt import seaborn as sns sns.set_context(rc = {'patch.linewidth': 2.0}) sns.set_style("white") sns.set_palette(sns.color_palette("dark")) plt.rcParams["patch.force_edgecolor"] = True plt.style.use('seaborn-notebook') data_dir = '../data/' # - df = pd.DataFrame.from_csv(data_dir + 'df_aadt.csv') df.head() # + min_year = df.Year.min() max_year = df.Year.max() years = range(min_year, max_year + 1) directions = ['Ahead', 'Back'] time_labels = ['Morning', 'Daytime', 'Evening', 'Nighttime'] time_splits = [5.0, 10.0, 14.0, 20.0] # - df_switrs = pd.DataFrame.from_csv(data_dir + 'df_switrs.csv') df_switrs.head() def get_df_collisions(df_switrs, df): df_collisions = {} for segment in df.Segment_ID.unique(): df_collisions[segment] = {} df_base = df_switrs[df_switrs.Segment_ID == segment] df_collisions[segment]['total'] = df_base df_collisions[segment]['ahead'] = df_base[df_base.Forward] df_collisions[segment]['back'] = df_base[~df_base.Forward] return df_collisions df_collisions = get_df_collisions(df_switrs, df) def get_collisions(s, ahead = True): if not s in df_collisions: return 0 col = 'ahead' if ahead else 'back' return len(df_collisions[s][col]) # + df['Ahead_Collisions'] = df.Segment_ID.apply(get_collisions, args=(True, )) df['Back_Collisions'] = df.Segment_ID.apply(get_collisions, args=(False, )) df['Total_Collisions'] = df.Ahead_Collisions + df.Back_Collisions df['Ahead_Collision_Ratio'] = df.Ahead_Collisions / df.Total_Collisions df['Back_Collision_Ratio'] = df.Back_Collisions / df.Total_Collisions df.Ahead_Collision_Ratio.fillna(0, inplace=True) df.Back_Collision_Ratio.fillna(0, inplace=True) # - def get_time_collisions(s, ti, tf, ahead = True): if 
not s in df_collisions: return 0 col = 'ahead' if ahead else 'back' df_time = df_collisions[s][col] if tf > ti: return len(df_time[(df_time.Collision_Hours >= ti) & (df_time.Collision_Hours < tf)]) else: return len(df_time[(df_time.Collision_Hours < tf) | (df_time.Collision_Hours >= ti)]) for direction in directions: print 'Setting %s Collisions...' % direction for i, label in enumerate(time_labels): print ' %s...' % label ti = time_splits[i] tf = time_splits[(i + 1) % len(time_splits)] ahead = (direction == 'Ahead') col = '%s_Collisions_%s' % (direction, label) df[col] = df.Segment_ID.apply(get_time_collisions, args=(ti, tf, ahead)) def get_accident_spread(s, ahead = True): if not s in df_collisions: return 0 col = 'ahead' if ahead else 'back' return df_collisions[s][col].Postmile.std() if len(df_collisions[s][col]) > 1 else -1 df['Ahead_Collision_Spread'] = df.Segment_ID.apply(get_accident_spread, args=(True, )) df['Back_Collision_Spread'] = df.Segment_ID.apply(get_accident_spread, args=(False, )) # + counties = {} for year in years: df_year = df[df.Year == year] counties[year] = set(df_year.County.unique()) print '%s - Total Counties: %d' % (year, len(counties[year])) for year1 in years: print for year2 in years: print len(np.intersect1d(counties[year1], counties[year2])[0]), # + counties_dict = dict([(v, k) for k, v in enumerate(df.County.unique(), 1)]) print counties_dict # - df['County_Name'] = df.County df.County = df.County.apply(lambda x: counties_dict[x]) # + cols = [ u'Segment_ID', u'Segment_Num', u'Year', u'Route', u'County', u'District', u'Postmile', u'Postmile_Boundary', u'Postmile_Distance', u'Latitude', u'Longitude', u'Back_Peak_Hourly', u'Back_Peak_Monthly', u'Back_AADT', u'Ahead_Peak_Hourly', u'Ahead_Peak_Monthly', u'Ahead_AADT', u'Ahead_Collisions', u'Back_Collisions', u'Total_Collisions', u'Ahead_Collisions_Morning', u'Ahead_Collisions_Daytime', u'Ahead_Collisions_Evening', u'Ahead_Collisions_Nighttime', u'Back_Collisions_Morning', 
u'Back_Collisions_Daytime', u'Back_Collisions_Evening', u'Back_Collisions_Nighttime', u'Ahead_Collision_Ratio', u'Back_Collision_Ratio', u'Ahead_Collision_Spread', u'Back_Collision_Spread' ] df = df[cols] df.head() # - for d in directions: df['%s_Collisions_Rate' % d] = df['%s_Collisions' % d] \ / df['%s_AADT' % d] df['%s_Collisions_Rate' % d].fillna(0, inplace=True) df['%s_Collisions_Rate' % d].replace(np.inf, 0, inplace=True) df['%s_Collisions_Per_Distance' % d] = df['%s_Collisions' % d] \ / df['Postmile_Distance'] df['%s_Collisions_Per_Distance' % d].fillna(0, inplace=True) for t in time_labels: df['%s_Collisions_%s_Ratio' % (d, t)] = df['%s_Collisions_%s' % (d, t)] \ / df['%s_Collisions' % d] df['%s_Collisions_%s_Ratio' % (d, t)].fillna(0, inplace=True) def is_gps_valid(row): lat = row.Latitude lng = row.Longitude year = row.Year route = row.Route s_id = row.Segment_ID df_next = df_features[(df_features.Year == year) & (df_features.Route == route) & (df_features.Segment_ID == (s_id + 1))] if len(df_next) == 0: return True next_lat = df_next.iloc[0].Latitude next_lng = df_next.iloc[0].Longitude return np.sqrt((lat - next_lat)**2 + (lng - next_lng)**2) < 0.2#5 df['GPS_Valid'] = df.apply(is_gps_valid, axis=1) print len(df), sum(df.GPS_Valid) df.head() df.to_csv(data_dir + 'df_features.csv')
notebooks/Features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Python для анализа данных # # ## Что такое SQL. Как писать запросы. Работа с Clickhouse. # # Автор: *<NAME>, НИУ ВШЭ* # Язык SQL очень прочно влился в жизнь аналитиков и требования к кандидатам благодаря простоте, удобству и распространенности. Часто SQL используется для формирования выгрузок, витрин (с последующим построением отчетов на основе этих витрин) и администрирования баз данных. Поскольку повседневная работа аналитика неизбежно связана с выгрузками данных и витринами, навык написания SQL запросов может стать крайне полезным умением. # Мы будем работать с колоночной базой данных Clickhouse. Прочитать, детали о том, что такое Clickhouse и какой диалект SQL там используется, можно прочитать в документации: # # https://clickhouse.tech/docs/ru/ # Рассказ о том, что такое базы данных и в каком виде данные хранятся, требуется отдельный – тема очень большая. Мы будем представлять себе базу данных как набор хранящихся на нескольких серверах таблиц с именованными колонками (как в Excel-файлах, только больше (на самом деле это не совсем так, но нам усложнение логики не потребуется) # #### Структура sql-запросов # # Общая структура запроса выглядит следующим образом: # * SELECT ('столбцы или * для выбора всех столбцов; обязательно') # * FROM ('таблица; обязательно') # * WHERE ('условие/фильтрация, например, city = 'Moscow'; необязательно') # * GROUP BY ('столбец, по которому хотим сгруппировать данные; необязательно') # * HAVING ('условие/фильтрация на уровне сгруппированных данных; необязательно') # * ORDER BY ('столбец, по которому хотим отсортировать вывод; необязательно') # * LIMIT ('число, сколько строк результата надо вывести; необязательно') # Для начала попробуем порешать задачи в интерфейсе Clickhouse. Он называется Tabix. 
# # Web-нтерфейс Clickhouse располагается по адресу: **Tabix.beslan.pro** (прямо на занятии зайдем и посмотрим) # # Наша база данных Clickhouse состоит из четырех таблиц: # # * **events** (события в приложении) # * **checks** (чеки покупок в приложении) # * **devices** (идентификаторы устройств, на которые приложения установлены) # * **installs** (установки приложений) # # Возьмем одну из таблиц и опробуем элементы запроса на ней. Пусть это будет таблица events. # Для простоты визуализации результатов запроса мы будем загружать данные прямо в Python (для этого я уже написал функцию, а уже потом будем разбирать, как эта функция работает. # !pip install pandahouse # + import json # Чтобы разбирать поля import requests # чтобы отправлять запрос к базе import pandas as pd # чтобы в табличном виде хранить результаты запроса # имена явки пароли. если хотите, чтобы считалось с вашего логина, вставьте сюда свои логин и пароль USER = 'student' PASS = '<PASSWORD>' HOST = 'http://clickhouse.beslan.pro:8080/' def get_clickhouse_data(query, host=HOST, USER = USER, PASS = <PASSWORD>, connection_timeout = 1500, dictify=True, **kwargs): NUMBER_OF_TRIES = 5 # Количество попыток запуска DELAY = 10 #время ожидания между запусками import time params = kwargs #если вдруг нам нужно в функцию положить какие-то параметры if dictify: query += "\n FORMAT JSONEachRow" # dictify = True отдает каждую строку в виде JSON'a for i in range(NUMBER_OF_TRIES): # headers = {'Accept-Encoding': 'gzip'} r = requests.post(host, params = params, auth=(USER, PASS), timeout = connection_timeout, data=query ) # отправили запрос на сервер if r.status_code == 200 and not dictify: return r.iter_lines() # генератор :) elif r.status_code == 200 and dictify: return (json.loads(x) for x in r.iter_lines()) # генератор :) else: print('ATTENTION: try #%d failed' % i) if i != (NUMBER_OF_TRIES - 1): print(r.text) time.sleep(DELAY * (i + 1)) else: raise(ValueError, r.text) def get_data(query): return 
pd.DataFrame(list(get_clickhouse_data(query, dictify=True))) # - # SQL-запросы мы будем сгружать в функцию в виде текста. query = """ select DeviceID, count(EventDate) as days_count from default.events where DeviceID in (select DeviceID from default.events where (EventDate >= '2019-04-01' and EventDate <= '2019-04-07') group by DeviceID order by sum(events) desc limit 3) group by DeviceID """ gg = get_data(query) gg # Отлично! Мы достали 10 записей таблицы events. Теперь опробуем выражение where: достанем только те записи, которые соответствуют платформе iOS. query = """ select EventDate, count() as cnt from default.events where (EventDate >= '2019-04-01' and EventDate <= '2019-04-10') or (EventDate >= '2019-04-20' and EventDate <= '2019-04-30') group by EventDate having cnt < 1000000 order by EventDate """ get_data(query) # Теперь остается попробовать выражения group by, having, order by и какую-нибудь группировочную функцию. Предлагаю посчитать количество событий (сумму поля events) в платформе iOS за июнь 2019 года, отсортировав выдачу по дате и выводя только дни, количество событий в которых было больше 6000000 query = """ select EventDate, uniqExact(DeviceID) as devices, count() as events, events/devices as events_per_device from default.events where (EventDate >= '2019-04-01' and EventDate <= '2019-04-07') group by EventDate order by EventDate """ get_data(query) # А еще существует понятие "подзапрос", имеется в виду, что вы в одном запросе обращаетесь к результатам другого запроса. Например можно посчитать количество дней, в которые events_cnt было больше 6000000 query = """ select count() as days_cnt from (select EventDate, sum(events) as events_cnt from events where AppPlatform ='iOS' and EventDate between'2019-06-01' and '2019-06-30' group by EventDate having sum(events)>6000000 order by EventDate) """ get_data(query) # Кроме того результаты подзапроса можно передать в блок where. 
Давайте попробуем достать те DeviceID, которые совершили более 1300 событий 2019-05-15 query = """ select DeviceID from events where EventDate = '2019-05-15' group by DeviceID having sum(events)>=1300 """ get_data(query) # А теперь достанем количество событий, которые совершили эти DeviceID за июнь 2019 в разбивке по дням. query = """ select EventDate, sum(events) as events_cnt from events where EventDate between'2019-06-01' and '2019-06-30' and DeviceID in (select DeviceID from events where EventDate = '2019-05-15' group by DeviceID having sum(events)>=1300) group by EventDate order by EventDate """ get_data(query) # #### Объединение таблиц - JOIN # # Как мы узнали ранее, в реляционных базах данных таблицы имеют избыточные данные (ключи), для объединения таблиц друг с другом. И именно для объединения таблиц используется функция JOIN. # # JOIN используется в блоке FROM, после первого источника. После JOIN указывается условие для объединения. # # Базово, синтаксис выглядит так # # SELECT field # FROM table_one AS l # JOIN table_two AS r # ON l.key = r.key # # # В данном примере мы указали первую таблицу как левую ( l ), вторую как правую ( r ), и указали, что они объединяются по ключу key. # # Если с обеих сторон нашлось более одной строки с одинаковыми значениями, то по всем этим строкам строится декартово произведение (если явно не указано обратное). # # Джойны бывают разных видов. В случае Clickhouse это: # # * **INNER** (по умолчанию) — строки попадают в результат, только если значение ключевых колонок присутствует в обеих таблицах. # * **FULL**, **LEFT** и **RIGHT** — при отсутствии значения в обеих или в одной из таблиц включает строку в результат, но оставляет пустыми (NULL) колонки, соответствующие противоположной таблице. 
# * **CROSS** — декартово произведение двух таблиц целиком без указания ключевых колонок, секция с ON/USING явно не пишется; # # <a> <img src="https://i.pinimg.com/originals/c7/07/f9/c707f9cdc08b1cdd773c006da976c8e6.jpg" width="800" height="160" ></a> # JOIN'ы могут иметь различную строгость. # Перед JOIN'ом модет стоять модицицирующее выражение, например: # # **ANY INNER JOIN** # # **ALL** — если правая таблица содержит несколько подходящих строк, то ClickHouse выполняет их декартово произведение. Это стандартное поведение JOIN в SQL. # # **ANY** — если в правой таблице несколько соответствующих строк, то присоединяется только первая найденная. Если в правой таблице есть только одна подходящая строка, то результаты ANY и ALL совпадают. # Чтобы посмотреть как вживую работает JOIN, давайте посмотрим, какие UserID совершили установки приложения. Для этого нужно взять таблицу Installs, выбрать из нее все поля и приджойнить ее по DeviceID к таблице devices. Чтобы на результат можно было посмотреть, выведем только 10 записей. query = ''' select a.Source as Source, a.DeviceID as DeviceID, a.InstallCost as InstallCost, a.InstallationDate as InstallationDate, b.UserID as UserID from installs as a inner join devices as b on a.DeviceID = b.DeviceID where a.InstallationDate between '2019-01-01' and '2019-06-30' limit 10''' get_data(query) # Прочие нюансы SQL в Clickhouse мы разберем прямо на примерах с реальными задачами. Отдельно также нужно упомянуть, что пока Clickhouse не поддерживает оконных функций.
lect10_Selenium_SQL/SQL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DU-ds/pyspark_udemy/blob/main/Section3_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="k3WDWIRdFEAz"
# Provision a local Spark runtime inside the Colab VM: install a JDK,
# download Spark 2.3.1, and let findspark put it on sys.path.
# ! apt update
# ! apt install openjdk-8-jdk-headless -qq > /dev/null
# ! wget -q http://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
# ! tar xf spark-2.3.1-bin-hadoop2.7.tgz
# ! pip install -q findspark

import os

# Point Spark at the JVM and the unpacked Spark build downloaded above.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.3.1-bin-hadoop2.7"

# ! ls

import findspark

findspark.init()

import pyspark
from pyspark.sql import SparkSession

# One session per notebook; getOrCreate() reuses it on re-execution.
spark = SparkSession.builder.getOrCreate()
spark

from pyspark.sql import types

# (column name, Spark type) pairs describing original.csv, in file order.
_columns = [
    ("id", types.IntegerType()),
    ("first_name", types.StringType()),
    ("last_name", types.StringType()),
    ("gender", types.StringType()),
    ("City", types.StringType()),
    ("JobTitle", types.StringType()),
    ("Salary", types.StringType()),
    ("Latitude", types.FloatType()),
    ("Longitude", types.FloatType()),
]
schema = types.StructType(
    [types.StructField(name, dtype) for name, dtype in _columns]
)

# Read the CSV with the explicit schema; the file carries a header row.
df = spark.read.csv("original.csv", header=True, schema=schema)

df.dtypes

df.show()
Section3_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Python module/package imports for this chapter import math, json, collections, itertools import numpy as np import matplotlib.pyplot as pp # %matplotlib inline from cartopy import crs as ccrs import geopy import cartopy # **March 2020 update**: # # * at 02:58 in the video: some versions of Basemap (such as 1.2.1) crash if `lat_0` is greater than 45 degrees. We change the code accordingly. Also, when coordinates end outside the map because a location is not visible, `world` returns `inf`, resulting in a `matplotlib` warning. We check for `inf` to avoid the warnings. # ### Code and data needed from previous videos # List of olympic cities and years: # + cities = [] years = [] for game in open('games.txt','r'): words = game.split() city = ' '.join(words[:-1]) year = words[-1].strip('()') cities.append(city) years.append(year) # - # Geolocated olympic-city coordinates (from a JSON file): coordinates_by_city = json.load(open('coords.json','r')) coordinates_by_city # ## Comprehensions and generators # + results = [] for city, year in zip(cities,years): if int(year) > 1945: results.append(city + ': ' + year) # - results = [city + ': ' + year for city, year in zip(cities,years)\ if int(year) > 1945] results[:10] cities_by_year = {year: city for city, year in zip(cities,years)} cities_by_year cities_after_1930 = {city for year,city in cities_by_year.items()\ if int(year) > 1930} cities_after_1930 # + import cartopy pp.figure(figsize=(8,8)) ax = pp.axes(projection=ccrs.Orthographic(central_latitude=75)) ax.coastlines('110m') ax.add_feature(cartopy.feature.BORDERS) # + ortho = ccrs.Orthographic(central_latitude=40) pp.figure(figsize=(8,8)) ax = pp.axes(projection=ortho) ax.coastlines('110m') ax.stock_img() ax.add_feature(cartopy.feature.BORDERS) for 
year,city in cities_by_year.items(): x,y = coordinates_by_city[city] ax.scatter(x=x,y=y,color='red',s=10, transform=ccrs.PlateCarree()) ax.text(x,y,year,fontsize=8,ha='center', \ va='bottom',color='navy', \ transform=ccrs.PlateCarree()) # - even = (i for i in range(20) if i % 2 == 0) even even.__next__() even.__next__() even = (i for i in range(20) if i % 2 == 0) sum(even) def fibonacci(): f1, f2 = 0, 1 while True: yield f2 f1, f2 = f2, f1 + f2 f = fibonacci() [next(f) for i in range(20)] # next(f) shorthand for f.__next__()
sandbox/efficientPy/Exercise Files/chapter2/02_04/02_04_comprehensions_begin.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 범주형 데이터의 산점도, Categorical scatterplots # 범주별로 산점도를 표현할 때 사용된다. # # * stripplot() (with kind="strip"; the default) - 한 쪽 변수가 범주형인 산점도를 그린다. # * swarmplot() (with kind="swarm") - stripplot()과 유사하지만 점이 겹치지 않도록 조정된다.(범주형 축을 따라서). import pandas as pd import seaborn as sns import matplotlib.pyplot as plt tips = pd.read_csv('./data/tips.csv') tips.info() titanic = pd.read_csv('./data/titanic.csv') titanic.info() # ## Stripplot # # 범주별 데이터 포인트들을 일일이 점 형태로 찍어서 표시한다. sns.stripplot(x='Pclass', y='Age', data=titanic) sns.stripplot(x='Pclass', y='Age', hue='Sex', alpha=0.5, data=titanic) ax = sns.stripplot(x="day", y="total_bill", data=tips) # jitter 분량을 조정한다. ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05) ax = sns.stripplot(x="total_bill", y="day", hue='sex', data=tips) # ## Swarmplot # 데이터의 분산까지 고려하여, 데이터 포인트가 서로 중복되지 않도록 그린다. plt.figure(figsize=(10,5)) sns.swarmplot(x='Pclass', y='Age', data=titanic) ax = sns.swarmplot(x="day", y="total_bill", data=tips) ax = sns.swarmplot(x="day", y="total_bill", hue="sex", data=tips) # ## stipplot vs swarmplot # # * stripplot : 데이터 포인트가 중복되어 범주별 분포를 그린다. # * swarmplot : 위 그래프는 데이터의 분산까지 고려하여, 데이터 포인트가 서로 중복되지 않도록 그린다. 즉, 데이터가 퍼져 있는 정도를 입체적으로 볼 수 있다. # + fig, axes = plt.subplots(1,2, figsize=(20,5)) ax1 = sns.stripplot(x='Pclass', y='Age', hue='Sex', data=titanic, ax=axes[0]) ax1.set_title('Stripplot') ax2 = sns.swarmplot(x='Pclass', y='Age', hue='Sex', data=titanic, ax=axes[1]) ax2.set_title('Swarmplot') # - # ### sina plot # box plot이나 violin plot 에 swarmplot을 겹쳐서 그리면 데이터의 분포를 더 자세히 표현할 수 있다. # plt.figure(figsize=(10,5)) sns.boxplot(x='Pclass', y='Age', data=titanic) sns.swarmplot(x='Pclass', y='Age', data=titanic, color=".5") # # strip_swarm 실습하기 # # 1. iris 데이터 셋을 읽어들인다. 
# ``` # iris = pd.read_csv('./data/iris.csv') # iris.info() # ``` # # 2. 종(Species)별 petal_length의 분포를 strip chart로 시각화 하시오. # 3. 종(Species)별 petal_length의 분포를 swarm plot으로 시각화 하시오. # 4. 3번의 내용을 바이올린 플롯과 strip chart를 겹쳐 그려 시각화 하시오.
12.strip_swarm.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import ifcb, glob, os,zipfile fnames = sorted(glob.glob("/opt/ifcb-data/power-buoy-deployment/D20210*.roi")) fnames for i, fname in enumerate(fnames): if i < 1000: files = ifcb.open_raw(fname) files.to_zip("/u/pdaniel/ifcb-scripts/data/{}.zip".format(os.path.basename(os.path.splitext(fname)[0]))) else: break # ## Look for large Files ## import numpy as np import zipfile, glob from PIL import Image import pandas as pd from tqdm import tqdm import matplotlib.pyplot as plt df = pd.DataFrame(columns=["pid","size"]) for zip_name in tqdm(sorted(glob.glob('data/*.zip'))): with zipfile.ZipFile(zip_name, "r") as zip_data: sizes = [] names = [] content_list = zip_data.namelist() for name_file in content_list: if name_file.endswith('.png'): img_bytes = zip_data.open(name_file) # 1 img_data = Image.open(img_bytes) sizes.append(img_data.size[0]) names.append(zip_name) df = df.append(pd.DataFrame(data={"pid":names, "size":sizes}),ignore_index=True) df['dateTime'] = pd.to_datetime(df['pid'].apply(lambda x: x.split("/")[1].split("_")[0][1:])) fig, ax = plt.subplots() df.groupby("dateTime")['size'].max().plot(marker='.',lw=0,ax=ax) ax.set_xlim("20210717","20210722") ax.set_ylabel("Max Hieght per Syringe(x*y)") ax.set_yscale('log') fname = "/opt/ifcb-data/power-buoy-deployment/D20210723T073937_IFCB161.roi" files = ifcb.open_raw(fname) # creates an FilesetBin Object, this links each of the files files.to_zip("../data/D20210723T073937_IFCB161.zip") z = ifcb.open_zip("../data/D20210723T073937_IFCB161.zip") # directory = ifcb.DataDirectory('/opt/ifcb-data/power-buoy-deployment/2021') directory. 
files.headers files.to_zip("{}.zip".format(os.path.basename(os.path.splitext(fname)[0]))) # This will create a zip of the .pngs from the ROI files # You can open the zip get the filenames or even open them with zipfile.ZipFile("../data/D20210723T073937_IFCB161.zip", "r") as zip_data: content_list = zip_data.namelist() print(content_list) img_bytes = zip_data.open("D20210723T073937_IFCB161.csv") # 1 # img_data = Image.open(img_bytes) import pandas as pd pd.read_csv(img_bytes)
notebooks/ifcb-raw-view-images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
###################
# set up the environment, nis, and image saving path
#####################
import os
from nis_util import *

# NIS-Elements executable that performs the actual scanning.
path_to_nis = 'C:\\Program Files\\NIS-Elements\\nis_ar.exe'

#save_base_path = 'E:\\aquisition data\\Nicolas\\Overviews'
# save_base_path = 'C:\\Users\\Nikon\\Documents\\TEST'
save_base_path = 'E:\\aquisition data\\Nicolas\\Overviews'
# -

# +
###################
# select and name the scans
#####################

# put the name of the optical configuration to use here
#set_optical_configuration(path_to_nis, 'Dia-4x')

#####################################
# where to save the files, for example
# save_path_left = 'NG_Overview_024.nd2'
#
# if no slide just put None. For example:
# save_path_left = None
########################################

save_path_left = 'YX_Overview_0405016.nd2'  #None
save_path_mid = 'YQ_Overview_0405017.nd2'  #None
save_path_right = 'YQ_Overview_0405018.nd2'  #None


def _full_save_path(file_name):
    """Join file_name onto save_base_path; None (= empty slot) passes through."""
    return os.path.join(save_base_path, file_name) if file_name is not None else None


save_path_right = _full_save_path(save_path_right)
save_path_mid = _full_save_path(save_path_mid)
save_path_left = _full_save_path(save_path_left)
# -

# +
############################
# run the scan
# before running the scan, you should make the focus correctly
# skips files that already exist
############################


def _scan_slide(save_path, bbox, scan_label, skip_label):
    """Scan one slide holder unless the slot is disabled or already scanned.

    Parameters:
        save_path: absolute .nd2 output path, or None when no slide is loaded.
        bbox: 4 stage coordinates of the slide holder, forwarded verbatim to
            do_large_image_scan in the same order as the original per-slide
            calls (exact axis meaning defined in nis_util -- TODO confirm).
        scan_label: slide name used in the 'Scanning ...' message.
        skip_label: slide name used in the 'Skipping ...' message (the
            original wording uses 'middle' rather than 'mid' here).
    """
    do_scan = save_path is not None
    # Never overwrite an existing overview file.
    if do_scan and os.path.exists(save_path):
        print('WARNING: file {} exists.'.format(save_path))
        do_scan = False
    if do_scan:
        print('Scanning {} scan.'.format(scan_label))
        do_large_image_scan(path_to_nis, save_path, *bbox)
    else:
        print('Skipping {} slide.'.format(skip_label))


# the bounding boxes of the slide holders
_scan_slide(save_path_left, (53331, 28806, -20726, 20464), 'left', 'left')
_scan_slide(save_path_mid, (13400, -7850, -20954, 18220), 'mid', 'middle')
_scan_slide(save_path_right, (-26500, -50000, -21053, 18177), 'right', 'right')
# -

# NOTE(review): this trailing cell repeats the left-slide scan unconditionally
# (no existence check); it looks like a leftover manual re-run -- confirm
# whether it is still needed.
do_large_image_scan(path_to_nis, save_path_left, 53331, 28806, -20726, 20464)
simple_overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:notebook] * # language: python # name: conda-env-notebook-py # --- # # GOES lat/lon plotting example # This example steps through obtaining GOES data through an AWS S3 bucket, taking a subset based on lat/lon, and plotting the result. # # Credits: # - [<NAME>](mailto:<EMAIL>) - [Twitter](https://twitter.com/lucassterzinger) - University of California, Davis # - [Dr. <NAME>](mailto:<EMAIL>) - [Twitter](https://twitter.com/ChelleGentemann) - Farallon Institute # + import xarray as xr import numpy as np import matplotlib.pyplot as plt import s3fs import datetime as dt import cartopy.crs as ccrs import cartopy.feature as cfeature import dask from dask.distributed import Client # - # ## Get data # This function was written by <NAME> and was taken from https://github.com/oceanhackweek/ohw20-tutorials/blob/master/10-satellite-data-access/goes-cmp-netcdf-zarr.ipynb # # It connects to the noaa-goes16/17 S3 bucket and pulls one day of data def get_geo_data(sat,lyr,idyjl): # arguments # sat goes-east,goes-west,himawari # lyr year # idyjl day of year d = dt.datetime(lyr,1,1) + dt.timedelta(days=idyjl) fs = s3fs.S3FileSystem(anon=True) #connect to s3 bucket! 
#create strings for the year and julian day imon,idym=d.month,d.day syr,sjdy,smon,sdym = str(lyr).zfill(4),str(idyjl).zfill(3),str(imon).zfill(2),str(idym).zfill(2) #use glob to list all the files in the directory if sat=='goes-east': file_location,var = fs.glob('s3://noaa-goes16/ABI-L2-SSTF/'+syr+'/'+sjdy+'/*/*.nc'),'SST' if sat=='goes-west': file_location,var = fs.glob('s3://noaa-goes17/ABI-L2-SSTF/'+syr+'/'+sjdy+'/*/*.nc'),'SST' if sat=='himawari': file_location,var = fs.glob('s3://noaa-himawari8/AHI-L2-FLDK-SST/'+syr+'/'+smon+'/'+sdym+'/*/*L2P*.nc'),'sea_surface_temperature' #make a list of links to the file keys if len(file_location)<1: return file_ob file_ob = [fs.open(file) for file in file_location] #open connection to files #open all the day's data ds = xr.open_mfdataset(file_ob,combine='nested',concat_dim='time') #note file is super messed up formatting #clean up coordinates which are a MESS in GOES #rename one of the coordinates that doesn't match a dim & should if not sat=='himawari': ds = ds.rename({'t':'time'}) ds = ds.reset_coords() else: ds = ds.rename({'ni':'x','nj':'y'}) #for himawari change dims to match goes #put in to Celsius #ds[var] -= 273.15 #nice python shortcut to +- from itself a-=273.15 is the same as a=a-273.15 #ds[var].attrs['units'] = '$^\circ$C' return ds # ### Get the data from the 210th day of 2020 # %%time ds = get_geo_data("goes-east", 2020, 210) # *** # # Get lat/lon # This function uses metadata stored in the `goes_imager_projection` variable that is present in GOES data. It adds a `lat` and `lon` coordinate to the dataset. 
The math to convert from the GOES x/y grid to lat/lon was taken from https://makersportal.com/blog/2018/11/25/goes-r-satellite-latitude-and-longitude-grid-projection-algorithm def calc_latlon(ds): x = ds.x y = ds.y goes_imager_projection = ds.goes_imager_projection x,y = np.meshgrid(x,y) r_eq = goes_imager_projection.attrs["semi_major_axis"] r_pol = goes_imager_projection.attrs["semi_minor_axis"] l_0 = goes_imager_projection.attrs["longitude_of_projection_origin"] * (np.pi/180) h_sat = goes_imager_projection.attrs["perspective_point_height"] H = r_eq + h_sat a = np.sin(x)**2 + (np.cos(x)**2 * (np.cos(y)**2 + (r_eq**2 / r_pol**2) * np.sin(y)**2)) b = -2 * H * np.cos(x) * np.cos(y) c = H**2 - r_eq**2 r_s = (-b - np.sqrt(b**2 - 4*a*c))/(2*a) s_x = r_s * np.cos(x) * np.cos(y) s_y = -r_s * np.sin(x) s_z = r_s * np.cos(x) * np.sin(y) lat = np.arctan((r_eq**2 / r_pol**2) * (s_z / np.sqrt((H-s_x)**2 +s_y**2))) * (180/np.pi) lon = (l_0 - np.arctan(s_y / (H-s_x))) * (180/np.pi) ds = ds.assign_coords({ "lat":(["y","x"],lat), "lon":(["y","x"],lon) }) ds.lat.attrs["units"] = "degrees_north" ds.lon.attrs["units"] = "degrees_east" return ds # %%time ds = calc_latlon(ds) # ### Now, our dataset has lat and lon coordinates ds.coords # ## Use `.sel()` to select based on GOES coordinates # + # %%time subset = ds.sel(x=slice(-0.01,0.07215601),y=slice(0.12,0.09)) #reduce to GS region masked = subset.SST.where(subset.DQF==0) # p = None fig = plt.figure(figsize=(10,5)) p = masked.isel(time=14).plot(x='lon', y='lat',vmin=14+273.15,vmax=30+273.15,cmap='inferno', subplot_kws={ "projection" : ccrs.PlateCarree() }) p.axes.coastlines() p.axes.add_feature(cfeature.STATES) # - # ### Plot mean over time # + # %%time subset = ds.sel(x=slice(-0.01,0.07215601),y=slice(0.12,0.09)) #reduce to GS region masked = subset.SST.where(subset.DQF==0) # p = None fig = plt.figure(figsize=(10,5)) p = masked.mean("time", skipna=True).plot(x='lon', y='lat',vmin=14+273.15,vmax=30+273.15,cmap='inferno', subplot_kws={ 
"projection" : ccrs.PlateCarree() }) p.axes.coastlines() p.axes.add_feature(cfeature.STATES) # - # # Select a subset of data lat1, lat2 = 21, 43 lon1, lon2 = -84, -56 # ### Chunking data makes it easier and faster for the system to process large array operations ds= ds.chunk({ "x": 1000, "y": 1000, "time":1 }) # + subset = ds.where((ds.lon >= lon1) & (ds.lon <= lon2) & (ds.lat >= lat1) & (ds.lat <= lat2), drop=True) subset = subset.SST.where(subset.DQF == 0).mean("time") # - # This subset can be plotted directly from xarray # %%time subset.plot(x="lon", y="lat") # %%time fig = plt.figure() ax = fig.add_subplot(111, projection=ccrs.PlateCarree()) cs = ax.contourf(subset.lon, subset.lat, subset) ax.add_feature(cfeature.COASTLINE) ax.add_feature(cfeature.STATES) plt.colorbar(cs)
goes-latlon.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.3.10
#     language: julia
#     name: julia-0.3
# ---

# # Markov chains
# Please install the `master`-branch version with `Pkg.clone`. (2015/7/22)
#
# ```julia
# julia> Pkg.clone("QuantEcon")
# ```

using QuantEcon
using PyPlot

# ## Example 1
# As a simple example, consider the Markov chain given by the following transition probability matrix:

P = [0.4 0.6
     0.2 0.8]

# Create an instance of the `MarkovChain` type:

mc = MarkovChain(P)

# Generate a sample path (the second argument is the distribution of the initial state):

s = mc_sample_path(mc, [0.5; 0.5], 100000)

# The frequencies with which states `1` and `2` are visited:

mean(s .== 1), mean(s .== 2)

fig, ax = subplots(figsize=(4,3))
bins = [1:2]
frequencies = [mean(s .== i) for i in bins]
ax[:set_title]("Frequency distribution")
ax[:set_xlabel]("States")
ax[:set_ylabel]("Frequencies")
ax[:set_xlim](bins[1]-0.5, bins[end]+0.5)
ax[:set_ylim](0, 1)
ax[:set_xticks](bins)
bar(bins, frequencies, align="center")

# Computing the stationary distribution:

x = mc_compute_stationary(mc)

# This Markov chain is irreducible, so the stationary distribution is unique.

fig, ax = subplots(figsize=(4,3))
bins = [1:2]
ax[:set_title]("Stationary distribution")
ax[:set_xlabel]("States")
ax[:set_ylabel]("Probabilities")
ax[:set_xlim](bins[1]-0.5, bins[end]+0.5)
ax[:set_ylim](0, 1)
ax[:set_xticks](bins)
bar(bins, x, align="center")

# ## Example 2
# A reducible Markov chain:

mc2 = MarkovChain([1 0; 0 1])

# Returns one stationary distribution for each recurrent class:

mc_compute_stationary(mc2)

# A slightly larger example:

P = zeros(Float64, (6, 6))
P[1, 1] = 1
P[2, 5] = 1
P[3, [3, 4, 5]] = 1/3
P[4, [1, 6]] = 1/2
P[5, [2, 5]] = 1/2
P[6, [1, 4]] = 1/2
mc3 = MarkovChain(P)

# The communication classes:

communication_classes(mc3)

# The recurrent classes:

recurrent_classes(mc3)

# The stationary distributions:

stationary_dists = mc_compute_stationary(mc3)

transpose(stationary_dists) * mc3.p

# ## Example 3
# An irreducible Markov chain that is nearly reducible:

# +
p = 0.5
e = 1e-10
P = [1-(p+e) p       e
     p       1-(p+e) e
     e       e       1-2*e]
# -

# The theoretical stationary distribution is $(1/3, 1/3, 1/3)$ regardless of $\varepsilon$ (as long as $\varepsilon > 0$).

mc_compute_stationary(MarkovChain(P))

# +
e = 1e-100
P = [1-(p+e) p       e
     p       1-(p+e) e
     e       e       1-2*e]
# -

mc_compute_stationary(MarkovChain(P))

# ## Notes
# See http://quant-econ.net/jl/finite_markov.html.
JuliaTokyo04/markov_chain.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# +
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format

# Load AWS credentials from dl.cfg and expose them to the AWS SDKs via the
# process environment.
config = configparser.ConfigParser()
config.read('dl.cfg')

# BUG FIX: configparser is section-based, so config['AWS_ACCESS_KEY_ID']
# yields a SectionProxy (or raises KeyError) rather than the credential
# string, and assigning that to os.environ raises TypeError.  Read the
# options from the [AWS] section instead.  (Assumes dl.cfg uses an [AWS]
# section header -- TODO confirm against the actual config file.)
os.environ['AWS_ACCESS_KEY_ID'] = config.get('AWS', 'AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY'] = config.get('AWS', 'AWS_SECRET_ACCESS_KEY')
# -


def create_spark_session():
    """Create (or reuse) a SparkSession with the hadoop-aws package loaded.

    Returns:
        pyspark.sql.SparkSession: session able to read from and write to S3
        via the s3a:// scheme using the credentials exported above.
    """
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()
    return spark
etl.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %run Common.ipynb

# +
import numpy as np
import matplotlib.pyplot as plt

from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient
from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateEntry
from PIL import Image
# -

# ### Custom Vision

# #### Create Trainers

# +
ENDPOINT = 'https://australiaeast.api.cognitive.microsoft.com'

# Training keys for the source ("from") and destination ("to") Custom Vision
# accounts come from the environment (os appears to be provided by
# Common.ipynb above -- TODO confirm).
# SECURITY NOTE(review): the prints below write API keys into the notebook
# output; consider removing or masking them before sharing the notebook.
from_custom_vision_training_key = os.getenv('FROM_CUSTOM_VISION_TRAINING_KEY')
print(from_custom_vision_training_key)

to_custom_vision_training_key = os.getenv('TO_CUSTOM_VISION_TRAINING_KEY')
print(to_custom_vision_training_key)

from_trainer = CustomVisionTrainingClient(from_custom_vision_training_key, endpoint=ENDPOINT)
to_trainer = CustomVisionTrainingClient(to_custom_vision_training_key, endpoint=ENDPOINT)

# +
# Look up the ids of the two "General" domains once, from the source account.
object_detection_domain_id = next(domain.id for domain in from_trainer.get_domains()
                                  if domain.type == "ObjectDetection" and domain.name == "General")
print(object_detection_domain_id)

classification_domain_id = next(domain.id for domain in from_trainer.get_domains()
                                if domain.type == "Classification" and domain.name == "General")
print(classification_domain_id)
# -

# #### Clone Projects

# +
# Re-create every project from the source account in the destination account.
# FIX: bind the newly created project to its own name instead of rebinding the
# loop variable `project`, which shadowed the source project being iterated.
projects = from_trainer.get_projects()
for project in projects:
    cloned_project = to_trainer.create_project(name=project.name,
                                               domain_id=project.settings.domain_id,
                                               classification_type=project.settings.classification_type)
# -
v1/notebooks/Clone CustomVision.ai Projects.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Jupyter notebook # # Notebook is becoming a popular tool in scientific computing and business analytics (aka data science). It is also becoming a popular teaching tool. # # * Previously (2011--2015) known as **IPython notebook** (`.ipynb` file extension is still there) # * Similar to Maple, Mathematica, Sage # * Litterate programming enviroment (live/executable document with rich output) # * Not limited to Python anymore (kernel for ~50 languages, R, Julia, Matlab/Octave, Haskel, Scala,...) # * Used in many universities as well as by Google, Microsoft, IBM, Bloomberg, O'Reilly,... # # A notebook is a collection of *cells*. [Markdown](http://daringfireball.net/projects/markdown/) cells are rendered as html. Emphasis, lists, links, images,... i.e. much more than what's possible inside a traditional programming editor or an IDE. # # Say yes to $\LaTeX$! # # $\int_0^\infty f(x) dx = 0$ # # ![actual jupiter](http://www.bobthealien.co.uk/jupitermain.png) # # Code cells can contain single or multi-line statements in any of the supported languages. When executed, code cells will display rich output directly below. "Rich" means text, html, image or javascript object (interactive visualization). 
# + from IPython.display import IFrame IFrame('https://jupyter.org', '100%', 450) # + # editor and interactive shell at the same time # auto print result of evaluation of the last line x = 1 print( (x + 3) * 10) # - float(5) / 2 # + # code cells are in the same namespace print('Circle area is %.2f' % area) # + import math r = 100 perimeter = 2 * math.pi * r area = math.pi * r **2 area # - # ---- # **Q: What happens if we change the value of `r` above?** # # **Q: What happens if we we rearrange the order of cells?** # # ----- # # %quickref # %whos # ## More interesting examples # ### Monte Carlo $\pi$ # canonical way of importing NumPy import numpy as np # + N = 5000 x = np.random.rand(N) y = np.random.rand(N) inside = (x**2 + y**2) <= 1 pi_est = float(np.sum(inside)) / N * 4 print(pi_est) # + # canonical way of importing Matplotlib import matplotlib.pyplot as plt # and setting graphic output as embedded bitmap # %matplotlib inline # - # %whos plt.axis('equal') plt.plot(x, y, '+') plt.plot(x[inside], y[inside], 'r.') # ### Simple interaction # # NB: Just an example of how powerful notebooks are. # + x = np.linspace(-np.pi, np.pi, 128) def plot_sine(freq=1.0): y = np.sin(x * freq) plt.plot(x, y, 'r-') plot_sine() # + import ipywidgets as wdg wdg.interact(plot_sine, freq=(1,10)) # -
code/0-notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- # ## Figure 6 # # Similar to [Figure 4](https://github.com/EdwardJKim/astroclass/blob/master/paper/notebooks/figure04/purity_mag_kde.ipynb) # but as a function of photo-$z$. The bin size of histogram in the top panel is 0.02. from __future__ import division, print_function, unicode_literals # %matplotlib inline import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.neighbors import KernelDensity plt.rc('legend', fontsize=10) truth_train = np.loadtxt('../../data/truth_train.dat') truth_test = np.loadtxt('../../data/truth_test.dat') z_train = np.loadtxt('../../data/z_phot.train.dat') z_test = np.loadtxt('../../data/z_phot.test.dat') tpc_test = np.loadtxt('../../data/clens_tpc_test.mlz', unpack=True, usecols=(2,)) som_test = np.loadtxt('../../data/clens_som_test.mlz', unpack=True, usecols=(2,)) hbc_all = np.loadtxt('../../data/clens_all.hbc', unpack=True, usecols=(0,)) hbc_cv = hbc_all[:-len(truth_test)] hbc_test = hbc_all[-len(truth_test):] bmc_test = np.loadtxt('../../data/clens_test.bmc') # + # read in FLUX_RADIUS and MAG_i and make a classification def morph_class(magnitude, half_radius, cut=[0, 25, 1.0, 3.0]): point_source = ((magnitude > cut[0]) & (magnitude < cut[1]) & (half_radius > cut[2]) & (half_radius < cut[3])) return point_source.astype(np.int) mag_i_lower = 17 mag_i_upper = 21.0 r_h_lower = 1.4 r_h_upper = 2.8 r_h_test = np.loadtxt('../../data/flux_radius.test.dat') mag_i_test = np.loadtxt('../../data/mag_i.test.dat') morph_test = morph_class(mag_i_test, r_h_test, cut=[mag_i_lower, mag_i_upper, r_h_lower, r_h_upper]) # - bins = np.arange(-0, 1.75, 0.02) def find_purity_at(truth_test, clf, step=0.001, gc=None, gp=None, sc=None, sp=None): if bool(gc) and bool(sc) and bool(gp) and bool(sp): raise Exception('Specify only one of 
gp or sp parameter.') pbin = np.arange(0, 1, step) pure_all = np.zeros(len(pbin)) comp_all = np.zeros(len(pbin)) for i, p in enumerate(pbin): # true galaxies classified as stars gs = ((clf >= p) & (truth_test == 0)).sum() # true galaxies classified as galaxies gg = ((clf < p) & (truth_test == 0)).sum() # true stars classified as galaxies sg = ((clf < p) & (truth_test == 1)).sum() # true stars classified as stars ss = ((clf >= p) & (truth_test == 1)).sum() if gc is not None or gp is not None: if gg == 0 and sg == 0: pure_all[i] = np.nan else: pure_all[i] = gg / (gg + sg) if gg == 0 and gs == 0: comp_all[i] = np.nan else: comp_all[i] = gg / (gg + gs) if sc is not None or sp is not None: if ss == 0 and sg == 0: comp_all[i] = np.nan else: comp_all[i] = ss / (ss + sg) if ss == 0 and gs == 0: pure_all[i] = np.nan else: pure_all[i] = ss / (ss + gs) if gc is not None: ibin = np.argmin(np.abs(comp_all - gc)) return pbin[ibin], pure_all[ibin] if gp is not None: ibin = np.argmin(np.abs(pure_all - gp)) return pbin[ibin], comp_all[ibin] if sc is not None: ibin = np.argmin(np.abs(comp_all - sc)) return pbin[ibin], pure_all[ibin] if sp is not None: ibin = np.argmin(np.abs(pure_all - sp)) return pbin[ibin], comp_all[ibin] # + def find_gal_pur(mag_i_test, bmc_test, truth_test, p_cut=0.5, bandwidth=0.1): # all objects classified as galaxies xg = mag_i_test[(bmc_test < p_cut)] bandwidth = 1.06 * np.std(xg[(xg > -99) & (xg < 99)]) * np.power(len(xg), -0.2) kde_xg = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(xg[:, np.newaxis]) log_dens_xg = kde_xg.score_samples(x_kde) # true galaxies classified as galaxies gg = mag_i_test[(bmc_test < p_cut) & (truth_test == 0)] #bandwidth = 1.06 * np.std(gg[(gg > -99) & (gg < 99)]) * np.power(len(gg), -0.2) kde_gg = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(gg[:, np.newaxis]) log_dens_gg = kde_gg.score_samples(x_kde) g_pur = len(gg) * np.exp(log_dens_gg) / np.exp(log_dens_gg).sum() \ / (len(xg) * np.exp(log_dens_xg) / 
np.exp(log_dens_xg).sum()) return g_pur def find_star_pur(mag_i_test, bmc_test, truth_test, p_cut=0.5, bandwidth=0.1): # all objects classified as stars xs = mag_i_test[(bmc_test >= p_cut)] bandwidth = 1.06 * np.std(xs[(xs > -99) & (xs < 99)]) * np.power(len(xs), -0.2) kde_xs = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(xs[:, np.newaxis]) log_dens_xs = kde_xs.score_samples(x_kde) # true stars classified as stars ss = mag_i_test[(bmc_test >= p_cut) & (truth_test == 1)] #bandwidth = 1.06 * np.std(ss[(ss > -99) & (ss < 99)]) * np.power(len(ss), -0.2) kde_ss = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(ss[:, np.newaxis]) log_dens_ss = kde_ss.score_samples(x_kde) s_pur = len(ss) * np.exp(log_dens_ss) / np.exp(log_dens_ss).sum() \ / (len(xs) * np.exp(log_dens_xs) / np.exp(log_dens_xs).sum()) return s_pur # - n_boots = 100 x_kde = bins[:, np.newaxis] # Silverman's rule of thumb bandwidth = 1.06 * np.std(z_test) * np.power(len(z_test), -0.2) kde_z_test = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(z_test[:, np.newaxis]) log_dens_z_test = kde_z_test.score_samples(x_kde) def find_star_frac(mag_i_test, clf_test, truth_test, p_cut=0.5, bandwidth=0.1): # total objects xx = mag_i_test bandwidth = 1.06 * np.std(xx[(xx > -99) & (xx < 99)]) * np.power(len(xx), -0.2) kde_xx = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(xx[:, np.newaxis]) log_dens_xx = kde_xx.score_samples(x_kde) # all true stars sx = mag_i_test[truth_test == 1] #bandwidth = 1.06 * np.std(sx[(sx > -99) & (sx < 99)]) * np.power(len(sx), -0.2) kde_sx = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(sx[:, np.newaxis]) log_dens_sx = kde_sx.score_samples(x_kde) s_frac = len(sx) * np.exp(log_dens_sx) / np.exp(log_dens_sx).sum() \ / (len(xx) * np.exp(log_dens_xx) / np.exp(log_dens_xx).sum()) return s_frac def find_conf(func, mag_i_test, bmc_test, truth_test, p_cut=0.5, n_boots=100): s_pur_boots = [func(mag_i_test, bmc_test, truth_test, p_cut=p_cut)] 
print("Bootstrapping...") for i in range(1, n_boots): rows = np.floor(np.random.rand(len(truth_test)) * len(truth_test)).astype(int) mag_i_test_boots = mag_i_test[rows] bmc_test_boots = bmc_test[rows] truth_test_boots = truth_test[rows] s_pur_boots.append(func(mag_i_test_boots, bmc_test_boots, truth_test_boots, p_cut=p_cut)) if i % (n_boots / 10) == 0: print("{0:.0f} percent complete...".format(i / n_boots * 100)) s_pur_lower = np.percentile(s_pur_boots, 0.16, axis=0) s_pur_med = np.percentile(s_pur_boots, 0.50, axis=0) s_pur_upper = np.percentile(s_pur_boots, 0.84, axis=0) print("Complete.") return s_pur_med, s_pur_lower, s_pur_upper stars_frac_med, stars_frac_lower, stars_frac_upper = find_conf( find_star_frac, z_test, bmc_test, truth_test, n_boots=n_boots) # + bmc_p_cut, _ = find_purity_at(truth_test, bmc_test, gc=0.9964, step=0.001) bmc_g_pur_med, bmc_g_pur_lower, bmc_g_pur_upper = find_conf( find_gal_pur, z_test, bmc_test, truth_test, p_cut=bmc_p_cut, n_boots=n_boots) # + tpc_p_cut, _ = find_purity_at(truth_test, tpc_test, gc=0.9964, step=0.001) tpc_g_pur_med, tpc_g_pur_lower, tpc_g_pur_upper = find_conf( find_gal_pur, z_test, tpc_test, truth_test, p_cut=tpc_p_cut, n_boots=n_boots) # - morph_g_pur_med, morph_g_pur_lower, morph_g_pur_upper = find_conf( find_gal_pur, z_test, morph_test, truth_test, p_cut=0.5, n_boots=n_boots) # + bmc_p_cut, _ = find_purity_at(truth_test, bmc_test, sc=0.7145, step=0.0001) bmc_s_pur_med, bmc_s_pur_lower, bmc_s_pur_upper = find_conf( find_star_pur, z_test, bmc_test, truth_test, p_cut=bmc_p_cut, n_boots=1000) # + tpc_p_cut, _ = find_purity_at(truth_test, bmc_test, sc=0.7145, step=0.0001) tpc_s_pur_med, tpc_s_pur_lower, tpc_s_pur_upper = find_conf( find_star_pur, z_test, tpc_test, truth_test, p_cut=tpc_p_cut, n_boots=1000) # - morph_s_pur_med, morph_s_pur_lower, morph_s_pur_upper = find_conf( find_star_pur, z_test, morph_test, truth_test, p_cut=0.5, n_boots=1000) # + alpha = 0.2 p = sns.color_palette() sns.set_style("ticks") fig = 
plt.figure(figsize=(6, 10)) ax0 = plt.subplot2grid((6, 3), (0, 0), colspan=3) ax1 = plt.subplot2grid((6, 3), (1, 0), colspan=3) ax2 = plt.subplot2grid((6, 3), (2, 0), colspan=3, rowspan=2) ax3 = plt.subplot2grid((6, 3), (4, 0), colspan=3, rowspan=2) plt.setp(ax0.get_xticklabels(), visible=False) plt.setp(ax1.get_xticklabels(), visible=False) plt.setp(ax2.get_xticklabels(), visible=False) ax0.hist(z_test, bins=bins, histtype='bar', color=p[0], alpha=alpha) ax0.plot(x_kde[:, 0], len(z_test) * np.exp(log_dens_z_test) / np.exp(log_dens_z_test).sum(), color=p[0]) ax0.set_xlim(0.0, 1.5) ax0.set_yticks([0, 200, 400, 600]) ax0.set_ylabel('$N$') ax1.plot(x_kde[:, 0], stars_frac_med, color=p[0], label='stars') ax1.fill_between(x_kde[:, 0], stars_frac_lower, stars_frac_upper, color=p[0], alpha=alpha) ax1.set_xlim(0.0, 1.5) #ax1.set_ylim(0, 0.45) ax1.set_yticks([0, 0.2, 0.4, 0.6]) ax1.set_ylabel('fraction') ax1.legend(loc='upper center') ax2.plot(x_kde[:, 0], bmc_g_pur_med, label='BMC', ls='-', color=p[0],) ax2.fill_between(x_kde[:, 0], bmc_g_pur_lower, bmc_g_pur_upper, color=p[0], alpha=alpha) ax2.plot(x_kde[:, 0], tpc_g_pur_med, label='TPC', ls='-', color=p[1]) ax2.fill_between(x_kde[:, 0], tpc_g_pur_lower, tpc_g_pur_upper, color=p[1], alpha=alpha) ax2.errorbar(x_kde[:, 0], morph_g_pur_med, label='Morphology', ls='-', color=p[2]) ax2.fill_between(x_kde[:, 0], morph_g_pur_lower, morph_g_pur_upper, color=p[2], alpha=alpha) ax2.legend(loc='lower center') ax2.set_xlim(0.0, 1.5) ax2.set_ylim(0.83, 1.01) #ax2.set_yticks([0.8, 0.9, 1.0]) ax2.set_ylabel(r'$p_g\left(c_g=0.9964\right)$', fontsize=12) ax3.plot(x_kde[:, 0], bmc_s_pur_med, label='BMC', ls='-', color=p[0]) ax3.fill_between(x_kde[:, 0], bmc_s_pur_lower, bmc_s_pur_upper, color=p[0], alpha=alpha) ax3.plot(x_kde[:, 0], tpc_s_pur_med, label='TPC', ls='-', color=p[1]) ax3.fill_between(x_kde[:, 0], tpc_s_pur_lower, tpc_s_pur_upper, color=p[1], alpha=alpha) ax3.plot(x_kde[:, 0], morph_s_pur_med, label='Morphology', ls='-', 
color=p[2]) ax3.fill_between(x_kde[:, 0], morph_s_pur_lower, morph_s_pur_upper, color=p[2], alpha=alpha) ax3.set_ylabel(r'$p_s\left(c_s=0.7145\right)$', fontsize=12) ax3.set_xlim(0.0, 1.5) ax3.set_ylim(0.52, 1.04) ax3.set_xlabel(r'${z}_{\mathrm{phot}}$') plt.savefig('../../figures/purity_z.pdf') plt.show() # -
paper/notebooks/figure06/purity_z.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.2 64-bit
#     language: python
#     name: python38264bita307c9f5d73a4e6bba0d2b5e435e778a
# ---

# Import the required modules

import cv2
import PIL.Image
from io import BytesIO
import IPython.display


# Use 'jpeg' instead of 'png' (~5 times faster)
def showarray(a, prev_display_id=None, fmt='jpeg'):
    """Render a numpy image array inline in the notebook.

    When prev_display_id is given, update that existing display in place
    (avoids stacking a new image per frame) and return the same id;
    otherwise create a new display handle and return it.
    """
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    obj = IPython.display.Image(data=f.getvalue())
    if prev_display_id is not None:
        IPython.display.update_display(obj, display_id=prev_display_id)
        return prev_display_id
    return IPython.display.display(obj, display_id=True)


backSub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400,
                                            detectShadows=True)
# MOG2 alternative. Note: MOG2 takes varThreshold (not KNN's dist2Threshold),
# and Python booleans are capitalized — the previous commented line was not
# valid Python.
# backSub = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=16,
#                                              detectShadows=False)

# +
# Capture frames from the default camera, subtract the background,
# and stream the masked result into a single inline display.
vc = cv2.VideoCapture(0)
display_id = None

if vc.isOpened():  # try to get the first frame
    try:
        for i in range(200):
            ret, frame = vc.read()
            if not ret:
                # camera stopped delivering frames; bail out cleanly
                break
            fgMask = backSub.apply(frame)
            # Convert the image from OpenCV BGR format to matplotlib RGB
            # format to display the image
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # Keep only the foreground pixels and stamp the frame number.
            result = cv2.bitwise_and(frame_rgb, frame_rgb, mask=fgMask)
            cv2.putText(result, str(i + 1), (3, 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            if display_id is not None:
                showarray(result, display_id)
            else:
                display_id = showarray(result).display_id
            # Display the frame info until new frame is available
            IPython.display.clear_output(wait=True)
    finally:
        vc.release()
else:
    print("Camera not found!")
# -
video-classification/Realtime Background Removal.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np
import tsp_env


def attention(W_ref, W_q, v, enc_outputs, query):
    """Content-based attention (Vinyals-style glimpse).

    Scores each encoder output against `query` and returns the
    attention-weighted sum of encoder outputs, one vector per batch item.
    """
    with tf.variable_scope("attention_mask"):
        u_i0s = tf.einsum('kl,itl->itk', W_ref, enc_outputs)
        u_i1s = tf.expand_dims(tf.einsum('kl,il->ik', W_q, query), 1)
        u_is = tf.einsum('k,itk->it', v, tf.tanh(u_i0s + u_i1s))
        return tf.einsum('itk,it->ik', enc_outputs, tf.nn.softmax(u_is))


def critic_network(enc_inputs, hidden_size=128, embedding_size=128,
                   max_time_steps=5, input_size=2, batch_size=128,
                   initialization_stddev=0.1, n_processing_steps=5, d=128):
    """Critic (baseline) network from Bello et al.: LSTM encoder, an
    attention-based process block, and a 2-layer decoder producing one
    scalar value estimate per batch item.
    """
    # Embed inputs in larger dimensional tensors
    W_embed = tf.Variable(tf.random_normal([embedding_size, input_size],
                                           stddev=initialization_stddev))
    embedded_inputs = tf.einsum('kl,itl->itk', W_embed, enc_inputs)

    # Define encoder
    with tf.variable_scope("encoder"):
        enc_rnn_cell = tf.nn.rnn_cell.LSTMCell(hidden_size)
        enc_outputs, enc_final_state = tf.nn.dynamic_rnn(cell=enc_rnn_cell,
                                                         inputs=embedded_inputs,
                                                         dtype=tf.float32)

    # Define process block
    with tf.variable_scope("process_block"):
        process_cell = tf.nn.rnn_cell.LSTMCell(hidden_size)
        # Learned start token, tiled across the batch.
        first_process_block_input = tf.tile(
            tf.Variable(tf.random_normal([1, embedding_size]),
                        name='first_process_block_input'),
            [batch_size, 1])

    # Define attention weights
    # NOTE(review): reuse=True on a scope of tf.Variable (not get_variable)
    # has no sharing effect; kept as in the original.
    with tf.variable_scope("attention_weights", reuse=True):
        W_ref = tf.Variable(tf.random_normal([embedding_size, embedding_size],
                                             stddev=initialization_stddev),
                            name='W_ref')
        W_q = tf.Variable(tf.random_normal([embedding_size, embedding_size],
                                           stddev=initialization_stddev),
                          name='W_q')
        v = tf.Variable(tf.random_normal([embedding_size],
                                         stddev=initialization_stddev),
                        name='v')

    # Processing chain: repeatedly glimpse at the encoder outputs.
    processing_state = enc_final_state
    processing_input = first_process_block_input
    for t in range(n_processing_steps):
        processing_cell_output, processing_state = process_cell(
            inputs=processing_input, state=processing_state)
        processing_input = attention(W_ref, W_q, v,
                                     enc_outputs=enc_outputs,
                                     query=processing_cell_output)

    # Apply 2 layers of ReLu for decoding the processed state
    return tf.squeeze(
        tf.layers.dense(
            inputs=tf.layers.dense(inputs=processing_cell_output, units=d,
                                   activation=tf.nn.relu),
            units=1, activation=None))


batch_size = 128; max_time_steps = 5; input_size = 2
enc_inputs = tf.placeholder(tf.float32, [batch_size, max_time_steps, input_size])
bsln_value = critic_network(enc_inputs, hidden_size=128, embedding_size=128,
                            max_time_steps=5, input_size=2, batch_size=128,
                            initialization_stddev=0.1, n_processing_steps=5,
                            d=128)

tours_rewards_ph = tf.placeholder(tf.float32, [batch_size])
loss = tf.losses.mean_squared_error(labels=tours_rewards_ph,
                                    predictions=bsln_value)
train_op = tf.train.AdamOptimizer(1e-2).minimize(loss)

##############################################################################
# Trying it out: can we learn the reward of the optimal policy for the TSP5? #
##############################################################################


def generate_batch(n_cities, batch_size):
    """Sample `batch_size` random TSP instances and their optimal tour
    lengths; returns (coords array, rewards array)."""
    inputs_list = []; labels_list = []
    env = tsp_env.TSP_env(n_cities, use_alternative_state=True)
    for i in range(batch_size):
        # single reset per instance (the original reset twice, discarding one)
        s = env.reset()
        coords = s.reshape([4, n_cities])[:2, ].T
        inputs_list.append(coords)
        labels_list.append(env.optimal_solution()[0])
    return np.array(inputs_list), np.array(labels_list)


# Create tf session and initialize variables
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# Training loop
loss_vals = []
for i in range(10000):
    inputs_batch, labels_batch = generate_batch(max_time_steps, batch_size)
    loss_val, _ = sess.run([loss, train_op],
                           feed_dict={enc_inputs: inputs_batch,
                                      tours_rewards_ph: labels_batch})
    loss_vals.append(loss_val)
    if i % 50 == 0:
        print(loss_val)

import matplotlib.pyplot as plt
# %matplotlib inline

# Bug fix: the original plotted the undefined name `loss_vals_slow_lr`.
plt.plot(np.log(loss_vals))
plt.xlabel('Number of iterations')
plt.ylabel('Log of mean squared error')

len(loss_vals)
Actor_CriticPointer_Network-TSP/Critic Network Bello.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# NOTE(review): absolute user-specific path; consider a relative path so the
# notebook runs on other machines.
customers = pd.read_csv('/Users/ken/Documents/Kshitija/caia_projects/Ecommerce_Customers.csv')

customers.head()

customers.tail()

# checking for null value
customers.isnull().sum()

customers.describe()

customers.info()

# ## Exploratory Data Analysis
#
#

sns.jointplot(x='Time on Website', y='Yearly Amount Spent', data=customers)

# ** Do the same but with the Time on App column instead. **

sns.jointplot(x='Time on App', y='Yearly Amount Spent', data=customers)

# ** Use jointplot to create a 2D hex bin plot comparing Time on App and Length of Membership.**

sns.jointplot(x='Time on App', y='Length of Membership', data=customers, kind='hex')

sns.pairplot(customers)

sns.heatmap(customers.corr())

# **Create a linear model plot (using seaborn's lmplot) of Yearly Amount Spent vs. Length of Membership. **

sns.lmplot(x='Length of Membership', y='Yearly Amount Spent', data=customers)

# ## Training and Testing Data
#
#

customers.columns

# +
# Features: the numeric usage columns; target: yearly spend.
X = customers[['Avg. Session Length', 'Time on App',
               'Time on Website', 'Length of Membership']]

y = customers['Yearly Amount Spent']
# -

from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=101)

# ## Training the Model
#
#

from sklearn.linear_model import LinearRegression

lm = LinearRegression()

# **Train/fit lm on the training data.**

lm.fit(X_train, y_train)

# +
from sklearn import metrics

pred = lm.predict(X_test)
print('multiple linear Model')
# Bug fix: the original labelled sqrt(MSE) as "Mean Squared Error (MSE)";
# the square root of the MSE is the RMSE.
mse = metrics.mean_squared_error(y_test, pred)
print('Root Mean Squared Error (RMSE) ', round(np.sqrt(mse), 2))
print('R-squared (training) ', round(lm.score(X_train, y_train), 3))
print('R-squared (testing) ', round(lm.score(X_test, y_test), 3))
print('Intercept: ', lm.intercept_)
print('Coefficient:', lm.coef_)
# -

# **Print out the coefficients of the model**

print(lm.coef_)

lm.score(X_train, y_train)

lm.score(X_test, y_test)

# Finding out mean, median & mode
print('Mean', round(customers['Yearly Amount Spent'].mean(), 2))
print('Median', customers['Yearly Amount Spent'].median())
print('Mode', customers['Yearly Amount Spent'].mode()[0])

# ## Predicting Test Data
#

predictions = lm.predict(X_test)

# ** Create a scatterplot of the real test values versus the predicted values. **

plt.scatter(y_test, predictions)
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')

# ## Evaluating the Model
#
#

from sklearn import metrics

print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))

# ## Residuals
#

sns.distplot(y_test - predictions)

pd.DataFrame(lm.coef_, X.columns, columns=['Coefficient'])

# +
# 1 unit increase in Avg. Session Length is associated with an increase of 25.98 total dollars spent.
# 1 unit increase in Time on App is associated with an increase of 38.59 total dollars spent.
# 1 unit increase in Time on Website is associated with an increase of 0.19 total dollars spent.
# 1 unit increase in Length of Membership is associated with an increase of 61.27 total dollars spent.
# -

"""According to the data, on average, people spend time on the website,which does not result in spending.
The app is more efficient. However, this implies that there is much to improve on the website.
Improving the flow and usability of the website is likely to boost the total amount of spending."""
linear-regression-ecommerce.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/matteo-ticli/CNN_LSTM-stock-prices-prediction/blob/main/algo/models.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="eE-JawzoI2Sy" outputId="12675add-dc14-4ae5-ea6a-07d285757a05"
from google.colab import drive
drive.mount('/content/drive')

# + id="KTm6cqHEI3iu"
import os
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from keras.utils import to_categorical, plot_model
from keras.models import Sequential, Input, Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv3D, MaxPooling3D, MaxPool3D, GlobalAveragePooling3D, Concatenate, Input
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
import keras.backend as K

# + id="y2tInGwdJPaO"
tensor = np.load('/content/drive/MyDrive/FinalProject/data/tensor_1000_2000.npy')
labels = np.load('/content/drive/MyDrive/FinalProject/data/labels_1000_2000.npy')

# + id="hxFJVXnjJbhm"
# 80/20 split; add a trailing channel axis and one-hot encode the two classes.
x_train, x_test, y_train, y_test = train_test_split(tensor, labels,
                                                    train_size=0.8,
                                                    random_state=42)
x_train, x_test = np.reshape(x_train, x_train.shape + (1,)), np.reshape(x_test, x_test.shape + (1,))
y_train, y_test = to_categorical(y_train), to_categorical(y_test)

# + id="N75K4vKWoOGW"
input_shape = x_train.shape[1:]
batch_size = 32
epochs = 100
num_filters = 10

# + [markdown] id="DNm2ibZzvX-y"
# ####Try to replicate the paper model with two parallel CNN, one merge layer, one CNN, one CNN a flattened layer, a fully connected layer and the output layer

# + id="zFVWoz0meFFV"
## define Input
inp = Input(shape=input_shape)

## two parallel Conv3D with different receptive fields
conv_1 = Conv3D(filters=num_filters, kernel_size=(10, 1, 1), padding='SAME',
                data_format='channels_last', activation='relu',
                input_shape=input_shape)(inp)
conv_2 = Conv3D(filters=num_filters, kernel_size=(10, 3, 3), padding='SAME',
                data_format='channels_last', activation='relu',
                input_shape=input_shape)(inp)

## merge layer
merged = Concatenate(axis=1)([conv_1, conv_2])

## third Conv3D
conv_3 = Conv3D(filters=num_filters, kernel_size=(10, 3, 5), padding='SAME',
                data_format='channels_last', activation='relu',
                input_shape=input_shape)(merged)

## fourth Conv3D
conv_4 = Conv3D(filters=num_filters, kernel_size=(10, 3, 1), padding='SAME',
                data_format='channels_last', activation='relu',
                input_shape=input_shape)(conv_3)

## flat the network in order to feed a simple ANN
flat = Flatten()(conv_4)

## Fully connected head. Bug fix: the hidden Dense layers used 'softmax',
## which normalizes hidden activations to a probability simplex and cripples
## the network; softmax belongs only on the output layer.
dense_1 = Dense(10, activation='relu')(flat)
dense_2 = Dense(4, activation='relu')(dense_1)
out = Dense(2, activation='softmax')(dense_2)

model = Model(inputs=inp, outputs=out)

# + colab={"base_uri": "https://localhost:8080/", "height": 856} id="sggZLIpdC6-m" outputId="e6183e6f-1e93-4ac3-a743-9c076e196a46"
plot_model(model)

# + colab={"base_uri": "https://localhost:8080/"} id="9OIhgbDkIIOP" outputId="4da3cd1e-b841-42ee-8c36-8f092254d523"
# Compile the model. With one-hot 2-class labels and a 2-unit softmax output,
# categorical_crossentropy is the matching loss (binary_crossentropy would
# average element-wise BCE over both output units).
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001),
              metrics=['accuracy'])
model.summary()

# + id="qONSwDz8A1zY"
model.fit(x_train, y_train, batch_size=batch_size, verbose=0, epochs=epochs);

# + colab={"base_uri": "https://localhost:8080/"} id="99sLR6vHMNNb" outputId="bfb8c620-39c5-43fa-b85a-d5baf90461bc"
loss, accuracy = model.evaluate(x_test, y_test, batch_size=32, verbose=0)
# Fix the format strings: the '%' sign goes after the number.
print(f'Accuracy: {accuracy * 100:.2f}%')
print(f'Loss: {loss:.4f}')
algo/models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Bug fix: the original did `from cv2 import *`, which does NOT bind the
# module name `cv2`, yet every call below uses the `cv2.` prefix.
import cv2
from PIL import Image
import torch  # needed by prepare_batches; was only present commented out
import time
import os


def load_video_data(movie_path, frame_indices, headless=False,
                    background_subtract=False, randomize=False,
                    background=None):
    """Read the frames [start, end] of a video as grayscale arrays.

    Returns (frames, last_frame_index). `background`, when given, is
    subtracted from each frame (the original referenced an undefined
    global `background`, which crashed when background_subtract=True).
    """
    data = []
    start_frame, end_frame = frame_indices
    cap = cv2.VideoCapture(movie_path)
    i = 1
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of stream or read error
        i += 1
        if start_frame < i < end_frame:
            if randomize:
                pass  # placeholder, not implemented
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if background_subtract and background is not None:
                frame = cv2.subtract(frame, background)
            if not headless:
                cv2.imshow('Frame', frame)
                time.sleep(0.05)
            data.append(frame)
        if i > end_frame or cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    return data, i


def show_data(data, verbose=False):
    """Plot 10 frames evenly sampled across the clip on a 2x5 grid."""
    fig, axs = plt.subplots(2, 5, figsize=(15, 15))
    indices = iter(np.linspace(0, len(data) - 1, 10))
    for i in range(2):
        for j in range(5):
            index = int(next(indices))
            frame = data[index]
            axs[i][j].xaxis.set_visible(False)
            axs[i][j].yaxis.set_visible(False)
            axs[i][j].imshow(frame)


def load_sample(subject, scene, gesture, headless=False):
    """Locate the (subject, scene, gesture) clip via the label CSVs and
    return its frames (output of load_video_data), or None if absent."""
    # Get labels' path
    subject_path_for_labels = 'subject0{}'.format(subject) if (subject < 10) else 'subject{}'.format(subject)
    scene_path_for_labels = 'Scene{}'.format(scene)
    labels_path = 'labels/{}/{}'.format(subject_path_for_labels, scene_path_for_labels)

    # Change directory to the labels directory; remember where we started.
    cwd = os.getcwd()
    os.chdir(labels_path)
    groups = os.listdir(os.getcwd())

    # Iterate through all gesture-frame pairs
    for group_csv, group_num in zip(groups, range(1, len(groups) + 1)):
        frames = pd.read_csv(group_csv, header=None)
        if gesture in frames[0].values:
            os.chdir(cwd)
            frame_indices = frames[frames[0] == gesture].values[0][1:]

            # Get video path
            subject_path_for_videos = 'Subject0{}'.format(subject) if subject < 10 else 'Subject{}'.format(subject)
            scene_path_for_videos = 'Scene{}'.format(scene)
            video_path = 'Videos/{}/{}/Color/rgb{}.avi'.format(subject_path_for_videos,
                                                               scene_path_for_videos,
                                                               group_num)
            # Load gesture data
            data = load_video_data(video_path, frame_indices, headless)
            return data
    os.chdir(cwd)
    return None


# +
data_43 = []
data_14 = []
for subject in range(1, 31):
    data_43.append(load_sample(subject, 1, 43, headless=True))
    data_14.append(load_sample(subject, 1, 14, headless=True))

# Attach one-hot labels: gesture 43 -> [1, 0], gesture 14 -> [0, 1].
data_43 = [(x[0], [1, 0]) for x in data_43]
data_14 = [(x[0], [0, 1]) for x in data_14]
# -


def star_clip(clip, euclidean=False):
    """Collapse a clip into a weighted motion image M plus its Sobel
    gradients (STAR representation): stack [M, dM/dx, dM/dy].

    Note: the `euclidean` flag had no effect in the original (its branch
    computed the same difference and was immediately overwritten); it is
    kept for interface compatibility. TODO confirm intended metric.
    """
    N = len(clip)
    M = np.zeros(clip[0].shape)
    for k in range(2, N):
        # later frames get more weight
        w_s = k / N
        # Bug fix: frames are uint8, so `I_{k-1} - I_k` wrapped around before
        # np.abs; cast to a signed type first.
        frame_diff = clip[k - 1].astype(np.int16) - clip[k].astype(np.int16)
        delta = np.abs(frame_diff) * w_s
        M += delta
    M_x = cv2.Sobel(M, cv2.CV_64F, 1, 0, ksize=5)
    M_y = cv2.Sobel(M, cv2.CV_64F, 0, 1, ksize=5)
    star = np.array([M, M_x, M_y])
    return star


# +
star_43 = [(star_clip(sample[0]), sample[1]) for sample in data_43]
star_14 = [(star_clip(sample[0]), sample[1]) for sample in data_14]

star_samples = star_43 + star_14
np.random.shuffle(star_samples)
# -


def prepare_batches(data, batch_size, shuffle=False):
    """Group (star, one-hot label) samples into torch tensors of
    `batch_size` items (last batch may be smaller).

    Bug fix: the original ignored all three parameters, reading the
    global `star_samples` and hard-coding a batch size of 4.
    """
    samples = list(data)
    if shuffle:
        np.random.shuffle(samples)
    batches, labels = [], []
    i = 0
    while i < len(samples):
        chunk = samples[i:i + batch_size]
        batches.append(torch.tensor([sample[0] for sample in chunk]).float())
        labels.append(torch.tensor([sample[1] for sample in chunk]).long())
        i += batch_size
    return batches, labels


data, labels = prepare_batches(star_samples, 4)

# +
# import torch
# import torchvision
# import torchvision.transforms as transforms
# import torch.nn as nn
# import torch.nn.functional as F

# class Encoder(nn.Module):
#     def __init__(self):
#         super(Net, self).__init__()
#         self.conv1 = nn.Conv2d(3, 6, 5)
#         self.pool = nn.MaxPool2d(2, 2)
#         self.conv2 = nn.Conv2d(6, 16, 5)
#         self.fc1 = nn.Linear(16 * 117 * 157, 500)

#     def encode(self, x):
#         x = self.pool(F.relu(self.conv1(x)))
#         x = self.pool(F.relu(self.conv2(x)))
#         x = x.view(-1, 16 * 117 * 157)
#         x = self.fc1(x)
#         return x

# +
from keras.layers import Dense, Flatten, Reshape, Input, InputLayer
from keras.models import Sequential, Model


def build_autoencoder(img_shape, code_size):
    """Build a linear (single dense layer each way) autoencoder pair
    mapping img_shape <-> a code of length code_size."""
    # The encoder
    encoder = Sequential()
    encoder.add(InputLayer(img_shape))
    encoder.add(Flatten())
    encoder.add(Dense(code_size))

    # The decoder
    decoder = Sequential()
    decoder.add(InputLayer((code_size,)))
    # np.prod(img_shape) is the same as 32*32*3, it's more generic than saying 3072
    decoder.add(Dense(np.prod(img_shape)))
    decoder.add(Reshape(img_shape))

    return encoder, decoder
# -

encoder, decoder = build_autoencoder((3, 480, 640), 1000)

star_samples[0][0].shape
notebooks/EgoGesture-EDA.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # COMP5318 - Machine Learning and Data Mining: Assignment 1 # <div style="text-align: right"> Due: Friday week 7 - Fri 24 Sep 2021 11:59PM </div> # + [markdown] nbpresent={"id": "375753da-1c6c-4b02-986a-6e3b185a5869"} # # 1. Summary # The goal of this assignment is to build a classifier to classify some grayscale images of the size 28x28 into a set of categories. The dimension of the original data is large, so you need to be smart on which method you gonna use and perhaps perform a pre-processing step to reduce the amount of computation. Part of your marks will be a function of the performance of your classifier on the test set. # - # # 2. Dataset description # The dataset can be downloaded from Canvas. The dataset consists of a training set of 30,000 examples and a test set of 5,000 examples. They belong to 10 different categories. The validation set is not provided, but you can randomly pick a subset of the training set for validation. The labels of the first 2,000 test examples are given, you will analyse the performance of your proposed method by exploiting the 2,000 test examples. It is NOT allowed to use any examples from the test set for training; or it will be considered as cheating. The rest 3,000 labels of the test set are reserved for marking purpose. <br /> # Here are examples illustrating sample of the dataset (each class takes one row): # <img src="Dataset_image.jpg" alt="DataSet" title="DataSet" width="450" height="300" /> # There are 10 classes in total:<br /> # 0 T-shirt/Top<br /> # 1 Trouser<br /> # 2 Pullover<br /> # 3 Dress<br /> # 4 Coat<br /> # 5 Sandal<br /> # 6 Shirt<br /> # 7 Sneaker<br /> # 8 Bag<br /> # 9 Ankle boot <br /> # # 3. 
How to load the data and make output prediciton # There is a Input folder including 4 main files (which can be downloaded from Canvas): # # 1. images_training.h5 (30000 image samples for training) # # 2. labels_training.h5 (30000 image lables for training) # # 3. images_testing.h5 (5000 image samples for making prediction) # # 4. labels_testing_2000.h5 (only 2000 image lables for testing, 3000 labels are not provided) # ## 3.1 How to load the data # To read the *hdf5* file and load the data into a numpy array. # # The **training data files are in the ./Input/train** and **testing data file are in ./Input/test**. <br /> Use the following code: # Then data would be a numpy array of the shape (30000, 784), and label would be a numpy array of the shape (30000, ). # # It is noted that the **labels_testing_2000** only contain **2000 samples** for your self-testing. The validation test for **fine-tuning parameters** should be splitted from the training test. We will evaluate your model on full 5000 samples which is not provided. The file **images_testing.h5** can be loaded in a similar way. import h5py import numpy as np import os print(os.listdir("./Input/train")) # + with h5py.File('./Input/train/images_training.h5','r') as H: data_train = np.copy(H['datatrain']) with h5py.File('./Input/train/labels_training.h5','r') as H: label_train = np.copy(H['labeltrain']) # using H['datatest'], H['labeltest'] for test dataset. print(data_train.shape,label_train.shape) # - # Showing a sample data. The first example belongs to class 0: T-Shirt/Top import matplotlib.pyplot as plt data_train = data_train.reshape((data_train.shape[0], 28, 28)) plt.imshow(data_train[0], cmap=plt.get_cmap('gray')) plt.title("class " + str(label_train[0]) + ": T-shirt/Top" ) plt.show() # ## 3.2 How to output the prediction # Output a file “predicted_labels.h5” that can be loaded in the same way as above. 
You may use the following code to generate an output file that meets the requirement: import numpy as np # assume output is the predicted labels from classifiers # (5000,) with h5py.File('Output/predicted_labels.h5','w') as H: H.create_dataset('Output',data=output) # + [markdown] nbpresent={"id": "aca7ed33-2da5-4fbf-a861-8a886f4020a8"} # We will load the output file using the code for loading data above. It is your responsibility to make sure the output file can be correctly loaded using this code. # The performance of your classifier will be evaluated in terms of the top-1 accuracy metric, i.e.<br /><br /> # <div style="text-align: center"> $$\text{Accuracy} = \frac{\text{Number of correct classifications}}{\text{Total number of test examples used}} * 100\%$$ # + [markdown] nbpresent={"id": "1e4a01db-cd92-48f8-bdaa-21c39456cfcb"} # # 4. Task description # # Your task is to determine / build a classifier for the given data set to classify images into categories and write a report. The score allocation is as follows: # # * Code: max 65 points # * Report: max 35 points # # Please refer to the rubric in Canvas for detailed marking scheme. The report and the code are to be submitted in Canvas by the due date.<br /> # - # ## 4.1 Code # ### The code must clearly show : # 1. Pre-process data # 1. Details of your implementation for each algorithm # 2. Fine-tune hyper-parameters for each algorithm and running time # 3. The comparison result between algorithms # 4. Hardware and software specifications of the computer that you used for performance evaluation # ### 4.1.1 Data pre-processing # You will need to have at least one pre-process techique before you can apply the classification algorithms. One of pre-process techique is using **Normalisation**. # ### 4.1.2 Classification algorithms with 10-fold cross-validation # You will now apply multiple classifiers to the pre-processed dataset. 
You have to implement at least 3 classifiers in particular: # # * Nearest Neighbor # * Logistic Regression # * Naïve Bayes # * Decision Tree # * Bagging # * Ada Boost # * SVM # # You need to evaluate the performance of these classifiers using 10-fold cross-validation. For binary classifiers, we can use those classifiers for the data which has more than 2 labels using the one-vs-rest method. The implementation can use sklearn, or can be implemented from scratch. # ### 4.1.3 Parameter Tuning # For each classifiers we would like to find the best parameters using grid search with 10-fold stratified cross validation. # ### 4.1.4 Classifier comparisons # After finding the best parameter for each algorithm, we would like to make comparisons between all classifiers using their own best hyper-parameters. # ## 4.2 Report # ### The report must clearly show: # 1. Details of your classifiers using for assignment 1 # 2. The predicted results from your classifier on test examples # 3. Results comparison and discussion # 4. Following the format in rubric : Introduction -> Methods -> Experiments result and discussion -> Conclusion # 5. The maximum length of the report is 10 (including references) # 6. Clearly provide instructions on how to run your code in the Appendix section of your report # 7. Detail of student including ID, name. # # 5. Instructions to hand in the assignment # # ### Go to Canvas -> Assignments -> "Assignment 1" and submit 3 files only: the report and the code files. # # 1) Report (a .pdf file). # # 2) Code (2 files include: a .ipynb file and a PDF file). PDF is exported from .ipynb file for plagiarism check. # The code must be able to be run with the following folder structure: # # - Classifiers (the root folder): Your .ipynb file containing Python code will be placed on this folder when we test and run your code. 
The PDF file is generated from .ipynb file (File => Save as PDF file) # # - Input (a sub-folder under Algorithm): We will copy the dataset into this Input folder when we run your code. Please make sure your code is able to read the dataset from this Input folder. # # - Output (a sub-folder under Algorithm): Your code must be able to generate a prediction file named “predicted_labels.h5” to be saved in this Output folder. The prediction file should contain predicted labels of the test dataset. We will use your prediction output file for grading purpose. # # If this is an individual work, an individual student needs to submit all the files which must be named with student ID numbers following format e.g. **SIDxxxx_report.pdf**, **SIDxxxx_code.ipynb**, **SIDxxxx_code.ipynb.pdf**. # # If this is a group work of 2, one student needs to submit all the files which must be named with student ID numbers of 2 members following format e.g. **SIDxxxx1_SIDxxxx2_report.pdf**, **SIDxxxx1_SIDxxxx2_code.ipynb**, **SIDxxxx1_SIDxxxx2_code.ipynb.pdf**. # ### A penalty of MINUS 5 percent (-5%) for each day after the due date. # The maximum delay for assignment submission is 5 (five) days, after which assignment will not be accepted. # # **You should upload your assignment at least half a day or one day prior to the submission deadline to avoid network congestion**. # # Canvas may not be able to handle a large number of submission happening at the same time. If you submit your assignment at a time close to the deadline, a submission error may occur causing your submission to be considered late. Penalty will be applied to late submission regardless of issues. # ### All files required for assignment 1 can be downloaded from Canvas -> Assignments -> Assignment 1 # # # 6. 
Academic honesty # Please read the University policy on Academic Honesty very carefully: # https://sydney.edu.au/students/academic-integrity.html # Plagiarism (copying from another student, website or other sources), making your work available to another student to copy, engaging another person to complete the assignments instead of you (for payment or not) are all examples of academic dishonesty. Note that when there is copying between students, both students are penalised – the student who copies and the student who makes his/her work available for copying. The University penalties are severe and include: # # * a permanent record of academic dishonesty on your student file, # * mark deduction, ranging from 0 for the assignment to Fail for the course # * expulsion from the University and cancelling of your student visa. # # In addition, the Australian Government passed a new legislation last year (Prohibiting Academic Cheating Services Bill) that makes it a criminal offence to provide or advertise academic cheating services - the provision or undertaking of work for students which forms a substantial part of a student’s assessment task. Do not confuse legitimate co-operation and cheating!
Assignment 1_2021.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (final)
#     language: python
#     name: final
# ---

# Filter the raw San Diego county parcel export down to residential parcels
# and write two derived CSVs:
#   * data/all_parcels.csv - address -> (lat, lng) lookup for every parcel
#   * data/san_diego.csv   - residential parcels only, selected columns

# +
import ast
import time
import requests
import re
import pandas as pd
import geopandas as gpd
import shapely as shp
import osmnx as ox
from tqdm._tqdm_notebook import tqdm_notebook

# BUG FIX: the original assigned `time = 1` here, silently shadowing the
# `time` module imported above. The value was never used anywhere in the
# notebook, so the dead assignment has been removed.
# -

# Register `DataFrame.progress_apply` progress bars.
tqdm_notebook.pandas()

# Read raw data file (n ~= 1.1 million) from csv.
df = pd.read_csv(
    'data/ca_san-diego.csv',
    dtype={'parcelnumb': str, 'usecode': str, 'mail_zip': str, 'parval': int},
)

# +
# Keep only the columns used downstream.
columns = ['parcelnumb', 'usecode', 'usedesc', 'zoning', 'address',
           'mailadd', 'mail_city', 'mail_state2', 'mail_zip',
           'mail_country', 'parval', 'lng', 'lat']
df = df[columns]
# -

# Build an address -> coordinates lookup for *all* parcels (before the
# residential filter below), keeping the first row per duplicated address.
# NOTE(review): the columns are renamed to mailing_lat/mailing_lng but hold
# the parcel's own lat/lng, not mailing-address coordinates -- confirm the
# intended semantics with the consumer of all_parcels.csv.
all_parcels = pd.DataFrame(df, columns=['address', 'lat', 'lng'])
all_parcels = all_parcels.set_index('address')
all_parcels.columns = ('mailing_lat', 'mailing_lng')
all_parcels = all_parcels[~all_parcels.index.duplicated()]
all_parcels.to_csv('data/all_parcels.csv')

# Inspect the land-use categories present (notebook display only).
df['usedesc'].unique()

# Restrict to residential use descriptions and persist the filtered set.
residential = ['Multi-Family Residential', 'Single Family Residential',
               'Mixed Use', 'Spaced Rural Residential', 'Mobile Homes']
df = df[df['usedesc'].isin(residential)]
df.to_csv('data/san_diego.csv', index=False)
1_investigate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4a/Python3-powered_hello-world.svg/1000px-Python3-powered_hello-world.svg.png" width="300px" height="100px" /> # # # # <font color= #8A0829> Simulación matemática.</font> # #### <font color= #2E9AFE> `G1 MAF2869D Lunes - Miércoles de 9:00 a 11:00 hrs`</font> # #### <font color= #2E9AFE> `G2 MAF2869I Martes - Viernes de 13:00 a 15:00 hrs`</font> # - <Strong> <NAME> </Strong> # - <Strong> Año: </Strong> 2020 - Otoño # - <Strong> Copyright: </Strong> Public Domain como en [CC](https://creativecommons.org/licenses/by/2.0/) (Exepto donde se indique lo contrario) # # - <Strong> Email: </Strong> <font color="blue"> <EMAIL>, <EMAIL> </font> # ___ # ## `Presentación de ustedes` # ___ # ## `Algunas reglas de juego` # + Se tomará asistencia dentro de los 15 primeros minutos de la clase. Si no contesta, se tomará con inasistencia. # + Si durante la clase se le hace alguna pregunta a algún estudiante y no responde, se tomará como inasistencia. Es decir, el estudiante debe estar disponible en cualquier momento que el profesor lo solicite. # + **Regla de oro:** Todas las entregas (Trabajos, tareas) se realizan a través de `canvas` con los plazos asignados. No se aceptan trabajos fuera de la plataforma ni fuera de plazo. No insistan en este punto, no hay negocio. # ___ # ___ # ## `Horario de asesoría` # # Siempre estaré disponible en el correo electrónico. 
Para definir algún horario en específico enviar correo a <EMAIL> y nos pondemos de a cuerdo para realizar una videollamada por `Webex` # # ___ # ## `Descripción de la asignatura` # # **Este es un curso básico de simulación utilizando python, por ende vamos a iniciar elaborando programas simples y conforme avancemos el nivel de exigencia aumentará, hasta donde el tiempo nos permita.** # # - Problemas de ingeniería: requieren soluciones adecuadas, eficientes y óptimas. # - La simulación matemática de escenarios es una estrategia relevante para resolver tales problemas, consiste en modelar numéricamente los principios físicos y matemáticos que rigen un fenómeno mediante el uso de lenguajes de modelado y herramientas de tecnologías de información. # - La asignatura está diseñada para que logres dichos propósitos e inicies un proceso que te permita apropiarte de desempeños profesionales muy útiles en tu formación profesional y en tu futuro, al incorporarte a la industria u organizaciones que te demandarán resolver e implementar la simulación de escenarios bajo diferentes situaciones a través de la sistematización de la solución al problema planteado. # ## `Objetivo general ` # > <p style='text-align: justify;'> A partir de la modelación de fenómenos físicos desarrollarás las competencias necesarias para reproducir escenarios de aplicación profesional que representen de manera más cercana el fenómeno objetivo. Podrás además realizar inferencias que ayuden a la óptima toma de decisiones en la solución de problemas.</p> # ## `Temas y subtemas` # `Módulo 1.` **Optimización** # > Se aplicará una evaluación (**exámen**) y se evaluará la presentación de un **proyecto.** # 1. ¿Qué es una simulación? - Introducción al lenguaje de trabajo (Python) e instalación de software # 2. `git, GitHub, GitKraken` # 3. `git, GitHub, GitKraken` (Continuación) # 4. Optimización de funciones de variable escalar con SymPy # 5. Programación Lineal # 6. Programación Lineal (Continuación) # 7. 
Ajuste de curvas # 8. Ajuste de curvas (Continuación) # 9. Clasificación # `Módulo 2.` **Montecarlo** # > Se aplicará una evaluación (**exámen**) y se evaluará una presentación de **proyecto.** # 1. Generación de números aleatorios # 1. Generación de variables aleatorias (Uniforme, triangular, exponencial) # 2. Simulación de una fila un servidor (fila de un banco, cafetería, etc). # 2. Caminata aleatoria # 3. Integrales # 4. Fractales aleatorios # 5. Bajar y organizar datos de Yahoo Finance (Pandas) # 6. Probabilidad precio-umbral # 7. Probabilidad precio-umbral (Continuación) # `Módulo 3.` **Ecuaciones diferenciales** # > Se aplicará una evaluación (**exámen**) y se evaluará una presentación de **proyecto.** # 1. Introducción a ecuaciones diferenciales # 2. ¿Cómo se mueve un péndulo? # 3. ¿Cómo crece una población? # 4. Modelo del rendimiento de una cuenta de ahorro # ## `Evaluación` # # - **3 Proyectos (trabajo en equipo) 50%** # - Cada proyecto tiene el mismo valor (16.66%) # - La evaluación de cada proyecto se divide en dos partes # - Reporte 50% # - Exposición 50% # - **3 Examenes 30%** # - **Tareas y actividades de clase 20%** # - <font color="red">Equipos de 2 integrantes mínimo y 3 máximo. **Esto no se negocia**</font>. # - Si durante algún proyecto las cosas no funcionan entre los integrantes, para el siguiente proyecto se pueden formar equipos nuevos. # ### `Bibliografía ` # > ``` # - Process Dynamics: Modeling, Analysis and Simulation by <NAME> # - Stochastic Simulation and Applications in Finance with MATLAB Programs by HuuTueHuynh # - Fluent Python by Ramalho, Luciano # - Python for Finance by Hilpisch, Yves # - Python for Scientists by Stewart, <NAME>. # - Mathematical Modeling in Continuum Mechanics by Temam & Miranville``` # Estos y muchos mas libros los pueden encontrar en la Biblioteca. 
# <script> # $(document).ready(function(){ # $('div.prompt').hide(); # $('div.back-to-top').hide(); # $('nav#menubar').hide(); # $('.breadcrumb').hide(); # $('.hidden-print').hide(); # }); # </script> # # <footer id="attribution" style="float:right; color:#808080; background:#fff;"> # Created with Jupyter by <NAME>. # </footer>
Modulo 1/Clase0_GuiaSimMat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np from StringIO import StringIO import pandas as pd from __future__ import division import matplotlib.pyplot as plt import urllib2,base64 import cPickle as pickle from collections import Counter from collections import defaultdict from collections import OrderedDict from __future__ import division import matplotlib.pyplot as plt import matplotlib.mlab as mlab import scipy as sp import gdal,ogr from gdalconst import * import datetime import math import operator import re import os import os.path import requests, time import urllib, json import csv import seaborn as sns; sns.set() import glob import newlinejson as nlj from calendar import monthrange import netCDF4 from netCDF4 import Dataset import xarray as xr import dask as ds from dask import dataframe as dd # %matplotlib inline # ### 1. 
Extract tweet records #year="2014" years=["2013","2014","2015","2016"] tweets="ALL" retweets="RT" # + df_list=list() df_relist=list() for year in years: path_to_tweets = "ALL-cities/" + tweets+"-cities-"+year json_files = [pos_json for pos_json in os.listdir(path_to_tweets) if pos_json.endswith('.json')] # we need both the json and an index number so use enumerate() for index, js in enumerate(json_files): data=pd.read_json(os.path.join(path_to_tweets, js), lines=True) df_list.append(data) ############################################################################################################################## path_to_retweets = "RT-cities/" + retweets+"-cities-"+year json_files = [pos_json for pos_json in os.listdir(path_to_retweets) if pos_json.endswith('.json')] # we need both the json and an index number so use enumerate() for index, js in enumerate(json_files): data=pd.read_json(os.path.join(path_to_retweets, js), lines=True) df_relist.append(data) cities_df = pd.concat(df_list) cities_df = cities_df.sort_values(by=['lat', 'timestamp']).reset_index(drop=True) cities_re_df = pd.concat(df_relist) cities_re_df = cities_re_df.sort_values(by=['lat', 'timestamp']).reset_index(drop=True) cities_df = pd.merge(cities_df, cities_re_df, on=['lat','lon','timestamp'],suffixes=('_tweet', '_retweet')) cities_df = cities_df.drop_duplicates(['lat','lon','timestamp']) cities_df['ratio']=cities_df['count_retweet']/cities_df['count_tweet'] cities_df['idx'] = pd.Categorical(cities_df['lat'].astype(str) + '_' + cities_df['lon'].astype(str)).codes #add "month","year" to each entry cities_df.timestamp = cities_df.timestamp.astype(str) cities_df['timestamp']=pd.to_datetime(cities_df['timestamp'], errors='coerce') cities_df['month']=cities_df['timestamp'].dt.month cities_df['year']=cities_df['timestamp'].dt.year # + ### demean month average from each record 
cities_grouped_df=cities_df.groupby([cities_df.idx,cities_df.month,cities_df.year])['count_tweet'].sum().to_frame().reset_index() cities_regrouped_df=cities_df.groupby([cities_df.idx,cities_df.month,cities_df.year])['count_retweet'].sum().to_frame().reset_index() cities_grouped_df = pd.merge(cities_grouped_df, cities_regrouped_df, on=['idx','month','year']) mean_tweet_list=cities_grouped_df.apply(lambda row: row['count_tweet'] / monthrange(row.year,int(row.month))[1], axis=1) mean_retweet_list=cities_grouped_df.apply(lambda row: row['count_retweet'] / monthrange(row.year,int(row.month))[1], axis=1) cities_grouped_df['mean_tweet'] = mean_tweet_list cities_grouped_df['remean_tweet'] = mean_retweet_list #cities_grouped_df['mean_tweet'] = cities_grouped_df.apply(lambda row: row['count_tweet'] / monthrange(row.year,int(row.month))[1], axis=1) #cities_grouped_df['mean_retweet'] = cities_grouped_df.apply(lambda row: row['count_retweet'] / monthrange(row.year,int(row.month))[1], axis=1) cities_df = pd.merge(cities_df, cities_grouped_df.drop(columns=['count_tweet', 'count_retweet']), on=['idx', 'month','year'], how='left') cities_df = cities_df.rename(columns={'lat': 'LAT', 'lon': 'LON'}) # - cities_df.head(34) cities_centers=dict() cities_centers['new york']=(40.7128,-74.0060) cities_centers['los angeles']=(34.0522,-118.2437) cities_centers['chicago']=(41.8781,-87.6298) cities_centers['houston']=(29.7604, -95.3698) cities_centers['phoenix']=(33.4484,-112.0740) cities_centers['philadelphia']=(39.9526,-75.1652) cities_centers['san antonio']=(29.4241, -98.4936) cities_centers['san diego']=(32.7157, -117.1611) cities_centers['dallas']=(32.7767, -96.7970) cities_centers['san jose']=(37.3382, -121.8863) cities_centers['austin']=(30.2672,-97.7431) cities_centers['jacksonville']=(30.3322,-81.6557) cities_centers['san francisco']=(37.7749,-122.4194) cities_centers['columbus']=(39.9612,-82.9988) cities_centers['fort worth']=(32.7555, -97.3308) 
cities_centers['indianapolis']=(39.7684, -86.1581) cities_centers['charlotte']=(35.2271,-80.8431) cities_centers['seattle']=(47.6062, -122.3321) cities_centers['denver']=(39.7392, -104.9903) cities_centers['washington d.c.']=(38.9072, -77.0369) cities_centers['boston']=(42.3601,-71.0589) # ### 2. Attach weather data to each tweet record # + def getWeatherNormalPRIMS(locs_df, img): ####USed for TMAX/TMIN/PRCP band = img.GetRasterBand(1) nodatavalue = band.GetNoDataValue() ncol = img.RasterXSize nrow = img.RasterYSize geotransform = img.GetGeoTransform() originX = geotransform[0] b=geotransform[2] originY = geotransform[3] pixelWidth = geotransform[1] d=geotransform[4] pixelHeight = geotransform[5] data = band.ReadAsArray() data[data == nodatavalue] = np.nan data_ind_array= np.argwhere(~np.isnan(data)).astype(float) points_list = np.array(zip(locs_df.LAT, locs_df.LON)) #list of X,Y coordinates values_list=list() for point in points_list: row = int((point[0] - originY) / pixelHeight) col = int((point[1] - originX) / pixelWidth) #print point[0],point[1], row,col, data[row][col] values_list.append(data[row][col]) return np.array(values_list) ############################################################################################################################################ def getWeatherNormalNCEP(locs_df,Dataset, M, i): ##HUM/CC/WIND datafram [LAT,LON] ; Dataset; Metrics; Date date_index =i #topoin,lons = shiftgrid(180.,topoin,Dataset.variables['lon'][:],start=False) #lat_idx = np.abs(lats - darwin['lat']).argmin() #lon_idx = np.abs(lons - darwin['lon']).argmin() data_list=list() if M=="HUM": rhum_var = Dataset.variables['rhum'][:] #rhum_var[rhum_var==-32767]=np.nan #rhum_var,lons = shiftgrid(180.,rhum_var,Dataset.variables['lon'][:],start=False) #rhum_var = Dataset.variables['rhum'][date_index,0,41.87,-87.62] lats=Dataset.variables['lat'][:] lons=Dataset.variables['lon'][:] for index, row in locs_df.iterrows(): lat_idx = np.abs(lats - row['LAT']).argmin() 
lon_idx = np.abs(lons - (row['LON'] % 360)).argmin() #print date_index[0],lat_idx,lon_idx rhum_var = Dataset.variables['rhum'][date_index,0,lat_idx,lon_idx] data_list.append(rhum_var) if M=="CC": tcdc_var = Dataset.variables['tcdc'][:] #tcdc_var[tcdc_var==-32767]=np.nan #tcdc_var,lons = shiftgrid(180.,tcdc_var,Dataset.variables['lon'][:],start=False) lats=Dataset.variables['lat'][:] lons=Dataset.variables['lon'][:] for index, row in locs_df[['LAT','LON']].iterrows(): lat_idx = np.abs(lats - row['LAT']).argmin() lon_idx = np.abs(lons - (row['LON'] % 360)).argmin() tcdc_var = Dataset.variables['tcdc'][date_index,lat_idx,lon_idx] data_list.append(tcdc_var) if M=="WIND": uwind_var = Dataset['uwnd'].variables['uwnd'][:] vwind_var = Dataset['vwnd'].variables['vwnd'][:] #uwind_var[uwind_var==-32767]=np.nan #vwind_var[vwind_var==-32767]=np.nan #uwind_var,uwind_lons = shiftgrid(180.,uwind_var,Dataset['uwnd'].variables['lon'][:],start=False) #vwind_var,vwind_lons = shiftgrid(180.,vwind_var,Dataset['vwnd'].variables['lon'][:],start=False) uwind_lats=Dataset['uwnd'].variables['lat'][:] vwind_lats=Dataset['vwnd'].variables['lat'][:] uwind_lons=Dataset['uwnd'].variables['lon'][:] vwind_lons=Dataset['vwnd'].variables['lon'][:] for index, row in locs_df[['LAT','LON']].iterrows(): ulat_idx = np.abs(uwind_lats - row['LAT']).argmin() ulon_idx = np.abs(uwind_lons - (row['LON'] % 360)).argmin() vlat_idx = np.abs(vwind_lats - row['LAT']).argmin() vlon_idx = np.abs(vwind_lons - (row['LON'] % 360)).argmin() uwind_var = Dataset['uwnd'].variables['uwnd'][date_index,0,ulat_idx,ulon_idx] vwind_var = Dataset['vwnd'].variables['vwnd'][date_index,0,vlat_idx,vlon_idx] wind_var=math.sqrt(math.pow(uwind_var,2)+ math.pow(vwind_var,2)) data_list.append(wind_var) #print lons #index=netCDF4.date2num(dt,Dataset.variables['time'].units) return np.array(data_list) def ncdump(nc_fid, verb=True): ''' ncdump outputs dimensions, variables and their attribute information. 
The information is similar to that of NCAR's ncdump utility. ncdump requires a valid instance of Dataset. Parameters ---------- nc_fid : netCDF4.Dataset A netCDF4 dateset object verb : Boolean whether or not nc_attrs, nc_dims, and nc_vars are printed Returns ------- nc_attrs : list A Python list of the NetCDF file global attributes nc_dims : list A Python list of the NetCDF file dimensions nc_vars : list A Python list of the NetCDF file variables ''' def print_ncattr(key): """ Prints the NetCDF file attributes for a given key Parameters ---------- key : unicode a valid netCDF4.Dataset.variables key """ try: print "\t\ttype:", repr(nc_fid.variables[key].dtype) for ncattr in nc_fid.variables[key].ncattrs(): print '\t\t%s:' % ncattr,\ repr(nc_fid.variables[key].getncattr(ncattr)) except KeyError: print "\t\tWARNING: %s does not contain variable attributes" % key # NetCDF global attributes nc_attrs = nc_fid.ncattrs() if verb: print "NetCDF Global Attributes:" for nc_attr in nc_attrs: print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr)) nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions # Dimension shape information. if verb: print "NetCDF dimension information:" for dim in nc_dims: print "\tName:", dim print "\t\tsize:", len(nc_fid.dimensions[dim]) print_ncattr(dim) # Variable information. 
nc_vars = [var for var in nc_fid.variables] # list of nc variables if verb: print "NetCDF variable information:" for var in nc_vars: if var not in nc_dims: print '\tName:', var print "\t\tdimensions:", nc_fid.variables[var].dimensions print "\t\tsize:", nc_fid.variables[var].size print_ncattr(var) return nc_attrs, nc_dims, nc_vars # - cities_df.timestamp = cities_df.timestamp.dt.strftime("%Y%m%d").astype(str) cities_df = cities_df.sort_values(by=['timestamp','idx']).reset_index(drop=True) for norm in ['TMAX','TMIN','PRCP']: weather_list=list() for year in ['2013','2014','2015','2016']: date_range=np.array([d.strftime('%Y%m%d') for d in pd.date_range(year+'0101',year+'1231')]) for yearly_date in date_range: if norm=="PRCP": filename=r'/vsizip/data/PRISM/PRISM_'+"PPT".lower()+'_stable_4kmD2_'+year+'0101_'+year+'1231_bil.zip/PRISM_'+"PPT".lower()+'_stable_4kmD2_'+yearly_date+'_bil.bil' else: filename=r'/vsizip/data/PRISM/PRISM_'+norm.lower()+'_stable_4kmD1_'+year+'0101_'+year+'1231_bil.zip/PRISM_'+norm.lower()+'_stable_4kmD1_'+yearly_date+'_bil.bil' gdal.GetDriverByName('EHdr').Register() img = gdal.Open(filename, GA_ReadOnly) norm_list=getWeatherNormalPRIMS(cities_df[cities_df['timestamp']==yearly_date],img) weather_list.extend(norm_list) print "Get all reads for USA "+year+ ", "+ norm cities_df[norm]=weather_list for norm in ['HUM','CC','WIND']: weather_list=list() for year in ['2013','2014','2015','2016']: start_date=year+'0101' end_date=year+'1231' hum_year=Dataset('data/NCEP/rhum.'+year+'.nc') #nc_attrs, nc_dims, nc_vars = ncdump(hum_year) #print "********************************************************* SPLIT ****************************************************************" cc_year=Dataset('data/NCEP/tcdc.eatm.gauss.'+year+'.nc') #nc_attrs, nc_dims, nc_vars = ncdump(cc_year) #print "********************************************************* SPLIT ****************************************************************" 
vw_year=Dataset('data/NCEP/vwnd.10m.gauss.'+year+'.nc') uw_year=Dataset('data/NCEP/uwnd.10m.gauss.'+year+'.nc') wind_year=dict() wind_year['uwnd']=uw_year wind_year['vwnd']=vw_year #nc_attrs, nc_dims, nc_vars = ncdump(vw_year) #nc_attrs, nc_dims, nc_vars = ncdump(uw_year) data_year_dict=dict() data_year_dict['HUM']=hum_year data_year_dict['CC']=cc_year data_year_dict['WIND']=wind_year date_range=np.array([d.strftime('%Y%m%d') for d in pd.date_range(start_date,end_date)]) i=0 for date in date_range: weather_list.extend(getWeatherNormalNCEP(cities_df[cities_df['timestamp']==date],data_year_dict[norm], norm,i)) i=i+1 print "Get all reads for USA "+year+ ", "+ norm cities_df[norm]=weather_list cities_df['nratio']=cities_df['ratio'] - cities_df['remean_tweet']/cities_df['mean_tweet'] from matplotlib.pyplot import figure feature="ratio" figure(num=None, figsize=(18, 16), dpi=100, facecolor='w', edgecolor='k') plt.scatter(range(1461), cities_df[(cities_df['idx']==18)][feature], label="Chicago") plt.scatter(range(1461), cities_df[(cities_df['idx']==12)][feature], label="Washington D.C.") plt.scatter(range(1461), cities_df[(cities_df['idx']==7)][feature], label="Pheonix") plt.scatter(range(1461), cities_df[(cities_df['idx']==8)][feature], label="L.A.") plt.legend() cities_df.groupby(['idx'])['TMAX'].mean() cities_df.to_csv('data/US20_integrated.csv',encoding='utf-8') cities_df=pd.read_csv('data/US20_integrated.csv',encoding='utf-8',index_col=0) ####### these two lines do not have practical purpose cities_df.head(21)
Main/Data Modelling 20 Cities.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ankit-quant/PF-OP-StockMock/blob/main/Portfolio_Optimization_Capital_Allocation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ojk5Xp1XROeN" import pandas as pd import numpy as np import datetime as dt import matplotlib.pyplot as plt from google.colab import drive import warnings warnings.filterwarnings('ignore') # + colab={"base_uri": "https://localhost:8080/"} id="H2wHF6wwSnaj" outputId="a95fecc7-7dbd-4c06-d4f6-412a7bcefc70" drive.mount('/content/drive',force_remount=True) # + id="Yn8UiyemR2jV" from pypfopt.efficient_frontier import EfficientFrontier # + id="6nRMjZixSiYt" ## Read Raw Data from the Google Drive raw=pd.read_csv('/content/drive/MyDrive/Decibel Capital - Public Access/PFOPT - Returns.csv') # + id="K6mmUnkCczRe" data=raw.copy() ## Create a local copy data=data[1:] # Drop first row of data # + id="tMHR1nkEdipK" ## Data contains percentage returns as well as actual PNL returns. The following code removes PNL column and keep only return columns no_of_cols=len(data.columns) strat_count=int((no_of_cols-1)/2) for i in range(strat_count): #print(i) data.drop(data.columns[i+1],axis=1,inplace=True) data = data.iloc[:,:strat_count+1] # + id="A9xp1Du9gTrp" data.rename( columns={data.columns[0]:'date'}, inplace=True) data['date']=data['date'].apply(lambda x:dt.datetime.strptime(x[:-6],'%d %b %Y')) data.set_index('date',inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 490} id="C_56DFCdjIPW" outputId="e2e5dbb6-2f26-4fe5-8028-3118c5c741aa" data.cumsum().plot(figsize=(10,8)) # + [markdown] id="caahd8UQMBMO" # ## Mean Variance Optimisation Concepts # 1. 
Expected Return # 2. Expected Volatility # + id="47wNtsKXjQpd" # Define No of Portfolios to Test for Mean-Variance Optimization port_count=10000 wt_columns=[s+'_w' for s in data.columns.to_list()] #Append "_w" to the asset names to denote weights on assets wts=pd.DataFrame(columns=wt_columns) #Dataframe to keep asset weights mv_port=pd.DataFrame(columns=['sharpe','pf_ret','pf_vol']) # Dataframe to keep mean-variance portfolio # + id="nfNDySsllfOC" for i in range(port_count): w=np.random.random(strat_count) ## Generate random weights w=w/sum(w) ## Normalize the weights exp_return=np.sum(data.mean()*w*252) ##Future expected return is the mean return of each asset * weights assigned - annualized for 252 days exp_vol=np.sqrt(np.dot(w.T,np.dot(data.cov()*252,w))) ## Future Expected Volatiluty is given by Sqrt(w^T*Cov(Ret)*W) exp_sharpe=exp_return/exp_vol ## Future Expected Sharpe Ratio wts.loc[len(wts),:]=w ## Write the portfolio weights into the Dataframe mv_port.loc[len(mv_port),:]=[exp_sharpe,exp_return,exp_vol] ### Write Portfolio metrics into datafrae # + colab={"base_uri": "https://localhost:8080/"} id="50LgrNcOukVi" outputId="ec1dc746-64eb-43d9-9347-ed069535bb9a" mv_port=mv_port.astype('float64') ## Convert columns to float type from object type max_sharpe=mv_port['sharpe'].max() max_sharpe_pfidx=mv_port['sharpe'].idxmax() ##index of Portfolio with max Sharpe max_sharpe_vol=mv_port.loc[max_sharpe_pfidx,'pf_vol'] max_sharpe_ret=mv_port.loc[max_sharpe_pfidx,'pf_ret'] print('Max Sharpe Ratio is: {}'.format(max_sharpe)) print('Portfolio with max Sharpe Ratio is: {}'.format(max_sharpe_pfidx)) print('Vol of max Sharpe PF is: {}'.format(max_sharpe_vol)) print('Expected Return of max Sharpe PF is: {}'.format(max_sharpe_ret)) # + colab={"base_uri": "https://localhost:8080/", "height": 621} id="ULBjpj27zsCk" outputId="da2bb27b-d39b-4998-c97e-8d6f5f4dbed4" ## Plot Mean Variance Optimized Portfolio Chart plt.figure(figsize=(12,10)) 
plt.scatter(mv_port['pf_vol'],mv_port['pf_ret'],c=mv_port['sharpe'],cmap='viridis') plt.colorbar(label='Sharpe Ratio') plt.xlabel('Expected Volatility') plt.ylabel('Expected Return') plt.scatter(max_sharpe_vol,max_sharpe_ret,c='red',s=50) plt.title('Mean Variance Optimized Portfolio') plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 294} id="4jQERb9RNIFr" outputId="c05bc425-37a6-4595-b575-6869a4019a34" tgt_wts=wts.loc[max_sharpe_pfidx,] tgt_wts=tgt_wts.tolist() data['pf_ret']=data.mul(tgt_wts).sum(axis=1) #plt.figure(figsize=(12,10)) # + colab={"base_uri": "https://localhost:8080/", "height": 490} id="9gWC3Yfsjh1F" outputId="40cb7bb5-adf1-4058-f706-13f9dc5615cc" data.cumsum().plot(figsize=(10,8)) # + colab={"base_uri": "https://localhost:8080/"} id="y4HV3jQvSbVq" outputId="8705ec97-ae2a-4a67-db2c-af7e0de542f9" pip install empyrical # + id="6RCzflD6Ss4X" import empyrical # + colab={"base_uri": "https://localhost:8080/", "height": 266} id="N0oA63sWVEeB" outputId="2695293c-f884-4496-d191-7bf23e499aa0" portfolio_metrics=pd.DataFrame(columns=data.columns) portfolio_metrics.loc['Sharpe',:]=empyrical.sharpe_ratio(data) portfolio_metrics.loc['CAGR',:]=empyrical.cagr(data)*100 dd=empyrical.max_drawdown(data)*100 dd.index=data.columns portfolio_metrics.loc['Max_DD',:]=dd portfolio_metrics.loc['Calmar',:]=portfolio_metrics.loc['CAGR']/abs(portfolio_metrics.loc['Max_DD']) portfolio_metrics.append(empyrical.aggregate_returns(data,convert_to='yearly')*100)
Portfolio_Optimization_Capital_Allocation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Brute force random sample generation fixed applicability domain

# ### Original data import

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# %matplotlib inline
# -

# Load dataset
df = pd.read_csv('glass.csv')
df.head()

# Remove target / id columns, keeping only the composition features.
# (Original comments referenced 'cylinders'/'acceleration' -- stale copy
# from another dataset; this data uses the glass columns Na..Fe.)
df = df.drop(df.columns[[0, 1, 10]], axis=1)
print(df.shape)
df.head()

# +
# See the distributions of variables
start = df.columns.get_loc('Na')     # index of the first feature column
end = df.columns.get_loc('Fe') + 1   # one past the last feature column

fig = plt.figure(figsize=(10, 4))
sns.boxplot(data=df.iloc[:, start:end])
plt.xlabel('variables')
plt.ylabel('values')
plt.show()
# -

# Remove outliers if necessary
df = df[df['K'] < 5.0]
df = df[df['Ca'] < 10.0]
print(df.shape)

# ### Brute force

# Input the size of random variables (simulation sample size, e.g. 10000)
N = input('Input number of random variables for simulation : ')
N = int(N)

# Show original data range
df.describe().transpose()[['min', 'max']]

# +
# Use fixed applicability domain to change the range.
# NOTE(review): the Fe bounds (>= 0.0 and <= 0.0) force Fe == 0 exactly --
# confirm that this is the intended applicability domain.
df = df[df.iloc[:, 0] >= 12.0]
df = df[df.iloc[:, 1] >= 1.0]
df = df[df.iloc[:, 2] >= 0.5]
df = df[df.iloc[:, 3] >= 65.0]
df = df[df.iloc[:, 4] >= 0.0]
df = df[df.iloc[:, 5] >= 5.0]
df = df[df.iloc[:, 6] >= 0.0]
df = df[df.iloc[:, 7] >= 0.0]

df = df[df.iloc[:, 0] <= 17.0]
df = df[df.iloc[:, 1] <= 4.0]
df = df[df.iloc[:, 2] <= 3.5]
df = df[df.iloc[:, 3] <= 75.0]
df = df[df.iloc[:, 4] <= 2.0]
df = df[df.iloc[:, 5] <= 8.0]
df = df[df.iloc[:, 6] <= 2.0]
df = df[df.iloc[:, 7] <= 0.0]

print('Changed domain')
df.describe().transpose()[['min', 'max']]
# -

# +
# Generate N uniform random samples between the min and max of each column
list_columns = df.columns.values
print('id', 'element', 'min', 'max', sep='\t')

n_features = len(list_columns[start:end])
random_var = np.zeros(N * n_features).reshape(N, n_features)
for i, name in enumerate(list_columns[start:end]):
    min_column = min(df.iloc[:, start + i])
    max_column = max(df.iloc[:, start + i])
    column_rand = np.random.uniform(min_column, max_column, N)
    random_var[:, i] = column_rand
    print(i, name, min_column, max_column, sep='\t')

df_comp_bf = pd.DataFrame(random_var)
df_comp_bf.columns = list_columns[start:end]
print('Shape of dataframe : ', df_comp_bf.shape)
# -

print(df_comp_bf.shape)
df_comp_bf.head()

# +
# Draw scatter plots of samples generated for reverse analysis:
# each feature column is plotted against the first feature column.
horz = 5  # number of graphs per row
vert = 5  # number of rows of graphs
axes = list()

print('Samples generated for backward prediction')
fig = plt.figure(figsize=(15, 15))
for i in range(0, end - start):
    axes.append(fig.add_subplot(vert, horz, i + 1))
    # BUG FIX: the original wrapped this body in `for j in range(start, end)`
    # without ever using j, redrawing the identical scatter 8 times (wasted
    # work and artificially boosted alpha density). Each pair is drawn once.
    x_sample = df_comp_bf.iloc[:, start]
    y_sample = df_comp_bf.iloc[:, i]
    axes[i].scatter(x_sample, y_sample, c='b', marker='.', alpha=0.01)
    axes[i].set_xlabel(df.columns[start], size=12)
    axes[i].set_ylabel(df.columns[i], size=12)

plt.subplots_adjust(wspace=0.5, hspace=0.4)
plt.show()
# -

# +
# Adjust total amount to 100% if necessary (row-wise renormalisation).
df_bf_adj = df_comp_bf.apply(lambda x: 100 * x / np.sum(x), axis=1)
print(df_bf_adj.shape)
df_bf_adj.head()
# -

# +
# Draw scatter plots of the renormalised samples (same layout as above).
horz = 5
vert = 5
axes = list()

print('Samples generated for reverse analysis')
fig = plt.figure(figsize=(15, 15))
for i in range(0, end - start):
    axes.append(fig.add_subplot(vert, horz, i + 1))
    # BUG FIX: same redundant inner loop removed here (see previous cell).
    x_sample = df_bf_adj.iloc[:, start]
    y_sample = df_bf_adj.iloc[:, i]
    axes[i].scatter(x_sample, y_sample, c='r', marker='.', alpha=0.01)
    axes[i].set_xlabel(df.columns[start], size=12)
    axes[i].set_ylabel(df.columns[i], size=12)

plt.subplots_adjust(wspace=0.5, hspace=0.4)
plt.show()
# -

# Sanity check: every adjusted row should sum to 100.
test = np.sum(df_bf_adj, axis=1)
print(test[0:2])
print(test[998:1000])  # NOTE(review): assumes N >= 1000; prints nothing otherwise

# Save generated samples.
# NOTE(review): the *unadjusted* samples (df_comp_bf) are saved, not the
# renormalised df_bf_adj -- confirm which set downstream analysis expects.
df_comp_bf.to_csv('generated_samples_brute_force_AD.csv', index=None)
[4]_Random_sample_generation_for_backward_prediction/Brute_force_fixed_applicability_domain.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 2015 Temperature Data Visualization And Comparison
#
# In this notebook we will be looking at the temperature variation between the years
# 2005 and 2014, then we will be doing a comparison with the year 2015's temperatures
# and see where 2015 surpasses that period's record **low** or **high** temperatures.
#
# An NOAA dataset has been stored in the file
# `Data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv`.
# The data comes from a subset of The National Centers for Environmental Information
# (NCEI) [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt)
# (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of
# land surface stations across the globe.
#
# Each row in the datafile corresponds to a single observation.
#
# It contains the following variables :
#
# * **id** : station identification code
# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)
# * **element** : indicator of element type
#   * TMAX : Maximum temperature (tenths of degrees C)
#   * TMIN : Minimum temperature (tenths of degrees C)
# * **value** : data value for element (tenths of degrees C)
#
# We will :
#
# 1. Write some python code which returns a line graph of the record high and record
#    low temperatures by day of the year over the period 2005-2014. The area between
#    the record high and record low temperatures for each day will be shaded.
# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the
#    ten year record (2005-2014) record high or record low was broken in 2015.
# 3. Watch out for leap days (i.e. February 29th); it is reasonable to remove these
#    points from the dataset for the purpose of this visualization.
# 4. Make sure that the visual effectively leverages the guidelines given for
#    effective visual design (e.g., beauty, truthfulness, functionality, and
#    insightfulness).
#
# The data contains temperature information for **Ann Arbor, Michigan, United States**,
# and the stations the data comes from are shown on the map below.

# +
# importing Python modules
import matplotlib.pyplot as plt
import mplleaflet
import pandas as pd
import warnings
warnings.filterwarnings('ignore')

# %matplotlib inline


def leaflet_plot_stations(binsize, hashid):
    """Plot the weather stations belonging to `hashid` on an interactive map.

    binsize -- size of the spatial bin used to name the station-list CSV
    hashid  -- hash identifying the group of stations to display
    """
    df = pd.read_csv('Data/BinSize_d{}.csv'.format(binsize))
    station_locations_by_hash = df[df['hash'] == hashid]
    lons = station_locations_by_hash['LONGITUDE'].tolist()
    lats = station_locations_by_hash['LATITUDE'].tolist()
    plt.figure(figsize=(8, 8))
    plt.scatter(lons, lats, c='r', alpha=0.7, s=200)
    return mplleaflet.display()


leaflet_plot_stations(400, 'fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89')
# -

# Loading the data
temperature_data = pd.read_csv('Data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')

# Getting data during the period of 2005-2014 (string comparison works because
# dates are ISO-formatted YYYY-MM-DD)
data = temperature_data[(temperature_data["Date"] >= "2005-01-01") & (temperature_data["Date"] <= "2014-12-31")]

# Getting data during the year 2015
data_2015 = temperature_data[(temperature_data["Date"] >= "2015-01-01") & (temperature_data["Date"] <= "2015-12-31")]

# We will remove the leap days for the first data frame, but not for 2015 since
# it's **not** a leap year.

# Removing Leap days
data = data[~data.Date.str.endswith('02-29')].copy()

# Sorting the data by Date
data = data.sort_values("Date")
data.head()

data_2015.head()

# We will first start with the first data frame that contains the 2005-2014 data,
# then we will repeat the same steps for 2015's data.

# Converting the "Date" column to datetime
data["Date"] = list(map(pd.to_datetime, data["Date"]))

# Dividing the data into two dataframes for high and low
high = data[data["Element"] == "TMAX"]
low = data[data["Element"] == "TMIN"]

# +
# Getting record high and low temperature values for each day of the year during
# the period of 2005-2014. Re-anchoring every date to (non-leap) 2015 before
# taking dayofyear aligns leap and non-leap years on the same 1..365 axis.
record_high = high.copy()
record_high['dayofyear'] = record_high['Date'].map(lambda x: x.replace(year=2015).dayofyear)
record_high = record_high.groupby("dayofyear").max()

record_low = low.copy()
record_low['dayofyear'] = record_low['Date'].map(lambda x: x.replace(year=2015).dayofyear)
record_low = record_low.groupby("dayofyear").min()
# -

# And now for 2015's data :

# +
# Sorting values by Date
data_2015 = data_2015.sort_values("Date")

# Converting dates to datetime type
data_2015["Date"] = list(map(pd.to_datetime, data_2015["Date"]))

# Dividing the data into two dataframes for high and low
high_2015 = data_2015[data_2015["Element"] == "TMAX"]
low_2015 = data_2015[data_2015["Element"] == "TMIN"]

# Getting record high and low temperature values for each day of the year 2015
record_high_2015 = high_2015.copy()
record_high_2015["dayofyear"] = record_high_2015["Date"].dt.dayofyear
record_high_2015 = record_high_2015.groupby("dayofyear").max()

record_low_2015 = low_2015.copy()
record_low_2015["dayofyear"] = record_low_2015["Date"].dt.dayofyear
record_low_2015 = record_low_2015.groupby("dayofyear").min()
# -

# Resetting dataframes indexes so all four frames share the same positional index
record_low = record_low.reset_index()
record_high = record_high.reset_index()
record_low_2015 = record_low_2015.reset_index()
record_high_2015 = record_high_2015.reset_index()

# Getting indexes of highs and lows that were broken in 2015
broken_lows = (record_low_2015[record_low_2015["Data_Value"] < record_low['Data_Value']]).index.tolist()
broken_highs = (record_high_2015[record_high_2015['Data_Value'] > record_high['Data_Value']]).index.tolist()

# ### Visualizing the data :

# +
plt.figure(figsize=(20, 7))

plt.plot(record_high["Data_Value"], c="r", alpha=0.8, label='Record High 2005-2014')
plt.plot(record_low["Data_Value"], c="b", alpha=0.8, label='Record Low 2005-2014')

plt.scatter(broken_lows, record_low_2015['Data_Value'].iloc[broken_lows],
            s=20, c='black', label='Record Low broken in 2015')
# FIX: the broken-high markers were originally drawn with c='b', the same blue
# as the "Record Low 2005-2014" line, which made the legend misleading.
# A distinct dark-red colour now matches the "high" convention.
plt.scatter(broken_highs, record_high_2015['Data_Value'].iloc[broken_highs],
            s=20, c='darkred', alpha=0.8, label='Record High broken in 2015')

plt.legend()
plt.title("2015's temperature breaking points against 2005-2014 in Ann Arbor, Michigan, US")
plt.fill_between(range(len(record_low)), record_low["Data_Value"], record_high["Data_Value"],
                 facecolor='pink', alpha=0.11);

# Aligning plot
plt.gca().axis([-1, 365, -400, 450])

# Hiding plot spines
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

# Changing Vertical and Horizontal Ticks labels
month_ticks = [0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330]
divs = [i + 15 for i in month_ticks]  # center each month label within its month
month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
plt.xticks(divs, month_names)

# Data values are in tenths of degrees C; relabel the y axis in degrees C
temp = [str(tick / 10) + str(' °C') for tick in plt.gca().get_yticks()]
plt.gca().set_yticklabels(temp);

plt.savefig('Temp_Plot.png');
# -
Temperature Visualization.ipynb